repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
---|---|---|---|---|---|---|---|---|---|---|
firebitsbr/termineter | framework/modules/dump_tables.py | 1 | 3439 | # framework/modules/dump_tables.py
#
# Copyright 2011 Spencer J. McIntyre <SMcIntyre [at] SecureState [dot] net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import os
from time import sleep
from c1218.errors import C1218ReadTableError
from c1219.data import C1219_TABLES
from framework.templates import TermineterModuleOptical
class Module(TermineterModuleOptical):
def __init__(self, *args, **kwargs):
TermineterModuleOptical.__init__(self, *args, **kwargs)
self.version = 2
self.author = ['Spencer McIntyre']
self.description = 'Dump Readable C12.19 Tables From The Device To A CSV File'
self.detailed_description = 'This module will enumerate the readable tables on the smart meter and write them out to a CSV formatted file for analysis. The format is table id, table name, table data length, table data. The table data is represented in hex.'
self.options.add_integer('LOWER', 'table id to start reading from', default=0)
self.options.add_integer('UPPER', 'table id to stop reading from', default=256)
self.options.add_string('FILE', 'file to write the csv data into', default='smart_meter_tables.csv')
def run(self):
conn = self.frmwk.serial_connection
logger = self.logger
lower_boundary = self.options['LOWER']
upper_boundary = self.options['UPPER']
out_file = open(self.options['FILE'], 'w', 1)
if not self.frmwk.serial_login():
logger.warning('meter login failed, some tables may not be accessible')
number_of_tables = 0
self.frmwk.print_status('Starting Dump. Writing table data to: ' + self.options.get_option_value('FILE'))
for tableid in xrange(lower_boundary, (upper_boundary + 1)):
try:
data = conn.get_table_data(tableid)
except C1218ReadTableError as error:
data = None
if error.code == 10: # ISSS
conn.stop()
logger.warning('received ISSS error, connection stopped, will sleep before retrying')
sleep(0.5)
if not self.frmwk.serial_login():
logger.warning('meter login failed, some tables may not be accessible')
try:
data = conn.get_table_data(tableid)
except C1218ReadTableError as error:
data = None
if error.code == 10:
raise error # tried to re-sync communications but failed, you should reconnect and rerun the module
if data:
self.frmwk.print_status('Found readable table, ID: ' + str(tableid) + ' Name: ' + (C1219_TABLES.get(tableid) or 'UNKNOWN'))
# format is: table id, table name, table data length, table data
out_file.write(','.join([str(tableid), (C1219_TABLES.get(tableid) or 'UNKNOWN'), str(len(data)), data.encode('hex')]) + os.linesep)
number_of_tables += 1
out_file.close()
self.frmwk.print_status('Successfully copied ' + str(number_of_tables) + ' tables to disk.')
return
| gpl-3.0 | 2,490,079,865,939,088,000 | 45.472973 | 259 | 0.719104 | false |
skeezix/compo4all | spaghetti-server/singlescore_handler.py | 1 | 12920 |
# update_hi - receive binary and i) parse it, ii) update json tally as needed, iii) store .hi file for later
# get_hi -- fetch a bin for the emu
# get_json_tally - dump highscore table as json (for fancy frontend to display, say)
# get_html_tally - dump highscore in vaguely readable html table (for web browser quickies)
# get_last_modify_epoch - get epoch-time of last tally modify
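#
# Usage sketch (illustrative; the 'req' field names below are inferred from
# how this module reads them, not from a documented schema):
#
#     req = {'gamename': 'pacman', 'prid': 'abc123',
#            '_bindata': raw_hi_bytes, '_binlen': len(raw_hi_bytes)}
#     update_hi(req)        # parse the binary and merge into the JSON tally
#     get_json_tally(req)   # req['_bindata'] now holds the JSON text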
import logging
import json
import array
import os
import pprint
import time
import traceback
import profile
from paths import _basepath
import modulemap
import activity_log
SCOREBOARD_MAX=500
logging.info ( "LOADING: singlescore_handler" )
# "score" should not be supplied, unless its multiscore sending its shit here
def update_hi ( req, score_int=None ):
#pp = pprint.PrettyPrinter ( indent=4 )
# base game path
writepath = _basepath ( req )
try:
logging.debug ( "Attempt to create dirs %s" % ( writepath ) )
os.makedirs ( writepath )
except:
pass
# pull up existing tally file
#
tally = _read_tally ( req )
sb = tally [ 'scoreboard' ]
# parse new hi buffer
#
if score_int:
hi = score_int
else:
hi = parse_hi_bin ( req, req [ '_bindata' ] )
# is any of this new buffer better than existing tally?
# if so, update tally file and record it
# if not, we're done
# new tally update? great ..
# .. store hi-file
# .. store new tally file
# -------
# does this score factor into the high score table, or too low to count?
if False and hi < sb [ SCOREBOARD_MAX - 1 ][ 'score' ]:
logging.info ( "hidb - %s - submitter score of %d is NOT sufficient to enter scoreboard (lowest %d, highest %d)" % ( req [ 'gamename' ], hi, sb [ SCOREBOARD_MAX - 1 ][ 'score' ], sb [ 0 ][ 'score' ] ) )
return
# is score same as existing top .. if so, it's just resubmitting the score they pulled down, likely, so.. discard
if False and hi == sb [ 0 ][ 'score' ]:
logging.info ( "hidb - %s - submitter score of %d is same as highest score .. probably just looping. (lowest %d, highest %d)" % ( req [ 'gamename' ], hi, sb [ SCOREBOARD_MAX - 1 ][ 'score' ], sb [ 0 ][ 'score' ] ) )
return
# okay, so the guy's score is at least better than one of them.. start at top, pushing the way down
if False:
logging.info ( "hidb - %s - submitter score of %d IS sufficient to enter scoreboard (lowest %d, highest %d)" % ( req [ 'gamename' ], hi, sb [ SCOREBOARD_MAX - 1 ][ 'score' ], sb [ 0 ][ 'score' ] ) )
# determine desired sort order
order = 'highest-first'
try:
_order = modulemap.gamemap [ req [ 'gamename' ] ] [ '_general'] [ 'ordering' ]
logging.info ( 'hidb - %s - ordering from conf is %s' % ( req [ 'gamename' ], _order ) )
if _order in ( 'highest-first' ,'lowest-first' ):
order = _order
else:
order = 'highest-first'
except:
pp = pprint.PrettyPrinter(indent=4)
pp.pprint ( modulemap.gamemap [ req [ 'gamename' ] ] )
print modulemap.gamemap [ req [ 'gamename' ] ]
traceback.print_exc()
logging.info ( 'hidb - %s - ordering -> exception .. assuming highest-first' % ( req [ 'gamename' ] ) )
order = 'highest-first'
logging.info ( 'hidb - %s - ordering to use is %s' % ( req [ 'gamename' ], order ) )
# create new score entry
d = dict()
d [ 'prid' ] = req [ 'prid' ]
d [ 'score' ] = hi
d [ 'time' ] = int ( time.time() )
# old: insert with manual assumed-ascending sort order
if False:
for i in range ( SCOREBOARD_MAX ):
if hi > sb [ i ][ 'score' ]:
# log the activity
activity_log.log_entry ( req, d, i )
# insert
sb.insert ( i, d )
# drop off last guy
sb.pop()
# if we updated the first entry, the very highest score, spit out a new .hi file
# (mspacman only has a single high score, so we only update it for the highest score.. not a whole table)
if i == 0 and score_int == None:
f = open ( writepath + req [ 'gamename' ] + ".hi", "w" )
f.write ( build_hi_bin ( req, sb [ 0 ][ 'score' ] ) )
f.close()
break
# insert at first, assuming a post-sort; we can drop the 'worst' entry after sort
if True:
sb.insert ( 0, d )
# update activity log.. try to find the entry match and publish it
if True:
for i in range ( SCOREBOARD_MAX ):
if d [ 'prid' ] == sb [ i ] [ 'prid' ] and d [ 'score' ] == sb [ i ] [ 'score' ] and d [ 'time' ] == sb [ i ] [ 'time' ]:
activity_log.log_entry ( req, d, i )
break
# post-sort to games desired sort order
# reverse=False -> ascending (lowest first), lowest is best
# reverse=True -> descending (highest first), highest is best -> most typical case
def _sortvalue ( entry ):
if entry [ 'score' ] == 0:
if order == 'lowest-first':
return 999999999999
else:
return -1
else:
return entry [ 'score' ]
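# e.g. with order == 'highest-first' a zero score maps to -1 and sinks to
# the bottom; with 'lowest-first' it maps to 999999999999 for the same
# reason, so untouched '_default_' slots never beat a real score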
if True:
reversify = True
if order == 'lowest-first':
reversify = False
try:
sb.sort ( key=_sortvalue, reverse=reversify )
except:
traceback.print_exc()
# drop 'worst' (last, since we sorted) entry
if True:
sb.pop()
#logging.info ( 'hidb - %s - sorted ' % ( req [ 'gamename' ] ) )
# update stats and write out the updated tally file
tally [ 'hi' ] = sb [ 0 ][ 'score' ]
tally [ 'prid' ] = sb [ 0 ][ 'prid' ]
tallyfile = json.dumps ( tally )
f = open ( writepath + req [ 'gamename' ] + ".json", "w" )
f.write ( tallyfile )
f.close()
#logging.debug ( "received len %d" % ( req [ '_binlen' ] ) )
return
def get_hi ( req ):
req [ '_bindata' ] = build_hi_bin ( req, 0 )
req [ '_binlen' ] = len ( req [ '_bindata' ] )
logging.info ( "%s - pulled generated zero-score hi file (len %s)" % ( req [ 'gamename' ], req [ '_binlen' ] ) )
'''
writepath = _basepath ( req )
try:
f = open ( writepath + req [ 'gamename' ] + ".hi", "r" )
bindata = f.read()
f.close()
req [ '_bindata' ] = bindata
req [ '_binlen' ] = len ( bindata )
logging.info ( "%s - pulled existant hi file (len %s)" % ( req [ 'gamename' ], req [ '_binlen' ] ) )
except:
req [ '_bindata' ] = build_hi_bin ( req, 270 )
req [ '_binlen' ] = len ( req [ '_bindata' ] )
logging.info ( "%s - pulled generated zero-score hi file (len %s)" % ( req [ 'gamename' ], req [ '_binlen' ] ) )
'''
return
def get_json_tally ( req, raw=False ):
tally = _read_tally ( req )
for ent in tally [ 'scoreboard' ]:
prident = profile.fetch_pridfile_as_dict ( ent [ 'prid' ] )
if prident == None:
prident = profile.NULL_PROFILE
ent [ 'shortname' ] = prident [ 'shortname' ]
ent [ 'longname' ] = prident [ 'longname' ]
if '_general' in modulemap.gamemap [ req [ 'gamename' ] ]:
if 'dispunit' in modulemap.gamemap [ req [ 'gamename' ] ][ '_general' ]:
ent [ 'dispunit' ] = modulemap.gamemap [ req [ 'gamename' ] ] [ '_general' ][ 'dispunit' ]
del ent [ 'prid' ]
if raw:
req [ '_bindata' ] = tally
else:
req [ '_bindata' ] = json.dumps ( tally )
req [ '_binlen' ] = len ( req [ '_bindata' ] )
return
def get_html_tally ( req ):
tally = _read_tally ( req )
if '_backdate' in req:
if req [ '_backdate' ].isdigit():
timeframe = 'Specific Month: ' + req [ '_backdate' ]
else:
timeframe = 'All Time'
else:
timeframe = 'Current Month'
html = ''
html += "<h2>" + req [ 'gamename' ] + "</h2>\n"
html += "<h3>" + timeframe + "</h3>\n"
html += "<table>\n"
html += '<tr>\n'
html += ' <td style="padding:0 15px 0 15px;"><b>Rank</b></td>\n'
html += ' <td style="padding:0 15px 0 15px;"><b>Initial</b></td>\n'
html += ' <td style="padding:0 15px 0 15px;"><b>Name</b></td>\n'
html += ' <td style="padding:0 15px 0 15px;"><b>Score</b></td>\n'
html += ' <td style="padding:0 15px 0 15px;"><b>When</b></td>\n'
html += '</tr>\n'
i = 1
pridcache = dict()
lastprident = None
lastrun = 0 # for an RLE-like run count
for ent in tally [ 'scoreboard' ]:
prident = None
if ent [ 'prid' ]:
try:
prident = pridcache [ ent [ 'prid' ] ]
except:
prident = profile.fetch_pridfile_as_dict ( ent [ 'prid' ] )
pridcache [ ent [ 'prid' ] ] = prident
if prident == None:
prident = profile.NULL_PROFILE
tlocal = time.localtime ( ent [ 'time' ] )
tdisplay = time.strftime ( '%d-%b-%Y', tlocal )
# units
unit = ''
if '_general' in modulemap.gamemap [ req [ 'gamename' ] ]:
if 'dispunit' in modulemap.gamemap [ req [ 'gamename' ] ][ '_general' ]:
unit = ' ' + str ( modulemap.gamemap [ req [ 'gamename' ] ][ '_general' ][ 'dispunit' ] )
showrow = 1 # 0 no, 1 yes, 2 ellipses
if False: # True -> force to full length display
lastprident = None # if uncommented, forces full display .. no ellipses hidden entries
if lastprident == prident:
showrow = 0
lastrun += 1
else:
# if not first row, and the RLE is significant .. show an ellipses
if lastprident != None and lastrun > 0:
showrow = 2
else:
showrow = 1
# last and current are not the same, so RLE is back to zero
lastrun = 0
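# e.g. rows [A, A, A, B] render as "A", then a "..." row, then "B":
# consecutive rows from the same profile are collapsed into one ellipsis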
if showrow == 0:
pass # suppress
else:
if showrow == 2:
# so our last row is not same as this row, and last guy was not also the first
# row.. so show "..."
html += '<tr>\n'
html += ' <td style="padding:0 15px 0 15px;">' + "" + "</td>\n"
html += ' <td style="padding:0 15px 0 15px;">' + "" + "</td>\n"
html += ' <td style="padding:0 15px 0 15px;">' + "..." + "</td>\n"
html += ' <td style="padding:0 15px 0 15px;"></td>\n'
html += ' <td style="padding:0 15px 0 15px;"></td>\n'
html += '</tr>\n'
# showrow == 1, or showrow == 2 .. show this line
html += '<tr>\n'
html += ' <td style="padding:0 15px 0 15px;">' + str ( i ) + "</td>\n"
html += ' <td style="padding:0 15px 0 15px;">' + prident [ 'shortname' ] + "</td>\n"
html += ' <td style="padding:0 15px 0 15px;">' + prident [ 'longname' ] + "</td>\n"
if ent [ 'score' ] > 0:
html += ' <td style="padding:0 15px 0 15px;">' + str ( ent [ 'score' ] ) + unit + "</td>\n"
else:
html += ' <td style="padding:0 15px 0 15px;">-</td>\n'
if ent [ 'time' ] > 0:
html += ' <td style="padding:0 15px 0 15px;">' + tdisplay + "</td>\n"
else:
html += ' <td style="padding:0 15px 0 15px;"></td>\n'
html += '</tr>\n'
lastprident = prident
i += 1
html += "</table>\n"
html += "<p>%d unique profiles in the leaderboard</p>\n" % ( len ( pridcache ) )
req [ '_bindata' ] = html
req [ '_binlen' ] = len ( req [ '_bindata' ] )
return
def get_last_modify_epoch ( req ):
try:
filename = _basepath ( req ) + req [ 'gamename' ] + ".json"
return int ( os.path.getmtime ( filename ) )
except:
return 0
# ---------------
def _read_tally ( req ):
writepath = _basepath ( req )
try:
f = open ( writepath + req [ 'gamename' ] + ".json", "r" )
tallyfile = f.read()
f.close()
tally = json.loads ( tallyfile )
except:
logging.warning ( "%s - assuming new score file (all zeroes)" % ( req [ 'gamename' ] ) )
tally = dict()
tally [ 'hi' ] = 0
tally [ 'prid' ] = '_default_'
scoreboard = list()
for i in range ( SCOREBOARD_MAX ):
scoreboard.append ( { 'prid': '_default_', 'score': 0, 'time': 0 } )
tally [ 'scoreboard' ] = scoreboard
return tally
def parse_hi_bin ( req, bindata ):
return modulemap.gamemap [ req [ 'gamename' ] ][ 'module' ].parse_hi_bin ( req, bindata )
def build_hi_bin ( req, hiscore ):
return modulemap.gamemap [ req [ 'gamename' ] ][ 'module' ].build_hi_bin ( req, hiscore )
def done ( req ):
pass
| gpl-2.0 | 5,346,319,761,198,488,000 | 33.453333 | 223 | 0.516254 | false |
wpjesus/codematch | ietf/submit/models.py | 1 | 3263 | import re
import datetime
from django.db import models
from ietf.doc.models import Document
from ietf.person.models import Person
from ietf.group.models import Group
from ietf.name.models import DraftSubmissionStateName
from ietf.utils.accesstoken import generate_random_key, generate_access_token
def parse_email_line(line):
"""Split line on the form 'Some Name <[email protected]>'"""
m = re.match("([^<]+) <([^>]+)>$", line)
if m:
return dict(name=m.group(1), email=m.group(2))
else:
return dict(name=line, email="")
class Submission(models.Model):
state = models.ForeignKey(DraftSubmissionStateName)
remote_ip = models.CharField(max_length=100, blank=True)
access_key = models.CharField(max_length=255, default=generate_random_key)
auth_key = models.CharField(max_length=255, blank=True)
# draft metadata
name = models.CharField(max_length=255, db_index=True)
group = models.ForeignKey(Group, null=True, blank=True)
title = models.CharField(max_length=255, blank=True)
abstract = models.TextField(blank=True)
rev = models.CharField(max_length=3, blank=True)
pages = models.IntegerField(null=True, blank=True)
authors = models.TextField(blank=True, help_text="List of author names and emails, one author per line, e.g. \"John Doe <[email protected]>\".")
note = models.TextField(blank=True)
replaces = models.CharField(max_length=1000, blank=True)
first_two_pages = models.TextField(blank=True)
file_types = models.CharField(max_length=50, blank=True)
file_size = models.IntegerField(null=True, blank=True)
document_date = models.DateField(null=True, blank=True)
submission_date = models.DateField(default=datetime.date.today)
submitter = models.CharField(max_length=255, blank=True, help_text="Name and email of submitter, e.g. \"John Doe <[email protected]>\".")
idnits_message = models.TextField(blank=True)
def __unicode__(self):
return u"%s-%s" % (self.name, self.rev)
def authors_parsed(self):
res = []
for line in self.authors.replace("\r", "").split("\n"):
line = line.strip()
if line:
res.append(parse_email_line(line))
return res
def submitter_parsed(self):
return parse_email_line(self.submitter)
def access_token(self):
return generate_access_token(self.access_key)
def existing_document(self):
return Document.objects.filter(name=self.name).first()
class SubmissionEvent(models.Model):
submission = models.ForeignKey(Submission)
time = models.DateTimeField(default=datetime.datetime.now)
by = models.ForeignKey(Person, null=True, blank=True)
desc = models.TextField()
def __unicode__(self):
return u"%s %s by %s at %s" % (self.submission.name, self.desc, self.by.plain_name() if self.by else "(unknown)", self.time)
class Meta:
ordering = ("-time", "-id")
class Preapproval(models.Model):
"""Pre-approved draft submission name."""
name = models.CharField(max_length=255, db_index=True)
by = models.ForeignKey(Person)
time = models.DateTimeField(default=datetime.datetime.now)
def __unicode__(self):
return self.name
| bsd-3-clause | 8,802,075,955,322,732,000 | 35.662921 | 153 | 0.680968 | false |
esben/setuptools_scm | setuptools_scm/version.py | 1 | 4137 | from __future__ import print_function
import datetime
import re
from .utils import trace
from pkg_resources import iter_entry_points
from distutils import log
try:
from pkg_resources import parse_version, SetuptoolsVersion
except ImportError as e:
parse_version = SetuptoolsVersion = None
def _warn_if_setuptools_outdated():
if parse_version is None:
log.warn("your setuptools is too old (<12)")
log.warn("setuptools_scm functionality is degraded")
def callable_or_entrypoint(group, callable_or_name):
trace('ep', (group, callable_or_name))
if isinstance(callable_or_name, str):
for ep in iter_entry_points(group, callable_or_name):
return ep.load()
else:
return callable_or_name
def tag_to_version(tag):
trace('tag', tag)
# lstrip the v because of py2/py3 differences in setuptools
# also required for old versions of setuptools
version = tag.rsplit('-', 1)[-1].lstrip('v')
if parse_version is None:
return version
version = parse_version(version)
trace('version', repr(version))
if isinstance(version, SetuptoolsVersion):
return version
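# e.g. tag_to_version('myproj-v1.2.3') -> parse_version('1.2.3'): the
# rsplit keeps what follows the last '-' and the lstrip drops a leading 'v'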
def tags_to_versions(tags):
versions = map(tag_to_version, tags)
return [v for v in versions if v is not None]
class ScmVersion(object):
def __init__(self, tag_version,
distance=None, node=None, dirty=False,
**kw):
self.tag = tag_version
if dirty and distance is None:
distance = 0
self.distance = distance
self.node = node
self.time = datetime.datetime.now()
self.extra = kw
self.dirty = dirty
@property
def exact(self):
return self.distance is None
def __repr__(self):
return self.format_with(
'<ScmVersion {tag} d={distance}'
' n={node} d={dirty} x={extra}>')
def format_with(self, fmt):
return fmt.format(
time=self.time,
tag=self.tag, distance=self.distance,
node=self.node, dirty=self.dirty, extra=self.extra)
def format_choice(self, clean_format, dirty_format):
return self.format_with(dirty_format if self.dirty else clean_format)
def meta(tag, distance=None, dirty=False, node=None, **kw):
if SetuptoolsVersion is None or not isinstance(tag, SetuptoolsVersion):
tag = tag_to_version(tag)
trace('version', tag)
assert tag is not None, 'cant parse version %s' % tag
return ScmVersion(tag, distance, node, dirty, **kw)
def guess_next_version(tag_version, distance):
version = str(tag_version)
if '.dev' in version:
prefix, tail = version.rsplit('.dev', 1)
assert tail == '0', 'own dev numbers are unsupported'
return '%s.dev%s' % (prefix, distance)
else:
prefix, tail = re.match('(.*?)(\d+)$', version).groups()
return '%s%d.dev%s' % (prefix, int(tail) + 1, distance)
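# e.g. guess_next_version('1.2', distance=3) -> '1.3.dev3', while
# guess_next_version('1.3.dev0', distance=3) -> '1.3.dev3'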
def guess_next_dev_version(version):
if version.exact:
return version.format_with("{tag}")
else:
return guess_next_version(version.tag, version.distance)
def get_local_node_and_date(version):
if version.exact:
return version.format_choice("", "+d{time:%Y%m%d}")
else:
return version.format_choice("+n{node}", "+n{node}.d{time:%Y%m%d}")
def get_local_dirty_tag(version):
return version.format_choice('', '+dirty')
def postrelease_version(version):
if version.exact:
return version.format_with('{tag}')
else:
return version.format_with('{tag}.post{distance}')
def format_version(version, **config):
trace('scm version', version)
trace('config', config)
version_scheme = callable_or_entrypoint(
'setuptools_scm.version_scheme', config['version_scheme'])
local_scheme = callable_or_entrypoint(
'setuptools_scm.local_scheme', config['local_scheme'])
main_version = version_scheme(version)
trace('version', main_version)
local_version = local_scheme(version)
trace('local_version', local_version)
return version_scheme(version) + local_scheme(version)
| mit | 6,615,941,135,986,023,000 | 29.19708 | 77 | 0.639594 | false |
mlundblad/telepathy-gabble | tests/twisted/jingle-share/test-caps-file-transfer.py | 1 | 6395 | import dbus
from twisted.words.xish import xpath
from servicetest import (assertEquals, EventPattern)
from gabbletest import exec_test, make_result_iq, sync_stream, make_presence
import constants as cs
from caps_helper import compute_caps_hash, \
text_fixed_properties, text_allowed_properties, \
stream_tube_fixed_properties, stream_tube_allowed_properties, \
dbus_tube_fixed_properties, dbus_tube_allowed_properties, \
ft_fixed_properties, ft_allowed_properties_with_metadata
import ns
from jingleshareutils import test_ft_caps_from_contact
from config import FILE_TRANSFER_ENABLED
if not FILE_TRANSFER_ENABLED:
print "NOTE: built with --disable-file-transfer"
raise SystemExit(77)
def test(q, bus, conn, stream):
client = 'http://telepathy.freedesktop.org/fake-client'
test_ft_caps_from_contact(q, bus, conn, stream, '[email protected]/Foo',
2L, client)
# our own capabilities, formerly tested here, are now in
# tests/twisted/caps/advertise-contact-capabilities.py
generic_ft_caps = [(text_fixed_properties, text_allowed_properties),
(stream_tube_fixed_properties, \
stream_tube_allowed_properties),
(dbus_tube_fixed_properties, dbus_tube_allowed_properties),
(ft_fixed_properties, ft_allowed_properties_with_metadata)]
generic_caps = [(text_fixed_properties, text_allowed_properties),
(stream_tube_fixed_properties, \
stream_tube_allowed_properties),
(dbus_tube_fixed_properties, dbus_tube_allowed_properties)]
def check_contact_caps(conn, handle, with_ft):
conn_caps_iface = dbus.Interface(conn, cs.CONN_IFACE_CONTACT_CAPS)
conn_contacts_iface = dbus.Interface(conn, cs.CONN_IFACE_CONTACTS)
if with_ft:
expected_caps = dbus.Dictionary({handle: generic_ft_caps})
else:
expected_caps = dbus.Dictionary({handle: generic_caps})
caps = conn_caps_iface.GetContactCapabilities([handle])
assert caps == expected_caps, caps
# check the Contacts interface give the same caps
caps_via_contacts_iface = conn_contacts_iface.GetContactAttributes(
[handle], [cs.CONN_IFACE_CONTACT_CAPS], False) \
[handle][cs.ATTR_CONTACT_CAPABILITIES]
assert caps_via_contacts_iface == caps[handle], \
caps_via_contacts_iface
def test2(q, bus, connections, streams):
conn1, conn2 = connections
stream1, stream2 = streams
conn1_handle = conn1.Properties.Get(cs.CONN, 'SelfHandle')
conn1_jid = conn1.InspectHandles(cs.HT_CONTACT, [conn1_handle])[0]
conn2_handle = conn2.Properties.Get(cs.CONN, 'SelfHandle')
conn2_jid = conn2.InspectHandles(cs.HT_CONTACT, [conn2_handle])[0]
handle1 = conn2.RequestHandles(cs.HT_CONTACT, [conn1_jid])[0]
handle2 = conn1.RequestHandles(cs.HT_CONTACT, [conn2_jid])[0]
q.expect_many(EventPattern('dbus-signal',
signal='ContactCapabilitiesChanged',
path=conn1.object.object_path),
EventPattern('dbus-signal',
signal='ContactCapabilitiesChanged',
path=conn2.object.object_path))
check_contact_caps (conn1, handle2, False)
check_contact_caps (conn2, handle1, False)
caps_iface = dbus.Interface(conn1, cs.CONN_IFACE_CONTACT_CAPS)
caps_iface.UpdateCapabilities([("self",
[ft_fixed_properties],
dbus.Array([], signature="s"))])
_, presence, disco, _ = \
q.expect_many(EventPattern('dbus-signal',
signal='ContactCapabilitiesChanged',
path=conn1.object.object_path,
args=[{conn1_handle:generic_ft_caps}]),
EventPattern('stream-presence', stream=stream1),
EventPattern('stream-iq', stream=stream1,
query_ns=ns.DISCO_INFO,
iq_type = 'result'),
EventPattern('dbus-signal',
signal='ContactCapabilitiesChanged',
path=conn2.object.object_path,
args=[{handle1:generic_ft_caps}]))
presence_c = xpath.queryForNodes('/presence/c', presence.stanza)[0]
assert "share-v1" in presence_c.attributes['ext']
conn1_ver = presence_c.attributes['ver']
found_share = False
for feature in xpath.queryForNodes('/iq/query/feature', disco.stanza):
if feature.attributes['var'] == ns.GOOGLE_FEAT_SHARE:
found_share = True
assert found_share
check_contact_caps (conn2, handle1, True)
caps_iface = dbus.Interface(conn2, cs.CONN_IFACE_CONTACT_CAPS)
caps_iface.UpdateCapabilities([("self",
[ft_fixed_properties],
dbus.Array([], signature="s"))])
_, presence, _ = \
q.expect_many(EventPattern('dbus-signal',
signal='ContactCapabilitiesChanged',
path=conn2.object.object_path,
args=[{conn2_handle:generic_ft_caps}]),
EventPattern('stream-presence', stream=stream2),
EventPattern('dbus-signal',
signal='ContactCapabilitiesChanged',
path=conn1.object.object_path,
args=[{handle2:generic_ft_caps}]))
presence_c = xpath.queryForNodes('/presence/c', presence.stanza)[0]
assert "share-v1" in presence_c.attributes['ext']
# Both sides now advertise the same capabilities, so no new caps disco is
# triggered (the hash is already cached); instead, verify that the hash
# is indeed the same
assert presence_c.attributes['ver'] == conn1_ver
found_share = False
for feature in xpath.queryForNodes('/iq/query/feature', disco.stanza):
if feature.attributes['var'] == ns.GOOGLE_FEAT_SHARE:
found_share = True
assert found_share
check_contact_caps (conn1, handle2, True)
if __name__ == '__main__':
exec_test(test)
exec_test(test2, num_instances=2)
| lgpl-2.1 | -8,873,281,189,688,629,000 | 41.350993 | 78 | 0.6 | false |
OpenNetworking/gcoin-community | qa/rpc-tests/proxy_test.py | 1 | 6147 | #!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import socket
import traceback, sys
from binascii import hexlify
import time, os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
'''
Test plan:
- Start gcoind's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on gcoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create gcoinds that connect to them
- Manipulate the gcoinds using addnode (onetry) and observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
'''
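# How the plan maps onto the nodes started in setup_nodes() below:
#   node0: -proxy only                  -> everything via serv1
#   node1: -proxy + -onion              -> onions via serv2, the rest via serv1
#   node2: -proxy with -proxyrandomize=1 -> serv2, fresh credentials per connection
#   node3: proxy on IPv6 localhost      -> serv3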
class ProxyTest(BitcoinTestFramework):
def __init__(self):
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', 13000 + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', 14000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', 15000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
def setup_nodes(self):
# Note: proxies are not used to connect to local nodes
# this is because the proxy to use is based on CService.GetNetwork(), which returns NET_UNROUTABLE for localhost
return start_nodes(4, self.options.tmpdir, extra_args=[
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0']
])
def node_test(self, node, proxies, auth):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: gcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: gcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing onion connection through node
node.addnode("bitcoinostk4e4re.onion:8333", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "bitcoinostk4e4re.onion")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:8333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "node.noumenon")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), 4)
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False)
if __name__ == '__main__':
ProxyTest().main()
| apache-2.0 | -8,708,953,383,756,219,000 | 41.10274 | 146 | 0.644217 | false |
Daruzon/twitter-profile-mgmt | htdocs/index.py | 1 | 2566 | #!/usr/bin/env python3
# coding: utf-8
# enable debugging
if __name__ == "__main__":
import cgi, cgitb, sys, os, locale, codecs, io, importlib
from datetime import datetime
from time import time
#cgitb.enable()
os.environ['CONFPATH'] = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))+"/app/includes/config"
sys.path.append( os.environ['CONFPATH'] )
import config
config.set_incpath()
import request, session, output
#output.render_headers()
_error_dev_ = True
_error_url_ = False
# Buffering: capture stdout into a StringIO while the controller runs
try:
__real_stdout__ = sys.stdout
__buffer_out__ = io.StringIO()
sys.stdout = __buffer_out__
#output.echo(config.headers(), hook='http-headers')
controller = importlib.import_module(request.called_module, package=None)
_error_dev_ = False
except request.PageNotFoundException as e:
_error_url_ = True
_error_dev_ = False
except Exception as e:
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
#print(sys.exc_info())
# Flush the captured buffer back to the browser
finally:
sys.stdout = sys.__stdout__ = codecs.getwriter("utf-8")(sys.__stdout__.detach())
out = __buffer_out__.getvalue()
__buffer_out__.close()
# On a 500 error
if _error_dev_ == True :
import os
with open(os.path.dirname(os.path.abspath(__file__))+'/static/errors/500.html', 'r') as content_file:
content = content_file.read()
print('Status: 500 Server Error')
print('Content-Type: text/html')
print()
print(content)
print("<div style='display:none;'>")
print(repr(traceback.format_tb(exc_traceback)).replace("\\n', '", "',\n '").replace("\\n", "\n"))
print(exc_value)
print(exc_type)
print(out)
print("</div>")
# On a 404 error
elif _error_url_ == True :
import os
with open(os.path.dirname(os.path.abspath(__file__))+'/static/errors/404.html', 'r') as content_file:
content = content_file.read()
#print('Location: '+os.environ['REQUEST_SCHEME']+'://'+os.environ['HTTP_HOST']+'/static/errors/404.html')
print('Status: 404 Not Found')
print('Content-Type: text/html')
print()
print(content)
# Otherwise
else:
output.echo(out)
# Send the page headers
# These headers can be overridden via output.echo calls to the 'http-headers' hook
output.render_headers()
# Send the generated content.
# Everything that was printed has been captured into the {{main}} hook.
output.render_response()
else:
print("Content-Type: text/html;charset=utf-8")
print()
print("Bad request")
| mit | 5,593,159,257,900,130,000 | 25.905263 | 108 | 0.652582 | false |
firtek/flap.py | flap.py | 1 | 4358 | #!/usr/bin/env python3
from copy import copy
import pyglet
from pyglet import gl
import settings
from sprites import Bird, Background, Floor, Pipe
from utils import get_sprite, check_collision
def main(callback=None):
# global score starts at -1 because it is incremented when the first pipe spawns
global score
score = -1
# Initialize window
window = pyglet.window.Window(width=settings.window_width * settings.scale,
height=settings.window_height * settings.scale,
resizable=False)
window.clear()
scoreLabel = pyglet.text.Label("0", font_size=40, x=window.width//2, y=window.height, anchor_x='center', anchor_y='top')
# To pass to the callback
def click():
window.dispatch_event('on_mouse_press')
# Set up sprites
bird = Bird(window=window)
background = Background()
floor = Floor()
pipes = []
tap_to_start = get_sprite('tap.png')
gameover = get_sprite('gameover.png')
# Set up game state, which indicates whether the game has started and how long
# we have to wait until the next pipe appears.
class GameState(object):
def __init__(self):
self.reset()
def reset(self):
self.started = False
self.t_to_next_pipe = 2
# reset score label
scoreLabel._set_text("0")
state = GameState()
def update(dt):
global score
if not state.started:
return
if bird.alive:
state.t_to_next_pipe -= dt
if state.t_to_next_pipe < 0:
pipe = Pipe(space=75 * settings.scale, window=window)
pipes.append(pipe)
state.t_to_next_pipe += 2
# update score -- the -1 start compensates for the first pipe
score += 1
# directly set the new text on the score label
scoreLabel._set_text(str(score))
for pipe in copy(pipes):
if not pipe.visible:
pipes.remove(pipe)
# Move everything
background.update(dt)
for pipe in pipes:
pipe.update(dt)
floor.update(dt)
# Check for collisions
collision = check_collision(bird, floor) or any([check_collision(bird, pipe) for pipe in pipes])
if collision or bird.y > window.height:
bird.die()
if not bird.dead:
bird.update(dt)
if bird.dying and bird.y < -100:
bird.stop()
# reset the score on death
score = -1
# function to be used in key & mouse events
def still_playing():
if bird.alive:
bird.flap()
elif not state.started:
state.started = True
bird.start()
bird.flap()
elif bird.dead:
bird.reset()
pipes.clear()
state.reset()
@window.event
def on_mouse_press(*args):
still_playing()
@window.event
def on_key_press(*args):
still_playing()
@window.event
def on_draw():
window.clear()
background.blit()
for pipe in pipes:
pipe.blit()
floor.blit()
bird.blit()
if not state.started:
tap_to_start.blit(0.5 * (window.width - tap_to_start.width * 0.37), 0.43 * window.height)
if bird.dying or bird.dead:
gameover.blit(0.5 * (window.width - gameover.width), 0.5 * window.height)
if callback is not None:
import numpy as np
buf = (gl.GLubyte * (3 * window.width * window.height))(0)
gl.glReadPixels(0, 0, window.width, window.height,
gl.GL_RGB,
gl.GL_UNSIGNED_BYTE, buf)
array = np.frombuffer(buf, dtype='<u1')
array = array.reshape(window.height, window.width, 3)
array = array[::settings.scale, ::settings.scale]
callback(array, click, alive=bird.alive)
# draw score
scoreLabel.draw()
gl.glEnable(gl.GL_BLEND)
gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
pyglet.clock.schedule_interval(update, 0.01)
pyglet.app.run()
if __name__ == "__main__":
main()
| bsd-2-clause | 8,000,193,271,814,344,000 | 24.786982 | 124 | 0.539468 | false |
mauriceling/dose | dose/copads/statisticsdistribution.py | 1 | 134396 | """
Classes for Various Statistical Distributions.
References:
- Regress+ A compendium of common probability distributions (version 2.3)
by Michael P. McLaughlin ([email protected])
http://www.causascientia.org/math_stat/Dists/Compendium.pdf
- Hand-book on statistical distributions for experimentalists
Internal report SUF-PFY/96-01. University of Stockholms
by Christian Walck ([email protected])
Distributions:
- BetaDistribution(location, scale, p, q)
- PowerFunctionDistribution(shape)
- BinomialDistribution(success, trial)
- BernoulliDistribution(success)
- BradfordDistribution
- BurrDistribution
- CauchyDistribution(location=0.0, scale=1.0)
- LorentzDistribution (alias of CauchyDistribution)
- ChiDistribution
- HalfNormalDistribution(location, scale)
- MaxwellDistribution(scale)
- RayleighDistribution(scale)
- CosineDistribution(location=0.0, scale=1.0)
- DoubleGammaDistribution
- DoubleWeibullDistribution
- ExponentialDistribution(location=0.0, scale=1.0)
- NegativeExponentialDistribution (alias of ExponentialDistribution)
- ExtremeLBDistribution
- FDistribution
- FiskDistribution
- LogLogisticDistribution (alias of FiskDistribution)
- FoldedNormalDistribution
- GammaDistribution
- ChiSquareDistribution(df)
- ErlangDistribution(shape)
- FurryDistribution (alias of GammaDistribution)
- GenLogisticDistribution
- GeometricDistribution(success=0.5)
- GumbelDistribution(location, scale)
- FisherTippettDistribution (alias of GumbelDistribution)
- GompertzDistribution (alias of GumbelDistribution)
- LogWeibullDistribution (alias of GumbelDistribution)
- HyperbolicSecantDistribution
- HypergeometricDistribution
- InverseNormalDistribution
- WaldDistribution (alias of InverseNormalDistribution)
- LaplaceDistribution
- BilateralExponentialDistribution (alias of LaplaceDistribution)
- DoubleExponentialDistribution (alias of LaplaceDistribution)
- LogarithmicDistribution(shape)
- LogisticDistribution
- SechSquaredDistribution (alias of LogisticDistribution)
- LogNormalDistribution
- AntiLogNormalDistribution (alias of LogNormalDistribution)
- CobbDouglasDistribution (alias of LogNormalDistribution)
- NakagamiDistribution
- NegativeBinomialDistribution(success, target)
- PascalDistribution(success, target)
- PolyaDistribution (alias of NegativeBinomialDistribution)
- NormalDistribution()
- ParetoDistribution(location=1.0, shape=1.0)
- PoissonDistribution(expectation)
- RademacherDistribution()
- ReciprocalDistribution
- SemicircularDistribution(location=0.0, scale=1.0)
- TDistribution(location=0.0, scale=1.0, shape=2)
- TriangularDistribution
- UniformDistribution(location, scale)
- RectangularDistribution (alias of UniformDistribution)
- WeibullDistribution
- FrechetDistribution (alias of WeibullDistribution)
Copyright (c) Maurice H.T. Ling <[email protected]>
Date created: 17th August 2005
"""
import math
import random
from .copadsexceptions import DistributionParameterError
from .copadsexceptions import DistributionFunctionError
from .copadsexceptions import NormalDistributionTypeError
from . import nrpy
from . import constants

# PI and PI2 are used by several distributions below but are never defined
# in this excerpt; define them here (assumed to mean pi and 2*pi).
PI = math.pi
PI2 = 2 * math.pi
class Distribution:
"""
Abstract class for all statistical distributions.
Due to the large variations of parameters for each distribution, it is
unlikely to be able to standardize a parameter list for each method that
is meaningful for all distributions. Instead, the parameters to construct
each distribution is to be given as keyword arguments.
@see: Ling, MHT. 2009. Compendium of Distributions, I: Beta, Binomial, Chi-
Square, F, Gamma, Geometric, Poisson, Student's t, and Uniform. The Python
Papers Source Codes 1:4
"""
def __init__(self, **parameters):
"""
Constructor method. The parameters are used to construct the
probability distribution.
"""
raise NotImplementedError
def CDF(self, x):
"""
Cumulative Distribution Function, which gives the cumulative
probability (area under the probability curve) from -infinity or 0 to
a given x-value on the x-axis, where the y-axis is the probability.
The CDF is also known as the distribution function.
"""
raise NotImplementedError
def PDF(self, x):
"""
Probability Density Function (PDF), which gives the probability for a
particular value of x, or the area under the probability distribution
from x-h to x+h for a continuous distribution.
"""
raise NotImplementedError
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value and
returns the corresponding value on the x-axis.
"""
raise NotImplementedError
def mean(self):
"""
Gives the arithmetic mean of the sample.
"""
raise NotImplementedError
def mode(self):
"""
Gives the mode of the sample, if closed-form is available.
"""
raise NotImplementedError
def kurtosis(self):
"""
Gives the kurtosis of the sample.
"""
raise NotImplementedError
def skew(self):
"""
Gives the skew of the sample.
"""
raise NotImplementedError
def variance(self):
"""
Gives the variance of the sample.
"""
raise NotImplementedError
# ----------------------------------------------------------
# Tested Distributions
# ----------------------------------------------------------
class BetaDistribution(Distribution):
"""
Class for Beta Distribution.
@see: Ling, MHT. 2009. Compendium of Distributions, I: Beta, Binomial, Chi-
Square, F, Gamma, Geometric, Poisson, Student's t, and Uniform. The Python
Papers Source Codes 1:4
@status: Tested method
@since: version 0.2
"""
def __init__(self, location, scale, p, q):
"""
Constructor method. The parameters are used to construct the
probability distribution.
@param location:
@param scale: upper bound
@param p: shape parameter; it has no upper bound but seldom exceeds 10.
@param q: shape parameter; it has no upper bound but seldom exceeds 10.
"""
self.location = float(location)
self.scale = float(scale)
self.p = float(p)
self.q = float(q)
def CDF(self, x):
"""
Cumulative Distribution Function, which gives the cumulative
probability (area under the probability curve) from -infinity or 0 to
a given x-value on the x-axis, where the y-axis is the probability.
"""
return nrpy.betai(self.p, self.q, (x - self.location)/
(self.scale - self.location))
def PDF(self, x):
"""
Probability Density Function (PDF), which gives the probability for a
particular value of x, or the area under the probability distribution
from x-h to x+h for a continuous distribution.
"""
# gammln returns log-gamma, so its values cannot be multiplied directly;
# use nrpy.beta (as the other methods here do) for the beta function
n = (self.scale - self.location) ** (self.p + self.q - 1)
p = (x - self.location) ** (self.p - 1)
q = (self.scale - x) ** (self.q - 1)
return (p * q) / (nrpy.beta(self.p, self.q) * n)
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability
value and returns the corresponding value on the x-axis.
"""
cprob = self.CDF(start)
if probability < cprob:
return (start, cprob)
while probability > cprob:
start = start + step
cprob = self.CDF(start)
return (start, cprob)
def mean(self):
"""Gives the arithmetic mean of the sample."""
n = (self.location * self.q) + (self.scale * self.p)
return n / (self.p + self.q)
def mode(self):
"""Gives the mode of the sample."""
n = (self.location * (self.q - 1)) + (self.scale * \
(self.p - 1))
return n / (self.p + self.q - 2)
def kurtosis(self):
"""Gives the kurtosis of the sample."""
n = (self.p ** 2) * (self.q + 2) + \
(2 * (self.q ** 2)) + \
((self.p * self.q) * (self.q - 2))
n = n * (self.p + self.q + 1)
d = self.p * self.q * (self.p + self.q + 2) * \
(self.p + self.q + 3)
return 3 * ((n / d) - 1)
def skew(self):
"""Gives the skew of the sample."""
d = (self.p + self.q) ** 3
d = d * (self.p + self.q + 1) * (self.p + self.q + 2)
e = ((self.p + self.q) ** 2) * (self.p + self.q + 1)
e = (self.p * self.q) / e
e = e ** 1.5
# (q - p), not (q - q): the skew must vanish only when p == q
return ((2 * self.p * self.q) * (self.q - self.p)) / (d * e)
def variance(self):
"""Gives the variance of the sample."""
n = self.p * self.q * ((self.scale - self.location) ** 2)
d = (self.p + self.q + 1) * ((self.p + self.q) ** 2)
return n / d
def moment(self, r):
"""Gives the r-th moment of the sample."""
return nrpy.beta(self.p + r,
self.q)/nrpy.beta(self.p, self.q)
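# e.g. for p=2, q=3 the first raw moment of the standard Beta is
# B(3, 3)/B(2, 3) = (1/30)/(1/12) = 0.4, matching p/(p + q)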
def random(self):
"""Gives a random number based on the distribution."""
# rescale the standard [0, 1] beta variate onto [location, scale]
return self.location + ((self.scale - self.location) *
random.betavariate(self.p, self.q))
class BinomialDistribution(Distribution):
"""
Class for Binomial Distribution.
@see: Ling, MHT. 2009. Compendium of Distributions, I: Beta, Binomial, Chi-
Square, F, Gamma, Geometric, Poisson, Student's t, and Uniform. The Python
Papers Source Codes 1:4
@status: Tested method
@since: version 0.2
"""
def __init__(self, success=0.5, trial=1000):
"""
Constructor method. The parameters are used to construct
the probability distribution.
@param success: probability of success; 0 <= success <= 1
@param trial: number of Bernoulli trials
"""
self.success = float(success)
self.trial = int(trial)
def CDF(self, x):
"""
Cumulative Distribution Function, which gives the cumulative
probability (area under the probability curve) from -infinity or 0 to
a given x-value on the x-axis, where the y-axis is the probability.
"""
return nrpy.cdf_binomial(x, self.trial, self.success)
def PDF(self, x):
"""
Probability Density Function (PDF), which gives the probability for a
particular value of x, or the area under the probability distribution
from x-h to x+h for a continuous distribution.
"""
x = int(x)
return nrpy.bico(self.trial, x) * \
(self.success ** x) * \
((1 - self.success) ** (self.trial - x))
def inverseCDF(self, probability, start=0, step=1):
"""
It does the reverse of CDF() method, it takes a probability
value and returns the corresponding value on the x-axis.
"""
cprob = self.CDF(start)
if probability < cprob:
return (start, cprob)
while probability > cprob:
start = start + step
cprob = self.CDF(start)
return (start, cprob)
def mean(self):
"""Gives the arithmetic mean of the sample."""
return self.success * self.trial
def mode(self):
"""Gives the mode of the sample."""
return int(self.success * (self.trial + 1))
def kurtosis(self):
"""Gives the kurtosis of the sample."""
# excess kurtosis: (1 - 6pq) / (npq), with q = 1 - p
return ((1 - (6 * self.success * (1 - self.success))) /
(self.trial * self.success * (1 - self.success)))
def skew(self):
"""Gives the skew of the sample."""
return (1 - self.success - self.success)/ \
((self.trial * self.success * (1 - self.success)) ** 0.5)
def variance(self):
"""Gives the variance of the sample."""
return self.mean() * (1 - self.success)
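# Worked check (n=1000, p=0.5): mean() = 500.0, variance() = 250.0,
# skew() = 0.0 and, with the corrected excess-kurtosis formula above,
# kurtosis() = (1 - 6pq)/(npq) = -0.002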
class CauchyDistribution(Distribution):
"""
Class for Cauchy Distribution.
@see: Chen, KFQ, Ling, MHT. 2013. COPADS III (Compendium of
Distributions II): Cauchy, Cosine, Exponential, Hypergeometric,
Logarithmic, Semicircular, Triangular, and Weibull. The Python
Papers Source Codes 5: 2.
@status: Tested method
@since: version 0.4
"""
def __init__(self, location=0.0, scale=1.0):
"""
Constructor method. The parameters are used to construct the
probability distribution.
@param location: the mean; default = 0.0
@param scale: spread of the distribution, S{lambda}; default = 1.0
"""
self.location = location
self.scale = scale
def CDF(self, x):
"""
Cumulative Distribution Function, which gives the cumulative
probability (area under the probability curve) from -infinity or 0 to
a given x-value on the x-axis, where the y-axis is the probability."""
return 0.5 + 1 / PI * math.atan((x - self.location) / self.scale)
def PDF(self, x):
"""
Probability Density Function (PDF), which gives the probability for a
particular value of x, or the area under the probability distribution
from x-h to x+h for a continuous distribution."""
return 1 / (PI * self.scale * \
(1 + (((x - self.location) / self.scale) ** 2)))
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value and
returns the corresponding value on the x-axis."""
cprob = self.CDF(start)
if probability < cprob: return (start, cprob)
while (probability > cprob):
start = start + step
cprob = self.CDF(start)
# print start, cprob
return (start, cprob)
def mean(self):
"""Gives the arithmetic mean of the sample."""
raise DistributionFunctionError('Mean for Cauchy Distribution is \
undefined')
def mode(self):
"""Gives the mode of the sample."""
return self.location
def median(self):
"""Gives the median of the sample."""
return self.location
def quantile1(self):
"""Gives the 1st quantile of the sample."""
return self.location - self.scale
def quantile3(self):
"""Gives the 3rd quantile of the sample."""
return self.location + self.scale
def qmode(self):
"""Gives the quantile of the mode of the sample."""
return 0.5
def random(self, seed):
"""Gives a random number based on the distribution."""
while 1:
seed = self.location + (self.scale * math.tan(PI * (seed - 0.5)))
yield seed
class CosineDistribution(Distribution):
"""
Cosine distribution is sometimes used as a simple approximation to
Normal distribution.
@see: Chen, KFQ, Ling, MHT. 2013. COPADS III (Compendium of
Distributions II): Cauchy, Cosine, Exponential, Hypergeometric,
Logarithmic, Semicircular, Triangular, and Weibull. The Python
Papers Source Codes 5: 2.
@status: Tested method
@since: version 0.4
"""
def __init__(self, location=0.0, scale=1.0):
"""
Constructor method. The parameters are used to construct the
probability distribution.
@param location: the mean; default = 0.0
@param scale: spread of the distribution, S{lambda}; default = 1.0
"""
self.location = location
self.scale = scale
def CDF(self, x):
"""
Cumulative Distribution Function, which gives the cumulative
probability (area under the probability curve) from -infinity or 0 to
a given x-value on the x-axis, where the y-axis is the probability."""
n = PI + (x - self.location) / self.scale + \
math.sin((x - self.location) / self.scale)
return n / PI2
def PDF(self, x):
"""
Probability Density Function (PDF), which gives the probability for a
particular value of x, or the area under the probability distribution
from x-h to x+h for a continuous distribution."""
return (1 / (PI2 * self.scale)) * \
(1 + math.cos((x - self.location) / self.scale))
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value and
returns the corresponding value on the x-axis."""
cprob = self.CDF(start)
if probability < cprob: return (start, cprob)
while (probability > cprob):
start = start + step
cprob = self.CDF(start)
# print start, cprob
return (start, cprob)
def mean(self):
"""Gives the arithmetic mean of the sample."""
return self.location
def mode(self):
"""Gives the mode of the sample."""
return self.location
def median(self):
"""Gives the median of the sample."""
return self.location
def kurtosis(self):
"""Gives the kurtosis of the sample."""
return -0.5938
def skew(self):
"""Gives the skew of the sample."""
return 0.0
def variance(self):
"""Gives the variance of the sample."""
return (((PI * PI)/3) - 2) * (self.scale ** 2)
def quantile1(self):
"""Gives the 1st quantile of the sample."""
return self.location - (0.8317 * self.scale)
def quantile3(self):
"""Gives the 13rd quantile of the sample."""
return self.location + (0.8317 * self.scale)
def qmean(self):
"""Gives the quantile of the arithmetic mean of the sample."""
return 0.5
def qmode(self):
"""Gives the quantile of the mode of the sample."""
return 0.5
# def random(self):
# """Gives a random number based on the distribution."""
# raise DistributionFunctionError
class ExponentialDistribution(Distribution):
"""
Exponential distribution is the continuous version of Geometric
distribution. It is also a special case of Gamma distribution where
shape = 1
@see: Chen, KFQ, Ling, MHT. 2013. COPADS III (Compendium of
Distributions II): Cauchy, Cosine, Exponential, Hypergeometric,
Logarithmic, Semicircular, Triangular, and Weibull. The Python
Papers Source Codes 5: 2.
@status: Tested method
@since: version 0.4
"""
def __init__(self, location=0.0, scale=1.0):
"""
Constructor method. The parameters are used to construct the
probability distribution.
@param location: position of the distribution, default = 0.0
@param scale: spread of the distribution, S{lambda}; default = 1.0"""
self.location = location
self.scale = scale
def CDF(self, x):
"""
Cumulative Distribution Function, which gives the cumulative
probability (area under the probability curve) from -infinity or 0 to
a given x-value on the x-axis, where the y-axis is the probability."""
return 1 - math.exp((self.location - x) / self.scale)
def PDF(self, x):
"""
Probability Density Function (PDF), which gives the probability for a
particular value of x, or the area under the probability distribution
from x-h to x+h for a continuous distribution."""
return (1/self.scale) * math.exp((self.location - x)/self.scale)
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value and
returns the corresponding value on the x-axis."""
cprob = self.CDF(start)
if probability < cprob: return (start, cprob)
while (probability > cprob):
start = start + step
cprob = self.CDF(start)
# print start, cprob
return (start, cprob)
def mean(self):
"""Gives the arithmetic mean of the sample."""
return self.location + self.scale
def mode(self):
"""Gives the mode of the sample."""
return self.location
def median(self):
"""Gives the median of the sample."""
# natural log, not log10: the median must satisfy CDF(m) = 0.5
return self.location + (self.scale * math.log(2))
def kurtosis(self):
"""Gives the kurtosis of the sample."""
return 6.0
def skew(self):
"""Gives the skew of the sample."""
return 2.0
def variance(self):
"""Gives the variance of the sample."""
return self.scale * self.scale
def quantile1(self):
"""Gives the 1st quantile of the sample."""
# natural log: CDF(Q1) = 0.25 gives Q1 = location + scale*ln(4/3)
return self.location + (self.scale * math.log(4.0 / 3.0))
def quantile3(self):
"""Gives the 3rd quantile of the sample."""
# natural log: CDF(Q3) = 0.75 gives Q3 = location + scale*ln(4)
return self.location + (self.scale * math.log(4))
def qmean(self):
"""Gives the quantile of the arithmetic mean of the sample."""
return 0.6321
def qmode(self):
"""Gives the quantile of the mode of the sample."""
return 0.0
def random(self):
"""Gives a random number based on the distribution."""
# expovariate takes the rate (1/scale); shift the sample by location
return self.location + random.expovariate(1.0 / self.scale)
class FDistribution(Distribution):
"""
Class for F Distribution.
@see: Ling, MHT. 2009. Compendium of Distributions, I: Beta, Binomial, Chi-
Square, F, Gamma, Geometric, Poisson, Student's t, and Uniform. The Python
Papers Source Codes 1:4
@status: Tested method
@since: version 0.2
"""
def __init__(self, df1=1, df2=1):
"""
Constructor method. The parameters are used to construct the
probability distribution.
@param df1: degrees of freedom for numerator
@param df2: degrees of freedom for denorminator
"""
self.df1 = float(df1)
self.df2 = float(df2)
def CDF(self, x):
"""
Cumulative Distribution Function, which gives the cumulative
probability (area under the probability curve) from -infinity or 0 to
a given x-value on the x-axis, where the y-axis is the probability.
"""
sub_x = (self.df1 * x) / (self.df1 * x + self.df2)
return nrpy.betai(self.df1 / 2.0, self.df2 / 2.0, sub_x)
def PDF(self, x):
"""
Probability Density Function (PDF), which gives the probability for a
particular value of x, or the area under the probability distribution
from x-h to x+h for a continuous distribution.
"""
x = float(x)
n1 = ((x * self.df1) ** self.df1) * (self.df2 ** self.df2)
n2 = (x * self.df1 + self.df2) ** (self.df1 + self.df2)
d = x * nrpy.beta(self.df1 / 2.0, self.df2 / 2.0)
return math.sqrt(n1 / n2) / d
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value and
returns the corresponding value on the x-axis.
"""
cprob = self.CDF(start)
if probability < cprob:
return (start, cprob)
while probability > cprob:
start = start + step
cprob = self.CDF(start)
return (start, cprob)
def mean(self):
"""Gives the arithmetic mean of the sample."""
return float(self.df2 / (self.df2 - 2))
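# Usage sketch for FDistribution (illustrative only; assumes nrpy.betai is
# the regularized incomplete beta function, as used in CDF above):
#     f = FDistribution(df1=3, df2=12)
#     f.CDF(1.0)          # P(X <= 1.0)
#     f.inverseCDF(0.95)  # ~95th percentile, found by linear search
#     f.mean()            # 12/10 = 1.2; only defined for df2 > 2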
class GammaDistribution(Distribution):
"""
Class for Gamma Distribution.
@see: Ling, MHT. 2009. Compendium of Distributions, I: Beta, Binomial, Chi-
Square, F, Gamma, Geometric, Poisson, Student's t, and Uniform. The Python
Papers Source Codes 1:4
@status: Tested method
@since: version 0.2
"""
def __init__(self, location, scale, shape):
"""
Constructor method. The parameters are used to construct the
probability distribution.
@param location: position of the distribution
@param scale: spread of the distribution
@param shape: shape of the distribution"""
self.location = float(location)
self.scale = float(scale)
self.shape = float(shape)
def CDF(self, x):
"""
Cumulative Distribution Function, which gives the cumulative
probability (area under the probability curve) from -infinity or 0 to
a given x-value on the x-axis where y-axis is the probability.
"""
return nrpy.gammp(self.shape, (x - self.location) / self.scale)
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value and
returns the corresponding value on the x-axis.
"""
cprob = self.CDF(start)
if probability < cprob:
return (start, cprob)
while probability > cprob:
start = start + step
cprob = self.CDF(start)
return (start, cprob)
def mean(self):
"""Gives the arithmetic mean of the sample."""
return self.location + (self.scale * self.shape)
def mode(self):
"""Gives the mode of the sample."""
return self.location + (self.scale * (self.shape - 1))
def kurtosis(self):
"""Gives the kurtosis of the sample."""
return 6 / self.shape
def skew(self):
"""Gives the skew of the sample."""
return 2 / math.sqrt(self.shape)
def variance(self):
"""Gives the variance of the sample."""
return self.scale * self.scale * self.shape
def qmean(self):
"""Gives the quantile of the arithmetic mean of the sample."""
return nrpy.gammp(self.shape, self.shape)
def qmode(self):
"""Gives the quantile of the mode of the sample."""
return nrpy.gammp(self.shape, self.shape - 1)
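# Usage sketch for GammaDistribution (illustrative only; assumes nrpy.gammp
# is the regularized lower incomplete gamma function, as used in CDF above):
#     g = GammaDistribution(location=0.0, scale=2.0, shape=3.0)
#     g.mean()            # 0 + 2 * 3 = 6.0
#     g.variance()        # 2 * 2 * 3 = 12.0
#     g.CDF(6.0)          # probability of a value at or below the mean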
def ErlangDistribution(location, scale, shape):
"""
Erlang distribution is an alias of Gamma distribution where the shape
parameter is an integer.
@param location:
@param scale:
@param shape:
@status: Tested method
@since: version 0.2
"""
return GammaDistribution(location, scale, int(shape))
def FurryDistribution(location, scale, shape):
"""
Furry distribution is an alias of Gamma distribution.
@param location:
@param scale:
@param shape:
@status: Tested method
@since: version 0.2
"""
return GammaDistribution(location, scale, shape)
class ChiSquareDistribution(GammaDistribution):
"""
Chi-square distribution is a special case of Gamma distribution where
location = 0, scale = 2 and shape is half the degrees of freedom.
@see: Ling, MHT. 2009. Compendium of Distributions, I: Beta, Binomial, Chi-
Square, F, Gamma, Geometric, Poisson, Student's t, and Uniform. The Python
Papers Source Codes 1:4
@status: Tested method
@since: version 0.2
"""
def __init__(self, df=2):
"""
Constructor method. The parameters are used to construct
the probability distribution.
@param df: degrees of freedom"""
GammaDistribution.__init__(self, 0, 2, float(df) / 2.0)
class GeometricDistribution(Distribution):
"""
Geometric distribution is the discrete version of Exponential
distribution.
@see: Ling, MHT. 2009. Compendium of Distributions, I: Beta, Binomial, Chi-
Square, F, Gamma, Geometric, Poisson, Student's t, and Uniform. The Python
Papers Source Codes 1:4
@status: Tested method
@since: version 0.2
"""
def __init__(self, success=0.5):
"""
Constructor method. The parameters are used to construct the
probability distribution.
@param success: probability of success; 0 <= success <= 1;
default = 0.5
"""
self.prob = float(success)
def CDF(self, x):
"""
Cumulative Distribution Function, which gives the cumulative
probability (area under the probability curve) from -infinity or 0 to
a given x-value on the x-axis where y-axis is the probability.
"""
total = self.PDF(1)
for i in range(2, int(x) + 1):
total += self.PDF(i)
return total
def PDF(self, x):
"""
Partial Distribution Function, which gives the probability
for particular value of x, or the area under probability
distribution from x-h to x+h for continuous distribution.
"""
return self.prob * ((1 - self.prob) ** (x - 1))
def inverseCDF(self, probability, start=1, step=1):
"""
It does the reverse of CDF() method, it takes a probability value and
returns the corresponding value on the x-axis.
"""
cprob = self.CDF(start)
if probability < cprob:
return (start, cprob)
while probability > cprob:
start = start + step
cprob = self.CDF(start)
return (start, cprob)
def mean(self):
"""Gives the arithmetic mean of the sample."""
return 1/self.prob
def mode(self):
"""Gives the mode of the sample."""
return 1.0
def variance(self):
"""Gives the variance of the sample."""
return (1 - self.prob) / (self.prob ** 2)
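# Usage sketch for GeometricDistribution (illustrative only): the number of
# Bernoulli trials up to and including the first success.
#     g = GeometricDistribution(success=0.25)
#     g.PDF(1)            # 0.25, success on the first trial
#     g.CDF(4)            # ~0.684 = 1 - 0.75**4, success within four trials
#     g.mean()            # 1 / 0.25 = 4.0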
class HypergeometricDistribution(Distribution):
"""
Class for Hypergeometric distribution
@see: Chen, KFQ, Ling, MHT. 2013. COPADS III (Compendium of
Distributions II): Cauchy, Cosine, Exponential, Hypergeometric,
Logarithmic, Semicircular, Triangular, and Weibull. The Python
Papers Source Codes 5: 2.
@status: Tested method
@since: version 0.4
"""
def __init__(self, sample_size,
population_size=100,
population_success=50):
"""
Constructor method. The parameters are used to construct the
probability distribution.
@param sample_size: sample size (not more than population size)
@type sample_size: integer
@param population_size: population size; default = 100
@type population_size: integer
@param population_success: number of successes in the population
(cannot be more than population size); default = 50
@type population_success: integer"""
if population_success > population_size:
raise AttributeError('population_success cannot be more \
than population_size')
elif sample_size > population_size:
raise AttributeError('sample_size cannot be more \
than population_size')
else:
self.psize = int(population_size)
self.psuccess = int(population_success)
self.ssize = int(sample_size)
def CDF(self, sample_success):
"""
Cumulative Distribution Function, which gives the cumulative
probability (area under the probability curve) from -infinity or 0 to
a given x-value (sample_success, an integer that is not more than sample
size) on the x-axis where y-axis is the probability."""
if sample_success > self.ssize:
raise AttributeError('sample_success cannot be more \
than sample_size')
else:
# start at n = 0 so that CDF(k) covers P(X <= k) in full
return sum([self.PDF(n) for n in range(0, sample_success+1)])
def PDF(self, sample_success):
"""
Partial Distribution Function, which gives the probability for the
particular value of x (sample_success, an integer that is not more
than sample size), or the area under probability distribution from
x-h to x+h for continuous distribution."""
if sample_success > self.ssize:
raise AttributeError('sample_success cannot be more \
than sample_size')
else:
sample_success = int(sample_success)
numerator = nrpy.bico(self.psuccess, sample_success)
numerator = numerator * nrpy.bico(self.psize-self.psuccess,
self.ssize-sample_success)
denominator = nrpy.bico(self.psize, self.ssize)
return float(numerator)/float(denominator)
def inverseCDF(self, probability, start=1, step=1):
"""
It does the reverse of CDF() method, it takes a probability value
and returns the corresponding value on the x-axis."""
cprob = self.CDF(start)
if probability < cprob: return (start, cprob)
while (probability > cprob):
start = start + step
cprob = self.CDF(start)
# print start, cprob
return (int(start), cprob)
def mean(self):
"""Gives the arithmetic mean of the sample."""
return self.ssize * (float(self.psuccess)/float(self.psize))
def mode(self):
"""Gives the mode of the sample."""
temp = (self.ssize + 1) * (self.psuccess + 1)
return int(math.floor(float(temp) / float(self.psize + 2)))
# def kurtosis(self):
# """Gives the kurtosis of the sample."""
# raise DistributionFunctionError
# def skew(self):
# """Gives the skew of the sample."""
# raise DistributionFunctionError
def variance(self):
"""Gives the variance of the sample."""
t1 = float(self.psize-self.psuccess)/float(self.psize)
t2 = float(self.psize-self.ssize)/float(self.psize-1)
return self.mean() * t1 * t2
# def quantile1(self):
# """Gives the 1st quantile of the sample."""
# raise DistributionFunctionError
# def quantile3(self):
# """Gives the 3rd quantile of the sample."""
# raise DistributionFunctionError
# def qmean(self):
# """Gives the quantile of the arithmetic mean of the sample."""
# raise DistributionFunctionError
# def qmode(self):
# """Gives the quantile of the mode of the sample."""
# raise DistributionFunctionError
## def random(self, seed):
## """Gives a random number based on the distribution."""
## while 1:
## func
## yield seed
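# Usage sketch for HypergeometricDistribution (illustrative only): successes
# in a sample drawn without replacement from a finite population.
#     h = HypergeometricDistribution(sample_size=10,
#                                    population_size=100,
#                                    population_success=50)
#     h.PDF(5)            # probability of exactly 5 successes in the sample
#     h.CDF(5)            # probability of at most 5 successes
#     h.mean()            # 10 * (50/100.0) = 5.0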
class LogarithmicDistribution(Distribution):
"""
Class for Logarithmic Distribution.
@see: Chen, KFQ, Ling, MHT. 2013. COPADS III (Compendium of
Distributions II): Cauchy, Cosine, Exponential, Hypergeometric,
Logarithmic, Semicircular, Triangular, and Weibull. The Python
Papers Source Codes 5: 2.
@status: Tested method
@since: version 0.4
"""
def __init__(self, shape):
"""Constructor method. The parameters are used to construct the
probability distribution.
@param shape: the spread of the distribution"""
self.shape = shape
def CDF(self, x):
"""
Cumulative Distribution Function, which gives the cumulative
probability (area under the probability curve) from -infinity or 0 to
a given x-value on the x-axis where y-axis is the probability."""
summation = 0.0
# the support starts at x = 1; summing from 0 would divide by zero
# inside PDF
for i in range(1, int(x) + 1): summation = summation + self.PDF(i)
return summation
def PDF(self, x):
"""
Partial Distribution Function, which gives the probability for the
particular value of x, or the area under probability distribution from
x-h to x+h for continuous distribution."""
return (-1 * (self.shape ** x)) / (math.log(1 - self.shape) * x)
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value and
returns the corresponding value on the x-axis."""
cprob = self.CDF(start)
if probability < cprob: return (start, cprob)
while (probability > cprob):
start = start + step
cprob = self.CDF(start)
# print start, cprob
return (start, cprob)
def mean(self):
"""Gives the arithmetic mean of the sample."""
return (-1 * self.shape) / ((1 - self.shape) * \
math.log(1 - self.shape))
def mode(self):
"""Gives the mode of the sample."""
return 1.0
def variance(self):
"""Gives the variance of the sample."""
n = (-1 * self.shape) * (self.shape + math.log(1 - self.shape))
d = ((1 - self.shape) ** 2) * math.log(1 - self.shape) * \
math.log(1 - self.shape)
return n / d
# def random(self):
# """Gives a random number based on the distribution."""
# raise DistributionFunctionError
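# Usage sketch for LogarithmicDistribution (illustrative only): the
# log-series distribution with support on x = 1, 2, 3, ...
#     d = LogarithmicDistribution(shape=0.5)
#     d.PDF(1)            # ~0.721 = -0.5 / ln(0.5)
#     d.CDF(3)            # cumulative probability over x = 1, 2 and 3
#     d.mean()            # ~1.443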
class NormalDistribution(Distribution):
"""
Class for standardized normal distribution (area under the curve = 1)
@see: Ling, MHT. 2009. Ten Z-Test Routines from Gopal Kanji's 100
Statistical Tests. The Python Papers Source Codes 1:5
@status: Tested method
@since: version 0.1
"""
def __init__(self):
# store the mean under a private name so that it does not shadow
# the mean() method below
self._mean = 0.0
self.stdev = 1.0
def CDF(self, x):
"""
Cumulative Distribution Function, which gives the cumulative
probability (area under the probability curve) from -infinity or 0 to
a given x-value on the x-axis where y-axis is the probability.
"""
return 1.0 - 0.5 * nrpy.erfcc(x/SQRT2)
def PDF(self, x):
"""
Calculates the density (probability) at x by the formula
f(x) = 1/(sqrt(2 pi) sigma) e^(-x^2/(2 sigma^2))
where sigma is the standard deviation of the standardized
distribution (the mean is 0).
@param x: value at which the density is evaluated
"""
return (1/(math.sqrt(PI2) * self.stdev)) * \
math.exp(-(x ** 2/(2 * self.stdev**2)))
def inverseCDF(self, probability, start = -10.0,
end = 10.0, error = 10e-8):
"""
It does the reverse of CDF() method, it takes a probability value and
returns the corresponding value on the x-axis, together with the
cumulative probability.
@param probability: probability under the curve from -infinity
@param start: lower boundary of calculation (default = -10)
@param end: upper boundary of calculation (default = 10)
@param error: error between the given and calculated probabilities
(default = 10e-8)
@return: Returns a tuple (start, cprob) where 'start' is the standard
deviation for the area under the curve from -infinity to the given
'probability' (+/- step). 'cprob' is the calculated area under the
curve from -infinity to the returned 'start'.
"""
# check for tolerance
if abs(self.CDF(start)-probability) < error:
return (start, self.CDF(start))
# case 1: lower than -10 standard deviations
if probability < self.CDF(start):
return self.inverseCDF(probability, start-5, start, error)
# case 2: between -10 to 10 standard deviations (bisection method)
if probability > self.CDF(start) and \
probability < self.CDF((start+end)/2):
return self.inverseCDF(probability, start, (start+end)/2, error)
if probability > self.CDF((start+end)/2) and \
probability < self.CDF(end):
return self.inverseCDF(probability, (start+end)/2, end, error)
# case 3: higher than 10 standard deviations
if probability > self.CDF(end):
return self.inverseCDF(probability, end, end+5, error)
def mean(self):
"""Gives the arithmetic mean of the sample."""
return self._mean
def mode(self):
"""Gives the mode of the sample."""
return self._mean
def kurtosis(self):
return 0.0
def skew(self):
return 0.0
def variance(self):
return self.stdev * self.stdev
def random(self):
"""Gives a random number based on the distribution."""
return random.gauss(self._mean, self.stdev)
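# Usage sketch for NormalDistribution (illustrative only): the standard
# normal curve with mean 0 and standard deviation 1.
#     n = NormalDistribution()
#     n.CDF(0.0)              # 0.5
#     n.CDF(1.96)             # ~0.975
#     n.inverseCDF(0.975)     # ~(1.96, 0.975), found by bisection
#     n.random()              # one standard normal variate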
class PoissonDistribution(Distribution):
"""
Class for Poisson Distribution. Poisson distribution is binomial
distribution with very low success - that is, for rare events.
@see: Ling, MHT. 2009. Compendium of Distributions, I: Beta, Binomial, Chi-
Square, F, Gamma, Geometric, Poisson, Student's t, and Uniform. The Python
Papers Source Codes 1:4
@status: Tested method
@since: version 0.2
"""
def __init__(self, expectation=0.001):
"""
Constructor method. The parameters are used to construct the
probability distribution.
@param expectation: expected number of occurrences; S{lambda}
"""
self._mean = float(expectation)
def CDF(self, x):
"""
Cumulative Distribution Function, which gives the cumulative
probability (area under the probability curve) from -infinity or 0 to
a given x-value on the x-axis where y-axis is the probability.
"""
return nrpy.cdf_poisson(x + 1, self._mean)
def PDF(self, x):
"""
Partial Distribution Function, which gives the probability
for particular value of x, or the area under probability
distribution from x-h to x+h for continuous distribution.
"""
return (math.exp(-1 * self._mean) *
(self._mean ** x)) / nrpy.factrl(x)
def inverseCDF(self, probability, start=0.001, step=1):
"""
It does the reverse of CDF() method, it takes a probability value and
returns the corresponding value on the x-axis.
"""
cprob = self.CDF(start)
if probability < cprob:
return (start, cprob)
while probability > cprob:
start = start + step
cprob = self.CDF(start)
return (start, cprob)
def mean(self):
"""Gives the arithmetic mean of the sample."""
return self._mean
def mode(self):
"""Gives the mode of the sample."""
return int(self._mean)
def variance(self):
"""Gives the variance of the sample."""
return self._mean
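# Usage sketch for PoissonDistribution (illustrative only; assumes
# nrpy.cdf_poisson and nrpy.factrl as used above):
#     p = PoissonDistribution(expectation=3.0)
#     p.PDF(0)            # ~0.0498 = e^-3
#     p.PDF(3)            # ~0.224, probability of exactly 3 events
#     p.mean()            # 3.0, which also equals the variance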
class SemicircularDistribution(Distribution):
"""
Class for Semicircular Distribution.
@see: Chen, KFQ, Ling, MHT. 2013. COPADS III (Compendium of
Distributions II): Cauchy, Cosine, Exponential, Hypergeometric,
Logarithmic, Semicircular, Triangular, and Weibull. The Python
Papers Source Codes 5: 2.
@status: Tested method
@since: version 0.4
"""
def __init__(self, location=0.0, scale=1.0):
"""
Constructor method. The parameters are used to construct the
probability distribution.
@param location: mean of the distribution, default = 0.0
@param scale: spread of the distribution, default = 1.0"""
self.location = location
self.scale = scale
def CDF(self, x):
"""
Cumulative Distribution Function, which gives the cumulative
probability (area under the probability curve) from -infinity or 0 to
a given x-value on the x-axis where y-axis is the probability."""
t = (x - self.location) / self.scale
return 0.5 + (1 / PI) * (t * math.sqrt(1 - (t ** 2)) + math.asin(t))
def PDF(self, x):
"""
Partial Distribution Function, which gives the probability for the
particular value of x, or the area under probability distribution from
x-h to x+h for continuous distribution."""
return (2 / (self.scale * PI)) * \
math.sqrt(1 - ((x - self.location) / self.scale) ** 2)
def inverseCDF(self, probability, start=-10.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value and
returns the corresponding value on the x-axis."""
# clamp to the lower end of the support, location - scale
if start < (self.location - self.scale):
start = self.location - self.scale
cprob = self.CDF(start)
if probability < cprob: return (start, cprob)
while (probability > cprob):
start = start + step
cprob = self.CDF(start)
# print start, cprob
return (start, cprob)
def mean(self):
"""Gives the arithmetic mean of the sample."""
return self.location
def mode(self):
"""Gives the mode of the sample."""
return self.location
def kurtosis(self):
"""Gives the kurtosis of the sample."""
return -1.0
def skew(self):
"""Gives the skew of the sample."""
return 0.0
def variance(self):
"""Gives the variance of the sample."""
return 0.25 * (self.scale ** 2)
def quantile1(self):
"""Gives the 1st quantile of the sample."""
return self.location - (0.404 * self.scale)
def quantile3(self):
"""Gives the 3rd quantile of the sample."""
return self.location + (0.404 * self.scale)
def qmean(self):
"""Gives the quantile of the arithmetic mean of the sample."""
return 0.5
def qmode(self):
"""Gives the quantile of the mode of the sample."""
return 0.5
# def random(self):
# """Gives a random number based on the distribution."""
# raise DistributionFunctionError
class TDistribution(Distribution):
"""
Class for Student's t-distribution.
@see: Ling, MHT. 2009. Compendium of Distributions, I: Beta, Binomial, Chi-
Square, F, Gamma, Geometric, Poisson, Student's t, and Uniform. The Python
Papers Source Codes 1:4
@status: Tested method
@since: version 0.2
"""
def __init__(self, location=0.0, scale=1.0, shape=2):
"""Constructor method. The parameters are used to construct
the probability distribution.
@param location: default = 0.0
@param scale: default = 1.0
@param shape: degrees of freedom; default = 2"""
self._mean = float(location)
self.stdev = float(scale)
self.df = float(shape)
def CDF(self, x):
"""
Cumulative Distribution Function, which gives the cumulative
probability (area under the probability curve) from -infinity or 0 to
a given x-value on the x-axis where y-axis is the probability.
"""
t = (x - self._mean) / self.stdev
a = nrpy.betai(self.df / 2.0, 0.5, self.df / (self.df + (t * t)))
if t > 0:
return 1 - 0.5 * a
else:
return 0.5 * a
def PDF(self, x):
"""
Calculates the density (probability) at x with n-th degrees of freedom
as
M{f(x) = S{Gamma}((n+1)/2) /
(sqrt(n * pi) S{Gamma}(n/2)) (1 + x^2/n)^-((n+1)/2)}
for all real x. It has mean 0 (for n > 1) and variance n/(n-2)
(for n > 2)."""
a = nrpy.gammln((self.df + 1) / 2)
b = math.sqrt(math.pi * self.df) * nrpy.gammln(self.df / 2) * \
self.stdev
c = 1 + ((((x - self._mean) / self.stdev) ** 2) / self.df)
return (a / b) * (c ** ((-1 - self.df) / 2))
def inverseCDF(self, probability, start = -10.0,
end = 10.0, error = 10e-8):
"""
It does the reverse of CDF() method, it takes a probability value and
returns the corresponding value on the x-axis, together with the
cumulative probability.
@param probability: probability under the curve from -infinity
@param start: lower boundary of calculation (default = -10)
@param end: upper boundary of calculation (default = 10)
@param error: error between the given and calculated probabilities
(default = 10e-8)
@return: Returns a tuple (start, cprob) where 'start' is the standard
deviation for the area under the curve from -infinity to the given
'probability' (+/- step). 'cprob' is the calculated area under the
curve from -infinity to the returned 'start'.
"""
# check for tolerance
if abs(self.CDF(start)-probability) < error:
return (start, self.CDF(start))
# case 1: lower than -10 standard deviations
if probability < self.CDF(start):
return self.inverseCDF(probability, start-10, start, error)
# case 2: between -10 to 10 standard deviations (bisection method)
if probability > self.CDF(start) and \
probability < self.CDF((start+end)/2):
return self.inverseCDF(probability, start, (start+end)/2, error)
if probability > self.CDF((start+end)/2) and \
probability < self.CDF(end):
return self.inverseCDF(probability, (start+end)/2, end, error)
# case 3: higher than 10 standard deviations
if probability > self.CDF(end):
return self.inverseCDF(probability, end, end+10, error)
# cprob = self.CDF(start)
# if probability < cprob:
# return (start, cprob)
# while probability > cprob:
# start = start + step
# cprob = self.CDF(start)
# return (start, cprob)
def mean(self):
"""Gives the arithmetic mean of the sample."""
return self._mean
def mode(self):
"""Gives the mode of the sample."""
return self._mean
def kurtosis(self):
"""Gives the kurtosis of the sample."""
a = ((self.df - 2) ** 2) * nrpy.gammln((self.df / 2) - 2)
return 3 * ((a / (4 * nrpy.gammln(self.df / 2))) - 1)
def skew(self):
"""Gives the skew of the sample."""
return 0.0
def variance(self):
"""Gives the variance of the sample."""
return (self.df / (self.df - 2)) * self.stdev * self.stdev
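# Usage sketch for TDistribution (illustrative only):
#     t = TDistribution(location=0.0, scale=1.0, shape=10)
#     t.CDF(0.0)              # 0.5, by symmetry
#     t.inverseCDF(0.975)     # ~(2.23, 0.975), the two-tailed 5% critical
#                             # value for 10 degrees of freedom
#     t.variance()            # 10/8 = 1.25; only defined for df > 2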
class TriangularDistribution(Distribution):
"""
Class for Triangular Distribution.
@see: Chen, KFQ, Ling, MHT. 2013. COPADS III (Compendium of
Distributions II): Cauchy, Cosine, Exponential, Hypergeometric,
Logarithmic, Semicircular, Triangular, and Weibull. The Python
Papers Source Codes 5: 2.
@status: Tested method
@since: version 0.4
"""
def __init__(self, upper_limit, peak, lower_limit=0):
"""
Constructor method. The parameters are used to construct the
probability distribution.
@param upper_limit: upper limit of the distribution
@type upper_limit: float
@param peak: peak (mode) of the distribution, which has to be between
the lower and upper limits of the distribution
@type peak: float
@param lower_limit: lower limit of the distribution,
default = 0
@type lower_limit: float"""
self.lower_limit = lower_limit
if upper_limit < self.lower_limit:
raise AttributeError('upper_limit cannot be less than lower_limit')
else:
self.upper_limit = upper_limit
if peak > upper_limit:
raise AttributeError('peak cannot be more than upper_limit')
if peak < lower_limit + 0.001:
raise AttributeError('peak must be above lower_limit')
else:
# store the peak under its own name so that it does not
# shadow the mode() method
self.peak = peak
def CDF(self, x):
"""
Cumulative Distribution Function, which gives the cumulative
probability (area under the probability curve) from -infinity or 0 to
a given x-value on the x-axis where y-axis is the probability."""
if x < self.lower_limit:
raise AttributeError('x cannot be less than lower_limit')
if x > self.upper_limit:
raise AttributeError('x cannot be more than upper_limit')
if x <= self.peak:
return ((x - self.lower_limit) ** 2) / \
((self.upper_limit - self.lower_limit) * \
(self.peak - self.lower_limit))
else:
# mirrored branch for peak < x <= upper_limit
return 1 - (((self.upper_limit - x) ** 2) / \
((self.upper_limit - self.lower_limit) * \
(self.upper_limit - self.peak)))
def PDF(self, x):
"""
Partial Distribution Function, which gives the probability for the
particular value of x, or the area under probability distribution
from x-h to x+h for continuous distribution."""
if x < self.lower_limit:
raise AttributeError('x cannot be less than lower_limit')
if x > self.upper_limit:
raise AttributeError('x cannot be more than upper_limit')
if x <= self.peak:
return (2 * (x - self.lower_limit)) / \
((self.upper_limit - self.lower_limit) * \
(self.peak - self.lower_limit))
else:
# mirrored branch for peak < x <= upper_limit
return (2 * (self.upper_limit - x)) / \
((self.upper_limit - self.lower_limit) * \
(self.upper_limit - self.peak))
def inverseCDF(self, probability, start=0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value
and returns the corresponding value on the x-axis."""
start = self.lower_limit
cprob = self.CDF(start)
if probability < cprob: return (start, cprob)
while (probability > cprob):
start = start + step
cprob = self.CDF(start)
# print start, cprob
return (start, cprob)
def mean(self):
"""Gives the arithmetic mean of the sample."""
return float(self.lower_limit + self.upper_limit + self.peak) / 3
def mode(self):
"""Gives the mode of the sample."""
return self.peak
def kurtosis(self):
"""Gives the kurtosis of the sample."""
return -3.0 / 5.0
def skew(self):
"""Gives the skew of the sample."""
a = self.lower_limit
b = self.upper_limit
c = self.peak
n = math.sqrt(2) * (a + b - (2 * c)) * ((2 * a) - b - c) * \
(a - (2 * b) + c)
d = 5 * ((a ** 2 + b ** 2 + c ** 2 - (a * b) - (a * c) - \
(b * c)) ** 1.5)
return n / d
def variance(self):
"""Gives the variance of the sample."""
return (self.lower_limit ** 2 + self.upper_limit ** 2 + \
self.peak ** 2 - (self.lower_limit * self.upper_limit) - \
(self.lower_limit * self.peak) - \
(self.upper_limit * self.peak)) / 18.0
def quantile1(self):
"""Gives the 1st quantile of the sample."""
if ((self.peak - self.lower_limit) / \
float(self.upper_limit - self.lower_limit)) > 0.25:
return self.lower_limit + (0.5 * math.sqrt((self.upper_limit - \
self.lower_limit) * (self.peak - self.lower_limit)))
else:
return self.upper_limit - (0.5 * math.sqrt(3 * (self.upper_limit - \
self.lower_limit) * (self.upper_limit - self.peak)))
def quantile3(self):
"""Gives the 3rd quantile of the sample."""
if ((self.peak - self.lower_limit) / \
float(self.upper_limit - self.lower_limit)) > 0.75:
return self.lower_limit + (0.5 * math.sqrt(3 * (self.upper_limit - \
self.lower_limit) * (self.peak - self.lower_limit)))
else:
return self.upper_limit - (0.5 * math.sqrt((self.upper_limit - \
self.lower_limit) * (self.upper_limit - self.peak)))
def qmean(self):
"""Gives the quantile of the arithmetic mean of the sample."""
a = self.lower_limit
b = self.upper_limit
c = self.peak
if c > ((a + b) * 0.5):
# the mean falls on the rising flank of the triangle
return ((b + c - (2 * a)) ** 2) / \
(9.0 * (b - a) * (c - a))
else:
# the mean falls on the descending flank of the triangle
return 1 - ((((2 * b) - a - c) ** 2) / \
(9.0 * (b - a) * (b - c)))
def qmode(self):
"""Gives the quantile of the mode of the sample."""
return (self.peak - self.lower_limit) / \
float(self.upper_limit - self.lower_limit)
# def random(self):
# """Gives a random number based on the distribution."""
# raise DistributionFunctionError
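# Usage sketch for TriangularDistribution (illustrative only):
#     t = TriangularDistribution(upper_limit=10.0, peak=5.0, lower_limit=0.0)
#     t.CDF(5.0)          # 0.5, since the peak here sits midway
#     t.PDF(5.0)          # 0.2 = 2/(upper_limit - lower_limit) at the peak
#     t.mean()            # (0 + 10 + 5)/3 = 5.0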
class UniformDistribution(Distribution):
"""
Class for Uniform distribution.
@see: Ling, MHT. 2009. Compendium of Distributions, I: Beta, Binomial, Chi-
Square, F, Gamma, Geometric, Poisson, Student's t, and Uniform. The Python
Papers Source Codes 1:4
@status: Tested method
@since: version 0.2
"""
def __init__(self, location, scale):
"""
Constructor method. The parameters are used to construct the
probability distribution.
@param location: lower limit of the distribution
@param scale: upper limit of the distribution
"""
self.location = float(location)
self.scale = float(scale)
def CDF(self, x):
"""
Cumulative Distribution Function, which gives the cumulative
probability (area under the probability curve) from -infinity or 0 to
a given x-value on the x-axis where y-axis is the probability.
"""
return (x - self.location) / (self.scale - self.location)
def PDF(self, x):
"""
Partial Distribution Function, which gives the probability
for particular value of x, or the area under probability
distribution from x-h to x+h for continuous distribution.
"""
return 1.0 / (self.scale - self.location)
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value and
returns the corresponding value on the x-axis.
"""
cprob = self.CDF(start)
if probability < cprob:
return (start, cprob)
while probability > cprob:
start = start + step
cprob = self.CDF(start)
return (start, cprob)
def mean(self):
"""Gives the arithmetic mean of the sample."""
return (self.location + self.scale) / 2.0
def median(self):
"""Gives the median of the sample."""
return (self.location + self.scale) / 2
def kurtosis(self):
"""Gives the kurtosis of the sample."""
return -1.2
def skew(self):
"""Gives the skew of the sample."""
return 0.0
def variance(self):
"""Gives the variance of the sample."""
return ((self.scale - self.location) ** 2) / 12
def quantile1(self):
"""Gives the 1st quantile of the sample."""
return ((3 * self.location) + self.scale) / 4
def quantile3(self):
"""Gives the 3rd quantile of the sample."""
return (self.location + (3 * self.scale)) / 4
def qmean(self):
"""Gives the quantile of the arithmetic mean of the sample."""
return 0.5
def random(self):
"""Gives a random number based on the distribution."""
return random.uniform(self.location, self.scale)
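# Usage sketch for UniformDistribution (illustrative only; location and
# scale act as the lower and upper bounds):
#     u = UniformDistribution(location=2.0, scale=6.0)
#     u.CDF(4.0)          # 0.5
#     u.PDF(4.0)          # 0.25 = 1/(6 - 2)
#     u.random()          # one uniform variate on [2, 6]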
class WeiBullDistribution(Distribution):
"""
Class for Weibull distribution.
@see: Chen, KFQ, Ling, MHT. 2013. COPADS III (Compendium of
Distributions II): Cauchy, Cosine, Exponential, Hypergeometric,
Logarithmic, Semicircular, Triangular, and Weibull. The Python
Papers Source Codes 5: 2.
@status: Tested method
@since: version 0.4
"""
def __init__(self, location=1.0, scale=1.0):
"""Constructor method. The parameters are used to construct the
probability distribution.
@param location: scale (characteristic value) of the distribution,
as used in CDF and PDF below; default = 1.0
@param scale: shape of the distribution, default = 1.0"""
self.location = location
self.scale = scale
def CDF(self, x):
"""
Cumulative Distribution Function, which gives the cumulative
probability (area under the probability curve) from -infinity or 0
to a given x-value on the x-axis where y-axis is the probability."""
power = -1 * ((float(x) / self.location) ** self.scale)
return 1 - (math.e ** power)
def PDF(self, x):
"""
Partial Distribution Function, which gives the probability for the
particular value of x, or the area under probability distribution
from x-h to x+h for continuous distribution."""
if x < 0:
return 0
else:
power = -1 * ((float(x) / self.location) ** self.scale)
t3 = math.e ** power
t2 = (float(x) / self.location) ** (self.scale - 1)
t1 = self.scale / self.location
return t1 * t2 * t3
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value and
returns the corresponding value on the x-axis."""
cprob = self.CDF(start)
if probability < cprob: return (start, cprob)
while (probability > cprob):
start = start + step
cprob = self.CDF(start)
# print start, cprob
return (start, cprob)
# def mean(self):
# """Gives the arithmetic mean of the sample."""
# return self.location * nrpy.gammln(1 + 1/self.scale)
def median(self):
"""Gives the median of the sample."""
return self.location * (math.log(2, math.e) ** (1/float(self.scale)))
def mode(self):
"""Gives the mode of the sample."""
if self.scale > 1:
t = ((self.scale - 1) / float(self.scale))
return self.location * (t ** (1/float(self.scale)))
else:
return 0
# def kurtosis(self):
# """Gives the kurtosis of the sample."""
# raise DistributionFunctionError
# def skew(self):
# """Gives the skew of the sample."""
# raise DistributionFunctionError
# def variance(self):
# """Gives the variance of the sample."""
# raise DistributionFunctionError
def random(self):
"""Gives a random number based on the distribution."""
# weibullvariate(alpha, beta) takes the scale (here 'location')
# followed by the shape (here 'scale')
return random.weibullvariate(self.location, self.scale)
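# Usage sketch for WeiBullDistribution (illustrative only; note that in this
# class 'location' acts as the Weibull scale parameter and 'scale' as the
# shape parameter, following the CDF above):
#     w = WeiBullDistribution(location=2.0, scale=1.5)
#     w.CDF(2.0)          # ~0.632 = 1 - e^-1, at x = location
#     w.median()          # 2 * ln(2)**(1/1.5)
#     w.random()          # one Weibull variate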
def FrechetDistribution(**parameters):
"""
Frechet distribution is an alias of Weibull distribution."""
return WeiBullDistribution(**parameters)
# ----------------------------------------------------------
# Untested Distributions
# ----------------------------------------------------------
def AntiLogNormalDistribution(**parameters):
"""
Anti-Lognormal distribution is an alias of Lognormal distribution."""
return LogNormalDistribution(**parameters)
class BernoulliDistribution(Distribution):
"""
Bernoulli distribution is a special case of Binomial distribution
where the number of trials = 1
"""
def __init__(self, success):
"""Constructor method. The parameters are used to construct the
probability distribution.
@param success: probability of success; 0 <= success <= 1"""
self.distribution = BinomialDistribution(success, trial = 1)
def CDF(self, x):
"""Cummulative Distribution Function, which gives the cummulative
probability (area under the probability curve) from -infinity or 0 to
a give x-value on the x-axis where y-axis is the probability."""
return self.distribution.CDF(x)
def PDF(self, x):
"""Partial Distribution Function, which gives the probability for the
particular value of x, or the area under probability distribution from
x-h to x+h for continuous distribution."""
return self.distribution.PDF(x)
def inverseCDF(self, probability, start = 0, step = 1):
"""It does the reverse of CDF() method, it takes a probability value and
returns the corresponding value on the x-axis."""
return self.distribution.inverseCDF(probability, start, step)
def mean(self):
"""Gives the arithmetic mean of the sample."""
return self.distribution.mean()
def mode(self):
"""Gives the mode of the sample."""
return self.distribution.mode()
def kurtosis(self):
"""Gives the kurtosis of the sample."""
return self.distribution.kurtosis()
def skew(self):
"""Gives the skew of the sample."""
return self.distribution.skew()
def variance(self):
"""Gives the variance of the sample."""
return self.distribution.variance()
# def random(self):
# """Gives a random number based on the distribution."""
# return self.distribution.random()
def BilateralExponentialDistribution(**parameters):
"""
Bilateral Exponential distribution is an alias of Laplace distribution."""
return LaplaceDistribution(**parameters)
class BradfordDistribution(Distribution):
"""Class for Bradford distribution."""
def __init__(self, location, scale, shape):
"""
Constructor method. The parameters are used to construct the
probability distribution.
@param location: lower bound of the distribution
@param scale: upper bound of the distribution
@param shape: shape parameter"""
self.location = location
self.scale = scale
self.shape = shape
# the Bradford constant uses the natural logarithm; the moments and
# quantile expressions below all assume this
self.k = math.log(self.shape + 1)
def CDF(self, x):
"""
Cumulative Distribution Function, which gives the cumulative
probability (area under the probability curve) from -infinity or 0 to
a given x-value on the x-axis where y-axis is the probability."""
r = ((self.shape * (x - self.location)) / (self.scale - self.location))
return math.log(1 + r) / self.k
def PDF(self, x):
"""
Partial Distribution Function, which gives the probability for the
particular value of x, or the area under probability distribution
from x-h to x+h for continuous distribution."""
r = (self.shape * (x - self.location)) + self.scale - self.location
return self.shape / (self.k * r)
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value
and returns the corresponding value on the x-axis."""
cprob = self.CDF(start)
if probability < cprob: return (start, cprob)
while (probability > cprob):
start = start + step
cprob = self.CDF(start)
# print start, cprob
return (start, cprob)
def mean(self):
"""Gives the arithmetic mean of the sample."""
r = self.shape * (self.scale - self.location)
r = r + (((self.shape + 1) * self.location - self.scale) * self.k)
return r / (self.shape * self.k)
def mode(self):
"""Gives the mode of the sample."""
return self.location
def kurtosis(self):
"""Gives the kurtosis of the sample."""
d = ((self.shape * (self.k - 2)) + (2 * self.k)) ** 2
d = 3 * self.shape * d
n = ((self.k * ((3 * self.k) - 16)) + 24)
n = (self.shape ** 3) * (self.k - 3) * n
n = n + ((self.k - 4) * (self.k - 3) * (12 * self.k * (self.k **2)))
n = n + (6 * self.k * (self.k **2)) * ((3 * self.k) - 14)
return (n + (12 * (self.k ** 3))) / d
def skew(self):
"""Gives the skew of the sample."""
r = 12 * (self.shape ** 2)
r = r - (9 * self.k * self.shape * (self.shape + 2))
r = r + ((2 * self.k * self.k) * ((self.shape * (self.shape + 3)) + 3))
d = self.shape * (((self.k - 2) * self.shape) + (2 * self.k))
d = math.sqrt(d)
d = d * ((3 * self.shape * (self.k - 2)) + (6 * self.k))
return r / d
def variance(self):
"""Gives the variance of the sample."""
r = (self.scale - self.location) ** 2
r = r * (self.shape * (self.k - 2) + (2 * self.k))
return r / (2 * self.shape * self.k * self.k)
def quantile1(self):
"""Gives the 1st quantile of the sample."""
r = (self.location * (self.shape + 1)) - self.scale
r = r + ((self.scale - self.location) * ((self.shape + 1)** 0.25))
return r / self.shape
def quantile3(self):
"""Gives the 3rd quantile of the sample."""
r = (self.location * (self.shape + 1)) - self.scale
r = r + ((self.scale - self.location) * ((self.shape + 1)** 0.75))
return r / self.shape
def qmean(self):
"""Gives the quantile of the arithmetic mean of the sample."""
r = math.log(self.shape / math.log(self.shape + 1))
return r / math.log(self.shape + 1)
def qmode(self):
"""Gives the quantile of the mode of the sample."""
return 0.0
def random(self, seed):
"""Gives a random number based on the distribution."""
while 1:
r = self.location * (self.shape + 1) - self.scale
r = r + ((self.scale - self.location)*((self.shape + 1) ** seed))
seed = r / self.shape
yield seed
class BurrDistribution(Distribution):
"""
Burr distribution is the generalization of Fisk distribution. Burr
distribution with D = 1 becomes Fisk distribution.
"""
def __init__(self, location, scale, C, D):
"""
Constructor method. The parameters are used to construct the
probability distribution.
@param location:
@param scale:
@param C: shape
@param D: shape"""
self.location = location
self.scale = scale
self.C = C
self.D = D
self.k = (nrpy.gammln(self.D) * \
nrpy.gammln(1 - (2/self.C)) * \
nrpy.gammln((2/self.C) + self.D)) - \
((nrpy.gammln(1 - (1/self.C)) ** 2) * \
(nrpy.gammln((1/self.C) + self.D) ** 2))
def CDF(self, x):
"""
Cumulative Distribution Function, which gives the cumulative
probability (area under the probability curve) from -infinity or 0 to
a given x-value on the x-axis where y-axis is the probability."""
return (1+(((x - self.location)/self.scale)**(-self.C)))**(-self.D)
def PDF(self, x):
"""
Partial Distribution Function, which gives the probability for the
particular value of x, or the area under probability distribution
from x-h to x+h for continuous distribution."""
r = (1+(((x - self.location)/self.scale)**(-self.C)))**(-self.D - 1)
r = r * ((self.C * self.D)/self.scale)
return r * (((x - self.location)/self.scale)**(-self.C - 1))
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value
and returns the corresponding value on the x-axis."""
cprob = self.CDF(start)
if probability < cprob: return (start, cprob)
while (probability > cprob):
start = start + step
cprob = self.CDF(start)
# print start, cprob
return (start, cprob)
def mean(self):
"""Gives the arithmetic mean of the sample."""
r = nrpy.gammln(1 - (1/self.C)) * nrpy.gammln((1/self.C) + self.D)
return self.location + ((r * self.scale) / nrpy.gammln(self.D))
def mode(self):
"""Gives the mode of the sample."""
if ((self.C * self.D) < 1): return self.location
else:
r = (((self.C * self.D)-1)/(self.C + 1)) ** (1/self.C)
return self.location + (self.scale * r)
# def kurtosis(self):
# """Gives the kurtosis of the sample."""
# raise DistributionFunctionError
# def skew(self):
# """Gives the skew of the sample."""
# raise DistributionFunctionError
def variance(self):
"""Gives the variance of the sample."""
return (self.k * (self.scale ** 2)) / (nrpy.gammln(self.D) ** 2)
# def quantile1(self):
# """Gives the 1st quantile of the sample."""
# raise DistributionFunctionError
# def quantile3(self):
# """Gives the 3rd quantile of the sample."""
# raise DistributionFunctionError
# def qmean(self):
# """Gives the quantile of the arithmetic mean of the sample."""
# raise DistributionFunctionError
def qmode(self):
"""Gives the quantile of the mode of the sample."""
if ((self.C * self.D) < 1): return 0.0
else:
return (1 + ((self.C+1)/((self.C*self.D) - 1))) ** (-1*self.D)
def random(self, seed):
"""Gives a random number based on the distribution."""
while 1:
r = ((1/(seed ** (1/self.D))) - 1) ** (-1/self.C)
seed = self.location + self.scale * r
yield seed
class ChiDistribution(Distribution):
"""Class for Chi distribution."""
# def __init__(self, **parameters):
# """Constructor method. The parameters are used to construct the
# probability distribution."""
# raise DistributionFunctionError
# def CDF(self, x):
# """
# Cummulative Distribution Function, which gives the cummulative
# probability (area under the probability curve) from -infinity or 0 to
# a give x-value on the x-axis where y-axis is the probability."""
# raise DistributionFunctionError
# def PDF(self, x):
# """
# Partial Distribution Function, which gives the probability for the
# particular value of x, or the area under probability distribution
# from x-h to x+h for continuous distribution."""
# raise DistributionFunctionError
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value
and returns the corresponding value on the x-axis."""
cprob = self.CDF(start)
if probability < cprob: return (start, cprob)
while (probability > cprob):
start = start + step
cprob = self.CDF(start)
# print start, cprob
return (start, cprob)
# def mean(self):
# """Gives the arithmetic mean of the sample."""
# raise DistributionFunctionError
# def mode(self):
# """Gives the mode of the sample."""
# raise DistributionFunctionError
# def kurtosis(self):
# """Gives the kurtosis of the sample."""
# raise DistributionFunctionError
# def skew(self):
# """Gives the skew of the sample."""
# raise DistributionFunctionError
# def variance(self):
# """Gives the variance of the sample."""
# raise DistributionFunctionError
# def quantile1(self):
# """Gives the 1st quantile of the sample."""
# raise DistributionFunctionError
# def quantile3(self):
# """Gives the 3rd quantile of the sample."""
# raise DistributionFunctionError
# def qmean(self):
# """Gives the quantile of the arithmetic mean of the sample."""
# raise DistributionFunctionError
# def qmode(self):
# """Gives the quantile of the mode of the sample."""
# raise DistributionFunctionError
# def random(self):
# """Gives a random number based on the distribution."""
# raise DistributionFunctionError
def CobbDouglasDistribution(**parameters):
"""
Cobb-Douglas distribution is an alias of Lognormal distribution."""
return LogNormalDistribution(**parameters)
def DoubleExponentialDistribution(**parameters):
"""
Double Exponential distribution is an alias of Laplace distribution."""
return LaplaceDistribution(**parameters)
class DoubleGammaDistribution(Distribution):
"""
Double Gamma distribution is the signed version of Gamma distribution.
"""
def __init__(self, location, scale, shape):
"""
Constructor method. The parameters are used to construct the
probability distribution.
@param location:
@param scale:
@param shape:"""
self.location = location
self.scale = scale
self.shape = shape
def CDF(self, x):
"""
Cumulative Distribution Function, which gives the cumulative
probability (area under the probability curve) from -infinity or 0 to
a given x-value on the x-axis where y-axis is the probability."""
r = nrpy.gammp(self.shape ,abs((x - self.location)/self.scale))
if x > self.location: return 0.5 + (0.5 * r)
else: return 0.5 - (0.5 * r)
def PDF(self, x):
"""
Partial Distribution Function, which gives the probability for the
particular value of x, or the area under probability distribution
from x-h to x+h for continuous distribution."""
r = math.exp(-1 * abs((x - self.location)/self.scale))
r = r * (abs((x - self.location)/self.scale) ** (self.shape -1))
return r / (2 * self.scale * nrpy.gammln(self.shape))
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value
and returns the corresponding value on the x-axis."""
cprob = self.CDF(start)
if probability < cprob: return (start, cprob)
while (probability > cprob):
start = start + step
cprob = self.CDF(start)
# print start, cprob
return (start, cprob)
def mean(self):
"""Gives the arithmetic mean of the sample."""
return self.location
def skew(self):
"""Gives the skew of the sample."""
return 0.0
def variance(self):
"""Gives the variance of the sample."""
return self.shape * (self.shape + 1) * (self.scale ** 2)
def qmean(self):
"""Gives the quantile of the arithmetic mean of the sample."""
return 0.5
# def random(self):
# """Gives a random number based on the distribution."""
# raise DistributionFunctionError
class DoubleWeibullDistribution(Distribution):
"""
Double Weibull distribution is the signed version of Weibull distribution.
"""
# def __init__(self, **parameters):
# """Constructor method. The parameters are used to construct the
# probability distribution."""
# raise DistributionFunctionError
# def CDF(self, x):
# """
# Cummulative Distribution Function, which gives the cummulative
# probability (area under the probability curve) from -infinity or 0 to
# a give x-value on the x-axis where y-axis is the probability."""
# raise DistributionFunctionError
# def PDF(self, x):
# """
# Partial Distribution Function, which gives the probability for the
# particular value of x, or the area under probability distribution
# from x-h to x+h for continuous distribution."""
# raise DistributionFunctionError
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value
and returns the corresponding value on the x-axis."""
cprob = self.CDF(start)
if probability < cprob: return (start, cprob)
while (probability > cprob):
start = start + step
cprob = self.CDF(start)
# print start, cprob
return (start, cprob)
# def mean(self):
# """Gives the arithmetic mean of the sample."""
# raise DistributionFunctionError
# def mode(self):
# """Gives the mode of the sample."""
# raise DistributionFunctionError
# def kurtosis(self):
# """Gives the kurtosis of the sample."""
# raise DistributionFunctionError
# def skew(self):
# """Gives the skew of the sample."""
# raise DistributionFunctionError
# def variance(self):
# """Gives the variance of the sample."""
# raise DistributionFunctionError
# def quantile1(self):
# """Gives the 1st quantile of the sample."""
# raise DistributionFunctionError
# def quantile3(self):
# """Gives the 3rd quantile of the sample."""
# raise DistributionFunctionError
# def qmean(self):
# """Gives the quantile of the arithmetic mean of the sample."""
# raise DistributionFunctionError
# def qmode(self):
# """Gives the quantile of the mode of the sample."""
# raise DistributionFunctionError
# def random(self):
# """Gives a random number based on the distribution."""
# raise DistributionFunctionError
class ExtremeLBDistribution(Distribution):
"""Class for Extreme LB distribution."""
# def __init__(self, **parameters):
# """Constructor method. The parameters are used to construct the
# probability distribution."""
# raise DistributionFunctionError
# def CDF(self, x):
# """
# Cummulative Distribution Function, which gives the cummulative
# probability (area under the probability curve) from -infinity or 0 to
# a give x-value on the x-axis where y-axis is the probability."""
# raise DistributionFunctionError
# def PDF(self, x):
# """
# Partial Distribution Function, which gives the probability for the
# particular value of x, or the area under probability distribution
# from x-h to x+h for continuous distribution."""
# raise DistributionFunctionError
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value
and returns the corresponding value on the x-axis."""
cprob = self.CDF(start)
if probability < cprob: return (start, cprob)
while (probability > cprob):
start = start + step
cprob = self.CDF(start)
# print start, cprob
return (start, cprob)
# def mean(self):
# """Gives the arithmetic mean of the sample."""
# raise DistributionFunctionError
# def mode(self):
# """Gives the mode of the sample."""
# raise DistributionFunctionError
# def kurtosis(self):
# """Gives the kurtosis of the sample."""
# raise DistributionFunctionError
# def skew(self):
# """Gives the skew of the sample."""
# raise DistributionFunctionError
# def variance(self):
# """Gives the variance of the sample."""
# raise DistributionFunctionError
# def quantile1(self):
# """Gives the 1st quantile of the sample."""
# raise DistributionFunctionError
# def quantile3(self):
# """Gives the 3rd quantile of the sample."""
# raise DistributionFunctionError
# def qmean(self):
# """Gives the quantile of the arithmetic mean of the sample."""
# raise DistributionFunctionError
# def qmode(self):
# """Gives the quantile of the mode of the sample."""
# raise DistributionFunctionError
# def random(self):
# """Gives a random number based on the distribution."""
# raise DistributionFunctionError
class FiskDistribution(Distribution):
"""Class for Fisk distribution."""
# def __init__(self, **parameters):
# """Constructor method. The parameters are used to construct the
# probability distribution."""
# raise DistributionFunctionError
# def CDF(self, x):
# """
# Cummulative Distribution Function, which gives the cummulative
# probability (area under the probability curve) from -infinity or 0 to
# a give x-value on the x-axis where y-axis is the probability."""
# raise DistributionFunctionError
# def PDF(self, x):
# """
# Partial Distribution Function, which gives the probability for the
# particular value of x, or the area under probability distribution
# from x-h to x+h for continuous distribution."""
# raise DistributionFunctionError
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value
and returns the corresponding value on the x-axis."""
cprob = self.CDF(start)
if probability < cprob: return (start, cprob)
while (probability > cprob):
start = start + step
cprob = self.CDF(start)
# print start, cprob
return (start, cprob)
# def mean(self):
# """Gives the arithmetic mean of the sample."""
# raise DistributionFunctionError
# def mode(self):
# """Gives the mode of the sample."""
# raise DistributionFunctionError
# def kurtosis(self):
# """Gives the kurtosis of the sample."""
# raise DistributionFunctionError
# def skew(self):
# """Gives the skew of the sample."""
# raise DistributionFunctionError
# def variance(self):
# """Gives the variance of the sample."""
# raise DistributionFunctionError
# def quantile1(self):
# """Gives the 1st quantile of the sample."""
# raise DistributionFunctionError
# def quantile3(self):
# """Gives the 3rd quantile of the sample."""
# raise DistributionFunctionError
# def qmean(self):
# """Gives the quantile of the arithmetic mean of the sample."""
# raise DistributionFunctionError
# def qmode(self):
# """Gives the quantile of the mode of the sample."""
# raise DistributionFunctionError
# def random(self):
# """Gives a random number based on the distribution."""
# raise DistributionFunctionError
def FisherTippettDistribution(location, scale):
"""
Fisher-Tippett distribution is an alias of Gumbel distribution.
@param location: S{eta}
@param scale: S{theta}"""
return GumbelDistribution(location, scale)
class FoldedNormalDistribution(Distribution):
"""Class for Folded Normal distribution."""
# def __init__(self, **parameters):
# """Constructor method. The parameters are used to construct the
# probability distribution."""
# raise DistributionFunctionError
# def CDF(self, x):
# """
# Cummulative Distribution Function, which gives the cummulative
# probability (area under the probability curve) from -infinity or 0 to
# a give x-value on the x-axis where y-axis is the probability."""
# raise DistributionFunctionError
# def PDF(self, x):
# """
# Partial Distribution Function, which gives the probability for the
# particular value of x, or the area under probability distribution
# from x-h to x+h for continuous distribution."""
# raise DistributionFunctionError
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value
and returns the corresponding value on the x-axis."""
cprob = self.CDF(start)
if probability < cprob: return (start, cprob)
while (probability > cprob):
start = start + step
cprob = self.CDF(start)
# print start, cprob
return (start, cprob)
# def mean(self):
# """Gives the arithmetic mean of the sample."""
# raise DistributionFunctionError
# def mode(self):
# """Gives the mode of the sample."""
# raise DistributionFunctionError
# def kurtosis(self):
# """Gives the kurtosis of the sample."""
# raise DistributionFunctionError
# def skew(self):
# """Gives the skew of the sample."""
# raise DistributionFunctionError
# def variance(self):
# """Gives the variance of the sample."""
# raise DistributionFunctionError
# def quantile1(self):
# """Gives the 1st quantile of the sample."""
# raise DistributionFunctionError
# def quantile3(self):
# """Gives the 3rd quantile of the sample."""
# raise DistributionFunctionError
# def qmean(self):
# """Gives the quantile of the arithmetic mean of the sample."""
# raise DistributionFunctionError
# def qmode(self):
# """Gives the quantile of the mode of the sample."""
# raise DistributionFunctionError
# def random(self):
# """Gives a random number based on the distribution."""
# raise DistributionFunctionError
class GenLogisticDistribution(Distribution):
"""
Generalized Logistic distribution is a generalization of Logistic
distribution. It becomes Logistic distribution when shape = 1
"""
# def __init__(self, **parameters):
# """Constructor method. The parameters are used to construct the
# probability distribution."""
# raise DistributionFunctionError
# def CDF(self, x):
# """
# Cummulative Distribution Function, which gives the cummulative
# probability (area under the probability curve) from -infinity or 0 to
# a give x-value on the x-axis where y-axis is the probability."""
# raise DistributionFunctionError
# def PDF(self, x):
# """
# Partial Distribution Function, which gives the probability for the
# particular value of x, or the area under probability distribution
# from x-h to x+h for continuous distribution."""
# raise DistributionFunctionError
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value
and returns the corresponding value on the x-axis."""
cprob = self.CDF(start)
if probability < cprob: return (start, cprob)
while (probability > cprob):
start = start + step
cprob = self.CDF(start)
# print start, cprob
return (start, cprob)
# def mean(self):
# """Gives the arithmetic mean of the sample."""
# raise DistributionFunctionError
# def mode(self):
# """Gives the mode of the sample."""
# raise DistributionFunctionError
# def kurtosis(self):
# """Gives the kurtosis of the sample."""
# raise DistributionFunctionError
# def skew(self):
# """Gives the skew of the sample."""
# raise DistributionFunctionError
# def variance(self):
# """Gives the variance of the sample."""
# raise DistributionFunctionError
# def quantile1(self):
# """Gives the 1st quantile of the sample."""
# raise DistributionFunctionError
# def quantile3(self):
# """Gives the 3rd quantile of the sample."""
# raise DistributionFunctionError
# def qmean(self):
# """Gives the quantile of the arithmetic mean of the sample."""
# raise DistributionFunctionError
# def qmode(self):
# """Gives the quantile of the mode of the sample."""
# raise DistributionFunctionError
# def random(self):
# """Gives a random number based on the distribution."""
# raise DistributionFunctionError
def GompertzDistribution(location, scale):
"""
Gompertz distribution is an alias of Gumbel distribution.
@param location: S{eta}
@param scale: S{theta}"""
return GumbelDistribution(location, scale)
class GumbelDistribution(Distribution):
"""Class for Gumbel Distribution."""
def __init__(self, location, scale):
"""Constructor method. The parameters are used to construct the
probability distribution.
@param location: S{eta}
@param scale: S{theta}"""
self.location = location
self.scale = scale
def CDF(self, x):
"""
Cumulative Distribution Function, which gives the cumulative
probability (area under the probability curve) from -infinity or 0 to
a given x-value on the x-axis where y-axis is the probability."""
return math.exp(-1 * math.exp((self.location - x) / self.scale))
def PDF(self, x):
"""
Partial Distribution Function, which gives the probability for the
particular value of x, or the area under probability distribution from
x-h to x+h for continuous distribution."""
return (1/self.scale) * math.exp((self.location - x) / self.scale) * \
self.CDF(x)
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value and
returns the corresponding value on the x-axis."""
cprob = self.CDF(start)
if probability < cprob: return (start, cprob)
while (probability > cprob):
start = start + step
cprob = self.CDF(start)
# print start, cprob
return (start, cprob)
def mean(self):
"""Gives the arithmetic mean of the sample."""
return self.location + (GAMMA * self.scale)
def mode(self):
"""Gives the mode of the sample."""
return self.location
def median(self):
"""Gives the median of the sample."""
return self.location - self.scale * math.log10(math.log10(2))
def kurtosis(self):
"""Gives the kurtosis of the sample."""
return 2.4
def skew(self):
"""Gives the skew of the sample."""
return 1.1395
def variance(self):
"""Gives the variance of the sample."""
return 1.667 * ((PI * self.scale) ** 2)
def quantile1(self):
"""Gives the 1st quantile of the sample."""
return self.location - self.scale * math.log10(math.log10(4))
def quantile3(self):
"""Gives the 3rd quantile of the sample."""
return self.location - self.scale * math.log10(math.log10(1.333))
def qmean(self):
"""Gives the quantile of the arithmetic mean of the sample."""
return 0.5704
def qmode(self):
"""Gives the quantile of the mode of the sample."""
return 0.3679
def random(self, seed):
"""Gives a random number based on the distribution."""
while 1:
seed = self.location - \
(self.scale * math.log10(-1 * math.log10(seed)))
yield seed
class HalfNormalDistribution(Distribution):
"""
Half Normal distribution is a special case of Chi distribution where
shape (also degrees of freedom) = 1, and Folded Normal distribution
where location = 0
"""
def __init__(self, **parameters):
"""Constructor method. The parameters are used to construct the
probability distribution."""
try: self.distribution = ChiDistribution(location =
parameters['location'],
scale = parameters['scale'],
shape = 1)
except KeyError:
raise DistributionParameterError('Halfnormal distribution \
requires location and scale parameters')
def CDF(self, x):
"""
Cummulative Distribution Function, which gives the cummulative
probability (area under the probability curve) from -infinity or 0 to
a give x-value on the x-axis where y-axis is the probability."""
return self.distribution.CDF(x)
def PDF(self, x):
"""
Partial Distribution Function, which gives the probability for the
particular value of x, or the area under probability distribution from
x-h to x+h for continuous distribution."""
return self.distribution.PDF(x)
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value and
returns the corresponding value on the x-axis."""
return self.distribution.inverseCDF(probability, start, step)
def mean(self):
"""Gives the arithmetic mean of the sample."""
return self.distribution.mean()
def mode(self):
"""Gives the mode of the sample."""
return self.distribution.mode()
def kurtosis(self):
"""Gives the kurtosis of the sample."""
return self.distribution.kurtosis()
def skew(self):
"""Gives the skew of the sample."""
return self.distribution.skew()
def variance(self):
"""Gives the variance of the sample."""
return self.distribution.variance()
class HyperbolicSecantDistribution(Distribution):
"""Class for Hyperbolic Secant Distribution."""
def __init__(self, location, scale):
"""
Constructor method. The parameters are used to construct the
probability distribution.
@param location:
@param scale:"""
self.location = location
self.scale = scale
def CDF(self, x):
"""
Cummulative Distribution Function, which gives the cummulative
probability (area under the probability curve) from -infinity or 0 to
a give x-value on the x-axis where y-axis is the probability."""
return (2 / PI) * \
(1 / math.tan(math.exp((x - self.location) / self.scale)))
def PDF(self, x):
"""
Partial Distribution Function, which gives the probability for the
particular value of x, or the area under probability distribution
from x-h to x+h for continuous distribution."""
return (1 / math.cosh((x - self.location) / self.scale)) / \
(PI * math.scale)
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value
and returns the corresponding value on the x-axis."""
cprob = self.CDF(start)
if probability < cprob: return (start, cprob)
while (probability > cprob):
start = start + step
cprob = self.CDF(start)
# print start, cprob
return (start, cprob)
def mean(self):
"""Gives the arithmetic mean of the sample."""
return self.location
def mode(self):
"""Gives the mode of the sample."""
return self.location
def kurtosis(self):
"""Gives the kurtosis of the sample."""
return 2.0
def skew(self):
"""Gives the skew of the sample."""
return 0.0
def variance(self):
"""Gives the variance of the sample."""
return 0.25 * ((PI * self.scale) ** 2)
# def quantile1(self):
# """Gives the 1st quantile of the sample."""
# raise DistributionFunctionError
# def quantile3(self):
# """Gives the 3rd quantile of the sample."""
# raise DistributionFunctionError
def qmean(self):
"""Gives the quantile of the arithmetic mean of the sample."""
return 0.5
def qmode(self):
"""Gives the quantile of the mode of the sample."""
return 0.5
# def random(self):
# """Gives a random number based on the distribution."""
# raise DistributionFunctionError
class LaplaceDistribution(Distribution):
# def __init__(self, **parameters):
# """Constructor method. The parameters are used to construct the
# probability distribution."""
# raise DistributionFunctionError
# def CDF(self, x):
# """
# Cummulative Distribution Function, which gives the cummulative
# probability (area under the probability curve) from -infinity or 0 to
# a give x-value on the x-axis where y-axis is the probability."""
# raise DistributionFunctionError
# def PDF(self, x):
# """
# Partial Distribution Function, which gives the probability for the
# particular value of x, or the area under probability distribution
# from x-h to x+h for continuous distribution."""
# raise DistributionFunctionError
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value
and returns the corresponding value on the x-axis."""
cprob = self.CDF(start)
if probability < cprob: return (start, cprob)
while (probability > cprob):
start = start + step
cprob = self.CDF(start)
# print start, cprob
return (start, cprob)
# def mean(self):
# """Gives the arithmetic mean of the sample."""
# raise DistributionFunctionError
# def mode(self):
# """Gives the mode of the sample."""
# raise DistributionFunctionError
# def kurtosis(self):
# """Gives the kurtosis of the sample."""
# raise DistributionFunctionError
# def skew(self):
# """Gives the skew of the sample."""
# raise DistributionFunctionError
# def variance(self):
# """Gives the variance of the sample."""
# raise DistributionFunctionError
# def quantile1(self):
# """Gives the 1st quantile of the sample."""
# raise DistributionFunctionError
# def quantile3(self):
# """Gives the 3rd quantile of the sample."""
# raise DistributionFunctionError
# def qmean(self):
# """Gives the quantile of the arithmetic mean of the sample."""
# raise DistributionFunctionError
# def qmode(self):
# """Gives the quantile of the mode of the sample."""
# raise DistributionFunctionError
# def random(self):
# """Gives a random number based on the distribution."""
# raise DistributionFunctionError
class LogisticDistribution(Distribution):
# def __init__(self, **parameters):
# """Constructor method. The parameters are used to construct the
# probability distribution."""
# raise DistributionFunctionError
# def CDF(self, x):
# """
# Cummulative Distribution Function, which gives the cummulative
# probability (area under the probability curve) from -infinity or 0 to
# a give x-value on the x-axis where y-axis is the probability."""
# raise DistributionFunctionError
# def PDF(self, x):
# """
# Partial Distribution Function, which gives the probability for the
# particular value of x, or the area under probability distribution
# from x-h to x+h for continuous distribution."""
# raise DistributionFunctionError
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value
and returns the corresponding value on the x-axis."""
cprob = self.CDF(start)
if probability < cprob: return (start, cprob)
while (probability > cprob):
start = start + step
cprob = self.CDF(start)
# print start, cprob
return (start, cprob)
# def mean(self):
# """Gives the arithmetic mean of the sample."""
# raise DistributionFunctionError
# def mode(self):
# """Gives the mode of the sample."""
# raise DistributionFunctionError
# def kurtosis(self):
# """Gives the kurtosis of the sample."""
# raise DistributionFunctionError
# def skew(self):
# """Gives the skew of the sample."""
# raise DistributionFunctionError
# def variance(self):
# """Gives the variance of the sample."""
# raise DistributionFunctionError
# def quantile1(self):
# """Gives the 1st quantile of the sample."""
# raise DistributionFunctionError
# def quantile3(self):
# """Gives the 3rd quantile of the sample."""
# raise DistributionFunctionError
# def qmean(self):
# """Gives the quantile of the arithmetic mean of the sample."""
# raise DistributionFunctionError
# def qmode(self):
# """Gives the quantile of the mode of the sample."""
# raise DistributionFunctionError
# def random(self):
# """Gives a random number based on the distribution."""
# raise DistributionFunctionError
def LogLogisticDistribution(**parameters):
"""
Log-Logistic distribution is an alias of Fisk distribution."""
return FiskDistribution(**parameters)
class LogNormalDistribution(Distribution):
def __init__(self, a, b):
"""Constructor method. The parameters are used to construct the
probability distribution."""
self.location = a
self. scale = b
if (b ** 2) < 0:
raise AttributeError
# def CDF(self, x):
# """
# Cummulative Distribution Function, which gives the cummulative
# probability (area under the probability curve) from -infinity or 0
# to a give x-value on the x-axis where y-axis is the probability."""
# raise DistributionFunctionError
# def PDF(self, x):
# """
# Partial Distribution Function, which gives the probability for the
# particular value of x, or the area under probability distribution
# from x-h to x+h for continuous distribution."""
# raise DistributionFunctionError
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value and
returns the corresponding value on the x-axis."""
cprob = self.CDF(start)
if probability < cprob: return (start, cprob)
while (probability > cprob):
start = start + step
cprob = self.CDF(start)
# print start, cprob
return (start, cprob)
def mean(self):
"""Gives the arithmetic mean of the sample."""
return math.exp((self.location + (self.scale ** 2) * self.location*(-1)))
# def mode(self):
# """Gives the mode of the sample."""
# raise DistributionFunctionError
# def kurtosis(self):
# """Gives the kurtosis of the sample."""
# raise DistributionFunctionError
# def skew(self):
# """Gives the skew of the sample."""
# raise DistributionFunctionError
# def variance(self):
# """Gives the variance of the sample."""
# raise DistributionFunctionError
def random(self):
"""Gives a random number based on the distribution."""
return random.lognormalvariate(self.location, self.scale)
def LogWeibullDistribution(location, scale):
"""
Log-Weibull distribution is an alias of Gumbel distribution.
@param location: S{eta}
@param scale: S{theta}"""
return GumbelDistribution(location, scale)
def LorentzDistribution(**parameters):
"""
Lorentz distribution is an alias of Cauchy distribution."""
return CauchyDistribution(**parameters)
class MaxwellDistribution(Distribution):
"""
Maxwell distribution is a special case of Chi distribution where
location = 0 and shape (degrees of freedom) = 3
"""
def __init__(self, scale):
"""
Constructor method.
@param scale:"""
self.distribution = ChiDistribution(0, scale, 3)
def CDF(self, x):
"""
Cummulative Distribution Function, which gives the cummulative
probability (area under the probability curve) from -infinity or 0 to
a give x-value on the x-axis where y-axis is the probability."""
return self.distribution.CDF(x)
def PDF(self, x):
"""
Partial Distribution Function, which gives the probability for the
particular value of x, or the area under probability distribution from
x-h to x+h for continuous distribution."""
return self.distribution.PDF(x)
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value and
the corresponding value on the x-axis."""
return self.distribution.inverseCDF(probability, start, step)
def mean(self):
"""Gives the arithmetic mean of the sample."""
return self.distribution.mean()
def mode(self):
"""Gives the mode of the sample."""
return self.distribution.mode()
def kurtosis(self):
"""Gives the kurtosis of the sample."""
return self.distribution.kurtosis()
def skew(self):
"""Gives the skew of the sample."""
return self.distribution.skew()
def variance(self):
"""Gives the variance of the sample."""
return self.distribution.variance()
# def random(self):
# """Gives a random number based on the distribution."""
# return self.distribution.random()
class NakagamiDistribution(Distribution):
# def __init__(self, **parameters):
# """Constructor method. The parameters are used to construct the
# probability distribution."""
# raise DistributionFunctionError
# def CDF(self, x):
# """
# Cummulative Distribution Function, which gives the cummulative
# probability (area under the probability curve) from -infinity or 0 to
# a give x-value on the x-axis where y-axis is the probability."""
# raise DistributionFunctionError
# def PDF(self, x):
# """
# Partial Distribution Function, which gives the probability for the
# particular value of x, or the area under probability distribution
# from x-h to x+h for continuous distribution."""
# raise DistributionFunctionError
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value
and returns the corresponding value on the x-axis."""
cprob = self.CDF(start)
if probability < cprob: return (start, cprob)
while (probability > cprob):
start = start + step
cprob = self.CDF(start)
# print start, cprob
return (start, cprob)
# def mean(self):
# """Gives the arithmetic mean of the sample."""
# raise DistributionFunctionError
# def mode(self):
# """Gives the mode of the sample."""
# raise DistributionFunctionError
# def kurtosis(self):
# """Gives the kurtosis of the sample."""
# raise DistributionFunctionError
# def skew(self):
# """Gives the skew of the sample."""
# raise DistributionFunctionError
# def variance(self):
# """Gives the variance of the sample."""
# raise DistributionFunctionError
# def quantile1(self):
# """Gives the 1st quantile of the sample."""
# raise DistributionFunctionError
# def quantile3(self):
# """Gives the 3rd quantile of the sample."""
# raise DistributionFunctionError
# def qmean(self):
# """Gives the quantile of the arithmetic mean of the sample."""
# raise DistributionFunctionError
# def qmode(self):
# """Gives the quantile of the mode of the sample."""
# raise DistributionFunctionError
# def random(self):
# """Gives a random number based on the distribution."""
# raise DistributionFunctionError
class NegativeBinomialDistribution(Distribution):
"""Class for Negative Binomial Distribution."""
def __init__(self, success, target):
"""Constructor method. The parameters are used to construct the
probability distribution.
@param success: probability of success; 0 <= success <= 1
@param target: a constant, target number of successes"""
self.success = success
self.target = target
def CDF(self, x):
"""
Cummulative Distribution Function, which gives the cummulative
probability (area under the probability curve) from -infinity or 0 to
a give x-value on the x-axis where y-axis is the probability."""
summation = 0.0
for i in range(x): summation = summation + self.PDF(i)
return summation
def PDF(self, x):
"""
Partial Distribution Function, which gives the probability for the
particular value of x, or the area under probability distribution from
x-h to x+h for continuous distribution."""
return nrpy.bico(x - 1, self.target - 1) * \
(self.success ** self.target) * \
((1 - self.success) ** (x - self.target))
def inverseCDF(self, probability, start = 0, step = 1):
"""
It does the reverse of CDF() method, it takes a probability value and
returns the corresponding value on the x-axis."""
cprob = self.CDF(start)
if probability < cprob: return (start, cprob)
while (probability > cprob):
start = start + step
cprob = self.CDF(start)
# print start, cprob
return (start, cprob)
def mean(self):
"""Gives the arithmetic mean of the sample."""
return self.target / self.success
def mode(self):
"""Gives the mode of the sample."""
return int((self.success + self.target - 1)/self.success)
# def random(self):
# """Gives a random number based on the distribution."""
# raise DistributionFunctionError
def NegativeExponentialDistribution(**parameters):
"""
Negative-exponential distribution is an alias of Exponential distribution."""
return ExponentialDistribution(**parameters)
class ParetoDistribution(Distribution):
"""Class for Pareto Distribution."""
def __init__(self, location=1.0, scale=1.0):
"""Constructor method. The parameters are used to construct the
probability distribution.
@param location: also the scale; default = 1.0
@param scale: S{lambda}; default = 1.0"""
self.location = location
self.scale = scale
def CDF(self, x):
"""
Cummulative Distribution Function, which gives the cummulative
probability (area under the probability curve) from -infinity or 0 to
a give x-value on the x-axis where y-axis is the probability."""
return 1 - (self.location/x) ** self.scale
def PDF(self, x):
"""
Partial Distribution Function, which gives the probability for the
particular value of x, or the area under probability distribution from
x-h to x+h for continuous distribution."""
return (self.shape * (self.location ** self.scale)) / \
(x ** (self.scale + 1))
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value and
returns the corresponding value on the x-axis."""
cprob = self.CDF(start)
if probability < cprob: return (start, cprob)
while (probability > cprob):
start = start + step
cprob = self.CDF(start)
# print start, cprob
return (start, cprob)
def mean(self):
"""Gives the arithmetic mean of the sample."""
return (self.location * self.scale) / (self.scale - 1)
def mode(self):
"""Gives the mode of the sample."""
return self.location
def median(self):
"""Gives the median of the sample."""
return self.location * (2 ** (1/self.scale))
def kurtosis(self):
"""Gives the kurtosis of the sample."""
n = 6 * (self.scale ** 3 + self.scale ** 2 + 6 * self.scale - 2)
d = self.scale * (self.scale ** 2 - 7 * self.scale + 12)
return n/d
def skew(self):
"""Gives the skew of the sample."""
n = 2 * (self.scale + 1) * math.sqrt(self.scale - 2)
d = (self.scale - 3) * math.sqrt(self.scale)
return n/d
def variance(self):
"""Gives the variance of the sample."""
n = (self.location ** 2) * self.scale
d = (self.scale - 2) * ((self.scale - 1) ** 2)
return n/d
def quantile1(self):
"""Gives the 1st quantile of the sample."""
return self.location * (1.333 ** (1/self.scale))
def quantile3(self):
"""Gives the 3rd quantile of the sample."""
return self.location * (4 ** (1/self.scale))
def qmean(self):
"""Gives the quantile of the arithmetic mean of the sample."""
return 1 - (((self.scale - 1) / self.scale) ** self.scale)
def qmode(self):
"""Gives the quantile of the mode of the sample."""
return 0.0
def random(self):
"""Gives a random number based on the distribution."""
return random.paretovariate(self.scale)
class PascalDistribution(Distribution):
"""
Class for Pascal Distribution. Pascal Distribution is a form of Negative
Binomial Distribution where the 'target' is an integer
"""
def __init__(self, success, target):
"""Constructor method.
@param success: probability of success; 0 <= success <= 1
@param target: a constant, target number of successes"""
self.distribution = NegativeBinomialDistribution(success, int(target))
def CDF(self, x):
"""
Cummulative Distribution Function, which gives the cummulative
probability (area under the probability curve) from -infinity or 0 to
a give x-value on the x-axis where y-axis is the probability."""
return self.distribution.CDF(x)
def PDF(self, x):
"""
Partial Distribution Function, which gives the probability for the
particular value of x, or the area under probability distribution from
x-h to x+h for continuous distribution."""
return self.distribution.PDF(x)
def inverseCDF(self, probability, start = 0.0, step =0.01):
"""
It does the reverse of CDF() method, it takes a probability value and
returns the corresponding value on the x-axis."""
return self.distribution.inverseCDF(probability, start, step)
def mean(self):
"""Gives the arithmetic mean of the sample."""
return self.distribution.mean()
def mode(self):
"""Gives the mode of the sample."""
return self.distribution.mode()
def kurtosis(self):
"""Gives the kurtosis of the sample."""
return self.distribution.kurtosis()
def skew(self):
"""Gives the skew of the sample."""
return self.distribution.skew()
def variance(self):
"""Gives the variance of the sample."""
return self.distribution.variance()
# def random(self):
# """Gives a random number based on the distribution."""
# return self.distribution.random()
def PolyaDistribution(success, target):
"""
Polya distribution is an alias of Negative Binomial distribution.
@param success: probability of success; 0 <= success <= 1
@param target: a constant, target number of successes
"""
return NegativeBinomialDistribution(success, target)
class PowerFunctionDistribution(Distribution):
"""
Class for Power Function Distribution. It is a form of Beta Distribution.
"""
def __init__(self, shape):
"""Constructor method.
@param shape:
"""
self.distribution = BetaDistribution(0, 1, shape, 1)
def CDF(self, x):
"""
Cummulative Distribution Function, which gives the cummulative
probability (area under the probability curve) from -infinity or 0 to
a give x-value on the x-axis where y-axis is the probability."""
return self.distribution.CDF(x)
def PDF(self, x):
"""
Partial Distribution Function, which gives the probability for the
particular value of x, or the area under probability distribution from
x-h to x+h for continuous distribution."""
return self.distribution.PDF(x)
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value and
returns the corresponding value on the x-axis."""
return self.distribution.inverseCDF(probability, start, step)
def mean(self):
"""Gives the arithmetic mean of the sample."""
return self.distribution.mean()
def mode(self):
"""Gives the mode of the sample."""
return self.distribution.mode()
def kurtosis(self):
"""Gives the kurtosis of the sample."""
return self.distribution.kurtosis()
def skew(self):
"""Gives the skew of the sample."""
return self.distribution.skew()
def variance(self):
"""Gives the variance of the sample."""
return self.distribution.variance()
# def random(self):
# """Gives a random number based on the distribution."""
# return self.distribution.random()
class RademacherDistribution(Distribution):
"""Class for Rademacher Distribution."""
def __init__(self):
"""Constructor method."""
pass
def CDF(self, x):
"""
Cummulative Distribution Function, which gives the cummulative
probability (area under the probability curve) from -infinity or 0 to
a give x-value on the x-axis where y-axis is the probability."""
if x < -1:
return 0.0
elif x > -1 and x < 1:
return 0.5
else: return 1.0
def PDF(self, x):
"""
Partial Distribution Function, which gives the probability for the
particular value of x, or the area under probability distribution
from x-h to x+h for continuous distribution."""
if x == -1 or x == 1: return 0.5
else: return 0.0
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value
and returns the corresponding value on the x-axis."""
if probability == 0.0: return (-1.0001, 0.0)
if probability == 1.0: return (1.0, 1.0)
else: return (0.999, 0.5)
def mean(self):
"""Gives the arithmetic mean of the sample."""
return 0
def skew(self):
"""Gives the skew of the sample."""
return 0
def variance(self):
"""Gives the variance of the sample."""
return 1
# def random(self):
# """Gives a random number based on the distribution."""
# raise DistributionFunctionError
class RayleighDistribution(Distribution):
"""
Rayleigh distribution is a special case of Chi distribution where
location = 0 and shape (degrees of freedom) = 2
"""
def __init__(self, scale):
"""Constructor method.
@param scale:"""
self.distribution = ChiDistribution(0, scale, 2)
def CDF(self, x):
"""
Cummulative Distribution Function, which gives the cummulative
probability (area under the probability curve) from -infinity or 0 to
a give x-value on the x-axis where y-axis is the probability."""
return self.distribution.CDF(x)
def PDF(self, x):
"""
Partial Distribution Function, which gives the probability for the
particular value of x, or the area under probability distribution from
x-h to x+h for continuous distribution."""
return self.distribution.PDF(x)
def inverseCDF(self, probability, start = 0.0, step =0.01):
"""
It does the reverse of CDF() method, it takes a probability value and
returns the corresponding value on the x-axis."""
return self.distribution.inverseCDF(probability, start, step)
def mean(self):
"""Gives the arithmetic mean of the sample."""
return self.distribution.mean()
def mode(self):
"""Gives the mode of the sample."""
return self.distribution.mode()
def kurtosis(self):
"""Gives the kurtosis of the sample."""
return self.distribution.kurtosis()
def skew(self):
"""Gives the skew of the sample."""
return self.distribution.skew()
def variance(self):
"""Gives the variance of the sample."""
return self.distribution.variance()
# def random(self):
# """Gives a random number based on the distribution."""
# return self.distribution.random()
class ReciprocalDistribution(Distribution):
# def __init__(self, **parameters):
# """Constructor method. The parameters are used to construct the
# probability distribution."""
# raise DistributionFunctionError
# def CDF(self, x):
# """
# Cummulative Distribution Function, which gives the cummulative
# probability (area under the probability curve) from -infinity or 0 to
# a give x-value on the x-axis where y-axis is the probability."""
# raise DistributionFunctionError
# def PDF(self, x):
# """
# Partial Distribution Function, which gives the probability for the
# particular value of x, or the area under probability distribution
# from x-h to x+h for continuous distribution."""
# raise DistributionFunctionError
def inverseCDF(self, probability, start=0.0, step=0.01):
"""
It does the reverse of CDF() method, it takes a probability value
and returns the corresponding value on the x-axis."""
cprob = self.CDF(start)
if probability < cprob: return (start, cprob)
while (probability > cprob):
start = start + step
cprob = self.CDF(start)
# print start, cprob
return (start, cprob)
# def mean(self):
# """Gives the arithmetic mean of the sample."""
# raise DistributionFunctionError
# def mode(self):
# """Gives the mode of the sample."""
# raise DistributionFunctionError
# def kurtosis(self):
# """Gives the kurtosis of the sample."""
# raise DistributionFunctionError
# def skew(self):
# """Gives the skew of the sample."""
# raise DistributionFunctionError
# def variance(self):
# """Gives the variance of the sample."""
# raise DistributionFunctionError
# def quantile1(self):
# """Gives the 1st quantile of the sample."""
# raise DistributionFunctionError
# def quantile3(self):
# """Gives the 3rd quantile of the sample."""
# raise DistributionFunctionError
# def qmean(self):
# """Gives the quantile of the arithmetic mean of the sample."""
# raise DistributionFunctionError
# def qmode(self):
# """Gives the quantile of the mode of the sample."""
# raise DistributionFunctionError
# def random(self):
# """Gives a random number based on the distribution."""
# raise DistributionFunctionError
def RectangularDistribution(**parameters):
"""
Rectangular distribution is an alias of Uniform distribution."""
return UniformDistribution(**parameters)
def SechSquaredDistribution(**parameters):
"""
Sech-squared distribution is an alias of Logistic distribution."""
return LogisticDistribution(**parameters)
def WaldDistribution(**parameters):
"""
Wald distribution is an alias of Inverse Normal distribution."""
return InverseNormalDistribution(**parameters)
#class DummyDistribution(Distribution):
# def __init__(self, **parameters):
# """Constructor method. The parameters are used to construct the
# probability distribution."""
# raise DistributionFunctionError
# def CDF(self, x):
# """
# Cummulative Distribution Function, which gives the cummulative
# probability (area under the probability curve) from -infinity or 0 to
# a give x-value on the x-axis where y-axis is the probability."""
# raise DistributionFunctionError
# def PDF(self, x):
# """
# Partial Distribution Function, which gives the probability for the
# particular value of x, or the area under probability distribution
# from x-h to x+h for continuous distribution."""
# raise DistributionFunctionError
# def inverseCDF(self, probability, start=0.0, step=0.01):
# """
# It does the reverse of CDF() method, it takes a probability value
# and returns the corresponding value on the x-axis."""
# cprob = self.CDF(start)
# if probability < cprob: return (start, cprob)
# while (probability > cprob):
# start = start + step
# cprob = self.CDF(start)
# # print start, cprob
# return (start, cprob)
# def mean(self):
# """Gives the arithmetic mean of the sample."""
# raise DistributionFunctionError
# def mode(self):
# """Gives the mode of the sample."""
# raise DistributionFunctionError
# def kurtosis(self):
# """Gives the kurtosis of the sample."""
# raise DistributionFunctionError
# def skew(self):
# """Gives the skew of the sample."""
# raise DistributionFunctionError
# def variance(self):
# """Gives the variance of the sample."""
# raise DistributionFunctionError
# def quantile1(self):
# """Gives the 1st quantile of the sample."""
# raise DistributionFunctionError
# def quantile3(self):
# """Gives the 3rd quantile of the sample."""
# raise DistributionFunctionError
# def qmean(self):
# """Gives the quantile of the arithmetic mean of the sample."""
# raise DistributionFunctionError
# def qmode(self):
# """Gives the quantile of the mode of the sample."""
# raise DistributionFunctionError
## def random(self, seed):
## """Gives a random number based on the distribution."""
## while 1:
## func
## yield seed
| gpl-3.0 | 288,445,279,155,761,150 | 35.911425 | 83 | 0.581699 | false |
janusnic/21v-pyqt | unit_02/con3.py | 1 | 1328 | ##!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Code PyQt4
In this example, we create a simple
window in PyQt4.
"""
from PyQt4 import QtCore, QtGui
class MyWindow(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.button1 = QtGui.QPushButton(u"Кнопка 1. Нажми меня")
self.button2 = QtGui.QPushButton(u"Кнопка 2")
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.button1)
vbox.addWidget(self.button2)
self.setLayout(vbox)
self.resize(300, 100)
# Передача сигнала от кнопки 1 к кнопке 2
self.connect(self.button1, QtCore.SIGNAL("clicked()"), self.button2, QtCore.SIGNAL('clicked()'))
# Способ 1 (4 параметра)
self.connect(self.button2, QtCore.SIGNAL("clicked()"), self, QtCore.SLOT("on_clicked_button2()"))
# Способ 2 (3 параметра)
self.connect(self.button2, QtCore.SIGNAL("clicked()"), QtCore.SLOT("on_clicked_button2()"))
@QtCore.pyqtSlot()
def on_clicked_button2(self):
print("Сигнал получен кнопкой 2")
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
window = MyWindow()
window.show()
sys.exit(app.exec_()) | mit | -807,823,098,126,535,000 | 31.315789 | 105 | 0.629177 | false |
appsembler/configuration | playbooks/roles/backups/files/backup.py | 1 | 17068 | #!/usr/bin/python
import argparse
import datetime
import logging
import math
import os
import requests
import shutil
import socket
import subprocess
import sys
import time
import raven
def make_file_prefix(base_name):
hostname = socket.gethostname()
return '{0}_{1}'.format(hostname, base_name)
def make_file_name(base_name):
"""
Create a file name based on the hostname, a base_name, and date
e.g. openedxlite12345_mysql_20140102
"""
return '{0}_{1}'.format(make_file_prefix(base_name), datetime.datetime.now().
strftime("%Y%m%d"))
def upload_to_s3(file_path, bucket, aws_access_key_id, aws_secret_access_key):
"""
Upload a file to the specified S3 bucket.
file_path: An absolute path to the file to be uploaded.
bucket: The name of an S3 bucket.
aws_access_key_id: An AWS access key.
aws_secret_access_key: An AWS secret access key.
"""
from filechunkio import FileChunkIO
import boto
logging.info('Uploading backup at "{}" to Amazon S3 bucket "{}"'
.format(file_path, bucket))
conn = boto.connect_s3(aws_access_key_id, aws_secret_access_key)
bucket = conn.lookup(bucket)
file_name = os.path.basename(file_path)
file_size = os.stat(file_path).st_size
chunk_size = 104857600 # 100 MB
chunk_count = int(math.ceil(file_size / float(chunk_size)))
multipart_upload = bucket.initiate_multipart_upload(file_name)
for i in range(chunk_count):
offset = chunk_size * i
bytes_to_read = min(chunk_size, file_size - offset)
with FileChunkIO(file_path, 'r', offset=offset, bytes=bytes_to_read) as fp:
logging.info('Upload chunk {}/{}'.format(i + 1, chunk_count))
multipart_upload.upload_part_from_file(fp, part_num=(i + 1))
multipart_upload.complete_upload()
logging.info('Upload successful')
def upload_to_gcloud_storage(file_path, bucket):
"""
Upload a file to the specified Google Cloud Storage bucket.
Note that the host machine must be properly configured to use boto with a
Google Cloud Platform service account. See
https://cloud.google.com/storage/docs/xml-api/gspythonlibrary.
file_path: An absolute path to the file to be uploaded.
bucket: The name of a Google Cloud Storage bucket.
"""
import boto
import gcs_oauth2_boto_plugin
logging.info('Uploading backup at "{}" to Google Cloud Storage bucket '
'"{}"'.format(file_path, bucket))
file_name = os.path.basename(file_path)
gcloud_uri = boto.storage_uri(bucket + '/' + file_name, 'gs')
gcloud_uri.new_key().set_contents_from_filename(file_path)
logging.info('Upload successful')
def upload_to_azure_storage(file_path, bucket, account, key):
"""
Upload a file to the specified Azure Storage container.
file_path: An absolute path to the file to be uploaded.
bucket: The name of an Azure Storage container.
account: An Azure Storage account.
key: An Azure Storage account key.
"""
from azure.storage.blob import BlockBlobService
logging.info('Uploading backup at "{}" to Azure Storage container'
'"{}"'.format(file_path, bucket))
file_name = os.path.basename(file_path)
blob_service = BlockBlobService(account_name=account, account_key=key)
blob_service.create_blob_from_path(bucket, file_name, file_path)
logging.info('Upload successful')
class NoBackupsFound(Exception):
pass
def monitor_gcloud_backups(bucket, service, sentry, pushgateway):
"""Double check the backups in the Google Cloud Storage Bucket
Finds the most recent backup file and pushes the creation
timestamp to our monitoring. This gives us something of a "dead
man's switch" to alert us if the previous day's backups failed
silently.
We also raise a Sentry error if there are no backups found or
if this monitoring process fails.
bucket: The name of a Google Cloud Storage bucket.
service: the service name (really only supports 'mongodb' currently)
sentry: The sentry client
pushgateway: URL of the pushgateway
"""
import boto
import gcs_oauth2_boto_plugin
logging.info('checking backups in Google Cloud Storage bucket '
'"{}"'.format(bucket))
sentry.extra_context({'bucket': bucket})
try:
gcloud_uri = boto.storage_uri(bucket, 'gs')
keys = gcloud_uri.get_all_keys()
prefix = make_file_prefix(service)
backups = [k for k in keys if k.key.startswith(prefix)]
if len(backups) < 1:
raise NoBackupsFound("There are no backup files in the bucket")
backups.sort(key=lambda x: x.last_modified)
most_recent = backups[-1]
sentry.extra_context({'most_recent': most_recent})
last_modified = datetime.datetime.strptime(most_recent.last_modified,
'%Y-%m-%dT%H:%M:%S.%fZ')
push_backups_age_metric(pushgateway, socket.gethostname(),
float(last_modified.strftime('%s')),
backups_type=service)
logging.info('Monitoring successful')
except Exception:
sentry.CaptureException()
def push_backups_age_metric(gateway, instance, value, backups_type="mongodb"):
""" submits backups timestamp to push gateway service
labelled with the instance (typically hostname) and type ('mongodb'
or 'mysql')"""
headers = {
'Content-type': 'application/octet-stream'
}
requests.post(
'{}/metrics/job/backups_monitor/instance/{}'.format(gateway, instance),
data='backups_timestamp{type="%s"} %f\n' % (backups_type, value),
headers=headers)
def compress_backup(backup_path):
"""
Compress a backup using tar and gzip.
backup_path: An absolute path to a file or directory containing a
database dump.
returns: The absolute path to the compressed backup file.
"""
logging.info('Compressing backup at "{}"'.format(backup_path))
compressed_backup_path = backup_path + '.tar.gz'
zip_cmd = ['tar', '-zcvf', compressed_backup_path, backup_path]
ret = subprocess.call(zip_cmd, env={'GZIP': '-9'})
if ret: # if non-zero return
error_msg = 'Error occurred while compressing backup'
logging.error(error_msg)
raise Exception(error_msg)
return compressed_backup_path
def dump_service(service_name, backup_dir, user='', password=''):
"""
Dump the database contents for a service.
service_name: The name of the service to dump, either mysql or mongodb.
backup_dir: The directory where the database is to be dumped.
returns: The absolute path of the file or directory containing the
dump.
"""
commands = {
'mysql': 'mysqldump -u root --all-databases --single-transaction > {}',
'mongodb': 'mongodump -o {}',
}
if user and password:
commands['mongodb'] += (' --authenticationDatabase admin -u {} -p {}'
.format(user, password))
cmd_template = commands.get(service_name)
if cmd_template:
backup_filename = make_file_name(service_name)
backup_path = os.path.join(backup_dir, backup_filename)
cmd = cmd_template.format(backup_path)
logging.info('Dumping database: `{}`'.format(cmd))
ret = subprocess.call(cmd, shell=True)
if ret: # if non-zero return
error_msg = 'Error occurred while dumping database'
logging.error(error_msg)
raise Exception(error_msg)
return backup_path
else:
error_msg = 'Unknown service {}'.format(service_name)
logging.error(error_msg)
raise Exception(error_msg)
def clean_up(backup_path):
"""
Remove the local database dump and the corresponding tar file if it exists.
backup_path: An absolute path to a file or directory containing a
database dump.
"""
logging.info('Cleaning up "{}"'.format(backup_path))
backup_tar = backup_path + '.tar.gz'
if os.path.isfile(backup_tar):
os.remove(backup_tar)
try:
if os.path.isdir(backup_path):
shutil.rmtree(backup_path)
elif os.path.isfile(backup_path):
os.remove(backup_path)
except OSError:
logging.exception('Removing files at {} failed!'.format(backup_path))
def restore(service_name, backup_path, uncompress=True, settings=None):
"""
Restore a database from a backup.
service_name: The name of the service whose database is to be restored,
either mysql or mongodb.
backup_path: The absolute path to a backup.
uncompress: If True, the backup is assumed to be a gzipped tar and is
uncompressed before the database restoration.
"""
if service_name == 'mongodb':
restore_mongodb(backup_path, uncompress)
elif service_name == 'mysql':
restore_mysql(backup_path, uncompress, settings=settings)
def restore_mongodb(backup_path, uncompress=True):
"""
Restore a MongoDB database from a backup.
backup_path: The absolute path to a backup.
uncompress: If True, the backup is assumed to be a gzipped tar and is
uncompressed before the database restoration.
"""
logging.info('Restoring MongoDB from "{}"'.format(backup_path))
if uncompress:
backup_path = _uncompress(backup_path)
cmd = 'mongorestore {}'.format(backup_path)
ret = subprocess.call(cmd, shell=True)
if ret: # if non-zero return
error_msg = 'Error occurred while restoring MongoDB backup'
logging.error(error_msg)
raise Exception(error_msg)
logging.info('MongoDB successfully restored')
def restore_mysql(backup_path, uncompress=True, settings=None):
"""
Restore a MySQL database from a backup.
backup_path: The absolute path to a backup.
uncompress: If True, the backup is assumed to be a gzipped tar and is
uncompressed before the database restoration.
"""
logging.info('Restoring MySQL from "{}"'.format(backup_path))
if uncompress:
backup_path = _uncompress(backup_path)
cmd = 'mysqladmin -f drop edxapp'
ret = subprocess.call(cmd, shell=True)
if ret: # if non-zero return
error_msg = 'Error occurred while deleting old mysql database'
logging.error(error_msg)
raise Exception(error_msg)
cmd = 'mysqladmin -f create edxapp'
ret = subprocess.call(cmd, shell=True)
if ret: # if non-zero return
error_msg = 'Error occurred while creating new mysql database'
logging.error(error_msg)
raise Exception(error_msg)
cmd = 'mysql -D edxapp < {0}'.format(backup_path)
ret = subprocess.call(cmd, shell=True)
if ret: # if non-zero return
error_msg = 'Error occurred while restoring mysql database'
logging.error(error_msg)
raise Exception(error_msg)
cmd = ('source /edx/app/edxapp/edxapp_env && /edx/bin/manage.edxapp '
'lms migrate --settings={}'.format(settings))
ret = subprocess.call(cmd, shell=True, executable="/bin/bash")
if ret: # if non-zero return
error_msg = 'Error occurred while running edx migrations'
logging.error(error_msg)
raise Exception(error_msg)
cmd = '/edx/bin/supervisorctl restart edxapp:'
ret = subprocess.call(cmd, shell=True)
if ret: # if non-zero return
error_msg = 'Error occurred while restarting edx'
logging.error(error_msg)
raise Exception(error_msg)
logging.info('MySQL successfully restored')
def _uncompress(file_path):
"""
Uncompress a gzipped tar file. The contents of the compressed file are
extracted to the directory containing the compressed file.
file_path: An absolute path to a gzipped tar file.
returns: The directory containing the contents of the compressed file.
"""
logging.info('Uncompressing file at "{}"'.format(file_path))
file_dir = os.path.dirname(file_path)
cmd = 'tar xzvf {}'.format(file_path)
ret = subprocess.call(cmd, shell=True)
if ret: # if non-zero return
error_msg = 'Error occurred while uncompressing {}'.format(file_path)
logging.error(error_msg)
raise Exception(error_msg)
return file_path.replace('.tar.gz', '')
def _parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('service', help='mongodb or mysql')
parser.add_argument('-r', '--restore-path',
help='path to a backup used to restore a database')
parser.add_argument('-d', '--dir', dest='backup_dir',
help='temporary storage directory used during backup')
parser.add_argument('-u', '--user', help='database user')
parser.add_argument('--password', help='database password')
parser.add_argument('-p', '--provider', help='gs or s3')
parser.add_argument('-b', '--bucket', help='bucket name')
parser.add_argument('-i', '--s3-id', dest='s3_id',
help='AWS access key id')
parser.add_argument('-k', '--s3-key', dest='s3_key',
help='AWS secret access key')
parser.add_argument('--azure-account', dest='azure_account',
help='Azure storage account')
parser.add_argument('--azure-key', dest='azure_key',
help='Azure storage account key')
parser.add_argument('-n', '--uncompressed', dest='compressed',
action='store_false', default=True,
help='disable compression')
parser.add_argument('-s', '--settings',
help='Django settings used when running database '
'migrations')
parser.add_argument('--sentry-dsn', help='Sentry data source name')
parser.add_argument('--pushgateway', help='Prometheus pushgateway URL')
return parser.parse_args()
def _main():
args = _parse_args()
program_name = os.path.basename(sys.argv[0])
backup_dir = (args.backup_dir or os.environ.get('BACKUP_DIR',
'/tmp/db_backups'))
user = args.user or os.environ.get('BACKUP_USER', '')
password = args.password or os.environ.get('BACKUP_PASSWORD', '')
bucket = args.bucket or os.environ.get('BACKUP_BUCKET')
compressed = args.compressed
provider = args.provider or os.environ.get('BACKUP_PROVIDER', 'gs')
restore_path = args.restore_path
s3_id = args.s3_id or os.environ.get('BACKUP_AWS_ACCESS_KEY_ID')
s3_key = args.s3_key or os.environ.get('BACKUP_AWS_SECRET_ACCESS_KEY')
azure_account = args.azure_account or os.environ.get('BACKUP_AZURE_STORAGE_ACCOUNT')
azure_key = args.azure_key or os.environ.get('BACKUP_AZURE_STORAGE_KEY')
settings = args.settings or os.environ.get('BACKUP_SETTINGS', 'aws_appsembler')
sentry_dsn = args.sentry_dsn or os.environ.get('BACKUP_SENTRY_DSN', '')
pushgateway = args.pushgateway or os.environ.get('PUSHGATEWAY', 'https://pushgateway.infra.appsembler.com')
service = args.service
sentry = raven.Client(sentry_dsn)
if program_name == 'edx_backup':
backup_path = ''
try:
if not os.path.exists(backup_dir):
os.makedirs(backup_dir)
backup_path = dump_service(service, backup_dir, user, password)
if compressed:
backup_path = compress_backup(backup_path)
if provider == 'gs':
upload_to_gcloud_storage(backup_path, bucket)
elif provider == 's3':
upload_to_s3(backup_path, bucket, aws_access_key_id=s3_id,
aws_secret_access_key=s3_key)
elif provider == 'azure':
upload_to_azure_storage(backup_path, bucket, azure_account,
azure_key)
else:
error_msg = ('Invalid storage provider specified. Please use '
'"gs" or "s3".')
logging.warning(error_msg)
except:
logging.exception("The backup failed!")
sentry.captureException(fingerprint=['{{ default }}', time.time()])
finally:
clean_up(backup_path.replace('.tar.gz', ''))
elif program_name == 'edx_restore':
restore(service, restore_path, compressed, settings=settings)
elif program_name == 'edx_backups_monitor':
if provider == 'gs':
monitor_gcloud_backups(bucket, service, sentry, pushgateway)
else:
# other providers not supported yet
logging.warning("no backup monitoring available for this provider")
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
_main()
| agpl-3.0 | 4,505,051,146,387,217,400 | 34.558333 | 111 | 0.629189 | false |
jtacoma/geometriki | geometriki/tests/functional/test_pages.py | 1 | 2036 | # This file is part of geometriki.
#
# geometriki is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# geometriki is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with geometriki, in a file named COPYING. If not,
# see <http://www.gnu.org/licenses/>.
from helloworld.tests import *
class TestPagesController(TestController):
def test_index(self):
response = self.app.get(url('pages'))
# Test response...
def test_index_as_xml(self):
response = self.app.get(url('formatted_pages', format='xml'))
def test_create(self):
response = self.app.post(url('pages'))
def test_new(self):
response = self.app.get(url('new_page'))
def test_new_as_xml(self):
response = self.app.get(url('formatted_new_page', format='xml'))
def test_update(self):
response = self.app.put(url('page', id=1))
def test_update_browser_fakeout(self):
response = self.app.post(url('page', id=1), params=dict(_method='put'))
def test_delete(self):
response = self.app.delete(url('page', id=1))
def test_delete_browser_fakeout(self):
response = self.app.post(url('page', id=1), params=dict(_method='delete'))
def test_show(self):
response = self.app.get(url('page', id=1))
def test_show_as_xml(self):
response = self.app.get(url('formatted_page', id=1, format='xml'))
def test_edit(self):
response = self.app.get(url('edit_page', id=1))
def test_edit_as_xml(self):
response = self.app.get(url('formatted_edit_page', id=1, format='xml'))
| agpl-3.0 | 943,609,453,979,199,700 | 34.103448 | 82 | 0.667485 | false |
maliceio/malice-pdf | pdfparser/pdf_parser.py | 1 | 58940 | """
Modified by CSE to fit ASSEMBLYLINE service
"""
__description__ = 'pdf-parser, use it to parse a PDF document'
__author__ = 'Didier Stevens'
__version__ = '0.6.8'
__date__ = '2017/10/29'
__minimum_python_version__ = (2, 5, 1)
__maximum_python_version__ = (3, 6, 3)
"""
Source code put in public domain by Didier Stevens, no Copyright
https://DidierStevens.com
Use at your own risk
History:
2008/05/02: continue
2008/05/03: continue
2008/06/02: streams
2008/10/19: refactor, grep & extract functionality
2008/10/20: reference
2008/10/21: cleanup
2008/11/12: V0.3 dictionary parser
2008/11/13: option elements
2008/11/14: continue
2009/05/05: added /ASCIIHexDecode support (thanks Justin Prosco)
2009/05/11: V0.3.1 updated usage, added --verbose and --extract
2009/07/16: V0.3.2 Added Canonicalize (thanks Justin Prosco)
2009/07/18: bugfix EqualCanonical
2009/07/24: V0.3.3 Added --hash option
2009/07/25: EqualCanonical for option --type, added option --nocanonicalizedoutput
2009/07/28: V0.3.4 Added ASCII85Decode support
2009/08/01: V0.3.5 Updated ASCIIHexDecode to support whitespace obfuscation
2009/08/30: V0.3.6 TestPythonVersion
2010/01/08: V0.3.7 Added RLE and LZW support (thanks pARODY); added dump option
2010/01/09: Fixed parsing of incomplete startxref
2010/09/22: V0.3.8 Changed dump option, updated PrettyPrint, added debug option
2011/12/17: fixed bugs empty objects
2012/03/11: V0.3.9 fixed bugs double nested [] in PrettyPrintSub (thanks kurt)
2013/01/11: V0.3.10 Extract and dump bug fixes by Priit; added content option
2013/02/16: Performance improvement in cPDFTokenizer by using StringIO for token building by Christophe Vandeplas; xrange replaced with range
  2013/02/16: V0.4.0 added http/https support; added error handling for missing file or URL; added support for ZIP file with password 'infected'
2013/03/13: V0.4.1 fixes for Python 3
2013/04/11: V0.4.2 modified PrettyPrintSub for strings with unprintable characters
2013/05/04: Added options searchstream, unfiltered, casesensitive, regex
2013/09/18: V0.4.3 fixed regression bug -w option
2014/09/25: V0.5.0 added option -g
2014/09/29: Added PrintGenerateObject and PrintOutputObject
2014/12/05: V0.6.0 Added YARA support
2014/12/09: cleanup, refactoring
2014/12/13: Python 3 fixes
2015/01/11: Added support for multiple YARA rule files; added request to search in trailer
  2015/01/31: V0.6.1 Added option yarastrings
2015/02/09: Added decoders
2015/04/05: V0.6.2 Added generateembedded
2015/04/06: fixed bug reported by Kurt for stream produced by Ghostscript where endstream is not preceded by whitespace; fixed prettyprint bug
2015/04/24: V0.6.3 when option dump's filename is -, content is dumped to stdout
2015/08/12: V0.6.4 option hash now also calculates hashes of streams when selecting or searching objects; and displays hexasciidump first line
2016/07/27: V0.6.5 bugfix whitespace 0x00 0x0C after stream 0x0D 0x0A reported by @mr_me
2016/11/20: V0.6.6 added workaround zlib errors FlateDecode
2016/12/17: V0.6.7 added option -k
2017/01/07: V0.6.8 changed cPDFParseDictionary to handle strings () with % character
2017/10/28: fixed bug
2017/10/29: added # support for option -y
Todo:
- handle printf todo
- support for JS hex string EC61C64349DB8D88AF0523C4C06E0F4D.pdf.vir
"""
import re
import optparse
import zlib
import binascii
import hashlib
import sys
import zipfile
import time
import os
if sys.version_info[0] >= 3:
from io import StringIO
import urllib.request
urllib23 = urllib.request
else:
from cStringIO import StringIO
import urllib2
urllib23 = urllib2
try:
import yara
except:
pass
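# yara support is optional: if the module is not installed, the import error
# is silently swallowed and yara stays undefined, so YARA-based features are
# unavailable.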
CHAR_WHITESPACE = 1
CHAR_DELIMITER = 2
CHAR_REGULAR = 3
CONTEXT_NONE = 1
CONTEXT_OBJ = 2
CONTEXT_XREF = 3
CONTEXT_TRAILER = 4
PDF_ELEMENT_COMMENT = 1
PDF_ELEMENT_INDIRECT_OBJECT = 2
PDF_ELEMENT_XREF = 3
PDF_ELEMENT_TRAILER = 4
PDF_ELEMENT_STARTXREF = 5
PDF_ELEMENT_MALFORMED = 6
dumplinelength = 16
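# The tokenizer labels every token with one of the CHAR_* classes above; the
# parser tracks where it is in the file with the CONTEXT_* values and emits
# one PDF_ELEMENT_* element type per parsed construct.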
# C2BIP3: Convert To Bytes If Python 3
def C2BIP3(string):
if sys.version_info[0] > 2:
return bytes([ord(x) for x in string])
else:
return string
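# Example (sketch): C2BIP3('infected') yields b'infected' on Python 3 and the
# unchanged str on Python 2, so string literals can be passed to APIs that
# expect bytes (e.g. the ZIP password below).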
# CIC: Call If Callable
def CIC(expression):
if callable(expression):
return expression()
else:
return expression
# IFF: IF Function
def IFF(expression, valueTrue, valueFalse):
if expression:
return CIC(valueTrue)
else:
return CIC(valueFalse)
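# Example (sketch): IFF(flag, lambda: Expensive(), '-') only calls Expensive()
# when flag is true, because CIC() invokes callables and passes plain values
# through; together they behave as a lazy ternary operator.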
def Timestamp(epoch=None):
if epoch == None:
localTime = time.localtime()
else:
localTime = time.localtime(epoch)
return '%04d%02d%02d-%02d%02d%02d' % localTime[0:6]
def CopyWithoutWhiteSpace(content):
result = []
for token in content:
if token[0] != CHAR_WHITESPACE:
result.append(token)
return result
def Obj2Str(content):
return ''.join(map(lambda x: repr(x[1])[1:-1], CopyWithoutWhiteSpace(content)))
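# cPDFDocument abstracts the input source (an already-open file-like object, a
# local file, an http(s) URL, or a ZIP archive protected with the password
# 'infected') into a single byte stream with one-byte push-back via unget().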
class cPDFDocument:
def __init__(self, filepath):
self.file = filepath
if type(filepath) != str:
self.infile = filepath
elif filepath.lower().startswith('http://') or filepath.lower().startswith('https://'):
try:
if sys.hexversion >= 0x020601F0:
self.infile = urllib23.urlopen(filepath, timeout=5)
else:
self.infile = urllib23.urlopen(filepath)
            except urllib23.HTTPError:
                raise Exception('Error accessing URL %s: %s' % (filepath, sys.exc_info()[1]))
elif filepath.lower().endswith('.zip'):
try:
self.zipfile = zipfile.ZipFile(filepath, 'r')
self.infile = self.zipfile.open(self.zipfile.infolist()[0], 'r', C2BIP3('infected'))
            except:
                raise Exception('Error opening ZIP file %s: %s' % (filepath, sys.exc_info()[1]))
else:
try:
self.infile = open(filepath, 'rb')
except:
raise Exception('Error opening file %s' % filepath)
self.ungetted = []
self.position = -1
def byte(self):
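        # Serve pushed-back bytes first, then read one byte from the input;
        # returns None (and closes the file) at end of input.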
if len(self.ungetted) != 0:
self.position += 1
return self.ungetted.pop()
inbyte = self.infile.read(1)
        if not inbyte:
self.infile.close()
return None
self.position += 1
return ord(inbyte)
def unget(self, byte):
self.position -= 1
self.ungetted.append(byte)
def CharacterClass(byte):
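    # Byte classes follow the PDF specification (ISO 32000-1, section 7.2.2):
    # NUL, TAB, LF, FF, CR and SPACE are whitespace; ( ) < > [ ] { } / % are
    # delimiters; every other byte is a regular character.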
if byte == 0 or byte == 9 or byte == 10 or byte == 12 or byte == 13 or byte == 32:
return CHAR_WHITESPACE
if byte == 0x28 or byte == 0x29 or byte == 0x3C or byte == 0x3E or byte == 0x5B or byte == 0x5D or byte == 0x7B or byte == 0x7D or byte == 0x2F or byte == 0x25:
return CHAR_DELIMITER
return CHAR_REGULAR
def IsNumeric(value):
    return re.match('^[0-9]+', value)
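# cPDFTokenizer is the lexer: it groups the document's bytes into
# (character-class, string) tuples, keeping runs of whitespace and regular
# characters together and treating '<<', '>>' and '%...' comments as single
# delimiter tokens.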
class cPDFTokenizer:
def __init__(self, file):
try:
self.oPDF = cPDFDocument(file)
except Exception as e:
raise Exception(e)
self.ungetted = []
def Token(self):
if len(self.ungetted) != 0:
return self.ungetted.pop()
if self.oPDF == None:
return None
self.byte = self.oPDF.byte()
if self.byte == None:
self.oPDF = None
return None
elif CharacterClass(self.byte) == CHAR_WHITESPACE:
file_str = StringIO()
while self.byte != None and CharacterClass(self.byte) == CHAR_WHITESPACE:
file_str.write(chr(self.byte))
self.byte = self.oPDF.byte()
if self.byte != None:
self.oPDF.unget(self.byte)
else:
self.oPDF = None
self.token = file_str.getvalue()
return (CHAR_WHITESPACE, self.token)
elif CharacterClass(self.byte) == CHAR_REGULAR:
file_str = StringIO()
while self.byte != None and CharacterClass(self.byte) == CHAR_REGULAR:
file_str.write(chr(self.byte))
self.byte = self.oPDF.byte()
if self.byte != None:
self.oPDF.unget(self.byte)
else:
self.oPDF = None
self.token = file_str.getvalue()
return (CHAR_REGULAR, self.token)
else:
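            # Delimiter handling: '<' and '>' can pair up into the dictionary
            # markers '<<' and '>>', and '%' starts a comment that runs to the
            # end of the line.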
if self.byte == 0x3C:
self.byte = self.oPDF.byte()
if self.byte == 0x3C:
return (CHAR_DELIMITER, '<<')
else:
self.oPDF.unget(self.byte)
return (CHAR_DELIMITER, '<')
elif self.byte == 0x3E:
self.byte = self.oPDF.byte()
if self.byte == 0x3E:
return (CHAR_DELIMITER, '>>')
else:
self.oPDF.unget(self.byte)
return (CHAR_DELIMITER, '>')
elif self.byte == 0x25:
file_str = StringIO()
while self.byte != None:
file_str.write(chr(self.byte))
if self.byte == 10 or self.byte == 13:
self.byte = self.oPDF.byte()
break
self.byte = self.oPDF.byte()
if self.byte != None:
if self.byte == 10:
file_str.write(chr(self.byte))
else:
self.oPDF.unget(self.byte)
else:
self.oPDF = None
self.token = file_str.getvalue()
return (CHAR_DELIMITER, self.token)
return (CHAR_DELIMITER, chr(self.byte))
def TokenIgnoreWhiteSpace(self):
token = self.Token()
while token != None and token[0] == CHAR_WHITESPACE:
token = self.Token()
return token
def Tokens(self):
tokens = []
token = self.Token()
while token != None:
tokens.append(token)
token = self.Token()
return tokens
def unget(self, byte):
self.ungetted.append(byte)
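# Editor's sketch (illustrative only; the CHAR_* constants are defined earlier
# in this module). The tokenizer splits the input into whitespace, delimiter
# and regular runs, so '1 0 obj' yields five tokens:
def _TokenizerExample():
    return cPDFTokenizer(StringIO('1 0 obj')).Tokens()
    # [(CHAR_REGULAR, '1'), (CHAR_WHITESPACE, ' '), (CHAR_REGULAR, '0'),
    #  (CHAR_WHITESPACE, ' '), (CHAR_REGULAR, 'obj')]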
class cPDFParser:
def __init__(self, file, verbose=False, extract=None):
self.context = CONTEXT_NONE
self.content = []
try:
self.oPDFTokenizer = cPDFTokenizer(file)
except Exception as e:
raise Exception(e)
self.verbose = verbose
self.extract = extract
def GetObject(self):
while True:
if self.context == CONTEXT_OBJ:
self.token = self.oPDFTokenizer.Token()
else:
self.token = self.oPDFTokenizer.TokenIgnoreWhiteSpace()
if self.token:
if self.token[0] == CHAR_DELIMITER:
if self.token[1][0] == '%':
if self.context == CONTEXT_OBJ:
self.content.append(self.token)
else:
return cPDFElementComment(self.token[1])
elif self.token[1] == '/':
self.token2 = self.oPDFTokenizer.Token()
if self.token2[0] == CHAR_REGULAR:
if self.context != CONTEXT_NONE:
self.content.append((CHAR_DELIMITER, self.token[1] + self.token2[1]))
# elif self.verbose:
# print('todo 1: %s' % (self.token[1] + self.token2[1]))
else:
self.oPDFTokenizer.unget(self.token2)
if self.context != CONTEXT_NONE:
self.content.append(self.token)
# elif self.verbose:
# print('todo 2: %d %s' % (self.token[0], repr(self.token[1])))
elif self.context != CONTEXT_NONE:
self.content.append(self.token)
# elif self.verbose:
# print('todo 3: %d %s' % (self.token[0], repr(self.token[1])))
elif self.token[0] == CHAR_WHITESPACE:
if self.context != CONTEXT_NONE:
self.content.append(self.token)
# elif self.verbose:
# print('todo 4: %d %s' % (self.token[0], repr(self.token[1])))
else:
if self.context == CONTEXT_OBJ:
if self.token[1] == 'endobj':
self.oPDFElementIndirectObject = cPDFElementIndirectObject(
self.objectId, self.objectVersion, self.content)
self.context = CONTEXT_NONE
self.content = []
return self.oPDFElementIndirectObject
else:
self.content.append(self.token)
elif self.context == CONTEXT_TRAILER:
if self.token[1] == 'startxref' or self.token[1] == 'xref':
self.oPDFElementTrailer = cPDFElementTrailer(self.content)
self.oPDFTokenizer.unget(self.token)
self.context = CONTEXT_NONE
self.content = []
return self.oPDFElementTrailer
else:
self.content.append(self.token)
elif self.context == CONTEXT_XREF:
if self.token[1] == 'trailer' or self.token[1] == 'xref':
self.oPDFElementXref = cPDFElementXref(self.content)
self.oPDFTokenizer.unget(self.token)
self.context = CONTEXT_NONE
self.content = []
return self.oPDFElementXref
else:
self.content.append(self.token)
else:
if IsNumeric(self.token[1]):
self.token2 = self.oPDFTokenizer.TokenIgnoreWhiteSpace()
if IsNumeric(self.token2[1]):
self.token3 = self.oPDFTokenizer.TokenIgnoreWhiteSpace()
if self.token3[1] == 'obj':
self.objectId = eval(self.token[1])
self.objectVersion = eval(self.token2[1])
self.context = CONTEXT_OBJ
else:
self.oPDFTokenizer.unget(self.token3)
self.oPDFTokenizer.unget(self.token2)
# if self.verbose:
# print('todo 6: %d %s' % (self.token[0], repr(self.token[1])))
else:
self.oPDFTokenizer.unget(self.token2)
# if self.verbose:
# print('todo 7: %d %s' % (self.token[0], repr(self.token[1])))
elif self.token[1] == 'trailer':
self.context = CONTEXT_TRAILER
self.content = [self.token]
elif self.token[1] == 'xref':
self.context = CONTEXT_XREF
self.content = [self.token]
elif self.token[1] == 'startxref':
self.token2 = self.oPDFTokenizer.TokenIgnoreWhiteSpace()
if self.token2 and IsNumeric(self.token2[1]):
return cPDFElementStartxref(eval(self.token2[1]))
else:
self.oPDFTokenizer.unget(self.token2)
# if self.verbose:
# print('todo 9: %d %s' % (self.token[0], repr(self.token[1])))
elif self.extract:
self.bytes = ''
while self.token:
self.bytes += self.token[1]
self.token = self.oPDFTokenizer.Token()
return cPDFElementMalformed(self.bytes)
# elif self.verbose:
# print('todo 10: %d %s' % (self.token[0], repr(self.token[1])))
else:
break
class cPDFElementComment:
def __init__(self, comment):
self.type = PDF_ELEMENT_COMMENT
self.comment = comment
# if re.match('^%PDF-[0-9]\.[0-9]', self.token[1]):
# print(repr(self.token[1]))
# elif re.match('^%%EOF', self.token[1]):
# print(repr(self.token[1]))
class cPDFElementXref:
def __init__(self, content):
self.type = PDF_ELEMENT_XREF
self.content = content
class cPDFElementTrailer:
def __init__(self, content):
self.type = PDF_ELEMENT_TRAILER
self.content = content
def Contains(self, keyword):
data = ''
for i in range(0, len(self.content)):
if self.content[i][1] == 'stream':
break
else:
data += Canonicalize(self.content[i][1])
return data.upper().find(keyword.upper()) != -1
def IIf(expr, truepart, falsepart):
if expr:
return truepart
else:
return falsepart
class cPDFElementIndirectObject:
def __init__(self, id, version, content):
self.type = PDF_ELEMENT_INDIRECT_OBJECT
self.id = id
self.version = version
self.content = content
#fix stream for Ghostscript bug reported by Kurt
if self.ContainsStream():
position = len(self.content) - 1
if position < 0:
return
while self.content[position][0] == CHAR_WHITESPACE and position >= 0:
position -= 1
if position < 0:
return
if self.content[position][0] != CHAR_REGULAR:
return
if self.content[position][1] == 'endstream':
return
if not self.content[position][1].endswith('endstream'):
return
self.content = self.content[0:position] + [
(self.content[position][0], self.content[position][1][:-len('endstream')])
] + [(self.content[position][0], 'endstream')] + self.content[position + 1:]
def GetType(self):
content = CopyWithoutWhiteSpace(self.content)
dictionary = 0
for i in range(0, len(content)):
if content[i][0] == CHAR_DELIMITER and content[i][1] == '<<':
dictionary += 1
if content[i][0] == CHAR_DELIMITER and content[i][1] == '>>':
dictionary -= 1
if dictionary == 1 and content[i][0] == CHAR_DELIMITER and EqualCanonical(content[i][1],
'/Type') and i < len(content) - 1:
return content[i + 1][1]
return ''
def GetReferences(self):
content = CopyWithoutWhiteSpace(self.content)
references = []
for i in range(0, len(content)):
if i > 1 and content[i][0] == CHAR_REGULAR and content[i][1] == 'R' and content[i - 2][0] == CHAR_REGULAR and IsNumeric(
content[i - 2][1]) and content[i - 1][0] == CHAR_REGULAR and IsNumeric(content[i - 1][1]):
references.append((content[i - 2][1], content[i - 1][1], content[i][1]))
return references
def References(self, index):
for ref in self.GetReferences():
if ref[0] == index:
return True
return False
def ContainsStream(self):
for i in range(0, len(self.content)):
if self.content[i][0] == CHAR_REGULAR and self.content[i][1] == 'stream':
return self.content[0:i]
return False
def Contains(self, keyword):
data = ''
for i in range(0, len(self.content)):
if self.content[i][1] == 'stream':
break
else:
data += Canonicalize(self.content[i][1])
return data.upper().find(keyword.upper()) != -1
def StreamContains(self, keyword, filter, casesensitive, regex):
if not self.ContainsStream():
return False
streamData = self.Stream(filter)
if filter and streamData == 'No filters':
streamData = self.Stream(False)
if regex:
return re.search(keyword, streamData, IIf(casesensitive, 0, re.I))
elif casesensitive:
return keyword in streamData
else:
return keyword.lower() in streamData.lower()
def Stream(self, filter=True):
state = 'start'
countDirectories = 0
data = ''
filters = []
for i in range(0, len(self.content)):
if state == 'start':
if self.content[i][0] == CHAR_DELIMITER and self.content[i][1] == '<<':
countDirectories += 1
if self.content[i][0] == CHAR_DELIMITER and self.content[i][1] == '>>':
countDirectories -= 1
if countDirectories == 1 and self.content[i][0] == CHAR_DELIMITER and EqualCanonical(
self.content[i][1], '/Filter'):
state = 'filter'
elif countDirectories == 0 and self.content[i][0] == CHAR_REGULAR and self.content[i][1] == 'stream':
state = 'stream-whitespace'
elif state == 'filter':
if self.content[i][0] == CHAR_DELIMITER and self.content[i][1][0] == '/':
filters = [self.content[i][1]]
state = 'search-stream'
elif self.content[i][0] == CHAR_DELIMITER and self.content[i][1] == '[':
state = 'filter-list'
elif state == 'filter-list':
if self.content[i][0] == CHAR_DELIMITER and self.content[i][1][0] == '/':
filters.append(self.content[i][1])
elif self.content[i][0] == CHAR_DELIMITER and self.content[i][1] == ']':
state = 'search-stream'
elif state == 'search-stream':
if self.content[i][0] == CHAR_REGULAR and self.content[i][1] == 'stream':
state = 'stream-whitespace'
elif state == 'stream-whitespace':
if self.content[i][0] == CHAR_WHITESPACE:
whitespace = self.content[i][1]
if whitespace.startswith('\x0D\x0A') and len(whitespace) > 2:
data += whitespace[2:]
elif whitespace.startswith('\x0A') and len(whitespace) > 1:
data += whitespace[1:]
else:
data += self.content[i][1]
state = 'stream-concat'
elif state == 'stream-concat':
if self.content[i][0] == CHAR_REGULAR and self.content[i][1] == 'endstream':
if filter:
return self.Decompress(data, filters)
else:
return data
else:
data += self.content[i][1]
else:
return 'Unexpected filter state'
return filters
def Decompress(self, data, filters):
for filter in filters:
if EqualCanonical(filter, '/FlateDecode') or EqualCanonical(filter, '/Fl'):
try:
data = FlateDecode(data)
except zlib.error as e:
message = 'FlateDecode decompress failed'
if len(data) > 0 and ord(data[0]) & 0x0F != 8:
message += ', unexpected compression method: %02x' % ord(data[0])
                    return message + '. zlib.error %s' % e  # e.message was removed in Python 3
elif EqualCanonical(filter, '/ASCIIHexDecode') or EqualCanonical(filter, '/AHx'):
try:
data = ASCIIHexDecode(data)
except:
return 'ASCIIHexDecode decompress failed'
elif EqualCanonical(filter, '/ASCII85Decode') or EqualCanonical(filter, '/A85'):
try:
data = ASCII85Decode(data.rstrip('>'))
except:
return 'ASCII85Decode decompress failed'
elif EqualCanonical(filter, '/LZWDecode') or EqualCanonical(filter, '/LZW'):
try:
data = LZWDecode(data)
except:
return 'LZWDecode decompress failed'
elif EqualCanonical(filter, '/RunLengthDecode') or EqualCanonical(filter, '/R'):
try:
data = RunLengthDecode(data)
except:
return 'RunLengthDecode decompress failed'
# elif i.startswith('/CC') # CCITTFaxDecode
# elif i.startswith('/DCT') # DCTDecode
else:
return 'Unsupported filter: %s' % repr(filters)
if len(filters) == 0:
return 'No filters'
else:
return data
def StreamYARAMatch(self, rules, decoders, decoderoptions, filter):
if not self.ContainsStream():
return None
streamData = self.Stream(filter)
if filter and streamData == 'No filters':
streamData = self.Stream(False)
oDecoders = [cIdentity(streamData, None)]
for cDecoder in decoders:
try:
oDecoder = cDecoder(streamData, decoderoptions)
oDecoders.append(oDecoder)
except Exception as e:
print('Error instantiating decoder: %s' % cDecoder.name)
raise e
results = []
for oDecoder in oDecoders:
while oDecoder.Available():
yaraResults = rules.match(data=oDecoder.Decode())
if yaraResults != []:
results.append([oDecoder.Name(), yaraResults])
return results
class cPDFElementStartxref:
def __init__(self, index):
self.type = PDF_ELEMENT_STARTXREF
self.index = index
class cPDFElementMalformed:
def __init__(self, content):
self.type = PDF_ELEMENT_MALFORMED
self.content = content
def TrimLWhiteSpace(data):
while data != [] and data[0][0] == CHAR_WHITESPACE:
data = data[1:]
return data
def TrimRWhiteSpace(data):
while data != [] and data[-1][0] == CHAR_WHITESPACE:
data = data[:-1]
return data
class cPDFParseDictionary:
def __init__(self, content, nocanonicalizedoutput):
self.content = content
self.nocanonicalizedoutput = nocanonicalizedoutput
dataTrimmed = TrimLWhiteSpace(TrimRWhiteSpace(self.content))
if dataTrimmed == []:
self.parsed = None
elif self.isOpenDictionary(dataTrimmed[0]) and (self.isCloseDictionary(dataTrimmed[-1]) or
self.couldBeCloseDictionary(dataTrimmed[-1])):
self.parsed = self.ParseDictionary(dataTrimmed)[0]
else:
self.parsed = None
def isOpenDictionary(self, token):
return token[0] == CHAR_DELIMITER and token[1] == '<<'
def isCloseDictionary(self, token):
return token[0] == CHAR_DELIMITER and token[1] == '>>'
def couldBeCloseDictionary(self, token):
return token[0] == CHAR_DELIMITER and token[1].rstrip().endswith('>>')
def ParseDictionary(self, tokens):
state = 0 # start
dictionary = []
while tokens != []:
if state == 0:
if self.isOpenDictionary(tokens[0]):
state = 1
else:
return None, tokens
elif state == 1:
if self.isOpenDictionary(tokens[0]):
pass
elif self.isCloseDictionary(tokens[0]):
return dictionary, tokens
elif tokens[0][0] != CHAR_WHITESPACE:
key = ConditionalCanonicalize(tokens[0][1], self.nocanonicalizedoutput)
value = []
state = 2
elif state == 2:
if self.isOpenDictionary(tokens[0]):
value, tokens = self.ParseDictionary(tokens)
dictionary.append((key, value))
state = 1
elif self.isCloseDictionary(tokens[0]):
dictionary.append((key, value))
return dictionary, tokens
elif value == [] and tokens[0][0] == CHAR_WHITESPACE:
pass
elif value == [] and tokens[0][1] == '[':
value.append(tokens[0][1])
elif value != [] and value[0] == '[' and tokens[0][1] != ']':
value.append(tokens[0][1])
elif value != [] and value[0] == '[' and tokens[0][1] == ']':
value.append(tokens[0][1])
dictionary.append((key, value))
value = []
state = 1
elif value == [] and tokens[0][1] == '(':
value.append(tokens[0][1])
elif value != [] and value[0] == '(' and tokens[0][1] != ')':
if tokens[0][1][0] == '%':
tokens = [tokens[0]] + cPDFTokenizer(StringIO(tokens[0][1][1:])).Tokens() + tokens[1:]
value.append('%')
else:
value.append(tokens[0][1])
elif value != [] and value[0] == '(' and tokens[0][1] == ')':
value.append(tokens[0][1])
dictionary.append((key, value))
value = []
state = 1
elif value != [] and tokens[0][1][0] == '/':
dictionary.append((key, value))
key = ConditionalCanonicalize(tokens[0][1], self.nocanonicalizedoutput)
value = []
state = 2
else:
value.append(ConditionalCanonicalize(tokens[0][1], self.nocanonicalizedoutput))
tokens = tokens[1:]
def Retrieve(self):
return self.parsed
def PrettyPrintSubElement(self, prefix, e):
res = ""
if e[1] == []:
res += '%s %s' % (prefix, e[0])
elif type(e[1][0]) == type(''):
if len(e[1]) == 3 and IsNumeric(e[1][0]) and e[1][1] == '0' and e[1][2] == 'R':
joiner = ' '
else:
joiner = ''
value = joiner.join(e[1]).strip()
reprValue = repr(value)
if "'" + value + "'" != reprValue:
value = reprValue
res += '%s %s %s' % (prefix, e[0], value)
else:
res += '%s %s' % (prefix, e[0])
sres = self.PrettyPrintSub(prefix + ' ', e[1])
res += sres
return res
def PrettyPrintSub(self, prefix, dictionary):
res = ""
if dictionary != None:
res = '<<++<<'
for e in dictionary:
sres = self.PrettyPrintSubElement(prefix, e)
res += sres
res += '>>++>>'
return res
def PrettyPrint(self, prefix):
res = self.PrettyPrintSub(prefix, self.parsed)
return res
def Get(self, select):
for key, value in self.parsed:
if key == select:
return value
return None
def GetNestedSub(self, dictionary, select):
for key, value in dictionary:
if key == select:
return self.PrettyPrintSubElement('', [select, value])
if type(value) == type([]) and len(value) > 0 and type(value[0]) == type((None,)):
result = self.GetNestedSub(value, select)
if result != None:
return self.PrettyPrintSubElement('', [select, result])
return None
def GetNested(self, select):
return self.GetNestedSub(self.parsed, select)
def FormatOutput(data, raw):
if raw:
if type(data) == type([]):
return ''.join(map(lambda x: x[1], data))
else:
return data
else:
return repr(data)
#Fix for http://bugs.python.org/issue11395
def StdoutWriteChunked(data):
if sys.version_info[0] > 2:
sys.stdout.buffer.write(data)
else:
while data != '':
sys.stdout.write(data[0:10000])
try:
sys.stdout.flush()
except IOError:
return
data = data[10000:]
def IfWIN32SetBinary(io):
if sys.platform == 'win32':
import msvcrt
msvcrt.setmode(io.fileno(), os.O_BINARY)
def PrintOutputObject(object, filt, nocanonicalizedoutput, dump, show_stream=False, hsh=False, raw=False):
errors = set()
res = ""
res += 'obj %d %d\n' % (object.id, object.version)
res += 'Type: %s\n' % ConditionalCanonicalize(object.GetType(), nocanonicalizedoutput)
res += 'Referencing: %s\n' % ', '.join(map(lambda x: '%s %s %s' % x, object.GetReferences()))
dataPrecedingStream = object.ContainsStream()
oPDFParseDictionary = None
if dataPrecedingStream:
res += 'Contains stream\n'
oPDFParseDictionary = cPDFParseDictionary(dataPrecedingStream, nocanonicalizedoutput)
if hsh:
streamContent = object.Stream(False)
res += 'unfiltered\n'
res += 'len: %6d md5: %s\n' % (len(streamContent), hashlib.md5(streamContent).hexdigest())
res += '%s\n' % HexAsciiDumpLine(streamContent)
streamContent = object.Stream(True)
res += 'filtered\n'
res += 'len: %6d md5: %s\n' % (len(streamContent), hashlib.md5(streamContent).hexdigest())
res += '%s\n' % HexAsciiDumpLine(streamContent)
streamContent = None
else:
if raw:
res += '%s\n' % FormatOutput(object.content, raw)
oPDFParseDictionary = cPDFParseDictionary(object.content, nocanonicalizedoutput)
if show_stream:
res += oPDFParseDictionary.PrettyPrint(' ')
if filt:
filtered = object.Stream()
if filtered == []:
res += ('%s\n' % FormatOutput(object.content, raw))
else:
res += ('%s\n' % FormatOutput(filtered, raw))
if dump:
filtered = object.Stream(filt == True)
if filtered == []:
filtered = ''
fdata = C2BIP3(filtered)
if fdata.startswith('Unsupported filter: '):
errors.add(fdata)
elif len(fdata) > 10:
try:
with open(dump, 'wb') as f:
f.write(fdata)
res += "Object extracted. See extracted files."
except:
errors.add('Error writing file %s' % dump)
return res, errors
def Canonicalize(sIn):
if sIn == '':
return sIn
elif sIn[0] != '/':
return sIn
elif sIn.find('#') == -1:
return sIn
else:
i = 0
iLen = len(sIn)
sCanonical = ''
while i < iLen:
if sIn[i] == '#' and i < iLen - 2:
try:
sCanonical += chr(int(sIn[i + 1:i + 3], 16))
i += 2
except:
sCanonical += sIn[i]
else:
sCanonical += sIn[i]
i += 1
return sCanonical
def EqualCanonical(s1, s2):
return Canonicalize(s1) == s2
def ConditionalCanonicalize(sIn, nocanonicalizedoutput):
if nocanonicalizedoutput:
return sIn
else:
return Canonicalize(sIn)
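# Editor's sketch (illustrative only, not part of the original module). PDF
# names may hex-escape characters with '#', e.g. /Ac#74ion for /Action;
# Canonicalize resolves those escapes so keyword comparisons stay reliable:
def _CanonicalizeExamples():
    assert Canonicalize('/Ac#74ion') == '/Action'
    assert Canonicalize('plain') == 'plain'  # non-name strings pass through
    assert EqualCanonical('/JavaScri#70t', '/JavaScript')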
# http://code.google.com/p/pdfminerr/source/browse/trunk/pdfminer/pdfminer/ascii85.py
def ASCII85Decode(data):
import struct
n = b = 0
out = ''
for c in data:
if '!' <= c and c <= 'u':
n += 1
b = b * 85 + (ord(c) - 33)
if n == 5:
out += struct.pack('>L', b)
n = b = 0
elif c == 'z':
assert n == 0
out += '\0\0\0\0'
elif c == '~':
if n:
for _ in range(5 - n):
b = b * 85 + 84
out += struct.pack('>L', b)[:n - 1]
break
return out
def ASCIIHexDecode(data):
return binascii.unhexlify(''.join([c for c in data if c not in ' \t\n\r']).rstrip('>'))
# if inflating fails, we try to inflate byte per byte (sample 4da299d6e52bbb79c0ac00bad6a1d51d4d5fe42965a8d94e88a359e5277117e2)
def FlateDecode(data):
try:
return zlib.decompress(C2BIP3(data))
except:
if len(data) <= 10:
raise
oDecompress = zlib.decompressobj()
oStringIO = StringIO()
count = 0
for byte in C2BIP3(data):
try:
oStringIO.write(oDecompress.decompress(byte))
count += 1
except:
break
if len(data) - count <= 2:
return oStringIO.getvalue()
else:
raise
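# Editor's sketch (illustrative only) of the byte-per-byte fallback used
# above: feeding zlib one byte at a time recovers streams whose trailing
# checksum bytes are truncated, because the deflate payload itself is intact.
# This demo calls zlib directly to stay independent of C2BIP3's input type:
def _PartialInflateExample():
    blob = zlib.compress(b'stream body' * 4)[:-2]  # drop checksum tail bytes
    oDecompress = zlib.decompressobj()
    out = b''
    for i in range(len(blob)):
        out += oDecompress.decompress(blob[i:i + 1])
    return out  # the full plaintext despite the truncated input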
def RunLengthDecode(data):
f = StringIO(data)
decompressed = ''
runLength = ord(f.read(1))
while runLength:
if runLength < 128:
decompressed += f.read(runLength + 1)
if runLength > 128:
decompressed += f.read(1) * (257 - runLength)
if runLength == 128:
break
runLength = ord(f.read(1))
# return sub(r'(\d+)(\D)', lambda m: m.group(2) * int(m.group(1)), data)
return decompressed
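# Editor's sketch (illustrative only) of the /RunLengthDecode format handled
# above: a length byte L < 128 copies the next L+1 literal bytes, L > 128
# repeats the next byte 257-L times, and L == 128 marks end-of-data.
def _RunLengthExample():
    encoded = '\x02abc' + '\xfbZ' + '\x80'
    return RunLengthDecode(encoded)  # -> 'abcZZZZZZ'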
#### LZW code sourced from pdfminer
# Copyright (c) 2004-2009 Yusuke Shinyama <yusuke at cs dot nyu dot edu>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
class LZWDecoder(object):
def __init__(self, fp):
self.fp = fp
self.buff = 0
self.bpos = 8
self.nbits = 9
self.table = None
self.prevbuf = None
return
def readbits(self, bits):
v = 0
while 1:
# the number of remaining bits we can get from the current buffer.
r = 8 - self.bpos
if bits <= r:
# |-----8-bits-----|
# |-bpos-|-bits-| |
# | |----r----|
v = (v << bits) | ((self.buff >> (r - bits)) & ((1 << bits) - 1))
self.bpos += bits
break
else:
# |-----8-bits-----|
# |-bpos-|---bits----...
# | |----r----|
v = (v << r) | (self.buff & ((1 << r) - 1))
bits -= r
x = self.fp.read(1)
if not x: raise EOFError
self.buff = ord(x)
self.bpos = 0
return v
def feed(self, code):
x = ''
if code == 256:
self.table = [chr(c) for c in range(256)] # 0-255
self.table.append(None) # 256
self.table.append(None) # 257
self.prevbuf = ''
self.nbits = 9
elif code == 257:
pass
elif not self.prevbuf:
x = self.prevbuf = self.table[code]
else:
if code < len(self.table):
x = self.table[code]
self.table.append(self.prevbuf + x[0])
else:
self.table.append(self.prevbuf + self.prevbuf[0])
x = self.table[code]
l = len(self.table)
if l == 511:
self.nbits = 10
elif l == 1023:
self.nbits = 11
elif l == 2047:
self.nbits = 12
self.prevbuf = x
return x
def run(self):
while 1:
try:
code = self.readbits(self.nbits)
except EOFError:
break
x = self.feed(code)
yield x
return
####
def LZWDecode(data):
return ''.join(LZWDecoder(StringIO(data)).run())
def PrintGenerateObject(object, options, newId=None):
if newId == None:
objectId = object.id
else:
objectId = newId
dataPrecedingStream = object.ContainsStream()
if dataPrecedingStream:
if options.filter:
decompressed = object.Stream(True)
if decompressed == 'No filters' or decompressed.startswith('Unsupported filter: '):
                print(' oPDF.stream(%d, %d, %s, %s)' %
                      (objectId, object.version, repr(object.Stream(False).rstrip()),
                       repr(re.sub(r'/Length\s+\d+', '/Length %d', FormatOutput(dataPrecedingStream, True)).strip())))
else:
dictionary = FormatOutput(dataPrecedingStream, True)
dictionary = re.sub(r'/Length\s+\d+', '', dictionary)
dictionary = re.sub(r'/Filter\s*/[a-zA-Z0-9]+', '', dictionary)
dictionary = re.sub(r'/Filter\s*\[.+\]', '', dictionary)
dictionary = re.sub(r'^\s*<<', '', dictionary)
dictionary = re.sub(r'>>\s*$', '', dictionary)
dictionary = dictionary.strip()
print(" oPDF.stream2(%d, %d, %s, %s, 'f')" % (objectId, object.version, repr(decompressed.rstrip()),
repr(dictionary)))
else:
            print(' oPDF.stream(%d, %d, %s, %s)' %
                  (objectId, object.version, repr(object.Stream(False).rstrip()),
                   repr(re.sub(r'/Length\s+\d+', '/Length %d', FormatOutput(dataPrecedingStream, True)).strip())))
else:
print(' oPDF.indirectobject(%d, %d, %s)' % (objectId, object.version,
repr(FormatOutput(object.content, True).strip())))
def File2Strings(filename):
try:
f = open(filename, 'r')
except:
return None
try:
        return [line.rstrip('\n') for line in f.readlines()]  # a list on both Python 2 and 3, unlike map()
except:
return None
finally:
f.close()
def ProcessAt(argument):
if argument.startswith('@'):
strings = File2Strings(argument[1:])
if strings == None:
raise Exception('Error reading %s' % argument)
else:
return strings
else:
return [argument]
def YARACompile(ruledata):
if ruledata.startswith('#'):
if ruledata.startswith('#h#'):
rule = binascii.a2b_hex(ruledata[3:])
elif ruledata.startswith('#b#'):
rule = binascii.a2b_base64(ruledata[3:])
elif ruledata.startswith('#s#'):
rule = 'rule string {strings: $a = "%s" ascii wide nocase condition: $a}' % ruledata[3:]
elif ruledata.startswith('#q#'):
rule = ruledata[3:].replace("'", '"')
else:
rule = ruledata[1:]
return yara.compile(source=rule)
else:
dFilepaths = {}
if os.path.isdir(ruledata):
for root, dirs, files in os.walk(ruledata):
for file in files:
filename = os.path.join(root, file)
dFilepaths[filename] = filename
else:
for filename in ProcessAt(ruledata):
dFilepaths[filename] = filename
return yara.compile(filepaths=dFilepaths)
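# Editor's sketch (assumption: the yara-python package is available as the
# 'yara' module imported earlier in this file). The '#s#' shorthand above
# compiles a one-off rule matching a literal string, handy for quick triage:
def _YaraShorthandExample():
    rules = YARACompile('#s#/JavaScript')
    return rules.match(data='<< /JavaScript (alert(1)) >>')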
def AddDecoder(cClass):
global decoders
decoders.append(cClass)
class cDecoderParent():
pass
def LoadDecoders(decoders, verbose):
if decoders == '':
return
scriptPath = os.path.dirname(sys.argv[0])
for decoder in sum(map(ProcessAt, decoders.split(',')), []):
try:
if not decoder.lower().endswith('.py'):
decoder += '.py'
if os.path.dirname(decoder) == '':
if not os.path.exists(decoder):
scriptDecoder = os.path.join(scriptPath, decoder)
if os.path.exists(scriptDecoder):
decoder = scriptDecoder
exec(open(decoder, 'r').read(), globals(), globals())
except Exception as e:
print('Error loading decoder: %s' % decoder)
if verbose:
raise e
class cIdentity(cDecoderParent):
name = 'Identity function decoder'
def __init__(self, stream, options):
self.stream = stream
self.options = options
self.available = True
def Available(self):
return self.available
def Decode(self):
self.available = False
return self.stream
def Name(self):
return ''
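# Editor's sketch of a user decoder as LoadDecoders expects it (assumption:
# the interface is inferred from cIdentity above; a decoder script would
# register this class by calling AddDecoder(cDecoderROT13); it assumes a
# text stream):
class cDecoderROT13(cDecoderParent):
    name = 'ROT13 decoder (editorial example)'
    def __init__(self, stream, options):
        self.stream = stream
        self.options = options
        self.available = True
    def Available(self):
        return self.available
    def Decode(self):
        import codecs
        self.available = False
        return codecs.encode(self.stream, 'rot13')
    def Name(self):
        return 'ROT13'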
def DecodeFunction(decoders, options, stream):
if decoders == []:
return stream
return decoders[0](stream, options.decoderoptions).Decode()
class cDumpStream():
def __init__(self):
self.text = ''
def Addline(self, line):
if line != '':
self.text += line + '\n'
def Content(self):
return self.text
def HexDump(data):
oDumpStream = cDumpStream()
hexDump = ''
for i, b in enumerate(data):
if i % dumplinelength == 0 and hexDump != '':
oDumpStream.Addline(hexDump)
hexDump = ''
hexDump += IFF(hexDump == '', '', ' ') + '%02X' % ord(b)
oDumpStream.Addline(hexDump)
return oDumpStream.Content()
def CombineHexAscii(hexDump, asciiDump):
if hexDump == '':
return ''
return hexDump + ' ' + (' ' * (3 * (dumplinelength - len(asciiDump)))) + asciiDump
def HexAsciiDump(data):
oDumpStream = cDumpStream()
hexDump = ''
asciiDump = ''
for i, b in enumerate(data):
if i % dumplinelength == 0:
if hexDump != '':
oDumpStream.Addline(CombineHexAscii(hexDump, asciiDump))
hexDump = '%08X:' % i
asciiDump = ''
hexDump += ' %02X' % ord(b)
asciiDump += IFF(ord(b) >= 32, b, '.')
oDumpStream.Addline(CombineHexAscii(hexDump, asciiDump))
return oDumpStream.Content()
def HexAsciiDumpLine(data):
return HexAsciiDump(data[0:16])[10:-1]
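# Editor's sketch (illustrative only; assumes the module-level dumplinelength
# global defined earlier). HexAsciiDump renders the classic offset/hex/ASCII
# view, one row per dumplinelength bytes:
def _HexAsciiDumpExample():
    return HexAsciiDump('ABC\x00')
    # -> '00000000: 41 42 43 00 <padding> ABC.'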
def PDFParserMain(filename, outdirectory, **kwargs):
"""
Modified by CSE to fit ASSEMBLYLINE Service
"""
"""
pdf-parser, use it to parse a PDF document
"""
# Options
verbose = kwargs.get("verbose", False)
filt = kwargs.get("filter", False)
search = kwargs.get("search", None)
obj = kwargs.get("object", None)
typ = kwargs.get("type", None)
reference = kwargs.get("reference", None)
searchstream = kwargs.get("searchstream", None)
stats = kwargs.get("stats", False)
key = kwargs.get("key", None)
raw = kwargs.get("raw", False)
hsh = kwargs.get("hash", False)
dump = kwargs.get("dump", None)
get_object_detail = kwargs.get("get_object_detail", False)
get_malform = kwargs.get("get_malform", True)
max_objstm = kwargs.get("max_objstm", 100)
if dump:
dump = os.path.join(outdirectory, dump)
elements = kwargs.get("elements", None)
nocanonicalizedoutput = kwargs.get("nocanonicalizedoutput", False)
malform_content = os.path.join(outdirectory, "malformed_content")
max_search_hits = 50
search_hits = 0
try:
oPDFParser = cPDFParser(filename, verbose=verbose, extract=malform_content)
except Exception as e:
raise Exception(e)
cntComment = 0
cntXref = 0
cntTrailer = 0
cntStartXref = 0
cntIndirectObject = 0
dicObjectTypes = {}
selectComment = False
selectXref = False
selectTrailer = False
selectStartXref = False
selectIndirectObject = False
show_stream = False
if elements:
for c in elements:
if c == 'c':
selectComment = True
elif c == 'x':
selectXref = True
elif c == 't':
selectTrailer = True
elif c == 's':
selectStartXref = True
elif c == 'i':
selectIndirectObject = True
else:
print('Error: unknown --elements value %s' % c)
return
else:
selectIndirectObject = True
if not search and not obj and not reference and not typ and not searchstream and not key:
selectComment = True
selectXref = True
selectTrailer = True
selectStartXref = True
if search or key:
selectTrailer = True
show_stream = True
optionsType = ''
if typ:
optionsType = typ
results = {
'version': __version__,
'parts': [],
'stats': [],
'files': {
'embedded': [],
'malformed': [],
'triage_kw': []
},
'obj_details': ""
}
errors = set()
while True:
try:
object = oPDFParser.GetObject()
except Exception:
continue
if object != None:
if stats:
if object.type == PDF_ELEMENT_COMMENT:
cntComment += 1
elif object.type == PDF_ELEMENT_XREF:
cntXref += 1
elif object.type == PDF_ELEMENT_TRAILER:
cntTrailer += 1
elif object.type == PDF_ELEMENT_STARTXREF:
cntStartXref += 1
elif object.type == PDF_ELEMENT_INDIRECT_OBJECT:
cntIndirectObject += 1
type1 = object.GetType()
if not type1 in dicObjectTypes:
dicObjectTypes[type1] = [object.id]
else:
dicObjectTypes[type1].append(object.id)
else:
if object.type == PDF_ELEMENT_COMMENT and selectComment:
if not search and not key or search and object.Contains(search):
results['parts'].append('PDF Comment %s' % FormatOutput(object.comment, raw))
elif object.type == PDF_ELEMENT_XREF and selectXref:
results['parts'].append('xref %s' % FormatOutput(object.content, raw))
elif object.type == PDF_ELEMENT_TRAILER and selectTrailer:
oPDFParseDictionary = cPDFParseDictionary(object.content[1:], nocanonicalizedoutput)
if not search and not key or search and object.Contains(search):
if oPDFParseDictionary == None:
results['parts'].append('trailer: %s' % FormatOutput(object.content, raw))
else:
trailer = 'trailer:\n'
trailer += oPDFParseDictionary.PrettyPrint(' ')
results['parts'].append(trailer)
elif key:
if oPDFParseDictionary.parsed != None:
result = oPDFParseDictionary.GetNested(key)
if result != None:
results['parts'].append(result)
elif object.type == PDF_ELEMENT_STARTXREF and selectStartXref:
if not search:
results['parts'].append('startxref %d' % object.index)
elif object.type == PDF_ELEMENT_INDIRECT_OBJECT and selectIndirectObject:
if search:
if search_hits <= max_search_hits:
if object.Contains(search):
res, err = PrintOutputObject(
object,
filt,
nocanonicalizedoutput,
dump,
raw=raw,
hsh=hsh,
show_stream=show_stream)
if search in res:
results['parts'].append(res)
search_hits += 1
else:
# Try again, this time getting the raw output
res, err = PrintOutputObject(object, filt, nocanonicalizedoutput, dump, raw=True)
if search in res:
results['parts'].append(res)
search_hits += 1
else:
break
elif key:
oPDFParseDictionary = cPDFParseDictionary(object.content[1:], nocanonicalizedoutput)
if oPDFParseDictionary.parsed != None:
result = oPDFParseDictionary.GetNested(key)
if result != None:
results['parts'].append(result)
elif obj:
if object.id == eval(obj):
res, err = PrintOutputObject(
object, filt, nocanonicalizedoutput, dump, raw=raw, hsh=hsh, show_stream=show_stream)
results['parts'].append(res)
if get_object_detail:
obj_det = re.match(r'[\r]?\n<<.+>>[\r]?\n', FormatOutput(object.content, raw=True),
re.DOTALL)
if obj_det:
results['obj_details'] = obj_det.group(0)
if dump and "Object extracted." in res:
results['files']['embedded'].append(dump)
if len(err) > 0:
for e in err:
errors.add("Object extraction error: {}".format(e))
break
elif reference:
if object.References(reference):
res, err = PrintOutputObject(
object, filt, nocanonicalizedoutput, dump, raw=raw, hsh=hsh, show_stream=show_stream)
results['parts'].append(res)
elif typ:
if EqualCanonical(object.GetType(), optionsType):
if search_hits <= max_objstm:
res, err = PrintOutputObject(
object,
filt,
nocanonicalizedoutput,
dump,
raw=raw,
hsh=hsh,
show_stream=show_stream)
results['parts'].append(res)
search_hits += 1
else:
break
elif hsh:
results['parts'].append('obj %d %d' % (object.id, object.version))
rawContent = FormatOutput(object.content, True)
results['parts'].append(
' len: %d md5: %s' % (len(rawContent), hashlib.md5(rawContent).hexdigest()))
else:
res, err = PrintOutputObject(
object, filt, nocanonicalizedoutput, dump, raw=raw, hsh=hsh, show_stream=show_stream)
results['parts'].append(res)
elif object.type == PDF_ELEMENT_MALFORMED and get_malform:
if len(object.content) > 50:
try:
with open(malform_content, 'wb') as fExtract:
fExtract.write(C2BIP3(object.content))
results['files']['malformed'].append(malform_content)
except:
errors.add('Error writing file %s' % malform_content)
else:
break
if stats:
results['stats'].append('Comment: %s' % cntComment)
results['stats'].append('XREF: %s' % cntXref)
results['stats'].append('Trailer: %s' % cntTrailer)
results['stats'].append('StartXref: %s' % cntStartXref)
results['stats'].append('Indirect object: %s' % cntIndirectObject)
        names = sorted(dicObjectTypes.keys())  # dict views have no sort() on Python 3
for key in names:
results['stats'].append(
'%s %d: %s' % (key, len(dicObjectTypes[key]), ', '.join(map(lambda x: '%d' % x, dicObjectTypes[key]))))
return results, errors
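# Editor's sketch of driving the entry point above (assumption: the paths are
# illustrative, not part of the original module). PDFParserMain returns a
# (results, errors) pair instead of printing, so a caller typically does:
def _PDFParserMainExample():
    results, errors = PDFParserMain('/tmp/sample.pdf', '/tmp/out', stats=True)
    for part in results['parts']:
        print(part)
    return results['stats'], sorted(errors)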
def TestPythonVersion(enforceMaximumVersion=False, enforceMinimumVersion=False):
if sys.version_info[0:3] > __maximum_python_version__:
if enforceMaximumVersion:
print('This program does not work with this version of Python (%d.%d.%d)' % sys.version_info[0:3])
print('Please use Python version %d.%d.%d' % __maximum_python_version__)
sys.exit()
else:
print('This program has not been tested with this version of Python (%d.%d.%d)' % sys.version_info[0:3])
print('Should you encounter problems, please use Python version %d.%d.%d' % __maximum_python_version__)
if sys.version_info[0:3] < __minimum_python_version__:
if enforceMinimumVersion:
print('This program does not work with this version of Python (%d.%d.%d)' % sys.version_info[0:3])
            print('Please use Python version %d.%d.%d' % __minimum_python_version__)
sys.exit()
else:
print('This program has not been tested with this version of Python (%d.%d.%d)' % sys.version_info[0:3])
            print('Should you encounter problems, please use Python version %d.%d.%d' % __minimum_python_version__)
| mit | 7,926,709,286,726,347,000 | 36.976804 | 164 | 0.507397 | false |
EmanueleCannizzaro/scons | test/MSVS/vs-9.0Exp-exec.py | 1 | 3292
#!/usr/bin/env python
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/MSVS/vs-9.0Exp-exec.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Test that we can actually build a simple program using our generated
Visual Studio 9.0 project (.vcproj) and solution (.sln) files
using Visual C++ 9.0 Express edition.
"""
import os
import sys
import TestSConsMSVS
test = TestSConsMSVS.TestSConsMSVS()
if sys.platform != 'win32':
msg = "Skipping Visual Studio test on non-Windows platform '%s'\n" % sys.platform
test.skip_test(msg)
msvs_version = '9.0Exp'
if not msvs_version in test.msvs_versions():
msg = "Visual Studio %s not installed; skipping test.\n" % msvs_version
test.skip_test(msg)
# Let SCons figure out the Visual Studio environment variables for us and
# print out a statement that we can exec to suck them into our external
# environment so we can execute devenv and really try to build something.
test.run(arguments = '-n -q -Q -f -', stdin = """\
env = Environment(tools = ['msvc'], MSVS_VERSION='%(msvs_version)s')
print "os.environ.update(%%s)" %% repr(env['ENV'])
""" % locals())
exec(test.stdout())
test.subdir('sub dir')
test.write(['sub dir', 'SConstruct'], """\
env=Environment(MSVS_VERSION = '%(msvs_version)s')
env.MSVSProject(target = 'foo.vcproj',
srcs = ['foo.c'],
buildtarget = 'foo.exe',
variant = 'Release')
env.Program('foo.c')
""" % locals())
test.write(['sub dir', 'foo.c'], r"""
int
main(int argc, char *argv)
{
printf("foo.c\n");
exit (0);
}
""")
test.run(chdir='sub dir', arguments='.')
test.vcproj_sys_path(test.workpath('sub dir', 'foo.vcproj'))
import SCons.Platform.win32
system_dll_path = os.path.join( SCons.Platform.win32.get_system_root(), 'System32' )
os.environ['PATH'] = os.environ['PATH'] + os.pathsep + system_dll_path
test.run(chdir='sub dir',
program=[test.get_msvs_executable(msvs_version)],
arguments=['foo.sln', '/build', 'Release'])
test.run(program=test.workpath('sub dir', 'foo'), stdout="foo.c\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit | 5,027,620,518,771,086,000 | 29.201835 | 101 | 0.699271 | false |
gstarnberger/paasta | paasta_tools/contrib/delete_old_marathon_deployments.py | 1 | 3106
#!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
import logging
import dateutil.parser
from dateutil import tz
from pytimeparse import timeparse
from paasta_tools import marathon_tools
log = logging.getLogger(__name__)
def parse_args():
parser = argparse.ArgumentParser()
    parser.add_argument('-a', '--age', dest='age', type=timedelta_type, default='1h',
                        help="Max age of a Marathon deployment before it is stopped. "
                             "Any pytimeparse unit is supported")
parser.add_argument('-n', '--dry-run', action="store_true",
help="Don't actually stop any Marathon deployments")
parser.add_argument('-v', '--verbose', action='store_true')
options = parser.parse_args()
return options
def timedelta_type(value):
"""Return the :class:`datetime.datetime.DateTime` for a time in the past.
:param value: a string containing a time format supported by :mod:`pytimeparse`
"""
if value is None:
return None
return datetime_seconds_ago(timeparse.timeparse(value))
def datetime_seconds_ago(seconds):
return now() - datetime.timedelta(seconds=seconds)
def now():
return datetime.datetime.now(tz.tzutc())
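# Editor's sketch (illustrative only, not part of the original script):
# timedelta_type turns a pytimeparse string into a cutoff datetime, so
# '--age 2d12h' means "deployments whose version is older than 2.5 days".
def _age_example():
    cutoff = timedelta_type('2d12h')
    return cutoff < now()  # always True: the cutoff lies in the past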
def delete_deployment_if_too_old(client, deployment, max_date, dry_run):
started_at = dateutil.parser.parse(deployment.version)
age = now() - started_at
if started_at < max_date:
if dry_run is True:
log.warning("Would delete %s for %s as it is %s old" % (deployment.id, deployment.affected_apps[0], age))
else:
log.warning("Deleting %s for %s as it is %s old" % (deployment.id, deployment.affected_apps[0], age))
client.delete_deployment(deployment_id=deployment.id, force=True)
else:
if dry_run is True:
log.warning("NOT deleting %s for %s as it is %s old" % (deployment.id, deployment.affected_apps[0], age))
def main():
args = parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.WARNING)
config = marathon_tools.load_marathon_config()
client = marathon_tools.get_marathon_client(config.get_url(), config.get_username(), config.get_password())
for deployment in client.list_deployments():
delete_deployment_if_too_old(
client=client,
deployment=deployment,
max_date=args.age,
dry_run=args.dry_run,
)
if __name__ == "__main__":
main()
| apache-2.0 | 3,240,582,496,428,836,000 | 32.76087 | 117 | 0.667418 | false |
dorneanu/appvulnms | src/core/parser/AppVulnXMLParser.py | 1 | 14417
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: victor
# @Date: 2014-02-09
# @Last Modified by: victor
# @Last Modified time: 2014-06-06
# @Copyright:
#
# This file is part of the AppVulnMS project.
#
#
# Copyright (c) 2014 Victor Dorneanu <info AAET dornea DOT nu>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# The MIT License (MIT)
import base64
import logging
from lxml import etree
from core.parser.HTTPParser import HTTPParser
from core.parser.HTTPParser import HTTPRequestParser
from core.parser.HTTPParser import HTTPResponseParser
log = logging.getLogger(__name__)
class AppVulnXMLParser():
"""AppVulnXML parser. Edits XML data"""
def __init__(self, xml_data):
# Create parser to parse the XML tree and insert new data into it
self.parser = etree.XMLParser(remove_blank_text=True, strip_cdata=False,
ns_clean=True, recover=True, encoding='utf-8')
self.xml_tree = etree.XML(str(xml_data), self.parser)
self.issues = self.xml_tree.xpath("/XmlReport/Results/Vulnerabilities/*")
self.issue_index = 0
def __len__(self):
"""Returns number of available issues
:returns: Number of available issues
"""
return len(self.issues)
def __iter__(self):
"""Iterator to walk through issues
:returns: Iterator to iterate through issues
"""
return self
    def __next__(self):
        """Walk through issues; raises StopIteration after the last one"""
        if self.issue_index >= len(self.issues):
            raise StopIteration
        issue = self.issues[self.issue_index]
        self.issue_index += 1
        return issue
def get_root(self):
"""Get root of XML document
:returns: Root XML Element
"""
return self.xml_tree
def get_xml(self):
"""Returns XML tree as string
:returns: XML tree as string
"""
return etree.tostring(self.xml_tree, pretty_print=True, encoding="utf-8").decode("utf-8")
def get_scanner(self):
"""Returns /XmlReport/Scanner
:returns: /XmlReport/Scanner as XML document
"""
return self.xml_tree.xpath("/XmlReport/Scanner")
def get_summary(self):
"""Returns /XmlReport/Summary
:returns: /XmlReport/Summary as XML document
"""
return self.xml_tree.xpath("/XmlReport/Summary")
def get_vulnerabilities(self):
"""Return /XmlReport/Results/Vulnerabilities
:returns: /XmlReport/Results/Vulnerabilities as XML document
"""
return self.xml_tree.xpath("/XmlReport/Results/Vulnerabilities/*")
def add_request_data(self, issue, request_data):
"""Add parsed request data to the node
:param issue: Issue as XML document
:param request_data: HTTP request data
"""
request = HTTPRequestParser(request_data)
request.parse_data()
request.set_http_headers()
headers = request.get_headers()
# Add request attributes method like method
try:
xml_request_node = issue.xpath("TestProbe/HTTP/Request")[0]
xml_request_node.attrib['method'] = request.get_method()
xml_request_node.attrib['version'] = request.get_request_version()
        except IndexError:
            log.error("Missing TestProbe/HTTP/Request node")
# Add parsed data
try:
xml_parsed_traffic = issue.xpath("TestProbe/HTTP/Request/Parsed")[0]
        except IndexError:
            log.error("Missing TestProbe/HTTP/Request/Parsed node")
# Iterate through headers and create new XML nodes
for h in headers.keys():
for v in headers[h]:
# Create new sub-element
header_node = etree.Element('Header', name=h, value=v)
xml_parsed_traffic.append(header_node)
# Add request data node
request_data_node = etree.Element('Data')
request_data_node.text = etree.CDATA(request.get_request_data())
xml_parsed_traffic.append(request_data_node)
def add_response_data(self, issue, response_data, binary_data=False):
"""Add parsed response data to the node
:param issue: Issue as XML document
:param response_data: HTTP response data
        :param binary_data: Flag indicating whether response_data is binary
"""
response = HTTPResponseParser(response_data, binary_data)
response.parse_data()
response.set_http_headers()
headers = response.get_headers()
# Add response metadata
try:
xml_response_node = issue.xpath("TestProbe/HTTP/Response")[0]
xml_response_node.attrib['version'] = response.get_response_version()
xml_response_node.attrib['status'] = response.get_status()
xml_response_node.attrib['reason'] = response.get_reason()
        except IndexError:
            log.error("Missing TestProbe/HTTP/Response node")
# Add response data
try:
xml_parsed_traffic = issue.xpath("TestProbe/HTTP/Response/Parsed")[0]
        except IndexError:
            log.error("Missing TestProbe/HTTP/Response/Parsed node")
# Iterate through headers and create new XML nodes
for h in headers.keys():
for v in headers[h]:
# Create new sub-element
header_node = etree.Element('Header', name=h, value=v)
xml_parsed_traffic.append(header_node)
# Add request data node
request_data_node = etree.Element('Data')
request_data_node.text = etree.CDATA(response.get_response_data())
request_data_node.attrib['base64'] = str(binary_data)
xml_parsed_traffic.append(request_data_node)
def extract_traffic(self, issue, binary_data=False):
"""Extract HTTP traffic from RawTraffic/MergedTraffic and adjust XML in single issue
:param issue: Issue as XML document
:param binary_data: Flag indicating whether traffic is binary
"""
raw_traffic = issue.xpath("RawTraffic")[0]
raw_request_traffic = issue.xpath("RawTraffic/RequestTraffic")
raw_response_traffic = issue.xpath("RawTraffic/ResponseTraffic")
raw_merged_traffic = issue.xpath("RawTraffic/MergedTraffic")
# New nodes
request_node = etree.Element("RequestTraffic")
response_node = etree.Element("ResponseTraffic")
request_node.text = ''
response_node.text = ''
# Add base64 flag to traffic
request_node.attrib['base64'] = 'false'
response_node.attrib['base64'] = 'false'
# Check if merged traffic is provided
if len(raw_merged_traffic) > 0:
# Split traffic
http_data = HTTPParser.split_http_traffic(raw_merged_traffic[0].text)
# Adjust XML data
if http_data:
request_node.text = etree.CDATA(http_data['request'])
raw_traffic.append(request_node)
response_node.text = etree.CDATA(http_data['response'])
raw_traffic.append(response_node)
# Remove MergedTraffic node
raw_merged_traffic[0].getparent().remove(raw_merged_traffic[0])
# Check if request traffic already provided
# TODO: Do the same for request traffic?
if len(raw_request_traffic) > 0:
if len(raw_request_traffic[0].text) > 0:
base64_flag = False
if 'base64' in raw_request_traffic[0].attrib:
if raw_request_traffic[0].attrib['base64'] == 'true':
base64_flag = True
# Check if base64
if base64_flag:
# Replace binary data by plaintext data
decoded_request_data = base64.b64decode(raw_request_traffic[0].text).decode("utf-8")
raw_request_traffic[0].getparent().remove(raw_request_traffic[0])
new_request_traffic = etree.Element("RequestTraffic")
new_request_traffic.text = etree.CDATA(decoded_request_data)
new_request_traffic.attrib['base64'] = "false"
# Append new node
raw_traffic.append(new_request_traffic)
else:
# Add new nodes
raw_traffic.append(request_node)
raw_traffic.append(response_node)
def add_data(self, binary_data=False):
"""Adds request data (e.g. headers) to the XML tree
:param binary_data: Flag indicating whether data is binary
"""
for issue in self.issues:
# Extract traffic
self.extract_traffic(issue, binary_data)
# Extract request and response
raw_request_traffic = issue.xpath("RawTraffic/RequestTraffic")[0]
raw_response_traffic = issue.xpath("RawTraffic/ResponseTraffic")[0]
# Add request data
if raw_request_traffic.text:
base64_flag = False
if 'base64' in raw_request_traffic.attrib:
if raw_request_traffic.attrib['base64'] == 'true':
base64_flag = True
# Check if base64
if base64_flag:
decoded_request_traffic = base64.b64decode(raw_request_traffic.text)
self.add_request_data(issue, decoded_request_traffic.decode(encoding="utf-8", errors="ignore"))
else:
self.add_request_data(issue, raw_request_traffic.text)
# Add response data
if raw_response_traffic.text:
base64_flag = False
if 'base64' in raw_response_traffic.attrib:
if raw_response_traffic.attrib['base64'] == 'true':
base64_flag = True
# Check if base64
if base64_flag:
decoded_response_traffic = base64.b64decode(raw_response_traffic.text)
self.add_response_data(
issue, decoded_response_traffic.decode(encoding="utf-8", errors="ignore"), True)
else:
self.add_response_data(issue, raw_response_traffic.text)
def get_payload(self, issue):
"""Gets issue payload information, e.g. parameter/cookie and value
:param issue: Issue as XML document
:returns: XML data containing PoC information
"""
raw_query = issue.xpath("TestProbe/Request/Query")
if len(raw_query) > 0:
return raw_query
else:
return None
def convert_base64_to_plain(self):
"""Converts Base64 traffic to plaintext
For all issue the traffic will be converted to base64.
"""
for issue in self.issues:
raw_traffic = issue.xpath("RawTraffic")
request_traffic = issue.xpath("RawData/RawRequest")
response_traffic = issue.xpath("RawData/RawResponse")
# Decode request traffic
if len(request_traffic) > 0:
base64_traffic = request_traffic[0].text
traffic = base64.b64decode(base64_traffic)
request_traffic[0].text = etree.CDATA(traffic.decode('utf-8'))
# Decode response traffic
if len(response_traffic) > 0:
base64_traffic = response_traffic[0].text
traffic = base64.b64decode(base64_traffic)
                # Decode small responses to text; keep large ones as Base64
                if len(traffic) < 10000:
                    response = traffic.decode('utf-8', errors='ignore')
                else:
                    response = base64_traffic
response_traffic[0].text = etree.CDATA(response)
# Merge traffic data
raw_traffic[0].text = ''.join([request_traffic[0].text, str(response_traffic[0].text)])
# Remove RawData
raw_data = issue.xpath("RawData")
issue.remove(raw_data[0])
def string(self):
"""Returns string respresentation of XML tree
:returns: Returns string respresentation of XML tree
"""
return etree.tostring(self.xml_tree,
pretty_print=True,
xml_declaration=False
).decode(encoding="utf-8")
def __str__(self):
return self.string()
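# Editor's sketch (assumption: 'report.xml' is an illustrative AppVulnXML
# document, not part of the original module). A typical driver parses a
# report, expands the raw HTTP traffic and re-serializes the tree:
def _example_usage():
    with open('report.xml', 'r') as fp:
        parser = AppVulnXMLParser(fp.read())
    parser.add_data(binary_data=False)
    for issue in parser.get_vulnerabilities():
        print(issue.tag)
    return parser.string()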
| mit | -7,039,225,817,628,492,000 | 39.383754 | 119 | 0.553791 | false |
tricoder42/python-ariadne | docs/conf.py | 1 | 8846
# coding: utf-8
from __future__ import unicode_literals
import os
import sys
import sphinx_rtd_theme
import ariadne
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Python Ariadne'
copyright = '2015, Tomáš Ehrlich'
author = 'Tomáš Ehrlich'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.'.join(ariadne.__version__.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = ariadne.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
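# Editor's sketch (assumption: illustrative values for sphinx_rtd_theme):
#html_theme_options = {
#    'collapse_navigation': False,
#    'navigation_depth': 3,
#}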
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'PythonAriadnedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PythonAriadne.tex', 'Python Ariadne Documentation',
'Tomáš Ehrlich', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pythonariadne', 'Python Ariadne Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PythonAriadne', u'Python Ariadne Documentation',
author, 'PythonAriadne', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit | 815,831,060,555,983,000 | 31.028986 | 79 | 0.704864 | false |
GennadiyZakharov/locotrack | src/ltgui/preprocessorwidget.py | 1 | 5007 | '''
Created on 29 jan. 2015
@author: Gena
'''
from PyQt4 import QtCore, QtGui
class PreprocessorWidget(QtGui.QWidget):
    '''
    Qt widget exposing the image preprocessor's tunable parameters:
    negative image, barrel-distortion removal, camera position and
    background accumulation.
    '''
def __init__(self, preprocessor, parent=None):
'''
Constructor
'''
super(PreprocessorWidget, self).__init__(parent)
self.preprocessor = preprocessor
layout = QtGui.QGridLayout()
#
        self.negativeCheckBox = QtGui.QCheckBox()
        negativeLabel = QtGui.QLabel("Negative image")
        layout.addWidget(negativeLabel, 0, 0)
        layout.addWidget(self.negativeCheckBox, 0, 1)
        self.negativeCheckBox.stateChanged.connect(self.setInvertImage)
#
        self.removeBarrelCheckBox = QtGui.QCheckBox()
        removeBarrelLabel = QtGui.QLabel("Remove barrel distortion")
        layout.addWidget(removeBarrelLabel)
        layout.addWidget(self.removeBarrelCheckBox)
        self.removeBarrelCheckBox.stateChanged.connect(self.setRemoveBarrel)
#
self.removeBarrelSpinbox = QtGui.QDoubleSpinBox()
removeBarrelValLabel = QtGui.QLabel('Distortion coefficient')
self.removeBarrelSpinbox.setRange(-10,10)
self.removeBarrelSpinbox.setSingleStep(0.2)
self.removeBarrelSpinbox.setSuffix('E-5')
layout.addWidget(removeBarrelValLabel)
layout.addWidget(self.removeBarrelSpinbox)
self.removeBarrelSpinbox.valueChanged.connect(self.preprocessor.setRemoveBarrelCoef)
self.removeBarrelFocal = QtGui.QDoubleSpinBox()
removeBarrelFocalLabel = QtGui.QLabel('Focal length')
self.removeBarrelFocal.setRange(2,50)
self.removeBarrelFocal.setSingleStep(0.2)
layout.addWidget(removeBarrelFocalLabel)
layout.addWidget(self.removeBarrelFocal)
self.removeBarrelFocal.valueChanged.connect(self.preprocessor.setRemoveBarrelFocal)
self.centerXSpinBox = QtGui.QSpinBox()
centerXLabel = QtGui.QLabel('Camera position, X')
self.centerXSpinBox.setMaximum(1280)
self.centerXSpinBox.setSingleStep(10)
layout.addWidget(centerXLabel)
layout.addWidget(self.centerXSpinBox)
self.centerXSpinBox.valueChanged.connect(self.preprocessor.setCenterX)
self.centerYSpinBox = QtGui.QSpinBox()
centerYLabel = QtGui.QLabel('Camera position, Y')
self.centerYSpinBox.setMaximum(1024)
self.centerYSpinBox.setSingleStep(10)
layout.addWidget(centerYLabel)
layout.addWidget(self.centerYSpinBox)
self.centerYSpinBox.valueChanged.connect(self.preprocessor.setCenterY)
accumulateBackgroundLabel = QtGui.QLabel('Background frames')
layout.addWidget(accumulateBackgroundLabel)
self.accumulateBackgroundSpinBox = QtGui.QSpinBox()
self.accumulateBackgroundSpinBox.setMaximum(1000)
self.accumulateBackgroundSpinBox.setMinimum(50)
layout.addWidget(self.accumulateBackgroundSpinBox)
self.accumulateBackgroundSpinBox.valueChanged.connect(self.preprocessor.setBackgroundFrames)
self.accumulateBackgroundButton = QtGui.QPushButton('Accumulate background')
layout.addWidget(self.accumulateBackgroundButton)
self.accumulateBackgroundButton.clicked.connect(preprocessor.collectBackground)
self.calibrateImageButton = QtGui.QPushButton('Calibrate from image')
layout.addWidget(self.calibrateImageButton)
self.calibrateImageButton.clicked.connect(self.calibrateImage)
# Layout
self.setLayout(layout)
self.loadState()
@QtCore.pyqtSlot(int)
def setInvertImage(self, state):
self.preprocessor.setInvertImage(state == QtCore.Qt.Checked)
@QtCore.pyqtSlot(int)
def setRemoveBarrel(self, state):
value = (state == QtCore.Qt.Checked)
self.removeBarrelFocal.setEnabled(value)
self.removeBarrelSpinbox.setEnabled(value)
self.preprocessor.setRemoveBarrel(value)
def loadState(self):
        self.negativeCheckBox.setChecked(self.preprocessor.invertImage)
        self.removeBarrelCheckBox.setChecked(self.preprocessor.removeBarrel)
self.removeBarrelSpinbox.setValue(self.preprocessor.removeBarrelCoef)
self.removeBarrelFocal.setValue(self.preprocessor.removeBarrelFocal)
self.centerXSpinBox.setValue(self.preprocessor.centerX)
self.centerYSpinBox.setValue(self.preprocessor.centerY)
self.accumulateBackgroundSpinBox.setValue(self.preprocessor.nBackgroundFrames)
def calibrateImage(self):
calibrationImageName = QtGui.QFileDialog.getOpenFileName(self,
"Choose calibration image file",
'.',
"Image file ({})".format("*.*"))
if not calibrationImageName.isEmpty():
self.preprocessor.calibrateFromImage(calibrationImageName)
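# Illustrative sketch (not part of the original module): embedding the widget
# in a standalone window. The ``preprocessor`` argument is assumed to expose
# the attributes and slots wired up above (invertImage, removeBarrel,
# setCenterX, collectBackground, ...).
def _demo(preprocessor):  # pragma: no cover - illustrative only
    import sys
    app = QtGui.QApplication(sys.argv)
    widget = PreprocessorWidget(preprocessor)
    widget.show()
    sys.exit(app.exec_())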
| lgpl-3.0 | -634,777,850,189,652,000 | 40.38843 | 100 | 0.688236 | false |
kollad/turbo-ninja | utils/mathutils.py | 1 | 5600 | from bisect import insort_left
from collections import MutableMapping, OrderedDict
import random
import struct
import hashlib
from threading import Lock
import os
from engine.utils.timeutils import milliseconds
_inc_lock = Lock()
_inc = 0
_pid = int(os.getpid()) % 0xffff
def random_id(length=18):
"""Generate id, based on timestamp, assumed to be unique for this process.
"""
global _inc
ts = milliseconds()
with _inc_lock:
source = '{}{}{}'.format(ts, _pid, _inc)
_inc += 1
return hash_string(source, length)
def unique_id():
"""Generate random id, based on timestamp, assumed to be unique for this process.
Note, that strings, generated by this function will be sorted, i.e. each next string will be greater than previous.
Do not use this function for very quick generation of pack of ids cause of possible collisions.
"""
global _inc
ts = milliseconds()
s = ts / 1000
ds = ts / 100 - s * 10
with _inc_lock:
source = '{}{}{}{}'.format(
struct.pack('>I', s),
struct.pack('>B', ds),
struct.pack('>H', _pid),
struct.pack('>H', _inc % 0xffff)
)
_inc += 1
return source.encode('hex')
def hash_string(source, length=18):
"""Generate truncated to length hexdigest for provided source string.
:param source: string to computes hash from.
:type source: basestring
:param length: truncate hash to the specified length.
:type length: int
:rtype: str
"""
return hashlib.sha256(source.encode('utf-8')).hexdigest()[0:length]
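# Illustrative sketch: hash_string() is deterministic, so the same source
# always yields the same digest prefix regardless of the requested length.
def _example_hash_string():  # pragma: no cover - illustrative only
    short = hash_string(u'example-source')        # 18 hex chars by default
    longer = hash_string(u'example-source', 32)   # longer prefix, same digest
    assert longer.startswith(short)
    return short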
class Median(object):
def __init__(self, *args):
self.values = sorted(args)
def __add__(self, other):
insort_left(self.values, float(other))
return self
def clear(self):
self.values = []
@property
def min(self):
try:
return self.values[0]
except IndexError:
return 0
@property
def max(self):
try:
return self.values[-1]
except IndexError:
return 0
@property
def len(self):
return len(self.values)
@property
def avg(self):
return self.sum / max(self.len, 1)
@property
def med(self):
index = int(self.len / 2)
try:
return self.values[index]
except IndexError:
return 0
@property
def sum(self):
return sum(self.values)
def __repr__(self):
return '<Median: (min: {:.1f}, max: {:.1f}, med: {:.1f}, avg: {:.2f})>'.format(
self.min, self.max, self.med, self.avg)
def __str__(self):
return self.__repr__()
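# Illustrative sketch of Median usage; the numbers are arbitrary.
def _example_median():  # pragma: no cover - illustrative only
    m = Median(3, 1, 2)
    m += 10                      # insort keeps values ordered: [1, 2, 3, 10.0]
    assert (m.min, m.max) == (1, 10.0)
    assert m.med == 3            # element at index len/2
    assert m.avg == 4.0          # 16.0 / 4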
class WeightedItem(object):
__slots__ = 'name', 'weight', 'toughness', 'hunger'
def __init__(self, name, weight=1):
self.name = name
self.weight = weight
self.toughness = 0
self.hunger = 0
def __repr__(self):
return '(weight: {}, toughness: {}, hunger: {})'.format(self.weight, self.toughness, self.hunger)
def __str__(self):
return self.__repr__()
class Weights(MutableMapping):
def __init__(self, **kwargs):
self._items = {}
self._updated = True
self._total = 0
self._len = 0
self._first = None
self._last = None
self.update(kwargs)
def __getitem__(self, item):
return self._items[item].weight
def __setitem__(self, key, value):
if value >= 0:
try:
self._items[key].weight = value
except KeyError:
self._items[key] = WeightedItem(key, value)
else:
raise ValueError('Value should be positive or zero.')
self._updated = True
def __delitem__(self, key):
del self._items[key]
self._updated = True
def __len__(self):
return len(self._items)
def __contains__(self, item):
return self._items.__contains__(item)
def keys(self):
return list(self._items.keys())
def __iter__(self):
return iter(list(self.keys()))
def _refresh_heights(self):
l = self._len = len(self._items)
if not l:
raise IndexError('Cannot choose from nothing.')
items = sorted(list(self._items.values()), key=lambda item: item.weight)
t = 0
for item in items:
t += item.weight
item.toughness = t
total = self._total = t
        t = 0
        c = l - 1
        if c:  # guard against ZeroDivisionError when only one item exists
            for item in items:
                t += float(total - item.weight) / c
                item.hunger = t
self._items = OrderedDict()
for item in items:
self._items[item.name] = item
self._first = items[0]
def roll(self):
return random.random() * self._total
def choice(self, thin=False):
if self._updated:
self._refresh_heights()
self._updated = False
if self._len < 2:
if self._first:
return self._first.name
else:
raise IndexError('Nothing to choose')
r = self.roll()
if not thin:
for item in self._items.values():
if r < item.toughness:
return item.name
else:
for item in self._items.values():
if r < item.hunger:
return item.name
raise IndexError('Nothing to choose')
def __repr__(self):
return '<Weights: {}>'.format(self._items)
def __str__(self):
return self.__repr__()
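# Illustrative sketch of weighted random choice; item names and weights are
# arbitrary. With thin=True the bias flips toward under-weighted ('hungry')
# items.
def _example_weights():  # pragma: no cover - illustrative only
    w = Weights(common=10, rare=1)
    picks = [w.choice() for _ in range(100)]
    assert picks.count('common') > picks.count('rare')  # holds almost always
    return w.choice(thin=True)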
| mit | -161,171,418,633,999,780 | 24.339367 | 119 | 0.5425 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_10_01/models/virtual_network_gateway_connection.py | 1 | 7595 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class VirtualNetworkGatewayConnection(Resource):
"""A common class for general resource information.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param authorization_key: The authorizationKey.
:type authorization_key: str
:param virtual_network_gateway1: Required. The reference to virtual
network gateway resource.
:type virtual_network_gateway1:
~azure.mgmt.network.v2017_10_01.models.VirtualNetworkGateway
:param virtual_network_gateway2: The reference to virtual network gateway
resource.
:type virtual_network_gateway2:
~azure.mgmt.network.v2017_10_01.models.VirtualNetworkGateway
:param local_network_gateway2: The reference to local network gateway
resource.
:type local_network_gateway2:
~azure.mgmt.network.v2017_10_01.models.LocalNetworkGateway
:param connection_type: Required. Gateway connection type. Possible values
    are: 'IPsec', 'Vnet2Vnet', 'ExpressRoute', and 'VPNClient'. Possible values
include: 'IPsec', 'Vnet2Vnet', 'ExpressRoute', 'VPNClient'
:type connection_type: str or
~azure.mgmt.network.v2017_10_01.models.VirtualNetworkGatewayConnectionType
:param routing_weight: The routing weight.
:type routing_weight: int
:param shared_key: The IPSec shared key.
:type shared_key: str
:ivar connection_status: Virtual network Gateway connection status.
Possible values are 'Unknown', 'Connecting', 'Connected' and
'NotConnected'. Possible values include: 'Unknown', 'Connecting',
'Connected', 'NotConnected'
:vartype connection_status: str or
~azure.mgmt.network.v2017_10_01.models.VirtualNetworkGatewayConnectionStatus
:ivar tunnel_connection_status: Collection of all tunnels' connection
health status.
:vartype tunnel_connection_status:
list[~azure.mgmt.network.v2017_10_01.models.TunnelConnectionHealth]
:ivar egress_bytes_transferred: The egress bytes transferred in this
connection.
:vartype egress_bytes_transferred: long
:ivar ingress_bytes_transferred: The ingress bytes transferred in this
connection.
:vartype ingress_bytes_transferred: long
:param peer: The reference to peerings resource.
:type peer: ~azure.mgmt.network.v2017_10_01.models.SubResource
:param enable_bgp: EnableBgp flag
:type enable_bgp: bool
:param use_policy_based_traffic_selectors: Enable policy-based traffic
selectors.
:type use_policy_based_traffic_selectors: bool
:param ipsec_policies: The IPSec Policies to be considered by this
connection.
:type ipsec_policies:
list[~azure.mgmt.network.v2017_10_01.models.IpsecPolicy]
:param resource_guid: The resource GUID property of the
VirtualNetworkGatewayConnection resource.
:type resource_guid: str
:ivar provisioning_state: The provisioning state of the
VirtualNetworkGatewayConnection resource. Possible values are: 'Updating',
'Deleting', and 'Failed'.
:vartype provisioning_state: str
:param etag: Gets a unique read-only string that changes whenever the
resource is updated.
:type etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'virtual_network_gateway1': {'required': True},
'connection_type': {'required': True},
'connection_status': {'readonly': True},
'tunnel_connection_status': {'readonly': True},
'egress_bytes_transferred': {'readonly': True},
'ingress_bytes_transferred': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'authorization_key': {'key': 'properties.authorizationKey', 'type': 'str'},
'virtual_network_gateway1': {'key': 'properties.virtualNetworkGateway1', 'type': 'VirtualNetworkGateway'},
'virtual_network_gateway2': {'key': 'properties.virtualNetworkGateway2', 'type': 'VirtualNetworkGateway'},
'local_network_gateway2': {'key': 'properties.localNetworkGateway2', 'type': 'LocalNetworkGateway'},
'connection_type': {'key': 'properties.connectionType', 'type': 'str'},
'routing_weight': {'key': 'properties.routingWeight', 'type': 'int'},
'shared_key': {'key': 'properties.sharedKey', 'type': 'str'},
'connection_status': {'key': 'properties.connectionStatus', 'type': 'str'},
'tunnel_connection_status': {'key': 'properties.tunnelConnectionStatus', 'type': '[TunnelConnectionHealth]'},
'egress_bytes_transferred': {'key': 'properties.egressBytesTransferred', 'type': 'long'},
'ingress_bytes_transferred': {'key': 'properties.ingressBytesTransferred', 'type': 'long'},
'peer': {'key': 'properties.peer', 'type': 'SubResource'},
'enable_bgp': {'key': 'properties.enableBgp', 'type': 'bool'},
'use_policy_based_traffic_selectors': {'key': 'properties.usePolicyBasedTrafficSelectors', 'type': 'bool'},
'ipsec_policies': {'key': 'properties.ipsecPolicies', 'type': '[IpsecPolicy]'},
'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, **kwargs):
super(VirtualNetworkGatewayConnection, self).__init__(**kwargs)
self.authorization_key = kwargs.get('authorization_key', None)
self.virtual_network_gateway1 = kwargs.get('virtual_network_gateway1', None)
self.virtual_network_gateway2 = kwargs.get('virtual_network_gateway2', None)
self.local_network_gateway2 = kwargs.get('local_network_gateway2', None)
self.connection_type = kwargs.get('connection_type', None)
self.routing_weight = kwargs.get('routing_weight', None)
self.shared_key = kwargs.get('shared_key', None)
self.connection_status = None
self.tunnel_connection_status = None
self.egress_bytes_transferred = None
self.ingress_bytes_transferred = None
self.peer = kwargs.get('peer', None)
self.enable_bgp = kwargs.get('enable_bgp', None)
self.use_policy_based_traffic_selectors = kwargs.get('use_policy_based_traffic_selectors', None)
self.ipsec_policies = kwargs.get('ipsec_policies', None)
self.resource_guid = kwargs.get('resource_guid', None)
self.provisioning_state = None
self.etag = kwargs.get('etag', None)
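# Illustrative sketch (not generated code): constructing the model with its
# required parameters. ``gw1``/``gw2`` stand for VirtualNetworkGateway
# instances and every literal below is a placeholder.
def _example_build_connection(gw1, gw2):  # pragma: no cover - illustrative
    return VirtualNetworkGatewayConnection(
        location='westus',
        virtual_network_gateway1=gw1,
        virtual_network_gateway2=gw2,
        connection_type='Vnet2Vnet',
        shared_key='placeholder-shared-key')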
| mit | 4,056,119,647,110,844,400 | 48.967105 | 117 | 0.663858 | false |
epuzanov/ZenPacks.community.CIMMon | ZenPacks/community/CIMMon/interfaces.py | 1 | 9776 | ################################################################################
#
# This program is part of the CIMMon Zenpack for Zenoss.
# Copyright (C) 2012 Egor Puzanov.
#
# This program can be used under the GNU General Public License version 2
# You can find full information here: http://www.zenoss.com/oss
#
################################################################################
__doc__="""interfaces
describes the form fields for the user interface.
$Id: interfaces.py,v 1.6 2012/10/14 17:36:23 egor Exp $"""
__version__ = "$Revision: 1.6 $"[11:-2]
from Products.Zuul.interfaces import IComponentInfo,\
IIpInterfaceInfo,\
IExpansionCardInfo
from Products.Zuul.form import schema
from Products.Zuul.utils import ZuulMessageFactory as _t
class IPhysicalMemoryInfo(IComponentInfo):
"""
Info adapter for Physical Memory Module components.
"""
manufacturer = schema.Entity(title=u"Manufacturer", readonly=True,
group='Details')
product = schema.Entity(title=u"Model", readonly=True, group='Details')
slot = schema.Int(title=u"Slot", readonly=False,group='Details')
size = schema.Text(title=u"Size", readonly=True, group='Details')
cimClassName = schema.Text(title=u"CIM Class Name", readonly=True,
group='Details')
cimStatClassName = schema.Text(title=u"CIM Statistics Class Name",
readonly=True, group='Details')
class IDiskDriveInfo(IComponentInfo):
"""
Info adapter for Disk Drive components.
"""
manufacturer = schema.Entity(title=u"Manufacturer", readonly=True,
group='Details')
product = schema.Entity(title=u"Model", readonly=True, group='Details')
serialNumber = schema.Text(title=u"Serial #", readonly=True,group='Details')
FWRev = schema.Text(title=u"Firmware", readonly=True, group='Details')
size = schema.Text(title=u"Size", readonly=True, group='Details')
diskType = schema.Text(title=u"Type", readonly=True, group='Details')
chassis = schema.Entity(title=u"Chassis", readonly=True,group='Details')
storagePool = schema.Entity(title=u"Disk Group", readonly=True,
group='Details')
bay = schema.Int(title=u"Bay", readonly=False, group='Details')
cimClassName = schema.Text(title=u"CIM Class Name", readonly=True,
group='Details')
cimStatClassName = schema.Text(title=u"CIM Statistics Class Name",
readonly=True, group='Details')
class IChassisInfo(IComponentInfo):
"""
Info adapter for Chassis components.
"""
manufacturer = schema.Entity(title=u"Manufacturer", readonly=True,
group='Details')
product = schema.Entity(title=u"Model", readonly=True, group='Details')
serialNumber = schema.Text(title=u"Serial #", readonly=True,group='Details')
layout = schema.Text(title=u"Layout String", readonly=False,group='Details')
cimClassName = schema.Text(title=u"CIM Class Name", readonly=True,
group='Details')
cimStatClassName = schema.Text(title=u"CIM Statistics Class Name",
readonly=True, group='Details')
class IStoragePoolInfo(IComponentInfo):
"""
Info adapter for Storage Pool components.
"""
usage = schema.Text(title=u"Usage", readonly=True, group="Details")
totalDisks = schema.Int(title=u"Total Disk", readonly=True, group="Details")
totalBytesString = schema.Text(title=u"Total Bytes", readonly=True,
group="Details")
usedBytesString = schema.Text(title=u"Used Bytes", readonly=True,
group="Details")
availBytesString = schema.Text(title=u"Available Bytes", readonly=True,
group="Details")
capacity = schema.Text(title=u"Utilization", readonly=True, group="Details")
cimClassName = schema.Text(title=u"CIM Class Name", readonly=True,
group='Details')
cimStatClassName = schema.Text(title=u"CIM Statistics Class Name",
readonly=True, group='Details')
class IStorageVolumeInfo(IComponentInfo):
"""
Info adapter for Storage Volume components.
"""
storagePool = schema.Entity(title=u"Disk Group", readonly=True,
group='Details')
accessType = schema.Text(title=u"Access Type", readonly=True,
group='Details')
diskType = schema.Text(title=u"Disk Type", readonly=True, group='Details')
totalBytesString = schema.Text(title=u"Total Bytes", readonly=True,
group="Details")
cimClassName = schema.Text(title=u"CIM Class Name", readonly=True,
group='Details')
cimStatClassName = schema.Text(title=u"CIM Statistics Class Name",
readonly=True, group='Details')
class IPowerSupplyInfo(IComponentInfo):
"""
Info adapter for PowerSupply components.
"""
watts = schema.Int(title=u'Watts', group='Overview', readonly=True)
type = schema.Text(title=u'Type', group='Overview', readonly=True)
millivolts = schema.Int(title=u'Millivolts', group='Overview',readonly=True)
cimClassName = schema.Text(title=u"CIM Class Name", readonly=True,
group='Details')
cimStatClassName = schema.Text(title=u"CIM Statistics Class Name",
readonly=True, group='Details')
class ITemperatureSensorInfo(IComponentInfo):
"""
Info adapter for TemperatureSensor components.
"""
temperature = schema.Int(title=u'Temperature (Fahrenheit)',group='Overview',
readonly=True)
cimClassName = schema.Text(title=u"CIM Class Name", readonly=True,
group='Details')
cimStatClassName = schema.Text(title=u"CIM Statistics Class Name",
readonly=True, group='Details')
class IFanInfo(IComponentInfo):
"""
Info adapter for Fan components.
"""
type = schema.Text(title=u'Type', group='Overview', readonly=True)
rpm = schema.Text(title=u'RPM', group='Overview', readonly=True)
cimClassName = schema.Text(title=u"CIM Class Name", readonly=True,
group='Details')
cimStatClassName = schema.Text(title=u"CIM Statistics Class Name",
readonly=True, group='Details')
class IComputerSystemInfo(IExpansionCardInfo):
"""
Info adapter for Controller components.
"""
FWRev = schema.Text(title=u"Firmware", readonly=True, group='Details')
uptime = schema.Text(title=u"Uptime", readonly=True, group='Details')
cimClassName = schema.Text(title=u"CIM Class Name", readonly=True,
group='Details')
cimStatClassName = schema.Text(title=u"CIM Statistics Class Name",
readonly=True, group='Details')
class INetworkPortInfo(IIpInterfaceInfo):
"""
Info adapter for Controller components.
"""
controller =schema.Entity(title=u"Controller",readonly=True,group='Details')
cimClassName = schema.Text(title=u"CIM Class Name", readonly=True,
group='Details')
cimStatClassName = schema.Text(title=u"CIM Statistics Class Name",
readonly=True, group='Details')
class IRedundancySetInfo(IComponentInfo):
"""
Info adapter for RedundancySet components.
"""
typeOfSet = schema.Text(title=u"Type", readonly=True, group='Details')
loadBalanceAlgorithm = schema.Text(title=u"Load Balance Algorithm",
readonly=True, group='Details')
minNumberNeeded = schema.Int(title=u"Min Number Needed", readonly=True,
group='Details')
membersCount = schema.Int(title=u"Members Count", readonly=True,
group='Details')
cimClassName = schema.Text(title=u"CIM Class Name", readonly=True,
group='Details')
cimStatClassName = schema.Text(title=u"CIM Statistics Class Name",
readonly=True, group='Details')
class IReplicationGroupInfo(IComponentInfo):
"""
Info adapter for ReplicationGroup components.
"""
cimClassName = schema.Text(title=u"CIM Class Name", readonly=True,
group='Details')
cimStatClassName = schema.Text(title=u"CIM Statistics Class Name",
readonly=True, group='Details')
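# Illustrative sketch (assumption, not part of this ZenPack): each interface
# above is typically paired with an Info adapter in info.py, along the lines
# of:
#
#     from zope.interface import implements
#     from Products.Zuul.infos import ProxyProperty
#     from Products.Zuul.infos.component import ComponentInfo
#
#     class FanInfo(ComponentInfo):
#         implements(IFanInfo)
#         type = ProxyProperty('type')
#         rpm = ProxyProperty('rpm')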
| gpl-2.0 | -7,139,634,888,139,762,000 | 51.55914 | 80 | 0.54511 | false |
shaz13/oppia | core/controllers/cron.py | 1 | 7669 | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the cron jobs."""
import logging
from pipeline import pipeline
from core import jobs
from core.controllers import base
from core.domain import acl_decorators
from core.domain import email_manager
from core.domain import exp_jobs_one_off
from core.domain import recommendations_jobs_one_off
from core.domain import user_jobs_one_off
from core.platform import models
import utils
(job_models,) = models.Registry.import_models([models.NAMES.job])
# The default retention time is 2 days.
MAX_MAPREDUCE_METADATA_RETENTION_MSECS = 2 * 24 * 60 * 60 * 1000
TWENTY_FIVE_HOURS_IN_MSECS = 25 * 60 * 60 * 1000
MAX_JOBS_TO_REPORT_ON = 50
class JobStatusMailerHandler(base.BaseHandler):
"""Handler for mailing admin about job failures."""
@acl_decorators.can_perform_cron_tasks
def get(self):
"""Handles GET requests."""
# TODO(sll): Get the 50 most recent failed shards, not all of them.
failed_jobs = jobs.get_stuck_jobs(TWENTY_FIVE_HOURS_IN_MSECS)
if failed_jobs:
email_subject = 'MapReduce failure alert'
email_message = (
'%s jobs have failed in the past 25 hours. More information '
'(about at most %s jobs; to see more, please check the logs):'
) % (len(failed_jobs), MAX_JOBS_TO_REPORT_ON)
for job in failed_jobs[:MAX_JOBS_TO_REPORT_ON]:
email_message += '\n'
email_message += '-----------------------------------'
email_message += '\n'
email_message += (
'Job with mapreduce ID %s (key name %s) failed. '
'More info:\n\n'
' counters_map: %s\n'
' shard_retries: %s\n'
' slice_retries: %s\n'
' last_update_time: %s\n'
' last_work_item: %s\n'
) % (
job.mapreduce_id, job.key().name(), job.counters_map,
job.retries, job.slice_retries, job.update_time,
job.last_work_item
)
else:
email_subject = 'MapReduce status report'
email_message = 'All MapReduce jobs are running fine.'
email_manager.send_mail_to_admin(email_subject, email_message)
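# Illustrative sketch (assumption): handlers in this module are wired to App
# Engine cron via entries roughly like the following in cron.yaml (the URL
# and schedule shown here are hypothetical):
#
#     cron:
#     - description: mail admin about failed jobs
#       url: /cron/mail/admin/job_status
#       schedule: every 1 hours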
class CronDashboardStatsHandler(base.BaseHandler):
"""Handler for appending dashboard stats to a list."""
@acl_decorators.can_perform_cron_tasks
def get(self):
"""Handles GET requests."""
user_jobs_one_off.DashboardStatsOneOffJob.enqueue(
user_jobs_one_off.DashboardStatsOneOffJob.create_new())
class CronExplorationRecommendationsHandler(base.BaseHandler):
"""Handler for computing exploration recommendations."""
@acl_decorators.can_perform_cron_tasks
def get(self):
"""Handles GET requests."""
job_class = (
recommendations_jobs_one_off.ExplorationRecommendationsOneOffJob)
job_class.enqueue(job_class.create_new())
class CronExplorationSearchRankHandler(base.BaseHandler):
"""Handler for computing exploration search ranks."""
@acl_decorators.can_perform_cron_tasks
def get(self):
"""Handles GET requests."""
exp_jobs_one_off.IndexAllExplorationsJobManager.enqueue(
exp_jobs_one_off.IndexAllExplorationsJobManager.create_new())
class CronMapreduceCleanupHandler(base.BaseHandler):
@acl_decorators.can_perform_cron_tasks
def get(self):
"""Clean up intermediate data items for completed M/R jobs that
started more than MAX_MAPREDUCE_METADATA_RETENTION_MSECS milliseconds
ago.
Map/reduce runs leave around a large number of rows in several
tables. This data is useful to have around for a while:
- it helps diagnose any problems with jobs that may be occurring
- it shows where resource usage is occurring
However, after a few days, this information is less relevant, and
should be cleaned up.
"""
recency_msec = MAX_MAPREDUCE_METADATA_RETENTION_MSECS
num_cleaned = 0
min_age_msec = recency_msec
# Only consider jobs that started at most 1 week before recency_msec.
max_age_msec = recency_msec + 7 * 24 * 60 * 60 * 1000
# The latest start time that a job scheduled for cleanup may have.
max_start_time_msec = (
utils.get_current_time_in_millisecs() - min_age_msec)
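        # Worked example (illustrative): with the default 2-day retention,
        # min_age_msec is 2 days and max_age_msec is 9 days, so only jobs
        # that started between 9 and 2 days ago are cleanup candidates.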
# Get all pipeline ids from jobs that started between max_age_msecs
# and max_age_msecs + 1 week, before now.
pipeline_id_to_job_instance = {}
job_instances = job_models.JobModel.get_recent_jobs(1000, max_age_msec)
for job_instance in job_instances:
if (job_instance.time_started_msec < max_start_time_msec and not
job_instance.has_been_cleaned_up):
if 'root_pipeline_id' in job_instance.metadata:
pipeline_id = job_instance.metadata['root_pipeline_id']
pipeline_id_to_job_instance[pipeline_id] = job_instance
# Clean up pipelines.
for pline in pipeline.get_root_list()['pipelines']:
pipeline_id = pline['pipelineId']
job_definitely_terminated = (
pline['status'] == 'done' or
pline['status'] == 'aborted' or
pline['currentAttempt'] > pline['maxAttempts'])
have_start_time = 'startTimeMs' in pline
job_started_too_long_ago = (
have_start_time and
pline['startTimeMs'] < max_start_time_msec)
if (job_started_too_long_ago or
(not have_start_time and job_definitely_terminated)):
# At this point, the map/reduce pipeline is either in a
# terminal state, or has taken so long that there's no
# realistic possibility that there might be a race condition
# between this and the job actually completing.
if pipeline_id in pipeline_id_to_job_instance:
job_instance = pipeline_id_to_job_instance[pipeline_id]
job_instance.has_been_cleaned_up = True
job_instance.put()
# This enqueues a deferred cleanup item.
p = pipeline.Pipeline.from_id(pipeline_id)
if p:
p.cleanup()
num_cleaned += 1
logging.warning('%s MR jobs cleaned up.' % num_cleaned)
if job_models.JobModel.do_unfinished_jobs_exist(
jobs.JobCleanupManager.__name__):
logging.warning('A previous cleanup job is still running.')
else:
jobs.JobCleanupManager.enqueue(
jobs.JobCleanupManager.create_new(), additional_job_params={
jobs.MAPPER_PARAM_MAX_START_TIME_MSEC: max_start_time_msec
})
logging.warning('Deletion jobs for auxiliary entities kicked off.')
| apache-2.0 | -490,610,909,016,384,450 | 40.231183 | 79 | 0.615726 | false |
luzheqi1987/nova-annotation | nova/network/linux_net.py | 1 | 71216 | # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implements vlans, bridges, and iptables rules using linux utilities."""
import calendar
import inspect
import os
import re
import netaddr
from oslo.concurrency import processutils
from oslo.config import cfg
from oslo.serialization import jsonutils
from oslo.utils import excutils
from oslo.utils import importutils
from oslo.utils import timeutils
import six
from nova import exception
from nova.i18n import _, _LE
from nova import objects
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
from nova import paths
from nova import utils
LOG = logging.getLogger(__name__)
linux_net_opts = [
cfg.MultiStrOpt('dhcpbridge_flagfile',
default=['/etc/nova/nova-dhcpbridge.conf'],
help='Location of flagfiles for dhcpbridge'),
cfg.StrOpt('networks_path',
default=paths.state_path_def('networks'),
help='Location to keep network config files'),
cfg.StrOpt('public_interface',
default='eth0',
help='Interface for public IP addresses'),
cfg.StrOpt('dhcpbridge',
default=paths.bindir_def('nova-dhcpbridge'),
help='Location of nova-dhcpbridge'),
cfg.StrOpt('routing_source_ip',
default='$my_ip',
help='Public IP of network host'),
cfg.IntOpt('dhcp_lease_time',
default=86400,
help='Lifetime of a DHCP lease in seconds'),
cfg.MultiStrOpt('dns_server',
default=[],
help='If set, uses specific DNS server for dnsmasq. Can'
' be specified multiple times.'),
cfg.BoolOpt('use_network_dns_servers',
default=False,
help='If set, uses the dns1 and dns2 from the network ref.'
' as dns servers.'),
cfg.ListOpt('dmz_cidr',
default=[],
help='A list of dmz range that should be accepted'),
cfg.MultiStrOpt('force_snat_range',
default=[],
help='Traffic to this range will always be snatted to the '
'fallback ip, even if it would normally be bridged out '
'of the node. Can be specified multiple times.'),
cfg.StrOpt('dnsmasq_config_file',
default='',
help='Override the default dnsmasq settings with this file'),
cfg.StrOpt('linuxnet_interface_driver',
default='nova.network.linux_net.LinuxBridgeInterfaceDriver',
help='Driver used to create ethernet devices.'),
cfg.StrOpt('linuxnet_ovs_integration_bridge',
default='br-int',
help='Name of Open vSwitch bridge used with linuxnet'),
cfg.BoolOpt('send_arp_for_ha',
default=False,
help='Send gratuitous ARPs for HA setup'),
cfg.IntOpt('send_arp_for_ha_count',
default=3,
help='Send this many gratuitous ARPs for HA setup'),
cfg.BoolOpt('use_single_default_gateway',
default=False,
help='Use single default gateway. Only first nic of vm will '
'get default gateway from dhcp server'),
cfg.MultiStrOpt('forward_bridge_interface',
default=['all'],
help='An interface that bridges can forward to. If this '
'is set to all then all traffic will be forwarded. '
'Can be specified multiple times.'),
cfg.StrOpt('metadata_host',
default='$my_ip',
help='The IP address for the metadata API server'),
cfg.IntOpt('metadata_port',
default=8775,
help='The port for the metadata API port'),
cfg.StrOpt('iptables_top_regex',
default='',
help='Regular expression to match iptables rule that should '
'always be on the top.'),
cfg.StrOpt('iptables_bottom_regex',
default='',
help='Regular expression to match iptables rule that should '
'always be on the bottom.'),
cfg.StrOpt('iptables_drop_action',
default='DROP',
help=('The table that iptables to jump to when a packet is '
'to be dropped.')),
cfg.IntOpt('ovs_vsctl_timeout',
default=120,
help='Amount of time, in seconds, that ovs_vsctl should wait '
'for a response from the database. 0 is to wait forever.'),
cfg.BoolOpt('fake_network',
default=False,
help='If passed, use fake network devices and addresses'),
]
CONF = cfg.CONF
CONF.register_opts(linux_net_opts)
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('use_ipv6', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('network_device_mtu', 'nova.objects.network')
# NOTE(vish): Iptables supports chain names of up to 28 characters, and we
# add up to 12 characters to binary_name which is used as a prefix,
# so we limit it to 16 characters.
# (max_chain_name_length - len('-POSTROUTING') == 16)
def get_binary_name():
"""Grab the name of the binary we're running in."""
return os.path.basename(inspect.stack()[-1][1])[:16]
binary_name = get_binary_name()
class IptablesRule(object):
"""An iptables rule.
You shouldn't need to use this class directly, it's only used by
IptablesManager.
"""
def __init__(self, chain, rule, wrap=True, top=False):
self.chain = chain
self.rule = rule
self.wrap = wrap
self.top = top
def __eq__(self, other):
return ((self.chain == other.chain) and
(self.rule == other.rule) and
(self.top == other.top) and
(self.wrap == other.wrap))
def __ne__(self, other):
return not self == other
def __str__(self):
if self.wrap:
chain = '%s-%s' % (binary_name, self.chain)
else:
chain = self.chain
# new rules should have a zero [packet: byte] count
return '[0:0] -A %s %s' % (chain, self.rule)
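# Illustrative sketch: how a wrapped rule renders. The actual prefix depends
# on the binary this code runs in (e.g. 'nova-network').
def _example_iptables_rule():  # pragma: no cover - illustrative only
    rule = IptablesRule('example', '-j DROP')
    # str(rule) -> '[0:0] -A <binary_name>-example -j DROP'
    return str(rule)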
class IptablesTable(object):
"""An iptables table."""
def __init__(self):
self.rules = []
self.remove_rules = []
self.chains = set()
self.unwrapped_chains = set()
self.remove_chains = set()
self.dirty = True
def has_chain(self, name, wrap=True):
if wrap:
return name in self.chains
else:
return name in self.unwrapped_chains
def add_chain(self, name, wrap=True):
"""Adds a named chain to the table.
The chain name is wrapped to be unique for the component creating
it, so different components of Nova can safely create identically
named chains without interfering with one another.
At the moment, its wrapped name is <binary name>-<chain name>,
so if nova-compute creates a chain named 'OUTPUT', it'll actually
end up named 'nova-compute-OUTPUT'.
"""
if wrap:
self.chains.add(name)
else:
self.unwrapped_chains.add(name)
self.dirty = True
def remove_chain(self, name, wrap=True):
"""Remove named chain.
This removal "cascades". All rule in the chain are removed, as are
all rules in other chains that jump to it.
If the chain is not found, this is merely logged.
"""
if wrap:
chain_set = self.chains
else:
chain_set = self.unwrapped_chains
if name not in chain_set:
LOG.warn(_('Attempted to remove chain %s which does not exist'),
name)
return
self.dirty = True
# non-wrapped chains and rules need to be dealt with specially,
# so we keep a list of them to be iterated over in apply()
if not wrap:
self.remove_chains.add(name)
chain_set.remove(name)
if not wrap:
self.remove_rules += filter(lambda r: r.chain == name, self.rules)
self.rules = filter(lambda r: r.chain != name, self.rules)
if wrap:
jump_snippet = '-j %s-%s' % (binary_name, name)
else:
jump_snippet = '-j %s' % (name,)
if not wrap:
self.remove_rules += filter(lambda r: jump_snippet in r.rule,
self.rules)
self.rules = filter(lambda r: jump_snippet not in r.rule, self.rules)
def add_rule(self, chain, rule, wrap=True, top=False):
"""Add a rule to the table.
This is just like what you'd feed to iptables, just without
the '-A <chain name>' bit at the start.
However, if you need to jump to one of your wrapped chains,
prepend its name with a '$' which will ensure the wrapping
is applied correctly.
"""
if wrap and chain not in self.chains:
raise ValueError(_('Unknown chain: %r') % chain)
if '$' in rule:
rule = ' '.join(map(self._wrap_target_chain, rule.split(' ')))
rule_obj = IptablesRule(chain, rule, wrap, top)
if rule_obj in self.rules:
LOG.debug("Skipping duplicate iptables rule addition. "
"%(rule)r already in %(rules)r",
{'rule': rule_obj, 'rules': self.rules})
else:
self.rules.append(IptablesRule(chain, rule, wrap, top))
self.dirty = True
def _wrap_target_chain(self, s):
if s.startswith('$'):
return '%s-%s' % (binary_name, s[1:])
return s
def remove_rule(self, chain, rule, wrap=True, top=False):
"""Remove a rule from a chain.
Note: The rule must be exactly identical to the one that was added.
You cannot switch arguments around like you can with the iptables
CLI tool.
"""
try:
self.rules.remove(IptablesRule(chain, rule, wrap, top))
if not wrap:
self.remove_rules.append(IptablesRule(chain, rule, wrap, top))
self.dirty = True
except ValueError:
LOG.warn(_('Tried to remove rule that was not there:'
' %(chain)r %(rule)r %(wrap)r %(top)r'),
{'chain': chain, 'rule': rule,
'top': top, 'wrap': wrap})
def remove_rules_regex(self, regex):
"""Remove all rules matching regex."""
if isinstance(regex, six.string_types):
regex = re.compile(regex)
num_rules = len(self.rules)
self.rules = filter(lambda r: not regex.match(str(r)), self.rules)
removed = num_rules - len(self.rules)
if removed > 0:
self.dirty = True
return removed
def empty_chain(self, chain, wrap=True):
"""Remove all rules from a chain."""
chained_rules = [rule for rule in self.rules
if rule.chain == chain and rule.wrap == wrap]
if chained_rules:
self.dirty = True
for rule in chained_rules:
self.rules.remove(rule)
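# Illustrative sketch of IptablesTable on its own; chain and rule values are
# hypothetical. Wrapped chains get the '<binary_name>-' prefix, and '$name'
# references in rules are expanded by _wrap_target_chain().
def _example_iptables_table():  # pragma: no cover - illustrative only
    table = IptablesTable()
    table.add_chain('demo')
    table.add_chain('demo-accept')
    table.add_rule('demo-accept', '-s 10.0.0.0/24 -j ACCEPT')
    # '$demo-accept' expands to '<binary_name>-demo-accept':
    table.add_rule('demo', '-j $demo-accept')
    return table.rules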
class IptablesManager(object):
"""Wrapper for iptables.
See IptablesTable for some usage docs
A number of chains are set up to begin with.
First, nova-filter-top. It's added at the top of FORWARD and OUTPUT. Its
name is not wrapped, so it's shared between the various nova workers. It's
intended for rules that need to live at the top of the FORWARD and OUTPUT
chains. It's in both the ipv4 and ipv6 set of tables.
For ipv4 and ipv6, the built-in INPUT, OUTPUT, and FORWARD filter chains
are wrapped, meaning that the "real" INPUT chain has a rule that jumps to
the wrapped INPUT chain, etc. Additionally, there's a wrapped chain named
"local" which is jumped to from nova-filter-top.
For ipv4, the built-in PREROUTING, OUTPUT, and POSTROUTING nat chains are
    wrapped in the same way as the built-in filter chains. Additionally,
there's a snat chain that is applied after the POSTROUTING chain.
"""
def __init__(self, execute=None):
if not execute:
self.execute = _execute
else:
self.execute = execute
self.ipv4 = {'filter': IptablesTable(),
'nat': IptablesTable(),
'mangle': IptablesTable()}
self.ipv6 = {'filter': IptablesTable()}
self.iptables_apply_deferred = False
# Add a nova-filter-top chain. It's intended to be shared
# among the various nova components. It sits at the very top
# of FORWARD and OUTPUT.
for tables in [self.ipv4, self.ipv6]:
tables['filter'].add_chain('nova-filter-top', wrap=False)
tables['filter'].add_rule('FORWARD', '-j nova-filter-top',
wrap=False, top=True)
tables['filter'].add_rule('OUTPUT', '-j nova-filter-top',
wrap=False, top=True)
tables['filter'].add_chain('local')
tables['filter'].add_rule('nova-filter-top', '-j $local',
wrap=False)
# Wrap the built-in chains
builtin_chains = {4: {'filter': ['INPUT', 'OUTPUT', 'FORWARD'],
'nat': ['PREROUTING', 'OUTPUT', 'POSTROUTING'],
'mangle': ['POSTROUTING']},
6: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}}
for ip_version in builtin_chains:
if ip_version == 4:
tables = self.ipv4
elif ip_version == 6:
tables = self.ipv6
for table, chains in builtin_chains[ip_version].iteritems():
for chain in chains:
tables[table].add_chain(chain)
tables[table].add_rule(chain, '-j $%s' % (chain,),
wrap=False)
# Add a nova-postrouting-bottom chain. It's intended to be shared
# among the various nova components. We set it as the last chain
# of POSTROUTING chain.
self.ipv4['nat'].add_chain('nova-postrouting-bottom', wrap=False)
self.ipv4['nat'].add_rule('POSTROUTING', '-j nova-postrouting-bottom',
wrap=False)
# We add a snat chain to the shared nova-postrouting-bottom chain
# so that it's applied last.
self.ipv4['nat'].add_chain('snat')
self.ipv4['nat'].add_rule('nova-postrouting-bottom', '-j $snat',
wrap=False)
# And then we add a float-snat chain and jump to first thing in
# the snat chain.
self.ipv4['nat'].add_chain('float-snat')
self.ipv4['nat'].add_rule('snat', '-j $float-snat')
def defer_apply_on(self):
self.iptables_apply_deferred = True
def defer_apply_off(self):
self.iptables_apply_deferred = False
self.apply()
def dirty(self):
for table in self.ipv4.itervalues():
if table.dirty:
return True
if CONF.use_ipv6:
for table in self.ipv6.itervalues():
if table.dirty:
return True
return False
def apply(self):
if self.iptables_apply_deferred:
return
if self.dirty():
self._apply()
else:
LOG.debug("Skipping apply due to lack of new rules")
@utils.synchronized('iptables', external=True)
def _apply(self):
"""Apply the current in-memory set of iptables rules.
This will blow away any rules left over from previous runs of the
same component of Nova, and replace them with our current set of
rules. This happens atomically, thanks to iptables-restore.
"""
s = [('iptables', self.ipv4)]
if CONF.use_ipv6:
s += [('ip6tables', self.ipv6)]
for cmd, tables in s:
all_tables, _err = self.execute('%s-save' % (cmd,), '-c',
run_as_root=True,
attempts=5)
all_lines = all_tables.split('\n')
for table_name, table in tables.iteritems():
start, end = self._find_table(all_lines, table_name)
all_lines[start:end] = self._modify_rules(
all_lines[start:end], table, table_name)
table.dirty = False
self.execute('%s-restore' % (cmd,), '-c', run_as_root=True,
process_input='\n'.join(all_lines),
attempts=5)
LOG.debug("IPTablesManager.apply completed with success")
def _find_table(self, lines, table_name):
if len(lines) < 3:
            # fewer than 3 lines only happens with fake iptables output
return (0, 0)
try:
start = lines.index('*%s' % table_name) - 1
except ValueError:
# Couldn't find table_name
return (0, 0)
end = lines[start:].index('COMMIT') + start + 2
return (start, end)
def _modify_rules(self, current_lines, table, table_name):
unwrapped_chains = table.unwrapped_chains
chains = table.chains
remove_chains = table.remove_chains
rules = table.rules
remove_rules = table.remove_rules
if not current_lines:
fake_table = ['#Generated by nova',
'*' + table_name, 'COMMIT',
'#Completed by nova']
current_lines = fake_table
# Remove any trace of our rules
new_filter = filter(lambda line: binary_name not in line,
current_lines)
top_rules = []
bottom_rules = []
if CONF.iptables_top_regex:
regex = re.compile(CONF.iptables_top_regex)
temp_filter = filter(lambda line: regex.search(line), new_filter)
for rule_str in temp_filter:
new_filter = filter(lambda s: s.strip() != rule_str.strip(),
new_filter)
top_rules = temp_filter
if CONF.iptables_bottom_regex:
regex = re.compile(CONF.iptables_bottom_regex)
temp_filter = filter(lambda line: regex.search(line), new_filter)
for rule_str in temp_filter:
new_filter = filter(lambda s: s.strip() != rule_str.strip(),
new_filter)
bottom_rules = temp_filter
seen_chains = False
rules_index = 0
for rules_index, rule in enumerate(new_filter):
if not seen_chains:
if rule.startswith(':'):
seen_chains = True
else:
if not rule.startswith(':'):
break
if not seen_chains:
rules_index = 2
our_rules = top_rules
bot_rules = []
for rule in rules:
rule_str = str(rule)
if rule.top:
# rule.top == True means we want this rule to be at the top.
# Further down, we weed out duplicates from the bottom of the
# list, so here we remove the dupes ahead of time.
# We don't want to remove an entry if it has non-zero
# [packet:byte] counts and replace it with [0:0], so let's
# go look for a duplicate, and over-ride our table rule if
# found.
# ignore [packet:byte] counts at beginning of line
if rule_str.startswith('['):
rule_str = rule_str.split(']', 1)[1]
dup_filter = filter(lambda s: rule_str.strip() in s.strip(),
new_filter)
new_filter = filter(lambda s:
rule_str.strip() not in s.strip(),
new_filter)
# if no duplicates, use original rule
if dup_filter:
# grab the last entry, if there is one
dup = dup_filter[-1]
rule_str = str(dup)
else:
rule_str = str(rule)
rule_str.strip()
our_rules += [rule_str]
else:
bot_rules += [rule_str]
our_rules += bot_rules
new_filter[rules_index:rules_index] = our_rules
new_filter[rules_index:rules_index] = [':%s - [0:0]' % (name,)
for name in unwrapped_chains]
new_filter[rules_index:rules_index] = [':%s-%s - [0:0]' %
(binary_name, name,)
for name in chains]
commit_index = new_filter.index('COMMIT')
new_filter[commit_index:commit_index] = bottom_rules
seen_lines = set()
def _weed_out_duplicates(line):
# ignore [packet:byte] counts at beginning of lines
if line.startswith('['):
line = line.split(']', 1)[1]
line = line.strip()
if line in seen_lines:
return False
else:
seen_lines.add(line)
return True
def _weed_out_removes(line):
# We need to find exact matches here
if line.startswith(':'):
# it's a chain, for example, ":nova-billing - [0:0]"
# strip off everything except the chain name
line = line.split(':')[1]
line = line.split('- [')[0]
line = line.strip()
for chain in remove_chains:
if chain == line:
remove_chains.remove(chain)
return False
elif line.startswith('['):
# it's a rule
# ignore [packet:byte] counts at beginning of lines
line = line.split(']', 1)[1]
line = line.strip()
for rule in remove_rules:
# ignore [packet:byte] counts at beginning of rules
rule_str = str(rule)
rule_str = rule_str.split(' ', 1)[1]
rule_str = rule_str.strip()
if rule_str == line:
remove_rules.remove(rule)
return False
# Leave it alone
return True
# We filter duplicates, letting the *last* occurrence take
# precedence. We also filter out anything in the "remove"
# lists.
new_filter.reverse()
new_filter = filter(_weed_out_duplicates, new_filter)
new_filter = filter(_weed_out_removes, new_filter)
new_filter.reverse()
# flush lists, just in case we didn't find something
remove_chains.clear()
for rule in remove_rules:
remove_rules.remove(rule)
return new_filter
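# Illustrative usage sketch (not how nova itself calls this, just the shape
# of the API): build chains/rules in memory, then apply() writes them
# atomically via iptables-restore. Chain and rule values are hypothetical;
# apply() requires root privileges.
def _example_iptables_manager():  # pragma: no cover - illustrative only
    manager = IptablesManager()
    manager.ipv4['filter'].add_chain('example')
    manager.ipv4['filter'].add_rule('example',
                                    '-p tcp --dport 2222 -j ACCEPT')
    # '$example' expands to the wrapped chain name on apply:
    manager.ipv4['filter'].add_rule('INPUT', '-j $example')
    manager.apply()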
# NOTE(jkoelker) This is just a nice little stub point since mocking
# builtins with mox is a nightmare
def write_to_file(file, data, mode='w'):
with open(file, mode) as f:
f.write(data)
def metadata_forward():
"""Create forwarding rule for metadata."""
if CONF.metadata_host != '127.0.0.1':
iptables_manager.ipv4['nat'].add_rule('PREROUTING',
'-s 0.0.0.0/0 -d 169.254.169.254/32 '
'-p tcp -m tcp --dport 80 -j DNAT '
'--to-destination %s:%s' %
(CONF.metadata_host,
CONF.metadata_port))
else:
iptables_manager.ipv4['nat'].add_rule('PREROUTING',
'-s 0.0.0.0/0 -d 169.254.169.254/32 '
'-p tcp -m tcp --dport 80 '
'-j REDIRECT --to-ports %s' %
CONF.metadata_port)
iptables_manager.apply()
def metadata_accept():
"""Create the filter accept rule for metadata."""
rule = '-s 0.0.0.0/0 -p tcp -m tcp --dport %s' % CONF.metadata_port
if CONF.metadata_host != '127.0.0.1':
rule += ' -d %s -j ACCEPT' % CONF.metadata_host
else:
rule += ' -m addrtype --dst-type LOCAL -j ACCEPT'
iptables_manager.ipv4['filter'].add_rule('INPUT', rule)
iptables_manager.apply()
def add_snat_rule(ip_range, is_external=False):
if CONF.routing_source_ip:
if is_external:
if CONF.force_snat_range:
snat_range = CONF.force_snat_range
else:
snat_range = []
else:
snat_range = ['0.0.0.0/0']
for dest_range in snat_range:
rule = ('-s %s -d %s -j SNAT --to-source %s'
% (ip_range, dest_range, CONF.routing_source_ip))
if not is_external and CONF.public_interface:
rule += ' -o %s' % CONF.public_interface
iptables_manager.ipv4['nat'].add_rule('snat', rule)
iptables_manager.apply()
def init_host(ip_range, is_external=False):
"""Basic networking setup goes here."""
# NOTE(devcamcar): Cloud public SNAT entries and the default
# SNAT rule for outbound traffic.
add_snat_rule(ip_range, is_external)
rules = []
if is_external:
for snat_range in CONF.force_snat_range:
rules.append('PREROUTING -p ipv4 --ip-src %s --ip-dst %s '
'-j redirect --redirect-target ACCEPT' %
(ip_range, snat_range))
if rules:
ensure_ebtables_rules(rules, 'nat')
iptables_manager.ipv4['nat'].add_rule('POSTROUTING',
'-s %s -d %s/32 -j ACCEPT' %
(ip_range, CONF.metadata_host))
for dmz in CONF.dmz_cidr:
iptables_manager.ipv4['nat'].add_rule('POSTROUTING',
'-s %s -d %s -j ACCEPT' %
(ip_range, dmz))
iptables_manager.ipv4['nat'].add_rule('POSTROUTING',
'-s %(range)s -d %(range)s '
'-m conntrack ! --ctstate DNAT '
'-j ACCEPT' %
{'range': ip_range})
iptables_manager.apply()
def send_arp_for_ip(ip, device, count):
out, err = _execute('arping', '-U', ip,
'-A', '-I', device,
'-c', str(count),
run_as_root=True, check_exit_code=False)
if err:
LOG.debug('arping error for ip %s', ip)
def bind_floating_ip(floating_ip, device):
"""Bind ip to public interface."""
_execute('ip', 'addr', 'add', str(floating_ip) + '/32',
'dev', device,
run_as_root=True, check_exit_code=[0, 2, 254])
if CONF.send_arp_for_ha and CONF.send_arp_for_ha_count > 0:
send_arp_for_ip(floating_ip, device, CONF.send_arp_for_ha_count)
def unbind_floating_ip(floating_ip, device):
"""Unbind a public ip from public interface."""
_execute('ip', 'addr', 'del', str(floating_ip) + '/32',
'dev', device,
run_as_root=True, check_exit_code=[0, 2, 254])
def ensure_metadata_ip():
"""Sets up local metadata ip."""
_execute('ip', 'addr', 'add', '169.254.169.254/32',
'scope', 'link', 'dev', 'lo',
run_as_root=True, check_exit_code=[0, 2, 254])
def ensure_vpn_forward(public_ip, port, private_ip):
"""Sets up forwarding rules for vlan."""
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'-d %s -p udp '
'--dport 1194 '
'-j ACCEPT' % private_ip)
iptables_manager.ipv4['nat'].add_rule('PREROUTING',
'-d %s -p udp '
'--dport %s -j DNAT --to %s:1194' %
(public_ip, port, private_ip))
iptables_manager.ipv4['nat'].add_rule('OUTPUT',
'-d %s -p udp '
'--dport %s -j DNAT --to %s:1194' %
(public_ip, port, private_ip))
iptables_manager.apply()
def ensure_floating_forward(floating_ip, fixed_ip, device, network):
"""Ensure floating ip forwarding rule."""
# NOTE(vish): Make sure we never have duplicate rules for the same ip
    regex = r'.*\s+%s(/32|\s+|$)' % floating_ip
num_rules = iptables_manager.ipv4['nat'].remove_rules_regex(regex)
if num_rules:
msg = _('Removed %(num)d duplicate rules for floating ip %(float)s')
LOG.warn(msg % {'num': num_rules, 'float': floating_ip})
for chain, rule in floating_forward_rules(floating_ip, fixed_ip, device):
iptables_manager.ipv4['nat'].add_rule(chain, rule)
iptables_manager.apply()
if device != network['bridge']:
ensure_ebtables_rules(*floating_ebtables_rules(fixed_ip, network))
def remove_floating_forward(floating_ip, fixed_ip, device, network):
"""Remove forwarding for floating ip."""
for chain, rule in floating_forward_rules(floating_ip, fixed_ip, device):
iptables_manager.ipv4['nat'].remove_rule(chain, rule)
iptables_manager.apply()
if device != network['bridge']:
remove_ebtables_rules(*floating_ebtables_rules(fixed_ip, network))
def floating_ebtables_rules(fixed_ip, network):
"""Makes sure only in-network traffic is bridged."""
return (['PREROUTING --logical-in %s -p ipv4 --ip-src %s '
'! --ip-dst %s -j redirect --redirect-target ACCEPT' %
(network['bridge'], fixed_ip, network['cidr'])], 'nat')
def floating_forward_rules(floating_ip, fixed_ip, device):
rules = []
rule = '-s %s -j SNAT --to %s' % (fixed_ip, floating_ip)
if device:
rules.append(('float-snat', rule + ' -d %s' % fixed_ip))
rules.append(('float-snat', rule + ' -o %s' % device))
else:
rules.append(('float-snat', rule))
rules.append(
('PREROUTING', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)))
rules.append(
('OUTPUT', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)))
rules.append(('POSTROUTING', '-s %s -m conntrack --ctstate DNAT -j SNAT '
'--to-source %s' %
(fixed_ip, floating_ip)))
return rules
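# Worked example (illustrative addresses): the rule tuples produced for a
# floating/fixed pair on device 'eth0'.
def _example_floating_forward_rules():  # pragma: no cover - illustrative
    return floating_forward_rules('10.0.0.5', '192.168.0.2', 'eth0')
    # -> [('float-snat', '-s 192.168.0.2 -j SNAT --to 10.0.0.5 -d 192.168.0.2'),
    #     ('float-snat', '-s 192.168.0.2 -j SNAT --to 10.0.0.5 -o eth0'),
    #     ('PREROUTING', '-d 10.0.0.5 -j DNAT --to 192.168.0.2'),
    #     ('OUTPUT', '-d 10.0.0.5 -j DNAT --to 192.168.0.2'),
    #     ('POSTROUTING', '-s 192.168.0.2 -m conntrack --ctstate DNAT '
    #                     '-j SNAT --to-source 10.0.0.5')]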
def clean_conntrack(fixed_ip):
try:
_execute('conntrack', '-D', '-r', fixed_ip, run_as_root=True,
check_exit_code=[0, 1])
except processutils.ProcessExecutionError:
LOG.exception(_('Error deleting conntrack entries for %s'), fixed_ip)
def _enable_ipv4_forwarding():
sysctl_key = 'net.ipv4.ip_forward'
stdout, stderr = _execute('sysctl', '-n', sysctl_key)
    if stdout.strip() != '1':
_execute('sysctl', '-w', '%s=1' % sysctl_key, run_as_root=True)
@utils.synchronized('lock_gateway', external=True)
def initialize_gateway_device(dev, network_ref):
if not network_ref:
return
_enable_ipv4_forwarding()
# NOTE(vish): The ip for dnsmasq has to be the first address on the
# bridge for it to respond to requests properly
try:
prefix = network_ref.cidr.prefixlen
except AttributeError:
prefix = network_ref['cidr'].rpartition('/')[2]
full_ip = '%s/%s' % (network_ref['dhcp_server'], prefix)
new_ip_params = [[full_ip, 'brd', network_ref['broadcast']]]
old_ip_params = []
out, err = _execute('ip', 'addr', 'show', 'dev', dev,
'scope', 'global')
for line in out.split('\n'):
fields = line.split()
if fields and fields[0] == 'inet':
ip_params = fields[1:-1]
old_ip_params.append(ip_params)
if ip_params[0] != full_ip:
new_ip_params.append(ip_params)
if not old_ip_params or old_ip_params[0][0] != full_ip:
old_routes = []
result = _execute('ip', 'route', 'show', 'dev', dev)
if result:
out, err = result
for line in out.split('\n'):
fields = line.split()
if fields and 'via' in fields:
old_routes.append(fields)
_execute('ip', 'route', 'del', fields[0],
'dev', dev, run_as_root=True)
for ip_params in old_ip_params:
_execute(*_ip_bridge_cmd('del', ip_params, dev),
run_as_root=True, check_exit_code=[0, 2, 254])
for ip_params in new_ip_params:
_execute(*_ip_bridge_cmd('add', ip_params, dev),
run_as_root=True, check_exit_code=[0, 2, 254])
for fields in old_routes:
_execute('ip', 'route', 'add', *fields,
run_as_root=True)
if CONF.send_arp_for_ha and CONF.send_arp_for_ha_count > 0:
send_arp_for_ip(network_ref['dhcp_server'], dev,
CONF.send_arp_for_ha_count)
if CONF.use_ipv6:
_execute('ip', '-f', 'inet6', 'addr',
'change', network_ref['cidr_v6'],
'dev', dev, run_as_root=True)
def get_dhcp_leases(context, network_ref):
"""Return a network's hosts config in dnsmasq leasefile format."""
hosts = []
host = None
if network_ref['multi_host']:
host = CONF.host
for fixedip in objects.FixedIPList.get_by_network(context,
network_ref,
host=host):
# NOTE(cfb): Don't return a lease entry if the IP isn't
# already leased
if fixedip.leased:
hosts.append(_host_lease(fixedip))
return '\n'.join(hosts)
def get_dhcp_hosts(context, network_ref, fixedips):
"""Get network's hosts config in dhcp-host format."""
hosts = []
macs = set()
for fixedip in fixedips:
if fixedip.allocated:
if fixedip.virtual_interface.address not in macs:
hosts.append(_host_dhcp(fixedip))
macs.add(fixedip.virtual_interface.address)
return '\n'.join(hosts)
def get_dns_hosts(context, network_ref):
"""Get network's DNS hosts in hosts format."""
hosts = []
for fixedip in objects.FixedIPList.get_by_network(context, network_ref):
if fixedip.allocated:
hosts.append(_host_dns(fixedip))
return '\n'.join(hosts)
def _add_dnsmasq_accept_rules(dev):
"""Allow DHCP and DNS traffic through to dnsmasq."""
table = iptables_manager.ipv4['filter']
for port in [67, 53]:
for proto in ['udp', 'tcp']:
args = {'dev': dev, 'port': port, 'proto': proto}
table.add_rule('INPUT',
'-i %(dev)s -p %(proto)s -m %(proto)s '
'--dport %(port)s -j ACCEPT' % args)
iptables_manager.apply()
def _remove_dnsmasq_accept_rules(dev):
"""Remove DHCP and DNS traffic allowed through to dnsmasq."""
table = iptables_manager.ipv4['filter']
for port in [67, 53]:
for proto in ['udp', 'tcp']:
args = {'dev': dev, 'port': port, 'proto': proto}
table.remove_rule('INPUT',
'-i %(dev)s -p %(proto)s -m %(proto)s '
'--dport %(port)s -j ACCEPT' % args)
iptables_manager.apply()
# NOTE(russellb) Curious why this is needed? Check out this explanation from
# markmc: https://bugzilla.redhat.com/show_bug.cgi?id=910619#c6
def _add_dhcp_mangle_rule(dev):
table = iptables_manager.ipv4['mangle']
table.add_rule('POSTROUTING',
'-o %s -p udp -m udp --dport 68 -j CHECKSUM '
'--checksum-fill' % dev)
iptables_manager.apply()
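# Sketch (not in the original module): for a hypothetical device 'br100' the
# rule installed above is roughly equivalent to
#   iptables -t mangle -A POSTROUTING -o br100 -p udp -m udp --dport 68 \
#       -j CHECKSUM --checksum-fill
# i.e. the kernel fills in UDP checksums on outgoing DHCP replies so clients
# that validate checksums (see the bug referenced above) accept them.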
def _remove_dhcp_mangle_rule(dev):
table = iptables_manager.ipv4['mangle']
table.remove_rule('POSTROUTING',
'-o %s -p udp -m udp --dport 68 -j CHECKSUM '
'--checksum-fill' % dev)
iptables_manager.apply()
def get_dhcp_opts(context, network_ref, fixedips):
"""Get network's hosts config in dhcp-opts format."""
gateway = network_ref['gateway']
# NOTE(vish): if we are in multi-host mode and we are not sharing
# addresses, then we actually need to hand out the
# dhcp server address as the gateway.
if network_ref['multi_host'] and not (network_ref['share_address'] or
CONF.share_dhcp_address):
gateway = network_ref['dhcp_server']
hosts = []
if CONF.use_single_default_gateway:
for fixedip in fixedips:
if fixedip.allocated:
vif_id = fixedip.virtual_interface_id
if fixedip.default_route:
hosts.append(_host_dhcp_opts(vif_id, gateway))
else:
hosts.append(_host_dhcp_opts(vif_id))
else:
hosts.append(_host_dhcp_opts(None, gateway))
return '\n'.join(hosts)
def release_dhcp(dev, address, mac_address):
try:
utils.execute('dhcp_release', dev, address, mac_address,
run_as_root=True)
except processutils.ProcessExecutionError:
raise exception.NetworkDhcpReleaseFailed(address=address,
mac_address=mac_address)
def update_dhcp(context, dev, network_ref):
conffile = _dhcp_file(dev, 'conf')
host = None
if network_ref['multi_host']:
host = CONF.host
fixedips = objects.FixedIPList.get_by_network(context,
network_ref,
host=host)
write_to_file(conffile, get_dhcp_hosts(context, network_ref, fixedips))
restart_dhcp(context, dev, network_ref, fixedips)
def update_dns(context, dev, network_ref):
hostsfile = _dhcp_file(dev, 'hosts')
host = None
if network_ref['multi_host']:
host = CONF.host
fixedips = objects.FixedIPList.get_by_network(context,
network_ref,
host=host)
write_to_file(hostsfile, get_dns_hosts(context, network_ref))
restart_dhcp(context, dev, network_ref, fixedips)
def update_dhcp_hostfile_with_text(dev, hosts_text):
conffile = _dhcp_file(dev, 'conf')
write_to_file(conffile, hosts_text)
def kill_dhcp(dev):
pid = _dnsmasq_pid_for(dev)
if pid:
# Check that the process exists and looks like a dnsmasq process
conffile = _dhcp_file(dev, 'conf')
out, _err = _execute('cat', '/proc/%d/cmdline' % pid,
check_exit_code=False)
if conffile.split('/')[-1] in out:
_execute('kill', '-9', pid, run_as_root=True)
else:
LOG.debug('Pid %d is stale, skip killing dnsmasq', pid)
_remove_dnsmasq_accept_rules(dev)
_remove_dhcp_mangle_rule(dev)
# NOTE(ja): Sending a HUP only reloads the hostfile, so any
#                      configuration options (like dhcp-range, vlan, ...)
# aren't reloaded.
@utils.synchronized('dnsmasq_start')
def restart_dhcp(context, dev, network_ref, fixedips):
"""(Re)starts a dnsmasq server for a given network.
If a dnsmasq instance is already running then send a HUP
signal causing it to reload, otherwise spawn a new instance.
"""
conffile = _dhcp_file(dev, 'conf')
optsfile = _dhcp_file(dev, 'opts')
write_to_file(optsfile, get_dhcp_opts(context, network_ref, fixedips))
os.chmod(optsfile, 0o644)
_add_dhcp_mangle_rule(dev)
# Make sure dnsmasq can actually read it (it setuid()s to "nobody")
os.chmod(conffile, 0o644)
pid = _dnsmasq_pid_for(dev)
# if dnsmasq is already running, then tell it to reload
if pid:
out, _err = _execute('cat', '/proc/%d/cmdline' % pid,
check_exit_code=False)
# Using symlinks can cause problems here so just compare the name
# of the file itself
if conffile.split('/')[-1] in out:
try:
_execute('kill', '-HUP', pid, run_as_root=True)
_add_dnsmasq_accept_rules(dev)
return
except Exception as exc: # pylint: disable=W0703
LOG.error(_LE('Hupping dnsmasq threw %s'), exc)
else:
LOG.debug('Pid %d is stale, relaunching dnsmasq', pid)
cmd = ['env',
'CONFIG_FILE=%s' % jsonutils.dumps(CONF.dhcpbridge_flagfile),
'NETWORK_ID=%s' % str(network_ref['id']),
'dnsmasq',
'--strict-order',
'--bind-interfaces',
'--conf-file=%s' % CONF.dnsmasq_config_file,
'--pid-file=%s' % _dhcp_file(dev, 'pid'),
'--dhcp-optsfile=%s' % _dhcp_file(dev, 'opts'),
'--listen-address=%s' % network_ref['dhcp_server'],
'--except-interface=lo',
'--dhcp-range=set:%s,%s,static,%s,%ss' %
(network_ref['label'],
network_ref['dhcp_start'],
network_ref['netmask'],
CONF.dhcp_lease_time),
'--dhcp-lease-max=%s' % len(netaddr.IPNetwork(network_ref['cidr'])),
'--dhcp-hostsfile=%s' % _dhcp_file(dev, 'conf'),
'--dhcp-script=%s' % CONF.dhcpbridge,
'--no-hosts',
'--leasefile-ro']
# dnsmasq currently gives an error for an empty domain,
# rather than ignoring. So only specify it if defined.
if CONF.dhcp_domain:
cmd.append('--domain=%s' % CONF.dhcp_domain)
dns_servers = set(CONF.dns_server)
if CONF.use_network_dns_servers:
if network_ref.get('dns1'):
dns_servers.add(network_ref.get('dns1'))
if network_ref.get('dns2'):
dns_servers.add(network_ref.get('dns2'))
if network_ref['multi_host']:
cmd.append('--addn-hosts=%s' % _dhcp_file(dev, 'hosts'))
if dns_servers:
cmd.append('--no-resolv')
for dns_server in dns_servers:
cmd.append('--server=%s' % dns_server)
_execute(*cmd, run_as_root=True)
_add_dnsmasq_accept_rules(dev)
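# Sketch with made-up values (the actual flags above depend on CONF): for a
# network labeled 'private' on device 'br100' the spawned daemon looks like
#   env CONFIG_FILE="..." NETWORK_ID=1 dnsmasq --strict-order
#       --bind-interfaces --pid-file=<networks_path>/nova-br100.pid
#       --dhcp-hostsfile=<networks_path>/nova-br100.conf
#       --dhcp-range=set:private,10.0.0.2,static,255.255.255.0,120s ...
# A later call with the same conffile sends SIGHUP instead of respawning,
# which (per the note above) re-reads host entries but not these flags.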
@utils.synchronized('radvd_start')
def update_ra(context, dev, network_ref):
conffile = _ra_file(dev, 'conf')
conf_str = """
interface %s
{
AdvSendAdvert on;
MinRtrAdvInterval 3;
MaxRtrAdvInterval 10;
prefix %s
{
AdvOnLink on;
AdvAutonomous on;
};
};
""" % (dev, network_ref['cidr_v6'])
write_to_file(conffile, conf_str)
# Make sure radvd can actually read it (it setuid()s to "nobody")
os.chmod(conffile, 0o644)
pid = _ra_pid_for(dev)
# if radvd is already running, then tell it to reload
if pid:
out, _err = _execute('cat', '/proc/%d/cmdline'
% pid, check_exit_code=False)
if conffile in out:
try:
_execute('kill', pid, run_as_root=True)
except Exception as exc: # pylint: disable=W0703
LOG.error(_LE('killing radvd threw %s'), exc)
else:
LOG.debug('Pid %d is stale, relaunching radvd', pid)
cmd = ['radvd',
'-C', '%s' % _ra_file(dev, 'conf'),
'-p', '%s' % _ra_file(dev, 'pid')]
_execute(*cmd, run_as_root=True)
def _host_lease(fixedip):
"""Return a host string for an address in leasefile format."""
timestamp = timeutils.utcnow()
seconds_since_epoch = calendar.timegm(timestamp.utctimetuple())
return '%d %s %s %s *' % (seconds_since_epoch + CONF.dhcp_lease_time,
fixedip.virtual_interface.address,
fixedip.address,
fixedip.instance.hostname or '*')
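# Sketch of a leasefile line produced above (all values are made up): for a
# 120s lease issued at epoch 1400000000 this returns
#   '1400000120 02:16:3e:00:00:01 10.0.0.5 vm-1 *'
# i.e. expiry time, vif MAC, fixed ip, instance hostname, client-id.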
def _host_dhcp_network(vif_id):
return 'NW-%s' % vif_id
def _host_dhcp(fixedip):
"""Return a host string for an address in dhcp-host format."""
if CONF.use_single_default_gateway:
net = _host_dhcp_network(fixedip.virtual_interface_id)
return '%s,%s.%s,%s,net:%s' % (fixedip.virtual_interface.address,
fixedip.instance.hostname,
CONF.dhcp_domain,
fixedip.address,
net)
else:
return '%s,%s.%s,%s' % (fixedip.virtual_interface.address,
fixedip.instance.hostname,
CONF.dhcp_domain,
fixedip.address)
def _host_dns(fixedip):
return '%s\t%s.%s' % (fixedip.address,
fixedip.instance.hostname,
CONF.dhcp_domain)
def _host_dhcp_opts(vif_id=None, gateway=None):
"""Return an empty gateway option."""
values = []
if vif_id is not None:
values.append(_host_dhcp_network(vif_id))
# NOTE(vish): 3 is the dhcp option for gateway.
values.append('3')
if gateway:
values.append('%s' % gateway)
return ','.join(values)
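# Sketch of the option strings produced above (vif id / gateway made up):
#   _host_dhcp_opts('42', '10.0.0.1')  -> 'NW-42,3,10.0.0.1'
#   _host_dhcp_opts('42')              -> 'NW-42,3'   (empty gateway option)
#   _host_dhcp_opts(None, '10.0.0.1')  -> '3,10.0.0.1'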
def _execute(*cmd, **kwargs):
"""Wrapper around utils._execute for fake_network."""
if CONF.fake_network:
LOG.debug('FAKE NET: %s', ' '.join(map(str, cmd)))
return 'fake', 0
else:
return utils.execute(*cmd, **kwargs)
def device_exists(device):
"""Check if ethernet device exists."""
return os.path.exists('/sys/class/net/%s' % device)
def _dhcp_file(dev, kind):
"""Return path to a pid, leases, hosts or conf file for a bridge/device."""
fileutils.ensure_tree(CONF.networks_path)
return os.path.abspath('%s/nova-%s.%s' % (CONF.networks_path,
dev,
kind))
def _ra_file(dev, kind):
"""Return path to a pid or conf file for a bridge/device."""
fileutils.ensure_tree(CONF.networks_path)
return os.path.abspath('%s/nova-ra-%s.%s' % (CONF.networks_path,
dev,
kind))
def _dnsmasq_pid_for(dev):
"""Returns the pid for prior dnsmasq instance for a bridge/device.
Returns None if no pid file exists.
If machine has rebooted pid might be incorrect (caller should check).
"""
pid_file = _dhcp_file(dev, 'pid')
if os.path.exists(pid_file):
try:
with open(pid_file, 'r') as f:
return int(f.read())
except (ValueError, IOError):
return None
def _ra_pid_for(dev):
"""Returns the pid for prior radvd instance for a bridge/device.
Returns None if no pid file exists.
If machine has rebooted pid might be incorrect (caller should check).
"""
pid_file = _ra_file(dev, 'pid')
if os.path.exists(pid_file):
with open(pid_file, 'r') as f:
return int(f.read())
def _ip_bridge_cmd(action, params, device):
"""Build commands to add/del ips to bridges/devices."""
cmd = ['ip', 'addr', action]
cmd.extend(params)
cmd.extend(['dev', device])
return cmd
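# Sketch (made-up values):
#   _ip_bridge_cmd('add', ['10.0.0.1/24', 'brd', '10.0.0.255'], 'br100')
# returns
#   ['ip', 'addr', 'add', '10.0.0.1/24', 'brd', '10.0.0.255', 'dev', 'br100']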
def _set_device_mtu(dev, mtu=None):
"""Set the device MTU."""
if not mtu:
mtu = CONF.network_device_mtu
if mtu:
utils.execute('ip', 'link', 'set', dev, 'mtu',
mtu, run_as_root=True,
check_exit_code=[0, 2, 254])
def _create_veth_pair(dev1_name, dev2_name):
"""Create a pair of veth devices with the specified names,
deleting any previous devices with those names.
"""
for dev in [dev1_name, dev2_name]:
delete_net_dev(dev)
utils.execute('ip', 'link', 'add', dev1_name, 'type', 'veth', 'peer',
'name', dev2_name, run_as_root=True)
for dev in [dev1_name, dev2_name]:
utils.execute('ip', 'link', 'set', dev, 'up', run_as_root=True)
utils.execute('ip', 'link', 'set', dev, 'promisc', 'on',
run_as_root=True)
_set_device_mtu(dev)
def _ovs_vsctl(args):
full_args = ['ovs-vsctl', '--timeout=%s' % CONF.ovs_vsctl_timeout] + args
try:
return utils.execute(*full_args, run_as_root=True)
except Exception as e:
LOG.error(_LE("Unable to execute %(cmd)s. Exception: %(exception)s"),
{'cmd': full_args, 'exception': e})
raise exception.AgentError(method=full_args)
def create_ovs_vif_port(bridge, dev, iface_id, mac, instance_id):
_ovs_vsctl(['--', '--if-exists', 'del-port', dev, '--',
'add-port', bridge, dev,
'--', 'set', 'Interface', dev,
'external-ids:iface-id=%s' % iface_id,
'external-ids:iface-status=active',
'external-ids:attached-mac=%s' % mac,
'external-ids:vm-uuid=%s' % instance_id])
_set_device_mtu(dev)
def delete_ovs_vif_port(bridge, dev):
_ovs_vsctl(['--', '--if-exists', 'del-port', bridge, dev])
delete_net_dev(dev)
def create_ivs_vif_port(dev, iface_id, mac, instance_id):
utils.execute('ivs-ctl', 'add-port',
dev, run_as_root=True)
def delete_ivs_vif_port(dev):
utils.execute('ivs-ctl', 'del-port', dev,
run_as_root=True)
utils.execute('ip', 'link', 'delete', dev,
run_as_root=True)
def create_tap_dev(dev, mac_address=None):
if not device_exists(dev):
try:
# First, try with 'ip'
utils.execute('ip', 'tuntap', 'add', dev, 'mode', 'tap',
run_as_root=True, check_exit_code=[0, 2, 254])
except processutils.ProcessExecutionError:
# Second option: tunctl
utils.execute('tunctl', '-b', '-t', dev, run_as_root=True)
if mac_address:
utils.execute('ip', 'link', 'set', dev, 'address', mac_address,
run_as_root=True, check_exit_code=[0, 2, 254])
utils.execute('ip', 'link', 'set', dev, 'up', run_as_root=True,
check_exit_code=[0, 2, 254])
def delete_net_dev(dev):
"""Delete a network device only if it exists."""
if device_exists(dev):
try:
utils.execute('ip', 'link', 'delete', dev, run_as_root=True,
check_exit_code=[0, 2, 254])
LOG.debug("Net device removed: '%s'", dev)
except processutils.ProcessExecutionError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed removing net device: '%s'"), dev)
# Similar to compute virt layers, the Linux network node
# code uses a flexible driver model to support different ways
# of creating ethernet interfaces and attaching them to the network.
# In the case of a network host, these interfaces
# act as gateway/dhcp/vpn/etc. endpoints not VM interfaces.
interface_driver = None
def _get_interface_driver():
global interface_driver
if not interface_driver:
interface_driver = importutils.import_object(
CONF.linuxnet_interface_driver)
return interface_driver
def plug(network, mac_address, gateway=True):
return _get_interface_driver().plug(network, mac_address, gateway)
def unplug(network):
return _get_interface_driver().unplug(network)
def get_dev(network):
return _get_interface_driver().get_dev(network)
class LinuxNetInterfaceDriver(object):
"""Abstract class that defines generic network host API
for all Linux interface drivers.
"""
def plug(self, network, mac_address):
"""Create Linux device, return device name."""
raise NotImplementedError()
def unplug(self, network):
"""Destroy Linux device, return device name."""
raise NotImplementedError()
def get_dev(self, network):
"""Get device name."""
raise NotImplementedError()
# plugs interfaces using Linux Bridge
class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
def plug(self, network, mac_address, gateway=True):
vlan = network.get('vlan')
if vlan is not None:
iface = CONF.vlan_interface or network['bridge_interface']
LinuxBridgeInterfaceDriver.ensure_vlan_bridge(
vlan,
network['bridge'],
iface,
network,
mac_address,
network.get('mtu'))
iface = 'vlan%s' % vlan
else:
iface = CONF.flat_interface or network['bridge_interface']
LinuxBridgeInterfaceDriver.ensure_bridge(
network['bridge'],
iface,
network, gateway)
if network['share_address'] or CONF.share_dhcp_address:
isolate_dhcp_address(iface, network['dhcp_server'])
# NOTE(vish): applying here so we don't get a lock conflict
iptables_manager.apply()
return network['bridge']
def unplug(self, network, gateway=True):
vlan = network.get('vlan')
if vlan is not None:
iface = 'vlan%s' % vlan
LinuxBridgeInterfaceDriver.remove_vlan_bridge(vlan,
network['bridge'])
else:
iface = CONF.flat_interface or network['bridge_interface']
LinuxBridgeInterfaceDriver.remove_bridge(network['bridge'],
gateway)
if network['share_address'] or CONF.share_dhcp_address:
remove_isolate_dhcp_address(iface, network['dhcp_server'])
iptables_manager.apply()
return self.get_dev(network)
def get_dev(self, network):
return network['bridge']
@staticmethod
def ensure_vlan_bridge(vlan_num, bridge, bridge_interface,
net_attrs=None, mac_address=None,
mtu=None):
"""Create a vlan and bridge unless they already exist."""
interface = LinuxBridgeInterfaceDriver.ensure_vlan(vlan_num,
bridge_interface, mac_address,
mtu)
LinuxBridgeInterfaceDriver.ensure_bridge(bridge, interface, net_attrs)
return interface
@staticmethod
def remove_vlan_bridge(vlan_num, bridge):
"""Delete a bridge and vlan."""
LinuxBridgeInterfaceDriver.remove_bridge(bridge)
LinuxBridgeInterfaceDriver.remove_vlan(vlan_num)
@staticmethod
@utils.synchronized('lock_vlan', external=True)
def ensure_vlan(vlan_num, bridge_interface, mac_address=None, mtu=None):
"""Create a vlan unless it already exists."""
interface = 'vlan%s' % vlan_num
if not device_exists(interface):
LOG.debug('Starting VLAN interface %s', interface)
_execute('ip', 'link', 'add', 'link', bridge_interface,
'name', interface, 'type', 'vlan',
'id', vlan_num, run_as_root=True,
check_exit_code=[0, 2, 254])
# (danwent) the bridge will inherit this address, so we want to
# make sure it is the value set from the NetworkManager
if mac_address:
_execute('ip', 'link', 'set', interface, 'address',
mac_address, run_as_root=True,
check_exit_code=[0, 2, 254])
_execute('ip', 'link', 'set', interface, 'up', run_as_root=True,
check_exit_code=[0, 2, 254])
# NOTE(vish): set mtu every time to ensure that changes to mtu get
        #             propagated
_set_device_mtu(interface, mtu)
return interface
@staticmethod
@utils.synchronized('lock_vlan', external=True)
def remove_vlan(vlan_num):
"""Delete a vlan."""
vlan_interface = 'vlan%s' % vlan_num
delete_net_dev(vlan_interface)
@staticmethod
@utils.synchronized('lock_bridge', external=True)
def ensure_bridge(bridge, interface, net_attrs=None, gateway=True,
filtering=True):
"""Create a bridge unless it already exists.
:param interface: the interface to create the bridge on.
:param net_attrs: dictionary with attributes used to create bridge.
:param gateway: whether or not the bridge is a gateway.
:param filtering: whether or not to create filters on the bridge.
If net_attrs is set, it will add the net_attrs['gateway'] to the bridge
using net_attrs['broadcast'] and net_attrs['cidr']. It will also add
the ip_v6 address specified in net_attrs['cidr_v6'] if use_ipv6 is set.
The code will attempt to move any ips that already exist on the
interface onto the bridge and reset the default gateway if necessary.
"""
if not device_exists(bridge):
LOG.debug('Starting Bridge %s', bridge)
_execute('brctl', 'addbr', bridge, run_as_root=True)
_execute('brctl', 'setfd', bridge, 0, run_as_root=True)
# _execute('brctl setageing %s 10' % bridge, run_as_root=True)
_execute('brctl', 'stp', bridge, 'off', run_as_root=True)
# (danwent) bridge device MAC address can't be set directly.
# instead it inherits the MAC address of the first device on the
# bridge, which will either be the vlan interface, or a
# physical NIC.
_execute('ip', 'link', 'set', bridge, 'up', run_as_root=True)
if interface:
LOG.debug('Adding interface %(interface)s to bridge %(bridge)s',
{'interface': interface, 'bridge': bridge})
out, err = _execute('brctl', 'addif', bridge, interface,
check_exit_code=False, run_as_root=True)
if (err and err != "device %s is already a member of a bridge; "
"can't enslave it to bridge %s.\n" % (interface, bridge)):
msg = _('Failed to add interface: %s') % err
raise exception.NovaException(msg)
out, err = _execute('ip', 'link', 'set', interface, 'up',
check_exit_code=False, run_as_root=True)
# NOTE(vish): This will break if there is already an ip on the
# interface, so we move any ips to the bridge
# NOTE(danms): We also need to copy routes to the bridge so as
# not to break existing connectivity on the interface
old_routes = []
out, err = _execute('ip', 'route', 'show', 'dev', interface)
for line in out.split('\n'):
fields = line.split()
if fields and 'via' in fields:
old_routes.append(fields)
_execute('ip', 'route', 'del', *fields,
run_as_root=True)
out, err = _execute('ip', 'addr', 'show', 'dev', interface,
'scope', 'global')
for line in out.split('\n'):
fields = line.split()
if fields and fields[0] == 'inet':
if fields[-2] in ('secondary', 'dynamic', ):
params = fields[1:-2]
else:
params = fields[1:-1]
_execute(*_ip_bridge_cmd('del', params, fields[-1]),
run_as_root=True, check_exit_code=[0, 2, 254])
_execute(*_ip_bridge_cmd('add', params, bridge),
run_as_root=True, check_exit_code=[0, 2, 254])
for fields in old_routes:
_execute('ip', 'route', 'add', *fields,
run_as_root=True)
if filtering:
# Don't forward traffic unless we were told to be a gateway
ipv4_filter = iptables_manager.ipv4['filter']
if gateway:
for rule in get_gateway_rules(bridge):
ipv4_filter.add_rule(*rule)
else:
ipv4_filter.add_rule('FORWARD',
('--in-interface %s -j %s'
% (bridge, CONF.iptables_drop_action)))
ipv4_filter.add_rule('FORWARD',
('--out-interface %s -j %s'
% (bridge, CONF.iptables_drop_action)))
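    # Usage sketch (bridge/interface names are hypothetical):
    #   LinuxBridgeInterfaceDriver.ensure_bridge('br100', 'eth1')
    # creates br100 if missing, enslaves eth1, and migrates any global-scope
    # addresses and routes from eth1 onto br100 so connectivity survives.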
@staticmethod
@utils.synchronized('lock_bridge', external=True)
def remove_bridge(bridge, gateway=True, filtering=True):
"""Delete a bridge."""
if not device_exists(bridge):
return
else:
if filtering:
ipv4_filter = iptables_manager.ipv4['filter']
if gateway:
for rule in get_gateway_rules(bridge):
ipv4_filter.remove_rule(*rule)
else:
drop_actions = ['DROP']
if CONF.iptables_drop_action != 'DROP':
drop_actions.append(CONF.iptables_drop_action)
for drop_action in drop_actions:
ipv4_filter.remove_rule('FORWARD',
('--in-interface %s -j %s'
% (bridge, drop_action)))
ipv4_filter.remove_rule('FORWARD',
('--out-interface %s -j %s'
% (bridge, drop_action)))
delete_net_dev(bridge)
@utils.synchronized('ebtables', external=True)
def ensure_ebtables_rules(rules, table='filter'):
    for rule in rules:
        # delete any existing copy of the rule first (ignoring failures),
        # then insert it, so repeated calls never stack duplicates
        cmd = ['ebtables', '-t', table, '-D'] + rule.split()
        _execute(*cmd, check_exit_code=False, run_as_root=True)
        cmd[3] = '-I'
        _execute(*cmd, run_as_root=True)
@utils.synchronized('ebtables', external=True)
def remove_ebtables_rules(rules, table='filter'):
for rule in rules:
cmd = ['ebtables', '-t', table, '-D'] + rule.split()
_execute(*cmd, check_exit_code=False, run_as_root=True)
def isolate_dhcp_address(interface, address):
# block arp traffic to address across the interface
rules = []
rules.append('INPUT -p ARP -i %s --arp-ip-dst %s -j DROP'
% (interface, address))
rules.append('OUTPUT -p ARP -o %s --arp-ip-src %s -j DROP'
% (interface, address))
rules.append('FORWARD -p IPv4 -i %s --ip-protocol udp '
'--ip-destination-port 67:68 -j DROP'
% interface)
rules.append('FORWARD -p IPv4 -o %s --ip-protocol udp '
'--ip-destination-port 67:68 -j DROP'
% interface)
# NOTE(vish): the above is not possible with iptables/arptables
ensure_ebtables_rules(rules)
def remove_isolate_dhcp_address(interface, address):
# block arp traffic to address across the interface
rules = []
rules.append('INPUT -p ARP -i %s --arp-ip-dst %s -j DROP'
% (interface, address))
rules.append('OUTPUT -p ARP -o %s --arp-ip-src %s -j DROP'
% (interface, address))
rules.append('FORWARD -p IPv4 -i %s --ip-protocol udp '
'--ip-destination-port 67:68 -j DROP'
% interface)
rules.append('FORWARD -p IPv4 -o %s --ip-protocol udp '
'--ip-destination-port 67:68 -j DROP'
% interface)
remove_ebtables_rules(rules)
# NOTE(vish): the above is not possible with iptables/arptables
def get_gateway_rules(bridge):
interfaces = CONF.forward_bridge_interface
if 'all' in interfaces:
return [('FORWARD', '-i %s -j ACCEPT' % bridge),
('FORWARD', '-o %s -j ACCEPT' % bridge)]
rules = []
for iface in CONF.forward_bridge_interface:
if iface:
rules.append(('FORWARD', '-i %s -o %s -j ACCEPT' % (bridge,
iface)))
rules.append(('FORWARD', '-i %s -o %s -j ACCEPT' % (iface,
bridge)))
rules.append(('FORWARD', '-i %s -o %s -j ACCEPT' % (bridge, bridge)))
rules.append(('FORWARD', '-i %s -j %s' % (bridge,
CONF.iptables_drop_action)))
rules.append(('FORWARD', '-o %s -j %s' % (bridge,
CONF.iptables_drop_action)))
return rules
# plugs interfaces using Open vSwitch
class LinuxOVSInterfaceDriver(LinuxNetInterfaceDriver):
def plug(self, network, mac_address, gateway=True):
dev = self.get_dev(network)
if not device_exists(dev):
bridge = CONF.linuxnet_ovs_integration_bridge
_ovs_vsctl(['--', '--may-exist', 'add-port', bridge, dev,
'--', 'set', 'Interface', dev, 'type=internal',
'--', 'set', 'Interface', dev,
'external-ids:iface-id=%s' % dev,
'--', 'set', 'Interface', dev,
'external-ids:iface-status=active',
'--', 'set', 'Interface', dev,
'external-ids:attached-mac=%s' % mac_address])
_execute('ip', 'link', 'set', dev, 'address', mac_address,
run_as_root=True)
_set_device_mtu(dev, network.get('mtu'))
_execute('ip', 'link', 'set', dev, 'up', run_as_root=True)
if not gateway:
# If we weren't instructed to act as a gateway then add the
# appropriate flows to block all non-dhcp traffic.
_execute('ovs-ofctl',
'add-flow', bridge, 'priority=1,actions=drop',
run_as_root=True)
_execute('ovs-ofctl', 'add-flow', bridge,
'udp,tp_dst=67,dl_dst=%s,priority=2,actions=normal' %
mac_address, run_as_root=True)
            # .. and make sure iptables won't forward it as well.
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--in-interface %s -j %s' % (bridge,
CONF.iptables_drop_action))
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--out-interface %s -j %s' % (bridge,
CONF.iptables_drop_action))
else:
for rule in get_gateway_rules(bridge):
iptables_manager.ipv4['filter'].add_rule(*rule)
return dev
def unplug(self, network):
dev = self.get_dev(network)
bridge = CONF.linuxnet_ovs_integration_bridge
_ovs_vsctl(['--', '--if-exists', 'del-port', bridge, dev])
return dev
def get_dev(self, network):
dev = 'gw-' + str(network['uuid'][0:11])
return dev
# plugs interfaces using Linux Bridge when using NeutronManager
class NeutronLinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
BRIDGE_NAME_PREFIX = 'brq'
GATEWAY_INTERFACE_PREFIX = 'gw-'
def plug(self, network, mac_address, gateway=True):
dev = self.get_dev(network)
bridge = self.get_bridge(network)
if not gateway:
# If we weren't instructed to act as a gateway then add the
# appropriate flows to block all non-dhcp traffic.
            # .. and make sure iptables won't forward it as well.
iptables_manager.ipv4['filter'].add_rule('FORWARD',
('--in-interface %s -j %s'
% (bridge, CONF.iptables_drop_action)))
iptables_manager.ipv4['filter'].add_rule('FORWARD',
('--out-interface %s -j %s'
% (bridge, CONF.iptables_drop_action)))
return bridge
else:
for rule in get_gateway_rules(bridge):
iptables_manager.ipv4['filter'].add_rule(*rule)
create_tap_dev(dev, mac_address)
if not device_exists(bridge):
LOG.debug("Starting bridge %s ", bridge)
utils.execute('brctl', 'addbr', bridge, run_as_root=True)
utils.execute('brctl', 'setfd', bridge, str(0), run_as_root=True)
utils.execute('brctl', 'stp', bridge, 'off', run_as_root=True)
utils.execute('ip', 'link', 'set', bridge, 'address', mac_address,
run_as_root=True, check_exit_code=[0, 2, 254])
utils.execute('ip', 'link', 'set', bridge, 'up', run_as_root=True,
check_exit_code=[0, 2, 254])
LOG.debug("Done starting bridge %s", bridge)
full_ip = '%s/%s' % (network['dhcp_server'],
network['cidr'].rpartition('/')[2])
utils.execute('ip', 'address', 'add', full_ip, 'dev', bridge,
run_as_root=True, check_exit_code=[0, 2, 254])
return dev
def unplug(self, network):
dev = self.get_dev(network)
if not device_exists(dev):
return None
else:
delete_net_dev(dev)
return dev
def get_dev(self, network):
dev = self.GATEWAY_INTERFACE_PREFIX + str(network['uuid'][0:11])
return dev
def get_bridge(self, network):
bridge = self.BRIDGE_NAME_PREFIX + str(network['uuid'][0:11])
return bridge
# provide compatibility with existing configs
QuantumLinuxBridgeInterfaceDriver = NeutronLinuxBridgeInterfaceDriver
iptables_manager = IptablesManager()
| apache-2.0 | -7,665,344,293,116,199,000 | 37.979748 | 79 | 0.538292 | false |
ProgVal/Supybot-website | plugins/models.py | 1 | 2683 | from django.contrib.auth.models import User
from django.db import models
from django import forms
import datetime
class Plugin(models.Model):
author = models.ForeignKey(User, help_text='The user who wrote the plugin.')
name = models.SlugField(max_length=255, help_text='The name of the plugin.',
unique=True)
short_description = models.TextField(max_length=512, help_text='A short '
'description of the plugin, shown in list view.')
description = models.TextField(help_text='A full description of the '
'plugin.')
minimal_version = models.CharField(max_length=4096, help_text='The oldest '
'Supybot version compatible with this plugin.', default='0.83.4.1')
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
published = models.BooleanField(default=False, help_text='Determines '
'whether or not the plugin can be seen by everyone.')
url = models.URLField(blank=True, help_text='The URL to the website for the plugin.')
    # git_repo is not a foreign key to GitRepository, because GitRepository
    # items are only helpers for developers, and are totally optional.
git_repo = models.CharField(max_length=512, help_text='The URL to the '
'Git repository.', blank=True)
def __unicode__(self):
return u'%s' % self.name
def get_absolute_url(self):
return '/plugins/view/%s/' % self.name
class Meta:
ordering = ['-created_at']
class PluginSubmitForm(forms.ModelForm):
class Meta:
model = Plugin
fields = ('name', 'published', 'minimal_version', 'git_repo', 'url',
'short_description', 'description')
class PluginEditForm(PluginSubmitForm):
class Meta(PluginSubmitForm.Meta):
exclude = ('name',)
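# Usage sketch (illustrative; 'request' is a hypothetical view argument):
#   form = PluginSubmitForm(request.POST)
#   if form.is_valid():
#       plugin = form.save(commit=False)
#       plugin.author = request.user
#       plugin.save()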
class PluginComment(models.Model):
key = models.ForeignKey(Plugin)
user = models.ForeignKey(User)
text = models.TextField()
created_date = models.DateTimeField(auto_now_add=True)
class GitRepository(models.Model):
maintainer = models.ForeignKey(User)
name = models.SlugField(unique=True)
url = models.CharField(max_length=512)
latest_fetch = models.DateTimeField(default=datetime.datetime.min)
state = models.CharField(max_length=1, choices=(
('c', 'cloning'),
('o', 'ok'),
('w', 'working'),
('n', 'not initialized')))
def __unicode__(self):
return self.name
class Meta:
verbose_name_plural = 'Git repositories'
class GitRepositoryForm(forms.ModelForm):
class Meta:
model = GitRepository
fields = ('name', 'url',)
| bsd-3-clause | -7,488,097,766,186,343,000 | 34.302632 | 89 | 0.660082 | false |
nikofil/invenio-search-ui | invenio_search_ui/version.py | 1 | 1207 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015, 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Version information for Invenio-Search-UI.
This file is imported by ``invenio_search_ui.__init__``,
and parsed by ``setup.py``.
"""
from __future__ import absolute_import, print_function
__version__ = "1.0.0a6.dev20160628"
| gpl-2.0 | 1,992,955,465,545,032,400 | 35.575758 | 76 | 0.743165 | false |
imcgreer/simqso | simqso/sqrun.py | 1 | 27784 | #!/usr/bin/env python
import os
import numpy as np
from functools import partial
from astropy.io import fits
from astropy.table import Table,hstack
from astropy import cosmology
from . import sqbase
from . import sqgrids as grids
from . import hiforest
from . import dustextinction
from . import sqphoto
from . import sqmodels
import multiprocessing
def buildWaveGrid(simParams):
dispersionScale = simParams.get('DispersionScale','logarithmic')
if dispersionScale == 'logarithmic':
lam1,lam2 = simParams['waveRange']
R = simParams['SpecDispersion']
wave = sqbase.fixed_R_dispersion(lam1,lam2,R)
else:
raise ValueError('Dispersion scale %s not supported' % dispersionScale)
return wave
def reseed(par):
    """Seed the numpy RNG if the parameter block specifies 'RandomSeed'."""
    try:
        np.random.seed(par['RandomSeed'])
    except KeyError:
        pass
def buildQsoGrid(simParams):
'''
Create a grid of simulated quasar "points". This function parses the
    'GridParams' section of simParams, and interprets the following options:
- FluxRedshiftGrid : points are defined by (appMag,z)
- LuminosityRedshiftGrid : points are defined by (absMag,z)
- LuminosityFunction : points are defined by (appMag,z) and sampled from
a luminosity function.
'''
cosmodef = simParams.get('Cosmology')
gridPars = simParams['GridParams']
try:
gridType = gridPars['GridType']
except KeyError:
raise ValueError('Must specify a GridType')
kcorrType = gridPars.get('InitialKCorrection','Continuum')
if kcorrType == 'Continuum':
kcorr = sqbase.ContinuumKCorr(gridPars['ObsBand'],
gridPars['RestBand'])
elif kcorrType == 'DefaultEmissionLine':
kcorr = sqbase.EmissionLineKCorr(gridPars['ObsBand'],
gridPars['RestBand'])
else:
raise ValueError
reseed(gridPars)
#
def get_nbins(low,high,n):
if type(n) is int:
return n
else:
return int( np.floor((high - low) / n) )
if gridType.endswith('RedshiftGrid'):
m1,m2,nm = gridPars['mRange']
z1,z2,nz = gridPars['zRange']
nBins = ( get_nbins(m1,m2,nm), get_nbins(z1,z2,nz) )
mSampler = grids.UniformSampler(m1,m2)
zSampler = grids.UniformSampler(z1,z2)
if gridType.startswith('Luminosity'):
m = grids.AbsMagVar(mSampler,restWave=gridPars['LumUnits'])
units = 'luminosity'
elif gridType.startswith('Flux'):
m = grids.AppMagVar(mSampler,gridPars['ObsBand'])
units = 'flux'
z = grids.RedshiftVar(zSampler)
elif gridType == 'FixedGrid':
raise NotImplementedError
m = grids.FixedSampler(gridPars['fixed_M'])
z = grids.FixedSampler(gridPars['fixed_z'])
# XXX units
elif gridType == 'LuminosityFunction':
try:
qlf = gridPars['QLFmodel']
qlf.set_cosmology(cosmodef)
except KeyError:
raise ValueError('Must specify a parameterization of the LF')
qsoGrid = grids.generateQlfPoints(qlf,
gridPars['mRange'],
gridPars['zRange'],
kcorr,
**gridPars['QLFargs'])
units = 'flux'
else:
raise ValueError('GridType %s unknown' % gridType)
if gridType != 'LuminosityFunction':
qsoGrid = grids.QsoSimGrid([m,z],nBins,gridPars['nPerBin'],
units=units,cosmo=cosmodef)
try:
_ = qsoGrid.absMag
except:
            absMag = grids.AbsMagFromAppMagVar(qsoGrid.appMag,z,kcorr,cosmodef,
gridPars['RestBand'])
qsoGrid.addVar(absMag)
return qsoGrid
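# A minimal 'GridParams' block accepted by the parser above (a sketch; all
# values are illustrative and band names depend on the photometric systems):
#   simParams['GridParams'] = {
#       'GridType': 'LuminosityRedshiftGrid',
#       'mRange': (-27.0, -23.0, 0.5), 'zRange': (2.0, 4.0, 0.25),
#       'nPerBin': 10, 'LumUnits': 'M1450',
#       'ObsBand': 'SDSS-i', 'RestBand': 1450.,
#   }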
def buildForest(wave,z,simParams,outputDir):
'''Create a set of absorbers for a given number of lines-of-sight,
sampled according to the input forest model. Then calculate the
transmission along each line of sight. The input redshifts correspond
to individual QSOs. The number of LOSs is generally smaller so that
fewer forest computations are needed; individual LOSs are built up
in redshift steps as each QSO redshift is iterated.
'''
forestParams = simParams['ForestParams']
reseed(forestParams)
forestFn = forestParams.get('FileName')
tgrid = None
if forestFn:
try:
tgrid = hiforest.CachedIGMTransmissionGrid(forestFn,outputDir)
if not np.allclose(wave[:len(tgrid.specWave)],tgrid.specWave):
raise ValueError("Input wavegrid doesn't match stored wave")
except IOError:
pass
if tgrid is None:
nlos = forestParams['NumLinesOfSight']
forestModel = forestParams['ForestModel']
if isinstance(forestModel,str):
forestModel = sqmodels.forestModels[forestModel]
tgrid = hiforest.IGMTransmissionGrid(wave,forestModel,nlos,
zmax=z.max(),**forestParams)
return tgrid
def buildContinuumModels(qsoGrid,simParams,verbose=0):
continuumParams = simParams['QuasarModelParams']['ContinuumParams']
reseed(continuumParams)
slopes = continuumParams['PowerLawSlopes'][::2]
breakpts = continuumParams['PowerLawSlopes'][1::2]
if verbose > 0:
print('... building continuum grid')
cmodel = continuumParams['ContinuumModel']
if cmodel == 'BrokenPowerLaw':
slopeVars = [ grids.GaussianSampler(*s) for s in slopes ]
continuumVars = [ grids.BrokenPowerLawContinuumVar(slopeVars,
breakpts) ]
elif isinstance(cmodel,grids.QsoSimVar):
continuumVars = [ cmodel ]
else:
raise ValueError
qsoGrid.addVars(continuumVars)
def buildEmissionLineGrid(qsoGrid,simParams):
emLineParams = simParams['QuasarModelParams']['EmissionLineParams']
reseed(emLineParams)
if emLineParams['EmissionLineModel'] == 'FixedVdBCompositeLines':
emLineGrid = grids.generateVdBCompositeEmLines(
minEW=emLineParams.get('minEW',1.0),
noFe=emLineParams.get('VdB_noFe',False))
elif emLineParams['EmissionLineModel'] == 'VariedEmissionLineGrid':
emLineGrid = grids.generateBEffEmissionLines(qsoGrid.absMag,
**emLineParams)
elif isinstance(emLineParams['EmissionLineModel'],grids.QsoSimVar):
emLineGrid = emLineParams['EmissionLineModel']
else:
raise ValueError('invalid emission line model: ' +
emLineParams['EmissionLineModel'])
qsoGrid.addVar(emLineGrid)
def buildDustGrid(qsoGrid,simParams,verbose=0):
if verbose > 0:
print('... building dust extinction grid')
dustParams = simParams['QuasarModelParams']['DustExtinctionParams']
reseed(dustParams)
if dustParams['DustExtinctionModel'] == 'Fixed E(B-V)':
sampler = grids.ConstSampler(dustParams['E(B-V)'])
elif dustParams['DustExtinctionModel']=='Exponential E(B-V) Distribution':
sampler = grids.ExponentialSampler(dustParams['E(B-V)'])
else:
raise ValueError('invalid dust extinction model: '+
dustParams['DustExtinctionModel'])
if dustParams['DustModelName'] == 'SMC':
dustVar = grids.SMCDustVar(sampler)
elif dustParams['DustModelName'] == 'CalzettiSB':
dustVar = grids.CalzettiDustVar(sampler)
else:
raise ValueError('invalid dust extinction model: '+
dustParams['DustModelName'])
# XXX
# fraction=dustParams.get('DustLOSfraction',1.0))
qsoGrid.addVar(dustVar)
def buildFeatures(qsoGrid,wave,simParams,forest=None,verbose=0):
buildContinuumModels(qsoGrid,simParams,verbose=verbose)
qsoParams = simParams['QuasarModelParams']
if 'EmissionLineParams' in qsoParams:
buildEmissionLineGrid(qsoGrid,simParams)
if 'IronEmissionParams' in qsoParams:
# only option for now is the VW01 template
scalings = qsoParams['IronEmissionParams'].get('FeScalings')
feGrid = grids.VW01FeTemplateGrid(qsoGrid.z,wave,scales=scalings)
qsoGrid.addVar(grids.FeTemplateVar(feGrid))
if 'DustExtinctionParams' in qsoParams:
buildDustGrid(qsoGrid,simParams,verbose=verbose)
if forest is not None:
if isinstance(forest,hiforest.CachedIGMTransmissionGrid):
losMap = forest.losMap
else:
losMap = None
if isinstance(forest,hiforest.GridForest):
forestVar = grids.SightlineVar(forest,losMap=losMap)
else:
forestVar = grids.HIAbsorptionVar(forest,losMap=losMap)
qsoGrid.addVar(forestVar)
def _getpar(feature,obj):
    """Pull a feature's sampled value from a grid row (None/index aware)."""
    if feature is None:
return None
elif isinstance(feature.sampler,grids.NullSampler):
return None
elif isinstance(feature.sampler,grids.IndexSampler):
return obj.index
else:
return obj[feature.name]
def buildQsoSpectrum(wave,cosmo,specFeatures,obj,iterNum=1,
save_components=False):
spec = sqbase.Spectrum(wave,z=obj['z'])
if save_components:
base = sqbase.Spectrum(spec.wave,spec.f_lambda.copy(),spec.z)
components = {}
# start with continuum
if cosmo is None:
fluxNorm = None
else:
distmod = lambda z: cosmo.distmod(z).value
fluxNorm = {'wavelength':1450.,'M_AB':obj['absMag'],'DM':distmod}
for feature in specFeatures:
if isinstance(feature,grids.ContinuumVar):
assocvals = _getpar(feature.get_associated_var(),obj)
spec = feature.add_to_spec(spec,_getpar(feature,obj),
assocvals=assocvals,
fluxNorm=fluxNorm)
if save_components:
components[feature.name] = spec - base
base.f_lambda[:] = spec.f_lambda
# add emission (multiplicative) features
emspec = sqbase.Spectrum(wave,z=obj['z'])
if save_components:
base = sqbase.Spectrum(emspec.wave,emspec.f_lambda.copy(),emspec.z)
for feature in specFeatures:
if isinstance(feature,grids.EmissionFeatureVar):
assocvals = _getpar(feature.get_associated_var(),obj)
emspec = feature.add_to_spec(emspec,_getpar(feature,obj),
assocvals=assocvals)
if save_components:
components[feature.name] = emspec - base
base.f_lambda[:] = emspec.f_lambda
spec *= emspec + 1
# add any remaining features
for feature in specFeatures:
if isinstance(feature,grids.ContinuumVar) or \
isinstance(feature,grids.EmissionFeatureVar):
continue
assocvals = _getpar(feature.get_associated_var(),obj)
spec = feature.add_to_spec(spec,_getpar(feature,obj),
assocvals=assocvals,
advance=(iterNum==1))
if save_components:
components[feature.name] = spec - base
base.f_lambda[:] = spec.f_lambda
if save_components:
return spec,components
else:
return spec
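# Single-object usage sketch (grid construction omitted; names illustrative):
#   specFeatures = qsoGrid.getVars(grids.SpectralFeatureVar)
#   sp = buildQsoSpectrum(wave, qsoGrid.cosmo, specFeatures, qsoGrid.data[0])
#   # sp.f_lambda is continuum * (1 + emission) with the remaining
#   # (absorption/dust) features applied on top.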
def buildGrpSpectra(wave,cosmo,specFeatures,photoCache,saveSpectra,
fluxBand,nIter,verbose,objGroup):
n = len(objGroup)
if verbose and verbose > 0:
losGrp = objGroup['igmlos'][0]
if losGrp % verbose == 0:
print('processing ',n,' obj in group ',losGrp)
rv = dict()
if photoCache:
nb = len(photoCache)
rv['synMag'] = np.zeros((n,nb),dtype=np.float32)
rv['synFlux'] = np.zeros((n,nb),dtype=np.float32)
if saveSpectra:
nw = len(wave)
rv['spectra'] = np.zeros((n,nw),dtype=np.float32)
zi = objGroup['z'].argsort()
for i in zi:
for iterNum in range(1,nIter+1):
sp = buildQsoSpectrum(wave,cosmo,specFeatures,objGroup[i],iterNum)
if photoCache is not None:
synMag,synFlux = sqphoto.calcSynPhot(sp,photoCache=photoCache)
if nIter > 1:
dm = synMag[fluxBand] - objGroup['appMag'][i]
objGroup['absMag'][i] -= dm
# resample features with updated absolute mags
for var in specFeatures:
if var.dependentVars is not None:
var.resample(objGroup[var.dependentVars][i],ii=i)
# pass index as 1d-array to preserve correct shape
objGroup[var.name][i] = var(None,ii=np.array([i]))
if np.abs(dm) < 0.005:
break
if photoCache is not None:
rv['synMag'][i] = synMag
rv['synFlux'][i] = synFlux
if saveSpectra:
rv['spectra'][i] = sp.f_lambda
rv['absMag'] = objGroup['absMag'].copy()
return rv
def _regroup(spOut):
# XXX tell me there's a better way to do this
n = len(spOut[0])
rv = [ [] for i in range(n) ]
for sp in spOut:
for j in range(n):
rv[j].append(sp[j])
return [ np.array(v) for v in rv ]
def buildSpectraBySightLine(wave,qsoGrid,procMap=map,
maxIter=1,verbose=0,saveSpectra=False):
    '''Assemble the spectral components of QSOs from the input parameters,
    grouping the objects by IGM sightline so that each forest transmission
    spectrum is computed only once per line of sight.
Parameters
----------
wave : `~numpy.ndarray`
Input wavelength grid.
'''
photoCache = qsoGrid.getPhotoCache(wave)
if verbose > 0:
print('simulating ',qsoGrid.nObj,' quasar spectra')
print('units are ',qsoGrid.units)
print('max number iterations: ',maxIter)
verby = 0 if not verbose else qsoGrid.nObj//(5*verbose)
if qsoGrid.units == 'luminosity' or photoCache is None:
nIter = 1
fluxBand = None
else:
nIter = maxIter
fluxBand = qsoGrid.getObsBandIndex()
#
# extract the feature lists, group by sightline, and run
specFeatures = qsoGrid.getVars(grids.SpectralFeatureVar)
build_grp_spec = partial(buildGrpSpectra,wave,qsoGrid.cosmo,
specFeatures,photoCache,saveSpectra,
fluxBand,nIter,verby)
qsoGroups = qsoGrid.group_by('igmlos',with_index=True)
# pool.map() doesn't like the iterable produced by table.group_by(), so
# forcing resolution of the elements here with list() -- not that much
# memory anyway
specOut = list(procMap(build_grp_spec,list(qsoGroups)))
if qsoGrid.photoMap:
bands = qsoGrid.photoBands
def newarr():
return np.zeros((qsoGrid.nObj,len(bands)),dtype=np.float32)
qsoGrid.addVar(grids.SynMagVar(grids.FixedSampler(newarr())))
qsoGrid.addVar(grids.SynFluxVar(grids.FixedSampler(newarr())))
# the output needs to be remapped to the input locations
for objgrp,out in zip(qsoGroups,specOut):
for k in ['absMag','synMag','synFlux']:
qsoGrid.data[k][objgrp['_ii']] = out[k]
if saveSpectra:
spectra = np.vstack([s['spectra'] for s in specOut])
spectra = spectra[qsoGroups.parent['_ii'].argsort()]
else:
spectra = None
return qsoGrid,spectra
def buildSpecWithPhot(wave,cosmo,specFeatures,photoCache,
objData,iterNum=None,saveSpectra=False):
sp = buildQsoSpectrum(wave,cosmo,specFeatures,objData,
iterNum=iterNum)
if photoCache is None:
rv = (None,None)
else:
rv = sqphoto.calcSynPhot(sp,photoCache=photoCache)
if saveSpectra:
rv = rv + (sp.f_lambda,)
else:
rv = rv + (None,)
return rv
def buildSpectraBulk(wave,qsoGrid,procMap=map,
maxIter=1,verbose=0,saveSpectra=False):
    '''Assemble the spectral components of QSOs from the input parameters,
    building each object's spectrum independently (no sightline grouping).
Parameters
----------
wave : `~numpy.ndarray`
Input wavelength grid.
'''
photoCache = qsoGrid.getPhotoCache(wave)
if verbose > 0:
print('simulating ',qsoGrid.nObj,' quasar spectra')
print('units are ',qsoGrid.units)
if qsoGrid.units == 'luminosity' or photoCache is None:
nIter = 1
fluxBand = None
else:
nIter = maxIter
fluxBand = qsoGrid.getObsBandIndex()
#
for iterNum in range(1,nIter+1):
specFeatures = qsoGrid.getVars(grids.SpectralFeatureVar)
samplers = []
for f in specFeatures:
samplers.append(f.sampler)
if not ( isinstance(f.sampler,grids.NullSampler) or
isinstance(f.sampler,grids.IndexSampler) ):
f.sampler = None
build_one_spec = partial(buildSpecWithPhot,wave,qsoGrid.cosmo,
specFeatures,photoCache,iterNum=iterNum,
saveSpectra=saveSpectra)
if verbose > 1:
print('buildSpectra iteration ',iterNum,' out of ',nIter)
specOut = list(procMap(build_one_spec,qsoGrid))
specOut = _regroup(specOut)
synMag,synFlux,spectra = specOut
v = qsoGrid.getVars(grids.SightlineVar)
if len(v) > 0 and isinstance(v[0].forest,hiforest.GridForest):
jj,dm,df = v[0].forest.get(qsoGrid.data['igmlos'],
qsoGrid.data['z'])
synMag[:,jj] += dm
synFlux[:,jj] *= df
for f,s in zip(specFeatures,samplers):
f.sampler = s
if nIter > 1:
# find the largest mag offset
dm = synMag[:,fluxBand] - qsoGrid.appMag
if verbose > 1:
print('--> delta mag mean = %.7f, rms = %.7f, |max| = %.7f' % \
(dm.mean(),dm.std(),np.abs(dm).max()))
qsoGrid.absMag[:] -= dm
dmagMax = np.abs(dm).max()
# resample features with updated absolute mags
for var in specFeatures:
if var.dependentVars is not None:
var.resample(qsoGrid.data[var.dependentVars])
qsoGrid.data[var.name][:] = var(None)
if dmagMax < 0.01:
break
if qsoGrid.photoMap is not None:
qsoGrid.addVar(grids.SynMagVar(grids.FixedSampler(synMag)))
qsoGrid.addVar(grids.SynFluxVar(grids.FixedSampler(synFlux)))
return qsoGrid,spectra
def readSimulationData(fileName,outputDir,retParams=False,clean=False):
qsoGrid = grids.QsoSimObjects()
qsoGrid.read(os.path.join(outputDir,fileName+'.fits'),clean=clean)
simPars = qsoGrid.simPars
gridPars = simPars['GridParams']
try:
mSampler = grids.FixedSampler(qsoGrid.appMag)
m = grids.AppMagVar(mSampler,gridPars['ObsBand'])
except:
mSampler = grids.FixedSampler(qsoGrid.absMag)
m = grids.AbsMagVar(mSampler,restWave=gridPars['LumUnits'])
z = grids.RedshiftVar(grids.FixedSampler(qsoGrid.z))
qsoGrid.addVars([m,z])
if retParams:
return qsoGrid,simPars
return qsoGrid
def restore_qso_grid(fileName,wave,outputDir='.',**kwargs):
qsoGrid = grids.QsoSimObjects()
if not fileName.endswith('.fits'):
fileName += '.fits'
qsoGrid.read(os.path.join(outputDir,fileName),**kwargs)
# IGM transmission spectra depend on a (possibly) pre-computed grid,
# which must be regenerated
try:
hiVar = qsoGrid.getVars(grids.HIAbsorptionVar)[0]
fmodel,nlos,kwargs = hiVar.varmeta
igmGrid = hiforest.IGMTransmissionGrid(wave,fmodel,nlos,**kwargs)
hiVar.set_forest_grid(igmGrid)
except IndexError:
# no forest
pass
# Fe template spectra depend on a (possibly) pre-computed grid,
# which must be regenerated
try:
feVar = qsoGrid.getVars(grids.FeTemplateVar)[0]
kwargs = feVar.varmeta
fetempl = grids.VW01FeTemplateGrid(qsoGrid.z,wave,**kwargs)
feVar.set_template_grid(fetempl)
except IndexError:
# no forest
pass
#
return qsoGrid
def qsoSimulation(simParams,**kwargs):
'''
Run a complete simulation.
1. Construct grid of QSOs.
2. Generate Lyman forest transmission spectra from a subsample of
random LOSs (optional).
3. Sample QSO spectral features (continuum, emission lines, dust).
4. Build simulated spectra and derive photometry (photometry is optional).
5. Transfer the simulated photometry to observed photometry by
calculating errors and folding them in (optional).
Parameters
----------
saveSpectra : bool
save the simulated spectra, not just the photometry.
Beware! result may be quite large (Nqso x Npixels). [default:False]
forestOnly : bool
Only generate the forest transmission spectra. [default:False]
noPhotoMap : bool
skip the simulation of observed photometry [default:False]
outputDir : str
write files to this directory [default:'./']
nproc : int
number of processes to use [default: 1]
'''
saveSpectra = kwargs.get('saveSpectra',False)
forestOnly = kwargs.get('forestOnly',False)
noPhotoMap = kwargs.get('noPhotoMap',False)
noWriteOutput = kwargs.get('noWriteOutput',False)
outputDir = kwargs.get('outputDir','./')
nproc = kwargs.get('nproc',1)
verbose = kwargs.get('verbose',0)
#
# build or restore the grid of (M,z) for each QSO
#
wave = buildWaveGrid(simParams)
reseed(simParams)
if nproc > 1:
pool = multiprocessing.Pool(nproc)
procMap = pool.map
else:
procMap = map
timerLog = sqbase.TimerLog()
try:
qsoGrid,simParams = readSimulationData(simParams['FileName'],
outputDir,retParams=True,
clean=True)
except IOError:
if verbose > 0:
print(simParams['FileName']+' output not found')
if 'GridFileName' in simParams:
if verbose > 0:
print('restoring grid from ',simParams['GridFileName'])
try:
qsoGrid = readSimulationData(simParams['GridFileName'],
outputDir)
except IOError:
if verbose > 0:
print(simParams['GridFileName'],' not found, generating')
qsoGrid = buildQsoGrid(simParams)
qsoGrid.write(simParams,outputDir,
simParams['GridFileName']+'.fits')
else:
if verbose > 0:
print('generating QSO grid')
qsoGrid = buildQsoGrid(simParams)
if not forestOnly:
if not noWriteOutput and 'GridFileName' in simParams:
qsoGrid.write(simParams,outputDir,
simParams['GridFileName']+'.fits')
qsoGrid.setCosmology(simParams.get('Cosmology'))
timerLog('Initialize Grid')
#
# configure the IGM transmission spectra grid (load if cached)
#
if 'ForestParams' in simParams:
forest = buildForest(wave,qsoGrid.z,simParams,outputDir)
else:
forest = None
if forestOnly:
timerLog.dump()
return
#
if isinstance(forest,hiforest.IGMTransmissionGrid):
# build sightlines on-the-fly
buildSpec = buildSpectraBySightLine
# if the user specified a file name, save the forest spectra in it
fpar = simParams.get('ForestParams',{})
forestFn = fpar.get('FileName')
if forestFn:
# map the objects to sightlines and save the forest spectra grid
losSampler = grids.RandomSubSampler(forest.numSightLines)
losMap = losSampler.sample(qsoGrid.nObj)
forest.write(forestFn,outputDir,losMap=losMap,
z_em=qsoGrid.z,**fpar)
# now use the cached forest
forest = hiforest.CachedIGMTransmissionGrid(forestFn,outputDir)
            if not np.allclose(wave[:len(forest.specWave)],forest.specWave):
raise ValueError("Input wavegrid doesn't match stored wave")
timerLog('Generate Forest')
else:
# else no forest or cached forest
buildSpec = buildSpectraBulk
#
qsoGrid.loadPhotoMap(simParams['PhotoMapParams']['PhotoSystems'])
if 'GridForestFile' in simParams:
forest = hiforest.GridForest(simParams['GridForestFile'],
qsoGrid.photoBands)
#
# add the quasar model variables to the grid (does the random sampling)
#
buildFeatures(qsoGrid,wave,simParams,forest,verbose=verbose)
timerLog('Generate Features')
#
# Use continuum and emission line distributions to build the components
# of the intrinsic QSO spectrum, then calculate photometry
#
_,spectra = buildSpec(wave,qsoGrid,procMap,
maxIter=simParams.get('maxFeatureIter',5),
verbose=verbose,saveSpectra=saveSpectra)
timerLog('Build Quasar Spectra')
#
# map the simulated photometry to observed values with uncertainties
#
if not noPhotoMap:
if verbose > 0:
print('mapping photometry')
reseed(simParams['PhotoMapParams'])
photoData = sqphoto.calcObsPhot(qsoGrid.synFlux,qsoGrid.photoMap)
qsoGrid.addData(photoData)
timerLog('PhotoMap')
timerLog.dump()
if nproc > 1:
pool.close()
if not noWriteOutput:
qsoGrid.write(simPars=simParams,outputDir=outputDir)
if saveSpectra:
            spfn = simParams['FileName']+'_spectra.fits'
            save_spectra(wave,spectra,spfn,outputDir)
return qsoGrid,spectra
else:
return qsoGrid
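# End-to-end usage sketch (every value here is hypothetical; see the parsing
# above for the recognized keys):
#   simParams = {
#       'FileName': 'example_sim',
#       'waveRange': (3000., 10000.), 'SpecDispersion': 500,
#       'GridParams': {...},                       # see buildQsoGrid
#       'QuasarModelParams': {
#           'ContinuumParams': {
#               'ContinuumModel': 'BrokenPowerLaw',
#               'PowerLawSlopes': [(-1.5, 0.3), 1100., (-0.5, 0.3)]}},
#       'PhotoMapParams': {'PhotoSystems': [('SDSS', 'Legacy')]},
#   }
#   qsoGrid = qsoSimulation(simParams)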
def load_sim_output(simFileName,outputDir='.',with_spec=True):
simdat,par = readSimulationData(simFileName,outputDir,retParams=True)
if with_spec:
sp = fits.getdata(os.path.join(outputDir,simFileName+'_spectra.fits'))
wave = buildWaveGrid(par)
qsos = hstack([simdat.data,Table(dict(spec=sp))])
return wave,qsos
else:
return simdat.data
def save_spectra(wave,spectra,fileName,outputDir='.',overwrite=True):
logwave = np.log(wave[:2])
dloglam = np.diff(logwave)
hdr = fits.Header()
hdr['CD1_1'] = float(dloglam)
hdr['CRPIX1'] = 1
hdr['CRVAL1'] = logwave[0]
hdr['CRTYPE1'] = 'LOGWAVE'
hdr['SPECSCAL'] = (1e-17,'erg/s/cm^2/A')
spectra = (spectra*1e17).astype(np.float32)
if not fileName.endswith('.fits'):
fileName += '.fits'
fits.writeto(os.path.join(outputDir,fileName),spectra,header=hdr,
overwrite=overwrite)
def load_spectra(fileName,outputDir='.'):
if not fileName.endswith('.fits'):
fileName += '.fits'
    spec,hdr = fits.getdata(os.path.join(outputDir,fileName),header=True)
wi = np.arange(spec.shape[-1])
logwave = hdr['CRVAL1'] + hdr['CD1_1']*(wi-(hdr['CRPIX1']-1))
wave = np.exp(logwave)
return wave,spec
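# Round-trip sketch (file/directory names are made up):
#   wave = sqbase.fixed_R_dispersion(3000., 10000., 500)
#   save_spectra(wave, spectra, 'example_spectra', outputDir='/tmp')
#   wave2, spec2 = load_spectra('example_spectra', outputDir='/tmp')
# The wavelength grid is rebuilt from the log-linear WCS keywords, so wave2
# matches wave to within the stored header precision.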
def generate_default_binned_forest(fileName,outputDir='.',**kwargs):
nlos = kwargs.pop('numSightlines',1000)
zbins = kwargs.pop('zBins',np.arange(0.1,4.6,0.025))
waverange = kwargs.pop('waverange',(1300.,7000))
R = kwargs.pop('R',300)
hiforest.generate_binned_forest(fileName,sqmodels.WP11_model,
nlos,zbins,waverange,R,
outputDir=outputDir,**kwargs)
| bsd-3-clause | -7,249,735,259,691,682,000 | 38.634807 | 79 | 0.610711 | false |
openstack/python-neutronclient | neutronclient/tests/functional/core/test_readonly_neutron.py | 1 | 5377 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from tempest.lib import exceptions
from neutronclient.tests.functional import base
class SimpleReadOnlyNeutronClientTest(base.ClientTestBase):
"""This is a first pass at a simple read only python-neutronclient test.
This only exercises client commands that are read only.
This should test commands:
* as a regular user
* as a admin user
* with and without optional parameters
* initially just check return codes, and later test command outputs
"""
def test_admin_fake_action(self):
self.assertRaises(exceptions.CommandFailed,
self.neutron,
'this-does-neutron-exist')
# NOTE(mestery): Commands in order listed in 'neutron help'
# Optional arguments:
def test_neutron_fake_action(self):
self.assertRaises(exceptions.CommandFailed,
self.neutron,
'this-does-not-exist')
def test_neutron_net_list(self):
net_list = self.parser.listing(self.neutron('net-list'))
self.assertTableStruct(net_list, ['id', 'name', 'subnets'])
def test_neutron_ext_list(self):
ext = self.parser.listing(self.neutron('ext-list'))
self.assertTableStruct(ext, ['alias', 'name'])
def test_neutron_dhcp_agent_list_hosting_net(self):
self.neutron('dhcp-agent-list-hosting-net',
params='private')
def test_neutron_agent_list(self):
agents = self.parser.listing(self.neutron('agent-list'))
field_names = ['id', 'agent_type', 'host', 'alive', 'admin_state_up']
self.assertTableStruct(agents, field_names)
def test_neutron_floatingip_list(self):
self.neutron('floatingip-list')
def test_neutron_meter_label_list(self):
if not self.is_extension_enabled('metering'):
self.skipTest('metering is not enabled')
self.neutron('meter-label-list')
def test_neutron_meter_label_rule_list(self):
if not self.is_extension_enabled('metering'):
self.skipTest('metering is not enabled')
self.neutron('meter-label-rule-list')
def test_neutron_net_external_list(self):
net_ext_list = self.parser.listing(self.neutron('net-external-list'))
self.assertTableStruct(net_ext_list, ['id', 'name', 'subnets'])
def test_neutron_port_list(self):
port_list = self.parser.listing(self.neutron('port-list'))
self.assertTableStruct(port_list, ['id', 'name', 'mac_address',
'fixed_ips'])
def test_neutron_quota_list(self):
self.neutron('quota-list')
def test_neutron_router_list(self):
router_list = self.parser.listing(self.neutron('router-list'))
self.assertTableStruct(router_list, ['id', 'name',
'external_gateway_info'])
def test_neutron_security_group_list(self):
security_grp = self.parser.listing(self.neutron('security-group-list'))
self.assertTableStruct(security_grp, ['id', 'name',
'security_group_rules'])
def test_neutron_security_group_rule_list(self):
security_grp = self.parser.listing(self.neutron
('security-group-rule-list'))
self.assertTableStruct(security_grp, ['id', 'security_group',
'direction', 'ethertype',
'port/protocol', 'remote'])
def test_neutron_subnet_list(self):
subnet_list = self.parser.listing(self.neutron('subnet-list'))
self.assertTableStruct(subnet_list, ['id', 'name', 'cidr',
'allocation_pools'])
def test_neutron_help(self):
help_text = self.neutron('help')
lines = help_text.split('\n')
self.assertFirstLineStartsWith(lines, 'usage: neutron')
commands = []
cmds_start = lines.index('Commands for API v2.0:')
command_pattern = re.compile(r'^ {2}([a-z0-9\-\_]+)')
for line in lines[cmds_start:]:
match = command_pattern.match(line)
if match:
commands.append(match.group(1))
commands = set(commands)
wanted_commands = set(('net-create', 'subnet-list', 'port-delete',
'router-show', 'agent-update', 'help'))
self.assertFalse(wanted_commands - commands)
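        # Illustrative match (a sketch, not output from a real run): command
        # lines in the help text look like '  net-list    List networks ...',
        # and command_pattern captures the leading token, here 'net-list'.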
# Optional arguments:
def test_neutron_version(self):
self.neutron('', flags='--version')
def test_neutron_debug_net_list(self):
self.neutron('net-list', flags='--debug')
def test_neutron_quiet_net_list(self):
self.neutron('net-list', flags='--quiet')
| apache-2.0 | 2,111,925,428,263,533,600 | 38.536765 | 79 | 0.606286 | false |
lrei/canonical_urls | urlhelpers.py | 1 | 1775 | """
"""
import re
import logging
import rfc3987
import urlparse
def url_encode_non_ascii(b):
return re.sub('[\x80-\xFF]', lambda c: '%%%02x' % ord(c.group(0)), b)
def ensure_url(iri):
    '''Convert an IRI to an ASCII URL, dropping any fragment (#...).
    Adapted from http://stackoverflow.com/posts/4391299/revisions
    '''
# if it's not unicode, it must be utf8, otherwise fail
if not isinstance(iri, unicode):
try:
uri = iri.decode('utf8') # noqa - we check if decoding works here
except Exception as e:
logging.exception(e)
return None
parts = urlparse.urlparse(iri)
url_parts = []
for index, part in enumerate(parts):
if index == 1:
url_parts.append(part.lower().encode('idna'))
else:
url_parts.append(url_encode_non_ascii(part.encode('utf-8')))
url = urlparse.urlunparse(url_parts)
url = urlparse.urldefrag(url)[0]
return url
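# Illustrative behaviour (a sketch; the IRI below is made up): for
# ensure_url(u'http://Example.com/caf\xe9#frag') the host is lowercased and
# IDNA-encoded, the path's UTF-8 bytes are percent-encoded ('caf%c3%a9'),
# and the '#frag' fragment is dropped.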
def validate_url(url):
'''
Validates URL (actually, IRIs).
'''
try:
rfc3987.parse(url, rule='IRI')
except:
return False
return True
def url_or_error(url):
"""Return a valid url or None
"""
# if it's not unicode, it must be utf8, otherwise fail
if not isinstance(url, unicode):
try:
url = url.decode('utf8') # noqa - we check if decoding works here
except Exception as e:
logging.exception(e)
return None
# Convert URI to URL if necessary
try:
url = ensure_url(url)
except Exception as e:
logging.exception(e)
return None
# Validate URL
if not validate_url(url):
msg = 'bad url: {} '.format(url)
logging.error(msg)
return None
return url
| mit | 2,921,001,869,390,411,000 | 21.75641 | 78 | 0.579155 | false |
thruflo/pyramid_redis | src/pyramid_redis/hooks.py | 1 | 4492 | # -*- coding: utf-8 -*-
"""Provides a ``RedisFactory`` to get a configured redis client from a
settings dictionary, e.g.::
>>> factory = RedisFactory()
>>> client = factory({'redis.url': 'redis://localhost:6379'})
And ``GetRedisClient`` which wraps the factory so it can be used as a
Pyramid request method.
"""
__all__ = [
'GetRedisClient',
'RedisFactory',
]
import logging
logger = logging.getLogger(__name__)
import pyramid.exceptions
import redis
try:
import urlparse
except ImportError: # py3
import urllib.parse as urlparse
from zope.component import getGlobalSiteManager
from zope.interface import Interface
from zope.interface import directlyProvides
class IRedisClientConfiguration(Interface):
"""Marker interface provided by RedisClientConfiguration"""
class RedisClientConfiguration(dict):
"""Parse the application settings into connection pool kwargs."""
def __init__(self, **kwargs):
self.parse_url = kwargs.get('parse_url', urlparse.urlparse)
self.pool_cls = kwargs.get('pool_cls', redis.BlockingConnectionPool)
def __call__(self, settings):
"""Unpack the settings. Parse the url into components and build
a dict to return. As an alternative, you may also provide a
unix_socket_path.
"""
self.clear() # make sure you can reconfigure the client
db = settings.get('redis.db', 0)
config = {'db': int(db)}
if ('redis.unix_socket_path' in settings and
settings['redis.unix_socket_path'] is not None):
config['unix_socket_path'] = settings['redis.unix_socket_path']
elif ('redis.url' in settings and
settings['redis.url'] is not None): # should default to
# `redis://localhost:6379`
# Unpack.
url = settings['redis.url']
# Parse into a config dict.
o = self.parse_url(url)
config.update({
'host': o.hostname,
'port': o.port,
})
if o.password:
config['password'] = o.password
max_connections = settings.get('redis.max_connections', None)
if max_connections is not None:
config['max_connections'] = int(max_connections)
config = {'connection_pool': self.pool_cls(**config)}
else:
raise pyramid.exceptions.ConfigurationError(
"""To use redis with pyramid, redis.url or
redis.unix_socket_path should be provided"""
)
self.update(config)
return self
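    # Illustrative usage (a sketch; the URL and numbers are made up):
    #   RedisClientConfiguration()({'redis.url': 'redis://:pw@localhost:6379',
    #                               'redis.db': '2',
    #                               'redis.max_connections': '20'})
    # returns {'connection_pool': <BlockingConnectionPool ...>}, ready to be
    # unpacked into redis.StrictRedis(**config) as RedisFactory does below.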
class RedisFactory(object):
def __init__(self, **kwargs):
self.get_registry = kwargs.get('get_registry', getGlobalSiteManager)
self.config = kwargs.get('parse_config', RedisClientConfiguration())
self.provides = kwargs.get('provides', directlyProvides)
self.redis_cls = kwargs.get('redis_cls', redis.StrictRedis)
def __call__(self, settings, registry=None):
"""Returns a ``redis`` client that uses a client configuration
registered in the ``registry`` provided that is, in turn,
configured with the ``settings`` provided.
"""
# If called without a registry, i.e.: not within the context of a
# Pyramid application, then register the connection pool in a
# zope.component registry.
if registry is None:
registry = self.get_registry()
# Query the registry for a client_configuration. If it doesn't exist,
# instantiate and register one for next time.
redis_client_conf = registry.queryUtility(IRedisClientConfiguration)
if not redis_client_conf:
redis_client_conf = self.config(settings) # update RedisClientConf
self.provides(self.config, IRedisClientConfiguration)
registry.registerUtility(self.config,
IRedisClientConfiguration)
# And use it to instantiate a redis client.
return self.redis_cls(**redis_client_conf)
class GetRedisClient(object):
"""Provide the redis factory as a Pyramid request method."""
def __init__(self, **kwargs):
self.redis_factory = kwargs.get('redis_factory', RedisFactory())
def __call__(self, request):
registry = request.registry
return self.redis_factory(registry.settings, registry=registry)
| unlicense | 5,676,939,344,290,679,000 | 34.370079 | 79 | 0.61821 | false |
YufeiZhang/Principles-of-Programming-Python-3 | Preparing/words.py | 1 | 1255 | # words.py
def main():
try:
#txt = open("test_1.txt")
#txt = open("test_2.txt")
#txt = open("test_3.txt")
txt = open("test_4.txt")
#target = input("Enter characters (spaces will be ignored): ")
#target = "cluuud IN DeD 23*"
target = "NSCRT - oooe+*"
except OSError:
print("OSError: Cannot find the file.")
string = ''
for ch in target: string += ch.lower()
lines = []
for line in txt: line = line.strip(); lines.append(line)
all_words = []
for line in lines:
words = line.split()
for word in words:
if not word[-1].isalpha(): word = word[:-1]
if word.lower() not in all_words:
all_words.append(word.lower())
all_words = sorted(all_words)
#print(all_words)
is_in = {}
for word in all_words:
		flag = 1
		for char in word:
			if char != '.' and char not in string:
				flag = 0
				break
if flag:
if len(word) not in is_in:
is_in[len(word)] = [word]
else:
is_in[len(word)].append(word)
is_in = sorted(is_in.items(), key = lambda x:x[0])
for key in is_in:
print("Words of length {:d} built from these characters, in lexicographic order:".format(key[0]))
for ch in key[1]:
print('\t', ch)
if __name__ == '__main__':
main() | gpl-3.0 | -474,582,302,238,988,500 | 18.936508 | 99 | 0.588048 | false |
NERC-CEH/jules-jasmin | majic/joj/tests/functional/test_model_run_pre_create.py | 1 | 3962 | # Majic
# Copyright (C) 2014 CEH
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from urlparse import urlparse
from hamcrest import *
from joj.tests import *
from joj.services.model_run_service import ModelRunService
from joj.utils import constants
from joj.model import session_scope, Session, User
from joj.services.user import UserService
class TestModelRunControllerPreCreate(TestController):
def setUp(self):
super(TestModelRunControllerPreCreate, self).setUp()
self.clean_database()
def test_GIVEN_nothing_WHEN_navigate_to_create_or_redirect_THEN_create_run_page_shown(self):
self.login()
response = self.app.get(
url(controller='model_run', action='pre_create'))
assert_that(response.status_code, is_(302), "Response is redirect")
assert_that(urlparse(response.response.location).path, is_(url(controller='model_run', action='create')), "url")
def test_GIVEN_user_over_quota_WHEN_navigate_to_precreate_THEN_index_shown(self):
user = self.login()
self.create_run_model(storage_in_mb=user.storage_quota_in_gb * 1024 + 1, name="big_run", user=user)
response = self.app.get(
url(controller='model_run', action='pre_create'))
assert_that(response.status_code, is_(302), "Response is redirect")
assert_that(urlparse(response.response.location).path, is_(url(controller='model_run', action='index')), "url")
def test_GIVEN_model_created_and_user_not_seen_page_WHEN_navigate_to_create_or_redirect_THEN_create_run_page_shown(self):
user = self.login()
self.create_run_model(storage_in_mb=0, name="big_run", user=user, status=constants.MODEL_RUN_STATUS_CREATED)
response = self.app.get(
url(controller='model_run', action='pre_create'))
assert_that(response.status_code, is_(302), "Response is redirect")
assert_that(urlparse(response.response.location).path, is_(url(controller='model_run', action='create')), "url")
def test_GIVEN_model_created_and_user_action_set_WHEN_navigate_to_create_or_redirect_THEN_user_action_page_shown(self):
user = self.login()
user_service = UserService()
user_service.set_current_model_run_creation_action(user, "driving_data")
self.create_run_model(storage_in_mb=0, name="big_run", user=user, status=constants.MODEL_RUN_STATUS_CREATED)
response = self.app.get(
url(controller='model_run', action='pre_create'))
assert_that(response.status_code, is_(302), "Response is redirect")
assert_that(urlparse(response.response.location).path, is_(url(controller='model_run', action='driving_data')), "url")
def test_GIVEN_no_model_created_and_user_action_set_WHEN_navigate_to_create_or_redirect_THEN_create_page_shown(self):
user = self.login()
user_service = UserService()
user_service.set_current_model_run_creation_action(user, "driving_data")
response = self.app.get(
url(controller='model_run', action='pre_create'))
assert_that(response.status_code, is_(302), "Response is redirect")
assert_that(urlparse(response.response.location).path, is_(url(controller='model_run', action='create')), "url")
| gpl-2.0 | -5,171,435,831,803,058,000 | 44.022727 | 126 | 0.693337 | false |
google/mirandum | alerts/streamjar/models.py | 1 | 1874 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.db import models
import main.models
import json
import iso8601
class StreamjarUpdate(main.models.Updater):
access_token = models.CharField(max_length=255)
class StreamjarEvent(main.models.UpdaterEvent):
details = models.TextField()
updater = models.ForeignKey(StreamjarUpdate)
def as_dict(self):
details = json.loads(self.details)
name = details.get("name", "Anonymous")
amount = " ".join([str(details['amount']), details['currency']])
timestamp = iso8601.parse_date(details['created_at'])
info = {
'name': name,
'amount': amount,
'comment': details['message'],
'donation_amount': float(details['amount']),
'currency': details['currency'],
'timestamp': timestamp,
}
return info
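    # Illustrative `details` payload (a sketch; all values are made up):
    #   {"name": "viewer1", "amount": "5.00", "currency": "USD",
    #    "message": "hi", "created_at": "2016-01-02T03:04:05Z"}
    # as_dict() would then report amount '5.00 USD', donation_amount 5.0 and
    # a parsed datetime for timestamp.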
class StreamjarAlertConfig(main.models.AlertConfig):
blacklist = models.TextField(blank=True, null=True)
filter_type = models.CharField(max_length=20, choices=(
('1equal', 'Equals'),
('2gt', 'Greater than'),
('3default', 'Default'),
), default='3default', help_text="When filtering for specific amounts, comparison to use.")
filter_amount = models.FloatField(blank=True, null=True)
| apache-2.0 | -5,070,340,945,048,618,000 | 37.244898 | 95 | 0.670224 | false |
summerisgone/gitrecipe | setup.py | 1 | 1115 | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name="gitrecipe",
version='0.0.2',
    description='Simple buildout recipe for downloading git repositories. It uses the system git command and its syntax',
author='Ivan Gromov',
author_email='[email protected]',
url='http://github.com/summerisgone/gitrecipe',
download_url='http://github.com/summerisgone/gitrecipe/zipball/0.1',
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Buildout',
'Intended Audience :: Developers',
'License :: Freely Distributable',
'Natural Language :: Russian',
'Operating System :: Microsoft :: Windows',
'Operating System :: Unix',
'Programming Language :: Python :: 2.5',
'Topic :: Software Development :: Version Control',
],
namespace_packages=['recipe'],
packages=find_packages(),
install_requires=['setuptools', 'zc.recipe.egg'],
entry_points={'zc.buildout': ['default = recipe.git:GitRecipe']},
zip_safe=False,
long_description=open('README.rst').read(),
)
| bsd-3-clause | -8,936,679,593,237,095,000 | 37.448276 | 117 | 0.642152 | false |
jnez71/demos | geometry/bezier_surface.py | 1 | 5934 | #!/usr/bin/env python3
"""
Efficient implementation of a Bezier surface and its differential geometry.
"""
from __future__ import division
import numpy as np
################################################## CORE
class Bezier(object):
"""
Bezier manifold of dimension 2 embedded in Euclidean space of dimension 3.
"""
def __init__(self, knots=None):
if knots is None:
# Default to identity patch
n = 4
knots = np.zeros((n, n, 3), dtype=np.float64)
for i in range(n):
for j in range(n):
knots[i, j] = np.float64((i, j, 0)) / (n-1)
self.set_knots(knots)
def set_knots(self, knots):
"""
Provide the control knots in an array with the first two
dimensions indexing which knot and the third dimension
holding the Euclidean coordinates of each knot.
"""
self.knots = np.array(knots, dtype=np.float64)
self.degree_x = self.knots.shape[0] - 1
self.degree_y = self.knots.shape[1] - 1
self.dimension = self.knots.shape[2] - 1
assert self.degree_x > 0
assert self.degree_y > 0
assert self.dimension == 2
self.dknots_x = self.degree_x * np.diff(self.knots, axis=0)
self.dknots_y = self.degree_y * np.diff(self.knots, axis=1)
def evaluate(self, x, y):
"""
De Casteljau's algorithm is used to map the given surface coordinates
(each from 0.0 to 1.0) to their corresponding location in Euclidean space.
"""
lerps_x = np.zeros((self.degree_x+1, self.dimension+1), dtype=np.float64)
for i in range(len(lerps_x)):
lerps_y = self.knots[i].copy()
for j in range(self.degree_y):
for k in range(self.degree_y - j):
lerps_y[k] = (1.0-y)*lerps_y[k] + y*lerps_y[k+1]
lerps_x[i] = lerps_y[0]
for i in range(self.degree_x):
for k in range(self.degree_x - i):
lerps_x[k] = (1.0-x)*lerps_x[k] + x*lerps_x[k+1]
return lerps_x[0]
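    # Sanity-check sketch (hedged, using the default identity patch): by the
    # symmetry of the Bernstein weights, Bezier().evaluate(0.5, 0.5) should
    # return the patch midpoint, approximately array([0.5, 0.5, 0.0]).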
def jacobian(self, x, y):
"""
Returns the 2by3 Jacobian matrix of the `evaluate` function
at the given argument. The Grammian of this is the metric tensor.
"""
return np.column_stack((Bezier(self.dknots_x).evaluate(x, y),
Bezier(self.dknots_y).evaluate(x, y)))
def metric(self, x, y):
"""
Returns the 2by2 metric tensor at the given surface coordinates.
"""
J = self.jacobian(x, y)
return J.T.dot(J)
def orientation(self, x, y, q=0.0):
"""
Returns a rotation matrix describing the orientation of the normal
coordinates at [`x`, `y`] with yaw angle `q` in radians.
"""
J = self.jacobian(x, y)
rx, ry = (J / np.linalg.norm(J, axis=0)).T
normal = np.cross(rx, ry)
ncrossx = np.cross(normal, rx) # must be re-unitized to mitigate roundoff error
tangent = np.cos(q)*rx + np.sin(q)*(ncrossx / np.linalg.norm(ncrossx))
binormal = np.cross(normal, tangent)
R = np.column_stack((tangent, binormal, normal))
return R / np.linalg.norm(R, axis=0) # must be re-unitized to mitigate roundoff error
def plot(self, n=40, block=True):
"""
Plots this surface discretized by the given grid size `n`.
Also shows the control knots and the central normal coordinate system.
"""
from matplotlib import pyplot
from mpl_toolkits.mplot3d import Axes3D
mesh = np.linspace(0.0, 1.0, n)
points = np.transpose([self.evaluate(x, y) for x in mesh for y in mesh])
quiver_origins = np.transpose([self.evaluate(mesh[n//2], mesh[n//2])]*3)
quiver_arrows = self.orientation(mesh[n//2], mesh[n//2])
fig = pyplot.figure()
ax = fig.add_subplot(111, projection="3d")
ax.set_title("bezier", fontsize=12)
ax.set_xlabel("rx", fontsize=12)
ax.set_ylabel("ry", fontsize=12)
ax.set_zlabel("rz", fontsize=12)
ax.scatter(*self.knots.reshape(-1, 3).T, c='r', s=80)
ax.scatter(*points, c=points[-1, :], s=60, marker='o', edgecolors=None)
ax.quiver(quiver_origins[0], quiver_origins[1], quiver_origins[2],
quiver_arrows[0], quiver_arrows[1], quiver_arrows[2],
length=0.25, color=(1.0, 0.5, 0.0), lw=2.5)
ax.axis("equal")
pyplot.show(block=block)
################################################## TEST
if __name__ == "__main__":
# Initialize a flat set of knots
knots = np.zeros((5, 4, 3), dtype=np.float64)
for i in range(knots.shape[0]):
for j in range(knots.shape[1]):
knots[i, j] = np.float64((i, j, 0))
# Mess with the knots to make them more interesting
knots[:, :, 0] *= -1.0
knots[1:3, 1:3, 2] = -1.0
knots[1:3, 0, 2] = (0.25, 0.5)
knots[-1, -1, :] = (-4/2, 3/2, 0.5)
# Construct the Bezier surface
bezier = Bezier(knots)
# Verify the analytical Jacobian against finite-differences at a random location
x, y = np.random.sample(2)
r = bezier.evaluate(x, y)
d = 1e-6
drdx = (bezier.evaluate(x+d, y) - r) / d
drdy = (bezier.evaluate( x, y+d) - r) / d
assert np.allclose(np.column_stack((drdx, drdy)), bezier.jacobian(x, y), atol=10*d)
# Verify that the metric tensor computation is consistent with finite-differences
assert np.allclose([[drdx.dot(drdx), drdx.dot(drdy)],
[drdy.dot(drdx), drdy.dot(drdy)]], bezier.metric(x, y), atol=10*d)
# Verify that the orientation calculation returns an orthonormal matrix
R = bezier.orientation(x, y, 2*np.pi*np.random.sample())
assert np.allclose(R.dot(R.T), np.eye(3))
# Plot the corresponding Bezier surface to visually inspect
bezier.plot()
| mit | -1,133,954,785,867,220,100 | 37.283871 | 94 | 0.568419 | false |
vedujoshi/tempest | tempest/lib/common/preprov_creds.py | 1 | 16653 | # Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import os
from oslo_concurrency import lockutils
from oslo_log import log as logging
import six
import yaml
from tempest.lib import auth
from tempest.lib.common import cred_provider
from tempest.lib.common import fixed_network
from tempest.lib import exceptions as lib_exc
from tempest.lib.services import clients
LOG = logging.getLogger(__name__)
def read_accounts_yaml(path):
try:
with open(path, 'r') as yaml_file:
accounts = yaml.safe_load(yaml_file)
except IOError:
raise lib_exc.InvalidConfiguration(
'The path for the test accounts file: %s '
'could not be found' % path)
return accounts
class PreProvisionedCredentialProvider(cred_provider.CredentialProvider):
# Exclude from the hash fields specific to v2 or v3 identity API
# i.e. only include user*, project*, tenant* and password
HASH_CRED_FIELDS = (set(auth.KeystoneV2Credentials.ATTRIBUTES) &
set(auth.KeystoneV3Credentials.ATTRIBUTES))
def __init__(self, identity_version, test_accounts_file,
accounts_lock_dir, name=None, credentials_domain=None,
admin_role=None, object_storage_operator_role=None,
object_storage_reseller_admin_role=None, identity_uri=None):
"""Credentials provider using pre-provisioned accounts
This credentials provider loads the details of pre-provisioned
accounts from a YAML file, in the format specified by
`etc/accounts.yaml.sample`. It locks accounts while in use, using the
external locking mechanism, allowing for multiple python processes
to share a single account file, and thus running tests in parallel.
The accounts_lock_dir must be generated using `lockutils.get_lock_path`
from the oslo.concurrency library. For instance:
accounts_lock_dir = os.path.join(lockutils.get_lock_path(CONF),
'test_accounts')
Role names for object storage are optional as long as the
`operator` and `reseller_admin` credential types are not used in the
accounts file.
:param identity_version: identity version of the credentials
:param admin_role: name of the admin role
:param test_accounts_file: path to the accounts YAML file
:param accounts_lock_dir: the directory for external locking
:param name: name of the hash file (optional)
:param credentials_domain: name of the domain credentials belong to
(if no domain is configured)
:param object_storage_operator_role: name of the role
:param object_storage_reseller_admin_role: name of the role
:param identity_uri: Identity URI of the target cloud
"""
super(PreProvisionedCredentialProvider, self).__init__(
identity_version=identity_version, name=name,
admin_role=admin_role, credentials_domain=credentials_domain,
identity_uri=identity_uri)
self.test_accounts_file = test_accounts_file
if test_accounts_file:
accounts = read_accounts_yaml(self.test_accounts_file)
else:
raise lib_exc.InvalidCredentials("No accounts file specified")
self.hash_dict = self.get_hash_dict(
accounts, admin_role, object_storage_operator_role,
object_storage_reseller_admin_role)
self.accounts_dir = accounts_lock_dir
self._creds = {}
@classmethod
def _append_role(cls, role, account_hash, hash_dict):
if role in hash_dict['roles']:
hash_dict['roles'][role].append(account_hash)
else:
hash_dict['roles'][role] = [account_hash]
return hash_dict
@classmethod
def get_hash_dict(cls, accounts, admin_role,
object_storage_operator_role=None,
object_storage_reseller_admin_role=None):
hash_dict = {'roles': {}, 'creds': {}, 'networks': {}}
# Loop over the accounts read from the yaml file
for account in accounts:
roles = []
types = []
resources = []
if 'roles' in account:
roles = account.pop('roles')
if 'types' in account:
types = account.pop('types')
if 'resources' in account:
resources = account.pop('resources')
temp_hash = hashlib.md5()
account_for_hash = dict((k, v) for (k, v) in account.items()
if k in cls.HASH_CRED_FIELDS)
temp_hash.update(six.text_type(account_for_hash).encode('utf-8'))
temp_hash_key = temp_hash.hexdigest()
hash_dict['creds'][temp_hash_key] = account
for role in roles:
hash_dict = cls._append_role(role, temp_hash_key,
hash_dict)
# If types are set for the account append the matching role
# subdict with the hash
for type in types:
if type == 'admin':
hash_dict = cls._append_role(admin_role, temp_hash_key,
hash_dict)
elif type == 'operator':
if object_storage_operator_role:
hash_dict = cls._append_role(
object_storage_operator_role, temp_hash_key,
hash_dict)
else:
msg = ("Type 'operator' configured, but no "
"object_storage_operator_role specified")
raise lib_exc.InvalidCredentials(msg)
elif type == 'reseller_admin':
if object_storage_reseller_admin_role:
hash_dict = cls._append_role(
object_storage_reseller_admin_role,
temp_hash_key,
hash_dict)
else:
msg = ("Type 'reseller_admin' configured, but no "
"object_storage_reseller_admin_role specified")
raise lib_exc.InvalidCredentials(msg)
# Populate the network subdict
for resource in resources:
if resource == 'network':
hash_dict['networks'][temp_hash_key] = resources[resource]
else:
LOG.warning(
'Unknown resource type %s, ignoring this field',
resource
)
return hash_dict
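    # Illustrative accounts entry (a sketch modelled on
    # etc/accounts.yaml.sample; every value is made up):
    #   - username: 'user_1'
    #     tenant_name: 'tenant_1'
    #     password: 'pass_1'
    #     types: ['admin']
    #     resources:
    #       network: 'public'
    # get_hash_dict() hashes the user/tenant/password fields and files the
    # hash under hash_dict['roles'][admin_role] and hash_dict['networks'].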
def is_multi_user(self):
return len(self.hash_dict['creds']) > 1
def is_multi_tenant(self):
return self.is_multi_user()
def _create_hash_file(self, hash_string):
path = os.path.join(os.path.join(self.accounts_dir, hash_string))
if not os.path.isfile(path):
with open(path, 'w') as fd:
fd.write(self.name)
return True
return False
@lockutils.synchronized('test_accounts_io', external=True)
def _get_free_hash(self, hashes):
# Cast as a list because in some edge cases a set will be passed in
hashes = list(hashes)
if not os.path.isdir(self.accounts_dir):
os.mkdir(self.accounts_dir)
# Create File from first hash (since none are in use)
self._create_hash_file(hashes[0])
return hashes[0]
names = []
for _hash in hashes:
res = self._create_hash_file(_hash)
if res:
return _hash
else:
path = os.path.join(os.path.join(self.accounts_dir,
_hash))
with open(path, 'r') as fd:
names.append(fd.read())
msg = ('Insufficient number of users provided. %s have allocated all '
'the credentials for this allocation request' % ','.join(names))
raise lib_exc.InvalidCredentials(msg)
def _get_match_hash_list(self, roles=None):
hashes = []
if roles:
# Loop over all the creds for each role in the subdict and generate
# a list of cred lists for each role
for role in roles:
temp_hashes = self.hash_dict['roles'].get(role, None)
if not temp_hashes:
raise lib_exc.InvalidCredentials(
"No credentials with role: %s specified in the "
"accounts ""file" % role)
hashes.append(temp_hashes)
# Take the list of lists and do a boolean and between each list to
# find the creds which fall under all the specified roles
temp_list = set(hashes[0])
for hash_list in hashes[1:]:
temp_list = temp_list & set(hash_list)
hashes = temp_list
else:
hashes = self.hash_dict['creds'].keys()
# NOTE(mtreinish): admin is a special case because of the increased
# privilege set which could potentially cause issues on tests where
# that is not expected. So unless the admin role isn't specified do
# not allocate admin.
admin_hashes = self.hash_dict['roles'].get(self.admin_role,
None)
if ((not roles or self.admin_role not in roles) and
admin_hashes):
useable_hashes = [x for x in hashes if x not in admin_hashes]
else:
useable_hashes = hashes
return useable_hashes
def _sanitize_creds(self, creds):
temp_creds = creds.copy()
temp_creds.pop('password')
return temp_creds
def _get_creds(self, roles=None):
useable_hashes = self._get_match_hash_list(roles)
if not useable_hashes:
msg = 'No users configured for type/roles %s' % roles
raise lib_exc.InvalidCredentials(msg)
free_hash = self._get_free_hash(useable_hashes)
clean_creds = self._sanitize_creds(
self.hash_dict['creds'][free_hash])
LOG.info('%s allocated creds:\n%s', self.name, clean_creds)
return self._wrap_creds_with_network(free_hash)
@lockutils.synchronized('test_accounts_io', external=True)
def remove_hash(self, hash_string):
hash_path = os.path.join(self.accounts_dir, hash_string)
if not os.path.isfile(hash_path):
LOG.warning('Expected an account lock file %s to remove, but '
'one did not exist', hash_path)
else:
os.remove(hash_path)
if not os.listdir(self.accounts_dir):
os.rmdir(self.accounts_dir)
def get_hash(self, creds):
for _hash in self.hash_dict['creds']:
# Comparing on the attributes that are expected in the YAML
init_attributes = creds.get_init_attributes()
# Only use the attributes initially used to calculate the hash
init_attributes = [x for x in init_attributes if
x in self.HASH_CRED_FIELDS]
hash_attributes = self.hash_dict['creds'][_hash].copy()
# NOTE(andreaf) Not all fields may be available on all credentials
# so defaulting to None for that case.
if all([getattr(creds, k, None) == hash_attributes.get(k, None) for
k in init_attributes]):
return _hash
raise AttributeError('Invalid credentials %s' % creds)
def remove_credentials(self, creds):
_hash = self.get_hash(creds)
clean_creds = self._sanitize_creds(self.hash_dict['creds'][_hash])
self.remove_hash(_hash)
LOG.info("%s returned allocated creds:\n%s", self.name, clean_creds)
def get_primary_creds(self):
if self._creds.get('primary'):
return self._creds.get('primary')
net_creds = self._get_creds()
self._creds['primary'] = net_creds
return net_creds
def get_alt_creds(self):
if self._creds.get('alt'):
return self._creds.get('alt')
net_creds = self._get_creds()
self._creds['alt'] = net_creds
return net_creds
def get_creds_by_roles(self, roles, force_new=False):
roles = list(set(roles))
exist_creds = self._creds.get(six.text_type(roles).encode(
'utf-8'), None)
# The force kwarg is used to allocate an additional set of creds with
# the same role list. The index used for the previously allocation
# in the _creds dict will be moved.
if exist_creds and not force_new:
return exist_creds
elif exist_creds and force_new:
# NOTE(andreaf) In py3.x encode returns bytes, and b'' is bytes
# In py2.7 encode returns strings, and b'' is still string
new_index = six.text_type(roles).encode('utf-8') + b'-' + \
six.text_type(len(self._creds)).encode('utf-8')
self._creds[new_index] = exist_creds
net_creds = self._get_creds(roles=roles)
self._creds[six.text_type(roles).encode('utf-8')] = net_creds
return net_creds
def clear_creds(self):
for creds in self._creds.values():
self.remove_credentials(creds)
def get_admin_creds(self):
return self.get_creds_by_roles([self.admin_role])
def is_role_available(self, role):
if self.hash_dict['roles'].get(role):
return True
return False
def admin_available(self):
return self.is_role_available(self.admin_role)
def _wrap_creds_with_network(self, hash):
creds_dict = self.hash_dict['creds'][hash]
# Make sure a domain scope if defined for users in case of V3
# Make sure a tenant is available in case of V2
creds_dict = self._extend_credentials(creds_dict)
# This just builds a Credentials object, it does not validate
# nor fill with missing fields.
credential = auth.get_credentials(
auth_url=None, fill_in=False,
identity_version=self.identity_version, **creds_dict)
net_creds = cred_provider.TestResources(credential)
net_clients = clients.ServiceClients(credentials=credential,
identity_uri=self.identity_uri)
compute_network_client = net_clients.compute.NetworksClient()
net_name = self.hash_dict['networks'].get(hash, None)
try:
network = fixed_network.get_network_from_name(
net_name, compute_network_client)
except lib_exc.InvalidTestResource:
network = {}
net_creds.set_resources(network=network)
return net_creds
def _extend_credentials(self, creds_dict):
# Add or remove credential domain fields to fit the identity version
domain_fields = set(x for x in auth.KeystoneV3Credentials.ATTRIBUTES
if 'domain' in x)
msg = 'Assuming they are valid in the default domain.'
if self.identity_version == 'v3':
if not domain_fields.intersection(set(creds_dict.keys())):
msg = 'Using credentials %s for v3 API calls. ' + msg
LOG.warning(msg, self._sanitize_creds(creds_dict))
creds_dict['domain_name'] = self.credentials_domain
if self.identity_version == 'v2':
if domain_fields.intersection(set(creds_dict.keys())):
msg = 'Using credentials %s for v2 API calls. ' + msg
LOG.warning(msg, self._sanitize_creds(creds_dict))
# Remove all valid domain attributes
for attr in domain_fields.intersection(set(creds_dict.keys())):
creds_dict.pop(attr)
return creds_dict
| apache-2.0 | -6,200,474,663,788,724,000 | 43.526738 | 79 | 0.582297 | false |
appleseedhq/cortex | test/IECoreScene/MeshNormalsOpTest.py | 1 | 4405 | ##########################################################################
#
# Copyright (c) 2008-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import imath
import IECore
import IECoreScene
import math
class MeshNormalsOpTest( unittest.TestCase ) :
def testPlane( self ) :
p = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( -1 ), imath.V2f( 1 ) ) )
if "N" in p :
del p["N"]
self.assert_( not "N" in p )
pp = IECoreScene.MeshNormalsOp()( input=p )
self.assert_( "N" in pp )
self.assertEqual( pp["N"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Vertex )
normals = pp["N"].data
self.assert_( normals.isInstanceOf( IECore.V3fVectorData.staticTypeId() ) )
self.assertEqual( normals.size(), pp.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex ) )
self.assertEqual( normals.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
for n in normals :
self.assertEqual( n, imath.V3f( 0, 0, 1 ) )
def testOnlyNAdded( self ) :
p = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( -1 ), imath.V2f( 1 ) ) )
del p["N"]
pp = IECoreScene.MeshNormalsOp()( input=p )
del pp["N"]
self.assertEqual( pp, p )
def testSphere( self ) :
s = IECore.Reader.create( "test/IECore/data/cobFiles/pSphereShape1.cob" ).read()
del s["N"]
self.assert_( not "N" in s )
ss = IECoreScene.MeshNormalsOp()( input=s )
self.assert_( "N" in ss )
self.assertEqual( ss["N"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Vertex )
normals = ss["N"].data
self.assert_( normals.isInstanceOf( IECore.V3fVectorData.staticTypeId() ) )
self.assertEqual( normals.size(), ss.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Vertex ) )
self.assertEqual( normals.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
points = ss["P"].data
for i in range( 0, normals.size() ) :
self.assert_( math.fabs( normals[i].length() - 1 ) < 0.001 )
p = points[i].normalize()
self.assert_( normals[i].dot( p ) > 0.99 )
self.assert_( normals[i].dot( p ) < 1.01 )
def testUniformInterpolation( self ) :
m = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( -1 ), imath.V2f( 1 ) ), imath.V2i( 10 ) )
del m["N"]
self.assertTrue( "N" not in m )
m2 = IECoreScene.MeshNormalsOp()( input = m, interpolation = IECoreScene.PrimitiveVariable.Interpolation.Uniform )
self.assertEqual( m2["N"].interpolation, IECoreScene.PrimitiveVariable.Interpolation.Uniform )
self.assertEqual( len( m2["N"].data ), m2.variableSize( IECoreScene.PrimitiveVariable.Interpolation.Uniform ) )
for n in m2["N"].data :
self.assertEqual( n, imath.V3f( 0, 0, 1 ) )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | -3,052,171,660,580,019,000 | 38.330357 | 116 | 0.69353 | false |
lihuanshuai/libmc | tests/shabby/reconnect_delay.py | 1 | 1557 | # coding: utf-8
import os
import time
import libmc
import slow_memcached_server
import subprocess
def memcached_server_ctl(cmd, port):
ctl_path = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)
))),
'misc', 'memcached_server'
)
print ctl_path
subprocess.check_call([ctl_path, cmd, str(port)])
def test_soft_server_error():
mc = libmc.Client(["127.0.0.1:%d" % slow_memcached_server.PORT])
mc.config(libmc._client.MC_POLL_TIMEOUT,
slow_memcached_server.BLOCKING_SECONDS * 1000 * 2) # ms
RETRY_TIMEOUT = 2
mc.config(libmc.MC_RETRY_TIMEOUT, RETRY_TIMEOUT)
assert mc.set('foo', 1)
assert not mc.set(slow_memcached_server.KEY_SET_SERVER_ERROR, 1)
assert mc.set('foo', 1) # back to live
time.sleep(RETRY_TIMEOUT / 2)
assert mc.set('foo', 1) # alive
time.sleep(RETRY_TIMEOUT + 1)
assert mc.set('foo', 1) # alive
def test_hard_server_error():
normal_port = 21211
mc = libmc.Client(["127.0.0.1:%d" % normal_port])
RETRY_TIMEOUT = 10
mc.config(libmc.MC_RETRY_TIMEOUT, RETRY_TIMEOUT)
assert mc.set('foo', 1)
memcached_server_ctl('stop', normal_port)
assert not mc.set('foo', 1) # still fail
memcached_server_ctl('start', normal_port)
assert not mc.set('foo', 1) # still fail
time.sleep(RETRY_TIMEOUT + 1)
assert mc.set('foo', 1) # back to live
def main():
test_soft_server_error()
test_hard_server_error()
if __name__ == '__main__':
main()
| bsd-3-clause | -3,118,505,879,109,745,000 | 24.95 | 70 | 0.62492 | false |
EmanueleCannizzaro/scons | test/LINK/SHLINKCOM.py | 1 | 2607 | #!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/LINK/SHLINKCOM.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Test the ability to configure the $SHLINKCOM construction variable.
"""
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.write('mycc.py', r"""
import sys
outfile = open(sys.argv[1], 'wb')
for f in sys.argv[2:]:
infile = open(f, 'rb')
for l in [l for l in infile.readlines() if l != '/*cc*/\n']:
outfile.write(l)
sys.exit(0)
""")
test.write('mylink.py', r"""
import sys
outfile = open(sys.argv[1], 'wb')
for f in sys.argv[2:]:
infile = open(f, 'rb')
for l in [l for l in infile.readlines() if l != '/*link*/\n']:
outfile.write(l)
sys.exit(0)
""")
test.write('SConstruct', """
env = Environment(SHCCCOM = r'%(_python_)s mycc.py $TARGET $SOURCES',
SHLINKCOM = r'%(_python_)s mylink.py $TARGET $SOURCES',
SHOBJSUFFIX = '.obj',
SHLIBPREFIX = '',
SHLIBSUFFIX = '.dll')
t1 = env.SharedObject('test1', 'test1.c')
t2 = env.SharedObject('test2', 'test2.c')
env.SharedLibrary(target = 'test3', source = [t1, t2])
""" % locals())
test.write('test1.c', """\
test1.c
/*cc*/
/*link*/
""")
test.write('test2.c', """\
test2.c
/*cc*/
/*link*/
""")
test.run()
test.must_match('test3.dll', "test1.c\ntest2.c\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit | -4,458,785,082,767,178,000 | 26.442105 | 96 | 0.671653 | false |
eirannejad/pyRevit | extensions/pyRevitTools.extension/pyRevit.tab/Project.panel/ptools.stack/Links.pulldown/Create Workset For Linked Element.pushbutton/script.py | 1 | 1095 | from pyrevit import revit, DB, UI
from pyrevit import script
from pyrevit import forms
logger = script.get_logger()
selection = revit.get_selection()
linkedModelName = ''
if len(selection) > 0:
for el in selection:
if isinstance(el, DB.RevitLinkInstance):
linkedModelName = el.Name.split(':')[0]
elif isinstance(el, DB.ImportInstance):
linkedModelName = \
el.Parameter[DB.BuiltInParameter.IMPORT_SYMBOL_NAME].AsString()
if linkedModelName:
if not revit.doc.IsWorkshared and revit.doc.CanEnableWorksharing:
revit.doc.EnableWorksharing('Shared Levels and Grids',
'Workset1')
with revit.Transaction('Create Workset for linked model'):
newWs = DB.Workset.Create(revit.doc, linkedModelName)
worksetParam = \
el.Parameter[DB.BuiltInParameter.ELEM_PARTITION_PARAM]
worksetParam.Set(newWs.Id.IntegerValue)
else:
forms.alert('At least one linked element must be selected.')
| gpl-3.0 | -6,848,140,956,992,978,000 | 35.5 | 79 | 0.624658 | false |
9p0le/simiki | simiki/log.py | 1 | 2342 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import logging
from logging import getLogger, Formatter, StreamHandler
from simiki import utils
from simiki.compat import is_linux, is_osx
class ANSIFormatter(Formatter):
"""Use ANSI escape sequences to colored log"""
def format(self, record):
try:
msg = super(ANSIFormatter, self).format(record)
except:
# for python2.6
# Formatter is old-style class in python2.6 and type is classobj
# another trick: http://stackoverflow.com/a/18392639/1276501
msg = Formatter.format(self, record)
lvl2color = {
"DEBUG": "blue",
"INFO": "green",
"WARNING": "yellow",
"ERROR": "red",
"CRITICAL": "bgred"
}
rln = record.levelname
if rln in lvl2color:
return "[{0}]: {1}".format(
utils.color_msg(lvl2color[rln], rln),
msg
)
else:
return msg
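    # Illustrative output (a sketch, assuming utils.color_msg wraps the level
    # name in ANSI escapes): an INFO record would render roughly as
    #   '[\x1b[32mINFO\x1b[0m]: message text'
    # on a colour-capable terminal.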
class NonANSIFormatter(Formatter):
'''Non ANSI color format'''
def format(self, record):
try:
msg = super(NonANSIFormatter, self).format(record)
except:
# for python2.6
# Formatter is old-style class in python2.6 and type is classobj
# another trick: http://stackoverflow.com/a/18392639/1276501
msg = Formatter.format(self, record)
rln = record.levelname
return "[{0}]: {1}".format(rln, msg)
def _is_platform_allowed_ansi():
'''ansi be used on linux/macos'''
if is_linux or is_osx:
return True
else:
return False
def logging_init(level=None, logger=getLogger(),
handler=StreamHandler(), use_color=True):
if use_color and _is_platform_allowed_ansi():
fmt = ANSIFormatter()
else:
fmt = NonANSIFormatter()
handler.setFormatter(fmt)
logger.addHandler(handler)
if level:
logger.setLevel(level)
if __name__ == "__main__":
logging_init(level=logging.DEBUG)
root_logger = logging.getLogger()
root_logger.debug("debug")
root_logger.info("info")
root_logger.warning("warning")
root_logger.error("error")
root_logger.critical("critical")
| mit | 7,475,655,482,972,480,000 | 25.91954 | 76 | 0.584116 | false |
akegan/plasmoids | relReconVars.py | 1 | 9293 | ###############################################
###
### This file is generated by wpp.
###
### Input file : /scr_verus/wernerg/vrun/relRecon/relReconRepo/relReconPre.py
### Output file : relRecon.in
### Translation: 2014 Dec 04, 14:26:11
###
### disthistMac version $Id: disthistMac.py 104 2014-04-29 03:36:27Z wernerg $
### domainDecomp version $Id: domainDecomp.py 93 2014-01-24 04:32:36Z wernerg $
### funcMac version $Id: funcMac.py 103 2014-02-18 23:17:40Z wernerg $
### histMac version $Id: histMac.py 99 2014-02-18 21:45:07Z wernerg $
### mathphysMac version $Id: mathphysMac.py 55 2013-03-07 19:25:09Z wernerg $
### verbosityMac version $Id: verbosityMac.py 55 2013-03-07 19:25:09Z wernerg $
### wpp version $Id: wpp.py 102 2014-02-18 23:09:05Z wernerg $
### wppGlobals version $Id: wppGlobals.py 75 2013-08-14 01:55:44Z wernerg $
###
### created for vorpal r22018
###
###############################################
##########
#
# Import needed modules
#
##########
import sys
sys.path.append(".")
import math
#
__FILE__ = "/scr_verus/wernerg/vrun/relRecon/relReconRepo/relReconPre.py"
T_S_ION = 11118542910.652687
DENSITY_BG = 470659793006.75714
GAMMA_MINUS_1_DRIFT_ION = 0.25000000000000044
baseExpr1 = '( -0.0085225444505285155 * lncosh((93.75)*(1/5.0)))'
NOM_DUMPPERIOD = 101
estHistNumsPerStep = 20803.793333333335
USE_VAY_MOVE = 0
LY_BOX = 187.5
PLASMA_SIGMA_I = 59.999999999999993
LX_PML_XLO = 0.0
mathphysMacVersion = '$Id: mathphysMac.py 55 2013-03-07 19:25:09Z wernerg $'
PLASMA_SIGMA_E = 59.999999999999993
vorpalExec = '/scr_verus/wernerg/vorpals/mainline3/vorpalall/builds/vorpal/par/vorpal/vorpal'
X_PML_XHI = 281.25
baseExpry = '( -0.0085225444505285155 * lncosh((y)*(1/5.0)))'
SMOOTH_J = 0
ELECMASS = 9.10938215e-31
SEED_PTCLS_KT_OVER_MCSQR = 10.0
SMOOTH_E = 2
LIGHTSPEED = 299792458.0
p = 20
LY_TOT = 375.0
numCells = [400, 400]
DENSITY_0 = 4706597930067.5713
BETA_DRIFT_ION = 0.59999999999999987
LARMOR_PERIODS_PER_SEED_SAVE = 50
insertProb = 0.2525
BASE_AMP_UPDATERS = ['yeeAmpere']
NZ_ABC = 0
CFL_FRAC = 0.99
MPTCLS_PER_DZ = 1
MPTCLS_PER_DY = 8
LZ_TOTAL_BOXSIZE_OVER_LARMOR_LENGTH = 0.0
PTCL_BCS = ['periodic', 'periodic', 'periodic']
numDomains = 64
NX_BEGIN = 0
procInput = True
PERIODIC_DIRS = [0, 1, 2]
LY_OVER_LARMOR_LENGTH = 100.0
LX_PML_XHI = 0.0
ELECCHARGE = -1.602176487e-19
X_PML_XLO = -93.75
USE_CELL_SPECIES = 0
LZ_PML_ZHI = 0.0
ezAtXHistBlockUpNYs = [300, 301]
GAMMA_MINUS_1_DRIFT = 0.25
TRACK_PTCLS = 1
VP_DEBUG = 7
BG_MPTCLS_PER_DZ = 1
VP_WARNING = 4
LX_TOTAL_BOXSIZE_OVER_LARMOR_LENGTH = 200.0
ALL_SPECIES_HIST_PERIOD = 100
X_END = 281.25
densityBins = [100, 100]
LZ = 0.0
LX = 375.0
LY = 375.0
maxCellsPerDomain = 3300
numLatitudeBins = 60
ezAvgHistBlockDnNXs = [206, 238, 254, 270, 278, 286, 290, 294, 296, 298, 299, 300, 301, 302, 304, 306, 310, 314, 322, 330, 346, 362, 394]
SMOOTH_E_ENERGY_LEFT_AT_NYQ_K = 0.35910836115772415
NORMALIZE_LARMOR = 1
BG_DIST_IS_POWER_LAW = 0
FIELDDUMPPERIOD = 100
numLongitudeBins = 120
midDeltaNY = 1
ezAvgHistBlockDnMidNYs = [99, 101]
VP_DEBUG3 = 9
VP_DEBUG2 = 8
SMOOTH_E_ENERGY_LOSS_AT_NYQ_K = 0.6408916388422758
NX_END = 400
LCM_PERIOD = 20
PBC_IN_X = 1
PBC_IN_Y = 1
PBC_IN_Z = 1
LX_TOTS = [375.0, 375.0, 0.0]
yp = 'yPosHolder'
Z_START = -0.0
VP_INFO = 6
name = 'BxQ7Right'
A_PERTURB_AMP = 0.01
PROTMASS = 1.672621637e-27
ezAvgHistBlockUpNXs = [6, 38, 54, 70, 78, 86, 90, 94, 96, 98, 99, 100, 101, 102, 104, 106, 110, 114, 122, 130, 146, 162, 194]
LZ_PML_ZLO = 0.0
NY_ABC = 0
ezAtXHistBlockDnNXs = [300, 301]
SMALL_DIST_CALC_PERIOD = 50
TRACK_PTCLS_YRANGE = 'LY_BOX/2.'
MIN_WEIGHT = 1e-06
DOUBLE_BOX_IN_X = 1
LX_BOX = 187.5
LARMOR_FREQ = 159889310.93333334
oPtUpNY = 300
oPtUpNX = 300
Y_PML_YLO = -93.75
smoothers2InX = ['smoothAgainE00', 'smoothAgainE10', 'smoothAgainE20']
smoothers2InY = ['smoothAgainE01', 'smoothAgainE11', 'smoothAgainE21']
NZ_END = 0
useIsoConductors = 0
AMPERE_UPDATERS = ['yeeAmpere']
LARMOR_LENGTH = 1.875
Y_START = -93.75
B_0 = 0.001704508890105703
densExpr = '4.706597930067571e+12 / 4.706597930067571e+12 * (1. / cosh(yPosHolder*(1./5.0)))^2'
T_BG = 59298895.523481
trackPtclsYrange = 93.75
coef = '-0.0085225444505285155'
qi = 7
NUMFIELDDUMPS = 20
LX_TOT = 375.0
xc = 281.25
xPtUpNX = 100
xPtUpNY = 300
TRAJ_APPLY_PERIOD = 6
NZ = 0
NUM_EZ2D_HISTS_ALONG_X = 20
GAMMA_BG_INDEX = 2.0
USE_ABC = False
BETA_GAMMA_DRIFT_ION = 0.75
PIO2 = 1.5707963267948966
KB = 1.3806504e-23
MU0 = 1.2566370614359173e-06
DT1D = 3.1271633924826753e-09
VP_ALERT = 1
EZ_AVG_CALC_PERIOD = 50
oPtDnNX = 100
oPtDnNY = 100
verbosityMacVersion = '$Id: verbosityMac.py 55 2013-03-07 19:25:09Z wernerg $'
false = False
E_B_FAC = 1.008
LX_TOTAL_BOXSIZE_OVER_DELTA = 75.0
DZ = 1.0
DX = 0.9375
DY = 0.9375
ezAvgHistBlockDnNYs = [100, 104, 108, 116, 124, 140]
DT = 2.186936930239505e-09
SIN_SQR_LAYER_K = 0.3086582838174551
SVN_REVISION_PREFILE = '$Rev: 43 $'
Y_PML_YHI = 281.25
useGammaNumberDensity = True
GAMMA_DRIFT_ION = 1.2500000000000004
GAMMA_BG_LO = 1.0
SMOOTH_E_ENERGY_LOSS_AT_LAYER_K = 0.09294990994629804
ENERGY_CALC_PERIOD = 20
prty = '(1 + 0.01 * cos(3.141592653589793 * (x) / 187.5) * cos(3.141592653589793*(y)/187.5)^2 )'
BETA_DRIFT = 0.6
NUMDUMPS = 20
ezAvgHistBlockHeights = [4, 4, 8, 8, 16]
prtyp = '(1 + 0.01 * cos(3.141592653589793 * (x - 187.5) / 187.5) * cos(3.141592653589793*(if(y <= 0.5*187.5, y, 187.5 - y))/187.5)^2 )'
DENSITY_FACTOR = 4706597930067.5713
LAYER_PTCL_STAT_MAPCELLSIZE = [0]
PTCL_BC_IN_X = 'periodic'
PTCL_BC_IN_Y = 'periodic'
PTCL_BC_IN_Z = 'periodic'
Z_PML_ZLO = 0.0
numGammaBins = 50
PY = 100
gammaBinMin = 1.0
xcMax = 281.25
SMOOTH_E_ENERGY_LEFT_AT_LAYER_K = 0.907050090053702
gammaBinMax = 375.0
X_START = -93.75
SMOOTH_E_ENERGY_LEFT_AT_HALF_NYQ_K = 0.7741356267766225
LXS = [375.0, 375.0, 0.0]
M_CONST = 3.7955758958333335e-30
LARGE_DIST_CALC_PERIOD_STR = '250'
NX_TOT = 400
VP_NOTICE = 5
DOMAIN_DECOMP = ('quadWeighted', 64, [1.0, 3.0])
BOX_CROSSING_TIME = 8.844953762811314e-07
VP_CRITICAL = 2
PZ = 0
PX = 200
PI = 3.141592653589793
baseExpryp = '( -0.0085225444505285155 * lncosh((if(y <= 0.5*187.5, y, 187.5 - y))*(1/5.0)))'
EZ_AVG_NUM_CONSECUTIVE_STEPS = 5
endy = 50
endx = 50
wl = 1
NUM_TRACK_PTCLS = 10000
wy = 3.0
wx = 1.0
LZ_TOT = 0.0
K_LAYER = 1.2566370614359172
Z_PML_ZHI = 0.0
E_B_FAC2 = 0.992
GAMMA_SCALE = 1.875
DUMP_NODAL_FIELDS = 0
MASS_RATIO = 1.0
LARGE_DIST_CALC_PERIOD = 100
SIMTIME_OVER_NR_LARMOR_PERIOD = 0
velGenDrift = {'kind': 'relMaxwell', 'T': 11118542910.652687}
gammaBetaSqr = 0.4499999999999999
NX_ABC = 0
DT2D = 2.211238440702829e-09
C2 = 8.987551787368176e+16
NY_TOT = 400
GAMMA_BG_HI = 100.0
kB_T_S_ION_OVER_MCSQR = 1.875
halfLy = 93.75
kB_T_S_OVER_MCSQR = 1.875
IONMASS = 9.10938215e-31
ACTUAL_SIM_TIME = 4.3738738604790101e-06
TIMESTEPS_PER_LARMOR = 2.859856952655878
ezAvgBlockSectionCellsY = 4
NX_TOTS = [400, 400, 0]
ptclBC = 'periodic'
LY_PML_YHI = 0.0
TIMESTEPS = 2000
BG_MPTCLS_PER_DX = 4
BG_MPTCLS_PER_DY = 8
TWOPI = 6.283185307179586
MPTCLS_PER_DX = 4
LY_TOTAL_BOXSIZE_OVER_DELTA = 75.0
SMALL_DIST_CALC_PERIOD_STR = '50'
T_BG_ION = 59298895.523481
SIMTIME_OVER_BOX_CROSSING_TIME = 5.0
CELLS_PER_LARMOR_LENGTH = 2.0
DELTA = 5.0
T_S = 11118542910.652687
LZ_OVER_LARMOR_LENGTH = 0.5
NZ_TOT = 0
BETA_GAMMA_DRIFT = 0.75
inputFile = 'relRecon.in'
Y_END = 281.25
ELEMCHARGE = 1.602176487e-19
decompTypes = ['even', 'weighted', 'quadWeighted']
NZ_BEGIN = 0
VP_EMERGENCY = 0
ezAvgBlockSectionCellsX = 1
NXS = [400, 400, 0]
SMOOTH_E_A_FAC2 = 0.004
SMOOTH_E_ENERGY_LOSS_AT_HALF_NYQ_K = 0.2258643732233775
bpf = 8.0
SMOOTH_E_A_FAC = -0.004
J_0 = 271.28101540440281
B_GUIDE_OVER_B0 = 0.0
numCellsT = 160000
NY_BEGIN = 0
VP_ERROR = 3
LY_TOTAL_BOXSIZE_OVER_LARMOR_LENGTH = 200.0
LZ_TOTAL_BOXSIZE_OVER_DELTA = 0.0
DUMPPERIOD = 100
LY_PML_YLO = 0.0
ezAtXHistBlockUpNXs = [100, 101]
EZ_AVG_CALC_PERIOD_STR = '50'
tubs = [281.25, 93.75, 0.0]
NOM_TIME = 4.4224768814056575e-06
NDIM = 2
run = True
ASPECT_RATIO_Y_OVER_X = 1.0
MUONMASS = 1.8835313e-28
BASE_FAR_UPDATERS = ['yeeFaraday']
estPtclNums = 1414.0
ndy = 8
ndx = 8
ezAvgHistBlockUpMidNYs = [299, 301]
PTCLDUMPPERIOD = 100
ezAvgHistBlockWidths = [32, 16, 16, 8, 8, 4, 4, 2, 2, 1, 1, 1, 1, 2, 2, 4, 4, 8, 8, 16, 16, 32]
SET_ION_LAYER = 'theta'
tlbs = [-93.75, 0.0, -0.0]
VEL_NORM_FACTOR = 5.892988110396219e-64
DENSITY_BG_OVER_0 = 0.1
NX = 400
NY = 400
ORIGIN = [-93.75, -93.75, -0.0]
estFieldNums = 22.0
true = True
smoothersInY = ['smoothE01', 'smoothE11', 'smoothE21']
smoothersInX = ['smoothE00', 'smoothE10', 'smoothE20']
OTHER_EZ_AVG_CALC_PERIOD = 100
EPSILON0 = 8.854187817620389e-12
smallPeriods = [20]
SEED_PTCLS = 0
DOUBLE_PERIODIC_IN_Y = 1
ELECMASSEV = 510998.90984764055
periods = [50, 250, 50, 20]
SVN_ID_PREFILE = '$Id: relRecon.pre 43 2012-06-22 16:25:13Z wernerg $'
VelGenBg = {'kind': 'relMaxwell', 'T': 59298895.523481}
LX_OVER_LARMOR_LENGTH = 100.0
velGenDriftIon = {'kind': 'relMaxwell', 'T': 11118542910.652687}
NY_END = 400
cellsPerDomX = [50, 50, 50, 50, 50, 50, 50, 50]
cellsPerDomY = [65, 35, 34, 64, 66, 36, 35, 65]
nx = 400
ny = 400
ezAtXHistBlockDnNYs = [100, 101]
CELLS_PER_SHEET_THICKNESS = 5.333333333333333
xcDelta = 18.75
Z_END = 0.0
USE_GAMMAM1_BINS = 0
FARADAY_UPDATERS = ['yeeFaraday']
GAMMA_DRIFT = 1.25
VelGenBgIon = {'kind': 'relMaxwell', 'T': 59298895.523481}
xPtDnNY = 100
xPtDnNX = 300
DUMP_AT_START = 0
DT3D = 1.843048705090566e-09
ALPHA = 0.0
| mit | 7,488,255,541,433,285,000 | 27.160606 | 137 | 0.689551 | false |
esteluk/reinhardt | memberinfo/mailman.py | 2 | 4938 | # Copyright (C) 1998-2007 by the Free Software Foundation, Inc.
# Much of this is based on /usr/lib/mailman/bin/paths.py and Fixes the path of the project in order to use mailman
# BEGIN MAILMAN PATH INCLUSION ---------------------------
import os
import sys
from warnings import filterwarnings
# some scripts expect this attribute to be in this module
prefix = '/var/lib/mailman'
exec_prefix = '${prefix}'
# work around a bogus autoconf 2.12 bug
if exec_prefix == '${prefix}':
exec_prefix = prefix
# Supress Python 2.5 warning about string exceptions.
filterwarnings('ignore', '.* string exception', DeprecationWarning)
# Hack the path to include the parent directory of the $prefix/Mailman package
# directory.
sys.path.insert(0, prefix)
# We also need the pythonlib directory on the path to pick up any overrides of
# standard modules and packages. Note that these must go at the front of the
# path for this reason.
sys.path.insert(0, os.path.join(prefix, 'pythonlib'))
# Include Python's site-packages directory.
sitedir = os.path.join(sys.prefix, 'lib', 'python'+sys.version[:3],'site-packages')
sys.path.append(sitedir)
# END MAILMAN PATH INCLUSION ---------------------------
from compsoc.memberinfo.models import MailingList
from Mailman import Utils
from Mailman import MailList
from django.contrib.auth.models import User
from Mailman import Errors
def validate_lists():
'''
Checks current data in the compsoc database corresponds to that in Mailman.
Caveat: they have to be subscribed using the same email address they use for the compsoc website.
This includes:
Checking all lists in the MailingList model have a mailman equivalent
Checking all signups to a list are subscribed to the mailman list
'''
for list in MailingList.objects.all():
if not Utils.list_exists(list.list):
print "%s doesn't exist on mailman" % list.list
else:
mailman_list = MailList.MailList(list.list, lock=False)
members = mailman_list.getMemberCPAddresses(mailman_list.getRegularMemberKeys()+mailman_list.getDigestMemberKeys())
for user in list.users.all():
if not user.email in members:
print "The website thinks %s is subscribed to %s but he/she isn't" % (user.member.all_name(),list.list)
def import_lists(prefix):
'''
Imports lists named with the given prefix from mailman
into the compsoc website.
Caveat: they have to be subscribed using the same email
address they use for the compsoc website.
'''
for list_name in Utils.list_names():
if list_name.startswith(prefix):
list,new = MailingList.objects.get_or_create(list=list_name)
mailman_list = MailList.MailList(list_name, lock=False)
members = mailman_list.getMemberCPAddresses(mailman_list.getRegularMemberKeys()+mailman_list.getDigestMemberKeys())
for member in members:
try:
list.users.add(User.objects.get(email=member))
except User.DoesNotExist: pass
class UserDesc:
def __init__(self,name,address):
self.name = name
self.address = address
self.digest = False
class MailmanError(Exception):
def __init__(self,msg):
self.msg = msg
def subscribe_member(user,list):
'''
Adds a compsoc member to a mailing list
'''
try:
mailman_list = MailList.MailList(list.list)
try:
# 1 = send welcome message
mailman_list.ApprovedAddMember(UserDesc(user.member.all_name(),user.email), 1, 0)
mailman_list.Save()
except Errors.MMAlreadyAMember:
raise MailmanError('User is already a member')
except Errors.MembershipIsBanned, pattern:
raise MailmanError("User's email is banned by pattern %s " % pattern)
except Errors.MMBadEmailError:
raise MailmanError("Mailman has rejected the user's email")
except Errors.MMHostileAddress:
raise MailmanError('User is considered hostile by mailman')
finally:
mailman_list.Unlock()
except Errors.MMUnknownListError:
raise MailmanError("This mailman list doesn't exist")
def unsubscribe_member(user,list):
'''
Removes a compsoc member from a mailing list
'''
try:
mailman_list = MailList.MailList(list.list)
try:
if not mailman_list.isMember(user.email):
raise MailmanError("User isn't subscribed to the list")
#last 2 args: is admin notified, is user notified
mailman_list.ApprovedDeleteMember(user.email, 'bin/remove_members',True,True)
mailman_list.Save()
finally:
mailman_list.Unlock()
except Errors.MMUnknownListError:
raise MailmanError("This mailman list doesn't exist")
| agpl-3.0 | -8,628,226,163,998,219,000 | 38.190476 | 127 | 0.662009 | false |
liaozhida/liaozhida.github.io | _posts/pythonbak/preCaptcha.py | 1 | 2197 | # -*- coding: utf-8 -*-
import requests
import json
from bs4 import BeautifulSoup
import time
class CaptchaHelper:
def __init__(self):
self.headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
'Host':'www.zhihu.com',
'Origin':'https://www.zhihu.com',
'Referer':'https://www.zhihu.com',
'Content-Type':'application/x-www-form-urlencoded; charset=UTF-8'
}
data_file = open('config.json')
self.data = json.load(data_file)
self._session = requests.session()
def douban(self):
url = self.data["douban"]["captcha-url"]
response = self._session.get(url)
# print response.text
page = BeautifulSoup(response.text, 'lxml')
captcha_id = page.find('input', attrs={'name':'captcha-id'})['value']
imageurl = page.find('img', alt='captcha')['src']
response = requests.get(imageurl, stream=True)
with open('./captcha/douban.png', 'wb') as f:
            f.write(response.content)  # the with-block closes the file automatically
del response
self.data['douban']['captcha-id'] = captcha_id
print self.data
file = open('config.json','w')
file.write('\r\n')
json.dump(self.data,file);
file.close()
def zhihu(self):
        # Fetch the captcha image URL
imageurl = self.data['zhihu']['captcha-url']
print imageurl
        imageurl = 'http://www.zhihu.com/captcha.gif?r=%d&type=login'  # hard-coded URL deliberately overrides the config value
response = self._session.get(imageurl % (time.time() * 1000), headers=self.headers)
        # Save the captcha image locally
with open('./captcha/zhihu.png', 'wb') as f:
            f.write(response.content)  # the with-block closes the file automatically
del response
        # Write the session cookie information to a file
file = open('zhihu_cookies','w');
cookies = self._session.cookies.get_dict()
json.dump(cookies, file)
file.close()
if __name__ == '__main__':
    ch = CaptchaHelper()
    # ch.douban()
    ch.zhihu()
else:
print 'being imported as module'
| apache-2.0 | -7,904,162,348,769,327,000 | 25.654321 | 150 | 0.553497 | false |
SystemsBioinformatics/cbmpy | setupegg.py | 1 | 1159 | """
CBMPy: Constraint Based Modelling in Python (http://pysces.sourceforge.net/cbm)
============
Copyright (C) 2010-2018 Brett G. Olivier, VU University Amsterdam, Amsterdam, The Netherlands
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
Author: Brett G. Olivier
Contact email: [email protected]
Last edit: $Author: bgoli $ ($Id: setupegg.py 660 2018-09-24 14:57:04Z bgoli $)
"""
"""
A setup.py script to use setuptools, which gives egg goodness, etc.
Adapted from the original NumPy src (numpy.scipy.org).
"""
FRYING_EGGS = True
from setuptools import setup
execfile('setup.py')
| gpl-3.0 | 2,300,610,091,657,087,700 | 35.21875 | 93 | 0.762726 | false |
Jelby-John/HatalogicoWeatherStation | main.py | 1 | 5387 | #!/usr/bin/python
from Adafruit_PWM_Servo_Driver import PWM
from Adafruit_ADS1x15 import ADS1x15
import time, os, sys
import Adafruit_DHT
# HOW MANY CYCLES TO BE PERFORMED BEFORE SHOWING THE HIGH AND LOW SEQUENCE
# SET TO 0 FOR OFF
intervalHighLow = 60
# HOW LONG TO REST BETWEEN CYCLES - ZERO IS FINE
intervalSleep = 1
# HOW LONG TO DISPLAY THE HIGH AND LOW DISPLAYS
intervalDisplay = 5
# INTERVAL COUNTER/TRACKER. ALWAYS START AT 1
intervalCounter = 1
# Sensor should be set to Adafruit_DHT.DHT11,
# Adafruit_DHT.DHT22, or Adafruit_DHT.AM2302.
DHTsensor = Adafruit_DHT.DHT22
DHTpin = '22'
# SETUP THE PWMS
pwm = PWM(0x70)
pwm.setPWMFreq(100)
# SETUP THE ADCS
ADS1015 = 0x00
gain = 6144
sps = 100
adc = ADS1x15(address=0x49, ic=ADS1015)
# SET LEFT AND RIGHT POSITION FOR SERVOS
servoMin = 380
servoMax = 1150
# DEFINE SERVO PINS ON HATALOGICO PWMS
servoLight = 8
servoHumid = 10
servoTemp = 12
# DEFINE MAX AND MIN VALUES (some ranges are deliberately inverted so the
# needles sweep in the intended direction)
tempMin = 40
tempMax = 15
humidMin = 100
humidMax = 0
lightMin = 1
lightMax = 2800
# DECLARE DEFAULT VALUES FOR HIGH AND LOW TRACKERS
tempHigh = 0
tempLow = 100
humidHigh = 0
humidLow = 100
lightHigh = 0
lightLow = 100
# LED PIN CONFIG ON HATALOGICO PWMS
brightRed = 3
brightGreen = 5
humidRed = 7
humidGreen = 9
tempRed = 11
tempGreen = 13
def showHighs():
# SCALE READINGS INTO OUTPUT VALUES
tempPercent = (tempHigh - tempMin) / (tempMax - tempMin)
tempOutput = int(tempPercent * (servoMax - servoMin) + servoMin)
lightPercent = (lightHigh - lightMin) / (lightMax - lightMin)
lightOutput = int(lightPercent * (servoMax - servoMin) + servoMin)
humidPercent = (humidHigh - humidMin) / (humidMax - humidMin)
humidOutput = int(humidPercent * (servoMax - servoMin) + servoMin)
pwm.setPWM(brightGreen, 0, 4095)
pwm.setPWM(brightRed, 0, 0)
pwm.setPWM(humidGreen, 0, 4095)
pwm.setPWM(humidRed, 0, 0)
pwm.setPWM(tempGreen, 0, 4095)
pwm.setPWM(tempRed, 0, 0)
pwm.setPWM(servoTemp, 0, tempOutput)
pwm.setPWM(servoHumid, 0, humidOutput)
pwm.setPWM(servoLight, 0, lightOutput)
time.sleep(intervalDisplay)
def showLows():
# SCALE READINGS INTO OUTPUT VALUES
tempPercent = (tempLow - tempMin) / (tempMax - tempMin)
tempOutput = int(tempPercent * (servoMax - servoMin) + servoMin)
lightPercent = (lightLow - lightMin) / (lightMax - lightMin)
lightOutput = int(lightPercent * (servoMax - servoMin) + servoMin)
humidPercent = (humidLow - humidMin) / (humidMax - humidMin)
humidOutput = int(humidPercent * (servoMax - servoMin) + servoMin)
pwm.setPWM(brightGreen, 0, 0)
pwm.setPWM(brightRed, 0, 4095)
pwm.setPWM(humidGreen, 0, 0)
pwm.setPWM(humidRed, 0, 4095)
pwm.setPWM(tempGreen, 0, 0)
pwm.setPWM(tempRed, 0, 4095)
pwm.setPWM(servoTemp, 0, tempOutput)
pwm.setPWM(servoHumid, 0, humidOutput)
pwm.setPWM(servoLight, 0, lightOutput)
time.sleep(intervalDisplay)
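# The three scaling blocks above (and the equivalents in the main loop below)
# repeat the same linear rescaling. An illustrative helper -- a sketch only,
# not part of the original script -- could factor it out:
#
#   def scale(value, in_min, in_max):
#       percent = float(value - in_min) / (in_max - in_min)
#       return int(percent * (servoMax - servoMin) + servoMin)
#
# e.g. pwm.setPWM(servoTemp, 0, scale(temperature, tempMin, tempMax))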
def lightsOff():
pwm.setPWM(brightRed, 0, 4095)
pwm.setPWM(humidRed, 0, 4095)
pwm.setPWM(tempRed, 0, 4095)
pwm.setPWM(brightGreen, 0, 4095)
pwm.setPWM(humidGreen, 0, 4095)
pwm.setPWM(tempGreen, 0, 4095)
def startup():
lightsOff()
# TURN ON RED LEDS FOR SERVO START-UP PROCEDURE
pwm.setPWM(brightRed, 0, 0)
pwm.setPWM(humidRed, 0, 0)
pwm.setPWM(tempRed, 0, 0)
time.sleep(3)
lightsOff()
pwm.setPWM(brightGreen, 0, 0)
pwm.setPWM(humidGreen, 0, 0)
pwm.setPWM(tempGreen, 0, 0)
time.sleep(5)
lightsOff()
startup()
while (True):
if(intervalCounter == intervalHighLow):
showHighs()
showLows()
lightsOff()
intervalCounter = 1
elif(intervalCounter < intervalHighLow):
intervalCounter += 1
# GET HUMIDITY AND TEMPERATURE READINGS FROM DHT22
humidity, temperature = Adafruit_DHT.read_retry(DHTsensor, DHTpin)
ldrValue = adc.readADCSingleEnded(0, gain, sps)
lightValue = (ldrValue - lightMin) / (lightMax - lightMin) * 100
# SCALE READINGS INTO OUTPUT VALUES
tempPercent = (temperature - tempMin) / (tempMax - tempMin)
tempOutput = int(tempPercent * (servoMax - servoMin) + servoMin)
humidPercent = (humidity - humidMin) / (humidMax - humidMin)
humidOutput = int(humidPercent * (servoMax - servoMin) + servoMin)
lightPercent = lightValue / 100
lightOutput = int(lightPercent * (servoMax - servoMin) + servoMin)
# CHECK FOR HIGH AND LOW VALUES
# HUMIDITY
if(humidity > humidHigh):
humidHigh = humidity
if(humidity < humidLow):
humidLow = humidity
# TEMPERATURE
if(temperature > tempHigh):
tempHigh = temperature
if(temperature < tempLow):
tempLow = temperature
# BRIGHTNESS
if(lightValue > lightHigh):
lightHigh = lightValue
if(lightValue < lightLow):
lightLow = lightValue
os.system('clear')
print "----- INPUTS ------"
print "Temperature: %d" % temperature
print "Humidity: %d" % humidity
print "Brightness: %d" % lightValue
print "----- OUTPUTS -----"
print "Temperature: %d" % tempOutput
print "Humidity: %d" % humidOutput
print "Brightness: %d" % lightOutput
print "----- HISTORY -----"
print " | Temperature | Humidity | Brightness "
print "High: | %.1f" % tempHigh + " degC | %.1f" % humidHigh + " %% | %.1f" % lightHigh + " %"
print "Low: | %.1f" % tempLow + " degC | %.1f" % humidLow + " %% | %.1f" % lightLow + " %"
print "------------------------------"
pwm.setPWM(servoTemp, 0, tempOutput)
pwm.setPWM(servoHumid, 0, humidOutput)
pwm.setPWM(servoLight, 0, lightOutput)
time.sleep(intervalSleep)
| mit | -341,734,205,532,914,300 | 24.77512 | 100 | 0.699462 | false |
AntonovAlexander/activecore | designs/rtl/sigma/sw/benchmarks/mul_sw/hw_test_mul_sw.py | 1 | 1185 | # -*- coding:utf-8 -*-
from __future__ import division, print_function
import sys
sys.path.append('../../../../../rtl/udm/sw')
import time
import udm
from udm import *
sys.path.append('..')
import sigma
from sigma import *
def test_mul_sw(sigma, a, b):
    # write the two operands into the device's input registers
    sigma.tile.udm.wr32(0x6000, a)
    sigma.tile.udm.wr32(0x6004, b)
corr_result = a * b
    time.sleep(0.3)  # give the software multiply time to complete
    led_val = sigma.udm.rd32(0x80000000)  # read the result back from the LED register
if (led_val == corr_result):
print("CORRECT: ", a, " * ", b, " = ", corr_result)
return 1
else:
print("INCORRECT: ", a, " * ", b, " = ", corr_result, ", received: ", led_val)
return 0
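# Minimal standalone check (illustrative sketch; assumes a connected board and
# the register map used above: operands at 0x6000/0x6004, result readable at
# 0x80000000):
#
#   sigma.tile.udm.wr32(0x6000, 3)
#   sigma.tile.udm.wr32(0x6004, 4)
#   time.sleep(0.3)
#   assert sigma.udm.rd32(0x80000000) == 12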
def hw_test_mul_sw(sigma, mul_sw_filename):
print("#### MUL_SW TEST STARTED ####")
print("Loading test program...")
sigma.tile.loadelf(mul_sw_filename)
print("Test program written!")
test_succ_flag = 1
test_succ_flag &= test_mul_sw(sigma, 6, 7)
test_succ_flag &= test_mul_sw(sigma, 2, 10)
test_succ_flag &= test_mul_sw(sigma, 256, 256)
if (test_succ_flag):
print("#### MUL_SW TEST PASSED! ####")
else:
print("#### MUL_SW TEST FAILED! ####")
print("")
return test_succ_flag
| apache-2.0 | -607,769,951,120,433,200 | 24.212766 | 86 | 0.55865 | false |
ifiddes/pycbio | tests/libtests/pycbio/sys/symEnumTests.py | 1 | 4840 | # Copyright 2006-2014 Mark Diekhans
import unittest, sys, cPickle
if __name__ == '__main__':
sys.path.extend(["../../..", "../../../.."])
from pycbio.sys.symEnum import SymEnum, SymEnumValue
from pycbio.sys.testCaseBase import TestCaseBase
class Color(SymEnum):
red = 1
green = 2
blue = 3
class GeneFeature(SymEnum):
promoter = 1
utr5 = SymEnumValue(2, "5'UTR")
cds = SymEnumValue(3, "CDS")
utr3 = SymEnumValue(4, "3'UTR")
coding = cds
class SymEnumTests(TestCaseBase):
def testBasics(self):
self.assertEqual(Color.red.name, "red")
self.assertEqual(Color.green.name, "green")
self.assertEqual(Color.blue.name, "blue")
self.assertTrue(Color.red < Color.blue)
self.assertTrue(Color.red == Color.red)
self.assertTrue(Color.red != Color.blue)
self.assertTrue(Color.red is not None)
self.assertTrue(None != Color.red)
def testLookup(self):
self.assertTrue(Color.red == Color("red"))
self.assertTrue(Color.green == Color("green"))
self.assertTrue(Color.green != Color("red"))
def testStrings(self):
self.assertTrue(str(Color.red) == "red")
self.assertTrue(str(Color.green) == "green")
self.assertTrue(sorted([str(c) for c in Color]), ["red", "green", "blue"])
def testAliases(self):
class Name(SymEnum):
Fred = 1
Rick = 2
Richard = Dick = HeyYou = Rick
Bill = 3
self.assertTrue(Name("Richard") is Name.Rick)
self.assertEqual(Name("Dick"), Name.Rick)
self.assertTrue(Name("Dick") is Name.Rick)
self.assertTrue(Name("Rick") == Name.Rick)
self.assertTrue(Name("HeyYou") == Name.Rick)
self.assertTrue(Name("Fred") == Name.Fred)
self.assertTrue(Name("Fred") is Name.Fred)
self.assertEqual([n for n in Name], [Name.Fred, Name.Rick, Name.Bill])
def testSetOps(self):
colSet = set([Color.blue, Color.green])
self.assertTrue(Color.green in colSet)
self.assertFalse(Color.red in colSet)
def testNumberDef(self):
class NumDef(SymEnum):
neg = -2
zero = 0
            pos = 2
big = 3
values = [(v.name, v.value) for v in NumDef]
self.assertEqual(values, [('neg', -2), ('zero', 0), ('pos', 2), ('big', 3)])
self.assertEqual(NumDef(2), NumDef.pos)
def __testColorPickleProtocol(self, protocol):
stuff = {Color.red: "red one",
Color.green: "green one"}
world = cPickle.dumps((Color, stuff,), protocol)
color, stuff2 = cPickle.loads(world)
self.assertTrue(Color.red in stuff2)
self.assertTrue(Color.green in stuff2)
def testColorPickle2(self):
self.assertTrue(cPickle.HIGHEST_PROTOCOL == 2)
self.__testColorPickleProtocol(2)
def testColorPickle1(self):
self.__testColorPickleProtocol(1)
def testColorPickle0(self):
self.__testColorPickleProtocol(0)
def testExtNameLookup(self):
self.assertEqual(GeneFeature.promoter, GeneFeature("promoter"))
self.assertEqual(GeneFeature.utr5, GeneFeature("5'UTR"))
self.assertEqual(GeneFeature.utr5, GeneFeature("utr5"))
self.assertEqual(GeneFeature.cds, GeneFeature("CDS"))
self.assertEqual(GeneFeature.utr3, GeneFeature("3'UTR"))
self.assertEqual(GeneFeature.utr3, GeneFeature("utr3"))
self.assertEqual(GeneFeature.cds, GeneFeature("coding"))
def testExtNameStrings(self):
self.assertEqual(str(GeneFeature.promoter), "promoter")
self.assertEqual(str(GeneFeature.utr5), "5'UTR")
self.assertEqual(str(GeneFeature.cds), "CDS")
self.assertEqual(str(GeneFeature.utr3), "3'UTR")
self.assertNotEqual(str(GeneFeature.utr3), "utr3")
self.assertEqual(str(GeneFeature.coding), "CDS")
self.assertEqual(sorted([str(c) for c in GeneFeature]), ["3'UTR", "5'UTR", "CDS", "promoter"])
def __testGeneFeaturePickleProtocol(self, protocol):
stuff = {GeneFeature.utr3: "UTR'3 one",
GeneFeature.cds: "CDS one"}
world = cPickle.dumps((GeneFeature, stuff,), protocol)
geneFeature, stuff2 = cPickle.loads(world)
self.assertTrue(GeneFeature.utr3 in stuff2)
self.assertTrue(GeneFeature.cds in stuff2)
def testGeneFeaturePickle2(self):
self.assertTrue(cPickle.HIGHEST_PROTOCOL == 2)
self.__testGeneFeaturePickleProtocol(2)
def testGeneFeaturePickle1(self):
self.__testGeneFeaturePickleProtocol(1)
def testGeneFeaturePickle0(self):
self.__testGeneFeaturePickleProtocol(0)
def suite():
ts = unittest.TestSuite()
ts.addTest(unittest.makeSuite(SymEnumTests))
return ts
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 1,242,285,625,566,823,200 | 36.51938 | 102 | 0.628926 | false |
gtt116/rabbitclient | rabbit.py | 1 | 3469 | #!/usr/bin/env python
"""
A Kombu based RabbitMQ server client
"""
import sys
import argparse
import json
import pprint
try:
from kombu.messaging import Producer
from kombu import Exchange, Queue, Connection
except ImportError:
print 'Please install kombu before running this script.'
print 'You can run it on Nova compute.'
sys.exit(1)
class RabbitClient(object):
def __init__(self, host, username='guest', password='guest'):
self.host = host
self.username = username
self.password = password
self._amqp_connection = 'amqp://%s:%s@%s' % (self.username,
self.password,
self.host)
self.conn = None
def _channel(self):
if not self.conn:
self.conn = Connection(self._amqp_connection)
return self.conn.channel()
def queue_delete(self, queue_name):
# NOTE(gtt): We can omit exchange and routing_key argument here
# queue = Queue(queue_name, exchange=exchange,
# routing_key=routing_key, channel=conn.channel())
queue = Queue(queue_name, channel=self._channel())
print "Deleting queue %s" % queue
return queue.delete()
def queue_purge(self, queue_name):
queue = Queue(queue_name, channel=self._channel())
print "Purging queue %s" % queue
return queue.purge()
def queue_get(self, queue_name, ack=True):
queue = Queue(queue_name, channel=self._channel())
msg = queue.get()
if not msg:
return None
if ack:
msg.ack()
return msg
def queue_publish(self, routing_key,
exchange_name, exchange_type='topic', body=None):
exchange = Exchange(name=exchange_name, type=exchange_type,
exclusive=False, durable=False, auto_delete=False)
p = Producer(self._channel(), exchange, routing_key=routing_key)
return p.publish(body)
def queue_get_print(self, queue_name):
msg = self.queue_get(queue_name)
if not msg:
print None
return
try:
print json.dumps(json.loads(msg.body), indent=2)
except ValueError:
print msg.body
def dispatch(self, action_name, args):
if action_name == 'queue-get':
return self.queue_get_print(args.queue_name)
if action_name == 'queue-delete':
return self.queue_delete(args.queue_name)
if action_name == 'queue-purge':
return self.queue_purge(args.queue_name)
raise ValueError("Method not support: %s" % action_name)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-H', '--host')
parser.add_argument('-u', '--username')
parser.add_argument('-p', '--password')
subparser = parser.add_subparsers(dest='action',
help='commands help')
delete_parser = subparser.add_parser('queue-delete')
delete_parser.add_argument('queue_name')
purge_parser = subparser.add_parser('queue-purge')
purge_parser.add_argument('queue_name')
get_parser = subparser.add_parser('queue-get')
get_parser.add_argument('queue_name')
args = parser.parse_args()
rabbit = RabbitClient(args.host, args.username, args.password)
print rabbit.dispatch(args.action, args)
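    # Example invocations (host and queue names here are hypothetical):
    #   ./rabbit.py -H 10.0.0.5 -u guest -p guest queue-get notifications.info
    #   ./rabbit.py -H 10.0.0.5 -u guest -p guest queue-purge notifications.error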
| apache-2.0 | -6,895,754,863,888,696,000 | 31.726415 | 78 | 0.593255 | false |
aymeric-spiga/remap | py/archive/reduced.py | 1 | 2347 | #! /usr/bin/env python
import netCDF4 as nc
import sys
import math
import numpy as np
def from_reduced(N,M):
#"N elements from south to north and N elements around equator "
if gaussian:
hmax = 2*math.pi/N
hmin = hmax/2
nlon = N
cells_lon = []
cells_lat = []
for i in range(M/2):
lat1 = 180.0/M*i
lat2 = 180.0/M*(i+1)
print "yorgl",i,lat1
if gaussian:
y = math.sin(lat1*math.pi/180)
r = math.cos(lat1*math.pi/180)
h = 2.0*r/nlon
reduce_nlon = (h < hmin) and (i > 0) and (nlon > 4)
else:
reduce_nlon = False
if reduce_nlon:
nlon = nlon/2
for j in range(nlon):
lon1 = 360.0*j/nlon
lon2 = 360.0*(j+1)/nlon
bounds_lon = [lon1, lon1, lon2, lon2]
bounds_lat = [lat1, lat2, lat2, lat1]
if reduce_nlon:
bounds_lon.append((lon1+lon2)/2)
bounds_lat.append(lat1)
else: # close by netCDF convention
bounds_lon.append(bounds_lon[0])
bounds_lat.append(bounds_lat[0])
# northern hemisphere
cells_lon.append(bounds_lon)
cells_lat.append(bounds_lat)
# southern hemisphere
cells_lon.append(bounds_lon)
cells_lat.append(list(-np.array(bounds_lat))) # convert to array to negate elementwise
return np.array(cells_lon), np.array(cells_lat)
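# Quick sanity check of from_reduced (illustrative; the arguments are
# arbitrary small values):
#   lon, lat = from_reduced(16, 8)
#   assert lon.shape == lat.shape and lon.shape[1] == 5  # 5 vertices per cell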
# Toggle: uncomment the next line for a reduced (Gaussian-style) grid
# gaussian = True
gaussian = False
#for N in [64, 128, 256, 512]:
for N in [64]:
filename = "reduced" + str(N) + ".nc"
print "Generating: N =", N
lon, lat = from_reduced(N*2,N)
print lon.shape[0], "cells -> writing as ", filename
f = nc.Dataset(filename,'w')
f.createDimension('n_vert', 5)
f.createDimension('n_cell', lon.shape[0])
var = f.createVariable('lat', 'd', ('n_cell'))
var.setncattr("long_name", "latitude")
var.setncattr("units", "degrees_north")
var.setncattr("bounds", "bounds_lat")
var[:] = np.zeros(lon.shape[0])
var = f.createVariable('lon', 'd', ('n_cell'))
var.setncattr("long_name", "longitude")
var.setncattr("units", "degrees_east")
var.setncattr("bounds", "bounds_lon")
var[:] = np.zeros(lon.shape[0])
var = f.createVariable('bounds_lon', 'd', ('n_cell','n_vert'))
var[:] = lon
var = f.createVariable('bounds_lat', 'd', ('n_cell','n_vert'))
var[:] = lat
var = f.createVariable('val', 'd', ('n_cell'))
var.setncattr("coordinates", "lon lat")
var[:] = np.arange(lon.shape[0])
f.close()
| gpl-2.0 | 2,083,614,771,329,507,000 | 26.290698 | 89 | 0.612697 | false |
hip-odoo/odoo | addons/hr_attendance/models/hr_employee.py | 15 | 7281 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from random import choice
from string import digits
from odoo import models, fields, api, exceptions, _, SUPERUSER_ID
class HrEmployee(models.Model):
_inherit = "hr.employee"
_description = "Employee"
def _default_random_pin(self):
return ("".join(choice(digits) for i in range(4)))
def _default_random_barcode(self):
barcode = None
while not barcode or self.env['hr.employee'].search([('barcode', '=', barcode)]):
barcode = "".join(choice(digits) for i in range(8))
return barcode
barcode = fields.Char(string="Badge ID", help="ID used for employee identification.", default=_default_random_barcode, copy=False)
pin = fields.Char(string="PIN", default=_default_random_pin, help="PIN used to Check In/Out in Kiosk Mode (if enabled in Configuration).", copy=False)
attendance_ids = fields.One2many('hr.attendance', 'employee_id', help='list of attendances for the employee')
last_attendance_id = fields.Many2one('hr.attendance', compute='_compute_last_attendance_id')
attendance_state = fields.Selection(string="Attendance", compute='_compute_attendance_state', selection=[('checked_out', "Checked out"), ('checked_in', "Checked in")])
manual_attendance = fields.Boolean(string='Manual Attendance', compute='_compute_manual_attendance', inverse='_inverse_manual_attendance',
help='The employee will have access to the "My Attendances" menu to check in and out from his session')
_sql_constraints = [('barcode_uniq', 'unique (barcode)', "The Badge ID must be unique, this one is already assigned to another employee.")]
@api.multi
def _compute_manual_attendance(self):
for employee in self:
employee.manual_attendance = employee.user_id.has_group('hr.group_hr_attendance') if employee.user_id else False
@api.multi
def _inverse_manual_attendance(self):
manual_attendance_group = self.env.ref('hr.group_hr_attendance')
for employee in self:
if employee.user_id:
if employee.manual_attendance:
manual_attendance_group.users = [(4, employee.user_id.id, 0)]
else:
manual_attendance_group.users = [(3, employee.user_id.id, 0)]
@api.depends('attendance_ids')
def _compute_last_attendance_id(self):
for employee in self:
employee.last_attendance_id = employee.attendance_ids and employee.attendance_ids[0] or False
@api.depends('last_attendance_id.check_in', 'last_attendance_id.check_out', 'last_attendance_id')
def _compute_attendance_state(self):
for employee in self:
employee.attendance_state = employee.last_attendance_id and not employee.last_attendance_id.check_out and 'checked_in' or 'checked_out'
@api.constrains('pin')
def _verify_pin(self):
for employee in self:
if employee.pin and not employee.pin.isdigit():
raise exceptions.ValidationError(_("The PIN must be a sequence of digits."))
@api.model
def attendance_scan(self, barcode):
""" Receive a barcode scanned from the Kiosk Mode and change the attendances of corresponding employee.
Returns either an action or a warning.
"""
employee = self.search([('barcode', '=', barcode)], limit=1)
return employee and employee.attendance_action('hr_attendance.hr_attendance_action_kiosk_mode') or \
{'warning': _('No employee corresponding to barcode %(barcode)s') % {'barcode': barcode}}
@api.multi
def attendance_manual(self, next_action, entered_pin=None):
self.ensure_one()
if not (entered_pin is None) or self.env['res.users'].browse(SUPERUSER_ID).has_group('hr_attendance.group_hr_attendance_use_pin') and (self.user_id and self.user_id.id != self._uid or not self.user_id):
if entered_pin != self.pin:
return {'warning': _('Wrong PIN')}
return self.attendance_action(next_action)
@api.multi
def attendance_action(self, next_action):
""" Changes the attendance of the employee.
Returns an action to the check in/out message,
next_action defines which menu the check in/out message should return to. ("My Attendances" or "Kiosk Mode")
"""
self.ensure_one()
action_message = self.env.ref('hr_attendance.hr_attendance_action_greeting_message').read()[0]
action_message['previous_attendance_change_date'] = self.last_attendance_id and (self.last_attendance_id.check_out or self.last_attendance_id.check_in) or False
action_message['employee_name'] = self.name
action_message['next_action'] = next_action
if self.user_id:
modified_attendance = self.sudo(self.user_id.id).attendance_action_change()
else:
modified_attendance = self.sudo().attendance_action_change()
action_message['attendance'] = modified_attendance.read()[0]
return {'action': action_message}
@api.multi
def attendance_action_change(self):
""" Check In/Check Out action
Check In: create a new attendance record
Check Out: modify check_out field of appropriate attendance record
"""
if len(self) > 1:
raise exceptions.UserError(_('Cannot perform check in or check out on multiple employees.'))
action_date = fields.Datetime.now()
if self.attendance_state != 'checked_in':
vals = {
'employee_id': self.id,
'check_in': action_date,
}
return self.env['hr.attendance'].create(vals)
else:
attendance = self.env['hr.attendance'].search([('employee_id', '=', self.id), ('check_out', '=', False)], limit=1)
if attendance:
attendance.check_out = action_date
else:
raise exceptions.UserError(_('Cannot perform check out on %(empl_name)s, could not find corresponding check in. '
'Your attendances have probably been modified manually by human resources.') % {'empl_name': self.name, })
return attendance
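    # Illustrative server-side usage of the check in/out helper above (sketch;
    # assumes an employee id is already in hand):
    #   employee = self.env['hr.employee'].browse(employee_id)
    #   attendance = employee.attendance_action_change()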
@api.model_cr_context
def _init_column(self, column_name):
""" Initialize the value of the given column for existing rows.
Overridden here because we need to have different default values
for barcode and pin for every employee.
"""
if column_name not in ["barcode", "pin"]:
super(HrEmployee, self)._init_column(column_name)
else:
default_compute = self._fields[column_name].default
query = 'SELECT id FROM "%s" WHERE "%s" is NULL' % (
self._table, column_name)
self.env.cr.execute(query)
employee_ids = self.env.cr.fetchall()
for employee_id in employee_ids:
default_value = default_compute(self)
query = 'UPDATE "%s" SET "%s"=%%s WHERE id = %s' % (
self._table, column_name, employee_id[0])
self.env.cr.execute(query, (default_value,))
| agpl-3.0 | 2,383,363,399,660,738,600 | 48.530612 | 210 | 0.631232 | false |
cloud-ark/cloudark | client/fmcmds/call_server.py | 1 | 14059 | import gzip
import json
import os
import requests
import tarfile
import urllib2
resources_endpoint = "http://localhost:5002/resources"
resource_stacks_endpoint = "http://localhost:5002/resource_stacks"
environments_endpoint = "http://localhost:5002/environments"
apps_endpoint = "http://localhost:5002/apps"
containers_endpoint = "http://localhost:5002/containers"
SERVER_ERROR = "Something caused error in cloudark. Please submit bug report on cloudark github repo. "
SERVER_ERROR = SERVER_ERROR + "Attach logs from cld.log which is available in cloudark directory."
class TakeAction(object):
def __init__(self):
pass
def _make_tarfile(self, output_filename, source_dir):
with tarfile.open(output_filename, "w:gz") as tar:
tar.add(source_dir, arcname=os.path.basename(source_dir))
def _read_tarfile(self, tarfile_name):
with gzip.open(tarfile_name, "rb") as f:
contents = f.read()
return contents
def _delete_tarfile(self, tarfile_name, source_dir):
cwd = os.getcwd()
os.chdir(source_dir)
if os.path.exists(tarfile_name):
os.remove(tarfile_name)
os.chdir(cwd)
def _check_server(self):
try:
req = urllib2.Request(apps_endpoint)
urllib2.urlopen(req)
except Exception as e:
print("CloudARK server is not running. Please run ./start-cloudark.sh.")
exit()
def create_container(self, source_dir, cont_info):
self._check_server()
req = urllib2.Request(containers_endpoint)
req.add_header('Content-Type', 'application/octet-stream')
cont_name = cont_info['cont_name']
tarfile_name = cont_name + ".tar"
self._make_tarfile(tarfile_name, source_dir)
tarfile_content = self._read_tarfile(tarfile_name)
cont_info['cont_tar_name'] = tarfile_name
cont_info['content'] = tarfile_content
cont_url = ''
try:
data = {'cont_info': cont_info}
response = urllib2.urlopen(req, json.dumps(data, ensure_ascii=True, encoding='ISO-8859-1'))
cont_url = response.headers.get('location')
print("Request to create container %s accepted." % cont_name)
except Exception as e:
error = e.read()
print(error)
self._delete_tarfile(tarfile_name, source_dir)
def get_container(self, container_name):
self._check_server()
cont_url = containers_endpoint + "/" + container_name
req = urllib2.Request(cont_url)
cont_data = ''
try:
response = urllib2.urlopen(req)
cont_data = response.fp.read()
except urllib2.HTTPError as e:
if e.getcode() == 404:
print("Container with name %s not found." % container_name)
return cont_data
def get_container_list(self):
self._check_server()
req = urllib2.Request(containers_endpoint)
data = ''
try:
response = urllib2.urlopen(req)
data = response.fp.read()
except urllib2.HTTPError as e:
print("Error occurred in querying endpoint %s" % containers_endpoint)
print(e)
return data
def delete_container(self, cont_name):
self._check_server()
cont_url = containers_endpoint + "/" + cont_name
response = requests.delete(cont_url)
if response.status_code == 404:
print("Container with name %s not found." % cont_name)
if response.status_code == 202:
print("Request to delete container with name %s accepted." % cont_name)
if response.status_code == 303:
print("Request to delete container with name %s accepted." % cont_name)
print("*** Please delete the container image from GCR manually -- automation is not available for that yet.***")
return response
def deploy_app(self, app_path, app_info):
self._check_server()
source_dir = app_path
app_name = app_info['app_name']
tarfile_name = app_name + ".tar"
self._make_tarfile(tarfile_name, source_dir)
tarfile_content = self._read_tarfile(tarfile_name)
app_info['app_name'] = app_name
app_info['app_tar_name'] = tarfile_name
app_info['app_content'] = tarfile_content
data = {'app_info': app_info}
req = urllib2.Request(apps_endpoint)
req.add_header('Content-Type', 'application/octet-stream')
app_url = ''
try:
response = urllib2.urlopen(req, json.dumps(data, ensure_ascii=True, encoding='ISO-8859-1'))
app_url = response.headers.get('location')
print("Request to deploy %s application accepted." % app_name)
except Exception as e:
error = e.read()
print(error)
self._delete_tarfile(tarfile_name, source_dir)
return app_url
def get_app(self, app_name):
self._check_server()
app_url = apps_endpoint + "/" + app_name
req = urllib2.Request(app_url)
app_data = ''
try:
response = urllib2.urlopen(req)
app_data = response.fp.read()
except urllib2.HTTPError as e:
if e.getcode() == 404:
print("App with name %s not found." % app_name)
return app_data
def get_app_logs(self, app_name):
self._check_server()
app_url = apps_endpoint + "/" + app_name + "/logs"
req = urllib2.Request(app_url)
logs_data = ''
try:
response = urllib2.urlopen(req)
logs_data = response.fp.read()
except urllib2.HTTPError as e:
if e.getcode() == 404:
print("App with name %s not found." % app_name)
return logs_data
def delete_app(self, app_name):
self._check_server()
app_url = apps_endpoint + "/" + app_name
response = requests.delete(app_url)
if response.status_code == 404:
print("App with name %s not found." % app_name)
if response.status_code == 202:
print("Request to delete app with name %s accepted." % app_name)
if response.status_code == 303:
print("Request to delete app with name %s accepted." % app_name)
return response
def redeploy_app(self, app_path, app_info, app_name):
self._check_server()
app_id_url = apps_endpoint + "/" + app_name
source_dir = app_path
app_name = "app-redeploy-id-" + app_name
tarfile_name = app_name + ".tar"
self._make_tarfile(tarfile_name, source_dir)
tarfile_content = self._read_tarfile(tarfile_name)
app_info['app_tar_name'] = tarfile_name
app_info['app_content'] = tarfile_content
data = {'app_info': app_info}
app_url = ''
req = urllib2.Request(app_id_url)
req.add_header('Content-Type', 'application/octet-stream')
req.get_method = lambda: 'PUT'
try:
response = urllib2.urlopen(req, json.dumps(data, ensure_ascii=True, encoding='ISO-8859-1'))
if response.code == 202:
print("Request to redeploy app with name %s accepted." % app_name)
app_url = response.headers.get('location')
except Exception as e:
if e.msg == 'NOT FOUND':
print("App with name %s not found." % app_name)
if e.msg == 'INTERNAL SERVER ERROR':
print(SERVER_ERROR)
return
self._delete_tarfile(tarfile_name, source_dir)
return app_url
def get_app_list(self):
self._check_server()
req = urllib2.Request(apps_endpoint)
data = ''
try:
response = urllib2.urlopen(req)
data = response.fp.read()
except urllib2.HTTPError as e:
print("Error occurred in querying endpoint %s" % apps_endpoint)
print(e)
return data
# Functions for environment
def run_command(self, env_name, command_string):
self._check_server()
environment_command_endpoint = environments_endpoint + "/" + env_name + "/command"
req = urllib2.Request(environment_command_endpoint)
req.add_header('Content-Type', 'application/octet-stream')
data = {'command_string': command_string,
'environment_name': env_name}
try:
response = urllib2.urlopen(req, json.dumps(data, ensure_ascii=True, encoding='ISO-8859-1'))
response_data = response.fp.read()
resp_data_json = json.loads(response_data)
result = resp_data_json['data']
result_str = '\n'.join(result)
return result_str
except Exception as e:
if e.msg == 'NOT FOUND':
print("Environment with name %s not found." % env_name)
exit()
def create_environment(self, env_name, environment_def):
self._check_server()
req = urllib2.Request(environments_endpoint)
req.add_header('Content-Type', 'application/octet-stream')
data = {'environment_def': environment_def,
'environment_name': env_name}
try:
response = urllib2.urlopen(req, json.dumps(data, ensure_ascii=True, encoding='ISO-8859-1'))
print("Request to create environment %s accepted." % env_name)
except Exception as e:
if e.code == 503 or e.code == 500 or e.code == 412 or e.code == 400:
error = e.read()
print(error)
exit()
environment_url = response.headers.get('location')
return environment_url
def get_environment(self, env_name):
self._check_server()
env_url = environments_endpoint + "/" + env_name
req = urllib2.Request(env_url)
env_data = ''
try:
response = urllib2.urlopen(req)
env_data = response.fp.read()
except urllib2.HTTPError as e:
if e.getcode() == 404:
print("Environment with name %s not found." % env_name)
exit()
return env_data
def delete_environment(self, env_name, force_flag=''):
self._check_server()
env_url = environments_endpoint + "/" + env_name
if force_flag:
env_url = environments_endpoint + "/" + env_name + "?force=" + force_flag
response = requests.delete(env_url)
if response.status_code == 404:
print("Environment with name %s not found." % env_name)
if response.status_code == 202 or response.status_code == 200:
print("Request to delete env with name %s accepted." % env_name)
if response.status_code == 412:
print("Environment cannot be deleted as there are applications still running on it.")
if response.status_code == 303:
print("Request to delete env with name %s accepted." % env_name)
print("*** Please delete the VPC network from Google cloud console that was created for this environment ***.")
print("*** Check: https://github.com/cloud-ark/cloudark/issues/101 for details. ***")
return response
def get_environment_list(self):
self._check_server()
req = urllib2.Request(environments_endpoint)
data = ''
try:
response = urllib2.urlopen(req)
data = response.fp.read()
except urllib2.HTTPError as e:
print("Error occurred in querying endpoint %s" % environments_endpoint)
print(e)
return data
# Functions for Individual resource
def get_resources(self):
self._check_server()
req = urllib2.Request(resources_endpoint)
data = ''
try:
response = urllib2.urlopen(req)
data = response.fp.read()
except urllib2.HTTPError as e:
print(e)
return data
def get_resources_for_environment(self, env_name):
self._check_server()
req = urllib2.Request(resources_endpoint + "?env_name=%s" % env_name)
data = ''
try:
response = urllib2.urlopen(req)
data = response.fp.read()
except urllib2.HTTPError as e:
if e.getcode() == 404:
print("Environment with name %s not found." % env_name)
return data
def create_resource(self, resource_obj):
self._check_server()
req = urllib2.Request(resources_endpoint)
req.add_header('Content-Type', 'application/octet-stream')
request_data = {'resource_info': resource_obj}
response = urllib2.urlopen(req, json.dumps(request_data,
ensure_ascii=True,
encoding='ISO-8859-1'))
resource_endpoint = response.headers.get('location')
print("Resource URL:%s" % resource_endpoint)
return resource_endpoint
def get_resource(self, resource_id):
self._check_server()
resource_endpoint = resources_endpoint + "/" + resource_id
req = urllib2.Request(resource_endpoint)
resource_data = ''
try:
response = urllib2.urlopen(req)
resource_data = response.fp.read()
except urllib2.HTTPError as e:
if e.getcode() == 404:
print("Resource with resource-id %s not found." % resource_id)
return resource_data
def delete_resource(self, resource_id):
self._check_server()
resource_endpoint = resources_endpoint + "/" + resource_id
response = requests.delete(resource_endpoint)
if response.status_code == 404:
print("Resource with resource-id %s not found." % resource_id)
if response.status_code == 202:
print("Request to delete resource with resource-id %s accepted." % resource_id)
return response
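# Illustrative client usage (sketch; the environment name and definition file
# are hypothetical, and the cloudark server must be listening on
# localhost:5002):
#
#   action = TakeAction()
#   action.create_environment('dev-env', open('env.yaml').read())
#   print(action.get_environment_list())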
| apache-2.0 | 1,964,869,006,662,134,500 | 37.203804 | 124 | 0.580838 | false |
ibmjstart/bluemix-python-eve-sample | macreduce/run.py | 1 | 3837 | #!/usr/bin/env python
"""Instantiates the Python Eve REST API Server.
Instantiates the Python Eve REST API Server for both local
and cloud (IBM Bluemix) execution. Provides a default catch-all
routing to provide API consumers with intentional responses
for all routes. Provides a redis cloud caching instance for
session management where desired.
"""
from settings import (REDIS_INSTANCE,
APP_HOST,
APP_PORT,
VCAP_CONFIG)
from flask.ext.bootstrap import Bootstrap
from eve import Eve
from eve_docs import eve_docs
from eve_swagger import swagger
from routes import home
from hooks.event import (before_returning_items,
after_returning_items)
from gevent import wsgi, monkey, socket
import os
from platform import python_version
__author__ = "Sanjay Joshi"
__copyright__ = "IBM Copyright 2015"
__credits__ = ["Sanjay Joshi"]
__license__ = "Apache 2.0"
__version__ = "1.0"
__maintainer__ = "Sanjay Joshi"
__email__ = "[email protected]"
__status__ = "Prototype"
# Monkey patching makes blocking calls greenlet-friendly; gevent usually
# requires this for libraries with native bindings (e.g. Redis clients).
monkey.patch_all()
socket.setdefaulttimeout(240)
# capture current working directory
PWD = os.environ.get("PWD")
# set static folder path for static data
static_folder = os.path.join(PWD, "macreduce/static")
# Detect if we are deployed within Bluemix or not and configure accordingly
if VCAP_CONFIG:
print('Welcome to Bluemix')
print('Running on Python version: ' + python_version())
app = Eve(static_folder=static_folder,
redis=REDIS_INSTANCE)
REDIS_INSTANCE.flushdb()
else:
print('We are not running in Bluemix! Dev Mode Enabled')
app = Eve(static_folder=static_folder,
redis=REDIS_INSTANCE)
print(' Enabling Debug ...')
app.debug = True
# Setup some default home page path rules for JSON and HTML
app.add_url_rule('/', 'index', home.index)
# app.add_url_rule('/<path:path>', 'nonresource', home.index)
# Setup a favicon url for the home page
app.add_url_rule('/favicon', 'favicon',
view_func=home.favicon, methods=['GET'])
app.add_url_rule('/populate', 'populate',
view_func=home.populate, methods=['GET'])
# Setup examples of event hooks
app.on_pre_GET_mac += \
before_returning_items
app.on_post_GET_mac += \
after_returning_items
app.config['SWAGGER_INFO'] = {
'title': 'Macreduce API',
'version': '1.0',
'description': 'Python-Eve Framework application backend deployed on IBM '
'Bluemix that provides a practical illustration of setting '
'up a python REST API to support mobile workloads and '
'integration with 3rd party API platforms.',
'termsOfService': 'Have fun and learn!',
'contact': {
'name': 'joshisa',
'url': 'http://ibm.biz/sanjay_joshi'
},
'license': {
'name': 'Apache 2.0',
'url': 'https://github.com/ibmjstart/bluemix-python-eve-sample/'
'blob/master/LICENSE',
}
}
# Bootstrap and start Flask app within the WSGI GEvent Process
if __name__ == '__main__':
# Required to enable the Eve-docs extension
Bootstrap(app)
# Example invocation for running the Flask Server by itself
# app.run(host=APP_HOST, port=int(APP_PORT))
# Register the Flask Eve-docs blueprint
app.register_blueprint(eve_docs, url_prefix='/docs')
# Register the Swagger Extension for Eve
app.register_blueprint(swagger)
# Starting the GEvent WSGI Server to host the Flask App
# GEvent should provide superior response times to the
# dev Flask server
ws = wsgi.WSGIServer((APP_HOST, int(APP_PORT)), app)
ws.serve_forever()
| apache-2.0 | 7,938,279,015,738,166,000 | 32.955752 | 79 | 0.66823 | false |
sbarton272/AcousticBarcodes-Explorations | barcodes/dxfwrite/examples/mtext.py | 1 | 3720 | #!/usr/bin/env python
#coding:utf-8
# Author: mozman
# Purpose: examples for dxfwrite usage, see also tests for examples
# Created: 09.02.2010
# Copyright (C) 2010, Manfred Moitzi
# License: MIT License
import sys
import os
try:
import dxfwrite
except ImportError:
# if dxfwrite is not 'installed' append parent dir of __file__ to sys.path
import os
curdir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.abspath(os.path.join(curdir, os.path.pardir)))
import dxfwrite
from dxfwrite import DXFEngine as dxf
def textblock(mtext, x, y, rot, color=3, mirror=0):
dwg.add(dxf.line((x+50, y), (x+50, y+50), color=color))
dwg.add(dxf.line((x+100, y), (x+100, y+50), color=color))
dwg.add(dxf.line((x+150, y), (x+150, y+50), color=color))
dwg.add(dxf.line((x+50, y), (x+150, y), color=color))
dwg.add(dxf.mtext(mtext, (x+50, y), mirror=mirror, rotation=rot))
dwg.add(dxf.mtext(mtext, (x+100, y), mirror=mirror, rotation=rot,
halign=dxfwrite.CENTER))
dwg.add(dxf.mtext(mtext, (x+150, y), mirror=mirror, rotation=rot,
halign=dxfwrite.RIGHT))
dwg.add(dxf.line((x+50, y+25), (x+150, y+25), color=color))
dwg.add(dxf.mtext(mtext, (x+50, y+25), mirror=mirror, rotation=rot,
valign=dxfwrite.MIDDLE))
dwg.add(dxf.mtext(mtext, (x+100, y+25), mirror=mirror, rotation=rot,
valign=dxfwrite.MIDDLE, halign=dxfwrite.CENTER))
dwg.add(dxf.mtext(mtext, (x+150, y+25), mirror=mirror, rotation=rot,
valign=dxfwrite.MIDDLE, halign=dxfwrite.RIGHT))
dwg.add(dxf.line((x+50, y+50), (x+150, y+50), color=color))
dwg.add(dxf.mtext(mtext, (x+50, y+50), mirror=mirror,
valign=dxfwrite.BOTTOM, rotation=rot))
dwg.add(dxf.mtext(mtext, (x+100, y+50), mirror=mirror,
valign=dxfwrite.BOTTOM, rotation=rot,
halign=dxfwrite.CENTER))
dwg.add(dxf.mtext(mtext, (x+150, y+50), mirror=mirror,
valign=dxfwrite.BOTTOM, rotation=rot,
halign=dxfwrite.RIGHT))
def rotate_text(text, insert, parts=16, color=3):
delta = 360. / parts
for part in range(parts):
dwg.add(dxf.mtext(text, insert, rotation=(delta*part),
color=color, valign=dxfwrite.TOP))
name = "mtext.dxf"
dwg = dxf.drawing(name)
txt = "Das ist ein mehrzeiliger Text\nZeile 2\nZeile 3\nUnd eine lange lange" \
" ................ Zeile4"
textblock(txt, 0, 0, 0., color=1)
textblock(txt, 150, 0, 45., color=2)
textblock(txt, 300, 0, 90., color=3)
textblock(txt, 0, 70, 135., color=4)
textblock(txt, 150, 70, 180., color=5)
textblock(txt, 300, 70, 225., color=6)
txt = "MText Zeile 1\nMIRROR_X\nZeile 3"
textblock(txt, 0, 140, 0., color=4, mirror=dxfwrite.MIRROR_X)
textblock(txt, 150, 140, 45., color=5, mirror=dxfwrite.MIRROR_X)
textblock(txt, 300, 140, 90., color=6, mirror=dxfwrite.MIRROR_X)
txt = "MText Zeile 1\nMIRROR_Y\nZeile 3"
textblock(txt, 0, 210, 0., color=4, mirror=dxfwrite.MIRROR_Y)
textblock(txt, 150, 210, 45., color=5, mirror=dxfwrite.MIRROR_Y)
textblock(txt, 300, 210, 90., color=6, mirror=dxfwrite.MIRROR_Y)
textblock("Einzeiler 0 deg", 0, -70, 0., color=1)
textblock("Einzeiler 45 deg", 150, -70, 45., color=2)
textblock("Einzeiler 90 deg", 300, -70, 90., color=3)
txt = "--------------------------------------------------Zeile 1\n" \
"----------------- MTEXT MTEXT --------------------Zeile 2 zum Rotieren!\n" \
"--------------------------------------------------Zeile 3\n"
rotate_text(txt, (600, 100), parts=16, color=3)
dwg.save()
print("drawing '%s' created.\n" % name)
| mit | -6,541,401,395,220,813,000 | 39.879121 | 83 | 0.600538 | false |
openstack/smaug | karbor/tests/unit/protection/test_cinder_freezer_protection_plugin.py | 1 | 7811 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from karbor.common import constants
from karbor.context import RequestContext
from karbor.resource import Resource
from karbor.services.protection.bank_plugin import Bank
from karbor.services.protection.bank_plugin import BankPlugin
from karbor.services.protection.bank_plugin import BankSection
from karbor.services.protection import client_factory
from karbor.services.protection.protection_plugins.volume import \
volume_freezer_plugin_schemas
from karbor.services.protection.protection_plugins.volume.\
volume_freezer_plugin import FreezerProtectionPlugin
from karbor.tests import base
import mock
from oslo_config import cfg
from oslo_config import fixture
class FakeBankPlugin(BankPlugin):
def update_object(self, key, value, context=None):
return
def get_object(self, key, context=None):
return
def list_objects(self, prefix=None, limit=None, marker=None,
sort_dir=None, context=None):
return
def delete_object(self, key, context=None):
return
def get_owner_id(self, context=None):
return
fake_bank = Bank(FakeBankPlugin())
fake_bank_section = BankSection(bank=fake_bank, section="fake")
ResourceNode = collections.namedtuple(
"ResourceNode",
["value",
"child_nodes"]
)
Job = collections.namedtuple(
"Job",
["job_schedule"]
)
def call_hooks(operation, checkpoint, resource, context, parameters, **kwargs):
def noop(*args, **kwargs):
pass
hooks = (
'on_prepare_begin',
'on_prepare_finish',
'on_main',
'on_complete',
)
for hook_name in hooks:
hook = getattr(operation, hook_name, noop)
hook(checkpoint, resource, context, parameters, **kwargs)
class FakeCheckpoint(object):
def __init__(self):
self.bank_section = fake_bank_section
self.id = "fake_id"
def get_resource_bank_section(self, resource_id):
return self.bank_section
class VolumeFreezerProtectionPluginTest(base.TestCase):
def setUp(self):
super(VolumeFreezerProtectionPluginTest, self).setUp()
plugin_config = cfg.ConfigOpts()
plugin_config_fixture = self.useFixture(fixture.Config(plugin_config))
plugin_config_fixture.load_raw_values(
group='freezer_protection_plugin',
poll_interval=0,
)
self.plugin = FreezerProtectionPlugin(plugin_config)
self._public_url = 'http://127.0.0.1/v2.0'
cfg.CONF.set_default('freezer_endpoint',
self._public_url,
'freezer_client')
# due to freezer client bug, auth_uri should be specified
cfg.CONF.set_default('auth_uri',
'http://127.0.0.1/v2.0',
'freezer_client')
self.cntxt = RequestContext(user_id='demo',
project_id='fake_project_id',
auth_token='fake_token')
self.freezer_client = client_factory.ClientFactory.create_client(
'freezer', self.cntxt
)
self.checkpoint = FakeCheckpoint()
def test_get_options_schema(self):
options_schema = self.plugin.get_options_schema(
constants.VOLUME_RESOURCE_TYPE)
self.assertEqual(options_schema,
volume_freezer_plugin_schemas.OPTIONS_SCHEMA)
def test_get_restore_schema(self):
options_schema = self.plugin.get_restore_schema(
constants.VOLUME_RESOURCE_TYPE)
self.assertEqual(options_schema,
volume_freezer_plugin_schemas.RESTORE_SCHEMA)
def test_get_saved_info_schema(self):
options_schema = self.plugin.get_saved_info_schema(
constants.VOLUME_RESOURCE_TYPE)
self.assertEqual(options_schema,
volume_freezer_plugin_schemas.SAVED_INFO_SCHEMA)
@mock.patch('karbor.services.protection.protection_plugins.volume.'
'volume_freezer_plugin.utils.status_poll')
@mock.patch('karbor.services.protection.clients.freezer.create')
def test_create_backup(self, mock_freezer_create, mock_status_poll):
resource = Resource(id="123",
type=constants.VOLUME_RESOURCE_TYPE,
name='fake')
fake_bank_section.update_object = mock.MagicMock()
protect_operation = self.plugin.get_protect_operation(resource)
mock_freezer_create.return_value = self.freezer_client
mock_status_poll.return_value = True
self.freezer_client.clients.list = mock.MagicMock()
self.freezer_client.clients.list.return_value = [
{
'client_id': 'fake_client_id'
}
]
self.freezer_client.jobs.create = mock.MagicMock()
self.freezer_client.jobs.create.return_value = "123"
self.freezer_client.jobs.start_job = mock.MagicMock()
self.freezer_client.jobs.get = mock.MagicMock()
self.freezer_client.jobs.get.return_value = {
'job_actions': []
}
self.freezer_client.jobs.delete = mock.MagicMock()
call_hooks(protect_operation, self.checkpoint, resource, self.cntxt,
{})
@mock.patch('karbor.services.protection.protection_plugins.volume.'
'volume_freezer_plugin.utils.status_poll')
@mock.patch('karbor.services.protection.clients.freezer.create')
def test_delete_backup(self, mock_freezer_create, mock_status_poll):
resource = Resource(id="123",
type=constants.VOLUME_RESOURCE_TYPE,
name='fake')
delete_operation = self.plugin.get_delete_operation(resource)
fake_bank_section.update_object = mock.MagicMock()
fake_bank_section.get_object = mock.MagicMock()
fake_bank_section.get_object.return_value = {
'job_info': {
'description': '123',
'job_actions': [{
'freezer_action': {
'backup_name': 'test',
'action': 'backup',
'mode': 'cinder',
'cinder_vol_id': 'test',
'storage': 'swift',
'container': 'karbor/123'
}
}]
}
}
mock_freezer_create.return_value = self.freezer_client
mock_status_poll.return_value = True
self.freezer_client.jobs.create = mock.MagicMock()
self.freezer_client.jobs.create.return_value = '321'
self.freezer_client.jobs.start_job = mock.MagicMock()
self.freezer_client.jobs.get = mock.MagicMock()
self.freezer_client.jobs.get.return_value = {
'job_actions': []
}
self.freezer_client.jobs.delete = mock.MagicMock()
call_hooks(delete_operation, self.checkpoint, resource, self.cntxt,
{})
def test_get_supported_resources_types(self):
types = self.plugin.get_supported_resources_types()
self.assertEqual(types,
[constants.VOLUME_RESOURCE_TYPE])
| apache-2.0 | -7,735,721,643,586,370,000 | 36.373206 | 79 | 0.616566 | false |
fangohr/oommf-python | new/evolvers_test.py | 1 | 3923 | import pytest
from evolvers import RungeKuttaEvolve, CGEvolve
class TestRungeKuttaEvolve(object):
def setup(self):
# Set of valid arguments.
self.args1 = [[1, 1, 1, 'rkf54'],
[0.5, 1e5, 0.01, 'rk2'],
[0.05, 2.21e5, 5e6, 'rk4'],
[0.1, .1, 1e-2, 'rkf54m']]
# Set of invalid arguments.
self.args2 = [[-0.1, 1, 1, 'bac'],
[0.5, -1e5, 0.01, 'bac'],
[0.05, 2.21e5, 'abc', 'rkf54'],
[0.1, .1, -1e-2, 'rk2'],
[0.1, 0.1, 0.1, 0.1]]
def test_init(self):
# Valid arguments.
for arg in self.args1:
alpha = arg[0]
gamma_G = arg[1]
start_dm = arg[2]
method = arg[3]
evolver = RungeKuttaEvolve(alpha, gamma_G, start_dm, method)
assert evolver.alpha == alpha
assert isinstance(alpha, (int, float))
assert evolver.gamma_G == gamma_G
assert isinstance(gamma_G, (int, float))
assert evolver.start_dm == start_dm
assert isinstance(start_dm, (int, float))
assert evolver.method == method
assert isinstance(method, str)
def test_init_exceptions(self):
# Invalid arguments (ValueError expected).
for arg in self.args2:
alpha = arg[0]
gamma_G = arg[1]
start_dm = arg[2]
method = arg[3]
with pytest.raises(ValueError):
evolver = RungeKuttaEvolve(alpha, gamma_G, start_dm, method)
def test_get_mif(self):
for arg in self.args1:
alpha = arg[0]
gamma_G = arg[1]
start_dm = arg[2]
method = arg[3]
evolver = RungeKuttaEvolve(alpha, gamma_G, start_dm, method)
mif = evolver.get_mif()
            mif_lines = mif.split('\n')  # reuse the string instead of regenerating it
# Assert comment.
l = mif_lines[0].split()
assert l[0] == '#'
assert l[1] == 'RungeKutta'
assert l[2] == 'evolver'
# Assert Specify line.
l = mif_lines[1].split()
assert l[0] == 'Specify'
assert l[1].split(':')[0] == 'Oxs_RungeKuttaEvolve'
assert l[2] == '{'
# Assert parameters lines
assert mif_lines[2][0] == '\t'
l = mif_lines[2].split()
assert l[0] == 'alpha'
assert float(l[1]) == alpha
# Assert parameters lines
assert mif_lines[3][0] == '\t'
l = mif_lines[3].split()
assert l[0] == 'gamma_G'
assert float(l[1]) == gamma_G
# Assert parameters lines
assert mif_lines[4][0] == '\t'
l = mif_lines[4].split()
assert l[0] == 'start_dm'
assert float(l[1]) == start_dm
# Assert parameters lines
assert mif_lines[5][0] == '\t'
l = mif_lines[5].split()
assert l[0] == 'method'
assert l[1] == method
# Assert mif end.
assert mif_lines[6] == '}'
# Assert new lines at the end of the string.
assert mif[-2:] == '\n\n'
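# For reference, RungeKuttaEvolve.get_mif() is expected to render roughly the
# following (a sketch reconstructed from the assertions above; exact
# whitespace may differ):
#   # RungeKutta evolver
#   Specify Oxs_RungeKuttaEvolve:evolver {
#       alpha 1
#       gamma_G 1
#       start_dm 1
#       method rkf54
#   }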
class TestCGEvolve(object):
def test_get_mif(self):
evolver = CGEvolve()
mif = evolver.get_mif()
        mif_lines = mif.split('\n')  # reuse the string instead of regenerating it
# Assert comment.
l = mif_lines[0].split()
assert l[0] == '#'
assert l[1] == 'CG'
assert l[2] == 'evolver'
# Assert Specify line.
l = mif_lines[1].split()
assert l[0] == 'Specify'
assert l[1].split(':')[0] == 'Oxs_CGEvolve'
assert l[1].split(':')[1] == 'evolver'
assert l[2] == '{}'
# Assert new lines at the end of the string.
assert mif[-2:] == '\n\n'
| bsd-2-clause | 3,618,059,933,764,490,000 | 30.134921 | 76 | 0.465205 | false |
DutBright/scientificResearch | adminStaff/views.py | 1 | 8772 | # coding: UTF-8
'''
Created on 2014-06-07
Desc: adminStaff views, including the home (manage) view and the review report view
'''
import time
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.views.decorators import csrf
from backend.decorators import *
from const import *
from backend.logging import loginfo
from backend.utility import getContext
from adminStaff.forms import NewsForm,ObjectForm,TemplateNoticeMessageForm,DispatchForm,DispatchAddCollegeForm
from teacher.forms import ProjectBudgetInformationForm,ProjectBudgetAnnualForm, SettingForm, ProjectCreationTeacherForm
from common.forms import NoticeForm, SearchForm
from common.views import scheduleManage, financialManage,noticeMessageSettingBase,scheduleManage,finalReportViewWork,fundBudgetViewWork,fileUploadManage,researchConcludingManage,getType
from adminStaff.models import TemplateNoticeMessage,News,ProjectSingle,HomePagePic
from users.models import SchoolProfile,CollegeProfile,ExpertProfile,Special,College
@csrf.csrf_protect
@login_required
@authority_required(ADMINSTAFF_USER)
def appView(request):
context = {}
return render(request, "adminStaff/application.html", context)
@csrf.csrf_protect
@login_required
@authority_required(ADMINSTAFF_USER)
def allocManageView(request):
userauth = {
'role': 'adminStaff',
}
object_form = ObjectForm()
#special
special_list = []
user_special_info = {}
for i in Special.objects.all() :
special_list.append({'name':i.name, 'user':i.school_user, })
for i in SchoolProfile.objects.all():
user_special_info[i] = []
for i in special_list:
if i['user']:
user_special_info[i['user']].append(i['name'])
# college
college_list = []
user_college_info = {}
for i in College.objects.all() :
college_list.append({'name':i.name, 'user':i.college_user, })
for i in CollegeProfile.objects.all():
user_college_info[i] = []
for i in college_list:
if i['user']:
user_college_info[i['user']].append(i['name'])
instance_list = [
{
'object_chinese_name':"专题",
'object_name': "special",
'object_form': object_form,
'object_list': special_list,
'user_object_info':user_special_info,
},
{
'object_chinese_name':"学院",
'object_name': "college",
'object_form': object_form,
'object_list': college_list,
'user_object_info':user_college_info,
},
]
context = {
'instance_list': instance_list,
}
return render(request, "adminStaff/alloc_manage.html", context)
@csrf.csrf_protect
@login_required
@authority_required(ADMINSTAFF_USER)
def scheduleView(request):
userauth = {
'role': 'adminStaff',
'status':'all'
}
return scheduleManage(request, userauth)
@csrf.csrf_protect
@login_required
@authority_required(ADMINSTAFF_USER)
def deleteProject(request):
try:
iid=request.GET['iid']
print iid
project=ProjectSingle.objects.get(project_id=iid)
if project:
project.delete()
return HttpResponse('Success')
else:
return HttpResponse('Not exists')
except:
return HttpResponse('Invalid project_id')
@csrf.csrf_protect
@login_required
@authority_required(ADMINSTAFF_USER)
def newsRelease(request):
if request.method == "GET":
form = NewsForm()
else:
form = NewsForm(request.POST, request.FILES)
if form.is_valid():
form.save()
newsList = News.objects.all().order_by('-news_date')
context = getContext(newsList,1,"item",page_elems=7)
context.update({"newsform":NewsForm,
})
return render(request,"adminStaff/news_release.html",context)
@csrf.csrf_protect
@login_required
@authority_required(ADMINSTAFF_USER)
def noticeMessageSetting(request):
userauth={
"role":"adminStaff"
}
return noticeMessageSettingBase(request,userauth)
def dispatchView(request):
dispatch_form = DispatchForm()
dispatchAddCollege_form=DispatchAddCollegeForm()
college_users = CollegeProfile.objects.all()
expert_users = ExpertProfile.objects.all().order_by('college')
school_users = SchoolProfile.objects.all()
context = {
"dispatch_form":dispatch_form,
"dispatchAddCollege_form":dispatchAddCollege_form,
"search_form": SearchForm(),
}
context.update(getContext(school_users, 1, "item"))
context.update(getContext(college_users, 1, "item2"))
context.update(getContext(expert_users, 1, "item3"))
return render(request, "adminStaff/dispatch.html", context)
@csrf.csrf_protect
@login_required
@authority_required(ADMINSTAFF_USER)
def financialView(request):
userauth = {
"role": 'adminStaff',
}
return financialManage(request, userauth)
@csrf.csrf_protect
@login_required
@authority_required(ADMINSTAFF_USER)
def financialInfoView(request):
budgetinfoform = ProjectBudgetInformationForm()
budgetannuform = ProjectBudgetAnnualForm()
context = {
'budgetinfoform':budgetinfoform,
'budgetannuform':budgetannuform,
}
return render(request,"adminStaff/project_financial_info.html",context)
@csrf.csrf_protect
@login_required
@authority_required(ADMINSTAFF_USER)
def infoModifyView(request):
context = {}
return render(request, "adminStaff/teacher_info_modify.html", context)
@csrf.csrf_protect
@login_required
@authority_required(ADMINSTAFF_USER)
def infoExportView(request):
context = {
'EXCELTYPE_DICT':EXCELTYPE_DICT_OBJECT(),
}
return render(request, "adminStaff/infoexport.html", context)
@csrf.csrf_protect
@login_required
@authority_required(ADMINSTAFF_USER)
def finalInfoView(request,pid):
project = ProjectSingle.objects.filter(project_id = pid)
context = {
'project_list':project,
'role':'adminStaff',
}
return render(request, "adminStaff/finalinfo.html", context)
@csrf.csrf_protect
@login_required
@authority_required(ADMINSTAFF_USER)
@check_submit_status()
def finalReportView(request,pid,is_submited={}):
print "YYA" * 10
context = finalReportViewWork(request,pid,is_submited[SUBMIT_STATUS_FINAL])
context = dict(context, **fileUploadManage(request, pid,is_submited))
context['is_submited'] = is_submited
context['user'] = "adminStaff"
loginfo(p=is_submited,label="is_submited")
# if context['redirect']:
# return HttpResponseRedirect('/teacher/finalinfo')
return render(request,"adminStaff/final.html",context)
# def fileUploadManageView(request, pid, is_submited = False):
# context = fileUploadManage(request, pid)
# context['user'] = "teacher"
# # is_submited = False
# context['is_submited'] = is_submited
# return render(request, "teacher/file_upload.html", context)
@csrf.csrf_protect
@login_required
@authority_required(ADMINSTAFF_USER)
@check_submit_status()
def fundBudgetView(request,pid,is_submited={}):
context = fundBudgetViewWork(request,pid,is_submited[SUBMIT_STATUS_FINAL])
context['role'] = 'adminStaff'
if context['redirect']:
return HttpResponseRedirect('/adminStaff/finalinfo/'+str(pid))
return render(request,"adminStaff/fundbudget.html",context)
@csrf.csrf_protect
@login_required
@authority_required(ADMINSTAFF_USER)
@check_submit_status()
def fileUploadManageView(request, pid, is_submited={}):
context = fileUploadManage(request, pid, is_submited)
context['user'] = "adminStaff"
# is_submited = False
context['is_submited'] = is_submited
return render(request, "adminStaff/file_upload.html", context)
@csrf.csrf_protect
@login_required
@authority_required(ADMINSTAFF_USER)
def homepic_import_view(request):
"""
project group member change
"""
if request.method == "POST":
f = request.FILES["file"]
ftype = getType(f.name)
new_pic = HomePagePic()
new_pic.pic_obj = f
new_pic.name = f.name
new_pic.file_type = ftype
new_pic.uploadtime = time.strftime('%Y-%m-%d %X', time.localtime(time.time()))
new_pic.file_size = f.size
new_pic.save()
file_history = HomePagePic.objects.all()
loginfo(file_history.count())
data = {'files': file_history,
}
return render(request, 'adminStaff/home_pic_import.html', data)
@csrf.csrf_protect
@login_required
@authority_required(ADMINSTAFF_USER)
def createProject(request):
"""
project group member change
"""
return render(request, 'adminStaff/create_project.html', {'form': ProjectCreationTeacherForm()})
| agpl-3.0 | -821,464,746,761,467,800 | 30.52518 | 185 | 0.684961 | false |
rudhir-upretee/Sumo17_With_Netsim | tools/traci/constants.py | 1 | 16975 | """
@file constants.py
This script contains TraCI constant definitions from <SUMO_HOME>/src/traci-server/TraCIConstants.h
generated by "rebuildConstants.py" on 2012-12-03 12:37:11.425000.
SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/
Copyright (C) 2009-2013 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
# ****************************************
# VERSION
# ****************************************
TRACI_VERSION = 5
# ****************************************
# COMMANDS
# ****************************************
# command: get version
CMD_GETVERSION = 0x00
# command: simulation step
CMD_SIMSTEP2 = 0x02
# command: stop node
CMD_STOP = 0x12
# command: set lane
CMD_CHANGELANE = 0x13
# command: slow down
CMD_SLOWDOWN = 0x14
# command: change target
CMD_CHANGETARGET = 0x31
# command: add vehicle
CMD_ADDVEHICLE = 0x74
# command: close sumo
CMD_CLOSE = 0x7F
# command: subscribe induction loop (e1) context
CMD_SUBSCRIBE_INDUCTIONLOOP_CONTEXT = 0x80
# response: subscribe induction loop (e1) context
RESPONSE_SUBSCRIBE_INDUCTIONLOOP_CONTEXT = 0x90
# command: get induction loop (e1) variable
CMD_GET_INDUCTIONLOOP_VARIABLE = 0xa0
# response: get induction loop (e1) variable
RESPONSE_GET_INDUCTIONLOOP_VARIABLE = 0xb0
# command: subscribe induction loop (e1) variable
CMD_SUBSCRIBE_INDUCTIONLOOP_VARIABLE = 0xd0
# response: subscribe induction loop (e1) variable
RESPONSE_SUBSCRIBE_INDUCTIONLOOP_VARIABLE = 0xe0
# command: subscribe areal detector (e3) context
CMD_SUBSCRIBE_MULTI_ENTRY_EXIT_DETECTOR_CONTEXT = 0x81
# response: subscribe areal detector (e3) context
RESPONSE_SUBSCRIBE_MULTI_ENTRY_EXIT_DETECTOR_CONTEXT = 0x91
# command: get multi-entry/multi-exit detector (e3) variable
CMD_GET_MULTI_ENTRY_EXIT_DETECTOR_VARIABLE = 0xa1
# response: get areal detector (e3) variable
RESPONSE_GET_MULTI_ENTRY_EXIT_DETECTOR_VARIABLE = 0xb1
# command: subscribe multi-entry/multi-exit detector (e3) variable
CMD_SUBSCRIBE_MULTI_ENTRY_EXIT_DETECTOR_VARIABLE = 0xd1
# response: subscribe areal detector (e3) variable
RESPONSE_SUBSCRIBE_MULTI_ENTRY_EXIT_DETECTOR_VARIABLE = 0xe1
# command: subscribe traffic lights context
CMD_SUBSCRIBE_TL_CONTEXT = 0x82
# response: subscribe traffic lights context
RESPONSE_SUBSCRIBE_TL_CONTEXT = 0x92
# command: get traffic lights variable
CMD_GET_TL_VARIABLE = 0xa2
# response: get traffic lights variable
RESPONSE_GET_TL_VARIABLE = 0xb2
# command: set traffic lights variable
CMD_SET_TL_VARIABLE = 0xc2
# command: subscribe traffic lights variable
CMD_SUBSCRIBE_TL_VARIABLE = 0xd2
# response: subscribe traffic lights variable
RESPONSE_SUBSCRIBE_TL_VARIABLE = 0xe2
# command: subscribe lane context
CMD_SUBSCRIBE_LANE_CONTEXT = 0x83
# response: subscribe lane context
RESPONSE_SUBSCRIBE_LANE_CONTEXT = 0x93
# command: get lane variable
CMD_GET_LANE_VARIABLE = 0xa3
# response: get lane variable
RESPONSE_GET_LANE_VARIABLE = 0xb3
# command: set lane variable
CMD_SET_LANE_VARIABLE = 0xc3
# command: subscribe lane variable
CMD_SUBSCRIBE_LANE_VARIABLE = 0xd3
# response: subscribe lane variable
RESPONSE_SUBSCRIBE_LANE_VARIABLE = 0xe3
# command: subscribe vehicle context
CMD_SUBSCRIBE_VEHICLE_CONTEXT = 0x84
# response: subscribe vehicle context
RESPONSE_SUBSCRIBE_VEHICLE_CONTEXT = 0x94
# command: get vehicle variable
CMD_GET_VEHICLE_VARIABLE = 0xa4
# response: get vehicle variable
RESPONSE_GET_VEHICLE_VARIABLE = 0xb4
# command: set vehicle variable
CMD_SET_VEHICLE_VARIABLE = 0xc4
# command: subscribe vehicle variable
CMD_SUBSCRIBE_VEHICLE_VARIABLE = 0xd4
# response: subscribe vehicle variable
RESPONSE_SUBSCRIBE_VEHICLE_VARIABLE = 0xe4
# command: subscribe vehicle type context
CMD_SUBSCRIBE_VEHICLETYPE_CONTEXT = 0x85
# response: subscribe vehicle type context
RESPONSE_SUBSCRIBE_VEHICLETYPE_CONTEXT = 0x95
# command: get vehicle type variable
CMD_GET_VEHICLETYPE_VARIABLE = 0xa5
# response: get vehicle type variable
RESPONSE_GET_VEHICLETYPE_VARIABLE = 0xb5
# command: set vehicle type variable
CMD_SET_VEHICLETYPE_VARIABLE = 0xc5
# command: subscribe vehicle type variable
CMD_SUBSCRIBE_VEHICLETYPE_VARIABLE = 0xd5
# response: subscribe vehicle type variable
RESPONSE_SUBSCRIBE_VEHICLETYPE_VARIABLE = 0xe5
# command: subscribe route context
CMD_SUBSCRIBE_ROUTE_CONTEXT = 0x86
# response: subscribe route context
RESPONSE_SUBSCRIBE_ROUTE_CONTEXT = 0x96
# command: get route variable
CMD_GET_ROUTE_VARIABLE = 0xa6
# response: get route variable
RESPONSE_GET_ROUTE_VARIABLE = 0xb6
# command: set route variable
CMD_SET_ROUTE_VARIABLE = 0xc6
# command: subscribe route variable
CMD_SUBSCRIBE_ROUTE_VARIABLE = 0xd6
# response: subscribe route variable
RESPONSE_SUBSCRIBE_ROUTE_VARIABLE = 0xe6
# command: subscribe poi context
CMD_SUBSCRIBE_POI_CONTEXT = 0x87
# response: subscribe poi context
RESPONSE_SUBSCRIBE_POI_CONTEXT = 0x97
# command: get poi variable
CMD_GET_POI_VARIABLE = 0xa7
# response: get poi variable
RESPONSE_GET_POI_VARIABLE = 0xb7
# command: set poi variable
CMD_SET_POI_VARIABLE = 0xc7
# command: subscribe poi variable
CMD_SUBSCRIBE_POI_VARIABLE = 0xd7
# response: subscribe poi variable
RESPONSE_SUBSCRIBE_POI_VARIABLE = 0xe7
# command: subscribe polygon context
CMD_SUBSCRIBE_POLYGON_CONTEXT = 0x88
# response: subscribe polygon context
RESPONSE_SUBSCRIBE_POLYGON_CONTEXT = 0x98
# command: get polygon variable
CMD_GET_POLYGON_VARIABLE = 0xa8
# response: get polygon variable
RESPONSE_GET_POLYGON_VARIABLE = 0xb8
# command: set polygon variable
CMD_SET_POLYGON_VARIABLE = 0xc8
# command: subscribe polygon variable
CMD_SUBSCRIBE_POLYGON_VARIABLE = 0xd8
# response: subscribe polygon variable
RESPONSE_SUBSCRIBE_POLYGON_VARIABLE = 0xe8
# command: subscribe junction context
CMD_SUBSCRIBE_JUNCTION_CONTEXT = 0x89
# response: subscribe junction context
RESPONSE_SUBSCRIBE_JUNCTION_CONTEXT = 0x99
# command: get junction variable
CMD_GET_JUNCTION_VARIABLE = 0xa9
# response: get junction variable
RESPONSE_GET_JUNCTION_VARIABLE = 0xb9
# command: set junction variable
CMD_SET_JUNCTION_VARIABLE = 0xc9
# command: subscribe junction variable
CMD_SUBSCRIBE_JUNCTION_VARIABLE = 0xd9
# response: subscribe junction variable
RESPONSE_SUBSCRIBE_JUNCTION_VARIABLE = 0xe9
# command: subscribe edge context
CMD_SUBSCRIBE_EDGE_CONTEXT = 0x8a
# response: subscribe edge context
RESPONSE_SUBSCRIBE_EDGE_CONTEXT = 0x9a
# command: get edge variable
CMD_GET_EDGE_VARIABLE = 0xaa
# response: get edge variable
RESPONSE_GET_EDGE_VARIABLE = 0xba
# command: set edge variable
CMD_SET_EDGE_VARIABLE = 0xca
# command: subscribe edge variable
CMD_SUBSCRIBE_EDGE_VARIABLE = 0xda
# response: subscribe edge variable
RESPONSE_SUBSCRIBE_EDGE_VARIABLE = 0xea
# command: subscribe simulation context
CMD_SUBSCRIBE_SIM_CONTEXT = 0x8b
# response: subscribe simulation context
RESPONSE_SUBSCRIBE_SIM_CONTEXT = 0x9b
# command: get simulation variable
CMD_GET_SIM_VARIABLE = 0xab
# response: get simulation variable
RESPONSE_GET_SIM_VARIABLE = 0xbb
# command: set simulation variable
CMD_SET_SIM_VARIABLE = 0xcb
# command: subscribe simulation variable
CMD_SUBSCRIBE_SIM_VARIABLE = 0xdb
# response: subscribe simulation variable
RESPONSE_SUBSCRIBE_SIM_VARIABLE = 0xeb
# command: subscribe GUI context
CMD_SUBSCRIBE_GUI_CONTEXT = 0x8c
# response: subscribe GUI context
RESPONSE_SUBSCRIBE_GUI_CONTEXT = 0x9c
# command: get GUI variable
CMD_GET_GUI_VARIABLE = 0xac
# response: get GUI variable
RESPONSE_GET_GUI_VARIABLE = 0xbc
# command: set GUI variable
CMD_SET_GUI_VARIABLE = 0xcc
# command: subscribe GUI variable
CMD_SUBSCRIBE_GUI_VARIABLE = 0xdc
# response: subscribe GUI variable
RESPONSE_SUBSCRIBE_GUI_VARIABLE = 0xec
# ****************************************
# POSITION REPRESENTATIONS
# ****************************************
# Position in geo-coordinates
POSITION_LAT_LON = 0x00
# 2D cartesian coordinates
POSITION_2D = 0x01
# Position in geo-coordinates with altitude
POSITION_LAT_LON_ALT = 0x02
# 3D cartesian coordinates
POSITION_3D = 0x03
# Position on road map
POSITION_ROADMAP = 0x04
# ****************************************
# DATA TYPES
# ****************************************
# Boundary Box (4 doubles)
TYPE_BOUNDINGBOX = 0x05
# Polygon (2*n doubles)
TYPE_POLYGON = 0x06
# unsigned byte
TYPE_UBYTE = 0x07
# signed byte
TYPE_BYTE = 0x08
# 32 bit signed integer
TYPE_INTEGER = 0x09
# float
TYPE_FLOAT = 0x0A
# double
TYPE_DOUBLE = 0x0B
# 8 bit ASCII string
TYPE_STRING = 0x0C
# list of traffic light phases
TYPE_TLPHASELIST = 0x0D
# list of strings
TYPE_STRINGLIST = 0x0E
# compound object
TYPE_COMPOUND = 0x0F
# color (four ubytes)
TYPE_COLOR = 0x11
# ****************************************
# RESULT TYPES
# ****************************************
# result type: Ok
RTYPE_OK = 0x00
# result type: not implemented
RTYPE_NOTIMPLEMENTED = 0x01
# result type: error
RTYPE_ERR = 0xFF
# return value for invalid queries (especially vehicle is not on the road)
INVALID_DOUBLE_VALUE = -1001.
# return value for invalid queries (especially vehicle is not on the road)
INVALID_INT_VALUE = -1
# ****************************************
# TRAFFIC LIGHT PHASES
# ****************************************
# red phase
TLPHASE_RED = 0x01
# yellow phase
TLPHASE_YELLOW = 0x02
# green phase
TLPHASE_GREEN = 0x03
# tl is blinking
TLPHASE_BLINKING = 0x04
# tl is off and not blinking
TLPHASE_NOSIGNAL = 0x05
# ****************************************
# DIFFERENT DISTANCE REQUESTS
# ****************************************
# air distance
REQUEST_AIRDIST = 0x00
# driving distance
REQUEST_DRIVINGDIST = 0x01
# ****************************************
# VEHICLE REMOVAL REASONS
# ****************************************
# vehicle started teleport
REMOVE_TELEPORT = 0x00
# vehicle removed while parking
REMOVE_PARKING = 0x01
# vehicle arrived
REMOVE_ARRIVED = 0x02
# vehicle was vaporized
REMOVE_VAPORIZED = 0x03
# vehicle finished route during teleport
REMOVE_TELEPORT_ARRIVED = 0x04
# ****************************************
# VARIABLE TYPES (for CMD_GET_*_VARIABLE)
# ****************************************
# list of instances' ids (get: all)
ID_LIST = 0x00
# count of instances (get: all)
ID_COUNT = 0x01
# subscribe object variables (get: all)
OBJECT_VARIABLES_SUBSCRIPTION = 0x02
# subscribe context variables (get: all)
SURROUNDING_VARIABLES_SUBSCRIPTION = 0x03
# last step vehicle number (get: induction loops, multi-entry/multi-exit detector, lanes, edges)
LAST_STEP_VEHICLE_NUMBER = 0x10
# last step vehicle number (get: induction loops, multi-entry/multi-exit detector, lanes, edges)
LAST_STEP_MEAN_SPEED = 0x11
# last step vehicle number (get: induction loops, multi-entry/multi-exit detector, lanes, edges)
LAST_STEP_VEHICLE_ID_LIST = 0x12
# last step occupancy (get: induction loops, lanes, edges)
LAST_STEP_OCCUPANCY = 0x13
# last step vehicle halting number (get: multi-entry/multi-exit detector, lanes, edges)
LAST_STEP_VEHICLE_HALTING_NUMBER = 0x14
# last step mean vehicle length (get: induction loops, lanes, edges)
LAST_STEP_LENGTH = 0x15
# last step time since last detection (get: induction loops)
LAST_STEP_TIME_SINCE_DETECTION = 0x16
# entry times
LAST_STEP_VEHICLE_DATA = 0x17
# traffic light states, encoded as rRgGyYoO tuple (get: traffic lights)
TL_RED_YELLOW_GREEN_STATE = 0x20
# index of the phase (set: traffic lights)
TL_PHASE_INDEX = 0x22
# traffic light program (set: traffic lights)
TL_PROGRAM = 0x23
# phase duration (set: traffic lights)
TL_PHASE_DURATION = 0x24
# controlled lanes (get: traffic lights)
TL_CONTROLLED_LANES = 0x26
# controlled links (get: traffic lights)
TL_CONTROLLED_LINKS = 0x27
# index of the current phase (get: traffic lights)
TL_CURRENT_PHASE = 0x28
# name of the current program (get: traffic lights)
TL_CURRENT_PROGRAM = 0x29
# controlled junctions (get: traffic lights)
TL_CONTROLLED_JUNCTIONS = 0x2a
# complete definition (get: traffic lights)
TL_COMPLETE_DEFINITION_RYG = 0x2b
# complete program (set: traffic lights)
TL_COMPLETE_PROGRAM_RYG = 0x2c
# assumed time to next switch (get: traffic lights)
TL_NEXT_SWITCH = 0x2d
# outgoing link number (get: lanes)
LANE_LINK_NUMBER = 0x30
# id of parent edge (get: lanes)
LANE_EDGE_ID = 0x31
# outgoing link definitions (get: lanes)
LANE_LINKS = 0x33
# list of allowed vehicle classes (get&set: lanes)
LANE_ALLOWED = 0x34
# list of not allowed vehicle classes (get&set: lanes)
LANE_DISALLOWED = 0x35
# speed (get: vehicle)
VAR_SPEED = 0x40
# maximum allowed/possible speed (get: vehicle types, lanes, set: edges, lanes)
VAR_MAXSPEED = 0x41
# position (2D) (get: vehicle, poi, set: poi)
VAR_POSITION = 0x42
# angle (get: vehicle)
VAR_ANGLE = 0x43
# angle (get: vehicle types, lanes, set: lanes)
VAR_LENGTH = 0x44
# color (get: vehicles, vehicle types, polygons, pois)
VAR_COLOR = 0x45
# max. acceleration (get: vehicle types)
VAR_ACCEL = 0x46
# max. deceleration (get: vehicle types)
VAR_DECEL = 0x47
# driver reaction time (get: vehicle types)
VAR_TAU = 0x48
# vehicle class (get: vehicle types)
VAR_VEHICLECLASS = 0x49
# emission class (get: vehicle types)
VAR_EMISSIONCLASS = 0x4a
# shape class (get: vehicle types)
VAR_SHAPECLASS = 0x4b
# minimum gap (get: vehicle types)
VAR_MINGAP = 0x4c
# width (get: vehicle types, lanes)
VAR_WIDTH = 0x4d
# shape (get: polygons)
VAR_SHAPE = 0x4e
# type id (get: vehicles, polygons, pois)
VAR_TYPE = 0x4f
# road id (get: vehicles)
VAR_ROAD_ID = 0x50
# lane id (get: vehicles)
VAR_LANE_ID = 0x51
# lane index (get: vehicles)
VAR_LANE_INDEX = 0x52
# route id (get & set: vehicles)
VAR_ROUTE_ID = 0x53
# edges (get: routes)
VAR_EDGES = 0x54
# filled? (get: polygons)
VAR_FILL = 0x55
# position (1D along lane) (get: vehicle)
VAR_LANEPOSITION = 0x56
# route (set: vehicles)
VAR_ROUTE = 0x57
# travel time information (get&set: vehicle)
VAR_EDGE_TRAVELTIME = 0x58
# effort information (get&set: vehicle)
VAR_EDGE_EFFORT = 0x59
# last step travel time (get: edge, lane)
VAR_CURRENT_TRAVELTIME = 0x5a
# signals state (get/set: vehicle)
VAR_SIGNALS = 0x5b
# new lane/position along (set: vehicle)
VAR_MOVE_TO = 0x5c
# driver imperfection (set: vehicle)
VAR_IMPERFECTION = 0x5d
# speed factor (set: vehicle)
VAR_SPEED_FACTOR = 0x5e
# speed deviation (set: vehicle)
VAR_SPEED_DEVIATION = 0x5f
# speed without TraCI influence (get: vehicle)
VAR_SPEED_WITHOUT_TRACI = 0xb1
# best lanes (get: vehicle)
VAR_BEST_LANES = 0xb2
# how speed is set (set: vehicle)
VAR_SPEEDSETMODE = 0xb3
# move vehicle, VTD version (set: vehicle)
VAR_MOVE_TO_VTD = 0xb4
# current CO2 emission of a node (get: vehicle, lane, edge)
VAR_CO2EMISSION = 0x60
# current CO emission of a node (get: vehicle, lane, edge)
VAR_COEMISSION = 0x61
# current HC emission of a node (get: vehicle, lane, edge)
VAR_HCEMISSION = 0x62
# current PMx emission of a node (get: vehicle, lane, edge)
VAR_PMXEMISSION = 0x63
# current NOx emission of a node (get: vehicle, lane, edge)
VAR_NOXEMISSION = 0x64
# current fuel consumption of a node (get: vehicle, lane, edge)
VAR_FUELCONSUMPTION = 0x65
# current noise emission of a node (get: vehicle, lane, edge)
VAR_NOISEEMISSION = 0x66
# current person number (get: vehicle)
VAR_PERSON_NUMBER = 0x67
VAR_BUS_STOP_WAITING = 0x67
# current time step (get: simulation)
VAR_TIME_STEP = 0x70
# number of loaded vehicles (get: simulation)
VAR_LOADED_VEHICLES_NUMBER = 0x71
# loaded vehicle ids (get: simulation)
VAR_LOADED_VEHICLES_IDS = 0x72
# number of departed vehicle (get: simulation)
VAR_DEPARTED_VEHICLES_NUMBER = 0x73
# departed vehicle ids (get: simulation)
VAR_DEPARTED_VEHICLES_IDS = 0x74
# number of vehicles starting to teleport (get: simulation)
VAR_TELEPORT_STARTING_VEHICLES_NUMBER = 0x75
# ids of vehicles starting to teleport (get: simulation)
VAR_TELEPORT_STARTING_VEHICLES_IDS = 0x76
# number of vehicles ending to teleport (get: simulation)
VAR_TELEPORT_ENDING_VEHICLES_NUMBER = 0x77
# ids of vehicles ending to teleport (get: simulation)
VAR_TELEPORT_ENDING_VEHICLES_IDS = 0x78
# number of arrived vehicles (get: simulation)
VAR_ARRIVED_VEHICLES_NUMBER = 0x79
# ids of arrived vehicles (get: simulation)
VAR_ARRIVED_VEHICLES_IDS = 0x7a
# delta t (get: simulation)
VAR_DELTA_T = 0x7b
# bounding box (get: simulation)
VAR_NET_BOUNDING_BOX = 0x7c
# minimum number of expected vehicles (get: simulation)
VAR_MIN_EXPECTED_VEHICLES = 0x7d
# add an instance (poi, polygon, vehicle, route)
ADD = 0x80
# remove an instance (poi, polygon)
REMOVE = 0x81
# convert coordinates
POSITION_CONVERSION = 0x82
# distance between points or vehicles
DISTANCE_REQUEST = 0x83
# force rerouting based on travel time (vehicles)
CMD_REROUTE_TRAVELTIME = 0x90
# force rerouting based on effort (vehicles)
CMD_REROUTE_EFFORT = 0x91
# validates current route (vehicles)
VAR_ROUTE_VALID = 0x92
# zoom
VAR_VIEW_ZOOM = 0xa0
# view position
VAR_VIEW_OFFSET = 0xa1
# view schema
VAR_VIEW_SCHEMA = 0xa2
# view by boundary
VAR_VIEW_BOUNDARY = 0xa3
# screenshot
VAR_SCREENSHOT = 0xa5
# track vehicle
VAR_TRACK_VEHICLE = 0xa6
| gpl-3.0 | -4,040,832,587,357,143,600 | 24.916031 | 98 | 0.729131 | false |
vensder/itmo_python | nasledovanye3.py | 1 | 1220 | import random
class Transport:
def __init__(self, name, speed = 0, wheel_count = 0, mass = 0, color = (0,0,0)):
self.name = name
self.speed = speed
self.wheel_count = wheel_count
self.mass = mass
self.color = color
self.pos = 0
def drive(self, time):
self.pos += self.speed * time
#return self.pos
def show_pos(self):
print(self.name, ':', self.pos)
class Car(Transport):
def __init__(self, name):
super().__init__(name, speed = 120, wheel_count = 4, mass = 2, color = (0,255,0))
class Tank(Transport):
def __init__(self, name):
super().__init__(name, speed = 120, wheel_count = 4, mass = 2, color = (0,255,0))
self.can_fire = True
class Airplane(Transport):
def __init__(self, name):
super().__init__(name, speed = 800, wheel_count = 22, mass = 100,
color = (250,250,250))
self.wings_count = 2
self.tail = True
machines = [
Car('car-1'),
Tank('tank-1'),
Airplane('plane-1'),
Car('car-2'),
Tank('tank-2'),
Airplane('plane-2'),
]
for m in machines:
if hasattr(m, 'fire'):
m.fire()
for m in machines:
m.show_pos()
for m in machines:
time = random.randint(1, 150)
m.drive(10)
print('-'*20)
for m in machines:
m.show_pos()
| gpl-3.0 | -2,407,283,530,971,650,000 | 16.941176 | 83 | 0.594262 | false |
meisamhe/GPLshared | Programming/MPI — AMath 483 583, Spring 2013 1.0 documentation_files/scoping.py | 1 | 1058 | # @include
x, y, z = 'global-x', 'global-y', 'global-z'
def basic_scoping():
print(x) # global-x
y = 'local-y'
global z
z = 'local-z'
basic_scoping()
print(x, y, z) # global-x global-y local-z
def inner_outer_scoping():
def inner1():
print(x) # outer-x
def inner2():
x = 'inner2-x'
print(x) # inner2-x
def inner3():
nonlocal x
x = 'inner3-x'
print(x) # inner3-x
x = "outer-x"
inner1(), inner2(), inner3()
print(x) # inner3-x
inner_outer_scoping()
print(x, y, z) # global-x global-y local-z
def outer_scope_error():
def inner():
try:
x = x + 321
except NameError:
print('Error: x is local, and so x + 1 is not defined yet')
x = 123
inner()
outer_scope_error() # prints 'Error: ...'
def outer_scope_array_no_error():
def inner():
x[0] = -x[0] # x[0] isn't a variable, it's resolved from outer x.
x = [314]
inner()
print(x[0]) # -314
outer_scope_array_no_error()
# @exclude
| gpl-3.0 | -7,386,681,169,818,829,000 | 18.592593 | 74 | 0.52741 | false |
the-duck/launcher-next | src/duck/launcher/XlibStuff.py | 1 | 1626 | #! /usr/bin/python
# -*- coding: utf-8 -*-
#########
#########
#Copyright (C) 2014-2015 Mark Spurgeon <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########
import Xlib
from Xlib import display as D
import sys
from PyQt4 import QtGui, QtCore
import Config
def fix_window(winId,left,right,top,bottom):
set = False
while set == False:
try:
window = reserveSpace(winId)
if window != None:
window.now(left,right,top,bottom)
set = True
else:
self.sleep(1)
except:
raise
class reserveSpace():
def __init__(self, winId):
self._display = D.Display()
self._win = self._display.create_resource_object('window', winId)
def now(self, left,right,top,bottom):
self._win.change_property(self._display.intern_atom('_NET_WM_STRUT'), self._display.intern_atom('CARDINAL'),32, [left,right,top,bottom])
self._display.sync()
| gpl-2.0 | 6,072,618,563,806,798,000 | 35.954545 | 138 | 0.636531 | false |
alefnula/perart | src/perart/forms.py | 1 | 1510 | __author__ = 'Viktor Kerkez <[email protected]>'
__contact__ = '[email protected]'
__date__ = '20 April 2010'
__copyright__ = 'Copyright (c) 2010 Viktor Kerkez'
import logging
from django import forms
from django.conf import settings
from google.appengine.api import mail
# perart imports
from perart import models
class PerArtForm(forms.ModelForm):
tinymce = True
class ProgramForm(PerArtForm):
class Meta:
model = models.Program
exclude = ['url']
class ProjectForm(PerArtForm):
class Meta:
model = models.Project
exclude = ['url']
class NewsForm(PerArtForm):
class Meta:
model = models.News
exclude = ['url']
class MenuForm(PerArtForm):
tinymce = False
class Meta:
model = models.Menu
exclude = ['url']
class GalleryForm(PerArtForm):
class Meta:
model = models.Gallery
exclude = ['url']
class NewsletterForm(forms.Form):
name = forms.CharField(required=True)
email = forms.EmailField(required=True)
def send_email(self):
try:
mail.send_mail(sender='[email protected]',
to=settings.PERART_EMAIL,
subject='"%(name)s" se prijavio za newsletter' % self.cleaned_data,
body='Ime: %(name)s\nEmail: %(email)s' % self.cleaned_data)
return True
except:
logging.exception('sending message failed')
return False
| gpl-3.0 | -3,145,847,521,591,563,000 | 22.984127 | 94 | 0.602649 | false |
OCA/purchase-workflow | purchase_landed_cost/wizard/import_invoice_line.py | 1 | 1945 | # Copyright 2014-2016 Tecnativa - Pedro M. Baeza
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3
from odoo import api, fields, models
class ImportInvoiceLine(models.TransientModel):
_name = "import.invoice.line.wizard"
_description = "Import supplier invoice line"
supplier = fields.Many2one(
comodel_name='res.partner', string='Supplier', required=True,
domain="[('supplier', '=', True)]")
invoice = fields.Many2one(
comodel_name='account.invoice', string="Invoice", required=True,
domain="[('partner_id', '=', supplier), ('type', '=', 'in_invoice'),"
"('state', 'in', ['open', 'paid'])]")
invoice_line = fields.Many2one(
comodel_name='account.invoice.line', string="Invoice line",
required=True, domain="[('invoice_id', '=', invoice)]")
expense_type = fields.Many2one(
comodel_name='purchase.expense.type', string='Expense type',
required=True)
@api.multi
def action_import_invoice_line(self):
self.ensure_one()
dist_id = self.env.context['active_id']
distribution = self.env['purchase.cost.distribution'].browse(dist_id)
currency_from = self.invoice_line.currency_id
amount = self.invoice_line.price_subtotal
currency_to = distribution.currency_id
company = distribution.company_id or self.env.user.company_id
cost_date = distribution.date or fields.Date.today()
expense_amount = currency_from._convert(amount, currency_to, company,
cost_date)
self.env['purchase.cost.distribution.expense'].create({
'distribution': dist_id,
'invoice_line': self.invoice_line.id,
'invoice_id': self.invoice_line.invoice_id.id,
'ref': self.invoice_line.name,
'expense_amount': expense_amount,
'type': self.expense_type.id,
})
| agpl-3.0 | -5,586,129,776,587,690,000 | 44.232558 | 77 | 0.614396 | false |
vitmod/enigma2-1 | skin.py | 1 | 33730 | from Tools.Profile import profile
profile("LOAD:ElementTree")
import xml.etree.cElementTree
import os
profile("LOAD:enigma_skin")
from enigma import eSize, ePoint, eRect, gFont, eWindow, eLabel, ePixmap, eWindowStyleManager, addFont, gRGB, eWindowStyleSkinned, getDesktop
from Components.config import ConfigSubsection, ConfigText, config
from Components.Sources.Source import ObsoleteSource
from Tools.Directories import resolveFilename, SCOPE_SKIN, SCOPE_SKIN_IMAGE, SCOPE_FONTS, SCOPE_ACTIVE_SKIN, SCOPE_ACTIVE_LCDSKIN, SCOPE_CURRENT_SKIN, SCOPE_CONFIG, fileExists
from Tools.Import import my_import
from Tools.LoadPixmap import LoadPixmap
from Components.RcModel import rc_model
colorNames = {}
# Predefined fonts, typically used in built-in screens and for components like
# the movie list and so on.
fonts = {
"Body": ("Regular", 18, 22, 16),
"ChoiceList": ("Regular", 20, 24, 18),
}
parameters = {}
def dump(x, i=0):
print " " * i + str(x)
	try:
		# cElementTree elements are directly iterable over their children
		for n in x:
			dump(n, i + 1)
	except TypeError:
		pass
class SkinError(Exception):
def __init__(self, message):
self.msg = message
def __str__(self):
return "{%s}: %s. Please contact the skin's author!" % (config.skin.primary_skin.value, self.msg)
class DisplaySkinError(Exception):
def __init__(self, message):
self.msg = message
def __str__(self):
return "{%s}: %s. Please contact the skin's author!" % (config.skin.display_skin.value, self.msg)
dom_skins = [ ]
def addSkin(name, scope = SCOPE_SKIN):
# read the skin
filename = resolveFilename(scope, name)
if fileExists(filename):
mpath = os.path.dirname(filename) + "/"
		try:
			file = open(filename, 'r')
			dom_skins.append((mpath, xml.etree.cElementTree.parse(file).getroot()))
			file.close()
except:
print "[SKIN ERROR] error in %s" % filename
return False
else:
return True
return False
# get the user's own skin_user_<skinname>.xml file, if it exists
def skin_user_skinname():
name = "skin_user_" + config.skin.primary_skin.value[:config.skin.primary_skin.value.rfind('/')] + ".xml"
filename = resolveFilename(SCOPE_CONFIG, name)
if fileExists(filename):
return name
return None
# We do our best to always select the "right" value.
# Skins are loaded in order of priority: the data of the skin with the
# highest priority, usually the user-provided skin, is applied last.
# Currently, loadSingleSkinData (colors, bordersets etc.) is applied
# skin-by-skin, in order of ascending priority.
# dom_skins keeps all skins in descending priority, so the first
# screen found will be used.
# example: loadSkin("nemesis_greenline/skin.xml")
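#
# A sketch of the effective precedence, mirroring the addSkin() calls
# below (first added = highest priority), assuming all files exist:
#   skin_user_<skinname>.xml / skin_user.xml   (user overrides)
#   skin_box.xml, skin_second_infobar.xml
#   display/<display skin>, skin_subtitles.xml
#   <primary skin>
#   skin_default.xml                           (built-in fallback)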
config.skin = ConfigSubsection()
DEFAULT_SKIN = "ViX-Night-HD/skin.xml"
if not fileExists(resolveFilename(SCOPE_SKIN, DEFAULT_SKIN)):
# in that case, fallback to Magic (which is an SD skin)
DEFAULT_SKIN = "skin.xml"
config.skin.primary_skin = ConfigText(default=DEFAULT_SKIN)
DEFAULT_DISPLAY_SKIN = "skin_display.xml"
config.skin.display_skin = ConfigText(default=DEFAULT_DISPLAY_SKIN)
profile("LoadSkin")
res = None
name = skin_user_skinname()
if name:
res = addSkin(name, SCOPE_CONFIG)
if not name or not res:
addSkin('skin_user.xml', SCOPE_CONFIG)
# some boxes lie about their dimensions
addSkin('skin_box.xml')
# add optional discrete second infobar
addSkin('skin_second_infobar.xml')
display_skin_id = 1
try:
if not addSkin(os.path.join('display', config.skin.display_skin.value)):
raise DisplaySkinError, "display skin not found"
except Exception, err:
print "SKIN ERROR:", err
skin = DEFAULT_DISPLAY_SKIN
if config.skin.display_skin.value == skin:
skin = 'skin_display.xml'
print "defaulting to standard display skin...", skin
config.skin.display_skin.value = skin
skin = os.path.join('display', skin)
addSkin(skin)
del skin
addSkin('skin_subtitles.xml')
try:
if not addSkin(config.skin.primary_skin.value):
raise SkinError, "primary skin not found"
except Exception, err:
print "SKIN ERROR:", err
skin = DEFAULT_SKIN
if config.skin.primary_skin.value == skin:
skin = 'skin.xml'
print "defaulting to standard skin...", skin
config.skin.primary_skin.value = skin
addSkin(skin)
del skin
addSkin('skin_default.xml')
profile("LoadSkinDefaultDone")
def parseCoordinate(s, e, size=0, font=None):
s = s.strip()
if s == "center":
if not size:
val = 0
else:
val = (e - size)/2
elif s == '*':
return None
else:
		if s[0] == 'e':
			val = e
			s = s[1:]
		elif s[0] == 'c':
			val = e/2
			s = s[1:]
		else:
			val = 0
		if s:
			if s[-1] == '%':
				val += e * int(s[:-1]) / 100
			elif s[-1] == 'w':
				val += fonts[font][3] * int(s[:-1])
			elif s[-1] == 'h':
val += fonts[font][2] * int(s[:-1])
else:
val += int(s)
if val < 0:
val = 0
return val
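# A few illustrative evaluations of the coordinate grammar above (no
# scaling is applied here; that happens in parsePosition/parseSize):
#   parseCoordinate("center", 720, 100)  -> 310  # (720 - 100) / 2
#   parseCoordinate("e-50", 720)         -> 670  # 'e' = full parent extent
#   parseCoordinate("c+25", 720)         -> 385  # 'c' = parent center
#   parseCoordinate("10%", 720)          -> 72   # percentage of the extent
#   parseCoordinate("2w", 0, 0, "Body")  -> 32   # char width of font alias "Body"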
def getParentSize(object, desktop):
size = eSize()
if object:
parent = object.getParent()
# For some widgets (e.g. ScrollLabel) the skin attributes are applied to
# a child widget, instead of to the widget itself. In that case, the parent
# we have here is not the real parent, but it is the main widget.
# We have to go one level higher to get the actual parent.
# We can detect this because the 'parent' will not have a size yet
# (the main widget's size will be calculated internally, as soon as the child
# widget has parsed the skin attributes)
if parent and parent.size().isEmpty():
parent = parent.getParent()
if parent:
size = parent.size()
elif desktop:
#widget has no parent, use desktop size instead for relative coordinates
size = desktop.size()
return size
def parsePosition(s, scale, object = None, desktop = None, size = None):
x, y = s.split(',')
parentsize = eSize()
if object and (x[0] in ('c', 'e') or y[0] in ('c', 'e')):
parentsize = getParentSize(object, desktop)
xval = parseCoordinate(x, parentsize.width(), size and size.width())
yval = parseCoordinate(y, parentsize.height(), size and size.height())
return ePoint(xval * scale[0][0] / scale[0][1], yval * scale[1][0] / scale[1][1])
def parseSize(s, scale, object = None, desktop = None):
x, y = s.split(',')
parentsize = eSize()
if object and (x[0] in ('c', 'e') or y[0] in ('c', 'e')):
parentsize = getParentSize(object, desktop)
xval = parseCoordinate(x, parentsize.width())
yval = parseCoordinate(y, parentsize.height())
return eSize(xval * scale[0][0] / scale[0][1], yval * scale[1][0] / scale[1][1])
def parseFont(s, scale):
try:
f = fonts[s]
name = f[0]
size = f[1]
except:
name, size = s.split(';')
return gFont(name, int(size) * scale[0][0] / scale[0][1])
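# e.g. parseFont("Regular;22", ((1,1),(1,1))) -> gFont("Regular", 22); a name
# such as "Body" is resolved through the global 'fonts' alias table instead.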
def parseColor(s):
if s[0] != '#':
try:
return colorNames[s]
except:
raise SkinError("color '%s' must be #aarrggbb or valid named color" % s)
return gRGB(int(s[1:], 0x10))
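# e.g. parseColor("#ff000000") -> gRGB(0xff000000), i.e. #aarrggbb with the
# alpha byte first; any other string must be a name defined in a skin's
# <colors> section (collected in colorNames).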
def collectAttributes(skinAttributes, node, context, skin_path_prefix=None, ignore=(), filenames=frozenset(("pixmap", "pointer", "seek_pointer", "backgroundPixmap", "selectionPixmap", "sliderPixmap", "scrollbarbackgroundPixmap"))):
# walk all attributes
size = None
pos = None
font = None
for attrib, value in node.items():
if attrib not in ignore:
if attrib in filenames:
pngfile = resolveFilename(SCOPE_ACTIVE_SKIN, value, path_prefix=skin_path_prefix)
if fileExists(resolveFilename(SCOPE_ACTIVE_LCDSKIN, value, path_prefix=skin_path_prefix)):
pngfile = resolveFilename(SCOPE_ACTIVE_LCDSKIN, value, path_prefix=skin_path_prefix)
value = pngfile
# Bit of a hack this, really. When a window has a flag (e.g. wfNoBorder)
# it needs to be set at least before the size is set, in order for the
# window dimensions to be calculated correctly in all situations.
# If wfNoBorder is applied after the size has been set, the window will fail to clear the title area.
# Similar situation for a scrollbar in a listbox; when the scrollbar setting is applied after
# the size, a scrollbar will not be shown until the selection moves for the first time
if attrib == 'size':
size = value.encode("utf-8")
elif attrib == 'position':
pos = value.encode("utf-8")
elif attrib == 'font':
font = value.encode("utf-8")
skinAttributes.append((attrib, font))
else:
skinAttributes.append((attrib, value.encode("utf-8")))
if pos is not None:
pos, size = context.parse(pos, size, font)
skinAttributes.append(('position', pos))
if size is not None:
skinAttributes.append(('size', size))
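# Illustrative result, assuming a node like
#   <widget name="mylabel" position="10,10" size="200,30" font="Regular;20" />
# parsed within a full-screen context (and 'name' in the ignore set):
# skinAttributes ends up as
#   [('font', 'Regular;20'), ('position', (10, 10)), ('size', (200, 30))]
# with position and size already resolved through context.parse().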
def morphRcImagePath(value):
if rc_model.rcIsDefault() is False:
		if 'rc.png' in value or 'oldrc.png' in value:
value = rc_model.getRcLocation() + 'rc.png'
return value
def loadPixmap(path, desktop):
option = path.find("#")
if option != -1:
path = path[:option]
ptr = LoadPixmap(morphRcImagePath(path), desktop)
if ptr is None:
raise SkinError("pixmap file %s not found!" % path)
return ptr
class AttributeParser:
def __init__(self, guiObject, desktop, scale=((1,1),(1,1))):
self.guiObject = guiObject
self.desktop = desktop
self.scaleTuple = scale
def applyOne(self, attrib, value):
try:
getattr(self, attrib)(value)
except AttributeError:
print "[SKIN] Attribute not implemented:", attrib, "value:", value
except SkinError, ex:
print "[SKIN] Error:", ex
def applyAll(self, attrs):
for attrib, value in attrs:
self.applyOne(attrib, value)
	def conditional(self, value):
		# evaluated in readSkin's process_screen; nothing to apply to the widget itself
		pass
def position(self, value):
if isinstance(value, tuple):
self.guiObject.move(ePoint(*value))
else:
self.guiObject.move(parsePosition(value, self.scaleTuple, self.guiObject, self.desktop, self.guiObject.csize()))
def size(self, value):
if isinstance(value, tuple):
self.guiObject.resize(eSize(*value))
else:
self.guiObject.resize(parseSize(value, self.scaleTuple, self.guiObject, self.desktop))
def animationPaused(self, value):
pass
def animationMode(self, value):
self.guiObject.setAnimationMode(
{ "disable": 0x00,
"off": 0x00,
"offshow": 0x10,
"offhide": 0x01,
"onshow": 0x01,
"onhide": 0x10,
}[value])
def title(self, value):
self.guiObject.setTitle(_(value))
def text(self, value):
self.guiObject.setText(_(value))
def font(self, value):
self.guiObject.setFont(parseFont(value, self.scaleTuple))
def zPosition(self, value):
self.guiObject.setZPosition(int(value))
def itemHeight(self, value):
self.guiObject.setItemHeight(int(value))
def pixmap(self, value):
ptr = loadPixmap(value, self.desktop)
self.guiObject.setPixmap(ptr)
def backgroundPixmap(self, value):
ptr = loadPixmap(value, self.desktop)
self.guiObject.setBackgroundPicture(ptr)
def selectionPixmap(self, value):
ptr = loadPixmap(value, self.desktop)
self.guiObject.setSelectionPicture(ptr)
def sliderPixmap(self, value):
ptr = loadPixmap(value, self.desktop)
self.guiObject.setSliderPicture(ptr)
def scrollbarbackgroundPixmap(self, value):
ptr = loadPixmap(value, self.desktop)
self.guiObject.setScrollbarBackgroundPicture(ptr)
def alphatest(self, value):
self.guiObject.setAlphatest(
{ "on": 1,
"off": 0,
"blend": 2,
}[value])
	def scale(self, value):
		# the mere presence of the attribute enables scaling; the value is not used
		self.guiObject.setScale(1)
def orientation(self, value): # used by eSlider
try:
self.guiObject.setOrientation(*
{ "orVertical": (self.guiObject.orVertical, False),
"orTopToBottom": (self.guiObject.orVertical, False),
"orBottomToTop": (self.guiObject.orVertical, True),
"orHorizontal": (self.guiObject.orHorizontal, False),
"orLeftToRight": (self.guiObject.orHorizontal, False),
"orRightToLeft": (self.guiObject.orHorizontal, True),
}[value])
except KeyError:
print "oprientation must be either orVertical or orHorizontal!, not %s. Please contact the skin's author!" % value
def valign(self, value):
try:
self.guiObject.setVAlign(
{ "top": self.guiObject.alignTop,
"center": self.guiObject.alignCenter,
"bottom": self.guiObject.alignBottom
}[value])
except KeyError:
print "valign must be either top, center or bottom!, not %s. Please contact the skin's author!" % value
def halign(self, value):
try:
self.guiObject.setHAlign(
{ "left": self.guiObject.alignLeft,
"center": self.guiObject.alignCenter,
"right": self.guiObject.alignRight,
"block": self.guiObject.alignBlock
}[value])
except KeyError:
print "halign must be either left, center, right or block!, not %s. Please contact the skin's author!" % value
def textOffset(self, value):
x, y = value.split(',')
self.guiObject.setTextOffset(ePoint(int(x) * self.scaleTuple[0][0] / self.scaleTuple[0][1], int(y) * self.scaleTuple[1][0] / self.scaleTuple[1][1]))
def flags(self, value):
flags = value.split(',')
for f in flags:
try:
fv = eWindow.__dict__[f]
self.guiObject.setFlag(fv)
except KeyError:
print "illegal flag %s!" % f
def backgroundColor(self, value):
self.guiObject.setBackgroundColor(parseColor(value))
def backgroundColorSelected(self, value):
self.guiObject.setBackgroundColorSelected(parseColor(value))
def foregroundColor(self, value):
self.guiObject.setForegroundColor(parseColor(value))
def foregroundColorSelected(self, value):
self.guiObject.setForegroundColorSelected(parseColor(value))
def foregroundNotCrypted(self, value):
self.guiObject.setForegroundColor(parseColor(value))
def backgroundNotCrypted(self, value):
self.guiObject.setBackgroundColor(parseColor(value))
def foregroundCrypted(self, value):
self.guiObject.setForegroundColor(parseColor(value))
def backgroundCrypted(self, value):
self.guiObject.setBackgroundColor(parseColor(value))
def foregroundEncrypted(self, value):
self.guiObject.setForegroundColor(parseColor(value))
def backgroundEncrypted(self, value):
self.guiObject.setBackgroundColor(parseColor(value))
def shadowColor(self, value):
self.guiObject.setShadowColor(parseColor(value))
def selectionDisabled(self, value):
self.guiObject.setSelectionEnable(0)
def transparent(self, value):
self.guiObject.setTransparent(int(value))
def borderColor(self, value):
self.guiObject.setBorderColor(parseColor(value))
def borderWidth(self, value):
self.guiObject.setBorderWidth(int(value))
def scrollbarMode(self, value):
		# valid values: showOnDemand, showAlways, showNever, showLeft
		self.guiObject.setScrollbarMode(getattr(self.guiObject, value))
def enableWrapAround(self, value):
self.guiObject.setWrapAround(True)
def pointer(self, value):
(name, pos) = value.split(':')
pos = parsePosition(pos, self.scaleTuple)
ptr = loadPixmap(name, self.desktop)
self.guiObject.setPointer(0, ptr, pos)
def seek_pointer(self, value):
(name, pos) = value.split(':')
pos = parsePosition(pos, self.scaleTuple)
ptr = loadPixmap(name, self.desktop)
self.guiObject.setPointer(1, ptr, pos)
def shadowOffset(self, value):
self.guiObject.setShadowOffset(parsePosition(value, self.scaleTuple))
def noWrap(self, value):
self.guiObject.setNoWrap(1)
def applySingleAttribute(guiObject, desktop, attrib, value, scale = ((1,1),(1,1))):
# Someone still using applySingleAttribute?
AttributeParser(guiObject, desktop, scale).applyOne(attrib, value)
def applyAllAttributes(guiObject, desktop, attributes, scale):
AttributeParser(guiObject, desktop, scale).applyAll(attributes)
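# Hypothetical usage by a GUI component once its skinAttributes have been
# collected (this is how screens apply their skin during instantiation):
#   applyAllAttributes(guiObject, desktop, widget.skinAttributes, ((1,1),(1,1)))
# Attribute names without a matching parser method are reported and skipped.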
def loadSingleSkinData(desktop, skin, path_prefix):
"""loads skin data like colors, windowstyle etc."""
assert skin.tag == "skin", "root element in skin must be 'skin'!"
for c in skin.findall("output"):
id = c.attrib.get('id')
if id:
id = int(id)
else:
id = 0
if id == 0: # framebuffer
for res in c.findall("resolution"):
get_attr = res.attrib.get
xres = get_attr("xres")
if xres:
xres = int(xres)
else:
xres = 720
yres = get_attr("yres")
if yres:
yres = int(yres)
else:
yres = 576
bpp = get_attr("bpp")
if bpp:
bpp = int(bpp)
else:
bpp = 32
#print "Resolution:", xres,yres,bpp
from enigma import gMainDC
gMainDC.getInstance().setResolution(xres, yres)
desktop.resize(eSize(xres, yres))
if bpp != 32:
# load palette (not yet implemented)
pass
for skininclude in skin.findall("include"):
filename = skininclude.attrib.get("filename")
if filename:
skinfile = resolveFilename(SCOPE_ACTIVE_SKIN, filename, path_prefix=path_prefix)
if not fileExists(skinfile):
skinfile = resolveFilename(SCOPE_SKIN_IMAGE, filename, path_prefix=path_prefix)
if fileExists(skinfile):
print "[SKIN] loading include:", skinfile
loadSkin(skinfile)
for c in skin.findall("colors"):
for color in c.findall("color"):
get_attr = color.attrib.get
name = get_attr("name")
color = get_attr("value")
if name and color:
colorNames[name] = parseColor(color)
#print "Color:", name, color
else:
raise SkinError("need color and name, got %s %s" % (name, color))
for c in skin.findall("fonts"):
for font in c.findall("font"):
get_attr = font.attrib.get
filename = get_attr("filename", "<NONAME>")
name = get_attr("name", "Regular")
scale = get_attr("scale")
if scale:
scale = int(scale)
else:
scale = 100
is_replacement = get_attr("replacement") and True or False
render = get_attr("render")
if render:
render = int(render)
else:
render = 0
resolved_font = resolveFilename(SCOPE_FONTS, filename, path_prefix=path_prefix)
			if not fileExists(resolved_font): # if the font is not available, look in the current skin path
resolved_font = resolveFilename(SCOPE_ACTIVE_SKIN, filename)
if fileExists(resolveFilename(SCOPE_CURRENT_SKIN, filename)):
resolved_font = resolveFilename(SCOPE_CURRENT_SKIN, filename)
elif fileExists(resolveFilename(SCOPE_ACTIVE_LCDSKIN, filename)):
resolved_font = resolveFilename(SCOPE_ACTIVE_LCDSKIN, filename)
addFont(resolved_font, name, scale, is_replacement, render)
#print "Font: ", resolved_font, name, scale, is_replacement
for alias in c.findall("alias"):
get = alias.attrib.get
try:
name = get("name")
font = get("font")
size = int(get("size"))
height = int(get("height", size)) # to be calculated some day
width = int(get("width", size))
global fonts
fonts[name] = (font, size, height, width)
except Exception, ex:
print "[SKIN] bad font alias", ex
for c in skin.findall("parameters"):
for parameter in c.findall("parameter"):
get = parameter.attrib.get
try:
name = get("name")
value = get("value")
parameters[name] = map(int, value.split(","))
except Exception, ex:
print "[SKIN] bad parameter", ex
for c in skin.findall("subtitles"):
from enigma import eSubtitleWidget
scale = ((1,1),(1,1))
for substyle in c.findall("sub"):
get_attr = substyle.attrib.get
font = parseFont(get_attr("font"), scale)
col = get_attr("foregroundColor")
if col:
foregroundColor = parseColor(col)
haveColor = 1
else:
foregroundColor = gRGB(0xFFFFFF)
haveColor = 0
col = get_attr("borderColor")
if col:
borderColor = parseColor(col)
else:
borderColor = gRGB(0)
borderwidth = get_attr("borderWidth")
if borderwidth is None:
# default: use a subtitle border
borderWidth = 3
else:
borderWidth = int(borderwidth)
face = eSubtitleWidget.__dict__[get_attr("name")]
eSubtitleWidget.setFontStyle(face, font, haveColor, foregroundColor, borderColor, borderWidth)
for windowstyle in skin.findall("windowstyle"):
style = eWindowStyleSkinned()
style_id = windowstyle.attrib.get("id")
if style_id:
style_id = int(style_id)
else:
style_id = 0
# defaults
font = gFont("Regular", 20)
offset = eSize(20, 5)
for title in windowstyle.findall("title"):
get_attr = title.attrib.get
offset = parseSize(get_attr("offset"), ((1,1),(1,1)))
font = parseFont(get_attr("font"), ((1,1),(1,1)))
style.setTitleFont(font)
style.setTitleOffset(offset)
#print " ", font, offset
for borderset in windowstyle.findall("borderset"):
bsName = str(borderset.attrib.get("name"))
for pixmap in borderset.findall("pixmap"):
get_attr = pixmap.attrib.get
bpName = get_attr("pos")
filename = get_attr("filename")
if filename and bpName:
pngfile = resolveFilename(SCOPE_ACTIVE_SKIN, filename, path_prefix=path_prefix)
if fileExists(resolveFilename(SCOPE_SKIN_IMAGE, filename, path_prefix=path_prefix)):
pngfile = resolveFilename(SCOPE_SKIN_IMAGE, filename, path_prefix=path_prefix)
png = loadPixmap(pngfile, desktop)
style.setPixmap(eWindowStyleSkinned.__dict__[bsName], eWindowStyleSkinned.__dict__[bpName], png)
#print " borderset:", bpName, filename
for color in windowstyle.findall("color"):
get_attr = color.attrib.get
colorType = get_attr("name")
color = parseColor(get_attr("color"))
try:
style.setColor(eWindowStyleSkinned.__dict__["col" + colorType], color)
except:
raise SkinError("Unknown color %s" % colorType)
x = eWindowStyleManager.getInstance()
x.setStyle(style_id, style)
for margin in skin.findall("margin"):
style_id = margin.attrib.get("id")
if style_id:
style_id = int(style_id)
else:
style_id = 0
r = eRect(0,0,0,0)
v = margin.attrib.get("left")
if v:
r.setLeft(int(v))
v = margin.attrib.get("top")
if v:
r.setTop(int(v))
v = margin.attrib.get("right")
if v:
r.setRight(int(v))
v = margin.attrib.get("bottom")
if v:
r.setBottom(int(v))
# the "desktop" parameter is hardcoded to the UI screen, so we must ask
# for the one that this actually applies to.
getDesktop(style_id).setMargins(r)
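# e.g. a skin can reserve a border around the whole display with
#   <margin id="0" left="10" top="5" right="10" bottom="5" />
# where 'id' selects the desktop these margins apply to.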
dom_screens = {}
def loadSkin(name, scope = SCOPE_SKIN):
# Now a utility for plugins to add skin data to the screens
global dom_screens, display_skin_id
filename = resolveFilename(scope, name)
if fileExists(filename):
path = os.path.dirname(filename) + "/"
file = open(filename, 'r')
for elem in xml.etree.cElementTree.parse(file).getroot():
if elem.tag == 'screen':
name = elem.attrib.get('name', None)
if name:
sid = elem.attrib.get('id', None)
if sid and (sid != display_skin_id):
# not for this display
elem.clear()
continue
if name in dom_screens:
# Clear old versions, save memory
dom_screens[name][0].clear()
dom_screens[name] = (elem, path)
else:
elem.clear()
else:
elem.clear()
file.close()
def loadSkinData(desktop):
# Kinda hackish, but this is called once by mytest.py
global dom_skins
skins = dom_skins[:]
skins.reverse()
for (path, dom_skin) in skins:
loadSingleSkinData(desktop, dom_skin, path)
for elem in dom_skin:
if elem.tag == 'screen':
name = elem.attrib.get('name', None)
if name:
sid = elem.attrib.get('id', None)
if sid and (sid != display_skin_id):
# not for this display
elem.clear()
continue
if name in dom_screens:
# Kill old versions, save memory
dom_screens[name][0].clear()
dom_screens[name] = (elem, path)
else:
# without name, it's useless!
elem.clear()
else:
# non-screen element, no need for it any longer
elem.clear()
# no longer needed, we know where the screens are now.
del dom_skins
class additionalWidget:
def __init__(self):
pass
# Class that makes a tuple look like something else. Some plugins just assume
# that size is a string and try to parse it. This class makes that work.
class SizeTuple(tuple):
def split(self, *args):
return str(self[0]), str(self[1])
def strip(self, *args):
return '%s,%s' % self
def __str__(self):
return '%s,%s' % self
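# Quick illustration: s = SizeTuple((200, 30)); s.split() -> ('200', '30');
# str(s) -> '200,30'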
class SkinContext:
def __init__(self, parent=None, pos=None, size=None, font=None):
if parent is not None:
if pos is not None:
pos, size = parent.parse(pos, size, font)
self.x, self.y = pos
self.w, self.h = size
else:
self.x = None
self.y = None
self.w = None
self.h = None
def __str__(self):
return "Context (%s,%s)+(%s,%s) " % (self.x, self.y, self.w, self.h)
def parse(self, pos, size, font):
if pos == "fill":
pos = (self.x, self.y)
size = (self.w, self.h)
self.w = 0
self.h = 0
else:
w,h = size.split(',')
w = parseCoordinate(w, self.w, 0, font)
h = parseCoordinate(h, self.h, 0, font)
if pos == "bottom":
pos = (self.x, self.y + self.h - h)
size = (self.w, h)
self.h -= h
elif pos == "top":
pos = (self.x, self.y)
size = (self.w, h)
self.h -= h
self.y += h
elif pos == "left":
pos = (self.x, self.y)
size = (w, self.h)
self.x += w
self.w -= w
elif pos == "right":
pos = (self.x + self.w - w, self.y)
size = (w, self.h)
self.w -= w
else:
size = (w, h)
pos = pos.split(',')
pos = (self.x + parseCoordinate(pos[0], self.w, size[0], font), self.y + parseCoordinate(pos[1], self.h, size[1], font))
return SizeTuple(pos), SizeTuple(size)
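# Illustrative layout flow: given a context covering (0,0)+(720,576),
#   parse("top", "*,60", None)   -> pos (0,0),  size (720,60); 60px taken from the top
#   parse("left", "100,*", None) -> pos (0,60), size (100,516); 100px taken from the left
# so each successive panel carves its space out of the remaining area.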
class SkinContextStack(SkinContext):
# A context that stacks things instead of aligning them
def parse(self, pos, size, font):
if pos == "fill":
pos = (self.x, self.y)
size = (self.w, self.h)
else:
w,h = size.split(',')
w = parseCoordinate(w, self.w, 0, font)
h = parseCoordinate(h, self.h, 0, font)
if pos == "bottom":
pos = (self.x, self.y + self.h - h)
size = (self.w, h)
elif pos == "top":
pos = (self.x, self.y)
size = (self.w, h)
elif pos == "left":
pos = (self.x, self.y)
size = (w, self.h)
elif pos == "right":
pos = (self.x + self.w - w, self.y)
size = (w, self.h)
else:
size = (w, h)
pos = pos.split(',')
pos = (self.x + parseCoordinate(pos[0], self.w, size[0], font), self.y + parseCoordinate(pos[1], self.h, size[1], font))
return SizeTuple(pos), SizeTuple(size)
def readSkin(screen, skin, names, desktop):
if not isinstance(names, list):
names = [names]
# try all skins, first existing one have priority
global dom_screens
for n in names:
myscreen, path = dom_screens.get(n, (None,None))
if myscreen is not None:
# use this name for debug output
name = n
break
else:
name = "<embedded-in-'%s'>" % screen.__class__.__name__
# otherwise try embedded skin
if myscreen is None:
myscreen = getattr(screen, "parsedSkin", None)
# try uncompiled embedded skin
if myscreen is None and getattr(screen, "skin", None):
skin = screen.skin
print "[SKIN] Parsing embedded skin", name
if isinstance(skin, tuple):
for s in skin:
candidate = xml.etree.cElementTree.fromstring(s)
if candidate.tag == 'screen':
sid = candidate.attrib.get('id', None)
if (not sid) or (int(sid) == display_skin_id):
myscreen = candidate
break
else:
print "[SKIN] Hey, no suitable screen!"
else:
myscreen = xml.etree.cElementTree.fromstring(skin)
if myscreen:
screen.parsedSkin = myscreen
if myscreen is None:
print "[SKIN] No skin to read..."
myscreen = screen.parsedSkin = xml.etree.cElementTree.fromstring("<screen></screen>")
screen.skinAttributes = [ ]
skin_path_prefix = getattr(screen, "skin_path", path)
context = SkinContextStack()
s = desktop.bounds()
context.x = s.left()
context.y = s.top()
context.w = s.width()
context.h = s.height()
del s
collectAttributes(screen.skinAttributes, myscreen, context, skin_path_prefix, ignore=("name",))
context = SkinContext(context, myscreen.attrib.get('position'), myscreen.attrib.get('size'))
screen.additionalWidgets = [ ]
screen.renderer = [ ]
visited_components = set()
# now walk all widgets and stuff
def process_none(widget, context):
pass
def process_widget(widget, context):
get_attr = widget.attrib.get
# ok, we either have 1:1-mapped widgets ('old style'), or 1:n-mapped
# widgets (source->renderer).
wname = get_attr('name')
wsource = get_attr('source')
if wname is None and wsource is None:
print "widget has no name and no source!"
return
if wname:
#print "Widget name=", wname
visited_components.add(wname)
# get corresponding 'gui' object
try:
attributes = screen[wname].skinAttributes = [ ]
except:
raise SkinError("component with name '" + wname + "' was not found in skin of screen '" + name + "'!")
# assert screen[wname] is not Source
collectAttributes(attributes, widget, context, skin_path_prefix, ignore=('name',))
elif wsource:
# get corresponding source
#print "Widget source=", wsource
while True: # until we found a non-obsolete source
				# parse our current "wsource", which might specify a "related screen" before the dot,
# for example to reference a parent, global or session-global screen.
scr = screen
# resolve all path components
path = wsource.split('.')
while len(path) > 1:
scr = screen.getRelatedScreen(path[0])
if scr is None:
#print wsource
#print name
raise SkinError("specified related screen '" + wsource + "' was not found in screen '" + name + "'!")
path = path[1:]
# resolve the source.
source = scr.get(path[0])
if isinstance(source, ObsoleteSource):
# however, if we found an "obsolete source", issue warning, and resolve the real source.
print "WARNING: SKIN '%s' USES OBSOLETE SOURCE '%s', USE '%s' INSTEAD!" % (name, wsource, source.new_source)
print "OBSOLETE SOURCE WILL BE REMOVED %s, PLEASE UPDATE!" % source.removal_date
if source.description:
print source.description
wsource = source.new_source
else:
# otherwise, use that source.
break
if source is None:
raise SkinError("source '" + wsource + "' was not found in screen '" + name + "'!")
wrender = get_attr('render')
if not wrender:
raise SkinError("you must define a renderer with render= for source '%s'" % wsource)
for converter in widget.findall("convert"):
ctype = converter.get('type')
assert ctype, "'convert'-tag needs a 'type'-attribute"
#print "Converter:", ctype
try:
parms = converter.text.strip()
				except AttributeError:
parms = ""
#print "Params:", parms
converter_class = my_import('.'.join(("Components", "Converter", ctype))).__dict__.get(ctype)
c = None
for i in source.downstream_elements:
if isinstance(i, converter_class) and i.converter_arguments == parms:
c = i
if c is None:
c = converter_class(parms)
c.connect(source)
source = c
renderer_class = my_import('.'.join(("Components", "Renderer", wrender))).__dict__.get(wrender)
renderer = renderer_class() # instantiate renderer
renderer.connect(source) # connect to source
attributes = renderer.skinAttributes = [ ]
collectAttributes(attributes, widget, context, skin_path_prefix, ignore=('render', 'source'))
screen.renderer.append(renderer)
def process_applet(widget, context):
try:
codeText = widget.text.strip()
widgetType = widget.attrib.get('type')
code = compile(codeText, "skin applet", "exec")
except Exception, ex:
raise SkinError("applet failed to compile: " + str(ex))
if widgetType == "onLayoutFinish":
screen.onLayoutFinish.append(code)
else:
raise SkinError("applet type '%s' unknown!" % widgetType)
def process_elabel(widget, context):
w = additionalWidget()
w.widget = eLabel
w.skinAttributes = [ ]
collectAttributes(w.skinAttributes, widget, context, skin_path_prefix, ignore=('name',))
screen.additionalWidgets.append(w)
def process_epixmap(widget, context):
w = additionalWidget()
w.widget = ePixmap
w.skinAttributes = [ ]
collectAttributes(w.skinAttributes, widget, context, skin_path_prefix, ignore=('name',))
screen.additionalWidgets.append(w)
def process_screen(widget, context):
for w in widget.getchildren():
conditional = w.attrib.get('conditional')
if conditional and not [i for i in conditional.split(",") if i in screen.keys()]:
continue
p = processors.get(w.tag, process_none)
try:
p(w, context)
except SkinError, e:
print "[SKIN] SKIN ERROR in screen '%s' widget '%s':" % (name, w.tag), e
def process_panel(widget, context):
n = widget.attrib.get('name')
if n:
try:
s = dom_screens[n]
except KeyError:
print "[SKIN] Unable to find screen '%s' referred in screen '%s'" % (n, name)
else:
process_screen(s[0], context)
layout = widget.attrib.get('layout')
if layout == 'stack':
cc = SkinContextStack
else:
cc = SkinContext
try:
c = cc(context, widget.attrib.get('position'), widget.attrib.get('size'), widget.attrib.get('font'))
except Exception, ex:
raise SkinError("Failed to create skincontext (%s,%s,%s) in %s: %s" % (widget.attrib.get('position'), widget.attrib.get('size'), widget.attrib.get('font'), context, ex) )
process_screen(widget, c)
processors = {
None: process_none,
"widget": process_widget,
"applet": process_applet,
"eLabel": process_elabel,
"ePixmap": process_epixmap,
"panel": process_panel
}
try:
print "[SKIN] processing screen %s:" % name
context.x = 0 # reset offsets, all components are relative to screen
context.y = 0 # coordinates.
process_screen(myscreen, context)
except Exception, e:
print "[SKIN] SKIN ERROR in %s:" % name, e
from Components.GUIComponent import GUIComponent
	nonvisited_components = [x for x in set(screen.keys()) - visited_components if isinstance(screen[x], GUIComponent)]
assert not nonvisited_components, "the following components in %s don't have a skin entry: %s" % (name, ', '.join(nonvisited_components))
# This may look pointless, but it unbinds 'screen' from the nested scope. A better
# solution is to avoid the nested scope above and use the context object to pass
# things around.
screen = None
visited_components = None
| gpl-2.0 | 6,708,689,500,288,114,000 | 32.2643 | 231 | 0.678476 | false |
adsabs/adsabs-pyingest | pyingest/parsers/hstprop.py | 1 | 5181 | #!/usr/bin/env python
from past.utils import old_div
import sys
import math
import requests
class URLError(Exception):
pass
class RequestError(Exception):
pass
class DataError(Exception):
pass
class HSTParser(object):
# HSTParser will return a list of articles taken from a HST API
# (https://proper.stsci.edu/proper/adsProposalSearch/query)
def __init__(self):
self.errors = []
def get_batch(self, api_token, api_url, **kwargs):
get_header = {'apiKey': api_token, 'Accept': 'text/plain',
'Content-type': 'application/json'}
buff = requests.get(api_url, headers=get_header, params=kwargs).json()
return buff
def get_records(self, url, **kwargs):
if url.find('adsProposalSearch') == -1:
raise URLError("This parser is only for the HST adsProposalSearch search.")
# if not kwargs.has_key('api_key'):
if 'api_key' not in kwargs:
raise RequestError('No API key provided to query the HST API.')
token = kwargs['api_key']
del kwargs['api_key']
buff = {}
# Store the value of maxRecords, if this was set
maxrecs = kwargs.get('maxRecords', 200)
# First get 1 record to determine the total amount of records
kwargs['maxRecords'] = 1
# Do the first query
try:
batch = self.get_batch(token, url, **kwargs)
except Exception as err:
raise URLError("Request to HST blew up: %s" % err)
# How many records are there?
totrecs = batch['query']['total']
# Store the first batch of records
records = batch['programs']
# How often do we need to paginate to get them all?
num_paginates = int(math.ceil(old_div(totrecs, (1.0 * maxrecs))))
# If we run in test mode, do not paginate
if kwargs.get('test'):
num_paginates = 0
# We harvested the first record to get the total number of records,
# so we continue with the 2nd
offset = 1
kwargs['maxRecords'] = maxrecs
for i in range(num_paginates):
kwargs['offset'] = offset
try:
batch = self.get_batch(token, url, **kwargs)
except Exception as err:
raise URLError("Request to HST blew up: %s" % err)
records += batch['programs']
offset += maxrecs
return records
def is_complete(self, rec):
required_fields = ['bibstem', 'title', 'authorNames', 'date', 'link', 'comment', 'journalCode', 'affiliations', 'authorOrcidIdentifiers']
return all(elem in list(rec.keys()) for elem in required_fields)
def add_orcids(self, affs, orcids):
if len(affs) != len(orcids):
raise DataError('Affiliation and ORCID arrays have different lengths!')
afflist = []
for i in range(len(affs)):
if orcids[i]:
afflist.append('%s <ID system="ORCID">%s</ID>' % (affs[i], orcids[i].replace('http://orcid.org/', '')))
else:
afflist.append(affs[i])
return afflist
def parse(self, url, **kwargs):
        hst_props = []
# retrieve data from HST API
data = self.get_records(url, **kwargs)
# process the new records
for d in data:
if self.is_complete(d):
# The "journal field" is a composite from the "journalCode" and "comment" fields:
# 1. journalCode: expression of mission cycle ('HST Proposal. Cycle NN' or 'JWST Proposal. Cycle N')
# 2. comment: preformatted as 'HST Proposal#xxxx' or 'JWST Proposal#xxxx'
# What will go into the 'publication' field will have the form: HST Proposal. Cycle 26, ID. #15676
journal = "%s, ID. #%s" % (d['journalCode'], d['comment'].split('#')[-1])
# The ORCID information will have to be inserted into the affiliation information
try:
affils = self.add_orcids(d['affiliations'], d['authorOrcidIdentifiers'])
except DataError:
sys.stderr.write('Found misaligned affiliation/ORCID arrays: %s\n' % d['bibstem'])
self.errors.append('Found misaligned affiliation/ORCID arrays: %s' % d['bibstem'])
affils = d['affiliations']
hst_props.append({'bibcode': d['bibstem'],
'authors': d['authorNames'],
'affiliations': affils,
'title': d['title'],
'pubdate': d['date'],
'publication': journal,
                              'abstract': d.get('abstract'),  # 'abstract' is not a required field, so guard it
'properties': {'data': d['link']}})
else:
recid = d.get('comment') or d.get('bibstem')
sys.stderr.write('Found record with missing data: %s\n' % recid)
self.errors.append('Found record with missing data: %s' % recid)
continue
return hst_props
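# Minimal usage sketch (the token and query parameters below are placeholders,
# not values taken from this module):
#
#   parser = HSTParser()
#   records = parser.parse(
#       'https://proper.stsci.edu/proper/adsProposalSearch/query',
#       api_key='MY-SECRET-TOKEN', maxRecords=100)
#   if parser.errors:
#       print('some records were skipped or had misaligned data')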
| mit | 8,031,092,040,686,377,000 | 40.448 | 145 | 0.550087 | false |
radical-cybertools/radical.ensemblemd | src/radical/entk/execman/mock/resource_manager.py | 1 | 3425 | __copyright__ = 'Copyright 2017-2018, http://radical.rutgers.edu'
__author__ = 'Vivek Balasubramanian <[email protected]>'
__license__ = 'MIT'
# pylint: disable=unused-argument
from ..base.resource_manager import Base_ResourceManager
import radical.utils as ru
# ------------------------------------------------------------------------------
#
class ResourceManager(Base_ResourceManager):
'''
A resource manager takes the responsibility of placing resource requests on
    different, possibly multiple, DCIs. This ResourceManager mocks an
    implementation by doing nothing; it is only usable for testing.
:arguments:
:resource_desc: dictionary with details of the resource request and
access credentials of the user
:example: resource_desc = {
| 'resource' : 'xsede.stampede',
| 'walltime' : 120,
| 'cpus' : 64,
| 'project' : 'TG-abcxyz',
| 'queue' : 'abc', # optional
| 'access_schema' : 'ssh' # optional}
'''
# --------------------------------------------------------------------------
#
def __init__(self, resource_desc, sid, rts_config):
super(ResourceManager, self).__init__(resource_desc=resource_desc,
sid=sid,
rts='mock',
rts_config=rts_config)
# --------------------------------------------------------------------------
#
def get_resource_allocation_state(self):
'''
**Purpose**: get the state of the resource allocation
'''
try:
ru.raise_on(tag='resource_fail')
return 'Running'
        except Exception:
return 'Final'
# --------------------------------------------------------------------------
#
def get_completed_states(self):
'''
**Purpose**: test if a resource allocation was submitted
'''
return ['Final']
# --------------------------------------------------------------------------
#
def _validate_resource_desc(self):
'''
**Purpose**: validate the provided resource description
'''
return True
# --------------------------------------------------------------------------
#
def _populate(self):
'''
**Purpose**: evaluate attributes provided in the resource description
'''
return None
# --------------------------------------------------------------------------
#
def submit_resource_request(self, *args):
'''
        **Purpose**: Create a resource as per the provided resource description
'''
return None
# --------------------------------------------------------------------------
#
def get_rts_info(self):
return None
# --------------------------------------------------------------------------
#
def _terminate_resource_request(self):
'''
**Purpose**: Cancel the resource
'''
return None
# ------------------------------------------------------------------------------
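# Minimal usage sketch (assumptions: the session id 'test.0000' and an empty
# rts_config are placeholders; the resource description mirrors the class
# docstring example):
#
#   rdesc = {'resource': 'xsede.stampede',
#            'walltime': 120,
#            'cpus'    : 64,
#            'project' : 'TG-abcxyz'}
#   rmgr  = ResourceManager(rdesc, sid='test.0000', rts_config={})
#   rmgr.submit_resource_request()
#   state = rmgr.get_resource_allocation_state()   # 'Running' or 'Final'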
| mit | -5,283,086,850,979,547,000 | 29.309735 | 80 | 0.38219 | false |
pratheekms/nlp-class | unit_test_5/feature_functions.py | 1 | 8956 | '''
feature_functions.py
Implements the feature generation mechanism
Author: Anantharaman Narayana Iyer
Date: 21 Nov 2014
'''
from nltk import sent_tokenize, word_tokenize
import nltk
import json
import numpy
import pickle
import datetime
from MyMaxEnt import MyMaxEnt
phones = ["phone", "phones", "smartphone", "smartphones", "mobile"]
org_list = ["Google", "Samsung", "HTC", "Sony", "Apple", "Micromax"]
class FeatureFunctions(object):
def __init__(self, wmap, tag_list):
self.wmap = wmap
self.supported_tags = tag_list
self.flist = [self.f1, self.f2, self.f3, self.f4, self.f5, self.f6, self.f7, self.f8, self.f9, self.f10, self.f11, self.f12, self.f13]
return
def f1(self, h, tag):
if (tag == 'Version' and h[1] == 'OS'):
return 1
else:
return 0
def f2(self, h, tag):
if (tag == 'Version' and h[1] == 'Other'):
return 1
else:
return 0
def f3(self, h, tag):
if (tag == 'Phone' and h[1] == 'Other'):
return 1
else:
return 0
def f4(self, h, tag):
if (tag == 'Org' and h[1] == 'Other'):
return 1
else:
return 0
def f5(self, h, tag):
if (tag == 'Date' and h[1] == 'Other'):
return 1
else:
return 0
def f6(self, h, tag):
if (tag == 'Location' and h[1] == 'Other'):
return 1
else:
return 0
def f7(self, h, tag):
if (tag == 'Size' and h[1] == 'Other'):
return 1
else:
return 0
def f8(self, h, tag):
if (tag == 'App' and h[1] == 'Other'):
return 1
else:
return 0
def f9(self, h, tag):
if (tag == 'Family' and h[1] == 'Other'):
return 1
else:
return 0
def f10(self, h, tag):
if (tag == 'Family' and h[1] == 'Org'):
return 1
else:
return 0
def f11(self, h, tag):
if (tag == 'Price' and h[1] == 'Other'):
return 1
else:
return 0
def f12(self, h, tag):
if (tag == 'Phone' and h[1] == 'Org'):
return 1
else:
return 0
def f13(self, h, tag):
if (tag == 'Phone' and h[1] == 'OS'):
return 1
else:
return 0
def f14(self, h, tag):
if (tag == 'App' and h[1] == 'App'):
return 1
else:
return 0
def f15(self, h, tag):
if (tag == 'Price' and h[1] == 'Price'):
return 1
else:
return 0
def f16(self, h, tag):
if (tag == 'Version' and h[1] == 'OS'):
return 1
else:
return 0
def f17(self, h, tag):
if (tag == 'Version' and h[1] == 'OS'):
return 1
else:
return 0
def f18(self, h, tag):
if (tag == 'Family' and h[0] == 'Org'):
return 1
else:
return 0
def f19(self, h, tag):
if (tag == 'Model' and h[0] == 'Org'):
return 1
else:
return 0
def f20(self, h, tag):
if (tag == 'Other' and h[0] == 'Other'):
return 1
else:
return 0
def f21(self, h, tag):
if (tag == 'Other' and h[1] == 'Other'):
return 1
else:
return 0
def f22(self, h, tag):
if (tag == 'Version' and h[0] == 'Org'):
return 1
else:
return 0
def f23(self, h, tag):
if (tag == 'Other' and h[0] == 'Date'):
return 1
else:
return 0
def f24(self, h, tag):
if (tag == 'Other' and h[0] == 'Place'):
return 1
else:
return 0
def f25(self, h, tag):
if (tag == 'Size' and h[0] == 'Other'):
return 1
else:
return 0
def f26(self, h, tag):
if (tag == 'Price' and h[0] == 'Other'):
return 1
else:
return 0
def f27(self, h, tag):
if (tag == 'Location' and h[0] == 'Other'):
return 1
else:
return 0
def f28(self, h, tag):
if (tag == 'Price' and h[0] == 'Date'):
return 1
else:
return 0
def f29(self, h, tag):
if (tag == 'Model' and h[0] == 'Other'):
return 1
else:
return 0
def f30(self, h, tag):
if (tag == 'OS' and h[0] == 'Org'):
return 1
else:
return 0
def f31(self, h, tag):
if (tag == 'Other' and h[0] == 'OS'):
return 1
else:
return 0
def f32(self, h, tag):
if (tag == 'Place' and h[0] == 'Version'):
return 1
else:
return 0
def f33(self, h, tag):
if (tag == 'Price' and h[0] == 'Version'):
return 1
else:
return 0
def f34(self, h, tag):
if (tag == 'Family' and h[0] == 'Date'):
return 1
else:
return 0
def f35(self, h, tag):
if (tag == 'Size' and h[0] == 'Phone'):
return 1
else:
return 0
def evaluate(self, xi, tag):
feats = []
for f in self.flist:
feats.append(int(f(xi, tag)))
return feats
def build_history(data_list, supported_tags):
history_list = [] # list of all histories
words_map = {}
count = 0
for data in data_list: # data is the inputs entered by a given student
data1 = data['data']
for rec in data1:
updates = rec['updates']
sent = rec['sentence']
words = []
for i in range(len(updates)):
words.append(updates[i]['word'])
#------------------------------------------------------------------------------------------------
            # NOTE: the code below is a temporary hack to build the MaxEnt for just 2 tags - we will change this later
if (updates[i]['tag'] not in supported_tags):
updates[i]['tag'] = "Other"
#------------------------------------------------------------------------------------------------
words_map[count] = {'words': words, 'pos_tags': nltk.pos_tag(words)}
for i in range(len(updates)):
history = {}
history["i"] = i
if i == 0:
history["ta"] = "*" # special tag
history["tb"] = "*" # special tag
elif i == 1:
history["ta"] = "*" # special tag
history["tb"] = updates[i - 1]['tag']
else:
history["ta"] = updates[i - 2]['tag']
history["tb"] = updates[i - 1]['tag']
history["wn"] = count
history_list.append((history, updates[i]['tag'], ))
count += 1
return (history_list, words_map)
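# Shape of the returned pair (illustrative, values assumed):
#   history_list[k] == ({'i': 2, 'ta': '*', 'tb': 'Org', 'wn': 0}, 'Phone')
#   wmap[0]         == {'words': [...], 'pos_tags': [...]}   # one per sentence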
def test(clf, history_list):
result = []
for history in history_list:
mymap = wmap[history[0]["wn"]]
words = mymap['words']
tags = mymap['pos_tags']
index = history[0]["i"]
val = clf.classify(history[0])
result.append({'predicted': val, 'word': words[index], 'expected': history[1]})
return result
if __name__ == "__main__":
#----- REPLACE THESE PATHS FOR YOUR SYSTEM ---------------------
json_file = r"C:\home\ananth\research\pesit\nlp\ner\all_data.json"
pickle_file = r"C:\home\ananth\research\pesit\nlp\ner\all_data.p"
# ----------------------------------------------------------------
TRAIN = int(raw_input("Enter 1 for Train, 0 to use pickeled file: "))
supported_tags = ["Org", "OS", "Version", "Other"]
tag_set = {"Org": 0, "Other": 1}
dims = 9
trg_data_x = []
trg_data_y = []
trg_data = {'Org': [], 'Other': []}
data = json.loads(open(json_file).read())['root']
print "num stu = ", len(data)
(history_list, wmap) = build_history(data, supported_tags)
print "After build_history"
func_obj = FeatureFunctions(wmap, supported_tags)
clf = MyMaxEnt(history_list, func_obj, reg_lambda = 0.001, pic_file = pickle_file)
print clf.model
if TRAIN == 1:
clf.train()
result = test(clf, history_list[-500:])
for r in result:
print r['word'], r['predicted'], r['expected']
| mit | -8,602,308,351,966,660,000 | 26.252366 | 142 | 0.423515 | false |
PW-Sat2/PWSat2OBC | integration_tests/emulator/rtc.py | 1 | 1580 | from datetime import timedelta, datetime
import wx
from wx import xrc
from devices import RTCDevice
from .base import ModuleBase, bind
class RTCModule(ModuleBase):
GridPos = (1, 0)
def __init__(self, system):
self._system = system
self._rtc = system.rtc # type: RTCDevice
self.title = 'RTC'
self.grid_pos = (1, 2)
self.grid_span = (1, 1)
def load(self, res, parent):
self._panel = res.LoadPanel(parent, 'RTCModule')
self.bind_handlers()
self._time = xrc.XRCCTRL(self._panel, 'rtc_time')
def root(self):
return self._panel
def update(self):
t = self._rtc.response_time()
self._time.SetLabel('RTC time: \n' + t.strftime('%Y-%m-%d %H:%M:%S'))
@bind('rtc_start', wx.EVT_BUTTON)
def _on_start(self, evt):
self._rtc.start_running()
@bind('rtc_stop', wx.EVT_BUTTON)
def _on_stop(self, evt):
self._rtc.stop_running()
@bind('rtc_advance_5min', wx.EVT_BUTTON, args=(timedelta(minutes=5),))
def _on_advance(self, evt, interval):
self._rtc.advance_by(interval)
@bind('rtc_system_time', wx.EVT_BUTTON,)
def _on_use_system_time(self, evt):
self._rtc._current_time = datetime.now()
@bind('rtc_advance_value', wx.EVT_TEXT)
def _on_advance_time_value_changed(self, evt):
new_text = evt.EventObject.GetValue()
new_value = 1000
try:
new_value = int(new_text)
        except ValueError:
pass
self._rtc._advance_time_interval = timedelta(milliseconds=new_value)
| agpl-3.0 | 8,938,563,817,151,320,000 | 27.214286 | 77 | 0.593671 | false |
lxml/lxml | versioninfo.py | 1 | 2210 | import io
import os
import re
import sys
__LXML_VERSION = None
def version():
global __LXML_VERSION
if __LXML_VERSION is None:
with open(os.path.join(get_base_dir(), 'src', 'lxml', '__init__.py')) as f:
__LXML_VERSION = re.search(r'__version__\s*=\s*"([^"]+)"', f.read(250)).group(1)
assert __LXML_VERSION
return __LXML_VERSION
def branch_version():
return version()[:3]
def is_pre_release():
version_string = version()
return "a" in version_string or "b" in version_string
def dev_status():
_version = version()
if 'a' in _version:
return 'Development Status :: 3 - Alpha'
elif 'b' in _version or 'c' in _version:
return 'Development Status :: 4 - Beta'
else:
return 'Development Status :: 5 - Production/Stable'
def changes():
"""Extract part of changelog pertaining to version.
"""
_version = version()
with io.open(os.path.join(get_base_dir(), "CHANGES.txt"), 'r', encoding='utf8') as f:
lines = []
for line in f:
if line.startswith('====='):
if len(lines) > 1:
break
if lines:
lines.append(line)
elif line.startswith(_version):
lines.append(line)
return ''.join(lines[:-1])
def create_version_h():
"""Create lxml-version.h
"""
lxml_version = version()
# make sure we have a triple part version number
parts = lxml_version.split('-')
while parts[0].count('.') < 2:
parts[0] += '.0'
lxml_version = '-'.join(parts).replace('a', '.alpha').replace('b', '.beta')
file_path = os.path.join(get_base_dir(), 'src', 'lxml', 'includes', 'lxml-version.h')
# Avoid changing file timestamp if content didn't change.
if os.path.isfile(file_path):
with open(file_path, 'r') as version_h:
if ('"%s"' % lxml_version) in version_h.read(100):
return
with open(file_path, 'w') as version_h:
version_h.write('''\
#ifndef LXML_VERSION_STRING
#define LXML_VERSION_STRING "%s"
#endif
''' % lxml_version)
def get_base_dir():
return os.path.abspath(os.path.dirname(sys.argv[0]))
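# Typical use from a build script (illustrative; all helpers are defined above):
#   print('lxml %s (branch %s)' % (version(), branch_version()))
#   create_version_h()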
| bsd-3-clause | -7,693,295,235,261,800,000 | 26.283951 | 92 | 0.567873 | false |
exic/spade2 | spade/pubsub.py | 1 | 9884 | from Behaviour import MessageTemplate, OneShotBehaviour
from xmpp.protocol import *
from xmpp.simplexml import Node
import uuid
def gen_id():
return str(uuid.uuid4())
#def PubSubMessageTemplate():
# msgs = []
# for ns in (NS_PUBSUB, NS_PUBSUB_OWNER):
# msg = Iq()
# msg.addChild(name='pubsub', namespace=ns)
# msgs.append(msg)
# return reduce(lambda a,b: a | b, map(lambda msg: MessageTemplate(msg), msgs))
#class XMPPIdTemplate(MessageTemplate):
# def __init__(self, id):
# iq = Iq()
# iq.setID(id)
# MessageTemplate.__init__(self, iq)
#TODO: Implement node retrieval and discovery
class PubSub(object):
def __init__(self, agent): #, msgrecv):
self._client = agent.getAID().getName()
#self.msgrecv = msgrecv
self.myAgent = agent
self._server = agent.server
def _sendAndReceive(self, iq, getContents):
id = gen_id()
t = MessageTemplate(Iq(attrs={'id':id}))
iq.setID(id)
b = self._sendAndReceiveBehav(iq,getContents)
if self.myAgent._running:
self.myAgent.addBehaviour(b,t)
b.join()
else:
self.myAgent.runBehaviourOnce(b,t)
return b.result
class _sendAndReceiveBehav(OneShotBehaviour):
def __init__(self,iq,getContents):
OneShotBehaviour.__init__(self)
self.iq = iq
self.getContents = getContents
self.timeout = 15
self.result = (None,None)
def _process(self):
#print 'Sending ', str(self.iq)
self.myAgent.send(self.iq)
#Wait for the answer
msg = self._receive(block=True,timeout=self.timeout)
#print 'Received ', str(msg)
if msg is None:
#Timeout
self.result = ('error',['timeout'])
return
if msg['type'] == 'error':
errors = []
for error in msg.getTag('error').getChildren():
if error.getName() == 'text': continue
errors.append(error.getName())
self.result = ('error',errors)
return
if msg['type'] == 'result':
self.result = ('ok',self.getContents(msg))
return
self.result = ('error',['unknown'])
return
def publish(self, node, event=None):
"""
Publishes an item to a given node.
XXX: 'node' here is not an XML node, but the attribute for <publish>
@type node: string
@param node: The ID of the pubsub node to publish
@type event: Event
@param event: Content to publish
@rtype: (string , list[string])
@return: A tuple with the type of answer ('ok','error') and information
about the answer. In case of 'error', a list with the errors. In case of
        'ok', the node name and the id of the published item.
"""
iq = Iq(
typ='set',
queryNS=None,
attrs={},
frm=self._client
)
pubsub_node = Node(tag='pubsub', attrs={'xmlns':NS_PUBSUB})
publish_node = Node(tag='publish', attrs={'node':node})
item_node = Node(tag='item')
if event is not None:
item_node.addChild(node=event)
publish_node.addChild(node=item_node)
pubsub_node.addChild(node=publish_node)
iq.addChild(node=pubsub_node)
def getContents(msg):
node_publish = msg.getTag('pubsub').getTag('publish')
#XXX: Server implementation always returns the item id, but XEP-60 does
            # not require it
return [node_publish['node'],node_publish.getTag('item')['id']]
return self._sendAndReceive(iq, getContents)
def subscribe(self, node, server=None, jid=None):
"""
Subscribes to the selected node
@type node: string
        @param node: id of the node to subscribe to
@type server: string
@param server: PubSub server
@rtype: (string , list[string])
@return: A tuple with the type of answer ('ok','error') and information
about the answer. In case of 'error', a list with the errors. In case of
'ok', an empty list.
"""
if server is None:
server = self._server
if jid is None:
jid = self._client
iq = Iq(
typ='set',
queryNS=None,
attrs={},
frm=self._client,
to=server
)
pubsub_node = Node(tag='pubsub', attrs={'xmlns':NS_PUBSUB})
subscribe_node = Node(tag='subscribe', attrs={'node':node, 'jid':jid})
pubsub_node.addChild(node=subscribe_node)
iq.addChild(node=pubsub_node)
return self._sendAndReceive(iq, lambda msg: [])
def unsubscribe(self, node, server=None, jid=None):
"""
Unsubscribe from the selected node
@type node: string
        @param node: id of the node to unsubscribe from
@type server: string
@param server: PubSub server
@rtype: (string , list[string])
@return: A tuple with the type of answer ('ok','error') and information
about the answer. In case of 'error', a list with the errors. In case of
'ok' an empty list.
"""
if server is None:
server = self._server
if jid is None:
jid = self._client
iq = Iq(
typ='set',
queryNS=None,
attrs={},
frm=self._client,
to=server
)
pubsub_node = Node(tag='pubsub', attrs={'xmlns':NS_PUBSUB_OWNER})
unsubscribe_node = Node(tag='unsubscribe', attrs={'node':node, 'jid':jid})
pubsub_node.addChild(node=unsubscribe_node)
iq.addChild(node=pubsub_node)
return self._sendAndReceive(iq, lambda msg: [])
def createNode(self, node, server=None, type='leaf', parent=None, access=None):
"""
Creates a node with the specified parameters.
@type node: string
@param node: The ID of the node to create
@type server: string
@param server: PubSub server
@type type: string
@param type: Type of the node: 'leaf' or 'collection'
@type parent: string
@param parent: id of the parent node. None if parent is root
@type access: string
@param acccess: Access model of the node
@rtype: (string , list[string])
@return: A tuple with the type of answer ('ok','error') and information
about the answer. In case of 'error', a list with the errors. In case of
'ok' the name of the created node.
"""
#TODO: Add suport for node configuration (RECOMMENDED in XEP-60)
if server is None:
server = self._server
iq = Iq(
typ='set',
queryNS=None,
attrs={},
frm=self._client,
to=server
)
pubsub_node = Node(tag='pubsub', attrs={'xmlns':NS_PUBSUB})
create_node = Node(tag='create', attrs={} if node is None else {'node':node})
pubsub_node.addChild(node=create_node)
iq.addChild(node=pubsub_node)
if parent is not None or type=='collection' or access is not None:
field_nodes=[]
configure_node = Node(tag='configure')
field_nodes.append(DataField('FORM_TYPE', NS_PUBSUB+'#node_config','hidden'))
if parent is not None:
field_nodes.append(DataField('pubsub#collection',parent))
# <field var='pubsub#collection'><value>announcements</value></field>
if type == 'collection':
field_nodes.append(DataField('pubsub#node_type','collection'))
if access is not None:
field_nodes.append(DataField('pubsub#access_model',access))
x_node = DataForm(typ='submit',data=field_nodes)
configure_node.addChild(x_node)
pubsub_node.addChild(configure_node)
return self._sendAndReceive(iq, lambda msg:[msg.getTag('pubsub').getTag('create')['node']])
def createInstantNode(self, server=None, type='leaf', parent=None, access=None):
"""
Creates an instant node without a name. The server will generate id.
"""
if server is None:
server = self._server
        return self.createNode(None, server, type, parent, access)
def deleteNode(self, node, server=None):
"""
Deletes the selected node.
@type node: string
@param node: id of the node to delete
@type server: string
@param server: PubSub server
@rtype: (string , list[string])
@return: A tuple with the type of answer ('ok','error') and information
about the answer. In case of 'error', a list with the errors. In case of
'ok' an empty list.
"""
#TODO: A method to redirect the subscriptions to the node to another one COULD be implemented
if server is None:
server = self._server
iq = Iq(
typ='set',
queryNS=None,
attrs={},
frm=self._client,
to=server,
)
pubsub_node = Node(tag='pubsub', attrs={'xmlns':NS_PUBSUB_OWNER})
pubsub_node.addChild(name='delete', attrs={'node':node})
iq.addChild(node=pubsub_node)
return self._sendAndReceive(iq, lambda msg: [])
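# Minimal usage sketch (assumes a running SPADE agent 'agent' whose server
# offers a pubsub service; the node name and payload are placeholders):
#
#   ps = PubSub(agent)
#   status, info = ps.createNode('news', type='leaf')
#   if status == 'ok':
#       ps.publish('news', event=Node(tag='entry', payload=['hello']))
#       ps.subscribe('news')
#       ps.unsubscribe('news')
#       ps.deleteNode('news')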
| lgpl-2.1 | 4,256,735,416,115,895,300 | 31.620462 | 101 | 0.542493 | false |
amoin62/cs229 | youtube_sentiment/tags.py | 1 | 8165 | from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.svm import LinearSVC
from sklearn.naive_bayes import BernoulliNB
from sklearn.pipeline import Pipeline
from sklearn.linear_model import RidgeClassifier, LogisticRegression
from timeit import default_timer as timer
from sklearn import decomposition
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import random
import sys
def read_data(data, y_column, y_filter=None):
data_y = []
data_tags = []
for index, row in data.iterrows():
if y_filter is None or row[y_column] in y_filter:
tags = row.tags
if tags is not np.nan:
data_y.append(row[y_column])
data_tags.append(tags)
return data_tags, data_y
def fit_clf(X, Y, classifier, name, tfidf_flag=True, dimensions=None):
print('Fitting %s' % name)
start = timer()
tfidf = TfidfTransformer()
if dimensions is None and not tfidf_flag:
text_clf = Pipeline([('vect', CountVectorizer()), ('clf', classifier)])
elif dimensions is not None and not tfidf_flag:
svd = decomposition.TruncatedSVD(n_components=dimensions)
text_clf = Pipeline([('vect', CountVectorizer()), ('svd', svd), ('clf', classifier)])
elif dimensions is None and tfidf_flag:
text_clf = Pipeline([('vect', CountVectorizer()), ('tfidf', tfidf), ('clf', classifier)])
elif dimensions is not None and tfidf_flag:
svd = decomposition.TruncatedSVD(n_components=dimensions)
text_clf = Pipeline([('vect', CountVectorizer()), ('tfidf', tfidf), ('svd', svd), ('clf', classifier)])
text_clf.fit(X, Y)
end = timer()
print('Execution time = %f' % (end - start))
return text_clf
def predict_random(X, Y, clf):
predicted = clf.predict(X)
unique_y = list(set(Y))
rand_Y = [unique_y[random.randint(0, len(unique_y) - 1)] for i in range(len(Y))]
accuracy = np.mean(predicted == rand_Y)
return accuracy
def predict(X, Y, clf):
predicted = clf.predict(X)
accuracy = np.mean(predicted == Y)
return accuracy
def process_tags(tags_list):
for row in range(len(tags_list)):
tags_list[row] = tags_list[row].replace('|', ' ')
def predict_categories(train, dev, test, cat_filter=None):
res = {}
print('Number of categories used = %d' % cat_filter.size)
y_column = 'category_id'
train_tags, train_y = read_data(train, y_column, cat_filter)
process_tags(train_tags)
dev_tags, dev_y = read_data(dev, y_column, cat_filter)
process_tags(dev_tags)
test_tags, test_y = read_data(test, y_column, cat_filter)
process_tags(test_tags)
dimensions = None
tfidf = False
clf = fit_clf(train_tags, train_y, LogisticRegression(solver='newton-cg', C=1.0, multi_class='multinomial', tol=0.01, max_iter=30),
'LogisticRegression', tfidf_flag=tfidf, dimensions=dimensions)
train_accuracy = predict(train_tags, train_y, clf)
print ('LogisticRegression Train Accuracy = ', train_accuracy)
dev_accuracy = predict(dev_tags, dev_y, clf)
print ('LogisticRegression Dev Accuracy = ', dev_accuracy)
test_accuracy = predict(test_tags, test_y, clf)
print ('LogisticRegression Test Accuracy = ', test_accuracy)
res['LogisticRegression'] = [train_accuracy, dev_accuracy, test_accuracy]
clf = fit_clf(train_tags, train_y, RidgeClassifier(alpha=10), 'RidgeClassifier', tfidf_flag=tfidf, dimensions=dimensions)
train_accuracy = predict(train_tags, train_y, clf)
print ('RidgeClassifier Train Accuracy = ', train_accuracy)
dev_accuracy = predict(dev_tags, dev_y, clf)
print ('RidgeClassifier Dev Accuracy = ', dev_accuracy)
test_accuracy = predict(test_tags, test_y, clf)
print ('RidgeClassifier Test Accuracy = ', test_accuracy)
res['RidgeClassifier'] = [train_accuracy, dev_accuracy, test_accuracy]
clf = fit_clf(train_tags, train_y, BernoulliNB(), 'BernoulliNB', tfidf_flag=tfidf, dimensions=dimensions)
train_accuracy = predict(train_tags, train_y, clf)
print ('BernoulliNB Train Accuracy = ', train_accuracy)
dev_accuracy = predict(dev_tags, dev_y, clf)
print ('BernoulliNB Dev Accuracy = ', dev_accuracy)
test_accuracy = predict(test_tags, test_y, clf)
print ('BernoulliNB Test Accuracy = ', test_accuracy)
res['BernoulliNB'] = [train_accuracy, dev_accuracy, test_accuracy]
clf = fit_clf(train_tags, train_y, LinearSVC(random_state=0, C=.05), 'LinearSVC', tfidf_flag=tfidf, dimensions=dimensions)
train_accuracy = predict(train_tags, train_y, clf)
print ('LinearSVC Train Accuracy = ', train_accuracy)
dev_accuracy = predict(dev_tags, dev_y, clf)
print ('LinearSVC Dev Accuracy = ', dev_accuracy)
test_accuracy = predict(test_tags, test_y, clf)
print ('LinearSVC Test Accuracy = ', test_accuracy)
res['LinearSVC'] = [train_accuracy, dev_accuracy, test_accuracy]
return res
def get_categories(train, num_of_categories=None):
group_size = train.groupby('category_id').size().sort_values(ascending=False)
cat_num = group_size.size
print('Number of categories = %d' % cat_num)
num_of_categories = cat_num if num_of_categories is None else num_of_categories
num_of_categories = min(cat_num, num_of_categories)
categories = group_size.keys()[0:num_of_categories]
return cat_num, categories
def plot(X, Y_dict, title, file_name):
colors = ['green', 'blue', 'red', 'brown']
plt.figure()
counter = 0
for classifier in Y_dict:
plt.plot(X, Y_dict[classifier], color=colors[counter], label=classifier)
counter += 1
plt.xlabel('Number of categories')
plt.ylabel('Accuracy')
plt.title(title)
plt.legend()
plt.savefig(file_name)
def main():
fields = ['video_id', 'category_id', 'tags']
# train = pd.read_csv('../data/youtube/USvideos_clean.csv', encoding='utf8', error_bad_lines=False, usecols=fields)
# print('train csv read')
# dev = pd.read_csv('../data/youtube/GBvideos_clean_train.csv', encoding='utf8', error_bad_lines=False, usecols=fields)
# print('dev csv read')
# test = pd.read_csv('../data/youtube/GBvideos_clean_test.csv', encoding='utf8', error_bad_lines=False, usecols=fields)
# print('dev csv read')
train = pd.read_csv('../data/youtube/USvideos_clean_train.csv', encoding='utf8', error_bad_lines=False, usecols=fields)
print('train csv read')
dev = pd.read_csv('../data/youtube/USvideos_clean_dev.csv', encoding='utf8', error_bad_lines=False,
usecols=fields)
print('dev csv read')
test = pd.read_csv('../data/youtube/USvideos_clean_test.csv', encoding='utf8', error_bad_lines=False,
usecols=fields)
print('dev csv read')
train_acc_dict = {}
test_acc_dict = {}
cat_num_list = []
cat_num, _ = get_categories(train)
for i in range(cat_num, cat_num + 1):
#for i in range(2, 3):
cat_num_list.append(i)
_, cat_filter = get_categories(train, i)
acc_res = predict_categories(train, dev, test, cat_filter=cat_filter)
for classifier in acc_res:
train_acc = acc_res[classifier][0] # get train accuracy for this classifier
train_acc_list = train_acc_dict.get(classifier, [])
train_acc_list.append(train_acc)
train_acc_dict[classifier] = train_acc_list
test_acc = acc_res[classifier][2] # get test accuracy for this classifier
test_acc_list = test_acc_dict.get(classifier, [])
test_acc_list.append(test_acc)
test_acc_dict[classifier] = test_acc_list
#plot(cat_num_list, train_acc_dict, 'Train Accuracy vs. Number of Categories', 'cat-num-train-acc.png')
#plot(cat_num_list, test_acc_dict, 'Test Accuracy vs. Number of Categories', 'cat-num-test-acc.png')
if __name__ == '__main__':
main()
| gpl-3.0 | -6,587,001,173,086,228,000 | 40.973684 | 135 | 0.644213 | false |
xiaocong/remote-task-http-server | devices.py | 1 | 4107 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from bottle import Bottle, request, static_file, abort
import re
import time
import os
import subprocess
from io import BytesIO
try:
import PIL.Image as Image
except ImportError:
from PIL import Image
from jobs import lock
import adb
app = Bottle()
@app.get("/")
def devices():
result = {'android': []}
good_devices = adb.devices(status='good')
for se, name in adb.devices(status=request.params.get("status", "all")).items():
device = {'adb': {'serial': se, 'device': name}}
if se in good_devices:
props = adb.getprop(se)
device.update({
'product': {
'brand': props.get('ro.product.brand'),
'manufacturer': props.get('ro.product.manufacturer'),
'model': props.get('ro.product.model'),
'board': props.get('ro.product.board'),
'device': props.get('ro.product.device')
},
'locale': {
'language': props.get('ro.product.locale.language'),
'region': props.get('ro.product.locale.region')
},
'build': {
'fingerprint': props.get('ro.build.fingerprint'),
'type': props.get('ro.build.type'),
'date_utc': props.get('ro.build.date.utc'),
'display_id': props.get('ro.build.display.id'),
'id': props.get('ro.build.id'),
'version': {
'incremental': props.get('ro.build.version.incremental'),
'release': props.get('ro.build.version.release'),
'sdk': props.get('ro.build.version.sdk'),
'codename': props.get('ro.build.version.codename')
}
}
})
result['android'].append(device)
return result
@app.route("/<serial>/adb/<cmds:path>")
def adb_cmd(serial, cmds):
return adb.cmd(['-s', serial] + cmds.split("/"), timeout=request.params.get("timeout", 10))
def meminfo(serial):
result = {}
for line in adb.cmd(['-s', serial, 'shell', 'cat', '/proc/meminfo'])['stdout'].splitlines():
item = [i.strip() for i in line.split(':')]
if len(item) == 2:
values = item[1].split()
result[item[0]] = int(values[0])*1024 if len(values) == 2 and values[1] == 'kB' else int(values[0])
return result
def top(serial):
result = {"processes": []}
out = adb.cmd(['-s', serial, 'shell', 'top', '-n', '1'])['stdout']
m = re.search(r'User\s*(\d+)%,\s*System\s*(\d+)%,\s*IOW\s*(\d+)%,\s*IRQ\s*(\d+)%', out)
if m:
result["CPU"] = {"User": int(m.group(1))/100., "System": int(m.group(2))/100., "IOW": int(m.group(3))/100., "IRQ": int(m.group(4))/100.}
for item in re.findall(r'(\d+)\s+(\d+)\s+(\d+)%\s+(\w+)\s+(\d+)\s+(\d+)K\s+(\d+)K\s+(fg|bg)?\s+(\S+)\s+(\S+)', out):
pid, pr, cpu, s, thr, vss, rss, pcy, uid, name = item
result["processes"].append({"pid": int(pid), "pr": int(pr), "cpu": int(cpu)/100., "s": s, "thr": int(thr), "vss": int(vss)*1024, "rss": int(rss)*1024, "pcy": pcy, "uid": uid, "name": name})
return result
@app.get("/<serial>/stat")
def stat(serial):
return {"meminfo": meminfo(serial), "top": top(serial)}
@app.get("/<serial>/screenshot")
@lock
def screenshot(serial):
size = (int(request.params.get('width', 480)), int(request.params.get('height', 480)))
thumbnail = '%s(%dx%d).thumbnail.png' % (serial, size[0], size[1])
if not os.path.exists('/tmp/%s' % thumbnail) or time.time() - os.stat('/tmp/%s' % thumbnail).st_mtime > 5:
p1 = subprocess.Popen(["adb", "-s", serial, "shell", "screencap", "-p"], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["sed", "s/\r$//"], stdout=subprocess.PIPE, stdin=p1.stdout)
im = Image.open(BytesIO(p2.communicate()[0]))
im.thumbnail(size, Image.ANTIALIAS)
im.save('/tmp/%s' % thumbnail)
return static_file(thumbnail, root='/tmp')
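# Example requests against this Bottle app (host/port depend on deployment):
#   GET /                                        -> device inventory (JSON)
#   GET /<serial>/stat                           -> meminfo + top snapshot
#   GET /<serial>/adb/shell/getprop              -> raw adb command output
#   GET /<serial>/screenshot?width=320&height=320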
| mit | 7,346,349,997,841,327,000 | 39.663366 | 197 | 0.528366 | false |
bors-ltd/django-gitstorage | gitstorage/models.py | 1 | 6291 | # Copyright Bors LTD
# This file is part of django-gitstorage.
#
# Django-gitstorage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Django-gitstorage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with django-gitstorage. If not, see <http://www.gnu.org/licenses/>.
import magic
from django.apps import apps as django_apps
from django.conf import settings
from django.contrib.auth import models as auth_models
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.utils.translation import ugettext_lazy as _
from . import mimetypes
from . import utils
from . import validators
def get_blob_metadata_model():
"""
Returns the BlobMetadata model that is active in this project.
"""
try:
return django_apps.get_model(settings.GIT_STORAGE_BLOB_METADATA_MODEL)
except ValueError:
raise ImproperlyConfigured("GIT_STORAGE_BLOB_METADATA_MODEL must be of the form 'app_label.model_name'")
except LookupError:
raise ImproperlyConfigured(
"GIT_STORAGE_BLOB_METADATA_MODEL refers to model '%s' that has not been installed" % (
settings.GIT_STORAGE_BLOB_METADATA_MODEL,
)
)
except AttributeError:
return BlobMetadata
def guess_mimetype(name=None, buffer=None):
mimetype = None
if name is not None:
mimetype = mimetypes.guess_type(name)[0]
# Mimetype guessing on name is not more accurate but more easily extensible
if mimetype is None and buffer is not None:
mimetype = magic.from_buffer(buffer, mime=True).decode()
return mimetype
class BaseObjectMetadata(models.Model):
id = models.CharField(_("id"), primary_key=True, unique=True, db_index=True, editable=False, max_length=40)
class Meta:
abstract = True
class TreeMetadata(BaseObjectMetadata):
class Meta:
managed = False
def __str__(self):
return "{0.id}".format(self)
class BaseBlobMetadata(BaseObjectMetadata):
# Cached properties to avoid loading the blob
size = models.PositiveIntegerField(verbose_name=_(u"Size"))
# Extra properties that must be optional (they are filled after the initial creation)
mimetype = models.CharField(_("mimetype"), max_length=255, null=True, blank=True)
def fill(self, repository, name, blob, **kwargs):
"""Method called after creation of the object to fill extra properties: mimetype, ...
Override to fill your own extra fields and call this parent.
"""
if self.mimetype is None:
self.mimetype = guess_mimetype(name=name, buffer=blob.data)
class Meta:
verbose_name = _("blob metadata")
verbose_name_plural = _("blob metadata")
abstract = True
def __str__(self):
return "{0.id} type={0.mimetype}".format(self)
class BlobMetadata(BaseBlobMetadata):
class Meta:
swappable = 'GIT_STORAGE_BLOB_METADATA_MODEL'
class TreePermissionQuerySet(models.QuerySet):
def current_permissions(self, path, **kwargs):
return self.filter(parent_path=path.parent_path, name=path.name, **kwargs).select_related('user')
def allowed_names(self, user, parent_path, **kwargs):
if user:
if user.is_superuser:
# Reads as no restriction
return None
if not user.is_authenticated():
user = None
return self.filter(parent_path=parent_path, user=user, **kwargs).values_list('name', flat=True)
def allowed_paths(self, user):
if user:
if user.is_superuser:
# Reads as no restriction
return None
if not user.is_authenticated():
user = None
all_permissions = self.filter(user=user).values_list('parent_path', 'name')
return ["/".join(filter(None, segments)) for segments in all_permissions]
def for_user(self, user, path, **kwargs):
if user:
if not user.is_authenticated():
user = None
return self.filter(user=user, parent_path=path.parent_path, name=path.name, **kwargs)
def other_permissions(self, user, path, **kwargs):
if user:
if not user.is_authenticated():
user = None
return self.filter(user=user, parent_path=path.parent_path, **kwargs).exclude(name=path.name).exists()
def is_allowed(self, user, path, **kwargs):
if user:
if user.is_superuser:
return True
return self.for_user(user, path, **kwargs).exists()
def add(self, users, path):
for user in users:
self.get_or_create(parent_path=path.parent_path, name=path.name, user=user)
def remove(self, users, path):
# Does not work for [None]
if None in users:
for user in users:
self.filter(parent_path=path.parent_path, name=path.name, user=user).delete()
else:
self.filter(parent_path=path.parent_path, name=path.name, user__in=users).delete()
class TreePermission(models.Model):
parent_path = models.CharField(_("parent path"), max_length=2048, db_index=True, blank=True,
validators=[validators.path_validator])
name = models.CharField(_("name"), max_length=256, db_index=True, blank=True,
validators=[validators.name_validator])
user = models.ForeignKey(auth_models.User, null=True, blank=True) # For anonymous user
objects = TreePermissionQuerySet.as_manager()
class Meta:
verbose_name = _("tree permission")
verbose_name_plural = _("tree permissions")
def __str__(self):
path = utils.Path(self.parent_path).resolve(self.name)
return "{0} on {1}".format(self.user, path)
| gpl-3.0 | 749,146,428,825,357,800 | 34.948571 | 112 | 0.650135 | false |
maas/maas | src/maasserver/models/tests/test_cleansave.py | 1 | 13144 | # Copyright 2018 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Tests for `CleanSave`."""
from unittest.mock import sentinel
from django.db.models import Model
from maasserver.models.cleansave import CleanSaveModelState
from maasserver.monkey import DeferredValueAccessError
from maasserver.testing.testcase import MAASLegacyServerTestCase
from maasserver.tests.models import CleanSaveTestModel, GenericTestModel
from maastesting.matchers import MockCalledOnceWith, MockNotCalled
class TestCleanSave(MAASLegacyServerTestCase):
"""Tests for the `CleanSave` mixin."""
def test_state_is_clean_save_based(self):
obj = CleanSaveTestModel.objects.create()
self.assertIsInstance(obj._state, CleanSaveModelState)
self.assertEqual({}, obj._state._changed_fields)
def test_setting_property(self):
obj = CleanSaveModelState()
obj.test_prop = sentinel.value
self.assertEqual(sentinel.value, obj.test_prop)
def test_handling_deferred_field_getting(self):
obj = CleanSaveTestModel.objects.create()
obj = CleanSaveTestModel.objects.filter(id=obj.id).only("id").first()
self.assertRaises(DeferredValueAccessError, lambda: obj.field)
def test_handling_deferred_field_setting(self):
obj = CleanSaveTestModel.objects.create()
obj = CleanSaveTestModel.objects.filter(id=obj.id).only("id").first()
obj.field = "test"
self.assertIn("field", obj._state._changed_fields)
obj.save()
def test_field_marked_changed_for_new_obj(self):
obj = CleanSaveTestModel()
obj.field = "test"
self.assertEqual({"field": None}, obj._state._changed_fields)
def test_field_marked_changed_for_new_obj_when_reset(self):
obj = CleanSaveTestModel()
obj.field = "test"
obj.field = None
self.assertEqual({"field": None}, obj._state._changed_fields)
def test_field_marked_changed_for_existing_obj(self):
obj = CleanSaveTestModel.objects.create()
obj.field = "test"
self.assertEqual({"field": None}, obj._state._changed_fields)
def test_field_not_marked_changed_for_existing_obj_when_reset(self):
obj = CleanSaveTestModel.objects.create()
obj.field = "test"
obj.field = None
self.assertEqual({}, obj._state._changed_fields)
def test_field_not_marked_changed_when_refresh_from_db(self):
obj = CleanSaveTestModel.objects.create()
duplicate = CleanSaveTestModel.objects.get(id=obj.id)
duplicate.field = "test"
duplicate.save()
obj.refresh_from_db()
self.assertEqual("test", obj.field)
self.assertEqual({}, obj._state._changed_fields)
def test_field_not_marked_changed_when_refresh_from_db_no_fields(self):
obj = CleanSaveTestModel.objects.create()
duplicate = CleanSaveTestModel.objects.get(id=obj.id)
duplicate.field = "test"
duplicate.save()
obj.refresh_from_db(fields=[])
self.assertEqual(None, obj.field)
self.assertEqual({}, obj._state._changed_fields)
def test_field_not_marked_changed_when_refresh_with_changed_fields(self):
obj = CleanSaveTestModel.objects.create()
duplicate = CleanSaveTestModel.objects.get(id=obj.id)
duplicate.field = "test"
duplicate.save()
obj.refresh_from_db(fields=["field"])
self.assertEqual("test", obj.field)
self.assertEqual({}, obj._state._changed_fields)
def test_field_not_marked_changed_when_refresh_with_same_fields(self):
obj = CleanSaveTestModel.objects.create()
obj.refresh_from_db(fields=["field"])
self.assertEqual(None, obj.field)
self.assertEqual({}, obj._state._changed_fields)
def test_field_marked_changed_when_refresh_from_db_with_no_fields(self):
obj = CleanSaveTestModel.objects.create()
duplicate = CleanSaveTestModel.objects.get(id=obj.id)
duplicate.field = "test"
duplicate.save()
obj.field = "test"
obj.refresh_from_db(fields=[])
self.assertEqual("test", obj.field)
self.assertEqual({"field": None}, obj._state._changed_fields)
def test_field_marked_changed_rel_id_for_new_obj(self):
related = GenericTestModel.objects.create(field="")
obj = CleanSaveTestModel()
obj.related_id = related.id
self.assertEqual({"related_id": None}, obj._state._changed_fields)
def test_field_marked_changed_rel_attname_for_new_obj(self):
related = GenericTestModel.objects.create(field="")
obj = CleanSaveTestModel()
obj.related = related
self.assertEqual({"related_id": None}, obj._state._changed_fields)
def test_field_marked_changed_rel_id_for_existing_obj(self):
related = GenericTestModel.objects.create(field="")
new_related = GenericTestModel.objects.create(field="")
obj = CleanSaveTestModel.objects.create(related=related)
obj.related_id = new_related.id
self.assertEqual(
{"related_id": related.id}, obj._state._changed_fields
)
def test_field_marked_changed_rel_attname_for_existing_obj(self):
related = GenericTestModel.objects.create(field="")
new_related = GenericTestModel.objects.create(field="")
obj = CleanSaveTestModel.objects.create(related=related)
obj.related = new_related
self.assertEqual(
{"related_id": related.id}, obj._state._changed_fields
)
def test_field_not_marked_changed_rel_id_for_existing_obj(self):
related = GenericTestModel.objects.create(field="")
new_related = GenericTestModel.objects.create(field="")
obj = CleanSaveTestModel.objects.create(related=related)
obj.related_id = new_related.id
obj.related_id = related.id
self.assertEqual({}, obj._state._changed_fields)
def test_field_not_marked_changed_rel_attname_for_existing_obj(self):
related = GenericTestModel.objects.create(field="")
new_related = GenericTestModel.objects.create(field="")
obj = CleanSaveTestModel.objects.create(related=related)
obj.related = new_related
obj.related = related
self.assertEqual({}, obj._state._changed_fields)
def test_save_always_calls_save_when_new(self):
mock_save = self.patch(Model, "save")
obj = CleanSaveTestModel()
obj.save()
self.assertThat(mock_save, MockCalledOnceWith())
def test_save_doesnt_clean_pk_and_related_fields_when_new(self):
obj = CleanSaveTestModel()
mock_full_clean = self.patch(obj, "full_clean")
obj.save()
self.assertThat(
mock_full_clean,
MockCalledOnceWith(
exclude={"id", "related"}, validate_unique=False
),
)
def test_save_validates_unique_except_for_pk_when_new(self):
obj = CleanSaveTestModel()
mock_validate_unique = self.patch(obj, "validate_unique")
obj.save()
self.assertThat(
mock_validate_unique, MockCalledOnceWith(exclude=["id"])
)
def test_save_resets_changed_fields_when_new(self):
obj = CleanSaveTestModel()
obj.field = "test"
obj.save()
self.assertEqual({}, obj._state._changed_fields)
def test_save_performed_with_force_update(self):
obj = CleanSaveTestModel.objects.create()
mock_save = self.patch(Model, "save")
obj.save(force_update=True)
self.assertThat(mock_save, MockCalledOnceWith(force_update=True))
def test_save_performed_when_id_reset(self):
obj = CleanSaveTestModel.objects.create()
obj.id = None
mock_save = self.patch(Model, "save")
obj.save()
self.assertThat(mock_save, MockCalledOnceWith())
def test_save_performed_when_state_forced(self):
obj = CleanSaveTestModel.objects.create()
obj._state.adding = True
mock_save = self.patch(Model, "save")
obj.save()
self.assertThat(mock_save, MockCalledOnceWith())
def test_save_performed_with_force_insert(self):
obj = CleanSaveTestModel.objects.create()
mock_save = self.patch(Model, "save")
obj.save(force_insert=True)
self.assertThat(mock_save, MockCalledOnceWith(force_insert=True))
def test_save_not_performed_when_nothing_changed(self):
obj = CleanSaveTestModel.objects.create()
mock_save = self.patch(Model, "save")
obj.save()
self.assertThat(mock_save, MockNotCalled())
def test_save_table_called_when_changed_fields(self):
related = GenericTestModel.objects.create(field="")
obj = CleanSaveTestModel.objects.create()
mock_save = self.patch(Model, "_save_table")
obj.field = "test"
obj.related = related
obj.save()
self.assertThat(
mock_save,
MockCalledOnceWith(
cls=CleanSaveTestModel,
force_insert=False,
force_update=False,
raw=False,
update_fields={"field", "related_id"},
using="default",
),
)
self.assertEqual({}, obj._state._changed_fields)
def test_save_table_updates_update_fields_with_changed_fields(self):
related = GenericTestModel.objects.create(field="")
obj = CleanSaveTestModel.objects.create()
mock_save = self.patch(Model, "_save_table")
obj.field = "test"
obj.related = related
obj.save(update_fields=["field"])
self.assertThat(
mock_save,
MockCalledOnceWith(
cls=CleanSaveTestModel,
force_insert=False,
force_update=False,
raw=False,
update_fields={"field", "related_id"},
using="default",
),
)
self.assertEqual({}, obj._state._changed_fields)
def test_save_ignores_clean_on_deferred(self):
obj = CleanSaveTestModel.objects.create(field="test")
obj = CleanSaveTestModel.objects.filter(id=obj.id).only("id").first()
related = GenericTestModel.objects.create(field="")
obj.related = related
obj.save(force_update=True)
def test_full_clean_excludes_unchanged_fields(self):
related = GenericTestModel.objects.create(field="")
obj = CleanSaveTestModel.objects.create()
mock_full_clean = self.patch(Model, "full_clean")
obj.related = related
obj.save()
self.assertThat(
mock_full_clean,
MockCalledOnceWith(
exclude={"id", "field", "related"}, validate_unique=False
),
)
def test_full_clean_doesnt_exclude_changed_fields(self):
related = GenericTestModel.objects.create(field="")
obj = CleanSaveTestModel.objects.create()
mock_full_clean = self.patch(Model, "full_clean")
obj.field = "test"
obj.related = related
obj.save()
self.assertThat(
mock_full_clean,
MockCalledOnceWith(
exclude={"id", "related"}, validate_unique=False
),
)
def test_validate_unique_excludes_unchanged_fields(self):
related = GenericTestModel.objects.create(field="")
obj = CleanSaveTestModel.objects.create()
mock_validate_unique = self.patch(Model, "validate_unique")
obj.related = related
obj.save()
self.assertThat(
mock_validate_unique, MockCalledOnceWith(exclude={"id", "field"})
)
def test_utils_get_changed(self):
obj = CleanSaveTestModel.objects.create()
obj.field = "test"
self.assertEqual({"field"}, obj._state.get_changed())
def test_utils_has_changed_True(self):
obj = CleanSaveTestModel.objects.create()
obj.field = "test"
self.assertTrue(obj._state.has_changed("field"))
def test_utils_has_changed_False(self):
obj = CleanSaveTestModel.objects.create()
self.assertFalse(obj._state.has_changed("field"))
def test_utils_has_any_changed_True(self):
obj = CleanSaveTestModel.objects.create()
obj.field = "test"
self.assertTrue(obj._state.has_any_changed(["field"]))
def test_utils_has_any_changed_False(self):
obj = CleanSaveTestModel.objects.create()
self.assertFalse(obj._state.has_any_changed(["field"]))
def test_utils_get_old_value(self):
related = GenericTestModel.objects.create(field="")
new_related = GenericTestModel.objects.create(field="")
obj = CleanSaveTestModel.objects.create(related=related)
obj.related = new_related
self.assertEqual(related.id, obj._state.get_old_value("related_id"))
def test_utils_get_old_value_returns_None_when_not_changed(self):
obj = CleanSaveTestModel.objects.create()
self.assertIsNone(obj._state.get_old_value("field"))
| agpl-3.0 | -2,817,001,847,400,110,600 | 37.887574 | 77 | 0.638771 | false |
MarkusHackspacher/unknown-horizons | horizons/util/python/weakmethod.py | 1 | 2225 | # ###################################################
# Copyright (C) 2008-2017 The Unknown Horizons Team
# [email protected]
# This file is part of Unknown Horizons.
#
# Unknown Horizons is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# ###################################################
import types
import weakref
class WeakMethod:
def __init__(self, function):
assert callable(function)
if isinstance(function, types.MethodType) and function.__self__ is not None:
self.function = function.__func__
self.instance = weakref.ref(function.__self__)
else:
self.function = function
self.instance = None
def __call__(self, *args, **kwargs):
if self.instance is None:
return self.function(*args, **kwargs)
elif self.instance() is not None:
return self.function(self.instance(), *args, **kwargs)
else:
raise ReferenceError("Instance: {} Function: {} Function from module: {}"
.format(self.instance(), self.function, self.function.__module__))
def __eq__(self, other):
if isinstance(other, WeakMethod):
if self.function != other.function:
return False
# check also if either instance is None or else if instances are equal
if self.instance is None:
return other.instance is None
else:
return self.instance() == other.instance()
elif callable(other):
return self == WeakMethod(other)
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self.instance, self.function))
def __str__(self):
return str(self.function)
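# Usage sketch (``Foo`` is a hypothetical class, not part of this module).
# WeakMethod lets a callback registry hold a bound method without keeping
# its instance alive:
#
#     class Foo:
#         def bar(self):
#             return 42
#
#     f = Foo()
#     cb = WeakMethod(f.bar)
#     cb()        # -> 42 while f is alive
#     del f       # after the instance is collected, cb() raises
#                 # ReferenceError instead of silently keeping f alive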
| gpl-2.0 | 867,174,227,849,065,700 | 32.208955 | 90 | 0.671461 | false |
CSSIP-AIR/UMETRICS | collapse_persons/person_attribute.py | 1 | 3929 | ################################################################################
# Copyright (c) 2013, AMERICAN INSTITUTES FOR RESEARCH
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
################################################################################
import common, os, sys
sys.path.append(os.path.abspath('../Shared'))
import __string__ as s
def get_person_attributes(database, person_id):
"""A person can have more than one attribute. Get all the attributes for the person indicated."""
query = """
select
a.AttributeId,
pa.RelationshipCode,
a.Attribute,
coalesce(pas.RelationshipCodeWeight, 0.0) RelationshipCodeWeight
from
Attribute a
inner join PersonAttribute pa on
pa.AttributeId = a.AttributeId and
pa.PersonId = %d
left outer join UMETRICSSupport.PersonAttributeStatistics pas on
pas.RelationshipCode = pa.RelationshipCode;
""" % person_id
person_attributes = []
rows = database.get_all_rows(query)
for row in rows:
person_attribute = PersonAttribute(row[0], row[1], row[2], row[3])
if person_attribute.is_populated:
person_attributes.append(person_attribute)
return person_attributes
def any_attributes_match(database, person_attributes, candidate_person_id):
"""Check to see if any of the candidate person's attributes match the current person's"""
candidate_person_attributes = get_person_attributes(database, candidate_person_id)
for person_attribute in person_attributes:
for candidate_person_attribute in candidate_person_attributes:
if person_attribute.is_similar(candidate_person_attribute):
return True
return False
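# Usage sketch (hypothetical; ``db`` stands for any object exposing the
# get_all_rows(query) interface used above, returning rows of
# (AttributeId, RelationshipCode, Attribute, RelationshipCodeWeight)):
#
#     attrs = get_person_attributes(db, person_id=42)
#     if any_attributes_match(db, attrs, candidate_person_id=99):
#         pass  # treat persons 42 and 99 as candidates for collapsing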
class PersonAttribute:
"""Simplistic class to hold a PersonAttribute"""
def __init__(self, id, relationship, attribute, weight):
self.id = id
self.relationship = common.standardize_text(relationship)
self.attribute = common.standardize_text(attribute)
self.weight = weight
self.concat = s.nullify_blanks(s.make_string([self.relationship, self.attribute]))
self.is_populated = self.concat is not None
def is_similar(self, other_person_attribute):
if not self.is_populated or not other_person_attribute.is_populated:
return False
if self.relationship != other_person_attribute.relationship:
return False
if self.id == other_person_attribute.id:
return True
return (self.weight > 0.9 and self.attribute == other_person_attribute.attribute)
# TODO: Add more sophisticated matching
| bsd-2-clause | -7,868,506,971,949,554,000 | 54.338028 | 757 | 0.686689 | false |
myquant/strategy | SkyPark/python/SkyPark.py | 1 | 4283 | # encoding: utf-8
from gmsdk.api import StrategyBase
from gmsdk import md
from gmsdk.enums import *
import arrow
import time
# Volume to open per trade
OPEN_VOL = 5
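# Strategy outline (summary inferred from the code below): "Sky Park" is an
# opening-range breakout day trade. If the first bar opens far enough above
# (open_long_size) or below (open_short_size) the previous close, that bar's
# high/low become the upper/lower bands; a later price break above the upper
# band opens a long, a break below the lower band opens a short, and all
# positions are flattened after end_time.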
class SkyPark(StrategyBase):
def __init__(self, *args, **kwargs):
super(SkyPark, self).__init__(*args, **kwargs)
        # Upper and lower bands
        self.upr = None
        self.dwn = None
        # Position-opening flags
        self.open_long_flag = False
        self.open_short_flag = False
        # Holding volume
        self.hoding = 0
self.__get_param()
self.__init_data()
def __get_param(self):
'''
        Get configuration parameters.
'''
        # Symbol of the traded instrument
self.trade_symbol = self.config.get('para', 'trade_symbol')
pos = self.trade_symbol.find('.')
self.exchange = self.trade_symbol[:pos]
self.sec_id = self.trade_symbol[pos + 1:]
FMT = '%s %s'
today = arrow.now().date()
        # Time of the first K-line (bar)
first_kline_time = self.config.get('para', 'first_kline_time')
et = FMT % (today.isoformat(), first_kline_time)
self.first_kline_time_str = et
first_kline_time1 = self.config.get('para', 'first_kline_time1')
et = FMT % (today.isoformat(), first_kline_time1)
self.first_kline_time_str1 = et
        # Time to flatten (close) positions
end_time = self.config.get('para', 'end_time')
et = FMT % (today.isoformat(), end_time)
self.end_trading = arrow.get(et).replace(tzinfo='local').timestamp
print("end time %s" % (et))
        # Threshold for opening long positions
self.open_long_size = self.config.getfloat('para', 'open_long_size')
        # Threshold for opening short positions
self.open_short_size = self.config.getfloat('para', 'open_short_size')
def __init_data(self):
dailybars = self.get_last_dailybars(self.trade_symbol)
if len(dailybars) > 0:
self.pre_close = dailybars[0].close
        # Data of the first K-line (bar)
while self.upr is None or self.dwn is None:
print('waiting for get the first K line...')
bars = self.get_bars(self.trade_symbol, 60, self.first_kline_time_str, self.first_kline_time_str1)
if len(bars) > 0:
                self.upr = bars[0].high  # upper band
                self.dwn = bars[0].low  # lower band
print('upr:%s, dwn: %s' % (self.upr, self.dwn))
                if bars[0].open > self.pre_close * (1 + self.open_long_size):
                    self.open_long_flag = True  # flag: open a long position
                elif bars[0].open < self.pre_close * (1 - self.open_short_size):
                    self.open_short_flag = True  # flag: open a short position
else:
print('Do not meet the trading condition, today do not trading.')
break
time.sleep(1)
def on_tick(self, tick):
        # Latest quote (last traded price)
self.close = tick.last_price
def on_bar(self, bar):
'''
        Bar-period data event.
'''
if self.open_long_flag and self.close > self.upr and 0 == self.hoding:
self.open_long(self.exchange, self.sec_id, 0, OPEN_VOL)
self.hoding += OPEN_VOL
print('open long: last price %s, vol %s' % (self.close, OPEN_VOL))
elif self.open_short_flag and self.close < self.dwn and 0 == self.hoding:
self.open_short(self.exchange, self.sec_id, 0, OPEN_VOL)
self.hoding += OPEN_VOL
print('open short: last price %s, vol %s' % (self.close, OPEN_VOL))
        # Intraday close-out at end of session
if bar.utc_time > self.end_trading:
if self.open_long_flag and self.hoding > 0:
self.close_long(self.exchange, self.sec_id, 0, self.hoding)
self.hoding = 0
self.open_long_flag = False
print('end trading time close long, vol: %s' % self.hoding)
elif self.open_short_flag and self.hoding > 0:
self.close_short(self.exchange, self.sec_id, 0, self.hoding)
self.hoding = 0
self.open_short_flag = False
print('end trading time close short, vol: %s' % self.hoding)
if __name__ == '__main__':
sky_park = SkyPark(config_file='SkyPark.ini')
ret = sky_park.run()
print(sky_park.get_strerror(ret)) | apache-2.0 | 4,281,629,947,472,806,000 | 33.366667 | 110 | 0.543051 | false |
pcmoritz/Strada.jl | deps/src/caffe/python/apollocaffe/layers/caffe_layers.py | 1 | 7272 | """
List of layer classes for building protobuf layer parameters from python
"""
from .layer_headers import Layer, LossLayer, DataLayer
from .layer_helpers import assign_proto, Filler
from apollocaffe.proto import caffe_pb2
class CapSequence(Layer):
def __init__(self, name, sequence_lengths, **kwargs):
super(CapSequence, self).__init__(self, name, kwargs)
for x in sequence_lengths:
self.p.rp.cap_sequence_param.sequence_lengths.append(x)
class Concat(Layer):
def __init__(self, name, **kwargs):
super(Concat, self).__init__(self, name, kwargs)
class Convolution(Layer):
def __init__(self, name, kernel_dim, num_output, weight_filler=None, bias_filler=None, **kwargs):
kwargs['kernel_h'] = kernel_dim[0]
kwargs['kernel_w'] = kernel_dim[1]
kwargs['num_output'] = num_output
super(Convolution, self).__init__(self, name, kwargs)
if weight_filler is None:
weight_filler = Filler('xavier')
self.p.convolution_param.weight_filler.CopyFrom(weight_filler.filler_param)
if bias_filler is None:
bias_filler = Filler('constant', 0.)
self.p.convolution_param.bias_filler.CopyFrom(bias_filler.filler_param)
class Data(DataLayer):
def __init__(self, name, source, batch_size, transform=None, **kwargs):
kwargs['source'] = source
kwargs['batch_size'] = batch_size
super(Data, self).__init__(self, name, kwargs)
self.p.data_param.backend = caffe_pb2.DataParameter.LMDB
if transform is not None:
self.p.transform_param.CopyFrom(transform.transform_param)
class Dropout(Layer):
def __init__(self, name, dropout_ratio, **kwargs):
kwargs['dropout_ratio'] = dropout_ratio
super(Dropout, self).__init__(self, name, kwargs)
class DummyData(DataLayer):
def __init__(self, name, shape, **kwargs):
super(DummyData, self).__init__(self, name, kwargs)
assert len(shape) == 4
self.p.dummy_data_param.num.append(shape[0])
self.p.dummy_data_param.channels.append(shape[1])
self.p.dummy_data_param.height.append(shape[2])
self.p.dummy_data_param.width.append(shape[3])
class Eltwise(Layer):
def __init__(self, name, operation, **kwargs):
super(Eltwise, self).__init__(self, name, kwargs)
if operation == 'MAX':
self.p.eltwise_param.operation = caffe_pb2.EltwiseParameter.MAX
elif operation == 'SUM':
self.p.eltwise_param.operation = caffe_pb2.EltwiseParameter.SUM
elif operation == 'PROD':
self.p.eltwise_param.operation = caffe_pb2.EltwiseParameter.PROD
else:
raise ValueError('Unknown Eltwise operator')
class EuclideanLoss(LossLayer):
def __init__(self, name, **kwargs):
super(EuclideanLoss, self).__init__(self, name, kwargs)
class HDF5Data(DataLayer):
def __init__(self, name, source, batch_size, transform=None, **kwargs):
kwargs['source'] = source
kwargs['batch_size'] = batch_size
super(HDF5Data, self).__init__(self, name, kwargs)
if transform is not None:
self.p.transform_param.CopyFrom(transform.transform_param)
class ImageData(DataLayer):
def __init__(self, name, source, batch_size, transform=None, **kwargs):
kwargs['source'] = source
kwargs['batch_size'] = batch_size
super(ImageData, self).__init__(self, name, kwargs)
        if transform is not None:
self.p.transform_param.CopyFrom(transform.transform_param)
class InnerProduct(Layer):
def __init__(self, name, num_output, weight_filler=None, bias_filler=None, **kwargs):
kwargs['num_output'] = num_output
super(InnerProduct, self).__init__(self, name, kwargs)
if weight_filler is None:
weight_filler = Filler('xavier')
self.p.inner_product_param.weight_filler.CopyFrom(weight_filler.filler_param)
if bias_filler is None:
bias_filler = Filler('constant', 0.)
self.p.inner_product_param.bias_filler.CopyFrom(bias_filler.filler_param)
class LRN(Layer):
def __init__(self, name, **kwargs):
super(LRN, self).__init__(self, name, kwargs)
class LstmUnit(Layer):
def __init__(self, name, num_cells, weight_filler=None, **kwargs):
super(LstmUnit, self).__init__(self, name, kwargs)
self.p.lstm_unit_param.num_cells = num_cells
if weight_filler is None:
weight_filler = Filler('uniform', 0.1)
self.p.lstm_unit_param.input_weight_filler.CopyFrom(
weight_filler.filler_param)
self.p.lstm_unit_param.input_gate_weight_filler.CopyFrom(
weight_filler.filler_param)
self.p.lstm_unit_param.forget_gate_weight_filler.CopyFrom(
weight_filler.filler_param)
self.p.lstm_unit_param.output_gate_weight_filler.CopyFrom(
weight_filler.filler_param)
class L1Loss(LossLayer):
def __init__(self, name, **kwargs):
super(L1Loss, self).__init__(self, name, kwargs)
class NumpyData(DataLayer):
def __init__(self, name, data, **kwargs):
super(NumpyData, self).__init__(self, name, kwargs)
from apollocaffe import make_numpy_data_param
import numpy as np
#self.p.rp.ParseFromString(make_numpy_data_param(np.array(data, dtype=np.float32)).SerializeToString())
self.p = make_numpy_data_param(self.p, np.array(data, dtype=np.float32))
class Pooling(Layer):
def __init__(self, name, pool='MAX', **kwargs):
super(Pooling, self).__init__(self, name, kwargs)
if pool is not None:
if pool == 'MAX':
self.p.pooling_param.pool = caffe_pb2.PoolingParameter.MAX
elif pool == 'AVE':
self.p.pooling_param.pool = caffe_pb2.PoolingParameter.AVE
elif pool == 'STOCHASTIC':
self.p.pooling_param.pool = caffe_pb2.PoolingParameter.STOCHASTIC
else:
raise ValueError('Unknown pooling method')
class Power(Layer):
def __init__(self, name, **kwargs):
super(Power, self).__init__(self, name, kwargs)
class ReLU(Layer):
def __init__(self, name, **kwargs):
super(ReLU, self).__init__(self, name, kwargs)
class Softmax(Layer):
def __init__(self, name, **kwargs):
super(Softmax, self).__init__(self, name, kwargs)
class SoftmaxWithLoss(LossLayer):
def __init__(self, name, **kwargs):
super(SoftmaxWithLoss, self).__init__(self, name, kwargs)
class Accuracy(Layer):
def __init__(self, name, **kwargs):
super(Accuracy, self).__init__(self, name, kwargs)
class Transpose(Layer):
def __init__(self, name, **kwargs):
super(Transpose, self).__init__(self, name, kwargs)
class Unknown(Layer):
def __init__(self, p):
self.p = p
class Wordvec(Layer):
def __init__(self, name, dimension, vocab_size, weight_filler=None, **kwargs):
kwargs['dimension'] = dimension
kwargs['vocab_size'] = vocab_size
super(Wordvec, self).__init__(self, name, kwargs)
if weight_filler is None:
weight_filler = Filler('uniform', 0.1)
self.p.wordvec_param.weight_filler.CopyFrom(weight_filler.filler_param)
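# Usage sketch (hypothetical layer names; whether any extra kwargs such as
# bottoms/tops are accepted depends on the Layer base class in layer_headers,
# which is not shown here). Each class builds a caffe_pb2 LayerParameter:
#
#     fc = InnerProduct("fc1", num_output=128)
#     relu = ReLU("relu1")
#     drop = Dropout("drop1", dropout_ratio=0.5)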
| bsd-2-clause | -2,210,375,708,227,604,200 | 39.853933 | 111 | 0.630226 | false |
mbiciunas/nix | test/config/test_config_tags.py | 1 | 3679 | # Nix
# Copyright (c) 2017 Mark Biciunas.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
from config.config_tag import ConfigTag
from config.config_tags import ConfigTags
from utility.nix_error import NixError
class TestConfigTags:
_TAG_VALID_1 = "tag1"
_TAG_VALID_2 = "tag2"
_TAG_INVALID_1 = "bad_tag_1"
_TAG_INVALID_2 = "bad_tag_2"
_TAG_VALID_LIST = [_TAG_VALID_1, _TAG_VALID_2]
_TAG_INVALID_LIST = [_TAG_INVALID_1, _TAG_INVALID_2]
_TAG_MIX_LIST = [_TAG_INVALID_1, _TAG_VALID_1, _TAG_INVALID_2, _TAG_VALID_2]
def test_exist(self, config_valid):
_tags = config_valid.config().get_tags()
assert _tags.exist(self._TAG_VALID_1), "Tag not found: {}".format(self._TAG_VALID_1)
assert not _tags.exist(self._TAG_INVALID_1), "Non existing tag found: {}".format(self._TAG_INVALID_1)
def test_get_invalid_tags(self, config_valid):
_tags = config_valid.config().get_tags()
_result = _tags.get_invalid_tags(self._TAG_VALID_LIST)
assert len(_result) == 0, "Valid Tags found as invalid: {}".format(_result)
_result = _tags.get_invalid_tags(self._TAG_INVALID_LIST)
assert len(_result) == len(self._TAG_INVALID_LIST), "Invalid Tags found as valid: {}".format(_result)
_result = _tags.get_invalid_tags(self._TAG_MIX_LIST)
_correct = [x for x in self._TAG_MIX_LIST if x not in self._TAG_VALID_LIST]
assert len(_result) == len(_correct), "Mix of valid and invalid Tags wrong: {}".format(_result)
def test_insert(self, config_valid):
_tags = config_valid.config().get_tags()
_tag = _tags.insert()
assert type(_tag) is ConfigTag
        assert _tag.get_name() == "", "Tag name should be empty, contains: {}".format(_tag.get_name())
        assert _tag.get_desc() == "", "Tag description should be empty, contains: {}".format(_tag.get_desc())
def test_delete(self, config_valid):
_tags = config_valid.config().get_tags()
_tags.delete(self._TAG_VALID_1)
with pytest.raises(NixError):
_tags.delete(self._TAG_INVALID_1)
def test_list(self, config_valid):
_tags = config_valid.config().get_tags()
_tag_list = _tags.list()
assert len(_tag_list) == config_valid.get_count_tags()
def test_find(self, config_valid):
_tags = config_valid.config().get_tags()
_tag = _tags.find(self._TAG_VALID_1)
assert _tag.get_name() == self._TAG_VALID_1
with pytest.raises(NixError):
_tags.find(self._TAG_INVALID_1)
def test_export_data(self, config_valid):
_tags = config_valid.config().get_tags()
_export = _tags.export_data()
assert type(_export) == list
assert len(_export) == config_valid.get_count_tags()
def test_import_data(self, config_valid):
_tags = config_valid.config().get_tags()
_export = _tags.export_data()
_tags_new = ConfigTags()
_tags_new.import_data(_export)
assert len(_tags_new.list()) == config_valid.get_count_tags()
| gpl-3.0 | -418,231,377,767,631,170 | 34.375 | 109 | 0.638761 | false |
ethancaballero/neural-engineers-first-attempt | adaptive_attention.py | 1 | 9391 | import tensorflow as tf
from tensorflow.python.ops import rnn, rnn_cell, seq2seq
from utils import get_seq_length, _add_gradient_noise, _position_encoding, _xavier_weight_init, _last_relevant, batch_norm
#from https://github.com/DeNeutoy/act-rte-inference/blob/master/AdaptiveIAAModel.py
class Adaptive_Episodes_Config(object):
init_scale = 0.05
learning_rate = 0.001
max_grad_norm = 5
num_layers = 2
num_steps = 20
encoder_size = 128
inference_size = 256
max_epoch = 4
max_max_epoch = 3
keep_prob = 0.8
lr_decay = 0.8
batch_size = 32
vocab_size = 10000
bidirectional = False
embedding_size = 300
embedding_reg = 0.0001
train_embeddings = True
use_embeddings = False
eps = 0.1
max_computation = 20
step_penalty = 0.00001
#class AdaptiveIAAModel(object):
class Adaptive_Episodes(object):
""" Implements Iterative Alternating Attention for Machine Reading
http://arxiv.org/pdf/1606.02245v3.pdf """
def __init__(self, config, pretrained_embeddings=None,
update_embeddings=True, is_training=False):
self.config = config
def gate_mechanism(self, gate_input, scope):
with tf.variable_scope(scope):
if self.bidirectional:
size = 3*2*self.config.encoder_size + self.hidden_size
out_size = 2*self.config.encoder_size
else:
size = 3*self.config.encoder_size + self.hidden_size
out_size = self.config.encoder_size
hidden1_w = tf.get_variable("hidden1_w", [size, size])
hidden1_b = tf.get_variable("hidden1_b", [size])
hidden2_w = tf.get_variable("hidden2_w", [size, size])
hidden2_b = tf.get_variable("hidden2_b", [size])
sigmoid_w = tf.get_variable("sigmoid_w", [size, out_size])
sigmoid_b = tf.get_variable("sigmoid_b", [out_size])
if self.config.keep_prob < 1.0 and self.is_training:
gate_input = tf.nn.dropout(gate_input, self.config.keep_prob)
hidden1 = tf.nn.relu(tf.matmul(gate_input, hidden1_w) + hidden1_b)
if self.config.keep_prob < 1.0 and self.is_training:
hidden1 = tf.nn.dropout(hidden1, self.config.keep_prob)
hidden2 = tf.nn.relu(tf.matmul(hidden1, hidden2_w) + hidden2_b)
gate_output = tf.nn.sigmoid(tf.matmul(hidden2, sigmoid_w) + sigmoid_b)
return gate_output
def get_attention(self, prev_memory, fact_vec):
"""Use question vector and previous memory to create scalar attention for current fact"""
with tf.variable_scope("attention", reuse=True, initializer=_xavier_weight_init()):
W_1 = tf.get_variable("W_1")
b_1 = tf.get_variable("bias_1")
W_2 = tf.get_variable("W_2")
b_2 = tf.get_variable("bias_2")
features = [fact_vec*prev_memory, tf.abs(fact_vec - prev_memory)]
feature_vec = tf.concat(1, features)
attention = tf.matmul(tf.tanh(tf.matmul(feature_vec, W_1) + b_1), W_2) + b_2
return attention
def _attention_GRU_step(self, rnn_input, h, g):
"""Implement attention GRU as described by https://arxiv.org/abs/1603.01417"""
with tf.variable_scope("attention_gru", reuse=True, initializer=_xavier_weight_init()):
Wr = tf.get_variable("Wr")
Ur = tf.get_variable("Ur")
br = tf.get_variable("bias_r")
W = tf.get_variable("W")
U = tf.get_variable("U")
bh = tf.get_variable("bias_h")
r = tf.sigmoid(tf.matmul(rnn_input, Wr) + tf.matmul(h, Ur) + br)
h_hat = tf.tanh(tf.matmul(rnn_input, W) + r*tf.matmul(h, U) + bh)
rnn_output = g*h_hat + (1-g)*h
return rnn_output
#analogous to inference_step
def generate_episode(self, batch_mask, prob_compare, prob, counter, episode, fact_vecs, acc_states, counter_int, weight_container, bias_container):
"""Generate episode by applying attention to current fact vectors through a modified GRU"""
fact_vecs_t = tf.unpack(tf.transpose(fact_vecs, perm=[1,0,2]))
'''TRY REPLACING acc_states WITH episode AND SEE WHICH WORKS BETTER'''
attentions = [tf.squeeze(self.get_attention(acc_states, fv), squeeze_dims=[1]) for fv in fact_vecs_t]
attentions = tf.transpose(tf.pack(attentions))
softs = tf.nn.softmax(attentions)
softs = tf.split(1, self.max_input_len, softs)
gru_outputs = []
# set initial state to zero
h = tf.zeros((self.batch_size, self.hidden_size))
# use attention gru
for i, fv in enumerate(fact_vecs_t):
h = self._attention_GRU_step(fv, h, softs[i])
gru_outputs.append(h)
# extract gru outputs at proper index according to input_lens
gru_outputs = tf.pack(gru_outputs)
gru_outputs = tf.transpose(gru_outputs, perm=[1,0,2])
#analogous to output, new_state = self.inference_cell(input,state)
episode = _last_relevant(gru_outputs, self.input_len_placeholder)
''' # TARGET_SIDE ATTENTION
episode = self.generate_episode(prev_memory, fact_vecs, concat_all)
'''
p = tf.squeeze(tf.sigmoid(self.shared_linear_layer(episode, 1, True)))
new_batch_mask = tf.logical_and(tf.less(prob + p,self.one_minus_eps),batch_mask)
new_float_mask = tf.cast(new_batch_mask, tf.float32)
prob += p * new_float_mask
prob_compare += p * tf.cast(batch_mask, tf.float32)
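        # ACT-style halting (cf. https://arxiv.org/abs/1603.08983): an example
        # keeps taking episodes while its cumulative halting probability stays
        # below 1 - eps and its hop counter stays below N. On the last hop the
        # remainder 1 - sum(p) is used as the weight instead of p (see
        # use_remainder/normal below), so the weights across hops sum to 1.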
'''based on github.com/tensorflow/tensorflow/issues/5608#issuecomment-260549420'''
#untied
Wt = weight_container.read(counter_int)
bt = bias_container.read(counter_int)
#tied
#Wt = weight_container.read(0)
#bt = bias_container.read(0)
        counter_int += 1
def use_remainder():
remainder = tf.constant(1.0, tf.float32,[self.batch_size]) - prob
remainder_expanded = tf.expand_dims(remainder,1)
tiled_remainder = tf.tile(remainder_expanded,[1,self.hidden_size])
acc_state = tf.nn.relu(tf.matmul(tf.concat(1, [acc_states, episode * tiled_remainder]), Wt) + bt)
return acc_state
def normal():
p_expanded = tf.expand_dims(p * new_float_mask,1)
tiled_p = tf.tile(p_expanded,[1,self.hidden_size])
acc_state = tf.nn.relu(tf.matmul(tf.concat(1, [acc_states, episode * tiled_p]), Wt) + bt)
return acc_state
counter += tf.constant(1.0,tf.float32,[self.batch_size]) * new_float_mask
counter_condition = tf.less(counter,self.N)
condition = tf.reduce_any(tf.logical_and(new_batch_mask,counter_condition))
acc_state = tf.cond(condition, normal, use_remainder)
'''ADD MECHANISM TO INCREASE HALT PROB IF MULTIPLE SIMILAR ATTENTION MASKS IN A ROW;
would be the difference between consecutive attention masks
        based on this comment: reddit.com/r/MachineLearning/comments/59sfz8/research_learning_to_reason_with_adaptive/d9bgqxw/'''
return (new_batch_mask, prob_compare, prob, counter, episode, fact_vecs, acc_state, counter_int, weight_container, bias_container)
#analogous to do_inference_steps
def do_generate_episodes(self, prev_memory, fact_vecs, batch_size, hidden_size, max_input_len, input_len_placeholder, max_num_hops, epsilon, weight_container, bias_container):
self.batch_size = batch_size
self.hidden_size = hidden_size
self.max_input_len = max_input_len
self.input_len_placeholder = input_len_placeholder
        counter_int = tf.constant(0)
self.shared_linear_layer = tf.make_template('shared_linear_layer', tf.nn.rnn_cell._linear)
self.one_minus_eps = tf.constant(1.0 - epsilon, tf.float32,[self.batch_size])
self.N = tf.constant(max_num_hops, tf.float32,[self.batch_size])
prob = tf.constant(0.0,tf.float32,[self.batch_size], name="prob")
prob_compare = tf.constant(0.0,tf.float32,[self.batch_size], name="prob_compare")
counter = tf.constant(0.0, tf.float32,[self.batch_size], name="counter")
self.counter = tf.constant(0.0, tf.float32,[self.batch_size], name="counter")
acc_states = tf.zeros_like(prev_memory, tf.float32, name="state_accumulator")
batch_mask = tf.constant(True, tf.bool,[self.batch_size])
# While loop stops when this predicate is FALSE.
# Ie all (probability < 1-eps AND counter < N) are false.
pred = lambda batch_mask, prob_compare, prob,\
counter, prev_memory, fact_vecs, acc_state, counter_int, weight_container, bias_container:\
tf.reduce_any(
tf.logical_and(
tf.less(prob_compare,self.one_minus_eps),
tf.less(counter,self.N)))
# only stop if all of the batch have passed either threshold
# Do while loop iterations until predicate above is false.
_,_,remainders,iterations,_,_,state,_,_,_ = \
tf.while_loop(pred, self.generate_episode,
[batch_mask, prob_compare, prob,
counter, prev_memory, fact_vecs, acc_states, counter_int, weight_container, bias_container])
return state, remainders, iterations | mit | 8,711,310,250,242,839,000 | 39.658009 | 179 | 0.621127 | false |
TAlonglong/trollduction-test | trollduction/tests/test_xml_read.py | 1 | 4230 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Martin Raspaud
# Author(s):
# Martin Raspaud <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Test xml_read.py
"""
import unittest
xmlstuff = """<?xml version="1.0" encoding='utf-8'?>
<?xml-stylesheet type="text/xsl" href="prodlist2.xsl"?>
<!-- This config is used by Trollduction.-->
<product_config>
<metadata>
<platform>noaa</platform>
<number>15</number>
</metadata>
<common>
<output_dir>/tmp</output_dir>
</common>
<variables>
<output_dir id="local_sir">/local_disk/data/sir</output_dir>
<output_dir id="sir">/local_disk/data/out/sir</output_dir>
<output_dir id="rgb">/local_disk/data/out/rgb</output_dir>
<output_dir id="tmp">/tmp</output_dir>
</variables>
<product_list>
<!-- dump to netcdf -->
<!-- calibrated, satellite projection -->
<dump>
<file output_dir="sir" format="netcdf4">{time:%Y%m%d_%H%M}_{platform}{satnumber}.nc</file>
</dump>
<area id="eurol" name="Europe_large">
<!-- Generate the product only if sun is above the horizon at the
defined longitude/latitude -->
<product id="overview" name="overview" sunzen_day_maximum="90" sunzen_lonlat="25, 60">
<file>{time:%Y%m%d_%H%M}_{platform}{satnumber}_{areaname}_{composite}.png</file>
</product>
<product id="natural" name="dnc" sunzen_day_maximum="90" sunzen_lonlat="25, 60">
<file>{time:%Y%m%d_%H%M}_{platform}{satnumber}_{areaname}_{composite}.png</file>
</product>
<product id="green_snow" name="green_snow" sunzen_day_maximum="90" sunzen_lonlat="25, 60">
<file>{time:%Y%m%d_%H%M}_{platform}{satnumber}_{areaname}_{composite}.png</file>
</product>
<product id="red_snow" name="red_snow" sunzen_day_maximum="90" sunzen_lonlat="25, 60">
<file format="png">{time:%Y%m%d_%H%M}_{platform}{satnumber}_{areaname}_{composite}.png</file>
</product>
<product id="cloudtop" name="cloudtop">
<file format="png">{time:%Y%m%d_%H%M}_{platform}{satnumber}_{areaname}_{composite}.png</file>
</product>
<!-- Generate only if the Sun is below the horizon -->
<product id="night_overview" name="night_overview" sunzen_night_minimum="90" sunzen_lonlat="25, 60">
<file format="png">{time:%Y%m%d_%H%M}_{platform}{satnumber}_{areaname}_{composite}.png</file>
</product>
<product id="night_fog" name="night_fog" sunzen_night_minimum="90" sunzen_lonlat="25, 60">
<file>{time:%Y%m%d_%H%M}_{platform}{satnumber}_{areaname}_{composite}.png</file>
</product>
</area>
</product_list>
</product_config>
"""
from trollduction.xml_read import ProductList
from StringIO import StringIO
class TestProductList(unittest.TestCase):
# def test_vars(self):
# pconfig = ProductList(StringIO(xmlstuff))
# self.assertEquals(pconfig.vars,
# {'output_dir': {'local_sir': '/local_disk/data/sir',
# 'rgb': '/local_disk/data/out/rgb',
# 'sir': '/local_disk/data/out/sir',
# 'tmp': '/tmp'}})
# dump_item = pconfig.prodlist.findall('./dump/file')[0]
# self.assertEquals(dump_item.attrib["output_dir"],
# '/local_disk/data/out/sir')
pass
def suite():
"""The suite for test_xml_read
"""
loader = unittest.TestLoader()
mysuite = unittest.TestSuite()
mysuite.addTest(loader.loadTestsFromTestCase(TestProductList))
return mysuite
| gpl-3.0 | 3,548,981,223,696,637,000 | 34.546218 | 106 | 0.623404 | false |
operator/sqlalchemy_bulk_lazy_loader | test/conftest.py | 1 | 1226 | #!/usr/bin/env python
"""
pytest plugin script.
This script is an extension to py.test which
installs SQLAlchemy's testing plugin into the local environment.
"""
import sys
import os
from lib.sqlalchemy_bulk_lazy_loader import BulkLazyLoader
from sqlalchemy.testing import plugin
BulkLazyLoader.register_loader()
# if not sys.flags.no_user_site:
# # this is needed so that test scenarios like "python setup.py test"
# # work correctly, as well as plain "py.test". These commands assume
# # that the package in question is locally present, but since we have
# # ./lib/, we need to punch that in.
# # We check no_user_site to honor the use of this flag.
# sys.path.insert(
# 0,
# os.path.join(
# os.path.dirname(os.path.abspath(__file__)), '..', 'lib')
# )
# use bootstrapping so that test plugins are loaded
# without touching the main library before coverage starts
bootstrap_file = os.path.join(
os.path.dirname(os.path.abspath(plugin.__file__)),
'bootstrap.py'
)
with open(bootstrap_file) as f:
code = compile(f.read(), "bootstrap.py", 'exec')
to_bootstrap = "pytest"
exec(code, globals(), locals())
from pytestplugin import * # noqa
| mit | 7,862,574,074,721,302,000 | 28.902439 | 74 | 0.67863 | false |
mbusb/multibootusb | scripts/config.py | 1 | 2054 | #!/usr/bin/env python3
# Name: config.py
# Purpose: Module to share important variables between various modules. Mainly included so as not to call many
# functions again and again
# Authors: Sundar
# Licence: This file is a part of multibootusb package. You can redistribute it or modify
# under the terms of GNU General Public License, v.2 or above
iso_link = ""
usb_disk = None
usb_mount = ""
usb_uuid = ""
usb_label = ""
usb_details = ''
image_path = None
persistence = 0
persistence_available = False
persistence_max_size = 0
distro = ""
status_text = ""
percentage = 0
syslinux_version = ''
uninstall_distro_dir_name = ""
uninstall_distro_dir_path = ""
iso_file_list = ''
iso_bin_dir = ''
process_exist = None
yes = False
cli_dd = False
cli_syslinux = False
usb_gpt = ''
imager_iso_link = ""
imager_usb_disk_selected = ""
imager_lock = ""
imager_percentage = ""
imager_status_text = ""
imager_return = ""
install_size = ""
editors_linux = ["xdg-open", "gedit", "kate", "kwrite"]
editors_win = ["notepad++.exe", "notepad.exe"]
imager_usb_disk = []
remounted_partitions = []
debug = False
# protected_drives = ['C:','D:','E:', '/dev/sda', '/dev/sdb', '/dev/sdc']
# If turned off, qemu will be sought at a few preset locations
# first before deciding to use the bundled exe.
# Set 'qemu_exe_path' to explicitly specify.
qemu_use_builtin = True # Relevant on Windows only
# qemu_exe_path = r"C:\pkgs\qemu\qemu-system-x86_64.exe"
# Relevant on Windows only
# Enable QEMU accelaration by Intel HAXM hypervisor.
# Bundled QEMU does not support this.
# See https://www.qemu.org/2017/11/22/haxm-usage-windows/ for setup.
qemu_use_haxm = not qemu_use_builtin # Relevant on Windows only
# qemu_use_kvm = False
# qemu_bios = 'OVMF.fd'
def update_usb_mount(new_usb_details):
global usb_mount, usb_details
usb_mount = new_usb_details['mount_point'].replace('\\x20', ' ')
usb_details = new_usb_details
def add_remounted(usb_disk):
if usb_disk not in remounted_partitions:
remounted_partitions.append(usb_disk)
| gpl-2.0 | -479,208,830,022,669,200 | 26.756757 | 111 | 0.689387 | false |
glogiotatidis/bedrock | bedrock/releasenotes/tests/test_base.py | 1 | 17464 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.core.cache import caches
from django.http import Http404
from django.test.client import RequestFactory
from django.test.utils import override_settings
from bedrock.base.urlresolvers import reverse
from mock import patch, Mock
from nose.tools import eq_, ok_
from pathlib2 import Path
from pyquery import PyQuery as pq
from rna.models import Release
from bedrock.firefox.firefox_details import FirefoxDesktop
from bedrock.mozorg.tests import TestCase
from bedrock.releasenotes import views
from bedrock.thunderbird.details import ThunderbirdDesktop
DATA_PATH = str(Path(__file__).parent / 'data')
firefox_desktop = FirefoxDesktop(json_dir=DATA_PATH)
thunderbird_desktop = ThunderbirdDesktop(json_dir=DATA_PATH)
class TestRNAViews(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.request = self.factory.get('/')
self.render_patch = patch('bedrock.releasenotes.views.l10n_utils.render')
self.mock_render = self.render_patch.start()
self.mock_render.return_value.has_header.return_value = False
def tearDown(self):
self.render_patch.stop()
@property
def last_ctx(self):
"""
Convenient way to access the context of the last rendered
response.
"""
return self.mock_render.call_args[0][2]
@patch('bedrock.releasenotes.views.get_object_or_404')
@patch('bedrock.releasenotes.views.Q')
def test_get_release_or_404(self, Q, get_object_or_404):
eq_(views.get_release_or_404('version', 'product'),
get_object_or_404.return_value)
get_object_or_404.assert_called_with(
Release, Q.return_value, version='version')
Q.assert_called_once_with(product='product')
@patch('bedrock.releasenotes.views.get_object_or_404')
@patch('bedrock.releasenotes.views.Q')
def test_get_release_or_404_esr(self, Q, get_object_or_404):
eq_(views.get_release_or_404('24.5.0', 'Firefox'),
get_object_or_404.return_value)
Q.assert_any_call(product='Firefox')
Q.assert_any_call(product='Firefox Extended Support Release')
@patch('bedrock.releasenotes.views.get_object_or_404')
@patch('bedrock.releasenotes.views.Q')
def test_get_release_or_404_endswith_esr(self, Q, get_object_or_404):
eq_(views.get_release_or_404('45.0esr', 'Firefox'),
get_object_or_404.return_value)
Q.assert_any_call(product='Firefox')
Q.assert_any_call(product='Firefox Extended Support Release')
@override_settings(DEV=False)
@patch('bedrock.releasenotes.views.release_notes_template')
@patch('bedrock.releasenotes.views.get_release_or_404')
@patch('bedrock.releasenotes.views.equivalent_release_url')
def test_release_notes(self, mock_equiv_rel_url, get_release_or_404,
mock_release_notes_template):
"""
Should use release returned from get_release_or_404 with the
correct params and pass the correct context variables and
template to l10n_utils.render.
"""
mock_release = get_release_or_404.return_value
mock_release.major_version.return_value = '34'
mock_release.notes.return_value = ([Release(id=1), Release(id=2)],
[Release(id=3), Release(id=4)])
views.release_notes(self.request, '27.0')
get_release_or_404.assert_called_with('27.0', 'Firefox')
mock_release.notes.assert_called_with(public_only=True)
eq_(self.last_ctx['version'], '27.0')
eq_(self.last_ctx['release'], mock_release)
eq_(self.last_ctx['new_features'], [Release(id=1), Release(id=2)])
eq_(self.last_ctx['known_issues'], [Release(id=3), Release(id=4)])
eq_(self.mock_render.call_args[0][1],
mock_release_notes_template.return_value)
mock_equiv_rel_url.assert_called_with(mock_release)
mock_release_notes_template.assert_called_with(
mock_release.channel, 'Firefox', 34)
@patch('bedrock.releasenotes.views.get_release_or_404')
@patch('bedrock.releasenotes.views.releasenotes_url')
def test_release_notes_beta_redirect(self, releasenotes_url,
get_release_or_404):
"""
Should redirect to url for beta release
"""
get_release_or_404.side_effect = [Http404, 'mock release']
releasenotes_url.return_value = '/firefox/27.0beta/releasenotes/'
response = views.release_notes(self.request, '27.0')
eq_(response.status_code, 302)
eq_(response['location'], '/firefox/27.0beta/releasenotes/')
get_release_or_404.assert_called_with('27.0beta', 'Firefox')
releasenotes_url.assert_called_with('mock release')
@patch('bedrock.releasenotes.views.get_release_or_404')
def test_system_requirements(self, get_release_or_404):
"""
Should use release returned from get_release_or_404, with a
default channel of Release and default product of Firefox,
and pass the version to l10n_utils.render
"""
views.system_requirements(self.request, '27.0.1')
get_release_or_404.assert_called_with('27.0.1', 'Firefox')
eq_(self.last_ctx['release'], get_release_or_404.return_value)
eq_(self.last_ctx['version'], '27.0.1')
eq_(self.mock_render.call_args[0][1],
'firefox/releases/system_requirements.html')
def test_release_notes_template(self):
"""
Should return correct template name based on channel
and product
"""
eq_(views.release_notes_template('Nightly', 'Firefox'),
'firefox/releases/nightly-notes.html')
eq_(views.release_notes_template('Aurora', 'Firefox'),
'firefox/releases/aurora-notes.html')
eq_(views.release_notes_template('Aurora', 'Firefox', 35),
'firefox/releases/dev-browser-notes.html')
eq_(views.release_notes_template('Aurora', 'Firefox', 34),
'firefox/releases/aurora-notes.html')
eq_(views.release_notes_template('Beta', 'Firefox'),
'firefox/releases/beta-notes.html')
eq_(views.release_notes_template('Release', 'Firefox'),
'firefox/releases/release-notes.html')
eq_(views.release_notes_template('ESR', 'Firefox'),
'firefox/releases/esr-notes.html')
eq_(views.release_notes_template('Release', 'Thunderbird'),
'thunderbird/releases/release-notes.html')
eq_(views.release_notes_template('Beta', 'Thunderbird'),
'thunderbird/releases/beta-notes.html')
eq_(views.release_notes_template('', ''),
'firefox/releases/release-notes.html')
@override_settings(DEV=False)
@patch('bedrock.releasenotes.views.get_object_or_404')
def test_non_public_release(self, get_object_or_404):
"""
Should raise 404 if not release.is_public and not settings.DEV
"""
get_object_or_404.return_value = Release(is_public=False)
with self.assertRaises(Http404):
views.get_release_or_404('42', 'Firefox')
@patch('bedrock.releasenotes.views.releasenotes_url')
def test_no_equivalent_release_url(self, mock_releasenotes_url):
"""
Should return None without calling releasenotes_url
"""
release = Mock()
release.equivalent_android_release.return_value = None
release.equivalent_desktop_release.return_value = None
eq_(views.equivalent_release_url(release), None)
eq_(mock_releasenotes_url.called, 0)
@patch('bedrock.releasenotes.views.releasenotes_url')
def test_android_equivalent_release_url(self, mock_releasenotes_url):
"""
Should return the url for the equivalent android release
"""
release = Mock()
eq_(views.equivalent_release_url(release),
mock_releasenotes_url.return_value)
mock_releasenotes_url.assert_called_with(
release.equivalent_android_release.return_value)
@patch('bedrock.releasenotes.views.releasenotes_url')
def test_desktop_equivalent_release_url(self, mock_releasenotes_url):
"""
Should return the url for the equivalent desktop release
"""
release = Mock()
release.equivalent_android_release.return_value = None
eq_(views.equivalent_release_url(release),
mock_releasenotes_url.return_value)
mock_releasenotes_url.assert_called_with(
release.equivalent_desktop_release.return_value)
def test_get_download_url_android(self):
"""
Shoud return the download link for the release.channel from
android_builds. Note that the channel names are from ship-it, so those
are different from the internal names like release, beta or alpha.
"""
store_url = 'https://play.google.com/store/apps/details?id=%s'
release = Mock(product='Firefox for Android', channel='Release')
link = views.get_download_url(release)
ok_(link.startswith(store_url % 'org.mozilla.firefox'))
release = Mock(product='Firefox for Android', channel='Beta')
link = views.get_download_url(release)
ok_(link.startswith(store_url % 'org.mozilla.firefox_beta'))
release = Mock(product='Firefox for Android', channel='Aurora')
link = views.get_download_url(release)
ok_(link.startswith(store_url % 'org.mozilla.fennec_aurora'))
def test_get_download_url_thunderbird(self):
release = Mock(product='Thunderbird')
link = views.get_download_url(release)
eq_(link, '/en-US/thunderbird/')
def test_get_download_url_thunderbird_beta(self):
release = Mock(product='Thunderbird', channel='Beta')
link = views.get_download_url(release)
eq_(link, '/en-US/thunderbird/channel/')
def test_check_url(self):
eq_(views.check_url('Firefox for Android', '45.0'),
'https://support.mozilla.org/kb/will-firefox-work-my-mobile-device')
eq_(views.check_url('Firefox for Android', '46.0'),
'/en-US/firefox/android/46.0/system-requirements/')
eq_(views.check_url('Firefox for iOS', '1.4'),
'/en-US/firefox/ios/1.4/system-requirements/')
eq_(views.check_url('Firefox', '42.0'),
'/en-US/firefox/42.0/system-requirements/')
class TestReleaseNotesIndex(TestCase):
pd_cache = caches['product-details']
def setUp(self):
self.pd_cache.clear()
@patch('bedrock.releasenotes.views.l10n_utils.render')
@patch('bedrock.releasenotes.views.firefox_desktop', firefox_desktop)
def test_relnotes_index_firefox(self, render_mock):
with self.activate('en-US'):
self.client.get(reverse('firefox.releases.index'))
releases = render_mock.call_args[0][2]['releases']
eq_(len(releases), len(firefox_desktop.firefox_history_major_releases))
eq_(releases[0][0], 36.0)
eq_(releases[0][1]['major'], '36.0')
eq_(releases[0][1]['minor'], [])
eq_(releases[3][0], 33.1)
eq_(releases[3][1]['major'], '33.1')
eq_(releases[3][1]['minor'], ['33.1.1'])
eq_(releases[4][0], 33.0)
eq_(releases[4][1]['major'], '33.0')
eq_(releases[4][1]['minor'], ['33.0.1', '33.0.2', '33.0.3'])
eq_(releases[6][0], 31.0)
eq_(releases[6][1]['major'], '31.0')
eq_(releases[6][1]['minor'],
['31.1.0', '31.1.1', '31.2.0', '31.3.0', '31.4.0', '31.5.0'])
@patch('bedrock.releasenotes.views.thunderbird_desktop', thunderbird_desktop)
def test_relnotes_index_thunderbird(self):
with self.activate('en-US'):
response = self.client.get(reverse('thunderbird.releases.index'))
doc = pq(response.content)
eq_(len(doc('a[href="0.1.html"]')), 1)
eq_(len(doc('a[href="1.5.0.2.html"]')), 1)
eq_(len(doc('a[href="../2.0.0.0/releasenotes/"]')), 1)
eq_(len(doc('a[href="../3.0.1/releasenotes/"]')), 1)
class TestNotesRedirects(TestCase):
def _test(self, url_from, url_to):
with self.activate('en-US'):
url = '/en-US' + url_from
response = self.client.get(url)
eq_(response.status_code, 302)
eq_(response['Location'], 'http://testserver/en-US' + url_to)
@patch('bedrock.releasenotes.views.firefox_desktop.latest_version',
Mock(return_value='22.0'))
def test_desktop_release_version(self):
self._test('/firefox/notes/',
'/firefox/22.0/releasenotes/')
self._test('/firefox/latest/releasenotes/',
'/firefox/22.0/releasenotes/')
@patch('bedrock.releasenotes.views.firefox_desktop.latest_version',
Mock(return_value='23.0b1'))
def test_desktop_beta_version(self):
self._test('/firefox/beta/notes/',
'/firefox/23.0beta/releasenotes/')
@patch('bedrock.releasenotes.views.firefox_desktop.latest_version',
Mock(return_value='24.0a2'))
def test_desktop_developer_version(self):
self._test('/firefox/developer/notes/',
'/firefox/24.0a2/auroranotes/')
@patch('bedrock.releasenotes.views.firefox_desktop.latest_version',
Mock(return_value='24.2.0esr'))
def test_desktop_esr_version(self):
self._test('/firefox/organizations/notes/',
'/firefox/24.2.0/releasenotes/')
@patch('bedrock.releasenotes.views.firefox_android.latest_version',
Mock(return_value='22.0'))
def test_android_release_version(self):
self._test('/firefox/android/notes/',
'/firefox/android/22.0/releasenotes/')
@patch('bedrock.releasenotes.views.firefox_android.latest_version',
Mock(return_value='23.0b1'))
def test_android_beta_version(self):
self._test('/firefox/android/beta/notes/',
'/firefox/android/23.0beta/releasenotes/')
@patch('bedrock.releasenotes.views.firefox_android.latest_version',
Mock(return_value='24.0a2'))
def test_android_aurora_version(self):
self._test('/firefox/android/aurora/notes/',
'/firefox/android/24.0a2/auroranotes/')
@patch('bedrock.releasenotes.views.firefox_ios.latest_version',
Mock(return_value='1.4'))
def test_ios_release_version(self):
self._test('/firefox/ios/notes/',
'/firefox/ios/1.4/releasenotes/')
@patch('bedrock.releasenotes.views.thunderbird_desktop.latest_version',
Mock(return_value='22.0'))
def test_thunderbird_release_version(self):
self._test('/thunderbird/notes/',
'/thunderbird/22.0/releasenotes/')
self._test('/thunderbird/latest/releasenotes/',
'/thunderbird/22.0/releasenotes/')
@patch('bedrock.releasenotes.views.thunderbird_desktop.latest_version',
Mock(return_value='41.0b1'))
def test_thunderbird_beta_version(self):
self._test('/thunderbird/beta/notes/',
'/thunderbird/41.0beta/releasenotes/')
class TestSysreqRedirect(TestCase):
def _test(self, url_from, url_to):
with self.activate('en-US'):
url = '/en-US' + url_from
response = self.client.get(url)
eq_(response.status_code, 302)
eq_(response['Location'], 'http://testserver/en-US' + url_to)
@patch('bedrock.releasenotes.views.firefox_desktop.latest_version',
Mock(return_value='22.0'))
def test_desktop_release_version(self):
self._test('/firefox/system-requirements/',
'/firefox/22.0/system-requirements/')
@patch('bedrock.releasenotes.views.firefox_desktop.latest_version',
Mock(return_value='23.0b1'))
def test_desktop_beta_version(self):
self._test('/firefox/beta/system-requirements/',
'/firefox/23.0beta/system-requirements/')
@patch('bedrock.releasenotes.views.firefox_desktop.latest_version',
Mock(return_value='24.0a2'))
def test_desktop_developer_version(self):
self._test('/firefox/developer/system-requirements/',
'/firefox/24.0a2/system-requirements/')
@patch('bedrock.releasenotes.views.firefox_desktop.latest_version',
Mock(return_value='24.2.0esr'))
def test_desktop_esr_version(self):
self._test('/firefox/organizations/system-requirements/',
'/firefox/24.0/system-requirements/')
@patch('bedrock.releasenotes.views.thunderbird_desktop.latest_version',
Mock(return_value='22.0'))
def test_thunderbird_release_version(self):
self._test('/thunderbird/system-requirements/',
'/thunderbird/22.0/system-requirements/')
self._test('/thunderbird/latest/system-requirements/',
'/thunderbird/22.0/system-requirements/')
@patch('bedrock.releasenotes.views.thunderbird_desktop.latest_version',
Mock(return_value='41.0b1'))
def test_thunderbird_beta_version(self):
self._test('/thunderbird/beta/system-requirements/',
'/thunderbird/41.0beta/system-requirements/')
| mpl-2.0 | -2,249,092,954,485,534,700 | 43.10101 | 81 | 0.633704 | false |
kirmani/lockman | MC/pio.py | 1 | 3202 | import RPi.GPIO as GPIO
import requests
import time
import threading
import thread  # low-level Python 2 threading module; provides thread.exit() used below
import os
import base64
import string
import random
import datetime
"GPIO.setmode(BOARD)"
def closeLock():
p = GPIO.PWM(12,50) #sets pin 12 to PWM and sends 50 signals per second
p.start(7.5) #starts by sending a pulse at 7.5% to center the servo
p.ChangeDutyCycle(4.5) #sends a 4.5% pulse to turn the servo CCW
time.sleep(2)
p.stop()
def openLock():
p = GPIO.PWM(12,50) #sets pin 12 to PWM and sends 50 signals per second
p.start(7.5) #starts by sending a pulse at 7.5% to center the servo
    p.ChangeDutyCycle(10.5) #sends a 10.5% pulse to turn the servo CW
time.sleep(2)
p.stop()
def id_generator(size=25, chars=string.ascii_uppercase + string.ascii_lowercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def checkStatus():
open = False
r = requests.put("http://api.codered.kirmani.io/lock/state", data = {"open": open})
GPIO.setmode(GPIO.BOARD)
GPIO.setup(12,GPIO.OUT)
r = requests.get('http://api.codered.kirmani.io/lock/list')
while True:
if exit.is_set():
thread.exit()
list = r.json()["result"]
print list
for id in list:
url = "http://api.codered.kirmani.io/lock/id/"+id
r = requests.get(url)
if id == "OVERRIDE":
action = r.json()["result"]["action"]
if action == "open":
print "WOO"
r = requests.delete(url)
if not open:
openLock()
open = True
r = requests.put("http://api.codered.kirmani.io/lock/state", data = {"open": open})
if action == "close":
print "CLOSING"
r = requests.delete(url)
if open:
closeLock()
open = False
r = requests.put("http://api.codered.kirmani.io/lock/state", data = {"open": open})
else:
status = r.json()["result"]["approved"]
waiting = r.json()["result"]["waiting"]
if waiting == False:
if status == True:
print "WOO"
r = requests.delete(url)
if not open:
openLock()
open = True
r = requests.put("http://api.codered.kirmani.io/lock/state", data = {"open": open})
if status == False:
print "BOO"
r = requests.delete(url)
r = requests.get('http://api.codered.kirmani.io/lock/list')
def checkInput():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(7, GPIO.IN)
    input = GPIO.input(7)
while True:
if exit.is_set():
thread.exit()
        input = GPIO.input(7)
        while input == True:
            input = GPIO.input(7)
#code to activate camera
timestamp = time.strftime("%d-%m-%Y_%H:%M:%S")
filename = "/home/pi/timerecord/" + timestamp + ".png"
os.system("fswebcam -d /dev/video0 -r 680x480 --no-banner " + filename)
encoded = base64.b64encode(open(filename, "rb").read())
random = id_generator()
r = requests.post("http://api.codered.kirmani.io/lock/id/" + random, data = {"image":encoded, "time": timestamp})
exit = threading.Event()
exit.clear()
status = threading.Thread(target=checkStatus)
input = threading.Thread(target=checkInput)
status.start()
input.start()
try:
while True:
        x = 1
except KeyboardInterrupt:
exit.set()
GPIO.cleanup()
| mit | 1,823,985,232,699,865,300 | 30.087379 | 115 | 0.625859 | false |
beeftornado/sentry | src/sentry/shared_integrations/client.py | 1 | 10459 | from __future__ import absolute_import
import logging
import requests
import sentry_sdk
import six
from collections import OrderedDict
from django.core.cache import cache
from bs4 import BeautifulSoup
from django.utils.functional import cached_property
from requests.exceptions import ConnectionError, Timeout, HTTPError
from sentry.http import build_session
from sentry.utils import metrics, json
from sentry.utils.hashlib import md5_text
from sentry.utils.decorators import classproperty
from .exceptions import ApiHostError, ApiTimeoutError, ApiError, UnsupportedResponseType
class BaseApiResponse(object):
text = ""
def __init__(self, headers=None, status_code=None):
self.headers = headers
self.status_code = status_code
def __repr__(self):
return u"<%s: code=%s, content_type=%s>" % (
type(self).__name__,
self.status_code,
self.headers.get("Content-Type", "") if self.headers else "",
)
@cached_property
def rel(self):
if not self.headers:
return {}
link_header = self.headers.get("Link")
if not link_header:
return {}
return {item["rel"]: item["url"] for item in requests.utils.parse_header_links(link_header)}
@classmethod
def from_response(self, response, allow_text=False):
if response.request.method == "HEAD":
return BaseApiResponse(response.headers, response.status_code)
        # XXX(dcramer): this doesn't handle leading spaces, but they're not
        # common in paths so it's ok
if response.text.startswith(u"<?xml"):
return XmlApiResponse(response.text, response.headers, response.status_code)
elif response.text.startswith("<"):
if not allow_text:
raise ValueError(u"Not a valid response type: {}".format(response.text[:128]))
elif response.status_code < 200 or response.status_code >= 300:
raise ValueError(
u"Received unexpected plaintext response for code {}".format(
response.status_code
)
)
return TextApiResponse(response.text, response.headers, response.status_code)
# Some APIs will return JSON with an invalid content-type, so we try
# to decode it anyways
if "application/json" not in response.headers.get("Content-Type", ""):
try:
data = json.loads(response.text, object_pairs_hook=OrderedDict)
except (TypeError, ValueError):
if allow_text:
return TextApiResponse(response.text, response.headers, response.status_code)
raise UnsupportedResponseType(
response.headers.get("Content-Type", ""), response.status_code
)
else:
data = json.loads(response.text, object_pairs_hook=OrderedDict)
if isinstance(data, dict):
return MappingApiResponse(data, response.headers, response.status_code)
elif isinstance(data, (list, tuple)):
return SequenceApiResponse(data, response.headers, response.status_code)
else:
raise NotImplementedError
class TextApiResponse(BaseApiResponse):
def __init__(self, text, *args, **kwargs):
self.text = text
super(TextApiResponse, self).__init__(*args, **kwargs)
class XmlApiResponse(BaseApiResponse):
def __init__(self, text, *args, **kwargs):
self.xml = BeautifulSoup(text, "xml")
super(XmlApiResponse, self).__init__(*args, **kwargs)
class MappingApiResponse(dict, BaseApiResponse):
def __init__(self, data, *args, **kwargs):
dict.__init__(self, data)
BaseApiResponse.__init__(self, *args, **kwargs)
@property
def json(self):
return self
class SequenceApiResponse(list, BaseApiResponse):
def __init__(self, data, *args, **kwargs):
list.__init__(self, data)
BaseApiResponse.__init__(self, *args, **kwargs)
@property
def json(self):
return self
class BaseApiClient(object):
base_url = None
allow_text = False
allow_redirects = None
integration_type = None
log_path = None
datadog_prefix = None
cache_time = 900
def __init__(self, verify_ssl=True, logging_context=None):
self.verify_ssl = verify_ssl
self.logging_context = logging_context
@cached_property
def logger(self):
return logging.getLogger(self.log_path)
@classproperty
def name_field(cls):
return u"%s_name" % cls.integration_type
@classproperty
def name(cls):
return getattr(cls, cls.name_field)
def get_cache_prefix(self):
return u"%s.%s.client:" % (self.integration_type, self.name)
def track_response_data(self, code, span, error=None, resp=None):
metrics.incr(
u"%s.http_response" % (self.datadog_prefix),
sample_rate=1.0,
tags={self.integration_type: self.name, "status": code},
)
try:
span.set_http_status(int(code))
except ValueError:
span.set_status(code)
span.set_tag(self.integration_type, self.name)
extra = {
self.integration_type: self.name,
"status_string": six.text_type(code),
"error": six.text_type(error)[:256] if error else None,
}
extra.update(getattr(self, "logging_context", None) or {})
self.logger.info(u"%s.http_response" % (self.integration_type), extra=extra)
def build_url(self, path):
if path.startswith("/"):
if not self.base_url:
raise ValueError(u"Invalid URL: {}".format(path))
return u"{}{}".format(self.base_url, path)
return path
def _request(
self,
method,
path,
headers=None,
data=None,
params=None,
auth=None,
json=True,
allow_text=None,
allow_redirects=None,
timeout=None,
):
if allow_text is None:
allow_text = self.allow_text
if allow_redirects is None:
allow_redirects = self.allow_redirects
if allow_redirects is None: # is still None
allow_redirects = method.upper() == "GET"
if timeout is None:
timeout = 30
full_url = self.build_url(path)
metrics.incr(
u"%s.http_request" % self.datadog_prefix,
sample_rate=1.0,
tags={self.integration_type: self.name},
)
try:
with sentry_sdk.configure_scope() as scope:
parent_span_id = scope.span.span_id
trace_id = scope.span.trace_id
except AttributeError:
parent_span_id = None
trace_id = None
with sentry_sdk.start_transaction(
op=u"{}.http".format(self.integration_type),
name=u"{}.http_response.{}".format(self.integration_type, self.name),
parent_span_id=parent_span_id,
trace_id=trace_id,
sampled=True,
) as span:
try:
with build_session() as session:
resp = getattr(session, method.lower())(
url=full_url,
headers=headers,
json=data if json else None,
data=data if not json else None,
params=params,
auth=auth,
verify=self.verify_ssl,
allow_redirects=allow_redirects,
timeout=timeout,
)
resp.raise_for_status()
except ConnectionError as e:
self.track_response_data("connection_error", span, e)
raise ApiHostError.from_exception(e)
except Timeout as e:
self.track_response_data("timeout", span, e)
raise ApiTimeoutError.from_exception(e)
except HTTPError as e:
resp = e.response
if resp is None:
self.track_response_data("unknown", span, e)
self.logger.exception(
"request.error", extra={self.integration_type: self.name, "url": full_url}
)
raise ApiError("Internal Error", url=full_url)
self.track_response_data(resp.status_code, span, e)
raise ApiError.from_response(resp, url=full_url)
self.track_response_data(resp.status_code, span, None, resp)
if resp.status_code == 204:
return {}
return BaseApiResponse.from_response(resp, allow_text=allow_text)
# subclasses should override ``request``
def request(self, *args, **kwargs):
return self._request(*args, **kwargs)
def delete(self, *args, **kwargs):
return self.request("DELETE", *args, **kwargs)
def get_cached(self, path, *args, **kwargs):
query = ""
if kwargs.get("params", None):
query = json.dumps(kwargs.get("params"), sort_keys=True)
key = self.get_cache_prefix() + md5_text(self.build_url(path), query).hexdigest()
result = cache.get(key)
if result is None:
result = self.request("GET", path, *args, **kwargs)
cache.set(key, result, self.cache_time)
return result
def get(self, *args, **kwargs):
return self.request("GET", *args, **kwargs)
def patch(self, *args, **kwargs):
return self.request("PATCH", *args, **kwargs)
def post(self, *args, **kwargs):
return self.request("POST", *args, **kwargs)
def put(self, *args, **kwargs):
return self.request("PUT", *args, **kwargs)
def head(self, *args, **kwargs):
return self.request("HEAD", *args, **kwargs)
def head_cached(self, path, *args, **kwargs):
query = ""
if kwargs.get("params", None):
query = json.dumps(kwargs.get("params"), sort_keys=True)
key = self.get_cache_prefix() + md5_text(self.build_url(path), query).hexdigest()
result = cache.get(key)
if result is None:
result = self.head(path, *args, **kwargs)
cache.set(key, result, self.cache_time)
return result
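# Illustrative sketch only (not part of the client): how a deterministic
# cache key like the one used by ``get_cached``/``head_cached`` can be
# derived.  Dumping params with sort_keys=True makes {"a": 1, "b": 2} and
# {"b": 2, "a": 1} hash identically; ``hashlib.md5`` stands in for the
# project's ``md5_text`` helper, which is assumed to behave the same way.
def _example_cache_key(prefix, url, params=None):
    import hashlib
    import json as _json
    query = _json.dumps(params, sort_keys=True) if params else ""
    return prefix + hashlib.md5((url + query).encode("utf-8")).hexdigest()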
| bsd-3-clause | 5,743,440,093,390,676,000 | 32.522436 | 100 | 0.574816 | false |
ChromiumWebApps/chromium | tools/telemetry/telemetry/core/backends/chrome/extension_dict_backend.py | 1 | 2602 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import re
from telemetry.core import extension_page
from telemetry.core.backends.chrome import inspector_backend
class ExtensionNotFoundException(Exception):
pass
class ExtensionDictBackend(object):
def __init__(self, browser_backend):
self._browser_backend = browser_backend
# Maps extension ids to ExtensionPage objects.
self._extension_dict = {}
def __getitem__(self, extension_id):
extension_object = self._extension_dict.get(extension_id)
if not extension_object:
extension_object = self._CreateExtensionObject(extension_id)
assert extension_object
self._extension_dict[extension_id] = extension_object
return extension_object
def __contains__(self, extension_id):
return extension_id in self.GetExtensionIds()
@staticmethod
def _ExtractExtensionId(url):
m = re.match(r"(chrome-extension://)([^/]+)", url)
assert m
return m.group(2)
@staticmethod
def _GetExtensionId(extension_info):
if 'url' not in extension_info:
return None
return ExtensionDictBackend._ExtractExtensionId(extension_info['url'])
def _CreateExtensionObject(self, extension_id):
extension_info = self._FindExtensionInfo(extension_id)
if not extension_info or not 'webSocketDebuggerUrl' in extension_info:
raise ExtensionNotFoundException()
return extension_page.ExtensionPage(
extension_id,
extension_info['url'],
self._CreateInspectorBackendForDebuggerUrl(
extension_info['webSocketDebuggerUrl']))
def _CreateInspectorBackendForDebuggerUrl(self, debugger_url):
return inspector_backend.InspectorBackend(self._browser_backend.browser,
self._browser_backend,
debugger_url)
def _FindExtensionInfo(self, extension_id):
for extension_info in self.GetExtensionInfoList():
if self._GetExtensionId(extension_info) == extension_id:
return extension_info
return None
def GetExtensionInfoList(self, timeout=None):
data = self._browser_backend.Request('', timeout=timeout)
return self._FilterExtensions(json.loads(data))
def _FilterExtensions(self, all_pages):
return [page_info for page_info in all_pages
if page_info['url'].startswith('chrome-extension://')]
def GetExtensionIds(self):
return map(self._GetExtensionId, self.GetExtensionInfoList())
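# Illustrative sketch (standalone, sample id below is made up): the extension
# id is the host portion of a chrome-extension:// URL, which is exactly what
# the regex in _ExtractExtensionId captures.
def _ExampleExtractExtensionId(url):
  m = re.match(r'(chrome-extension://)([^/]+)', url)
  return m.group(2) if m else None
# _ExampleExtractExtensionId('chrome-extension://abcdefgh/bg.html') -> 'abcdefgh'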
| bsd-3-clause | -441,002,597,549,063,600 | 33.693333 | 76 | 0.701768 | false |
mishbahr/django-users2 | users/views.py | 1 | 6157 | from django.contrib import messages
from django.contrib.auth import get_user_model, login
from django.urls import reverse
from django.shortcuts import redirect, resolve_url
from django.template.response import TemplateResponse
from django.utils.translation import ugettext as _
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from .compat import urlsafe_base64_decode
from .conf import settings
from .signals import user_activated, user_registered
from .utils import EmailActivationTokenGenerator, send_activation_email
try:
from django.contrib.sites.shortcuts import get_current_site
except ImportError: # pragma: no cover
from django.contrib.sites.models import get_current_site
if settings.USERS_SPAM_PROTECTION: # pragma: no cover
from .forms import RegistrationFormHoneypot as RegistrationForm
else:
from .forms import RegistrationForm
@csrf_protect
@never_cache
def register(request,
template_name='users/registration_form.html',
activation_email_template_name='users/activation_email.html',
activation_email_subject_template_name='users/activation_email_subject.html',
activation_email_html_template_name=None,
registration_form=RegistrationForm,
registered_user_redirect_to=None,
post_registration_redirect=None,
activation_from_email=None,
current_app=None,
extra_context=None):
if registered_user_redirect_to is None:
registered_user_redirect_to = getattr(settings, 'LOGIN_REDIRECT_URL')
if request.user.is_authenticated:
return redirect(registered_user_redirect_to)
if not settings.USERS_REGISTRATION_OPEN:
return redirect(reverse('users_registration_closed'))
if post_registration_redirect is None:
post_registration_redirect = reverse('users_registration_complete')
if request.method == 'POST':
form = registration_form(request.POST)
if form.is_valid():
user = form.save()
if settings.USERS_AUTO_LOGIN_AFTER_REGISTRATION:
user.backend = 'django.contrib.auth.backends.ModelBackend'
login(request, user)
elif not user.is_active and settings.USERS_VERIFY_EMAIL:
opts = {
'user': user,
'request': request,
'from_email': activation_from_email,
'email_template': activation_email_template_name,
'subject_template': activation_email_subject_template_name,
'html_email_template': activation_email_html_template_name,
}
send_activation_email(**opts)
user_registered.send(sender=user.__class__, request=request, user=user)
return redirect(post_registration_redirect)
else:
form = registration_form()
current_site = get_current_site(request)
context = {
'form': form,
'site': current_site,
'site_name': current_site.name,
'title': _('Register'),
}
if extra_context is not None: # pragma: no cover
context.update(extra_context)
return TemplateResponse(request, template_name, context)
def registration_closed(request,
template_name='users/registration_closed.html',
current_app=None,
extra_context=None):
context = {
'title': _('Registration closed'),
}
if extra_context is not None: # pragma: no cover
context.update(extra_context)
return TemplateResponse(request, template_name, context)
def registration_complete(request,
template_name='users/registration_complete.html',
current_app=None,
extra_context=None):
context = {
'login_url': resolve_url(settings.LOGIN_URL),
'title': _('Registration complete'),
}
if extra_context is not None: # pragma: no cover
context.update(extra_context)
return TemplateResponse(request, template_name, context)
@never_cache
def activate(request,
uidb64=None,
token=None,
template_name='users/activate.html',
post_activation_redirect=None,
current_app=None,
extra_context=None):
context = {
        'title': _('Account activation'),
}
if post_activation_redirect is None:
post_activation_redirect = reverse('users_activation_complete')
UserModel = get_user_model()
assert uidb64 is not None and token is not None
token_generator = EmailActivationTokenGenerator()
try:
uid = urlsafe_base64_decode(uidb64)
user = UserModel._default_manager.get(pk=uid)
except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist):
user = None
if user is not None and token_generator.check_token(user, token):
user.activate()
user_activated.send(sender=user.__class__, request=request, user=user)
if settings.USERS_AUTO_LOGIN_ON_ACTIVATION:
user.backend = 'django.contrib.auth.backends.ModelBackend' # todo - remove this hack
login(request, user)
messages.info(request, 'Thanks for registering. You are now logged in.')
return redirect(post_activation_redirect)
else:
title = _('Email confirmation unsuccessful')
context = {
'title': title,
}
if extra_context is not None: # pragma: no cover
context.update(extra_context)
return TemplateResponse(request, template_name, context)
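# Illustrative sketch (stdlib only, not part of this app): the uidb64 value
# consumed by ``activate`` is a URL-safe base64 encoding of the user's
# primary key.  Django's urlsafe_base64_encode/decode helpers are assumed to
# behave like this stripped-down round trip.
def _example_uid_roundtrip(pk):
    import base64
    encoded = base64.urlsafe_b64encode(str(pk).encode('utf-8')).rstrip(b'=')
    padded = encoded + b'=' * (-len(encoded) % 4)
    return encoded.decode('ascii'), base64.urlsafe_b64decode(padded).decode('utf-8')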
def activation_complete(request,
template_name='users/activation_complete.html',
current_app=None,
extra_context=None):
context = {
'title': _('Activation complete'),
}
if extra_context is not None: # pragma: no cover
context.update(extra_context)
return TemplateResponse(request, template_name, context)
| bsd-3-clause | -3,139,041,058,954,845,000 | 35.431953 | 97 | 0.637486 | false |
Arabidopsis-Information-Portal/GAG | test/sequence_tests.py | 1 | 16369 | #!/usr/bin/env python
# coding=utf-8
import unittest
from mock import Mock
from src.sequence import Sequence, overlap
class TestSequence(unittest.TestCase):
def setUp(self):
self.seq1 = Sequence("seq1", "GATTACA")
def add_mock_gene(self, name="foo_gene"):
mockgene = Mock()
mockgene.identifier = name
mockgene.indices = [2, 4]
mockgene.death_flagged = False
mockgene.to_mrna_fasta.return_value = "mockgene_to_mrna_fasta\n"
mockgene.to_cds_fasta.return_value = "mockgene_to_cds_fasta\n"
mockgene.to_protein_fasta.return_value = "mockgene_to_protein_fasta\n"
mockgene.get_valid_mrnas = Mock(return_value=[])
self.seq1.add_gene(mockgene)
def add_mock_gene_with_1_mrna(self, name):
mockgene = Mock()
mockgene.indices = [1, 10]
mockgene.identifier = name
mockgene.death_flagged = False
mockgene.mrnas = [Mock()]
mockgene.mrnas[0].identifier = name + "-RA"
mockgene.mrnas[0].cds = Mock()
mockgene.mrnas[0].cds.identifier = [name + "-RA:CDS"]
mockgene.mrnas[0].cds.length = Mock(return_value=5)
mockgene.mrnas[0].exon = Mock()
mockgene.mrnas[0].length = Mock(return_value=2)
mockgene.get_valid_mrnas = Mock(return_value=mockgene.mrnas)
mockgene.length = Mock(return_value=20)
mockgene.get_partial_info.return_value = {"complete": 1, "start_no_stop": 0, "stop_no_start": 1,
"no_stop_no_start": 1}
mockgene.get_num_exons.return_value = 5
mockgene.get_num_introns.return_value = 4
mockgene.get_longest_exon.return_value = 20
mockgene.get_longest_intron.return_value = 20
mockgene.get_shortest_exon.return_value = 8
mockgene.get_shortest_intron.return_value = 8
mockgene.get_total_exon_length.return_value = 15
mockgene.get_total_intron_length.return_value = 15
self.seq1.add_gene(mockgene)
def add_mock_gene_with_2_mrnas(self, name):
mockgene = Mock()
mockgene.indices = [20, 30]
mockgene.identifier = name
mockgene.death_flagged = False
mockgene.mrnas = [Mock(), Mock()]
mockgene.mrnas[0].identifier = name + "-RA"
mockgene.mrnas[0].cds = None
mockgene.mrnas[0].exon = None
mockgene.mrnas[0].length = Mock(return_value=5)
mockgene.mrnas[1].identifier = name + "-RB"
mockgene.mrnas[1].cds = Mock()
mockgene.mrnas[1].cds.identifier = [name + "-RB:CDS"]
mockgene.mrnas[1].cds.length = Mock(return_value=3)
mockgene.mrnas[1].exon = Mock()
mockgene.mrnas[1].length = Mock(return_value=2)
mockgene.get_valid_mrnas = Mock(return_value=mockgene.mrnas)
mockgene.length = Mock(return_value=10)
mockgene.get_partial_info.return_value = {"complete": 0, "start_no_stop": 1, "stop_no_start": 1,
"no_stop_no_start": 1}
mockgene.get_num_exons.return_value = 4
mockgene.get_num_introns.return_value = 3
mockgene.get_longest_exon.return_value = 10
mockgene.get_longest_intron.return_value = 10
mockgene.get_shortest_exon.return_value = 5
mockgene.get_shortest_intron.return_value = 5
mockgene.get_total_exon_length.return_value = 25
mockgene.get_total_intron_length.return_value = 25
self.seq1.add_gene(mockgene)
def test_string(self):
expected = "Sequence seq1 of length 7 containing 0 genes\n"
self.assertEquals(expected, str(self.seq1))
def test_how_many_Ns_forward(self):
badseq = Sequence('seq1', 'NNnNNGATTACA')
self.assertEqual(5, badseq.how_many_n_forward(1))
def test_how_many_Ns_forward_returns_zero_if_no_Ns(self):
badseq = Sequence('seq2', 'GATTACA')
self.assertEqual(0, badseq.how_many_n_forward(3))
def test_how_many_Ns_backward(self):
badseq = Sequence('seq3', 'gattaNnN')
self.assertEqual(3, badseq.how_many_n_backward(8))
def test_how_many_Ns_backward_returns_zero_if_no_Ns(self):
self.assertEqual(0, self.seq1.how_many_n_backward(3))
def test_number_of_gagflags(self):
gene1, gene2 = Mock(), Mock()
gene1.number_of_gagflags.return_value = 2
gene2.number_of_gagflags.return_value = 1
self.seq1.genes = [gene1, gene2]
self.assertEquals(3, self.seq1.number_of_gagflags())
def test_remove_terminal_ns_beginning(self):
badseq = Sequence('badseq', 'nNGATTACA')
mockgene = Mock()
mockgene.indices = [3, 6]
badseq.genes = [mockgene]
badseq.remove_terminal_ns()
self.assertEquals("GATTACA", badseq.bases)
def test_remove_terminal_ns_end(self):
badseq = Sequence('badseq', 'GATTACAnNNn')
mockgene = Mock()
mockgene.indices = [2, 6]
badseq.genes = [mockgene]
badseq.remove_terminal_ns()
self.assertEquals("GATTACA", badseq.bases)
def test_remove_terminal_ns_beginning_and_end(self):
badseq = Sequence('badseq', 'nnGATTACAnNNn')
mockgene = Mock()
mockgene.indices = [3, 8]
badseq.genes = [mockgene]
badseq.remove_terminal_ns()
self.assertEquals("GATTACA", badseq.bases)
def test_add_gene(self):
self.add_mock_gene()
self.assertEqual(1, len(self.seq1.genes))
def test_remove_gene(self):
self.add_mock_gene('foo_gene')
self.assertEqual(1, len(self.seq1.genes))
self.assertEqual(0, len(self.seq1.removed_genes))
self.seq1.remove_gene('foo_gene')
self.assertEqual(0, len(self.seq1.genes))
self.assertEqual(1, len(self.seq1.removed_genes))
def test_remove_genes_from_list(self):
self.add_mock_gene('foo_gene')
self.add_mock_gene('bar_gene')
self.add_mock_gene('zub_gene')
bad_genes = ["zub_gene", "foo_gene"]
self.assertEquals(3, len(self.seq1.genes))
removed_genes = self.seq1.remove_genes_from_list(bad_genes)
self.assertEquals(2, len(removed_genes))
self.assertEquals(1, len(self.seq1.genes))
self.assertEquals(2, len(self.seq1.removed_genes))
def test_remove_genes_from_list_bad_list(self):
self.add_mock_gene('foo_gene')
self.add_mock_gene('bar_gene')
self.add_mock_gene('zub_gene')
bad_genes = ["nice_gene", "bacon", 28]
self.assertEquals(3, len(self.seq1.genes))
self.seq1.remove_genes_from_list(bad_genes) # nothing should happen
self.assertEquals(3, len(self.seq1.genes))
def test_remove_mrnas_from_list(self):
self.seq1.genes = [Mock()]
self.seq1.genes[0].remove_mrnas_from_list.return_value = ["foo"]
bad_mrnas = ["foo_mrna", "bar_mrna"]
removed = self.seq1.remove_mrnas_from_list(bad_mrnas)
self.assertEquals(["foo"], removed)
self.seq1.genes[0].remove_mrnas_from_list.assert_called_with(bad_mrnas)
def test_remove_empty_genes(self):
self.add_mock_gene('foo_gene')
self.add_mock_gene('bar_gene')
self.add_mock_gene('zub_gene')
self.seq1.genes[0].mrnas = [Mock()]
self.seq1.genes[1].mrnas = []
self.seq1.genes[2].mrnas = []
self.assertEquals(3, len(self.seq1.genes))
removed_genes = self.seq1.remove_empty_genes()
self.assertEquals(2, len(removed_genes))
self.assertEquals(1, len(self.seq1.genes))
self.assertEquals(2, len(self.seq1.removed_genes))
def test_remove_empty_mrnas(self):
self.seq1.genes = [Mock(), Mock()]
self.seq1.genes[0].remove_empty_mrnas.return_value = []
self.seq1.genes[1].remove_empty_mrnas.return_value = []
self.seq1.remove_empty_mrnas()
self.seq1.genes[0].remove_empty_mrnas.assert_called_with()
self.seq1.genes[1].remove_empty_mrnas.assert_called_with()
def test_remove_empty_mrnas_returns_list(self):
gene = Mock()
gene.remove_empty_mrnas.return_value = [1, 2] # should be list of mRNAs but whatever
self.seq1.genes = [gene]
removed_mrnas = self.seq1.remove_empty_mrnas()
self.assertEquals([1, 2], removed_mrnas)
def test_remove_empty_mrnas_returns_list_multiple_genes(self):
gene1 = Mock()
gene1.remove_empty_mrnas.return_value = [1, 2]
gene2 = Mock()
gene2.remove_empty_mrnas.return_value = [3, 4]
self.seq1.genes = [gene1, gene2]
removed_mrnas = self.seq1.remove_empty_mrnas()
self.assertEquals([1, 2, 3, 4], removed_mrnas)
def test_get_gene_ids(self):
self.seq1.genes = [Mock(), Mock()]
self.seq1.genes[0].identifier = "foo gene"
self.seq1.genes[1].identifier = "bar gene"
expected = ["foo gene", "bar gene"]
self.assertEquals(self.seq1.get_gene_ids(), expected)
def test_get_mrna_ids(self):
self.seq1.genes = [Mock(), Mock()]
self.seq1.genes[0].get_mrna_ids.return_value = ["mrna1", "mrna2"]
self.seq1.genes[1].get_mrna_ids.return_value = ["mrna3", "mrna4"]
expected = ["mrna1", "mrna2", "mrna3", "mrna4"]
self.assertEquals(self.seq1.get_mrna_ids(), expected)
def test_trim_region(self):
self.assertEquals("GATTACA", self.seq1.bases)
self.seq1.trim_region(1, 4)
self.assertEquals("ACA", self.seq1.bases)
def test_trim_region_removes_gene_contained_in_trimmed_region(self):
self.add_mock_gene()
self.assertEquals(1, len(self.seq1.genes))
self.seq1.trim_region(1, 3)
self.assertEquals(0, len(self.seq1.genes))
def test_add_annotations_from_list_adds_to_mrna(self):
gene = Mock()
mrna = Mock()
self.seq1.genes = [gene]
gene.mrnas = [mrna]
gene.identifier = "foo_gene"
gene.contains_mrna.return_value = True
anno_list = [["foo_mrna", "Dbxref", "PFAM:0001"]]
self.seq1.add_annotations_from_list(anno_list)
gene.add_mrna_annotation.assert_called_with("foo_mrna", "Dbxref", "PFAM:0001", feat_type=None)
def test_add_annotations_from_list_adds_to_gene(self):
gene = Mock()
self.seq1.genes = [gene]
gene.identifier = "foo_gene"
anno_list = [["foo_gene", "name", "ABC123"], ["bar_gene", "name", "XYZ789"]]
self.seq1.add_annotations_from_list(anno_list)
self.assertEquals("ABC123", gene.name)
def test_get_subseq(self):
self.assertEquals("ATTA", self.seq1.get_subseq(2, 5))
def test_get_cds_partial_info(self):
self.add_mock_gene_with_1_mrna("foo_gene1")
self.add_mock_gene_with_2_mrnas("foo_gene2")
partial_info = self.seq1.get_cds_partial_info()
self.assertEquals(1, partial_info["CDS: complete"])
def test_get_contained_genes(self):
fake_gene0 = Mock()
fake_gene0.indices = [0, 10]
self.seq1.add_gene(fake_gene0)
fake_gene1 = Mock()
fake_gene1.indices = [1, 9]
self.seq1.add_gene(fake_gene1)
fake_gene2 = Mock()
fake_gene2.indices = [0, 10]
self.seq1.add_gene(fake_gene2)
fake_gene3 = Mock()
fake_gene3.indices = [5, 15]
self.seq1.add_gene(fake_gene3)
fake_gene4 = Mock()
fake_gene4.indices = [20, 30]
self.seq1.add_gene(fake_gene4)
contained = self.seq1.get_contained_genes()
self.assertEqual(contained, [fake_gene1])
def test_overlap(self):
cases = [
([1, 100], [25, 75], True), # 2nd fully enclosed in 1st
([25, 75], [1, 100], True), # 1st fully enclosed in 2nd
([1, 100], [50, 150], True), # 2nd starts before 1st ends
([50, 150], [1, 100], True), # 1st starts before 2nd ends
([1, 100], [100, 200], True), # adjacent, shaired endpoint
([100, 1], [75, 25], True), # 2nd fully enclosed in 1st, indicies in reverse order
([1, 100], [101, 200], False), # adjacent, not overlaping
([101, 200], [1, 100], False) # reversed order, not overlaping
]
for case in cases:
indices1, indices2, expected = case
self.assertEqual(overlap(indices1, indices2), expected)
def test_get_overlapping_genes(self):
fake_gene0 = Mock()
fake_gene0.indices = [0, 10]
self.seq1.add_gene(fake_gene0)
fake_gene1 = Mock()
fake_gene1.indices = [1, 9]
self.seq1.add_gene(fake_gene1)
fake_gene2 = Mock()
fake_gene2.indices = [0, 10]
self.seq1.add_gene(fake_gene2)
fake_gene3 = Mock()
fake_gene3.indices = [5, 15]
self.seq1.add_gene(fake_gene3)
fake_gene4 = Mock()
fake_gene4.indices = [20, 30]
self.seq1.add_gene(fake_gene4)
contained = self.seq1.get_overlapping_genes()
self.assertTrue(fake_gene0 in contained)
self.assertTrue(fake_gene1 in contained)
self.assertTrue(fake_gene2 in contained)
self.assertTrue(fake_gene3 in contained)
def test_cds_to_gff(self):
mockgene = Mock()
mockgene.contains_mrna.return_value = True
self.seq1.genes = [mockgene]
self.seq1.cds_to_gff("foo_mrna")
mockgene.cds_to_gff.assert_called_with("seq1", "foo_mrna")
def test_cds_to_tbl(self):
mockgene = Mock()
mockgene.contains_mrna.return_value = True
self.seq1.genes = [mockgene]
self.seq1.cds_to_tbl("foo_mrna")
mockgene.cds_to_tbl.assert_called_with("foo_mrna")
def test_to_mrna_fasta(self):
self.add_mock_gene()
expected = "mockgene_to_mrna_fasta\n"
self.assertEquals(expected, self.seq1.to_mrna_fasta())
def test_to_cds_fasta(self):
self.add_mock_gene()
expected = "mockgene_to_cds_fasta\n"
self.assertEquals(expected, self.seq1.to_cds_fasta())
def test_to_protein_fasta(self):
self.add_mock_gene()
expected = "mockgene_to_protein_fasta\n"
self.assertEquals(expected, self.seq1.to_protein_fasta())
def test_to_tbl(self):
self.add_mock_gene()
self.seq1.genes[0].to_tbl.return_value = "mockgene to tbl"
tbl = self.seq1.to_tbl()
expected = ">Feature seq1\n"
expected += "1\t7\tREFERENCE\n"
expected += "\t\t\tPBARC\t12345\n"
expected += "mockgene to tbl"
self.assertEquals(tbl, expected)
def test_stats(self):
self.add_mock_gene_with_1_mrna("foo_gene1")
self.add_mock_gene_with_2_mrnas("foo_gene2")
stats = self.seq1.stats()
self.assertEquals(stats["Total sequence length"], 7)
self.assertEquals(stats["Number of genes"], 2)
self.assertEquals(stats["Number of mRNAs"], 3)
self.assertEquals(stats["Number of exons"], 9)
self.assertEquals(stats["Number of introns"], 7)
self.assertEquals(stats["Number of CDS"], 2)
self.assertEquals(stats["Overlapping genes"], 0)
self.assertEquals(stats["Contained genes"], 0)
self.assertEquals(stats["CDS: complete"], 1)
self.assertEquals(stats["CDS: start, no stop"], 1)
self.assertEquals(stats["CDS: stop, no start"], 2)
self.assertEquals(stats["CDS: no stop, no start"], 2)
self.assertEquals(stats["Longest gene"], 20)
self.assertEquals(stats["Longest mRNA"], 5)
self.assertEquals(stats["Longest exon"], 20)
self.assertEquals(stats["Longest intron"], 20)
self.assertEquals(stats["Longest CDS"], 5)
self.assertEquals(stats["Shortest gene"], 10)
self.assertEquals(stats["Shortest mRNA"], 2)
self.assertEquals(stats["Shortest exon"], 5)
self.assertEquals(stats["Shortest intron"], 5)
self.assertEquals(stats["Shortest CDS"], 3)
self.assertEquals(stats["Total gene length"], 30)
self.assertEquals(stats["Total mRNA length"], 9)
self.assertEquals(stats["Total exon length"], 40)
self.assertEquals(stats["Total intron length"], 40)
self.assertEquals(stats["Total CDS length"], 8)
def suite():
_suite = unittest.TestSuite()
_suite.addTest(unittest.makeSuite(TestSequence))
return _suite
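# Illustrative sketch (standalone): the truth table exercised by
# test_overlap corresponds to the classic closed-interval test, after
# normalising the indices because GAG allows them in reverse order.  This is
# a hypothetical re-sketch, not the project's overlap().
def _example_overlap(a, b):
    lo1, hi1 = min(a), max(a)
    lo2, hi2 = min(b), max(b)
    return lo1 <= hi2 and lo2 <= hi1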
if __name__ == '__main__':
unittest.main()
| mit | 5,033,641,834,612,063,000 | 40.231738 | 104 | 0.610789 | false |
JustinTulloss/harmonize.fm | masterapp/masterapp/model/user.py | 1 | 22317 | # Justin Tulloss
#
# Putting user in its own file since it's huge
import logging
from pylons import cache, request, session, c
from pylons.templating import render
from decorator import decorator
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Table, sql
from sqlalchemy.sql import func, select, join, or_, and_
from sqlalchemy.orm import relation, join, synonym, aliased
from datetime import datetime
from . import (
metadata,
Session,
songs_table,
artists_table,
playlists_table,
spotlights_table,
Artist,
Album,
Song,
SongOwner,
RemovedOwner,
Playlist,
Spotlight,
SpotlightComment,
BlogEntry,
SongStat,
Recommendation
)
from facebook.wsgi import facebook
from facebook import FacebookError
from masterapp.lib import fblogin
from masterapp.lib.fbaccess import fbaccess
from operator import itemgetter, attrgetter
import time
log = logging.getLogger(__name__)
Base = declarative_base(metadata=metadata)
class User(Base):
"""
User class that abstracts away all information that deals with a user. It
pulls that data from whereever it might live, and takes care of all caching
and refetching of that data as well.
At the risk of being inconsistent, this is also the first mapped class to
take advantage of sqlalchemy's declarative extension, which is included with
sqlalchemy .5
"""
# Declarative constructs
__table__ = Table("users", Base.metadata, autoload=True)
__mapper_args__ = {'exclude_properties': ['nowplayingid', 'name']}
_nowplayingid = __table__.c.nowplayingid
_name = __table__.c.name
playlists = relation(Playlist, order_by=playlists_table.c.name)
fbid = None
fbinfo = None
listeningto = None
fbcache = None
fbfriendscache = None
fballfriendscache = None
present_mode = False
def __init__(self, fbid, **kws):
Base.__init__(self, **kws)
self.fbid = fbid
self.premium = False
def personal_cache(type=None, expiretime=None, addsession = False):
def wrapper(func, self, *args, **kwargs):
c = cache.get_cache('%s.%s' %
(func.__module__, func.__name__))
funcargs = {
'key': self.id,
'createfunc': lambda: func(self, *args, **kwargs)
}
if type:
funcargs['type'] = type
if expiretime:
funcargs['expiretime'] = expiretime
val = c.get_value(**funcargs)
if addsession:
if hasattr(val, '__iter__'):
for r in xrange(0, len(val)):
val[r] = Session.merge(val[r], dont_load=True)
else:
val = Session.merge(val, dont_load=True)
return val
return decorator(wrapper)
@decorator
def fbfriends(func, self, *args, **kwargs):
self._setup_fbfriends_cache()
self._fbfriends = self.fbfriendscache.get_value(
key = self.fbid,
expiretime = self._fbexpiration,
createfunc = self._get_fbfriends
)
try:
return func(self, *args, **kwargs)
except:
# Try invalidating the cache
self.fbfriendscache.remove_value(self.fbid)
self._setup_fbfriends_cache()
self._fbfriends = self.fbfriendscache.get_value(self.fbid,
expiretime = self._fbexpiration,
createfunc = self._get_fbfriends
)
return func(self, *args, **kwargs)
@decorator
def fballfriends(func, self, *args, **kwargs):
self._setup_fballfriends_cache()
self._fballfriends = self.fballfriendscache.get_value(self.fbid,
expiretime = self._fbexpiration,
createfunc = self._get_fballfriends
)
try:
return func(self, *args, **kwargs)
except:
# Try invalidating the cache
self.fballfriendscache.remove_value(self.fbid)
self._setup_fballfriends_cache()
self._fballfriends = self.fballfriendscache.get_value(
key = self.fbid,
expiretime = self._fbexpiration,
createfunc = self._get_fballfriends
)
return func(self, *args, **kwargs)
@decorator
    def fbattr(func, self, *args, **kwargs):
self._setup_fbinfo_cache()
self.fbinfo = self.fbcache.get_value(
key = self.fbid,
expiretime = self._fbexpiration,
createfunc = self._get_fbinfo
)
try:
return func(self, *args, **kwargs)
except:
self.fbcache.remove_value(self.fbid)
self.fbcache[self.fbid] = self._get_fbinfo()
self.fbinfo = self.fbcache.get_value(
key = self.fbid,
expiretime = self._fbexpiration,
createfunc = self._get_fbinfo
)
return func(self, *args, **kwargs)
def _get_caches(self):
self.fbcache = cache.get_cache('fbprofile')
self.fbfriendscache = cache.get_cache('fbfriends')
self.fballfriendscache = cache.get_cache('fballfriends')
# Facebook session_key_expires is not set for some reason
#self._fbexpiration = facebook.session_key_expires - time.time()
self._fbexpiration = 24*60*60 #24 hours
def _setup_fbinfo_cache(self):
if not self.fbcache:
self._get_caches()
def _setup_fbfriends_cache(self):
if not self.fbfriendscache:
self._get_caches()
def _setup_fballfriends_cache(self):
if not self.fballfriendscache:
self._get_caches()
@fbaccess
def _get_fbinfo(self):
fields = [
'name',
'first_name',
'pic',
'pic_big',
'pic_square',
'music',
'sex',
'has_added_app'
]
info = facebook.users.getInfo(self.fbid, fields=fields)[0]
return info
@fbaccess
def _get_fbfriends(self):
olduid = facebook.uid
oldsession = facebook.session_key
if self.fbid != int(facebook.uid):
facebook.uid = unicode(self.fbid)
facebook.session_key = self.fbsession
log.debug("Querying for wrong user's friends, trying to sub in their session")
try:
try:
ids = facebook.friends.getAppUsers()
except FacebookError, e:
if e.code == 102:
if oldsession != facebook.session_key:
return [] #XXX: This is bad, but it fixes errors
if len(ids) == 0:
ids = []
if session.get('present') == True:
ids.extend([1909354, 1908861])
# I'm banking on caches in a big way here. I'm assuming that the vast
# majority of additional facebook information will be cached per user,
# so when we're actually accessing the attributes of these users 1 by 1,
# it won't be too expensive.
friendor = or_()
if ids:
for id in ids:
friendor.append(User.fbid == id)
users = Session.query(User).filter(friendor).order_by(User._name)
else:
return []
finally:
facebook.uid = olduid
facebook.session_key = oldsession
return users.all()
@fbaccess
def _get_fballfriends(self):
ids = facebook.friends.get()
users = facebook.users.getInfo(ids)
return sorted(users, key=itemgetter('name'))
@fbattr
def get_name(self):
if self._name != self.fbinfo['name']:
self._name = self.fbinfo['name']
Session.add(self)
Session.commit()
return self._name
name = property(get_name)
@fbattr
def get_firstname(self):
return self.fbinfo['first_name']
firstname = property(get_firstname)
@fbattr
def get_picture(self):
return self.fbinfo['pic']
picture = property(get_picture)
@fbattr
def get_bigpicture(self):
return self.fbinfo['pic_big']
bigpicture = property(get_bigpicture)
@fbattr
def get_swatch(self):
return self.fbinfo['pic_square']
swatch = property(get_swatch)
@fbattr
def get_musictastes(self):
return self.fbinfo['music']
musictastes = property(get_musictastes)
@fbattr
def get_sex(self):
return self.fbinfo['sex']
sex = property(get_sex)
@fbattr
def get_hasfbapp(self):
return self.fbinfo['has_added_app']
hasfbapp = property(get_hasfbapp)
def are_friends(self, user):
return user in self.friends
@fbfriends
def get_friends(self):
if self._fbfriends:
for i in xrange(0, len(self._fbfriends)):
self._fbfriends[i]= Session.merge(self._fbfriends[i], dont_load=True)
return self._fbfriends
else:
return []
friends = property(get_friends)
@fballfriends
def get_all_friends(self):
return self._fballfriends
allfriends = property(get_all_friends)
def is_friends_with(self, someguy):
"""
Tells you if a user is friends with another user.
"""
if isinstance(someguy, User):
if someguy.id == self.id:
return True
else:
for friend in self.friends:
if friend.id == someguy.id:
return True
return False
else:
if someguy['uid'] == self.fbid:
return True
else:
for friend in self.friends:
if friend.fbid == someguy['uid']:
return True
return False
@personal_cache(expiretime=600, type='memory')
def get_songcount(self):
count = Session.query(func.sum(AlbumCounts.songcount).label('songs')).\
filter(AlbumCounts.userid == self.id).first().songs
if count:
return int(count)
else:
return 0
songcount = property(get_songcount)
@personal_cache(expiretime=600, type='memory')
def get_albumcount(self):
return Session.query(func.count(AlbumCounts.albumid).label('albums')).\
filter(AlbumCounts.userid == self.id).first().albums
albumcount = property(get_albumcount)
def get_nowplaying(self):
return self._nowplaying
def set_nowplaying(self, song):
self._nowplayingid = song.id
stats = Session.query(SongStat).\
filter(SongStat.song == song).\
filter(SongStat.user == self)
if session.has_key('src'):
stats = stats.filter(SongStat.source == session['src'])
stats = stats.first()
if not stats:
stats = SongStat(user = self, song = song)
stats.playcount = stats.playcount + 1
stats.lastplayed = datetime.now()
if session.has_key('src'):
stats.source = session['src']
Session.add(stats)
nowplaying = property(get_nowplaying,set_nowplaying)
def get_url(self):
return 'http://%s/player#/people/profile/%d' % (request.host, self.id)
url = property(get_url)
def get_top_10_artists(self):
totalcount = Session.query(Artist.id, Artist.name,
func.sum(SongStat.playcount).label('totalcount')
)
totalcount = totalcount.join([Artist.songs, SongStat])
totalcount = totalcount.filter(SongStat.uid == self.id)
# this excludes any songs listened to on friend radio:
totalcount = totalcount.filter(or_(
SongStat.source == SongStat.FROM_OWN_LIBRARY,
SongStat.source == SongStat.FROM_BROWSE,
SongStat.source == SongStat.FROM_SPOTLIGHT,
SongStat.source == None))
totalcount = totalcount.group_by(Artist.id)
totalcount = totalcount.order_by(sql.desc('totalcount')).limit(10)
return totalcount.all()
top_10_artists = property(get_top_10_artists)
@personal_cache(expiretime=600, type='memory', addsession=True)
def get_feed_entries(self):
max_count=20
entries = Session.query(BlogEntry)[:max_count]
myor = or_()
for friend in self.friends:
myor.append(Spotlight.uid == friend.id)
if len(myor)>0:
entries.extend(Session.query(Spotlight).filter(
and_(myor, Spotlight.active==True)).\
order_by(sql.desc(Spotlight.timestamp))\
[:max_count])
commentor = or_()
spotlightor = or_()
for friend in self.friends:
commentor.append(SpotlightComment.uid == friend.id)
spotlightor.append(Spotlight.uid == friend.id)
if len(commentor)>0 and len(spotlightor)>0:
entries.extend(Session.query(SpotlightComment).\
join((Spotlight, SpotlightComment.spotlight)).\
filter(and_(
SpotlightComment.uid!=session['userid'],
or_(Spotlight.uid==session['userid'],
and_(commentor, spotlightor)),
Spotlight.active == True)).\
order_by(sql.desc(SpotlightComment.timestamp))[:max_count])
entries.extend(Session.query(Recommendation).\
filter(and_(
Recommendation.recommendeefbid == self.fbid,
Recommendation.active == True))[:max_count])
def sort_by_timestamp(x, y):
if x.timestamp == None:
if y.timestamp == None:
return 0
return 1
elif y.timestamp == None:
return -1
elif x.timestamp > y.timestamp:
return -1
elif x.timestamp == y.timestamp:
return 0
else:
return 1
entries.sort(sort_by_timestamp)
return entries[:max_count]
feed_entries = property(get_feed_entries)
def _build_song_query(self):
from masterapp.config.schema import dbfields
query = Session.query(SongOwner.uid.label('Friend_id'),
User._name.label('Friend_name'), *dbfields['song'])
query = query.join(Song.album).reset_joinpoint()
query = query.join(Song.artist).reset_joinpoint()
query = query.join(User).filter(SongOwner.uid == self.id)
return query
def get_song_query(self):
query = self._build_song_query()
return query.distinct()
song_query = property(get_song_query)
def get_song_count(self):
query = Session.query(SongOwner).filter(SongOwner.uid == self.id).count()
return query
song_count = property(get_song_count)
def get_album_query(self):
from masterapp.config.schema import dbfields
# Number of songs available on this album subquery
havesongs = Session.query(Album.id.label('albumid'),
func.count(Song.id).label('Album_havesongs'),
func.sum(Song.length).label('Album_length')
).join(Album.songs, SongOwner).filter(SongOwner.uid == self.id)
havesongs = havesongs.group_by(Album.id).subquery()
query = Session.query(SongOwner.uid.label('Friend_id'), havesongs.c.Album_havesongs,
havesongs.c.Album_length, User._name.label('Friend_name'),
*dbfields['album'])
joined = join(Album, havesongs, Album.id == havesongs.c.albumid)
query = query.select_from(joined)
query = query.join(Album.artist).reset_joinpoint()
query = query.join(Album.songs, SongOwner, SongOwner.user).filter(SongOwner.uid == self.id)
query = query.group_by(Album)
return query
album_query = property(get_album_query)
def get_playlist_query(self):
from masterapp.config.schema import dbfields
query = Session.query(Playlist.ownerid.label('Friend_id'),
*dbfields['playlist']).\
filter(Playlist.ownerid == self.id)
return query
playlist_query = property(get_playlist_query)
def get_artist_query(self):
from masterapp.config.schema import dbfields
# Build the main query
query = Session.query(SongOwner.uid.label('Friend_id'),
User._name.label('Friend_name'),
ArtistCounts.songcount.label('Artist_availsongs'),
ArtistCounts.albumcount.label('Artist_numalbums'),
*dbfields['artist'])
query = query.join(Artist.albums, Song, SongOwner, SongOwner.user).\
join((ArtistCounts, and_(
SongOwner.uid == ArtistCounts.userid,
Artist.id == ArtistCounts.artistid,
Artist.id == Album.artistid)))
query = query.filter(SongOwner.uid == self.id)
query = query.group_by(Artist)
return query
artist_query = property(get_artist_query)
def get_album_by_id(self, id):
qry = self.album_query
qry = qry.filter(Album.id == id)
return qry.first()
def get_active_spotlights(self):
return Session.query(Spotlight).filter(sql.and_(\
Spotlight.uid==self.id, Spotlight.active==True)).\
order_by(sql.desc(Spotlight.timestamp))
active_spotlights = property(get_active_spotlights)
def get_inactive_spotlights(self):
return Session.query(Spotlight).filter(sql.and_(
Spotlight.uid==self.id, Spotlight.active==False)).\
order_by(sql.desc(Spotlight.timestamp))
inactive_spotlights = property(get_inactive_spotlights)
def get_playlist_by_id(self, id):
qry = self.playlist_query
qry = qry.filter(Playlist.id == id)
return qry.first()
def get_song_by_id(self, id):
return self.song_query.filter(Song.id == id).first()
def add_song(self, song):
"""
Adds a song to this user's collection. Keeps counts up to date.
"""
# Add to collection
owner = SongOwner(song = song, user = self)
# Keep counts up to date
new_album = False
albumc = Session.query(AlbumCounts).get((song.album.id, self.id))
if albumc:
albumc.songcount += 1
else:
new_album = True
albumc = AlbumCounts(user = self, album = song.album, songcount=1)
artistc = Session.query(ArtistCounts).get((song.album.artistid, self.id))
if artistc:
artistc.songcount += 1
if new_album:
artistc.albumcount += 1
else:
artistc = ArtistCounts(
user=self, artist=song.album.artist, songcount=1, albumcount=1)
Session.add_all([owner, artistc, albumc])
Session.commit()
return owner
def remove_song(self, songrow):
"""
Removes a song from the users's collection and updates the counts.
"""
# the passed song is a RowTuple, so we convert it so a Song object
song = Session.query(Song).get(songrow.Song_id)
movedowner = RemovedOwner(
song = song,
user = self
)
Session.add(movedowner)
owner = Session.query(SongOwner).\
filter(SongOwner.song == song).\
filter(SongOwner.user == self).first()
Session.delete(owner)
albumc = Session.query(AlbumCounts).get((song.albumid, self.id))
albumc.songcount -= 1
remove_album = False
if albumc.songcount == 0:
remove_album = True
artistc = Session.query(ArtistCounts).get((song.album.artistid, self.id))
artistc.songcount -= 1
if remove_album:
artistc.albumcount -= 1
Session.add(artistc)
return True
@fbaccess
def update_profile(self):
c.user = self
fbml = render('facebook/profile.mako.fbml')
facebook.profile.setFBML(fbml)
@fbaccess
def publish_spotlight(self, spot):
title_t = """
{actor} created
<fb:if-multiple-actors>Spotlights
<fb:else>a Spotlight </fb:else>
</fb:if-multiple-actors>
on {album} at
<a href="http://harmonize.fm" target="_blank">harmonize.fm</a>
"""
title_d = '{"album":"%s"}' % spot.title
r = facebook.feed.publishTemplatizedAction(
title_template=title_t,
title_data=title_d
)
return r
def add_spotlight(self, spotlight):
spotlight.user = self
Session.add(spotlight)
spotlight.unactivate_lru()
self.publish_spotlight(spotlight)
self.update_profile()
def add_me_to_friends(self):
for friend in self.friends:
try:
friend.friends.append(self)
friend.friends.sort(key=attrgetter('name'))
except:
# oh well, they'll find me eventually
logging.debug('Could not be added to %s', friend.id)
def update_friends_caches(self):
for friend in self.friends:
self.fbfriendscache.remove_value(friend.id)
def get_recommendations(self):
return Session.query(Recommendation).filter(
sql.and_(Recommendation.recommendeefbid == self.fbid,
Recommendation.active == True)).\
order_by(sql.desc(Recommendation.timestamp))
recommendations = property(get_recommendations)
class ArtistCounts(Base):
__table__ = Table('counts_artist', metadata, autoload=True)
key = [__table__.c.artistid, __table__.c.userid]
__mapper_args__ = {'primary_key': key}
artist = relation(Artist)
user = relation(User)
class AlbumCounts(Base):
__table__ = Table('counts_album', metadata, autoload=True)
key = [__table__.c.albumid, __table__.c.userid]
__mapper_args__ = {'primary_key': key}
album = relation(Album)
user = relation(User)
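# Illustrative sketch (standalone): personal_cache above keys cached values
# by self.id so each user gets an independent slot.  A minimal in-process
# version of the same idea, without Beaker, expiry or Session merging, could
# look like this; the names are hypothetical, not part of harmonize.fm.
def _example_per_user_cache(func):
    results = {}
    def wrapper(self, *args, **kwargs):
        key = (func.__name__, self.id)
        if key not in results:
            results[key] = func(self, *args, **kwargs)
        return results[key]
    return wrapper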
| mit | 8,144,348,351,670,925,000 | 32.916413 | 99 | 0.577049 | false |
Osndok/zim-desktop-wiki | tests/translations.py | 1 | 4529 |
import re
from glob import glob
from tests import TestCase
class TestTranslations(TestCase):
def runTest(self, verbose=False):
'''Sanity check translation files'''
pot_creation_date = None
for file in ['translations/zim.pot'] + glob('translations/*.po'):
if verbose:
print 'Checking %s' % file
t = TranslationFile(file)
if file == 'translations/zim.pot':
text = open(file).read()
                self.assertFalse('namespace' in text.lower(), 'Use "notebook section" instead of "namespace"')
pot_creation_date = t.headers['POT-Creation-Date']
else:
if not t.headers['POT-Creation-Date'] == pot_creation_date:
print 'WARNING: Translation not based on up to date template: %s' % file
self.assertTrue(t.nplural > 0, 'Missing number of plurals: %s' % file)
t.assertValid()
class TranslationMessage(object):
@property
def nplural(self):
return len(self.msgstr)
def __init__(self, lineno, text):
self.lineno = lineno
self.msgid = None
self.msgid_plural = None
self.msgstr = []
self.comment = ''
text = text.replace('"\n"', '')
for line in text.splitlines():
if line.startswith('#'):
self.comment += line
else:
type, msg = line.split(' ', 1)
if type == 'msgid':
self.msgid = msg
elif type == 'msgid_plural':
self.msgid_plural = msg
elif type.startswith('msgstr'):
self.msgstr.append(msg)
else:
raise AssertionError('Could not parse line: %s %s' % (type, msg))
assert self.msgid, 'No msgid found'
assert self.msgstr, 'No msgstr found'
_format_string_re = re.compile('%(?:\(\w+\))?\w')
# match "%s", "%d" etc. but also "%(foo)s" - but not just "%"
def check_nplural(self, nplural):
if self.msgid_plural and self.msgstr[0] != '""':
return self.nplural == nplural
else:
return True
def check_format_strings(self):
'''Check format strings in msgid_plural and msgstr'''
if 'strftime' in self.comment:
return self._check_strftime_string()
else:
return self._check_format_strings()
def _check_format_strings(self):
wanted = sorted(self._format_string_re.findall(self.msgid))
if not wanted:
return True # no string format used
for msg in [self.msgid_plural] + self.msgstr:
if msg and not msg == '""':
got = sorted(self._format_string_re.findall(msg))
if not got == wanted:
return False
else:
return True
def _check_strftime_string(self):
for msg in self.msgstr:
if msg and not msg == '""':
for c in re.findall('\%(.)', msg):
if c not in 'aAwdbBmyYHIpMSfzZjUWcxX%':
# valid charaters based on doc for datetime module
# other characters may be valid depending on platform
# but not robust
return False
else:
return True
class TranslationFile(object):
def __init__(self, file):
self.file = file
self.messages = []
buffer = []
lineno = 0
msgidlineno = 0
def flush():
if not buffer \
or all(line.startswith('#') for line in buffer):
return
try:
text = ''.join(buffer)
message = TranslationMessage(msgidlineno, text)
self.messages.append(message)
except AssertionError as error:
raise AssertionError('Error while parsing %s msgid on line %i\n' % (self.file, msgidlineno) + error.message)
for line in open(file):
lineno += 1
if not line or line.isspace():
flush()
buffer = []
else:
if line.startswith('msgid '):
msgidlineno = lineno
buffer.append(line)
else:
flush()
plural_forms = self.headers['Plural-Forms']
m = re.search(r'nplurals=(\d+);', plural_forms)
if m:
self.nplural = int(m.group(1))
else:
self.nplural = None
@property
def headers(self):
message = self.get('""')
lines = message.msgstr[0].strip().strip('"').split('\\n')
headers = {}
for line in lines:
if not line:
continue
k, v = line.strip('"').replace('\\n', '').split(': ', 1)
headers[k] = v
return headers
def get(self, msgid):
for message in self.messages:
if message.msgid == msgid:
return message
else:
return None
def assertValid(self):
for message in self.messages:
if self.nplural and not message.check_nplural(self.nplural):
raise AssertionError('Number of plural forms NOK in %s msgid on line %i\n' % (self.file, message.lineno) + message.msgid)
if not message.check_format_strings():
raise AssertionError('Error with format strings in %s msgid on line %i\n' % (self.file, message.lineno) + message.msgid)
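# Illustrative sketch (standalone, made-up message strings): the strftime
# check above accepts only a whitelist of directive characters; a
# stripped-down version of that test looks like this.
def _example_valid_strftime(msg, _allowed='aAwdbBmyYHIpMSfzZjUWcxX%'):
    return all(c in _allowed for c in re.findall('%(.)', msg))
# _example_valid_strftime('"%A %d %B"') -> True
# _example_valid_strftime('"%Q"') -> False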
if __name__ == '__main__':
TestTranslations().runTest(verbose=True)
| gpl-2.0 | 5,430,020,703,552,679,000 | 24.88 | 125 | 0.647384 | false |
11craft/django-cms | cms/tests/management.py | 1 | 8977 | # -*- coding: utf-8 -*-
from __future__ import with_statement
from cms.models import Page
from django.core import management
from cms.test_utils.testcases import CMSTestCase
from cms.test_utils.util.context_managers import SettingsOverride
from cms.api import create_page, add_plugin
from cms.management.commands import cms
from cms.management.commands.subcommands.list import plugin_report
from cms.models.pluginmodel import CMSPlugin
from cms.models.placeholdermodel import Placeholder
from djangocms_text_ckeditor.cms_plugins import TextPlugin
from cms.utils.compat.string_io import StringIO
APPHOOK = "SampleApp"
PLUGIN = "TextPlugin"
class ManagementTestCase(CMSTestCase):
def test_list_apphooks(self):
out = StringIO()
apps = ["cms", "menus", "sekizai", "cms.test_utils.project.sampleapp"]
with SettingsOverride(INSTALLED_APPS=apps):
create_page('Hello Title', "nav_playground.html", "en", apphook=APPHOOK)
self.assertEqual(Page.objects.filter(application_urls=APPHOOK).count(), 1)
command = cms.Command()
command.stdout = out
command.handle("list", "apphooks", interactive=False)
self.assertEqual(out.getvalue(), "SampleApp\n")
def test_uninstall_apphooks_without_apphook(self):
out = StringIO()
command = cms.Command()
command.stdout = out
command.handle("uninstall", "apphooks", APPHOOK, interactive=False)
self.assertEqual(out.getvalue(), "no 'SampleApp' apphooks found\n")
def test_uninstall_apphooks_with_apphook(self):
out = StringIO()
apps = ["cms", "menus", "sekizai", "cms.test_utils.project.sampleapp"]
with SettingsOverride(INSTALLED_APPS=apps):
create_page('Hello Title', "nav_playground.html", "en", apphook=APPHOOK)
self.assertEqual(Page.objects.filter(application_urls=APPHOOK).count(), 1)
command = cms.Command()
command.stdout = out
command.handle("uninstall", "apphooks", APPHOOK, interactive=False)
self.assertEqual(out.getvalue(), "1 'SampleApp' apphooks uninstalled\n")
self.assertEqual(Page.objects.filter(application_urls=APPHOOK).count(), 0)
def test_list_plugins(self):
out = StringIO()
apps = ["cms", "menus", "sekizai", "cms.test_utils.project.sampleapp"]
with SettingsOverride(INSTALLED_APPS=apps):
placeholder = Placeholder.objects.create(slot="test")
add_plugin(placeholder, TextPlugin, "en", body="en body")
add_plugin(placeholder, TextPlugin, "en", body="en body")
link_plugin = add_plugin(placeholder, "LinkPlugin", "en",
name="A Link", url="https://www.django-cms.org")
self.assertEqual(
CMSPlugin.objects.filter(plugin_type=PLUGIN).count(),
2)
self.assertEqual(
CMSPlugin.objects.filter(plugin_type="LinkPlugin").count(),
1)
# create a CMSPlugin with an unsaved instance
instanceless_plugin = CMSPlugin(language="en", plugin_type="TextPlugin")
instanceless_plugin.save()
# create a bogus CMSPlugin to simulate one which used to exist but
# is no longer installed
bogus_plugin = CMSPlugin(language="en", plugin_type="BogusPlugin")
bogus_plugin.save()
report = plugin_report()
# there should be reports for three plugin types
self.assertEqual(
len(report),
3)
# check the bogus plugin
bogus_plugins_report = report[0]
self.assertEqual(
bogus_plugins_report["model"],
None)
self.assertEqual(
bogus_plugins_report["type"],
u'BogusPlugin')
self.assertEqual(
bogus_plugins_report["instances"][0],
bogus_plugin)
# check the link plugin
link_plugins_report = report[1]
self.assertEqual(
link_plugins_report["model"],
link_plugin.__class__)
self.assertEqual(
link_plugins_report["type"],
u'LinkPlugin')
self.assertEqual(
link_plugins_report["instances"][0].get_plugin_instance()[0],
link_plugin)
# check the text plugins
text_plugins_report = report[2]
self.assertEqual(
text_plugins_report["model"],
TextPlugin.model)
self.assertEqual(
text_plugins_report["type"],
u'TextPlugin')
self.assertEqual(
len(text_plugins_report["instances"]),
3)
self.assertEqual(
text_plugins_report["instances"][2],
instanceless_plugin)
self.assertEqual(
text_plugins_report["unsaved_instances"],
[instanceless_plugin])
def test_delete_orphaned_plugins(self):
apps = ["cms", "menus", "sekizai", "cms.test_utils.project.sampleapp"]
with SettingsOverride(INSTALLED_APPS=apps):
placeholder = Placeholder.objects.create(slot="test")
add_plugin(placeholder, TextPlugin, "en", body="en body")
add_plugin(placeholder, TextPlugin, "en", body="en body")
link_plugin = add_plugin(placeholder, "LinkPlugin", "en",
name="A Link", url="https://www.django-cms.org")
instanceless_plugin = CMSPlugin(
language="en", plugin_type="TextPlugin")
instanceless_plugin.save()
# create a bogus CMSPlugin to simulate one which used to exist but
# is no longer installed
bogus_plugin = CMSPlugin(language="en", plugin_type="BogusPlugin")
bogus_plugin.save()
report = plugin_report()
# there should be reports for three plugin types
self.assertEqual(
len(report),
3)
# check the bogus plugin
bogus_plugins_report = report[0]
self.assertEqual(
len(bogus_plugins_report["instances"]),
1)
# check the link plugin
link_plugins_report = report[1]
self.assertEqual(
len(link_plugins_report["instances"]),
1)
# check the text plugins
text_plugins_report = report[2]
self.assertEqual(
len(text_plugins_report["instances"]),
3)
self.assertEqual(
len(text_plugins_report["unsaved_instances"]),
1)
management.call_command(
'cms', 'delete_orphaned_plugins',
stdout=StringIO(), interactive=False)
report = plugin_report()
# there should be reports for two plugin types (one should have been deleted)
self.assertEqual(
len(report),
2)
# check the link plugin
link_plugins_report = report[0]
self.assertEqual(
len(link_plugins_report["instances"]),
1)
# check the text plugins
text_plugins_report = report[1]
self.assertEqual(
len(text_plugins_report["instances"]),
2)
self.assertEqual(
len(text_plugins_report["unsaved_instances"]),
0)
def test_uninstall_plugins_without_plugin(self):
out = StringIO()
command = cms.Command()
command.stdout = out
command.handle("uninstall", "plugins", PLUGIN, interactive=False)
self.assertEqual(out.getvalue(), "no 'TextPlugin' plugins found\n")
def test_uninstall_plugins_with_plugin(self):
out = StringIO()
apps = ["cms", "menus", "sekizai", "cms.test_utils.project.sampleapp"]
with SettingsOverride(INSTALLED_APPS=apps):
placeholder = Placeholder.objects.create(slot="test")
add_plugin(placeholder, TextPlugin, "en", body="en body")
self.assertEqual(CMSPlugin.objects.filter(plugin_type=PLUGIN).count(), 1)
command = cms.Command()
command.stdout = out
command.handle("uninstall", "plugins", PLUGIN, interactive=False)
self.assertEqual(out.getvalue(), "1 'TextPlugin' plugins uninstalled\n")
self.assertEqual(CMSPlugin.objects.filter(plugin_type=PLUGIN).count(), 0)
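# Illustrative sketch (hypothetical helper, not part of the suite): the
# pattern used throughout these tests -- run a ``cms`` subcommand and
# capture what it prints -- can be wrapped once like this.
def _example_run_cms(*args):
    out = StringIO()
    management.call_command('cms', *args, stdout=out, interactive=False)
    return out.getvalue()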
| bsd-3-clause | -3,773,510,374,559,143,400 | 37.546256 | 89 | 0.560209 | false |
ARM-software/trappy | tests/test_utils.py | 1 | 2217 | # Copyright 2015-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import print_function
import unittest
from trappy import utils
import pandas
from pandas.util.testing import assert_series_equal
class TestUtils(unittest.TestCase):
def test_handle_duplicate_index(self):
"""Test Util Function: handle_duplicate_index
"""
# Refer to the example in the function doc string
values = [0, 1, 2, 3, 4]
index = [0.0, 1.0, 1.0, 6.0, 7.0]
series = pandas.Series(values, index=index)
new_index = [0.0, 1.0, 2.0, 3.0, 4.0, 6.0, 7.0]
with self.assertRaises(ValueError):
series.reindex(new_index)
max_delta = 0.001
expected_index = [0.0, 1.0, 1 + max_delta, 6.0, 7.0]
expected_series = pandas.Series(values, index=expected_index)
series = utils.handle_duplicate_index(series, max_delta)
assert_series_equal(series, expected_series)
# Make sure that the reindex doesn't raise ValueError any more
series.reindex(new_index)
def test_handle_duplicate_index_duplicate_end(self):
"""handle_duplicate_index copes with duplicates at the end of the series"""
max_delta = 0.001
values = [0, 1, 2, 3, 4]
index = [0.0, 1.0, 2.0, 6.0, 6.0]
expected_index = index[:]
expected_index[-1] += max_delta
series = pandas.Series(values, index=index)
expected_series = pandas.Series(values, index=expected_index)
series = utils.handle_duplicate_index(series, max_delta)
assert_series_equal(series, expected_series)
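# Illustrative sketch (standalone): the behaviour exercised above can be
# implemented by walking the index and nudging any value that does not
# strictly increase.  This is a hypothetical re-sketch of
# utils.handle_duplicate_index, not trappy's implementation.
def _example_dedup_index(series, max_delta=0.001):
    new_index = list(series.index)
    for i in range(1, len(new_index)):
        if new_index[i] <= new_index[i - 1]:
            new_index[i] = new_index[i - 1] + max_delta
    return pandas.Series(series.values, index=new_index)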
| apache-2.0 | -5,604,032,030,369,191,000 | 35.344262 | 83 | 0.666216 | false |
RazerM/pg_grant | tests/conftest.py | 1 | 2475 | from pathlib import Path
import pytest
import testing.postgresql
from sqlalchemy import create_engine, text
from sqlalchemy.engine.url import make_url
from testcontainers.postgres import PostgresContainer as _PostgresContainer
tests_dir = Path(__file__).parents[0].resolve()
test_schema_file = Path(tests_dir, 'data', 'test-schema.sql')
SUPERUSER_NAME = 'alice'
DB_NAME = 'db1'
Postgresql = testing.postgresql.PostgresqlFactory(
initdb_args='-U postgres -A trust',
database=DB_NAME,
)
class PostgresContainer(_PostgresContainer):
POSTGRES_USER = 'postgres'
POSTGRES_DB = DB_NAME
def pytest_addoption(parser):
parser.addoption(
'--no-container', action='store_true',
help='Use temporary PostgreSQL cluster without a container.')
def pytest_runtest_setup(item):
if 'nocontainer' in item.keywords and not item.config.getoption('--no-container'):
pytest.skip('Use --no-container to execute this test.')
@pytest.fixture(scope='session')
def postgres_url(request):
no_container = request.config.getoption("--no-container")
if no_container:
postgresql = Postgresql()
# Use superuser to create new superuser, then yield new connection URL
url = make_url(postgresql.url())
engine = create_engine(url)
engine.execute('CREATE ROLE {} WITH SUPERUSER LOGIN'.format(SUPERUSER_NAME))
engine.dispose()
url.username = SUPERUSER_NAME
yield str(url)
else:
postgres_container = PostgresContainer("postgres:latest")
with postgres_container as postgres:
# Use superuser to create new superuser, then yield new connection URL
url = make_url(postgres.get_connection_url())
engine = create_engine(url)
engine.execute(
text(
'CREATE ROLE {} WITH SUPERUSER LOGIN PASSWORD '
':password'.format(SUPERUSER_NAME)
),
password=postgres_container.POSTGRES_PASSWORD,
)
engine.dispose()
url.username = SUPERUSER_NAME
yield str(url)
@pytest.fixture(scope='session')
def engine(postgres_url):
return create_engine(postgres_url)
@pytest.fixture(scope='session')
def pg_schema(engine):
with test_schema_file.open() as fp:
engine.execute(fp.read())
@pytest.fixture
def connection(engine, pg_schema):
with engine.connect() as conn:
yield conn
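# Illustrative only (an assumed example, not part of this suite): a test
# module would consume the fixtures above like this --
#
#   def test_simple_query(connection):
#       assert connection.execute(text('SELECT 1')).scalar() == 1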
| mit | 4,059,386,316,065,879,600 | 28.117647 | 86 | 0.657778 | false |
raymondnoonan/Mpropulator | MPropulator/write_tab.py | 1 | 1656 | from MPropulator import helpers
import string
def write_tab(sheet, table_data, xls_startcell, skiprows, skipcols):
"""Writes the data for a particular table to the corresponding
Excel spreadsheet.
sheet: openpyxl worksheet to which you're writing
table_data: pandas data frame containing data to write
xls_startcell: cell in the sheet at which you will begin writing
skiprows: list of rows in Excel spreadsheet to skip
skipcols: list of columns in Excel spreadsheet to skip
"""
num_rows = table_data.shape[0]
num_cols = table_data.shape[1]
# We subtract one to remain 0-indexed
start_row = int(xls_startcell.translate(None, string.ascii_letters)) - 1
start_col = helpers.col_to_number(xls_startcell.translate(None,
string.digits))
num_skipcols = [helpers.col_to_number(col) for col in skipcols]
total_rows = start_row + num_rows + len(skiprows)
table_rows_to_write = [row for row in range(start_row, total_rows) if
row not in skiprows]
total_cols = start_col + num_cols + len(skipcols)
table_cols_to_write = [col for col in range(start_col, total_cols) if
col not in num_skipcols]
for row_idx, row in enumerate(table_rows_to_write):
for col_idx, col in enumerate(table_cols_to_write):
current_cell = helpers.cell_name(row, col)
value = table_data.iloc[row_idx, col_idx]
try:
value = float(value)
except ValueError:
pass
sheet[current_cell].value = value
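# Hedged usage sketch (assumes openpyxl and the helpers API used above; the
# workbook, sheet, and cell names here are illustrative):
#
#   import openpyxl
#   import pandas as pd
#   wb = openpyxl.load_workbook('template.xlsx')
#   df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
#   # start writing at B2, skipping one row index and column 'D'
#   write_tab(wb['Sheet1'], df, 'B2', skiprows=[3], skipcols=['D'])
#   wb.save('out.xlsx')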
| mit | 5,847,719,726,850,679,000 | 37.511628 | 77 | 0.624396 | false |
johnny555/2d3g | viz.py | 1 | 5005 | __author__ = 'Admin'
def group_bands(depth, coal_labels, holeID):
    coal_label_list = ['RU', 'R', 'R1', 'R2', 'RO', 'RL', 'MU', 'MM', 'MML', 'LN', 'TR', 'TRL', 'PS', 'PSL', 'P2', 'P2U',
                       'P2LA', 'P2LB', 'BA', 'G1', 'G2', 'G34', 'G3', 'G4', 'G56', 'G5', 'G6', 'G7', 'G8', 'G9', 'G10',
                       'G11', 'BGA', 'BGB', 'BGC', 'BG', 'HEU', 'HEL', 'CN', 'FH', 'FL', 'MAC', 'PX', 'PU', 'PM', 'P',
                       'PL', 'AQ', 'AQL', 'T1UA', 'T1UB', 'T1U', 'T1M', 'T1L', 'T2', 'C1U', 'C1', 'C1L', 'CM',
                       'CS', 'C2', 'GUS', 'GU', 'GC', 'GL', 'BN']
deltaD = depth[1]-depth[0]
dist_from_last_coal_seam = float('inf')
seam_prop = []
seam_list = []
    for i, label in enumerate(coal_labels):
        if label in coal_label_list:
            # Entering a seam: record its top depth the first time coal appears
            if dist_from_last_coal_seam != 0:
                seam_prop.append(depth[i])
            dist_from_last_coal_seam = 0
        else:
            # Just left a seam: record its bottom depth and close the interval
            if dist_from_last_coal_seam == 0:
                seam_prop.append(depth[i])
                seam_list.append(seam_prop)
                seam_prop = []
            dist_from_last_coal_seam += deltaD
    if seam_prop:
        # The hole ended inside a seam: close it at the final depth
        seam_prop.append(depth[-1])
        seam_list.append(seam_prop)
print seam_list
allowable_dist = 20
group_no = 1
nSeam = len(seam_list)
group_list = [group_no]
for iSeam in range(nSeam-1):
if seam_list[iSeam+1][0] - seam_list[iSeam][1] > allowable_dist:
group_no += 1
group_list.append(group_no)
print group_list
out_list = []
for i, seam in enumerate(seam_list):
out_dict = {}
out_dict['top'] = seam[0]
out_dict['bot'] = seam[1]
out_dict['type'] = group_list[i]
out_list.append(out_dict)
import json
with open('%s_seaminfo.json'%holeID,'w') as fp:
json.dump(out_list, fp)
return seam_list
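# Worked example (synthetic data) for group_bands: with 1 m spacing and coal
# labels on rows 2-3, a single seam [top, bottom] is returned and written to
# demo_seaminfo.json --
#
#   group_bands([0.0, 1.0, 2.0, 3.0, 4.0],
#               ['XX', 'XX', 'G1', 'G1', 'XX'], holeID='demo')
#   # -> [[2.0, 4.0]]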
def display_acoustic(df, holeID, useful_features = ['ADEN', 'BRDU', 'CADE', 'CODE', 'DENB', 'DENL', 'GRDE', 'LSDU']):
import matplotlib.pyplot as plt
feature_list = df.columns
# print feature_list
accoustic_features = []
for feature in feature_list:
if 'ATV_AMP' in feature:
accoustic_features.append(feature)
# print accoustic_features
accoustic_scan = df[accoustic_features].values
    coal_label_list = ['RU', 'R', 'R1', 'R2', 'RO', 'RL', 'MU', 'MM', 'MML', 'LN', 'TR', 'TRL', 'PS', 'PSL', 'P2', 'P2U',
                       'P2LA', 'P2LB', 'BA', 'G1', 'G2', 'G34', 'G3', 'G4', 'G56', 'G5', 'G6', 'G7', 'G8', 'G9', 'G10',
                       'G11', 'BGA', 'BGB', 'BGC', 'BG', 'HEU', 'HEL', 'CN', 'FH', 'FL', 'MAC', 'PX', 'PU', 'PM', 'P',
                       'PL', 'AQ', 'AQL', 'T1UA', 'T1UB', 'T1U', 'T1M', 'T1L', 'T2', 'C1U', 'C1', 'C1L', 'CM',
                       'CS', 'C2', 'GUS', 'GU', 'GC', 'GL', 'BN']
# useful_features = ['ADEN', 'BRDU', 'CADE', 'CODE', 'DENB', 'DENL', 'GRDE', 'LSDU']
nPlots = len(useful_features) + 2
iPlot = 1
for feature in useful_features:
plt.subplot(1,nPlots,iPlot)
plt.plot(df[feature].values, df['DEPTH'].values)
plt.ylim(min(df['DEPTH'].values), max(df['DEPTH'].values))
# plt.title(feature)
plt.gca().invert_yaxis()
plt.axis('off')
iPlot += 1
plt.subplot(1,nPlots,iPlot)
plt.imshow(accoustic_scan, aspect='auto')
plt.ylim(1, len(accoustic_scan))
plt.title('Acoustic scan')
plt.gca().invert_yaxis()
iPlot += 1
plt.subplot(1,nPlots,iPlot)
# plt.plot([l in coal_label_list for l in df['LABELS'].values], df['DEPTH'].values)
x1 = [l in coal_label_list for l in df['LABELS'].values]
# x2 = [2 if x == True else 0 for x in x1]
x2 = [0]*len(x1)
y1 = df['DEPTH'].values
y2 = y1
plt.plot((x1, x2), (y1, y2), 'k-')
plt.ylim(min(df['DEPTH'].values), max(df['DEPTH'].values))
plt.title('Label')
plt.gca().invert_yaxis()
iPlot += 1
# plt.imsave('%s.png'%holeID)
plt.savefig('%s.png'%holeID)
group_bands(df['DEPTH'].values, df['LABELS'].values, holeID = holeID)
plt.show()
if __name__ == '__main__':
import pandas as pd
import matplotlib.pyplot as plt
# holeID = [ 'DD1102']
#
# # extract_seams(bore_id = holeID, seam_list = hole_boundaries)
# [extract_seams(bore_id=h) for h in holeID]
holeID = 'DD1097'
holeID = 'DD1098'
holeID = 'DD1099'
# holeID = 'DD1100'
# holeID = 'DD1102'
shit = [ 'DD1101','DD1106' ]
done = [ ]
holeId = ['DD1097',
'DD1098',
'DD1099',
'DD1100',
'DD1102',
'DD1104', 'DD1105' ,'DD1107', 'DD1108','DD1103',
'DD0541',
'DD0542',
'DD0551',
'DD0980A',
'DD0989',
'DD0991',
'DD0992',
'DD1000',
'DD1005',
'DD1006',
'DD1010',
'DD1012',
'DD1013',
'DD1014']
for holeID in holeId:
df = pd.read_csv('dats/%s_cleandata.csv'%holeID)
display_acoustic(df, holeID) | bsd-2-clause | 1,808,144,811,737,757,200 | 24.804124 | 121 | 0.507493 | false |
letsmeet-click/letsmeet.click | letsmeet/events/migrations/0001_initial.py | 1 | 2446 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-28 00:55
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import django_extensions.db.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('communities', '0003_auto_20151227_2241'),
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('name', models.CharField(max_length=64, unique=True)),
('slug', models.SlugField(max_length=64, unique=True)),
('begin', models.DateTimeField(default=django.utils.timezone.now)),
('end', models.DateTimeField(default=django.utils.timezone.now)),
('community', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='events', to='communities.Community')),
],
options={
'ordering': ['name'],
},
),
migrations.CreateModel(
name='EventRSVP',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('coming', models.BooleanField()),
('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rsvps', to='events.Event')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rsvps', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterUniqueTogether(
name='eventrsvp',
unique_together=set([('event', 'user')]),
),
]
| mit | -1,845,259,420,887,350,000 | 45.150943 | 145 | 0.616926 | false |
teonlamont/mne-python | mne/datasets/brainstorm/bst_auditory.py | 4 | 1920 | # Authors: Mainak Jas <[email protected]>
#
# License: BSD (3-clause)
from functools import partial
from ...utils import verbose
from ..utils import (has_dataset, _data_path, _get_version, _version_doc,
_data_path_doc)
has_brainstorm_data = partial(has_dataset, name='brainstorm')
_description = u"""
URL: http://neuroimage.usc.edu/brainstorm/DatasetAuditory
- One subject, two acquisition runs of 6 minutes each
- Subject stimulated binaurally with intra-aural earphones
(air tubes+transducers)
- Each run contains:
- 200 regular beeps (440Hz)
- 40 easy deviant beeps (554.4Hz, 4 semitones higher)
- Random inter-stimulus interval: between 0.7s and 1.7s seconds, uniformly
distributed
- The subject presses a button when detecting a deviant with the right
index finger
- Auditory stimuli generated with the Matlab Psychophysics toolbox
"""
@verbose
def data_path(path=None, force_update=False, update_path=True, download=True,
verbose=None): # noqa: D103
return _data_path(path=path, force_update=force_update,
update_path=update_path, name='brainstorm',
download=download, archive_name='bst_auditory.tar.gz')
_data_path_doc = _data_path_doc.format(name='brainstorm',
conf='MNE_DATASETS_BRAINSTORM_DATA'
'_PATH')
_data_path_doc = _data_path_doc.replace('brainstorm dataset',
'brainstorm (bst_auditory) dataset')
data_path.__doc__ = _data_path_doc
def get_version(): # noqa: D103
return _get_version('brainstorm')
get_version.__doc__ = _version_doc.format(name='brainstorm')
def description():
"""Get description of brainstorm (bst_auditory) dataset."""
for desc in _description.splitlines():
print(desc)
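# Example usage (downloads the dataset on first call unless already cached;
# the local path depends on the MNE configuration):
#
#   from mne.datasets.brainstorm import bst_auditory
#   data_dir = bst_auditory.data_path()
#   print(bst_auditory.get_version())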
| bsd-3-clause | -4,784,413,848,918,143,000 | 33.285714 | 78 | 0.639063 | false |
dtudares/hello-world | yardstick/tests/unit/benchmark/scenarios/networking/test_netperf_node.py | 1 | 3858 | #!/usr/bin/env python
##############################################################################
# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
# Unittest for
# yardstick.benchmark.scenarios.networking.netperf_node.NetperfNode
import mock
import unittest
import os
import json
from yardstick.benchmark.scenarios.networking import netperf_node
@mock.patch('yardstick.benchmark.scenarios.networking.netperf_node.ssh')
class NetperfNodeTestCase(unittest.TestCase):
def setUp(self):
self.ctx = {
'host': {
'ip': '192.168.10.10',
'user': 'root',
'password': 'root'
},
'target': {
'ip': '192.168.10.11',
'user': 'root',
'password': 'root'
}
}
def test_netperf_node_successful_setup(self, mock_ssh):
p = netperf_node.NetperfNode({}, self.ctx)
mock_ssh.SSH().execute.return_value = (0, '', '')
p.setup()
self.assertIsNotNone(p.server)
self.assertIsNotNone(p.client)
self.assertEqual(p.setup_done, True)
def test_netperf_node_successful_no_sla(self, mock_ssh):
options = {}
args = {'options': options}
result = {}
p = netperf_node.NetperfNode(args, self.ctx)
mock_ssh.SSH().execute.return_value = (0, '', '')
p.host = mock_ssh.SSH()
sample_output = self._read_sample_output()
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
expected_result = json.loads(sample_output)
p.run(result)
self.assertEqual(result, expected_result)
def test_netperf_node_successful_sla(self, mock_ssh):
options = {}
args = {
'options': options,
'sla': {'mean_latency': 100}
}
result = {}
p = netperf_node.NetperfNode(args, self.ctx)
mock_ssh.SSH().execute.return_value = (0, '', '')
p.host = mock_ssh.SSH()
sample_output = self._read_sample_output()
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
expected_result = json.loads(sample_output)
p.run(result)
self.assertEqual(result, expected_result)
def test_netperf_node_unsuccessful_sla(self, mock_ssh):
options = {}
args = {
'options': options,
'sla': {'mean_latency': 5}
}
result = {}
p = netperf_node.NetperfNode(args, self.ctx)
mock_ssh.SSH().execute.return_value = (0, '', '')
p.host = mock_ssh.SSH()
sample_output = self._read_sample_output()
mock_ssh.SSH().execute.return_value = (0, sample_output, '')
self.assertRaises(AssertionError, p.run, result)
def test_netperf_node_unsuccessful_script_error(self, mock_ssh):
options = {}
args = {'options': options}
result = {}
p = netperf_node.NetperfNode(args, self.ctx)
mock_ssh.SSH().execute.return_value = (0, '', '')
p.host = mock_ssh.SSH()
mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
self.assertRaises(RuntimeError, p.run, result)
def _read_sample_output(self):
curr_path = os.path.dirname(os.path.abspath(__file__))
output = os.path.join(curr_path, 'netperf_sample_output.json')
with open(output) as f:
sample_output = f.read()
return sample_output
def main():
unittest.main()
if __name__ == '__main__':
main()
| apache-2.0 | -2,371,470,591,946,177,000 | 29.377953 | 78 | 0.554951 | false |
jamesstout/fail2ban-0.8.4-OpenSolaris | testcases/filtertestcase.py | 1 | 5930 | # This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# Author: Cyril Jaquier
#
# $Revision: 728 $
__author__ = "Cyril Jaquier"
__version__ = "$Revision: 728 $"
__date__ = "$Date: 2009-02-08 18:31:24 +0100 (Sun, 08 Feb 2009) $"
__copyright__ = "Copyright (c) 2004 Cyril Jaquier"
__license__ = "GPL"
import unittest
from server.filterpoll import FilterPoll
from server.filter import FileFilter
from server.failmanager import FailManager
from server.failmanager import FailManagerEmpty
class IgnoreIP(unittest.TestCase):
def setUp(self):
"""Call before every test case."""
self.__filter = FileFilter(None)
def tearDown(self):
"""Call after every test case."""
def testIgnoreIPOK(self):
ipList = "127.0.0.1", "192.168.0.1", "255.255.255.255", "99.99.99.99"
for ip in ipList:
self.__filter.addIgnoreIP(ip)
self.assertTrue(self.__filter.inIgnoreIPList(ip))
# Test DNS
self.__filter.addIgnoreIP("www.epfl.ch")
self.assertTrue(self.__filter.inIgnoreIPList("128.178.50.12"))
def testIgnoreIPNOK(self):
ipList = "", "999.999.999.999", "abcdef", "192.168.0."
for ip in ipList:
self.__filter.addIgnoreIP(ip)
self.assertFalse(self.__filter.inIgnoreIPList(ip))
# Test DNS
self.__filter.addIgnoreIP("www.epfl.ch")
self.assertFalse(self.__filter.inIgnoreIPList("127.177.50.10"))
class LogFile(unittest.TestCase):
FILENAME = "testcases/files/testcase01.log"
def setUp(self):
"""Call before every test case."""
self.__filter = FilterPoll(None)
self.__filter.addLogPath(LogFile.FILENAME)
def tearDown(self):
"""Call after every test case."""
#def testOpen(self):
# self.__filter.openLogFile(LogFile.FILENAME)
def testIsModified(self):
self.assertTrue(self.__filter.isModified(LogFile.FILENAME))
class GetFailures(unittest.TestCase):
FILENAME_01 = "testcases/files/testcase01.log"
FILENAME_02 = "testcases/files/testcase02.log"
FILENAME_03 = "testcases/files/testcase03.log"
FILENAME_04 = "testcases/files/testcase04.log"
def setUp(self):
"""Call before every test case."""
self.__filter = FileFilter(None)
self.__filter.setActive(True)
# TODO Test this
#self.__filter.setTimeRegex("\S{3}\s{1,2}\d{1,2} \d{2}:\d{2}:\d{2}")
#self.__filter.setTimePattern("%b %d %H:%M:%S")
def tearDown(self):
"""Call after every test case."""
def testGetFailures01(self):
output = ('193.168.0.128', 3, 1124013599.0)
self.__filter.addLogPath(GetFailures.FILENAME_01)
self.__filter.addFailRegex("(?:(?:Authentication failure|Failed [-/\w+]+) for(?: [iI](?:llegal|nvalid) user)?|[Ii](?:llegal|nvalid) user|ROOT LOGIN REFUSED) .*(?: from|FROM) <HOST>")
self.__filter.getFailures(GetFailures.FILENAME_01)
ticket = self.__filter.failManager.toBan()
attempts = ticket.getAttempt()
date = ticket.getTime()
ip = ticket.getIP()
found = (ip, attempts, date)
self.assertEqual(found, output)
def testGetFailures02(self):
output = ('141.3.81.106', 4, 1124013539.0)
self.__filter.addLogPath(GetFailures.FILENAME_02)
self.__filter.addFailRegex("Failed .* from <HOST>")
self.__filter.getFailures(GetFailures.FILENAME_02)
ticket = self.__filter.failManager.toBan()
attempts = ticket.getAttempt()
date = ticket.getTime()
ip = ticket.getIP()
found = (ip, attempts, date)
self.assertEqual(found, output)
def testGetFailures03(self):
output = ('203.162.223.135', 6, 1124013544.0)
self.__filter.addLogPath(GetFailures.FILENAME_03)
self.__filter.addFailRegex("error,relay=<HOST>,.*550 User unknown")
self.__filter.getFailures(GetFailures.FILENAME_03)
ticket = self.__filter.failManager.toBan()
attempts = ticket.getAttempt()
date = ticket.getTime()
ip = ticket.getIP()
found = (ip, attempts, date)
self.assertEqual(found, output)
def testGetFailures04(self):
output = [('212.41.96.186', 4, 1124013600.0),
('212.41.96.185', 4, 1124013598.0)]
self.__filter.addLogPath(GetFailures.FILENAME_04)
self.__filter.addFailRegex("Invalid user .* <HOST>")
self.__filter.getFailures(GetFailures.FILENAME_04)
try:
for i in range(2):
ticket = self.__filter.failManager.toBan()
attempts = ticket.getAttempt()
date = ticket.getTime()
ip = ticket.getIP()
found = (ip, attempts, date)
self.assertEqual(found, output[i])
except FailManagerEmpty:
pass
def testGetFailuresMultiRegex(self):
output = ('141.3.81.106', 8, 1124013541.0)
self.__filter.addLogPath(GetFailures.FILENAME_02)
self.__filter.addFailRegex("Failed .* from <HOST>")
self.__filter.addFailRegex("Accepted .* from <HOST>")
self.__filter.getFailures(GetFailures.FILENAME_02)
ticket = self.__filter.failManager.toBan()
attempts = ticket.getAttempt()
date = ticket.getTime()
ip = ticket.getIP()
found = (ip, attempts, date)
self.assertEqual(found, output)
def testGetFailuresIgnoreRegex(self):
output = ('141.3.81.106', 8, 1124013541.0)
self.__filter.addLogPath(GetFailures.FILENAME_02)
self.__filter.addFailRegex("Failed .* from <HOST>")
self.__filter.addFailRegex("Accepted .* from <HOST>")
self.__filter.addIgnoreRegex("for roehl")
self.__filter.getFailures(GetFailures.FILENAME_02)
self.assertRaises(FailManagerEmpty, self.__filter.failManager.toBan)
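if __name__ == '__main__':
	# Convenience runner added so the file can be executed standalone; the
	# upstream Fail2Ban test harness normally drives these cases itself.
	unittest.main()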
| gpl-2.0 | 2,968,010,339,299,266,600 | 29.101523 | 184 | 0.698314 | false |
uogbuji/Library.Link | pylib/resource.py | 1 | 2994 | '''
'''
import re
import http
import asyncio
from itertools import *
from versa.driver import memory
from versa import I, VERSA_BASEIRI, ORIGIN, RELATIONSHIP, TARGET, ATTRIBUTES
from versa.reader import rdfalite
from versa.reader.rdfalite import RDF_NS, SCHEMAORG_NS
from versa import util as versautil
#from bibframe import BFZ, BL
#from bibframe.zextra import LL
#from rdflib import URIRef, Literal
#from rdflib import BNode
from amara3 import iri
from amara3.uxml import tree
from amara3.uxml import xml
from amara3.uxml.treeutil import *
from amara3.uxml import html5
RDFTYPE = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'
SCHEMAORG = 'http://schema.org/'
LL_RESOURCE_BASE = 'http://library.link/resource/'
LL_ISBN_STEMPLATE = 'http://library.link/id/isbn/{isbn}/brief.json'
OPENLIBRARY_TITLESEARCHBASE = 'http://openlibrary.org/search.json'
async def BUSTED(c14n_isbns, group_label, ll_super_list, session=None, max_retries=1):
    '''
    Scratch helper, kept under its original name: for a group of canonicalized
    ISBNs, query the Library.Link network for each and append those that
    resolve, as {'label': ..., 'isbns': [...]}, to ll_super_list.

    Reconstructed from a broken original: the parameter list and the
    compute_ean13_check helper (sketched at the end of this module) are
    assumptions inferred from the undefined names it used.
    '''
    checked_isbns = [ compute_ean13_check(i) for i in c14n_isbns ]
    tasks = [ network_isbn_info(isbn, session=session, max_retries=max_retries)
              for isbn in checked_isbns ]
    ll_result_sets = await asyncio.gather(*tasks)
    isbns_seen = set()
    filtered_isbns = []
    for isbn, result in zip(checked_isbns, ll_result_sets):
        #print(isbn, result)
        if result and isbn not in isbns_seen:
            filtered_isbns.append(isbn)
            isbns_seen.add(isbn)
    if filtered_isbns:
        ll_super_list.append({'label': group_label, 'isbns': filtered_isbns})
async def network_isbn_info(isbn, session=None, max_retries=1):
'''
Async helper to get JSON content from network resource page
Returns a JSON object
>>> from amara3.asynctools import go_async
>>> from librarylink.resource import network_isbn_info
>>> obj = go_async(network_isbn_info(9780871290861))
>>> obj['workExample'][0].get('holdings_count')
19
'''
retry_count = 0
url = LL_ISBN_STEMPLATE.format(**{'isbn': isbn})
#print('processing', url, file=sys.stderr)
while True:
model = memory.connection()
try:
if session == None:
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
obj = await response.json()
return obj
else:
async with session.get(url) as response:
obj = await response.json()
return obj
except Exception as e:
#print(url, f'[EXCEPTION {e}], context: {context}', file=sys.stderr)
retry_count += 1
if retry_count >= max_retries:
return None
await asyncio.sleep(0.2)
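# Hedged sketch of the EAN-13 normalization helper referenced by BUSTED above.
def compute_ean13_check(isbn):
    '''
    Sketch of the helper assumed by BUSTED (an assumption -- the real project
    may define it elsewhere): normalize an ISBN-13/EAN-13 string by
    recomputing the check digit over its first 12 digits.

    >>> compute_ean13_check('9780871290860')
    '9780871290861'
    '''
    digits = [int(c) for c in str(isbn)[:12]]
    total = sum(d * (3 if i % 2 else 1) for i, d in enumerate(digits))
    return ''.join(str(d) for d in digits) + str((10 - total % 10) % 10)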
| apache-2.0 | 7,825,348,551,972,483,000 | 31.193548 | 80 | 0.641283 | false |