repo_name | path | copies | size | text | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
emjemj/pyfiberdriver | pyfiberdriver.py | 1 | 10302 | from pysnmp.hlapi import *
import argparse
class SNMP:
hostname = None
community = None
def __init__(self, hostname, community):
self.hostname = hostname
self.community = community
def walk(self, root):
result = []
for errorIndication, errorStatus, errorIndex, varBinds in self.next_cmd(root):
if errorIndication:
print errorIndication
break
elif errorStatus:
print "{0} at {1}".format(errorStatus.prettyPrint(), errorIndex and varBinds[int(errorIndex)-1][0] or "?")
break
else:
for varBind in varBinds:
result.append({"oid": varBind[0].prettyPrint(), "value": varBind[1]})
return result
def next_cmd(self, root):
return nextCmd(
SnmpEngine(),
CommunityData(self.community),
UdpTransportTarget((self.hostname, 161)),
ContextData(),
ObjectType(ObjectIdentity(root)),
lookupNames=False, lookupValues=False, lookupMib=False, lexicographicMode=False
)
class MRVFiberDriver:
snmp = None
chassis = {}
def __init__(self, hostname, community):
self.snmp = SNMP(hostname, community)
self.discover()
def discover(self):
chassis = {}
# Figure out slots
for o in self.snmp.walk("1.3.6.1.4.1.629.200.7.1.1.2"):
self.chassis[o["value"]] = {}
# Initialize chassis data.
self._init_slots()
self._init_ports()
def _init_slots(self):
# slot model
models = self.snmp.walk("1.3.6.1.4.1.629.200.7.1.1.4")
# port count
portcounts = self.snmp.walk("1.3.6.1.4.1.629.200.7.1.1.6")
# hardware revisions
hwrevs = self.snmp.walk("1.3.6.1.4.1.629.200.7.1.1.7")
# card types
cardtypes = self.snmp.walk("1.3.6.1.4.1.629.200.7.1.1.8")
# serial numbers
serials = self.snmp.walk("1.3.6.1.4.1.629.200.7.1.1.32")
for slot in self.chassis:
slot = int(slot)
self.chassis[slot]["model"] = self._slot_value(slot, models)
self.chassis[slot]["portcount"] = self._slot_value(slot, portcounts)
self.chassis[slot]["hwrev"] = self._slot_value(slot, hwrevs)
self.chassis[slot]["type"] = self._slot_value_type(slot, cardtypes)
self.chassis[slot]["serial"] = self._slot_value(slot, serials)
def _init_ports(self):
# port types
porttypes = { 28: "TP", 87: "Console", 125: "SFP" }
for i in self.snmp.walk("1.3.6.1.4.1.629.200.8.1.1.4"):
c, s, p = self._sp(i["oid"])
self.chassis[s]["ports"][p]["type"] = porttypes[i["value"]]
# link status
linkstatuses = { 1: "Other", 2: "NoSignal", 3: "SignalDetected", 4: "Link" }
for i in self.snmp.walk("1.3.6.1.4.1.629.200.8.1.1.6"):
c, s, p = self._sp(i["oid"])
self.chassis[s]["ports"][p]["link"] = linkstatuses[i["value"]]
# loopback
loopbacks = { 1: "NotSupported", 2: "Off" }
for i in self.snmp.walk("1.3.6.1.4.1.629.200.8.1.1.13"):
c, s, p = self._sp(i["oid"])
self.chassis[s]["ports"][p]["loopback"] = loopbacks[i["value"]]
# enable
enables = { 1: "NotSupported", 3: "Enabled" }
for i in self.snmp.walk("1.3.6.1.4.1.629.200.8.1.1.14"):
c, s, p = self._sp(i["oid"])
self.chassis[s]["ports"][p]["enabled"] = enables[i["value"]]
# link integrity notification
lins = { 1: "NotSupported", 3: "Enabled" }
for i in self.snmp.walk("1.3.6.1.4.1.629.200.8.1.1.16"):
c, s, p = self._sp(i["oid"])
self.chassis[s]["ports"][p]["lin"] = lins[int(i["value"])]
# port names (descriptions)
for i in self.snmp.walk("1.3.6.1.4.1.629.200.8.1.1.21"):
c, s, p = self._sp(i["oid"])
self.chassis[s]["ports"][p]["name"] = i["value"]
# optics serial
for i in self.snmp.walk("1.3.6.1.4.1.629.200.8.1.1.28"):
c, s, p = self._sp(i["oid"])
val = str(i["value"])
if(val == "N/A"):
val = None
self.chassis[s]["ports"][p]["optics"]["serial"] = val
# optics vendor info
for i in self.snmp.walk("1.3.6.1.4.1.629.200.8.1.1.29"):
c, s, p = self._sp(i["oid"])
val = str(i["value"])
if(val == "N/A"):
val = None
self.chassis[s]["ports"][p]["optics"]["vendor"] = val
# optics model
for i in self.snmp.walk(".1.3.6.1.4.1.629.200.8.1.1.42"):
c, s, p = self._sp(i["oid"])
val = str(i["value"])
if(val == "N/A"):
val = None
self.chassis[s]["ports"][p]["optics"]["model"] = val
# optics temperature
for i in self.snmp.walk("1.3.6.1.4.1.629.200.8.1.1.30"):
c, s, p = self._sp(i["oid"])
val = i["value"]
if(val < 0):
val = None
self.chassis[s]["ports"][p]["optics"]["temperature"] = val
# optics txpower
for i in self.snmp.walk("1.3.6.1.4.1.629.200.8.1.1.31"):
c, s, p = self._sp(i["oid"])
val = float(i["value"]) / 1000
self.chassis[s]["ports"][p]["optics"]["txpower"] = val
# optics rxpower
for i in self.snmp.walk("1.3.6.1.4.1.629.200.8.1.1.32"):
c, s, p = self._sp(i["oid"])
val = float(i["value"]) / 1000
self.chassis[s]["ports"][p]["optics"]["rxpower"] = val
# optics bias amps
for i in self.snmp.walk("1.3.6.1.4.1.629.200.8.1.1.33"):
c, s, p = self._sp(i["oid"])
val = float(i["value"]) / 1000
self.chassis[s]["ports"][p]["optics"]["bias"] = val
# optics voltage
for i in self.snmp.walk("1.3.6.1.4.1.629.200.8.1.1.34"):
c, s, p = self._sp(i["oid"])
val = float(i["value"]) / 1000
self.chassis[s]["ports"][p]["optics"]["voltage"] = val
# optics wavelength
for i in self.snmp.walk("1.3.6.1.4.1.629.200.8.1.1.37"):
c, s, p = self._sp(i["oid"])
self.chassis[s]["ports"][p]["optics"]["wavelength"] = i["value"]
# digital diagnostic status
doms = { 1: "NotSupported", 2: "DiagsOk" }
for i in self.snmp.walk("1.3.6.1.4.1.629.200.8.1.1.38"):
c, s, p = self._sp(i["oid"])
self.chassis[s]["ports"][p]["domstatus"] = i["value"]
def _sp(self, oid):
# Helper function to parse chassis, slot, port from oid
pcs = oid.split(".")
c = int(pcs[-3])
s = int(pcs[-2])
p = int(pcs[-1])
if(s in self.chassis and not "ports" in self.chassis[s]):
self.chassis[s]["ports"] = {}
if(s in self.chassis and not p in self.chassis[s]["ports"]):
self.chassis[s]["ports"][p] = {"optics": {}}
return c, s, p
def _slot_value(self, slot, data):
# Helper function to store data for slot.
for i in data:
pcs = i["oid"].split(".")
if(slot == int(pcs[-1])):
if str(i["value"]) == "N/A":
return None
return str(i["value"]).strip()
return None
def _slot_value_type(self, slot, data):
types = { 1: None, 2: "Management", 3: "Converter" }
for i in data:
pcs = i["oid"].split(".")
if(slot == int(pcs[-1])):
if i["value"] in types:
return types[i["value"]]
else:
return None
def get_chassis(self):
return self.chassis
def get_slot_count(self):
return len(self.chassis)
def get_slot_active_count(self):
active = 0
for slot in self.chassis:
if self.chassis[slot]["model"] is not None:
active +=1
return active
def main():
parser = argparse.ArgumentParser(description="List info from MRV Fiberdriver chassis")
parser.add_argument("--host", "-H", help="Host for your fiberdriver chassis", required=True)
parser.add_argument("--community", "-c", help="SNMP Community", required=True)
parser.add_argument("--list-slots", "-s", help="display a list of chassis slots", action="store_true")
parser.add_argument("--list-ports", "-p", help="display a list of ports", action="store_true")
parser.add_argument("--digital-diagnostics", "-d", help="display digital diagnostics information", action="store_true")
parser.add_argument("--inventory", "-i", help="display inventory", action="store_true")
opts = parser.parse_args()
fd = MRVFiberDriver(opts.host, opts.community)
if(opts.list_slots):
print "{:4} {:20} {:20} {:20}".format("Slot", "Model", "Type", "Serial")
for slot_id in fd.get_chassis():
slot = fd.get_chassis()[slot_id]
print "{:4} {:20} {:20} {:20}".format(slot_id, slot["model"], slot["type"], slot["serial"])
if(opts.inventory):
print "{:4} {:8} {:15} {:20} {:25} {:25}".format("Type", "Location", "Serial", "Vendor", "Model", "Revision")
optics = []
for slot_id in fd.get_chassis():
slot = fd.get_chassis()[slot_id]
if "ports" in slot and len(slot["ports"]) > 0:
print "{:4} 1.{:6} {:15} {:20} {:25} {:25}".format("Slot", slot_id, slot["serial"], "MRV", slot["model"], slot["hwrev"])
for port_id in slot["ports"]:
port = slot["ports"][port_id]
if port["optics"]["serial"] is None:
continue
optic = {
"location": "{}.{}".format(slot_id, port_id),
"type": port["type"],
"vendor": port["optics"]["vendor"],
"serial": port["optics"]["serial"],
"model": port["optics"]["model"],
"hwrev": "N/A"
}
optics.append(optic)
for optic in optics:
print "{:4} 1.{:6} {:15} {:20} {:25} {:25}".format(optic["type"], optic["location"], optic["serial"], optic["vendor"], optic["model"], optic["hwrev"])
if(opts.list_ports):
print "{:5} {:13} {:15} {:13} {:15} {:6} {:7} {:20}".format("Port", "Enabled", "Link", "Lin", "DOM", "WL(nm)", "Channel", "Name")
for slot_id in fd.get_chassis():
slot = fd.get_chassis()[slot_id]
if "ports" in slot and len(slot["ports"]) > 0:
for port_id in slot["ports"]:
port = slot["ports"][port_id]
print "1.{}.{} {:13} {:15} {:13} {:15} {:6} {:7} {:20}".format(
slot_id,
port_id,
port["enabled"],
port["link"],
port["lin"],
port["domstatus"],
port["optics"]["wavelength"],
"Channel",
port["name"])
if(opts.digital_diagnostics):
print "{:5} {:10} {:10} {:10} {:10} {:10} {:10}".format("Port", "DDiags", "Temp(C)", "Supply(V)", "TxPower(dBm)", "RxPower(dBm)", "Bias(mA)")
for slot_id in fd.get_chassis():
slot = fd.get_chassis()[slot_id]
if "ports" in slot and len(slot["ports"]) > 0:
for port_id in slot["ports"]:
port = slot["ports"][port_id]
optic = port["optics"]
if port["domstatus"] == 1:
# Don't list ports where dom is not available
continue
def dom_status(x):
return {
2: "Ok"
}.get(x, "N/A")
print "1.{}.{} {:10} {:10} {:10} {:10} {:10} {:10}".format(
slot_id,
port_id,
dom_status(port["domstatus"]),
optic["temperature"],
optic["voltage"],
optic["txpower"],
optic["rxpower"],
optic["bias"]
)
if __name__ == "__main__":
main()
| bsd-2-clause | 3,303,528,582,462,716,400 | 28.774566 | 154 | 0.57882 | false | 2.628061 | false | false | false |
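
The module above keys every table row on the last three components of the returned OID, which encode chassis, slot and port (see `_sp`). Below is a minimal, self-contained Python 3 sketch of that parsing step; the function name and sample OID are illustrative, not part of the original file:

```python
def parse_chassis_slot_port(oid):
    # MRV tables are indexed by <chassis>.<slot>.<port> appended to the base OID,
    # so the trailing three components identify the port a value belongs to.
    parts = oid.strip(".").split(".")
    chassis, slot, port = (int(x) for x in parts[-3:])
    return chassis, slot, port

# e.g. a row of the port-type table 1.3.6.1.4.1.629.200.8.1.1.4 for chassis 1, slot 3, port 2
print(parse_chassis_slot_port("1.3.6.1.4.1.629.200.8.1.1.4.1.3.2"))  # (1, 3, 2)
```
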
azul-cloud/serendipity | products/migrations/0001_initial.py | 1 | 1857 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Ingredient',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('name', models.CharField(max_length=30)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Perk',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('name', models.CharField(max_length=30)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('slug', models.SlugField(unique=True, blank=True, editable=False)),
('title', models.CharField(max_length=40)),
('description', models.CharField(max_length=1024)),
('type', models.CharField(choices=[('MIX', 'Dip Mix'), ('RUB', 'Dry Rub'), ('MAR', 'Marinade')], max_length='3')),
('price', models.DecimalField(decimal_places=2, max_digits=6)),
('contains', models.ManyToManyField(to='products.Ingredient', null=True, blank=True)),
('perks', models.ManyToManyField(to='products.Perk', null=True, blank=True)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
]
| mit | -9,119,335,546,226,879,000 | 36.14 | 130 | 0.517501 | false | 4.400474 | false | false | false |
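
A models.py along the following lines would produce the migration above. This is a reconstruction inferred from the migration itself, not the project's actual source; the `'abstract': False` option on Product suggests the real model inherits from an abstract base, and the `null=True` flags on the ManyToMany fields are dropped here because they have no effect in Django.

```python
# Hypothetical models.py reconstructed from the migration above.
from django.db import models


class Ingredient(models.Model):
    name = models.CharField(max_length=30)


class Perk(models.Model):
    name = models.CharField(max_length=30)


class Product(models.Model):
    TYPE_CHOICES = [('MIX', 'Dip Mix'), ('RUB', 'Dry Rub'), ('MAR', 'Marinade')]

    slug = models.SlugField(unique=True, blank=True, editable=False)
    title = models.CharField(max_length=40)
    description = models.CharField(max_length=1024)
    type = models.CharField(choices=TYPE_CHOICES, max_length=3)
    price = models.DecimalField(decimal_places=2, max_digits=6)
    contains = models.ManyToManyField(Ingredient, blank=True)
    perks = models.ManyToManyField(Perk, blank=True)
```
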
edwardsmith999/pyDataView | postproclib/cplpostproc.py | 1 | 4562 | import os
from .cplfields import *
from .postproc import PostProc
from .pplexceptions import NoResultsInDir, DataNotAvailable
from .mdpostproc import MD_PostProc
from .cfdpostproc import CFD_PostProc
from .serial_cfdpostproc import Serial_CFD_PostProc
from .openfoampostproc import OpenFOAM_PostProc
# Results directory paths for each code
resultsdirs = {
'flowmol': 'flowmol/results',
'lammps': 'lammps/',
'serialcouette': 'couette_serial/results/',
'openfoam': 'openfoam/',
'transflow': 'couette_data/'
}
# Field classes that are associated with velocity for each code
vfieldtypes = {
'flowmol':
mdfields.MD_vField,
'lammps':
lammpsfields.LAMMPS_vField,
'serialcouette':
serial_cfdfields.Serial_CFD_vField,
'openfoam':
openfoamfields.OpenFOAM_vField,
'transflow':
cfdfields.CFD_vField
}
# Field classes that are associated with momentum for each code
momfieldtypes = {
'flowmol':
mdfields.MD_momField,
'serialcouette':
serial_cfdfields.Serial_CFD_momField,
'lammps':
lammpsfields.LAMMPS_momField,
'openfoam':
openfoamfields.OpenFOAM_momField,
'transflow':
None
}
# Field classes that are associated with stress for each code
stressfieldtypes = {
'flowmol':
mdfields.MD_stressField,
'lammps':
None,
'serialcouette':
serial_cfdfields.Serial_CFD_StressField,
'openfoam':
openfoamfields.OpenFOAM_mugradvField,
'transflow':
cfdfields.CFD_mugradvField
}
# CPL Field classes that could potentially be constructed
possible_fields = {
'CPL Velocity': CPL_vField,
'CPL Momentum': CPL_momField,
'CPL Stress': CPL_stressField
}
# And their associated field class dictionary
type_dicts = {
'CPL Velocity': vfieldtypes,
'CPL Momentum': momfieldtypes,
'CPL Stress': stressfieldtypes
}
# All possible pairings (Surely this should be done with itertools permute?)
possible_pairs = [
{'MD':'flowmol', 'CFD':'serialcouette'},
{'MD':'flowmol', 'CFD':'openfoam'},
{'MD':'flowmol', 'CFD':'transflow'},
{'MD':'lammps', 'CFD':'openfoam'}
]
class CPL_PostProc(PostProc):
"""
Post processing class for Coupled runs
"""
def __init__(self,resultsdir,**kwargs):
self.resultsdir = resultsdir
# Check directory exists before instantiating object and check
# which files associated with plots are in directory
if (not os.path.isdir(self.resultsdir)):
print(("Directory " + self.resultsdir + " not found"))
raise IOError
self.plotlist = {}
try:
fobj = open(self.resultsdir + 'cpl/coupler_header','r')
except IOError:
raise NoResultsInDir
for pair in possible_pairs:
MDkey = pair['MD']
CFDkey = pair['CFD']
for CPLkey, CPLfieldtype in list(possible_fields.items()):
print(('Attempting to construct ' + str(CPLfieldtype)
+ ' for ' + MDkey + ' and ' + CFDkey))
try:
self.plotlist[CPLkey] = CPLfieldtype(self.resultsdir,
MDFieldType=type_dicts[CPLkey][MDkey],
CFDFieldType=type_dicts[CPLkey][CFDkey],
mddir=resultsdirs[MDkey],
cfddir=resultsdirs[CFDkey])
except AssertionError:
pass
except DataNotAvailable:
pass
except IOError:
pass
except TypeError:
pass
| gpl-3.0 | -720,757,905,833,127,600 | 34.092308 | 97 | 0.488601 | false | 4.521308 | false | false | false |
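
The `possible_pairs` list above is written out by hand, and its inline comment asks whether itertools should be used instead. Here is a sketch of that alternative; note that the full cross product adds pairings the hand-written list omits (for example lammps/serialcouette), so unsupported combinations would still need filtering:

```python
import itertools

md_codes = ('flowmol', 'lammps')
cfd_codes = ('serialcouette', 'openfoam', 'transflow')

# Cross product of MD and CFD codes: 6 pairings, versus 4 in the hand-written list.
possible_pairs = [{'MD': md, 'CFD': cfd}
                  for md, cfd in itertools.product(md_codes, cfd_codes)]
```
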
cloudbase/cinder | cinder/consistencygroup/api.py | 4 | 34498 | # Copyright (C) 2012 - 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all requests relating to consistency groups.
"""
import functools
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import timeutils
from cinder import db
from cinder.db import base
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder import objects
from cinder.objects import fields as c_fields
import cinder.policy
from cinder import quota
from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder.volume import api as volume_api
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as vol_utils
from cinder.volume import volume_types
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
CGQUOTAS = quota.CGQUOTAS
VALID_REMOVE_VOL_FROM_CG_STATUS = (
'available',
'in-use',
'error',
'error_deleting')
VALID_ADD_VOL_TO_CG_STATUS = (
'available',
'in-use')
def wrap_check_policy(func):
"""Check policy corresponding to the wrapped methods prior to execution.
This decorator requires the first 3 args of the wrapped function
to be (self, context, consistencygroup)
"""
@functools.wraps(func)
def wrapped(self, context, target_obj, *args, **kwargs):
check_policy(context, func.__name__, target_obj)
return func(self, context, target_obj, *args, **kwargs)
return wrapped
def check_policy(context, action, target_obj=None):
target = {
'project_id': context.project_id,
'user_id': context.user_id,
}
target_obj = target_obj.fields if target_obj else {}
target.update(target_obj)
_action = 'consistencygroup:%s' % action
cinder.policy.enforce(context, _action, target)
class API(base.Base):
"""API for interacting with the volume manager for consistency groups."""
def __init__(self, db_driver=None):
self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
self.volume_rpcapi = volume_rpcapi.VolumeAPI()
self.availability_zone_names = ()
self.volume_api = volume_api.API()
super(API, self).__init__(db_driver)
def _valid_availability_zone(self, availability_zone):
if availability_zone in self.availability_zone_names:
return True
if CONF.storage_availability_zone == availability_zone:
return True
azs = self.volume_api.list_availability_zones()
self.availability_zone_names = [az['name'] for az in azs]
return availability_zone in self.availability_zone_names
def _extract_availability_zone(self, availability_zone):
if availability_zone is None:
if CONF.default_availability_zone:
availability_zone = CONF.default_availability_zone
else:
# For backwards compatibility use the storage_availability_zone
availability_zone = CONF.storage_availability_zone
valid = self._valid_availability_zone(availability_zone)
if not valid:
msg = _LW(
"Availability zone '%s' is invalid") % (availability_zone)
LOG.warning(msg)
raise exception.InvalidInput(reason=msg)
return availability_zone
def create(self, context, name, description,
cg_volume_types, availability_zone=None):
check_policy(context, 'create')
volume_type_list = None
volume_type_list = cg_volume_types.split(',')
req_volume_types = []
# NOTE: Admin context is required to get extra_specs of volume_types.
req_volume_types = (self.db.volume_types_get_by_name_or_id(
context.elevated(), volume_type_list))
req_volume_type_ids = ""
for voltype in req_volume_types:
req_volume_type_ids = (
req_volume_type_ids + voltype.get('id') + ",")
if len(req_volume_type_ids) == 0:
req_volume_type_ids = None
availability_zone = self._extract_availability_zone(availability_zone)
kwargs = {'user_id': context.user_id,
'project_id': context.project_id,
'availability_zone': availability_zone,
'status': c_fields.ConsistencyGroupStatus.CREATING,
'name': name,
'description': description,
'volume_type_id': req_volume_type_ids}
group = None
try:
group = objects.ConsistencyGroup(context=context, **kwargs)
group.create()
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred when creating consistency group"
" %s."), name)
request_spec_list = []
filter_properties_list = []
for req_volume_type in req_volume_types:
request_spec = {'volume_type': req_volume_type.copy(),
'consistencygroup_id': group.id}
filter_properties = {}
request_spec_list.append(request_spec)
filter_properties_list.append(filter_properties)
# Update quota for consistencygroups
self.update_quota(context, group, 1)
self._cast_create_consistencygroup(context, group,
request_spec_list,
filter_properties_list)
return group
def create_from_src(self, context, name, description=None,
cgsnapshot_id=None, source_cgid=None):
check_policy(context, 'create')
kwargs = {
'user_id': context.user_id,
'project_id': context.project_id,
'status': c_fields.ConsistencyGroupStatus.CREATING,
'name': name,
'description': description,
'cgsnapshot_id': cgsnapshot_id,
'source_cgid': source_cgid,
}
group = None
try:
group = objects.ConsistencyGroup(context=context, **kwargs)
group.create(cg_snap_id=cgsnapshot_id, cg_id=source_cgid)
except exception.ConsistencyGroupNotFound:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Source CG %(source_cg)s not found when "
"creating consistency group %(cg)s from "
"source."),
{'cg': name, 'source_cg': source_cgid})
except exception.CgSnapshotNotFound:
with excutils.save_and_reraise_exception():
LOG.error(_LE("CG snapshot %(cgsnap)s not found when creating "
"consistency group %(cg)s from source."),
{'cg': name, 'cgsnap': cgsnapshot_id})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred when creating consistency group"
" %(cg)s from cgsnapshot %(cgsnap)s."),
{'cg': name, 'cgsnap': cgsnapshot_id})
# Update quota for consistencygroups
self.update_quota(context, group, 1)
if not group.host:
msg = _("No host to create consistency group %s.") % group.id
LOG.error(msg)
raise exception.InvalidConsistencyGroup(reason=msg)
if cgsnapshot_id:
self._create_cg_from_cgsnapshot(context, group, cgsnapshot_id)
elif source_cgid:
self._create_cg_from_source_cg(context, group, source_cgid)
return group
def _create_cg_from_cgsnapshot(self, context, group, cgsnapshot_id):
try:
cgsnapshot = objects.CGSnapshot.get_by_id(context, cgsnapshot_id)
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
if not snapshots:
msg = _("Cgsnahost is empty. No consistency group "
"will be created.")
raise exception.InvalidConsistencyGroup(reason=msg)
for snapshot in snapshots:
kwargs = {}
kwargs['availability_zone'] = group.availability_zone
kwargs['cgsnapshot'] = cgsnapshot
kwargs['consistencygroup'] = group
kwargs['snapshot'] = snapshot
volume_type_id = snapshot.volume_type_id
if volume_type_id:
kwargs['volume_type'] = volume_types.get_volume_type(
context, volume_type_id)
# Since cgsnapshot is passed in, the following call will
# create a db entry for the volume, but will not call the
# volume manager to create a real volume in the backend yet.
# If error happens, taskflow will handle rollback of quota
# and removal of volume entry in the db.
try:
self.volume_api.create(context,
snapshot.volume_size,
None,
None,
**kwargs)
except exception.CinderException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred when creating volume "
"entry from snapshot in the process of "
"creating consistency group %(group)s "
"from cgsnapshot %(cgsnap)s."),
{'group': group.id,
'cgsnap': cgsnapshot.id})
except Exception:
with excutils.save_and_reraise_exception():
try:
group.destroy()
finally:
LOG.error(_LE("Error occurred when creating consistency "
"group %(group)s from cgsnapshot "
"%(cgsnap)s."),
{'group': group.id,
'cgsnap': cgsnapshot.id})
volumes = self.db.volume_get_all_by_group(context,
group.id)
for vol in volumes:
# Update the host field for the volume.
self.db.volume_update(context, vol['id'],
{'host': group.get('host')})
self.volume_rpcapi.create_consistencygroup_from_src(
context, group, cgsnapshot)
def _create_cg_from_source_cg(self, context, group, source_cgid):
try:
source_cg = objects.ConsistencyGroup.get_by_id(context,
source_cgid)
source_vols = self.db.volume_get_all_by_group(context,
source_cg.id)
if not source_vols:
msg = _("Source CG is empty. No consistency group "
"will be created.")
raise exception.InvalidConsistencyGroup(reason=msg)
for source_vol in source_vols:
kwargs = {}
kwargs['availability_zone'] = group.availability_zone
kwargs['source_cg'] = source_cg
kwargs['consistencygroup'] = group
kwargs['source_volume'] = source_vol
volume_type_id = source_vol.get('volume_type_id')
if volume_type_id:
kwargs['volume_type'] = volume_types.get_volume_type(
context, volume_type_id)
# Since source_cg is passed in, the following call will
# create a db entry for the volume, but will not call the
# volume manager to create a real volume in the backend yet.
# If error happens, taskflow will handle rollback of quota
# and removal of volume entry in the db.
try:
self.volume_api.create(context,
source_vol['size'],
None,
None,
**kwargs)
except exception.CinderException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred when creating cloned "
"volume in the process of creating "
"consistency group %(group)s from "
"source CG %(source_cg)s."),
{'group': group.id,
'source_cg': source_cg.id})
except Exception:
with excutils.save_and_reraise_exception():
try:
group.destroy()
finally:
LOG.error(_LE("Error occurred when creating consistency "
"group %(group)s from source CG "
"%(source_cg)s."),
{'group': group.id,
'source_cg': source_cg.id})
volumes = self.db.volume_get_all_by_group(context,
group.id)
for vol in volumes:
# Update the host field for the volume.
self.db.volume_update(context, vol['id'],
{'host': group.host})
self.volume_rpcapi.create_consistencygroup_from_src(context, group,
None, source_cg)
def _cast_create_consistencygroup(self, context, group,
request_spec_list,
filter_properties_list):
try:
for request_spec in request_spec_list:
volume_type = request_spec.get('volume_type', None)
volume_type_id = None
if volume_type:
volume_type_id = volume_type.get('id', None)
specs = {}
if volume_type_id:
qos_specs = volume_types.get_volume_type_qos_specs(
volume_type_id)
specs = qos_specs['qos_specs']
if not specs:
# to make sure we don't pass empty dict
specs = None
volume_properties = {
'size': 0, # Need to populate size for the scheduler
'user_id': context.user_id,
'project_id': context.project_id,
'status': 'creating',
'attach_status': 'detached',
'encryption_key_id': request_spec.get('encryption_key_id',
None),
'display_description': request_spec.get('description',
None),
'display_name': request_spec.get('name', None),
'volume_type_id': volume_type_id,
}
request_spec['volume_properties'] = volume_properties
request_spec['qos_specs'] = specs
except Exception:
with excutils.save_and_reraise_exception():
try:
group.destroy()
finally:
LOG.error(_LE("Error occurred when building "
"request spec list for consistency group "
"%s."), group.id)
# Cast to the scheduler and let it handle whatever is needed
# to select the target host for this group.
self.scheduler_rpcapi.create_consistencygroup(
context,
group,
request_spec_list=request_spec_list,
filter_properties_list=filter_properties_list)
def update_quota(self, context, group, num, project_id=None):
reserve_opts = {'consistencygroups': num}
try:
reservations = CGQUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
if reservations:
CGQUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
try:
group.destroy()
finally:
LOG.error(_LE("Failed to update quota for "
"consistency group %s."), group.id)
@wrap_check_policy
def delete(self, context, group, force=False):
if not group.host:
self.update_quota(context, group, -1, group.project_id)
LOG.debug("No host for consistency group %s. Deleting from "
"the database.", group.id)
group.destroy()
return
if force:
expected = {}
else:
expected = {'status': (c_fields.ConsistencyGroupStatus.AVAILABLE,
c_fields.ConsistencyGroupStatus.ERROR)}
filters = [~db.cg_has_cgsnapshot_filter(),
~db.cg_has_volumes_filter(attached_or_with_snapshots=force),
~db.cg_creating_from_src(cg_id=group.id)]
values = {'status': c_fields.ConsistencyGroupStatus.DELETING}
if not group.conditional_update(values, expected, filters):
if force:
reason = _('Consistency group must not have attached volumes, '
'volumes with snapshots, or dependent cgsnapshots')
else:
reason = _('Consistency group status must be available or '
'error and must not have volumes or dependent '
'cgsnapshots')
msg = (_('Cannot delete consistency group %(id)s. %(reason)s, and '
'it cannot be the source for an ongoing CG or CG '
'Snapshot creation.')
% {'id': group.id, 'reason': reason})
raise exception.InvalidConsistencyGroup(reason=msg)
self.volume_rpcapi.delete_consistencygroup(context, group)
def _check_update(self, group, name, description, add_volumes,
remove_volumes, allow_empty=False):
if allow_empty:
if (name is None and description is None
and not add_volumes and not remove_volumes):
msg = (_("Cannot update consistency group %(group_id)s "
"because no valid name, description, add_volumes, "
"or remove_volumes were provided.") %
{'group_id': group.id})
raise exception.InvalidConsistencyGroup(reason=msg)
else:
if not (name or description or add_volumes or remove_volumes):
msg = (_("Cannot update consistency group %(group_id)s "
"because no valid name, description, add_volumes, "
"or remove_volumes were provided.") %
{'group_id': group.id})
raise exception.InvalidConsistencyGroup(reason=msg)
def update(self, context, group, name, description,
add_volumes, remove_volumes, allow_empty=False):
"""Update consistency group."""
add_volumes_list = []
remove_volumes_list = []
if add_volumes:
add_volumes = add_volumes.strip(',')
add_volumes_list = add_volumes.split(',')
if remove_volumes:
remove_volumes = remove_volumes.strip(',')
remove_volumes_list = remove_volumes.split(',')
invalid_uuids = []
for uuid in add_volumes_list:
if uuid in remove_volumes_list:
invalid_uuids.append(uuid)
if invalid_uuids:
msg = _("UUIDs %s are in both add and remove volume "
"list.") % invalid_uuids
raise exception.InvalidVolume(reason=msg)
# Validate name.
if name == group.name:
name = None
# Validate description.
if description == group.description:
description = None
self._check_update(group, name, description, add_volumes,
remove_volumes, allow_empty)
fields = {'updated_at': timeutils.utcnow()}
# Update name and description in db now. No need to
# to send them over through an RPC call.
if allow_empty:
if name is not None:
fields['name'] = name
if description is not None:
fields['description'] = description
else:
if name:
fields['name'] = name
if description:
fields['description'] = description
# NOTE(geguileo): We will use the updating status in the CG as a lock
# mechanism to prevent volume add/remove races with other API, while we
# figure out if we really need to add or remove volumes.
if add_volumes or remove_volumes:
fields['status'] = c_fields.ConsistencyGroupStatus.UPDATING
# We cannot modify the members of this CG if the CG is being used
# to create another CG or a CGsnapshot is being created
filters = [~db.cg_creating_from_src(cg_id=group.id),
~db.cgsnapshot_creating_from_src()]
else:
filters = []
expected = {'status': c_fields.ConsistencyGroupStatus.AVAILABLE}
if not group.conditional_update(fields, expected, filters):
msg = _("Cannot update consistency group %s, status must be "
"available, and it cannot be the source for an ongoing "
"CG or CG Snapshot creation.") % group.id
raise exception.InvalidConsistencyGroup(reason=msg)
# Now the CG is "locked" for updating
try:
# Validate volumes in add_volumes and remove_volumes.
add_volumes_new = self._validate_add_volumes(
context, group.volumes, add_volumes_list, group)
remove_volumes_new = self._validate_remove_volumes(
group.volumes, remove_volumes_list, group)
self._check_update(group, name, description, add_volumes_new,
remove_volumes_new, allow_empty)
except Exception:
# If we have an error on the volume_lists we must return status to
# available as we were doing before removing API races
with excutils.save_and_reraise_exception():
group.status = c_fields.ConsistencyGroupStatus.AVAILABLE
group.save()
# Do an RPC call only if the update request includes
# adding/removing volumes. add_volumes_new and remove_volumes_new
# are strings of volume UUIDs separated by commas with no spaces
# in between.
if add_volumes_new or remove_volumes_new:
self.volume_rpcapi.update_consistencygroup(
context, group,
add_volumes=add_volumes_new,
remove_volumes=remove_volumes_new)
# If there are no new volumes to add or remove and we had changed
# the status to updating, turn it back to available
elif group.status == c_fields.ConsistencyGroupStatus.UPDATING:
group.status = c_fields.ConsistencyGroupStatus.AVAILABLE
group.save()
def _validate_remove_volumes(self, volumes, remove_volumes_list, group):
# Validate volumes in remove_volumes.
if not remove_volumes_list:
return None
remove_volumes_new = ""
for volume in volumes:
if volume['id'] in remove_volumes_list:
if volume['status'] not in VALID_REMOVE_VOL_FROM_CG_STATUS:
msg = (_("Cannot remove volume %(volume_id)s from "
"consistency group %(group_id)s because volume "
"is in an invalid state: %(status)s. Valid "
"states are: %(valid)s.") %
{'volume_id': volume['id'],
'group_id': group.id,
'status': volume['status'],
'valid': VALID_REMOVE_VOL_FROM_CG_STATUS})
raise exception.InvalidVolume(reason=msg)
# Volume currently in CG. It will be removed from CG.
if remove_volumes_new:
remove_volumes_new += ","
remove_volumes_new += volume['id']
for rem_vol in remove_volumes_list:
if rem_vol not in remove_volumes_new:
msg = (_("Cannot remove volume %(volume_id)s from "
"consistency group %(group_id)s because it "
"is not in the group.") %
{'volume_id': rem_vol,
'group_id': group.id})
raise exception.InvalidVolume(reason=msg)
return remove_volumes_new
def _validate_add_volumes(self, context, volumes, add_volumes_list, group):
if not add_volumes_list:
return None
add_volumes_new = ""
for volume in volumes:
if volume['id'] in add_volumes_list:
# Volume already in CG. Remove from add_volumes.
add_volumes_list.remove(volume['id'])
for add_vol in add_volumes_list:
try:
add_vol_ref = self.db.volume_get(context, add_vol)
except exception.VolumeNotFound:
msg = (_("Cannot add volume %(volume_id)s to consistency "
"group %(group_id)s because volume cannot be "
"found.") %
{'volume_id': add_vol,
'group_id': group.id})
raise exception.InvalidVolume(reason=msg)
orig_group = add_vol_ref.get('consistencygroup_id', None)
if orig_group:
# If volume to be added is already in the group to be updated,
# it should have been removed from the add_volumes_list in the
# beginning of this function. If we are here, it means it is
# in a different group.
msg = (_("Cannot add volume %(volume_id)s to consistency "
"group %(group_id)s because it is already in "
"consistency group %(orig_group)s.") %
{'volume_id': add_vol_ref['id'],
'group_id': group.id,
'orig_group': orig_group})
raise exception.InvalidVolume(reason=msg)
if add_vol_ref:
add_vol_type_id = add_vol_ref.get('volume_type_id', None)
if not add_vol_type_id:
msg = (_("Cannot add volume %(volume_id)s to consistency "
"group %(group_id)s because it has no volume "
"type.") %
{'volume_id': add_vol_ref['id'],
'group_id': group.id})
raise exception.InvalidVolume(reason=msg)
if add_vol_type_id not in group.volume_type_id:
msg = (_("Cannot add volume %(volume_id)s to consistency "
"group %(group_id)s because volume type "
"%(volume_type)s is not supported by the "
"group.") %
{'volume_id': add_vol_ref['id'],
'group_id': group.id,
'volume_type': add_vol_type_id})
raise exception.InvalidVolume(reason=msg)
if (add_vol_ref['status'] not in
VALID_ADD_VOL_TO_CG_STATUS):
msg = (_("Cannot add volume %(volume_id)s to consistency "
"group %(group_id)s because volume is in an "
"invalid state: %(status)s. Valid states are: "
"%(valid)s.") %
{'volume_id': add_vol_ref['id'],
'group_id': group.id,
'status': add_vol_ref['status'],
'valid': VALID_ADD_VOL_TO_CG_STATUS})
raise exception.InvalidVolume(reason=msg)
# group.host and add_vol_ref['host'] are in this format:
# 'host@backend#pool'. Extract host (host@backend) before
# doing comparison.
vol_host = vol_utils.extract_host(add_vol_ref['host'])
group_host = vol_utils.extract_host(group.host)
if group_host != vol_host:
raise exception.InvalidVolume(
reason=_("Volume is not local to this node."))
# Volume exists. It will be added to CG.
if add_volumes_new:
add_volumes_new += ","
add_volumes_new += add_vol_ref['id']
else:
msg = (_("Cannot add volume %(volume_id)s to consistency "
"group %(group_id)s because volume does not exist.") %
{'volume_id': add_vol_ref['id'],
'group_id': group.id})
raise exception.InvalidVolume(reason=msg)
return add_volumes_new
def get(self, context, group_id):
group = objects.ConsistencyGroup.get_by_id(context, group_id)
check_policy(context, 'get', group)
return group
def get_all(self, context, filters=None, marker=None, limit=None,
offset=None, sort_keys=None, sort_dirs=None):
check_policy(context, 'get_all')
if filters is None:
filters = {}
if filters:
LOG.debug("Searching by: %s", filters)
if (context.is_admin and 'all_tenants' in filters):
del filters['all_tenants']
groups = objects.ConsistencyGroupList.get_all(
context, filters=filters, marker=marker, limit=limit,
offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs)
else:
groups = objects.ConsistencyGroupList.get_all_by_project(
context, context.project_id, filters=filters, marker=marker,
limit=limit, offset=offset, sort_keys=sort_keys,
sort_dirs=sort_dirs)
return groups
def create_cgsnapshot(self, context, group, name, description):
options = {'consistencygroup_id': group.id,
'user_id': context.user_id,
'project_id': context.project_id,
'status': "creating",
'name': name,
'description': description}
cgsnapshot = None
cgsnapshot_id = None
try:
cgsnapshot = objects.CGSnapshot(context, **options)
cgsnapshot.create()
cgsnapshot_id = cgsnapshot.id
snap_name = cgsnapshot.name
snap_desc = cgsnapshot.description
with group.obj_as_admin():
self.volume_api.create_snapshots_in_db(
context, group.volumes, snap_name, snap_desc,
cgsnapshot_id)
except Exception:
with excutils.save_and_reraise_exception():
try:
# If the cgsnapshot has been created
if cgsnapshot.obj_attr_is_set('id'):
cgsnapshot.destroy()
finally:
LOG.error(_LE("Error occurred when creating cgsnapshot"
" %s."), cgsnapshot_id)
self.volume_rpcapi.create_cgsnapshot(context, cgsnapshot)
return cgsnapshot
def delete_cgsnapshot(self, context, cgsnapshot, force=False):
values = {'status': 'deleting'}
expected = {'status': ('available', 'error')}
filters = [~db.cg_creating_from_src(cgsnapshot_id=cgsnapshot.id)]
res = cgsnapshot.conditional_update(values, expected, filters)
if not res:
msg = _('CgSnapshot status must be available or error, and no CG '
'can be currently using it as source for its creation.')
raise exception.InvalidCgSnapshot(reason=msg)
self.volume_rpcapi.delete_cgsnapshot(context.elevated(), cgsnapshot)
def update_cgsnapshot(self, context, cgsnapshot, fields):
cgsnapshot.update(fields)
cgsnapshot.save()
def get_cgsnapshot(self, context, cgsnapshot_id):
check_policy(context, 'get_cgsnapshot')
cgsnapshots = objects.CGSnapshot.get_by_id(context, cgsnapshot_id)
return cgsnapshots
def get_all_cgsnapshots(self, context, search_opts=None):
check_policy(context, 'get_all_cgsnapshots')
search_opts = search_opts or {}
if context.is_admin and 'all_tenants' in search_opts:
# Need to remove all_tenants to pass the filtering below.
del search_opts['all_tenants']
cgsnapshots = objects.CGSnapshotList.get_all(context, search_opts)
else:
cgsnapshots = objects.CGSnapshotList.get_all_by_project(
context.elevated(), context.project_id, search_opts)
return cgsnapshots
| apache-2.0 | -1,308,947,372,528,759,300 | 43.058748 | 79 | 0.530524 | false | 4.627498 | false | false | false |
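
A framework-free sketch of the `wrap_check_policy` pattern used in the file above: the decorator derives the policy action from the wrapped method's name and enforces it before the body runs. The `enforce` function below is a stand-in for `cinder.policy.enforce`, and the dict-based context and group are illustrative:

```python
import functools


def enforce(context, action, target):
    # Stand-in for cinder.policy.enforce: only records what would be checked.
    print("enforcing %s for project %s" % (action, target['project_id']))


def check_policy(context, action, target_obj=None):
    target = {'project_id': context['project_id'], 'user_id': context['user_id']}
    target.update(target_obj or {})
    enforce(context, 'consistencygroup:%s' % action, target)


def wrap_check_policy(func):
    @functools.wraps(func)
    def wrapped(self, context, target_obj, *args, **kwargs):
        # The policy action is taken from the method name, e.g. "delete".
        check_policy(context, func.__name__, target_obj)
        return func(self, context, target_obj, *args, **kwargs)
    return wrapped


class API(object):
    @wrap_check_policy
    def delete(self, context, group, force=False):
        return 'deleting %s' % group['id']


ctx = {'project_id': 'p1', 'user_id': 'u1'}
print(API().delete(ctx, {'id': 'cg-123', 'project_id': 'p1'}))
```
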
eugeneav/escout | service/back-end/escout/applications/dashboard/views.py | 1 | 1136 | from django.http import JsonResponse
from rest_framework import viewsets
from rest_framework.authtoken.models import Token
from rest_framework.permissions import IsAuthenticated
from escout.applications.dashboard.models import Application
from escout.applications.dashboard.serializers import ApplicationSerializer
# Create your views here.
class ApplicationsViewSet(viewsets.ModelViewSet):
queryset = Application.objects.all()
serializer_class = ApplicationSerializer
permission_classes = (IsAuthenticated,)
# TODO
def logout(request):
auth_token = request.META['HTTP_AUTHORIZATION']
token_parts = auth_token.split(" ")
token = token_parts[1]
token_record = Token.objects.get(key=token)
token_record.delete()
return JsonResponse({
'status': 'OK',
'data': {
}
})
# @login_required
def get_applications(request):
applicationRecords = Application.objects.all()
applications = []
return JsonResponse({
'status': 'OK',
'data': {
'applications': applications,
'offset': 0,
'limit': 0
}
})
| gpl-3.0 | -1,437,667,483,683,200,800 | 23.170213 | 75 | 0.679577 | false | 4.369231 | false | false | false |
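
A hypothetical urls.py wiring for the `ApplicationsViewSet` above; the route prefix, basename and URL layout are illustrative and not taken from the project:

```python
from django.urls import include, path
from rest_framework.routers import DefaultRouter

from escout.applications.dashboard.views import ApplicationsViewSet

router = DefaultRouter()
router.register(r'applications', ApplicationsViewSet, basename='application')

urlpatterns = [
    path('api/dashboard/', include(router.urls)),
]
```
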
phihag/adhocracy | src/adhocracy/lib/cache/invalidate.py | 4 | 2235 | import logging
from adhocracy import model
from adhocracy.lib.cache.util import clear_tag
log = logging.getLogger(__name__)
def invalidate_badge(badge):
log.debug('invalidate_badge %s' % badge)
clear_tag(badge)
def invalidate_userbadges(userbadges):
clear_tag(userbadges)
invalidate_user(userbadges.user)
def invalidate_delegateablebadges(delegateablebadges):
clear_tag(delegateablebadges)
invalidate_delegateable(delegateablebadges.delegateable)
def invalidate_user(user):
clear_tag(user)
def invalidate_text(text):
clear_tag(text)
invalidate_page(text.page)
def invalidate_page(page):
invalidate_delegateable(page)
def invalidate_delegateable(d, include_parents=True):
clear_tag(d)
if include_parents:
for p in d.parents:
invalidate_delegateable(p)
if not len(d.parents):
clear_tag(d.instance)
def invalidate_revision(rev):
invalidate_comment(rev.comment)
def invalidate_comment(comment):
clear_tag(comment)
if comment.reply:
invalidate_comment(comment.reply)
invalidate_delegateable(comment.topic)
def invalidate_delegation(delegation):
invalidate_user(delegation.principal)
invalidate_user(delegation.agent)
def invalidate_vote(vote):
clear_tag(vote)
invalidate_user(vote.user)
invalidate_poll(vote.poll)
def invalidate_selection(selection):
if selection is None:
return
clear_tag(selection)
if selection.page:
invalidate_delegateable(selection.page)
if selection.proposal:
invalidate_delegateable(selection.proposal)
def invalidate_poll(poll):
clear_tag(poll)
if poll.action == poll.SELECT:
invalidate_selection(poll.selection)
elif isinstance(poll.subject, model.Delegateable):
invalidate_delegateable(poll.subject)
elif isinstance(poll.subject, model.Comment):
invalidate_comment(poll.subject)
def invalidate_instance(instance):
# muharhar cache epic fail
clear_tag(instance)
for d in instance.delegateables:
invalidate_delegateable(d, include_parents=False)
def invalidate_tagging(tagging):
clear_tag(tagging)
invalidate_delegateable(tagging.delegateable)
| agpl-3.0 | -1,121,266,491,764,022,100 | 22.28125 | 60 | 0.720358 | false | 3.737458 | false | false | false |
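
The module above is a chain of cache-tag invalidations that fan out from an object to its replies, parents and containers. Below is a toy, self-contained sketch of that cascade, with plain dicts standing in for the SQLAlchemy models, mainly to show that shared ancestors are cleared once per path:

```python
cleared = []


def clear_tag(name):
    cleared.append(name)


def invalidate_delegateable(d, include_parents=True):
    clear_tag(d['name'])
    if include_parents:
        for parent in d.get('parents', []):
            invalidate_delegateable(parent)


def invalidate_comment(comment):
    clear_tag(comment['name'])
    if comment.get('reply'):
        invalidate_comment(comment['reply'])
    invalidate_delegateable(comment['topic'])


proposal = {'name': 'proposal-1', 'parents': [{'name': 'page-root', 'parents': []}]}
reply = {'name': 'comment-1', 'reply': None, 'topic': proposal}
invalidate_comment({'name': 'comment-2', 'reply': reply, 'topic': proposal})
print(cleared)
# ['comment-2', 'comment-1', 'proposal-1', 'page-root', 'proposal-1', 'page-root']
```
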
sdgdsffdsfff/jumpserver | apps/orgs/api.py | 1 | 2390 | # -*- coding: utf-8 -*-
#
from rest_framework import status
from rest_framework.views import Response
from rest_framework_bulk import BulkModelViewSet
from common.permissions import IsSuperUserOrAppUser
from .models import Organization
from .serializers import OrgSerializer, OrgReadSerializer, \
OrgMembershipUserSerializer, OrgMembershipAdminSerializer
from users.models import User, UserGroup
from assets.models import Asset, Domain, AdminUser, SystemUser, Label
from perms.models import AssetPermission
from orgs.utils import current_org
from common.utils import get_logger
from .mixins.api import OrgMembershipModelViewSetMixin
logger = get_logger(__file__)
class OrgViewSet(BulkModelViewSet):
queryset = Organization.objects.all()
serializer_class = OrgSerializer
permission_classes = (IsSuperUserOrAppUser,)
org = None
def get_serializer_class(self):
if self.action in ('list', 'retrieve'):
return OrgReadSerializer
else:
return super().get_serializer_class()
def get_data_from_model(self, model):
if model == User:
data = model.objects.filter(related_user_orgs__id=self.org.id)
else:
data = model.objects.filter(org_id=self.org.id)
return data
def destroy(self, request, *args, **kwargs):
self.org = self.get_object()
models = [
User, UserGroup,
Asset, Domain, AdminUser, SystemUser, Label,
AssetPermission,
]
for model in models:
data = self.get_data_from_model(model)
if data:
return Response(status=status.HTTP_400_BAD_REQUEST)
else:
if str(current_org) == str(self.org):
return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
self.org.delete()
return Response({'msg': True}, status=status.HTTP_200_OK)
class OrgMembershipAdminsViewSet(OrgMembershipModelViewSetMixin, BulkModelViewSet):
serializer_class = OrgMembershipAdminSerializer
membership_class = Organization.admins.through
permission_classes = (IsSuperUserOrAppUser, )
class OrgMembershipUsersViewSet(OrgMembershipModelViewSetMixin, BulkModelViewSet):
serializer_class = OrgMembershipUserSerializer
membership_class = Organization.users.through
permission_classes = (IsSuperUserOrAppUser, )
| gpl-2.0 | -5,938,913,917,464,487,000 | 33.637681 | 83 | 0.697908 | false | 4.192982 | false | false | false |
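
A stand-alone sketch of the guard logic in `OrgViewSet.destroy` above: deletion is refused while any org-scoped resources remain, and the currently active organization can never be deleted. Plain dicts stand in for the ORM querysets, and the status codes mirror the responses returned by the view:

```python
def can_delete_org(org_id, current_org_id, resources_by_model):
    # Any remaining resource -> HTTP 400; deleting the active org -> HTTP 405.
    if any(resources_by_model.values()):
        return False, 400
    if org_id == current_org_id:
        return False, 405
    return True, 200


print(can_delete_org('org-2', 'org-1', {'User': [], 'Asset': ['web-1']}))  # (False, 400)
print(can_delete_org('org-1', 'org-1', {'User': [], 'Asset': []}))          # (False, 405)
print(can_delete_org('org-2', 'org-1', {'User': [], 'Asset': []}))          # (True, 200)
```
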
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.6.0/Lib/test/test_fstring.py | 1 | 27910 | import ast
import types
import decimal
import unittest
a_global = 'global variable'
# You could argue that I'm too strict in looking for specific error
# values with assertRaisesRegex, but without it it's way too easy to
# make a syntax error in the test strings. Especially with all of the
# triple quotes, raw strings, backslashes, etc. I think it's a
# worthwhile tradeoff. When I switched to this method, I found many
# examples where I wasn't testing what I thought I was.
class TestCase(unittest.TestCase):
def assertAllRaise(self, exception_type, regex, error_strings):
for str in error_strings:
with self.subTest(str=str):
with self.assertRaisesRegex(exception_type, regex):
eval(str)
def test__format__lookup(self):
# Make sure __format__ is looked up on the type, not the instance.
class X:
def __format__(self, spec):
return 'class'
x = X()
# Add a bound __format__ method to the 'y' instance, but not
# the 'x' instance.
y = X()
y.__format__ = types.MethodType(lambda self, spec: 'instance', y)
self.assertEqual(f'{y}', format(y))
self.assertEqual(f'{y}', 'class')
self.assertEqual(format(x), format(y))
# __format__ is not called this way, but still make sure it
# returns what we expect (so we can make sure we're bypassing
# it).
self.assertEqual(x.__format__(''), 'class')
self.assertEqual(y.__format__(''), 'instance')
# This is how __format__ is actually called.
self.assertEqual(type(x).__format__(x, ''), 'class')
self.assertEqual(type(y).__format__(y, ''), 'class')
def test_ast(self):
# Inspired by http://bugs.python.org/issue24975
class X:
def __init__(self):
self.called = False
def __call__(self):
self.called = True
return 4
x = X()
expr = """
a = 10
f'{a * x()}'"""
t = ast.parse(expr)
c = compile(t, '', 'exec')
# Make sure x was not called.
self.assertFalse(x.called)
# Actually run the code.
exec(c)
# Make sure x was called.
self.assertTrue(x.called)
def test_literal_eval(self):
# With no expressions, an f-string is okay.
self.assertEqual(ast.literal_eval("f'x'"), 'x')
self.assertEqual(ast.literal_eval("f'x' 'y'"), 'xy')
# But this should raise an error.
with self.assertRaisesRegex(ValueError, 'malformed node or string'):
ast.literal_eval("f'x{3}'")
# As should this, which uses a different ast node
with self.assertRaisesRegex(ValueError, 'malformed node or string'):
ast.literal_eval("f'{3}'")
def test_ast_compile_time_concat(self):
x = ['']
expr = """x[0] = 'foo' f'{3}'"""
t = ast.parse(expr)
c = compile(t, '', 'exec')
exec(c)
self.assertEqual(x[0], 'foo3')
def test_literal(self):
self.assertEqual(f'', '')
self.assertEqual(f'a', 'a')
self.assertEqual(f' ', ' ')
def test_unterminated_string(self):
self.assertAllRaise(SyntaxError, 'f-string: unterminated string',
[r"""f'{"x'""",
r"""f'{"x}'""",
r"""f'{("x'""",
r"""f'{("x}'""",
])
def test_mismatched_parens(self):
self.assertAllRaise(SyntaxError, 'f-string: mismatched',
["f'{((}'",
])
def test_double_braces(self):
self.assertEqual(f'{{', '{')
self.assertEqual(f'a{{', 'a{')
self.assertEqual(f'{{b', '{b')
self.assertEqual(f'a{{b', 'a{b')
self.assertEqual(f'}}', '}')
self.assertEqual(f'a}}', 'a}')
self.assertEqual(f'}}b', '}b')
self.assertEqual(f'a}}b', 'a}b')
self.assertEqual(f'{{}}', '{}')
self.assertEqual(f'a{{}}', 'a{}')
self.assertEqual(f'{{b}}', '{b}')
self.assertEqual(f'{{}}c', '{}c')
self.assertEqual(f'a{{b}}', 'a{b}')
self.assertEqual(f'a{{}}c', 'a{}c')
self.assertEqual(f'{{b}}c', '{b}c')
self.assertEqual(f'a{{b}}c', 'a{b}c')
self.assertEqual(f'{{{10}', '{10')
self.assertEqual(f'}}{10}', '}10')
self.assertEqual(f'}}{{{10}', '}{10')
self.assertEqual(f'}}a{{{10}', '}a{10')
self.assertEqual(f'{10}{{', '10{')
self.assertEqual(f'{10}}}', '10}')
self.assertEqual(f'{10}}}{{', '10}{')
self.assertEqual(f'{10}}}a{{' '}', '10}a{}')
# Inside of strings, don't interpret doubled brackets.
self.assertEqual(f'{"{{}}"}', '{{}}')
self.assertAllRaise(TypeError, 'unhashable type',
["f'{ {{}} }'", # dict in a set
])
def test_compile_time_concat(self):
x = 'def'
self.assertEqual('abc' f'## {x}ghi', 'abc## defghi')
self.assertEqual('abc' f'{x}' 'ghi', 'abcdefghi')
self.assertEqual('abc' f'{x}' 'gh' f'i{x:4}', 'abcdefghidef ')
self.assertEqual('{x}' f'{x}', '{x}def')
self.assertEqual('{x' f'{x}', '{xdef')
self.assertEqual('{x}' f'{x}', '{x}def')
self.assertEqual('{{x}}' f'{x}', '{{x}}def')
self.assertEqual('{{x' f'{x}', '{{xdef')
self.assertEqual('x}}' f'{x}', 'x}}def')
self.assertEqual(f'{x}' 'x}}', 'defx}}')
self.assertEqual(f'{x}' '', 'def')
self.assertEqual('' f'{x}' '', 'def')
self.assertEqual('' f'{x}', 'def')
self.assertEqual(f'{x}' '2', 'def2')
self.assertEqual('1' f'{x}' '2', '1def2')
self.assertEqual('1' f'{x}', '1def')
self.assertEqual(f'{x}' f'-{x}', 'def-def')
self.assertEqual('' f'', '')
self.assertEqual('' f'' '', '')
self.assertEqual('' f'' '' f'', '')
self.assertEqual(f'', '')
self.assertEqual(f'' '', '')
self.assertEqual(f'' '' f'', '')
self.assertEqual(f'' '' f'' '', '')
self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
["f'{3' f'}'", # can't concat to get a valid f-string
])
def test_comments(self):
# These aren't comments, since they're in strings.
d = {'#': 'hash'}
self.assertEqual(f'{"#"}', '#')
self.assertEqual(f'{d["#"]}', 'hash')
self.assertAllRaise(SyntaxError, "f-string expression part cannot include '#'",
["f'{1#}'", # error because the expression becomes "(1#)"
"f'{3(#)}'",
"f'{#}'",
"f'{)#}'", # When wrapped in parens, this becomes
# '()#)'. Make sure that doesn't compile.
])
def test_many_expressions(self):
# Create a string with many expressions in it. Note that
# because we have a space in here as a literal, we're actually
# going to use twice as many ast nodes: one for each literal
# plus one for each expression.
def build_fstr(n, extra=''):
return "f'" + ('{x} ' * n) + extra + "'"
x = 'X'
width = 1
# Test around 256.
for i in range(250, 260):
self.assertEqual(eval(build_fstr(i)), (x+' ')*i)
# Test concatenating 2 largs fstrings.
self.assertEqual(eval(build_fstr(255)*256), (x+' ')*(255*256))
s = build_fstr(253, '{x:{width}} ')
self.assertEqual(eval(s), (x+' ')*254)
# Test lots of expressions and constants, concatenated.
s = "f'{1}' 'x' 'y'" * 1024
self.assertEqual(eval(s), '1xy' * 1024)
def test_format_specifier_expressions(self):
width = 10
precision = 4
value = decimal.Decimal('12.34567')
self.assertEqual(f'result: {value:{width}.{precision}}', 'result: 12.35')
self.assertEqual(f'result: {value:{width!r}.{precision}}', 'result: 12.35')
self.assertEqual(f'result: {value:{width:0}.{precision:1}}', 'result: 12.35')
self.assertEqual(f'result: {value:{1}{0:0}.{precision:1}}', 'result: 12.35')
self.assertEqual(f'result: {value:{ 1}{ 0:0}.{ precision:1}}', 'result: 12.35')
self.assertEqual(f'{10:#{1}0x}', ' 0xa')
self.assertEqual(f'{10:{"#"}1{0}{"x"}}', ' 0xa')
self.assertEqual(f'{-10:-{"#"}1{0}x}', ' -0xa')
self.assertEqual(f'{-10:{"-"}#{1}0{"x"}}', ' -0xa')
self.assertEqual(f'{10:#{3 != {4:5} and width}x}', ' 0xa')
self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
["""f'{"s"!r{":10"}}'""",
# This looks like a nested format spec.
])
self.assertAllRaise(SyntaxError, "invalid syntax",
[# Invalid syntax inside a nested spec.
"f'{4:{/5}}'",
])
self.assertAllRaise(SyntaxError, "f-string: expressions nested too deeply",
[# Can't nest format specifiers.
"f'result: {value:{width:{0}}.{precision:1}}'",
])
self.assertAllRaise(SyntaxError, 'f-string: invalid conversion character',
[# No expansion inside conversion or for
# the : or ! itself.
"""f'{"s"!{"r"}}'""",
])
def test_side_effect_order(self):
class X:
def __init__(self):
self.i = 0
def __format__(self, spec):
self.i += 1
return str(self.i)
x = X()
self.assertEqual(f'{x} {x}', '1 2')
def test_missing_expression(self):
self.assertAllRaise(SyntaxError, 'f-string: empty expression not allowed',
["f'{}'",
"f'{ }'"
"f' {} '",
"f'{!r}'",
"f'{ !r}'",
"f'{10:{ }}'",
"f' { } '",
# Catch the empty expression before the
# invalid conversion.
"f'{!x}'",
"f'{ !xr}'",
"f'{!x:}'",
"f'{!x:a}'",
"f'{ !xr:}'",
"f'{ !xr:a}'",
"f'{!}'",
"f'{:}'",
# We find the empty expression before the
# missing closing brace.
"f'{!'",
"f'{!s:'",
"f'{:'",
"f'{:x'",
])
def test_parens_in_expressions(self):
self.assertEqual(f'{3,}', '(3,)')
# Add these because when an expression is evaluated, parens
# are added around it. But we shouldn't go from an invalid
# expression to a valid one. The added parens are just
# supposed to allow whitespace (including newlines).
self.assertAllRaise(SyntaxError, 'invalid syntax',
["f'{,}'",
"f'{,}'", # this is (,), which is an error
])
self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
["f'{3)+(4}'",
])
self.assertAllRaise(SyntaxError, 'EOL while scanning string literal',
["f'{\n}'",
])
def test_backslashes_in_string_part(self):
self.assertEqual(f'\t', '\t')
self.assertEqual(r'\t', '\\t')
self.assertEqual(rf'\t', '\\t')
self.assertEqual(f'{2}\t', '2\t')
self.assertEqual(f'{2}\t{3}', '2\t3')
self.assertEqual(f'\t{3}', '\t3')
self.assertEqual(f'\u0394', '\u0394')
self.assertEqual(r'\u0394', '\\u0394')
self.assertEqual(rf'\u0394', '\\u0394')
self.assertEqual(f'{2}\u0394', '2\u0394')
self.assertEqual(f'{2}\u0394{3}', '2\u03943')
self.assertEqual(f'\u0394{3}', '\u03943')
self.assertEqual(f'\U00000394', '\u0394')
self.assertEqual(r'\U00000394', '\\U00000394')
self.assertEqual(rf'\U00000394', '\\U00000394')
self.assertEqual(f'{2}\U00000394', '2\u0394')
self.assertEqual(f'{2}\U00000394{3}', '2\u03943')
self.assertEqual(f'\U00000394{3}', '\u03943')
self.assertEqual(f'\N{GREEK CAPITAL LETTER DELTA}', '\u0394')
self.assertEqual(f'{2}\N{GREEK CAPITAL LETTER DELTA}', '2\u0394')
self.assertEqual(f'{2}\N{GREEK CAPITAL LETTER DELTA}{3}', '2\u03943')
self.assertEqual(f'\N{GREEK CAPITAL LETTER DELTA}{3}', '\u03943')
self.assertEqual(f'2\N{GREEK CAPITAL LETTER DELTA}', '2\u0394')
self.assertEqual(f'2\N{GREEK CAPITAL LETTER DELTA}3', '2\u03943')
self.assertEqual(f'\N{GREEK CAPITAL LETTER DELTA}3', '\u03943')
self.assertEqual(f'\x20', ' ')
self.assertEqual(r'\x20', '\\x20')
self.assertEqual(rf'\x20', '\\x20')
self.assertEqual(f'{2}\x20', '2 ')
self.assertEqual(f'{2}\x20{3}', '2 3')
self.assertEqual(f'\x20{3}', ' 3')
self.assertEqual(f'2\x20', '2 ')
self.assertEqual(f'2\x203', '2 3')
self.assertEqual(f'\x203', ' 3')
def test_misformed_unicode_character_name(self):
# These test are needed because unicode names are parsed
# differently inside f-strings.
self.assertAllRaise(SyntaxError, r"\(unicode error\) 'unicodeescape' codec can't decode bytes in position .*: malformed \\N character escape",
[r"f'\N'",
r"f'\N{'",
r"f'\N{GREEK CAPITAL LETTER DELTA'",
# Here are the non-f-string versions,
# which should give the same errors.
r"'\N'",
r"'\N{'",
r"'\N{GREEK CAPITAL LETTER DELTA'",
])
def test_no_backslashes_in_expression_part(self):
self.assertAllRaise(SyntaxError, 'f-string expression part cannot include a backslash',
[r"f'{\'a\'}'",
r"f'{\t3}'",
r"f'{\}'",
r"rf'{\'a\'}'",
r"rf'{\t3}'",
r"rf'{\}'",
r"""rf'{"\N{LEFT CURLY BRACKET}"}'""",
])
def test_no_escapes_for_braces(self):
# \x7b is '{'. Make sure it doesn't start an expression.
self.assertEqual(f'\x7b2}}', '{2}')
self.assertEqual(f'\x7b2', '{2')
self.assertEqual(f'\u007b2', '{2')
self.assertEqual(f'\N{LEFT CURLY BRACKET}2\N{RIGHT CURLY BRACKET}', '{2}')
def test_newlines_in_expressions(self):
self.assertEqual(f'{0}', '0')
self.assertEqual(rf'''{3+
4}''', '7')
def test_lambda(self):
x = 5
self.assertEqual(f'{(lambda y:x*y)("8")!r}', "'88888'")
self.assertEqual(f'{(lambda y:x*y)("8")!r:10}', "'88888' ")
self.assertEqual(f'{(lambda y:x*y)("8"):10}', "88888 ")
# lambda doesn't work without parens, because the colon
# makes the parser think it's a format_spec
self.assertAllRaise(SyntaxError, 'unexpected EOF while parsing',
["f'{lambda x:x}'",
])
def test_yield(self):
# Not terribly useful, but make sure the yield turns
# a function into a generator
def fn(y):
f'y:{yield y*2}'
g = fn(4)
self.assertEqual(next(g), 8)
def test_yield_send(self):
def fn(x):
yield f'x:{yield (lambda i: x * i)}'
g = fn(10)
the_lambda = next(g)
self.assertEqual(the_lambda(4), 40)
self.assertEqual(g.send('string'), 'x:string')
def test_expressions_with_triple_quoted_strings(self):
self.assertEqual(f"{'''x'''}", 'x')
self.assertEqual(f"{'''eric's'''}", "eric's")
# Test concatenation within an expression
self.assertEqual(f'{"x" """eric"s""" "y"}', 'xeric"sy')
self.assertEqual(f'{"x" """eric"s"""}', 'xeric"s')
self.assertEqual(f'{"""eric"s""" "y"}', 'eric"sy')
self.assertEqual(f'{"""x""" """eric"s""" "y"}', 'xeric"sy')
self.assertEqual(f'{"""x""" """eric"s""" """y"""}', 'xeric"sy')
self.assertEqual(f'{r"""x""" """eric"s""" """y"""}', 'xeric"sy')
def test_multiple_vars(self):
x = 98
y = 'abc'
self.assertEqual(f'{x}{y}', '98abc')
self.assertEqual(f'X{x}{y}', 'X98abc')
self.assertEqual(f'{x}X{y}', '98Xabc')
self.assertEqual(f'{x}{y}X', '98abcX')
self.assertEqual(f'X{x}Y{y}', 'X98Yabc')
self.assertEqual(f'X{x}{y}Y', 'X98abcY')
self.assertEqual(f'{x}X{y}Y', '98XabcY')
self.assertEqual(f'X{x}Y{y}Z', 'X98YabcZ')
def test_closure(self):
def outer(x):
def inner():
return f'x:{x}'
return inner
self.assertEqual(outer('987')(), 'x:987')
self.assertEqual(outer(7)(), 'x:7')
def test_arguments(self):
y = 2
def f(x, width):
return f'x={x*y:{width}}'
self.assertEqual(f('foo', 10), 'x=foofoo ')
x = 'bar'
self.assertEqual(f(10, 10), 'x= 20')
def test_locals(self):
value = 123
self.assertEqual(f'v:{value}', 'v:123')
def test_missing_variable(self):
with self.assertRaises(NameError):
f'v:{value}'
def test_missing_format_spec(self):
class O:
def __format__(self, spec):
if not spec:
return '*'
return spec
self.assertEqual(f'{O():x}', 'x')
self.assertEqual(f'{O()}', '*')
self.assertEqual(f'{O():}', '*')
self.assertEqual(f'{3:}', '3')
self.assertEqual(f'{3!s:}', '3')
def test_global(self):
self.assertEqual(f'g:{a_global}', 'g:global variable')
self.assertEqual(f'g:{a_global!r}', "g:'global variable'")
a_local = 'local variable'
self.assertEqual(f'g:{a_global} l:{a_local}',
'g:global variable l:local variable')
self.assertEqual(f'g:{a_global!r}',
"g:'global variable'")
self.assertEqual(f'g:{a_global} l:{a_local!r}',
"g:global variable l:'local variable'")
self.assertIn("module 'unittest' from", f'{unittest}')
def test_shadowed_global(self):
a_global = 'really a local'
self.assertEqual(f'g:{a_global}', 'g:really a local')
self.assertEqual(f'g:{a_global!r}', "g:'really a local'")
a_local = 'local variable'
self.assertEqual(f'g:{a_global} l:{a_local}',
'g:really a local l:local variable')
self.assertEqual(f'g:{a_global!r}',
"g:'really a local'")
self.assertEqual(f'g:{a_global} l:{a_local!r}',
"g:really a local l:'local variable'")
def test_call(self):
def foo(x):
return 'x=' + str(x)
self.assertEqual(f'{foo(10)}', 'x=10')
def test_nested_fstrings(self):
y = 5
self.assertEqual(f'{f"{0}"*3}', '000')
self.assertEqual(f'{f"{y}"*3}', '555')
def test_invalid_string_prefixes(self):
self.assertAllRaise(SyntaxError, 'unexpected EOF while parsing',
["fu''",
"uf''",
"Fu''",
"fU''",
"Uf''",
"uF''",
"ufr''",
"urf''",
"fur''",
"fru''",
"rfu''",
"ruf''",
"FUR''",
"Fur''",
"fb''",
"fB''",
"Fb''",
"FB''",
"bf''",
"bF''",
"Bf''",
"BF''",
])
def test_leading_trailing_spaces(self):
self.assertEqual(f'{ 3}', '3')
self.assertEqual(f'{ 3}', '3')
self.assertEqual(f'{3 }', '3')
self.assertEqual(f'{3 }', '3')
self.assertEqual(f'expr={ {x: y for x, y in [(1, 2), ]}}',
'expr={1: 2}')
self.assertEqual(f'expr={ {x: y for x, y in [(1, 2), ]} }',
'expr={1: 2}')
def test_not_equal(self):
# There's a special test for this because there's a special
# case in the f-string parser to look for != as not ending an
# expression. Normally it would, while looking for !s or !r.
self.assertEqual(f'{3!=4}', 'True')
self.assertEqual(f'{3!=4:}', 'True')
self.assertEqual(f'{3!=4!s}', 'True')
self.assertEqual(f'{3!=4!s:.3}', 'Tru')
def test_conversions(self):
self.assertEqual(f'{3.14:10.10}', ' 3.14')
self.assertEqual(f'{3.14!s:10.10}', '3.14 ')
self.assertEqual(f'{3.14!r:10.10}', '3.14 ')
self.assertEqual(f'{3.14!a:10.10}', '3.14 ')
self.assertEqual(f'{"a"}', 'a')
self.assertEqual(f'{"a"!r}', "'a'")
self.assertEqual(f'{"a"!a}', "'a'")
# Not a conversion.
self.assertEqual(f'{"a!r"}', "a!r")
# Not a conversion, but show that ! is allowed in a format spec.
self.assertEqual(f'{3.14:!<10.10}', '3.14!!!!!!')
self.assertAllRaise(SyntaxError, 'f-string: invalid conversion character',
["f'{3!g}'",
"f'{3!A}'",
"f'{3!3}'",
"f'{3!G}'",
"f'{3!!}'",
"f'{3!:}'",
"f'{3! s}'", # no space before conversion char
])
self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
["f'{x!s{y}}'",
"f'{3!ss}'",
"f'{3!ss:}'",
"f'{3!ss:s}'",
])
def test_assignment(self):
self.assertAllRaise(SyntaxError, 'invalid syntax',
["f'' = 3",
"f'{0}' = x",
"f'{x}' = x",
])
def test_del(self):
self.assertAllRaise(SyntaxError, 'invalid syntax',
["del f''",
"del '' f''",
])
def test_mismatched_braces(self):
self.assertAllRaise(SyntaxError, "f-string: single '}' is not allowed",
["f'{{}'",
"f'{{}}}'",
"f'}'",
"f'x}'",
"f'x}x'",
# Can't have { or } in a format spec.
"f'{3:}>10}'",
"f'{3:}}>10}'",
])
self.assertAllRaise(SyntaxError, "f-string: expecting '}'",
["f'{3:{{>10}'",
"f'{3'",
"f'{3!'",
"f'{3:'",
"f'{3!s'",
"f'{3!s:'",
"f'{3!s:3'",
"f'x{'",
"f'x{x'",
"f'{x'",
"f'{3:s'",
"f'{{{'",
"f'{{}}{'",
"f'{'",
])
# But these are just normal strings.
self.assertEqual(f'{"{"}', '{')
self.assertEqual(f'{"}"}', '}')
self.assertEqual(f'{3:{"}"}>10}', '}}}}}}}}}3')
self.assertEqual(f'{2:{"{"}>10}', '{{{{{{{{{2')
def test_if_conditional(self):
# There's special logic in compile.c to test if the
# conditional for an if (and while) are constants. Exercise
# that code.
def test_fstring(x, expected):
flag = 0
if f'{x}':
flag = 1
else:
flag = 2
self.assertEqual(flag, expected)
def test_concat_empty(x, expected):
flag = 0
if '' f'{x}':
flag = 1
else:
flag = 2
self.assertEqual(flag, expected)
def test_concat_non_empty(x, expected):
flag = 0
if ' ' f'{x}':
flag = 1
else:
flag = 2
self.assertEqual(flag, expected)
test_fstring('', 2)
test_fstring(' ', 1)
test_concat_empty('', 2)
test_concat_empty(' ', 1)
test_concat_non_empty('', 1)
test_concat_non_empty(' ', 1)
def test_empty_format_specifier(self):
x = 'test'
self.assertEqual(f'{x}', 'test')
self.assertEqual(f'{x:}', 'test')
self.assertEqual(f'{x!s:}', 'test')
self.assertEqual(f'{x!r:}', "'test'")
def test_str_format_differences(self):
d = {'a': 'string',
0: 'integer',
}
a = 0
self.assertEqual(f'{d[0]}', 'integer')
self.assertEqual(f'{d["a"]}', 'string')
self.assertEqual(f'{d[a]}', 'integer')
self.assertEqual('{d[a]}'.format(d=d), 'string')
self.assertEqual('{d[0]}'.format(d=d), 'integer')
def test_invalid_expressions(self):
self.assertAllRaise(SyntaxError, 'invalid syntax',
[r"f'{a[4)}'",
r"f'{a(4]}'",
])
def test_errors(self):
# see issue 26287
self.assertAllRaise(TypeError, 'non-empty',
[r"f'{(lambda: 0):x}'",
r"f'{(0,):x}'",
])
self.assertAllRaise(ValueError, 'Unknown format code',
[r"f'{1000:j}'",
r"f'{1000:j}'",
])
def test_loop(self):
for i in range(1000):
self.assertEqual(f'i:{i}', 'i:' + str(i))
def test_dict(self):
d = {'"': 'dquote',
"'": 'squote',
'foo': 'bar',
}
self.assertEqual(f'''{d["'"]}''', 'squote')
self.assertEqual(f"""{d['"']}""", 'dquote')
self.assertEqual(f'{d["foo"]}', 'bar')
self.assertEqual(f"{d['foo']}", 'bar')
if __name__ == '__main__':
unittest.main()
| mit | 7,434,914,116,047,775,000 | 36.312834 | 150 | 0.436833 | false | 3.795213 | true | false | false |
hheimbuerger/suds-gzip | suds/client.py | 2 | 28840 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
The I{2nd generation} service proxy provides access to web services.
See I{README.txt}
"""
import suds
import suds.metrics as metrics
from cookielib import CookieJar
from suds import *
from suds.reader import DefinitionsReader
from suds.transport import TransportError, Request
from suds.transport.https import HttpAuthenticated
from suds.servicedefinition import ServiceDefinition
from suds import sudsobject
from sudsobject import Factory as InstFactory
from sudsobject import Object
from suds.resolver import PathResolver
from suds.builder import Builder
from suds.wsdl import Definitions
from suds.cache import ObjectCache
from suds.sax.document import Document
from suds.sax.parser import Parser
from suds.options import Options
from suds.properties import Unskin
from urlparse import urlparse
from copy import deepcopy
from suds.plugin import PluginContainer
from logging import getLogger
log = getLogger(__name__)
class Client(object):
"""
A lightweight web services client.
I{(2nd generation)} API.
@ivar wsdl: The WSDL object.
@type wsdl:L{Definitions}
@ivar service: The service proxy used to invoke operations.
@type service: L{Service}
@ivar factory: The factory used to create objects.
@type factory: L{Factory}
@ivar sd: The service definition
@type sd: L{ServiceDefinition}
@ivar messages: The last sent/received messages.
@type messages: str[2]
"""
@classmethod
def items(cls, sobject):
"""
Extract the I{items} from a suds object much like the
items() method works on I{dict}.
@param sobject: A suds object
@type sobject: L{Object}
@return: A list of items contained in I{sobject}.
@rtype: [(key, value),...]
"""
return sudsobject.items(sobject)
@classmethod
def dict(cls, sobject):
"""
Convert a sudsobject into a dictionary.
@param sobject: A suds object
@type sobject: L{Object}
@return: A python dictionary containing the
items contained in I{sobject}.
@rtype: dict
"""
return sudsobject.asdict(sobject)
@classmethod
def metadata(cls, sobject):
"""
Extract the metadata from a suds object.
@param sobject: A suds object
@type sobject: L{Object}
@return: The object's metadata
@rtype: L{sudsobject.Metadata}
"""
return sobject.__metadata__
def __init__(self, url, **kwargs):
"""
@param url: The URL for the WSDL.
@type url: str
@param kwargs: keyword arguments.
@see: L{Options}
"""
options = Options()
options.transport = HttpAuthenticated()
self.options = options
options.cache = ObjectCache(days=1)
self.set_options(**kwargs)
reader = DefinitionsReader(options, Definitions)
self.wsdl = reader.open(url)
plugins = PluginContainer(options.plugins)
plugins.init.initialized(wsdl=self.wsdl)
self.factory = Factory(self.wsdl)
self.service = ServiceSelector(self, self.wsdl.services)
self.sd = []
for s in self.wsdl.services:
sd = ServiceDefinition(self.wsdl, s)
self.sd.append(sd)
self.messages = dict(tx=None, rx=None)
def set_options(self, **kwargs):
"""
Set options.
@param kwargs: keyword arguments.
@see: L{Options}
"""
p = Unskin(self.options)
p.update(kwargs)
def add_prefix(self, prefix, uri):
"""
Add I{static} mapping of an XML namespace prefix to a namespace.
This is useful for cases when a wsdl and referenced schemas make heavy
        use of namespaces and those namespaces are subject to change.
@param prefix: An XML namespace prefix.
@type prefix: str
@param uri: An XML namespace URI.
@type uri: str
@raise Exception: when prefix is already mapped.
"""
root = self.wsdl.root
mapped = root.resolvePrefix(prefix, None)
if mapped is None:
root.addPrefix(prefix, uri)
return
if mapped[1] != uri:
raise Exception('"%s" already mapped as "%s"' % (prefix, mapped))
def last_sent(self):
"""
Get last sent I{soap} message.
@return: The last sent I{soap} message.
@rtype: L{Document}
"""
return self.messages.get('tx')
def last_received(self):
"""
Get last received I{soap} message.
@return: The last received I{soap} message.
@rtype: L{Document}
"""
return self.messages.get('rx')
def clone(self):
"""
Get a shallow clone of this object.
The clone only shares the WSDL. All other attributes are
unique to the cloned object including options.
@return: A shallow clone.
@rtype: L{Client}
"""
class Uninitialized(Client):
def __init__(self):
pass
clone = Uninitialized()
clone.options = Options()
cp = Unskin(clone.options)
mp = Unskin(self.options)
cp.update(deepcopy(mp))
clone.wsdl = self.wsdl
clone.factory = self.factory
clone.service = ServiceSelector(clone, self.wsdl.services)
clone.sd = self.sd
clone.messages = dict(tx=None, rx=None)
return clone
def __str__(self):
return unicode(self)
def __unicode__(self):
s = ['\n']
build = suds.__build__.split()
s.append('Suds ( https://fedorahosted.org/suds/ )')
s.append(' version: %s' % suds.__version__)
s.append(' %s build: %s' % (build[0], build[1]))
for sd in self.sd:
s.append('\n\n%s' % unicode(sd))
return ''.join(s)
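# Hedged usage sketch (added for illustration; not part of the original module).
# It shows how the Client API described in the docstrings above is typically
# driven. The WSDL URL and the GetQuote operation below are hypothetical.
def _example_client_usage():
    client = Client('http://example.com/service?wsdl', faults=True)
    print(client)  # dumps the service definition(s)
    # Attribute access is forwarded through ServiceSelector/PortSelector,
    # so operations can be invoked directly, e.g.:
    # result = client.service.GetQuote('ACME')
    return client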
class Factory:
"""
A factory for instantiating types defined in the wsdl
@ivar resolver: A schema type resolver.
@type resolver: L{PathResolver}
@ivar builder: A schema object builder.
@type builder: L{Builder}
"""
def __init__(self, wsdl):
"""
@param wsdl: A schema object.
@type wsdl: L{wsdl.Definitions}
"""
self.wsdl = wsdl
self.resolver = PathResolver(wsdl)
self.builder = Builder(self.resolver)
def create(self, name):
"""
create a WSDL type by name
@param name: The name of a type defined in the WSDL.
@type name: str
@return: The requested object.
@rtype: L{Object}
"""
timer = metrics.Timer()
timer.start()
type = self.resolver.find(name)
if type is None:
raise TypeNotFound(name)
if type.enum():
result = InstFactory.object(name)
for e, a in type.children():
setattr(result, e.name, e.name)
else:
try:
result = self.builder.build(type)
except Exception, e:
log.error("create '%s' failed", name, exc_info=True)
raise BuildError(name, e)
timer.stop()
metrics.log.debug('%s created: %s', name, timer)
return result
def separator(self, ps):
"""
Set the path separator.
@param ps: The new path separator.
@type ps: char
"""
self.resolver = PathResolver(self.wsdl, ps)
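# Hedged sketch (added for illustration; not in the original source): creating
# a WSDL-declared type through the factory. 'ns0:Person' and its fields are
# hypothetical and would have to exist in the loaded WSDL/schema.
def _example_factory_usage(client):
    person = client.factory.create('ns0:Person')
    person.name = 'Jeff'
    return person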
class ServiceSelector:
"""
The B{service} selector is used to select a web service.
In most cases, the wsdl only defines (1) service in which access
by subscript is passed through to a L{PortSelector}. This is also the
behavior when a I{default} service has been specified. In cases
where multiple services have been defined and no default has been
specified, the service is found by name (or index) and a L{PortSelector}
for the service is returned. In all cases, attribute access is
forwarded to the L{PortSelector} for either the I{first} service or the
I{default} service (when specified).
@ivar __client: A suds client.
@type __client: L{Client}
@ivar __services: A list of I{wsdl} services.
@type __services: list
"""
def __init__(self, client, services):
"""
@param client: A suds client.
@type client: L{Client}
@param services: A list of I{wsdl} services.
@type services: list
"""
self.__client = client
self.__services = services
def __getattr__(self, name):
"""
Request to access an attribute is forwarded to the
L{PortSelector} for either the I{first} service or the
I{default} service (when specified).
@param name: The name of a method.
@type name: str
@return: A L{PortSelector}.
@rtype: L{PortSelector}.
"""
default = self.__ds()
if default is None:
port = self.__find(0)
else:
port = default
return getattr(port, name)
def __getitem__(self, name):
"""
Provides selection of the I{service} by name (string) or
index (integer). In cases where only (1) service is defined
or a I{default} has been specified, the request is forwarded
to the L{PortSelector}.
@param name: The name (or index) of a service.
@type name: (int|str)
@return: A L{PortSelector} for the specified service.
@rtype: L{PortSelector}.
"""
if len(self.__services) == 1:
port = self.__find(0)
return port[name]
default = self.__ds()
if default is not None:
port = default
return port[name]
return self.__find(name)
def __find(self, name):
"""
Find a I{service} by name (string) or index (integer).
@param name: The name (or index) of a service.
@type name: (int|str)
@return: A L{PortSelector} for the found service.
@rtype: L{PortSelector}.
"""
service = None
if not len(self.__services):
raise Exception, 'No services defined'
if isinstance(name, int):
try:
service = self.__services[name]
name = service.name
except IndexError:
raise ServiceNotFound, 'at [%d]' % name
else:
for s in self.__services:
if name == s.name:
service = s
break
if service is None:
raise ServiceNotFound, name
return PortSelector(self.__client, service.ports, name)
def __ds(self):
"""
Get the I{default} service if defined in the I{options}.
@return: A L{PortSelector} for the I{default} service.
@rtype: L{PortSelector}.
"""
ds = self.__client.options.service
if ds is None:
return None
else:
return self.__find(ds)
class PortSelector:
"""
The B{port} selector is used to select a I{web service} B{port}.
In cases where multiple ports have been defined and no default has been
specified, the port is found by name (or index) and a L{MethodSelector}
for the port is returned. In all cases, attribute access is
forwarded to the L{MethodSelector} for either the I{first} port or the
I{default} port (when specified).
@ivar __client: A suds client.
@type __client: L{Client}
@ivar __ports: A list of I{service} ports.
@type __ports: list
@ivar __qn: The I{qualified} name of the port (used for logging).
@type __qn: str
"""
def __init__(self, client, ports, qn):
"""
@param client: A suds client.
@type client: L{Client}
@param ports: A list of I{service} ports.
@type ports: list
@param qn: The name of the service.
@type qn: str
"""
self.__client = client
self.__ports = ports
self.__qn = qn
def __getattr__(self, name):
"""
Request to access an attribute is forwarded to the
L{MethodSelector} for either the I{first} port or the
I{default} port (when specified).
@param name: The name of a method.
@type name: str
@return: A L{MethodSelector}.
@rtype: L{MethodSelector}.
"""
default = self.__dp()
if default is None:
m = self.__find(0)
else:
m = default
return getattr(m, name)
def __getitem__(self, name):
"""
Provides selection of the I{port} by name (string) or
index (integer). In cases where only (1) port is defined
or a I{default} has been specified, the request is forwarded
to the L{MethodSelector}.
@param name: The name (or index) of a port.
@type name: (int|str)
@return: A L{MethodSelector} for the specified port.
@rtype: L{MethodSelector}.
"""
default = self.__dp()
if default is None:
return self.__find(name)
else:
return default
def __find(self, name):
"""
Find a I{port} by name (string) or index (integer).
@param name: The name (or index) of a port.
@type name: (int|str)
@return: A L{MethodSelector} for the found port.
@rtype: L{MethodSelector}.
"""
port = None
if not len(self.__ports):
raise Exception, 'No ports defined: %s' % self.__qn
if isinstance(name, int):
qn = '%s[%d]' % (self.__qn, name)
try:
port = self.__ports[name]
except IndexError:
raise PortNotFound, qn
else:
qn = '.'.join((self.__qn, name))
for p in self.__ports:
if name == p.name:
port = p
break
if port is None:
raise PortNotFound, qn
qn = '.'.join((self.__qn, port.name))
return MethodSelector(self.__client, port.methods, qn)
def __dp(self):
"""
Get the I{default} port if defined in the I{options}.
@return: A L{MethodSelector} for the I{default} port.
@rtype: L{MethodSelector}.
"""
dp = self.__client.options.port
if dp is None:
return None
else:
return self.__find(dp)
class MethodSelector:
"""
The B{method} selector is used to select a B{method} by name.
@ivar __client: A suds client.
@type __client: L{Client}
@ivar __methods: A dictionary of methods.
@type __methods: dict
@ivar __qn: The I{qualified} name of the method (used for logging).
@type __qn: str
"""
def __init__(self, client, methods, qn):
"""
@param client: A suds client.
@type client: L{Client}
@param methods: A dictionary of methods.
@type methods: dict
@param qn: The I{qualified} name of the port.
@type qn: str
"""
self.__client = client
self.__methods = methods
self.__qn = qn
def __getattr__(self, name):
"""
Get a method by name and return it in an I{execution wrapper}.
@param name: The name of a method.
@type name: str
@return: An I{execution wrapper} for the specified method name.
@rtype: L{Method}
"""
return self[name]
def __getitem__(self, name):
"""
Get a method by name and return it in an I{execution wrapper}.
@param name: The name of a method.
@type name: str
@return: An I{execution wrapper} for the specified method name.
@rtype: L{Method}
"""
m = self.__methods.get(name)
if m is None:
qn = '.'.join((self.__qn, name))
raise MethodNotFound, qn
return Method(self.__client, m)
class Method:
"""
The I{method} (namespace) object.
@ivar client: A client object.
@type client: L{Client}
@ivar method: A I{wsdl} method.
    @type method: I{wsdl} Method.
"""
def __init__(self, client, method):
"""
@param client: A client object.
@type client: L{Client}
@param method: A I{raw} method.
        @type method: I{raw} Method.
"""
self.client = client
self.method = method
def __call__(self, *args, **kwargs):
"""
Invoke the method.
"""
clientclass = self.clientclass(kwargs)
client = clientclass(self.client, self.method)
if not self.faults():
try:
return client.invoke(args, kwargs)
except WebFault, e:
return (500, e)
else:
return client.invoke(args, kwargs)
def faults(self):
""" get faults option """
return self.client.options.faults
def clientclass(self, kwargs):
""" get soap client class """
if SimClient.simulation(kwargs):
return SimClient
else:
return SoapClient
class SoapClient:
"""
A lightweight soap based web client B{**not intended for external use}
@ivar service: The target method.
@type service: L{Service}
@ivar method: A target method.
@type method: L{Method}
    @ivar options: A dictionary of options.
@type options: dict
@ivar cookiejar: A cookie jar.
    @type cookiejar: cookielib.CookieJar
"""
def __init__(self, client, method):
"""
@param client: A suds client.
@type client: L{Client}
@param method: A target method.
@type method: L{Method}
"""
self.client = client
self.method = method
self.options = client.options
self.cookiejar = CookieJar()
def invoke(self, args, kwargs):
"""
Send the required soap message to invoke the specified method
@param args: A list of args for the method invoked.
@type args: list
@param kwargs: Named (keyword) args for the method invoked.
@type kwargs: dict
@return: The result of the method invocation.
@rtype: I{builtin}|I{subclass of} L{Object}
"""
timer = metrics.Timer()
timer.start()
result = None
binding = self.method.binding.input
soapenv = binding.get_message(self.method, args, kwargs)
timer.stop()
metrics.log.debug(
"message for '%s' created: %s",
self.method.name,
timer)
timer.start()
result = self.send(soapenv)
timer.stop()
metrics.log.debug(
"method '%s' invoked: %s",
self.method.name,
timer)
return result
def send(self, soapenv):
"""
Send soap message.
@param soapenv: A soap envelope to send.
@type soapenv: L{Document}
@return: The reply to the sent message.
@rtype: I{builtin} or I{subclass of} L{Object}
"""
result = None
location = self.location()
binding = self.method.binding.input
transport = self.options.transport
retxml = self.options.retxml
nosend = self.options.nosend
prettyxml = self.options.prettyxml
timer = metrics.Timer()
log.debug('sending to (%s)\nmessage:\n%s', location, soapenv)
try:
self.last_sent(soapenv)
plugins = PluginContainer(self.options.plugins)
plugins.message.marshalled(envelope=soapenv.root())
if prettyxml:
soapenv = soapenv.str()
else:
soapenv = soapenv.plain()
soapenv = soapenv.encode('utf-8')
ctx = plugins.message.sending(envelope=soapenv)
soapenv = ctx.envelope
if nosend:
return RequestContext(self, binding, soapenv)
request = Request(location, soapenv)
request.headers = self.headers()
timer.start()
reply = transport.send(request)
timer.stop()
metrics.log.debug('waited %s on server reply', timer)
ctx = plugins.message.received(reply=reply.message)
reply.message = ctx.reply
if retxml:
result = reply.message
else:
result = self.succeeded(binding, reply.message)
except TransportError, e:
if e.httpcode in (202,204):
result = None
else:
log.error(self.last_sent())
result = self.failed(binding, e)
return result
def headers(self):
"""
        Get HTTP headers for the http/https request.
@return: A dictionary of header/values.
@rtype: dict
"""
action = self.method.soap.action
if isinstance(action, unicode):
action = action.encode('utf-8')
stock = { 'Content-Type' : 'text/xml; charset=utf-8', 'SOAPAction': action }
        # At this point the action was encoded, but the vanilla suds code takes all injected headers as they are,
        # potentially implicitly decoding the whole request into a unicode string if there's any unicode in the
        # headers (e.g. because you're like me and trying to be clever and Python 3 compatible by using
        # unicode_literals). This causes all kinds of horrible pains, as I've had to repeatedly notice. We could
        # silently encode everything here, but I'll go the safer(?) route and just reject all unicode strings.
for k, v in self.options.headers.items():
if type(k) != str:
raise ValueError("'%s' header has a non-string name, but only (encoded/non-unicode) strings are allowed" % repr(k))
if type(v) != str:
raise ValueError("'%s' header has a non-string value, but only (encoded/non-unicode) strings are allowed: %s" % (k, repr(v)))
result = dict(stock, **self.options.headers)
log.debug('headers = %s', result)
return result
def succeeded(self, binding, reply):
"""
Request succeeded, process the reply
@param binding: The binding to be used to process the reply.
@type binding: L{bindings.binding.Binding}
@param reply: The raw reply text.
@type reply: str
@return: The method result.
@rtype: I{builtin}, L{Object}
@raise WebFault: On server.
"""
log.debug('http succeeded:\n%s', reply)
plugins = PluginContainer(self.options.plugins)
if len(reply) > 0:
reply, result = binding.get_reply(self.method, reply)
self.last_received(reply)
else:
result = None
ctx = plugins.message.unmarshalled(reply=result)
result = ctx.reply
if self.options.faults:
return result
else:
return (200, result)
def failed(self, binding, error):
"""
Request failed, process reply based on reason
@param binding: The binding to be used to process the reply.
@type binding: L{suds.bindings.binding.Binding}
@param error: The http error message
@type error: L{transport.TransportError}
"""
status, reason = (error.httpcode, tostr(error))
reply = error.fp.read()
log.debug('http failed:\n%s', reply)
if status == 500:
if len(reply) > 0:
r, p = binding.get_fault(reply)
self.last_received(r)
return (status, p)
else:
return (status, None)
if self.options.faults:
raise TransportError(reason, status)
else:
return (status, None)
def location(self):
p = Unskin(self.options)
return p.get('location', self.method.location)
def last_sent(self, d=None):
key = 'tx'
messages = self.client.messages
if d is None:
return messages.get(key)
else:
messages[key] = d
def last_received(self, d=None):
key = 'rx'
messages = self.client.messages
if d is None:
return messages.get(key)
else:
messages[key] = d
class SimClient(SoapClient):
"""
Loopback client used for message/reply simulation.
"""
injkey = '__inject'
@classmethod
def simulation(cls, kwargs):
""" get whether loopback has been specified in the I{kwargs}. """
return kwargs.has_key(SimClient.injkey)
def invoke(self, args, kwargs):
"""
Send the required soap message to invoke the specified method
@param args: A list of args for the method invoked.
@type args: list
@param kwargs: Named (keyword) args for the method invoked.
@type kwargs: dict
@return: The result of the method invocation.
@rtype: I{builtin} or I{subclass of} L{Object}
"""
simulation = kwargs[self.injkey]
msg = simulation.get('msg')
reply = simulation.get('reply')
fault = simulation.get('fault')
if msg is None:
if reply is not None:
return self.__reply(reply, args, kwargs)
if fault is not None:
return self.__fault(fault)
raise Exception('(reply|fault) expected when msg=None')
sax = Parser()
msg = sax.parse(string=msg)
return self.send(msg)
def __reply(self, reply, args, kwargs):
""" simulate the reply """
binding = self.method.binding.input
msg = binding.get_message(self.method, args, kwargs)
log.debug('inject (simulated) send message:\n%s', msg)
binding = self.method.binding.output
return self.succeeded(binding, reply)
def __fault(self, reply):
""" simulate the (fault) reply """
binding = self.method.binding.output
if self.options.faults:
r, p = binding.get_fault(reply)
self.last_received(r)
return (500, p)
else:
return (500, None)
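# Hedged sketch (added for illustration; not in the original module): driving
# SimClient's loopback mode by passing the reserved '__inject' keyword to an
# operation. The operation name and canned reply XML are made up.
def _example_simulated_reply(client, canned_reply_xml):
    # No request goes over the wire; the canned reply is parsed and processed
    # as if it had been returned by the server.
    return client.service.GetQuote('ACME', __inject={'reply': canned_reply_xml})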
class RequestContext:
"""
A request context.
    Returned when the I{nosend} option is specified.
@ivar client: The suds client.
@type client: L{Client}
@ivar binding: The binding for this request.
@type binding: I{Binding}
@ivar envelope: The request soap envelope.
@type envelope: str
"""
def __init__(self, client, binding, envelope):
"""
@param client: The suds client.
@type client: L{Client}
@param binding: The binding for this request.
@type binding: I{Binding}
@param envelope: The request soap envelope.
@type envelope: str
"""
self.client = client
self.binding = binding
self.envelope = envelope
def succeeded(self, reply):
"""
Re-entry for processing a successful reply.
@param reply: The reply soap envelope.
@type reply: str
@return: The returned value for the invoked method.
@rtype: object
"""
options = self.client.options
plugins = PluginContainer(options.plugins)
ctx = plugins.message.received(reply=reply)
reply = ctx.reply
return self.client.succeeded(self.binding, reply)
def failed(self, error):
"""
Re-entry for processing a failure reply.
@param error: The error returned by the transport.
@type error: A suds I{TransportError}.
"""
return self.client.failed(self.binding, error)
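# Hedged sketch (added for illustration; not in the original module): with the
# I{nosend} option the client returns a RequestContext instead of transmitting,
# so the envelope can be sent out of band and the reply fed back in. The
# operation name is hypothetical.
def _example_nosend(client, send_func):
    client.set_options(nosend=True)
    ctx = client.service.GetQuote('ACME')
    reply = send_func(ctx.envelope)  # transmit the envelope by other means
    return ctx.succeeded(reply)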
| lgpl-3.0 | 89,138,078,589,958,480 | 32.692757 | 142 | 0.571637 | false | 4.202244 | false | false | false |
ephillipe/nltk-trainer | nltk_trainer/classification/args.py | 6 | 8308 | from nltk.classify import DecisionTreeClassifier, MaxentClassifier, NaiveBayesClassifier, megam
from nltk_trainer import basestring
from nltk_trainer.classification.multi import AvgProbClassifier
classifier_choices = ['NaiveBayes', 'DecisionTree', 'Maxent'] + MaxentClassifier.ALGORITHMS
dense_classifiers = set(['ExtraTreesClassifier', 'GradientBoostingClassifier',
'RandomForestClassifier', 'GaussianNB', 'DecisionTreeClassifier'])
verbose_classifiers = set(['RandomForestClassifier', 'SVC'])
try:
import svmlight # do this first since svm module makes ugly errors
from nltk.classify.svm import SvmClassifier
classifier_choices.append('Svm')
except:
pass
try:
from nltk.classify import scikitlearn
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import Pipeline
from sklearn import ensemble, feature_selection, linear_model, naive_bayes, neighbors, svm, tree
classifiers = [
ensemble.ExtraTreesClassifier,
ensemble.GradientBoostingClassifier,
ensemble.RandomForestClassifier,
linear_model.LogisticRegression,
#linear_model.SGDClassifier, # NOTE: this seems terrible, but could just be the options
naive_bayes.BernoulliNB,
naive_bayes.GaussianNB,
naive_bayes.MultinomialNB,
neighbors.KNeighborsClassifier, # TODO: options for nearest neighbors
svm.LinearSVC,
svm.NuSVC,
svm.SVC,
tree.DecisionTreeClassifier,
]
sklearn_classifiers = {}
for classifier in classifiers:
sklearn_classifiers[classifier.__name__] = classifier
classifier_choices.extend(sorted(['sklearn.%s' % c.__name__ for c in classifiers]))
except ImportError as exc:
sklearn_classifiers = {}
def add_maxent_args(parser):
maxent_group = parser.add_argument_group('Maxent Classifier',
'These options only apply when a Maxent classifier is chosen.')
maxent_group.add_argument('--max_iter', default=10, type=int,
help='maximum number of training iterations, defaults to %(default)d')
maxent_group.add_argument('--min_ll', default=0, type=float,
help='stop classification when average log-likelihood is less than this, default is %(default)d')
maxent_group.add_argument('--min_lldelta', default=0.1, type=float,
help='''stop classification when the change in average log-likelihood is less than this.
default is %(default)f''')
def add_decision_tree_args(parser):
decisiontree_group = parser.add_argument_group('Decision Tree Classifier',
'These options only apply when the DecisionTree classifier is chosen')
decisiontree_group.add_argument('--entropy_cutoff', default=0.05, type=float,
help='default is 0.05')
decisiontree_group.add_argument('--depth_cutoff', default=100, type=int,
help='default is 100')
decisiontree_group.add_argument('--support_cutoff', default=10, type=int,
help='default is 10')
sklearn_kwargs = {
# ensemble
'ExtraTreesClassifier': ['criterion', 'max_feats', 'depth_cutoff', 'n_estimators'],
'GradientBoostingClassifier': ['learning_rate', 'max_feats', 'depth_cutoff', 'n_estimators'],
'RandomForestClassifier': ['criterion', 'max_feats', 'depth_cutoff', 'n_estimators'],
# linear_model
'LogisticRegression': ['C','penalty'],
# naive_bayes
'BernoulliNB': ['alpha'],
'MultinomialNB': ['alpha'],
# svm
'LinearSVC': ['C', 'loss', 'penalty'],
'NuSVC': ['nu', 'kernel'],
'SVC': ['C', 'kernel'],
# tree
'DecisionTreeClassifier': ['criterion', 'max_feats', 'depth_cutoff'],
}
def add_sklearn_args(parser):
if not sklearn_classifiers: return
sklearn_group = parser.add_argument_group('sklearn Classifiers',
'These options are used by one or more sklearn classification algorithms.')
sklearn_group.add_argument('--alpha', type=float, default=1.0,
help='smoothing parameter for naive bayes classifiers, default is %(default)s')
sklearn_group.add_argument('--C', type=float, default=1.0,
help='penalty parameter, default is %(default)s')
sklearn_group.add_argument('--criterion', choices=['gini', 'entropy'],
default='gini', help='Split quality function, default is %(default)s')
sklearn_group.add_argument('--kernel', default='rbf',
choices=['linear', 'poly', 'rbf', 'sigmoid', 'precomputed'],
help='kernel type for support vector machine classifiers, default is %(default)s')
sklearn_group.add_argument('--learning_rate', type=float, default=0.1,
help='learning rate, default is %(default)s')
sklearn_group.add_argument('--loss', choices=['l1', 'l2'],
default='l2', help='loss function, default is %(default)s')
sklearn_group.add_argument('--n_estimators', type=int, default=10,
help='Number of trees for Decision Tree ensembles, default is %(default)s')
sklearn_group.add_argument('--nu', type=float, default=0.5,
help='upper bound on fraction of training errors & lower bound on fraction of support vectors, default is %(default)s')
sklearn_group.add_argument('--penalty', choices=['l1', 'l2'],
default='l2', help='norm for penalization, default is %(default)s')
sklearn_group.add_argument('--tfidf', default=False, action='store_true',
help='Use TfidfTransformer')
# for mapping existing args to sklearn args
sklearn_keys = {
'max_feats': 'max_features',
'depth_cutoff': 'max_depth'
}
def make_sklearn_classifier(algo, args):
name = algo.split('.', 1)[1]
kwargs = {}
for key in sklearn_kwargs.get(name, []):
val = getattr(args, key, None)
if val: kwargs[sklearn_keys.get(key, key)] = val
if args.trace and kwargs:
print('training %s with %s' % (algo, kwargs))
if args.trace and name in verbose_classifiers:
kwargs['verbose'] = True
return sklearn_classifiers[name](**kwargs)
def make_classifier_builder(args):
if isinstance(args.classifier, basestring):
algos = [args.classifier]
else:
algos = args.classifier
for algo in algos:
if algo not in classifier_choices:
raise ValueError('classifier %s is not supported' % algo)
classifier_train_args = []
for algo in algos:
classifier_train_kwargs = {}
if algo == 'DecisionTree':
classifier_train = DecisionTreeClassifier.train
classifier_train_kwargs['binary'] = False
classifier_train_kwargs['entropy_cutoff'] = args.entropy_cutoff
classifier_train_kwargs['depth_cutoff'] = args.depth_cutoff
classifier_train_kwargs['support_cutoff'] = args.support_cutoff
classifier_train_kwargs['verbose'] = args.trace
elif algo == 'NaiveBayes':
classifier_train = NaiveBayesClassifier.train
elif algo == 'Svm':
classifier_train = SvmClassifier.train
elif algo.startswith('sklearn.'):
# TODO: support many options for building an estimator pipeline
pipe = [('classifier', make_sklearn_classifier(algo, args))]
tfidf = getattr(args, 'tfidf', None)
penalty = getattr(args, 'penalty', None)
if tfidf and penalty:
if args.trace:
print('using tfidf transformer with norm %s' % penalty)
pipe.insert(0, ('tfidf', TfidfTransformer(norm=penalty)))
sparse = pipe[-1][1].__class__.__name__ not in dense_classifiers
if not sparse and args.trace:
print('using dense matrix')
value_type = getattr(args, 'value_type', 'bool')
if value_type == 'bool' and not tfidf:
dtype = bool
elif value_type == 'int' and not tfidf:
dtype = int
else:
dtype = float
if args.trace:
print('using dtype %s' % dtype.__name__)
classifier_train = scikitlearn.SklearnClassifier(Pipeline(pipe), dtype=dtype, sparse=sparse).train
else:
if algo != 'Maxent':
classifier_train_kwargs['algorithm'] = algo
if algo == 'MEGAM':
megam.config_megam()
classifier_train = MaxentClassifier.train
classifier_train_kwargs['max_iter'] = args.max_iter
classifier_train_kwargs['min_ll'] = args.min_ll
classifier_train_kwargs['min_lldelta'] = args.min_lldelta
classifier_train_kwargs['trace'] = args.trace
classifier_train_args.append((algo, classifier_train, classifier_train_kwargs))
def trainf(train_feats):
classifiers = []
for algo, classifier_train, train_kwargs in classifier_train_args:
if args.trace:
print('training %s classifier' % algo)
classifiers.append(classifier_train(train_feats, **train_kwargs))
if len(classifiers) == 1:
return classifiers[0]
else:
return AvgProbClassifier(classifiers)
return trainf
#return lambda(train_feats): classifier_train(train_feats, **classifier_train_kwargs)
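# Hedged sketch (added for illustration; not part of the original module):
# building and using a trainer from a minimal stand-in for the argparse
# namespace this module normally receives. Only the attributes read for the
# 'NaiveBayes' algorithm are supplied.
def _example_naive_bayes_builder():
    class _Args(object):
        classifier = ['NaiveBayes']
        trace = 0
    trainf = make_classifier_builder(_Args())
    train_feats = [({'contains(good)': True}, 'pos'),
                   ({'contains(bad)': True}, 'neg')]
    return trainf(train_feats)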
| apache-2.0 | -619,178,258,999,579,300 | 36.423423 | 121 | 0.718344 | false | 3.435897 | false | false | false |
huangtao-sh/orange | orange/utils/log.py | 1 | 1284 | # Project: utility library
# Module: logging
# Author: 黄涛
# License: GPL
# Email: [email protected]
# Created: 2019-07-20 22:26
import logging
import sys
import os
from orange import Path
from .datetime_ import datetime
today = datetime.now() % '%F'
name = sys.argv[0] or 'test'
logger = logging.getLogger(name)
if os.name == 'nt':
path = Path(f'%localappdata%/logs/{today}')
else:
path = Path(f'~/.logs/{today}')
path.ensure()
path = (path / name.split(os.sep)[-1]).with_suffix('.log')
log = logger.log
debug = logger.debug
info = logger.info
warning = logger.warning
error = logger.error
fatal = logger.fatal
critical = logger.critical
warn = logger.warn
logging.basicConfig(format='%(asctime)s %(levelname)-8s: %(message)s',
filename=str(path),
datefmt='%F %T')
def set_debug(level=logging.DEBUG):
logger.setLevel(level)
def set_verbose(fmt='%(message)s'):
if logger.level == 0 or logger.level > logging.INFO:
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
formatter = logging.Formatter(fmt=fmt)
handler.setFormatter(formatter)
logger.addHandler(handler)
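# Hedged usage sketch (added for illustration; not part of the original
# module): echo INFO-level messages to stdout in addition to the per-day log
# file configured above, then lower the threshold to DEBUG.
def _example_logging():
    set_verbose()
    info('hello from %s', name)
    set_debug()
    debug('DEBUG messages are now recorded as well')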
| gpl-2.0 | 3,417,165,787,511,814,000 | 21.884615 | 70 | 0.636876 | false | 2.978417 | false | false | false |
aldnav/eventure | extra/views.py | 1 | 2204 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.http import JsonResponse
from django.views.generic import TemplateView, View
from .mixins import JSONView
from .models import Profile, Note, Game
from .serializer import activity_serializer
from activity.models import Activity, Verb
class HomeView(TemplateView):
template_name = 'extra/index.html'
class ActivityResourceView(JSONView):
serializer = activity_serializer
def get_context_data(self, **kwargs):
context = super(ActivityResourceView, self).get_context_data(**kwargs)
context['objects'] = []
filters = self.request.GET.dict()
query_set = Activity.objects.filter(**filters)
print query_set
fields = ['pk', 'actor', 'object_ref', 'target', 'verb',
'published_verbose', 'title', 'summary']
for obj in query_set:
# obj = self.serializer.serialize(obj, fields)
context['objects'].append(obj)
return context
class DrumKitView(TemplateView):
template_name = 'drumkit/index.html'
def post(self, request, *args, **kwargs):
note = request.POST.get('note')
ip_address = self.get_client_ip()
verb = Verb.objects.get(name='play')
game = Game.objects.get(title='Drumkit')
note, _ = Note.objects.get_or_create(title=note)
profiles = Profile.objects.filter(ip_address=ip_address)
if profiles.count() == 0:
user = Profile.objects.create_anon_user(ip_address).owner
else:
user = profiles.first().owner
activity = Activity.objects.create(
actor=user,
object_ref=note,
verb=verb,
target=game,
title='{} {} {} {}'.format(
user.username, verb.name, note.title, game.title)
)
return JsonResponse({'activity': activity.pk})
def get_client_ip(self, **kwargs):
request = self.request
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
| mit | 396,356,481,751,064,600 | 32.907692 | 78 | 0.610254 | false | 3.900885 | false | false | false |
fengsp/flask-snippets | templatetricks/timesince_filter.py | 2 | 1041 | # -*- coding: utf-8 -*-
"""
templatetricks.timesince_filter
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
timesince filter
http://flask.pocoo.org/snippets/33/
"""
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from datetime import datetime
from app import app
@app.template_filter()
def timesince(dt, default="just now"):
"""
Returns string representing "time since" e.g.
3 days ago, 5 hours ago etc.
"""
now = datetime.utcnow()
diff = now - dt
periods = (
(diff.days / 365, "year", "years"),
(diff.days / 30, "month", "months"),
(diff.days / 7, "week", "weeks"),
(diff.days, "day", "days"),
(diff.seconds / 3600, "hour", "hours"),
(diff.seconds / 60, "minute", "minutes"),
(diff.seconds, "second", "seconds"),
)
for period, singular, plural in periods:
if period:
return "%d %s ago" % (period, singular if period == 1 else plural)
return default
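# Hedged sketch (added for illustration; not part of the original snippet).
# Once registered, the filter is used from a Jinja2 template, e.g.
# {{ comment.created_at|timesince }}; the call below exercises it directly
# with a made-up timestamp.
def _example_timesince():
    from datetime import timedelta
    return timesince(datetime.utcnow() - timedelta(hours=5))  # "5 hours ago"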
| bsd-3-clause | 5,286,875,435,464,828,000 | 23.209302 | 79 | 0.553314 | false | 3.614583 | false | false | false |
athyuttamre/accessible-facebook-ui | public/conversejs/bosh_manager/punjab-master/build/lib/punjab/xmpp/server.py | 3 | 7061 | # XMPP server class
from twisted.application import service
from twisted.python import components
from twisted.internet import reactor
from twisted.words.xish import domish, xpath, xmlstream
from twisted.words.protocols.jabber import jid
from punjab.xmpp import ns
SASL_XMLNS = 'urn:ietf:params:xml:ns:xmpp-sasl'
COMP_XMLNS = 'http://jabberd.jabberstudio.org/ns/component/1.0'
STREAMS_XMLNS = 'urn:ietf:params:xml:ns:xmpp-streams'
from zope.interface import Interface, implements
# interfaces
class IXMPPServerService(Interface):
pass
class IXMPPServerFactory(Interface):
pass
class IXMPPFeature(Interface):
pass
class IXMPPAuthenticationFeature(IXMPPFeature):
pass
class IQAuthFeature(object):
""" XEP-0078 : http://www.xmpp.org/extensions/xep-0078.html"""
implements(IXMPPAuthenticationFeature)
IQ_GET_AUTH = xpath.internQuery(ns.IQ_GET_AUTH)
IQ_SET_AUTH = xpath.internQuery(ns.IQ_SET_AUTH)
def associateWithStream(self, xs):
"""Add a streamm start event observer.
And do other things to associate with the xmlstream if necessary.
"""
self.xmlstream = xs
self.xmlstream.addOnetimeObserver(xmlstream.STREAM_START_EVENT,
self.streamStarted)
def disassociateWithStream(self, xs):
self.xmlstream.removeObserver(self.IQ_GET_AUTH,
self.authRequested)
self.xmlstream.removeObserver(self.IQ_SET_AUTH,
self.auth)
self.xmlstream = None
def streamStarted(self, elm):
"""
Called when client sends stream:stream
"""
self.xmlstream.addObserver(self.IQ_GET_AUTH,
self.authRequested)
self.xmlstream.addObserver(self.IQ_SET_AUTH,
self.auth)
def authRequested(self, elem):
"""Return the supported auth type.
"""
resp = domish.Element(('iq', ns.NS_CLIENT))
resp['type'] = 'result'
resp['id'] = elem['id']
q = resp.addElement("query", ns.NS_AUTH)
q.addElement("username", content=str(elem.query.username))
q.addElement("digest")
q.addElement("password")
q.addElement("resource")
self.xmlstream.send(resp)
def auth(self, elem):
"""Do not auth the user, anyone can log in"""
username = elem.query.username.__str__()
resource = elem.query.resource.__str__()
user = jid.internJID(username+'@'+self.xmlstream.host+'/'+resource)
resp = domish.Element(('iq', ns.NS_CLIENT))
resp['type'] = 'result'
resp['id'] = elem['id']
self.xmlstream.send(resp)
self.xmlstream.authenticated(user)
class XMPPServerProtocol(xmlstream.XmlStream):
""" Basic dummy server protocol """
host = "localhost"
user = None
initialized = False
id = 'Punjab123'
features = [IQAuthFeature()]
delay_features = 0
def connectionMade(self):
"""
a client connection has been made
"""
xmlstream.XmlStream.connectionMade(self)
self.bootstraps = [
(xmlstream.STREAM_CONNECTED_EVENT, self.streamConnected),
(xmlstream.STREAM_START_EVENT, self.streamStarted),
(xmlstream.STREAM_END_EVENT, self.streamEnded),
(xmlstream.STREAM_ERROR_EVENT, self.streamErrored),
]
for event, fn in self.bootstraps:
self.addObserver(event, fn)
# load up the authentication features
for f in self.features:
if IXMPPAuthenticationFeature.implementedBy(f.__class__):
f.associateWithStream(self)
def send(self, obj):
if not self.initialized:
self.transport.write("""<?xml version="1.0"?>\n""")
self.initialized = True
xmlstream.XmlStream.send(self, obj)
def streamConnected(self, elm):
print "stream connected"
def streamStarted(self, elm):
"""stream has started, we need to respond
"""
if self.delay_features == 0:
self.send("""<stream:stream xmlns='%s' xmlns:stream='http://etherx.jabber.org/streams' from='%s' id='%s' version='1.0' xml:lang='en'><stream:features><register xmlns='http://jabber.org/features/iq-register'/></stream:features>""" % (ns.NS_CLIENT, self.host, self.id,))
else:
self.send("""<stream:stream xmlns='%s' xmlns:stream='http://etherx.jabber.org/streams' from='%s' id='%s' version='1.0' xml:lang='en'>""" % (ns.NS_CLIENT, self.host, self.id,))
reactor.callLater(self.delay_features, self.send, """<stream:features><register xmlns='http://jabber.org/features/iq-register'/></stream:features>""")
def streamEnded(self, elm):
self.send("""</stream:stream>""")
def streamErrored(self, elm):
self.send("""<stream:error/></stream:stream>""")
def authenticated(self, user):
"""User has authenticated.
"""
self.user = user
def onElement(self, element):
try:
xmlstream.XmlStream.onElement(self, element)
except Exception, e:
print "Exception!", e
raise e
def onDocumentEnd(self):
pass
def connectionLost(self, reason):
xmlstream.XmlStream.connectionLost(self, reason)
pass
def triggerChallenge(self):
""" send a fake challenge for testing
"""
self.send("""<challenge xmlns='urn:ietf:params:xml:ns:xmpp-sasl'>cmVhbG09ImNoZXNzcGFyay5jb20iLG5vbmNlPSJ0YUhIM0FHQkpQSE40eXNvNEt5cFlBPT0iLHFvcD0iYXV0aCxhdXRoLWludCIsY2hhcnNldD11dGYtOCxhbGdvcml0aG09bWQ1LXNlc3M=</challenge>""")
def triggerInvalidXML(self):
"""Send invalid XML, to trigger a parse error."""
self.send("""<parse error=>""")
self.streamEnded(None)
def triggerStreamError(self):
""" send a stream error
"""
self.send("""
<stream:error xmlns:stream='http://etherx.jabber.org/streams'>
<policy-violation xmlns='urn:ietf:params:xml:ns:xmpp-streams'/>
<text xmlns='urn:ietf:params:xml:ns:xmpp-streams' xml:lang='langcode'>Error text</text>
<arbitrary-extension val='2'/>
</stream:error>""")
self.streamEnded(None)
class XMPPServerFactoryFromService(xmlstream.XmlStreamFactory):
implements(IXMPPServerFactory)
protocol = XMPPServerProtocol
def __init__(self, service):
xmlstream.XmlStreamFactory.__init__(self)
self.service = service
def buildProtocol(self, addr):
self.resetDelay()
xs = self.protocol()
xs.factory = self
for event, fn in self.bootstraps:
xs.addObserver(event, fn)
return xs
components.registerAdapter(XMPPServerFactoryFromService,
IXMPPServerService,
IXMPPServerFactory)
class XMPPServerService(service.Service):
implements(IXMPPServerService)
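# Hedged wiring sketch (added for illustration; not part of the original
# module): the adapter registered above lets the dummy server be bound to a
# TCP port roughly like this. The port number is arbitrary.
def _example_listen(port=5222):
    svc = XMPPServerService()
    factory = IXMPPServerFactory(svc)  # adapted via XMPPServerFactoryFromService
    reactor.listenTCP(port, factory)
    reactor.run()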
| mit | -7,401,368,338,839,916,000 | 29.834061 | 280 | 0.620875 | false | 3.753854 | false | false | false |
madmax983/h2o-3 | h2o-py/tests/testdir_algos/gbm/pyunit_DEPRECATED_loss_behaviorGBM.py | 3 | 2563 | import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
def distribution_behaviorGBM():
#Log.info("==============================")
#Log.info("Default Behavior - Gaussian")
#Log.info("==============================")
eco = h2o.import_file(path=pyunit_utils.locate("smalldata/gbm_test/ecology_model.csv"))
# 0/1 response: expect gaussian
eco_model = h2o.gbm(x=eco[2:13], y=eco["Angaus"])
# more than 2 integers for response: expect gaussian
cars = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/cars.csv"))
cars_model = h2o.gbm(x=cars[3:7], y=cars["cylinders"])
#Log.info("==============================")
#Log.info("Gaussian Behavior")
#Log.info("==============================")
# 0/1 response: expect gaussian
eco_model = h2o.gbm(x=eco[2:13], y=eco["Angaus"], distribution="gaussian")
# character response: expect error
try:
eco_model = h2o.gbm(x=eco[1:8], y=eco["Method"], distribution="gaussian")
assert False, "expected an error"
except EnvironmentError:
assert True
#Log.info("==============================")
#Log.info("Bernoulli Behavior")
#Log.info("==============================")
# 0/1 response: expect bernoulli
eco_model = h2o.gbm(x=eco[2:13], y=eco["Angaus"].asfactor(), distribution="bernoulli")
# 2 level character response: expect bernoulli
tree = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/test_tree_minmax.csv"))
tree_model = h2o.gbm(x=tree[0:3], y=tree["response"], distribution="bernoulli", min_rows=1)
# more than two integers for response: expect error
try:
cars_mod = h2o.gbm(x=cars[3:7], y=cars["cylinders"], distribution="bernoulli")
assert False, "expected an error"
except EnvironmentError:
assert True
# more than two character levels for response: expect error
try:
eco_model = h2o.gbm(x=eco[0:8], y=eco["Method"], distribution="bernoulli")
assert False, "expected an error"
except EnvironmentError:
assert True
#Log.info("==============================")
#Log.info("Multinomial Behavior")
#Log.info("==============================")
# more than two integers for response: expect multinomial
cars_model = h2o.gbm(x=cars[3:7], y=cars["cylinders"].asfactor(), distribution="multinomial")
# more than two character levels for response: expect multinomial
eco_model = h2o.gbm(x=eco[0:8], y=eco["Method"], distribution="multinomial")
if __name__ == "__main__":
    pyunit_utils.standalone_test(distribution_behaviorGBM)
else:
distribution_behaviorGBM()
| apache-2.0 | 3,524,654,603,472,656,400 | 36.691176 | 95 | 0.622708 | false | 3.195761 | false | false | false |
bitcraze/crazyflie-firmware | tools/verify/elf_sanity.py | 1 | 5586 | import argparse
import struct
import sys
try:
from elftools.elf.elffile import ELFFile
except ImportError:
print('pytelftools missing, install to run this script', file=sys.stderr)
print('https://github.com/eliben/pyelftools#installing', file=sys.stderr)
sys.exit(1)
class Colors:
RED = '\033[91m'
BLUE = '\033[94m'
GREEN = '\033[92m'
END = '\033[0m'
CORE = 0x20
param_type_to_str_dict = {
0x0 | 0x0 << 2 | 0x1 << 3: 'PARAM_UINT8',
0x0 | 0x0 << 2 | 0x0 << 3: 'PARAM_INT8',
    0x1 | 0x0 << 2 | 0x1 << 3: 'PARAM_UINT16',
0x1 | 0x0 << 2 | 0x0 << 3: 'PARAM_INT16',
0x2 | 0x0 << 2 | 0x1 << 3: 'PARAM_UINT32',
0x2 | 0x0 << 2 | 0x0 << 3: 'PARAM_INT32',
0x2 | 0x1 << 2 | 0x0 << 3: 'PARAM_FLOAT'
}
def param_type_to_str(t: int) -> str:
extra = str()
if t & (1 << 5): # PARAM_CORE set
extra = ' | PARAM_CORE'
if t & (1 << 6): # PARAM_RONLY set
extra += ' | PARAM_RONLY'
int_type = t & ~(1 << 5 | 1 << 6)
return '{:12}{}'.format(param_type_to_str_dict[int_type], extra)
log_type_to_str_dict = {
0x1: 'LOG_UINT8',
0x2: 'LOG_INT8',
    0x3: 'LOG_UINT16',
0x4: 'LOG_INT16',
0x5: 'LOG_UINT32',
0x6: 'LOG_INT32',
0x7: 'LOG_FLOAT',
0x8: 'LOG_FP16'
}
def log_type_to_str(t: int) -> str:
extra = str()
if t & (1 << 5): # LOG_CORE set
extra = ' | LOG_CORE'
if t & (1 << 6): # BY_FUNCTION set
extra += ' | BY_FUNCTION'
int_type = t & ~(1 << 5 | 1 << 6)
return '{:12}{}'.format(log_type_to_str_dict[int_type], extra)
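# Hedged example (added for illustration; not part of the original script):
# decoding a raw type byte by hand. 0x08 encodes PARAM_UINT8; OR-ing in the
# CORE bit (0x20) marks it as a core parameter, so the helper renders it as
# 'PARAM_UINT8 | PARAM_CORE' (with column padding).
def _example_decode_type():
    assert param_type_to_str(0x08).strip() == 'PARAM_UINT8'
    assert 'PARAM_CORE' in param_type_to_str(0x08 | CORE)
    assert 'LOG_FLOAT' in log_type_to_str(0x7)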
def process_file(filename, list_params: bool, list_logs: bool, core: bool):
with open(filename, 'rb') as f:
parameters = check_structs(f, 'param', core)
if list_params:
for key in sorted(parameters.keys()):
t = parameters[key]
print('{:25}\t{}'.format(key, param_type_to_str(t)))
logs = check_structs(f, 'log', core)
if list_logs:
for key in sorted(logs.keys()):
t = logs[key]
print('{:25}\t{}'.format(key, log_type_to_str(t)))
n_logs = Colors.GREEN + str(len(logs.keys())) + Colors.END
n_params = Colors.BLUE + str(len(parameters.keys())) + Colors.END
print('{} parameters and {} log vars in elf'.format(n_params, n_logs))
def get_offset_of(elf, addr):
for seg in elf.iter_segments():
if seg.header['p_type'] != 'PT_LOAD':
continue
# If the symbol is inside the range of a LOADed segment, calculate the
# file offset by subtracting the virtual start address and adding the
# file offset of the loaded section(s)
if addr >= seg['p_vaddr'] and addr < seg['p_vaddr'] + seg['p_filesz']:
return addr - seg['p_vaddr'] + seg['p_offset']
return None
def get_offset_of_symbol(elf, name):
section = elf.get_section_by_name('.symtab')
sym = section.get_symbol_by_name(name)[0]
if not sym:
print('symbol %s not found' % name, file=sys.stderr)
sys.exit(1)
return get_offset_of(elf, sym['st_value'])
def check_structs(stream, what: str, core: bool) -> dict:
elf = ELFFile(stream)
offset = get_offset_of_symbol(elf, '_{}_start'.format(what))
stop_offset = get_offset_of_symbol(elf, '_{}_stop'.format(what))
name_type_dict = {}
name_maxlen = 25
struct_len = 12
group_bit = 0x1 << 7
start_bit = 0x1
while offset < stop_offset:
elf.stream.seek(offset)
#
# Parsing log or param, first unpack the struct:
# struct [param_s|log_s] {
# uint8_t type;
# char * name;
# void * address;
# };
#
# We want the type and the name.
#
buffer = elf.stream.read(struct_len)
t, addr = struct.unpack('@Bxxxixxxx', buffer)
#
# Next, convert address of name to offset in elf
#
addr = get_offset_of(elf, addr)
#
# And read the name from that offset
#
elf.stream.seek(addr)
name = ''.join(iter(lambda: stream.read(1).decode('ascii'), '\x00'))
#
# Check if this is start of a group
#
if t & group_bit != 0 and t & start_bit != 0:
current_group = name
elif t & group_bit == 0:
name = '%s.%s' % (current_group, name)
if name in name_type_dict:
print('%sDuplicate parameter detected!%s (%s)' %
(Colors.RED, Colors.END, name), file=sys.stderr)
sys.exit(1)
else:
#
# If core only is specified we check if the core flag is set
#
if not core or (t & CORE) != 0:
name_type_dict[name] = t
if len(name) > name_maxlen:
print('%sName too long!%s (%s > %d)' %
(Colors.RED, Colors.END, name, name_maxlen),
file=sys.stderr)
sys.exit(1)
offset += struct_len
return name_type_dict
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--list-params', action='store_true')
parser.add_argument('--list-logs', action='store_true')
parser.add_argument('--core', action='store_true')
parser.add_argument('filename', nargs=argparse.REMAINDER)
args = parser.parse_args()
if args.filename:
process_file(args.filename[0], args.list_params, args.list_logs, args.core)
else:
sys.exit(1)
| gpl-3.0 | -600,877,974,639,809,700 | 28.555556 | 83 | 0.535267 | false | 3.15415 | false | false | false |
JuliaWinchester/morphotester | topomesh.py | 1 | 5807 | '''
Created on Jan 10, 2016
@author: Julia M. Winchester
'''
import plython
import DNE
import OPC
import RFI
import implicitfair
from collections import defaultdict
class TopoMesh(plython.PlythonMesh):
"""A class for creating and interacting with triangulated polygon meshes and topographic variables.
Class inherits from plython.PlythonMesh. Creates a list of Numpy ndarray objects containing
triangulated polygon mesh data if provided with a path to a .ply file. Topographic variables
are instanced as None and take the data types specified below when generated using the
ProcessSurface method.
Args:
filepath (str): Path to a .ply polygon mesh file
Attributes:
mesh (list): Triangulated polygon mesh data. Contains three ndarrays:
vertex XYZ points, polygons with component vertex XYZ points,
and polygons with component vertex indices.
nvert (int): Number of vertices in mesh.
nface (int): Number of polygons in mesh.
vertices (ndarray): Vertex XYZ points for mesh.
faces (ndarray): Polygons with component vertex indices for mesh.
triverts (ndarray): Polygons with component vertex XYZ points for mesh.
DNE (float): Total Dirichlet normal energy of mesh.
DNEscalars (ndarray): Scalars for visualizing DNE.
conditionfaces (list): List of polygon face indices with high matrix condition numbers.
boundaryfaces (list): List of polygon face indices forming mesh edges.
outlierfaces (list): List of polygon face indices removed as outliers, with DNE values and face areas.
RFI (float): Relief index of mesh (surface area/projected area).
surfarea (float): 3D surface area of mesh.
projarea (float): 2D surface area of mesh projected on XY plane.
OPCR (float): Orientation patch count rotated for mesh.
OPClist (list): Orientation patch counts at 8 rotations for mesh.
OPCscalars: Scalars for visualizing OPC.
"""
def __init__(self, filepath=""):
super(TopoMesh,self).__init__(filepath)
self.DNE = None
self.DNEscalars = None
self.conditionfaces = None
self.boundaryfaces = None
self.outlierfaces = None
self.RFI = None
self.surfarea = None
self.projarea = None
self.linelen = None
self.bluepixie = None
self.redpixie = None
self.pixelratio = None
self.OPCR = None
self.OPClist = None
self.OPCscalars = None
def GenerateDNE(self, dosmooth, smoothit, smoothstep, docondition, dooutlier, outlierperc, outliertype, filename):
"""Calculates Dirichlet normal energy (surface bending) from mesh data.
For details on args, see DNE.MeshDNE class.
Args:
            dosmooth (bool): If true, do implicit fair smoothing.
            smoothit (int): Number of smoothing iterations.
            smoothstep (float): Smoothing step size.
            docondition (bool): If true, do polygon condition number control.
            dooutlier (bool): If true, do outlier removal.
            outlierperc (float): Outlier percentile.
            outliertype (bool): If true, treat outliers as energy*area; if false, as energy.
            filename (str): Name passed through to DNE.MeshDNE.
"""
self.check_for_mesh(self.GenerateDNE)
surfcurv = DNE.MeshDNE(self, dosmooth, smoothit, smoothstep, docondition, dooutlier, outlierperc, outliertype, filename)
self.DNE = surfcurv.DNE
self.DNEscalars = surfcurv.equantity
self.conditionfaces = surfcurv.high_condition_faces
self.boundaryfaces = surfcurv.boundary_faces
self.outlierfaces = surfcurv.outlier_faces
def GenerateRFI(self):
"""Calculates relief index (surface relief) from mesh data."""
self.check_for_mesh(self.GenerateRFI)
surfrelf = RFI.MeshRFI(self)
self.RFI = surfrelf.RFI
self.surfarea = surfrelf.surfarea
self.projarea = surfrelf.projarea
self.linelen = surfrelf.linelen
self.bluepixie = surfrelf.bluepixie
self.redpixie = surfrelf.redpixie
self.pixelratio = surfrelf.pixelratio
def GenerateOPCR(self, minpatch):
"""Calculates orientation patch count rotated (surface complexity) from mesh data.
For details on args see OPC.MeshOPCR class.
Args:
minpatch (int): Minimum size for counting patches.
"""
self.check_for_mesh(self.GenerateOPCR)
surfcomp = OPC.MeshOPCR(self, minpatch)
self.OPCR = surfcomp.OPCR
self.OPClist = surfcomp.opc_list
self.OPCscalars = surfcomp.colormap_list[0]
def implicit_fair_mesh(self, iterations, step):
self.get_vert_tri_dict()
faired_vertices = implicitfair.smooth(self.vertices, self.faces, iterations, step, self.vert_tri_dict)
self.vertices = faired_vertices
self.mesh[0] = faired_vertices
for i in range(len(self.triverts)):
self.triverts[i] = self.vertices[self.faces[i]]
self.mesh[1] = self.triverts
def get_vert_tri_dict(self):
"""Generates dictionary associating vertex index keys with related polygon index values."""
self.vert_tri_dict = defaultdict(list)
for findex, face in enumerate(self.faces):
for vertex in face:
self.vert_tri_dict[vertex].append(findex)
def check_for_mesh(self, function="function"):
        if self.mesh is None:
raise ValueError('A mesh has not been imported, %s cannot proceed.' % function)
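# Example usage (illustrative values only; "specimen.ply" is a placeholder path):
#   mesh = TopoMesh("specimen.ply")
#   mesh.GenerateDNE(True, 3, 0.1, True, True, 99.9, True, "specimen")
#   mesh.GenerateRFI()
#   mesh.GenerateOPCR(3)
#   print(mesh.DNE, mesh.RFI, mesh.OPCR)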
| gpl-2.0 | -8,210,878,785,973,345,000 | 39.615385 | 128 | 0.64009 | false | 4.058001 | false | false | false |
tyndare/osmose-backend | analysers/analyser_merge_bicycle_rental_FR_bm.py | 1 | 3368 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
###########################################################################
## ##
## Copyrights Frédéric Rodrigo 2014 ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###########################################################################
from Analyser_Merge import Analyser_Merge, Source, SHP, Load, Mapping, Select, Generate
class Analyser_Merge_Bicycle_Rental_FR_bm(Analyser_Merge):
def __init__(self, config, logger = None):
self.missing_official = {"item":"8160", "class": 1, "level": 3, "tag": ["merge", "public equipment", "cycle"], "desc": T_(u"BM bicycle rental not integrated") }
self.possible_merge = {"item":"8161", "class": 3, "level": 3, "tag": ["merge", "public equipment", "cycle"], "desc": T_(u"BM bicycle rental integration suggestion") }
self.update_official = {"item":"8162", "class": 4, "level": 3, "tag": ["merge", "public equipment", "cycle"], "desc": T_(u"BM bicycle update") }
Analyser_Merge.__init__(self, config, logger,
"http://data.bordeaux-metropole.fr/data.php?themes=10",
u"Station VCUB",
SHP(Source(attribution = u"Bordeaux Métropole", millesime = "08/2016",
fileUrl = "http://data.bordeaux-metropole.fr/files.php?gid=43&format=2", zip = "TB_STVEL_P.shp", encoding = "ISO-8859-15")),
Load(("ST_X(geom)",), ("ST_Y(geom)",), srid = 2154),
Mapping(
select = Select(
types = ["nodes"],
tags = {"amenity": "bicycle_rental"}),
osmRef = "ref",
conflationDistance = 100,
generate = Generate(
static1 = {
"amenity": "bicycle_rental",
"network": "VCUB"},
static2 = {"source": self.source},
mapping1 = {
"name": "NOM",
"ref": "NUMSTAT",
"capacity": "NBSUPPOR",
"vending": lambda res: "subscription" if res["TERMBANC"] == "OUI" else None,
"description": lambda res: "VCUB+" if res["TARIF"] == "VLS PLUS" else None} )))
| gpl-3.0 | -5,211,930,841,178,215,000 | 62.490566 | 176 | 0.455572 | false | 4.28117 | false | false | false |
realitygaps/cloudbrain | cloudbrain/settings.py | 2 | 3884 | DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
SENSOR_DATA_KEYSPACE = "sensor_data"
CASSANDRA_ADDRESS = 'cassandra.cloudbrain.rocks'
RABBITMQ_ADDRESS = 'rabbitmq.cloudbrain.rocks'
WEBSERVER_ADDRESS = 'webserver.cloudbrain.rocks'
WEBSERVER_PORT = 8080
MOCK_DEVICE_ID = "mock"
REGISTERED_DEVICES_TABLE_NAME = "registered_device_ids"
# Metric metadata of all wearable devices accepted by CloudBrain.
DEVICE_METADATA = [
{'device_name': 'openbci',
'device_type': 'eeg_headset',
'metrics':
[
{
'metric_name': 'eeg',
'num_channels': 8,
'metric_description': 'Raw eeg data coming from the OpenBCI channels'
}
]
},
{
'device_name': 'muse',
'device_type': 'eeg_headset',
'metrics':
[
{
'metric_name': 'eeg',
'num_channels': 4,
'metric_description': 'Raw eeg data coming from the 4 channels of the Muse'
},
{
'metric_name': 'horseshoe',
'num_channels': 4,
'metric_description': 'Status indicator for each channel (1 = good, 2 = ok, >=3 bad)'
},
{
'metric_name': 'concentration',
'num_channels': 1,
'metric_description': None
},
{
'metric_name': 'mellow',
'num_channels': 1,
'metric_description': None
},
{
'metric_name': 'acc',
'num_channels': 3,
'metric_description': None
},
{
'metric_name': 'delta_absolute',
'num_channels': 4,
'metric_description': None
},
{
'metric_name': 'theta_absolute',
'num_channels': 4,
'metric_description': None
},
{
'metric_name': 'beta_absolute',
'num_channels': 4,
'metric_description': None
},
{
'metric_name': 'alpha_absolute',
'num_channels': 4,
'metric_description': None
},
{
'metric_name': 'gamma_absolute',
'num_channels': 4,
'metric_description': None
},
{
'metric_name': 'delta_relative',
'num_channels': 4,
'metric_description': None
},
{
'metric_name': 'theta_relative',
'num_channels': 4,
'metric_description': None
},
{
'metric_name': 'beta_relative',
'num_channels': 4,
'metric_description': None
},
{
'metric_name': 'alpha_relative',
'num_channels': 4,
'metric_description': None
},
{
'metric_name': 'gamma_relative',
'num_channels': 4,
'metric_description': None
},
{
'metric_name': 'is_good',
'num_channels': 4,
'metric_description': 'Strict data quality indicator for each channel, 0= bad, 1 = good.'
},
{
'metric_name': 'blink',
'num_channels': 1,
'metric_description': None
},
{
'metric_name': 'jaw_clench',
'num_channels': 1,
'metric_description': None
},
]
},
{
'device_name': 'neurosky',
'device_type': 'eeg_headset',
'metrics': [
{
'metric_name': 'concentration',
'num_channels': 1,
'metric_description': None
},
{
'metric_name': 'meditation',
'num_channels': 1,
'metric_description': None
},
{
'metric_name': 'signal_strength',
'num_channels': 1,
'metric_description': None
},
]
},
{
'device_name': 'pulsesensor',
'device_type': 'heart_rate_monitor',
'metrics': [
{
'metric_name': 'raw',
'num_channels': 1,
'metric_description': None
}
]
}
]
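# Illustrative lookup (not part of the original settings): list one device's metric names.
#   muse_metrics = [m["metric_name"]
#                   for device in DEVICE_METADATA if device["device_name"] == "muse"
#                   for m in device["metrics"]]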
| agpl-3.0 | 1,109,216,164,785,016,300 | 23.738854 | 99 | 0.47863 | false | 3.77821 | false | false | false |
navcoindev/navcoin-core | qa/rpc-tests/dao-light-voting-range-consultations.py | 1 | 9778 | #!/usr/bin/env python3
# Copyright (c) 2019 The Navcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import NavCoinTestFramework
from test_framework.cfund_util import *
import time
class LightVotingTest(NavCoinTestFramework):
"""Tests the voting from light wallets"""
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self, split=False):
self.nodes = []
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [["-debug=dao","-dandelion=0"]]*3)
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 2)
connect_nodes(self.nodes[2], 0)
def run_test(self):
# Get cfund parameters
blocks_per_voting_cycle = self.nodes[0].cfundstats()["consensus"]["blocksPerVotingCycle"]
self.nodes[0].staking(False)
self.nodes[1].staking(False)
self.nodes[2].staking(False)
activate_softfork(self.nodes[0], "coldstaking_v2")
votingkey = self.nodes[2].getnewaddress()
coldstaking = self.nodes[0].getcoldstakingaddress(self.nodes[1].getnewaddress(),self.nodes[1].getnewaddress(),votingkey)
self.nodes[0].sendtoaddress(votingkey, 100)
self.nodes[0].sendtoaddress(coldstaking, 3000000)
self.nodes[0].sendtoaddress(coldstaking, 3000000)
self.nodes[0].sendtoaddress(coldstaking, 3000000)
self.nodes[0].sendtoaddress(coldstaking, 3000000)
self.nodes[0].sendtoaddress(coldstaking, 3000000)
self.nodes[0].sendtoaddress(coldstaking, 3000000)
slow_gen(self.nodes[0], 10)
time.sleep(3)
consultation_hash = self.nodes[0].createconsultation("range", 300, 600, True)['hash']
slow_gen(self.nodes[0], 1)
start_new_cycle(self.nodes[0])
reversed_hash = reverse_byte_str(consultation_hash)
support_str = '6a' + 'c9' + 'c4' + '20' + reversed_hash
supportrm_str = '6a' + 'c9' + 'c8' + '20' + reversed_hash
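        # Interpretation (not stated in the original source): '6a' is OP_RETURN and '20'
        # pushes the 32-byte reversed consultation hash; the 'c9 c4' / 'c9 c8' bytes are
        # assumed to be the DAO "add support" / "remove support" markers.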
rawtx=self.nodes[2].createrawtransaction([],{support_str:0,'6ac1':0.1})
rawtx = "08" + rawtx[2:]
fundedrawtx=self.nodes[2].fundrawtransaction(rawtx,{'changeAddress':votingkey})['hex']
signedrawtx=self.nodes[2].signrawtransaction(fundedrawtx)['hex']
self.nodes[2].sendrawtransaction(signedrawtx)
self.nodes[2].generatetoaddress(1, votingkey)
sync_blocks(self.nodes)
self.stake_block(self.nodes[1], False)
self.stake_block(self.nodes[1], False)
sync_blocks(self.nodes)
time.sleep(3)
assert_equal(self.nodes[0].getconsultation(consultation_hash)['support'], 2)
rawtx=self.nodes[2].createrawtransaction([],{supportrm_str:0,'6ac1':0.1})
rawtx = "08" + rawtx[2:]
fundedrawtx=self.nodes[2].fundrawtransaction(rawtx,{'changeAddress':votingkey})['hex']
signedrawtx=self.nodes[2].signrawtransaction(fundedrawtx)['hex']
self.nodes[2].sendrawtransaction(signedrawtx)
self.nodes[2].generatetoaddress(1, votingkey)
sync_blocks(self.nodes)
self.stake_block(self.nodes[1], False)
self.stake_block(self.nodes[1], False)
sync_blocks(self.nodes)
time.sleep(3)
assert_equal(self.nodes[0].getconsultation(consultation_hash)['support'], 2)
start_new_cycle(self.nodes[0])
start_new_cycle(self.nodes[0])
start_new_cycle(self.nodes[0])
start_new_cycle(self.nodes[0])
vote_str_500 = '6a' + 'cb' + 'ca' + '20' + reversed_hash + '02f401'
vote_str_400 = '6a' + 'cb' + 'ca' + '20' + reversed_hash + '029001'
voteabs_str = '6a' + 'cb' + 'c7' + '20' + reversed_hash
voterm_str = '6a' + 'cb' + 'c8' + '20' + reversed_hash
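        # The trailing '02f401' / '029001' push 2-byte little-endian values 500 and 400
        # (the range answers being voted); 'c7' / 'c8' are assumed to mark abstain / remove.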
rawtx=self.nodes[2].createrawtransaction([],{vote_str_500:0,'6ac1':0.1})
rawtx = "08" + rawtx[2:]
fundedrawtx=self.nodes[2].fundrawtransaction(rawtx,{'changeAddress':votingkey})['hex']
signedrawtx=self.nodes[2].signrawtransaction(fundedrawtx)['hex']
self.nodes[2].sendrawtransaction(signedrawtx)
self.nodes[2].generatetoaddress(1, votingkey)
sync_blocks(self.nodes)
self.stake_block(self.nodes[1], False)
self.stake_block(self.nodes[1], False)
sync_blocks(self.nodes)
time.sleep(3)
assert_equal(self.nodes[0].getconsultation(consultation_hash)['answers'][0]['500'], 2)
assert_equal(self.nodes[0].getconsultation(consultation_hash)['abstain'], 0)
rawtx=self.nodes[2].createrawtransaction([],{voterm_str:0,'6ac1':0.1})
rawtx = "08" + rawtx[2:]
fundedrawtx=self.nodes[2].fundrawtransaction(rawtx,{'changeAddress':votingkey})['hex']
signedrawtx=self.nodes[2].signrawtransaction(fundedrawtx)['hex']
self.nodes[2].sendrawtransaction(signedrawtx)
self.nodes[2].generatetoaddress(1, votingkey)
sync_blocks(self.nodes)
self.stake_block(self.nodes[1], False)
self.stake_block(self.nodes[1], False)
sync_blocks(self.nodes)
time.sleep(3)
assert_equal(self.nodes[0].getconsultation(consultation_hash)["answers"][0]['500'], 2)
assert_equal(self.nodes[0].getconsultation(consultation_hash)['abstain'], 0)
rawtx=self.nodes[2].createrawtransaction([],{voteabs_str:0,'6ac1':0.1})
rawtx = "08" + rawtx[2:]
fundedrawtx=self.nodes[2].fundrawtransaction(rawtx,{'changeAddress':votingkey})['hex']
signedrawtx=self.nodes[2].signrawtransaction(fundedrawtx)['hex']
self.nodes[2].sendrawtransaction(signedrawtx)
self.nodes[2].generatetoaddress(1, votingkey)
sync_blocks(self.nodes)
self.stake_block(self.nodes[1], False)
self.stake_block(self.nodes[1], False)
sync_blocks(self.nodes)
time.sleep(3)
assert_equal(self.nodes[0].getconsultation(consultation_hash)["answers"][0]['500'], 2)
assert_equal(self.nodes[0].getconsultation(consultation_hash)['abstain'], 2)
start_new_cycle(self.nodes[0])
sync_blocks(self.nodes)
# 1 abstain vote
time.sleep(3)
self.stake_block(self.nodes[1], False)
sync_blocks(self.nodes)
time.sleep(3)
# remove abstain vote
rawtx=self.nodes[2].createrawtransaction([],{voterm_str:0,'6ac1':0.1})
rawtx = "08" + rawtx[2:]
fundedrawtx=self.nodes[2].fundrawtransaction(rawtx,{'changeAddress':votingkey})['hex']
signedrawtx=self.nodes[2].signrawtransaction(fundedrawtx)['hex']
self.nodes[2].sendrawtransaction(signedrawtx)
self.nodes[2].generatetoaddress(1, votingkey)
sync_blocks(self.nodes)
time.sleep(3)
# switch to vote 400
rawtx=self.nodes[2].createrawtransaction([],{vote_str_400:0,'6ac1':0.1})
rawtx = "08" + rawtx[2:]
fundedrawtx=self.nodes[2].fundrawtransaction(rawtx,{'changeAddress':votingkey})['hex']
signedrawtx=self.nodes[2].signrawtransaction(fundedrawtx)['hex']
self.nodes[2].sendrawtransaction(signedrawtx)
self.nodes[2].generatetoaddress(1, votingkey)
sync_blocks(self.nodes)
self.stake_block(self.nodes[1], False)
self.stake_block(self.nodes[1], False)
sync_blocks(self.nodes)
time.sleep(3)
assert_equal(self.nodes[0].getconsultation(consultation_hash)["answers"][0]['400'], 2)
assert_equal(self.nodes[0].getconsultation(consultation_hash)['abstain'], 1)
# switch to vote 500
rawtx=self.nodes[2].createrawtransaction([],{voterm_str:0,'6ac1':0.1})
rawtx = "08" + rawtx[2:]
fundedrawtx=self.nodes[2].fundrawtransaction(rawtx,{'changeAddress':votingkey})['hex']
signedrawtx=self.nodes[2].signrawtransaction(fundedrawtx)['hex']
self.nodes[2].sendrawtransaction(signedrawtx)
rawtx=self.nodes[2].createrawtransaction([],{vote_str_500:0,'6ac1':0.1})
rawtx = "08" + rawtx[2:]
fundedrawtx=self.nodes[2].fundrawtransaction(rawtx,{'changeAddress':votingkey})['hex']
signedrawtx=self.nodes[2].signrawtransaction(fundedrawtx)['hex']
self.nodes[2].sendrawtransaction(signedrawtx)
self.nodes[2].generatetoaddress(1, votingkey)
sync_blocks(self.nodes)
self.stake_block(self.nodes[1], False)
self.stake_block(self.nodes[1], False)
sync_blocks(self.nodes)
time.sleep(3)
assert_equal(self.nodes[0].getconsultation(consultation_hash)["answers"][0]['500'], 2)
assert_equal(self.nodes[0].getconsultation(consultation_hash)["answers"][0]['400'], 2)
assert_equal(self.nodes[0].getconsultation(consultation_hash)['abstain'], 1)
def stake_block(self, node, mature = True):
# Get the current block count to check against while we wait for a stake
blockcount = node.getblockcount()
# Turn staking on
node.staking(True)
# wait for a new block to be mined
while node.getblockcount() == blockcount:
#print("waiting for a new block...")
time.sleep(1)
# We got one
#print("found a new block...")
# Turn staking off
node.staking(False)
# Get the staked block
block_hash = node.getbestblockhash()
# Only mature the blocks if we asked for it
if (mature):
# Make sure the blocks are mature before we check the report
slow_gen(node, 5, 0.5)
self.sync_all()
# return the block hash to the function caller
return block_hash
if __name__ == '__main__':
LightVotingTest().main()
| mit | 2,637,631,453,742,712,000 | 42.07489 | 128 | 0.638372 | false | 3.240968 | true | false | false |
BitFunnel/sigir2017-bitfunnel | scripts/mike.py | 1 | 14438 | from build_filtered_chunks import process_chunk_list
from experiment import Experiment
from innovations import measure_innovations, analyze_innovations
from latex import latex_corpora, latex_performance
###########################################################################
#
# Experiments
#
###########################################################################
experiment_windows_273_150_100 = Experiment(
# Paths to tools
r"D:\git\BitFunnel\build-msvc\tools\BitFunnel\src\Release\BitFunnel.exe",
r"D:\git\mg4j-workbench",
r"/home/mhop/git/partitioned_elias_fano/bin",
# The directory containing all indexes and the basename for this index
r"D:\temp\indexes",
100,
150,
r"273-150-100",
# The directory with the gov2 chunks and the regular expression pattern
# used to determine which chunks will be used for this experiment.
r"d:\sigir\chunks-100-150",
r"GX.*", # Use all chunks
# The query log to be used for this experiment.
r"D:\sigir\queries\06.efficiency_topics.all",
# BitFunnel density
0.15,
# Min and max thread counts
8,
1,
8
)
experiment_windows_273_64_127 = Experiment(
# Paths to tools
r"D:\git\BitFunnel\build-msvc\tools\BitFunnel\src\Release\BitFunnel.exe",
r"D:\git\mg4j-workbench",
r"/home/mhop/git/partitioned_elias_fano/bin",
# The directory containing all indexes and the basename for this index
r"D:\temp\indexes",
64,
127,
r"273-64-127",
# The directory with the gov2 chunks and the regular expression pattern
# used to determine which chunks will be used for this experiment.
r"d:\sigir\chunks-64-127",
r"GX.*", # Use all chunks
# The query log to be used for this experiment.
r"D:\sigir\queries\06.efficiency_topics.all",
# BitFunnel density
0.15,
# Min and max thread counts
8,
1,
8
)
experiment_windows_273_128_255 = Experiment(
# Paths to tools
r"D:\git\BitFunnel\build-msvc\tools\BitFunnel\src\Release\BitFunnel.exe",
r"D:\git\mg4j-workbench",
r"/home/mhop/git/partitioned_elias_fano/bin",
# The directory containing all indexes and the basename for this index
r"D:\temp\indexes",
128,
255,
r"273_128_255",
# The directory with the gov2 chunks and the regular expression pattern
# used to determine which chunks will be used for this experiment.
r"d:\sigir\chunks-128-255",
r"GX.*", # Use all chunks
# The query log to be used for this experiment.
r"D:\sigir\queries\06.efficiency_topics.all",
# BitFunnel density
0.15,
# Min and max thread counts
8,
1,
8
)
experiment_windows_273_256_511 = Experiment(
# Paths to tools
r"D:\git\BitFunnel\build-msvc\tools\BitFunnel\src\Release\BitFunnel.exe",
r"D:\git\mg4j-workbench",
r"/home/mhop/git/partitioned_elias_fano/bin",
# The directory containing all indexes and the basename for this index
r"D:\temp\indexes",
256,
511,
r"273-256-511",
# The directory with the gov2 chunks and the regular expression pattern
# used to determine which chunks will be used for this experiment.
r"d:\sigir\chunks-256-511",
r"GX.*", # Use all chunks
# The query log to be used for this experiment.
r"D:\sigir\queries\06.efficiency_topics.all",
# BitFunnel density
0.15,
# Min and max thread counts
8,
1,
8
)
experiment_windows_273_1000_1500 = Experiment(
# Paths to tools
r"D:\git\BitFunnel\build-msvc\tools\BitFunnel\src\Release\BitFunnel.exe",
r"D:\git\mg4j-workbench",
r"/home/mhop/git/partitioned_elias_fano/bin",
# The directory containing all indexes and the basename for this index
r"D:\temp\indexes",
1000,
1500,
r"273-1000-1500",
# The directory with the gov2 chunks and the regular expression pattern
# used to determine which chunks will be used for this experiment.
r"d:\sigir\chunks-1000-1500",
r"GX.*", # Use all chunks
# The query log to be used for this experiment.
r"D:\sigir\queries\06.efficiency_topics.all",
# BitFunnel density
0.15,
# Min and max thread counts
8,
1,
8
)
experiment_windows_273_1024_2047 = Experiment(
# Paths to tools
r"D:\git\BitFunnel\build-msvc\tools\BitFunnel\src\Release\BitFunnel.exe",
r"D:\git\mg4j-workbench",
r"/home/mhop/git/partitioned_elias_fano/bin",
# The directory containing all indexes and the basename for this index
r"D:\temp\indexes",
1024,
2047,
r"273-1024-2047",
# The directory with the gov2 chunks and the regular expression pattern
# used to determine which chunks will be used for this experiment.
r"d:\sigir\chunks-1024-2047",
r"GX.*", # Use all chunks
# The query log to be used for this experiment.
r"D:\sigir\queries\06.efficiency_topics.all",
# BitFunnel density
0.15,
# Min and max thread counts
8,
1,
8
)
experiment_windows_273_2048_4095 = Experiment(
# Paths to tools
r"D:\git\BitFunnel\build-msvc\tools\BitFunnel\src\Release\BitFunnel.exe",
r"D:\git\mg4j-workbench",
r"/home/mhop/git/partitioned_elias_fano/bin",
# The directory containing all indexes and the basename for this index
r"D:\temp\indexes",
2048,
4095,
r"273-2048-4095",
# The directory with the gov2 chunks and the regular expression pattern
# used to determine which chunks will be used for this experiment.
r"d:\sigir\chunks-2048-4095",
r"GX.*", # Use all chunks
# The query log to be used for this experiment.
r"D:\sigir\queries\06.efficiency_topics.all",
# BitFunnel density
0.15,
# Min and max thread counts
8,
1,
8
)
experiment_linux_273_64_127 = Experiment(
# Paths to tools
r"/home/mhop/git/BitFunnel/build-make/tools/BitFunnel/src/BitFunnel",
r"/home/mhop/git/mg4j-workbench",
r"/home/mhop/git/partitioned_elias_fano/bin",
# The directory containing all indexes and the basename for this index
r"/mnt/d/temp/indexes",
64,
127,
r"273-64-127",
# The directory with the gov2 chunks and the regular expression pattern
# used to determine which chunks will be used for this experiment.
r"/mnt/d/sigir/chunks-64-127",
r"GX.*", # Use all chunks
# The query log to be used for this experiment.
r"/mnt/d/sigir/queries/06.efficiency_topics.all",
# BitFunnel density
0.15,
# Min and max thread counts
8,
1,
8
)
experiment_linux_273_128_255 = Experiment(
# Paths to tools
r"/home/mhop/git/BitFunnel/build-make/tools/BitFunnel/src/BitFunnel",
r"/home/mhop/git/mg4j-workbench",
r"/home/mhop/git/partitioned_elias_fano/bin",
# The directory containing all indexes and the basename for this index
r"/mnt/d/temp/indexes",
128,
255,
r"273_128_255",
# The directory with the gov2 chunks and the regular expression pattern
# used to determine which chunks will be used for this experiment.
r"/mnt/d/sigir/chunks-128-255",
r"GX.*", # Use all chunks
# The query log to be used for this experiment.
r"/mnt/d/sigir/queries/06.efficiency_topics.all",
# BitFunnel density
0.15,
# Min and max thread counts
8,
1,
8
)
experiment_linux_273_256_511 = Experiment(
# Paths to tools
r"/home/mhop/git/BitFunnel/build-make/tools/BitFunnel/src/BitFunnel",
r"/home/mhop/git/mg4j-workbench",
r"/home/mhop/git/partitioned_elias_fano/bin",
# The directory containing all indexes and the basename for this index
r"/mnt/d/temp/indexes",
256,
511,
r"273-256-511",
# The directory with the gov2 chunks and the regular expression pattern
# used to determine which chunks will be used for this experiment.
r"/mnt/d/sigir/chunks-256-511",
r"GX.*", # Use all chunks
# The query log to be used for this experiment.
r"/mnt/d/sigir/queries/06.efficiency_topics.all",
# BitFunnel density
0.15,
# Min and max thread counts
8,
1,
8
)
experiment_linux_273_1024_2047 = Experiment(
# Paths to tools
r"/home/mhop/git/BitFunnel/build-make/tools/BitFunnel/src/BitFunnel",
r"/home/mhop/git/mg4j-workbench",
r"/home/mhop/git/partitioned_elias_fano/bin",
# The directory containing all indexes and the basename for this index
r"/mnt/d/temp/indexes",
1024,
2047,
r"273-1024-2047",
# The directory with the gov2 chunks and the regular expression pattern
# used to determine which chunks will be used for this experiment.
r"/mnt/d/sigir/chunks-100-150",
r"GX.*", # Use all chunks
# The query log to be used for this experiment.
r"/mnt/d/sigir/queries/06.efficiency_topics.all",
# BitFunnel density
0.15,
# Min and max thread counts
8,
1,
8
)
experiment_linux_273_2048_4095 = Experiment(
# Paths to tools
r"/home/mhop/git/BitFunnel/build-make/tools/BitFunnel/src/BitFunnel",
r"/home/mhop/git/mg4j-workbench",
r"/home/mhop/git/partitioned_elias_fano/bin",
# The directory containing all indexes and the basename for this index
r"/mnt/d/temp/indexes",
2048,
4095,
r"273-2048-4095",
# The directory with the gov2 chunks and the regular expression pattern
# used to determine which chunks will be used for this experiment.
r"/mnt/d/sigir/chunks-2048-4095",
r"GX.*", # Use all chunks
# The query log to be used for this experiment.
r"/mnt/d/sigir/queries/06.efficiency_topics.all",
# BitFunnel density
0.15,
# Min and max thread counts
8,
1,
8
)
def runxxx(experiment):
pass
# experiment.fix_query_log()
# experiment.build_chunk_manifest()
#
# # Must build the mg4j index before filtering the query log
# # Must also build mg4j before building PEF which takes the MG4J export
# # as input.
# experiment.build_mg4j_index()
#
# # Build the other indexes at this point
# experiment.build_bf_index()
# experiment.build_lucene_index()
# experiment.build_pef_collection()
# # # experiment.build_pef_index()
# #
# # Must filter the query log before running any queries.
# experiment.filter_query_log()
#
# # Now we're ready to run queries.
#
# experiment.run_bf_queries()
# experiment.run_lucene_queries()
# experiment.run_mg4j_queries()
# # experiment.run_pef_queries()
# experiment.summarize(7)
# print()
def run_windows(experiment):
experiment.run_bf_queries()
experiment.run_lucene_queries()
experiment.run_mg4j_queries()
def run_linux(experiment):
experiment.run_pef_queries()
def linux(experiment):
experiment.build_pef_index()
experiment.run_pef_queries()
def finish(experiment):
experiment.summarize(7)
process_chunk_list(r"d:\data\gov2",
"*",
r"d:\temp\chunks",
r"D:\git\mg4j-workbench",
r"D:\git\BitFunnel\build-msvc\tools\BitFunnel\src\Release\BitFunnel.exe",
0,
100000,
8)
# process_chunk_list(r"d:\data\gov2",
# r"d:\temp\chunks",
# r"D:\git\mg4j-workbench",
# r"D:\git\BitFunnel\build-msvc\tools\BitFunnel\src\Release\BitFunnel.exe",
# 64,
# 127,
# 8)
# process_chunk_list(r"d:\data\gov2",
# r"d:\temp\chunks",
# r"D:\git\mg4j-workbench",
# r"D:\git\BitFunnel\build-msvc\tools\BitFunnel\src\Release\BitFunnel.exe",
# 512,
# 1023,
# 8)
# process_chunk_list(r"d:\data\gov2",
# r"d:\temp\chunks",
# r"D:\git\mg4j-workbench",
# r"D:\git\BitFunnel\build-msvc\tools\BitFunnel\src\Release\BitFunnel.exe",
# 2048,
# 4095,
# 7)
# process_chunk_list(r"d:\data\gov2",
# r"d:\temp\chunks",
# r"D:\git\mg4j-workbench",
# r"D:\git\BitFunnel\build-msvc\tools\BitFunnel\src\Release\BitFunnel.exe",
# 1024,
# 2047,
# 7)
# process_chunk_list(r"/home/danluu/dev/gov2",
# r"/home/danluu/dev/what-is-this",
# r"/home/danluu/dev/mg4j-workbench",
# r"/home/danluu/dev/BitFunnel/build-ninja/tools/BitFunnel/src/BitFunnel",
# 128,
# 255,
# 7)
# runxxx(experiment_windows_273_64_127)
# runxxx(experiment_windows_273_128_255)
# runxxx(experiment_windows_273_150_100)
# runxxx(experiment_windows_273_1000_1500)
# runxxx(experiment_windows_273_1024_2047)
# runxxx(experiment_linux_273_1024_2047)
# runxxx(experiment_windows_273_256_511)
# linux(experiment_linux_273_256_511)
# runxxx(experiment_windows_273_2048_4095)
# linux(experiment_linux_273_2048_4095)
# finish(experiment_windows_273_2048_4095)
# print()
# runxxx(experiment_windows_273_64_127)
# linux(experiment_linux_273_64_127)
# experiment_windows_273_64_127.run_lucene_queries()
# finish(experiment_windows_273_64_127)
def run_innovations(experiments):
labels = ["BSS", "BSS-FC", "BTFNL"]
treatments = ["ClassicBitsliced", "PrivateSharedRank0", "Optimal"]
densities = [0.05, 0.10, 0.15, 0.20, 0.25, 0.3, 0.35]
# for experiment in experiments:
# measure_innovations(experiment, treatments, densities)
for experiment in experiments:
analyze_innovations(experiment, labels, treatments, densities)
experiments = [
experiment_windows_273_64_127,
experiment_windows_273_128_255,
experiment_windows_273_256_511,
experiment_windows_273_1024_2047,
experiment_windows_273_2048_4095]
# latex_corpora(experiments)
# latex_performance(experiments)
# run_innovations(experiments)
# run_windows(experiment_windows_273_64_127)
# run_windows(experiment_windows_273_128_255)
# run_windows(experiment_windows_273_1024_2047)
# run_windows(experiment_windows_273_2048_4095)
# run_linux(experiment_linux_273_64_127)
# run_linux(experiment_linux_273_128_255)
# run_linux(experiment_linux_273_1024_2047)
# run_linux(experiment_linux_273_2048_4095)
| mit | 7,337,326,349,382,600,000 | 26.190207 | 94 | 0.635753 | false | 3.22709 | false | false | false |
kbytesys/pynagmailplugins | setup.py | 1 | 1136 | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name='pynagmailplugins',
packages=find_packages(exclude=['docs', 'samples', 'tests']),
namespace_packages=['snowpenguin'],
version='0.0.8',
scripts=['bin/check_mailq.py', 'bin/pydnswl_check.py'],
install_requires=[
'nagiosplugin>=1.2',
],
description='Nagios plugins that detect unusual mail flow.',
author='Andrea Briganti',
author_email='[email protected]',
url='https://github.com/kbytesys/pynagmailplugins',
download_url='https://github.com/kbytesys/pynagmailplugins/tarball/v0.0.8',
keywords=['nagios', 'systemd', 'postfix', 'mail'],
license='GNU GPL v2',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Plugins',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'Operating System :: POSIX :: Linux',
'Topic :: System :: Networking :: Monitoring'
],
)
| gpl-2.0 | -2,357,526,657,243,053,600 | 38.172414 | 79 | 0.629401 | false | 3.72459 | false | true | false |
Jumpscale/jumpscale_core8 | lib/JumpScale/clients/racktivity/energyswitch/modelfactory/models/common/Power_0_0_5_29.py | 1 | 5333 | from JumpScale.clients.racktivity.energyswitch.common.GUIDTable import Value
from JumpScale.clients.racktivity.energyswitch.modelfactory.models.common.Power_0_0_5_20 import Model as Power
import struct
import time
class Model(Power):
def __init__(self, parent):
super(Model, self).__init__(parent)
self._guidTable.update({
50: Value(u"type='TYPE_UNSIGNED_NUMBER'\nsize=2\nlength=2\nunit='%'\nscale=1")
})
self._pointerGuids = [
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 8),
(7, 8),
(8, 8),
(9, 8),
(10, 8),
(11, 1),
(24, 1),
(31, 8),
(50, 8),
(5000, 8),
(5001, 8),
(5002, 1),
(5003, 1),
(5004, 1),
(5005, 1),
(5006, 1),
(5007, 1),
(5010, 8),
(5011, 8),
(5012, 8),
(5013, 8),
(5014, 1),
(5015, 1),
(5016, 1),
(5017, 1),
(15, 8),
(16, 8),
(17, 1),
(18, 1),
(19, 1),
(20, 1),
(21, 1),
(22, 1)
]
# Attribute 'THD' GUID 50 Data type TYPE_UNSIGNED_NUMBER
# Total Harmonic Distortion
def getTHD(self, moduleID, portnumber=1):
guid = 50
length = 1
valDef = self._guidTable[guid]
data = self._parent.client.getAttribute(
moduleID, guid, portnumber, length)
return self._parent.getObjectFromData(data, valDef, count=length)
def getOscilloscopeTimeData(self, moduleID, portnumber=1):
Ioffset = 258
result = {'voltage': [], 'current': []}
# Get 516 bytes of raw data from device:
rawData = self._parent.client.getOscData(
module=moduleID, outlet=portnumber, dataType="T")
if b'failed' in rawData:
time.sleep(0.1)
rawData = self._parent.client.getOscData(
module=moduleID, outlet=portnumber, dataType="T")
if len(rawData) < 516:
# something is wrong, not enough data
return (101, rawData)
# Extracting values from raw binary data:
voltageCalibration = float(
(struct.unpack('<H', rawData[:2]))[0]) / 12800.0
voltageValues = struct.unpack('<256b', rawData[2:Ioffset])
# the current values is returned in miliampers
currentCalibration = float(
(struct.unpack('<H', rawData[Ioffset:Ioffset + 2]))[0]) / 128.0
currentValues = struct.unpack(
'<256b', rawData[Ioffset + 2:2 * Ioffset])
# Calculate the values based on calibration:
for i in range(256):
result['voltage'].append(voltageValues[i] * voltageCalibration)
result['current'].append(currentValues[i] * currentCalibration)
return (0, result)
def getOscilloscopeFrequencyData(self, moduleID, portnumber=1, dataType="current"): # pylint: disable=W0221
result = {
'current': {'amplitudes': [], 'phases': []},
'voltage': {'amplitudes': [], 'phases': []}
}
dataType = "FC" if dataType == "current" else "FV"
numSamples = 64
rawData = self._parent.client.getOscData(
module=moduleID, outlet=portnumber, dataType=dataType)
if b'failed' in rawData:
time.sleep(0.1)
rawData = self._parent.client.getOscData(
module=moduleID, outlet=portnumber, dataType=dataType)
if len(rawData) < 516:
# something is wrong, not enough data
return (101, rawData)
if dataType == "FC":
# Calculate the values based on calibration:
currentCalibration = float(
(struct.unpack('<H', rawData[:2]))[0]) / 4096.0 / 1000
for i in range(6, 2 + 4 * numSamples, 4): # do not take DC (0th harmonic)
currentAmplitude = struct.unpack('<H', rawData[i:i + 2])[0]
result['current']['amplitudes'].append(
currentAmplitude * currentCalibration)
                # if the first harmonic is below 0.01 A it makes no sense to read further,
                # as on zero load there will be no useful information
if len(result['current']['amplitudes']) == 1 and result['current']['amplitudes'][0] < 0.01:
return (100, None)
result['current']['phases'].append(
struct.unpack('<h', rawData[i + 2:i + 4])[0])
else:
length = 256
VOffset = 2 + length
voltageCalibration = float(
(struct.unpack('<H', rawData[VOffset:VOffset + 2]))[0]) * 10 / 4096.0 / 1000
# Calculate the values based on calibration:
# do not take DC (0th harmonic)
for i in range(VOffset + 6, VOffset + 4 * numSamples, 4):
result['voltage']['amplitudes'].append(struct.unpack(
'<H', rawData[i:i + 2])[0] * voltageCalibration)
result['voltage']['phases'].append(
struct.unpack('<h', rawData[i + 2:i + 4])[0])
return (0, result)
| apache-2.0 | -3,109,403,066,861,927,000 | 35.77931 | 112 | 0.510032 | false | 3.75828 | false | false | false |
ycdivfx/YCDIVFX_MaxPlus | packages/maxfb/fbauth.py | 2 | 1074 | import urlparse
import BaseHTTPServer
import webbrowser
ACCESS_TOKEN = None
auth_url = 'http://www.youcandoitvfx.com/fb/'
server_host = '127.0.0.1'
server_port = 80
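# Summary (added for clarity): getAccessToken() opens auth_url in the default browser,
# serves HTTP on server_host:server_port, and blocks until a redirect arrives whose
# query string carries an access_token parameter, which is then returned.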
class MyHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
global ACCESS_TOKEN
self.send_response(301)
self.send_header('Location', auth_url + 'close.html')
self.end_headers()
parsed_path = urlparse.urlparse(self.path)
try:
params = dict([p.split('=') for p in parsed_path[4].split('&')])
        except Exception:
params = {}
_access_token = params.get('access_token', 'error')
if (_access_token != 'error') and (len(_access_token) != 0):
ACCESS_TOKEN = _access_token
def getAccessToken():
global ACCESS_TOKEN
ACCESS_TOKEN = None
server_class = BaseHTTPServer.HTTPServer
httpd = server_class((server_host, server_port), MyHandler)
webbrowser.open(auth_url)
while ACCESS_TOKEN is None:
httpd.handle_request()
httpd.server_close()
return ACCESS_TOKEN | gpl-2.0 | 7,701,950,246,700,173,000 | 24.595238 | 76 | 0.632216 | false | 3.628378 | false | false | false |
cdvv7788/pinax-blog | pinax/blog/templatetags/pinax_blog_tags.py | 2 | 2206 | from django import template
from ..models import Post
from ..conf import settings
register = template.Library()
class LatestBlogPostsNode(template.Node):
def __init__(self, context_var):
self.context_var = context_var
def render(self, context):
latest_posts = Post.objects.current()[:5]
context[self.context_var] = latest_posts
return ""
@register.tag
def latest_blog_posts(parser, token):
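    """
    Usage (mirroring the other tags in this module): {% latest_blog_posts as latest_posts %}
    """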
bits = token.split_contents()
return LatestBlogPostsNode(bits[2])
class LatestBlogPostNode(template.Node):
def __init__(self, context_var):
self.context_var = context_var
def render(self, context):
try:
latest_post = Post.objects.current()[0]
except IndexError:
latest_post = None
context[self.context_var] = latest_post
return ""
@register.tag
def latest_blog_post(parser, token):
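    """
    Usage (mirroring the other tags in this module): {% latest_blog_post as latest_post %}
    """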
bits = token.split_contents()
return LatestBlogPostNode(bits[2])
class LatestSectionPostNode(template.Node):
def __init__(self, section, context_var):
self.section = template.Variable(section)
self.context_var = context_var
def render(self, context):
section = self.section.resolve(context)
post = Post.objects.section(section, queryset=Post.objects.current())
try:
post = post[0]
except IndexError:
post = None
context[self.context_var] = post
return ""
@register.tag
def latest_section_post(parser, token):
"""
{% latest_section_post "articles" as latest_article_post %}
"""
bits = token.split_contents()
return LatestSectionPostNode(bits[1], bits[3])
class BlogSectionsNode(template.Node):
def __init__(self, context_var):
self.context_var = context_var
def render(self, context):
sections = [(settings.PINAX_BLOG_ALL_SECTION_NAME, "All")]
sections += settings.PINAX_BLOG_SECTIONS
context[self.context_var] = sections
return ""
@register.tag
def blog_sections(parser, token):
"""
{% blog_sections as blog_sections %}
"""
bits = token.split_contents()
return BlogSectionsNode(bits[2])
| mit | 3,295,916,318,030,949,000 | 23.241758 | 77 | 0.635539 | false | 3.810017 | true | false | false |
orlandov/parpg-game | tests/test_objects_base.py | 1 | 2351 | #!/usr/bin/python
# This file is part of PARPG.
# PARPG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# PARPG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with PARPG. If not, see <http://www.gnu.org/licenses/>.
import unittest
from scripts.objects.base import *
class TestObjectsBase(unittest.TestCase):
def testWildcard(self):
class Wildcard (GameObject, Lockable, Container, Living, Scriptable,
CharStats, Wearable, Usable, Weapon, Destructable,
Trappable, Carryable, ):
def __init__ (self, ID, *args, **kwargs):
self.name = 'All-purpose carry-all'
self.text = 'What is this? I dont know'
GameObject. __init__( self, ID, **kwargs )
Lockable. __init__( self, **kwargs )
Container. __init__( self, **kwargs )
Living. __init__( self, **kwargs )
Scriptable. __init__( self, **kwargs )
CharStats. __init__( self, **kwargs )
Wearable. __init__( self, **kwargs )
Usable. __init__( self, **kwargs )
Weapon. __init__( self, **kwargs )
Destructable.__init__( self, **kwargs )
Trappable. __init__( self, **kwargs )
Carryable. __init__( self, **kwargs )
wc = Wildcard (2)
# TODO: need to fill the rest of these tests out
attrs = dict(
is_openable = True,
is_open = True,
is_lockable = True,
locked = False,
is_carryable = True,
weight = 1.0,
is_container = True,
items = [],
is_living = True,
is_scriptable = True
)
for attr in attrs:
self.assertEqual(getattr(wc, attr), attrs[attr])
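# Added so the test module can also be run directly (the import above already assumes
# scripts.objects.base is on the path):
if __name__ == '__main__':
    unittest.main()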
| gpl-3.0 | -4,334,747,343,514,364,400 | 38.183333 | 76 | 0.547852 | false | 4.088696 | true | false | false |
cliftonmcintosh/openstates | openstates/ia/__init__.py | 1 | 3453 | import re
import datetime
import lxml.html
import requests
from billy.utils.fulltext import text_after_line_numbers
from .bills import IABillScraper
from .legislators import IALegislatorScraper
from .events import IAEventScraper
from .votes import IAVoteScraper
try:
# Silencing unverified HTTPS request warnings.
requests.packages.urllib3.disable_warnings()
except AttributeError:
pass
settings = dict(SCRAPELIB_TIMEOUT=240)
metadata = dict(
name = 'Iowa',
abbreviation = 'ia',
capitol_timezone = 'America/Chicago',
legislature_name = 'Iowa General Assembly',
legislature_url = 'https://www.legis.iowa.gov/',
chambers = {
'upper': {'name': 'Senate', 'title': 'Senator'},
'lower': {'name': 'House', 'title': 'Representative'},
},
terms = [
{
'name': '2011-2012',
'start_year': 2011,
'end_year': 2012,
'sessions': ['2011-2012'],
},
{
'name': '2013-2014',
'start_year': 2013,
'end_year': 2014,
'sessions': ['2013-2014'],
},
{
'name': '2015-2016',
'start_year': 2015,
'end_year': 2016,
'sessions': ['2015-2016'],
},
{
'name': '2017-2018',
'start_year': 2017,
'end_year': 2018,
'sessions': ['2017-2018'],
},
],
session_details = {
'2011-2012': {
'display_name': '2011-2012 Regular Session',
'_scraped_name': 'General Assembly: 84',
'number': '84',
'start_date': datetime.date(2011, 1, 10),
'end_date': datetime.date(2013, 1, 13),
},
'2013-2014': {
'display_name': '2013-2014 Regular Session',
'_scraped_name': 'General Assembly: 85',
'number': '85',
},
'2015-2016': {
'display_name': '2015-2016 Regular Session',
'_scraped_name': 'General Assembly: 86',
'number': '86',
},
'2017-2018': {
'display_name': '2017-2018 Regular Session',
'_scraped_name': 'General Assembly: 87',
'number': '87',
},
},
feature_flags = ['events', 'influenceexplorer'],
_ignored_scraped_sessions = [
'Legislative Assembly: 86',
'General Assembly: 83',
'General Assembly: 82',
'General Assembly: 81',
'General Assembly: 80',
'General Assembly: 79',
'General Assembly: 79',
'General Assembly: 78',
'General Assembly: 78',
'General Assembly: 77',
'General Assembly: 77',
'General Assembly: 76',
]
)
def session_list():
def url_xpath(url, path):
doc = lxml.html.fromstring(requests.get(url, verify=False).text)
return doc.xpath(path)
sessions = url_xpath(
'https://www.legis.iowa.gov/legislation/findLegislation',
"//section[@class='grid_6']//li/a/text()[normalize-space()]"
)
sessions = [x[0] for x in filter(lambda x: x != [], [
re.findall(r'^.*Assembly: [0-9]+', session)
for session in sessions
])]
return sessions
def extract_text(doc, data):
doc = lxml.html.fromstring(data)
text = doc.xpath('//pre')[0].text_content()
# strip two sets of line numbers
return text_after_line_numbers(text_after_line_numbers(text))
| gpl-3.0 | -6,124,341,632,348,928,000 | 28.016807 | 72 | 0.536056 | false | 3.627101 | false | false | false |
teddygdev/CarTrackingComputer | backupdec3/python_games/fourinarow.py | 7 | 13080 | # Four-In-A-Row (a Connect Four clone)
# By Al Sweigart [email protected]
# http://inventwithpython.com/pygame
# Released under a "Simplified BSD" license
import random, copy, sys, pygame
from pygame.locals import *
BOARDWIDTH = 7 # how many spaces wide the board is
BOARDHEIGHT = 6 # how many spaces tall the board is
assert BOARDWIDTH >= 4 and BOARDHEIGHT >= 4, 'Board must be at least 4x4.'
DIFFICULTY = 2 # how many moves to look ahead. (>2 is usually too much)
SPACESIZE = 50 # size of the tokens and individual board spaces in pixels
FPS = 30 # frames per second to update the screen
WINDOWWIDTH = 640 # width of the program's window, in pixels
WINDOWHEIGHT = 480 # height in pixels
XMARGIN = int((WINDOWWIDTH - BOARDWIDTH * SPACESIZE) / 2)
YMARGIN = int((WINDOWHEIGHT - BOARDHEIGHT * SPACESIZE) / 2)
BRIGHTBLUE = (0, 50, 255)
WHITE = (255, 255, 255)
BGCOLOR = BRIGHTBLUE
TEXTCOLOR = WHITE
RED = 'red'
BLACK = 'black'
EMPTY = None
HUMAN = 'human'
COMPUTER = 'computer'
def main():
global FPSCLOCK, DISPLAYSURF, REDPILERECT, BLACKPILERECT, REDTOKENIMG
global BLACKTOKENIMG, BOARDIMG, ARROWIMG, ARROWRECT, HUMANWINNERIMG
global COMPUTERWINNERIMG, WINNERRECT, TIEWINNERIMG
pygame.init()
FPSCLOCK = pygame.time.Clock()
DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
pygame.display.set_caption('Four in a Row')
REDPILERECT = pygame.Rect(int(SPACESIZE / 2), WINDOWHEIGHT - int(3 * SPACESIZE / 2), SPACESIZE, SPACESIZE)
BLACKPILERECT = pygame.Rect(WINDOWWIDTH - int(3 * SPACESIZE / 2), WINDOWHEIGHT - int(3 * SPACESIZE / 2), SPACESIZE, SPACESIZE)
REDTOKENIMG = pygame.image.load('4row_red.png')
REDTOKENIMG = pygame.transform.smoothscale(REDTOKENIMG, (SPACESIZE, SPACESIZE))
BLACKTOKENIMG = pygame.image.load('4row_black.png')
BLACKTOKENIMG = pygame.transform.smoothscale(BLACKTOKENIMG, (SPACESIZE, SPACESIZE))
BOARDIMG = pygame.image.load('4row_board.png')
BOARDIMG = pygame.transform.smoothscale(BOARDIMG, (SPACESIZE, SPACESIZE))
HUMANWINNERIMG = pygame.image.load('4row_humanwinner.png')
COMPUTERWINNERIMG = pygame.image.load('4row_computerwinner.png')
TIEWINNERIMG = pygame.image.load('4row_tie.png')
WINNERRECT = HUMANWINNERIMG.get_rect()
WINNERRECT.center = (int(WINDOWWIDTH / 2), int(WINDOWHEIGHT / 2))
ARROWIMG = pygame.image.load('4row_arrow.png')
ARROWRECT = ARROWIMG.get_rect()
ARROWRECT.left = REDPILERECT.right + 10
ARROWRECT.centery = REDPILERECT.centery
isFirstGame = True
while True:
runGame(isFirstGame)
isFirstGame = False
def runGame(isFirstGame):
if isFirstGame:
# Let the computer go first on the first game, so the player
# can see how the tokens are dragged from the token piles.
turn = COMPUTER
showHelp = True
else:
# Randomly choose who goes first.
if random.randint(0, 1) == 0:
turn = COMPUTER
else:
turn = HUMAN
showHelp = False
# Set up a blank board data structure.
mainBoard = getNewBoard()
while True: # main game loop
if turn == HUMAN:
# Human player's turn.
getHumanMove(mainBoard, showHelp)
if showHelp:
# turn off help arrow after the first move
showHelp = False
if isWinner(mainBoard, RED):
winnerImg = HUMANWINNERIMG
break
turn = COMPUTER # switch to other player's turn
else:
# Computer player's turn.
column = getComputerMove(mainBoard)
animateComputerMoving(mainBoard, column)
makeMove(mainBoard, BLACK, column)
if isWinner(mainBoard, BLACK):
winnerImg = COMPUTERWINNERIMG
break
turn = HUMAN # switch to other player's turn
if isBoardFull(mainBoard):
# A completely filled board means it's a tie.
winnerImg = TIEWINNERIMG
break
while True:
# Keep looping until player clicks the mouse or quits.
drawBoard(mainBoard)
DISPLAYSURF.blit(winnerImg, WINNERRECT)
pygame.display.update()
FPSCLOCK.tick()
for event in pygame.event.get(): # event handling loop
if event.type == QUIT or (event.type == KEYUP and event.key == K_ESCAPE):
pygame.quit()
sys.exit()
elif event.type == MOUSEBUTTONUP:
return
def makeMove(board, player, column):
lowest = getLowestEmptySpace(board, column)
if lowest != -1:
board[column][lowest] = player
def drawBoard(board, extraToken=None):
DISPLAYSURF.fill(BGCOLOR)
# draw tokens
spaceRect = pygame.Rect(0, 0, SPACESIZE, SPACESIZE)
for x in range(BOARDWIDTH):
for y in range(BOARDHEIGHT):
spaceRect.topleft = (XMARGIN + (x * SPACESIZE), YMARGIN + (y * SPACESIZE))
if board[x][y] == RED:
DISPLAYSURF.blit(REDTOKENIMG, spaceRect)
elif board[x][y] == BLACK:
DISPLAYSURF.blit(BLACKTOKENIMG, spaceRect)
# draw the extra token
if extraToken != None:
if extraToken['color'] == RED:
DISPLAYSURF.blit(REDTOKENIMG, (extraToken['x'], extraToken['y'], SPACESIZE, SPACESIZE))
elif extraToken['color'] == BLACK:
DISPLAYSURF.blit(BLACKTOKENIMG, (extraToken['x'], extraToken['y'], SPACESIZE, SPACESIZE))
# draw board over the tokens
for x in range(BOARDWIDTH):
for y in range(BOARDHEIGHT):
spaceRect.topleft = (XMARGIN + (x * SPACESIZE), YMARGIN + (y * SPACESIZE))
DISPLAYSURF.blit(BOARDIMG, spaceRect)
# draw the red and black tokens off to the side
DISPLAYSURF.blit(REDTOKENIMG, REDPILERECT) # red on the left
DISPLAYSURF.blit(BLACKTOKENIMG, BLACKPILERECT) # black on the right
def getNewBoard():
board = []
for x in range(BOARDWIDTH):
board.append([EMPTY] * BOARDHEIGHT)
return board
def getHumanMove(board, isFirstMove):
draggingToken = False
tokenx, tokeny = None, None
while True:
for event in pygame.event.get(): # event handling loop
if event.type == QUIT:
pygame.quit()
sys.exit()
elif event.type == MOUSEBUTTONDOWN and not draggingToken and REDPILERECT.collidepoint(event.pos):
# start of dragging on red token pile.
draggingToken = True
tokenx, tokeny = event.pos
elif event.type == MOUSEMOTION and draggingToken:
# update the position of the red token being dragged
tokenx, tokeny = event.pos
elif event.type == MOUSEBUTTONUP and draggingToken:
# let go of the token being dragged
if tokeny < YMARGIN and tokenx > XMARGIN and tokenx < WINDOWWIDTH - XMARGIN:
# let go at the top of the screen.
column = int((tokenx - XMARGIN) / SPACESIZE)
if isValidMove(board, column):
animateDroppingToken(board, column, RED)
board[column][getLowestEmptySpace(board, column)] = RED
drawBoard(board)
pygame.display.update()
return
tokenx, tokeny = None, None
draggingToken = False
if tokenx != None and tokeny != None:
drawBoard(board, {'x':tokenx - int(SPACESIZE / 2), 'y':tokeny - int(SPACESIZE / 2), 'color':RED})
else:
drawBoard(board)
if isFirstMove:
# Show the help arrow for the player's first move.
DISPLAYSURF.blit(ARROWIMG, ARROWRECT)
pygame.display.update()
FPSCLOCK.tick()
def animateDroppingToken(board, column, color):
x = XMARGIN + column * SPACESIZE
y = YMARGIN - SPACESIZE
dropSpeed = 1.0
lowestEmptySpace = getLowestEmptySpace(board, column)
while True:
y += int(dropSpeed)
dropSpeed += 0.5
if int((y - YMARGIN) / SPACESIZE) >= lowestEmptySpace:
return
drawBoard(board, {'x':x, 'y':y, 'color':color})
pygame.display.update()
FPSCLOCK.tick()
def animateComputerMoving(board, column):
x = BLACKPILERECT.left
y = BLACKPILERECT.top
speed = 1.0
# moving the black tile up
while y > (YMARGIN - SPACESIZE):
y -= int(speed)
speed += 0.5
drawBoard(board, {'x':x, 'y':y, 'color':BLACK})
pygame.display.update()
FPSCLOCK.tick()
# moving the black tile over
y = YMARGIN - SPACESIZE
speed = 1.0
while x > (XMARGIN + column * SPACESIZE):
x -= int(speed)
speed += 0.5
drawBoard(board, {'x':x, 'y':y, 'color':BLACK})
pygame.display.update()
FPSCLOCK.tick()
# dropping the black tile
animateDroppingToken(board, column, BLACK)
def getComputerMove(board):
potentialMoves = getPotentialMoves(board, BLACK, DIFFICULTY)
# get the best fitness from the potential moves
bestMoveFitness = -1
for i in range(BOARDWIDTH):
if potentialMoves[i] > bestMoveFitness and isValidMove(board, i):
bestMoveFitness = potentialMoves[i]
# find all potential moves that have this best fitness
bestMoves = []
for i in range(len(potentialMoves)):
if potentialMoves[i] == bestMoveFitness and isValidMove(board, i):
bestMoves.append(i)
return random.choice(bestMoves)
def getPotentialMoves(board, tile, lookAhead):
if lookAhead == 0 or isBoardFull(board):
return [0] * BOARDWIDTH
if tile == RED:
enemyTile = BLACK
else:
enemyTile = RED
# Figure out the best move to make.
potentialMoves = [0] * BOARDWIDTH
for firstMove in range(BOARDWIDTH):
dupeBoard = copy.deepcopy(board)
if not isValidMove(dupeBoard, firstMove):
continue
makeMove(dupeBoard, tile, firstMove)
if isWinner(dupeBoard, tile):
# a winning move automatically gets a perfect fitness
potentialMoves[firstMove] = 1
break # don't bother calculating other moves
else:
# do other player's counter moves and determine best one
if isBoardFull(dupeBoard):
potentialMoves[firstMove] = 0
else:
for counterMove in range(BOARDWIDTH):
dupeBoard2 = copy.deepcopy(dupeBoard)
if not isValidMove(dupeBoard2, counterMove):
continue
makeMove(dupeBoard2, enemyTile, counterMove)
if isWinner(dupeBoard2, enemyTile):
# a losing move automatically gets the worst fitness
potentialMoves[firstMove] = -1
break
else:
# do the recursive call to getPotentialMoves()
results = getPotentialMoves(dupeBoard2, tile, lookAhead - 1)
potentialMoves[firstMove] += (sum(results) / BOARDWIDTH) / BOARDWIDTH
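                        # Averaging over the counter-moves and dividing by BOARDWIDTH again
                        # damps deeper lookahead, so immediate wins (+1) and losses (-1) dominate.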
return potentialMoves
def getLowestEmptySpace(board, column):
# Return the row number of the lowest empty row in the given column.
for y in range(BOARDHEIGHT-1, -1, -1):
if board[column][y] == EMPTY:
return y
return -1
def isValidMove(board, column):
# Returns True if there is an empty space in the given column.
# Otherwise returns False.
if column < 0 or column >= (BOARDWIDTH) or board[column][0] != EMPTY:
return False
return True
def isBoardFull(board):
# Returns True if there are no empty spaces anywhere on the board.
for x in range(BOARDWIDTH):
for y in range(BOARDHEIGHT):
if board[x][y] == EMPTY:
return False
return True
def isWinner(board, tile):
# check horizontal spaces
for x in range(BOARDWIDTH - 3):
for y in range(BOARDHEIGHT):
if board[x][y] == tile and board[x+1][y] == tile and board[x+2][y] == tile and board[x+3][y] == tile:
return True
# check vertical spaces
for x in range(BOARDWIDTH):
for y in range(BOARDHEIGHT - 3):
if board[x][y] == tile and board[x][y+1] == tile and board[x][y+2] == tile and board[x][y+3] == tile:
return True
# check / diagonal spaces
for x in range(BOARDWIDTH - 3):
for y in range(3, BOARDHEIGHT):
if board[x][y] == tile and board[x+1][y-1] == tile and board[x+2][y-2] == tile and board[x+3][y-3] == tile:
return True
# check \ diagonal spaces
for x in range(BOARDWIDTH - 3):
for y in range(BOARDHEIGHT - 3):
if board[x][y] == tile and board[x+1][y+1] == tile and board[x+2][y+2] == tile and board[x+3][y+3] == tile:
return True
return False
if __name__ == '__main__':
main()
| mit | -5,542,342,291,221,955,000 | 35.033058 | 130 | 0.607034 | false | 3.640412 | false | false | false |
lgarren/spack | var/spack/repos/builtin/packages/r-yarn/package.py | 1 | 2615 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RYarn(RPackage):
"""Expedite large RNA-Seq analyses using a combination of previously
developed tools. YARN is meant to make it easier for the user in
performing basic mis-annotation quality control, filtering, and
condition-aware normalization. YARN leverages many Bioconductor tools
and statistical techniques to account for the large heterogeneity and
sparsity found in very large RNA-seq experiments."""
homepage = "http://www.example.co://www.bioconductor.org/packages/yarn/"
url = "https://git.bioconductor.org/packages/yarn"
list_url = homepage
version('1.2.0', git='https://git.bioconductor.org/packages/yarn', commit='28af616ef8c27dcadf6568e276dea8465486a697')
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-biomart', type=('build', 'run'))
depends_on('r-downloader', type=('build', 'run'))
depends_on('r-edger', type=('build', 'run'))
depends_on('r-gplots', type=('build', 'run'))
depends_on('r-limma', type=('build', 'run'))
depends_on('r-matrixstats', type=('build', 'run'))
depends_on('r-preprocesscore', type=('build', 'run'))
depends_on('r-readr', type=('build', 'run'))
depends_on('r-rcolorbrewer', type=('build', 'run'))
depends_on('r-quantro', type=('build', 'run'))
depends_on('[email protected]:3.4.9', when='@1.2.0')
| lgpl-2.1 | -7,563,019,363,430,410,000 | 48.339623 | 121 | 0.665392 | false | 3.714489 | false | false | false |
cryptoquick/nanoblok | nbServer/main.py | 1 | 2599 | # !/usr/bin/env python
# Bulk of this document is based on code from here: http://code.google.com/appengine/articles/rpc.html
import os
import logging
from django.utils import simplejson
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp import util
from google.appengine.ext import db
# import zlib
class MainPage(webapp.RequestHandler):
"""Renders the main template."""
def get(self):
template_values = { }
self.response.headers['Content-Type'] = "application/xhtml+xml"
path = os.path.join(os.path.dirname(__file__), "index.xhtml")
self.response.out.write(template.render(path, template_values))
class SaveHandler(webapp.RequestHandler):
""" Allows the functions defined in the RPCMethods class to be RPCed."""
def __init__(self):
webapp.RequestHandler.__init__(self)
# self.methods = RPCMethods()
def post(self):
data = simplejson.loads(self.request.body)
length = self.request.headers['content-length']
# data = simplejson.loads(args)
# blockcountlen = len(data[2])
dbdata = BlokField(
author = data[0],
title = data[1],
field = simplejson.dumps(data[2]),
revision = float(data[3]),
blockcount = int(len(data[2])),
repo = data[4]
)
# ["CryptoQuick", "TestBlock", [[20, 12, 0, 0], [19, 11, 0, 0], [18, 11, 0, 0]], 0.01]
dbdata.put()
# if func[0] == '_':
# self.error(403) # access denied
# return
# func = getattr(self.methods, func, None)
# if not func:
# self.error(404) # file not found
# return
#
# result = func(*args)
# self.response.out.write(simplejson.dumps(result))
self.response.out.write(str(length) + ' bytes of data saved to the server.')
# class RPCMethods:
# """ Defines the methods that can be RPCed.
# NOTE: Do not allow remote callers access to private/protected "_*" methods.
# """
#
# def Save(self, *args):
# #
# #return len(args[0])
# return ''.join(args) + ' bytes of data saved to server.'
class BlokField(db.Model):
author = db.StringProperty(required=True)# db.UserProperty()
title = db.StringProperty(required=True)
field = db.StringProperty(required=True)
datetime = db.DateTimeProperty(required=True, auto_now_add=True)
revision = db.FloatProperty(required=True)
blockcount = db.IntegerProperty(required=True)
repo = db.StringProperty(required=True)
def main():
app = webapp.WSGIApplication([
('/', MainPage),
('/save', SaveHandler),
# ('/load', LoadHandler),
], debug=True)
util.run_wsgi_app(app)
if __name__ == '__main__':
main() | mit | 2,575,670,552,081,736,000 | 25.804124 | 102 | 0.672182 | false | 3.004624 | false | false | false |
taigaio/taiga-back | taiga/front/sitemaps/milestones.py | 1 | 2259 | # -*- coding: utf-8 -*-
# Copyright (C) 2014-present Taiga Agile LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db.models import Q
from django.apps import apps
from datetime import timedelta
from django.utils import timezone
from taiga.front.templatetags.functions import resolve
from .base import Sitemap
class MilestonesSitemap(Sitemap):
def items(self):
milestone_model = apps.get_model("milestones", "Milestone")
# Get US of public projects OR private projects if anon user can view them and us and tasks
queryset = milestone_model.objects.filter(Q(project__is_private=False) |
Q(project__is_private=True,
project__anon_permissions__contains=["view_milestones",
"view_us",
"view_tasks"]))
queryset = queryset.exclude(name="")
# Exclude blocked projects
queryset = queryset.filter(project__blocked_code__isnull=True)
# Project data is needed
queryset = queryset.select_related("project")
return queryset
def location(self, obj):
return resolve("taskboard", obj.project.slug, obj.slug)
def lastmod(self, obj):
return obj.modified_date
def changefreq(self, obj):
if (timezone.now() - obj.modified_date) > timedelta(days=90):
return "monthly"
return "weekly"
def priority(self, obj):
return 0.1
| agpl-3.0 | -6,595,990,861,714,533,000 | 36.65 | 107 | 0.618858 | false | 4.591463 | false | false | false |
GeoffEvans/aol_model | aol_model/ray.py | 1 | 2654 | from error_utils import check_is_unit_vector
from numpy import pi, array, dot, dtype, concatenate
from numpy.linalg import norm
class Ray(object):
"""A ray, free from paraxial assumptions. """
def __init__(self, position, wavevector_unit, wavelength, energy=1):
self.position = array(position, dtype=dtype(float))
self.wavevector_unit = array(wavevector_unit, dtype=dtype(float))
self.wavelength_vac = wavelength
self.energy = energy
@property
def wavevector_unit(self):
return self._wavevector_unit
@wavevector_unit.setter
def wavevector_unit(self, v):
check_is_unit_vector(v) # useful for error checking but slow!
self._wavevector_unit = array(v, dtype=dtype(float))
@property
def wavevector_vac_mag(self):
return 2 * pi / self.wavelength_vac
@wavevector_vac_mag.setter
def wavevector_vac_mag(self, v):
self.wavelength_vac = 2 * pi / v
@property
def wavevector_vac(self):
return self.wavevector_vac_mag * self.wavevector_unit
@wavevector_vac.setter
def wavevector_vac(self, v):
self.wavevector_vac_mag = norm(v)
self.wavevector_unit = array(v, dtype=dtype(float)) / self.wavevector_vac_mag
def propagate_free_space(self, distance):
self.position += self.wavevector_unit * distance
def propagate_to_plane(self, point_on_plane, normal_to_plane):
from_ray_to_point = point_on_plane - self.position
distance = dot(from_ray_to_point, normal_to_plane) / dot(self.wavevector_unit, normal_to_plane)
self.propagate_free_space(distance)
def propagate_from_plane_to_plane(self, plane_z_separation, normal_to_first, normal_to_second):
"""Move ray from one plane to the next. Useful for moving between AOD surfaces in AolFull. """
point_on_first_plane = self.position
z_displacement_from_point_to_origin = dot(point_on_first_plane[0:2], normal_to_first[0:2]) / normal_to_first[2]
displacement_from_point_to_origin = concatenate( (-point_on_first_plane[0:2], [z_displacement_from_point_to_origin]) )
# assumes all AODs are rotated about (x,y)=(0,0), in future would be faster and more realistic to use an AOD centre property
point_on_second_plane = point_on_first_plane + displacement_from_point_to_origin + [0,0,plane_z_separation]
self.propagate_to_plane(point_on_second_plane, normal_to_second)
def propagate_free_space_z(self, distance):
"""Move ray a given distance in the z-direction. Used only in AolSimple. """
self.propagate_to_plane(self.position + [0,0,distance], [0,0,1])
| gpl-3.0 | 3,437,786,645,152,071,700 | 46.392857 | 132 | 0.677845 | false | 3.3723 | false | false | false |
purcell/dbdoc | lib/dbdoc/props.py | 1 | 4784 | #!/usr/bin/env python
# -*- encoding: iso-8859-15 -*-
# Use of this software is subject to the terms specified in the LICENCE
# file included in the distribution package, and also available via
# https://github.com/purcell/dbdoc
#
#
# Provides support for reading and writing Java-style properties files
#
__author__ = 'Steve Purcell <stephen_purcell at yahoo dot com>'
__version__ = "$Revision: 1.2 $"[11:-2]
import UserDict, re, string, os
class Properties(UserDict.UserDict):
"""All-purpose wrapper for properties files. Handles most sanely-
formatted property entries. Does not support special characters in
property names, but does support them in values.
"""
PROPERTY_RE = re.compile(r'^\s*([\w\.\-]+)\s*=\s*(\"?)(.*)\2$')
ATTR_KEYS = () # keys that will look like instance attributes
# translations for \-escaped characters
_LOAD_TRANSLATIONS = {'=':'=', ':':':', ' ':' ', 't':'\t',
'r':'\r', 'n':'\n', 'f':'\f', '#':'#',
'!':'!', '\\':'\\'}
_SAVE_TRANSLATIONS = {}
for k, v in _LOAD_TRANSLATIONS.items():
_SAVE_TRANSLATIONS[v] = k
known_keys = {} # forward def to stop setattr and getattr complaining
def __init__(self):
self.data = {}
self.known_keys = {}
for key in self.ATTR_KEYS: self.known_keys[key] = 1
def save(self, stream):
items = self.items()
items.sort()
for key, value in items:
stream.write("%s=%s%s" % (key, self.escape_value(value), os.linesep))
def __getattr__(self, attr):
if self.known_keys.has_key(attr):
try:
return self[attr]
except KeyError:
pass
raise AttributeError, attr
def __setattr__(self, attr, value):
if self.known_keys.has_key(attr):
self[attr] = value
else:
self.__dict__[attr] = value
def unescape_value(self, value):
chars = []
i = 0
while i < len(value):
c = value[i]
if c == '\\':
i = i + 1
c = value[i]
replacement = self._LOAD_TRANSLATIONS.get(c, None)
if replacement:
chars.append(replacement)
i = i + 1
elif c == 'u':
code = value[i+1:i+5]
if len(code) != 4:
raise ValueError, "illegal unicode escape sequence"
chars.append(chr(string.atoi(code, 16)))
i = i + 5
else:
raise ValueError, "unknown escape \\%s" % c
else:
chars.append(c)
i = i + 1
return string.join(chars, '')
def escape_value(self, value):
chars = []
for c in value:
replacement = self._SAVE_TRANSLATIONS.get(c, None)
if replacement:
chars.append("\\%s" % replacement)
elif ord(c) < 0x20 or ord(c) > 0x7e:
chars.append("\\u%04X" % ord(c))
else:
chars.append(c)
return string.join(chars, '')
def load(self, stream):
while 1:
line = stream.readline()
if not line: break
m = self.PROPERTY_RE.match(line)
if m:
name, quote, value = m.groups()
self[name] = self.unescape_value(value)
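# Typical usage (illustrative sketch only, not part of the original module;
# the property file name and keys below are made up):
#
#   props = Properties()
#   props.load(open("db.properties"))
#   jdbc_url = props["jdbc.url"]
#   props["jdbc.user"] = "scott"
#   props.save(open("db.properties", "w"))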
##############################################################################
# A sprinkling of test code that runs when the module is imported or executed
##############################################################################
def test():
def checkmatch(regex, s, groups):
match = regex.match(s)
assert match, "failed on %s" % s
assert (match.groups() == groups), str(match.groups())
regex = Properties.PROPERTY_RE
checkmatch(regex, 'blah=foo\n', ('blah','','foo'))
checkmatch(regex, ' blah = "foo"\n', ('blah','"','foo'))
checkmatch(regex, ' blah = "foo "\n', ('blah','"','foo '))
checkmatch(regex, ' blah = "foo "\n', ('blah','"','foo '))
## Trailing comments are not legal
#checkmatch(regex, ' blah = "foo" # blah\n', ('blah','"','foo'))
#checkmatch(regex, ' blah = "fo\\"o" # blah\n', ('blah','"','fo\\"o'))
#checkmatch(regex, ' blah = fo\\"o # blah\n', ('blah','','fo\\"o'))
p = Properties()
from StringIO import StringIO
unquoted = '!"§$%&/()=?ßµ'
quoted = '\!"\u00A7$%&/()\=?\u00DF\u00B5'
i = StringIO('key=%s\n' % quoted)
p.load(i)
assert p['key'] == unquoted
o = StringIO()
p.save(o)
assert o.getvalue() == 'key=%s\n' % quoted
if __name__ == '__main__':
test()
| bsd-3-clause | 3,080,726,106,656,835,000 | 32.929078 | 81 | 0.49143 | false | 3.808917 | false | false | false |
python-control/python-control | control/tests/rlocus_test.py | 1 | 5533 | """rlocus_test.py - unit test for root locus diagrams
RMM, 1 Jul 2011
"""
import matplotlib.pyplot as plt
import numpy as np
from numpy.testing import assert_array_almost_equal
import pytest
import control as ct
from control.rlocus import root_locus, _RLClickDispatcher
from control.xferfcn import TransferFunction
from control.statesp import StateSpace
from control.bdalg import feedback
@pytest.mark.usefixtures("mplcleanup")
class TestRootLocus:
"""These are tests for the feedback function in rlocus.py."""
@pytest.fixture(params=[pytest.param((sysclass, sargs + (dt, )),
id=f"{systypename}-{dtstring}")
for sysclass, systypename, sargs in [
(TransferFunction, 'TF', ([1, 2],
[1, 2, 3])),
(StateSpace, 'SS', ([[1., 4.], [3., 2.]],
[[1.], [-4.]],
[[1., 0.]],
[[0.]])),
]
for dt, dtstring in [(0, 'ctime'),
(True, 'dtime')]
])
def sys(self, request):
"""Return some simple LTI systems for testing"""
# avoid construction during collection time: prevent unfiltered
# deprecation warning
sysfn, args = request.param
return sysfn(*args)
def check_cl_poles(self, sys, pole_list, k_list):
for k, poles in zip(k_list, pole_list):
poles_expected = np.sort(feedback(sys, k).pole())
poles = np.sort(poles)
np.testing.assert_array_almost_equal(poles, poles_expected)
def testRootLocus(self, sys):
"""Basic root locus (no plot)"""
klist = [-1, 0, 1]
roots, k_out = root_locus(sys, klist, plot=False)
np.testing.assert_equal(len(roots), len(klist))
np.testing.assert_allclose(klist, k_out)
self.check_cl_poles(sys, roots, klist)
def test_without_gains(self, sys):
roots, kvect = root_locus(sys, plot=False)
self.check_cl_poles(sys, roots, kvect)
@pytest.mark.parametrize('grid', [None, True, False])
def test_root_locus_plot_grid(self, sys, grid):
rlist, klist = root_locus(sys, grid=grid)
ax = plt.gca()
n_gridlines = sum([int(line.get_linestyle() in [':', 'dotted',
'--', 'dashed'])
for line in ax.lines])
if grid is False:
assert n_gridlines == 2
else:
assert n_gridlines > 2
# TODO check validity of grid
def test_root_locus_warnings(self):
sys = TransferFunction([1000], [1, 25, 100, 0])
with pytest.warns(FutureWarning, match="Plot.*deprecated"):
rlist, klist = root_locus(sys, Plot=True)
with pytest.warns(FutureWarning, match="PrintGain.*deprecated"):
rlist, klist = root_locus(sys, PrintGain=True)
def test_root_locus_neg_false_gain_nonproper(self):
""" Non proper TranferFunction with negative gain: Not implemented"""
with pytest.raises(ValueError, match="with equal order"):
root_locus(TransferFunction([-1, 2], [1, 2]))
# TODO: cover and validate negative false_gain branch in _default_gains()
def test_root_locus_zoom(self):
"""Check the zooming functionality of the Root locus plot"""
system = TransferFunction([1000], [1, 25, 100, 0])
plt.figure()
root_locus(system)
fig = plt.gcf()
ax_rlocus = fig.axes[0]
event = type('test', (object,), {'xdata': 14.7607954359,
'ydata': -35.6171379864,
'inaxes': ax_rlocus.axes})()
ax_rlocus.set_xlim((-10.813628105112421, 14.760795435937652))
ax_rlocus.set_ylim((-35.61713798641108, 33.879716621220311))
plt.get_current_fig_manager().toolbar.mode = 'zoom rect'
_RLClickDispatcher(event, system, fig, ax_rlocus, '-')
zoom_x = ax_rlocus.lines[-2].get_data()[0][0:5]
zoom_y = ax_rlocus.lines[-2].get_data()[1][0:5]
zoom_y = [abs(y) for y in zoom_y]
zoom_x_valid = [
-5., - 4.61281263, - 4.16689986, - 4.04122642, - 3.90736502]
zoom_y_valid = [0., 0., 0., 0., 0.]
assert_array_almost_equal(zoom_x, zoom_x_valid)
assert_array_almost_equal(zoom_y, zoom_y_valid)
@pytest.mark.timeout(2)
def test_rlocus_default_wn(self):
"""Check that default wn calculation works properly"""
#
# System that triggers use of y-axis as basis for wn (for coverage)
#
# This system generates a root locus plot that used to cause the
# creation (and subsequent deletion) of a large number of natural
# frequency contours within the `_default_wn` function in `rlocus.py`.
# This unit test makes sure that is fixed by generating a test case
# that will take a long time to do the calculation (minutes).
#
import scipy as sp
import signal
# Define a system that exhibits this behavior
sys = ct.tf(*sp.signal.zpk2tf(
[-1e-2, 1-1e7j, 1+1e7j], [0, -1e7j, 1e7j], 1))
ct.root_locus(sys)
| bsd-3-clause | -4,547,476,951,982,118,400 | 40.291045 | 78 | 0.542021 | false | 3.708445 | true | false | false |
phasedchirp/Assorted-Data-Analysis | experiments/Poisson-bootstrap/online-bootstrap-stream.py | 2 | 1138 | from __future__ import division
import numpy as np
from numpy.random import rand, poisson
from sys import argv
# based on:
# Hanley & MacGibbon (2006) (http://www.ncbi.nlm.nih.gov/pubmed/16730851)
# and
# http://www.unofficialgoogledatascience.com/2015/08/an-introduction-to-poisson-bootstrap_26.html
# see also: https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Weighted_incremental_algorithm
def increment(x,reps,ns,ms):
counts = poisson(1,reps)
temp = ns + counts
deltas = x - ms
Rs = [d*c / t if n > 0 else 0 for n,c,t,d in zip(ns,counts,temp,deltas)]
return (Rs,deltas,temp)
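# Note added for clarity (not in the original source): increment() draws a
# Poisson(1) count for the incoming value x in each of the `reps` replicates,
# which stands in for the multinomial resampling weight of a classical
# bootstrap. `Rs` is the resulting weighted-mean increment; together with
# `deltas` and the updated counts `temp` it feeds the Welford-style running
# mean/variance update performed in onlineMeanVarBoot() below.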
def onlineMeanVarBoot(xs,reps):
ns = np.zeros(reps,dtype=np.int)
ms = np.zeros(reps)
M2s = np.zeros(reps)
    for x in xs:
        Rs,deltas,temp = increment(x,reps,ns,ms)
ms += Rs
M2s += ns * deltas * Rs
ns = temp
if np.min(ns) < 2:
return np.nan
else:
return M2s / ns
if __name__== "__main__":
test = rand(500)
testBoot = onlineMeanVarBoot(test,4000)
print "numpy est: %s, boot est: %s" %(np.var(test),np.mean(testBoot))
| gpl-2.0 | -7,009,139,274,443,061,000 | 24.863636 | 108 | 0.635325 | false | 2.802956 | false | false | false |
perepa2/SQLToPython | json_scraping_sample.py | 5 | 1361 | import urllib2
import json
import MySQLdb
# Function to fetch json of reddit front page
def fetch():
link = "https://www.reddit.com/.json"
# Get the text version
text = urllib2.urlopen(link).read()
# Turn it into a dictionary
data = json.loads(text)
return data
# Returns a list of tuples of titles and links
def extract_links(data):
data = data["data"]['children']
output = []
for post in data:
link = post['data']['url']
title = post['data']['title']
output.append((title, link))
return output
# Puts the data into the MySQL database defined in tables.sql
def store(data):
host = "localhost"
user = "root"
passwd = "adsauiuc"
db = "adsa"
db = MySQLdb.connect(host=host, user=user, passwd=passwd, db=db)
# Creates a cursor that can execute SQL commands
cursor = db.cursor()
table = "reddit"
columns = "link, title"
    for title, url in data:
        # each element is a (title, link) tuple, so bind values in the (link, title) column order
        sql = """ INSERT INTO reddit ( link, title ) VALUES ( %s, %s ) """
        cursor.execute(sql, (url.encode("latin-1", "replace"), title.encode("latin-1", "replace")))
# Commit the changes only after all have succeeded without errors
db.commit()
# Always close the connection
db.close()
if __name__ == "__main__":
data = fetch()
links = extract_links(data)
store(links)
| mit | -7,401,714,130,701,518,000 | 24.203704 | 105 | 0.618663 | false | 3.678378 | false | false | false |
edesiocs/openduckbill | src/helper.py | 4 | 3110 | #!/usr/bin/python2.4
# Copyright 2008 Google Inc.
# Author : Anoop Chandran <[email protected]>
#
# openduckbill is a simple backup application. It offers support for
# transferring data to a local backup directory, NFS. It also provides
# file system monitoring of directories marked for backup. Please read
# the README file for more details.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Helper class, does command execution and returns value.
This class has the method RunCommandPopen which executes commands passed to
it and returns the status.
"""
import os
import subprocess
import sys
class CommandHelper:
"""Run command and return status, either using Popen or call
"""
def __init__(self, log_handle=''):
"""Initialise logging state
Logging enabled in debug mode.
Args:
log_handle: Object - a handle to the logging subsystem.
"""
self.logmsg = log_handle
if self.logmsg.debug:
self.stdout_debug = None
self.stderr_debug = None
else:
self.stdout_debug = 1
self.stderr_debug = 1
def RunCommandPopen(self, runcmd):
"""Uses subprocess.Popen to run the command.
Also prints the command output if being run in debug mode.
Args:
runcmd: List - path to executable and its arguments.
    Returns:
runretval: Integer - exit value of the command, after execution.
"""
stdout_val=self.stdout_debug
stderr_val=self.stderr_debug
if stdout_val:
stdout_l = file(os.devnull, 'w')
else:
stdout_l=subprocess.PIPE
if stderr_val:
stderr_l = file(os.devnull, 'w')
else:
stderr_l=subprocess.STDOUT
try:
run_proc = subprocess.Popen(runcmd, bufsize=0,
executable=None, stdin=None,
stdout=stdout_l, stderr=stderr_l)
if self.logmsg.debug:
output = run_proc.stdout
while 1:
line = output.readline()
if not line:
break
line = line.rstrip()
self.logmsg.logger.debug("Command output: %s" % line)
run_proc.wait()
runretval = run_proc.returncode
except OSError, e:
self.logmsg.logger.error('%s', e)
runretval = 1
except KeyboardInterrupt, e:
self.logmsg.logger.error('User interrupt')
sys.exit(1)
if stdout_l:
pass
#stderr_l.close()
if stderr_l:
pass
#stderr_l.close()
return runretval
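# Example usage (illustrative sketch only, not part of the original module;
# assumes a log handle exposing `.debug` and `.logger` as used above):
#
#   helper = CommandHelper(log_handle=logmsg)
#   status = helper.RunCommandPopen(["/bin/ls", "-l", "/tmp"])
#   if status != 0:
#     logmsg.logger.error("command failed with exit code %d", status)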
| gpl-2.0 | -9,139,896,424,809,712,000 | 28.065421 | 81 | 0.665273 | false | 3.956743 | false | false | false |
adamdoupe/enemy-of-the-state | crawler2_test.py | 1 | 5278 | #!/usr/bin/env python
import crawler2 as crawler
import BaseHTTPServer
import logging
import os
import SimpleHTTPServer
import threading
import unittest
TEST_BASE_PATH = 'test/sites/'
LISTEN_ADDRESS = '127.0.0.1'
LISTEN_PORT = 4566
BASE_URL = 'http://%s:%d/test/sites/' % (LISTEN_ADDRESS, LISTEN_PORT)
EXT_LISTEN_ADDRESS = '127.0.0.1'
EXT_LISTEN_PORT = 80
EXT_BASE_URL = 'http://%s:%d/test/sites/' % (EXT_LISTEN_ADDRESS, EXT_LISTEN_PORT)
class LocalCrawlerTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.server = BaseHTTPServer.HTTPServer(
(LISTEN_ADDRESS, LISTEN_PORT),
SimpleHTTPServer.SimpleHTTPRequestHandler)
cls.server_thread = threading.Thread(target=cls.server.serve_forever)
cls.server_thread.start()
@classmethod
def tearDownClass(cls):
cls.server.shutdown()
def setUp(self):
self.ff = crawler.FormFiller()
self.e = crawler.Engine(self.ff, None)
def test_single_page(self):
url = BASE_URL + 'single/single.html'
e = self.e
e.main([url])
self.assertIsInstance(e.cr.headreqresp.response.page, crawler.Page)
self.assertEqual(len(e.cr.headreqresp.response.page.links), 1)
class ExtCrawlerTest(unittest.TestCase):
def setUp(self):
self.ff = crawler.FormFiller()
self.e = crawler.Engine(self.ff, None)
def test_single_page(self):
url = EXT_BASE_URL + 'single/single.html'
e = self.e
e.main([url])
self.assertIsInstance(e.cr.headreqresp.response.page, crawler.Page)
self.assertEqual(len(e.cr.headreqresp.response.page.links), 1)
def test_absolute_urls(self):
url = EXT_BASE_URL + 'absolute_urls/index.php'
e = self.e
e.main([url])
self.assertEqual(len(e.ag.absrequests), 2)
self.assertEqual(len(e.ag.abspages), 2)
urls = set(r.split('/')[-1] for ar in e.ag.absrequests for r in ar.requestset)
self.assertEqual(set(['link.php', 'index.php']), urls)
def test_simple(self):
# Truncate status files
fd = os.open(TEST_BASE_PATH + '/simple/pages.data',
os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
os.fchmod(fd, 0666)
os.close(fd)
fd = os.open(TEST_BASE_PATH + '/simple/pages.lock',
os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
os.fchmod(fd, 0666)
os.close(fd)
url = EXT_BASE_URL + 'simple/index.php'
e = self.e
e.main([url])
self.assertEqual(len(e.ag.absrequests), 4)
urls = set(r.split('/')[-1] for ar in e.ag.absrequests for r in ar.requestset)
for url in set(['viewpage.php?id=%d' % i for i in range(9)] + ['addpage.php', 'index.php', 'static.php']):
self.assertTrue(url in urls)
self.assertEqual(len(e.ag.abspages), 4)
self.assertTrue(e.ag.nstates == 2)
def test_500_error(self):
url = EXT_BASE_URL + '/500/index.php'
e = self.e
e.main([url])
def test_empty_form(self):
url = EXT_BASE_URL + '/form_empty_params/index.php'
e = self.e
e.main([url])
self.assertEqual(len(e.ag.absrequests), 3)
def test_wackopicko_form(self):
url = EXT_BASE_URL + '/wackopicko_registration_form/index.php'
e = self.e
e.main([url])
def test_changing_state(self):
os.chmod(TEST_BASE_PATH + '/changing_state', 0777)
try:
os.unlink(TEST_BASE_PATH + '/changing_state/.lock')
except OSError:
pass
try:
os.unlink(TEST_BASE_PATH + '/changing_state/a')
except OSError:
pass
url = EXT_BASE_URL + '/changing_state/index.php'
e = self.e
e.main([url])
self.assertEqual(len(e.ag.absrequests), 4)
urls = set(r.split('/')[-1] for ar in e.ag.absrequests for r in ar.requestset)
self.assertEqual(set(['a.php',
'b.php',
'index.php',
'changestate.php']),
urls)
self.assertEqual(len(e.ag.abspages), 4)
self.assertEqual(e.ag.nstates, 2)
def test_traps(self):
url = EXT_BASE_URL + '/traps/root.html'
e = self.e
e.main([url])
self.assertTrue(len(e.ag.absrequests) >= 12)
urls = set(r.split('/')[-1] for ar in e.ag.absrequests for r in ar.requestset)
want_to_see = set(['a.html',
'a1.html',
'a2.html',
'b.html',
'b1.html',
'dead1.html',
'dead2.html',
'private.php',
'root.html'] +
['trap.php?input=%d' % i for i in range(1, 19)] +
['trap2.php?input=%d' % i for i in range(1, 16)])
for url in want_to_see:
self.assertTrue(url in urls)
self.assertEqual(len(e.ag.abspages), 11)
self.assertEqual(e.ag.nstates, 1)
if __name__ == '__main__':
#logging.basicConfig(level=logging.DEBUG)
unittest.main()
| gpl-2.0 | 3,364,519,199,101,717,000 | 33.953642 | 114 | 0.546987 | false | 3.425049 | true | false | false |
willu47/pyrate | pyrate/repositories/file.py | 2 | 3184 | import os
import logging
import zipfile
import io
EXPORT_COMMANDS = [('status', 'report status of this repository.')]
def load(options, readonly=False):
assert 'path' in options
if 'extensions' in options:
allowed_extensions = options['extensions'].split(',')
else:
allowed_extensions = None
if 'recursive' in options:
recursive = bool(options['recursive'])
else:
recursive = True
if 'unzip' in options:
unzip = bool(options['unzip'])
else:
unzip = False
return FileRepository(options['path'], allowedExtensions=allowed_extensions,
recursive=recursive, unzip=unzip)
class FileRepository:
def __init__(self, path, allowedExtensions=None, recursive=True, unzip=False):
self.root = path
self.allowed_extensions = allowedExtensions
self.recursive = recursive
self.unzip = unzip
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
pass
def status(self):
print("Folder at {}".format(self.root))
def iterfiles(self):
"""
Iterate files in this file repository. Returns a generator of 3-tuples,
containing a handle, filename and file extension of the current opened file.
"""
logging.debug("Iterating files in "+ self.root)
failed_files = []
for root, _, files in os.walk(self.root):
# iterate files, filtering only allowed extensions
for filename in files:
_, ext = os.path.splitext(filename)
if self.allowed_extensions == None or ext in self.allowed_extensions:
# hitting errors with decoding the data, iso-8859-1 seems to sort it
with open(os.path.join(root, filename), 'r', encoding='iso-8859-1') as fp:
yield (fp, filename, ext)
# zip file auto-extract
elif self.unzip and ext == '.zip':
try:
with zipfile.ZipFile(os.path.join(root, filename), 'r') as z:
for zname in z.namelist():
_, ext = os.path.splitext(zname)
if self.allowed_extensions == None or ext in self.allowed_extensions:
with z.open(zname, 'r') as fp:
# zipfile returns a binary file, so we require a
# TextIOWrapper to decode it
yield (io.TextIOWrapper(fp, encoding='iso-8859-1'), zname, ext)
except (zipfile.BadZipFile, RuntimeError) as error:
logging.warning("Unable to extract zip file %s: %s ", filename, error)
failed_files.append(filename)
# stop after first iteration if not recursive
if not self.recursive:
break
if len(failed_files) > 0:
logging.warning("Skipped %d files due to errors: %s", len(failed_files), repr(failed_files))
def close(self):
pass
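# Example usage (illustrative sketch only, not part of the original module;
# the path and extension values are made up):
#
#   repo = load({'path': '/data/ais', 'extensions': '.csv,.txt', 'unzip': 'true'})
#   for handle, name, ext in repo.iterfiles():
#       header = handle.readline()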
| mit | -466,640,203,837,295,000 | 37.829268 | 104 | 0.549937 | false | 4.724036 | false | false | false |
MDAnalysis/mdanalysis | testsuite/MDAnalysisTests/coordinates/reference.py | 1 | 7511 | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import numpy as np
from MDAnalysisTests import datafiles
from MDAnalysisTests.datafiles import (PDB_small, PDB, LAMMPSdata,
LAMMPSdata2, LAMMPSdcd2,
LAMMPSdata_mini, PSF_TRICLINIC,
DCD_TRICLINIC, PSF_NAMD_TRICLINIC,
DCD_NAMD_TRICLINIC)
class RefAdKSmall(object):
"""Mixin class to provide comparison numbers.
Based on small PDB with AdK (:data:`PDB_small`).
.. Note::
All distances must be in ANGSTROEM as this is the MDAnalysis
default unit. All readers must return Angstroem by default.
"""
filename = datafiles.PDB_small
ref_coordinates = {
# G11:CA, copied frm adk_open.pdb
'A10CA': np.array([-1.198, 7.937, 22.654]),
}
ref_distances = {'endtoend': 11.016959}
ref_E151HA2_index = 2314
ref_n_atoms = 3341
ref_charmm_totalcharge = -4.0
ref_charmm_Hcharges = [0.33] + 203 * [0.31]
ref_charmm_ArgCAcharges = 13 * [0.07]
ref_charmm_ProNcharges = 10 * [-0.29]
ref_unitcell = np.array([80.017, 80.017, 80.017, 60., 60., 90.],
dtype=np.float32)
ref_volume = 0.0
class RefAdK(object):
"""Mixin class to provide comparison numbers.
Based on PDB/GRO with AdK in water + Na+ (:data:`PDB`).
.. Note::
All distances must be in ANGSTROEM as this is the MDAnalysis
default unit. All readers must return Angstroem by default.
"""
filename = datafiles.PDB
ref_coordinates = {
# Angstroem as MDAnalysis unit!!
'A10CA': np.array([62.97600174, 62.08800125, 20.2329998]),
}
ref_distances = {'endtoend': 9.3513174}
ref_E151HA2_index = 2314
ref_n_atoms = 47681
ref_Na_sel_size = 4
# CRYST1 80.017 80.017 80.017 60.00 60.00 90.00
ref_unitcell = np.array([80.017, 80.017, 80.017, 60., 60., 90.],
dtype=np.float32)
#ref_volume = 362270.0 # computed with Gromacs ## NOT EXACT!
ref_volume = 362269.520669292
class Ref2r9r(object):
"""Mixin class to provide comparison numbers.
Based on S6 helices of chimeric Kv channel
.. Note::
All distances must be in ANGSTROEM as this is the MDAnalysis
default unit. All readers must return Angstroem by default.
"""
ref_n_atoms = 1284
ref_sum_centre_of_geometry = -98.24146
ref_n_frames = 10
class RefACHE(object):
"""Mixin class to provide comparison numbers.
ACHE peptide
# COM check in VMD::
set p [atomselect top "not water"]
set total {0 0 0};
for {set i 0} {$i < 11} {incr i} {
$p frame $i; set total [vecadd $total [measure center $p]]}
puts [vecsum $total]
# 472.2592159509659
"""
ref_n_atoms = 252
ref_proteinatoms = ref_n_atoms
ref_sum_centre_of_geometry = 472.2592159509659 # 430.44807815551758
ref_n_frames = 11
ref_periodic = False
class RefCappedAla(object):
"""Mixin class to provide comparison numbers.
Capped Ala in water
# COM check in VMD (load trajectory as *AMBER with periodic box*!)::
set p [atomselect top "not water"]
set total {0 0 0};
for {set i 0} {$i < 11} {incr i} {
$p frame $i; set total [vecadd $total [measure center $p]]}
puts [vecsum $total]
# 686.276834487915
"""
ref_n_atoms = 5071
ref_proteinatoms = 22
ref_sum_centre_of_geometry = 686.276834487915
ref_n_frames = 11
ref_periodic = True
class RefVGV(object):
"""Mixin class to provide comparison numbers.
Computed from bala.trj::
w = MDAnalysis.Universe(PRMncdf, TRJncdf)
       ref_n_atoms = len(w.atoms)
       ref_proteinatoms = len(w.select_atoms("protein"))
ref_sum_centre_of_geometry = np.sum([protein.center_of_geometry()
for ts in w.trajectory])
"""
topology = datafiles.PRMncdf
filename = datafiles.NCDF
ref_n_atoms = 2661
ref_proteinatoms = 50
ref_sum_centre_of_geometry = 1552.9125
ref_n_frames = 30
ref_periodic = True
class RefTZ2(object):
"""Reference values for the cpptraj testcase tz2.truncoct.nc
Used under the GPL v3.
"""
topology = datafiles.PRM7
filename = datafiles.NCDFtruncoct
ref_n_atoms = 5827
ref_proteinatoms = 217
ref_sum_centre_of_geometry = -68.575745
ref_n_frames = 10
ref_periodic = True
class RefTRZ(object):
# ref_coordinates = {}
# ref_distances = {'endtoend': }
ref_n_atoms = 8184
ref_dimensions = np.array([55.422830581665039, 55.422830581665039,
55.422830581665039, 90., 90., 90.],
dtype=np.float32)
ref_volume = 170241.762765
ref_n_frames = 6
ref_coordinates = np.array([72.3163681, -130.31130981, 19.97969055],
dtype=np.float32)
ref_velocities = np.array([[14.83297443, 18.02611542, 6.07733774]],
dtype=np.float32)
ref_delta = 0.001
ref_time = 0.01
ref_title = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ12345678901234'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ12345678901234')
class RefLAMMPSData(object):
filename = LAMMPSdata
n_atoms = 18364
pos_atom1 = np.array([11.89985657, 48.4455719, 19.09719849],
dtype=np.float32)
vel_atom1 = np.array([-0.005667593, 0.00791380978, -0.00300779533],
dtype=np.float32)
dimensions = np.array([55.42282867, 55.42282867, 55.42282867, 90., 90., 90.
],
dtype=np.float32)
class RefLAMMPSDataDCD(object):
format = "LAMMPS"
topology = LAMMPSdata2
trajectory = LAMMPSdcd2
n_atoms = 12421
n_frames = 5
dt = 0.5 # ps per frame
mean_dimensions = np.array(
[ 50.66186142, 47.18824387, 52.33762741,
90. , 90. , 90. ], dtype=np.float32)
class RefLAMMPSDataMini(object):
filename = LAMMPSdata_mini
n_atoms = 1
pos_atom1 = np.array([11.89985657, 48.4455719, 19.09719849],
dtype=np.float32)
vel_atom1 = np.array([-0.005667593, 0.00791380978, -0.00300779533],
dtype=np.float32)
dimensions = np.array([60., 50., 30., 90., 90., 90.], dtype=np.float32)
| gpl-2.0 | 8,833,107,045,643,184,000 | 31.799127 | 82 | 0.605379 | false | 3.116598 | false | false | false |
d4g33z/ruffus | ruffus/test/test_mkdir.py | 5 | 5084 | #!/usr/bin/env python
from __future__ import print_function
"""
test_mkdir.py
test product, combine, permute, combine_with_replacement
"""
import os
tempdir = os.path.relpath(os.path.abspath(os.path.splitext(__file__)[0]))
import os
import sys
# add grandparent to search path for testing
grandparent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
sys.path.insert(0, grandparent_dir)
# module name = script name without extension
module_name = os.path.splitext(os.path.basename(__file__))[0]
from ruffus import pipeline_run, pipeline_printout, transform, split, mkdir, formatter, Pipeline
from ruffus.ruffus_utility import RUFFUS_HISTORY_FILE, CHECKSUM_FILE_TIMESTAMPS
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
#
# imports
#
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
import unittest
import shutil
try:
from StringIO import StringIO
except:
from io import StringIO
import time
#sub-1s resolution in system?
#___________________________________________________________________________
#
# generate_initial_files1
#___________________________________________________________________________
@split(1, [tempdir + "/" + prefix + "_name.tmp1" for prefix in "abcd"])
def generate_initial_files1(in_name, out_names):
for on in out_names:
with open(on, 'w') as outfile:
pass
#___________________________________________________________________________
#
# test_product_task
#___________________________________________________________________________
@mkdir(tempdir + "/test1")
@mkdir(tempdir + "/test2")
@mkdir(generate_initial_files1, formatter(),
["{path[0]}/{basename[0]}.dir", 3, "{path[0]}/{basename[0]}.dir2"])
@transform( generate_initial_files1,
formatter(),
"{path[0]}/{basename[0]}.dir/{basename[0]}.tmp2")
def test_transform( infiles, outfile):
with open(outfile, "w") as p: pass
@mkdir(tempdir + "/test3")
@mkdir(generate_initial_files1, formatter(),
"{path[0]}/{basename[0]}.dir2")
def test_transform2():
print(" Loose cannon!", file=sys.stderr)
def cleanup_tmpdir():
os.system('rm -f %s %s' % (os.path.join(tempdir, '*'), RUFFUS_HISTORY_FILE))
class Testmkdir(unittest.TestCase):
def setUp(self):
try:
os.mkdir(tempdir)
except OSError:
pass
#___________________________________________________________________________
#
# test mkdir() pipeline_printout and pipeline_run
#___________________________________________________________________________
def test_mkdir_printout(self):
"""Input file exists, output doesn't exist"""
cleanup_tmpdir()
s = StringIO()
pipeline_printout(s, [test_transform, test_transform2], verbose=5, wrap_width = 10000, pipeline= "main")
#self.assertIn('Job needs update: Missing files '
# '[tmp_test_mkdir/a_name.tmp1, '
# 'tmp_test_mkdir/e_name.tmp1, '
# 'tmp_test_mkdir/h_name.tmp1, '
# 'tmp_test_mkdir/a_name.e_name.h_name.tmp2]', s.getvalue())
def test_mkdir_run(self):
"""Run mkdir"""
# output is up to date, but function body changed (e.g., source different)
cleanup_tmpdir()
pipeline_run([test_transform, test_transform2], verbose=0, multiprocess = 2, pipeline= "main")
def test_newstyle_mkdir_run(self):
test_pipeline = Pipeline("test")
test_pipeline.split(task_func = generate_initial_files1,
input = 1,
output = [tempdir + "/" + prefix + "_name.tmp1" for prefix in "abcd"])
test_pipeline.transform( task_func = test_transform,
input = generate_initial_files1,
filter = formatter(),
output = "{path[0]}/{basename[0]}.dir/{basename[0]}.tmp2")\
.mkdir(tempdir + "/test1")\
.mkdir(tempdir + "/test2")\
.mkdir(generate_initial_files1, formatter(),
["{path[0]}/{basename[0]}.dir", 3, "{path[0]}/{basename[0]}.dir2"])
test_pipeline.mkdir(test_transform2, tempdir + "/test3")\
.mkdir(generate_initial_files1, formatter(),
"{path[0]}/{basename[0]}.dir2")
cleanup_tmpdir()
pipeline_run([test_transform, test_transform2], verbose=0, multiprocess = 2, pipeline= "main")
#___________________________________________________________________________
#
# cleanup
#___________________________________________________________________________
def tearDown(self):
shutil.rmtree(tempdir)
#
# Necessary to protect the "entry point" of the program under windows.
# see: http://docs.python.org/library/multiprocessing.html#multiprocessing-programming
#
if __name__ == '__main__':
unittest.main()
| mit | -3,070,487,904,357,909,000 | 32.893333 | 112 | 0.534618 | false | 3.968774 | true | false | false |
clearlinux/clearstack | clearstack/tests/utils.py | 1 | 1242 | #
# Copyright (c) 2015 Intel Corporation
#
# Author: Julio Montes <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import io
from clearstack import shell as clearstack_shell
def shell(argstr):
orig = sys.stdout
clean_env = {}
_old_env, os.environ = os.environ, clean_env.copy()
try:
sys.stdout = io.StringIO()
_shell = clearstack_shell.ClearstackConfiguratorShell()
_shell.main(argstr.split())
except SystemExit:
exc_type, exc_value, exc_traceback = sys.exc_info()
assert exc_value, 0
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = orig
os.environ = _old_env
return out
| apache-2.0 | 7,482,850,741,262,786,000 | 28.571429 | 74 | 0.688406 | false | 3.752266 | false | false | false |
jrichte43/ProjectEuler | Problem-0110/solutions.py | 1 | 1200 |
__problem_title__ = "Diophantine reciprocals II"
__problem_url___ = "https://projecteuler.net/problem=110"
__problem_description__ = "In the following equation , , and are positive integers. It can be " \
"verified that when = 1260 there are 113 distinct solutions and this " \
"is the least value of for which the total number of distinct " \
"solutions exceeds one hundred. What is the least value of for which " \
"the number of distinct solutions exceeds four million? NOTE: This " \
"problem is a much more difficult version of and as it is well beyond " \
"the limitations of a brute force approach it requires a clever " \
"implementation."
import timeit
class Solution():
@staticmethod
def solution1():
pass
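        # Solution sketch (added note, not part of the original stub): for
        # 1/x + 1/y = 1/n the number of distinct solutions is
        # (d(n**2) + 1) / 2, where d() counts divisors, so the task reduces to
        # finding the smallest n (built from small prime factors) whose square
        # has more than 2 * 4000000 - 1 divisors.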
@staticmethod
def time_solutions():
setup = 'from __main__ import Solution'
print('Solution 1:', timeit.timeit('Solution.solution1()', setup=setup, number=1))
if __name__ == '__main__':
s = Solution()
print(s.solution1())
s.time_solutions()
| gpl-3.0 | 3,951,135,130,805,127,000 | 36.5 | 99 | 0.579167 | false | 4.494382 | false | false | false |
13steinj/praw | tests/unit/models/reddit/test_subreddit.py | 1 | 6055 | import pickle
import pytest
from praw.models import Subreddit, WikiPage
from ... import UnitTest
class TestSubreddit(UnitTest):
def test_equality(self):
subreddit1 = Subreddit(self.reddit,
_data={'display_name': 'dummy1', 'n': 1})
subreddit2 = Subreddit(self.reddit,
_data={'display_name': 'Dummy1', 'n': 2})
subreddit3 = Subreddit(self.reddit,
_data={'display_name': 'dummy3', 'n': 2})
assert subreddit1 == subreddit1
assert subreddit2 == subreddit2
assert subreddit3 == subreddit3
assert subreddit1 == subreddit2
assert subreddit2 != subreddit3
assert subreddit1 != subreddit3
assert 'dummy1' == subreddit1
assert subreddit2 == 'dummy1'
def test_construct_failure(self):
message = 'Either `display_name` or `_data` must be provided.'
with pytest.raises(TypeError) as excinfo:
Subreddit(self.reddit)
assert str(excinfo.value) == message
with pytest.raises(TypeError) as excinfo:
Subreddit(self.reddit, 'dummy', {'id': 'dummy'})
assert str(excinfo.value) == message
def test_fullname(self):
subreddit = Subreddit(self.reddit, _data={'display_name': 'name',
'id': 'dummy'})
assert subreddit.fullname == 't5_dummy'
def test_hash(self):
subreddit1 = Subreddit(self.reddit,
_data={'display_name': 'dummy1', 'n': 1})
subreddit2 = Subreddit(self.reddit,
_data={'display_name': 'Dummy1', 'n': 2})
subreddit3 = Subreddit(self.reddit,
_data={'display_name': 'dummy3', 'n': 2})
assert hash(subreddit1) == hash(subreddit1)
assert hash(subreddit2) == hash(subreddit2)
assert hash(subreddit3) == hash(subreddit3)
assert hash(subreddit1) == hash(subreddit2)
assert hash(subreddit2) != hash(subreddit3)
assert hash(subreddit1) != hash(subreddit3)
def test_pickle(self):
subreddit = Subreddit(self.reddit, _data={'display_name': 'name',
'id': 'dummy'})
for level in range(pickle.HIGHEST_PROTOCOL + 1):
other = pickle.loads(pickle.dumps(subreddit, protocol=level))
assert subreddit == other
def test_repr(self):
subreddit = Subreddit(self.reddit, display_name='name')
assert repr(subreddit) == 'Subreddit(display_name=\'name\')'
def test_search__params_not_modified(self):
params = {'dummy': 'value'}
subreddit = Subreddit(self.reddit, display_name='name')
generator = subreddit.search(None, params=params)
assert generator.params['dummy'] == 'value'
assert params == {'dummy': 'value'}
def test_str(self):
subreddit = Subreddit(self.reddit, _data={'display_name': 'name',
'id': 'dummy'})
assert str(subreddit) == 'name'
def test_submit_failure(self):
message = 'Either `selftext` or `url` must be provided.'
subreddit = Subreddit(self.reddit, display_name='name')
with pytest.raises(TypeError) as excinfo:
subreddit.submit('Cool title')
assert str(excinfo.value) == message
with pytest.raises(TypeError) as excinfo:
subreddit.submit('Cool title', selftext='a', url='b')
assert str(excinfo.value) == message
class TestSubredditFlairTemplates(UnitTest):
def test_bad_add(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
with pytest.raises(TypeError):
subreddit.flair.templates.add('impossible', css_class='conflict',
background_color='#ABCDEF')
with pytest.raises(TypeError):
subreddit.flair.templates.add('impossible', css_class='conflict',
mod_only=False)
with pytest.raises(TypeError):
subreddit.flair.templates.add('impossible', css_class='conflict',
text_color='dark')
with pytest.raises(TypeError):
subreddit.flair.templates.add('impossible', css_class='conflict',
background_color='#ABCDEF',
mod_only=False, text_color='dark')
class TestSubredditLinkFlairTemplates(UnitTest):
def test_bad_add(self):
subreddit = self.reddit.subreddit(pytest.placeholders.test_subreddit)
with pytest.raises(TypeError):
subreddit.flair.link_templates.add('impossible',
css_class='conflict',
background_color='#ABCDEF')
with pytest.raises(TypeError):
subreddit.flair.link_templates.add('impossible',
css_class='conflict',
mod_only=False)
with pytest.raises(TypeError):
subreddit.flair.link_templates.add('impossible',
css_class='conflict',
text_color='dark')
with pytest.raises(TypeError):
subreddit.flair.link_templates.add('impossible',
css_class='conflict',
background_color='#ABCDEF',
mod_only=False,
text_color='dark')
class TestSubredditWiki(UnitTest):
def test__getitem(self):
subreddit = Subreddit(self.reddit, display_name='name')
wikipage = subreddit.wiki['Foo']
assert isinstance(wikipage, WikiPage)
assert 'foo' == wikipage.name
| bsd-2-clause | -1,786,816,192,770,171,000 | 43.19708 | 77 | 0.539884 | false | 4.43265 | true | false | false |
chrta/canfestival-3-ct | objdictgen/doc_index/DS301_index.py | 14 | 2772 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os,wx
DS301_PDF_INDEX = {0x1000: 86, 0x1001: 87, 0x1002: 87, 0x1003: 88, 0x1005: 89, 0x1006: 90, 0x1007: 90, 0x1008: 91, 0x1009: 91, 0x100A: 91, 0x100C: 92, 0x100D: 92, 0x1010: 92, 0x1011: 94, 0x1012: 97, 0x1013: 98, 0x1014: 98, 0x1015: 99, 0x1016: 100, 0x1017: 101, 0x1018: 101, 0x1020: 117, 0x1200: 103, 0x1201: 103, 0x1280: 105, 0x1400: 106, 0x1600: 109, 0x1800: 111, 0x1A00: 112}
def get_acroversion():
" Return version of Adobe Acrobat executable or None"
import _winreg
adobesoft = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, r'Software\Adobe')
for index in range(_winreg.QueryInfoKey(adobesoft)[0]):
key = _winreg.EnumKey(adobesoft, index)
if "acrobat" in key.lower():
acrokey = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'Software\\Adobe\\%s' % key)
for index in range(_winreg.QueryInfoKey(acrokey)[0]):
numver = _winreg.EnumKey(acrokey, index)
try:
res = _winreg.QueryValue(_winreg.HKEY_LOCAL_MACHINE, 'Software\\Adobe\\%s\\%s\\InstallPath' % (key, numver))
return res
except:
pass
return None
def OpenPDFDocIndex(index, cwd):
if not os.path.isfile(os.path.join(cwd, "doc","301_v04000201.pdf")):
return _("""No documentation file available.
Please read can festival documentation to know how to obtain one.""")
try:
if index in DS301_PDF_INDEX:
if wx.Platform == '__WXMSW__':
readerpath = get_acroversion()
readerexepath = os.path.join(readerpath,"AcroRd32.exe")
if(os.path.isfile(readerexepath)):
os.spawnl(os.P_DETACH, readerexepath, "AcroRd32.exe", "/A", "page=%d=OpenActions" % DS301_PDF_INDEX[index], '"%s"'%os.path.join(cwd, "doc","301_v04000201.pdf"))
else:
os.system("xpdf -remote DS301 %s %d &"%(os.path.join(cwd, "doc","301_v04000201.pdf"), DS301_PDF_INDEX[index]))
else:
if wx.Platform == '__WXMSW__':
readerpath = get_acroversion()
readerexepath = os.path.join(readerpath,"AcroRd32.exe")
if(os.path.isfile(readerexepath)):
os.spawnl(os.P_DETACH, readerexepath, "AcroRd32.exe", '"%s"'%os.path.join(cwd, "doc","301_v04000201.pdf"))
else:
os.system("xpdf -remote DS301 %s &"%os.path.join(cwd, "doc","301_v04000201.pdf"))
return True
except:
if wx.Platform == '__WXMSW__':
return _("Check if Acrobat Reader is correctly installed on your computer")
else:
return _("Check if xpdf is correctly installed on your computer")
| lgpl-2.1 | -7,061,637,513,131,406,000 | 52.326923 | 377 | 0.589105 | false | 3 | false | false | false |
bjsmith/reversallearning | negative-affect/pain_regression.py | 1 | 1787 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 19 17:53:46 2017
@author: benjaminsmith
"""
#do the regression in the
#we need a design matrix
#with linear, square, cubic time point regressors
#plus intercept
#plus whatever Design_Matrix files we want to put in.
onsets_convolved.head()
onsets_convolved['linearterm']=range(1,361)
onsets_convolved['quadraticterm']=[pow(x,2) for x in onsets_convolved['linearterm']]
onsets_convolved['cubicterm']=[pow(x,3) for x in onsets_convolved['linearterm']]
onsets_convolved['ones']=[1]*360
c.head()
onsets_convolved.heatmap()
onsets_convolved
#add in the Design Matrix
msmrl1.X=onsets_convolved#=pd.DataFrame([msmrl1.X,onsets_convolved])
#msmrl1.X=pd.DataFrame(msmrl1.X)
#msmrl1.X
regression=msmrl1.regress()
msm_predicted_pain=regression['t'].similarity(stats['weight_map'],'correlation')
for brainimg in regression['t']:
plotBrain(brainimg)
regression['t'].shape()
plotBrain(regression['t'][1,])
onsets_convolved.head()
plotBrain(regression['t'][1,])
plotBrain(regression['t'][9,])
plotBrain(regression['t'][13,])
#regress out the linear trends
#then dot product with the pain map
#
msm_predicted_pain=regression['beta'].similarity(stats['weight_map'],'dot_product')
plt.plot(msm_predicted_pain)
np.shape(msm_predicted_pain)
#raw data.
msm_predicted_pain=msmrl1.similarity(stats['weight_map'],'dot_product')
onsets_convolved.columns.tolist()
ggplot(
pd.DataFrame(data={
'PainByBeta':msm_predicted_pain[0:9],
'RegressionBetaNum':range(0,9)
}),
aes('RegressionBetaNum','PainByBeta')) +\
geom_line() +\
stat_smooth(colour='blue', span=0.2)+ \
scale_x_continuous(breaks=[1,2,3], \
labels=["horrible", "ok", "awesome"])
| apache-2.0 | -1,746,152,898,568,344,800 | 25.671642 | 84 | 0.691662 | false | 2.845541 | false | false | false |
marcus0x62/cloudha | common.py | 1 | 1658 | #!/usr/bin/env python
#
# common.py -- Methods used by the cloud-ha scripts.
# Created: Marcus Butler, 05-April-2017.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from boto3 import client
from botocore.exceptions import ClientError
import sys
import json
def get_rtb_assoc(subnet):
ec2 = client('ec2')
res = ec2.describe_route_tables()
for table in res['RouteTables']:
for assoc in table['Associations']:
if assoc.has_key('SubnetId') and assoc['SubnetId'] == subnet:
return assoc['RouteTableAssociationId']
return None
def change_rtb(old_assoc, rtb):
ec2 = client('ec2')
res = ec2.replace_route_table_association(AssociationId = old_assoc,
RouteTableId = rtb)
return True
def get_config(bucket, file):
s3 = client('s3')
obj = s3.get_object(Bucket=bucket, Key=file)
dict = json.loads(obj['Body'].read())
return dict
def fatal_error(errmsg):
return {
'statusCode': 500,
'headers': { 'Content-Type': 'application/json' },
'body': json.dumps({'errorMessage': errmsg})
}
| apache-2.0 | 919,106,449,813,804,400 | 29.145455 | 75 | 0.656815 | false | 3.717489 | false | false | false |
sauloaldocker/sqlitevcf | restless.py | 1 | 3409 | #!/usr/bin/python
import os
import sys
from collections import defaultdict
import flask
import flask.ext.sqlalchemy
import flask.ext.restless
from flask.ext.cors import CORS
from flask import jsonify
sys.path.insert(0, '.')
import database
def add_cors_headers(response):
response.headers['Access-Control-Allow-Origin' ] = '*'
#response.headers['Access-Control-Allow-Credentials'] = 'true'
# Set whatever other headers you like...
return response
#https://flask-restless.readthedocs.org/en/latest/quickstart.html
# Create the Flask application and the Flask-SQLAlchemy object.
app = flask.Flask(__name__)
db_name = 'sqlite:///' + os.path.abspath( sys.argv[1] )
print db_name
app.config['DEBUG' ] = True
app.config['SQLALCHEMY_DATABASE_URI'] = db_name
app.config['SERVER_PORT' ] = 5000
cors = CORS(app, resources={r"/*": {"origins": "*"}},
headers="Content-Type")
app.secret_key = 's3cr3tkeyverysecret'
db = flask.ext.sqlalchemy.SQLAlchemy(app)
# Create the Flask-Restless API manager.
manager = flask.ext.restless.APIManager(app, flask_sqlalchemy_db=db)
# Create API endpoints, which will be available at /api/<tablename> by
# default. Allowed HTTP methods can be specified as well.
databases_meta = defaultdict(list)
for dbn in database.dbs:
print "exporting", dbn
#manager.create_api(dbn, methods=['GET'], allow_functions=True, collection_name=dbn.__tablename__)
manager.create_api(dbn, methods=['GET'], allow_functions=True)
if len( databases_meta ) == 0:
tables = dbn.metadata.sorted_tables
for table in tables:
#print t
for column in table.c:
#print c
#print dir(c)
#print " anon_label ", column.anon_label
#print " base_columns", column.base_columns
#print " desc ", column.desc
#print " description ", column.description
#print " info ", column.info
#print " key ", column.key
#print " label ", column.label
#print " name ", column.name
#print " table ", column.table
#print " type ", column.type
#name format_str
#table format_col
#type VARCHAR
databases_meta[ str(column.table) ].append( ( column.name, str(column.type) ) )
#print dir(t)
#print "columns", table.columns
#print "desc ", table.description
#print "info ", table.info
#print "meta ", table.metadata
#print "named ", table.named_with_column
#print "schema ", table.schema
#databases_met[ t ] =
#break
#databases_met[0] = 1
#print "metadata ", databases_meta
@app.route('/')
def hello_world():
hlp = {
'/' : 'help',
'/metas' : 'all tables meta data',
'/meta' : 'list of tables',
'/meta/<CHROM>' : 'metadata of table <CHROM>',
'/api/<TABLE>' : 'api for table <TABLE>'
}
return jsonify( { 'help': hlp } )
@app.route('/metas')
def metas():
return jsonify( { 'metas': databases_meta } )
@app.route('/meta')
def meta():
names = databases_meta.keys()
names.sort()
print names
return jsonify( { 'meta': names } )
@app.route('/meta/<table>')
def meta_table(table):
if table not in databases_meta:
flask.abort(404)
dbn_meta = databases_meta[ table ]
return jsonify({ 'meta': dbn_meta })
# start the flask loop
app.run(host='0.0.0.0', debug=True, port=5000)
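# Example queries once the server is running (illustrative only; the table
# names depend on the SQLite schema passed in sys.argv[1]):
#
#   curl http://localhost:5000/meta
#   curl http://localhost:5000/meta/<table>
#   curl http://localhost:5000/api/<table>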
| mit | -964,582,937,152,875,000 | 28.387931 | 99 | 0.630683 | false | 3.277885 | false | false | false |
capaulson/dronecontrol | GDP_Code/ArduParam.py | 1 | 2261 | # Import necessary modules
from droneapi.lib import VehicleMode
# Assume we are already connected to a vehicle (at the highest
# level) and this has been assigned to __main__.v
from __main__ import v
def FetchParam(par):
global v
"""
    Takes a list of parameters (par) and returns a corresponding
list of values from the vehicle."""
v.flush() # just an extra one for luck
val = par[:] # create list for values
for (i,p_) in enumerate(par):
if p_=='MODE':
# Treat modes slightly differently
val[i] = v.mode.name
else: val[i] = v.parameters[p_]
return val
def SetMode(mode):
global v
"""
Sets a new mode for the vehicle (e.g. MANUAL, AUTO, RTL).
Function returns nothing."""
v.mode = VehicleMode(mode)
    # Assumed that v.flush is run in SetParam()
#v.flush()
pass
def SetParam(par,val):
global v
"""
Sets a list of parameters (par) to a corresponding list of
new values (val). Function returns nothing."""
for (p_,v_) in zip(par,val):
if p_=='MODE':
# Treat modes slightly differently
SetMode(v_)
else:
v.parameters[p_] = v_
v.flush() # param changes SHOULD be guaranteed from now
pass
def ChangeParam(par,val,checks=3):
"""
    Change a list of parameters (par) to a corresponding list
of new values (val). The parameter is then checked to
ensure it has changed using CheckParam().
Function returns 'True' if successful, otherwise returns
a list of unset parameters."""
SetParam(par,val)
check = CheckParam(par,val)
ci=0
while (check!=True) and (ci<checks):
ci+=1
v.flush()
check = CheckParam(check[0],check[1])
if check!=True:
print("Detected non-matching params: %s" %check[0])
return check
return True
def CheckParam(par,val):
"""
Checks a list of parameters (par) are set to a list of
values (val)."""
valC = FetchParam(par) # load Current parameter values
parW = [] # list of params not correct
valW = [] # list of values to be corrected
# Iterate through each parameter, checking they have been
# changed correctly.
for (p_,v_,vC_) in zip(par,val,valC):
if p_!='MODE' and v_!=vC_: # skips mode changes
parW.append(p_)
valW.append(v_)
# Return unchanged params or True
if len(parW) > 0:
return [parW, valW]
return True # Everything okay
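# --- Illustrative usage sketch (added; not part of the original module) ---
# This module is meant to be imported from a DroneKit/droneapi main script that has
# already connected to a vehicle and exposed it as `v` in __main__ (see the import at
# the top). The parameter names below are hypothetical examples only.
def _example_change_flight_params():
	params = ['MODE', 'RTL_ALT']          # hypothetical parameter list
	values = ['AUTO', 3000]               # desired values for those parameters
	result = ChangeParam(params, values)  # set, then verify with up to 3 retries
	print("ChangeParam returned: %s" % result)
	print("Current values: %s" % FetchParam(params))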
| gpl-2.0 | 8,838,233,716,377,489,000 | 24.693182 | 62 | 0.676692 | false | 3.038978 | false | false | false |
prasanthj/jmeter-hiveserver2 | scripts/counter-diff.py | 1 | 6090 | import imp, json, os, shutil, sys, tempfile, zipfile
import imp
try:
imp.find_module('texttable')
from texttable import Texttable
except ImportError:
sys.stderr.write("Could not import Texttable\nRetry after 'pip install texttable'\n")
exit()
tmpdir = tempfile.mkdtemp()
def extract_zip(filename):
file_dir = os.path.join(tmpdir, os.path.splitext(filename)[0])
if not os.path.exists(file_dir):
os.makedirs(file_dir)
zip_ref = zipfile.ZipFile(os.path.abspath(filename), 'r')
zip_ref.extractall(os.path.abspath(file_dir))
zip_ref.close()
return file_dir
def diff(file1, file2):
# extract ZIP files
file1_dir = extract_zip(file1)
file2_dir = extract_zip(file2)
# tez debugtool writes json data to TEZ_DAG file whereas tez UI writes to dag.json
# also in dag.json data is inside "dag" root node
file1_using_dag_json = True
dag_json_file1 = os.path.join(file1_dir, "dag.json")
if os.path.isfile(dag_json_file1) == False:
file1_using_dag_json = False
dag_json_file1 = os.path.join(file1_dir, "TEZ_DAG")
if os.path.isfile(dag_json_file1) == False:
print "Unable to find dag.json/TEZ_DAG file inside the archive " + file1
exit()
file2_using_dag_json = True
dag_json_file2 = os.path.join(file2_dir, "dag.json")
if os.path.isfile(dag_json_file2) == False:
file2_using_dag_json = False
dag_json_file2 = os.path.join(file2_dir, "TEZ_DAG")
if os.path.isfile(dag_json_file2) == False:
print "Unable to find dag.json/TEZ_DAG file inside the archive " + file1
exit()
# populate diff table
difftable = {}
with open(dag_json_file1) as data_file:
file1_dag_json = json.load(data_file)["dag"] if file1_using_dag_json else json.load(data_file)
counters = file1_dag_json['otherinfo']['counters']
for group in counters['counterGroups']:
countertable = {}
for counter in group['counters']:
counterName = counter['counterName']
countertable[counterName] = []
countertable[counterName].append(counter['counterValue'])
groupName = group['counterGroupName']
difftable[groupName] = countertable
# add other info
otherinfo = file1_dag_json['otherinfo']
countertable = {}
countertable["TIME_TAKEN"] = [otherinfo['timeTaken']]
countertable["COMPLETED_TASKS"] = [otherinfo['numCompletedTasks']]
countertable["SUCCEEDED_TASKS"] = [otherinfo['numSucceededTasks']]
countertable["FAILED_TASKS"] = [otherinfo['numFailedTasks']]
countertable["KILLED_TASKS"] = [otherinfo['numKilledTasks']]
countertable["FAILED_TASK_ATTEMPTS"] = [otherinfo['numFailedTaskAttempts']]
countertable["KILLED_TASK_ATTEMPTS"] = [otherinfo['numKilledTaskAttempts']]
difftable['otherinfo'] = countertable
with open(dag_json_file2) as data_file:
file2_dag_json = json.load(data_file)["dag"] if file2_using_dag_json else json.load(data_file)
counters = file2_dag_json['otherinfo']['counters']
for group in counters['counterGroups']:
groupName = group['counterGroupName']
if groupName not in difftable:
difftable[groupName] = {}
countertable = difftable[groupName]
for counter in group['counters']:
counterName = counter['counterName']
# if counter does not exist in file1, add it with 0 value
if counterName not in countertable:
countertable[counterName] = [0]
countertable[counterName].append(counter['counterValue'])
# append other info
otherinfo = file2_dag_json['otherinfo']
countertable = difftable['otherinfo']
countertable["TIME_TAKEN"].append(otherinfo['timeTaken'])
countertable["COMPLETED_TASKS"].append(otherinfo['numCompletedTasks'])
countertable["SUCCEEDED_TASKS"].append(otherinfo['numSucceededTasks'])
countertable["FAILED_TASKS"].append(otherinfo['numFailedTasks'])
countertable["KILLED_TASKS"].append(otherinfo['numKilledTasks'])
countertable["FAILED_TASK_ATTEMPTS"].append(otherinfo['numFailedTaskAttempts'])
countertable["KILLED_TASK_ATTEMPTS"].append(otherinfo['numKilledTaskAttempts'])
difftable['otherinfo'] = countertable
# if some counters are missing, consider it as 0 and compute delta difference
for k,v in difftable.items():
for key, value in v.items():
# if counter value does not exisit in file2, add it with 0 value
if len(value) == 1:
value.append(0)
# store delta difference
delta = value[1] - value[0]
value.append(("+" if delta > 0 else "") + str(delta))
return difftable
def print_table(difftable, name1, name2, detailed=False):
table = Texttable(max_width=0)
table.set_cols_align(["l", "l", "l", "l", "l"])
table.set_cols_valign(["m", "m", "m", "m", "m"])
table.add_row(["Counter Group", "Counter Name", name1, name2, "delta"]);
for k in sorted(difftable):
# ignore task specific counters in default output
if not detailed and ("_INPUT_" in k or "_OUTPUT_" in k):
continue
v = difftable[k]
row = []
# counter group. using shortname here instead of FQCN
if detailed:
row.append(k)
else:
row.append(k.split(".")[-1])
# keys as list (counter names)
row.append("\n".join(list(v.keys())))
# counter values for dag1
for key, value in v.items():
if len(value) == 1:
value.append(0)
value.append(value[0] - value[1])
# dag1 counter values
name1Val = []
for key, value in v.items():
name1Val.append(str(value[0]))
row.append("\n".join(name1Val))
# dag2 counter values
name2Val = []
for key, value in v.items():
name2Val.append(str(value[1]))
row.append("\n".join(name2Val))
# delta values
deltaVal = []
for key, value in v.items():
deltaVal.append(str(value[2]))
row.append("\n".join(deltaVal))
table.add_row(row)
print table.draw() + "\n"
def main(argv):
sysargs = len(argv)
if sysargs < 2:
print "Usage: python counter-diff.py dag_file1.zip dag_file2.zip [--detail]"
return -1
file1 = argv[0]
file2 = argv[1]
difftable = diff(file1, file2)
detailed = False
if sysargs == 3 and argv[2] == "--detail":
detailed = True
print_table(difftable, os.path.splitext(file1)[0], os.path.splitext(file2)[0], detailed)
if __name__ == "__main__":
try:
sys.exit(main(sys.argv[1:]))
finally:
        shutil.rmtree(tmpdir)
| apache-2.0 | -6,137,608,370,762,285,000 | 31.924324 | 96 | 0.69491 | false | 2.950581 | false | false | false
airbnb/superset | superset/charts/dao.py | 1 | 2302 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from typing import List, Optional, TYPE_CHECKING
from sqlalchemy.exc import SQLAlchemyError
from superset.charts.filters import ChartFilter
from superset.dao.base import BaseDAO
from superset.extensions import db
from superset.models.slice import Slice
if TYPE_CHECKING:
from superset.connectors.base.models import BaseDatasource
logger = logging.getLogger(__name__)
class ChartDAO(BaseDAO):
model_cls = Slice
base_filter = ChartFilter
@staticmethod
def bulk_delete(models: Optional[List[Slice]], commit: bool = True) -> None:
item_ids = [model.id for model in models] if models else []
# bulk delete, first delete related data
if models:
for model in models:
model.owners = []
model.dashboards = []
db.session.merge(model)
# bulk delete itself
try:
db.session.query(Slice).filter(Slice.id.in_(item_ids)).delete(
synchronize_session="fetch"
)
if commit:
db.session.commit()
except SQLAlchemyError as ex:
if commit:
db.session.rollback()
raise ex
@staticmethod
def save(slc: Slice, commit: bool = True) -> None:
db.session.add(slc)
if commit:
db.session.commit()
@staticmethod
def overwrite(slc: Slice, commit: bool = True) -> None:
db.session.merge(slc)
if commit:
db.session.commit()
| apache-2.0 | 8,217,947,686,256,482,000 | 32.852941 | 80 | 0.665943 | false | 4.247232 | false | false | false |
keras-team/keras-io | examples/rl/ddpg_pendulum.py | 1 | 13950 | """
Title: Deep Deterministic Policy Gradient (DDPG)
Author: [amifunny](https://github.com/amifunny)
Date created: 2020/06/04
Last modified: 2020/09/21
Description: Implementing DDPG algorithm on the Inverted Pendulum Problem.
"""
"""
## Introduction
**Deep Deterministic Policy Gradient (DDPG)** is a model-free off-policy algorithm for
learning continuous actions.
It combines ideas from DPG (Deterministic Policy Gradient) and DQN (Deep Q-Network).
It uses Experience Replay and slow-learning target networks from DQN, and it is based on
DPG,
which can operate over continuous action spaces.
This tutorial closely follows this paper -
[Continuous control with deep reinforcement learning](https://arxiv.org/pdf/1509.02971.pdf)
## Problem
We are trying to solve the classic **Inverted Pendulum** control problem.
In this setting, we can take only two actions: swing left or swing right.
What makes this problem challenging for Q-Learning Algorithms is that actions
are **continuous** instead of being **discrete**. That is, instead of using two
discrete actions like `-1` or `+1`, we have to select from infinite actions
ranging from `-2` to `+2`.
## Quick theory
Just like the Actor-Critic method, we have two networks:
1. Actor - It proposes an action given a state.
2. Critic - It predicts if the action is good (positive value) or bad (negative value)
given a state and an action.
DDPG uses two more techniques not present in the original DQN:
**First, it uses two Target networks.**
**Why?** Because it adds stability to training. In short, we are learning from estimated
targets and Target networks are updated slowly, hence keeping our estimated targets
stable.
Conceptually, this is like saying, "I have an idea of how to play this well,
I'm going to try it out for a bit until I find something better",
as opposed to saying "I'm going to re-learn how to play this entire game after every
move".
See this [StackOverflow answer](https://stackoverflow.com/a/54238556/13475679).
**Second, it uses Experience Replay.**
We store a list of tuples `(state, action, reward, next_state)`, and instead of
learning only from recent experience, we learn from sampling all of our experience
accumulated so far.
Now, let's see how it is implemented.
"""
import gym
import tensorflow as tf
from tensorflow.keras import layers
import numpy as np
import matplotlib.pyplot as plt
"""
We use [OpenAIGym](http://gym.openai.com/docs) to create the environment.
We will use the `upper_bound` parameter to scale our actions later.
"""
problem = "Pendulum-v0"
env = gym.make(problem)
num_states = env.observation_space.shape[0]
print("Size of State Space -> {}".format(num_states))
num_actions = env.action_space.shape[0]
print("Size of Action Space -> {}".format(num_actions))
upper_bound = env.action_space.high[0]
lower_bound = env.action_space.low[0]
print("Max Value of Action -> {}".format(upper_bound))
print("Min Value of Action -> {}".format(lower_bound))
"""
To implement better exploration by the Actor network, we use noisy perturbations,
specifically
an **Ornstein-Uhlenbeck process** for generating noise, as described in the paper.
It samples noise from a correlated normal distribution.
"""
class OUActionNoise:
def __init__(self, mean, std_deviation, theta=0.15, dt=1e-2, x_initial=None):
self.theta = theta
self.mean = mean
self.std_dev = std_deviation
self.dt = dt
self.x_initial = x_initial
self.reset()
def __call__(self):
# Formula taken from https://www.wikipedia.org/wiki/Ornstein-Uhlenbeck_process.
x = (
self.x_prev
+ self.theta * (self.mean - self.x_prev) * self.dt
+ self.std_dev * np.sqrt(self.dt) * np.random.normal(size=self.mean.shape)
)
# Store x into x_prev
# Makes next noise dependent on current one
self.x_prev = x
return x
def reset(self):
if self.x_initial is not None:
self.x_prev = self.x_initial
else:
self.x_prev = np.zeros_like(self.mean)
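"""
Quick sanity check (an added illustration, not part of the original example): because
each sample depends on the previous one, consecutive values drift smoothly instead of
jumping around like independent Gaussian noise.
"""

_demo_noise = OUActionNoise(mean=np.zeros(1), std_deviation=0.2 * np.ones(1))
_demo_samples = [float(_demo_noise()) for _ in range(5)]  # five consecutive, correlated samples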
"""
The `Buffer` class implements Experience Replay.
---

---
**Critic loss** - Mean Squared Error of `y - Q(s, a)`
where `y` is the expected return as seen by the Target network,
and `Q(s, a)` is the action value predicted by the Critic network. `y` is a moving target
that the critic model tries to achieve; we make this target
stable by updating the Target model slowly.
**Actor loss** - This is computed using the mean of the value given by the Critic network
for the actions taken by the Actor network. We seek to maximize this quantity.
Hence we update the Actor network so that it produces actions that get
the maximum predicted value as seen by the Critic, for a given state.
"""
class Buffer:
def __init__(self, buffer_capacity=100000, batch_size=64):
# Number of "experiences" to store at max
self.buffer_capacity = buffer_capacity
# Num of tuples to train on.
self.batch_size = batch_size
        # It tells us the number of times record() was called.
self.buffer_counter = 0
        # Instead of a list of tuples, as the experience replay concept goes,
# We use different np.arrays for each tuple element
self.state_buffer = np.zeros((self.buffer_capacity, num_states))
self.action_buffer = np.zeros((self.buffer_capacity, num_actions))
self.reward_buffer = np.zeros((self.buffer_capacity, 1))
self.next_state_buffer = np.zeros((self.buffer_capacity, num_states))
    # Takes (s,a,r,s') observation tuple as input
def record(self, obs_tuple):
# Set index to zero if buffer_capacity is exceeded,
# replacing old records
index = self.buffer_counter % self.buffer_capacity
self.state_buffer[index] = obs_tuple[0]
self.action_buffer[index] = obs_tuple[1]
self.reward_buffer[index] = obs_tuple[2]
self.next_state_buffer[index] = obs_tuple[3]
self.buffer_counter += 1
# Eager execution is turned on by default in TensorFlow 2. Decorating with tf.function allows
# TensorFlow to build a static graph out of the logic and computations in our function.
# This provides a large speed up for blocks of code that contain many small TensorFlow operations such as this one.
@tf.function
def update(
self, state_batch, action_batch, reward_batch, next_state_batch,
):
# Training and updating Actor & Critic networks.
# See Pseudo Code.
with tf.GradientTape() as tape:
target_actions = target_actor(next_state_batch, training=True)
y = reward_batch + gamma * target_critic(
[next_state_batch, target_actions], training=True
)
critic_value = critic_model([state_batch, action_batch], training=True)
critic_loss = tf.math.reduce_mean(tf.math.square(y - critic_value))
critic_grad = tape.gradient(critic_loss, critic_model.trainable_variables)
critic_optimizer.apply_gradients(
zip(critic_grad, critic_model.trainable_variables)
)
with tf.GradientTape() as tape:
actions = actor_model(state_batch, training=True)
critic_value = critic_model([state_batch, actions], training=True)
# Used `-value` as we want to maximize the value given
# by the critic for our actions
actor_loss = -tf.math.reduce_mean(critic_value)
actor_grad = tape.gradient(actor_loss, actor_model.trainable_variables)
actor_optimizer.apply_gradients(
zip(actor_grad, actor_model.trainable_variables)
)
# We compute the loss and update parameters
def learn(self):
# Get sampling range
record_range = min(self.buffer_counter, self.buffer_capacity)
# Randomly sample indices
batch_indices = np.random.choice(record_range, self.batch_size)
# Convert to tensors
state_batch = tf.convert_to_tensor(self.state_buffer[batch_indices])
action_batch = tf.convert_to_tensor(self.action_buffer[batch_indices])
reward_batch = tf.convert_to_tensor(self.reward_buffer[batch_indices])
reward_batch = tf.cast(reward_batch, dtype=tf.float32)
next_state_batch = tf.convert_to_tensor(self.next_state_buffer[batch_indices])
self.update(state_batch, action_batch, reward_batch, next_state_batch)
# This updates the target parameters slowly
# Based on rate `tau`, which is much less than one.
@tf.function
def update_target(target_weights, weights, tau):
for (a, b) in zip(target_weights, weights):
a.assign(b * tau + a * (1 - tau))
"""
Here we define the Actor and Critic networks. These are basic Dense models
with `ReLU` activation.
Note: We need the initialization for the last layer of the Actor to be between
`-0.003` and `0.003` as this prevents us from getting `1` or `-1` output values in
the initial stages, which would squash our gradients to zero,
as we use the `tanh` activation.
"""
def get_actor():
    # Initialize weights between -3e-3 and 3e-3
last_init = tf.random_uniform_initializer(minval=-0.003, maxval=0.003)
inputs = layers.Input(shape=(num_states,))
out = layers.Dense(256, activation="relu")(inputs)
out = layers.Dense(256, activation="relu")(out)
outputs = layers.Dense(1, activation="tanh", kernel_initializer=last_init)(out)
# Our upper bound is 2.0 for Pendulum.
outputs = outputs * upper_bound
model = tf.keras.Model(inputs, outputs)
return model
def get_critic():
# State as input
state_input = layers.Input(shape=(num_states))
state_out = layers.Dense(16, activation="relu")(state_input)
state_out = layers.Dense(32, activation="relu")(state_out)
# Action as input
action_input = layers.Input(shape=(num_actions))
action_out = layers.Dense(32, activation="relu")(action_input)
    # Both are passed through a separate layer before concatenating
concat = layers.Concatenate()([state_out, action_out])
out = layers.Dense(256, activation="relu")(concat)
out = layers.Dense(256, activation="relu")(out)
outputs = layers.Dense(1)(out)
    # Outputs a single value for a given state-action pair
model = tf.keras.Model([state_input, action_input], outputs)
return model
"""
`policy()` returns an action sampled from our Actor network plus some noise for
exploration.
"""
def policy(state, noise_object):
sampled_actions = tf.squeeze(actor_model(state))
noise = noise_object()
# Adding noise to action
sampled_actions = sampled_actions.numpy() + noise
# We make sure action is within bounds
legal_action = np.clip(sampled_actions, lower_bound, upper_bound)
return [np.squeeze(legal_action)]
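"""
(Added aside: the clipping step above simply projects an out-of-range action back into
the legal `[-2, 2]` torque range of the pendulum.)
"""

_noisy_action_demo = np.array([2.7])  # pretend actor output plus exploration noise
_legal_action_demo = np.clip(_noisy_action_demo, lower_bound, upper_bound)  # -> array([2.])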
"""
## Training hyperparameters
"""
std_dev = 0.2
ou_noise = OUActionNoise(mean=np.zeros(1), std_deviation=float(std_dev) * np.ones(1))
actor_model = get_actor()
critic_model = get_critic()
target_actor = get_actor()
target_critic = get_critic()
# Making the weights equal initially
target_actor.set_weights(actor_model.get_weights())
target_critic.set_weights(critic_model.get_weights())
# Learning rate for actor-critic models
critic_lr = 0.002
actor_lr = 0.001
critic_optimizer = tf.keras.optimizers.Adam(critic_lr)
actor_optimizer = tf.keras.optimizers.Adam(actor_lr)
total_episodes = 100
# Discount factor for future rewards
gamma = 0.99
# Used to update target networks
tau = 0.005
buffer = Buffer(50000, 64)
"""
Now we implement our main training loop, and iterate over episodes.
We sample actions using `policy()` and train with `learn()` at each time step,
along with updating the Target networks at a rate `tau`.
"""
# To store reward history of each episode
ep_reward_list = []
# To store average reward history of last few episodes
avg_reward_list = []
# Takes about 4 min to train
for ep in range(total_episodes):
prev_state = env.reset()
episodic_reward = 0
while True:
# Uncomment this to see the Actor in action
# But not in a python notebook.
# env.render()
tf_prev_state = tf.expand_dims(tf.convert_to_tensor(prev_state), 0)
action = policy(tf_prev_state, ou_noise)
        # Receive state and reward from environment.
state, reward, done, info = env.step(action)
buffer.record((prev_state, action, reward, state))
episodic_reward += reward
buffer.learn()
update_target(target_actor.variables, actor_model.variables, tau)
update_target(target_critic.variables, critic_model.variables, tau)
# End this episode when `done` is True
if done:
break
prev_state = state
ep_reward_list.append(episodic_reward)
# Mean of last 40 episodes
avg_reward = np.mean(ep_reward_list[-40:])
print("Episode * {} * Avg Reward is ==> {}".format(ep, avg_reward))
avg_reward_list.append(avg_reward)
# Plotting graph
# Episodes versus Avg. Rewards
plt.plot(avg_reward_list)
plt.xlabel("Episode")
plt.ylabel("Avg. Epsiodic Reward")
plt.show()
"""
If training proceeds correctly, the average episodic reward will increase with time.
Feel free to try different learning rates, `tau` values, and architectures for the
Actor and Critic networks.
The Inverted Pendulum problem has low complexity, but DDPG works great on many other
problems.
Another great environment to try this on is `LunarLanderContinuous-v2`, but it will take
more episodes to obtain good results.
"""
# Save the weights
actor_model.save_weights("pendulum_actor.h5")
critic_model.save_weights("pendulum_critic.h5")
target_actor.save_weights("pendulum_target_actor.h5")
target_critic.save_weights("pendulum_target_critic.h5")
"""
Before Training:

"""
"""
After 100 episodes:

"""
| apache-2.0 | -7,767,612,422,060,140,000 | 32.373206 | 119 | 0.692258 | false | 3.508551 | false | false | false |
PrivacyScore/PrivacyScore | privacyscore/backend/management/commands/rescanscanlist.py | 1 | 2152 | # Copyright (C) 2018 PrivacyScore Contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from time import sleep
from django.core.management import BaseCommand
from django.utils import timezone
from privacyscore.backend.models import Site, ScanList
from privacyscore.utils import normalize_url
class Command(BaseCommand):
    help = 'Rescan all sites in an existing ScanList.'
def add_arguments(self, parser):
parser.add_argument('scan_list_id')
parser.add_argument('-s', '--sleep-between-scans', type=float, default=0)
def handle(self, *args, **options):
scan_list = ScanList.objects.get(id=options['scan_list_id'])
sites = scan_list.sites.all()
scan_count = 0
for site in sites:
status_code = site.scan()
if status_code == Site.SCAN_COOLDOWN:
self.stdout.write(
'Rate limiting -- Not scanning site {}'.format(site))
continue
if status_code == Site.SCAN_BLACKLISTED:
self.stdout.write(
'Blacklisted -- Not scanning site {}'.format(site))
continue
scan_count += 1
self.stdout.write('Scanning site {}'.format(
site))
if options['sleep_between_scans']:
self.stdout.write('Sleeping {}'.format(options['sleep_between_scans']))
sleep(options['sleep_between_scans'])
self.stdout.write('read {} sites, scanned {}'.format(
len(sites), scan_count))
| gpl-3.0 | 2,197,571,733,522,232,800 | 38.127273 | 87 | 0.64684 | false | 4.178641 | false | false | false |
kennethreitz/pipenv | pipenv/vendor/tomlkit/toml_char.py | 1 | 1732 | import string
from ._compat import PY2
from ._compat import unicode
if PY2:
from pipenv.vendor.backports.functools_lru_cache import lru_cache
else:
from functools import lru_cache
class TOMLChar(unicode):
def __init__(self, c):
super(TOMLChar, self).__init__()
if len(self) > 1:
raise ValueError("A TOML character must be of length 1")
BARE = string.ascii_letters + string.digits + "-_"
KV = "= \t"
NUMBER = string.digits + "+-_.e"
SPACES = " \t"
NL = "\n\r"
WS = SPACES + NL
@lru_cache(maxsize=None)
def is_bare_key_char(self): # type: () -> bool
"""
Whether the character is a valid bare key name or not.
"""
return self in self.BARE
@lru_cache(maxsize=None)
def is_kv_sep(self): # type: () -> bool
"""
        Whether the character is a valid key/value separator or not.
"""
return self in self.KV
@lru_cache(maxsize=None)
def is_int_float_char(self): # type: () -> bool
"""
Whether the character if a valid integer or float value character or not.
"""
return self in self.NUMBER
@lru_cache(maxsize=None)
def is_ws(self): # type: () -> bool
"""
Whether the character is a whitespace character or not.
"""
return self in self.WS
@lru_cache(maxsize=None)
def is_nl(self): # type: () -> bool
"""
Whether the character is a new line character or not.
"""
return self in self.NL
@lru_cache(maxsize=None)
def is_spaces(self): # type: () -> bool
"""
Whether the character is a space or not
"""
return self in self.SPACES
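# Illustrative usage sketch (added; not part of the vendored tomlkit source). Guarded so
# that importing the module is unaffected.
if __name__ == "__main__":
    assert TOMLChar("a").is_bare_key_char()  # letters are legal in bare keys
    assert TOMLChar("=").is_kv_sep()         # '=' separates keys from values
    assert TOMLChar("\n").is_nl() and TOMLChar("\n").is_ws()
    assert not TOMLChar(" ").is_nl()         # a space is whitespace but not a newline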
| mit | 5,225,340,019,896,950,000 | 24.850746 | 81 | 0.562933 | false | 3.677282 | false | false | false |
HalcyonChimera/osf.io | api_tests/applications/views/test_application_list.py | 2 | 4654 | import pytest
import copy
import mock
from osf.models import ApiOAuth2Application
from website.util import api_v2_url
from osf.utils import sanitize
from osf_tests.factories import ApiOAuth2ApplicationFactory, AuthUserFactory
def _get_application_detail_route(app):
path = 'applications/{}/'.format(app.client_id)
return api_v2_url(path, base_route='/')
def _get_application_list_url():
path = 'applications/'
return api_v2_url(path, base_route='/')
@pytest.fixture()
def user():
return AuthUserFactory()
@pytest.mark.django_db
class TestApplicationList:
@pytest.fixture()
def user_app(self, user):
return ApiOAuth2ApplicationFactory(owner=user)
@pytest.fixture()
def url(self):
return _get_application_list_url()
@pytest.fixture()
def sample_data(self):
return {
'data': {
'type': 'applications',
'attributes': {
'name': 'A shiny new application',
'description': 'It\'s really quite shiny',
'home_url': 'http://osf.io',
'callback_url': 'https://cos.io',
'owner': 'Value discarded',
'client_id': 'Value discarded',
'client_secret': 'Value discarded',
}
}
}
def test_user_should_see_only_their_applications(
self, app, user, user_app, url):
res = app.get(url, auth=user.auth)
assert len(
res.json['data']
) == ApiOAuth2Application.objects.filter(owner=user).count()
def test_other_user_should_see_only_their_applications(self, app, url):
other_user = AuthUserFactory()
other_user_apps = [
ApiOAuth2ApplicationFactory(owner=other_user) for i in range(2)
]
res = app.get(url, auth=other_user.auth)
assert len(res.json['data']) == len(other_user_apps)
@mock.patch('framework.auth.cas.CasClient.revoke_application_tokens')
def test_deleting_application_should_hide_it_from_api_list(
self, mock_method, app, user, user_app, url):
mock_method.return_value(True)
api_app = user_app
delete_url = _get_application_detail_route(api_app)
res = app.delete(delete_url, auth=user.auth)
assert res.status_code == 204
res = app.get(url, auth=user.auth)
assert res.status_code == 200
assert len(
res.json['data']
) == ApiOAuth2Application.objects.count() - 1
def test_created_applications_are_tied_to_request_user_with_data_specified(
self, app, user, url, sample_data):
res = app.post_json_api(
url, sample_data,
auth=user.auth,
expect_errors=True
)
assert res.status_code == 201
assert res.json['data']['attributes']['owner'] == user._id
# Some fields aren't writable; make sure user can't set these
assert res.json['data']['attributes']['client_id'] != sample_data['data']['attributes']['client_id']
assert res.json['data']['attributes']['client_secret'] != sample_data['data']['attributes']['client_secret']
def test_creating_application_fails_if_callbackurl_fails_validation(
self, app, user, url, sample_data):
data = copy.copy(sample_data)
data['data']['attributes']['callback_url'] = 'itunes:///invalid_url_of_doom'
res = app.post_json_api(
url, data, auth=user.auth, expect_errors=True
)
assert res.status_code == 400
def test_field_content_is_sanitized_upon_submission(
self, app, user, url, sample_data):
bad_text = '<a href=\'http://sanitized.name\'>User_text</a>'
cleaned_text = sanitize.strip_html(bad_text)
payload = copy.copy(sample_data)
payload['data']['attributes']['name'] = bad_text
payload['data']['attributes']['description'] = bad_text
res = app.post_json_api(url, payload, auth=user.auth)
assert res.status_code == 201
assert res.json['data']['attributes']['name'] == cleaned_text
def test_created_applications_show_up_in_api_list(
self, app, user, user_app, url, sample_data):
res = app.post_json_api(url, sample_data, auth=user.auth)
assert res.status_code == 201
res = app.get(url, auth=user.auth)
assert len(res.json['data']) == ApiOAuth2Application.objects.count()
def test_returns_401_when_not_logged_in(self, app, url):
res = app.get(url, expect_errors=True)
assert res.status_code == 401
| apache-2.0 | 4,197,137,531,587,055,000 | 34.257576 | 116 | 0.598195 | false | 3.708367 | true | false | false |
AssistiveRoboticsUNH/threespace_ros | scripts/canal_surface_test/3joint2mat.py | 1 | 6960 | #!/usr/env/python
from __future__ import print_function
import rosbag
import sys
import scipy.io as sio
from os import listdir
from os.path import isfile, join
LABELS = {"circle": 0,
"square": 1,
"triangle": 2,
"complex": 3,
"swiperight": 4,
"swipeleft": 5,
"rotateright": 6,
"rotateleft": 7,
"scupcw": 8,
"scupccw": 9
}
SUBJECTS = ['s1']
if len(sys.argv) == 2:
directory = sys.argv[1]
else:
directory = 'exercises/'
data = []
config_files = [f for f in listdir(directory) if (isfile(join(directory, f))) and 'config' in f]
print(config_files)
for cf in config_files:
tokens = cf.strip('\n').split('_')
print(tokens)
subject = tokens[1]
exercise = tokens[0]
print(subject)
exercise_files = [f for f in listdir(directory) if (isfile(join(directory, f)))
and ('_' + subject + '.' in f) and ('.bag' in f) and (exercise in f)]
print(exercise_files)
# read topics from config file
with open(directory + cf) as cf_:
topics = cf_.readline().replace("\n", '').split(' ')
for i in range(0, len(topics)):
topics[i] = '/' + topics[i] + '_data_vec'
topics.append('/tf')
full_data = []
data_compressed = []
for xf in exercise_files:
print(xf)
tk = xf.strip('.bag').split('_')
ex = tk[0]
rep = tk[1]
topics_counter = [0 for i in range(len(topics))]
bag = rosbag.Bag(directory + xf)
hand_imu = []
lower_imu = []
upper_imu = []
tf_upper = []
tf_lower = []
tf_hand = []
start = -1
# get tf from bag and add the readings to the appropriate list
for topic, msg, t in bag.read_messages(topics=[topics[i] for i in range(0, len(topics))]):
if start == -1:
start = t.to_nsec() / 1000000.0
time = t.to_nsec() / 1000000.0 - start
# print('%f' % time)
if topic == '/tf':
transforms = msg.transforms
for tr in transforms:
if tr.child_frame_id == 'upper':
tf_upper.append([
time,
tr.transform.translation.x,
tr.transform.translation.y,
tr.transform.translation.z,
tr.transform.rotation.x,
tr.transform.rotation.y,
tr.transform.rotation.z,
tr.transform.rotation.w
])
if tr.child_frame_id == 'lower':
tf_lower.append([
tr.transform.translation.x,
tr.transform.translation.y,
tr.transform.translation.z,
tr.transform.rotation.x,
tr.transform.rotation.y,
tr.transform.rotation.z,
tr.transform.rotation.w
])
if tr.child_frame_id == 'hand':
tf_hand.append([
tr.transform.translation.x,
tr.transform.translation.y,
tr.transform.translation.z,
tr.transform.rotation.x,
tr.transform.rotation.y,
tr.transform.rotation.z,
tr.transform.rotation.w,
])
elif topic == topics[0]:
upper_imu.append([msg.quat.quaternion.x,
msg.quat.quaternion.y,
msg.quat.quaternion.z,
msg.quat.quaternion.w,
msg.accX,
msg.accY,
msg.accZ,
msg.gyroX,
msg.gyroY,
msg.gyroZ,
msg.comX,
msg.comY,
msg.comZ
])
elif topic == topics[1]:
lower_imu.append([msg.quat.quaternion.x,
msg.quat.quaternion.y,
msg.quat.quaternion.z,
msg.quat.quaternion.w,
msg.accX,
msg.accY,
msg.accZ,
msg.gyroX,
msg.gyroY,
msg.gyroZ,
msg.comX,
msg.comY,
msg.comZ
])
elif topic == topics[2]:
hand_imu.append([msg.quat.quaternion.x,
msg.quat.quaternion.y,
msg.quat.quaternion.z,
msg.quat.quaternion.w,
msg.accX,
msg.accY,
msg.accZ,
msg.gyroX,
msg.gyroY,
msg.gyroZ,
msg.comX,
msg.comY,
msg.comZ
])
minlen = min(len(hand_imu), len(lower_imu))
minlen = min(len(upper_imu), minlen)
minlen = min(len(tf_hand), minlen)
minlen = min(len(tf_lower), minlen)
minlen = min(len(tf_upper), minlen)
data_compressed_ex = []
j = 0
for i in range(minlen):
temp = []
for j in tf_upper[i]:
temp.append(j)
for j in tf_lower[i]:
temp.append(j)
for j in tf_hand[i]:
temp.append(j)
for j in upper_imu[i]:
temp.append(j)
for j in lower_imu[i]:
temp.append(j)
for j in hand_imu[i]:
temp.append(j)
temp.append(LABELS.get(ex))
data_compressed_ex.append(temp)
if 'slow' not in xf:
data_compressed.append(temp)
print(len(data_compressed))
print(len(data_compressed_ex))
sio.savemat('matfiles/' + ex + '_' + rep + '_' + subject + '_data.mat',
mdict={'data': data_compressed_ex})
print('******************')
sio.savemat('matfiles/' + exercise + '_full_data.mat', mdict={'data': data_compressed})
| gpl-3.0 | 6,508,986,143,650,768,000 | 38.101124 | 98 | 0.393822 | false | 4.481648 | false | false | false |
lutris/lutris | lutris/services/steamwindows.py | 1 | 2239 | from gettext import gettext as _
from gi.repository import Gio
from lutris.database.games import get_game_by_field, get_games
from lutris.game import Game
from lutris.services.steam import SteamGame, SteamService
from lutris.util.log import logger
from lutris.util.strings import slugify
STEAM_INSTALLER = "steam-wine" # Lutris installer used to setup the Steam client
class SteamWindowsGame(SteamGame):
service = "steamwindows"
installer_slug = "steamwindows"
runner = "wine"
class SteamWindowsService(SteamService):
id = "steamwindows"
name = _("Steam for Windows")
runner = "wine"
game_class = SteamWindowsGame
def generate_installer(self, db_game, steam_db_game):
"""Generate a basic Steam installer"""
steam_game = Game(steam_db_game["id"])
return {
"name": db_game["name"],
"version": self.name,
"slug": slugify(db_game["name"]) + "-" + self.id,
"game_slug": slugify(db_game["name"]),
"runner": self.runner,
"appid": db_game["appid"],
"script": {
"requires": "steam-wine",
"game": {
"exe": steam_game.config.game_config["exe"],
"args": "-no-cef-sandbox -applaunch %s" % db_game["appid"],
"prefix": steam_game.config.game_config["prefix"],
}
}
}
def install(self, db_game):
steam_game = get_game_by_field("steam-wine", "installer_slug")
if not steam_game:
logger.error("Steam for Windows is not installed in Lutris")
return
appid = db_game["appid"]
db_games = get_games(filters={"service_id": appid, "installed": "1", "service": self.id})
existing_game = self.match_existing_game(db_games, appid)
if existing_game:
logger.debug("Found steam game: %s", existing_game)
game = Game(existing_game.id)
game.save()
return
application = Gio.Application.get_default()
application.show_installer_window(
[self.generate_installer(db_game, steam_game)],
service=self,
appid=appid
)
| gpl-3.0 | 5,146,800,632,597,032,000 | 33.446154 | 97 | 0.58017 | false | 3.622977 | false | false | false |
jaredlunde/cargo-orm | unit_bench/models.py | 1 | 5637 | from cargo import Model, db, Query
from cargo.fields import *
from cargo.relationships import ForeignKey
from cargo.builder import Plan, drop_schema
from vital.debug import Compare, line, banner
class Users(Model):
ordinal = ('uid', 'username', 'password', 'join_date', 'key')
uid = UID()
username = Username(index=True, unique=True, not_null=True)
password = Password(minlen=8, not_null=True)
join_date = Timestamp(default=Timestamp.now())
key = Key(256, index=True, unique=True)
def __repr__(self):
return '<' + str(self.username.value) + ':' + str(self.uid.value) + '>'
class Posts(Model):
ordinal = ('uid', 'author', 'title', 'slug', 'content', 'tags',
'post_date')
uid = UID()
title = Text(500, not_null=True)
author = ForeignKey('Users.uid',
# relation='posts',
index=True,
not_null=True)
slug = Slug(UniqueSlugFactory(8),
unique=True,
index=True,
not_null=True)
content = Text(not_null=True)
tags = Array(Text(), index='gin')
post_date = Timestamp(default=Timestamp.now())
def build(schema):
db.open(schema=schema)
drop_schema(db, schema, cascade=True, if_exists=True)
Plan(Users()).execute()
Plan(Posts()).execute()
def teardown(schema):
drop_schema(db, schema, cascade=True)
if __name__ == '__main__':
#: Build
schema = '_cargo_testing'
build(schema)
ITERATIONS = 5E4
#: Play
c = Compare(Users, Posts, name='Initialization')
c.time(ITERATIONS)
User = Users()
Post = Posts()
c = Compare(User.copy, Post.copy, name='Copying')
c.time(ITERATIONS)
c = Compare(User.clear_copy, Post.clear_copy, name='Clear Copying')
c.time(ITERATIONS)
Jared = User.add(username='Jared',
password='somepassword',
key=User.key.generate())
Katie = User.add(username='Katie',
password='somepassword',
key=User.key.generate())
Cream = User(username='Cream',
password='somepassword',
key=User.key.generate())
Post.add(title='Check out this cool new something.',
slug='Check out this cool new something.',
author=Jared.uid,
content='Lorem ipsum dolor.',
tags=['fishing boats', 'cream-based sauces'])
Post.add(title='Check out this cool new something 2.',
slug='Check out this cool new something 2.',
author=Jared.uid,
content='Lorem ipsum dolor 2.',
tags=['fishing boats', 'cream-based sauces'])
banner('Users Iterator')
user = User.iter()
print('Next:', next(user))
print('Next:', next(user))
print('Next:', next(user))
banner('Users')
for user in User:
print(user)
banner('Posts')
for x, post in enumerate(Post):
if x > 0:
line()
print('Title:', post.title)
print('Slug:', post.slug)
print('Author:', post.author.pull().username)
print('Content:', post.content)
banner('Posts by Jared')
for x, post in enumerate(Jared.posts.pull()):
if x > 0:
line()
print('Title:', post.title)
print('Slug:', post.slug)
print('Author:', post.author.pull().username)
print('Content:', post.content)
banner('One post by Jared')
Jared.posts.pull_one()
print('Title:', Jared.posts.title)
print('Slug:', Jared.posts.slug)
print('Author:', Jared.posts.author.pull().username)
print('Content:', Jared.posts.content)
#: Fetchall
def orm_run():
return User.select()
def raw_exec():
cur = User.client.connection.cursor()
cur.execute('SET search_path TO _cargo_testing;'
'SELECT * FROM users WHERE true;')
ret = cur.fetchall()
return ret
def orm_exec():
cur = User.client.cursor()
cur.execute('SET search_path TO _cargo_testing;'
'SELECT * FROM users WHERE true;')
ret = cur.fetchall()
return ret
Naked = User.copy().naked()
def orm_naked_run():
return Naked.run(Query('SELECT * FROM users WHERE true;'))
def orm_naked_exec():
return Naked.execute('SELECT * FROM users WHERE true;').fetchall()
c = Compare(orm_run,
raw_exec,
orm_exec,
orm_naked_run,
orm_naked_exec,
name='SELECT')
c.time(ITERATIONS)
#: Fetchone
def orm_run():
return User.where(True).get()
def raw_exec():
cur = User.client.connection.cursor()
cur.execute('SET search_path TO _cargo_testing;'
'SELECT * FROM users WHERE true;')
ret = cur.fetchone()
return ret
def orm_exec():
cur = User.client.cursor()
cur.execute('SET search_path TO _cargo_testing;'
'SELECT * FROM users WHERE true;')
ret = cur.fetchone()
return ret
Naked = User.copy().naked()
def orm_naked_run():
q = Query('SELECT * FROM users WHERE true;')
q.one = True
return Naked.run(q)
def orm_naked_exec():
return Naked.execute('SELECT * FROM users WHERE true;').fetchone()
c = Compare(orm_run,
raw_exec,
orm_exec,
orm_naked_run,
orm_naked_exec,
name='GET')
c.time(ITERATIONS)
#: Teardown
teardown(schema)
| mit | -4,515,917,904,017,222,000 | 27.614213 | 79 | 0.552776 | false | 3.821695 | false | false | false |
fcooper8472/useful_scripts | makematch.py | 1 | 1311 | import os
import re
import subprocess
import sys
"""
When called from the Chaste directory, this will make all targets matching a regex pattern passed as a command line
argument.
"""
# Only expect a single extra argument, e.g. python makematch pattern
if len(sys.argv) > 2:
sys.exit("This script expects at most one additional argument.")
# If no argument, print usage
if len(sys.argv) < 2:
sys.exit("Usage: python makematch pattern")
pattern = sys.argv[1]
# Verify that a makefile exists in the current working directory
working_dir = os.getcwd()
make_file = os.path.join(working_dir, 'Makefile')
if not os.path.isfile(make_file):
sys.exit("No Makefile in current working directory - exiting.")
# Get a list containing the target names by invoking 'make help', and split by linebreaks
list_of_targets = subprocess.check_output(['make', 'help']).splitlines()
# Create a make command by matching the pattern to the target
make_command_list = ['nice', '-n19', 'make', '-j4']
for target in list_of_targets:
if re.search(pattern, target):
make_command_list.append(target.replace('... ', ''))
print("Found " + str(len(make_command_list) - 2) + " targets to make:")
for i in range(2, len(make_command_list)):
print('\t' + make_command_list[i])
subprocess.call(make_command_list)
| bsd-3-clause | -2,252,282,689,923,687,700 | 30.97561 | 115 | 0.71167 | false | 3.477454 | false | false | false |
alfa-addon/addon | plugin.video.alfa/servers/archiveorg.py | 1 | 1336 | # -*- coding: utf-8 -*-
# --------------------------------------------------------
# ArchiveOrg connector by Alfa development Group
# --------------------------------------------------------
from core import httptools
from core import jsontools
from core import scrapertools
from platformcode import logger
def test_video_exists(page_url):
logger.info("(page_url='%s')" % page_url)
data = httptools.downloadpage(page_url)
if data.code == 404:
return False, "[ArchiveOrg] El archivo no existe o ha sido borrado"
return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
logger.info("url=" + page_url)
host = "https://archive.org/"
video_urls = []
data = httptools.downloadpage(page_url).data
json = jsontools.load( scrapertools.find_single_match(data, """js-play8-playlist" type="hidden" value='([^']+)""") )
    # subtitles
subtitle = ""
for subtitles in json[0]["tracks"]:
if subtitles["kind"] == "subtitles": subtitle = host + subtitles["file"]
# sources
for url in json[0]["sources"]:
video_urls.append(['%s %s[ArchiveOrg]' %(url["label"], url["type"]), host + url["file"], 0, subtitle])
video_urls.sort(key=lambda it: int(it[0].split("p ", 1)[0]))
return video_urls
| gpl-3.0 | -3,825,317,422,124,038,700 | 37.294118 | 120 | 0.568862 | false | 3.711111 | false | false | false |
steder/maroonmpi | test/mpe/mpe-log.py | 1 | 1752 | #mpe-log.py
import mpi
from mpi import mpe
import time
import Numeric
rank,size = mpi.init()
mpe.init_log()
assert (size == 2), "This example requires 2 processors to run properly!"
runEventStart = mpe.log_get_event_number()
runEventEnd = mpe.log_get_event_number()
sendEventStart = mpe.log_get_event_number()
sendEventEnd = mpe.log_get_event_number()
recvEventStart = mpe.log_get_event_number()
recvEventEnd = mpe.log_get_event_number()
sleepEventStart = mpe.log_get_event_number()
sleepEventEnd = mpe.log_get_event_number()
mpe.describe_state( runEventStart, runEventEnd, "Full Runtime", "blue" )
mpe.describe_state( sendEventStart, sendEventEnd, "send", "red" )
mpe.describe_state( recvEventStart, recvEventEnd, "recv", "green" )
mpe.describe_state( sleepEventStart, sleepEventEnd, "sleep", "turquoise" )
mpe.log_event( runEventStart, rank, "starting run")
# Let's send and receive 100 messages, logging start and end events for each.
for i in xrange(100):
if( rank == 0 ):
        # Generate 10000 numbers, send them to rank 1
mpe.log_event( sendEventStart, i, "start send" )
data = Numeric.array( range(10000), Numeric.Int32 )
mpi.send( data, 10000, mpi.MPI_INT, 1, i, mpi.MPI_COMM_WORLD )
mpe.log_event( sendEventEnd, i, "end send")
else:
mpe.log_event( recvEventStart, i, "start recv" )
rdata = mpi.recv( 10000, mpi.MPI_INT, 0, i, mpi.MPI_COMM_WORLD )
mpe.log_event( recvEventEnd, i, "end recv" )
if( i == 50 ):
mpe.log_event( sleepEventStart, i, "start sleep" )
time.sleep(1)
mpi.barrier( mpi.MPI_COMM_WORLD )
mpe.log_event( sleepEventEnd, i, "end sleep")
mpe.log_event( runEventEnd, rank, "stopping run")
mpe.finish_log("test1")
mpi.finalize()
| gpl-2.0 | 8,800,083,652,024,560,000 | 34.755102 | 74 | 0.680936 | false | 2.929766 | false | false | false |
spl0k/supysonic | supysonic/api/jukebox.py | 1 | 2644 | # This file is part of Supysonic.
# Supysonic is a Python implementation of the Subsonic server API.
#
# Copyright (C) 2019 Alban 'spl0k' Féron
#
# Distributed under terms of the GNU AGPLv3 license.
import uuid
from flask import current_app, request
from pony.orm import ObjectNotFound
from ..daemon import DaemonClient
from ..daemon.exceptions import DaemonUnavailableError
from ..db import Track
from . import api_routing
from .exceptions import GenericError, MissingParameter, Forbidden
@api_routing("/jukeboxControl")
def jukebox_control():
if not request.user.jukebox and not request.user.admin:
raise Forbidden()
action = request.values["action"]
index = request.values.get("index")
offset = request.values.get("offset")
id = request.values.getlist("id")
gain = request.values.get("gain")
if action not in (
"get",
"status",
"set",
"start",
"stop",
"skip",
"add",
"clear",
"remove",
"shuffle",
"setGain",
):
raise GenericError("Unknown action")
args = ()
if action == "set":
if id:
args = [uuid.UUID(i) for i in id]
elif action == "skip":
if not index:
raise MissingParameter("index")
if offset:
args = (int(index), int(offset))
else:
args = (int(index), 0)
elif action == "add":
if not id:
raise MissingParameter("id")
else:
args = [uuid.UUID(i) for i in id]
elif action == "remove":
if not index:
raise MissingParameter("index")
else:
args = (int(index),)
elif action == "setGain":
if not gain:
raise MissingParameter("gain")
else:
args = (float(gain),)
try:
status = DaemonClient(current_app.config["DAEMON"]["socket"]).jukebox_control(
action, *args
)
except DaemonUnavailableError:
raise GenericError("Jukebox unavaliable")
rv = dict(
currentIndex=status.index,
playing=status.playing,
gain=status.gain,
position=status.position,
)
if action == "get":
playlist = []
for path in status.playlist:
try:
playlist.append(Track.get(path=path))
except ObjectNotFound:
pass
rv["entry"] = [
t.as_subsonic_child(request.user, request.client) for t in playlist
]
return request.formatter("jukeboxPlaylist", rv)
else:
return request.formatter("jukeboxStatus", rv)
| agpl-3.0 | 3,803,369,992,285,203,500 | 25.43 | 86 | 0.573969 | false | 4.004545 | false | false | false |
tyb0807/angr | angr/analyses/cfg/cfg_node.py | 1 | 10945 | import traceback
import pyvex
import archinfo
from ...codenode import BlockNode, HookNode
from ...engines.successors import SimSuccessors
class CFGNodeCreationFailure(object):
"""
This class contains additional information for whenever creating a CFGNode failed. It includes a full traceback
and the exception messages.
"""
__slots__ = ['short_reason', 'long_reason', 'traceback']
def __init__(self, exc_info=None, to_copy=None):
if to_copy is None:
e_type, e, e_traceback = exc_info
self.short_reason = str(e_type)
self.long_reason = repr(e)
self.traceback = traceback.format_exception(e_type, e, e_traceback)
else:
self.short_reason = to_copy.short_reason
self.long_reason = to_copy.long_reason
self.traceback = to_copy.traceback
def __hash__(self):
return hash((self.short_reason, self.long_reason, self.traceback))
class CFGNode(object):
"""
    This class represents a single node in a CFG.
"""
__slots__ = ( 'addr', 'simprocedure_name', 'syscall_name', 'size', 'no_ret', 'is_syscall', 'function_address',
'block_id', 'thumb', 'byte_string', 'name', 'instruction_addrs', 'irsb', 'has_return', '_cfg',
)
def __init__(self,
addr,
size,
cfg,
simprocedure_name=None,
is_syscall=False,
no_ret=False,
function_address=None,
block_id=None,
irsb=None,
instruction_addrs=None,
thumb=False,
byte_string=None):
"""
Note: simprocedure_name is not used to recreate the SimProcedure object. It's only there for better
__repr__.
"""
self.addr = addr
self.simprocedure_name = simprocedure_name
self.size = size
self.no_ret = no_ret
self.is_syscall = is_syscall
self._cfg = cfg
self.function_address = function_address
self.block_id = block_id
self.thumb = thumb
self.byte_string = byte_string
self.name = simprocedure_name
if self.name is None:
sym = cfg.project.loader.find_symbol(addr)
if sym is not None:
self.name = sym.name
if self.name is None and isinstance(cfg.project.arch, archinfo.ArchARM) and addr & 1:
sym = cfg.project.loader.find_symbol(addr - 1)
if sym is not None:
self.name = sym.name
if function_address and self.name is None:
sym = cfg.project.loader.find_symbol(function_address)
if sym is not None:
self.name = sym.name
if self.name is not None:
offset = addr - function_address
self.name = "%s%+#x" % (self.name, offset)
self.instruction_addrs = instruction_addrs if instruction_addrs is not None else tuple()
if not instruction_addrs and not self.is_simprocedure:
# We have to collect instruction addresses by ourselves
if irsb is not None:
self.instruction_addrs = tuple(s.addr + s.delta for s in irsb.statements if type(s) is pyvex.IRStmt.IMark) # pylint:disable=unidiomatic-typecheck
self.irsb = irsb
self.has_return = False
@property
def successors(self):
return self._cfg.get_successors(self)
@property
def predecessors(self):
return self._cfg.get_predecessors(self)
@property
def accessed_data_references(self):
if self._cfg.sort != 'fast':
raise ValueError("Memory data is currently only supported in CFGFast.")
for instr_addr in self.instruction_addrs:
if instr_addr in self._cfg.insn_addr_to_memory_data:
yield self._cfg.insn_addr_to_memory_data[instr_addr]
@property
def is_simprocedure(self):
return self.simprocedure_name is not None
@property
def callstack_key(self):
# A dummy stub for the future support of context sensitivity in CFGFast
return None
def copy(self):
c = CFGNode(self.addr,
self.size,
self._cfg,
simprocedure_name=self.simprocedure_name,
no_ret=self.no_ret,
function_address=self.function_address,
block_id=self.block_id,
irsb=self.irsb,
instruction_addrs=self.instruction_addrs,
thumb=self.thumb,
byte_string=self.byte_string,
)
return c
def __repr__(self):
s = "<CFGNode "
if self.name is not None:
s += self.name + " "
s += hex(self.addr)
if self.size is not None:
s += "[%d]" % self.size
s += ">"
return s
def __eq__(self, other):
if isinstance(other, SimSuccessors):
raise ValueError("You do not want to be comparing a SimSuccessors instance to a CFGNode.")
if not type(other) is CFGNode:
return False
return (self.addr == other.addr and
self.size == other.size and
self.simprocedure_name == other.simprocedure_name
)
def __hash__(self):
return hash((self.addr, self.simprocedure_name, ))
def to_codenode(self):
if self.is_simprocedure:
return HookNode(self.addr, self.size, self.simprocedure_name)
return BlockNode(self.addr, self.size, thumb=self.thumb)
@property
def block(self):
if self.is_simprocedure or self.is_syscall:
return None
project = self._cfg.project # everything in angr is connected with everything...
b = project.factory.block(self.addr, size=self.size, opt_level=self._cfg._iropt_level)
return b
class CFGNodeA(CFGNode):
"""
The CFGNode that is used in CFGAccurate.
"""
__slots__ = [ 'input_state', 'looping_times', 'callstack', 'depth', 'final_states', 'creation_failure_info',
'return_target', 'syscall', '_callstack_key',
]
def __init__(self,
addr,
size,
cfg,
simprocedure_name=None,
no_ret=False,
function_address=None,
block_id=None,
irsb=None,
instruction_addrs=None,
thumb=False,
byte_string=None,
callstack=None,
input_state=None,
final_states=None,
syscall_name=None,
looping_times=0,
is_syscall=False,
syscall=None,
depth=None,
callstack_key=None,
creation_failure_info=None,
):
super(CFGNodeA, self).__init__(addr, size, cfg,
simprocedure_name=simprocedure_name,
is_syscall=is_syscall,
no_ret=no_ret,
function_address=function_address,
block_id=block_id,
irsb=irsb,
instruction_addrs=instruction_addrs,
thumb=thumb,
byte_string=byte_string,
)
self.callstack = callstack
self.input_state = input_state
self.syscall_name = syscall_name
self.looping_times = looping_times
self.syscall = syscall
self.depth = depth
self.creation_failure_info = None
if creation_failure_info is not None:
self.creation_failure_info = CFGNodeCreationFailure(creation_failure_info)
self._callstack_key = self.callstack.stack_suffix(self._cfg.context_sensitivity_level) \
if self.callstack is not None else callstack_key
self.final_states = [ ] if final_states is None else final_states
# If this CFG contains an Ijk_Call, `return_target` stores the returning site.
# Note: this is regardless of whether the call returns or not. You should always check the `no_ret` property if
# you are using `return_target` to do some serious stuff.
self.return_target = None
@property
def callstack_key(self):
return self._callstack_key
@property
def creation_failed(self):
return self.creation_failure_info is not None
def downsize(self):
"""
Drop saved states.
"""
self.input_state = None
self.final_states = [ ]
def __repr__(self):
s = "<CFGNodeA "
if self.name is not None:
s += self.name + " "
s += hex(self.addr)
if self.size is not None:
s += "[%d]" % self.size
if self.looping_times > 0:
s += " - %d" % self.looping_times
if self.creation_failure_info is not None:
s += ' - creation failed: {}'.format(self.creation_failure_info.long_reason)
s += ">"
return s
def __eq__(self, other):
if isinstance(other, SimSuccessors):
raise ValueError("You do not want to be comparing a SimSuccessors instance to a CFGNode.")
if not isinstance(other, CFGNodeA):
return False
return (self.callstack_key == other.callstack_key and
self.addr == other.addr and
self.size == other.size and
self.looping_times == other.looping_times and
self.simprocedure_name == other.simprocedure_name
)
def __hash__(self):
return hash((self.callstack_key, self.addr, self.looping_times, self.simprocedure_name, self.creation_failure_info))
def copy(self):
return CFGNodeA(
self.addr,
self.size,
self._cfg,
simprocedure_name=self.simprocedure_name,
no_ret=self.no_ret,
function_address=self.function_address,
block_id=self.block_id,
irsb=self.irsb,
instruction_addrs=self.instruction_addrs,
thumb=self.thumb,
byte_string=self.byte_string,
callstack=self.callstack,
input_state=self.input_state,
syscall_name=self.syscall_name,
looping_times=self.looping_times,
is_syscall=self.is_syscall,
syscall=self.syscall,
depth=self.depth,
final_states=self.final_states[::],
callstack_key=self.callstack_key,
)
| bsd-2-clause | -1,370,739,727,582,931,200 | 34.420712 | 162 | 0.542074 | false | 4.166349 | false | false | false |
rwightman/tensorflow-litterbox | litterbox/sdc_export_graph.py | 1 | 8142 | # Copyright (C) 2016 Ross Wightman. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# ==============================================================================
"""
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import pandas as pd
import os
from copy import deepcopy
from fabric import util
from models import ModelSdc
from processors import ProcessorSdc
from collections import defaultdict
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
'root_network', 'resnet_v1_50',
"""Either resnet_v1_50, resnet_v1_101, resnet_v1_152, inception_resnet_v2, nvidia_sdc""")
tf.app.flags.DEFINE_integer(
'top_version', 5,
"""Top level network version, specifies output layer variations. See model code.""")
tf.app.flags.DEFINE_boolean(
'bayesian', False, """Activate dropout layers for inference.""")
tf.app.flags.DEFINE_integer(
'samples', 0, """Activate dropout layers for inference.""")
tf.app.flags.DEFINE_string(
'checkpoint_path', '', """Checkpoint file for model.""")
tf.app.flags.DEFINE_string(
'ensemble_path', '', """CSV file with ensemble specification. Use as alternative to single model checkpoint.""")
tf.app.flags.DEFINE_string(
'name', 'model', """Name prefix for outputs of exported artifacts.""")
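# Illustrative ensemble CSV layout (an assumption inferred from the columns consumed in
# main() below; the spreadsheets actually used with this repo may differ):
_EXAMPLE_ENSEMBLE_CSV = """\
root_network,top_version,image_norm,image_size,image_aspect,checkpoint_path,weight
resnet_v1_50,5,default,224,fit,./output/run1/model.ckpt-100000,0.5
inception_resnet_v2,5,default,299,fit,./output/run2/model.ckpt-120000,0.5
"""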
def _weighted_mean(outputs_list, weights_tensor):
assert isinstance(outputs_list[0], tf.Tensor)
print(outputs_list)
outputs_tensor = tf.concat(1, outputs_list)
print('outputs concat', outputs_tensor.get_shape())
if len(outputs_list) > 1:
weighted_outputs = outputs_tensor * weights_tensor
print('weighted outputs ', weighted_outputs.get_shape())
outputs_tensor = tf.reduce_mean(weighted_outputs)
else:
outputs_tensor = tf.squeeze(outputs_tensor)
return outputs_tensor
def _merge_outputs(outputs, weights):
assert outputs
merged = defaultdict(list)
weights_tensor = tf.pack(weights)
print('weights ', weights_tensor.get_shape())
# recombine multiple model outputs by dict key or list position under output name based dict
if isinstance(outputs[0], dict):
for o in outputs:
for name, tensor in o.items():
merged['output_%s' % name].append(tensor)
elif isinstance(outputs[0], list):
for o in outputs:
for index, tensor in enumerate(o):
merged['output_%d' % index].append(tensor)
else:
merged['output'] = outputs
reduced = {name: _weighted_mean(value_list, weights_tensor) for name, value_list in merged.items()}
for k, v in reduced.items():
print(k, v, v.get_shape())
return reduced
def build_export_graph(models, batch_size=1, export_scope=''):
assert models
inputs = tf.placeholder(tf.uint8, [None, None, 3], name='input_placeholder')
print("Graph Inputs: ")
print(inputs.name, inputs.get_shape())
with tf.device('/gpu:0'):
inputs = tf.cast(inputs, tf.float32)
inputs = tf.div(inputs, 255)
input_tensors = [inputs, tf.zeros(shape=()), tf.constant('', dtype=tf.string)]
model_outputs_list = []
weights_list = []
for m in models:
with tf.variable_scope(m['name'], values=input_tensors):
model, processor = m['model'], m['processor']
processed_inputs = processor.process_example(input_tensors, mode='pred')
if batch_size > 1:
processed_inputs = [tf.gather(tf.expand_dims(x, 0), [0] * batch_size) for x in processed_inputs]
processed_inputs = processor.reshape_batch(processed_inputs, batch_size=batch_size)
model_outputs = model.build_tower(
processed_inputs[0], is_training=False, summaries=False)
model_outputs_list += [model.get_predictions(model_outputs, processor)]
weights_list += [m['weight']]
merged_outputs = _merge_outputs(model_outputs_list, weights_list)
print("Graph Outputs: ")
outputs = []
for name, output in merged_outputs.items():
outputs += [tf.identity(output, name)]
[print(x.name, x.get_shape()) for x in outputs]
return inputs, outputs
def main(_):
util.check_tensorflow_version()
assert os.path.isfile(FLAGS.checkpoint_path) or os.path.isfile(FLAGS.ensemble_path)
model_args_list = []
if FLAGS.checkpoint_path:
model_args_list.append(
{
'root_network': FLAGS.root_network,
'top_version': FLAGS.top_version,
'image_norm': FLAGS.image_norm,
'image_size': FLAGS.image_size,
'image_aspect': FLAGS.image_aspect,
'checkpoint_path': FLAGS.checkpoint_path,
'bayesian': FLAGS.bayesian,
'weight': 1.0,
}
)
else:
ensemble_df = pd.DataFrame.from_csv(FLAGS.ensemble_path, index_col=None)
model_args_list += ensemble_df.to_dict('records')
model_params_common = {
'outputs': {
'steer': 1,
# 'xyz': 2,
},
}
model_list = []
for i, args in enumerate(model_args_list):
print(args)
model_name = 'model_%d' % i
model_params = deepcopy(model_params_common)
model_params['network'] = args['root_network']
model_params['version'] = args['top_version']
model_params['bayesian'] = FLAGS.bayesian
model = ModelSdc(params=model_params)
processor_params = {}
processor_params['image_norm'] = args['image_norm']
processor_params['image_size'] = args['image_size']
processor_params['image_aspect'] = args['image_aspect']
processor = ProcessorSdc(params=processor_params)
model_list.append({
'model': model,
'processor': processor,
'weight': args['weight'],
'name': model_name,
'checkpoint_path': args['checkpoint_path']
})
name_prefix = FLAGS.name
with tf.Graph().as_default() as g:
batch_size = 1 if not FLAGS.samples else FLAGS.samples
build_export_graph(models=model_list, batch_size=batch_size)
model_variables = tf.contrib.framework.get_model_variables()
saver = tf.train.Saver(model_variables)
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
sess.run(init_op)
g_def = g.as_graph_def(add_shapes=True)
tf.train.write_graph(g_def, './', name='%s-graph_def.pb.txt' % name_prefix)
for m in model_list:
checkpoint_variable_set = set()
checkpoint_path, global_step = util.resolve_checkpoint_path(m['checkpoint_path'])
if not checkpoint_path:
print('No checkpoint file found at %s' % m['checkpoint_path'])
return
reader = tf.train.NewCheckpointReader(checkpoint_path)
checkpoint_variable_set.update(reader.get_variable_to_shape_map().keys())
variables_to_restore = m['model'].variables_to_restore(
restore_outputs=True,
checkpoint_variable_set=checkpoint_variable_set,
prefix_scope=m['name'])
saver_local = tf.train.Saver(variables_to_restore)
saver_local.restore(sess, checkpoint_path)
print('Successfully loaded model from %s at step=%d.' % (checkpoint_path, global_step))
saver.export_meta_graph('./%s-meta_graph.pb.txt' % name_prefix, as_text=True)
saver.save(sess, './%s-checkpoint' % name_prefix, write_meta_graph=True)
if __name__ == '__main__':
tf.app.run()
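# Usage note (illustrative; the checkpoint path is a placeholder): the export is
# driven entirely by the tf.app.flags defined above, e.g.
#
#   python sdc_export_graph.py --checkpoint_path=./model.ckpt-10000 \
#       --root_network=resnet_v1_50 --top_version=5 --name=sdc
#
# An ensemble export instead passes --ensemble_path pointing at a CSV of
# per-model arguments (see main()).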
| apache-2.0 | -4,987,899,619,452,618,000 | 36.869767 | 116 | 0.605994 | false | 3.871612 | false | false | false |
ulno/micropython-extra-ulno | lib/netrepl/terminal.py | 1 | 4079 | #!/usr/bin/env python3
#
# Terminal program to connect to encrypted netrepl
#
# author: ulno
# create date: 2017-09-16
#
# based on telnet example at http://www.binarytides.com/code-telnet-client-sockets-python/
_debug = "terminal:"
import select, threading
from netrepl import Netrepl_Parser
input_buffer = ""
input_buffer_lock = threading.Lock()
quit_flag = False
# from: https://stackoverflow.com/questions/510357/python-read-a-single-character-from-the-user
# and https://github.com/magmax/python-readchar
# TODO: use whole readchar to support windows?
import tty, sys, termios # raises ImportError if unsupported
# def readchar():
# fd = sys.stdin.fileno()
# old_settings = termios.tcgetattr(fd)
# try:
# tty.setraw(sys.stdin.fileno())
# ch = sys.stdin.read(1)
# finally:
# termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
# return ch
def readchar():
ch = sys.stdin.read(1)
return ch
def char_reader():
# thread reading from stdin into inputbuffer
global input_buffer, input_buffer_lock, quit_flag
while not quit_flag:
ch = readchar()
if ch == "\x1d":
quit_flag = True
ch = ""
elif ch == "\r":
ch = "\r\n"
input_buffer_lock.acquire()
input_buffer += ch
input_buffer_lock.release()
def getChar():
answer = sys.stdin.read(1)
return answer
def main():
global quit_flag, input_buffer_lock, input_buffer
parser = Netrepl_Parser('Connect to netrepl and open an'
'interactive terminal.', debug=_debug)
con = parser.connect()
if _debug is not None:
print(_debug, 'Press ctrl-] to quit.\n')
    print()
print("Try to type help to get an initial help screen.")
# Do not request help-screen, gets too confusing later
# if _debug: print(_debug,'Requesting startscreen.')
# con.send(b"\r\nhelp\r\n")
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
tty.setraw(fd)
input_thread = threading.Thread(target=char_reader)
input_thread.start()
while not quit_flag:
# Check if we received anything via network
# Get the list sockets which are readable
read_sockets, write_sockets, error_sockets = \
select.select([con.socket], [], [], 0.01)
for sock in read_sockets:
# incoming message from remote server
if sock == con.socket:
data = con.receive()
l = len(data)
# TODO: figure out how to detect connection close
# #print("recvd:",data)
# if not data:
# print('\nterminal: Connection closed.')
# sys.exit()
# else:
if l > 0:
# print data
try:
sys.stdout.write(bytes(data[0:l]).decode())
except:
if _debug:
print("\r\n{} Got some weird data of len "
"{}: >>{}<<\r\n".format(_debug, len(data), data))
# print("data:", str(data[0:l]))
sys.stdout.flush()
if len(input_buffer) > 0:
# user entered a message
input_buffer_lock.acquire()
send_buffer = input_buffer.encode()
input_buffer = ""
input_buffer_lock.release()
# msg = sys.stdin.readline().strip()+'\r\n'
# print("\r\nmsg {} <<\r\n".format(send_buffer))
con.send(send_buffer)
# cs.send(send_buffer+b'\r\n')
# cs.send(send_buffer+b'!')
    input_thread.join() # finish input_thread
if _debug: print("\r\n{} Closing connection.\r".format(_debug))
con.repl_normal() # normal repl
con.close(report=True)
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
if _debug: print("\r\n{} Connection closed.\r\n".format(_debug))
# main function
if __name__ == "__main__":
main()
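# Usage note (the exact CLI arguments are defined by Netrepl_Parser, so the
# flags below are an assumption): run the script directly, e.g.
#
#   python3 terminal.py --help
#
# and press ctrl-] to leave the interactive session (handled in char_reader()).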
| mit | -93,694,822,450,537,700 | 29.440299 | 95 | 0.558715 | false | 3.742202 | false | false | false |
miamitops/SWIMS | sites/all/modules/olfp/lib/OL-FeaturePopups/doc/_scripts/split-classes.py | 7 | 1780 | #!/usr/bin/env python
import re
import os
import sys
import shutil
def splitClasses (inputFilename, outputDirectory):
print "Splitting classes, input: %s output: %s " % (inputFilename, outputDirectory)
if not os.path.isfile(inputFilename):
print "\nProcess aborted due to errors."
sys.exit('ERROR: Input file "%s" does not exist!' % inputFilename)
if os.path.isdir(outputDirectory):
try:
shutil.rmtree(outputDirectory, False)
except Exception, E:
print "\nAbnormal termination: Unable to clear or create working folder \"%s\"," % outputDirectory
print " check if there is a process that blocks the folder."
sys.exit("ERROR: %s" % E)
# if not os.path.exists(outputDirectory):
# os.makedirs(outputDirectory)
pathName, fileName = os.path.split(inputFilename)
pathName = os.path.join(outputDirectory, pathName.replace("../",""))
if not os.path.exists(pathName):
print "Creating folder:", pathName
os.makedirs(pathName)
fileName, fileExt = os.path.splitext(fileName)
fIn = open(inputFilename)
sourceIn = fIn.read()
fIn.close()
sourceOut = re.split('/\*\* *\r*\n *\* *Class:', sourceIn)
for i, text in enumerate(sourceOut):
if i == 0:
outputFileName = os.path.join(pathName, fileName + fileExt)
else:
outputFileName = os.path.join(pathName, fileName + "-" + format(i) + fileExt)
print "Splited to:", outputFileName
fOut = open(outputFileName,"w")
fOut.write("/**\n * Class:" + text)
fOut.close()
print "Done!"
# -----------------
# main
# -----------------
if __name__ == '__main__':
splitClasses(sys.argv[1], sys.argv[2])
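# Example invocation (paths are placeholders): split each "/** * Class:" block
# of a JavaScript source into numbered files under a working directory:
#
#   python split-classes.py ../lib/FeaturePopups.js _build/classes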
| lgpl-3.0 | 1,821,927,025,806,907,000 | 33.901961 | 110 | 0.602247 | false | 3.755274 | false | false | false |
fxia22/ASM_xf | PythonD/lib/python2.4/idlelib/RemoteDebugger.py | 15 | 11645 | """Support for remote Python debugging.
Some ASCII art to describe the structure:
IN PYTHON SUBPROCESS # IN IDLE PROCESS
#
# oid='gui_adapter'
+----------+ # +------------+ +-----+
| GUIProxy |--remote#call-->| GUIAdapter |--calls-->| GUI |
+-----+--calls-->+----------+ # +------------+ +-----+
| Idb | # /
+-----+<-calls--+------------+ # +----------+<--calls-/
| IdbAdapter |<--remote#call--| IdbProxy |
+------------+ # +----------+
oid='idb_adapter' #
The purpose of the Proxy and Adapter classes is to translate certain
arguments and return values that cannot be transported through the RPC
barrier, in particular frame and traceback objects.
"""
import sys
import types
import rpc
import Debugger
debugging = 0
idb_adap_oid = "idb_adapter"
gui_adap_oid = "gui_adapter"
#=======================================
#
# In the PYTHON subprocess:
frametable = {}
dicttable = {}
codetable = {}
tracebacktable = {}
def wrap_frame(frame):
fid = id(frame)
frametable[fid] = frame
return fid
def wrap_info(info):
"replace info[2], a traceback instance, by its ID"
if info is None:
return None
else:
traceback = info[2]
assert isinstance(traceback, types.TracebackType)
traceback_id = id(traceback)
tracebacktable[traceback_id] = traceback
modified_info = (info[0], info[1], traceback_id)
return modified_info
class GUIProxy:
def __init__(self, conn, gui_adap_oid):
self.conn = conn
self.oid = gui_adap_oid
def interaction(self, message, frame, info=None):
# calls rpc.SocketIO.remotecall() via run.MyHandler instance
# pass frame and traceback object IDs instead of the objects themselves
self.conn.remotecall(self.oid, "interaction",
(message, wrap_frame(frame), wrap_info(info)),
{})
class IdbAdapter:
def __init__(self, idb):
self.idb = idb
#----------called by an IdbProxy----------
def set_step(self):
self.idb.set_step()
def set_quit(self):
self.idb.set_quit()
def set_continue(self):
self.idb.set_continue()
def set_next(self, fid):
frame = frametable[fid]
self.idb.set_next(frame)
def set_return(self, fid):
frame = frametable[fid]
self.idb.set_return(frame)
def get_stack(self, fid, tbid):
##print >>sys.__stderr__, "get_stack(%r, %r)" % (fid, tbid)
frame = frametable[fid]
if tbid is None:
tb = None
else:
tb = tracebacktable[tbid]
stack, i = self.idb.get_stack(frame, tb)
##print >>sys.__stderr__, "get_stack() ->", stack
stack = [(wrap_frame(frame), k) for frame, k in stack]
##print >>sys.__stderr__, "get_stack() ->", stack
return stack, i
def run(self, cmd):
import __main__
self.idb.run(cmd, __main__.__dict__)
def set_break(self, filename, lineno):
msg = self.idb.set_break(filename, lineno)
return msg
def clear_break(self, filename, lineno):
msg = self.idb.clear_break(filename, lineno)
return msg
def clear_all_file_breaks(self, filename):
msg = self.idb.clear_all_file_breaks(filename)
return msg
#----------called by a FrameProxy----------
def frame_attr(self, fid, name):
frame = frametable[fid]
return getattr(frame, name)
def frame_globals(self, fid):
frame = frametable[fid]
dict = frame.f_globals
did = id(dict)
dicttable[did] = dict
return did
def frame_locals(self, fid):
frame = frametable[fid]
dict = frame.f_locals
did = id(dict)
dicttable[did] = dict
return did
def frame_code(self, fid):
frame = frametable[fid]
code = frame.f_code
cid = id(code)
codetable[cid] = code
return cid
#----------called by a CodeProxy----------
def code_name(self, cid):
code = codetable[cid]
return code.co_name
def code_filename(self, cid):
code = codetable[cid]
return code.co_filename
#----------called by a DictProxy----------
def dict_keys(self, did):
dict = dicttable[did]
return dict.keys()
def dict_item(self, did, key):
dict = dicttable[did]
value = dict[key]
value = repr(value)
return value
#----------end class IdbAdapter----------
def start_debugger(rpchandler, gui_adap_oid):
"""Start the debugger and its RPC link in the Python subprocess
Start the subprocess side of the split debugger and set up that side of the
RPC link by instantiating the GUIProxy, Idb debugger, and IdbAdapter
objects and linking them together. Register the IdbAdapter with the
RPCServer to handle RPC requests from the split debugger GUI via the
IdbProxy.
"""
gui_proxy = GUIProxy(rpchandler, gui_adap_oid)
idb = Debugger.Idb(gui_proxy)
idb_adap = IdbAdapter(idb)
rpchandler.register(idb_adap_oid, idb_adap)
return idb_adap_oid
#=======================================
#
# In the IDLE process:
class FrameProxy:
def __init__(self, conn, fid):
self._conn = conn
self._fid = fid
self._oid = "idb_adapter"
self._dictcache = {}
def __getattr__(self, name):
if name[:1] == "_":
raise AttributeError, name
if name == "f_code":
return self._get_f_code()
if name == "f_globals":
return self._get_f_globals()
if name == "f_locals":
return self._get_f_locals()
return self._conn.remotecall(self._oid, "frame_attr",
(self._fid, name), {})
def _get_f_code(self):
cid = self._conn.remotecall(self._oid, "frame_code", (self._fid,), {})
return CodeProxy(self._conn, self._oid, cid)
def _get_f_globals(self):
did = self._conn.remotecall(self._oid, "frame_globals",
(self._fid,), {})
return self._get_dict_proxy(did)
def _get_f_locals(self):
did = self._conn.remotecall(self._oid, "frame_locals",
(self._fid,), {})
return self._get_dict_proxy(did)
def _get_dict_proxy(self, did):
if self._dictcache.has_key(did):
return self._dictcache[did]
dp = DictProxy(self._conn, self._oid, did)
self._dictcache[did] = dp
return dp
class CodeProxy:
def __init__(self, conn, oid, cid):
self._conn = conn
self._oid = oid
self._cid = cid
def __getattr__(self, name):
if name == "co_name":
return self._conn.remotecall(self._oid, "code_name",
(self._cid,), {})
if name == "co_filename":
return self._conn.remotecall(self._oid, "code_filename",
(self._cid,), {})
class DictProxy:
def __init__(self, conn, oid, did):
self._conn = conn
self._oid = oid
self._did = did
def keys(self):
return self._conn.remotecall(self._oid, "dict_keys", (self._did,), {})
def __getitem__(self, key):
return self._conn.remotecall(self._oid, "dict_item",
(self._did, key), {})
def __getattr__(self, name):
##print >>sys.__stderr__, "failed DictProxy.__getattr__:", name
raise AttributeError, name
class GUIAdapter:
def __init__(self, conn, gui):
self.conn = conn
self.gui = gui
def interaction(self, message, fid, modified_info):
##print "interaction: (%s, %s, %s)" % (message, fid, modified_info)
frame = FrameProxy(self.conn, fid)
self.gui.interaction(message, frame, modified_info)
class IdbProxy:
def __init__(self, conn, shell, oid):
self.oid = oid
self.conn = conn
self.shell = shell
def call(self, methodname, *args, **kwargs):
##print "**IdbProxy.call %s %s %s" % (methodname, args, kwargs)
value = self.conn.remotecall(self.oid, methodname, args, kwargs)
##print "**IdbProxy.call %s returns %r" % (methodname, value)
return value
def run(self, cmd, locals):
# Ignores locals on purpose!
seq = self.conn.asyncqueue(self.oid, "run", (cmd,), {})
self.shell.interp.active_seq = seq
def get_stack(self, frame, tbid):
# passing frame and traceback IDs, not the objects themselves
stack, i = self.call("get_stack", frame._fid, tbid)
stack = [(FrameProxy(self.conn, fid), k) for fid, k in stack]
return stack, i
def set_continue(self):
self.call("set_continue")
def set_step(self):
self.call("set_step")
def set_next(self, frame):
self.call("set_next", frame._fid)
def set_return(self, frame):
self.call("set_return", frame._fid)
def set_quit(self):
self.call("set_quit")
def set_break(self, filename, lineno):
msg = self.call("set_break", filename, lineno)
return msg
def clear_break(self, filename, lineno):
msg = self.call("clear_break", filename, lineno)
return msg
def clear_all_file_breaks(self, filename):
msg = self.call("clear_all_file_breaks", filename)
return msg
def start_remote_debugger(rpcclt, pyshell):
"""Start the subprocess debugger, initialize the debugger GUI and RPC link
Request the RPCServer start the Python subprocess debugger and link. Set
up the Idle side of the split debugger by instantiating the IdbProxy,
debugger GUI, and debugger GUIAdapter objects and linking them together.
Register the GUIAdapter with the RPCClient to handle debugger GUI
interaction requests coming from the subprocess debugger via the GUIProxy.
The IdbAdapter will pass execution and environment requests coming from the
Idle debugger GUI to the subprocess debugger via the IdbProxy.
"""
global idb_adap_oid
idb_adap_oid = rpcclt.remotecall("exec", "start_the_debugger",\
(gui_adap_oid,), {})
idb_proxy = IdbProxy(rpcclt, pyshell, idb_adap_oid)
gui = Debugger.Debugger(pyshell, idb_proxy)
gui_adap = GUIAdapter(rpcclt, gui)
rpcclt.register(gui_adap_oid, gui_adap)
return gui
def close_remote_debugger(rpcclt):
"""Shut down subprocess debugger and Idle side of debugger RPC link
Request that the RPCServer shut down the subprocess debugger and link.
Unregister the GUIAdapter, which will cause a GC on the Idle process
debugger and RPC link objects. (The second reference to the debugger GUI
is deleted in PyShell.close_remote_debugger().)
"""
close_subprocess_debugger(rpcclt)
rpcclt.unregister(gui_adap_oid)
def close_subprocess_debugger(rpcclt):
rpcclt.remotecall("exec", "stop_the_debugger", (idb_adap_oid,), {})
def restart_subprocess_debugger(rpcclt):
idb_adap_oid_ret = rpcclt.remotecall("exec", "start_the_debugger",\
(gui_adap_oid,), {})
assert idb_adap_oid_ret == idb_adap_oid, 'Idb restarted with different oid'
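# Call-sequence sketch (a simplified reading of the docstrings above): in the
# IDLE process, start_remote_debugger(rpcclt, pyshell) asks the subprocess to
# run start_debugger(), which registers the IdbAdapter under 'idb_adapter';
# the returned Debugger GUI then drives the subprocess through IdbProxy, while
# interaction() calls travel back through GUIProxy -> GUIAdapter -> GUI.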
| gpl-2.0 | -859,692,191,938,554,100 | 29.564304 | 79 | 0.5617 | false | 3.780844 | false | false | false |
devalbo/roboplexx | roboplexx/rpx_prop.py | 1 | 1658 | __author__ = 'ajb'
import rpx_proto.descriptions_pb2
def boolean_property_description(prop_name, persist=False):
prop_desc = rpx_proto.descriptions_pb2.PropertyDescription()
prop_desc.propertyId = prop_name
prop_desc.propertyLabel = prop_name
prop_desc.propertyType = rpx_proto.descriptions_pb2.BooleanType
prop_desc.persist = persist
return prop_desc
def string_prop_desc(prop_name, persist=False):
prop_desc = rpx_proto.descriptions_pb2.PropertyDescription()
prop_desc.propertyId = prop_name
prop_desc.propertyLabel = prop_name
prop_desc.propertyType = rpx_proto.descriptions_pb2.StringType
prop_desc.persist = persist
return prop_desc
def integer_prop_desc(prop_name, persist=False):
prop_desc = rpx_proto.descriptions_pb2.PropertyDescription()
prop_desc.propertyId = prop_name
prop_desc.propertyLabel = prop_name
prop_desc.propertyType = rpx_proto.descriptions_pb2.IntType
prop_desc.persist = persist
return prop_desc
def ranged_double_property_description(prop_name, min_val, max_val, persist=False):
prop_desc = rpx_proto.descriptions_pb2.PropertyDescription()
prop_desc.propertyId = prop_name
prop_desc.propertyLabel = prop_name
prop_desc.propertyType = rpx_proto.descriptions_pb2.DoubleType
constraint = prop_desc.constraints
prop_desc.persist = persist
constraint.doubleTypeMinVal = min_val
constraint.doubleTypeMaxVal = max_val
return prop_desc
def command_description(cmd_name):
cmd_desc = rpx_proto.descriptions_pb2.CommandDescription()
cmd_desc.commandId = cmd_name
cmd_desc.commandLabel = cmd_name
return cmd_desc
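# Illustrative sketch (not part of the original module and never called; the
# property/command names below are made up):
def _example_descriptions():
    return [
        boolean_property_description('enabled', persist=True),
        ranged_double_property_description('speed', -1.0, 1.0),
        command_description('stop'),
    ]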
| agpl-3.0 | 5,645,255,756,157,430,000 | 36.681818 | 83 | 0.743667 | false | 3.483193 | false | false | false |
juslop/3G-tunnel | pi_sms.py | 1 | 4338 | #!/usr/bin/python
# Huawei 3131 3G dongle python API
# Based on the work of http://chaddyhv.wordpress.com/2012/08/13/programming-and-installing-huawei-hilink-e3131-under-linux/
# Usage: Import in your python project and use: send_sms and read_sms
# Test your Dongle by running this file from console and send smss to you dongle
# python /path_to_file/pi_sms.py
import requests
from BeautifulSoup import BeautifulSoup, NavigableString
import time
import sys
import logging
import datetime
logger = logging.getLogger(__file__)
logger.setLevel(logging.INFO)
SMS_LIST_TEMPLATE = '<request>' +\
'<PageIndex>1</PageIndex>' +\
'<ReadCount>20</ReadCount>' +\
'<BoxType>1</BoxType>' +\
'<SortType>0</SortType>' +\
'<Ascending>0</Ascending>' +\
'<UnreadPreferred>0</UnreadPreferred>' +\
'</request>'
SMS_READ_TEMPLATE = '<request><Index>{index}</Index></request>'
SMS_SEND_TEMPLATE = '<request>' +\
'<Index>-1</Index>' +\
'<Phones><Phone>{phone}</Phone></Phones>' +\
'<Sca></Sca>' +\
'<Content>{content}</Content>' +\
'<Length>{length}</Length>' +\
'<Reserved>1</Reserved>' +\
'<Date>{timestamp}</Date>' +\
'</request>'
BASE_URL = 'http://hi.link/api/'
SMS_READ_HEADER = {'referer': 'http://hi.link/html/smsinbox.html?smsinbox'}
class NotConnected(Exception):
pass
class DeviceNotReachable(Exception):
pass
def _pretty_traffic(n):
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if n < 1024.0:
            return "%3.1f %s" % (n, x)
n /= 1024.0
return 'unknown'
def _connected(s):
r1 = s.get(BASE_URL + 'monitoring/status')
resp = BeautifulSoup(r1.content)
if resp.connectionstatus.string == u'901':
return True
else:
return False
def _sms_count(s):
r1 = s.get(BASE_URL + 'monitoring/check-notifications')
resp = BeautifulSoup(r1.content)
return int(resp.unreadmessage.string)
def _read_sms_list(s):
result = []
r1 = s.post(BASE_URL + 'sms/sms-list', data=SMS_LIST_TEMPLATE, headers=SMS_READ_HEADER)
resp = BeautifulSoup(r1.content)
if int(resp.count.string):
for msg in resp.messages:
if not isinstance(msg, NavigableString):
logger.info('SMS Received. From number: {phone}, Content: {content}'.format(phone=msg.phone.string, content=msg.content.string.encode('ascii', 'replace')))
result.append({'phone':msg.phone.string, 'content':msg.content.string})
_delete_sms(s, msg.find('index').string)
return result
def _delete_sms(s, ind):
r1 = s.post(BASE_URL + 'sms/delete-sms', data=SMS_READ_TEMPLATE.format(index=ind))
def _read_sms(s):
if _connected(s):
if _sms_count(s):
return _read_sms_list(s)
else:
return []
else:
raise NotConnected('No data link')
def info(s):
r1 = s.get(BASE_URL + 'monitoring/traffic-statistics')
resp = BeautifulSoup(r1.content)
upload = int(resp.totalupload.string)
download = int(resp.totaldownload.string)
return 'Modem status: connected: {con}, upload: {up}, download: {down}, total: {tot}'.format(con=_connected(s), up=_pretty_traffic(upload), down=_pretty_traffic(download), tot=_pretty_traffic(upload+download))
def read_sms(s):
try:
return _read_sms(s)
except requests.exceptions.ConnectionError, e:
raise DeviceNotReachable('Unable to connect to device')
def send_sms(s, phone, content):
timestamp = datetime.datetime.now() + datetime.timedelta(seconds=15)
r1 = s.post(BASE_URL + 'sms/send-sms', data=SMS_SEND_TEMPLATE.format(phone=phone, content=content, length=len(content), timestamp=str(timestamp.strftime("%Y-%m-%d %H:%M:%S"))))
resp = BeautifulSoup(r1.content)
if resp.response.string == 'OK':
logger.info('sms sent. Number: {phone}, Content: {content}'.format(phone=phone, content=content))
else:
logger.error('sms sending failed. Response from dongle: {r}'.format(r=resp))
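# Illustrative sketch (not part of the original script and never called; the
# phone number is a placeholder): reuse one requests.Session() per dongle for
# both sending and polling.
def _example_usage():
    s = requests.Session()
    send_sms(s, '+358401234567', 'hello from the pi')
    for sms in read_sms(s):
        logger.info('from %s: %s', sms['phone'], sms['content'])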
if __name__ == "__main__":
logger.addHandler(logging.StreamHandler())
s = requests.Session()
print info(s)
while True:
try:
smss = _read_sms(s)
if smss:
print smss
except requests.exceptions.ConnectionError, e:
print e
sys.exit(1)
time.sleep(10)
| mit | -5,518,781,127,888,677,000 | 32.369231 | 213 | 0.634855 | false | 3.336923 | false | false | false |
samsath/cpcc_backend | src/website/calendar/migrations/0001_initial.py | 2 | 7234 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-03-21 21:05
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Calendar',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(verbose_name='Date')),
('sun_rise', models.TimeField(blank=True, null=True, verbose_name='Sun rise')),
('sun_set', models.TimeField(blank=True, null=True, verbose_name='Sun set')),
('temperature', models.FloatField(blank=True, null=True, verbose_name='Temperature')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True)),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True)),
],
options={
'verbose_name_plural': 'Calendar',
'verbose_name': 'Calendar',
'ordering': ['date'],
},
),
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='Title')),
('is_public', models.BooleanField(default=False, verbose_name='Is Public')),
('is_featured', models.BooleanField(default=False, verbose_name='Is Featured')),
('slug', django_extensions.db.fields.AutoSlugField(blank=True, editable=False, overwrite=True, populate_from=('title',), unique=True)),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True)),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True)),
('event_type', models.CharField(choices=[('closed', 'Closed'), ('notice', 'Notice'), ('event', 'Event'), ('trip', 'Trip')], max_length=255, verbose_name='Event Type')),
('start_time', models.TimeField(blank=True, null=True, verbose_name='Start Time')),
('end_time', models.TimeField(blank=True, null=True, verbose_name='End Time')),
('description', models.TextField(blank=True, null=True, verbose_name='Description')),
('author', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('date', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='calendar.Calendar')),
],
options={
'verbose_name_plural': 'Event',
'verbose_name': 'Event',
'ordering': ['date'],
},
),
migrations.CreateModel(
name='ExtraFields',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='Title')),
('value', models.CharField(max_length=255, verbose_name='Value')),
('sort_value', models.IntegerField(blank=True, null=True, verbose_name='Sort Value')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True)),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True)),
],
options={
'verbose_name_plural': 'Trips',
'verbose_name': 'Extra Field',
'ordering': ['sort_value'],
},
),
migrations.CreateModel(
name='Tide',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time', models.TimeField(verbose_name='Time')),
('level', models.FloatField(verbose_name='Level')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True)),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True)),
('day', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='calendar.Calendar')),
],
),
migrations.CreateModel(
name='Trips',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='Title')),
('is_public', models.BooleanField(default=False, verbose_name='Is Public')),
('is_featured', models.BooleanField(default=False, verbose_name='Is Featured')),
('slug', django_extensions.db.fields.AutoSlugField(blank=True, editable=False, overwrite=True, populate_from=('title',), unique=True)),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True)),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True)),
('list_description', models.CharField(blank=True, max_length=255, null=True, verbose_name='List Description')),
('description', models.TextField(blank=True, null=True, verbose_name='Description')),
('start_date', models.DateTimeField(blank=True, null=True, verbose_name='Start Date and Time')),
('end_date', models.DateTimeField(blank=True, null=True, verbose_name='End Date and Time')),
('day', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='calendar.Calendar')),
],
options={
'verbose_name_plural': 'Trips',
'verbose_name': 'Trip',
'ordering': ['day'],
},
),
migrations.CreateModel(
name='WeatherTypes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='Title')),
('slug', django_extensions.db.fields.AutoSlugField(blank=True, editable=False, overwrite=True, populate_from=('title',), unique=True)),
('class_code', models.CharField(max_length=255, verbose_name='Code')),
],
),
migrations.AddField(
model_name='extrafields',
name='trips',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='calendar.Trips'),
),
migrations.AddField(
model_name='calendar',
name='weather',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='calendar.WeatherTypes'),
),
]
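# Note: as the app's initial migration this file is applied through the normal
# Django workflow, e.g. `python manage.py migrate calendar` (project settings
# and the custom user model referenced above are assumed to be configured).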
| gpl-3.0 | 2,356,455,679,640,295,400 | 55.515625 | 184 | 0.585983 | false | 4.326555 | false | false | false |
lindong28/kafka | tests/kafkatest/services/streams.py | 1 | 32695 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import signal
from . import streams_property
from . import consumer_property
from ducktape.services.service import Service
from ducktape.utils.util import wait_until
from kafkatest.directory_layout.kafka_path import KafkaPathResolverMixin
from kafkatest.services.kafka import KafkaConfig
from kafkatest.services.monitor.jmx import JmxMixin
from kafkatest.version import LATEST_0_10_0, LATEST_0_10_1
STATE_DIR = "state.dir"
class StreamsTestBaseService(KafkaPathResolverMixin, JmxMixin, Service):
"""Base class for Streams Test services providing some common settings and functionality"""
PERSISTENT_ROOT = "/mnt/streams"
# The log file contains normal log4j logs written using a file appender. stdout and stderr are handled separately
CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "streams.properties")
LOG_FILE = os.path.join(PERSISTENT_ROOT, "streams.log")
STDOUT_FILE = os.path.join(PERSISTENT_ROOT, "streams.stdout")
STDERR_FILE = os.path.join(PERSISTENT_ROOT, "streams.stderr")
JMX_LOG_FILE = os.path.join(PERSISTENT_ROOT, "jmx_tool.log")
JMX_ERR_FILE = os.path.join(PERSISTENT_ROOT, "jmx_tool.err.log")
LOG4J_CONFIG_FILE = os.path.join(PERSISTENT_ROOT, "tools-log4j.properties")
PID_FILE = os.path.join(PERSISTENT_ROOT, "streams.pid")
CLEAN_NODE_ENABLED = True
logs = {
"streams_config": {
"path": CONFIG_FILE,
"collect_default": True},
"streams_config.1": {
"path": CONFIG_FILE + ".1",
"collect_default": True},
"streams_config.0-1": {
"path": CONFIG_FILE + ".0-1",
"collect_default": True},
"streams_config.1-1": {
"path": CONFIG_FILE + ".1-1",
"collect_default": True},
"streams_log": {
"path": LOG_FILE,
"collect_default": True},
"streams_stdout": {
"path": STDOUT_FILE,
"collect_default": True},
"streams_stderr": {
"path": STDERR_FILE,
"collect_default": True},
"streams_log.1": {
"path": LOG_FILE + ".1",
"collect_default": True},
"streams_stdout.1": {
"path": STDOUT_FILE + ".1",
"collect_default": True},
"streams_stderr.1": {
"path": STDERR_FILE + ".1",
"collect_default": True},
"streams_log.2": {
"path": LOG_FILE + ".2",
"collect_default": True},
"streams_stdout.2": {
"path": STDOUT_FILE + ".2",
"collect_default": True},
"streams_stderr.2": {
"path": STDERR_FILE + ".2",
"collect_default": True},
"streams_log.3": {
"path": LOG_FILE + ".3",
"collect_default": True},
"streams_stdout.3": {
"path": STDOUT_FILE + ".3",
"collect_default": True},
"streams_stderr.3": {
"path": STDERR_FILE + ".3",
"collect_default": True},
"streams_log.0-1": {
"path": LOG_FILE + ".0-1",
"collect_default": True},
"streams_stdout.0-1": {
"path": STDOUT_FILE + ".0-1",
"collect_default": True},
"streams_stderr.0-1": {
"path": STDERR_FILE + ".0-1",
"collect_default": True},
"streams_log.0-2": {
"path": LOG_FILE + ".0-2",
"collect_default": True},
"streams_stdout.0-2": {
"path": STDOUT_FILE + ".0-2",
"collect_default": True},
"streams_stderr.0-2": {
"path": STDERR_FILE + ".0-2",
"collect_default": True},
"streams_log.0-3": {
"path": LOG_FILE + ".0-3",
"collect_default": True},
"streams_stdout.0-3": {
"path": STDOUT_FILE + ".0-3",
"collect_default": True},
"streams_stderr.0-3": {
"path": STDERR_FILE + ".0-3",
"collect_default": True},
"streams_log.0-4": {
"path": LOG_FILE + ".0-4",
"collect_default": True},
"streams_stdout.0-4": {
"path": STDOUT_FILE + ".0-4",
"collect_default": True},
"streams_stderr.0-4": {
"path": STDERR_FILE + ".0-4",
"collect_default": True},
"streams_log.0-5": {
"path": LOG_FILE + ".0-5",
"collect_default": True},
"streams_stdout.0-5": {
"path": STDOUT_FILE + ".0-5",
"collect_default": True},
"streams_stderr.0-5": {
"path": STDERR_FILE + ".0-5",
"collect_default": True},
"streams_log.0-6": {
"path": LOG_FILE + ".0-6",
"collect_default": True},
"streams_stdout.0-6": {
"path": STDOUT_FILE + ".0-6",
"collect_default": True},
"streams_stderr.0-6": {
"path": STDERR_FILE + ".0-6",
"collect_default": True},
"streams_log.1-1": {
"path": LOG_FILE + ".1-1",
"collect_default": True},
"streams_stdout.1-1": {
"path": STDOUT_FILE + ".1-1",
"collect_default": True},
"streams_stderr.1-1": {
"path": STDERR_FILE + ".1-1",
"collect_default": True},
"streams_log.1-2": {
"path": LOG_FILE + ".1-2",
"collect_default": True},
"streams_stdout.1-2": {
"path": STDOUT_FILE + ".1-2",
"collect_default": True},
"streams_stderr.1-2": {
"path": STDERR_FILE + ".1-2",
"collect_default": True},
"streams_log.1-3": {
"path": LOG_FILE + ".1-3",
"collect_default": True},
"streams_stdout.1-3": {
"path": STDOUT_FILE + ".1-3",
"collect_default": True},
"streams_stderr.1-3": {
"path": STDERR_FILE + ".1-3",
"collect_default": True},
"streams_log.1-4": {
"path": LOG_FILE + ".1-4",
"collect_default": True},
"streams_stdout.1-4": {
"path": STDOUT_FILE + ".1-4",
"collect_default": True},
"streams_stderr.1-4": {
"path": STDERR_FILE + ".1-4",
"collect_default": True},
"streams_log.1-5": {
"path": LOG_FILE + ".1-5",
"collect_default": True},
"streams_stdout.1-5": {
"path": STDOUT_FILE + ".1-5",
"collect_default": True},
"streams_stderr.1-5": {
"path": STDERR_FILE + ".1-5",
"collect_default": True},
"streams_log.1-6": {
"path": LOG_FILE + ".1-6",
"collect_default": True},
"streams_stdout.1-6": {
"path": STDOUT_FILE + ".1-6",
"collect_default": True},
"streams_stderr.1-6": {
"path": STDERR_FILE + ".1-6",
"collect_default": True},
"jmx_log": {
"path": JMX_LOG_FILE,
"collect_default": True},
"jmx_err": {
"path": JMX_ERR_FILE,
"collect_default": True},
}
def __init__(self, test_context, kafka, streams_class_name, user_test_args1, user_test_args2=None, user_test_args3=None, user_test_args4=None):
Service.__init__(self, test_context, num_nodes=1)
self.kafka = kafka
self.args = {'streams_class_name': streams_class_name,
'user_test_args1': user_test_args1,
'user_test_args2': user_test_args2,
'user_test_args3': user_test_args3,
'user_test_args4': user_test_args4}
self.log_level = "DEBUG"
@property
def node(self):
return self.nodes[0]
@property
def expectedMessage(self):
return 'StreamsTest instance started'
def pids(self, node):
try:
pids = [pid for pid in node.account.ssh_capture("cat " + self.PID_FILE, callback=str)]
return [int(pid) for pid in pids]
except Exception as exception:
self.logger.debug(str(exception))
return []
def stop_nodes(self, clean_shutdown=True):
for node in self.nodes:
self.stop_node(node, clean_shutdown)
def stop_node(self, node, clean_shutdown=True):
self.logger.info((clean_shutdown and "Cleanly" or "Forcibly") + " stopping Streams Test on " + str(node.account))
pids = self.pids(node)
sig = signal.SIGTERM if clean_shutdown else signal.SIGKILL
for pid in pids:
node.account.signal(pid, sig, allow_fail=True)
if clean_shutdown:
for pid in pids:
wait_until(lambda: not node.account.alive(pid), timeout_sec=120, err_msg="Streams Test process on " + str(node.account) + " took too long to exit")
node.account.ssh("rm -f " + self.PID_FILE, allow_fail=False)
def restart(self):
# We don't want to do any clean up here, just restart the process.
for node in self.nodes:
self.logger.info("Restarting Kafka Streams on " + str(node.account))
self.stop_node(node)
self.start_node(node)
def abortThenRestart(self):
# We don't want to do any clean up here, just abort then restart the process. The running service is killed immediately.
for node in self.nodes:
self.logger.info("Aborting Kafka Streams on " + str(node.account))
self.stop_node(node, False)
self.logger.info("Restarting Kafka Streams on " + str(node.account))
self.start_node(node)
def wait(self, timeout_sec=1440):
for node in self.nodes:
self.wait_node(node, timeout_sec)
def wait_node(self, node, timeout_sec=None):
for pid in self.pids(node):
wait_until(lambda: not node.account.alive(pid), timeout_sec=timeout_sec, err_msg="Streams Test process on " + str(node.account) + " took too long to exit")
def clean_node(self, node):
node.account.kill_process("streams", clean_shutdown=False, allow_fail=True)
if self.CLEAN_NODE_ENABLED:
node.account.ssh("rm -rf " + self.PERSISTENT_ROOT, allow_fail=False)
def start_cmd(self, node):
args = self.args.copy()
args['config_file'] = self.CONFIG_FILE
args['stdout'] = self.STDOUT_FILE
args['stderr'] = self.STDERR_FILE
args['pidfile'] = self.PID_FILE
args['log4j'] = self.LOG4J_CONFIG_FILE
args['kafka_run_class'] = self.path.script("kafka-run-class.sh", node)
cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%(log4j)s\"; " \
"INCLUDE_TEST_JARS=true %(kafka_run_class)s %(streams_class_name)s " \
" %(config_file)s %(user_test_args1)s %(user_test_args2)s %(user_test_args3)s" \
" %(user_test_args4)s & echo $! >&3 ) 1>> %(stdout)s 2>> %(stderr)s 3> %(pidfile)s" % args
self.logger.info("Executing streams cmd: " + cmd)
return cmd
def prop_file(self):
cfg = KafkaConfig(**{streams_property.STATE_DIR: self.PERSISTENT_ROOT, streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers()})
return cfg.render()
def start_node(self, node):
node.account.mkdirs(self.PERSISTENT_ROOT)
prop_file = self.prop_file()
node.account.create_file(self.CONFIG_FILE, prop_file)
node.account.create_file(self.LOG4J_CONFIG_FILE, self.render('tools_log4j.properties', log_file=self.LOG_FILE))
self.logger.info("Starting StreamsTest process on " + str(node.account))
with node.account.monitor_log(self.STDOUT_FILE) as monitor:
node.account.ssh(self.start_cmd(node))
monitor.wait_until(self.expectedMessage, timeout_sec=60, err_msg="Never saw message indicating StreamsTest finished startup on " + str(node.account))
if len(self.pids(node)) == 0:
raise RuntimeError("No process ids recorded")
class StreamsSmokeTestBaseService(StreamsTestBaseService):
"""Base class for Streams Smoke Test services providing some common settings and functionality"""
def __init__(self, test_context, kafka, command, processing_guarantee = 'at_least_once', num_threads = 3, replication_factor = 3):
super(StreamsSmokeTestBaseService, self).__init__(test_context,
kafka,
"org.apache.kafka.streams.tests.StreamsSmokeTest",
command)
self.NUM_THREADS = num_threads
self.PROCESSING_GUARANTEE = processing_guarantee
self.KAFKA_STREAMS_VERSION = ""
self.UPGRADE_FROM = None
self.REPLICATION_FACTOR = replication_factor
def set_version(self, kafka_streams_version):
self.KAFKA_STREAMS_VERSION = kafka_streams_version
def set_upgrade_from(self, upgrade_from):
self.UPGRADE_FROM = upgrade_from
def prop_file(self):
properties = {streams_property.STATE_DIR: self.PERSISTENT_ROOT,
streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers(),
streams_property.PROCESSING_GUARANTEE: self.PROCESSING_GUARANTEE,
streams_property.NUM_THREADS: self.NUM_THREADS,
"replication.factor": self.REPLICATION_FACTOR,
"num.standby.replicas": 2,
"buffered.records.per.partition": 100,
"commit.interval.ms": 1000,
"auto.offset.reset": "earliest",
"acks": "all"}
if self.UPGRADE_FROM is not None:
properties['upgrade.from'] = self.UPGRADE_FROM
cfg = KafkaConfig(**properties)
return cfg.render()
def start_cmd(self, node):
args = self.args.copy()
args['config_file'] = self.CONFIG_FILE
args['stdout'] = self.STDOUT_FILE
args['stderr'] = self.STDERR_FILE
args['pidfile'] = self.PID_FILE
args['log4j'] = self.LOG4J_CONFIG_FILE
args['version'] = self.KAFKA_STREAMS_VERSION
args['kafka_run_class'] = self.path.script("kafka-run-class.sh", node)
cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%(log4j)s\";" \
" INCLUDE_TEST_JARS=true UPGRADE_KAFKA_STREAMS_TEST_VERSION=%(version)s" \
" %(kafka_run_class)s %(streams_class_name)s" \
" %(config_file)s %(user_test_args1)s" \
" & echo $! >&3 ) " \
"1>> %(stdout)s 2>> %(stderr)s 3> %(pidfile)s" % args
self.logger.info("Executing streams cmd: " + cmd)
return cmd
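# Illustrative note: an upgrade/rolling-bounce test would typically call
# set_version(str(LATEST_0_10_1)) and set_upgrade_from("0.10.1") on a
# smoke-test service before start(), so that the command above picks up the
# old Streams jar (UPGRADE_KAFKA_STREAMS_TEST_VERSION) and prop_file() renders
# the matching upgrade.from entry.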
class StreamsEosTestBaseService(StreamsTestBaseService):
"""Base class for Streams EOS Test services providing some common settings and functionality"""
clean_node_enabled = True
def __init__(self, test_context, kafka, processing_guarantee, command):
super(StreamsEosTestBaseService, self).__init__(test_context,
kafka,
"org.apache.kafka.streams.tests.StreamsEosTest",
command)
self.PROCESSING_GUARANTEE = processing_guarantee
def prop_file(self):
properties = {streams_property.STATE_DIR: self.PERSISTENT_ROOT,
streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers(),
streams_property.PROCESSING_GUARANTEE: self.PROCESSING_GUARANTEE}
cfg = KafkaConfig(**properties)
return cfg.render()
def clean_node(self, node):
if self.clean_node_enabled:
super(StreamsEosTestBaseService, self).clean_node(node)
class StreamsSmokeTestDriverService(StreamsSmokeTestBaseService):
def __init__(self, test_context, kafka):
super(StreamsSmokeTestDriverService, self).__init__(test_context, kafka, "run")
self.DISABLE_AUTO_TERMINATE = ""
def disable_auto_terminate(self):
self.DISABLE_AUTO_TERMINATE = "disableAutoTerminate"
def start_cmd(self, node):
args = self.args.copy()
args['config_file'] = self.CONFIG_FILE
args['stdout'] = self.STDOUT_FILE
args['stderr'] = self.STDERR_FILE
args['pidfile'] = self.PID_FILE
args['log4j'] = self.LOG4J_CONFIG_FILE
args['disable_auto_terminate'] = self.DISABLE_AUTO_TERMINATE
args['kafka_run_class'] = self.path.script("kafka-run-class.sh", node)
cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%(log4j)s\"; " \
"INCLUDE_TEST_JARS=true %(kafka_run_class)s %(streams_class_name)s " \
" %(config_file)s %(user_test_args1)s %(disable_auto_terminate)s" \
" & echo $! >&3 ) 1>> %(stdout)s 2>> %(stderr)s 3> %(pidfile)s" % args
self.logger.info("Executing streams cmd: " + cmd)
return cmd
class StreamsSmokeTestJobRunnerService(StreamsSmokeTestBaseService):
def __init__(self, test_context, kafka, processing_guarantee, num_threads = 3, replication_factor = 3):
super(StreamsSmokeTestJobRunnerService, self).__init__(test_context, kafka, "process", processing_guarantee, num_threads, replication_factor)
class StreamsEosTestDriverService(StreamsEosTestBaseService):
def __init__(self, test_context, kafka):
super(StreamsEosTestDriverService, self).__init__(test_context, kafka, "not-required", "run")
class StreamsEosTestJobRunnerService(StreamsEosTestBaseService):
def __init__(self, test_context, kafka, processing_guarantee):
super(StreamsEosTestJobRunnerService, self).__init__(test_context, kafka, processing_guarantee, "process")
class StreamsComplexEosTestJobRunnerService(StreamsEosTestBaseService):
def __init__(self, test_context, kafka, processing_guarantee):
super(StreamsComplexEosTestJobRunnerService, self).__init__(test_context, kafka, processing_guarantee, "process-complex")
class StreamsEosTestVerifyRunnerService(StreamsEosTestBaseService):
def __init__(self, test_context, kafka):
super(StreamsEosTestVerifyRunnerService, self).__init__(test_context, kafka, "not-required", "verify")
class StreamsComplexEosTestVerifyRunnerService(StreamsEosTestBaseService):
def __init__(self, test_context, kafka):
super(StreamsComplexEosTestVerifyRunnerService, self).__init__(test_context, kafka, "not-required", "verify-complex")
class StreamsSmokeTestShutdownDeadlockService(StreamsSmokeTestBaseService):
def __init__(self, test_context, kafka):
super(StreamsSmokeTestShutdownDeadlockService, self).__init__(test_context, kafka, "close-deadlock-test")
class StreamsBrokerCompatibilityService(StreamsTestBaseService):
def __init__(self, test_context, kafka, processingMode):
super(StreamsBrokerCompatibilityService, self).__init__(test_context,
kafka,
"org.apache.kafka.streams.tests.BrokerCompatibilityTest",
processingMode)
class StreamsBrokerDownResilienceService(StreamsTestBaseService):
def __init__(self, test_context, kafka, configs):
super(StreamsBrokerDownResilienceService, self).__init__(test_context,
kafka,
"org.apache.kafka.streams.tests.StreamsBrokerDownResilienceTest",
configs)
def start_cmd(self, node):
args = self.args.copy()
args['config_file'] = self.CONFIG_FILE
args['stdout'] = self.STDOUT_FILE
args['stderr'] = self.STDERR_FILE
args['pidfile'] = self.PID_FILE
args['log4j'] = self.LOG4J_CONFIG_FILE
args['kafka_run_class'] = self.path.script("kafka-run-class.sh", node)
cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%(log4j)s\"; " \
"INCLUDE_TEST_JARS=true %(kafka_run_class)s %(streams_class_name)s " \
" %(config_file)s %(user_test_args1)s %(user_test_args2)s %(user_test_args3)s" \
" %(user_test_args4)s & echo $! >&3 ) 1>> %(stdout)s 2>> %(stderr)s 3> %(pidfile)s" % args
self.logger.info("Executing: " + cmd)
return cmd
class StreamsStandbyTaskService(StreamsTestBaseService):
def __init__(self, test_context, kafka, configs):
super(StreamsStandbyTaskService, self).__init__(test_context,
kafka,
"org.apache.kafka.streams.tests.StreamsStandByReplicaTest",
configs)
class StreamsResetter(StreamsTestBaseService):
def __init__(self, test_context, kafka, topic, applicationId):
super(StreamsResetter, self).__init__(test_context,
kafka,
"kafka.tools.StreamsResetter",
"")
self.topic = topic
self.applicationId = applicationId
@property
def expectedMessage(self):
return 'Done.'
def start_cmd(self, node):
args = self.args.copy()
args['bootstrap.servers'] = self.kafka.bootstrap_servers()
args['stdout'] = self.STDOUT_FILE
args['stderr'] = self.STDERR_FILE
args['pidfile'] = self.PID_FILE
args['log4j'] = self.LOG4J_CONFIG_FILE
args['application.id'] = self.applicationId
args['input.topics'] = self.topic
args['kafka_run_class'] = self.path.script("kafka-run-class.sh", node)
cmd = "(export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%(log4j)s\"; " \
"%(kafka_run_class)s %(streams_class_name)s " \
"--bootstrap-servers %(bootstrap.servers)s " \
"--force " \
"--application-id %(application.id)s " \
"--input-topics %(input.topics)s " \
"& echo $! >&3 ) " \
"1>> %(stdout)s " \
"2>> %(stderr)s " \
"3> %(pidfile)s "% args
self.logger.info("Executing: " + cmd)
return cmd
class StreamsOptimizedUpgradeTestService(StreamsTestBaseService):
def __init__(self, test_context, kafka):
super(StreamsOptimizedUpgradeTestService, self).__init__(test_context,
kafka,
"org.apache.kafka.streams.tests.StreamsOptimizedTest",
"")
self.OPTIMIZED_CONFIG = 'none'
self.INPUT_TOPIC = None
self.AGGREGATION_TOPIC = None
self.REDUCE_TOPIC = None
self.JOIN_TOPIC = None
def prop_file(self):
properties = {streams_property.STATE_DIR: self.PERSISTENT_ROOT,
streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers()}
properties['topology.optimization'] = self.OPTIMIZED_CONFIG
properties['input.topic'] = self.INPUT_TOPIC
properties['aggregation.topic'] = self.AGGREGATION_TOPIC
properties['reduce.topic'] = self.REDUCE_TOPIC
properties['join.topic'] = self.JOIN_TOPIC
# Long.MAX_VALUE lets us do the assignment without a warmup
properties['acceptable.recovery.lag'] = "9223372036854775807"
cfg = KafkaConfig(**properties)
return cfg.render()
class StreamsUpgradeTestJobRunnerService(StreamsTestBaseService):
def __init__(self, test_context, kafka):
super(StreamsUpgradeTestJobRunnerService, self).__init__(test_context,
kafka,
"org.apache.kafka.streams.tests.StreamsUpgradeTest",
"")
self.UPGRADE_FROM = None
self.UPGRADE_TO = None
self.extra_properties = {}
def set_config(self, key, value):
self.extra_properties[key] = value
def set_version(self, kafka_streams_version):
self.KAFKA_STREAMS_VERSION = kafka_streams_version
def set_upgrade_from(self, upgrade_from):
self.UPGRADE_FROM = upgrade_from
def set_upgrade_to(self, upgrade_to):
self.UPGRADE_TO = upgrade_to
def prop_file(self):
properties = self.extra_properties.copy()
properties[streams_property.STATE_DIR] = self.PERSISTENT_ROOT
properties[streams_property.KAFKA_SERVERS] = self.kafka.bootstrap_servers()
if self.UPGRADE_FROM is not None:
properties['upgrade.from'] = self.UPGRADE_FROM
if self.UPGRADE_TO == "future_version":
properties['test.future.metadata'] = "any_value"
cfg = KafkaConfig(**properties)
return cfg.render()
def start_cmd(self, node):
args = self.args.copy()
if self.KAFKA_STREAMS_VERSION == str(LATEST_0_10_0) or self.KAFKA_STREAMS_VERSION == str(LATEST_0_10_1):
args['zk'] = self.kafka.zk.connect_setting()
else:
args['zk'] = ""
args['config_file'] = self.CONFIG_FILE
args['stdout'] = self.STDOUT_FILE
args['stderr'] = self.STDERR_FILE
args['pidfile'] = self.PID_FILE
args['log4j'] = self.LOG4J_CONFIG_FILE
args['version'] = self.KAFKA_STREAMS_VERSION
args['kafka_run_class'] = self.path.script("kafka-run-class.sh", node)
cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%(log4j)s\"; " \
"INCLUDE_TEST_JARS=true UPGRADE_KAFKA_STREAMS_TEST_VERSION=%(version)s " \
" %(kafka_run_class)s %(streams_class_name)s %(zk)s %(config_file)s " \
" & echo $! >&3 ) 1>> %(stdout)s 2>> %(stderr)s 3> %(pidfile)s" % args
self.logger.info("Executing: " + cmd)
return cmd
class StreamsNamedRepartitionTopicService(StreamsTestBaseService):
def __init__(self, test_context, kafka):
super(StreamsNamedRepartitionTopicService, self).__init__(test_context,
kafka,
"org.apache.kafka.streams.tests.StreamsNamedRepartitionTest",
"")
self.ADD_ADDITIONAL_OPS = 'false'
self.INPUT_TOPIC = None
self.AGGREGATION_TOPIC = None
def prop_file(self):
properties = {streams_property.STATE_DIR: self.PERSISTENT_ROOT,
streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers()}
properties['input.topic'] = self.INPUT_TOPIC
properties['aggregation.topic'] = self.AGGREGATION_TOPIC
properties['add.operations'] = self.ADD_ADDITIONAL_OPS
cfg = KafkaConfig(**properties)
return cfg.render()
class StaticMemberTestService(StreamsTestBaseService):
def __init__(self, test_context, kafka, group_instance_id, num_threads):
super(StaticMemberTestService, self).__init__(test_context,
kafka,
"org.apache.kafka.streams.tests.StaticMemberTestClient",
"")
self.INPUT_TOPIC = None
self.GROUP_INSTANCE_ID = group_instance_id
self.NUM_THREADS = num_threads
def prop_file(self):
properties = {streams_property.STATE_DIR: self.PERSISTENT_ROOT,
streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers(),
streams_property.NUM_THREADS: self.NUM_THREADS,
consumer_property.GROUP_INSTANCE_ID: self.GROUP_INSTANCE_ID,
consumer_property.SESSION_TIMEOUT_MS: 60000}
properties['input.topic'] = self.INPUT_TOPIC
# TODO KIP-441: consider rewriting the test for HighAvailabilityTaskAssignor
properties['internal.task.assignor.class'] = "org.apache.kafka.streams.processor.internals.assignment.StickyTaskAssignor"
cfg = KafkaConfig(**properties)
return cfg.render()
class CooperativeRebalanceUpgradeService(StreamsTestBaseService):
def __init__(self, test_context, kafka):
super(CooperativeRebalanceUpgradeService, self).__init__(test_context,
kafka,
"org.apache.kafka.streams.tests.StreamsUpgradeToCooperativeRebalanceTest",
"")
self.UPGRADE_FROM = None
# these properties will be overridden in test
self.SOURCE_TOPIC = None
self.SINK_TOPIC = None
self.TASK_DELIMITER = "#"
self.REPORT_INTERVAL = None
self.standby_tasks = None
self.active_tasks = None
self.upgrade_phase = None
def set_tasks(self, task_string):
label = "TASK-ASSIGNMENTS:"
task_string_substr = task_string[len(label):]
all_tasks = task_string_substr.split(self.TASK_DELIMITER)
self.active_tasks = set(all_tasks[0].split(","))
if len(all_tasks) > 1:
self.standby_tasks = set(all_tasks[1].split(","))
def set_version(self, kafka_streams_version):
self.KAFKA_STREAMS_VERSION = kafka_streams_version
def set_upgrade_phase(self, upgrade_phase):
self.upgrade_phase = upgrade_phase
def start_cmd(self, node):
args = self.args.copy()
if self.KAFKA_STREAMS_VERSION == str(LATEST_0_10_0) or self.KAFKA_STREAMS_VERSION == str(LATEST_0_10_1):
args['zk'] = self.kafka.zk.connect_setting()
else:
args['zk'] = ""
args['config_file'] = self.CONFIG_FILE
args['stdout'] = self.STDOUT_FILE
args['stderr'] = self.STDERR_FILE
args['pidfile'] = self.PID_FILE
args['log4j'] = self.LOG4J_CONFIG_FILE
args['version'] = self.KAFKA_STREAMS_VERSION
args['kafka_run_class'] = self.path.script("kafka-run-class.sh", node)
cmd = "( export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%(log4j)s\"; " \
"INCLUDE_TEST_JARS=true UPGRADE_KAFKA_STREAMS_TEST_VERSION=%(version)s " \
" %(kafka_run_class)s %(streams_class_name)s %(zk)s %(config_file)s " \
" & echo $! >&3 ) 1>> %(stdout)s 2>> %(stderr)s 3> %(pidfile)s" % args
self.logger.info("Executing: " + cmd)
return cmd
def prop_file(self):
properties = {streams_property.STATE_DIR: self.PERSISTENT_ROOT,
streams_property.KAFKA_SERVERS: self.kafka.bootstrap_servers()}
if self.UPGRADE_FROM is not None:
properties['upgrade.from'] = self.UPGRADE_FROM
else:
try:
del properties['upgrade.from']
except KeyError:
self.logger.info("Key 'upgrade.from' not there, better safe than sorry")
if self.upgrade_phase is not None:
properties['upgrade.phase'] = self.upgrade_phase
properties['source.topic'] = self.SOURCE_TOPIC
properties['sink.topic'] = self.SINK_TOPIC
properties['task.delimiter'] = self.TASK_DELIMITER
properties['report.interval'] = self.REPORT_INTERVAL
cfg = KafkaConfig(**properties)
return cfg.render()
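# Usage sketch (ducktape test code; variable names are assumed): a system test
# wires these services against an already-running KafkaService, e.g.
#
#   driver = StreamsSmokeTestDriverService(test_context, kafka)
#   processor = StreamsSmokeTestJobRunnerService(test_context, kafka, "exactly_once")
#   driver.start(); processor.start()
#   ...
#   processor.stop(); driver.wait()
#
# start()/stop() come from ducktape's Service base class; wait(), stop_node()
# and pids() are defined on StreamsTestBaseService above.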
| apache-2.0 | 2,284,239,895,744,913,400 | 42.362069 | 167 | 0.568894 | false | 3.803956 | true | false | false |
radiasoft/radtrack | radtrack/rt_jinja.py | 1 | 2150 | # -*- coding: utf-8 -*-
"""Simplify jinja templating.
:copyright: Copyright (c) 2015 Bivio Software, Inc. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern.pkdebug import pkdc, pkdp
import re
import jinja2
def render(template, params):
"""Parse template for $name and replace with special filter then render
Since a common case is to render floating point numbers, we
have a special pattern ``$var`` which is replaced with
``{{ var | rt_filter }}``, which maps to the appropriate float filter.
    If the template begins with a newline, leading whitespace is stripped from
    each line up to the amount of the first line's indent, e.g.::
template = '''
first line whitespace sets the mark to delete to
this line will be left indented.
this line will have four spaces
another line
'''
A trailing newline will always be added.
Args:
template (str): what to render
params (dict): variables to render
Returns:
str: rendered template with params.
"""
je = jinja2.Environment(
trim_blocks=True,
lstrip_blocks=True,
keep_trailing_newline=True,
)
je.filters['rt_filter'] = _rt_filter
jt = je.from_string(_template(template))
    # TODO: maybe accept a namespace; would need vars(params) if only a dict can be rendered
return jt.render(params)
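
# A minimal usage sketch (illustrative, not part of the module itself). It
# assumes the package is importable as ``radtrack.rt_jinja``; the '$energy'
# shorthand below is rewritten to '{{ energy|rt_filter }}' before rendering,
# so small floats come out in scientific notation:
#
#   from radtrack.rt_jinja import render
#   text = render('''
#       energy=$energy
#       name={{ name }}
#   ''', {'energy': 0.000123, 'name': 'rt'})
#   # text is roughly "energy=1.230e-04\nname=rt\n"
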
def _rt_filter(v):
"""Format floats as .3f or .3e depending on size"""
if type(v) is not float:
return v
a = abs(v)
f = 'e' if a >= 1000 or a < 0.001 else 'f'
return ('{:.3' + f + '}').format(v)
def _template(t):
"""Parse template"""
if t.startswith('\n'):
t2 = t.lstrip()
i = str(len(t) - len(t2) - 1)
t = re.sub(
r'^\s{1,' + i + '}',
'',
t2,
            flags=re.IGNORECASE | re.MULTILINE,
)
if not t.endswith('\n'):
t += '\n'
return re.sub(
r'\$([a-z]\w*)',
r'{{\1|rt_filter}}',
t,
flags=re.IGNORECASE,
)
| apache-2.0 | 8,222,702,969,434,492,000 | 25.875 | 76 | 0.57907 | false | 3.694158 | false | false | false |
sumpfgottheit/rhc-sint-app | wsgi/flaskapp.py | 1 | 12436 | # -*- encoding: utf-8 -*-
from flask import Flask, render_template, flash, url_for, redirect, request
from database import db
from sqlalchemy.exc import OperationalError, IntegrityError
import os
from forms import *
import requests
from filters import register_filters
from googleapi import get_distance, DistanceServiceError
from rest import rest_get_all, rest_get_id, rest_delete_id, rest_post, rest_put
from requests.exceptions import HTTPError
import flask.ext.restless
from collections import namedtuple
from dateutil import parser
app = Flask(__name__)
app.config.from_object('wsgi.config')
db.init_app(app)
register_filters(app)
CAR_FIELDS=["id", "manufacturer", "typ", "platenumber", "color", "consumption", "price", "seats", "station"]
KUNDE_FIELDS = [ "id", "name", "street", "plz", "city", "country", "leihen"]
STATION_FIELDS = [ "id", "name", "street", "plz", "city", "country"]
LEIHE_FIELDS = ["id", "kunde_id", "car_id", "von", "bis", "returned", "station_abhol_id", "station_return_id"]
class Car(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __repr__(self):
return "%s %s %s" % (self.manufacturer, self.typ, self.platenumber)
class Station(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
@property
def adresse(self):
return "%s, %s %s, %s" % (self.street, self.plz, self.city, self.country)
class Kunde(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
@property
def adresse(self):
return "%s, %s %s, %s" % (self.street, self.plz, self.city, self.country)
class Leihe(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
if 'kunde_id' in self.__dict__.keys():
try:
self.kunde = rest_get_id("kunden", self.kunde_id)["name"]
except KeyError:
self.kunde = 'None'
if 'von' in self.__dict__.keys():
self.von = parser.parse(self.von)
if 'bis' in self.__dict__.keys():
self.bis = parser.parse(self.bis)
if 'car_id' in self.__dict__.keys():
car = rest_get_id("cars", self.car_id)
car = Car(**car)
self.car = car.__repr__()
if hasattr(self, 'station_return_id'):
self.station_return = rest_get_id("stations", self.station_return_id)["name"]
if hasattr(self, 'station_abhol_id'):
self.station_abhol = rest_get_id("stations", self.station_abhol_id)["name"]
def tidy_objects(objects, fields):
objects.sort(key=lambda x: x["id"])
for o in objects:
for key in o.keys():
if key not in fields:
del(o[key])
continue
if isinstance(o[key], dict):
if "name" in o[key].keys():
o[key] = o[key]["name"]
elif "id" in o[key].keys():
o[key] = o[key]["id"]
elif isinstance(o[key], list):
del(o[key])
return objects
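
# Illustrative sketch of what tidy_objects does (the record below is made up):
#
#   tidy_objects([{"id": 2, "manufacturer": "VW", "station": {"id": 1, "name": "Wien"},
#                  "leihen": [], "internal": "x"}],
#                ["id", "manufacturer", "station"])
#   -> [{"id": 2, "manufacturer": "VW", "station": "Wien"}]
#
# Records are sorted by id, keys outside the field list and list-valued keys are
# dropped, and nested dicts are collapsed to their "name" (or "id") value.
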
COUNTRIES = [ (u'Österreich', u'AT', u'EUR'), (u"Ungarn", u"HU", u"HUF" ), (u"Schweiz", u"CH", u"CHF")]
@app.before_first_request
def fill_countries():
"""
    The list of countries is fetched via two web services. The first fetches a list of all countries
    from www.oorsprong.org. They are available as JSON and grouped by continent.
    We take only the European countries and use the second web service - again JSON - to obtain the
    German name for each ISO code.
    Since both services are provided by private individuals, I do not want to overload their servers,
    so the services are only used when the app is not running in debug mode.
"""
global COUNTRIES
    if not app.debug:
try:
response = requests.get('https://raw.githubusercontent.com/umpirsky/country-list/master/country/cldr/de_AT/country.json')
dict_iso_name = response.json()
response = requests.get('https://raw.githubusercontent.com/mledoze/countries/master/countries.json')
_countries = [(c[u"name"], dict_iso_name.get(c[u'cca2'], ''), c[u"currency"][0]) for c in response.json() if c[u"region"] == u"Europe"]
COUNTRIES = sorted(_countries, key = lambda x: x[1])
flash('Countrycode loaded')
except Exception:
flash('Countrycodes konnten nicht geladen werden - falling back to fallback', category='warning')
else:
flash('Not loading Countrycodes in Debugmode')
@app.route("/")
def index():
return render_template('index.html')
@app.route("/create_db")
def create_db():
"""
    Query all resources from the REST backend and render an overview page.
"""
d = {}
try:
        flash("Database Queried")
d["cars"] = tidy_objects(rest_get_all("cars"), CAR_FIELDS)
d["stations"] = tidy_objects(rest_get_all("stations"), STATION_FIELDS)
d["kunden"] = tidy_objects(rest_get_all("kunden"), KUNDE_FIELDS)
d["leihen"] = rest_get_all("leihen")
d["car_fields"] = CAR_FIELDS
d["stations_fields"] = STATION_FIELDS
d["kunden_fields"] = KUNDE_FIELDS
d["leihen_fields"] = LEIHE_FIELDS
except OperationalError, e:
flash(str(e), category='error')
return render_template("create_db.html", **d)
@app.route("/cars/")
def cars_list():
"""
List all Cars
"""
cars = rest_get_all("cars")
cars = tidy_objects(cars, CAR_FIELDS)
return render_template("cars_list.html", cars=cars, fields=CAR_FIELDS)
@app.route("/kunden/")
def kunden_list():
"""
List all Kunden
"""
kunden = rest_get_all("kunden")
kunden = tidy_objects(kunden, KUNDE_FIELDS)
return render_template("kunden_list.html", kunden=kunden)
@app.route("/kunde/", methods=['POST', 'GET'], endpoint='kunden_new')
@app.route("/kunde/<int:kunde_id>/", methods=['POST', 'GET'], endpoint='kunden_edit')
def kunden_edit(kunde_id=None):
"""
    Creates a new Kunde or edits an existing one. As usual, create and update are very similar - here
    both are implemented in a single view.
    Class-based views might allow a more structured approach.
"""
leihen = []
kunde = None
if kunde_id:
try:
kunde_dict = rest_get_id("kunden", kunde_id)
kunde = Kunde(**kunde_dict)
leihen = rest_get_all("leihen")
leihen = [Leihe(**d) for d in leihen if d["kunde_id"] == kunde_id]
form = KundenForm(request.form, kunde)
except HTTPError, e:
return errorpage(e)
else:
form = KundenForm()
laendernamen = [c[1] for c in COUNTRIES]
if kunde_id:
if kunde.country in laendernamen:
del(laendernamen[laendernamen.index(kunde.country)])
laendernamen.insert(0, kunde.country)
    form.country.choices = zip(laendernamen, laendernamen)
if form.validate_on_submit():
try:
if not kunde_id: # Delete possible id-Field for new Kunden
del(form['id'])
rest_post("kunden", form.data)
flash(("Kunde %s wurde neu angelegt" % form.data["name"]))
else:
rest_put("kunden", kunde_id, form.data)
flash(("Kunde %s wurde gespeichert" % form.data["name"]))
except HTTPError, e:
return errorpage(e)
return redirect(url_for('kunden_list'))
return render_template("kunden_edit.html", form=form, kunde=kunde, leihen=leihen, leihen_fields=LEIHE_FIELDS)
@app.route("/kunde/<int:kunde_id>/delete/")
def kunden_delete(kunde_id):
"""
    Delete a Kunde - but only if there are no open Leihen.
"""
kunde = Kunde(**rest_get_id("kunden", kunde_id))
name = kunde.name
leihen_not_beendet = len([leihe["returned"] for leihe in kunde.leihen if not leihe["returned"]])
if leihen_not_beendet>0:
flash(u'%s hat noch offene Leihen und kann nicht gelöscht werden.' % name, category='warning')
return redirect(url_for('kunden_edit', kunde_id=kunde_id))
try:
rest_delete_id("kunden", kunde_id)
except HTTPError, e:
return errorpage(e)
flash(u'%s aus Kundendatei gelöscht' % name)
return redirect(url_for('kunden_list'))
@app.route("/leihen/")
def leihen_list():
leihen = rest_get_all("leihen")
leihen = [Leihe(**d) for d in leihen]
leihen.sort(key=lambda x: x.id)
return render_template("leihen_list.html", leihen=leihen)
@app.route("/leihe/", methods=["POST", "GET"], endpoint='leihen_new')
@app.route("/leihe/<int:leihe_id>/", methods=["POST", "GET"], endpoint='leihen_edit')
def leihen_edit(leihe_id=None):
"""
    Create and update for Leihen.
    Evaluates the URL query parameters kunde_id and station_id, which preset the corresponding fields.
"""
leihe = None
try:
if leihe_id:
leihe = Leihe(**rest_get_id("leihen", leihe_id))
form = LeiheForm(request.form, leihe)
else:
form = LeiheForm()
form.kunde_id.choices = [(kunde["id"], kunde["name"]) for kunde in rest_get_all("kunden")]
form.car_id.choices = [(car["id"], car["platenumber"]) for car in rest_get_all("cars")]
form.station_abhol_id.choices = [(station["id"], station["name"]) for station in rest_get_all("stations")]
form.station_return_id.choices = [(station["id"], station["name"]) for station in rest_get_all("stations")]
if u'kunde_id' in request.args:
form.kunde_id.data = int(request.args[u'kunde_id'])
if u'station_id' in request.args:
form.station_abhol_id.data = int(request.args[u'station_id'])
form.station_return_id.data = int(request.args[u'station_id'])
if form.validate_on_submit():
kunde = Kunde(**rest_get_id("kunden", form.data["kunde_id"]))
data = form.data
data["von"] = form.data["von"].isoformat()
data["bis"] = form.data["bis"].isoformat()
if not leihe_id:
del(data["id"])
rest_post("leihen", data)
flash(u"Leihe für %s wurde neu angelegt" % kunde.name, category="success")
else:
rest_put("leihen", leihe_id, data)
flash(u"Leihe für %s wurde neu geändert" % kunde.name, category="success")
return redirect(url_for('leihen_list'))
except HTTPError, e:
return errorpage(e)
return render_template("leihe_edit.html", form=form, leihe=leihe)
@app.route("/kunde/<int:kunde_id>/neueleihe")
def leihe_neu1(kunde_id):
"""
    Clicking "Neue Leihe" in the customer detail view leads here; the distances from the customer's
    address to our rental stations are computed and shown as a table.
"""
try:
kunde = Kunde(**rest_get_id("kunden", kunde_id))
stationen = [Station(**d) for d in rest_get_all("stations")]
cachehits = 0
for station in stationen:
try:
station.distance, cachehit = get_distance(kunde.adresse, station.adresse)
except DistanceServiceError:
station.distance = {'distance' : { 'text' : 'Unavailable', 'value' : -1}, 'duration' : { 'text' : "bis ans Ende der Zeit", 'value' : -1}}
cachehit = False
if cachehit:
cachehits += 1
stationen.sort(key=lambda x: x.distance['duration']['value'])
flash("Cachehits bei der Distanzberechnung: %d" % cachehits)
return render_template("neue_leihe_kunde1.html", kunde=kunde, stationen=stationen)
except HTTPError, e:
return errorpage(e)
@app.errorhandler(404)
def errorpage(e=None):
status = 404
content = "Page not found"
message = "Page not Found"
if e:
if hasattr(e, "response"):
try:
status = e.response.status_code
content = e.response.content
except Exception:
pass
if hasattr(e, "message"):
message = e.message
return render_template('errorpage.html', status=status, content=content, message=message), status
if __name__ == "__main__":
app.run()
| mit | -1,421,221,711,509,973,500 | 38.92926 | 153 | 0.606136 | false | 3.063148 | false | false | false |
avida/BOMBuilds | converter.py | 1 | 6969 | #!/usr/bin/python
#coding=utf8
import sys
import uuid
import re
time_exp = re.compile('^\d{1,2}:\d{1,2}$')
supply_exp = re.compile('^\d{1,3}$')
ColloquialToCodeDictionary = {
#terran
'SupplyDepot' : ['supply', 'Supply'],
'Barracks' : ['Barracks', 'barrack'],
'Bunker' : ['Bunker','bunker'],
'Refinery' : ['refinery','Refinery', 'gas'],
'EngineeringBay': ['Engineering Bay', 'engeneering_bay'],
'CommandCenter' : ['command', 'Command Center', 'command_center'],
'Factory' : ['Factory', 'factory'],
'Starport' : ['cosmoport','Starport', 'starport'],
'Armory' : ['Armory', 'arsenal'],
'SCV' : ['SCV'],
'Marine' : ['Marine', 'marine'],
'Marauder' : ['Marauder', 'marauder'],
'Medivac' : ['Medivac', 'medivac'],
'Hellion' : ['Hellion', 'hellion'],
'HellionTank' : ['Hellbat'],
'Reaper' : ['Reaper'],
'SiegeTank' : ['tank'],
'BarracksReactor' : ['BarracksReactor','reactor_barrack'],
'FactoryReactor' : ['FactoryReactor', 'reactor_fact'],
'StarportReactor' : ['StarportReactor'],
'FactoryTechLab' : ['FactoryTechLab', 'lab_fact'],
'BarracksTechLab' : ['BarracksTechLab', 'lab_barrack'],
'StarportTechLab' : ['StarportTechLab'],
'MissleTurret' : ['turret'],
'VikingFighter' : ['viking'],
'HellionTank' : ['hellbat'],
# zerg
'Overlord' : ['Overlord'],
'Hatchery' : ['Hatchery'],
'SpawningPool' : ['Pool'],
'Extractor' : ['Extractor'],
'Queen' : ['queen'],
'Drone' : ['Drone'],
'Zergling' : ['Zergling'],
'Spire' : ['Spire'],
'EvolutionChamber' : ['evolution chamber'],
'BanelingNest' : ['Baneling Nest'],
'RoachWarren' : ['RoachWarren'],
'InfestationPit' : ['InfestationPit'],
'HydraliskDen' : ['HydraliskDen'],
# protoss
}
AbilityCodeDictionary = {
#terran
'"UpgradeToOrbital", 0' : ['Orbital', 'orbital_command' ],
'"EngineeringBayResearch", 2' : ['+1_terran_infantry_attack', '+1 Infantry Attack'],
'"EngineeringBayResearch", 6' : ['+1 Infantry Armor'],
'"EngineeringBayResearch", 3' : ['+2 Infantry Attack'],
'"EngineeringBayResearch", 7' : ['+2 Infantry Armor'],
'"ArmoryResearchSwarm", 0' : ['+1 Vehicle Weapon', '+1 Vehicle weapon'],
'"ArmoryResearchSwarm", 1' : ['+2 Vehicle Weapon', '+2 Vehicle weapon'],
'"ArmoryResearchSwarm", 3' : ['+1 Vehicle Armor', '+1 Vehicle armor'],
'"ArmoryResearchSwarm", 4' : ['+2 Vehicle Armor', '+2 Vehicle armor'],
'"BarracksTechLabResearch", 0': ['Stimpack'],
'"BarracksTechLabResearch", 1': ['Combat Shield', 'Combat shields', 'shields'],
'"BarracksTechLabResearch", 2': ['Fugas', 'Concussive shells'],
'"FactoryTechLabResearch", 2': ['servos'],
#zerg
'"evolutionchamberresearch", 0' : ['+1 melee attack'],
'"evolutionchamberresearch", 3' : ['+1 ground armor'],
'"evolutionchamberresearch", 6' : ['+1 range attack'],
'"BanelingNestResearch", 0' : ['Central hooks'],
'"UpgradeToLair", 0' : ['Lair'],
'"SpawningPoolResearch", 1' : ['metabolic boost'],
'"LairResearch", 1' : ['carapace', 'Overlord speed' ],
'"RoachWarrenResearch", 1' : ['roach speed'],
#protoss
}
def ColloqiualToCode(name):
for key in ColloquialToCodeDictionary.keys():
if (name in ColloquialToCodeDictionary[key]):
return key
raise Exception('Unknown name ' + name);
def GetAbilityCode(name):
for key in AbilityCodeDictionary.keys():
if (name in AbilityCodeDictionary[key]):
return key
raise Exception('Unknown ability ' + name);
def isAbility(name):
if name[:1] == '+':
return True
for key in AbilityCodeDictionary.keys():
if (name in AbilityCodeDictionary[key]):
return True
return False
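
# Expected input file format (inferred from the parsing loops below; the lines
# shown here are only illustrative):
#
#   # lines starting with '#' are ignored
#   I 0:45 Scout with the first worker     <- info tip: 'I', time, free text
#   0:17 14 supply                         <- build step: time, supply, unit/building name
#   4:30 +1 Infantry Attack                <- abilities may omit the supply column
#   Objectives:                            <- everything after this marker is an objective
#   6:00 22 Marine                         <- objective: time, amount, unit name
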
try:
filename = sys.argv[1]
except IndexError:
print 'specify correct file name in cmd line'
sys.exit(0)
f = open(filename,'r')
outfile = '.'.join(filename.split('.')[:1]+['bo'])
out = open(outfile, 'w')
counter = 0
infoCounter = 0
for s in f:
if 'Objectives:' in s:
break;
line = s.strip().split()
if len(line) == 0:
continue
if(s[:1] == '#'):
continue
if(s[:1] == 'I'):
#read Info tip
time = line[1]
try:
minutes = int(time.split(':')[0])
seconds = int(time.split(':')[1])
except ValueError:
print "error parsing line\n %s" % " ".join(line)
break
info = " ".join(line[2:])
out.write('gv_bOInfoTips[%d] = "%s";\n' % (infoCounter, info) )
out.write('gv_bOInfoTimings[%d] = %d;\n' % (infoCounter, minutes* 60 + seconds) )
infoCounter += 1
else:
time = line[0]
minutes = int(time.split(':')[0])
seconds = int(time.split(':')[1])
try:
supply = line[1]
if not supply_exp.match(supply):
supply = 0
name = " ".join(line[1:])
else:
supply = int(line[1])
name = " ".join(line[2:])
except ValueError:
print "error parsing line\n %s" % " ".join(line)
break
if not isAbility(name):
name = ColloqiualToCode(name)
out.write("gv_bOUnits[%d] = \"%s\";\n" % (counter, name))
else:
ability_code = GetAbilityCode(name)
out.write("gv_bOAbilities[%d] = AbilityCommand(%s);\n" % (counter, ability_code))
out.write("gv_bOAbilities_Name[%d] = gf_GetAbilityName(AbilityCommand(%s));\n" % (counter, ability_code))
out.write("gv_bOSupplies[%d] = %d;\n" % (counter, supply))
out.write("gv_bOTimings[%d][0] = %d;\n" % (counter, minutes))
out.write("gv_bOTimings[%d][1] = %d;\n" % (counter, seconds))
counter +=1
out.write('gv_bOEnd = %d;\n'% counter)
counter =0
# Writing objectives
for s in f:
try:
line = s.strip().split()
if len(line) == 0:
continue
time = line[0]
try:
supply = int(line[1])
name = " ".join(line[2:])
minutes = int(time.split(':')[0])
seconds = int(time.split(':')[1])
except ValueError:
print "error parsing line\n %s" % " ".join(line)
break
except IndexError:
print "error in line " + s
out.write("gv_bOObjectivesUnits[%d] = \"%s\";\n" % (counter, ColloqiualToCode(name)))
out.write("gv_bOObjectivesUnitsAmount[%d] = %d;\n" % (counter, supply))
out.write("gv_bOObjectivesTimings[%d] = %d;\n" % (counter, minutes*60 + seconds))
counter += 1
out.write("gv_bOObjectivesCount = %d;\n" % (counter))
#out.write('gv_bOuuid="%s"' % uuid.uuid4())
out.close()
print 'Converting done'
print 'out file is '+ outfile
| mit | -1,400,849,885,622,735,400 | 35.67027 | 114 | 0.548572 | false | 3.035279 | false | false | false |
Benocs/core | src/daemon/core/services/quagga.py | 1 | 33565 | #
# CORE
# Copyright (c)2010-2012 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# author: Jeff Ahrenholz <[email protected]>
#
#
# Copyright (c) 2014 Benocs GmbH
#
# author: Robert Wuttke <[email protected]>
#
# See the LICENSE file included in this distribution.
#
'''
quagga.py: defines routing services provided by Quagga.
'''
import os
if os.uname()[0] == 'Linux':
from core.netns import nodes
elif os.uname()[0] == 'FreeBSD':
from core.bsd import nodes
from core.service import CoreService, addservice
from core.misc.ipaddr import IPPrefix, isIPAddress
from core.misc.ipaddr import IPv4Addr, IPv4Prefix, isIPv4Address
from core.misc.ipaddr import IPv6Addr, IPv6Prefix, isIPv6Address
from core.misc.ipaddr import Loopback, Interface
from core.misc.netid import NetIDNodeMap
from core.api import coreapi
from core.constants import *
from core.services import service_helpers
from core.services import service_flags
QUAGGA_USER='quagga'
QUAGGA_GROUP='quagga'
if os.uname()[0] == 'FreeBSD':
QUAGGA_GROUP='wheel'
class Zebra(CoreService):
    ''' The Zebra service provides the core routing manager that the other
        Quagga routing daemons depend on.
    '''
_name = 'zebra'
_group = 'Quagga'
_depends = ('vtysh',)
_dirs = ('/etc/quagga', '/var/run/quagga')
_configs = ('/etc/quagga/daemons',
'/etc/quagga/zebra.conf',
'/etc/quagga/vtysh.conf')
_startindex = 35
_startup = ('/etc/init.d/quagga start',)
_shutdown = ('/etc/init.d/quagga stop', )
_validate = ('pidof zebra', )
_starttime = 10
@classmethod
def generateconfig(cls, node, filename, services):
''' Return the Quagga.conf or quaggaboot.sh file contents.
'''
if filename == cls._configs[0]:
return cls.generateDaemonsConf(node, services)
elif filename == cls._configs[1]:
return cls.generateZebraConf(node, services)
elif filename == cls._configs[2]:
return cls.generateVtyshConf(node, services)
else:
raise ValueError
@classmethod
def generateVtyshConf(cls, node, services):
''' Returns configuration file text.
'''
return 'service integrated-vtysh-config'
@classmethod
def generateZebraConf(cls, node, services):
''' Returns configuration file text that defines which daemons to run.
'''
cfg = []
cfg.append('log file /tmp/quagga-zebra-%s.log\n' % node.name)
cfg.append('hostname %s\n' % node.name)
cfg.append('agentx\n')
cfg.append('interface lo\n')
if node.enable_ipv4:
cfg.append(' ip address %s/32\n' % node.getLoopbackIPv4())
if node.enable_ipv6:
cfg.append(' ipv6 address %s/128\n' % node.getLoopbackIPv6())
for ifc in node.netifs():
# do not ever include control interfaces in anything
if hasattr(ifc, 'control') and ifc.control == True:
continue
cfg.append('interface %s\n' % ifc.name)
for a in ifc.addrlist:
if isIPv4Address(a):
cfg.append(' ip address %s\n' % a)
if isIPv6Address(a):
cfg.append(' ipv6 address %s\n' % a)
cfg.append('!\n')
if node.enable_ipv4:
cfg.append('ip forwarding\n')
if node.enable_ipv6:
cfg.append('ipv6 forwarding\n')
return ''.join(cfg)
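
    # Sketch of the zebra.conf generated above for a node 'n1' with one data
    # interface (addresses are illustrative and depend on the session):
    #
    #   log file /tmp/quagga-zebra-n1.log
    #   hostname n1
    #   agentx
    #   interface lo
    #    ip address 10.0.0.1/32
    #   interface eth0
    #    ip address 10.0.1.1/24
    #   !
    #   ip forwarding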
@classmethod
def generateDaemonsConf(cls, node, services):
''' Returns configuration file text that defines which daemons to run.
'''
cfg = []
cfg.extend([cls._name, '=yes\n'])
for s in services:
if cls._name not in s._depends:
continue
cfg.extend([s._daemonname, '=yes\n'])
return ''.join(cfg)
addservice(Zebra)
class QuaggaService(CoreService):
''' Parent class for Quagga services. Defines properties and methods
common to Quagga's routing daemons.
'''
_name = 'QuaggaDaemon'
_daemonname = ''
_group = 'Quagga'
_depends = ('zebra', )
_dirs = ()
_configs = ()
_startindex = 40
_startup = ()
_shutdown = ()
_meta = 'The config file for this service can be found in the Zebra service.'
_ipv4_routing = False
_ipv6_routing = False
@staticmethod
def routerid(node):
        ''' Helper to return the IPv4 loopback address of a node as its router ID.
'''
# Use IPv4 loopback address of this node as the routerID.
        # Don't get v4-loopback-addr directly from node as the node might have
# IPv4 disabled. Instead, directly query the central 'database' for the
# IPv4 address that would be the nodes IPv4 loopback.
return str(Loopback.getLoopbackIPv4(node))
@staticmethod
def rj45check(ifc):
''' Helper to detect whether interface is connected an external RJ45
link.
'''
if ifc.net:
for peerifc in ifc.net.netifs():
if peerifc == ifc:
continue
if isinstance(peerifc, nodes.RJ45Node):
return True
return False
@classmethod
def generateconfig(cls, node, filename, services):
return cls.generatequaggaconfig(node)
@classmethod
def generatequaggaloconfig(cls, node):
return ''
@classmethod
def generatequaggaifcconfig(cls, node, ifc):
return ''
@classmethod
def generatequaggaconfig(cls, node):
return ''
@classmethod
def interface_iterator(cls, node, callback=None):
result = []
for ifc in node.netifs():
# do not ever include control interfaces in anything
if hasattr(ifc, 'control') and ifc.control == True:
continue
if not callback is None:
result.extend(callback(node, ifc))
return result
@staticmethod
def addrstr(x):
''' helper for mapping IP addresses to zebra config statements
'''
if isIPv4Address(x):
return 'ip address %s' % x
elif isIPv6Address(x):
return 'ipv6 address %s' % x
else:
            raise ValueError('invalid address: %s' % x)
class Ospfv2(QuaggaService):
''' The OSPFv2 service provides IPv4 routing for wired networks. It does
not build its own configuration file but has hooks for adding to the
unified Quagga.conf file.
'''
_name = 'OSPFv2'
_daemonname = 'ospfd'
#_startup = ('sh quaggaboot.sh ospfd',)
#_shutdown = ('killall ospfd', )
_configs = ('/etc/quagga/ospfd.conf',)
_validate = ('pidof ospfd', )
_ipv4_routing = True
#_starttime = 10
@staticmethod
def mtucheck(ifc):
''' Helper to detect MTU mismatch and add the appropriate OSPF
mtu-ignore command. This is needed when e.g. a node is linked via a
GreTap device.
'''
if ifc.mtu != 1500:
# a workaround for PhysicalNode GreTap, which has no knowledge of
# the other nodes/nets
return ' ip ospf mtu-ignore\n'
if not ifc.net:
return ''
for i in ifc.net.netifs():
if i.mtu != ifc.mtu:
return ' ip ospf mtu-ignore\n'
return ''
@staticmethod
def ptpcheck(ifc):
''' Helper to detect whether interface is connected to a notional
point-to-point link.
'''
#if isinstance(ifc.net, nodes.PtpNet):
# return ' ip ospf network point-to-point\n'
return ''
@classmethod
def generate_network_statement(cls, node, ifc):
cfg = []
# find any link on which two equal netid's (i.e., AS numbers) are
# present and configure an ospf-session on this interface
# on all other interfaces, disable ospf
for idx, net_netif in list(ifc.net._netif.items()):
# skip our own interface
if ifc == net_netif:
continue
# skip control interface
if hasattr(ifc, 'control') and ifc.control == True:
continue
# found the same AS, enable IGP/OSPF
if node.netid == net_netif.node.netid:
if not service_flags.Router in net_netif.node.services:
cfg.append(' passive-interface %s\n' % ifc.name)
for a in ifc.addrlist:
if not isIPv4Address(a):
continue
cfg.append(' network %s area 0.0.0.0\n' % a)
return cfg
@classmethod
def generatequaggaconfig(cls, node):
if not node.enable_ipv4:
return ''
        cfg = '!\n! OSPFv2 (for IPv4) configuration\n!\n'
        cfg += 'log file /tmp/quagga-ospf-%s.log\n' % node.name
        cfg += cls.generatequaggaloconfig(node)
for ifc in node.netifs():
# do not ever include control interfaces in anything
if hasattr(ifc, 'control') and ifc.control == True:
continue
cfg += 'interface %s\n' % ifc.name
cfg += cls.generatequaggaifcconfig(node, ifc)
ipv4list = [x for x in ifc.addrlist if isIPv4Address(x)]
cfg += ' '
cfg += '\n '.join(map(cls.addrstr, ipv4list))
cfg += '\n'
cfg += '!\n'
cfg += 'router ospf\n'
cfg += ' router-id %s\n' % cls.routerid(node)
cfg += ' redistribute connected\n'
cfg += '!\n'
cfg += ''.join(set(cls.interface_iterator(node,
cls.generate_network_statement)))
cfg += '!\n'
return cfg
@classmethod
def generatequaggaloconfig(cls, node):
if not node.enable_ipv4 and not node.enable_ipv6:
return ''
cfg = ''
cfg += 'interface lo\n'
cfg += ' ip address %s\n' % node.getLoopbackIPv4()
cfg += '!\n'
return cfg
@classmethod
def generatequaggaifcconfig(cls, node, ifc):
if not node.enable_ipv4:
return ''
# do not include control interfaces
if hasattr(ifc, 'control') and ifc.control == True:
return ''
cfg = ''
cfg += cls.mtucheck(ifc)
cfg += cls.ptpcheck(ifc)
cfg += '!\n'
return cfg
addservice(Ospfv2)
class Ospfv3(QuaggaService):
''' The OSPFv3 service provides IPv6 routing for wired networks. It does
not build its own configuration file but has hooks for adding to the
unified Quagga.conf file.
'''
_name = 'OSPFv3'
_daemonname = 'ospf6d'
#_startup = ('sh quaggaboot.sh ospf6d',)
#_shutdown = ('killall ospf6d', )
_configs = ('/etc/quagga/ospf6d.conf',)
_validate = ('pidof ospf6d', )
_ipv4_routing = True
_ipv6_routing = True
#_starttime = 10
@staticmethod
def minmtu(ifc):
''' Helper to discover the minimum MTU of interfaces linked with the
given interface.
'''
mtu = ifc.mtu
if not ifc.net:
return mtu
for i in ifc.net.netifs():
if i.mtu < mtu:
mtu = i.mtu
return mtu
@classmethod
def mtucheck(cls, ifc):
''' Helper to detect MTU mismatch and add the appropriate OSPFv3
ifmtu command. This is needed when e.g. a node is linked via a
GreTap device.
'''
minmtu = cls.minmtu(ifc)
if minmtu < ifc.mtu:
return ' ipv6 ospf6 ifmtu %d\n' % minmtu
else:
return ''
@classmethod
def generate_area_statement(cls, node, ifc):
cfg = []
for idx, net_netif in list(ifc.net._netif.items()):
# skip our own interface
if ifc == net_netif:
continue
# skip control interface
if hasattr(ifc, 'control') and ifc.control == True:
continue
# found the same AS, enable IGP/OSPF
if node.netid == net_netif.node.netid:
if service_flags.Router in net_netif.node.services:
cfg.append(' interface %s area 0.0.0.0\n' % ifc.name)
return cfg
@classmethod
def generatequaggaconfig(cls, node):
if not node.enable_ipv6:
return ''
cfg = ''
cfg += cls.generatequaggaloconfig(node)
for ifc in node.netifs():
# do not ever include control interfaces in anything
if hasattr(ifc, 'control') and ifc.control == True:
continue
cfg += 'interface %s\n' % ifc.name
cfg += cls.generatequaggaifcconfig(node, ifc)
cfg += '!\n'
cfg += 'log file /tmp/quagga-ospf6-%s.log\n' % node.name
cfg += 'router ospf6\n'
rtrid = cls.routerid(node)
cfg += ' router-id %s\n' % rtrid
cfg += ' redistribute connected\n'
cfg += '!\n'
cfg += ''.join(set(cls.interface_iterator(node,
cls.generate_area_statement)))
return cfg
@classmethod
def generatequaggaloconfig(cls, node):
if not node.enable_ipv4 and not node.enable_ipv6:
return ''
cfg = ''
return cfg
@classmethod
def generatequaggaifcconfig(cls, node, ifc):
if not node.enable_ipv4 and not node.enable_ipv6:
return ''
if hasattr(ifc, 'control') and ifc.control == True:
return ''
enable_ifc = False
for a in ifc.addrlist:
if node.enable_ipv4 and isIPv4Address(a):
enable_ifc = True
if node.enable_ipv6 and isIPv6Address(a):
enable_ifc = True
cfg = ''
if enable_ifc:
cfg += cls.mtucheck(ifc)
for idx, net_netif in list(ifc.net._netif.items()):
# skip our own interface
if ifc == net_netif:
continue
# skip control interface
if hasattr(ifc, 'control') and ifc.control == True:
continue
# found the same AS, enable IGP/OSPF
if node.netid == net_netif.node.netid:
if not service_flags.Router in net_netif.node.services:
# other end of link is not router. don't send hellos
cfg += ' ipv6 ospf6 passive\n'
break
return cfg
addservice(Ospfv3)
class Ospfv3mdr(Ospfv3):
''' The OSPFv3 MANET Designated Router (MDR) service provides IPv6
routing for wireless networks. It does not build its own
configuration file but has hooks for adding to the
unified Quagga.conf file.
'''
_name = 'OSPFv3MDR'
_daemonname = 'ospf6d'
_ipv4_routing = True
#_starttime = 10
@classmethod
def generatequaggaifcconfig(cls, node, ifc):
        cfg = super().generatequaggaifcconfig(node, ifc)
# super() decides whether this interface is to be included.
# honor its decision
if len(cfg) == 0:
return cfg
return cfg + '''\
ipv6 ospf6 instance-id 65
ipv6 ospf6 hello-interval 2
ipv6 ospf6 dead-interval 6
ipv6 ospf6 retransmit-interval 5
ipv6 ospf6 network manet-designated-router
ipv6 ospf6 diffhellos
ipv6 ospf6 adjacencyconnectivity uniconnected
ipv6 ospf6 lsafullness mincostlsa
'''
addservice(Ospfv3mdr)
class Bgp(QuaggaService):
    ''' The BGP service provides interdomain routing.
Peers must be manually configured, with a full mesh for those
having the same AS number.
'''
_name = 'BGP'
_daemonname = 'bgpd'
#_startup = ('sh quaggaboot.sh bgpd',)
#_shutdown = ('killall bgpd', )
_configs = ('/etc/quagga/bgpd.conf',)
_validate = ('pidof bgpd', )
_custom_needed = False
_ipv4_routing = True
_ipv6_routing = True
#_starttime = 20
@classmethod
def configure_EGP(cls, node):
cfg = ''
v6cfg = []
# find any link on which two different netid's (i.e., AS numbers) are
# present and configure a bgp-session between the two corresponding nodes.
for localnetif in node.netifs():
# do not include control interfaces
if hasattr(localnetif, 'control') and localnetif.control == True:
continue
for idx, net_netif in list(localnetif.net._netif.items()):
candidate_node = net_netif.node
# skip our own interface
                if localnetif == net_netif:
continue
# found two different ASes.
if not node.netid == net_netif.node.netid and \
service_flags.EGP in net_netif.node.services:
for local_node_addr in localnetif.addrlist:
local_node_addr_str = str(local_node_addr.split('/')[0])
if not node.enable_ipv4 and \
isIPv4Address(local_node_addr):
continue
if not node.enable_ipv6 and \
isIPv6Address(local_node_addr):
continue
for remote_node_addr in net_netif.addrlist:
remote_node_addr_str = str(remote_node_addr.split('/')[0])
if not net_netif.node.enable_ipv4 and \
isIPv4Address(remote_node_addr):
continue
if not net_netif.node.enable_ipv6 and \
isIPv6Address(remote_node_addr):
continue
# for inter-AS links, use interface addresses
# instead of loopback addresses
if (isIPv4Address(local_node_addr) and \
isIPv4Address(remote_node_addr)):
cfg += ' neighbor %s remote-as %s\n' % \
(remote_node_addr_str, \
str(net_netif.node.netid))
elif (isIPv6Address(local_node_addr) and \
isIPv6Address(remote_node_addr)):
cfg += ' neighbor %s remote-as %s\n' % \
(remote_node_addr_str, str(net_netif.node.netid))
v6cfg.append((' neighbor %s activate\n' %
remote_node_addr_str))
v6cfg.append((' network %s\n' %
str(local_node_addr)))
return cfg, v6cfg
@classmethod
def generatequaggaconfig(cls, node):
v6cfg = []
v6prefixes = []
if not node.enable_ipv4 and not node.enable_ipv6:
return ''
cfg = '!\n! BGP configuration\n!\n'
cfg += 'log file /tmp/quagga-bgp-%s.log\n' % node.name
cfg += 'router bgp %s\n' % node.netid
cfg += ' bgp router-id %s\n' % cls.routerid(node)
cfg += ' redistribute kernel\n'
cfg += ' redistribute static\n'
cfg += '!\n'
if hasattr(node, 'netid') and not node.netid is None:
netid = node.netid
else:
# TODO: netid 0 is invalid
netid = 0
# configure EBGP connections:
if service_flags.EGP in node.services:
tmpcfg, tmpv6cfg = cls.configure_EGP(node)
cfg += tmpcfg
v6cfg.extend(tmpv6cfg)
# configure IBGP connections
confstr_list = [cfg]
# full mesh
service_helpers.nodewalker(node, node, confstr_list,
cls.nodewalker_ibgp_find_neighbors_callback)
cfg = ''.join(confstr_list)
if node.enable_ipv4 and service_flags.EGP in node.services:
interface_net = Interface.getInterfaceNet_per_net(\
node.session.sessionid, netid, 4)
loopback_net = Loopback.getLoopbackNet_per_net(\
node.session.sessionid, netid, 4)
cfg += ' network %s\n' % str(loopback_net)
cfg += ' network %s\n' % str(interface_net)
cfg += ' aggregate-address %s summary-only\n' % str(loopback_net)
cfg += ' aggregate-address %s summary-only\n' % str(interface_net)
if node.enable_ipv6:
v6_ibgp_neighbor_list = []
service_helpers.nodewalker(node, node, v6_ibgp_neighbor_list,
cls.nodewalker_ibgp_find_neighbor_addrs_v6_callback)
cfg += ' address-family ipv6\n'
# activate IBGP neighbors
cfg += ''.join([(' neighbor %s activate\n') % \
(str(remote_addr).split('/')[0]) \
for local_addr, remote_addr in v6_ibgp_neighbor_list])
# activate EBGP neighbors
cfg += ''.join(v6cfg)
if service_flags.EGP in node.services:
# announce networks
interface_net = Interface.getInterfaceNet_per_net(\
node.session.sessionid, netid, 6)
loopback_net = Loopback.getLoopbackNet_per_net(\
node.session.sessionid, netid, 6)
cfg += ' network %s\n' % str(loopback_net)
cfg += ' network %s\n' % str(interface_net)
cfg += ' aggregate-address %s summary-only\n' % str(loopback_net)
cfg += ' aggregate-address %s summary-only\n' % str(interface_net)
adj_addrs = cls.collect_adjacent_loopback_addrs_v6(cls, node)
for adj_addr in adj_addrs:
cfg += ' network %s/128\n' % str(adj_addr)
cfg += '\n exit-address-family\n'
return cfg
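
    # Rough shape of the bgpd.conf produced above for an AS-border router
    # (AS numbers and addresses are illustrative):
    #
    #   router bgp 1000
    #    bgp router-id 10.0.0.1
    #    redistribute kernel
    #    redistribute static
    #   !
    #    neighbor 10.1.0.2 remote-as 2000          <- EBGP peer, interface address
    #    neighbor 10.0.0.2 remote-as 1000          <- IBGP full mesh, loopback address
    #    neighbor 10.0.0.2 update-source 10.0.0.1
    #    network 10.0.0.0/16
    #    aggregate-address 10.0.0.0/16 summary-only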
@staticmethod
def nodewalker_ibgp_find_neighbors_callback(startnode, currentnode):
result = []
if service_flags.Router in startnode.services and \
service_flags.Router in currentnode.services and \
not startnode == currentnode and \
startnode.netid == currentnode.netid:
startnode_ipversions = startnode.getIPversions()
currentnode_ipversions = currentnode.getIPversions()
ipversions = []
for ipversion in 4, 6:
if ipversion in startnode_ipversions and currentnode_ipversions:
ipversions.append(ipversion)
for ipversion in ipversions:
if ipversion == 4:
startnode_addr = startnode.getLoopbackIPv4()
currentnode_addr = currentnode.getLoopbackIPv4()
elif ipversion == 6:
startnode_addr = startnode.getLoopbackIPv6()
currentnode_addr = currentnode.getLoopbackIPv6()
result.extend([' neighbor %s remote-as %s\n' % \
(str(currentnode_addr), str(currentnode.netid)),
' neighbor %s update-source %s\n' % \
(str(currentnode_addr), str(startnode_addr))
])
if service_flags.EGP in startnode.services:
result.append(' neighbor %s next-hop-self\n' % str(currentnode_addr))
return result
@staticmethod
def nodewalker_ibgp_find_neighbor_addrs_v6_callback(startnode, currentnode):
result = []
if service_flags.Router in startnode.services and \
service_flags.Router in currentnode.services and \
not startnode == currentnode and \
startnode.netid == currentnode.netid:
if startnode.enable_ipv6 and currentnode.enable_ipv6:
result.append((startnode.getLoopbackIPv6(),
currentnode.getLoopbackIPv6()))
return result
@staticmethod
def collect_adjacent_loopback_addrs_v6(cls, node):
addrs = []
for ifc in node.netifs():
for idx, net_netif in list(ifc.net._netif.items()):
# skip our own interface
if ifc == net_netif:
continue
# skip control interface
if hasattr(ifc, 'control') and ifc.control == True:
continue
# found the same AS, collect loopback addresses
if node.netid == net_netif.node.netid:
# other end of link is no router. announce its loopback addr
if not service_flags.Router in net_netif.node.services:
if net_netif.node.enable_ipv6:
addrs.append(net_netif.node.getLoopbackIPv6())
return addrs
addservice(Bgp)
class Rip(QuaggaService):
''' The RIP service provides IPv4 routing for wired networks.
'''
_name = 'RIP'
_daemonname = 'ripd'
#_startup = ('sh quaggaboot.sh ripd',)
#_shutdown = ('killall ripd', )
_configs = ('/etc/quagga/ripd.conf',)
_validate = ('pidof ripd', )
_ipv4_routing = True
#_starttime = 10
@classmethod
def generatequaggaconfig(cls, node):
cfg = '''\
log file /tmp/quagga-ospf-%s.log
router rip
redistribute static
redistribute connected
redistribute ospf
network 0.0.0.0/0
!
''' % node.name
return cfg
addservice(Rip)
class Ripng(QuaggaService):
''' The RIP NG service provides IPv6 routing for wired networks.
'''
_name = 'RIPNG'
_daemonname = 'ripngd'
#_startup = ('sh quaggaboot.sh ripngd',)
#_shutdown = ('killall ripngd', )
_configs = ('/etc/quagga/ripngd.conf',)
_validate = ('pidof ripngd', )
_ipv6_routing = True
#_starttime = 10
@classmethod
def generatequaggaconfig(cls, node):
cfg = '''\
log file /tmp/quagga-ospf-%s.log
router ripng
redistribute static
redistribute connected
redistribute ospf6
network ::/0
!
''' % node.name
return cfg
addservice(Ripng)
class Babel(QuaggaService):
''' The Babel service provides a loop-avoiding distance-vector routing
protocol for IPv6 and IPv4 with fast convergence properties.
'''
_name = 'Babel'
_daemonname = 'babeld'
#_startup = ('sh quaggaboot.sh babeld',)
#_shutdown = ('killall babeld', )
_configs = ('/etc/quagga/babeld.conf',)
_validate = ('pidof babeld', )
_ipv6_routing = True
#_starttime = 10
@classmethod
def generatequaggaconfig(cls, node):
        cfg = 'log file /tmp/quagga-babel-%s.log\n' % node.name
        cfg += 'router babel\n'
for ifc in node.netifs():
if hasattr(ifc, 'control') and ifc.control == True:
continue
cfg += ' network %s\n' % ifc.name
cfg += ' redistribute static\n redistribute connected\n'
return cfg
@classmethod
def generatequaggaifcconfig(cls, node, ifc):
if ifc.net and ifc.net.linktype == coreapi.CORE_LINK_WIRELESS:
return ' babel wireless\n no babel split-horizon\n'
else:
return ' babel wired\n babel split-horizon\n'
addservice(Babel)
class ISIS(QuaggaService):
''' The user generated service isisd provides a ISIS!
'''
_name = 'ISIS'
_daemonname = 'isisd'
#_startup = ('sh quaggaboot.sh isisd',)
#_shutdown = ('killall isisd', )
_configs = ('/etc/quagga/isisd.conf',)
_validate = ('pidof isisd', )
_ipv4_routing = True
_ipv6_routing = True
#_starttime = 10
@classmethod
def generatequaggaifcconfig(cls, node, ifc):
added_ifc = False
if not node.enable_ipv4 and not node.enable_ipv6:
return ''
# do not include control interfaces
if hasattr(ifc, 'control') and ifc.control == True:
return ''
cfg = ''
# find any link on which two equal netid's (e.g., AS numbers) are
# present and on which two routers are present.
# then configure an isis-session on this interface
# on all other interfaces, disable isis
for idx, net_netif in list(ifc.net._netif.items()):
# only add each interface once
if added_ifc:
break
# skip our own interface
if ifc == net_netif:
continue
# skip control interface
if hasattr(ifc, 'control') and ifc.control == True:
continue
# found the same AS, enable IGP/ISIS
if not added_ifc:
if node.enable_ipv4:
cfg += ' ip router isis 1\n'
if node.enable_ipv6:
cfg += ' ipv6 router isis 1\n'
if node.netid == net_netif.node.netid:
# other end of link is not router. don't send ISIS hellos
if not service_flags.Router in net_netif.node.services:
cfg += ' isis passive\n'
else:
cfg += ' isis circuit-type level-2-only\n'
# if this interface is connected via a point-to-point-link,
# set isis network point-to-point.
# if this directive is not set, isis will speak mode lan
if isinstance(ifc.net, nodes.PtpNet):
cfg += ' isis network point-to-point\n'
elif service_flags.Router in net_netif.node.services:
cfg += ' isis passive\n'
cfg += '!\n'
# only add each interface once
added_ifc = True
return cfg
@classmethod
def generatequaggaloconfig(cls, node):
if not node.enable_ipv4 and not node.enable_ipv6:
return ''
cfg = ''
cfg += 'interface lo\n'
if node.enable_ipv4:
cfg += ' ip router isis 1\n'
if node.enable_ipv6:
cfg += ' ipv6 router isis 1\n'
cfg += ' isis passive\n'
cfg += ' isis metric 0\n'
cfg += '!\n'
return cfg
@classmethod
def generatequaggaconfig(cls, node):
cfg = ''
cfg += cls.generatequaggaloconfig(node)
for ifc in node.netifs():
# do not ever include control interfaces in anything
if hasattr(ifc, 'control') and ifc.control == True:
continue
tmpcfg = 'interface %s\n' % ifc.name
cfgv4 = ''
cfgv6 = ''
cfgvall = ''
want_ipv4 = False
want_ipv6 = False
ifccfg = cls.generatequaggaifcconfig(node, ifc)
if cls._ipv4_routing and node.enable_ipv4:
want_ipv4 = True
if cls._ipv6_routing and node.enable_ipv6:
want_ipv6 = True
if want_ipv4 and not want_ipv6:
cfgv4 += ifccfg
elif not want_ipv4 and want_ipv6:
cfgv6 += ifccfg
elif want_ipv4 and want_ipv6:
cfgvall += ifccfg
if want_ipv4 and not want_ipv6:
ipv4list = [x for x in ifc.addrlist if isIPv4Address(x)]
tmpcfg += cfgv4
elif not want_ipv4 and want_ipv6:
ipv6list = [x for x in ifc.addrlist if isIPv6Address(x)]
tmpcfg += cfgv6
elif want_ipv4 and want_ipv6:
tmpcfg += cfgv4
tmpcfg += cfgv6
tmpcfg += cfgvall
tmpcfg += '!\n'
if want_ipv4 or want_ipv6:
cfg += tmpcfg
cfg += '! ISIS configuration\n'
if node.enable_ipv4 or node.enable_ipv6:
cfg += 'log file /tmp/quagga-isis-%s.log\n' % node.name
cfg += 'router isis 1\n'
cfg += ' net %s\n' % cls.get_ISIS_ID(cls.routerid(node), str(node.netid))
cfg += ' metric-style wide\n'
cfg += '!\n'
return cfg
@staticmethod
def get_ISIS_ID(ipstr, netid):
''' calculates and returns an ISIS-ID based on the supplied IP addr '''
# isis-is is 12 characters long
# it has the format: 49.nnnn.aaaa.bbbb.cccc.00
# where nnnn == netid (i.e., same on all routers)
# abc == routerid (i.e., unique among all routers)
#hexip = hex(int(IPv4Addr(ipstr).addr))[2:]
#if len(hexip) < 8:
# hexip = '0%s' % hexip
#netid = str(netid)
#isisnetidlist = [ '0' for i in range(4 - len(netid)) ]
#isisnetidlist.append(netid)
#isisnetid = ''.join(isisnetidlist)
# 49.1000.
splitted = ''.join(['%03d' % int(e) for e in ipstr.split('.')])
isisid = '49.1000.%s.%s.%s.00' % (splitted[:4], splitted[4:8], splitted[8:])
print('[DEBUG] isis id: %s' % isisid)
return isisid
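
    # Worked example for the code above: routerid '10.0.0.1' gives
    #   splitted = '010' + '000' + '000' + '001' = '010000000001'
    #   isisid   = '49.1000.0100.0000.0001.00'
    # Note that the netid argument is unused by the active code path; the
    # '49.1000' prefix is fixed.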
addservice(ISIS)
class Vtysh(CoreService):
''' Simple service to run vtysh -b (boot) after all Quagga daemons have
started.
'''
_name = 'vtysh'
_group = 'Quagga'
_startindex = 45
#_startup = ('sh quaggaboot.sh vtysh',)
_shutdown = ()
#_starttime = 30
_configs = ('/etc/quagga/vtysh.conf',)
@classmethod
def generateconfig(cls, node, filename, services):
return ''
addservice(Vtysh)
| bsd-3-clause | 5,469,669,532,570,034,000 | 32.298611 | 90 | 0.545836 | false | 3.837316 | true | false | false |
wdzhou/mantid | Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/IndirectResolution.py | 3 | 6199 | #pylint: disable=no-init
from __future__ import (absolute_import, division, print_function)
from mantid.simpleapi import *
from mantid.api import *
from mantid.kernel import *
#pylint: disable=too-many-instance-attributes
class IndirectResolution(DataProcessorAlgorithm):
_input_files = None
_out_ws = None
_instrument = None
_analyser = None
_reflection = None
_detector_range = None
_background = None
_rebin_string = None
_scale_factor = None
_load_logs = None
def category(self):
return 'Workflow\\Inelastic;Inelastic\\Indirect'
def summary(self):
return 'Creates a resolution workspace for an indirect inelastic instrument.'
def PyInit(self):
self.declareProperty(StringArrayProperty(name='InputFiles'),
                             doc='Comma separated list of input files')
self.declareProperty(name='Instrument', defaultValue='',
validator=StringListValidator(['IRIS', 'OSIRIS', 'TOSCA']),
doc='Instrument used during run.')
self.declareProperty(name='Analyser', defaultValue='',
validator=StringListValidator(['graphite', 'mica', 'fmica']),
doc='Analyser used during run.')
self.declareProperty(name='Reflection', defaultValue='',
validator=StringListValidator(['002', '004', '006']),
doc='Reflection used during run.')
self.declareProperty(IntArrayProperty(name='DetectorRange', values=[0, 1]),
                             doc='Range of detectors to use in resolution calculation.')
self.declareProperty(FloatArrayProperty(name='BackgroundRange', values=[0.0, 0.0]),
doc='Energy range to use as background.')
self.declareProperty(name='RebinParam', defaultValue='',
doc='Rebinning parameters (min,width,max)')
self.declareProperty(name='ScaleFactor', defaultValue=1.0,
doc='Factor to scale resolution curve by')
self.declareProperty(name = "LoadLogFiles", defaultValue=True,
doc='Option to load log files')
self.declareProperty(WorkspaceProperty('OutputWorkspace', '',
direction=Direction.Output),
doc='Output resolution workspace.')
def PyExec(self):
self._setup()
iet_alg = self.createChildAlgorithm(name='ISISIndirectEnergyTransfer', startProgress=0.0,
endProgress=0.7, enableLogging=True)
iet_alg.setProperty('Instrument', self._instrument)
iet_alg.setProperty('Analyser', self._analyser)
iet_alg.setProperty('Reflection', self._reflection)
iet_alg.setProperty('GroupingMethod', 'All')
iet_alg.setProperty('SumFiles', True)
iet_alg.setProperty('InputFiles', self._input_files)
iet_alg.setProperty('SpectraRange', self._detector_range)
iet_alg.setProperty('LoadLogFiles', self._load_logs)
iet_alg.execute()
group_ws = iet_alg.getProperty('OutputWorkspace').value
icon_ws = group_ws.getItem(0).name()
workflow_prog = Progress(self, start=0.7, end=0.9, nreports=4)
if self._scale_factor != 1.0:
workflow_prog.report('Scaling Workspace')
Scale(InputWorkspace=icon_ws,
OutputWorkspace=icon_ws,
Factor=self._scale_factor)
workflow_prog.report('Calculating flat background')
CalculateFlatBackground(InputWorkspace=icon_ws,
OutputWorkspace=self._out_ws,
StartX=self._background[0],
EndX=self._background[1],
Mode='Mean',
OutputMode='Subtract Background')
workflow_prog.report('Rebinning Workspace')
Rebin(InputWorkspace=self._out_ws,
OutputWorkspace=self._out_ws,
Params=self._rebin_string)
workflow_prog.report('Completing Post Processing')
self._post_process()
self.setProperty('OutputWorkspace', self._out_ws)
def _setup(self):
"""
Gets algorithm properties.
"""
self._input_files = self.getProperty('InputFiles').value
self._out_ws = self.getPropertyValue('OutputWorkspace')
self._instrument = self.getProperty('Instrument').value
self._analyser = self.getProperty('Analyser').value
self._reflection = self.getProperty('Reflection').value
self._detector_range = self.getProperty('DetectorRange').value
self._background = self.getProperty('BackgroundRange').value
self._rebin_string = self.getProperty('RebinParam').value
self._scale_factor = self.getProperty('ScaleFactor').value
self._load_logs = self.getProperty('LoadLogFiles').value
def _post_process(self):
"""
Handles adding logs, saving and plotting.
"""
sample_logs = [('res_back_start', self._background[0]),
('res_back_end', self._background[1])]
if self._scale_factor != 1.0:
sample_logs.append(('res_scale_factor', self._scale_factor))
rebin_params = self._rebin_string.split(',')
if len(rebin_params) == 3:
sample_logs.append(('rebin_low', rebin_params[0]))
sample_logs.append(('rebin_width', rebin_params[1]))
sample_logs.append(('rebin_high', rebin_params[2]))
log_alg = self.createChildAlgorithm(name='AddSampleLogMultiple', startProgress=0.9,
endProgress=1.0, enableLogging=True)
log_alg.setProperty('Workspace', self._out_ws)
log_alg.setProperty('LogNames', [log[0] for log in sample_logs])
log_alg.setProperty('LogValues',[log[1] for log in sample_logs])
self.setProperty('OutputWorkspace', self._out_ws)
log_alg.execute()
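
# A usage sketch, assuming the algorithm has been registered and is invoked via
# mantid.simpleapi in the usual way; file names and ranges are illustrative only:
#
#   res_ws = IndirectResolution(InputFiles=['IRS26173.raw'],
#                               Instrument='IRIS',
#                               Analyser='graphite',
#                               Reflection='002',
#                               DetectorRange=[3, 53],
#                               BackgroundRange=[-0.16, -0.14],
#                               RebinParam='-0.175,0.002,0.175',
#                               OutputWorkspace='irs26173_resolution')
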
AlgorithmFactory.subscribe(IndirectResolution)
| gpl-3.0 | 4,928,819,682,498,818,000 | 40.885135 | 97 | 0.593967 | false | 4.399574 | false | false | false |
testvidya11/ejrf | questionnaire/migrations/0010_auto__add_comment__add_questionoption__add_answer__add_answergroup__ad.py | 1 | 22456 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Comment'
db.create_table(u'questionnaire_comment', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('text', self.gf('django.db.models.fields.CharField')(max_length=100)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
))
db.send_create_signal('questionnaire', ['Comment'])
# Adding M2M table for field answer on 'Comment'
db.create_table(u'questionnaire_comment_answer', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('comment', models.ForeignKey(orm['questionnaire.comment'], null=False)),
('answer', models.ForeignKey(orm['questionnaire.answer'], null=False))
))
db.create_unique(u'questionnaire_comment_answer', ['comment_id', 'answer_id'])
# Adding model 'QuestionOption'
db.create_table(u'questionnaire_questionoption', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('text', self.gf('django.db.models.fields.CharField')(max_length=100)),
('question', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['questionnaire.Question'])),
))
db.send_create_signal('questionnaire', ['QuestionOption'])
# Adding model 'Answer'
db.create_table(u'questionnaire_answer', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('question', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['questionnaire.Question'], null=True)),
('country', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['questionnaire.Country'], null=True)),
('status', self.gf('django.db.models.fields.CharField')(default='Draft', max_length=15)),
('version', self.gf('django.db.models.fields.IntegerField')(default=1, null=True)),
))
db.send_create_signal('questionnaire', ['Answer'])
# Adding model 'AnswerGroup'
db.create_table(u'questionnaire_answergroup', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('answer', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['questionnaire.Answer'], null=True)),
('grouped_question', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['questionnaire.QuestionGroup'], null=True)),
('row', self.gf('django.db.models.fields.CharField')(max_length=6)),
))
db.send_create_signal('questionnaire', ['AnswerGroup'])
# Adding model 'TextAnswer'
db.create_table(u'questionnaire_textanswer', (
(u'answer_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['questionnaire.Answer'], unique=True, primary_key=True)),
('response', self.gf('django.db.models.fields.CharField')(max_length=100, null=True)),
))
db.send_create_signal('questionnaire', ['TextAnswer'])
# Adding model 'MultiChoiceAnswer'
db.create_table(u'questionnaire_multichoiceanswer', (
(u'answer_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['questionnaire.Answer'], unique=True, primary_key=True)),
('response', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['questionnaire.QuestionOption'])),
))
db.send_create_signal('questionnaire', ['MultiChoiceAnswer'])
# Adding model 'QuestionGroup'
db.create_table(u'questionnaire_questiongroup', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('subsection', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['questionnaire.SubSection'])),
('order', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal('questionnaire', ['QuestionGroup'])
# Adding M2M table for field question on 'QuestionGroup'
db.create_table(u'questionnaire_questiongroup_question', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('questiongroup', models.ForeignKey(orm['questionnaire.questiongroup'], null=False)),
('question', models.ForeignKey(orm['questionnaire.question'], null=False))
))
db.create_unique(u'questionnaire_questiongroup_question', ['questiongroup_id', 'question_id'])
# Adding model 'NumericalAnswer'
db.create_table(u'questionnaire_numericalanswer', (
(u'answer_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['questionnaire.Answer'], unique=True, primary_key=True)),
('response', self.gf('django.db.models.fields.DecimalField')(max_digits=5, decimal_places=2)),
))
db.send_create_signal('questionnaire', ['NumericalAnswer'])
# Adding model 'DateAnswer'
db.create_table(u'questionnaire_dateanswer', (
(u'answer_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['questionnaire.Answer'], unique=True, primary_key=True)),
('response', self.gf('django.db.models.fields.DateField')()),
))
db.send_create_signal('questionnaire', ['DateAnswer'])
# Changing field 'Question.answer_type'
db.alter_column(u'questionnaire_question', 'answer_type', self.gf('django.db.models.fields.CharField')(max_length=20))
# Adding field 'Section.name'
db.add_column(u'questionnaire_section', 'name',
self.gf('django.db.models.fields.CharField')(max_length=100, null=True),
keep_default=False)
def backwards(self, orm):
# Deleting model 'Comment'
db.delete_table(u'questionnaire_comment')
# Removing M2M table for field answer on 'Comment'
db.delete_table('questionnaire_comment_answer')
# Deleting model 'QuestionOption'
db.delete_table(u'questionnaire_questionoption')
# Deleting model 'Answer'
db.delete_table(u'questionnaire_answer')
# Deleting model 'AnswerGroup'
db.delete_table(u'questionnaire_answergroup')
# Deleting model 'TextAnswer'
db.delete_table(u'questionnaire_textanswer')
# Deleting model 'MultiChoiceAnswer'
db.delete_table(u'questionnaire_multichoiceanswer')
# Deleting model 'QuestionGroup'
db.delete_table(u'questionnaire_questiongroup')
# Removing M2M table for field question on 'QuestionGroup'
db.delete_table('questionnaire_questiongroup_question')
# Deleting model 'NumericalAnswer'
db.delete_table(u'questionnaire_numericalanswer')
# Deleting model 'DateAnswer'
db.delete_table(u'questionnaire_dateanswer')
# Changing field 'Question.answer_type'
db.alter_column(u'questionnaire_question', 'answer_type', self.gf('django.db.models.fields.CharField')(max_length=10))
# Deleting field 'Section.name'
db.delete_column(u'questionnaire_section', 'name')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'questionnaire.answer': {
'Meta': {'object_name': 'Answer'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.Country']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.Question']", 'null': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Draft'", 'max_length': '15'}),
'version': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'})
},
'questionnaire.answergroup': {
'Meta': {'object_name': 'AnswerGroup'},
'answer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.Answer']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'grouped_question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.QuestionGroup']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'row': ('django.db.models.fields.CharField', [], {'max_length': '6'})
},
'questionnaire.comment': {
'Meta': {'object_name': 'Comment'},
'answer': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'comments'", 'symmetrical': 'False', 'to': "orm['questionnaire.Answer']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'questionnaire.country': {
'Meta': {'object_name': 'Country'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'regions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'countries'", 'null': 'True', 'to': "orm['questionnaire.Region']"})
},
'questionnaire.dateanswer': {
'Meta': {'object_name': 'DateAnswer', '_ormbases': ['questionnaire.Answer']},
u'answer_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['questionnaire.Answer']", 'unique': 'True', 'primary_key': 'True'}),
'response': ('django.db.models.fields.DateField', [], {})
},
'questionnaire.multichoiceanswer': {
'Meta': {'object_name': 'MultiChoiceAnswer', '_ormbases': ['questionnaire.Answer']},
u'answer_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['questionnaire.Answer']", 'unique': 'True', 'primary_key': 'True'}),
'response': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.QuestionOption']"})
},
'questionnaire.numericalanswer': {
'Meta': {'object_name': 'NumericalAnswer', '_ormbases': ['questionnaire.Answer']},
u'answer_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['questionnaire.Answer']", 'unique': 'True', 'primary_key': 'True'}),
'response': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '2'})
},
'questionnaire.organization': {
'Meta': {'object_name': 'Organization'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
'questionnaire.question': {
'Meta': {'object_name': 'Question'},
'UID': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '6'}),
'answer_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instructions': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
},
'questionnaire.questiongroup': {
'Meta': {'object_name': 'QuestionGroup'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'question': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['questionnaire.Question']", 'symmetrical': 'False'}),
'subsection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.SubSection']"})
},
'questionnaire.questionnaire': {
'Meta': {'object_name': 'Questionnaire'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'questionnaire.questionoption': {
'Meta': {'object_name': 'QuestionOption'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.Question']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'questionnaire.region': {
'Meta': {'object_name': 'Region'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'regions'", 'null': 'True', 'to': "orm['questionnaire.Organization']"})
},
'questionnaire.section': {
'Meta': {'object_name': 'Section'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'questionnaire': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sections'", 'to': "orm['questionnaire.Questionnaire']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'questionnaire.subsection': {
'Meta': {'object_name': 'SubSection'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sub_sections'", 'to': "orm['questionnaire.Section']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'questionnaire.textanswer': {
'Meta': {'object_name': 'TextAnswer', '_ormbases': ['questionnaire.Answer']},
u'answer_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['questionnaire.Answer']", 'unique': 'True', 'primary_key': 'True'}),
'response': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
}
}
complete_apps = ['questionnaire'] | bsd-3-clause | -3,063,533,105,820,729,300 | 68.311728 | 188 | 0.594808 | false | 3.758326 | false | false | false |
bukun/TorCMS | torcms/handlers/comment_hander.py | 1 | 1405 | import config
from config import CMS_CFG, router_post
from torcms.core.base_handler import BaseHandler
from torcms.model.comment_model import MComment
class CommentHandler(BaseHandler):
def initialize(self):
super().initialize()
def get(self, *args, **kwargs):
url_str = args[0]
url_arr = self.parse_url(url_str)
if url_str == '' or url_str == 'list':
self.list(url_str)
elif len(url_arr) == 2:
self.list(url_arr[0], cur_p=url_arr[1])
def list(self, list, **kwargs):
'''
List the replies.
'''
def get_pager_idx():
'''
Get the pager index.
'''
cur_p = kwargs.get('cur_p')
the_num = int(cur_p) if cur_p else 1
the_num = 1 if the_num < 1 else the_num
return the_num
current_page_num = get_pager_idx()
num_of_cat = MComment.count_of_certain()
page_num = int(num_of_cat / CMS_CFG['list_num']) + 1
kwd = {
'current_page': current_page_num,
'count': num_of_cat,
}
self.render('static_pages/comment/index.html',
postinfo=MComment.query_pager_by_comment(current_page_num),
userinfo=self.userinfo,
cfg=CMS_CFG,
kwd=kwd,
router_post=router_post)
| mit | -776,017,749,374,172,400 | 30.222222 | 79 | 0.520285 | false | 3.59335 | false | false | false |
dednal/chromium.src | mojo/public/tools/bindings/pylib/mojom/parse/parser.py | 9 | 12696 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates a syntax tree from a Mojo IDL file."""
import imp
import os.path
import sys
def _GetDirAbove(dirname):
"""Returns the directory "above" this file containing |dirname| (which must
also be "above" this file)."""
path = os.path.abspath(__file__)
while True:
path, tail = os.path.split(path)
assert tail
if tail == dirname:
return path
try:
imp.find_module("ply")
except ImportError:
sys.path.append(os.path.join(_GetDirAbove("public"), "public/third_party"))
from ply import lex
from ply import yacc
from ..error import Error
from . import ast
from .lexer import Lexer
_MAX_ORDINAL_VALUE = 0xffffffff
_MAX_ARRAY_SIZE = 0xffffffff
class ParseError(Error):
"""Class for errors from the parser."""
def __init__(self, filename, message, lineno=None, snippet=None):
Error.__init__(self, filename, message, lineno=lineno,
addenda=([snippet] if snippet else None))
# We have methods which look like they could be functions:
# pylint: disable=R0201
class Parser(object):
def __init__(self, lexer, source, filename):
self.tokens = lexer.tokens
self.source = source
self.filename = filename
# Names of functions
#
# In general, we name functions after the left-hand-side of the rule(s) that
# they handle. E.g., |p_foo_bar| for a rule |foo_bar : ...|.
#
# There may be multiple functions handling rules for the same left-hand-side;
# then we name the functions |p_foo_bar_N| (for left-hand-side |foo_bar|),
# where N is a number (numbered starting from 1). Note that using multiple
# functions is actually more efficient than having single functions handle
# multiple rules (and, e.g., distinguishing them by examining |len(p)|).
#
# It's also possible to have a function handling multiple rules with different
# left-hand-sides. We do not do this.
#
# See http://www.dabeaz.com/ply/ply.html#ply_nn25 for more details.
# TODO(vtl): Get rid of the braces in the module "statement". (Consider
# renaming "module" -> "package".) Then we'll be able to have a single rule
# for root (by making module "optional").
def p_root_1(self, p):
"""root : """
p[0] = ast.Mojom(None, ast.ImportList(), [])
def p_root_2(self, p):
"""root : root module"""
if p[1].module is not None:
raise ParseError(self.filename,
"Multiple \"module\" statements not allowed:",
p[2].lineno, snippet=self._GetSnippet(p[2].lineno))
if p[1].import_list.items or p[1].definition_list:
raise ParseError(
self.filename,
"\"module\" statements must precede imports and definitions:",
p[2].lineno, snippet=self._GetSnippet(p[2].lineno))
p[0] = p[1]
p[0].module = p[2]
def p_root_3(self, p):
"""root : root import"""
if p[1].definition_list:
raise ParseError(self.filename,
"\"import\" statements must precede definitions:",
p[2].lineno, snippet=self._GetSnippet(p[2].lineno))
p[0] = p[1]
p[0].import_list.Append(p[2])
def p_root_4(self, p):
"""root : root definition"""
p[0] = p[1]
p[0].definition_list.append(p[2])
def p_import(self, p):
"""import : IMPORT STRING_LITERAL SEMI"""
# 'eval' the literal to strip the quotes.
# TODO(vtl): This eval is dubious. We should unquote/unescape ourselves.
p[0] = ast.Import(eval(p[2]), filename=self.filename, lineno=p.lineno(2))
def p_module(self, p):
"""module : attribute_section MODULE identifier_wrapped SEMI"""
p[0] = ast.Module(p[3], p[1], filename=self.filename, lineno=p.lineno(2))
def p_definition(self, p):
"""definition : struct
| union
| interface
| enum
| const"""
p[0] = p[1]
def p_attribute_section_1(self, p):
"""attribute_section : """
p[0] = None
def p_attribute_section_2(self, p):
"""attribute_section : LBRACKET attribute_list RBRACKET"""
p[0] = p[2]
def p_attribute_list_1(self, p):
"""attribute_list : """
p[0] = ast.AttributeList()
def p_attribute_list_2(self, p):
"""attribute_list : nonempty_attribute_list"""
p[0] = p[1]
def p_nonempty_attribute_list_1(self, p):
"""nonempty_attribute_list : attribute"""
p[0] = ast.AttributeList(p[1])
def p_nonempty_attribute_list_2(self, p):
"""nonempty_attribute_list : nonempty_attribute_list COMMA attribute"""
p[0] = p[1]
p[0].Append(p[3])
def p_attribute(self, p):
"""attribute : NAME EQUALS evaled_literal
| NAME EQUALS NAME"""
p[0] = ast.Attribute(p[1], p[3], filename=self.filename, lineno=p.lineno(1))
def p_evaled_literal(self, p):
"""evaled_literal : literal"""
# 'eval' the literal to strip the quotes.
p[0] = eval(p[1])
def p_struct(self, p):
"""struct : attribute_section STRUCT NAME LBRACE struct_body RBRACE SEMI"""
p[0] = ast.Struct(p[3], p[1], p[5])
def p_struct_body_1(self, p):
"""struct_body : """
p[0] = ast.StructBody()
def p_struct_body_2(self, p):
"""struct_body : struct_body const
| struct_body enum
| struct_body struct_field"""
p[0] = p[1]
p[0].Append(p[2])
def p_struct_field(self, p):
"""struct_field : typename NAME ordinal default SEMI"""
p[0] = ast.StructField(p[2], p[3], p[1], p[4])
def p_union(self, p):
"""union : UNION NAME LBRACE union_body RBRACE SEMI"""
p[0] = ast.Union(p[2], p[4])
def p_union_body_1(self, p):
"""union_body : """
p[0] = ast.UnionBody()
def p_union_body_2(self, p):
"""union_body : union_body union_field"""
p[0] = p[1]
p[1].Append(p[2])
def p_union_field(self, p):
"""union_field : typename NAME ordinal SEMI"""
p[0] = ast.UnionField(p[2], p[3], p[1])
def p_default_1(self, p):
"""default : """
p[0] = None
def p_default_2(self, p):
"""default : EQUALS constant"""
p[0] = p[2]
def p_interface(self, p):
"""interface : attribute_section INTERFACE NAME LBRACE interface_body \
RBRACE SEMI"""
p[0] = ast.Interface(p[3], p[1], p[5])
def p_interface_body_1(self, p):
"""interface_body : """
p[0] = ast.InterfaceBody()
def p_interface_body_2(self, p):
"""interface_body : interface_body const
| interface_body enum
| interface_body method"""
p[0] = p[1]
p[0].Append(p[2])
def p_response_1(self, p):
"""response : """
p[0] = None
def p_response_2(self, p):
"""response : RESPONSE LPAREN parameter_list RPAREN"""
p[0] = p[3]
def p_method(self, p):
"""method : NAME ordinal LPAREN parameter_list RPAREN response SEMI"""
p[0] = ast.Method(p[1], p[2], p[4], p[6])
def p_parameter_list_1(self, p):
"""parameter_list : """
p[0] = ast.ParameterList()
def p_parameter_list_2(self, p):
"""parameter_list : nonempty_parameter_list"""
p[0] = p[1]
def p_nonempty_parameter_list_1(self, p):
"""nonempty_parameter_list : parameter"""
p[0] = ast.ParameterList(p[1])
def p_nonempty_parameter_list_2(self, p):
"""nonempty_parameter_list : nonempty_parameter_list COMMA parameter"""
p[0] = p[1]
p[0].Append(p[3])
def p_parameter(self, p):
"""parameter : typename NAME ordinal"""
p[0] = ast.Parameter(p[2], p[3], p[1],
filename=self.filename, lineno=p.lineno(2))
def p_typename(self, p):
"""typename : nonnullable_typename QSTN
| nonnullable_typename"""
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[1] + "?"
def p_nonnullable_typename(self, p):
"""nonnullable_typename : basictypename
| array
| fixed_array
| associative_array
| interfacerequest"""
p[0] = p[1]
def p_basictypename(self, p):
"""basictypename : identifier
| handletype"""
p[0] = p[1]
def p_handletype(self, p):
"""handletype : HANDLE
| HANDLE LANGLE NAME RANGLE"""
if len(p) == 2:
p[0] = p[1]
else:
if p[3] not in ('data_pipe_consumer',
'data_pipe_producer',
'message_pipe',
'shared_buffer'):
# Note: We don't enable tracking of line numbers for everything, so we
# can't use |p.lineno(3)|.
raise ParseError(self.filename, "Invalid handle type %r:" % p[3],
lineno=p.lineno(1),
snippet=self._GetSnippet(p.lineno(1)))
p[0] = "handle<" + p[3] + ">"
def p_array(self, p):
"""array : ARRAY LANGLE typename RANGLE"""
p[0] = p[3] + "[]"
def p_fixed_array(self, p):
"""fixed_array : ARRAY LANGLE typename COMMA INT_CONST_DEC RANGLE"""
value = int(p[5])
if value == 0 or value > _MAX_ARRAY_SIZE:
raise ParseError(self.filename, "Fixed array size %d invalid:" % value,
lineno=p.lineno(5),
snippet=self._GetSnippet(p.lineno(5)))
p[0] = p[3] + "[" + p[5] + "]"
def p_associative_array(self, p):
"""associative_array : MAP LANGLE identifier COMMA typename RANGLE"""
p[0] = p[5] + "{" + p[3] + "}"
def p_interfacerequest(self, p):
"""interfacerequest : identifier AMP"""
p[0] = p[1] + "&"
def p_ordinal_1(self, p):
"""ordinal : """
p[0] = None
def p_ordinal_2(self, p):
"""ordinal : ORDINAL"""
value = int(p[1][1:])
if value > _MAX_ORDINAL_VALUE:
raise ParseError(self.filename, "Ordinal value %d too large:" % value,
lineno=p.lineno(1),
snippet=self._GetSnippet(p.lineno(1)))
p[0] = ast.Ordinal(value, filename=self.filename, lineno=p.lineno(1))
def p_enum(self, p):
"""enum : ENUM NAME LBRACE nonempty_enum_value_list RBRACE SEMI
| ENUM NAME LBRACE nonempty_enum_value_list COMMA RBRACE SEMI"""
p[0] = ast.Enum(p[2], p[4], filename=self.filename, lineno=p.lineno(1))
def p_nonempty_enum_value_list_1(self, p):
"""nonempty_enum_value_list : enum_value"""
p[0] = ast.EnumValueList(p[1])
def p_nonempty_enum_value_list_2(self, p):
"""nonempty_enum_value_list : nonempty_enum_value_list COMMA enum_value"""
p[0] = p[1]
p[0].Append(p[3])
def p_enum_value(self, p):
"""enum_value : NAME
| NAME EQUALS int
| NAME EQUALS identifier_wrapped"""
p[0] = ast.EnumValue(p[1], p[3] if len(p) == 4 else None,
filename=self.filename, lineno=p.lineno(1))
def p_const(self, p):
"""const : CONST typename NAME EQUALS constant SEMI"""
p[0] = ast.Const(p[3], p[2], p[5])
def p_constant(self, p):
"""constant : literal
| identifier_wrapped"""
p[0] = p[1]
def p_identifier_wrapped(self, p):
"""identifier_wrapped : identifier"""
p[0] = ('IDENTIFIER', p[1])
# TODO(vtl): Make this produce a "wrapped" identifier (probably as an
# |ast.Identifier|, to be added) and get rid of identifier_wrapped.
def p_identifier(self, p):
"""identifier : NAME
| NAME DOT identifier"""
p[0] = ''.join(p[1:])
def p_literal(self, p):
"""literal : int
| float
| TRUE
| FALSE
| DEFAULT
| STRING_LITERAL"""
p[0] = p[1]
def p_int(self, p):
"""int : int_const
| PLUS int_const
| MINUS int_const"""
p[0] = ''.join(p[1:])
def p_int_const(self, p):
"""int_const : INT_CONST_DEC
| INT_CONST_HEX"""
p[0] = p[1]
def p_float(self, p):
"""float : FLOAT_CONST
| PLUS FLOAT_CONST
| MINUS FLOAT_CONST"""
p[0] = ''.join(p[1:])
def p_error(self, e):
if e is None:
# Unexpected EOF.
# TODO(vtl): Can we figure out what's missing?
raise ParseError(self.filename, "Unexpected end of file")
raise ParseError(self.filename, "Unexpected %r:" % e.value, lineno=e.lineno,
snippet=self._GetSnippet(e.lineno))
def _GetSnippet(self, lineno):
return self.source.split('\n')[lineno - 1]
def Parse(source, filename):
lexer = Lexer(filename)
parser = Parser(lexer, source, filename)
lex.lex(object=lexer)
yacc.yacc(module=parser, debug=0, write_tables=0)
tree = yacc.parse(source)
return tree
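# Illustrative usage sketch (an assumption for clarity, not part of the original module):
#   source = open("example.mojom").read()
#   tree = Parse(source, "example.mojom")
#   # tree is an ast.Mojom with .module, .import_list and .definition_list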
| bsd-3-clause | 1,105,459,608,062,332,300 | 29.815534 | 80 | 0.575142 | false | 3.260401 | false | false | false |
jas02/easybuild-easyblocks | easybuild/easyblocks/m/mothur.py | 12 | 3988 | ##
# Copyright 2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for Mothur, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import glob
import os
import shutil
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root
class EB_Mothur(ConfigureMake):
"""Support for building and installing Mothur."""
def guess_start_dir(self):
"""Set correct start directory."""
# Mothur zip files tend to contain multiple directories next to the actual source dir (e.g. __MACOSX),
# so the default start directory guess is most likely incorrect
mothur_dirs = glob.glob(os.path.join(self.builddir, 'Mothur.*'))
if len(mothur_dirs) == 1:
self.cfg['start_dir'] = mothur_dirs[0]
elif len(os.listdir(self.builddir)) > 1:
# we only have an issue if the default guessing approach will not work
raise EasyBuildError("Failed to guess start directory from %s", mothur_dirs)
super(EB_Mothur, self).guess_start_dir()
def configure_step(self, cmd_prefix=''):
"""Configure Mothur build by setting make options."""
# Fortran compiler and options
self.cfg.update('buildopts', 'FORTAN_COMPILER="%s" FORTRAN_FLAGS="%s"' % (os.getenv('F77'), os.getenv('FFLAGS')))
# enable 64-bit build
if not self.toolchain.options['32bit']:
self.cfg.update('buildopts', '64BIT_VERSION=yes')
# enable readline support
if get_software_root('libreadline') and get_software_root('ncurses'):
self.cfg.update('buildopts', 'USEREADLINE=yes')
# enable MPI support
if self.toolchain.options.get('usempi', None):
self.cfg.update('buildopts', 'USEMPI=yes CXX="%s"' % os.getenv('MPICXX'))
self.cfg.update('prebuildopts', 'CXXFLAGS="$CXXFLAGS -DMPICH_IGNORE_CXX_SEEK"')
# enable compression
if get_software_root('bzip2') or get_software_root('gzip'):
self.cfg.update('buildopts', 'USE_COMPRESSION=yes')
def install_step(self):
"""
Install by copying files to install dir
"""
srcdir = os.path.join(self.builddir, self.cfg['start_dir'])
destdir = os.path.join(self.installdir, 'bin')
srcfile = None
try:
os.makedirs(destdir)
for filename in ['mothur', 'uchime']:
srcfile = os.path.join(srcdir, filename)
shutil.copy2(srcfile, destdir)
except OSError, err:
raise EasyBuildError("Copying %s to installation dir %s failed: %s", srcfile, destdir, err)
def sanity_check_step(self):
"""Custom sanity check for Mothur."""
custom_paths = {
'files': ["bin/mothur"],
'dirs': [],
}
super(EB_Mothur, self).sanity_check_step(custom_paths=custom_paths)
| gpl-2.0 | -3,498,293,905,837,848,000 | 40.541667 | 121 | 0.661484 | false | 3.638686 | false | false | false |
libnano/libnano | libnano/_virtualhelix/virtualhelix.py | 1 | 11546 | import sys
from typing import (
List,
Tuple,
Union,
NamedTuple
)
import __init__
from libnano.seqrecord.seqrecordbase import (
AlphaEnum,
ALPHABETS
)
from libnano.seqstr import (
reverseComplement,
complement,
reverse
)
from enum import IntEnum
from strand import Strand
from oligo import Oligo
from dseq import DSeq
NEWLINE_STR: str = '\r\n' if sys.platform == 'win32' else '\n'
STR_OR_STRAND_T = Union[str, Strand]
def str2Oligo(x: str) -> Tuple[bool, Oligo, Strand]:
if isinstance(x, Strand):
return False, x.oligo, x
else:
oligo = Oligo(x)
return True, oligo, oligo.strand5p
# end def
StrandArray = NamedTuple('StrandArray', [
('strands', List[Strand]),
('idx_offsets', List[int]),
('seq', List[str]) # only a List of one element for assignment since strings are immutable in Python
]
)
class VHDirEnum(IntEnum):
Forward: int = 0
Reverse: int = 1
@staticmethod
def check(x: int):
if x not in (0, 1):
raise ValueError('{} not in value in VHDirEnum'.format(x))
class VirtualHelix:
FWD: int = 0
REV: int = 1
def __init__(self,
fwd_strands: List[Strand],
fwd_idx_offsets: List[int],
rev_strands: List[Strand],
rev_idx_offsets: List[int],
alphabet: int = AlphaEnum.DNA):
'''
Args:
fwd_strands: List of strands in the 5' to 3' direction
rev_strands: List of strands in the 3' to 5' direction
fwd_idx_offsets: List of integer offsets from the 5' end of the
forward strand
rev_idx_offsets: List of integer offsets from the 5' end of the
forward strand
alphabet: DNA or RNA
'''
self.strand_arrays: Tuple[StrandArray, ...] = (
StrandArray(fwd_strands, fwd_idx_offsets, ['']),
StrandArray(rev_strands, rev_idx_offsets, [''])
)
self.alphabet: int = alphabet
fwd_gaps: list = [] # track the free space in the VirtualHelix
fwd_endpoints: list = []
rev_gaps: list = [] # track the free space in the VirtualHelix
rev_endpoints: list = []
# end def
def oligos(self) -> Tuple[List[Oligo], List[Oligo]]:
strand_arrays: Tuple[StrandArray, ...] = self.strand_arrays
strands_f: List[Strand] = strand_arrays[0].strands
strands_r: List[Strand] = strand_arrays[1].strands
out = ([x.oligo for x in strands_f], [x.oligo for x in strands_r])
return out
@staticmethod
def reverseStrandArray(
strands: List[Strand],
idx_offsets: List[int]) -> StrandArray:
strands_out: List[Strand] = strands[::-1]
seq_len: int = sum(len(x) for x in strands)
x0: int = idx_offsets[0] + len(strands[0])
for x1, strand1 in zip(idx_offsets[1:], strands[1:]):
check = x1 + len(strand1)
assert(check >= x0)
x0 = check
total_length: int = x0
gener = zip(idx_offsets[::-1], strands_out)
idx_offsets_out: List[int] = [total_length - (y + len(z) - 1) for y, z in gener]
return StrandArray(strands_out, idx_offsets_out, [''])
# end def
def get_seq(self,
dir_idx: int,
do_cache: bool = False) -> str:
strand_array: StrandArray = self.strand_arrays[dir_idx]
the_seq: str = strand_array.seq[0]
if the_seq:
return the_seq
else:
strand_array.strands
strand_seq_list: List[str] = []
idx_last: int = 0
for strand, idx in zip(strand_array.strands, strand_array.idx_offsets):
strand_seq_list.append(' '*(idx - idx_last))
seq = strand.seq
strand_seq_list.append(seq)
idx_last = idx + len(seq)
the_seq: str = ''.join(strand_seq_list)
if do_cache:
strand_array.seq[0] = the_seq
return the_seq
# end def
@property
def fwd_strands(self) -> List[Strand]:
return self.strand_arrays[self.FWD].strands
@property
def rev_strands(self) -> List[Strand]:
return self.strand_arrays[self.REV].strands[::-1]
def fwd_seq(self, do_cache=True):
return self.get_seq(self.FWD, do_cache=do_cache)
# end def
def rev_seq(self, do_cache=True):
return self.get_seq(self.REV, do_cache=do_cache)
# end def
def len_strands(self,
dir_idx: int) -> int:
'''Return the length of all the strands including the offsets
while checking to make sure strands do not overlap
'''
strand_array: StrandArray = self.strand_arrays[dir_idx]
strands: List[Strand] = strand_array.strands
idx_offsets: List[int] = strand_array.idx_offsets
seq_len: int = sum(len(x) for x in strands)
x0: int = idx_offsets[0] + len(strands[0])
for x1, strand1 in zip(idx_offsets[1:], strands[1:]):
check = x1 + len(strand1)
assert(check >= x0)
x0 = check
return x0
# end def
def len_fwd(self) -> int:
return self.len_strands(self.FWD)
def len_rev(self) -> int:
return self.len_strands(self.REV)
def __str__(self) -> str:
return '%s%s%s' % (self.fwd_seq(), NEWLINE_STR, reverse(self.rev_seq()))
# end def
def addForwardStrand(self, strand: Strand, offset: int):
pass
# end def
# def addSequence(self, seq: str) -> Oligo:
# pass
def breakStrand(self,
dir_idx: int,
strand: Strand,
idx: int) -> Tuple[Oligo, Oligo, Oligo]:
'''Break a Strand in two and create two new Oligos to assign the all of
the strands in the pre-existing Oligo to
Args:
dir_idx: is this on the forward [0] or reverse [1] direction of the
:class:`VirtualHelix`. Also use :enum:`VHDirEnum` to get these idxs
strand: :class:`Strand` object to break
idx: index to break the strand at in terms of it's sequence
Returns:
Two new :class:`Oligo` objects of form::
Oligo_5p, Oligo_3p
'''
VHDirEnum.check(dir_idx)
strand_array = self.strand_arrays[dir_idx]
vh_strands: List[Strand] = strand_array.strands
if strand not in vh_strands:
dir_name: str = VHDirEnum(dir_idx).name
err: str = "Strand {} not in the {} StrandArray of the VirtualHelix"
raise ValueError(err.format(strand, dir_name))
idx_offsets: List[int] = strand_array.idx_offsets
seq: str = strand.seq
oligo_old: Oligo = strand.oligo
# 1. Do the 5' portion of the break
oligo_break5p: Oligo = Oligo(seq[0:idx])
strand_break5p: Strand = oligo_break5p.strand5p
neighbor_5p: Strand = strand.strand5p
if neighbor_5p is not None: # update existing neighbor oligos
strand_break5p.strand5p = neighbor_5p
neighbor_5p.strand3p = strand_break5p
for seg in neighbor_5p.gen5p():
seg.oligo = oligo_break5p
# 2. Do the 3' portion of the break
oligo_break3p: Oligo = Oligo(seq[idx:])
strand_break3p: Strand = oligo_break3p.strand5p
neighbor_3p: Strand = strand.strand3p
if neighbor_3p is not None: # update existing neighbor oligos
strand_break3p.strand3p = neighbor_3p
neighbor_3p.strand5p = strand_break3p
for seg in neighbor_3p.gen3p():
seg.oligo = oligo_break3p
# 3. Update the strands
list_idx: int = vh_strands.index(strand)
offset_5p: int = idx_offsets[list_idx]
list_idx_plus_1: int = list_idx + 1
vh_strands.insert(list_idx_plus_1, strand_break3p)
vh_strands.insert(list_idx_plus_1, strand_break5p)
idx_offsets.insert(list_idx_plus_1, offset_5p + len(strand_break5p))
vh_strands.pop(list_idx) # pop out the original strand
return oligo_break5p, oligo_break3p
# end def
def __add__(self, b: 'VirtualHelix') -> 'VirtualHelix':
'''(1) Concatenates the forward strand with forward strand
and the reverse strand with the reverse strand and preserves order
(2) Realligns the two :class:`VirtualHelix` objects involved
Args:
b: a :class:`VirtualHelix` object
Returns:
a :class:`VirtualHelix` object
Raises:
ValueError, TypeError
'''
if isinstance(b, VirtualHelix):
type3, seq3 = self.three_prime_end()
type5, seq5 = b.five_prime_end()
if type3 == type5 and len(seq3) == len(seq5):
if seq3 != reverseComplement(seq5):
raise TypeError("Ends not complimentary")
fwd = self.fwd + b.fwd
rev = self.rev + b.rev
return VirtualHelix(fwd, rev, self.overhang)
else:
raise TypeError("Ends not compatible")
else:
raise ValueError("{} object not a DSeq".format(b))
# end def
def five_prime_end(self) -> Tuple[int, str]:
'''Return what kind of end is overhanging the 5' end of the
forward strand
'''
fwd_idx0: int = self.fwd_idx_offsets[0]
rev_idx0: int = self.rev_idx_offsets[-1]
res, val = five_prime_type(self.alignment, self.fwd, self.rev)
return res, val
# end def
def three_prime_end(self) -> Tuple[int, str]:
'''Return what kind of end is overhanging the 3' end of the forward
strand
'''
res, val = three_prime_type(self.alignment, self.fwd, self.rev)
return res, val
# end def
# end class
def DSeqVH( fwd: str,
rev: str = None,
overhang: int = None,
alphabet: int = AlphaEnum.DNA) -> VirtualHelix:
'''Helper function for creating :class:`VirtualHelix` in the style of
the :class:`DSeq` with strings
'''
dseq: DSeq = DSeq(fwd, rev, overhang, alphabet)
overhang: int = dseq.overhang
if overhang > 0:
fwd_idx_offsets = [overhang]
rev_idx_offsets = [0]
else:
fwd_idx_offsets = [0]
rev_idx_offsets = [overhang]
oligo_fwd = Oligo(fwd)
if rev is None:
rev = reverseComplement(fwd)
oligo_rev = Oligo(rev)
return VirtualHelix([oligo_fwd.strand5p],
fwd_idx_offsets,
[oligo_rev.strand5p],
rev_idx_offsets)
# end def
if __name__ == '__main__':
fwd = 'GGTCTCGAATTCAAA'
oligo_fwd = Oligo(fwd)
rev = 'TTTGAATTCGAGACC'
oligo_rev = Oligo(rev)
BsaI_vh = VirtualHelix( [oligo_fwd.strand5p], [0],
[oligo_rev.strand5p], [0])
print("1.\n%s" % BsaI_vh)
BsaI_vh = DSeqVH(fwd, rev, 0)
print("2.\n%s" % BsaI_vh)
print(BsaI_vh.fwd_strands)
BsaI_vh = DSeqVH(fwd)
print("3.\n%s" % BsaI_vh)
print("Da Oligos", BsaI_vh.oligos())
strand0 = BsaI_vh.fwd_strands[0]
print(strand0.oligo)
broken_oligos = BsaI_vh.breakStrand(dir_idx=0, strand=strand0, idx=4)
import pprint
pprint.pprint(broken_oligos)
print(BsaI_vh.len_fwd())
bonus_oligo = Oligo('ACGT')
try:
BsaI_vh.breakStrand(dir_idx=1, strand=bonus_oligo.strand5p, idx=2)
except ValueError:
print("Handled bad strand successfully")
| gpl-2.0 | 3,997,879,121,160,744,000 | 31.988571 | 104 | 0.576044 | false | 3.262504 | false | false | false |
utcoupe/coupe18 | ros_ws/src/memory_map/src/map_node.py | 1 | 1237 | #!/usr/bin/python
import rospy
import map_manager
import map_communication
from markers import MarkersPublisher
from occupancy import OccupancyGenerator
from ai_game_manager import StatusServices
class MapNode():
def __init__(self):
rospy.init_node("map", log_level=rospy.INFO)
rospy.logdebug("Started /memory/map node.")
map_manager.Map.load()
# Starting and publishing the table STL to RViz
self.markers = MarkersPublisher()
# Generate static occupancy images for pathfinder, etc.
occupancy = OccupancyGenerator()
# Starting service handlers (Get, Set, Transfer, GetOccupancy)
map_communication.MapServices(occupancy)
rospy.logdebug("[memory/map] Map request servers ready.")
# Tell ai/game_manager the node initialized successfuly.
StatusServices("memory", "map").ready(True)
self.run()
def run(self):
r = rospy.Rate(5)
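# At 5 Hz: switch the map to the active team side (when /current_team is set)
# and republish the RViz markers.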
while not rospy.is_shutdown():
if rospy.has_param("/current_team"):
map_manager.Map.swap_team(rospy.get_param("/current_team"))
self.markers.updateMarkers(map_manager.Map)
r.sleep()
if __name__ == "__main__":
MapNode()
| gpl-3.0 | -6,607,812,416,757,065,000 | 28.452381 | 75 | 0.646726 | false | 3.952077 | false | false | false |
esiivola/GPYgradients | GPy/models/sparse_gp_classification.py | 1 | 6586 | # Copyright (c) 2013, Ricardo Andrade
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from ..core import SparseGP
from .. import likelihoods
from .. import kern
from ..inference.latent_function_inference import EPDTC
from copy import deepcopy
class SparseGPClassification(SparseGP):
"""
Sparse Gaussian Process model for classification
This is a thin wrapper around the sparse_GP class, with a set of sensible defaults
:param X: input observations
:param Y: observed values
:param likelihood: a GPy likelihood, defaults to Bernoulli
:param kernel: a GPy kernel, defaults to rbf+white
:param inference_method: Latent function inference to use, defaults to EPDTC
:type inference_method: :class:`GPy.inference.latent_function_inference.LatentFunctionInference`
:param normalize_X: whether to normalize the input data before computing (predictions will be in original scales)
:type normalize_X: False|True
:param normalize_Y: whether to normalize the input data before computing (predictions will be in original scales)
:type normalize_Y: False|True
:rtype: model object
"""
def __init__(self, X, Y=None, likelihood=None, kernel=None, Z=None, num_inducing=10, Y_metadata=None,
mean_function=None, inference_method=None, normalizer=False):
if kernel is None:
kernel = kern.RBF(X.shape[1])
if likelihood is None:
likelihood = likelihoods.Bernoulli()
if Z is None:
i = np.random.permutation(X.shape[0])[:num_inducing]
Z = X[i].copy()
else:
assert Z.shape[1] == X.shape[1]
if inference_method is None:
inference_method = EPDTC()
SparseGP.__init__(self, X, Y, Z, kernel, likelihood, mean_function=mean_function, inference_method=inference_method,
normalizer=normalizer, name='SparseGPClassification', Y_metadata=Y_metadata)
@staticmethod
def from_sparse_gp(sparse_gp):
from copy import deepcopy
sparse_gp = deepcopy(sparse_gp)
return SparseGPClassification(sparse_gp.X, sparse_gp.Y, likelihood=sparse_gp.likelihood, kernel=sparse_gp.kern, Z=sparse_gp.Z, inference_method=sparse_gp.inference_method, mean_function=sparse_gp.mean_function)
def to_dict(self, save_data=True):
"""
Store the object into a json serializable dictionary
:param boolean save_data: if true, it adds the data self.X and self.Y to the dictionary
:return dict: json serializable dictionary containing the needed information to instantiate the object
"""
model_dict = super(SparseGPClassification,self).to_dict(save_data)
model_dict["class"] = "GPy.models.SparseGPClassification"
return model_dict
@staticmethod
def _build_from_input_dict(input_dict, data=None):
input_dict = SparseGPClassification._format_input_dict(input_dict, data)
input_dict.pop('name', None) # Name parameter not required by SparseGPClassification
return SparseGPClassification(**input_dict)
@staticmethod
def from_dict(input_dict, data=None):
"""
Instantiate an SparseGPClassification object using the information
in input_dict (built by the to_dict method).
:param data: It is used to provide X and Y for the case when the model
was saved using save_data=False in to_dict method.
:type data: tuple(:class:`np.ndarray`, :class:`np.ndarray`)
"""
import GPy
m = GPy.core.model.Model.from_dict(input_dict, data)
from copy import deepcopy
sparse_gp = deepcopy(m)
return SparseGPClassification(sparse_gp.X, sparse_gp.Y, likelihood=sparse_gp.likelihood, kernel=sparse_gp.kern, Z=sparse_gp.Z, inference_method=sparse_gp.inference_method, mean_function=sparse_gp.mean_function)
def save_model(self, output_filename, compress=True, save_data=True):
"""
Method to serialize the model.
:param string output_filename: Output file
:param boolean compress: If true compress the file using zip
:param boolean save_data: if true, it serializes the training data
(self.X and self.Y)
"""
self._save_model(output_filename, compress=compress, save_data=save_data)
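# Illustrative usage sketch (an assumption for clarity, not part of the original file):
#   import numpy as np, GPy
#   X = np.random.rand(100, 1)
#   Y = (X > 0.5).astype(float)            # binary labels, shape (100, 1)
#   m = GPy.models.SparseGPClassification(X, Y, num_inducing=10)
#   m.optimize()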
class SparseGPClassificationUncertainInput(SparseGP):
"""
Sparse Gaussian Process model for classification with uncertain inputs.
This is a thin wrapper around the sparse_GP class, with a set of sensible defaults
:param X: input observations
:type X: np.ndarray (num_data x input_dim)
:param X_variance: The uncertainty in the measurements of X (Gaussian variance, optional)
:type X_variance: np.ndarray (num_data x input_dim)
:param Y: observed values
:param kernel: a GPy kernel, defaults to rbf+white
:param Z: inducing inputs (optional, see note)
:type Z: np.ndarray (num_inducing x input_dim) | None
:param num_inducing: number of inducing points (ignored if Z is passed, see note)
:type num_inducing: int
:rtype: model object
.. Note:: If no Z array is passed, num_inducing (default 10) points are selected from the data. Otherwise num_inducing is ignored.
.. Note:: Multiple independent outputs are allowed using columns of Y
"""
def __init__(self, X, X_variance, Y, kernel=None, Z=None, num_inducing=10, Y_metadata=None, normalizer=None):
from GPy.core.parameterization.variational import NormalPosterior
if kernel is None:
kernel = kern.RBF(X.shape[1])
likelihood = likelihoods.Bernoulli()
if Z is None:
i = np.random.permutation(X.shape[0])[:num_inducing]
Z = X[i].copy()
else:
assert Z.shape[1] == X.shape[1]
X = NormalPosterior(X, X_variance)
SparseGP.__init__(self, X, Y, Z, kernel, likelihood,
inference_method=EPDTC(),
name='SparseGPClassification', Y_metadata=Y_metadata, normalizer=normalizer)
def parameters_changed(self):
#Compute the psi statistics for N once, but don't sum out N in psi2
self.psi0 = self.kern.psi0(self.Z, self.X)
self.psi1 = self.kern.psi1(self.Z, self.X)
self.psi2 = self.kern.psi2n(self.Z, self.X)
self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.kern, self.X, self.Z, self.likelihood, self.Y, self.Y_metadata, psi0=self.psi0, psi1=self.psi1, psi2=self.psi2)
self._update_gradients()
| bsd-3-clause | -1,283,444,939,233,665,500 | 43.201342 | 220 | 0.67188 | false | 3.833527 | false | false | false |
tpokorra/pykolab | pykolab/plugins/dynamicquota/__init__.py | 1 | 2664 | # -*- coding: utf-8 -*-
# Copyright 2010-2013 Kolab Systems AG (http://www.kolabsys.com)
#
# Jeroen van Meeuwen (Kolab Systems) <vanmeeuwen a kolabsys.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import pykolab
from pykolab.translate import _
conf = pykolab.getConf()
log = pykolab.getLogger('pykolab.plugins.dynamicquota')
class KolabDynamicquota(object):
"""
Example plugin making quota adjustments given arbitrary conditions.
"""
def __init__(self):
pass
def add_options(self, *args, **kw):
pass
def set_user_folder_quota(self, *args, **kw):
"""
The arguments passed to the 'set_user_folder_quota' hook:
- used (integer, in KB)
- imap_quota (current imap quota obtained from IMAP, integer, in KB)
- ldap_quota (current LDAP quota obtained from LDAP, integer, in KB)
- default_quota (integer, in KB)
Returns:
- None - an error has occurred and this plugin doesn't care.
- Negative 1 - remove quota.
- Zero - Absolute 0.
- Positive Integer - set new quota.
"""
for keyword in [ 'used', 'imap_quota', 'ldap_quota', 'default_quota' ]:
if not kw.has_key(keyword):
log.warning(
_("No keyword %s passed to set_user_folder_quota") % (
keyword
)
)
return
else:
try:
if not kw[keyword] == None:
kw[keyword] = (int)(kw[keyword])
except:
log.error(_("Quota '%s' not an integer!") % (keyword))
return
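# Quota resolution implemented below: no LDAP quota -> default_quota,
# LDAP quota of -1 -> remove quota (-1), positive LDAP quota -> that value,
# anything else -> default_quota.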
# Escape the user without quota
if kw['ldap_quota'] == None:
return kw['default_quota']
elif kw['ldap_quota'] == -1:
return -1
elif kw['ldap_quota'] > 0:
return kw['ldap_quota']
else:
return kw['default_quota'] | gpl-3.0 | -509,678,689,789,942,600 | 31.901235 | 80 | 0.567943 | false | 4.149533 | false | false | false |
metaperl/clickmob | src/dhash.py | 2 | 2158 | __author__ = 'anicca'
# core
import math
import sys
from itertools import izip
# 3rd party
from PIL import Image, ImageChops
import argh
def dhash(image, hash_size=8):
# Grayscale and shrink the image in one step.
image = image.convert('L').resize(
(hash_size + 1, hash_size),
Image.ANTIALIAS,
)
pixels = list(image.getdata())
# Compare adjacent pixels.
difference = []
for row in range(hash_size):
for col in range(hash_size):
pixel_left = image.getpixel((col, row))
pixel_right = image.getpixel((col + 1, row))
difference.append(pixel_left > pixel_right)
# Convert the binary array to a hexadecimal string.
decimal_value = 0
hex_string = []
for index, value in enumerate(difference):
if value:
decimal_value += 2 ** (index % 8)
if (index % 8) == 7:
hex_string.append(hex(decimal_value)[2:].rjust(2, '0'))
decimal_value = 0
return ''.join(hex_string)
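# Illustrative comparison sketch (an assumption, not part of the original module):
# two dhash values can be compared via the Hamming distance of their bits, e.g.
#   h1 = dhash(Image.open('a.png'))
#   h2 = dhash(Image.open('b.png'))
#   distance = bin(int(h1, 16) ^ int(h2, 16)).count('1')  # lower = more similar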
def rosetta(image1, image2):
i1 = Image.open(image1)
i2 = Image.open(image2)
assert i1.mode == i2.mode, "Different kinds of images."
print i1.size, i2.size
assert i1.size == i2.size, "Different sizes."
pairs = izip(i1.getdata(), i2.getdata())
if len(i1.getbands()) == 1:
# for gray-scale jpegs
dif = sum(abs(p1 - p2) for p1, p2 in pairs)
else:
dif = sum(abs(c1 - c2) for p1, p2 in pairs for c1, c2 in zip(p1, p2))
ncomponents = i1.size[0] * i1.size[1] * len(i1.getbands())  # scale by the number of channels actually compared
retval = (dif / 255.0 * 100) / ncomponents
return retval
def rmsdiff_2011(im1, im2):
"Calculate the root-mean-square difference between two images"
im1 = Image.open(im1)
im2 = Image.open(im2)
diff = ImageChops.difference(im1, im2)
h = diff.histogram()
sq = (value * (idx ** 2) for idx, value in enumerate(h))
sum_of_squares = sum(sq)
rms = math.sqrt(sum_of_squares / float(im1.size[0] * im1.size[1]))
return rms
def main(image_filename1, image_filename2, dhash=False, rosetta=False, rmsdiff=False):
pass
if __name__ == '__main__':
argh.dispatch_command(main)
| mit | 5,644,511,999,859,032,000 | 25.975 | 86 | 0.60519 | false | 3.132075 | false | false | false |
jirsis/zurawina | zurawina.py | 1 | 4043 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import paramiko
import os
import transmissionrpc
import getpass
import sys
import traceback
import socket
from termcolor import colored
from stat import *
user = "xbian"
host = "zurawina.local"
ssh_username = user
ssh_server = host
ssh_port = 22
transmission_user = user
transmission_host = host
transmission_port = 9091
transmission_timeout = 180
download_dir="downloads"
class SCP_Client:
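# Inspects the remote download directory over paramiko SFTP; bulk transfers
# are delegated to the scp command-line tool.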
scp = ''
ssh = ''
def __init__(self):
self.ssh = paramiko.SSHClient()
self.ssh.load_system_host_keys()
try:
self.ssh.connect(ssh_server, username=ssh_username, look_for_keys=True, port=ssh_port);
self.scp = paramiko.SFTPClient.from_transport(self.ssh.get_transport())
except socket.error:
print "Error connecting to %s" % ssh_server
def prepare_dir(self, path):
os.mkdir(path)
os.chdir(path)
def get_recursive(self, path, extra_path=" "):
self.scp.chdir(path)
for file in self.scp.listdir("."):
print extra_path+"/"+file
file_stat = self.scp.lstat(file)
if S_ISDIR(file_stat.st_mode):
os.mkdir(file)
os.chdir(file)
self.get_recursive(file, extra_path+"/"+file)
self.scp.chdir("..")
os.chdir("..")
else:
self.scp.get(file, file)
def get_resource(self, resource):
resource_stat = self.scp.lstat(download_dir+"/"+resource)
resource=resource.replace(" ", "\\ ").replace("[", "\\[").replace("]", "\\]")
resource=resource.replace("(", "\\(").replace(")", "\\)")
if S_ISDIR(resource_stat.st_mode):
return self.get_directory(resource)
else:
return self.get_file(resource)
def get_directory(self, path):
self.prepare_dir(path)
scp_copy="scp -r \"%s@%s:~/%s/%s/*\" ." % (ssh_username, ssh_server, download_dir, path)
status = os.system(scp_copy)
os.chdir("..")
return status
def get_file(self, file):
scp_copy="scp \"%s@%s:~/%s/%s\" ." % (ssh_username, ssh_server, download_dir, file)
return os.system(scp_copy)
def close(self):
self.scp.close()
self.ssh.close()
class Transmission_Client():
connected = colored("Connected", 'green', attrs=['dark'])
error = colored("FAIL", 'red', attrs=['dark'])
thinking = colored("...", 'white', attrs=['dark'])
copy = colored("Downloaded and ready to copy", 'cyan', attrs=['dark'])
delete = colored("Copied and deleted", 'green', attrs=['dark'])
transmission_password = ""
transmission = ""
def __init__(self):
self.transmission_password = getpass.getpass("Enter your password [%s@%s:%s]:" % (transmission_user, transmission_host, transmission_port))
def print_info(self, msg, status):
print "%-100s [%s]" % (msg, status)
def connect(self):
self.print_info("Connecting to %s:%s" % (transmission_host, transmission_port), self.thinking)
try:
self.transmission = transmissionrpc.Client(transmission_host, port=transmission_port,
user=transmission_user, password=self.transmission_password,
timeout=transmission_timeout)
self.print_info("Connecting to %s:%s" % (transmission_host, transmission_port), self.connected)
except:
self.print_info("Connecting to %s:%s" % (transmission_host, transmission_port), self.error)
sys.exit(0)
def get_torrents(self, scp_client):
for torrent in self.transmission.get_torrents(timeout=transmission_timeout):
if torrent.doneDate != 0:
self.print_info(torrent.name, self.copy)
if (scp_client.get_resource(torrent.name) == 0):
self.transmission.remove_torrent(torrent.id, delete_data=True)
self.print_info(torrent.name, self.delete)
else:
self.print_info(torrent.name, self.error)
else:
downloading_text = "Downloading "+ str(torrent.percentDone*100)+"%"
self.print_info(torrent.name, colored(downloading_text, 'cyan', attrs=['dark']))
transmission = Transmission_Client()
transmission.connect()
scp = SCP_Client()
transmission.get_torrents(scp)
scp.close()
| mit | -5,705,614,639,488,228,000 | 30.834646 | 143 | 0.648034 | false | 3.330313 | false | false | false |
alviproject/alvi | alvi/tests/test_client/test_tree.py | 1 | 3175 | import logging
from alvi.tests.test_client.base import TestContainer
import alvi.tests.pages as pages
logger = logging.getLogger(__name__)
class TestTree(TestContainer):
options = dict(n=7, parents="0, 0, 1, 1, 4, 4")
def test_create_node(self):
page = pages.Tree(self._browser.driver, "TreeCreateNode")
page.run(options=TestTree.options)
self.assertEqual(7, len(page.svg.nodes), "create_node does not work properly")
node_data = sorted(page.svg.node_data, key=lambda d: d['id'])
expected = [
{'name': 0, 'id': 0, 'parent': 0},
{'name': 1, 'id': 1, 'parent': 0},
{'name': 2, 'id': 2, 'parent': 0},
{'name': 3, 'id': 3, 'parent': 1},
{'name': 4, 'id': 4, 'parent': 1},
{'name': 5, 'id': 5, 'parent': 4},
{'name': 6, 'id': 6, 'parent': 4}
]
self.assertEqual(expected, node_data, "create_node does not work properly")
def test_append_and_insert(self):
page = pages.Tree(self._browser.driver, "TreeAppendAndInsert")
page.run(options=TestTree.options)
self.assertEqual(7, len(page.svg.nodes), "create_node does not work properly")
node_data = sorted(page.svg.node_data, key=lambda d: d['id'])
expected = [
{'id': 0, 'parent': 0, 'name': 0},
{'id': 1, 'parent': 0, 'name': 1},
{'id': 2, 'parent': 0, 'name': 2},
{'id': 3, 'parent': 2, 'name': 3},
{'id': 4, 'parent': 2, 'name': 4},
{'id': 5, 'parent': 2, 'name': 5},
{'id': 6, 'parent': 4, 'name': 6}
]
self.assertEqual(expected, node_data, "insert or append does not work properly")
def test_marker(self):
page = pages.Tree(self._browser.driver, "TreeMarker")
page.run(options=TestTree.options)
self.assertEqual(7, len(page.svg.nodes), "create_node does not work properly")
self.assertEqual(2, len(page.svg.markers), "create_marker does not work properly")
marker0_color = page.svg.markers[0].value_of_css_property('fill')
marker1_color = page.svg.markers[1].value_of_css_property('fill')
marked = [n for n in page.svg.nodes if n.value_of_css_property('fill') == marker0_color]
self.assertEquals(len(marked), 1, "create_marker does not work properly")
marked = [n for n in page.svg.nodes if n.value_of_css_property('fill') == marker1_color]
self.assertEquals(len(marked), 1, "create_marker or move_marker does not work properly")
def test_multi_marker(self):
page = pages.Tree(self._browser.driver, "TreeMultiMarker")
page.run(options=TestTree.options)
self.assertEqual(7, len(page.svg.nodes), "create_node does not work properly")
self.assertEqual(1, len(page.svg.markers), "create_multi_marker does not work properly")
marker_color = page.svg.markers[0].value_of_css_property('fill')
marked = [n for n in page.svg.nodes if n.value_of_css_property('fill') == marker_color]
self.assertEquals(len(marked), 2, "multi_marker_append or multi_marker_remove does not work properly") | mit | 2,378,627,623,283,486,000 | 42.506849 | 110 | 0.594961 | false | 3.388474 | true | false | false |
ucodev/elastictools | testing/incoming/elastictools-shard-reassign.py | 1 | 4905 | #!/usr/bin/python
# @file elastictools-shard-reassign.py
# @brief uCodev Elastic Tools
# Elasticsearch shard reassigning tool.
#
# Date: 02/08/2015
#
# Copyright 2015 Pedro A. Hortas ([email protected])
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# uCodev Elastic Tools v0.1
#
# Description: Elasticsearch analysis, report and handling tools.
#
# Author: Pedro A. Hortas
# Email: [email protected]
# Date: 02/08/2015
#
import sys
import time
from elastictools import *
# Globals
EXIT_SUCCESS = 0
EXIT_FAILURE = 1
ES_NODE_HOST = "localhost"
ES_NODE_PORT = "9200"
ES_REQ_WAIT_SECS = 1
# Class: Elasticsearch Shard
class UETReassign(ESShard):
# SECTION: Handler
def tool_reassign_shards(self):
for count, shard in enumerate(filter(lambda s: (s[3] == "UNASSIGNED"), self.shard_data_list)):
sys.stdout.write(" * Re-assigning shard '%s' of type '%s' from index '%s' to node '%s' on host '%s' (%s of %s)... " % (shard[1], shard[2], shard[0], self.shard_node_list[count % len(self.shard_node_list)], self.es_host, count + 1, self.shard_status_unassigned_count()))
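# Build a cluster reroute "allocate" command that forces this unassigned shard
# onto the next data node in round-robin order; allow_primary permits allocating
# an empty primary (any data previously held by that shard copy is lost).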
post_data = {
"commands" : [ {
"allocate" : {
"index" : "%s" % shard[0],
"shard" : "%s" % shard[1],
"node" : "%s" % self.shard_node_list[count % len(self.shard_node_list)],
"allow_primary" : True
}
} ]
}
# Request cluster reroute op for the current unassigned shard
res = self.es_request_http("POST", "/_cluster/reroute", post_data)
# TODO: 400 is obviously an error, but is there more stuff to be handled here?
if res["status"] == 400:
print(res["error"])
print("Failed.")
continue
print("Reassigned.")
self.shard_stat_reassigned += 1
time.sleep(ES_REQ_WAIT_SECS)
def do(self):
print("Loading shard status...")
self.shard_status_load()
print("Parsing shard data...")
self.shard_status_parse()
print(" * Number of shards started: %s" % self.shard_status_started_count())
print(" * Number of shards initializing: %s" % self.shard_status_initializing_count())
print(" * Number of shards unassigned: %s" % self.shard_status_unassigned_count())
print(" * Total number of shards: %s" % self.shard_status_total_count())
# Re-assign shards if unassigned shards are present
if self.shard_status_unassigned_count():
print("Enabling routing allocation...")
self.cluster_settings_set("cluster.routing.allocation.enable", "all")
print("Reassigning unassigned shards...")
self.tool_reassign_shards()
# Check if there are shards in initializing state
if self.shard_status_initializing_count():
print("There are shards in initialization state. If the problem persists, restart the node.")
print("\nSummary:")
print(" * Reassigned shards: %d" % self.shard_stat_reassigned)
print(" * Failed Reassigns: %d" % self.shard_stat_failed_reassign)
print(" * Moved shards: %d" % self.shard_stat_moved)
print(" * Failed moves: %d" % self.shard_stat_failed_move)
print(" * Total warnings: %d" % self.warnings)
print(" * Total errors: %d" % self.errors)
print("\nResetting data...")
self.shard_reset_data_status()
print("Loading shard status...")
self.shard_status_load()
print("Parsing shard data...")
self.shard_status_parse()
print("\nCurrent status:")
print(" * Number of shards started: %s" % self.shard_status_started_count())
print(" * Number of shards initializing: %s" % self.shard_status_initializing_count())
print(" * Number of shards unassigned: %s" % self.shard_status_unassigned_count())
print(" * Total number of shards: %s" % self.shard_status_total_count())
print("\nDone.")
# Class: Usage
class Usage:
args = {
"target": None
}
def usage_show(self):
print("Usage: %s <target filename OR url>" % (sys.argv[0]))
def usage_check(self):
if len(sys.argv) > 2:
self.usage_show()
sys.exit(EXIT_FAILURE)
elif len(sys.argv) < 2:
self.args["target"] = "http://%s:%s" % (ES_NODE_HOST, ES_NODE_PORT)
def do(self):
self.usage_check()
if self.args["target"] == None:
self.args["target"] = sys.argv[1]
class Main:
def do(self):
usage = Usage()
usage.do()
elastic = UETReassign(usage.args["target"])
elastic.do()
## Entry Point
if __name__ == "__main__":
Main().do()
| apache-2.0 | 2,218,248,546,542,065,700 | 29.849057 | 275 | 0.647706 | false | 3.098547 | false | false | false |
epyatopal/geocoder-1 | geocoder/google_timezone.py | 4 | 1813 | #!/usr/bin/python
# coding: utf8
from __future__ import absolute_import
import time
from geocoder.base import Base
from geocoder.location import Location
class Timezone(Base):
"""
Google Time Zone API
====================
The Time Zone API provides time offset data for locations on the surface of the earth.
Requesting the time zone information for a specific Latitude/Longitude pair will
return the name of that time zone, the time offset from UTC, and the Daylight Savings offset.
API Reference
-------------
https://developers.google.com/maps/documentation/timezone/
"""
provider = 'google'
method = 'timezone'
def __init__(self, location, **kwargs):
self.url = 'https://maps.googleapis.com/maps/api/timezone/json'
self.location = str(Location(location))
self.timestamp = kwargs.get('timestamp', time.time())
self.params = {
'location': self.location,
'timestamp': self.timestamp,
}
self._initialize(**kwargs)
def __repr__(self):
return "<[{0}] {1} [{2}]>".format(self.status, self.provider, self.timeZoneName)
def _exceptions(self):
        # Build initial Tree with results
if self.parse['results']:
self._build_tree(self.parse['results'][0])
@property
def ok(self):
return bool(self.timeZoneName)
@property
def timeZoneId(self):
return self.parse.get('timeZoneId')
@property
def timeZoneName(self):
return self.parse.get('timeZoneName')
@property
def rawOffset(self):
return self.parse.get('rawOffset')
@property
def dstOffset(self):
return self.parse.get('dstOffset')
if __name__ == '__main__':
g = Timezone([45.5375801, -75.2465979])
g.debug()
| mit | 7,053,612,911,848,936,000 | 26.892308 | 97 | 0.62107 | false | 3.915767 | false | false | false |
tino1b2be/LARMAS | prompts/models.py | 1 | 1085 | from django.contrib.auth.models import User
from django.db import models
from django.utils.timezone import now
from user.models import Language
class Prompt(models.Model):
"""
Model for the prompts
"""
text = models.TextField(blank=False)
language = models.ForeignKey(Language, blank=False)
number_of_recordings = models.IntegerField(default=0)
def __str__(self):
return self.language.name + ' - "' + self.text + '"'
class DistributedPrompt(models.Model):
"""
Model for distributed prompts
"""
user = models.ForeignKey(User, blank=False)
prompt = models.ForeignKey(Prompt, blank=False)
rejected = models.BooleanField(default=False)
recorded = models.BooleanField(default=False)
translated = models.BooleanField(default=False)
date = models.DateTimeField(default=now)
def __str__(self):
return self.user.username + ' - ' + \
str(self.prompt.id) + ' - ' + \
str(self.rejected) + ' - ' + \
str(self.recorded) + ' - ' + \
str(self.date)
| agpl-3.0 | -4,607,705,863,196,241,400 | 28.324324 | 60 | 0.630415 | false | 3.931159 | false | false | false |
datastreaming/mflow_nodes | tests/helpers.py | 1 | 1389 | import json
from mflow_nodes.processors.base import BaseProcessor
from mflow_nodes.stream_node import get_processor_function, get_receiver_function
from mflow_nodes.node_manager import NodeManager
def setup_file_writing_receiver(connect_address, output_filename):
"""
    Setup a node that writes the message headers into an output file for later inspection.
:param connect_address: Address the node connects to.
:param output_filename: Output file.
:return: Instance of ExternalProcessWrapper.
"""
# Format the output file.
with open(output_filename, 'w') as output_file:
output_file.write("[]")
def process_message(message):
with open(output_filename, 'r') as input_file:
test_data = json.load(input_file)
test_data.append(message.get_header())
with open(output_filename, 'w') as output:
output.write(json.dumps(test_data, indent=4))
processor = BaseProcessor()
processor.process_message = process_message
receiver = NodeManager(processor_function=get_processor_function(processor=processor,
connection_address=connect_address),
receiver_function=get_receiver_function(connection_address=connect_address),
processor_instance=processor)
return receiver
| gpl-3.0 | -3,615,498,578,825,868,300 | 38.685714 | 105 | 0.663787 | false | 4.423567 | false | false | false |
spitfire-sidra/DateRanger | DateRanger/__init__.py | 1 | 8849 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import calendar
from datetime import date
from datetime import timedelta
from DateRanger.utils import get_quarter
from DateRanger.utils import get_monthrange
from DateRanger.objects import DateFrame
from DateRanger.exceptions import InvalidDateRange
from DateRanger.exceptions import InvalidQuarter
class DateRanger(object):
"""
    A class for getting common business date ranges.
"""
def __init__(self, base_date=None):
"""
        Args:
base_date - the base day. Example: date(2009, 11, 1)
"""
self.set_base_date(base_date)
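    # Illustrative usage (not part of the original module; DateFrame.get_range()
    # is assumed to return a (start, end) tuple, as used in relative_week() below):
    #
    #   dr = DateRanger(base_date=date(2015, 6, 15))
    #   start, end = dr.prev_month().get_range()  # date(2015, 5, 1) .. date(2015, 5, 31)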
def set_base_date(self, base_date=None):
"""
Set base date.
        Args:
base_date - Example: date(2009, 11, 1)
Note:
self.b____, 'b' means 'base'
"""
self.bdate = base_date or date.today()
self.bmonth = self.bdate.month
self.byear = self.bdate.year
self.bquarter = get_quarter(self.bmonth)
def base_day(self):
"""
Get the DateRange of self.bdate.
"""
return DateFrame(self.bdate, self.bdate)
def relative_day(self, days=0):
"""
        Calculate a relative date from self.bdate.
"""
rday = self.bdate + timedelta(days=days)
return (rday, rday)
def prev_day(self, days=1):
"""
        Get the DateRange that is n days before self.bdate.
        Args:
days - n days ago
"""
ndays = days * -1
start, end = self.relative_day(days=ndays)
return DateFrame(start, end)
def next_day(self, days=1):
"""
        Get the DateRange that is n days after self.bdate.
        Args:
days - next n days
"""
start, end = self.relative_day(days=days)
return DateFrame(start, end)
def get_week_range(self, base_date):
"""
Find the first/last day of the week for the given day.
Weeks start on Sunday and end on Saturday.
        Args:
base_date - any date
"""
start = base_date - timedelta(days=base_date.weekday()+1)
end = start + timedelta(days=6)
return (start, end)
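    # Example (hypothetical input): for date(2015, 6, 17), a Wednesday (weekday() == 2),
    # start is 2015-06-14 (Sunday) and end is 2015-06-20 (Saturday).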
def base_week(self):
"""
Get DateRange of the week that contains self.bdate.
"""
start, end = self.get_week_range(self.bdate)
return DateFrame(start, end)
def relative_week(self, weeks=0):
"""
        Calculate a relative week range from self.bdate.
"""
_, end_date = self.base_week().get_range()
start, end = self.get_week_range(end_date + timedelta(days=7*weeks))
return (start, end)
def prev_week(self, weeks=1):
"""
        Get the DateRange that is n weeks before self.bdate.
        Args:
weeks - n week ago
"""
nweeks = weeks * -1
start, end = self.relative_week(weeks=nweeks)
return DateFrame(start, end)
def next_week(self, weeks=1):
"""
        Get the DateRange that is n weeks after self.bdate.
        Args:
weeks - next n weeks
"""
start, end = self.relative_week(weeks=weeks)
return DateFrame(start, end)
def get_month_range(self, year, month):
"""
Get the first and last day of the given month in given year.
Args:
year
month
"""
days = calendar.monthrange(year, month)[1]
start = date(year, month, 1)
end = date(year, month, days)
return (start, end)
def relative_month(self, months=0):
"""
        Calculate a relative month range from self.bdate.
"""
month_sum = self.bmonth + months
if month_sum < 0:
back_months = abs(month_sum)
yeardelta = ((back_months // 12) + 1) * -1
month = 12 - (back_months % 12)
elif month_sum == 0:
yeardelta = -1
month = 12
elif month_sum <= 12:
yeardelta = 0
month = month_sum
else:
yeardelta = month_sum // 12
month = month_sum % 12
year = self.byear + yeardelta
start, end = self.get_month_range(year, month)
return (start, end)
def base_month(self):
"""
Get the DateRange of the month that contains self.bdate
"""
year, month = self.byear, self.bmonth
start, end = self.get_month_range(year, month)
return DateFrame(start, end)
def prev_month(self, months=1):
"""
        Get the DateRange that is n months before self.bdate.
        Args:
months - n months ago
"""
nmonths = months * -1
start, end = self.relative_month(months=nmonths)
return DateFrame(start, end)
def next_month(self, months=1):
"""
        Get the DateRange that is n months after self.bdate.
        Args:
months - next n months
"""
start, end = self.relative_month(months=months)
return DateFrame(start, end)
def get_quarter_range(self, year, quarter):
"""
Get time range with specific year and quarter.
"""
if quarter not in (1, 2, 3, 4):
raise InvalidQuarter()
start_month, end_month = get_monthrange(quarter)
days = calendar.monthrange(year, end_month)[1]
start = date(year, start_month, 1)
end = date(year, end_month, days)
return start, end
def relative_quarter(self, quarters=0):
"""
        Calculate a relative quarter range from self.bdate.
"""
quarter_sum = self.bquarter + quarters
if quarter_sum < 0:
back_quarters = abs(quarter_sum)
yeardelta = ((back_quarters // 4) + 1) * -1
quarter = 4 - (back_quarters % 4)
elif quarter_sum == 0:
yeardelta = -1
quarter = 4
elif quarter_sum <= 4:
yeardelta = 0
quarter = quarter_sum
else:
yeardelta = quarter_sum // 4
quarter = quarter_sum % 4
year = self.byear + yeardelta
start, end = self.get_quarter_range(year, quarter)
return (start, end)
def base_quarter(self):
"""
Get the DateRange of the quarter that contains self.bdate.
"""
quarter = get_quarter(self.bmonth)
start, end = self.get_quarter_range(self.byear, quarter)
return DateFrame(start, end)
def prev_quarter(self, quarters=1):
"""
        Get the DateRange that is n quarters before self.bdate.
        Args:
quarters - n quarters ago
"""
nquarters = quarters * -1
start, end = self.relative_quarter(quarters=nquarters)
return DateFrame(start, end)
def next_quarter(self, quarters=1):
"""
        Get the DateRange that is n quarters after self.bdate.
        Args:
quarters - next n quarters
"""
start, end = self.relative_quarter(quarters=quarters)
return DateFrame(start, end)
def get_year_range(self, year):
"""
Get time range of the year.
"""
start = date(year, 1, 1)
end = date(year, 12, 31)
return (start, end)
def relative_year(self, years=0):
year = self.byear + years
start, end = self.get_year_range(year)
return (start, end)
def base_year(self):
"""
Get the DateRange of the year that contains self.bdate.
"""
start, end = self.get_year_range(self.byear)
return DateFrame(start, end)
def prev_year(self, years=1):
"""
        Get the DateRange that is n years before self.bdate.
        Args:
years - n years ago
"""
nyears = years * -1
start, end = self.relative_year(years=nyears)
return DateFrame(start, end)
def next_year(self, years=1):
"""
        Get the DateRange that is n years after self.bdate.
        Args:
year - next n years
"""
start, end = self.relative_year(years=years)
return DateFrame(start, end)
def from_date(self, from_date):
"""
Return the DateRange from `from_date` to self.bdate
        Args:
from_date - Example: date(2015, 1, 1)
"""
if from_date > self.bdate:
raise InvalidDateRange()
return DateFrame(from_date, self.bdate + timedelta(days=1))
def to_date(self, to_date):
"""
Return the DateRange from self.bdate to `to_date`
        Args:
to_date - Example: date(2015, 1, 1)
"""
if to_date < self.bdate:
raise InvalidDateRange()
return DateFrame(self.bdate, to_date + timedelta(days=1))
| mit | 5,173,313,311,686,591,000 | 27.092063 | 76 | 0.546954 | false | 3.773561 | false | false | false |
RIPE-Atlas-Community/RIPE-Atlas-data-analysis | ripeatlas/analysis/pingstats.py | 1 | 13265 | #!/usr/bin/env python
# Class definitions for Atlas Ping statistics
# statistics are collected at three levels:
#
# first the raw per probe, per target IP data
# second the aggregate over all probes for each target IP
# finally overall statistics for the measurement
from collections import defaultdict
import json
import time
import sys
from ripeatlas.analysis.msmstats import measurementStats, measurementDestStats, probeResults
from ripeatlas.analysis.utils import dnsnamelookup
class pingStatistics(measurementStats):
"""
    Top-level statistics for a ping measurement.
    The "measurementStats" base class holds most of the methods;
    here we only need to override the addData() method with
    one specific to pings.
"""
def addData(self, probe_id, dst_addr, data):
"""
Add data from one sample of the measurement
        Statistics are grouped by destination IP up to a maximum of maxDestinations
When the measurement involves more destinations, statistics are aggregated
"""
self.samples += 1
self.probes[probe_id] += 1
self.destinationIPs[dst_addr] += 1
if self.aggregateDestinations:
dst_addr = "All IPs combined"
else:
if not dst_addr in self.destinationStats:
# do we want to add another destination specific report?
if len(self.destinationStats.keys()) < self.maxDestinations:
# yes we do
self.destinationStats[dst_addr] = pingDestinationStats(self.msm_id,dst_addr)
else:
# no, there are too many destinations for this msm_id
# aggregate the lot
self.aggregateDestinations = 1
dst_addr = "All IPs combined"
self.aggregateDestStats(dst_addr)
self.destinationStats[dst_addr].addData(probe_id,data)
def aggregateDestStats(self, aggregate_dst):
"""
        We have too many different destination IPs to report on separately.
        Aggregate the per-destination stats collected thus far into a new aggregate stats object.
All new data will be added there.
"""
aggrStats = pingDestinationStats(self.msm_id, aggregate_dst)
for dest in self.destinationStats.keys():
aggrStats.addDestinationStats(self.destinationStats[dest])
del self.destinationStats[dest]
self.destinationStats[aggregate_dst] = aggrStats
class pingDestinationStats:
'summary of ping results from one measurement to a single IP'
def __init__(self, msm_id, dst_addr):
self.msm_id = msm_id
self.dst_addr = dst_addr
self.probeReport = {}
self.aggregationDone = 0
self.packets_sent = 0
self.packets_rcvd = 0
self.packets_dup = 0
self.nodata = 0
self.allerrors = 0
self.errorMsgs = defaultdict(int)
self.loss100 = 0
self.loss80 = 0
self.loss60 = 0
self.loss40 = 0
self.loss20 = 0
self.loss5 = 0
self.loss0 = 0
self.lossless = 0
self.minimumRtts = []
self.medianRtts = []
self.maximumRtts = []
self.min = 0
self.max = 0
self.level975 = 0
self.level025 = 0
self.starttime = 9999999999
self.endtime = 0
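    # Illustrative flow (identifiers and values are hypothetical; 'record' is one
    # parsed ping result dict of the form consumed by probePingResults.addData below):
    #
    #   dest = pingDestinationStats(msm_id=1001, dst_addr="193.0.14.129")
    #   dest.addData(probe_id=42, data=record)
    #   dest.report(detail=1)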
def addData(self, probe_id, data):
if not probe_id in self.probeReport:
self.probeReport[probe_id] = probePingResults(self.msm_id,self.dst_addr,probe_id)
self.probeReport[probe_id].addData(data)
self.aggregationDone = 0
def addDestinationStats(self, source):
"""
add data from another destinationStats object to present one
primary use case is creating aggregate stats over all
destinations of the measurement
"""
for probe_id in source.probeReport:
if not probe_id in self.probeReport:
self.probeReport[probe_id] = probePingResults(self.msm_id,self.dst_addr,probe_id)
self.probeReport[probe_id].addProbeResults(source.probeReport[probe_id])
if source.aggregationDone:
self.aggregateProbeData()
def aggregateProbeData(self):
"""
compile aggregate statistics from the per probe results
"""
for probe_id in self.probeReport.keys():
counters = self.probeReport[probe_id].getCounts()
percentiles = self.probeReport[probe_id].rttPercentiles()
loss = self.probeReport[probe_id].lossRate()
errorrate = self.probeReport[probe_id].errorRate()
self.packets_sent += counters['sent']
self.packets_rcvd += counters['received']
self.packets_dup += counters['duplicate']
for msg in self.probeReport[probe_id].errors:
self.errorMsgs[msg] += 1
if (counters['sent'] == 0):
if errorrate == 1:
self.allerrors += 1
else:
self.nodata += 1
elif (counters['received'] == 0):
self.loss100 += 1
elif (counters['received'] == counters['sent']):
self.lossless += 1
elif (loss > 0.80):
self.loss80 += 1
elif (loss > 0.60):
self.loss60 += 1
elif (loss > 0.40):
self.loss40 += 1
elif (loss > 0.20):
self.loss20 += 1
elif (loss > 0.05):
self.loss5 += 1
elif (loss > 0.0):
self.loss0 += 1
if '0' in percentiles and '50' in percentiles and '100' in percentiles:
self.minimumRtts += [percentiles.get('2.5')]
self.medianRtts += [percentiles.get('50')]
self.maximumRtts += [percentiles.get('97.5')]
starttime = self.probeReport[probe_id].starttime
endtime = self.probeReport[probe_id].endtime
if (starttime < self.starttime):
self.starttime = starttime
if (endtime > self.endtime):
self.endtime = endtime
self.minimumRtts.sort()
self.medianRtts.sort()
self.maximumRtts.sort()
self.aggregationDone = 1
def report(self,detail):
"""
Output a report on the collected statistics
The 'detail' argument controls how much detail is provided
(more detail, longer reports)
"""
if not self.aggregationDone:
# aggregate the per probe results before reporting
self.aggregateProbeData()
# Look for reverse DNS (if any)
host = dnsnamelookup(self.dst_addr)
if (detail==0):
if host:
print "Destination:", self.dst_addr, " / " , host
else:
print "Destination:", self.dst_addr
else:
print "Destination:", self.dst_addr
if host:
print "Reverse DNS:", host
nprobes = len(self.probeReport.keys())
if (self.packets_sent>0):
lost = 100 * (self.packets_sent - self.packets_rcvd)/float(self.packets_sent)
lost = "%.2f%%" % lost
else:
lost = "NA"
if (detail==0):
#minimal view; report median of the medians
if len(self.medianRtts) > 0:
numprobes = len(self.medianRtts)
level500 = int(numprobes * 0.5)
median = self.medianRtts[level500]
median = "%.2fms" % median
else:
median = "NA"
print "sent/received/loss/median %d/%d/%s/%s" % (self.packets_sent,self.packets_rcvd,lost,median)
else:
print "Timeinterval:" , time.strftime("%Y-%m-%dT%H:%MZ",time.gmtime(self.starttime)), " - ", time.strftime("%Y-%m-%dT%H:%MZ",time.gmtime(self.endtime))
print "Packets sent:", self.packets_sent
print "Packets received:", self.packets_rcvd
print "Overall loss rate: %s" % lost
print
print "Total probes measuring: %6d" % nprobes
print "Probes with 100%% errors:%6d" % self.allerrors
if len(self.errorMsgs)>0:
print 'Total errors on probes: %6d' % sum(self.errorMsgs.values())
print 'Most common error:"%s (%dx)"' % sorted(self.errorMsgs.items(),key=lambda x: x[1], reverse=True)[0]
if (nprobes > 1):
print
print "Probes with no packets lost: %6d" % self.lossless
print "Probes with 0%%-5%% loss: %6d" % self.loss0
print "Probes with 5%%-20%% loss: %6d" % self.loss5
print "Probes with 20%%-40%% loss: %6d" % self.loss20
print "Probes with 40%%-60%% loss: %6d" % self.loss40
print "Probes with 60%%-80%% loss: %6d" % self.loss60
print "Probes with 80%%-100%% loss: %6d" % self.loss80
print "Probes with 100%% loss: %6d" % self.loss100
print "Probes not sending any packets:%6d" % self.nodata
print
if len(self.medianRtts) > 0:
numprobes = len(self.medianRtts)
level025 = int(numprobes * 0.025)
level250 = int(numprobes * 0.25)
level500 = int(numprobes * 0.5)
level750 = int(numprobes * 0.75)
level975 = int(numprobes * 0.975)
print "RTT distributions:"
print "-----------------\n"
print '2.5 percentile ("Minimum")'
print "lowest 2.5 percentile RTT in all probes:%8.2fms" % self.minimumRtts[0]
print "2.5%% of probes had 2.5 percentile <= %8.2fms" % self.minimumRtts[level025]
print "25%% of probes had 2.5 percentile <= %8.2fms" % (self.minimumRtts[level250])
print "50%% of probes had 2.5 percentile <= %8.2fms" % (self.minimumRtts[level500])
print "75%% of probes had 2.5 percentile <= %8.2fms" % (self.minimumRtts[level750])
print "97.5%% of probes had 2.5 percentile <= %8.2fms" % (self.minimumRtts[level975])
print "highest 2.5 percentile in all probes %8.2fms" % (self.minimumRtts[numprobes-1])
print
print "Median"
print "lowest median RTT in all probes %9.2fms" % self.medianRtts[0]
print "2.5%% of probes had median RTT <= %9.2fms" % self.medianRtts[level025]
print "25%% of probes had median RTT <= %9.2fms" % (self.medianRtts[level250])
print "50%% of probes had median RTT <= %9.2fms" % (self.medianRtts[level500])
print "75%% of probes had median RTT <= %9.2fms" % (self.medianRtts[level750])
print "97.5%% of probes had median RTT <= %9.2fms" % (self.medianRtts[level975])
print "highest median RTT in all probes %9.2fms" % (self.medianRtts[numprobes-1])
print
print '97.5 percentile ("Maximum")'
print "lowest 97.5 percentile RTT in all probes:%8.2fms" % self.maximumRtts[0]
print "2.5%% of probes had 97.5 percentile <= %8.2fms" % self.maximumRtts[level025]
print "25%% of probes had 97.5 percentile <= %8.2fms" % (self.maximumRtts[level250])
print "50%% of probes had 97.5 percentile <= %8.2fms" % (self.maximumRtts[level500])
print "75%% of probes had 97.5 percentile <= %8.2fms" % (self.maximumRtts[level750])
print "97.5%% of probes had 97.5 percentile <= %8.2fms" % (self.maximumRtts[level975])
print "highest 97.5 percentile in all probes %8.2fms" % (self.maximumRtts[numprobes-1])
print
print
return
class probePingResults(probeResults):
"""
collect ping data from one probe to one destination'
"""
def __init__(self, msm_id, dst_addr, probe_id):
self.probe_id = probe_id
self.msm_id = msm_id
self.dst_addr = dst_addr
self.samples =0
self.packets_sent = 0
self.packets_rcvd = 0
self.packets_dup = 0
self.errors = defaultdict(int)
self.rtts = []
self.rtts_sorted = 0
self.starttime = 9999999999
self.endtime = 0
def getCounts(self):
counters = {}
counters['sent'] = self.packets_sent
counters['received'] = self.packets_rcvd
counters['duplicate'] = self.packets_dup
return(counters)
def lossRate(self):
if (self.packets_sent>0):
loss = (self.packets_sent-self.packets_rcvd) / float(self.packets_sent)
else:
loss = 99999999999
return(loss)
def errorRate(self):
total = len(self.errors) + len(self.rtts)
if total>0:
            errorrate = len(self.errors) / float(total)
else:
errorrate = 99999999999
return(errorrate)
def addData (self, data):
"""
Process one record of an Atlas ping measurement, update statistics
See https://atlas.ripe.net/doc/data_struct#v4460_ping for details on
the possible fields found in 'data' dictionary
"""
self.samples += 1
self.packets_sent += data['sent']
self.packets_dup += data['dup']
self.packets_rcvd += data['rcvd']
self.updateStartEnd(data['timestamp'])
for item in data['result']:
if 'error' in item:
self.errors[item['error']] += 1
if 'rtt' in item and not 'dup' in item:
# rtt for duplicates is not representative, often too late
self.rtts += [item['rtt']]
return
def addProbeResults(self, source):
"""
Add data from another pingResults object to present stats
main use case is collecting aggregate stats, not specific to one target IP
"""
self.samples += source.samples
self.packets_sent += source.packets_sent
self.packets_dup += source.packets_dup
self.packets_rcvd += source.packets_rcvd
self.updateStartEnd(source.starttime)
self.updateStartEnd(source.endtime)
self.rtts += source.rtts
if self.rtts_sorted:
self.rtts.sort()
def rttPercentiles(self):
percentiles={}
if (len(self.rtts) > 0):
if not self.rtts_sorted:
self.rtts.sort()
self.rtts_sorted = 1
index025 = int(len(self.rtts)*0.025)
index500 = int(len(self.rtts)*0.5)
index975 = int(len(self.rtts)*0.975)
percentiles['100'] = self.rtts[len(self.rtts)-1]
percentiles['97.5'] = self.rtts[index975]
percentiles['50'] = self.rtts[index500]
percentiles['2.5'] = self.rtts[index025]
percentiles['0'] = self.rtts[0]
return(percentiles)
| lgpl-3.0 | 249,741,515,502,609 | 31.834158 | 156 | 0.642819 | false | 3.140388 | false | false | false |
hdknr/django-mediafiles | src/mediafiles/views.py | 1 | 6555 | # -*- coding: utf-8 -*-
from django import template
from django.http import HttpResponse,HttpResponseRedirect,Http404
from django.shortcuts import render_to_response
#
from models import MediaFile,Gallery
from forms import GalleryMediaFileForm
import uuid
#
def preview(request,id):
m = MediaFile.objects.get(id=id )
return m.response( HttpResponse )
def download(request,id):
m = MediaFile.objects.get(id=id )
return m.response( HttpResponse,meta=True )
def thumbnail(request,id,width,height):
m = MediaFile.objects.get(id=id )
return m.response( HttpResponse,size=(int(width),int(height)) )
#######
from django.utils import simplejson
from django.core.urlresolvers import reverse
from django.conf import settings
from django.views.generic import CreateView, DeleteView, UpdateView, ListView,DetailView
from django.contrib.auth.decorators import login_required, permission_required
def response_mimetype(request):
# if "application/json" in request.META['HTTP_ACCEPT']:
if "application/json" in request.META.get('HTTP_ACCEPT',[]):
return "application/json"
else:
return "text/plain"
class GalleryAdminDetail(DetailView):
model = Gallery
def get_object(self, queryset=None):
return self.model.objects.get(id=self.kwargs['id'] )
def get_context_data(self, **kwargs):
'''
{'mediafiles': <django.db.models.fields.related.ManyRelatedManager object >
'object': <Gallery: Gallery object>,
'gallery': <Gallery: Gallery object>,
'form': <django.forms.models.GalleryForm object >,
'view': <mediafiles.views.GalleryAdminEdit object > }
'''
context = super(GalleryAdminDetail, self).get_context_data(**kwargs)
context['mediafiles'] = self.object.medias
context['container'] = self.object
context['mediafile_uploader'] = reverse('gallery_admin_media_create',
kwargs={'id':self.kwargs['id'] } )
context['mediafile_delete_url'] ="gallery_admin_media_delete"
context['mediafile_image_url'] ="gallery_admin_media_image"
context['mediafile_thumbnail_url'] ="gallery_admin_media_thumb"
context['mediafile_url_hint'] = {}
# context['salt'] = uuid.uuid1().hex
return context
class GalleryAdminList(ListView):
'''
Template(by default): mediafiles/gallery_list.html
'''
model = Gallery
class GalleryAdminMediaCreate(CreateView):
model = MediaFile
form_class = GalleryMediaFileForm
# def get(self,request,*args,**kwargs): #: for debugging
# print "HDKNR:", "Ajax?",request.is_ajax()
# response = super(GalleryAdminEdit,self).get(request,*args,**kwargs)
# if request.is_ajax():
# print response.render()
# return response
# def post(self, request, *args, **kwargs): #: for debugging
# print "HDKNR:post()","Ajax?=",request.is_ajax()
# print args,kwargs,request.POST,request.FILES
# response = super(GalleryAdminMediaCreate,self).post(request,*args,**kwargs)
# return response
# def form_invalid(self,form):
# print "HDKNR(form_invalid):",type(form),form.errors
# return super(GalleryAdminMediaCreate,self).form_invalid(form)
def form_valid(self, form):
        form.instance.user = self.request.user #: the logged-in user
new_media = form.save()
        self.gallery = Gallery.objects.get(id=self.kwargs['id']) #: could be done elsewhere
self.gallery.medias.add( new_media )
# f = self.request.FILES.get('file')
url = reverse('mediafiles_preview',kwargs={'id': new_media.id ,} )
#: jquery file upload API data (JSON)
data = [{'name': new_media.name,
'url': url,
'thumbnail_url': new_media.get_thumbnail_url(size=(100,30),),
'gallery' : 'gallery' if new_media.is_image() else "",
'delete_url': reverse('gallery_admin_media_delete',
kwargs={'id':self.kwargs['id'], 'mid':new_media.id,} ),
'delete_type': "DELETE"
}]
response = JSONResponse(data, {}, response_mimetype(self.request))
response['Content-Disposition'] = 'inline; filename=files.json'
return response
class JSONResponse(HttpResponse):
"""JSON response class."""
def __init__(self,obj='',json_opts={},mimetype="application/json",*args,**kwargs):
content = simplejson.dumps(obj,**json_opts)
super(JSONResponse,self).__init__(content,mimetype,*args,**kwargs)
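# Illustrative use of JSONResponse, mirroring form_valid() above (payload values
# are hypothetical):
#
#   data = [{'name': 'photo.jpg', 'url': '/mediafiles/1/preview/', 'delete_type': 'DELETE'}]
#   return JSONResponse(data, {}, response_mimetype(request))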
class GalleryAdminMediaDelete(DeleteView):
model = MediaFile
def get_object(self,*args,**kwargs):
return Gallery.objects.get(id=self.kwargs['id'] ).medias.get(id=self.kwargs['mid'])
def delete(self, request, *args, **kwargs):
"""
This does not actually delete the file, only the database record. But
that is easy to implement.
"""
self.object = self.get_object()
        self.object.delete() #: delete the record
if request.is_ajax():
response = JSONResponse(True, {}, response_mimetype(self.request))
response['Content-Disposition'] = 'inline; filename=files.json'
return response
else:
return render_to_response( 'mediafiles/mediafile_deleted.html',
context_instance=template.RequestContext(request), )
class GalleryAdminMediaImage(DetailView):
model = MediaFile
def get(self, request, *args, **kwargs):
media = Gallery.objects.get(id=self.kwargs['id'] ).medias.get(id=self.kwargs['mid'])
return media.response( HttpResponse )
class GalleryAdminMediaThumb(DetailView):
model = MediaFile
def get(self, request, *args, **kwargs):
media = Gallery.objects.get(id=self.kwargs['id'] ).medias.get(id=self.kwargs['mid'])
size=( int(request.GET.get('width',100)), int(request.GET.get('height',30)) )
return media.response( HttpResponse ,size=size )
GalleryAdminDetailView = login_required(GalleryAdminDetail.as_view())
GalleryAdminListView = login_required(GalleryAdminList.as_view())
GalleryAdminMediaCreateView = login_required(GalleryAdminMediaCreate.as_view())
GalleryAdminMediaDeleteView = login_required(GalleryAdminMediaDelete.as_view())
GalleryAdminMediaImageView = login_required(GalleryAdminMediaImage.as_view())
GalleryAdminMediaThumbView = login_required(GalleryAdminMediaThumb.as_view())
| mit | -7,385,137,013,392,610,000 | 39.76875 | 92 | 0.650621 | false | 3.72105 | false | false | false |
gburd/dbsql | src/py/misc/lru.py | 1 | 5206 | #-*- coding: ISO-8859-1 -*-
#
# DBSQL - A SQL database engine.
#
# Copyright (C) 2007-2008 The DBSQL Group, Inc. - All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# There are special exceptions to the terms and conditions of the GPL as it
# is applied to this software. View the full text of the exception in file
# LICENSE_EXCEPTIONS in the directory of this software distribution.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# http://creativecommons.org/licenses/GPL/2.0/
#
# Copyright (C) 2004 Gerhard Häring <[email protected]>
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
# A simple LRU cache, which will be rewritten in C later
class Node:
def __init__(self, key, data):
self.key = key
self.data = data
self.count = 1
self.prev, self.next = None, None
class Cache:
def __init__(self, factory, maxlen):
self.first, self.last = None, None
self.maxlen = maxlen
self.mapping = {}
self.factory = factory
def get(self, key):
if key in self.mapping:
nd = self.mapping[key]
nd.count += 1
if nd.prev and nd.count > nd.prev.count:
ptr = nd.prev
while ptr.prev is not None and nd.count > ptr.prev.count:
ptr = ptr.prev
# Move nd before ptr
if nd.next:
nd.next.prev = nd.prev
else:
self.last = nd.prev
if nd.prev:
nd.prev.next = nd.next
if ptr.prev:
ptr.prev.next = nd
else:
self.first = nd
save = nd.next
nd.next = ptr
nd.prev = ptr.prev
if nd.prev is None:
self.first = nd
ptr.prev = nd
#ptr.next = save
else:
if len(self.mapping) == self.maxlen:
if self.last:
nd = self.last
self.mapping[self.last.key] = None
del self.mapping[self.last.key]
if nd.prev:
nd.prev.next = None
self.last = nd.prev
nd.prev = None
obj = self.factory(key)
nd = Node(key, obj)
nd.prev = self.last
nd.next = None
if self.last:
self.last.next = nd
else:
self.first = nd
self.last = nd
self.mapping[key] = nd
return nd.data
def display(self):
nd = self.first
while nd:
prevkey, nextkey = None, None
if nd.prev: prevkey = nd.prev.key
if nd.next: nextkey = nd.next.key
print "%4s <- %4s -> %s\t(%i)" % (prevkey, nd.key, nextkey, nd.count)
nd = nd.next
if __name__ == "__main__":
def create(s):
return s
import random
cache = Cache(create, 5)
if 1:
chars = list("abcdefghijklmnopqrstuvwxyz")
lst = []
for i in range(100):
idx = random.randint(0, len(chars) - 1)
what = chars[idx]
lst.append(what)
cache.get(chars[idx])
cache.display()
#print "-" * 50
#print lst
#print "-" * 50
else:
lst = \
['y', 'y', 'b', 'v', 'x', 'f', 'h', 'n', 'g', 'k', 'o', 'q', 'p', 'e', 'm', 'c', 't', 'y', 'c', 's', 'p', 's', 'j', 'm', \
'u', 'f', 'z', 'x', 'v', 'r', 'w', 'e', 'm', 'd', 'w', 's', 'b', 'r', 'd', 'e', 'h', 'g', 'e', 't', 'p', 'b', 'e', 'i', \
'g', 'n']
#lst = ['c', 'c', 'b', 'b', 'd', 'd', 'g', 'c', 'c', 'd']
for item in lst:
cache.get(item)
cache.display()
| gpl-3.0 | -7,226,144,713,881,507,000 | 33.657534 | 134 | 0.516135 | false | 3.769732 | false | false | false |
vlinhart/django-smsbrana | smsbrana/views.py | 1 | 1174 | # -*- coding: utf-8 -*-
from datetime import datetime
from django.http import HttpResponse
from smsbrana import SmsConnect
from smsbrana import signals
from smsbrana.const import DELIVERY_STATUS_DELIVERED, DATETIME_FORMAT
from smsbrana.models import SentSms
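# Illustrative wiring of this callback (URL pattern and name are hypothetical,
# assuming a Django 1.x style urls.py):
#
#   from django.conf.urls import url
#   from smsbrana.views import smsconnect_notification
#   urlpatterns = [url(r'^smsbrana/notification/$', smsconnect_notification)]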
def smsconnect_notification(request):
sc = SmsConnect()
result = sc.inbox()
# print result
for delivered in result['delivery_report']:
sms_id = delivered['idsms']
if delivered['status'] != DELIVERY_STATUS_DELIVERED:
continue
try:
sms = SentSms.objects.get(sms_id=sms_id)
if sms.delivered:
continue
sms.delivered = True
sms.delivered_date = datetime.strptime(delivered['time'], DATETIME_FORMAT)
sms.save()
except SentSms.DoesNotExist:
# logger.error('sms delivered which wasn\'t sent' + str(delivered))
pass
# delete the inbox if there are 100+ items
if len(result['delivery_report']) > 100:
sc.inbox(delete=True)
signals.smsconnect_notification_received.send(sender=None, inbox=result, request=request)
return HttpResponse('OK')
| bsd-3-clause | -2,464,962,330,264,614,400 | 33.529412 | 93 | 0.655026 | false | 3.726984 | false | false | false |
divio/django-shop | shop/cascade/catalog.py | 1 | 6242 | from django.contrib.admin import StackedInline
from django.forms import fields, widgets
from django.template.loader import select_template
from django.utils.translation import ugettext_lazy as _, ugettext
from entangled.forms import EntangledModelFormMixin, EntangledModelForm
from cms.plugin_pool import plugin_pool
from cms.utils.compat.dj import is_installed
from cmsplugin_cascade.mixins import WithSortableInlineElementsMixin
from cmsplugin_cascade.models import SortableInlineCascadeElement
from shop.cascade.plugin_base import ShopPluginBase, ProductSelectField
from shop.conf import app_settings
from shop.models.product import ProductModel
if is_installed('adminsortable2'):
from adminsortable2.admin import SortableInlineAdminMixin
else:
SortableInlineAdminMixin = type('SortableInlineAdminMixin', (object,), {})
class ShopCatalogPluginForm(EntangledModelFormMixin):
CHOICES = [
('paginator', _("Use Paginator")),
('manual', _("Manual Infinite")),
('auto', _("Auto Infinite")),
]
pagination = fields.ChoiceField(
choices=CHOICES,
widget=widgets.RadioSelect,
label=_("Pagination"),
initial='paginator',
help_text=_("Shall the product list view use a paginator or scroll infinitely?"),
)
class Meta:
entangled_fields = {'glossary': ['pagination']}
class ShopCatalogPlugin(ShopPluginBase):
name = _("Catalog List View")
require_parent = True
form = ShopCatalogPluginForm
parent_classes = ['BootstrapColumnPlugin', 'SimpleWrapperPlugin']
cache = False
def get_render_template(self, context, instance, placeholder):
templates = []
if instance.glossary.get('render_template'):
templates.append(instance.glossary['render_template'])
templates.extend([
'{}/catalog/list.html'.format(app_settings.APP_LABEL),
'shop/catalog/list.html',
])
return select_template(templates)
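    # Illustrative resolution order when app_settings.APP_LABEL is, say, 'myshop'
    # and no 'render_template' override is stored in the glossary:
    #   1. myshop/catalog/list.html
    #   2. shop/catalog/list.html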
def render(self, context, instance, placeholder):
context['pagination'] = instance.glossary.get('pagination', 'paginator')
return context
@classmethod
def get_identifier(cls, obj):
pagination = obj.glossary.get('pagination')
if pagination == 'paginator':
return ugettext("Manual Pagination")
return ugettext("Infinite Scroll")
plugin_pool.register_plugin(ShopCatalogPlugin)
class ShopAddToCartPluginForm(EntangledModelFormMixin):
use_modal_dialog = fields.BooleanField(
label=_("Use Modal Dialog"),
initial=True,
required=False,
help_text=_("After adding product to cart, render a modal dialog"),
)
class Meta:
entangled_fields = {'glossary': ['use_modal_dialog']}
class ShopAddToCartPlugin(ShopPluginBase):
name = _("Add Product to Cart")
require_parent = True
form = ShopAddToCartPluginForm
parent_classes = ['BootstrapColumnPlugin']
cache = False
def get_render_template(self, context, instance, placeholder):
templates = []
if instance.glossary.get('render_template'):
templates.append(instance.glossary['render_template'])
if context['product'].managed_availability():
template_prefix = 'available-'
else:
template_prefix = ''
templates.extend([
'{}/catalog/{}product-add2cart.html'.format(app_settings.APP_LABEL, template_prefix),
'shop/catalog/{}product-add2cart.html'.format(template_prefix),
])
return select_template(templates)
def render(self, context, instance, placeholder):
context = super(ShopAddToCartPlugin, self).render(context, instance, placeholder)
context['use_modal_dialog'] = bool(instance.glossary.get('use_modal_dialog', True))
return context
plugin_pool.register_plugin(ShopAddToCartPlugin)
class ProductGalleryForm(EntangledModelForm):
order = fields.IntegerField(
widget=widgets.HiddenInput,
initial=0,
)
product = ProductSelectField(
required=False,
label=_("Related Product"),
help_text=_("Choose related product"),
)
class Meta:
entangled_fields = {'glossary': ['product']}
untangled_fields = ['order']
class ProductGalleryInline(SortableInlineAdminMixin, StackedInline):
model = SortableInlineCascadeElement
form = ProductGalleryForm
extra = 5
ordering = ['order']
verbose_name = _("Product")
verbose_name_plural = _("Product Gallery")
class ShopProductGallery(WithSortableInlineElementsMixin, ShopPluginBase):
name = _("Product Gallery")
require_parent = True
parent_classes = ('BootstrapColumnPlugin',)
inlines = (ProductGalleryInline,)
# until this bug https://github.com/applegrew/django-select2/issues/65 is fixed
# we hide the a "add row" button and instead use `extra = 5` in ProductGalleryInline
class Media:
css = {'all': ('shop/css/admin/product-gallery.css',)}
def get_render_template(self, context, instance, placeholder):
templates = []
if instance.glossary.get('render_template'):
templates.append(instance.glossary['render_template'])
templates.extend([
'{}/catalog/product-gallery.html'.format(app_settings.APP_LABEL),
'shop/catalog/product-gallery.html',
])
return select_template(templates)
def render(self, context, instance, placeholder):
product_ids = []
for inline in instance.sortinline_elements.all():
try:
product_ids.append(inline.glossary['product']['pk'])
except KeyError:
pass
queryset = ProductModel.objects.filter(pk__in=product_ids, active=True)
serializer_class = app_settings.PRODUCT_SUMMARY_SERIALIZER
serialized = serializer_class(queryset, many=True, context={'request': context['request']})
# sort the products according to the order provided by `sortinline_elements`.
context['products'] = [product for id in product_ids for product in serialized.data if product['id'] == id]
return context
plugin_pool.register_plugin(ShopProductGallery)
| bsd-3-clause | -2,517,049,768,745,971,000 | 34.668571 | 115 | 0.676226 | false | 4.186452 | false | false | false |
mlperf/training_results_v0.5 | v0.5.0/google/cloud_v2.512/resnet-tpuv2-512/code/resnet/model/models/official/recommendation/constants.py | 4 | 3207 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Central location for NCF specific values."""
import os
import time
# ==============================================================================
# == Main Thread Data Processing ===============================================
# ==============================================================================
class Paths(object):
"""Container for various path information used while training NCF."""
def __init__(self, data_dir, cache_id=None):
self.cache_id = cache_id or int(time.time())
self.data_dir = data_dir
self.cache_root = os.path.join(
self.data_dir, "{}_ncf_recommendation_cache".format(self.cache_id))
self.train_shard_subdir = os.path.join(self.cache_root,
"raw_training_shards")
self.train_shard_template = os.path.join(self.train_shard_subdir,
"positive_shard_{}.pickle")
self.train_epoch_dir = os.path.join(self.cache_root, "training_epochs")
self.eval_data_subdir = os.path.join(self.cache_root, "eval_data")
self.subproc_alive = os.path.join(self.cache_root, "subproc.alive")
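# Illustrative layout for Paths(data_dir="/tmp/movielens", cache_id=1); the paths
# below are derived from the attributes above (the data_dir value is hypothetical):
#   /tmp/movielens/1_ncf_recommendation_cache/raw_training_shards/positive_shard_0.pickle
#   /tmp/movielens/1_ncf_recommendation_cache/training_epochs
#   /tmp/movielens/1_ncf_recommendation_cache/eval_data
#   /tmp/movielens/1_ncf_recommendation_cache/subproc.alive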
APPROX_PTS_PER_TRAIN_SHARD = 128000
# Keys for data shards
TRAIN_KEY = "train"
EVAL_KEY = "eval"
# In both datasets, each user has at least 20 ratings.
MIN_NUM_RATINGS = 20
# The number of negative examples attached with a positive example
# when performing evaluation.
NUM_EVAL_NEGATIVES = 999
# keys for evaluation metrics
TOP_K = 10 # Top-k list for evaluation
HR_KEY = "HR"
NDCG_KEY = "NDCG"
DUPLICATE_MASK = "duplicate_mask"
# Metric names
HR_METRIC_NAME = "HR_METRIC"
NDCG_METRIC_NAME = "NDCG_METRIC"
# ==============================================================================
# == Subprocess Data Generation ================================================
# ==============================================================================
CYCLES_TO_BUFFER = 3 # The number of train cycles worth of data to "run ahead"
# of the main training loop.
FLAGFILE_TEMP = "flagfile.temp"
FLAGFILE = "flagfile"
READY_FILE_TEMP = "ready.json.temp"
READY_FILE = "ready.json"
TRAIN_RECORD_TEMPLATE = "train_{}.tfrecords"
EVAL_RECORD_TEMPLATE = "eval_{}.tfrecords"
TIMEOUT_SECONDS = 3600 * 2 # If the train loop goes more than two hours without
# consuming an epoch of data, this is a good
# indicator that the main thread is dead and the
# subprocess is orphaned.
| apache-2.0 | 1,484,619,227,631,777,800 | 38.109756 | 80 | 0.568756 | false | 4.143411 | false | false | false |
Featuretools/featuretools | featuretools/tests/utils_tests/test_list_primitives.py | 1 | 1228 | from featuretools import list_primitives
from featuretools.primitives import (
Day,
GreaterThan,
Last,
NumCharacters,
get_aggregation_primitives,
get_transform_primitives
)
from featuretools.primitives.utils import _get_descriptions
def test_list_primitives_order():
df = list_primitives()
all_primitives = get_transform_primitives()
all_primitives.update(get_aggregation_primitives())
for name, primitive in all_primitives.items():
assert name in df['name'].values
row = df.loc[df['name'] == name].iloc[0]
actual_desc = _get_descriptions([primitive])[0]
if actual_desc:
assert actual_desc == row['description']
types = df['type'].values
assert 'aggregation' in types
assert 'transform' in types
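# Illustrative rows from list_primitives() that the checks above exercise
# (descriptions match test_descriptions below; other columns are omitted):
#
#   name   type          description
#   last   aggregation   Determines the last value in a list.
#   day    transform     Determines the day of the month from a datetime.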
def test_descriptions():
primitives = {NumCharacters: 'Calculates the number of characters in a string.',
Day: 'Determines the day of the month from a datetime.',
Last: 'Determines the last value in a list.',
GreaterThan: 'Determines if values in one list are greater than another list.'}
assert _get_descriptions(list(primitives.keys())) == list(primitives.values())
| bsd-3-clause | -1,712,432,361,529,430,500 | 34.085714 | 97 | 0.666938 | false | 4.093333 | false | false | false |
m4rx9/rna-pdb-tools | rna_tools/tools/PyMOL4RNA/PyMOL4Spliceosome.py | 2 | 11183 | """
See the PyMOL Sessions processed with this code here <https://github.com/mmagnus/PyMOL4Spliceosome>
"""
from pymol import cmd
from rna_tools.tools.PyMOL4RNA import code_for_color_spl
from rna_tools.tools.PyMOL4RNA import code_for_spl
try:
from pymol import cmd
except ImportError:
print("PyMOL Python lib is missing")
# sys.exit(0)
def spl(arg=''):
"""
action='', name=''
"""
if ' ' in arg:
action, name = arg.split()
name = name.lower()
else:
action = arg
name = ''
#import pandas as pd
#df = pd.read_excel("/home/magnus/Desktop/pyMoL_colors-EMX.xlsx")
if not action or action == 'help':
spl_help()
elif action == 'color' or arg=='c':
code_for_color_spl.spl_color()
elif arg == 'extract all' or arg == 'ea' or arg == 'e':
code_for_spl.spl_extract()
elif arg.startswith('hprp28'):
cmd.do("color purple, PRP28_h* and resi 240-361") # RecA1
cmd.do("color blue, PRP28_h* and resi 361-631") # RecA1
cmd.do("color orange, PRP28_h* and resi 631-811") # RecA2
elif arg.startswith('hprp8'):
print("RT, skyblue, 885-1251")
print("Thumb/X, cyan, 1257-1375")
cmd.do("color yellow, PRP8_h* and resi 1581-1752") # rt
cmd.do("color wheat, PRP8_h* and resi 1767-2020") # rh
cmd.do("color salmon, PRP8_h* and resi 2103-2234") # jab
cmd.do("color smudge, PRP8_h* and resi 1304-1577") # linker
cmd.do("color skyblue, PRP8_h* and resi 812-1303") # rt
elif arg.startswith('prp8'):
print("RT, skyblue, 885-1251")
print("Thumb/X, cyan, 1257-1375")
cmd.do("color skyblue, PRP8_y* and resi 885-1251") # rt
cmd.do("color cyan, PRP8_y* and resi 1257-1375") # thumb/x
cmd.do("color smudge, PRP8_y* and resi 1376-1649") # linker
cmd.do("color wheat, PRP8_y* and resi 1840-2090") # rh
cmd.do("color salmon, PRP8_y* and resi 2150-2395") # jab
cmd.do("color yellow, PRP8_y* and resi 1650-1840") # endo
    elif any(key in arg.lower() for key in ('hjab', 'hlinker', 'hrt', 'hrh', 'he')):
if 'hjab' in arg.lower():
cmd.select('PRP8_h* and resi 2103-2234')
if 'hlinker' in arg.lower():
cmd.select('PRP8_h* and resi 1304-1577')
if 'hrt' in arg.lower():
cmd.select('PRP8_h* and resi 812-1303')
if 'hrh' in arg.lower():
cmd.select('PRP8_h* and resi 1767-2020')
if 'he' in arg.lower():
cmd.select('PRP8_h* and resi 1581-1752')
elif arg == 'align' or arg=='a':
cmd.do("""
align /5gm6//6, /5lj3//V;
align /5mps//6, /5lj3//V;
align /6exn//6, /5lj3//V;
align /5y88//D, /5lj3//V;
align /5ylz//D, /5lj3//V;
""")
else:
spl_help()
cmd.extend('spl', spl)
def spl_help():
print("""################ SPL #################
extract all (ea) - show
colors - list all colors
######################################
""")
spl_help()
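# Illustrative console usage once this module is loaded in PyMOL
# (object/selection names depend on the loaded session):
#
#   spl color     # apply the color scheme via code_for_color_spl.spl_color()
#   spl ea        # run code_for_spl.spl_extract()
#   spl hprp8     # color the human Prp8 domains handled in spl() above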
def __spl_color():
for m in mapping:
protein = m[0]
chain = m[1]
color = m[2]
print('\_' + ' '.join([protein, chain, color]))
cmd.do('color ' + color + ', chain ' + chain)
# cmd.do('color firebrick, chain V') # U6
def _spl_color():
"""Color spl RNAs (for only color spl RNA and use 4-color code for residues see `spl2`)
"""
AllObj = cmd.get_names("all")
for name in AllObj:
if 'Exon' in name or 'exon' in name:
cmd.color('yellow', name)
if 'Intron' in name or 'intron' in name or '5splicing-site' in name:
cmd.color('gray40', name)
if '3exon-intron' in name.lower():
cmd.color('gray20', name)
if name.startswith("U2_snRNA"):
cmd.color('forest', name)
if name.startswith("U5_snRNA"):
cmd.color('blue', name)
if name.startswith("U4_snRNA"):
cmd.color('orange', name)
if name.startswith("U6_snRNA"):
cmd.color('red', name)
cmd.do('color gray')
    # tri-snRNP
cmd.do('color orange, chain V') # conflict
cmd.do('color red, chain W')
cmd.do('color blue, chain U')
#
cmd.do('color blue, chain 5')
cmd.do('color forest, chain 2')
cmd.do('color red, chain 6')
cmd.do('color orange, chain 4')
cmd.do('color yellow, chain Y')
# shi
cmd.do('color blue, chain D') # u5
cmd.do('color forest, chain L') # u2
cmd.do('color red, chain E') # u6
cmd.do('color yellow, chain M')
cmd.do('color yellow, chain N')
    # after branch
cmd.do('color blue, chain U') # u5
cmd.do('color forest, chain Z') # u2
cmd.do('color red, chain V') # u6
cmd.do('color yellow, chain E')
cmd.do('color black, chain I')
# 5WSG
# Cryo-EM structure of the Catalytic Step II spliceosome (C* complex) at 4.0 angstrom resolution
cmd.do('color blue, chain D') # u5
#cmd.do('color forest, chain L') # u2
cmd.do('color yellow, chain B')
cmd.do('color yellow, chain b')
cmd.do('color black, chain N')
cmd.do('color black, chain M')
cmd.do('color black, chain 3') # orange
cmd.do('color black, chain E') # yellow
cmd.do('color black, chain i')
cmd.do('color black, chain e')
cmd.do('color black, chain e')
cmd.do('color dirtyviolet, chain L') # bud31
    cmd.do('color raspberry, chain L') # CEF1
cmd.do('color skyblue, chain A') # PRP8
cmd.do('color grey60, chain B') # BRR2
    cmd.do('color dirtyviolet, chain L') # BUD31
    cmd.do('color raspberry, chain O') # CEF1
    cmd.do('color raspberry, chain S') # CLF1
cmd.do('color dirtyviolet, chain P') # CWC15
cmd.do('color lightteal, chain D') # CWC16/YJU2
cmd.do('color ruby, chain M') # CWC2
cmd.do('color violetpurple, chain R') # CWC21
cmd.do('color bluewhite, chain H') # CWC22
cmd.do('color deepteal, chain F') # CWC25
cmd.do('color black, chain I') # Intron
cmd.do('color dirtyviolet, chain G') # ISY1
cmd.do('color palegreen, chain W') # LEA1
cmd.do('color palegreen, chain Y') # Msl1
cmd.do('color lightpink, chain K') # PRP45
cmd.do('color smudge, chain Q') # Prp16
cmd.do('color grey70, chain t') # Prp19
cmd.do('color lightblue, chain J') # PRP46
cmd.do('color chocolate, chain N') # SLT11/ECM2
cmd.do('color grey70, chain s') # Snt309
cmd.do('color slate, chain C') # SNU114
cmd.do('color brightorange, chain T') # SYF1
cmd.do('color forest, chain Z') # U2
cmd.do('color density, chain U') # U5
cmd.do('color deepblue, chain b') # U5_Sm
cmd.do('bg gray')
# cmd.do('remove (polymer.protein)')
cmd.set("cartoon_tube_radius", 1.0)
ino()
def spl2():
"""Color spl RNAs and use 4-color code for residues (for only color spl RNA see `spl`)
"""
AllObj = cmd.get_names("all")
for name in AllObj:
if 'Exon' in name or 'exon' in name:
cmd.color('yellow', name)
if 'Intron' in name or 'intron' in name or '5splicing-site' in name:
cmd.color('gray40', name)
if '3exon-intron' in name.lower():
cmd.color('gray20', name)
if name.startswith("U2_snRNA"):
cmd.color('forest', name)
if name.startswith("U5_snRNA"):
cmd.color('blue', name)
if name.startswith("U4_snRNA"):
cmd.color('orange', name)
if name.startswith("U6_snRNA"):
cmd.color('red', name)
cmd.do('color gray')
    # tri-snRNP
cmd.do('color orange, chain V') # conflict
cmd.do('color red, chain W')
cmd.do('color blue, chain U')
#
cmd.do('color blue, chain 5')
cmd.do('color forest, chain 2')
cmd.do('color red, chain 6')
cmd.do('color orange, chain 4')
cmd.do('color yellow, chain Y')
# shi
cmd.do('color blue, chain D') # u5
cmd.do('color forest, chain L') # u2
cmd.do('color red, chain E') # u6
cmd.do('color yellow, chain M')
cmd.do('color yellow, chain N')
    # after branch
cmd.do('color blue, chain U') # u5
cmd.do('color forest, chain Z') # u2
cmd.do('color red, chain V') # u6
cmd.do('color yellow, chain E')
cmd.do('color black, chain I')
# 5WSG
# Cryo-EM structure of the Catalytic Step II spliceosome (C* complex) at 4.0 angstrom resolution
cmd.do('color blue, chain D') # u5
#cmd.do('color forest, chain L') # u2
cmd.do('color yellow, chain B')
cmd.do('color yellow, chain b')
cmd.do('color black, chain N')
cmd.do('color black, chain M')
cmd.do('color black, chain 3') # orange
cmd.do('color black, chain E') # yellow
cmd.do('color black, chain i')
cmd.do('color black, chain e')
cmd.do('bg gray')
cmd.do('remove (polymer.protein)')
cmd.color("red",'resn rG+G and name n1+c6+o6+c5+c4+n7+c8+n9+n3+c2+n1+n2')
cmd.color("forest",'resn rC+C and name n1+c2+o2+n3+c4+n4+c5+c6')
cmd.color("orange",'resn rA+A and name n1+c6+n6+c5+n7+c8+n9+c4+n3+c2')
cmd.color("blue",'resn rU+U and name n3+c4+o4+c5+c6+n1+c2+o2')
cmd.set("cartoon_tube_radius", 1.0)
ino()
def _spli():
"""
# this trick is taken from Rhiju's Das code
color red,resn rG+G and name n1+c6+o6+c5+c4+n7+c8+n9+n3+c2+n1+n2
color forest,resn rC+C and name n1+c2+o2+n3+c4+n4+c5+c6
color orange, resn rA+A and name n1+c6+n6+c5+n7+c8+n9+c4+n3+c2
color blue, resn rU+U and name n3+c4+o4+c5+c6+n1+c2+o2
#
#cmd.color("yellow", "*intron*")
#cmd.color("yellow", "*exon*")
#cmd.show("spheres", "inorganic")
#cmd.color("yellow", "inorganic")
"""
cmd.color("orange", "U4_snRNA*")
cmd.color("red", "U6_snRNA*")
cmd.color("blue", "U5_snRNA*")
cmd.color("green", "U2_snRNA*")
cmd.color("red",'resn rG+G and name n1+c6+o6+c5+c4+n7+c8+n9+n3+c2+n1+n2')
cmd.color("forest",'resn rC+C and name n1+c2+o2+n3+c4+n4+c5+c6')
cmd.color("orange",'resn rA+A and name n1+c6+n6+c5+n7+c8+n9+c4+n3+c2')
cmd.color("blue",'resn rU+U and name n3+c4+o4+c5+c6+n1+c2+o2')
try:
from pymol import cmd
except ImportError:
print("PyMOL Python lib is missing")
else:
#cmd.extend("spl", spl)
cmd.extend("spl2", spl2)
# colors taken from https://github.com/maxewilkinson/Spliceosome-PyMOL-sessions
cmd.set_color('lightgreen', [144, 238, 144])
cmd.set_color('darkgreen', [0, 100, 0])
cmd.set_color('darkseagreen', [143, 188, 143])
cmd.set_color('greenyellow', [173, 255, 47])
cmd.set_color('coral', [255, 127, 80])
cmd.set_color('darkorange', [255, 140, 0])
cmd.set_color('gold', [255, 215, 0])
cmd.set_color('lemonchiffon', [255,250,205])
cmd.set_color('moccasin', [255,228,181])
cmd.set_color('skyblue', [135,206,235])
cmd.set_color('lightyellow', [255,255,224])
cmd.set_color('powderblue', [176,224,230])
cmd.set_color('royalblue', [65,105,225])
cmd.set_color('cornflowerblue', [100,149,237])
cmd.set_color('steelblue', [70,130,180])
cmd.set_color('lightsteelblue', [176,196,222])
cmd.set_color('violetBlue', [40, 0, 120])
cmd.set_color('mediumpurple', [147,112,219])
print("""
PyMOL4Spliceosome
-----------------------
spl hprp8
spl prp8
""")
| mit | -8,813,542,898,538,824,000 | 33.622291 | 100 | 0.582223 | false | 2.773562 | false | false | false |