repo_name (stringlengths 6 to 61) | path (stringlengths 4 to 230) | copies (stringlengths 1 to 3) | size (stringlengths 4 to 6) | text (stringlengths 1.01k to 850k) | license (stringclasses 15 values) | hash (int64 -9,220,477,234,079,998,000 to 9,219,060,020B) | line_mean (float64 11.6 to 96.6) | line_max (int64 32 to 939) | alpha_frac (float64 0.26 to 0.9) | autogenerated (bool 1 class) | ratio (float64 1.62 to 6.1) | config_test (bool 2 classes) | has_no_keywords (bool 2 classes) | few_assignments (bool 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
jfrygeo/solutions-geoprocessing-toolbox | data_management/toolboxes/scripts/LineFeatureAngle.py | 2 | 4563 | #------------------------------------------------------------------------------
# Copyright 2013 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
# for line features, finds the bearing angle from the first point to the last point
import os, sys, traceback, math
import arcpy
def Geo2Arithmetic(inAngle):
    inAngle = math.fmod(inAngle, 360.0)
    # fmod can return a negative value; normalize into [0, 360)
    if inAngle < 0.0:
        inAngle += 360.0
    # 0 to 90
    if inAngle <= 90.0:
        outAngle = math.fabs(inAngle - 90.0)
    # 90 to 360
    else:
        outAngle = 360.0 - (inAngle - 90.0)
    return float(outAngle)
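# Illustrative mapping (added for clarity, values not from the source data): the
# conversion above is its own inverse, so sample geographic bearings map to
# arithmetic angles as
#   Geo2Arithmetic(0.0)   -> 90.0   (north)
#   Geo2Arithmetic(90.0)  -> 0.0    (east)
#   Geo2Arithmetic(180.0) -> 270.0  (south)
#   Geo2Arithmetic(270.0) -> 180.0  (west)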
inputFeatures = arcpy.GetParameterAsText(0) # C:\Workspace\ArcGIS Defense 10.1\path slope\default.gdb\roads
inputAngleField = arcpy.GetParameterAsText(1) # aoo
deleteme = []
debug = False
try:
arcpy.AddMessage("Updating " + inputAngleField + " field for " + str(arcpy.GetCount_management(inputFeatures).getOutput(0)) + " rows ...")
with arcpy.da.UpdateCursor(inputFeatures,["OID@","SHAPE@",inputAngleField]) as rows:
for row in rows:
angle = None
geometry = row[1] # firstPoint, lastPoint
firstPoint = geometry.firstPoint
lastPoint = geometry.lastPoint
xdiff = (lastPoint.X - firstPoint.X)
ydiff = (lastPoint.Y - firstPoint.Y)
#distance = math.sqrt(math.pow(xdiff,2.0) + math.pow(ydiff,2.0))
# Convert from quadrants to arithmetic
if (xdiff == 0.0 and ydiff > 0.0):
# vertical line, slope infinity
angle = 90.0
if (xdiff == 0.0 and ydiff < 0.0):
# vertical line, slope infinity
angle = 270.0
if (xdiff > 0.0 and ydiff == 0.0):
angle = 0.0
if (xdiff < 0.0 and ydiff == 0.0):
angle = 180.0
if (xdiff > 0.0 and ydiff > 0.0): # Quadrant I (+,+)
angle = math.degrees(math.atan(ydiff/xdiff))
if (xdiff < 0.0 and ydiff > 0.0): # Quadrant II (-,+)
angle = 180.0 - math.fabs(math.degrees(math.atan(ydiff/xdiff)))
if (xdiff < 0.0 and ydiff < 0.0): # Quadrant III (-,-)
angle = 180.0 + math.fabs(math.degrees(math.atan(ydiff/xdiff)))
if (xdiff > 0.0 and ydiff < 0.0): # Quadrant IV (+,-)
angle = 360.0 - math.fabs(math.degrees(math.atan(ydiff/xdiff)))
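            # Worked example (illustrative values, not from the source data): a segment
            # from (0, 0) to (1, -1) has xdiff = 1, ydiff = -1 (Quadrant IV), so
            # angle = 360 - |atan(-1/1)| = 315 degrees (arithmetic), and
            # Geo2Arithmetic(315) = 135, i.e. a south-east bearing.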
#if debug == True: arcpy.AddMessage(str(xdiff) + " -- " + str(angle) + " -- " + str(ydiff))
if not angle == None:
row[2] = Geo2Arithmetic(angle)
else:
arcpy.AddWarning("Empty angle for feature " + str(row[0]) + ". This could be a closed loop feature.")
row[2] = None
#if debug == True: arcpy.AddMessage(" " + str(row))
rows.updateRow(row)
arcpy.SetParameter(2,inputFeatures)
except arcpy.ExecuteError:
# Get the tool error messages
msgs = arcpy.GetMessages()
arcpy.AddError(msgs)
#print msgs #UPDATE
print (msgs)
except:
# Get the traceback object
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
# Concatenate information together concerning the error into a message string
pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"
# Return python error messages for use in script tool or Python Window
arcpy.AddError(pymsg)
arcpy.AddError(msgs)
# Print Python error messages for use in Python / Python Window
#print pymsg + "\n" UPDATE
print ((pymsg + "\n"))
#print msgs #UPDATE
print (msgs)
finally:
# cleanup intermediate datasets
if debug == True: arcpy.AddMessage("Removing intermediate datasets...")
for i in deleteme:
if debug == True: arcpy.AddMessage("Removing: " + str(i))
arcpy.Delete_management(i)
if debug == True: arcpy.AddMessage("Done")
| apache-2.0 | 7,598,028,985,885,510,000 | 39.026316 | 142 | 0.586895 | false | 3.627186 | false | false | false |
sergiosuarez/MapTweets | mysite/models.py | 1 | 1918 | from django.db import models
import string
class Tweet(models.Model):
user = models.BigIntegerField()
tid = models.BigIntegerField()
lat = models.FloatField()
lon = models.FloatField()
text = models.TextField(max_length=256)
time = models.DateField()
kwd = models.CharField(max_length=50)
class Tweets(models.Model):
id = models.IntegerField(primary_key=True) # AutoField?
longitude = models.CharField(max_length=255, blank=True)
latitude = models.CharField(max_length=255, blank=True)
tweet_date = models.CharField(max_length=255, blank=True)
tweet_time = models.CharField(max_length=255, blank=True)
user_id = models.CharField(max_length=255, blank=True)
user_location = models.CharField(max_length=255, blank=True)
user_lang = models.CharField(max_length=255, blank=True)
text_id = models.CharField(max_length=255, blank=True)
text_msg = models.CharField(max_length=255, blank=True)
class Meta:
managed = False
db_table = 'tweets'
class Grids(models.Model):
id_grid = models.IntegerField(primary_key=True)
lat_ini = models.FloatField(blank=True, null=True)
lat_fin = models.FloatField(blank=True, null=True)
long_ini = models.FloatField(blank=True, null=True)
long_fin = models.FloatField(blank=True, null=True)
class Meta:
managed = False
db_table = 'grids'
class UserTweets(models.Model):
id = models.TextField(primary_key=True)
number = models.TextField(blank=True)
n = models.BigIntegerField(blank=True, null=True)
class Meta:
managed = False
db_table = 'user_tweets'
class TwTabTime(models.Model):
id_time = models.IntegerField(primary_key=True)
lapso = models.CharField(max_length=30)
cantidad_twts = models.IntegerField(blank=True, null=True)
class Meta:
managed = False
db_table = 'tw_tab_time'
| mit | 792,373,430,063,922,400 | 32.068966 | 64 | 0.684046 | false | 3.455856 | false | false | false |
araksin/leandiary | diary/models.py | 1 | 1148 | from django.db import models
from django.contrib.auth.models import User
class Product(models.Model):
"""
A product is an entity related to any food
    having a nutritional composition
"""
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=30)
# Proteins/fats/carbohydrates
pfc = models.CharField(max_length=10)
    # Return the nutritional values as a list
def get_pfc(self):
return self.pfc.split("/")
cal_value = models.IntegerField(default=0)
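# Illustrative usage (hypothetical values, not part of the original app):
#   p = Product(name="Oatmeal", pfc="12/6/60", cal_value=350)
#   p.get_pfc()  # -> ['12', '6', '60']  (proteins/fats/carbohydrates as strings)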
class Record(models.Model):
"""
    A record is the main entity in the diary:
    it stores what and when a user ate
"""
id = models.AutoField(primary_key=True)
datetime = models.DateTimeField()
user = models.ForeignKey(User)
products = models.ManyToManyField(Product, through='RecordProducts')
class RecordProducts(models.Model):
"""
Extending default many-to-many relationship table to add a weight for each product
"""
record = models.ForeignKey(Record, on_delete=models.CASCADE)
product = models.ForeignKey(Product, on_delete=models.CASCADE)
weight = models.IntegerField()
| mit | -669,879,884,202,535,700 | 28.435897 | 86 | 0.699477 | false | 3.891525 | false | false | false |
ASKBOT/python-import-utils | import_utils/__init__.py | 1 | 1790 | """Utilities for loading modules,
supporting programmatic imports of the following kinds::
>>>from import_utils import import_module_from, import_module
>>>import x as mod_x
>>>mod_x = import_module('x')
>>>
>>>import x.y as mod_xy
>>>mod_xy = import_module('x.y')
>>>
>>>from x.y import z
>>>z = import_module_from('x.y.z')
"""
import sys
__version__ = '0.0.1'
def import_module_from(mod_path):
"""``mod_path`` is python path to module.
Examples:
1) call with dotted path:
>>>import_module_from('x.y.z')
is equivalent to
>>>from x.y import z
2) call with path without dots:
>>>import_module_from('x')
is the same as
>>>import x
"""
if '.' in mod_path:
bits = mod_path.split('.')
mod_name = bits.pop()
mod_path = '.'.join(bits)
return import_module(mod_path, mod_name)
else:
return import_module(mod_path)
def import_module(mod_path, mod_name = None):
"""first parameter must be
a dotted python module path,
second parameter is optional - module name.
Examples:
1) call with one parameter:
>>>import_module('x.y.z')
is equivalent to
>>>import x.y.z
2) call with two parameters
>>>import_module('x.y', 'z')
is equivalent to
    >>> from x.y import z
Relative imports are not supported
"""
if mod_name is None:
try:
return sys.modules[mod_path]
except KeyError:
__import__(mod_path)
return sys.modules[mod_path]
else:
if mod_name.find('.') != -1:
raise ValueError('second argument to import_module must not contain dots')
mod_ = __import__(mod_path, globals(), locals(), [mod_name,], -1)
return getattr(mod_, mod_name)
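# Usage sketch (added for illustration; mirrors the doctest-style examples in the
# docstrings above, using only standard-library modules):
#   >>> path_mod = import_module_from('os.path')   # same effect as: from os import path
#   >>> os_mod = import_module('os')                # same effect as: import os
#   >>> import_module('os', 'path') is path_mod     # True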
| bsd-2-clause | -4,917,323,192,772,831,000 | 24.942029 | 86 | 0.575978 | false | 3.594378 | false | false | false |
avanzosc/event-wip | event_registration_analytic/models/event.py | 1 | 12735 | # -*- coding: utf-8 -*-
# (c) 2016 Alfredo de la Fuente - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import models, fields, api, _
class EventEvent(models.Model):
_inherit = 'event.event'
no_employee_registration_ids = fields.One2many(
comodel_name='event.registration', inverse_name='event_id',
string='Registered students', readonly=False,
states={'done': [('readonly', True)]},
domain=[('employee', '=', False)])
employee_registration_ids = fields.One2many(
comodel_name='event.registration', inverse_name='event_id',
string='Registered teachers', readonly=False,
states={'done': [('readonly', True)]},
domain=[('employee', '!=', False)])
count_all_registrations = fields.Integer(
string='All assistants',
compute='_count_registrations')
count_teacher_registrations = fields.Integer(
string='Teacher assistants',
compute='_count_registrations')
count_pickings = fields.Integer(
string='Pickings',
compute='_compute_count_teacher_pickings_moves')
count_moves = fields.Integer(
string='Moves',
compute='_compute_count_teacher_pickings_moves')
seats_canceled = fields.Integer(
string='Canceled registrations', store=True, readonly=True,
compute='_compute_seats')
count_presences = fields.Integer(
string='Presences',
compute='_compute_count_presences')
count_parents = fields.Integer(
string='Parents',
compute='_compute_count_parents')
@api.multi
@api.depends('registration_ids')
def _count_registrations(self):
for record in self:
super(EventEvent, record)._count_registrations()
record.count_registrations =\
len(record.no_employee_registration_ids)
record.count_all_registrations = len(record.registration_ids)
record.count_teacher_registrations =\
len(record.employee_registration_ids)
@api.multi
@api.depends('no_employee_registration_ids',
'no_employee_registration_ids.state',
'no_employee_registration_ids.partner_id',
'no_employee_registration_ids.partner_id.parent_id')
def _compute_count_parents(self):
for event in self:
reg = event.no_employee_registration_ids.filtered(
lambda x: x.state in ('done', 'open'))
event.count_parents = len(reg.mapped('partner_id.parent_id'))
@api.multi
def _compute_count_teacher_pickings_moves(self):
picking_obj = self.env['stock.picking']
move_obj = self.env['stock.move']
for event in self:
partners = event.mapped('employee_registration_ids.partner_id')
cond = [('partner_id', 'in', partners.ids)]
pickings = picking_obj.search(cond)
event.count_pickings = len(pickings)
cond = [('picking_id.partner_id', 'in', partners.ids)]
moves = move_obj.search(cond)
event.count_moves = len(moves)
@api.multi
def _compute_count_presences(self):
for event in self:
event.count_presences = len(event.mapped('track_ids.presences'))
@api.multi
@api.depends('seats_max', 'registration_ids', 'registration_ids.state',
'registration_ids.nb_register')
def _compute_seats(self):
super(EventEvent, self)._compute_seats()
for event in self:
event.seats_unconfirmed = len(
event.no_employee_registration_ids.filtered(
lambda x: x.state == 'draft'))
event.seats_reserved = len(
event.no_employee_registration_ids.filtered(
lambda x: x.state in ('open', 'done')))
event.seats_canceled = len(
event.no_employee_registration_ids.filtered(
lambda x: x.state == 'cancel'))
event.seats_available = (event.seats_unconfirmed +
event.seats_reserved)
def _create_event_from_sale(self, by_task, sale, line=False):
event = super(EventEvent, self)._create_event_from_sale(
by_task, sale, line=line)
if by_task:
self._create_event_ticket(event, line)
else:
sale_lines = sale.order_line.filtered(
lambda x: x.recurring_service)
for line in sale_lines:
self._create_event_ticket(event, line)
return event
def _create_event_ticket(self, event, line):
ticket_obj = self.env['event.event.ticket']
line.product_id.event_ok = True
ticket_vals = {'event_id': event.id,
'product_id': line.product_id.id,
'name': line.name,
'price': line.price_subtotal,
'sale_line': line.id}
ticket_obj.create(ticket_vals)
@api.multi
def write(self, vals):
if (vals.get('employee_registration_ids', False) and
vals.get('no_employee_registration_ids', False)):
new_lines = []
for line in vals.get('no_employee_registration_ids'):
if line[0] != 2 and line[2] is not False:
new_lines.append(line)
if new_lines:
vals['no_employee_registration_ids'] = new_lines
else:
vals.pop('no_employee_registration_ids')
new_lines = []
for line in vals.get('employee_registration_ids'):
if line[0] != 2 and line[2] is not False:
new_lines.append(line)
if new_lines:
vals['employee_registration_ids'] = new_lines
else:
vals.pop('employee_registration_ids')
return super(EventEvent, self).write(vals)
@api.multi
def show_all_registrations(self):
self.ensure_one()
return {'name': _('Teacher assistants'),
'type': 'ir.actions.act_window',
'view_mode': 'tree,form,calendar,graph',
'view_type': 'form',
'res_model': 'event.registration',
'domain': [('id', 'in', self.registration_ids.ids)]}
@api.multi
def show_teacher_registrations(self):
self.ensure_one()
return {'name': _('Teacher assistants'),
'type': 'ir.actions.act_window',
'view_mode': 'tree,form,calendar,graph',
'view_type': 'form',
'res_model': 'event.registration',
'domain': [('id', 'in', self.employee_registration_ids.ids)]}
@api.multi
def button_show_parents(self):
self.ensure_one()
reg = self.no_employee_registration_ids.filtered(
lambda x: x.state in ('done', 'open'))
parents = reg.mapped('partner_id.parent_id')
return {'name': _('Parents'),
'type': 'ir.actions.act_window',
'view_mode': 'tree,form',
'view_type': 'form',
'res_model': 'res.partner',
'domain': [('id', 'in', parents.ids)]}
@api.multi
def show_presences(self):
self.ensure_one()
context = self.env.context.copy()
context.update({'search_default_students_filter': 1})
if context.get('group_by', False):
context.pop('group_by')
return {'name': _('Event presences'),
'type': 'ir.actions.act_window',
'view_mode': 'tree,form',
'view_type': 'form',
'res_model': 'event.track.presence',
'context': context,
'domain': [('id', 'in',
self.mapped('track_ids.presences').ids)]}
@api.multi
def show_teacher_pickings(self):
partners = self.mapped('employee_registration_ids.partner_id')
return {'name': _('Teachers pickings'),
'type': 'ir.actions.act_window',
'view_mode': 'tree,form,calendar',
'view_type': 'form',
'res_model': 'stock.picking',
'domain': [('partner_id', 'in', partners.ids)]}
@api.multi
def show_teacher_moves(self):
partners = self.mapped('employee_registration_ids.partner_id')
return {'name': _('Teachers moves'),
'type': 'ir.actions.act_window',
'view_mode': 'tree,form',
'view_type': 'form',
'res_model': 'stock.move',
'domain': [('picking_id.partner_id', 'in', partners.ids)]}
def _delete_canceled_presences_registrations(self):
for event in self:
presences = event.mapped('track_ids.presences').filtered(
lambda x: x.state == 'canceled')
presences.unlink()
registrations = event.registration_ids.filtered(
lambda x: x.state == 'cancel')
for registration in registrations:
presences = event.mapped('track_ids.presences').filtered(
lambda x: x.state != 'canceled' and
x.partner.id == registration.partner_id.id)
if not presences:
registration.analytic_account.unlink()
registration.write({'state': 'draft'})
registration.unlink()
class EventRegistration(models.Model):
_inherit = 'event.registration'
@api.depends('event_id', 'event_id.sale_order',
'event_id.sale_order.project_id',
'event_id.sale_order.project_id.recurring_invoices',
'employee', 'analytic_account')
def _calculate_required_account(self):
for reg in self:
reg.required_account = True
if (reg.employee or reg.analytic_account or
reg.event_id.sale_order.project_id.recurring_invoices):
reg.required_account = False
required_account = fields.Boolean(
string='Required account', compute='_calculate_required_account',
store=True)
analytic_account = fields.Many2one(
comodel_name='account.analytic.account', string='Analytic account')
employee = fields.Many2one(
comodel_name='hr.employee', string='Employee',
related='partner_id.employee_id', store=True)
parent_num_bank_accounts = fields.Integer(
string='# bank accounts', store=True,
related='partner_id.parent_num_bank_accounts')
parent_num_valid_mandates = fields.Integer(
string='# valid mandates', store=True,
related='partner_id.parent_num_valid_mandates')
@api.onchange('partner_id')
def _onchange_partner(self):
result = super(EventRegistration, self)._onchange_partner()
self.employee = self.partner_id.employee_id
return result
def _prepare_wizard_registration_open_vals(self):
wiz_vals = super(EventRegistration,
self)._prepare_wizard_registration_open_vals()
wiz_vals.update({'create_account': self.required_account})
return wiz_vals
@api.multi
def button_reg_cancel(self):
self.mapped('analytic_account').set_cancel()
super(EventRegistration, self).button_reg_cancel()
class EventEventTicket(models.Model):
_inherit = 'event.event.ticket'
sale_line = fields.Many2one(
comodel_name='sale.order.line', string='Sale line')
class EventTrackPresence(models.Model):
_inherit = 'event.track.presence'
employee = fields.Many2one(
comodel_name='hr.employee', string='Employee',
related='partner.employee_id', store=True)
class EventTrack(models.Model):
_inherit = 'event.track'
@api.depends('presences', 'presences.real_duration')
def _compute_real_duration(self):
for track in self:
track.real_duration = (max(track.mapped('presences.real_duration'))
if track.presences else 0)
no_employee_presences = fields.One2many(
comodel_name='event.track.presence', inverse_name='session',
string='Student presences', readonly=False,
domain=[('employee', '=', False)])
employee_presences = fields.One2many(
comodel_name='event.track.presence', inverse_name='session',
string='Teacher presences', readonly=False,
domain=[('employee', '!=', False)])
@api.multi
def write(self, vals):
if 'no_employee_presences' in vals and 'employee_presences' in vals:
vals.pop('presences', None)
return super(EventTrack, self).write(vals)
| agpl-3.0 | 6,228,787,012,194,687,000 | 39.557325 | 79 | 0.573773 | false | 4.000943 | false | false | false |
Maspear/odoo | addons/mass_mailing/models/mass_mailing.py | 68 | 27592 | # -*- coding: utf-8 -*-
from datetime import datetime
from dateutil import relativedelta
import json
import random
from openerp import tools
from openerp.exceptions import Warning
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
from openerp.tools import ustr
from openerp.osv import osv, fields
class MassMailingCategory(osv.Model):
"""Model of categories of mass mailing, i.e. marketing, newsletter, ... """
_name = 'mail.mass_mailing.category'
_description = 'Mass Mailing Category'
_order = 'name'
_columns = {
'name': fields.char('Name', required=True),
}
class MassMailingList(osv.Model):
"""Model of a contact list. """
_name = 'mail.mass_mailing.list'
_order = 'name'
_description = 'Mailing List'
def _get_contact_nbr(self, cr, uid, ids, name, arg, context=None):
result = dict.fromkeys(ids, 0)
Contacts = self.pool.get('mail.mass_mailing.contact')
for group in Contacts.read_group(cr, uid, [('list_id', 'in', ids), ('opt_out', '!=', True)], ['list_id'], ['list_id'], context=context):
result[group['list_id'][0]] = group['list_id_count']
return result
_columns = {
'name': fields.char('Mailing List', required=True),
'contact_nbr': fields.function(
_get_contact_nbr, type='integer',
string='Number of Contacts',
),
}
class MassMailingContact(osv.Model):
"""Model of a contact. This model is different from the partner model
because it holds only some basic information: name, email. The purpose is to
be able to deal with large contact list to email without bloating the partner
base."""
_name = 'mail.mass_mailing.contact'
_inherit = 'mail.thread'
_description = 'Mass Mailing Contact'
_order = 'email'
_rec_name = 'email'
_columns = {
'name': fields.char('Name'),
'email': fields.char('Email', required=True),
'create_date': fields.datetime('Create Date'),
'list_id': fields.many2one(
'mail.mass_mailing.list', string='Mailing List',
ondelete='cascade', required=True,
),
'opt_out': fields.boolean('Opt Out', help='The contact has chosen not to receive mails anymore from this list'),
}
def _get_latest_list(self, cr, uid, context={}):
lid = self.pool.get('mail.mass_mailing.list').search(cr, uid, [], limit=1, order='id desc', context=context)
return lid and lid[0] or False
_defaults = {
'list_id': _get_latest_list
}
def get_name_email(self, name, context):
name, email = self.pool['res.partner']._parse_partner_name(name, context=context)
if name and not email:
email = name
if email and not name:
name = email
return name, email
def name_create(self, cr, uid, name, context=None):
name, email = self.get_name_email(name, context=context)
rec_id = self.create(cr, uid, {'name': name, 'email': email}, context=context)
return self.name_get(cr, uid, [rec_id], context)[0]
def add_to_list(self, cr, uid, name, list_id, context=None):
name, email = self.get_name_email(name, context=context)
rec_id = self.create(cr, uid, {'name': name, 'email': email, 'list_id': list_id}, context=context)
return self.name_get(cr, uid, [rec_id], context)[0]
def message_get_default_recipients(self, cr, uid, ids, context=None):
res = {}
for record in self.browse(cr, uid, ids, context=context):
res[record.id] = {'partner_ids': [], 'email_to': record.email, 'email_cc': False}
return res
class MassMailingStage(osv.Model):
"""Stage for mass mailing campaigns. """
_name = 'mail.mass_mailing.stage'
_description = 'Mass Mailing Campaign Stage'
_order = 'sequence'
_columns = {
'name': fields.char('Name', required=True, translate=True),
'sequence': fields.integer('Sequence'),
}
_defaults = {
'sequence': 0,
}
class MassMailingCampaign(osv.Model):
"""Model of mass mailing campaigns. """
_name = "mail.mass_mailing.campaign"
_description = 'Mass Mailing Campaign'
def _get_statistics(self, cr, uid, ids, name, arg, context=None):
""" Compute statistics of the mass mailing campaign """
results = {}
cr.execute("""
SELECT
c.id as campaign_id,
COUNT(s.id) AS total,
COUNT(CASE WHEN s.sent is not null THEN 1 ELSE null END) AS sent,
COUNT(CASE WHEN s.scheduled is not null AND s.sent is null AND s.exception is null THEN 1 ELSE null END) AS scheduled,
COUNT(CASE WHEN s.scheduled is not null AND s.sent is null AND s.exception is not null THEN 1 ELSE null END) AS failed,
COUNT(CASE WHEN s.id is not null AND s.bounced is null THEN 1 ELSE null END) AS delivered,
COUNT(CASE WHEN s.opened is not null THEN 1 ELSE null END) AS opened,
COUNT(CASE WHEN s.replied is not null THEN 1 ELSE null END) AS replied ,
COUNT(CASE WHEN s.bounced is not null THEN 1 ELSE null END) AS bounced
FROM
mail_mail_statistics s
RIGHT JOIN
mail_mass_mailing_campaign c
ON (c.id = s.mass_mailing_campaign_id)
WHERE
c.id IN %s
GROUP BY
c.id
""", (tuple(ids), ))
for row in cr.dictfetchall():
results[row.pop('campaign_id')] = row
total = row['total'] or 1
row['delivered'] = row['sent'] - row['bounced']
row['received_ratio'] = 100.0 * row['delivered'] / total
row['opened_ratio'] = 100.0 * row['opened'] / total
row['replied_ratio'] = 100.0 * row['replied'] / total
return results
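    # Shape of the computed statistics, with made-up numbers for illustration
    # (keyed by campaign id):
    #   {7: {'total': 200, 'sent': 190, 'scheduled': 5, 'failed': 5, 'bounced': 10,
    #        'opened': 60, 'replied': 12, 'delivered': 180,
    #        'received_ratio': 90.0, 'opened_ratio': 30.0, 'replied_ratio': 6.0}}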
_columns = {
'name': fields.char('Name', required=True),
'stage_id': fields.many2one('mail.mass_mailing.stage', 'Stage', required=True),
'user_id': fields.many2one(
'res.users', 'Responsible',
required=True,
),
'category_ids': fields.many2many(
'mail.mass_mailing.category', 'mail_mass_mailing_category_rel',
'category_id', 'campaign_id', string='Categories'),
'mass_mailing_ids': fields.one2many(
'mail.mass_mailing', 'mass_mailing_campaign_id',
'Mass Mailings',
),
'unique_ab_testing': fields.boolean(
'AB Testing',
            help='If checked, recipients will be mailed only once, allowing to send '
                 'various mailings in a single campaign to test the effectiveness '
                 'of the mailings.'),
'color': fields.integer('Color Index'),
# stat fields
'total': fields.function(
_get_statistics, string='Total',
type='integer', multi='_get_statistics'
),
'scheduled': fields.function(
_get_statistics, string='Scheduled',
type='integer', multi='_get_statistics'
),
'failed': fields.function(
_get_statistics, string='Failed',
type='integer', multi='_get_statistics'
),
'sent': fields.function(
_get_statistics, string='Sent Emails',
type='integer', multi='_get_statistics'
),
'delivered': fields.function(
_get_statistics, string='Delivered',
type='integer', multi='_get_statistics',
),
'opened': fields.function(
_get_statistics, string='Opened',
type='integer', multi='_get_statistics',
),
'replied': fields.function(
_get_statistics, string='Replied',
type='integer', multi='_get_statistics'
),
'bounced': fields.function(
_get_statistics, string='Bounced',
type='integer', multi='_get_statistics'
),
'received_ratio': fields.function(
_get_statistics, string='Received Ratio',
type='integer', multi='_get_statistics',
),
'opened_ratio': fields.function(
_get_statistics, string='Opened Ratio',
type='integer', multi='_get_statistics',
),
'replied_ratio': fields.function(
_get_statistics, string='Replied Ratio',
type='integer', multi='_get_statistics',
),
}
def _get_default_stage_id(self, cr, uid, context=None):
stage_ids = self.pool['mail.mass_mailing.stage'].search(cr, uid, [], limit=1, context=context)
return stage_ids and stage_ids[0] or False
_defaults = {
'user_id': lambda self, cr, uid, ctx=None: uid,
'stage_id': lambda self, *args: self._get_default_stage_id(*args),
}
def get_recipients(self, cr, uid, ids, model=None, context=None):
"""Return the recipients of a mailing campaign. This is based on the statistics
        built for each mailing. """
Statistics = self.pool['mail.mail.statistics']
res = dict.fromkeys(ids, False)
for cid in ids:
domain = [('mass_mailing_campaign_id', '=', cid)]
if model:
domain += [('model', '=', model)]
stat_ids = Statistics.search(cr, uid, domain, context=context)
res[cid] = set(stat.res_id for stat in Statistics.browse(cr, uid, stat_ids, context=context))
return res
class MassMailing(osv.Model):
""" MassMailing models a wave of emails for a mass mailign campaign.
A mass mailing is an occurence of sending emails. """
_name = 'mail.mass_mailing'
_description = 'Mass Mailing'
# number of periods for tracking mail_mail statistics
_period_number = 6
_order = 'sent_date DESC'
def __get_bar_values(self, cr, uid, obj, domain, read_fields, value_field, groupby_field, date_begin, context=None):
""" Generic method to generate data for bar chart values using SparklineBarWidget.
This method performs obj.read_group(cr, uid, domain, read_fields, groupby_field).
:param obj: the target model (i.e. crm_lead)
:param domain: the domain applied to the read_group
:param list read_fields: the list of fields to read in the read_group
:param str value_field: the field used to compute the value of the bar slice
:param str groupby_field: the fields used to group
:return list section_result: a list of dicts: [
{ 'value': (int) bar_column_value,
              'tooltip': (str) bar_column_tooltip,
}
]
"""
date_begin = date_begin.date()
section_result = [{'value': 0,
'tooltip': ustr((date_begin + relativedelta.relativedelta(days=i)).strftime('%d %B %Y')),
} for i in range(0, self._period_number)]
group_obj = obj.read_group(cr, uid, domain, read_fields, groupby_field, context=context)
field = obj._fields.get(groupby_field.split(':')[0])
pattern = tools.DEFAULT_SERVER_DATE_FORMAT if field.type == 'date' else tools.DEFAULT_SERVER_DATETIME_FORMAT
for group in group_obj:
group_begin_date = datetime.strptime(group['__domain'][0][2], pattern).date()
timedelta = relativedelta.relativedelta(group_begin_date, date_begin)
section_result[timedelta.days] = {'value': group.get(value_field, 0), 'tooltip': group.get(groupby_field)}
return section_result
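    # Illustrative shape of the returned list (numbers made up): one dict per day over
    # _period_number days, e.g.
    #   [{'value': 12, 'tooltip': ...}, {'value': 0, 'tooltip': ...}, ...]
    # where empty days keep the default date tooltip and days with data take their
    # value/tooltip from the read_group result.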
def _get_daily_statistics(self, cr, uid, ids, field_name, arg, context=None):
""" Get the daily statistics of the mass mailing. This is done by a grouping
on opened and replied fields. Using custom format in context, we obtain
results for the next 6 days following the mass mailing date. """
obj = self.pool['mail.mail.statistics']
res = {}
for mailing in self.browse(cr, uid, ids, context=context):
res[mailing.id] = {}
date = mailing.sent_date if mailing.sent_date else mailing.create_date
date_begin = datetime.strptime(date, tools.DEFAULT_SERVER_DATETIME_FORMAT)
date_end = date_begin + relativedelta.relativedelta(days=self._period_number - 1)
date_begin_str = date_begin.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
date_end_str = date_end.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
domain = [('mass_mailing_id', '=', mailing.id), ('opened', '>=', date_begin_str), ('opened', '<=', date_end_str)]
res[mailing.id]['opened_daily'] = json.dumps(self.__get_bar_values(cr, uid, obj, domain, ['opened'], 'opened_count', 'opened:day', date_begin, context=context))
domain = [('mass_mailing_id', '=', mailing.id), ('replied', '>=', date_begin_str), ('replied', '<=', date_end_str)]
res[mailing.id]['replied_daily'] = json.dumps(self.__get_bar_values(cr, uid, obj, domain, ['replied'], 'replied_count', 'replied:day', date_begin, context=context))
return res
def _get_statistics(self, cr, uid, ids, name, arg, context=None):
""" Compute statistics of the mass mailing """
results = {}
cr.execute("""
SELECT
m.id as mailing_id,
COUNT(s.id) AS total,
COUNT(CASE WHEN s.sent is not null THEN 1 ELSE null END) AS sent,
COUNT(CASE WHEN s.scheduled is not null AND s.sent is null AND s.exception is null THEN 1 ELSE null END) AS scheduled,
COUNT(CASE WHEN s.scheduled is not null AND s.sent is null AND s.exception is not null THEN 1 ELSE null END) AS failed,
COUNT(CASE WHEN s.sent is not null AND s.bounced is null THEN 1 ELSE null END) AS delivered,
COUNT(CASE WHEN s.opened is not null THEN 1 ELSE null END) AS opened,
COUNT(CASE WHEN s.replied is not null THEN 1 ELSE null END) AS replied,
COUNT(CASE WHEN s.bounced is not null THEN 1 ELSE null END) AS bounced
FROM
mail_mail_statistics s
RIGHT JOIN
mail_mass_mailing m
ON (m.id = s.mass_mailing_id)
WHERE
m.id IN %s
GROUP BY
m.id
""", (tuple(ids), ))
for row in cr.dictfetchall():
results[row.pop('mailing_id')] = row
total = row['total'] or 1
row['received_ratio'] = 100.0 * row['delivered'] / total
row['opened_ratio'] = 100.0 * row['opened'] / total
row['replied_ratio'] = 100.0 * row['replied'] / total
return results
def _get_mailing_model(self, cr, uid, context=None):
res = []
for model_name in self.pool:
model = self.pool[model_name]
if hasattr(model, '_mail_mass_mailing') and getattr(model, '_mail_mass_mailing'):
res.append((model._name, getattr(model, '_mail_mass_mailing')))
res.append(('mail.mass_mailing.contact', _('Mailing List')))
return res
# indirections for inheritance
_mailing_model = lambda self, *args, **kwargs: self._get_mailing_model(*args, **kwargs)
_columns = {
'name': fields.char('Subject', required=True),
'email_from': fields.char('From', required=True),
'create_date': fields.datetime('Creation Date'),
'sent_date': fields.datetime('Sent Date', oldname='date', copy=False),
'body_html': fields.html('Body'),
'attachment_ids': fields.many2many(
'ir.attachment', 'mass_mailing_ir_attachments_rel',
'mass_mailing_id', 'attachment_id', 'Attachments'
),
'mass_mailing_campaign_id': fields.many2one(
'mail.mass_mailing.campaign', 'Mass Mailing Campaign',
ondelete='set null',
),
'state': fields.selection(
[('draft', 'Draft'), ('test', 'Tested'), ('done', 'Sent')],
string='Status', required=True, copy=False,
),
'color': fields.related(
'mass_mailing_campaign_id', 'color',
type='integer', string='Color Index',
),
# mailing options
'reply_to_mode': fields.selection(
[('thread', 'In Document'), ('email', 'Specified Email Address')],
string='Reply-To Mode', required=True,
),
'reply_to': fields.char('Reply To', help='Preferred Reply-To Address'),
# recipients
'mailing_model': fields.selection(_mailing_model, string='Recipients Model', required=True),
'mailing_domain': fields.char('Domain', oldname='domain'),
'contact_list_ids': fields.many2many(
'mail.mass_mailing.list', 'mail_mass_mailing_list_rel',
string='Mailing Lists',
),
'contact_ab_pc': fields.integer(
'AB Testing percentage',
help='Percentage of the contacts that will be mailed. Recipients will be taken randomly.'
),
# statistics data
'statistics_ids': fields.one2many(
'mail.mail.statistics', 'mass_mailing_id',
'Emails Statistics',
),
'total': fields.function(
_get_statistics, string='Total',
type='integer', multi='_get_statistics',
),
'scheduled': fields.function(
_get_statistics, string='Scheduled',
type='integer', multi='_get_statistics',
),
'failed': fields.function(
_get_statistics, string='Failed',
type='integer', multi='_get_statistics',
),
'sent': fields.function(
_get_statistics, string='Sent',
type='integer', multi='_get_statistics',
),
'delivered': fields.function(
_get_statistics, string='Delivered',
type='integer', multi='_get_statistics',
),
'opened': fields.function(
_get_statistics, string='Opened',
type='integer', multi='_get_statistics',
),
'replied': fields.function(
_get_statistics, string='Replied',
type='integer', multi='_get_statistics',
),
'bounced': fields.function(
_get_statistics, string='Bounced',
type='integer', multi='_get_statistics',
),
'received_ratio': fields.function(
_get_statistics, string='Received Ratio',
type='integer', multi='_get_statistics',
),
'opened_ratio': fields.function(
_get_statistics, string='Opened Ratio',
type='integer', multi='_get_statistics',
),
'replied_ratio': fields.function(
_get_statistics, string='Replied Ratio',
type='integer', multi='_get_statistics',
),
# daily ratio
'opened_daily': fields.function(
_get_daily_statistics, string='Opened',
type='char', multi='_get_daily_statistics',
),
'replied_daily': fields.function(
_get_daily_statistics, string='Replied',
type='char', multi='_get_daily_statistics',
)
}
def default_get(self, cr, uid, fields, context=None):
res = super(MassMailing, self).default_get(cr, uid, fields, context=context)
if 'reply_to_mode' in fields and not 'reply_to_mode' in res and res.get('mailing_model'):
if res['mailing_model'] in ['res.partner', 'mail.mass_mailing.contact']:
res['reply_to_mode'] = 'email'
else:
res['reply_to_mode'] = 'thread'
return res
_defaults = {
'state': 'draft',
'email_from': lambda self, cr, uid, ctx=None: self.pool['mail.message']._get_default_from(cr, uid, context=ctx),
'reply_to': lambda self, cr, uid, ctx=None: self.pool['mail.message']._get_default_from(cr, uid, context=ctx),
'mailing_model': 'mail.mass_mailing.contact',
'contact_ab_pc': 100,
'mailing_domain': [],
}
#------------------------------------------------------
# Technical stuff
#------------------------------------------------------
def copy_data(self, cr, uid, id, default=None, context=None):
mailing = self.browse(cr, uid, id, context=context)
default = dict(default or {},
name=_('%s (copy)') % mailing.name)
return super(MassMailing, self).copy_data(cr, uid, id, default, context=context)
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
""" Override read_group to always display all states. """
if groupby and groupby[0] == "state":
# Default result structure
# states = self._get_state_list(cr, uid, context=context)
states = [('draft', 'Draft'), ('test', 'Tested'), ('done', 'Sent')]
read_group_all_states = [{
'__context': {'group_by': groupby[1:]},
'__domain': domain + [('state', '=', state_value)],
'state': state_value,
'state_count': 0,
} for state_value, state_name in states]
# Get standard results
read_group_res = super(MassMailing, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby)
# Update standard results with default results
result = []
for state_value, state_name in states:
res = filter(lambda x: x['state'] == state_value, read_group_res)
if not res:
res = filter(lambda x: x['state'] == state_value, read_group_all_states)
res[0]['state'] = [state_value, state_name]
result.append(res[0])
return result
else:
return super(MassMailing, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby)
#------------------------------------------------------
# Views & Actions
#------------------------------------------------------
def on_change_model_and_list(self, cr, uid, ids, mailing_model, list_ids, context=None):
value = {}
if mailing_model == 'mail.mass_mailing.contact':
mailing_list_ids = set()
for item in list_ids:
if isinstance(item, (int, long)):
mailing_list_ids.add(item)
elif len(item) == 3:
mailing_list_ids |= set(item[2])
if mailing_list_ids:
value['mailing_domain'] = "[('list_id', 'in', %s), ('opt_out', '=', False)]" % list(mailing_list_ids)
else:
value['mailing_domain'] = "[('list_id', '=', False)]"
else:
value['mailing_domain'] = []
return {'value': value}
def action_duplicate(self, cr, uid, ids, context=None):
copy_id = None
for mid in ids:
copy_id = self.copy(cr, uid, mid, context=context)
if copy_id:
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.mass_mailing',
'res_id': copy_id,
'context': context,
}
return False
def action_test_mailing(self, cr, uid, ids, context=None):
ctx = dict(context, default_mass_mailing_id=ids[0])
return {
'name': _('Test Mailing'),
'type': 'ir.actions.act_window',
'view_mode': 'form',
'res_model': 'mail.mass_mailing.test',
'target': 'new',
'context': ctx,
}
def action_edit_html(self, cr, uid, ids, context=None):
if not len(ids) == 1:
raise ValueError('One and only one ID allowed for this action')
mail = self.browse(cr, uid, ids[0], context=context)
url = '/website_mail/email_designer?model=mail.mass_mailing&res_id=%d&template_model=%s&return_action=%d&enable_editor=1' % (ids[0], mail.mailing_model, context['params']['action'])
return {
'name': _('Open with Visual Editor'),
'type': 'ir.actions.act_url',
'url': url,
'target': 'self',
}
#------------------------------------------------------
# Email Sending
#------------------------------------------------------
def get_recipients(self, cr, uid, mailing, context=None):
if mailing.mailing_domain:
domain = eval(mailing.mailing_domain)
res_ids = self.pool[mailing.mailing_model].search(cr, uid, domain, context=context)
else:
res_ids = []
domain = [('id', 'in', res_ids)]
# randomly choose a fragment
if mailing.contact_ab_pc < 100:
contact_nbr = self.pool[mailing.mailing_model].search(cr, uid, domain, count=True, context=context)
topick = int(contact_nbr / 100.0 * mailing.contact_ab_pc)
if mailing.mass_mailing_campaign_id and mailing.mass_mailing_campaign_id.unique_ab_testing:
already_mailed = self.pool['mail.mass_mailing.campaign'].get_recipients(cr, uid, [mailing.mass_mailing_campaign_id.id], context=context)[mailing.mass_mailing_campaign_id.id]
else:
already_mailed = set([])
remaining = set(res_ids).difference(already_mailed)
if topick > len(remaining):
topick = len(remaining)
res_ids = random.sample(remaining, topick)
return res_ids
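    # Illustrative walk-through (hypothetical numbers): with 1000 matching records and
    # contact_ab_pc = 25, topick = int(1000 / 100.0 * 25) = 250; if the campaign has
    # unique_ab_testing set, ids already mailed in the campaign are excluded before
    # random.sample() draws the 250 recipients.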
def send_mail(self, cr, uid, ids, context=None):
author_id = self.pool['res.users'].browse(cr, uid, uid, context=context).partner_id.id
for mailing in self.browse(cr, uid, ids, context=context):
# instantiate an email composer + send emails
res_ids = self.get_recipients(cr, uid, mailing, context=context)
if not res_ids:
raise Warning('Please select recipients.')
comp_ctx = dict(context, active_ids=res_ids)
composer_values = {
'author_id': author_id,
'attachment_ids': [(4, attachment.id) for attachment in mailing.attachment_ids],
'body': mailing.body_html,
'subject': mailing.name,
'model': mailing.mailing_model,
'email_from': mailing.email_from,
'record_name': False,
'composition_mode': 'mass_mail',
'mass_mailing_id': mailing.id,
'mailing_list_ids': [(4, l.id) for l in mailing.contact_list_ids],
'no_auto_thread': mailing.reply_to_mode != 'thread',
}
if mailing.reply_to_mode == 'email':
composer_values['reply_to'] = mailing.reply_to
composer_id = self.pool['mail.compose.message'].create(cr, uid, composer_values, context=comp_ctx)
self.pool['mail.compose.message'].send_mail(cr, uid, [composer_id], context=comp_ctx)
self.write(cr, uid, [mailing.id], {'sent_date': fields.datetime.now(), 'state': 'done'}, context=context)
return True
| agpl-3.0 | 5,891,476,312,638,237,000 | 43.938111 | 189 | 0.56038 | false | 3.95641 | true | false | false |
nansencenter/nansat | nansat/mappers/mapper_radarsat2.py | 1 | 15452 | # Name: mapper_radarsat2
# Purpose: Nansat mapping for Radarsat2 data
# Authors: Morten W. Hansen, Knut-Frode Dagestad, Anton Korosov
# Licence: This file is part of NANSAT. You can redistribute it or modify
# under the terms of GNU General Public License, v.3
# http://www.gnu.org/licenses/gpl-3.0.html
from __future__ import unicode_literals, division, absolute_import
import os
import zipfile
import json
from dateutil.parser import parse
import numpy as np
try:
import scipy.ndimage
except:
IMPORT_SCIPY = False
else:
IMPORT_SCIPY = True
import pythesint as pti
from nansat.nsr import NSR
from nansat.vrt import VRT
from nansat.domain import Domain
from nansat.node import Node
from nansat.utils import initial_bearing, gdal
from nansat.exceptions import WrongMapperError, NansatReadError
class Mapper(VRT):
''' Create VRT with mapping of WKV for Radarsat2 '''
def __init__(self, inputFileName, gdalDataset, gdalMetadata,
xmlonly=False, **kwargs):
''' Create Radarsat2 VRT '''
fPathName, fExt = os.path.splitext(inputFileName)
if zipfile.is_zipfile(inputFileName):
# Open zip file using VSI
fPath, fName = os.path.split(fPathName)
filename = '/vsizip/%s/%s' % (inputFileName, fName)
if not 'RS' in fName[0:2]:
raise WrongMapperError('%s: Provided data is not Radarsat-2'
% fName)
gdalDataset = gdal.Open(filename)
gdalMetadata = gdalDataset.GetMetadata()
else:
filename = inputFileName
# if it is not RADARSAT-2, return
        if not gdalMetadata or 'SATELLITE_IDENTIFIER' not in gdalMetadata:
raise WrongMapperError(filename)
elif gdalMetadata['SATELLITE_IDENTIFIER'] != 'RADARSAT-2':
raise WrongMapperError(filename)
if zipfile.is_zipfile(inputFileName):
# Open product.xml to get additional metadata
zz = zipfile.ZipFile(inputFileName)
productXmlName = os.path.join(os.path.basename(
inputFileName).split('.')[0], 'product.xml')
productXml = zz.open(productXmlName).read()
else:
            # product.xml to get additional metadata
productXmlName = os.path.join(filename, 'product.xml')
if not os.path.isfile(productXmlName):
raise WrongMapperError(filename)
productXml = open(productXmlName).read()
if not IMPORT_SCIPY:
raise NansatReadError('Radarsat-2 data cannot be read because scipy is not installed')
# parse product.XML
rs2_0 = Node.create(productXml)
if xmlonly:
self.init_from_xml(rs2_0, filename)
return
# Get additional metadata from product.xml
rs2_1 = rs2_0.node('sourceAttributes')
rs2_2 = rs2_1.node('radarParameters')
if rs2_2['antennaPointing'].lower() == 'right':
antennaPointing = 90
else:
antennaPointing = -90
rs2_3 = rs2_1.node('orbitAndAttitude').node('orbitInformation')
passDirection = rs2_3['passDirection']
# create empty VRT dataset with geolocation only
self._init_from_gdal_dataset(gdalDataset)
self.dataset.SetGCPs(self.dataset.GetGCPs(), NSR().wkt)
# define dictionary of metadata and band specific parameters
pol = []
metaDict = []
# Get the subdataset with calibrated sigma0 only
for dataset in gdalDataset.GetSubDatasets():
if dataset[1] == 'Sigma Nought calibrated':
s0dataset = gdal.Open(dataset[0])
s0datasetName = dataset[0][:]
band = s0dataset.GetRasterBand(1)
s0datasetPol = band.GetMetadata()['POLARIMETRIC_INTERP']
for i in range(1, s0dataset.RasterCount+1):
iBand = s0dataset.GetRasterBand(i)
polString = iBand.GetMetadata()['POLARIMETRIC_INTERP']
suffix = polString
# The nansat data will be complex
# if the SAR data is of type 10
dtype = iBand.DataType
if dtype == 10:
# add intensity band
metaDict.append(
{'src': {'SourceFilename':
('RADARSAT_2_CALIB:SIGMA0:'
+ filename + '/product.xml'),
'SourceBand': i,
'DataType': dtype},
'dst': {'wkv': 'surface_backwards_scattering_coefficient_of_radar_wave',
'PixelFunctionType': 'intensity',
'SourceTransferType': gdal.GetDataTypeName(dtype),
'suffix': suffix,
'polarization': polString,
'dataType': 6}})
                        # modify suffix for adding the complex band below
suffix = polString+'_complex'
pol.append(polString)
metaDict.append(
{'src': {'SourceFilename': ('RADARSAT_2_CALIB:SIGMA0:'
+ filename
+ '/product.xml'),
'SourceBand': i,
'DataType': dtype},
'dst': {'wkv': 'surface_backwards_scattering_coefficient_of_radar_wave',
'suffix': suffix,
'polarization': polString}})
if dataset[1] == 'Beta Nought calibrated':
b0dataset = gdal.Open(dataset[0])
b0datasetName = dataset[0][:]
for j in range(1, b0dataset.RasterCount+1):
jBand = b0dataset.GetRasterBand(j)
polString = jBand.GetMetadata()['POLARIMETRIC_INTERP']
if polString == s0datasetPol:
b0datasetBand = j
###############################
# Add SAR look direction
###############################
d = Domain(ds=gdalDataset)
lon, lat = d.get_geolocation_grids(100)
'''
(GDAL?) Radarsat-2 data is stored with maximum latitude at first
element of each column and minimum longitude at first element of each
row (e.g. np.shape(lat)=(59,55) -> latitude maxima are at lat[0,:],
and longitude minima are at lon[:,0])
In addition, there is an interpolation error for direct estimate along
azimuth. We therefore estimate the heading along range and add 90
degrees to get the "satellite" heading.
'''
if str(passDirection).upper() == 'DESCENDING':
sat_heading = initial_bearing(lon[:, :-1], lat[:, :-1],
lon[:, 1:], lat[:, 1:]) + 90
elif str(passDirection).upper() == 'ASCENDING':
sat_heading = initial_bearing(lon[:, 1:], lat[:, 1:],
lon[:, :-1], lat[:, :-1]) + 90
else:
print('Can not decode pass direction: ' + str(passDirection))
# Calculate SAR look direction
look_direction = sat_heading + antennaPointing
# Interpolate to regain lost row
look_direction = np.mod(look_direction, 360)
look_direction = scipy.ndimage.interpolation.zoom(
look_direction, (1, 11./10.))
# Decompose, to avoid interpolation errors around 0 <-> 360
look_direction_u = np.sin(np.deg2rad(look_direction))
look_direction_v = np.cos(np.deg2rad(look_direction))
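        # Why decompose (illustrative numbers): interpolating the angles directly would
        # average e.g. 359 and 1 degrees to 180, whereas interpolating the sin/cos
        # components gives roughly sin = 0, cos = 1, i.e. ~0 degrees, which is the
        # intended look direction across the 0/360 wrap-around.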
look_u_VRT = VRT.from_array(look_direction_u)
look_v_VRT = VRT.from_array(look_direction_v)
# Note: If incidence angle and look direction are stored in
# same VRT, access time is about twice as large
lookVRT = VRT.from_lonlat(lon, lat)
lookVRT.create_band(
[{'SourceFilename': look_u_VRT.filename, 'SourceBand': 1},
{'SourceFilename': look_v_VRT.filename, 'SourceBand': 1}],
{'PixelFunctionType': 'UVToDirectionTo'})
# Blow up to full size
lookVRT = lookVRT.get_resized_vrt(gdalDataset.RasterXSize, gdalDataset.RasterYSize)
# Store VRTs so that they are accessible later
self.band_vrts['look_u_VRT'] = look_u_VRT
self.band_vrts['look_v_VRT'] = look_v_VRT
self.band_vrts['lookVRT'] = lookVRT
# Add band to full sized VRT
lookFileName = self.band_vrts['lookVRT'].filename
metaDict.append({'src': {'SourceFilename': lookFileName,
'SourceBand': 1},
'dst': {'wkv': 'sensor_azimuth_angle',
'name': 'look_direction'}})
###############################
# Create bands
###############################
self.create_bands(metaDict)
###################################################
# Add derived band (incidence angle) calculated
# using pixel function "BetaSigmaToIncidence":
###################################################
src = [{'SourceFilename': b0datasetName,
'SourceBand': b0datasetBand,
'DataType': dtype},
{'SourceFilename': s0datasetName,
'SourceBand': 1,
'DataType': dtype}]
dst = {'wkv': 'angle_of_incidence',
'PixelFunctionType': 'BetaSigmaToIncidence',
'SourceTransferType': gdal.GetDataTypeName(dtype),
'_FillValue': -10000, # NB: this is also hard-coded in
# pixelfunctions.c
'dataType': 6,
'name': 'incidence_angle'}
self.create_band(src, dst)
self.dataset.FlushCache()
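        # Note (added for clarity; assumes the standard SAR calibration relation):
        # beta0 and sigma0 differ only by the local incidence angle,
        # sigma0 = beta0 * sin(theta), so the pixel function can recover
        # theta = arcsin(sigma0 / beta0) for every pixel.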
###################################################################
# Add sigma0_VV - pixel function of sigma0_HH and beta0_HH
# incidence angle is calculated within pixel function
# It is assummed that HH is the first band in sigma0 and
# beta0 sub datasets
###################################################################
if 'VV' not in pol and 'HH' in pol:
s0datasetNameHH = pol.index('HH')+1
src = [{'SourceFilename': s0datasetName,
'SourceBand': s0datasetNameHH,
'DataType': 6},
{'SourceFilename': b0datasetName,
'SourceBand': b0datasetBand,
'DataType': 6}]
dst = {'wkv': 'surface_backwards_scattering_coefficient_of_radar_wave',
'PixelFunctionType': 'Sigma0HHBetaToSigma0VV',
'polarization': 'VV',
'suffix': 'VV'}
self.create_band(src, dst)
self.dataset.FlushCache()
############################################
# Add SAR metadata
############################################
if antennaPointing == 90:
self.dataset.SetMetadataItem('ANTENNA_POINTING', 'RIGHT')
if antennaPointing == -90:
self.dataset.SetMetadataItem('ANTENNA_POINTING', 'LEFT')
self.dataset.SetMetadataItem('ORBIT_DIRECTION',
str(passDirection).upper())
# set valid time
self.dataset.SetMetadataItem('time_coverage_start',
(parse(gdalMetadata['FIRST_LINE_TIME']).
isoformat()))
self.dataset.SetMetadataItem('time_coverage_end',
(parse(gdalMetadata['LAST_LINE_TIME']).
isoformat()))
# Get dictionary describing the instrument and platform according to
# the GCMD keywords
mm = pti.get_gcmd_instrument("C-SAR")
ee = pti.get_gcmd_platform('radarsat-2')
# TODO: Validate that the found instrument and platform are indeed what we
# want....
self.dataset.SetMetadataItem('instrument', json.dumps(mm))
self.dataset.SetMetadataItem('platform', json.dumps(ee))
self.dataset.SetMetadataItem('entry_title', 'Radarsat-2 SAR')
self.dataset.SetMetadataItem('provider', 'MDA/GSI')
self.dataset.SetMetadataItem('dataset_parameters', json.dumps(
['surface_backwards_scattering_coefficient_of_radar_wave']))
self.dataset.SetMetadataItem('entry_id', os.path.basename(filename))
def init_from_xml(self, productXml, filename):
''' Fast init from metada in XML only '''
numberOfLines = int(productXml
.node('imageAttributes')
.node('rasterAttributes')
.node('numberOfLines')
.value)
numberOfSamples = int(productXml
.node('imageAttributes')
.node('rasterAttributes')
.node('numberOfSamplesPerLine')
.value)
VRT.__init__(self, srcRasterXSize=numberOfSamples, srcRasterYSize=numberOfLines)
gcps = []
geogrid = productXml.node(
'imageAttributes').node('geographicInformation').node('geolocationGrid')
for child in geogrid.children:
pix = float(child.node('imageCoordinate').node('pixel').value)
lin = float(child.node('imageCoordinate').node('line').value)
lon = float(child.node('geodeticCoordinate').node('longitude').value)
lat = float(child.node('geodeticCoordinate').node('latitude').value)
gcps.append(gdal.GCP(lon, lat, 0, pix, lin))
self.dataset.SetGCPs(gcps, NSR().wkt)
dates = list(map(parse, [child.node('timeStamp').value for child in
(productXml.node('sourceAttributes')
.node('orbitAndAttitude')
.node('orbitInformation')
.nodeList('stateVector'))]))
self.dataset.SetMetadataItem('time_coverage_start', min(dates).isoformat())
self.dataset.SetMetadataItem('time_coverage_end', max(dates).isoformat())
self.dataset.SetMetadataItem('platform', json.dumps(pti.get_gcmd_platform('radarsat-2')))
self.dataset.SetMetadataItem('instrument', json.dumps(pti.get_gcmd_instrument('C-SAR')))
self.dataset.SetMetadataItem('Entry Title', 'Radarsat-2 SAR')
self.dataset.SetMetadataItem('Data Center', 'CSA')
self.dataset.SetMetadataItem('ISO Topic Category', 'Oceans')
self.dataset.SetMetadataItem('Summary', 'Radarsat-2 SAR data')
self.dataset.SetMetadataItem('provider', 'MDA/GSI')
self.dataset.SetMetadataItem('dataset_parameters', json.dumps(
'surface_backwards_scattering_coefficient_of_radar_wave'))
self.dataset.SetMetadataItem('entry_id', os.path.basename(filename))
| gpl-3.0 | 174,930,700,646,732,770 | 45.402402 | 101 | 0.533135 | false | 4.330717 | false | false | false |
gkoh/pynab | pynab/ids.py | 2 | 14902 | import unicodedata
import regex
import roman
import datetime
import pytz
import time
from pynab import log
import pynab.util
from pynab.interfaces.movie import INTERFACES as MOVIE_INTERFACES
from pynab.interfaces.tv import INTERFACES as TV_INTERFACES
from pynab.db import db_session, windowed_query, Release, MetaBlack, Category, Movie, TvShow, DBID, DataLog, Episode
import config
CLEANING_REGEX = regex.compile(r'\b(hdtv|dvd|divx|xvid|mpeg2|x264|aac|flac|bd|dvdrip|10 bit|264|720p|1080p\d+x\d+)\b', regex.I)
def process(type, interfaces=None, limit=None, online=True):
"""
Process ID fetching for releases.
:param type: tv/movie
:param interfaces: interfaces to use or None will use all
:param limit: optional limit
:param online: whether to check online apis
:return:
"""
expiry = datetime.datetime.now(pytz.utc) - datetime.timedelta(config.postprocess.get('fetch_blacklist_duration', 7))
with db_session() as db:
# noinspection PyComparisonWithNone,PyComparisonWithNone
db.query(MetaBlack).filter((MetaBlack.movie != None)|(MetaBlack.tvshow != None)).filter(MetaBlack.time <= expiry).delete(synchronize_session='fetch')
if type == 'movie':
# noinspection PyComparisonWithNone
query = db.query(Release).filter(Release.movie == None).join(Category).filter(Category.parent_id == 2000)
if online:
# noinspection PyComparisonWithNone
query = query.filter(Release.movie_metablack_id == None)
elif type == 'tv':
# noinspection PyComparisonWithNone
query = db.query(Release).filter(Release.tvshow == None).join(Category).filter(Category.parent_id == 5000)
if online:
# noinspection PyComparisonWithNone
query = query.filter(Release.tvshow_metablack_id == None)
else:
raise Exception('wrong release type')
query = query.order_by(Release.posted.desc())
if limit:
releases = query.limit(limit)
else:
releases = windowed_query(query, Release.id, config.scan.get('binary_process_chunk_size'))
if type == 'movie':
parse_func = parse_movie
iface_list = MOVIE_INTERFACES
obj_class = Movie
attr = 'movie'
def extract_func(data):
return {'name': data.get('name'), 'genre': data.get('genre', None), 'year': data.get('year', None)}
elif type == 'tv':
parse_func = parse_tv
iface_list = TV_INTERFACES
obj_class = TvShow
attr = 'tvshow'
def extract_func(data):
return {'name': data.get('name'), 'country': data.get('country', None)}
else:
raise Exception('wrong release type')
for release in releases:
method = 'local'
data = parse_func(release.search_name)
if data:
if type == 'movie':
q = db.query(Movie).filter(Movie.name.ilike('%'.join(clean_name(data['name']).split(' ')))).filter(Movie.year == data['year'])
elif type == 'tv':
q = db.query(TvShow).filter(TvShow.name.ilike('%'.join(clean_name(data['name']).split(' '))))
else:
q = None
entity = q.first()
if not entity and online:
method = 'online'
ids = {}
for iface in iface_list:
if interfaces and iface.NAME not in interfaces:
continue
exists = q.join(DBID).filter(DBID.db==iface.NAME).first()
if not exists:
id = iface.search(data)
if id:
ids[iface.NAME] = id
if ids:
entity = obj_class(**extract_func(data))
db.add(entity)
for interface_name, id in ids.items():
i = DBID()
i.db = interface_name
i.db_id = id
setattr(i, attr, entity)
db.add(i)
if entity:
log.info('{}: [{}] - [{}] - data added: {}'.format(
attr,
release.id,
release.search_name,
method
))
if type == 'tv':
# episode processing
ep = db.query(Episode).filter(Episode.tvshow_id == entity.id).filter(Episode.series_full == data['series_full']).first()
if not ep:
ep = Episode(
season=data.get('season'),
episode=data.get('episode'),
series_full=data.get('series_full'),
air_date=data.get('air_date'),
year=data.get('year'),
tvshow=entity
)
release.episode = ep
setattr(release, attr, entity)
db.add(release)
else:
log.info('{}: [{}] - data not found: {}'.format(
attr,
release.search_name,
method
))
if online:
mb = MetaBlack(status='ATTEMPTED')
setattr(mb, attr, release)
db.add(mb)
else:
log.info('{}: [{}] - {} data not found: no suitable regex for {} name'.format(
attr,
release.id,
release.search_name,
attr
))
mb = MetaBlack(status='IMPOSSIBLE')
setattr(mb, attr, release)
db.add(mb)
db.add(DataLog(description='parse_{} regex'.format(attr), data=release.search_name))
db.commit()
if method != 'local':
time.sleep(1)
def clean_name(name):
"""
Cleans a show/movie name for searching.
:param name: release name
:return: cleaned name
"""
name = unicodedata.normalize('NFKD', name)
name = regex.sub('[._\-]', ' ', name)
name = regex.sub('[\':!"#*’,()?]', '', name)
name = regex.sub('\s{2,}', ' ', name)
name = regex.sub('\[.*?\]', '', name)
replace_chars = {
'$': 's',
'&': 'and',
'ß': 'ss'
}
for k, v in replace_chars.items():
name = name.replace(k, v)
name = CLEANING_REGEX.sub('', name)
return name.lower()
def parse_tv(search_name):
"""
Parse a TV show name for episode, season, airdate and name information.
:param search_name: release name
:return: show data (dict)
"""
# i fucking hate this function and there has to be a better way of doing it
# named capturing groups in a list and semi-intelligent processing?
show = {}
match = pynab.util.Match()
if match.match('^(.*?)[\. \-]s(\d{1,2})\.?e(\d{1,3})(?:\-e?|\-?e)(\d{1,3})\.', search_name, regex.I):
show = {
'name': match.match_obj.group(1),
'season': int(match.match_obj.group(2)),
'episode': [int(match.match_obj.group(3)), int(match.match_obj.group(4))],
}
elif match.match('^(.*?)[\. \-]s(\d{2})\.?e(\d{2})(\d{2})\.', search_name, regex.I):
show = {
'name': match.match_obj.group(1),
'season': int(match.match_obj.group(2)),
'episode': [int(match.match_obj.group(3)), int(match.match_obj.group(4))],
}
elif match.match('^(.*?)[\. \-]s(\d{1,2})\.?e(\d{1,3})\.?', search_name, regex.I):
show = {
'name': match.match_obj.group(1),
'season': int(match.match_obj.group(2)),
'episode': int(match.match_obj.group(3)),
}
elif match.match('^(.*?)[\. \-]s(\d{1,2})\.', search_name, regex.I):
show = {
'name': match.match_obj.group(1),
'season': int(match.match_obj.group(2)),
'episode': 'all',
}
elif match.match('^(.*?)[\. \-]s(\d{1,2})d\d{1}\.', search_name, regex.I):
show = {
'name': match.match_obj.group(1),
'season': int(match.match_obj.group(2)),
'episode': 'all',
}
elif match.match('^(.*?)[\. \-](\d{1,2})x(\d{1,3})\.', search_name, regex.I):
show = {
'name': match.match_obj.group(1),
'season': int(match.match_obj.group(2)),
'episode': int(match.match_obj.group(3)),
}
elif match.match('^(.*?)[\. \-](19|20)(\d{2})[\.\-](\d{2})[\.\-](\d{2})\.', search_name, regex.I):
show = {
'name': match.match_obj.group(1),
'season': match.match_obj.group(2) + match.match_obj.group(3),
'episode': '{}/{}'.format(match.match_obj.group(4), match.match_obj.group(5)),
'air_date': '{}{}-{}-{}'.format(match.match_obj.group(2), match.match_obj.group(3),
match.match_obj.group(4), match.match_obj.group(5))
}
elif match.match('^(.*?)[\. \-](\d{2}).(\d{2})\.(19|20)(\d{2})\.', search_name, regex.I):
show = {
'name': match.match_obj.group(1),
'season': match.match_obj.group(4) + match.match_obj.group(5),
'episode': '{}/{}'.format(match.match_obj.group(2), match.match_obj.group(3)),
'air_date': '{}{}-{}-{}'.format(match.match_obj.group(4), match.match_obj.group(5),
match.match_obj.group(2), match.match_obj.group(3))
}
elif match.match('^(.*?)[\. \-](\d{2}).(\d{2})\.(\d{2})\.', search_name, regex.I):
# this regex is particularly awful, but i don't think it gets used much
# seriously, > 15? that's going to be a problem in 2 years
if 15 < int(match.match_obj.group(4)) <= 99:
season = '19' + match.match_obj.group(4)
else:
season = '20' + match.match_obj.group(4)
show = {
'name': match.match_obj.group(1),
'season': season,
'episode': '{}/{}'.format(match.match_obj.group(2), match.match_obj.group(3)),
'air_date': '{}-{}-{}'.format(season, match.match_obj.group(2), match.match_obj.group(3))
}
elif match.match('^(.*?)[\. \-]20(\d{2})\.e(\d{1,3})\.', search_name, regex.I):
show = {
'name': match.match_obj.group(1),
'season': '20' + match.match_obj.group(2),
'episode': int(match.match_obj.group(3)),
}
elif match.match('^(.*?)[\. \-]20(\d{2})\.Part(\d{1,2})\.', search_name, regex.I):
show = {
'name': match.match_obj.group(1),
'season': '20' + match.match_obj.group(2),
'episode': int(match.match_obj.group(3)),
}
elif match.match('^(.*?)[\. \-](?:Part|Pt)\.?(\d{1,2})\.', search_name, regex.I):
show = {
'name': match.match_obj.group(1),
'season': 1,
'episode': int(match.match_obj.group(2)),
}
elif match.match('^(.*?)[\. \-](?:Part|Pt)\.?([ivx]+)', search_name, regex.I):
show = {
'name': match.match_obj.group(1),
'season': 1,
'episode': roman.fromRoman(str.upper(match.match_obj.group(2)))
}
elif match.match('^(.*?)[\. \-]EP?\.?(\d{1,3})', search_name, regex.I):
show = {
'name': match.match_obj.group(1),
'season': 1,
'episode': int(match.match_obj.group(2)),
}
elif match.match('^(.*?)[\. \-]Seasons?\.?(\d{1,2})', search_name, regex.I):
show = {
'name': match.match_obj.group(1),
'season': int(match.match_obj.group(2)),
'episode': 'all'
}
elif match.match('^(.+)\s{1,3}(\d{1,3})\s\[([\w\d]+)\]', search_name, regex.I):
# mostly anime
show = {
'name': match.match_obj.group(1),
'season': 1,
'episode': int(match.match_obj.group(2))
}
if 'name' in show and show['name']:
# check for country code or name (Biggest Loser Australia etc)
country = regex.search('[\._ ](US|UK|AU|NZ|CA|NL|Canada|Australia|America)', show['name'], regex.I)
if country:
if str.lower(country.group(1)) == 'canada':
show['country'] = 'CA'
elif str.lower(country.group(1)) == 'australia':
show['country'] = 'AU'
elif str.lower(country.group(1)) == 'america':
show['country'] = 'US'
else:
show['country'] = str.upper(country.group(1))
if not isinstance(show['season'], int) and len(show['season']) == 4:
show['series_full'] = '{}/{}'.format(show['season'], show['episode'])
else:
year = regex.search('[\._ ](19|20)(\d{2})', search_name, regex.I)
if year:
show['year'] = year.group(1) + year.group(2)
show['season'] = 'S{:02d}'.format(show['season'])
# check to see what episode ended up as
if isinstance(show['episode'], list):
show['episode'] = ''.join(['E{:02d}'.format(s) for s in show['episode']])
elif isinstance(show['episode'], int):
show['episode'] = 'E{:02d}'.format(int(show['episode']))
# if it's a date string, leave it as that
show['series_full'] = show['season'] + show['episode']
return show
return None
def parse_movie(search_name):
"""
Parse a movie name into name/year.
:param search_name: release name
    :return: dict with 'name' and 'year' keys, or None if nothing matched
"""
result = regex.search('^(?P<name>.*)[\.\-_\( ](?P<year>19\d{2}|20\d{2})', search_name, regex.I)
if result:
result = result.groupdict()
if 'year' not in result:
result = regex.search(
'^(?P<name>.*)[\.\-_ ](?:dvdrip|bdrip|brrip|bluray|hdtv|divx|xvid|proper|repack|real\.proper|sub\.?fix|sub\.?pack|ac3d|unrated|1080i|1080p|720p|810p)',
search_name, regex.I)
if result:
result = result.groupdict()
if 'name' in result:
name = regex.sub('\(.*?\)|\.|_', ' ', result['name'])
if 'year' in result:
year = result['year']
else:
year = ''
return {'name': name, 'year': year}
return None | gpl-2.0 | 5,586,043,542,865,554,000 | 38.62766 | 167 | 0.483657 | false | 3.689698 | false | false | false |
apaksoy/automatetheboringstuff | practice projects/chap 16/umbrella_reminder.py | 1 | 3075 | #!/usr/bin/env python3
''' Chapter 11 showed you how to use the requests module to scrape data
from http://weather.gov/. Write a program that runs just before you wake
up in the morning and checks whether it’s raining that day. If so, have
the program text you a reminder to pack an umbrella before leaving the
house.
This script gets the first full-day verbal weather forecast in Turkish
from the Turkish Meteorological Institute's (MGM) website for Istanbul and
emails it to the specified address.
'''
import getpass
import sys
import smtplib
import bs4
from selenium import webdriver
rain_words = ["yağış", "yağmur", "sağanak"]
# see stackoverflow 45448994 for phantomjs
if sys.platform == 'win32':
print('Install PhantomJS')
sys.exit()
elif sys.platform == 'darwin':
# Executable path specified as otherwise PhantomJS headless browser
# does not work.
# Service log path specified as otherwise the script cannot be run from
# a .plist file due to the permission problems with the ghostdriver.log
# file.
browser = webdriver.PhantomJS(executable_path=
'/usr/local/Cellar/phantomjs211/bin/phantomjs',
service_log_path='/tmp/ghostdriver.log')
else:
print('Warning - Unknown OS:', sys.platform)
print("Install PhantomJS")
sys.exit()
url = 'https://www.mgm.gov.tr/?il=Istanbul'
browser.get(url)
html = browser.page_source
soup = bs4.BeautifulSoup(html, 'html.parser')
# Get day of the week
elems_day = soup.select('div.tahminTarih.ng-binding')
# Get city and district names in order to make sure query
# returned correct results.
elems_il = soup.select('ziko.ng-binding')
# The weather forecast at MGM's site is per city and district ("ilce"),
# but many district names in Turkish have non-ascii characters.
# Therefore, district-based queries are not implemented.
# elems_ilce = soup.select('span.ng-binding')
# Get weather verbal
elems_tahmin = soup.select('div.tahminHadise.ng-binding')
# Reading the weather from the internet is complete. Quit the browser.
browser.quit()
# Check if the verbal weather forecast in Turkish includes words
# implying rain
umbrella = False
for keyword in rain_words:
if elems_tahmin[0].getText().replace("I", "ı").replace("İ", "i").\
        lower().find(keyword) != -1:  # != -1 so a match at position 0 also counts
umbrella = True
break
if umbrella:
# Send email to yourself about the weather
smtpObj = smtplib.SMTP('smtp.gmail.com', 587)
smtpObj.ehlo()
smtpObj.starttls()
from_addr = '[email protected]'
pswd = 'your_password' # getpass.getpass() not useful when run scheduled
smtpObj.login(from_addr, pswd)
to_addr = '[email protected]'
subject = 'Dışarı çıkarsan Şemsiye almayı unutma!'
body_text = elems_day[0].getText() + '\n' + \
elems_il[0].getText() + '\n' + \
elems_tahmin[0].getText()
body = ('\r\n').join([
'From: %s' % from_addr,
'To: %s' % to_addr,
'Subject: %s' % subject ,
'',
body_text]
)
smtpObj.sendmail(from_addr, to_addr, body.encode('utf-8'))
# log out of email server
smtpObj.quit()
| mit | -5,161,179,202,040,499,000 | 29.89899 | 74 | 0.691729 | false | 3.143885 | false | false | false |
OCA/l10n-italy | assets_management/models/asset_depreciation_line_type.py | 1 | 1809 | # Author(s): Silvio Gregorini ([email protected])
# Copyright 2019 Openforce Srls Unipersonale (www.openforce.it)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
class DepLineType(models.Model):
_name = 'asset.depreciation.line.type'
_description = "Depreciation Line Type"
_table = 'asset_dep_line_type'
_order = 'name asc, code asc'
@api.model
def get_default_company_id(self):
return self.env.user.company_id
code = fields.Char(
string="Code"
)
company_id = fields.Many2one(
'res.company',
default=get_default_company_id,
string="Company"
)
name = fields.Char(
required=True,
string="Name"
)
type = fields.Selection(
[('in', 'In'),
('out', 'Out')],
string="Type",
)
@api.multi
def unlink(self):
for line_type in self:
if self.env['asset.depreciation.line'].search([
('depreciation_line_type_id', '=', line_type.id)
]):
raise ValidationError(
_("Cannot remove type {}: there is some depreciation"
" line linked to it.".format(line_type.name))
)
return super().unlink()
@api.multi
def name_get(self):
return [(line_type.id, line_type.make_name()) for line_type in self]
def make_name(self):
self.ensure_one()
name = ""
if self.code:
name += "[{}] ".format(self.code)
name += self.name
type_name = dict(self._fields['type'].selection).get(self.type)
if type_name:
name += " - " + type_name
return name.strip()
| agpl-3.0 | 7,932,399,071,929,598,000 | 26.409091 | 76 | 0.559425 | false | 3.699387 | false | false | false |
thopiekar/Cura | plugins/UM3NetworkPrinting/src/Models/Http/ClusterPrintJobConfigurationChange.py | 1 | 1249 | # Copyright (c) 2019 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from typing import Optional
from ..BaseModel import BaseModel
## Model for the types of changes that are needed before a print job can start
class ClusterPrintJobConfigurationChange(BaseModel):
## Creates a new print job constraint.
# \param type_of_change: The type of configuration change, one of: "material", "print_core_change"
# \param index: The hotend slot or extruder index to change
# \param target_id: Target material guid or hotend id
# \param origin_id: Original/current material guid or hotend id
# \param target_name: Target material name or hotend id
# \param origin_name: Original/current material name or hotend id
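    # Example (hypothetical values):
    #   ClusterPrintJobConfigurationChange(type_of_change="material", target_id="abc123",
    #                                      origin_id="def456", index=0)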
def __init__(self, type_of_change: str, target_id: str, origin_id: str,
index: Optional[int] = None, target_name: Optional[str] = None, origin_name: Optional[str] = None,
**kwargs) -> None:
self.type_of_change = type_of_change
self.index = index
self.target_id = target_id
self.origin_id = origin_id
self.target_name = target_name
self.origin_name = origin_name
super().__init__(**kwargs)
| lgpl-3.0 | 8,741,208,696,800,755,000 | 45.259259 | 115 | 0.671737 | false | 3.784848 | false | false | false |
kpreid/shinysdr | shinysdr/plugins/simulate.py | 1 | 10793 | # Copyright 2013, 2014, 2015, 2016, 2017 Kevin Reid and the ShinySDR contributors
#
# This file is part of ShinySDR.
#
# ShinySDR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ShinySDR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ShinySDR. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
import math
from zope.interface import implementer # available via Twisted
from gnuradio import analog
from gnuradio import blocks
from gnuradio import channels
from gnuradio import gr
from gnuradio.filter import rational_resampler
from shinysdr.devices import Device, IRXDriver
from shinysdr.filters import make_resampler
from shinysdr.interfaces import IModulator
from shinysdr.math import dB, rotator_inc, to_dB
from shinysdr.i.modes import lookup_mode
from shinysdr.i.pycompat import defaultstr
from shinysdr.signals import SignalType, no_signal
from shinysdr.types import RangeT, ReferenceT
from shinysdr import units
from shinysdr.values import CellDict, CollectionState, ExportedState, LooseCell, exported_value, setter
__all__ = [] # appended later
def SimulatedDevice(
name='Simulated RF',
freq=0.0,
allow_tuning=False):
"""
See documentation in shinysdr/i/webstatic/manual/configuration.html.
"""
return SimulatedDeviceForTest(
name=name,
freq=freq,
allow_tuning=allow_tuning,
add_transmitters=True)
__all__.append('SimulatedDevice')
def SimulatedDeviceForTest(
name='Simulated RF',
freq=0.0,
allow_tuning=False,
add_transmitters=False):
"""Identical to SimulatedDevice except that the defaults are arranged to be minimal for fast testing rather than to provide a rich simulation."""
rx_driver = _SimulatedRXDriver(name, add_transmitters=add_transmitters)
return Device(
name=name,
vfo_cell=LooseCell(
value=freq,
type=RangeT([(-1e9, 1e9)]) if allow_tuning else RangeT([(freq, freq)]), # TODO kludge magic numbers
writable=True,
persists=False,
post_hook=rx_driver._set_sim_freq),
rx_driver=rx_driver)
__all__.append('SimulatedDeviceForTest')
@implementer(IRXDriver)
class _SimulatedRXDriver(ExportedState, gr.hier_block2):
# TODO: be not hardcoded; for now this is convenient
audio_rate = 1e4
rf_rate = 200e3
def __init__(self, name, add_transmitters):
gr.hier_block2.__init__(
self, defaultstr(type(self).__name__ + ' ' + name),
gr.io_signature(0, 0, 0),
gr.io_signature(1, 1, gr.sizeof_gr_complex * 1),
)
rf_rate = self.rf_rate
audio_rate = self.audio_rate
self.__noise_level = -22
self.__transmitters = CellDict(dynamic=True)
self.__transmitters_cs = CollectionState(self.__transmitters)
self.__bus = blocks.add_vcc(1)
self.__channel_model = channels.channel_model(
noise_voltage=dB(self.__noise_level),
frequency_offset=0,
epsilon=1.01, # TODO: expose this parameter
# taps=..., # TODO: apply something here?
)
self.__rotator = blocks.rotator_cc()
self.__throttle = blocks.throttle(gr.sizeof_gr_complex, rf_rate)
self.connect(
self.__bus,
self.__throttle,
self.__channel_model,
self.__rotator,
self)
signals = []
def add_modulator(freq, key, mode_or_modulator_ctor, **kwargs):
if isinstance(mode_or_modulator_ctor, type):
mode = None
ctor = mode_or_modulator_ctor
else:
mode = mode_or_modulator_ctor
mode_def = lookup_mode(mode)
if mode_def is None: # missing plugin, say
return
ctor = mode_def.mod_class
context = None # TODO implement context
modulator = ctor(context=context, mode=mode, **kwargs)
tx = _SimulatedTransmitter(modulator, audio_rate, rf_rate, freq)
self.connect(audio_signal, tx)
signals.append(tx)
self.__transmitters[key] = tx
# Audio input signal
pitch = analog.sig_source_f(audio_rate, analog.GR_SAW_WAVE, -1, 2000, 1000)
audio_signal = vco = blocks.vco_f(audio_rate, 1, 1)
self.connect(pitch, vco)
# Channels
if add_transmitters:
add_modulator(0.0, 'usb', 'USB')
add_modulator(10e3, 'am', 'AM')
add_modulator(30e3, 'fm', 'NFM')
add_modulator(50e3, 'rtty', 'RTTY', message='The quick brown fox jumped over the lazy dog.\n')
add_modulator(80e3, 'chirp', ChirpModulator)
if signals:
for bus_input, signal in enumerate(signals):
self.connect(signal, (self.__bus, bus_input))
else:
# kludge up a correct-sample-rate no-op
self.connect(
audio_signal,
blocks.multiply_const_ff(0),
make_resampler(audio_rate, rf_rate),
blocks.float_to_complex(),
self.__bus)
self.__signal_type = SignalType(
kind='IQ',
sample_rate=rf_rate)
self.__usable_bandwidth = RangeT([(-rf_rate / 2, rf_rate / 2)])
@exported_value(type=ReferenceT(), changes='never')
def get_transmitters(self):
return self.__transmitters_cs
# implement IRXDriver
@exported_value(type=SignalType, changes='never')
def get_output_type(self):
return self.__signal_type
def _set_sim_freq(self, freq):
self.__rotator.set_phase_inc(rotator_inc(rate=self.rf_rate, shift=-freq))
# implement IRXDriver
def get_tune_delay(self):
return 0.0
# implement IRXDriver
def get_usable_bandwidth(self):
return self.__usable_bandwidth
# implement IRXDriver
def close(self):
pass
@exported_value(type=RangeT([(-50, 0)]), changes='this_setter', label='White noise')
def get_noise_level(self):
return self.__noise_level
@setter
def set_noise_level(self, value):
self.__channel_model.set_noise_voltage(dB(value))
self.__noise_level = value
def notify_reconnecting_or_restarting(self):
# The throttle block runs on a clock which does not stop when the flowgraph stops; resetting the sample rate restarts the clock.
# The necessity of this kludge has been filed as a gnuradio bug at <http://gnuradio.org/redmine/issues/649>
self.__throttle.set_sample_rate(self.__throttle.sample_rate())
class _SimulatedTransmitter(gr.hier_block2, ExportedState):
"""provides frequency parameters"""
def __init__(self, modulator, audio_rate, rf_rate, freq):
modulator = IModulator(modulator)
gr.hier_block2.__init__(
self, type(self).__name__,
gr.io_signature(1, 1, gr.sizeof_float * 1),
gr.io_signature(1, 1, gr.sizeof_gr_complex * 1),
)
self.__freq = freq
self.__rf_rate = rf_rate
self.__modulator = modulator
modulator_input_type = modulator.get_input_type()
if modulator_input_type.get_kind() == 'MONO':
audio_resampler = make_resampler(audio_rate, modulator_input_type.get_sample_rate())
self.connect(self, audio_resampler, modulator)
elif modulator_input_type.get_kind() == 'NONE':
self.connect(self, blocks.null_sink(gr.sizeof_float))
else:
raise Exception('don\'t know how to supply input of type %s' % modulator_input_type)
rf_resampler = rational_resampler.rational_resampler_ccf(
interpolation=int(rf_rate),
decimation=int(modulator.get_output_type().get_sample_rate()))
self.__rotator = blocks.rotator_cc(rotator_inc(rate=rf_rate, shift=freq))
self.__mult = blocks.multiply_const_cc(dB(-10))
self.connect(modulator, rf_resampler, self.__rotator, self.__mult, self)
@exported_value(type=ReferenceT(), changes='never')
def get_modulator(self):
return self.__modulator
@exported_value(
type_fn=lambda self: RangeT([(-self.__rf_rate / 2, self.__rf_rate / 2)], unit=units.Hz, strict=False),
changes='this_setter',
label='Frequency')
def get_freq(self):
return self.__freq
@setter
def set_freq(self, value):
self.__freq = float(value)
self.__rotator.set_phase_inc(rotator_inc(rate=self.__rf_rate, shift=self.__freq))
@exported_value(
type=RangeT([(-50.0, 0.0)], unit=units.dB, strict=False),
changes='this_setter',
label='Gain')
def get_gain(self):
return to_dB(self.__mult.k().real)
@setter
def set_gain(self, value):
self.__mult.set_k(dB(value))
@implementer(IModulator)
class ChirpModulator(gr.hier_block2, ExportedState):
def __init__(self, context, mode, chirp_rate=0.1, output_rate=10000):
gr.hier_block2.__init__(
self, type(self).__name__,
gr.io_signature(0, 0, 0),
gr.io_signature(1, 1, gr.sizeof_gr_complex))
self.__output_rate = output_rate
self.__chirp_rate = chirp_rate
self.__control = analog.sig_source_f(output_rate, analog.GR_SAW_WAVE, chirp_rate, output_rate * 2 * math.pi, 0)
chirp_vco = blocks.vco_c(output_rate, 1, 1)
self.connect(
self.__control,
chirp_vco,
self)
def get_input_type(self):
return no_signal
def get_output_type(self):
return SignalType(kind='IQ', sample_rate=self.__output_rate)
@exported_value(
parameter='chirp_rate',
type=RangeT([(-10.0, 10.0)], unit=units.Hz, strict=False),
changes='this_setter',
label='Chirp rate')
def get_chirp_rate(self):
return self.__chirp_rate
@setter
def set_chirp_rate(self, value):
self.__chirp_rate = value
self.__control.set_frequency(value)
| gpl-3.0 | 6,109,872,255,961,333,000 | 34.620462 | 149 | 0.60706 | false | 3.648749 | false | false | false |
meee1/ardupilot | Tools/scripts/rcda_decode.py | 22 | 1487 | #!/usr/bin/env python
'''
decode RCDA messages from a log and optionally play them back to a serial port. The RCDA message
captures RC input bytes when RC_OPTIONS=16 is set
'''
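# Example usage (hypothetical device path and log file):
#   rcda_decode.py --port /dev/ttyUSB0 --baudrate 115200 flight.bin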
import struct
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument("--condition", default=None, help="select packets by condition")
parser.add_argument("--baudrate", type=int, default=115200, help="baudrate")
parser.add_argument("--port", type=str, default=None, help="port")
parser.add_argument("--delay-mul", type=float, default=1.0, help="delay multiplier")
parser.add_argument("log", metavar="LOG")
import time
import serial
args = parser.parse_args()
from pymavlink import mavutil
print("Processing log %s" % args.log)
mlog = mavutil.mavlink_connection(args.log)
if args.port:
port = serial.Serial(args.port, args.baudrate, timeout=1.0)
tlast = -1
counter = 0
while True:
msg = mlog.recv_match(type=['RCDA'], condition=args.condition)
if msg is None:
mlog.rewind()
tlast = -1
continue
tnow = msg.TimeUS
if tlast == -1:
tlast = tnow
buf = struct.pack("<IIIIIIIIII",
msg.U0, msg.U1, msg.U2, msg.U3, msg.U4,
msg.U5, msg.U6, msg.U7, msg.U8, msg.U9)[0:msg.Len]
ibuf = [ ord(b) for b in buf ]
dt = tnow - tlast
tlast = tnow
print(len(ibuf), ibuf, dt)
if args.port:
time.sleep(dt*1.0e-6*args.delay_mul)
port.write(buf)
| gpl-3.0 | 85,140,476,076,589,260 | 28.156863 | 94 | 0.652993 | false | 3.085062 | false | false | false |
OpenTrading/OpenTrader | OpenTrader/OTCmd2_utils.py | 1 | 3890 | # -*-mode: python; py-indent-offset: 4; indent-tabs-mode: nil; encoding: utf-8-dos; coding: utf-8 -*-
import sys
from optparse import OptionParser, make_option
from OpenTrader.deps.cmd2plus import remaining_args, ParsedString
lOPTIONS_DEFINED = []
def options(option_list, arg_desc="arg", usage=None):
'''Used as a decorator and passed a list of optparse-style options,
alters a cmd2 method to populate its ``opts`` argument from its
raw text argument.
Example: transform
def do_something(self, arg):
into
@options([make_option('-q', '--quick', action="store_true",
help="Makes things fast")],
"source dest")
def do_something(self, arg, opts):
if opts.quick:
self.fast_button = True
'''
global lOPTIONS_DEFINED
import optparse
import pyparsing
if not isinstance(option_list, list):
option_list = [option_list]
for opt in option_list:
# opt is an optparse Option
lOPTIONS_DEFINED.append(pyparsing.Literal(opt.get_opt_string()))
def option_setup(func):
optionParser = OptionParser(usage=usage)
optionParser.disable_interspersed_args()
for opt in option_list:
# opt is an optparse Option
optionParser.add_option(opt)
optionParser.set_usage("%s [options] %s" % (func.__name__[3:], arg_desc))
optionParser._func = func
def oUpdateOptionParser(instance):
if func.__name__.startswith('do_'):
sName = func.__name__[3:]
if hasattr(instance, 'oConfig') and sName in instance.oConfig:
oConfigSection = instance.oConfig[sName]
# iterate over optionParser
for sKey, gVal in oConfigSection.iteritems():
sOption = '--' +sKey
if optionParser.has_option(sOption):
oOption = optionParser.get_option(sOption)
# FixMe: only if the default is optparse.NO_DEFAULT?
if oOption.default is optparse.NO_DEFAULT:
# FixMe: does this set the default?
oOption.default = gVal
# FixMe: how about this?
optionParser.defaults[oOption.dest] = oOption.default
return optionParser
def new_func(instance, arg):
try:
            # maybe return a list and prepend it
optionParser = oUpdateOptionParser(instance)
opts, newArgList = optionParser.parse_args(arg.split())
# Must find the remaining args in the original argument list, but
# mustn't include the command itself
#if hasattr(arg, 'parsed') and newArgList[0] == arg.parsed.command:
# newArgList = newArgList[1:]
newArgs = remaining_args(arg, newArgList)
if isinstance(arg, ParsedString):
arg = arg.with_args_replaced(newArgs)
else:
arg = newArgs
except optparse.OptParseError as e:
print (e)
optionParser.print_help()
return
if hasattr(opts, '_exit'):
return None
result = func(instance, arg, opts)
return result
func._optionParser = optionParser
if func.__doc__ is None and usage is None:
func.__doc__ = ""
elif func.__doc__ is None and usage:
func.__doc__ = usage
elif usage:
func.__doc__ = '%s\n%s' % (usage, func.__doc__, )
new_func.__doc__ = '%s\n%s' % (func.__doc__, optionParser.format_help())
return new_func
return option_setup
| lgpl-3.0 | 3,259,144,815,768,460,300 | 41.282609 | 101 | 0.542931 | false | 4.445714 | false | false | false |
MangoMangoDevelopment/neptune | lib/ros_comm-1.12.0/tools/rosgraph/src/rosgraph/rosgraph_main.py | 2 | 3474 | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import sys
import time
from . import roslogging
from . import masterapi
from .impl import graph
def fullusage():
print("""rosgraph is a command-line tool for debugging the ROS Computation Graph.
Usage:
\trosgraph
""")
def rosgraph_main():
if len(sys.argv) == 1:
pass
elif len(sys.argv) == 2 and (sys.argv[1] == '-h' or sys.argv[1] == '--help'):
fullusage()
return
else:
fullusage()
sys.exit(-1)
roslogging.configure_logging('rosgraph')
# make sure master is available
master = masterapi.Master('rosgraph')
try:
master.getPid()
except:
print("ERROR: Unable to communicate with master!", file=sys.stderr)
return
g = graph.Graph()
try:
while 1:
g.update()
if not g.nn_nodes and not g.srvs:
print("empty")
else:
print('\n')
if g.nn_nodes:
print('Nodes:')
for n in g.nn_nodes:
prefix = n + '|'
print(' ' + n + ' :')
print(' Inbound:')
for k in g.nn_edges.edges_by_end.keys():
if k.startswith(prefix):
for c in g.nn_edges.edges_by_end[k]:
print(' ' + c.start)
print(' Outbound:')
for k in g.nn_edges.edges_by_start.keys():
if k.startswith(prefix):
for c in g.nn_edges.edges_by_start[k]:
print(' ' + c.end)
if g.srvs:
print('Services:')
for s in g.srvs:
print(' ' + s)
time.sleep(1.0)
except KeyboardInterrupt:
pass
| bsd-3-clause | 1,588,533,345,502,253,300 | 32.728155 | 85 | 0.609384 | false | 4.190591 | false | false | false |
TheAlgorithms/Python | machine_learning/logistic_regression.py | 1 | 2480 | #!/usr/bin/python
# Logistic Regression from scratch
# importing all the required libraries
"""
Implementing logistic regression for classification problem
Helpful resources:
Coursera ML course
https://medium.com/@martinpella/logistic-regression-from-scratch-in-python-124c5636b8ac
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
# sigmoid function or logistic function is used as a hypothesis function in
# classification problems
def sigmoid_function(z):
return 1 / (1 + np.exp(-z))
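# Worked example: sigmoid_function(0.0) returns 0.5, and
# sigmoid_function(np.array([-2.0, 0.0, 2.0])) is approximately [0.119, 0.5, 0.881].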
def cost_function(h, y):
return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
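# The line above is the mean binary cross-entropy,
# J(theta) = -(1/m) * sum(y*log(h) + (1-y)*log(1-h)), with h = sigmoid(X @ theta).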
def log_likelihood(X, Y, weights):
scores = np.dot(X, weights)
return np.sum(Y * scores - np.log(1 + np.exp(scores)))
# here alpha is the learning rate, X is the feature matrix,y is the target matrix
def logistic_reg(alpha, X, y, max_iterations=70000):
theta = np.zeros(X.shape[1])
for iterations in range(max_iterations):
z = np.dot(X, theta)
h = sigmoid_function(z)
gradient = np.dot(X.T, h - y) / y.size
theta = theta - alpha * gradient # updating the weights
z = np.dot(X, theta)
h = sigmoid_function(z)
J = cost_function(h, y)
if iterations % 100 == 0:
print(f"loss: {J} \t") # printing the loss after every 100 iterations
return theta
if __name__ == "__main__":
iris = datasets.load_iris()
X = iris.data[:, :2]
y = (iris.target != 0) * 1
alpha = 0.1
theta = logistic_reg(alpha, X, y, max_iterations=70000)
print("theta: ", theta) # printing the theta i.e our weights vector
def predict_prob(X):
return sigmoid_function(
np.dot(X, theta)
) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(X[y == 0][:, 0], X[y == 0][:, 1], color="b", label="0")
plt.scatter(X[y == 1][:, 0], X[y == 1][:, 1], color="r", label="1")
(x1_min, x1_max) = (X[:, 0].min(), X[:, 0].max())
(x2_min, x2_max) = (X[:, 1].min(), X[:, 1].max())
(xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
grid = np.c_[xx1.ravel(), xx2.ravel()]
probs = predict_prob(grid).reshape(xx1.shape)
plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
plt.legend()
plt.show()
| mit | -6,395,101,424,357,179,000 | 27.505747 | 87 | 0.612903 | false | 3.061728 | false | false | false |
aipescience/django-daiquiri | daiquiri/oai/renderers.py | 1 | 7874 | from daiquiri.core.renderers import XMLRenderer
from daiquiri.core.renderers.datacite import DataciteRendererMixin
from daiquiri.core.renderers.dublincore import DublincoreRendererMixin
from daiquiri.core.renderers.voresource import VoresourceRendererMixin
class OaiRenderer(DublincoreRendererMixin, DataciteRendererMixin, VoresourceRendererMixin, XMLRenderer):
def render_document(self, data, accepted_media_type=None, renderer_context=None):
self.start('oai:OAI-PMH', {
'xmlns:oai': 'http://www.openarchives.org/OAI/2.0/',
'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
'xsi:schemaLocation': 'http://www.openarchives.org/OAI/2.0/ http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd'
})
self.node('oai:responseDate', {}, data['responseDate'])
request_arguments = data['arguments']
for error_code, _ in data['errors']:
if error_code in ['badVerb', 'badArgument']:
request_arguments = {}
self.node('oai:request', request_arguments, data['baseUrl'])
if data['errors']:
self.render_errors(data['errors'])
elif data['verb'] == 'GetRecord':
self.render_get_record(data['response'])
elif data['verb'] == 'Identify':
self.render_identify(data['response'], data['baseUrl'])
elif data['verb'] == 'ListIdentifiers':
self.render_list_identifiers(data['response']['items'], data['response']['resumptionToken'])
elif data['verb'] == 'ListMetadataFormats':
self.render_list_metadata_formats(data['response'])
elif data['verb'] == 'ListRecords':
self.render_list_records(data['response']['items'], data['response']['resumptionToken'])
elif data['verb'] == 'ListSets':
self.render_list_sets(data['response'])
self.end('oai:OAI-PMH')
def render_errors(self, errors):
for error_code, error_message in errors:
self.node('error', {'code': error_code}, error_message)
def render_get_record(self, item):
self.start('oai:GetRecord')
self.render_record(item)
self.end('oai:GetRecord')
def render_identify(self, repository_metadata, base_url):
self.start('oai:Identify')
self.node('oai:repositoryName', {}, repository_metadata.get('repository_name'))
self.node('oai:baseURL', {}, base_url)
self.node('oai:protocolVersion', {}, '2.0')
self.node('oai:adminEmail', {}, repository_metadata['admin_email'])
self.node('oai:earliestDatestamp', {}, repository_metadata.get('earliest_datestamp').strftime('%Y-%m-%dT%H:%M:%SZ'))
self.node('oai:deletedRecord', {}, repository_metadata.get('deleted_record'))
self.node('oai:granularity', {}, 'YYYY-MM-DDThh:mm:ssZ')
self.render_identify_description(repository_metadata)
self.end('oai:Identify')
def render_identify_description(self, repository_metadata):
self.start('oai:description')
if repository_metadata['identifier'] is not None:
self.render_oai_identifier(repository_metadata.get('identifier'))
self.end('oai:description')
self.start('oai:description')
if repository_metadata['registry'] is not None:
self.render_voresource(repository_metadata.get('registry'))
self.end('oai:description')
def render_oai_identifier(self, identifier_metadata):
self.start('oai-identifier', {
'xmlns': 'http://www.openarchives.org/OAI/2.0/oai-identifier',
'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
'xsi:schemaLocation': 'http://www.openarchives.org/OAI/2.0/oai-identifier http://www.openarchives.org/OAI/2.0/oai-identifier.xsd'
})
self.node('scheme', {}, identifier_metadata.get('scheme'))
self.node('repositoryIdentifier', {}, identifier_metadata.get('repository_identifier'))
self.node('delimiter', {}, identifier_metadata.get('delimiter'))
self.node('sampleIdentifier', {}, identifier_metadata.get('sample_identifier'))
self.end('oai-identifier')
def render_list_identifiers(self, items, resumption_token):
self.start('oai:ListIdentifiers')
for item in items:
self.render_header(item['header'])
if resumption_token:
self.node('oai:resumptionToken', {
'oai:expirationDate': resumption_token.get('expirationDate'),
'oai:completeListSize': resumption_token.get('completeListSize'),
'oai:cursor': resumption_token.get('cursor')
}, resumption_token['token'])
self.end('oai:ListIdentifiers')
def render_list_metadata_formats(self, metadata_formats):
self.start('oai:ListMetadataFormats')
for metadata_format in metadata_formats:
self.start('oai:metadataFormat')
self.node('oai:metadataPrefix', {}, metadata_format['prefix'])
self.node('oai:schema', {}, metadata_format.get('schema'))
self.node('oai:metadataNamespace', {}, metadata_format.get('namespace'))
self.end('oai:metadataFormat')
self.end('oai:ListMetadataFormats')
def render_list_records(self, items, resumption_token):
self.start('oai:ListRecords')
for item in items:
self.render_record(item)
if resumption_token:
self.node('oai:resumptionToken', {
'oai:expirationDate': resumption_token.get('expirationDate'),
'oai:completeListSize': resumption_token.get('completeListSize'),
'oai:cursor': resumption_token.get('cursor')
}, resumption_token['token'])
self.end('oai:ListRecords')
def render_list_sets(self, data):
self.start('oai:ListSets')
for oai_set in data['oai_sets']:
self.start('oai:set')
self.node('oai:setSpec', {}, oai_set['setSpec'])
self.node('oai:setName', {}, oai_set['setName'])
if oai_set['setDescription'] is not None:
self.node('oai:setDescription', {}, oai_set['setDescription'])
self.end('oai:set')
self.end('oai:ListSets')
def render_record(self, record):
self.start('oai:record')
self.render_header(record['header'])
if record['metadata'] is not None:
self.start('oai:metadata')
self.render_metadata(record['metadata'])
self.end('oai:metadata')
self.end('oai:record')
def render_header(self, header):
self.start('oai:header', {'status': 'deleted'} if header['deleted'] else {})
self.node('oai:identifier', {}, header['identifier'])
self.node('oai:datestamp', {}, header['datestamp'])
for spec in header.get('setSpec', []):
self.node('oai:setSpec', {}, spec)
self.end('oai:header')
def render_metadata(self, metadata):
raise NotImplementedError()
class DublincoreRenderer(OaiRenderer):
def render_metadata(self, metadata):
self.render_dublincore(metadata)
class OaiDataciteRenderer(OaiRenderer):
def render_metadata(self, metadata):
self.start('oai_datacite', {
'xmlns': 'http://schema.datacite.org/oai/oai-1.0/',
'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
'xsi:schemaLocation': 'http://schema.datacite.org/oai/oai-1.0/ oai_datacite.xsd'
})
self.start('payload')
self.render_datacite(metadata)
self.end('payload')
self.end('oai_datacite')
class DataciteRenderer(OaiRenderer):
def render_metadata(self, metadata):
self.render_datacite(metadata)
class VoresourceRenderer(OaiRenderer):
def render_metadata(self, metadata):
self.render_voresource(metadata)
| apache-2.0 | -2,084,496,972,795,742,200 | 41.793478 | 141 | 0.625476 | false | 3.826045 | false | false | false |
alainr85/bio_primers | find_differential_primers/find_differential_primers.py | 1 | 83235 | #!/usr/bin/env python
# find_differential_primers.py
#
# A Python script that identifies pairs of forward and reverse primers which
# are capable of amplifying either individual organisms, or a particular
# family of organisms, from a set of genome sequences. Primers are expected
# to be located within CDS features, in an attempt to maximise sequence
# stability of the primers.
#
# The script reads from a configuration file containing sequence names and,
# at a minimum, the location of a complete genome sequence. Optionally, the
# configuration file may also indicate:
# - the location of a GenBank file containing CDS feature locations,
# or an equivalent output file from the Prodigal genefinder
# (http://compbio.ornl.gov/prodigal/)
# - the locations on the genome, and sequences of, primers predicted in
# EMBOSS ePrimer3 output format
# (http://emboss.bioinformatics.nl/cgi-bin/emboss/help/eprimer3)
#
# The first step of the script, if no primer file is specified, is to use
# the sequence file as the basis for a call to EMBOSS ePrimer3
# (http://emboss.bioinformatics.nl/cgi-bin/emboss/help/eprimer3), which must
# be installed and either on the $PATH, or its location specified at the
# command line. This will generate an output file with the same stem as the
# sequence file, but with the extension '.eprimer3'. Some ePrimer3 settings,
# such as the number of primers to find, are command-line options.
#
# If no CDS feature file is specified, and the --noCDS flag is not set,
# the script will attempt first to use Prodigal
# (http://compbio.ornl.gov/prodigal/) to predict CDS locations, placing the
# output in the same directory as the sequence source. If Prodigal cannot be
# found, a warning will be given, and the script will proceed as if the
# --noCDS flag is set. If this flag is set, then all primers are carried
# through to a query with the EMBOSS PrimerSearch package
# (http://emboss.bioinformatics.nl/cgi-bin/emboss/help/primersearch) against
# all other sequences in the dataset. If the flag is not set, then all
# primers that are not located within a CDS feature are excluded from the
# PrimerSearch input. To enable this, the PrimerSearch input is written to
# an intermediate file with the same stem as the input sequence, but the
# extension '.primers'.
#
# A run of PrimerSearch is carried out with every set of primers against
# all other sequences in the dataset. The output of this search is written to
# a file with the following naming convention:
# <query>_primers_vs_<target>.primersearch
# Where <query> is the name given to the query sequence in the config file, and
# <target> is the name given to the target sequence in the config file. This
# step is not carried out if the --noprimersearch flag is set. When this flag
# is set, the script will look for the corresponding PrimerSearch output in
# the same directory as the sequence file, and will report an error if it is
# not present.
#
# Finally, the script uses the PrimerSearch results to identify primers that
# are unique to each query sequence, and to each family named in the config
# file. These are reported in files with the following naming convention:
# <query>_specific_primers.eprimer3
# <family>_specific_primers.primers
# We use ePrimer3 format for the family-specific primers, even though the
# start and end positions are meaningless, as they will amplify at different
# sites in each family member. However, the source sequence is indicated in a
# comment line, and the primer sequences and T_m/GC% values should be the same,
# regardless.
# Primers that are universal to all sequences in the sample are written in
# ePrimer3 format to the file:
# universal_primers.eprimer3
# This file has the same caveats as the family-specific file above.
#
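# As an illustration only, a config row might look like the following (hypothetical
# names; the column order mirrors the GenomeData constructor below, '-' marks
# absent data, and the exact delimiter/format is whatever the config parser expects):
#   organism1  family1,family2  organism1.fasta  organism1.gbk  -  -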
# (c) The James Hutton Institute 2011
# Authors: Leighton Pritchard, Benjamin Leopold, Michael Robeson
#
# Contact:
# [email protected]
#
# Leighton Pritchard,
# Information and Computing Sciences,
# James Hutton Institute,
# Errol Road,
# Invergowrie,
# Dundee,
# DD6 9LH,
# Scotland,
# UK
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# script version
# should match r"^__version__ = '(?P<version>[^']+)'$" for setup.py
__version__ = '0.1.2'
###
# IMPORTS
import logging
import logging.handlers
import multiprocessing
import os
import subprocess
import sys
import time
import traceback
import re
from collections import defaultdict # Syntactic sugar
from optparse import OptionParser # Cmd-line parsing
try:
from Bio import SeqIO # Parsing biological sequence data
from Bio.Blast.Applications import NcbiblastnCommandline
from Bio.Blast import NCBIXML # BLAST XML parser
from Bio.Emboss.Applications import Primer3Commandline, \
PrimerSearchCommandline
from Bio.Emboss import Primer3, PrimerSearch # EMBOSS parsers
from Bio.GenBank import _FeatureConsumer # For GenBank locations
from Bio.Seq import Seq # Represents a sequence
from Bio.SeqRecord import SeqRecord # Represents annotated record
from Bio.SeqFeature import SeqFeature # Represents annotated record
except ImportError:
sys.stderr.write("Biopython required for script, but not found (exiting)")
sys.exit(1)
try:
from bx.intervals.cluster import ClusterTree # Interval tree building
except ImportError:
sys.stderr.write("bx-python required for script, but not found (exiting)")
sys.exit(1)
###
# CLASSES
# Class describing an organism's genome, and associated data.
class GenomeData(object):
""" Describes an organism's genome, and has attributes:
name - short, unique (not enforced) identification string
families - string indicating family memberships
seqfilename - location of representative genome sequence file
ftfilename - location of GBK/Prodigal feature file
primerfilename - location of ePrimer3 format primers file
primers - dictionary collection of Bio.Emboss.Primer3.Primer
objects, keyed by primer name
        Exposed methods are:
        load_sequence, write_primers, get_unique_primers,
        get_family_unique_primers, get_primers_amplify_count,
        filter_primers, filter_primers_oligo, filter_primers_gc_3prime,
        and concatenate_sequences.
    """
def __init__(self, name, families=None, seqfilename=None,
ftfilename=None, primerfilename=None,
primersearchfilename=None):
""" Expects at minimum a name to identify the organism. Optionally
filenames describing the location of sequence, feature, and
primer data may be specified, along with a family classification.
name - short, unique (not enforced) identification string
family - string indicating a family membership
seqfilename - location of representative genome sequence file
ftfilename - location of GBK/Prodigal feature file
primerfilename - location of ePrimer3 format primers file
primersearchfilename - location of PrimerSearch format primers file
Rather hackily, passing '-' to any of the keyword arguments also
sets them to None; this is to aid in config file parsing, and
is a wee bit ugly.
"""
self.name = name # Short identifier
self.families = families.split(',') if families != '-' else None
self.seqfilename = seqfilename if seqfilename != '-' else None
self.ftfilename = ftfilename if ftfilename != '-' else None
self.primerfilename = primerfilename if primerfilename != '-' \
else None
self.primersearchfilename = primersearchfilename if\
primersearchfilename != '-' else None
self.primers = {} # Dict of Primer objects, keyed by name
self.sequence = None # Will hold genome sequence
self.load_sequence()
def load_sequence(self):
""" Load the sequence defined in self.seqfile into memory. We
assume it's FASTA format. This can then be used to calculate
amplicons when loading primers in.
"""
if self.seqfilename is not None:
try:
self.sequence = SeqIO.read(open(self.seqfilename, 'rU'),
'fasta')
except ValueError:
logger.error("Loading sequence file %s failed",
self.seqfilename)
logger.error(last_exception())
sys.exit(1)
def write_primers(self):
""" Write the primer pairs in self.primers out to file in an
appropriate format for PrimerSearch. If the filename is not
            already defined, the filestem of the source sequence file is used
            for the output file, with the extension '.primers'.
            The number of primers written is reported via the logger.
"""
# Define output filename, if not already defined
if self.primersearchfilename is None:
self.primersearchfilename = \
os.path.splitext(self.seqfilename)[0] + '.primers'
time_start = time.time()
logger.info("Writing primers to file %s ...",
self.primersearchfilename)
# Open handle and write data
outfh = open(self.primersearchfilename, 'w')
outfh.write("# Primers for %s\n" % self.name)
outfh.write("# Automatically generated by find_differential_primers\n")
for primers in self.primers.values():
outfh.write("%s\t%s\t%s\n" %
(primers.name, primers.forward_seq,
primers.reverse_seq))
if not len(self.primers):
logger.warning("WARNING: no primers written to %s!",
self.primersearchfilename)
# Being tidy
outfh.close()
logger.info("... wrote %d primers to %s (%.3fs)",
len(self.primers),
self.primersearchfilename, time.time() - time_start)
def get_unique_primers(self, cds_overlap=False,
oligovalid=False,
blastfilter=False):
""" Returns a list of primers that have the .amplifies_organism
attribute, but where this is an empty set.
If cds_overlap is True, then this list is restricted to those
primers whose .cds_overlap attribute is also True
"""
return self.get_primers_amplify_count(0, cds_overlap, blastfilter)
def get_family_unique_primers(self, family_members, cds_overlap=False,
blastfilter=False):
""" Returns a list of primers that have the .amplifies_organism
attribute, and where the set of organisms passed in family_members
is the same as that in .amplifies_organism, with the addition of
self.name.
If cds_overlap is True, then this list is restricted to those
primers whose .cds_overlap attribute is also True
"""
primerlist = []
for primer in self.primers.values():
if family_members == \
set([self.name]).union(primer.amplifies_organism):
primerlist.append(primer)
logger.info("[%s] %d family primers",
self.name, len(primerlist))
if cds_overlap:
primerlist = [p for p in primerlist if p.cds_overlap]
logger.info("[%s] %d primers after CDS filter",
self.name, len(primerlist))
if options.filtergc3prime:
primerlist = [p for p in primerlist if p.gc3primevalid]
logger.info("[%s] %d primers after GC 3` filter",
self.name, len(primerlist))
if options.hybridprobe:
primerlist = [p for p in primerlist if p.oligovalid]
logger.info("[%s] %d primers after oligo filter",
self.name, len(primerlist))
if blastfilter:
primerlist = [p for p in primerlist if p.blastpass]
logger.info("[%s] %d primers after BLAST filter",
self.name, len(primerlist))
if options.single_product:
primerlist = [p for p in primerlist if
p.negative_control_amplimers == 1]
logger.info("[%s] %d primers after single_product filter",
self.name, len(primerlist))
logger.info("[%s] returning %d primers",
self.name, len(primerlist))
return primerlist
def get_primers_amplify_count(self, count, cds_overlap=False,
blastfilter=False):
""" Returns a list of primers that have the .amplifies_organism
attribute and the length of this set is equal to the passed count.
If cds_overlap is True, then this list is restricted to those
primers whose .cds_overlap attribute is also True
"""
primerlist = [p for p in self.primers.values() if
count == len(p.amplifies_organism)]
logger.info("[%s] %d family primers that amplify %d orgs",
self.name, len(primerlist), count)
if cds_overlap:
primerlist = [p for p in primerlist if p.cds_overlap]
logger.info("[%s] %d primers after CDS filter",
self.name, len(primerlist))
if options.filtergc3prime:
primerlist = [p for p in primerlist if p.gc3primevalid]
logger.info("[%s] %d primers after GC 3` filter",
self.name, len(primerlist))
if options.hybridprobe:
primerlist = [p for p in primerlist if p.oligovalid]
logger.info("[%s] %d primers after oligo filter",
self.name, len(primerlist))
if blastfilter:
primerlist = [p for p in primerlist if p.blastpass]
logger.info("[%s] %d primers after BLAST filter",
self.name, len(primerlist))
if options.single_product:
primerlist = [p for p in primerlist if
p.negative_control_amplimers == 1]
logger.info("[%s] %d primers after single_product filter",
self.name, len(primerlist))
logger.info("[%s] returning %d primers",
self.name, len(primerlist))
return primerlist
# Filter primers on the basis of CDS feature overlap
def filter_primers(self, psizemin):
""" Takes the minimum size of an amplified
region, and then uses a ClusterTree to find clusters of CDS and
primer regions that overlap by this minimum size.
There is a possibility that, by stacking primer regions, some of
the reported overlapping primers may in fact not overlap CDS
regions directly, so this method may overreport primers.
- psizemin (int): minimum size of an amplified region
"""
# Load in the feature data. This is done using either SeqIO for
# files with the .gbk extension, or an ad hoc parser for
# .prodigalout prediction files
time_start = time.time()
logger.info("Loading feature data from %s ...", self.ftfilename)
if os.path.splitext(self.ftfilename)[-1] == '.gbk': # GenBank
seqrecord = [r for r in SeqIO.parse(open(self.ftfilename, 'rU'),
'genbank')]
elif os.path.splitext(self.ftfilename)[-1] == '.prodigalout':
seqrecord = parse_prodigal_features(self.ftfilename)
else:
raise IOError("Expected .gbk or .prodigalout file extension")
logger.info("... loaded %d features ...", len(seqrecord.features))
# Use a ClusterTree as an interval tree to identify those
# primers that overlap with features. By setting the minimum overlap
# to the minimum size for a primer region, we ensure that we capture
# every primer that overlaps a CDS feature by this amount, but we may
# also extend beyond the CDS by stacking primers, in principle.
logger.info("... adding CDS feature locations to ClusterTree ...")
ctree = ClusterTree(-psizemin, 2)
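        # ClusterTree(-psizemin, 2): a negative distance means entries must
        # overlap by at least psizemin bases to be clustered, and a cluster is
        # only reported when it contains at least two entries.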
# Loop over CDS features and add them to the tree with ID '-1'. This
# allows us to easily separate the features from primers when
# reviewing clusters.
for feature in [f for f in seqrecord.features if f.type == 'CDS']:
ctree.insert(feature.location.nofuzzy_start,
feature.location.nofuzzy_end, -1)
# ClusterTree requires us to identify elements on the tree by integers,
# so we have to relate each primer added to an integer in a temporary
# list of the self.primers values
logger.info("... adding primer locations to cluster tree ...")
aux = {}
for i, primer in enumerate(self.primers.values()):
ctree.insert(primer.forward_start,
primer.reverse_start + primer.reverse_length, i)
aux[i] = primer
# Now we find the overlapping regions, extracting all element ids
# that are not -1. These are the indices for aux, and we modify the
# self.cds_overlap attribute directly
logger.info("... finding overlapping primers ...")
overlap_primer_ids = set() # CDS overlap primers
for (s, e, ids) in ctree.getregions():
primer_ids = set([i for i in ids if i != -1]) # get non-ft ids
overlap_primer_ids = overlap_primer_ids.union(primer_ids)
logger.info("... %d primers overlap CDS features (%.3fs)",
len(overlap_primer_ids), time.time() - time_start)
for i in overlap_primer_ids:
aux[i].cds_overlap = True
# Filter primers on the basis of internal oligo characteristics
def filter_primers_oligo(self):
""" Loops over the primer pairs in this GenomeData object and
            marks primer.oligovalid as False if the internal oligo meets
            any of the following criteria:
- G at 5` end or 3` end
- two or more counts of 'CC'
- G in second position at 5` end
"""
time_start = time.time()
logger.info("Filtering %s primers on internal oligo...",
self.name)
invalidcount = 0
for primer in self.primers.values():
primer.oligovalid = not(primer.internal_seq.startswith('G')
or primer.internal_seq.endswith('G')
or primer.internal_seq[1:-1].count('CC')
> 1 or primer.internal_seq[1] == 'G')
if not primer.oligovalid:
invalidcount += 1
#if (primer.internal_seq.startswith('G') or
# primer.internal_seq.endswith('G') or
# primer.internal_seq[1:-1].count('CC') > 1 or
# primer.internal_seq[1] == 'G'):
# primer.oligovalid = False
# invalidcount += 1
logger.info("... %d primers failed (%.3fs)", invalidcount,
time.time() - time_start)
# Filter primers on the basis of GC content at 3` end
def filter_primers_gc_3prime(self):
""" Loops over the primer pairs in the passed GenomeData object and,
if either primer has more than 2 G+C in the last five nucleotides,
sets the .gc3primevalid flag to False.
"""
time_start = time.time()
logger.info("Filtering %s primers on 3` GC content ...", self.name)
invalidcount = 0
for primer in self.primers.values():
fseq, rseq = primer.forward_seq[-5:], primer.reverse_seq[-5:]
if (fseq.count('C') + fseq.count('G') > 2) or \
                    (rseq.count('C') + rseq.count('G') > 2):
primer.gc3primevalid = False
invalidcount += 1
logger.info("... %d primers failed (%.3fs)", invalidcount,
time.time() - time_start)
# Concatenate multiple fragments of a genome to a single file
def concatenate_sequences(self):
""" Takes a GenomeData object and concatenates sequences with the
spacer sequence NNNNNCATCCATTCATTAATTAATTAATGAATGAATGNNNNN (this
contains start and stop codons in all frames, to cap individual
            sequences). We write this data out to a new file.
            For the filename convention, we just add '_concatenated' to the
            end of the sequence filestem, and use the '.fas' extension.
"""
# Spacer contains start and stop codons in all six frames
spacer = 'NNNNNCATCCATTCATTAATTAATTAATGAATGAATGNNNNN'
time_start = time.time()
logger.info("Concatenating sequences from %s ...", self.seqfilename)
newseq = SeqRecord(Seq(spacer.join([s.seq.data for s in
SeqIO.parse(open(self.seqfilename,
'rU'),
'fasta')])),
id=self.name + "_concatenated",
description="%s, concatenated with spacers" %
self.name)
outfilename = ''.join([os.path.splitext(self.seqfilename)[0],
'_concatenated', '.fas'])
SeqIO.write([newseq], open(outfilename, 'w'), 'fasta')
logger.info("... wrote concatenated data to %s (%.3fs)",
outfilename, time.time() - time_start)
return outfilename
def __str__(self):
""" Pretty string description of object contents
"""
outstr = ['GenomeData object: %s' % self.name]
outstr.append('Families: %s' % list(self.families))
outstr.append('Sequence file: %s' % self.seqfilename)
outstr.append('Feature file: %s' % self.ftfilename)
outstr.append('Primers file: %s' % self.primerfilename)
outstr.append('PrimerSearch file: %s' % self.primersearchfilename)
outstr.append('Primers: %d' % len(self.primers))
if len(self.primers):
outstr.append('Primers overlapping CDS: %d' %
len([p for p in self.primers.values() if
p.cds_overlap]))
return os.linesep.join(outstr) + os.linesep
###
# FUNCTIONS
# Parse command-line options
def parse_cmdline():
""" Parse command line, accepting args obtained from sys.argv
"""
usage = "usage: %prog [options] arg"
parser = OptionParser(usage)
parser.add_option("-i", "--infile", dest="filename", action="store",
help="location of configuration file",
default=None)
parser.add_option("-o", "--outdir", dest="outdir", action="store",
help="directory for output files",
default="differential_primer_results")
parser.add_option("--numreturn", dest="numreturn", action="store",
help="number of primers to find",
default=20, type="int")
parser.add_option("--hybridprobe", dest="hybridprobe", action="store_true",
help="generate internal oligo as a hybridisation probe",
default=False)
parser.add_option("--filtergc3prime", dest="filtergc3prime",
action="store_true",
help="allow no more than two GC at the 3` " +
"end of primers",
default=False)
parser.add_option("--single_product", dest="single_product",
action="store",
help="location of FASTA sequence file containing " +
"sequences from which a sequence-specific " +
"primer must amplify exactly one product.",
default=None)
parser.add_option("--prodigal", dest="prodigal_exe", action="store",
help="location of Prodigal executable",
default="prodigal")
parser.add_option("--eprimer3", dest="eprimer3_exe", action="store",
help="location of EMBOSS eprimer3 executable",
default="eprimer3")
parser.add_option("--blast_exe", dest="blast_exe", action="store",
help="location of BLASTN/BLASTALL executable",
default="blastn")
parser.add_option("--blastdb", dest="blastdb", action="store",
help="location of BLAST database",
default=None)
parser.add_option("--useblast", dest="useblast", action="store_true",
help="use existing BLAST results",
default=False)
parser.add_option("--nocds", dest="nocds", action="store_true",
help="do not restrict primer prediction to CDS",
default=False)
parser.add_option("--noprodigal", dest="noprodigal", action="store_true",
help="do not carry out Prodigal prediction step",
default=False)
parser.add_option("--noprimer3", dest="noprimer3", action="store_true",
help="do not carry out ePrimer3 prediction step",
default=False)
parser.add_option("--noprimersearch", dest="noprimersearch",
action="store_true",
help="do not carry out PrimerSearch step",
default=False)
parser.add_option("--noclassify", dest="noclassify",
action="store_true",
help="do not carry out primer classification step",
default=False)
parser.add_option("--osize", dest="osize", action="store",
help="optimal size for primer oligo",
default=20, type="int")
parser.add_option("--minsize", dest="minsize", action="store",
help="minimum size for primer oligo",
default=18, type="int")
parser.add_option("--maxsize", dest="maxsize", action="store",
help="maximum size for primer oligo",
default=22, type="int")
parser.add_option("--otm", dest="otm", action="store",
help="optimal melting temperature for primer oligo",
default=59, type="int")
parser.add_option("--mintm", dest="mintm", action="store",
help="minimum melting temperature for primer oligo",
default=58, type="int")
parser.add_option("--maxtm", dest="maxtm", action="store",
help="maximum melting temperature for primer oligo",
default=60, type="int")
parser.add_option("--ogcpercent", dest="ogcpercent", action="store",
help="optimal %GC for primer oligo",
default=55, type="int")
parser.add_option("--mingc", dest="mingc", action="store",
help="minimum %GC for primer oligo",
default=30, type="int")
parser.add_option("--maxgc", dest="maxgc", action="store",
help="maximum %GC for primer oligo",
default=80, type="int")
parser.add_option("--psizeopt", dest="psizeopt", action="store",
help="optimal size for amplified region",
default=100, type="int")
parser.add_option("--psizemin", dest="psizemin", action="store",
help="minimum size for amplified region",
default=50, type="int")
parser.add_option("--psizemax", dest="psizemax", action="store",
help="maximum size for amplified region",
default=150, type="int")
parser.add_option("--maxpolyx", dest="maxpolyx", action="store",
help="maximum run of repeated nucleotides in primer",
default=3, type="int")
parser.add_option("--mismatchpercent", dest="mismatchpercent",
action="store",
help="allowed percentage mismatch in primersearch",
default=10, type="int")
parser.add_option("--oligoosize", dest="oligoosize", action="store",
help="optimal size for internal oligo",
default=20, type="int")
parser.add_option("--oligominsize", dest="oligominsize", action="store",
help="minimum size for internal oligo",
default=13, type="int")
parser.add_option("--oligomaxsize", dest="oligomaxsize", action="store",
help="maximum size for internal oligo",
default=30, type="int")
parser.add_option("--oligootm", dest="oligootm", action="store",
help="optimal melting temperature for internal oligo",
default=69, type="int")
parser.add_option("--oligomintm", dest="oligomintm", action="store",
help="minimum melting temperature for internal oligo",
default=68, type="int")
parser.add_option("--oligomaxtm", dest="oligomaxtm", action="store",
help="maximum melting temperature for internal oligo",
default=70, type="int")
parser.add_option("--oligoogcpercent", dest="oligoogcpercent",
action="store",
help="optimal %GC for internal oligo",
default=55, type="int")
parser.add_option("--oligomingc", dest="oligomingc", action="store",
help="minimum %GC for internal oligo",
default=30, type="int")
parser.add_option("--oligomaxgc", dest="oligomaxgc", action="store",
help="maximum %GC for internal oligo",
default=80, type="int")
parser.add_option("--oligomaxpolyx", dest="oligomaxpolyx", action="store",
help="maximum run of repeated nt in internal oligo",
default=3, type="int")
parser.add_option("--cpus", dest="cpus", action="store",
help="number of CPUs to use in multiprocessing",
default=multiprocessing.cpu_count(), type="int")
parser.add_option("--sge", dest="sge", action="store_true",
help="use SGE job scheduler",
default=False)
parser.add_option("--clean", action="store_true", dest="clean",
help="clean up old output files before running",
default=False)
parser.add_option("--cleanonly", action="store_true", dest="cleanonly",
help="clean up old output files and exit",
default=False)
parser.add_option("-l", "--logfile", dest="logfile",
action="store", default=None,
help="script logfile location")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
help="report progress to log",
default=False)
parser.add_option("--debug", action="store_true", dest="debug",
help="report extra progress to log for debugging",
default=False)
parser.add_option("--keep_logs", action="store_true", dest="keep_logs",
help="store log files from each process",
default=False)
parser.add_option("--log_dir", action="store", dest="log_dir",
help="store called process log files in this directory",
default=None)
(optsparsed, argsparsed) = parser.parse_args()
return (optsparsed, argsparsed, parser)
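# Illustrative (hypothetical) invocation of this script, showing how the
# options above combine; the config path and CPU count are made-up values:
#
#   python find_differential_primers.py -i primer_config.tab \
#       --outdir results --cpus 4 --filtergc3prime --hybridprobe --verbose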
# Report last exception as string
def last_exception():
""" Returns last exception as a string, or use in logging.
"""
exc_type, exc_value, exc_traceback = sys.exc_info()
return ''.join(traceback.format_exception(exc_type, exc_value,
exc_traceback))
# Create a list of GenomeData objects corresponding to config file entries
def create_gd_from_config(filename):
""" Parses data from a configuration file into a list of GenomeData
objects.
Returns a list of GenomeData objects.
Each line of the config file describes a single genome.
The config file format is six tab-separated columns, where columns
may be separated by multiple tabs. 'Empty' data values are indicated
by the '-' symbol, and these are converted into None objects in
parsing.
Comment lines start with '#', as in Python.
        The six columns are:
1) Genome name
2) Genome family
3) Location of FASTA format sequence data
4) Location of GENBANK/PRODIGAL format feature data
5) Location of EPRIMER3 format primer data
6) Location of PRIMERSEARCH input format primer data
        The data would, of course, be better presented as an XML file, but it
        might be useful to maintain both tab- and XML-formatted approaches to
        facilitate both human and computational construction.
"""
time_start = time.time()
logger.info("Creating list of genomes from config file %s ...", filename)
gd_list = [] # Hold GenomeData objects
# Ignore blank lines and comments...
for line in [l.strip() for l in open(filename, 'rU')
if l.strip() and not l.startswith('#')]:
# Split data and create new GenomeData object, adding it to the list
data = [e.strip() for e in line.strip().split('\t') if e.strip()]
name, family, sfile, ffile, pfile, psfile = tuple(data)
gd_list.append(GenomeData(name, family, sfile, ffile, pfile, psfile))
logger.info("... created GenomeData object for %s ...", name)
logger.info(gd_list[-1])
logger.info("... created %d GenomeData objects (%.3fs)",
len(gd_list), time.time() - time_start)
return gd_list
# Check whether each GenomeData object has multiple sequences and, if so,
# concatenate them sensibly, resetting feature and primer file locations to
# None
def check_single_sequence(gd_list):
""" Loops over the GenomeData objects in the passed list and, where the
sequence file contains multiple sequences, concatenates them into
a single sequence using a spacer that facilitates gene-finding. As
this process changes feature and primer locations, the ftfilename and
primerfilename attributes are reset to None, and these are
recalculated later on in the script, where necessary.
"""
time_start = time.time()
logger.info("Checking for multiple sequences ...")
for gd_obj in gd_list:
# Verify that the sequence file contains a single sequence
seqdata = [s for s in SeqIO.parse(open(gd_obj.seqfilename, 'rU'),
'fasta')]
if len(seqdata) != 1:
logger.info("... %s describes multiple sequences ...",
gd_obj.seqfilename)
gd_obj.seqfilename = gd_obj.concatenate_sequences() # Concatenate
logger.info("... clearing feature and primer file locations ...")
(gd_obj.ftfilename, gd_obj.primerfilename,
gd_obj.primersearchfilename) = \
(None, None, None)
logger.info("... checked %d GenomeData objects (%.3fs)",
len(gd_list), time.time() - time_start)
# Check for each GenomeData object in a passed list, the existence of
# the feature file, and create one using Prodigal if it doesn't exist already
def check_ftfilenames(gd_list):
""" Loop over the GenomeData objects in gdlist and, where no feature file
is specified, add the GenomeData object to the list of
packets to be processed in parallel by Prodigal using multiprocessing.
"""
logger.info("Checking and predicting features for GenomeData files ...")
# We split the GenomeData objects into those with, and without,
# defined feature files, but we don't test the validity of the files
# that were predefined, here.
# We don't use the objects with features here, though
#gds_with_ft = [gd_obj for gd_obj in gd_list if
# (gd_obj.ftfilename is not None and
# os.path.isfile(gd_obj.ftfilename))]
gds_no_ft = [gd_obj for gd_obj in gd_list if
(gd_obj.ftfilename is None or
not os.path.isfile(gd_obj.ftfilename))]
# Predict features for those GenomeData objects with no feature file
logger.info("... %d GenomeData objects have no feature file ...",
len(gds_no_ft))
logger.info("... running %d Prodigal jobs to predict CDS ...",
len(gds_no_ft))
# Create a list of command-line tuples, for Prodigal
# gene prediction applied to each GenomeData object in gds_no_ft.
clines = []
for gd_obj in gds_no_ft:
gd_obj.ftfilename = os.path.splitext(gd_obj.seqfilename)[0] +\
'.prodigalout'
seqfilename = os.path.splitext(gd_obj.seqfilename)[0] + '.features'
cline = "%s -a %s < %s > %s" % (options.prodigal_exe, seqfilename,
gd_obj.seqfilename, gd_obj.ftfilename)
clines.append(cline + log_output(gd_obj.name + ".prodigal"))
logger.info("... Prodigal jobs to run:")
logger.info("Running:\n" + "\n".join(clines))
# Depending on the type of parallelisation required, these command-lines
# are either run locally via multiprocessing, or passed out to SGE
if not options.sge:
multiprocessing_run(clines)
else:
sge_run(clines)
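# For reference, a rendered Prodigal command-line from the loop above would
# look like this (hypothetical paths, and assuming --keep_logs is unset):
#
#   prodigal -a data/eco_k12.features < data/eco_k12.fas > data/eco_k12.prodigalout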
# Check whether GenomeData objects have a valid primer definition file
def check_primers(gd_list):
""" Loop over GenomeData objects in the passed gdlist and, if they have
a defined primerfilename attribute, attempt to parse it. If this
is successful, do nothing. If it fails, set the primerfilename
attribute to None.
"""
logger.info("Checking ePrimer3 output files ...")
for gd_obj in [g for g in gd_list if g.primerfilename]:
try:
Primer3.read(open(gd_obj.primerfilename, 'rU'))
logger.info("... %s primer file %s OK ...",
gd_obj.name, gd_obj.primerfilename)
except IOError:
logger.info("... %s primer file %s not OK ...",
gd_obj.name, gd_obj.primerfilename)
gd_obj.primerfilename = None
# Check for each GenomeData object in a passed list, the existence of
# the ePrimer3 file, and create one using ePrimer3 if it doesn't exist already
def predict_primers(gd_list, emboss_version):
""" Loop over the GenomeData objects in gdlist and, where no primer file
is specified, add the GenomeData object to the list of
packets to be processed in parallel by Prodigal using multiprocessing.
"""
logger.info("Checking and predicting primers for GenomeData files ...")
# We need to split the GenomeData objects into those with, and without,
# defined primer files, but we don't test the validity of these files
# We don't use the gds
#gds_with_primers = [g for g in gd_list if g.primerfilename is not None]
gds_no_primers = [g for g in gd_list if g.primerfilename is None]
# Predict primers for those GenomeData objects with no primer file
logger.info("... %d GenomeData objects have no primer file ...",
len(gds_no_primers))
logger.info("... running %d ePrimer3 jobs to predict CDS ...",
len(gds_no_primers))
# Create command-lines to run ePrimer3
clines = []
for gd_obj in gds_no_primers:
# Create ePrimer3 command-line.
cline = Primer3Commandline(cmd=options.eprimer3_exe)
cline.sequence = gd_obj.seqfilename
cline.auto = True
cline.osize = "%d" % options.osize # Optimal primer size
cline.minsize = "%d" % options.minsize # Min primer size
cline.maxsize = "%d" % options.maxsize # Max primer size
# Optimal primer Tm option dependent on EMBOSS version
if float('.'.join(emboss_version.split('.')[:2])) >= 6.5:
cline.opttm = "%d" % options.otm # Optimal primer Tm
else:
cline.otm = "%d" % options.otm
cline.mintm = "%d" % options.mintm # Min primer Tm
cline.maxtm = "%d" % options.maxtm # Max primer Tm
cline.ogcpercent = "%d" % options.ogcpercent # Optimal primer %GC
cline.mingc = "%d" % options.mingc # Min primer %GC
cline.maxgc = "%d" % options.maxgc # Max primer %GC
cline.psizeopt = "%d" % options.psizeopt # Optimal product size
# Longest polyX run in primer
cline.maxpolyx = "%d" % options.maxpolyx
# Allowed product sizes
cline.prange = "%d-%d" % (options.psizemin, options.psizemax)
# Number of primers to predict
cline.numreturn = "%d" % options.numreturn
cline.hybridprobe = options.hybridprobe # Predict internal oligo?
# Internal oligo parameters;
cline.osizeopt = "%d" % options.oligoosize
# We use EMBOSS v6 parameter names, here.
cline.ominsize = "%d" % options.oligominsize
cline.omaxsize = "%d" % options.oligomaxsize
cline.otmopt = "%d" % options.oligootm
cline.otmmin = "%d" % options.oligomintm
cline.otmmax = "%d" % options.oligomaxtm
cline.ogcopt = "%d" % options.oligoogcpercent
cline.ogcmin = "%d" % options.oligomingc
cline.ogcmax = "%d" % options.oligomaxgc
cline.opolyxmax = "%d" % options.oligomaxpolyx
cline.outfile = os.path.splitext(gd_obj.seqfilename)[0] + '.eprimer3'
gd_obj.primerfilename = cline.outfile
clines.append(str(cline) + log_output(gd_obj.name + ".eprimer3"))
logger.info("... ePrimer3 jobs to run:")
logger.info("Running:\n" + '\n'.join(clines))
# Parallelise jobs
if not options.sge:
multiprocessing_run(clines)
else:
sge_run(clines)
# Load primers from ePrimer3 files into each GenomeData object
def load_primers(gd_list):
""" Load primer data from an ePrimer3 output file into a dictionary of
Bio.Emboss.Primer3.Primer objects (keyed by primer name) in a
GenomeData object, for each such object in the passed list.
Each primer object is given a new ad hoc attribute 'cds_overlap' which
takes a Boolean, indicating whether the primer is found wholly within
a CDS defined in the GenomeData object's feature file; this status
is determined using an interval tree approach.
"""
logger.info("Loading primers, %sfiltering on CDS overlap",
'not ' if options.nocds else '')
# Load in the primers, assigning False to a new, ad hoc attribute called
# cds_overlap in each
for gd_obj in gd_list:
logger.info("... loading primers into %s from %s ...",
gd_obj.name, gd_obj.primerfilename)
        # primerfilename may be None (no file defined or predicted), or may
        # point at a file that does not exist; fail early in either case
        if (gd_obj.primerfilename is None or
                not os.path.isfile(gd_obj.primerfilename)):
            raise IOError("Primer file %s does not exist." %
                          gd_obj.primerfilename)
primers = Primer3.read(open(gd_obj.primerfilename, 'rU')).primers
# Add primer pairs to the gd.primers dictionary
primercount = 0
for primer in primers:
primercount += 1
primer.cds_overlap = False # default state
primer.name = "%s_primer_%04d" % (gd_obj.name, primercount)
primer.amplifies_organism = set() # Organisms amplified
primer.amplifies_family = set() # Organism families amplified
primer.gc3primevalid = True # Passes GC 3` test
primer.oligovalid = True # Oligo passes filter
primer.blastpass = True # Primers pass BLAST screen
gd_obj.primers.setdefault(primer.name, primer)
primer.amplicon = \
gd_obj.sequence[primer.forward_start - 1:
primer.reverse_start - 1 +
primer.reverse_length]
primer.amplicon.description = primer.name
logger.info("... loaded %d primers into %s ...",
len(gd_obj.primers), gd_obj.name)
# Now that the primers are in the GenomeData object, we can filter
# them on location, if necessary
if not options.nocds:
gd_obj.filter_primers(options.psizemin)
# We also filter primers on the basis of GC presence at the 3` end
if options.filtergc3prime:
gd_obj.filter_primers_gc_3prime()
# Filter primers on the basis of internal oligo characteristics
if options.hybridprobe:
gd_obj.filter_primers_oligo()
# Screen passed GenomeData primers against BLAST database
def blast_screen(gd_list):
""" The BLAST screen takes three stages. Firstly we construct a FASTA
sequence file containing all primer forward and reverse sequences,
for all primers in each GenomeData object of the list.
We then use the local BLAST+ (not legacy BLAST) interface to BLASTN to
query the named database with the input file. The multiprocessing
of BLASTN is handled by either our multiprocessing threading approach,
or by SGE; we don't use the built-in threading of BLAST so that we
retain flexibility when moving to SGE. It's a small modification to
revert to using the BLAST multithreading code. The output file is
named according to the GenomeData object.
The final step is to parse the BLAST output, and label the primers
that make hits as not having passed the BLAST filter.
"""
build_blast_input(gd_list)
run_blast(gd_list)
parse_blast(gd_list)
# Write BLAST input files for each GenomeData object
def build_blast_input(gd_list):
""" Loops over each GenomeData object in the list, and writes forward
and reverse primer sequences out in FASTA format to a file with
filename derived from the GenomeData object name.
"""
time_start = time.time()
logger.info("Writing files for BLAST input ...")
for gd_obj in gd_list:
gd_obj.blastinfilename =\
os.path.join(os.path.split(gd_obj.seqfilename)[0],
"%s_BLAST_input.fas" % gd_obj.name)
seqrecords = []
for name, primer in gd_obj.primers.items():
seqrecords.append(SeqRecord(Seq(primer.forward_seq),
id=name + '_forward'))
seqrecords.append(SeqRecord(Seq(primer.reverse_seq),
id=name + '_reverse'))
logger.info("... writing %s ...", gd_obj.blastinfilename)
SeqIO.write(seqrecords,
open(gd_obj.blastinfilename, 'w'),
'fasta')
logger.info("... done (%.3fs)", time.time() - time_start)
# Run BLAST screen for each GenomeData object
def run_blast(gd_list):
""" Loop over the GenomeData objects in the passed list, and run a
suitable BLASTN query with the primer sequences, writing to a file
with name derived from the GenomeData object, in XML format.
"""
logger.info("Compiling BLASTN command-lines ...")
clines = []
for gd_obj in gd_list:
gd_obj.blastoutfilename =\
os.path.join(os.path.split(gd_obj.seqfilename)[0],
"%s_BLAST_output.xml" % gd_obj.name)
cline = NcbiblastnCommandline(query=gd_obj.blastinfilename,
db=options.blastdb,
task='blastn', # default: MEGABLAST
out=gd_obj.blastoutfilename,
num_alignments=1,
num_descriptions=1,
outfmt=5,
perc_identity=90,
ungapped=True)
clines.append(str(cline) + log_output(gd_obj.name + ".blastn"))
logger.info("... BLASTN+ jobs to run:")
logger.info("Running:\n" + '\n'.join(clines))
if not options.sge:
multiprocessing_run(clines)
else:
sge_run(clines)
# Parse BLAST output for each GenomeData object
def parse_blast(gd_list):
""" Loop over the GenomeData objects in the passed list, and parse the
BLAST XML output indicated in the .blastoutfilename attribute.
For each query that makes a suitable match, mark the appropriate
primer's .blastpass attribute as False
"""
time_start = time.time()
logger.info("Parsing BLASTN output with multiprocessing ...")
# Here I'm cheating a bit and using multiprocessing directly so that
# we can speed up the parsing process a bit
pool = multiprocessing.Pool(processes=options.cpus)
pool_results = [pool.apply_async(process_blastxml,
(g.blastoutfilename, g.name))
for g in gd_list]
pool.close()
pool.join()
# Process the results returned from the BLAST searches. Create a
# dictionary of GenomeData objects, keyed by name, and loop over the
# result sets, setting .blastpass attributes for the primers as we go
gddict = {}
for gd_obj in gd_list:
gddict.setdefault(gd_obj.name, gd_obj)
failcount = 0
for result in [r.get() for r in pool_results]:
for name in result:
gd_obj = gddict[name.split('_primer_')[0]]
gd_obj.primers[name].blastpass = False
failcount += 1
logger.info("... %d primers failed BLAST screen ...", failcount)
logger.info("... multiprocessing BLAST parsing complete (%.3fs)",
time.time() - time_start)
# BLAST XML parsing function for multiprocessing
def process_blastxml(filename, name):
""" Takes a BLAST output file, and a process name as input. Returns
a set of query sequence names that make a suitably strong hit to
the database.
We are using the database as a screen, so *any* hit that passes
our criteria will do; BLAST+ reports the hits in quality order, so
we only need to see this top hit.
We care if the screening match is identical for at least 90% of
the query, and we're using ungapped alignments, so we check
the alignment HSP identities against the length of the query.
"""
time_start = time.time()
logger.info("[process name: %s] Parsing BLAST XML ...", name)
    # Set to hold queries that hit the database
matching_primers = set()
recordcount = 0
# Parse the file
try:
for record in NCBIXML.parse(open(filename, 'rU')):
recordcount += 1 # Increment our count of matches
# We check whether the number of identities in the alignment is
# greater than our (arbitrary) 90% cutoff. If so, we add the
# query name to our set of failing/matching primers
if len(record.alignments):
identities = float(record.alignments[0].hsps[0].identities) / \
float(record.query_letters)
if 0.9 <= identities:
matching_primers.add('_'.join(
record.query.split('_')[:-1]))
logger.info("[process name: %s] Parsed %d records",
name, recordcount)
except IOError:
logger.info("[process name: %s] Error reading BLAST XML file", name)
logger.info("[process name: %s] Time spent in process: (%.3fs)",
name, time.time() - time_start)
    # Return the set of matching primers
return matching_primers
# A function for parsing features from Prodigal output
def parse_prodigal_features(filename):
""" Parse Prodigal 'GenBank' output.
We try to emulate SeqIO.read() SeqRecord output as much as possible,
but the information provided by Prodigal is limited to feature type
and location, on a single line.
        Amended: newer versions of Prodigal write output that is a closer
        match to GenBank format, and thus if the first line matches
        "DEFINITION" we use SeqIO.
        Re-amended: the latest version of Prodigal is still not close enough
        for SeqIO, so a new function is used to parse line-by-line.
"""
record = SeqRecord(None) # record gets a dummy sequence
# Open filehandle and parse contents
handle = open(filename, 'rU')
# init feature list from file parsing
record.features = seqrecord_parse(handle)
return record
# Parse record features from the lines of prodigal or genbank format file
def seqrecord_parse(filehandle):
""" Parses the head lines of CDS features from a Prodigal or GenBank
file.
This is still necessary, as Prodigal's GenBank output is not
SeqIO.read() friendly.
"""
features = []
for line in filehandle:
if re.search("CDS", line):
data = [e.strip() for e in line.split()]
feature = gb_string_to_feature(data[-1])
feature.type = data[0]
features.append(feature)
return features
# Parse record features from sequence file, using SeqIO
def seqrecord_parse_seqio(filehandle, seqformat):
""" NOTE: Latest version of prodigal output is *closer* to GenBank format
but not close enough for SeqIO to find the genome.features
Thus: this function NOT USED (until potential update to prodigal
or SeqIO).
"""
features = []
seqrecord = list(SeqIO.parse(filehandle, seqformat))
for record in seqrecord:
logger.debug("record seq: [%s]...", record.seq[0:12])
features.append(record.features)
return features
# Code (admittedly hacky) from Brad Chapman to parse a GenBank command line
def gb_string_to_feature(content, use_fuzziness=True):
"""Convert a GenBank location string into a SeqFeature.
"""
consumer = _FeatureConsumer(use_fuzziness)
consumer._cur_feature = SeqFeature()
consumer.location(content)
return consumer._cur_feature
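# A minimal usage sketch for gb_string_to_feature (not called by the
# pipeline); it assumes Biopython's GenBank location parser converts the
# one-based "123..456" string to zero-based SeqFeature coordinates.
def _example_gb_string_to_feature():
    """Illustrative only: parse a simple GenBank location string."""
    feature = gb_string_to_feature("123..456")
    # Expected: nofuzzy_start == 122, nofuzzy_end == 456
    return feature.location.nofuzzy_start, feature.location.nofuzzy_end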
# Run PrimerSearch all-against-all on a list of GenomeData objects
def primersearch(gd_list):
""" Loop over the GenomeData objects in the passed list, and construct
command lines for an all-against-all PrimerSearch run.
Output files are of the format
<query name>_vs_<target name>.primersearch
Where <query name> and <target name> are the gd.name attributes of
the source and target GenomeData objects, respectively.
The output file goes in the same location as the source sequence
file.
"""
logger.info("Constructing all-against-all PrimerSearch runs " +
"for %d objects ...", len(gd_list))
# Create list of command-lines
clines = []
for query_gd in gd_list:
query_gd.primersearch_output = []
for target_gd in gd_list:
if query_gd != target_gd:
# Location of PrimerSearch output
outdir = os.path.split(query_gd.seqfilename)[0]
outfilename = os.path.join(outdir, "%s_vs_%s.primersearch" %
(query_gd.name, target_gd.name))
query_gd.primersearch_output.append(outfilename)
# Create command-line
cline = PrimerSearchCommandline()
cline.auto = True
cline.seqall = target_gd.seqfilename
cline.infile = query_gd.primersearchfilename
cline.outfile = outfilename
cline.mismatchpercent = options.mismatchpercent
clines.append(str(cline) +
log_output(os.path.basename(outfilename)))
logger.info("... PrimerSearch jobs to run: ...")
logger.info("Running:\n" + '\n'.join(clines))
# Parallelise jobs
if not options.sge:
multiprocessing_run(clines)
else:
sge_run(clines)
# Load in existing PrimerSearch output
def load_existing_primersearch_results(gd_list):
""" Associates PrimerSearch output files with each GenomeData object
and returns a list of (name, filename) tuples for all GenomeData
objects
"""
time_start = time.time()
logger.info("Locating existing PrimerSearch input files ...")
primersearch_results = []
for gd_obj in gd_list:
gd_obj.primersearch_output = []
filedir = os.path.split(gd_obj.seqfilename)[0]
primersearch_files = [f for f in os.listdir(filedir) if
os.path.splitext(f)[-1] == '.primersearch' and
f.startswith(gd_obj.name)]
        for filename in primersearch_files:
            logger.info("... found %s for %s ...", filename, gd_obj.name)
            gd_obj.primersearch_output.append(os.path.join(filedir,
                                                           filename))
            primersearch_results.append((gd_obj.name, filename))
    logger.info("... found %d PrimerSearch input files (%.3fs)",
                len(primersearch_results), time.time() - time_start)
    return primersearch_results
# Run primersearch to find whether and where the predicted primers amplify
# our negative target (the one we expect exactly one match to)
def find_negative_target_products(gd_list):
""" We run primersearch using the predicted primers as queries, with
options.single_product as the target sequence. We exploit
multiprocessing, and use the prescribed number of
CPUs. Happily, primersearch accepts multiple sequence FASTA files.
"""
logger.info("Constructing negative control PrimerSearch runs " +
"for %d objects ...", len(gd_list))
# Create list of command-lines
clines = []
for query_gd in gd_list:
query_gd.primersearch_output = []
outdir = os.path.split(query_gd.seqfilename)[0]
outfilename = os.path.join(outdir, "%s_negative_control.primersearch" %
query_gd.name)
query_gd.primersearch_output.append(outfilename)
# Create command-line
cline = PrimerSearchCommandline()
cline.auto = True
cline.seqall = options.single_product
cline.infile = query_gd.primersearchfilename
cline.outfile = outfilename
cline.mismatchpercent = options.mismatchpercent
clines.append(str(cline) + log_output(os.path.basename(outfilename)))
logger.info("... PrimerSearch jobs to run: ...")
logger.info("Running:\n" + '\n'.join(clines))
# Parallelise jobs and run
if not options.sge:
multiprocessing_run(clines)
else:
sge_run(clines)
# Classify the primers in a list of GenomeData objects according to the
# other sequences that they amplify
def classify_primers(gd_list):
""" Takes a list of GenomeData objects and loops over the primersearch
results, loading in the primersearch results and applying them to the
associated query GenomeData object.
If a primer is reported, by PrimerSearch, to amplify a region of the
target genome, two changes are made to the corresponding Primer
        object in the amplifies_organism and amplifies_family ad hoc
        attributes,
with the target name and family, respectively, being added to those
sets.
"""
time_start = time.time()
logger.info("Classifying primers by PrimerSearch results ...")
# Convenience dictionary, keying each GenomeData object by name
# We need to load this fully before checking the PrimerSearch output
# below.
gddict = {}
for gd_obj in gd_list:
gddict.setdefault(gd_obj.name, gd_obj)
# Parse the PrimerSearch output, updating the primer contents of the
# appropriate GenomeData object, for each set of results
for gd_obj in gd_list:
logger.info("... GenomeData for %s ...", gd_obj.name)
for filename in gd_obj.primersearch_output:
logger.info("... processing %s ...", filename)
# Identify the target organism
targetname = \
os.path.splitext(os.path.split(
filename)[-1])[0].split('_vs_')[-1]
# Only classify amplimers to sequences in the gdlist dataset
# This avoids problems with recording counts of matches to
            # sequences that we're not considering, artificially lowering the
# specificity counts.
if targetname in gddict:
# Load the contents of the PrimerSearch output
psdata = PrimerSearch.read(open(filename, 'rU'))
                # We loop over each primer in psdata and, if the primer has
                # any amplimers, this indicates that it amplifies the target.
                # When this is the case we add the organism name and the
                # family name to the appropriate primer in the query
                # GenomeData object
for pname, pdata in psdata.amplifiers.items():
if len(pdata):
# Primer amplifies
gd_obj.primers[pname].amplifies_organism.add(
targetname)
for family in gddict[targetname].families:
gd_obj.primers[pname].amplifies_family.add(family)
# Consider the negative control primersearch output
elif 'negative_control' in filename:
# Load PrimerSearch data
psdata = PrimerSearch.read(open(filename, 'rU'))
# We loop over each primer, and find the number of amplimers.
# We note the number of amplimers as an attribute of the primer
for pname, pdata in psdata.amplifiers.items():
gd_obj.primers[pname].negative_control_amplimers =\
len(pdata)
logger.info("Found %d amplimers in negative control",
len(pdata))
logger.info("... processed %d Primersearch results for %s ...",
len(gd_obj.primersearch_output), gd_obj.name)
logger.info("... processed PrimerSearch results (%.3fs)",
time.time() - time_start)
# Write analysis data to files
def write_report(gd_list, blastfilter):
""" Write a tab-separated table of information to the passed
filename, summarising the distribution of unique, family-unique,
and universal (for this set) primers amongst the GenomeData objects
in gdlist. Also write out to this file the locations of the files
containing the data used to generate the information.
In addition, write out the following files in ePrimer3 format:
i) <query_name>_specific.eprimer3 - unique primers for each query
GenomeData object
ii) <family>_specific.eprimer3 - unique primers for each family in
the GenomeData set
iii) universal_primers.eprimer3 - primers that amplify all members of
the GenomeData set
"""
time_start = time.time()
logger.info("Creating summary output ...")
# First we need to generate a dictionary of GenomeData object names, keyed
# by family
families = defaultdict(set)
for gd_obj in gd_list:
for family in gd_obj.families:
families[family].add(gd_obj.name)
# Rectify nocds flag
cds_overlap = not options.nocds
# Check whether output directory exists and, if not, create it
if not os.path.isdir(options.outdir):
os.mkdir(options.outdir)
# Open output file, and write header
outfh = open(os.path.join(options.outdir,
'differential_primer_results.tab'), 'w')
outfh.write(os.linesep.join([
"# Summary information table",
"# Generated by find_differential_primers",
"# Columns in the table:",
"# 1) Query organism ID",
"# 2) Query organism families",
"# 3) Count of organism-unique primers",
"# 4) Count of universal primers",
"# 5) Query sequence filename",
"# 6) Query feature filename",
"# 7) Query ePrimer3 primers filename"]) + '\n')
# Write data for each GenomeData object
other_org_count = len(gd_list) - 1 # Amplifications for 'universal' set
# We store 'universal' primers in their own list, and family-specific
    # primers in a dictionary, keyed by family
all_universal_primers = []
family_specific_primers = defaultdict(list)
# Loop over each GenomeData object and populate family-specific and
# universal primer collections, as well as organism-specific and
# summary information
for gd_obj in gd_list:
logger.info('\n'.join([
"... writing data for %s ..." % gd_obj.name,
"... cds_overlap: %s ..." % cds_overlap,
"... gc3primevalid: %s ..." % options.filtergc3prime,
"... oligovalid: %s ..." % options.hybridprobe,
"... blastpass: %s ..." % blastfilter,
"... single_product %s ..." % (options.single_product is
not None),
"... retrieving primer pairs ...",
"... finding strain-specific primers for %s ..." % gd_obj.name
]))
unique_primers = gd_obj.get_unique_primers(cds_overlap, blastfilter)
logger.info("... finding family-specific primers for %s ...",
gd_obj.name)
family_unique_primers = {}
for family in gd_obj.families:
logger.info("Checking family: %s" % family)
logger.info("families[%s]: %s" % (family, families[family]))
family_unique_primers[family] = \
gd_obj.get_family_unique_primers(families[family], cds_overlap,
blastfilter)
family_specific_primers[family] += family_unique_primers[family]
logger.info("family_unique_primers[%s]: %d" %
(family, len(family_unique_primers[family])))
logger.info("family_specific_primers[%s]: %d" %
(family, len(family_specific_primers[family])))
logger.info("... finding universal primers for %s ...", gd_obj.name)
universal_primers = \
gd_obj.get_primers_amplify_count(other_org_count, cds_overlap,
blastfilter)
all_universal_primers.extend(universal_primers)
# Write summary data to file
outfh.write('\t'.join([gd_obj.name, ','.join(gd_obj.families),
str(len(unique_primers)),
str(len(universal_primers)),
str(gd_obj.seqfilename),
str(gd_obj.ftfilename),
str(gd_obj.primerfilename)]) + '\n')
# Write organism-specific primers to file
write_eprimer3(unique_primers,
os.path.join(options.outdir,
"%s_specific_primers.eprimer3" %
gd_obj.name), gd_obj.seqfilename)
# Write organism-specific amplicons to file
SeqIO.write([p.amplicon for p in unique_primers],
os.path.join(options.outdir,
"%s_specific_amplicons.fas" % gd_obj.name),
'fasta')
outfh.close()
# Write universal primers to file
write_eprimer3(universal_primers,
os.path.join(options.outdir, "universal_primers.eprimer3"),
'', append=True)
# Write organism-specific amplicons to file
SeqIO.write([p.amplicon for p in universal_primers],
open(os.path.join(options.outdir,
"universal_amplicons.fas"), 'w'),
'fasta')
# Write family-specific primers to files
outfh = open(os.path.join(options.outdir,
'differential_primer_results-families.tab'),
'w')
outfh.write(os.linesep.join([
"# Summary information table",
"# Generated by find_differential_primers",
"# Columns in the table:",
"# 1) Family",
"# 2) Count of family-specific primers",
"# 3) Family-specific primer file",
"# 4) Family-specific amplicon file"]) + '\n')
for family, primers in family_specific_primers.items():
outstr = [family, str(len(primers))]
fname = os.path.join(options.outdir,
"%s_family-specific_primers.eprimer3" %
family)
write_eprimer3(primers, fname, '')
outstr.append(fname)
# Write family-specific amplicons to file
fname = os.path.join(options.outdir,
"%s_family-specific_amplicons.fas" %
family)
SeqIO.write([p.amplicon for p in primers], open(fname, 'w'), 'fasta')
outstr.append(fname)
outfh.write('\t'.join(outstr) + '\n')
# Being tidy...
outfh.close()
logger.info("... data written (%.3fs)", time.time() - time_start)
# Write ePrimer3 format primer file
def write_eprimer3(primers, filename, sourcefilename, append=False):
""" Write the passed primers to the passed file, in ePrimer3-
compatible format.
"""
logger.info("Writing %d primer pairs to %s ...", len(primers), filename)
# Open file
filemode = 'a' if append else 'w' # Do we append or write anew?
outfh = open(filename, filemode)
# Write header
outfh.write(os.linesep.join([
"# EPRIMER3 PRIMERS %s " % filename,
"# Start Len Tm GC% Sequence",
os.linesep]) + '\n')
primercount = 0
for primer in primers:
primercount += 1
outfh.write("# %s %s\n" % (primer.name, sourcefilename))
outfh.write("%-4d PRODUCT SIZE: %d\n" % (primercount, primer.size))
outfh.write(" FORWARD PRIMER %-9d %-3d %.02f %.02f %s\n" %
(primer.forward_start, primer.forward_length,
primer.forward_tm, primer.forward_gc,
primer.forward_seq))
outfh.write(" REVERSE PRIMER %-9d %-3d %.02f %.02f %s\n" %
(primer.reverse_start, primer.reverse_length,
primer.reverse_tm, primer.reverse_gc,
primer.reverse_seq))
if hasattr(primer, 'internal_start'):
outfh.write(" INTERNAL OLIGO %-9d %-3d %.02f %.02f %s\n" %
(primer.internal_start, primer.internal_length,
primer.internal_tm, primer.internal_gc,
primer.internal_seq))
outfh.write(os.linesep * 3)
# Be tidy
outfh.close()
# Run the passed list of command-lines using a multiprocessing.Pool
def multiprocessing_run(clines):
""" We create a multiprocessing Pool to handle command-lines We
pass the (unique) GenomeData object name, and the location of the
sequence file. The called function returns the GenomeData name and the
corresponding location of the generated feature file. The GenomeData
objects are stored in a temporary dictionary, keyed by gd.name, to
allow association of the results of the asynchronous pool jobs with the
correct GenomeData object
"""
time_start = time.time()
logger.info("Running %d jobs with multiprocessing ...",
len(clines))
pool = multiprocessing.Pool(processes=options.cpus) # create process pool
completed = []
if options.verbose:
callback_fn = multiprocessing_callback
else:
callback_fn = completed.append
for cline in clines:
pool.apply_async(subprocess.call,
(str(cline), ),
{'stderr': subprocess.PIPE,
'shell': sys.platform != "win32"},
callback=callback_fn)
pool.close() # Run jobs
pool.join()
logger.info("Completed:\n" + '\n'.join([str(e) for e in completed]))
logger.info("... all multiprocessing jobs ended (%.3fs)",
time.time() - time_start)
# Add a multiprocessing callback function here
def multiprocessing_callback(val):
""" A verbose callback function for multiprocessing runs. It uses the
return value to indicate run completion or failure. Failure is
indicated by a nonzero return from the multiprocessing call.
"""
if 0 == val:
logger.info("... multiprocessing run completed (status: %s) ...", val)
else:
logger.error("... problem with multiprocessing run (status: %s) ...",
val)
# Clean output for each GenomeData object in the passed list
def clean_output(gd_list):
""" Remove .eprimer3, .primers, .prodigalout, and .primersearch files
from the same directory as the sequence file for each passed
PrimerSearch object
"""
time_start = time.time()
logger.info("Cleaning up output files for GenomeData objects ...")
# Loop over each GenomeData object, and remove each output file
for gd_obj in gd_list:
seqdir = os.path.split(gd_obj.seqfilename)[0]
        for filename in [f for f in os.listdir(seqdir)
                         if os.path.splitext(f)[-1] in
                         ['.eprimer3', '.primers', '.prodigalout',
                          '.primersearch', '.xml']]:
abspath = os.path.join(seqdir, filename)
logger.info("... deleting %s ...", abspath)
os.remove(abspath) # You can never go back after this point
logger.info("... done (%.3fs)", time.time() - time_start)
# construct str to concat on end of cline if option.keep_logs is set
def log_output(filename):
""" predefine file extension and stream to print to.
if log_dir exists, join it to filename
else output to base filename.
"""
log_extension = ".log"
log_out_handle = " 2> "
if options.keep_logs and options.log_dir:
return log_out_handle + os.path.join(options.log_dir, filename) +\
log_extension
elif options.keep_logs:
return log_out_handle + filename + log_extension
else:
return ""
# run list of command-line jobs with SGE
def sge_run(*args):
""" Function intended to compile a passed list of command lines, and
run them on SGE.
"""
raise NotImplementedError
###
# SCRIPT
if __name__ == '__main__':
# Parse cmd-line
options, arguments, optparser = parse_cmdline()
# Set up logging, and modify loglevel according to whether we need
# verbosity or not
# err_handler points to sys.stderr
# err_handler_file points to a logfile, if named
logger = logging.getLogger('find_differential_primers.py')
logger.setLevel(logging.DEBUG)
err_handler = logging.StreamHandler(sys.stderr)
err_formatter = logging.Formatter('%(levelname)s: %(message)s')
err_handler.setFormatter(err_formatter)
if options.logfile is not None:
try:
logstream = open(options.logfile, 'w')
err_handler_file = logging.StreamHandler(logstream)
err_handler_file.setFormatter(err_formatter)
err_handler_file.setLevel(logging.INFO)
logger.addHandler(err_handler_file)
except IOError:
logger.error("Could not open %s for logging",
options.logfile)
sys.exit(1)
if options.verbose:
err_handler.setLevel(logging.INFO)
else:
err_handler.setLevel(logging.WARNING)
logger.addHandler(err_handler)
logger.info('# find_differential_primers.py logfile')
logger.info('# Run: %s', time.asctime())
# Report arguments, if verbose
logger.info(options)
logger.info(arguments)
# Create our GenomeData objects. If there is no configuration file
# specified, raise an error and exit. Otherwise we end up with a list
# of GenomeData objects that are populated only with the data from the
# config file
if options.filename is None:
optparser.print_help()
raise IOError("No configuration file specified")
gdlist = create_gd_from_config(options.filename)
# If the user wants to clean the directory before starting, do so
if options.clean or options.cleanonly:
clean_output(gdlist)
if options.cleanonly:
sys.exit(0)
# It is possible that the sequence file for a GenomeData object might
# be a multi-sequence file describing scaffolds or contigs. We create a
# concatenated sequence to facilitate further analyses, if this is the
# case. Where a sequence needs to be concatenated, this will affect the
# placement of features and/or primers, so any specified files are
# reset to None
check_single_sequence(gdlist)
# What EMBOSS version is available? This is important as the ePrimer3
# command-line changes in v6.6.0, which is awkward for the Biopython
# interface.
embossversion = \
subprocess.check_output("embossversion",
stderr=subprocess.PIPE,
shell=sys.platform != "win32").strip()
logger.info("EMBOSS version reported as: %s", embossversion)
# We need to check the existence of a prescribed feature file and, if
# there is not one, create it. We don't bother if the --nocds flag is set.
if not (options.nocds or options.noprodigal):
logger.info("--nocds option not set: " +
"Checking existence of features...")
check_ftfilenames(gdlist)
elif options.nocds:
logger.warning("--nocds option set: Not checking or " +
"creating feature files")
else:
logger.warning("--noprodigal option set: Not predicting new CDS")
# We need to check for the existence of primer sequences for the organism
# and, if they do not exist, create them using ePrimer3. If the
# --noprimer3 flag is set, we do not create new primers, but even if the
# --noprimersearch flag is set, we still need to check whether the
# primer files are valid
if not options.noprimer3:
logger.info("--noprimer3 flag not set: Predicting new primers")
check_primers(gdlist)
predict_primers(gdlist, embossversion)
else:
logger.warning("--noprimer3 flag set: Not predicting new primers")
# With a set of primers designed for the organism, we can load them into
# the GenomeData object, filtering for those present only in the CDS,
# if required. This step is necessary, whether or not a new ePrimer3
# prediction is made. We also filter on GC content at the primer 3' end,
# if required.
logger.info("Loading primers...")
load_primers(gdlist)
# At this point, we can check our primers against a prescribed BLAST
# database. How we filter these depends on the user's preference.
# We screen against BLAST here so that we can flag an attribute on
# each primer to say whether or not it passed the BLAST screen.
if options.blastdb and not options.useblast:
logger.info("--blastdb options set: BLAST screening primers...")
blast_screen(gdlist)
elif options.useblast:
logger.warning("--useblast option set: " +
"using existing BLAST results...")
else:
logger.warning("No BLAST options set, not BLAST screening primers...")
# Having a set of (potentially CDS-filtered) primers for each organism,
# we then scan these primers against each of the other organisms in the
# set, using the EMBOSS PrimerSearch package
# (http://embossgui.sourceforge.net/demo/manual/primersearch.html)
# Now we have all the data we need to run PrimerSearch in an all-vs-all
# manner, so make a cup of tea, put your feet up, and do the comparisons
# with EMBOSS PrimerSearch
# (http://embossgui.sourceforge.net/demo/manual/primersearch.html)
if options.noprimersearch:
logger.warning("--noprimersearch flag set: Not running PrimerSearch")
# Load the appropriate primersearch output files for each
# GenomeData object
load_existing_primersearch_results(gdlist)
else:
logger.info("--noprimersearch flag not set: Running PrimerSearch")
# We write input for PrimerSearch ignoring all the filters; this lets
# us turn off PrimerSearch and rerun the analysis with alternative
# filter settings
for gd in gdlist:
gd.write_primers()
# Run PrimerSearch
primersearch(gdlist)
# If the --single_product option is specified, we load in the sequence
# file to which the passed argument refers, and filter the primer
# sequences on the basis of how many amplification products are produced
# from these sequences. We expect exactly one amplification product per
# primer set, if it's not degenerate on the target sequence
# (note that this filter is meaningless for family-specific primers)
if options.single_product:
find_negative_target_products(gdlist)
logger.info("--blastdb options set: BLAST screening primers...")
blast_screen(gdlist)
# Now we classify the primer sets according to which sequences they amplify
if not options.noclassify:
logger.info("Classifying primers and writing output files ...")
# Classify the primers in each GenomeData object according to
# the organisms and families that they amplify, using the
# PrimerSearch results.
classify_primers(gdlist)
# All the data has been loaded and processed, so we can now create our
# plaintext summary report of the number of unique, family-unique and
# universal primers in each of the organisms
write_report(gdlist, (options.blastdb is not None or options.useblast))
| gpl-3.0 | 1,479,732,124,699,790,600 | 47.392442 | 79 | 0.609431 | false | 4.033485 | false | false | false |
RedHatQE/cfme_tests | cfme/tests/cloud_infra_common/test_provisioning.py | 1 | 23712 | # -*- coding: utf-8 -*-
# These tests don't work at the moment, due to the security_groups multi select not working
# in selenium (the group is selected then immediately reset)
from textwrap import dedent
import fauxfactory
import pytest
from riggerlib import recursive_update
from widgetastic_patternfly import CheckableBootstrapTreeview as Check_tree
from cfme import test_requirements
from cfme.cloud.provider import CloudProvider
from cfme.cloud.provider.azure import AzureProvider
from cfme.cloud.provider.gce import GCEProvider
from cfme.cloud.provider.openstack import OpenStackProvider
from cfme.infrastructure.provider import InfraProvider
from cfme.markers.env_markers.provider import providers
from cfme.utils import normalize_text
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.blockers import BZ
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
from cfme.utils.providers import ProviderFilter
from cfme.utils.update import update
from cfme.utils.version import LOWEST
from cfme.utils.version import VersionPicker
from cfme.utils.wait import TimedOutError
from cfme.utils.wait import wait_for
pytestmark = [
pytest.mark.meta(server_roles="+automate +notifier"),
test_requirements.provision, pytest.mark.tier(2),
pytest.mark.provider(gen_func=providers,
filters=[ProviderFilter(classes=[CloudProvider, InfraProvider],
required_flags=['provision'])],
scope="function"),
pytest.mark.usefixtures('setup_provider')
]
@pytest.fixture()
def vm_name():
return random_vm_name(context='prov', max_length=12)
@pytest.fixture()
def instance_args(request, provider, provisioning, vm_name):
""" Fixture to prepare instance parameters for provisioning
"""
inst_args = dict(template_name=provisioning.get('image', {}).get('image') or provisioning.get(
'template'))
# Base instance info
inst_args['request'] = {
'notes': 'Testing provisioning from image {} to vm {} on provider {}'
.format(inst_args.get('template_name'), vm_name, provider.key),
}
# Check whether auto-selection of environment is passed
auto = False # By default provisioning will be manual
try:
parameter = request.param
auto = parameter
except AttributeError:
# in case nothing was passed just skip
pass
if auto:
inst_args.update({'environment': {'automatic_placement': auto}})
yield vm_name, inst_args
@pytest.fixture()
def provisioned_instance(provider, instance_args, appliance):
""" Checks provisioning status for instance """
vm_name, inst_args = instance_args
collection = appliance.provider_based_collection(provider)
instance = collection.create(vm_name, provider, form_values=inst_args)
if not instance:
raise Exception("instance returned by collection.create is 'None'")
yield instance
logger.info('Instance cleanup, deleting %s', instance.name)
try:
instance.cleanup_on_provider()
except Exception as ex:
logger.warning('Exception while deleting instance fixture, continuing: {}'
.format(ex.message))
@pytest.mark.rhel_testing
@pytest.mark.parametrize('instance_args', [True, False], ids=["Auto", "Manual"], indirect=True)
def test_provision_from_template(provider, provisioned_instance):
""" Tests instance provision from template via CFME UI
Metadata:
test_flag: provision
Polarion:
assignee: jhenner
caseimportance: high
casecomponent: Provisioning
initialEstimate: 1/4h
"""
assert provisioned_instance.exists_on_provider, "Instance wasn't provisioned successfully"
@pytest.mark.provider([GCEProvider], required_fields=[['provisioning', 'image']],
override=True)
@pytest.mark.usefixtures('setup_provider')
def test_gce_preemptible_provision(appliance, provider, instance_args, soft_assert):
"""
Polarion:
assignee: jhenner
caseimportance: high
casecomponent: Provisioning
initialEstimate: 1/6h
"""
vm_name, inst_args = instance_args
inst_args['properties']['is_preemptible'] = True
instance = appliance.collections.cloud_instances.create(vm_name,
provider,
form_values=inst_args)
view = navigate_to(instance, "Details")
preemptible = view.entities.summary("Properties").get_text_of("Preemptible")
soft_assert('Yes' in preemptible, "GCE Instance isn't Preemptible")
soft_assert(instance.exists_on_provider, "Instance wasn't provisioned successfully")
@pytest.mark.rhv2
@pytest.mark.parametrize("edit", [True, False], ids=["edit", "approve"])
def test_provision_approval(appliance, provider, vm_name, smtp_test, request,
edit):
""" Tests provisioning approval. Tests couple of things.
* Approve manually
* Approve by editing the request to conform
    Prerequisites:
* A provider that can provision.
* Automate role enabled
* User with e-mail set so you can receive and view them
Steps:
        * Create a provisioning request that does not get automatically approved (e.g. ``num_vms``
bigger than 1)
* Wait for an e-mail to come, informing you that the auto-approval was unsuccessful.
* Depending on whether you want to do manual approval or edit approval, do:
* MANUAL: manually approve the request in UI
* EDIT: Edit the request in UI so it conforms the rules for auto-approval.
* Wait for an e-mail with approval
* Wait until the request finishes
* Wait until an email, informing about finished provisioning, comes.
Metadata:
test_flag: provision
suite: infra_provisioning
Polarion:
assignee: jhenner
caseimportance: high
casecomponent: Provisioning
initialEstimate: 1/8h
"""
# generate_tests makes sure these have values
# template, host, datastore = map(provisioning.get, ('template', 'host', 'datastore'))
# It will provision two of them
vm_names = [vm_name + "001", vm_name + "002"]
if BZ(1628240).blocks and provider.one_of(CloudProvider):
requester = ""
else:
requester = "[email protected] "
collection = appliance.provider_based_collection(provider)
inst_args = {'catalog': {
'vm_name': vm_name,
'num_vms': '2'
}}
vm = collection.create(vm_name, provider, form_values=inst_args, wait=False)
try:
if provider.one_of(CloudProvider):
vm_type = "instance"
else:
vm_type = "virtual machine"
subject = VersionPicker({
LOWEST: "your request for a new vms was not autoapproved",
"5.10": "your {} request is pending".format(vm_type)
}).pick()
wait_for(
lambda:
len(filter(
lambda mail:
normalize_text(subject) in normalize_text(mail["subject"]),
smtp_test.get_emails())) == 1,
num_sec=90, delay=5)
subject = VersionPicker({
LOWEST: "virtual machine request was not approved",
"5.10": "{} request from {}pending approval".format(vm_type, requester)
}).pick()
wait_for(
lambda:
len(filter(
lambda mail:
normalize_text(subject) in normalize_text(mail["subject"]),
smtp_test.get_emails())) == 1,
num_sec=90, delay=5)
except TimedOutError:
subjects = ",".join([normalize_text(m["subject"]) for m in smtp_test.get_emails()])
logger.error("expected: %s, got emails: %s", subject, subjects)
raise
smtp_test.clear_database()
cells = {'Description': 'Provision from [{}] to [{}###]'.format(vm.template_name, vm.name)}
provision_request = appliance.collections.requests.instantiate(cells=cells)
navigate_to(provision_request, 'Details')
if edit:
# Automatic approval after editing the request to conform
new_vm_name = '{}-xx'.format(vm_name)
modifications = {
'catalog': {'num_vms': "1", 'vm_name': new_vm_name},
'Description': 'Provision from [{}] to [{}]'.format(vm.template_name, new_vm_name)}
provision_request.edit_request(values=modifications)
vm_names = [new_vm_name] # Will be just one now
request.addfinalizer(
lambda: collection.instantiate(new_vm_name, provider).cleanup_on_provider()
)
else:
# Manual approval
provision_request.approve_request(method='ui', reason="Approved")
vm_names = [vm_name + "001", vm_name + "002"] # There will be two VMs
request.addfinalizer(
lambda: [appliance.collections.infra_vms.instantiate(name,
provider).cleanup_on_provider()
for name in vm_names]
)
subject = VersionPicker({
LOWEST: "your virtual machine configuration was approved",
"5.10": "your {} request was approved".format(vm_type)
}).pick()
try:
wait_for(
lambda:
len(filter(
lambda mail:
normalize_text(subject) in normalize_text(mail["subject"]),
smtp_test.get_emails())) == 1,
num_sec=120, delay=5)
except TimedOutError:
subjects = ",".join([normalize_text(m["subject"]) for m in smtp_test.get_emails()])
logger.error("expected: %s, got emails: %s", subject, subjects)
raise
smtp_test.clear_database()
# Wait for the VM to appear on the provider backend before proceeding to ensure proper cleanup
logger.info('Waiting for vms %s to appear on provider %s', ", ".join(vm_names), provider.key)
wait_for(
lambda: all(map(provider.mgmt.does_vm_exist, vm_names)),
handle_exception=True, num_sec=600)
provision_request.wait_for_request(method='ui')
msg = "Provisioning failed with the message {}".format(provision_request.row.last_message.text)
assert provision_request.is_succeeded(method='ui'), msg
subject = VersionPicker({
LOWEST: "your virtual machine request has completed vm {}".format(vm_name),
"5.10": "your {} request has completed vm name {}".format(vm_type, vm_name)
}).pick()
# Wait for e-mails to appear
def verify():
return (
len(filter(
lambda mail: normalize_text(subject) in normalize_text(mail["subject"]),
smtp_test.get_emails())) == len(vm_names)
)
try:
wait_for(verify, message="email receive check", delay=5)
except TimedOutError:
subjects = ",".join([normalize_text(m["subject"]) for m in smtp_test.get_emails()])
logger.error("expected: %s, got emails: %s", subject, subjects)
raise
@pytest.mark.parametrize('auto', [True, False], ids=["Auto", "Manual"])
def test_provision_from_template_using_rest(appliance, request, provider, vm_name, auto):
""" Tests provisioning from a template using the REST API.
Metadata:
test_flag: provision, rest
Polarion:
assignee: pvala
casecomponent: Rest
caseimportance: high
initialEstimate: 1/30h
"""
if auto:
form_values = {"vm_fields": {"placement_auto": True}}
else:
form_values = None
collection = appliance.provider_based_collection(provider)
instance = collection.create_rest(vm_name, provider, form_values=form_values)
wait_for(
lambda: instance.exists,
num_sec=1000, delay=5, message="VM {} becomes visible".format(vm_name))
VOLUME_METHOD = ("""
prov = $evm.root["miq_provision"]
prov.set_option(
:clone_options,
{{ :block_device_mapping => [{}] }})
""")
ONE_FIELD = """{{:volume_id => "{}", :device_name => "{}"}}"""
@pytest.fixture(scope="module")
def domain(request, appliance):
domain = appliance.collections.domains.create(name=fauxfactory.gen_alphanumeric(), enabled=True)
request.addfinalizer(domain.delete_if_exists)
return domain
@pytest.fixture(scope="module")
def original_request_class(appliance):
return (appliance.collections.domains.instantiate(name='ManageIQ')
.namespaces.instantiate(name='Cloud')
.namespaces.instantiate(name='VM')
.namespaces.instantiate(name='Provisioning')
.namespaces.instantiate(name='StateMachines')
.classes.instantiate(name='Methods'))
@pytest.fixture(scope="module")
def modified_request_class(request, domain, original_request_class):
with pytest.raises(Exception, match="error: Error during 'Automate Class copy'"):
        # methods of this class might have been copied by another fixture, so this error can occur
original_request_class.copy_to(domain)
klass = (domain
.namespaces.instantiate(name='Cloud')
.namespaces.instantiate(name='VM')
.namespaces.instantiate(name='Provisioning')
.namespaces.instantiate(name='StateMachines')
.classes.instantiate(name='Methods'))
request.addfinalizer(klass.delete_if_exists)
return klass
@pytest.fixture(scope="module")
def copy_domains(original_request_class, domain):
methods = ['openstack_PreProvision', 'openstack_CustomizeRequest']
for method in methods:
original_request_class.methods.instantiate(name=method).copy_to(domain)
# Not collected for EC2 in generate_tests above
@pytest.mark.parametrize("disks", [1, 2])
@pytest.mark.provider([OpenStackProvider], required_fields=[['provisioning', 'image']],
override=True)
def test_cloud_provision_from_template_with_attached_disks(
appliance, request, instance_args, provider, disks, soft_assert, domain,
modified_request_class, copy_domains, provisioning):
""" Tests provisioning from a template and attaching disks
Metadata:
test_flag: provision
Polarion:
assignee: jhenner
caseimportance: high
casecomponent: Provisioning
initialEstimate: 1/4h
"""
vm_name, inst_args = instance_args
    # Modify availability_zone for Azure provider
if provider.one_of(AzureProvider):
recursive_update(inst_args, {'environment': {'availability_zone': provisioning("av_set")}})
device_name = "/dev/sd{}"
device_mapping = []
with provider.mgmt.with_volumes(1, n=disks) as volumes:
for i, volume in enumerate(volumes):
device_mapping.append((volume, device_name.format(chr(ord("b") + i))))
# Set up automate
method = modified_request_class.methods.instantiate(name="openstack_PreProvision")
with update(method):
disk_mapping = []
for mapping in device_mapping:
disk_mapping.append(ONE_FIELD.format(*mapping))
method.script = VOLUME_METHOD.format(", ".join(disk_mapping))
def _finish_method():
with update(method):
method.script = """prov = $evm.root["miq_provision"]"""
request.addfinalizer(_finish_method)
instance = appliance.collections.cloud_instances.create(vm_name,
provider,
form_values=inst_args)
for volume_id in volumes:
soft_assert(vm_name in provider.mgmt.volume_attachments(volume_id))
for volume, device in device_mapping:
soft_assert(provider.mgmt.volume_attachments(volume)[vm_name] == device)
instance.mgmt.delete() # To make it possible to delete the volume
wait_for(lambda: not instance.exists_on_provider, num_sec=180, delay=5)
# Not collected for EC2 in generate_tests above
@pytest.mark.provider([OpenStackProvider], required_fields=[['provisioning', 'image']],
override=True)
def test_provision_with_boot_volume(request, instance_args, provider, soft_assert,
modified_request_class, appliance, copy_domains):
""" Tests provisioning from a template and attaching one booting volume.
Metadata:
test_flag: provision, volumes
Polarion:
assignee: jhenner
caseimportance: high
casecomponent: Provisioning
initialEstimate: 1/4h
"""
vm_name, inst_args = instance_args
image = inst_args.get('template_name')
with provider.mgmt.with_volume(1, imageRef=provider.mgmt.get_template_id(image)) as volume:
# Set up automate
method = modified_request_class.methods.instantiate(name="openstack_CustomizeRequest")
with update(method):
method.script = dedent('''\
$evm.root["miq_provision"].set_option(
:clone_options, {{
:image_ref => nil,
:block_device_mapping_v2 => [{{
:boot_index => 0,
:uuid => "{}",
:device_name => "vda",
:source_type => "volume",
:destination_type => "volume",
:volume_size => 1,
:delete_on_termination => false
}}]
}}
)
'''.format(volume))
@request.addfinalizer
def _finish_method():
with update(method):
method.script = """prov = $evm.root["miq_provision"]"""
instance = appliance.collections.cloud_instances.create(vm_name,
provider,
form_values=inst_args)
request_description = 'Provision from [{}] to [{}]'.format(image,
instance.name)
provision_request = appliance.collections.requests.instantiate(request_description)
try:
provision_request.wait_for_request(method='ui')
except Exception as e:
logger.info(
"Provision failed {}: {}".format(e, provision_request.request_state))
raise
msg = "Provisioning failed with the message {}".format(
provision_request.row.last_message.text)
assert provision_request.is_succeeded(method='ui'), msg
soft_assert(instance.name in provider.mgmt.volume_attachments(volume))
soft_assert(provider.mgmt.volume_attachments(volume)[instance.name] == "/dev/vda")
instance.mgmt.delete() # To make it possible to delete the volume
wait_for(lambda: not instance.exists_on_provider, num_sec=180, delay=5)
# Not collected for EC2 in generate_tests above
@pytest.mark.provider([OpenStackProvider], required_fields=[['provisioning', 'image']],
override=True)
def test_provision_with_additional_volume(request, instance_args, provider, small_template,
soft_assert, modified_request_class, appliance,
copy_domains):
""" Tests provisioning with setting specific image from AE and then also making it create and
attach an additional 3G volume.
Metadata:
test_flag: provision, volumes
Polarion:
assignee: jhenner
caseimportance: high
casecomponent: Provisioning
initialEstimate: 1/4h
"""
vm_name, inst_args = instance_args
# Set up automate
method = modified_request_class.methods.instantiate(name="openstack_CustomizeRequest")
try:
image_id = provider.mgmt.get_template(small_template.name).uuid
except KeyError:
pytest.skip("No small_template in provider data!")
with update(method):
method.script = dedent('''\
$evm.root["miq_provision"].set_option(
:clone_options, {{
:image_ref => nil,
:block_device_mapping_v2 => [{{
:boot_index => 0,
:uuid => "{}",
:device_name => "vda",
:source_type => "image",
:destination_type => "volume",
:volume_size => 3,
:delete_on_termination => false
}}]
}}
)
'''.format(image_id))
def _finish_method():
with update(method):
method.script = """prov = $evm.root["miq_provision"]"""
request.addfinalizer(_finish_method)
instance = appliance.collections.cloud_instances.create(vm_name,
provider,
form_values=inst_args)
request_description = 'Provision from [{}] to [{}]'.format(small_template.name, instance.name)
provision_request = appliance.collections.requests.instantiate(request_description)
try:
provision_request.wait_for_request(method='ui')
except Exception as e:
logger.info(
"Provision failed {}: {}".format(e, provision_request.request_state))
raise
assert provision_request.is_succeeded(method='ui'), (
"Provisioning failed with the message {}".format(
provision_request.row.last_message.text))
instance.mgmt.refresh()
prov_instance_raw = instance.mgmt.raw
try:
assert hasattr(prov_instance_raw, 'os-extended-volumes:volumes_attached')
volumes_attached = getattr(prov_instance_raw, 'os-extended-volumes:volumes_attached')
assert len(volumes_attached) == 1
volume_id = volumes_attached[0]["id"]
assert provider.mgmt.volume_exists(volume_id)
volume = provider.mgmt.get_volume(volume_id)
assert volume.size == 3
finally:
instance.cleanup_on_provider()
wait_for(lambda: not instance.exists_on_provider, num_sec=180, delay=5)
if "volume_id" in locals(): # To handle the case of 1st or 2nd assert
if provider.mgmt.volume_exists(volume_id):
provider.mgmt.delete_volume(volume_id)
def test_provision_with_tag(appliance, vm_name, tag, provider, request):
""" Tests tagging instance using provisioning dialogs.
Steps:
* Open the provisioning dialog.
* Apart from the usual provisioning settings, pick a tag.
* Submit the provisioning request and wait for it to finish.
* Visit instance page, it should display the selected tags
Metadata:
test_flag: provision
Polarion:
assignee: anikifor
casecomponent: Tagging
initialEstimate: 1/4h
"""
inst_args = {'purpose': {
'apply_tags': Check_tree.CheckNode(
['{} *'.format(tag.category.display_name), tag.display_name])}}
collection = appliance.provider_based_collection(provider)
instance = collection.create(vm_name, provider, form_values=inst_args)
request.addfinalizer(instance.cleanup_on_provider)
tags = instance.get_tags()
assert any(
instance_tag.category.display_name == tag.category.display_name and
instance_tag.display_name == tag.display_name for instance_tag in tags), (
"{}: {} not in ({})".format(tag.category.display_name, tag.display_name, str(tags)))
| gpl-2.0 | 935,850,192,545,471,500 | 38.785235 | 100 | 0.61568 | false | 4.19014 | true | false | false |
pythonalliance/uno2bot | settings.py | 3 | 3597 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Telegram bot to play UNO in group chats
# Copyright (c) 2016 Jannes Höke <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from telegram import ReplyKeyboardMarkup
from telegram.ext import CommandHandler, RegexHandler
from utils import send_async
from user_setting import UserSetting
from shared_vars import dispatcher
from locales import available_locales
from internationalization import _, user_locale
@user_locale
def show_settings(bot, update):
chat = update.message.chat
if update.message.chat.type != 'private':
send_async(bot, chat.id,
text=_("Please edit your settings in a private chat with "
"the bot."))
return
us = UserSetting.get(id=update.message.from_user.id)
if not us:
us = UserSetting(id=update.message.from_user.id)
if not us.stats:
stats = '📊' + ' ' + _("Enable statistics")
else:
stats = '❌' + ' ' + _("Delete all statistics")
kb = [[stats], ['🌍' + ' ' + _("Language")]]
send_async(bot, chat.id, text='🔧' + ' ' + _("Settings"),
reply_markup=ReplyKeyboardMarkup(keyboard=kb,
one_time_keyboard=True))
@user_locale
def kb_select(bot, update, groups):
chat = update.message.chat
user = update.message.from_user
option = groups[0]
if option == '📊':
us = UserSetting.get(id=user.id)
us.stats = True
send_async(bot, chat.id, text=_("Enabled statistics!"))
elif option == '🌍':
kb = [[locale + ' - ' + descr]
for locale, descr
in sorted(available_locales.items())]
send_async(bot, chat.id, text=_("Select locale"),
reply_markup=ReplyKeyboardMarkup(keyboard=kb,
one_time_keyboard=True))
elif option == '❌':
us = UserSetting.get(id=user.id)
us.stats = False
us.first_places = 0
us.games_played = 0
us.cards_played = 0
send_async(bot, chat.id, text=_("Deleted and disabled statistics!"))
@user_locale
def locale_select(bot, update, groups):
chat = update.message.chat
user = update.message.from_user
option = groups[0]
if option in available_locales:
us = UserSetting.get(id=user.id)
us.lang = option
_.push(option)
send_async(bot, chat.id, text=_("Set locale!"))
_.pop()
def register():
dispatcher.add_handler(CommandHandler('settings', show_settings))
dispatcher.add_handler(RegexHandler('^([' + '📊' +
'🌍' +
'❌' + ']) .+$',
kb_select, pass_groups=True))
dispatcher.add_handler(RegexHandler(r'^(\w\w_\w\w) - .*',
locale_select, pass_groups=True))
| agpl-3.0 | -8,267,733,360,761,920,000 | 32.669811 | 77 | 0.591482 | false | 3.808965 | false | false | false |
MicroPyramid/MicroSite | micro_blog/tasks.py | 1 | 5094 | from celery.decorators import task
import sendgrid
from django.conf import settings
from micro_blog.models import Subscribers, Post, Category
from .send_grid import *
import datetime
from micro_admin.models import User
from django.template import loader
@task
def create_contact_in_category(category_name, email_address):
'''
    Checks whether the category exists or not. If it does, it creates a contact, saves it in the
    database under the sendgrid_user_id field and then adds the contact to the category list. If it
    does not, it creates the category list first, then the contact, and then adds the contact to the
    new category list, which eliminates duplicates. If any contact or list already exists, the
    existing object is returned, which avoids creating duplicates.
    Tested Cases:
    existing category new user -PASS
    existing category existing user -PASS
    new category existing user - PASS
    new category new user -PASS
'''
contact_lists = get_contact_lists()
if category_name in contact_lists:
contact_id = create_contact(email_address)
CONTACTS_ENDPOINT = "https://api.sendgrid.com/v3/contactdb/lists/" + contact_lists[category_name] + \
"/" + "recipients/" + contact_id
response = requests.post(CONTACTS_ENDPOINT, headers=headers)
else:
contact_list_id = create_contact_list(category_name)
contact_id = create_contact(email_address)
CONTACTS_ENDPOINT = "https://api.sendgrid.com/v3/contactdb/" + \
"lists/{0}/recipients/{1}".format(contact_list_id, contact_id)
response = requests.post(CONTACTS_ENDPOINT, headers=headers)
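# Minimal usage sketch (assumed category name and e-mail address, not from the original
# module); .delay() queues the task above through celery, a plain call would run it inline.
def _example_queue_contact():
    create_contact_in_category.delay('Django', '[email protected]')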
@task
def sending_mail_to_subscribers():
blog_posts = Post.objects.filter(published_on=datetime.datetime.today(), status='P')
subscribers = Subscribers.objects.filter(blog_post=True)
for blog_post in blog_posts:
blog_url = 'https://www.micropyramid.com/blog/' + str(blog_post.slug) + '/'
for subscriber in subscribers:
sg = sendgrid.SendGridClient(settings.SG_USER, settings.SG_PWD)
contact_msg = sendgrid.Mail()
contact_msg.set_subject("New Blog Post | MicroPyramid")
message_reply = 'Hello ' + str(subscriber.email) + ',\n\n'
            message_reply = message_reply + '<p>New blog post has been created by ' + str(blog_post.author) + \
' with the name ' + str(blog_post.title) + ' in the category ' + str(blog_post.category.name) + '.</p>'
message_reply = message_reply + 'Regards<br>'
message_reply = message_reply + 'The MicroPyramid Team.<br>'
contact_msg.set_html(message_reply)
contact_msg.set_from("[email protected]")
contact_msg.add_to(subscriber.email)
sg.send(contact_msg)
@task
def report_on_blog_post_published_limit():
import datetime
date = datetime.date.today()
start_week = date - \
datetime.timedelta(date.weekday()) - datetime.timedelta(1)
end_week = start_week + datetime.timedelta(6)
posts = Post.objects.filter(published_on__range=(start_week, end_week))
blog_posts = Post.objects.filter(created_on__range=(start_week, end_week))
from django.db.models import Sum, Count, Q, F
incomplete_categories = Category.objects.filter(blog_posts__published_on__range=(start_week, end_week)).annotate(total_blog_posts=Count('blog_posts')).filter(total_blog_posts__lt=F('min_published_blogs'))
categories = Category.objects.filter()
incomplete_categories = []
for each in categories:
blog_posts = each.blog_posts.filter(published_on__range=(start_week, end_week))
each_dict = {}
if blog_posts.count() < each.min_published_blogs:
each_dict['category'] = each
each_dict['total_blog_posts'] = blog_posts.count()
incomplete_categories.append(each_dict)
complete_categories = Category.objects.filter(blog_posts__published_on__range=(start_week, end_week)).annotate(total_blog_posts=Count('blog_posts')).filter(total_blog_posts__gte=F('min_published_blogs'))
users = User.objects.filter(is_admin=True)
formatted_start_week = datetime.datetime.strptime(
str(start_week), "%Y-%m-%d").strftime("%d-%m-%Y")
formatted_end_week = datetime.datetime.strptime(
str(end_week), "%Y-%m-%d").strftime("%d-%m-%Y")
min_blogposts = 0
for user in users:
sg = sendgrid.SendGridClient(settings.SG_USER, settings.SG_PWD)
contact_msg = sendgrid.Mail()
temp = loader.get_template('admin/blogposts_report.html')
rendered = temp.render({'posts': posts, 'blog_posts': blog_posts, 'start_week': start_week, 'end_week': end_week, 'user': user, 'complete_categories': complete_categories, 'incomplete_categories': incomplete_categories})
contact_msg.set_html(rendered)
contact_msg.set_text("Report")
contact_msg.set_subject('Blog Post Report '+ formatted_start_week + ' - ' + formatted_end_week + ' - MicroPyramid')
contact_msg.set_from("[email protected]")
contact_msg.add_to(user.email)
sg.send(contact_msg) | gpl-2.0 | -5,380,669,181,917,490,000 | 47.990385 | 228 | 0.672163 | false | 3.662114 | false | false | false |
wiredrive/wtframework | wtframework/wtf/utils/file_utils.py | 1 | 4013 | ##########################################################################
# This file is part of WTFramework.
#
# WTFramework is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# WTFramework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WTFramework. If not, see <http://www.gnu.org/licenses/>.
##########################################################################
import re
"""
This module contains functions for working with files.
"""
import os
import tempfile
from wtframework.wtf.utils.data_utils import generate_timestamped_string
import urllib
import codecs
def temp_path(file_name=None):
"""
Gets a temp path.
Kwargs:
file_name (str) : if file name is specified, it gets appended to the temp dir.
Usage::
temp_file_path = temp_path("myfile")
copyfile("myfile", temp_file_path) # copies 'myfile' to '/tmp/myfile'
"""
if file_name is None:
file_name = generate_timestamped_string("wtf_temp_file")
return os.path.join(tempfile.gettempdir(), file_name)
def create_temp_file(file_name=None, string_or_another_file=""):
"""
Creates a temp file using a given name. Temp files are placed in the Project/temp/
    directory. Creating a temp file with the name of an existing temp file will
    overwrite it. This is useful for testing uploads, where you would want to create a
temporary file with a desired name, upload it, then delete the file when you're
done.
Kwargs:
file_name (str): Name of file
string_or_another_file: Contents to set this file to. If this is set to a file,
it will copy that file. If this is set to a string, then
it will write this string to the temp file.
Return:
str - Returns the file path to the generated temp file.
Usage::
temp_file_path = create_temp_file("mytestfile", "The nimble fox jumps over the lazy dog.")
file_obj = open(temp_file_path)
os.remove(temp_file_path)
"""
temp_file_path = temp_path(file_name)
if isinstance(string_or_another_file, file):
# attempt to read it as a file.
temp_file = open(temp_file_path, "wb")
temp_file.write(string_or_another_file.read())
else:
# handle as a string type if we can't handle as a file.
temp_file = codecs.open(temp_file_path, "w+", "utf-8")
temp_file.write(string_or_another_file)
temp_file.close()
return temp_file_path
def download_to_tempfile(url, file_name=None, extension=None):
"""
    Downloads a URL's contents to a tempfile. This is useful for testing downloads.
It will download the contents of a URL to a tempfile, which you then can
open and use to validate the downloaded contents.
Args:
url (str) : URL of the contents to download.
Kwargs:
file_name (str): Name of file.
extension (str): Extension to use.
Return:
str - Returns path to the temp file.
"""
if not file_name:
file_name = generate_timestamped_string("wtf_temp_file")
if extension:
file_path = temp_path(file_name + extension)
else:
ext = ""
try:
ext = re.search(u"\\.\\w+$", file_name).group(0)
except:
pass
file_path = temp_path(file_name + ext)
webFile = urllib.urlopen(url)
localFile = open(file_path, 'w')
localFile.write(webFile.read())
webFile.close()
localFile.close()
return file_path
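# Minimal usage sketch for download_to_tempfile (assumed URL, not part of the original
# module); mirrors the Usage:: blocks of the functions above.
def _example_download():
    "Download a page to a temp file and return its contents."
    file_path = download_to_tempfile("http://www.example.com", extension=".html")
    downloaded = open(file_path)
    contents = downloaded.read()
    downloaded.close()
    return contents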
| gpl-3.0 | 8,690,845,209,346,460,000 | 31.362903 | 98 | 0.622228 | false | 3.922776 | false | false | false |
mondwan/ProjectCUWG | cuwg/xss/sanitization.py | 1 | 5666 | """
- `File`: sanitization.py
- `Description`: xss challenge 1: How importatn to do sanitization
"""
import webapp2
from google.appengine.ext.webapp import template
import re
import os
import constants
import random
import logging
import urlparse
import urllib
class ResultVerifyHandler(webapp2.RequestHandler):
def get(self):
url = urllib.unquote(self.request.url).decode('utf8')
url = url.replace(" ", "")
parsed = urlparse.urlparse(url)
logging.info('ResultVerifyHandler %s ' % url)
user = urlparse.parse_qs(parsed.query)['user']
logging.info('user=%s' % user[0])
ctx = SanitizationHandler.getBreadcrumbContext()
ctx['isSucceeded'] = True
if(user[0] != 'mary'):
ctx['isSucceeded'] = False
ctx['userError'] = True
self.response.write(
template.render(
os.path.join(
constants.TPL_DIR,
'sanitization.tpl'
),
ctx
)
)
else:
logging.info('ResultVerifyHandler')
self.response.write(
template.render(
os.path.join(
constants.TPL_DIR,
'sanitization.tpl'
),
ctx
)
)
class ReviewFormHandler(webapp2.RequestHandler):
"""Handler for /xss/sanitization/reviewForm"""
def deleteSID(self):
# Also delete cookie set by browser's console
self.response.delete_cookie(
'sid',
path='/xss/sanitization'
)
self.response.delete_cookie(
'sid',
path='/xss/sanitization/reviewForm'
)
def get(self):
sid = self.request.cookies.get('sid')
if isinstance(sid, basestring) and re.match('^john:[0-9]{1,2}', sid):
ctx = SanitizationHandler.getBreadcrumbContext()
ctx['sid'] = sid
ctx['owner'] = 'john'
self.response.write(
template.render(
os.path.join(
constants.TPL_DIR,
'sanitization.tpl'
),
ctx
)
)
else:
# Back to /xss/sanitization if there is no valid cookie
# present
self.deleteSID()
self.redirect('/xss/sanitization')
def post(self):
sid = self.request.cookies.get('sid')
action = self.request.POST['action']
logging.info('sid=%s' % sid)
if action == 'logout':
# Delete cookie and back to /transaction/sessionHijack
self.deleteSID()
self.redirect('/xss/sanitization')
else:
review = self.request.get('comment')
ctx = SanitizationHandler.getBreadcrumbContext()
ctx['sid'] = sid
ctx['review'] = review;
logging.info('review=%s' % review)
# Transfer on behalf of john
user = self.request.get('user')
ctx['owner'] = self.request.get('user')
self.response.set_cookie(
'user',
user,
max_age=60,
path='/xss/sanitization/reviewForm'
)
self.response.headers['X-XSS-Protection'] = '0'
self.response.write(
template.render(
os.path.join(
constants.TPL_DIR,
'sanitization.tpl'
),
ctx
)
)
class SanitizationHandler(webapp2.RequestHandler):
"""Handler for /xss/sanitization"""
@classmethod
def getBreadcrumbContext(cls):
return {
'breadcrumb': [{
'name': 'Home',
'href': '/',
'active': False,
}, {
'name': 'XSS',
'href': '/xss',
'active': False,
}, {
'name': 'Sanitizing Input',
'href': '/xss/sanitization',
'active': True,
}],
}
def get(self):
ctx = SanitizationHandler.getBreadcrumbContext()
self.response.write(
template.render(
os.path.join(constants.TPL_DIR, 'sanitization.tpl'),
ctx
)
)
def post(self):
# Get username and password
name = self.request.get('name')
pw = self.request.get('pw')
if (name == 'john' and pw == 'john'):
redirectPath = '/xss/sanitization/reviewForm'
# Redirect to /xss/sanitization/reviewForm
sid = 'john:%d' % random.randint(1, 10)
logging.info('sid=%s' % sid)
self.response.status = 302
self.response.set_cookie(
'sid',
sid,
max_age=60,
path=redirectPath
)
self.redirect(redirectPath)
else:
passwordIncorrect = True
ctx = SanitizationHandler.getBreadcrumbContext()
ctx['passwordIncorrect'] = passwordIncorrect
self.response.write(
template.render(
os.path.join(constants.TPL_DIR, 'sanitization.tpl'),
ctx
)
)
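# Minimal sketch of how these handlers could be wired up (assumed routes; the real
# routing lives in the project's WSGI configuration, not in this module).
example_app = webapp2.WSGIApplication([
    ('/xss/sanitization', SanitizationHandler),
    ('/xss/sanitization/reviewForm', ReviewFormHandler),
    ('/xss/sanitization/resultVerify', ResultVerifyHandler),
], debug=False)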
| mit | -6,960,506,244,541,080,000 | 28.821053 | 77 | 0.465055 | false | 4.543705 | false | false | false |
kevinkahn/softconsole | githubutil.py | 1 | 3389 | import datetime
import os
import requests
import shutil
import subprocess
import time
"""
NOTE: This gets used in initial setup of console by the setup program
** Don't add any dependencies on other parts of the console (e.g., no logging)
"""
def StageVersion(vdir, tag, label):
logf = open('stagelog.log', 'w')
print(datetime.datetime.now().strftime("%I:%M%p on %B %d, %Y"), file=logf)
print("Staging " + tag + " in " + vdir + ' because ' + label, file=logf)
cwd = os.getcwd()
try:
os.chdir(vdir)
except Exception as E:
print("Staging directory {} doesn't exist - try to create it ({})".format(vdir, E))
os.mkdir(vdir)
os.chdir(vdir)
shutil.rmtree('stagedversion', True)
os.mkdir('stagedversion')
os.chdir('stagedversion')
if tag == '*live*':
subprocess.call('wget https://github.com/kevinkahn/softconsole/tarball/master', shell=True, stdout=logf, stderr=logf)
subprocess.call('tar -zxls --strip-components=1 < master', shell=True, stdout=logf, stderr=logf)
subprocess.call('chown -R pi: *', shell=True, stdout=logf, stderr=logf)
os.remove('master')
else:
subprocess.call('wget https://github.com/kevinkahn/softconsole/archive/' + tag + '.tar.gz', shell=True, stdout=logf, stderr=logf)
subprocess.call('tar -zxls --strip-components=1 < ' + tag + '.tar.gz', shell=True, stdout=logf, stderr=logf)
sha, cdate = GetSHA(tag)
with open('versioninfo', 'w') as f:
f.writelines(['{0}\n'.format(tag), '{0}\n'.format(sha), label + ': ' + time.strftime('%m-%d-%y %H:%M:%S\n'),
'Commit of: {0}\n'.format(cdate)])
os.remove(tag + '.tar.gz')
# noinspection PyBroadException
try:
os.chmod('runconsole.py', 0o555)
except:
pass
# noinspection PyBroadException
try:
os.chmod('console.py', 0o555)
except:
pass
os.chdir(cwd)
logf.close()
# noinspection PyBroadException
def InstallStagedVersion(d):
logf = open('stagelog.log', 'a')
print("Installing", file=logf)
shutil.rmtree(d + '/previousversion', True) # don't keep multiple previous version in tree
os.rename(d, d + '.TMP') # move active directory to temp
os.rename(d + '.TMP/stagedversion', d) # move new version into place
os.rename(d + '.TMP', d + '/previousversion') # save previous version
os.chdir(d)
if os.path.exists('../homesystem'):
# noinspection PyBroadException
try:
subprocess.call('cp -u -r -p "example configs"/* ../Console', shell=True, stdout=logf, stderr=logf)
except:
print('Copy of example configs failed on homesystem', file=logf)
if not os.path.exists('../Console/termshortenlist'):
try:
os.rename('example configs/termshortenlist', '../Console/termshortenlist')
print("Initialized termshortenlist", file=logf)
except:
print("Couldn't move termshortenlist in " + str(os.getcwd()), file=logf)
print('Process upgrade extras script', file=logf)
subprocess.call('sudo bash ' + './scripts/upgradeprep.sh', shell=True, stdout=logf, stderr=logf)
print('End upgrade extras script', file=logf)
logf.close()
os.chdir('..')
def GetSHA(tag):
r = requests.get('https://api.github.com/repos/kevinkahn/softconsole/tags')
d = r.json()
sha = 'not found'
url = 'none'
for i in d:
if i['name'] == tag:
sha = i['commit']['sha']
url = i['commit']['url']
break
if sha == 'not found':
return 'no current sha', 'no release info'
r = requests.get(url)
d = r.json()
c = d['commit']['committer']['date']
return sha, c
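# Minimal sketch of the intended call sequence (assumed version directory and tag;
# the console passes its own staging path and release label).
def _example_upgrade(vdir='/home/pi/pyconsole.v2'):
    StageVersion(vdir, 'currentrelease', 'example upgrade')
    InstallStagedVersion(vdir)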
| apache-2.0 | -4,751,111,268,660,103,000 | 32.22549 | 131 | 0.676011 | false | 2.872034 | false | false | false |
wa4557/chromecast-player | playlist_manager.py | 1 | 22058 | from gi import require_version
require_version("Gtk", "3.0")
from gi.repository import Gtk, GdkPixbuf, GLib, Gdk
import helpers
from stream_select import FileChooserWindow, NetworkStream
class PlaylistManager(Gtk.Window):
def __init__(self, playlist, enable_web, transcoder, probe, preferred_transcoder, counter):
self.win = Gtk.Window(type=Gtk.WindowType.TOPLEVEL)
theme = Gtk.IconTheme.get_default()
self.playimage = theme.load_icon("media-playback-start", 16,0)
self.store = Gtk.ListStore(GdkPixbuf.Pixbuf, str, str, int, int, str, str, str, str)
self.selection_index = None
self.create_model(playlist)
if counter:
self.store[counter][0] = self.playimage
self.playlist_counter = None
self.play_now = False
self.playlist_changed = False
self.double_clicked = False
self.drag_index = None
self.transcoder = transcoder
self.number_clicked = 0
self.double_clicked_index = None
self.probe = probe
self.preferred_transcoder = preferred_transcoder
self.enable_web = enable_web
self.show_image = True
self.sorted_index = None
def exit(self, *args):
self.win.close()
def check_uris(self, play_uri):
uri_win = []
item = self.store.get_iter_first()
while (item != None):
uri_win.append(self.store.get_value(item, 1))
item = self.store.iter_next(item)
player_uri = [pl[0] for pl in play_uri]
if uri_win != player_uri:
self.create_model(play_uri)
def main(self):
self.win.set_title("Manage playlist")
vboxall = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
vboxmanager = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
hboxbuttons = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
filebutton = Gtk.Button('_Open', use_underline=True)
filebutton.connect('clicked', self._on_file_clicked)
self.netbutton = Gtk.Button('_Open network stream', use_underline=True)
self.netbutton.connect('clicked', self._on_net_stream_clicked)
deletebutton = Gtk.Button()
deleteButtonImage = Gtk.Image()
deleteButtonImage.set_from_stock(Gtk.STOCK_REMOVE, Gtk.IconSize.BUTTON)
deletebutton.add(deleteButtonImage)
topbutton = Gtk.Button()
topButtonImage = Gtk.Image()
topButtonImage.set_from_stock(Gtk.STOCK_GOTO_TOP, Gtk.IconSize.BUTTON)
topbutton.add(topButtonImage)
upbutton = Gtk.Button()
upButtonImage = Gtk.Image()
upButtonImage.set_from_stock(Gtk.STOCK_GO_UP, Gtk.IconSize.BUTTON)
upbutton.add(upButtonImage)
bottombutton = Gtk.Button()
bottomButtonImage = Gtk.Image()
bottomButtonImage.set_from_stock(Gtk.STOCK_GOTO_BOTTOM, Gtk.IconSize.BUTTON)
bottombutton.add(bottomButtonImage)
downbutton = Gtk.Button()
downButtonImage = Gtk.Image()
downButtonImage.set_from_stock(Gtk.STOCK_GO_DOWN, Gtk.IconSize.BUTTON)
downbutton.add(downButtonImage)
okbutton = Gtk.Button('_Close', use_underline=True)
okbutton.connect("clicked", self.exit)
mainmenu = Gtk.Menu()
filem = Gtk.MenuItem("Open")
self.streamm = Gtk.MenuItem("Open network stream")
if not self.enable_web:
self.streamm.set_sensitive(False)
exit = Gtk.MenuItem("Close")
root_menu = Gtk.MenuItem('File')
root_menu.set_submenu(mainmenu)
menu_bar = Gtk.MenuBar()
mainmenu.append(filem)
mainmenu.append(self.streamm)
mainmenu.append(exit)
menu_bar.append(root_menu)
sw = Gtk.ScrolledWindow()
sw.set_shadow_type(Gtk.ShadowType.IN)
sw.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
self.treeView = Gtk.TreeView(self.store)
self.treeView.set_grid_lines(Gtk.TreeViewGridLines.BOTH)
self.create_columns(self.treeView)
targets = []
self.treeView.enable_model_drag_source(Gdk.ModifierType.BUTTON1_MASK, self.treeView,
Gdk.DragAction.MOVE)
self.treeView.drag_dest_set(Gtk.DestDefaults.ALL, targets, Gdk.DragAction.MOVE)
self.treeView.connect("drag-data-received", self._on_drag_data_received)
self.treeView.connect("drag-drop", self._drag_dropped)
self.treeView.connect("drag-end", self._drag_finished)
self.drag_finished = False
sw.add(self.treeView)
self.treeView.set_reorderable(True)
okbutton.set_margin_right(10)
filebutton.set_margin_left(10)
deletebutton.set_margin_left(200)
hboxbuttons.pack_start(filebutton, False, False, 0)
hboxbuttons.pack_start(self.netbutton, False, False, 10)
hboxbuttons.pack_start(deletebutton, False, False, 0)
hboxbuttons.pack_start(bottombutton, False, False, 10)
hboxbuttons.pack_start(downbutton, False, False, 0)
hboxbuttons.pack_start(upbutton, False, False, 10)
hboxbuttons.pack_start(topbutton, False, False, 0)
hboxbuttons.pack_end(okbutton, False, False, 0)
vboxmanager.pack_start(sw, True, True, 0)
vboxall.pack_start(vboxmanager, True, True, 0)
vboxall.pack_end(hboxbuttons, False, False, 10)
vboxall.pack_start(menu_bar, False, False, 0)
deletebutton.connect("clicked", self._on_delete_clicked)
upbutton.connect("clicked", self._on_up_clicked)
downbutton.connect("clicked", self._on_down_clicked)
topbutton.connect("clicked", self._on_top_clicked)
bottombutton.connect("clicked", self._on_bottom_clicked)
filem.connect('activate', self._on_file_clicked)
self.streamm.connect('activate', self._on_net_stream_clicked)
self.treeView.connect("row-activated", self._double_clicked)
exit.connect("activate", self.exit)
self.win.set_size_request(1200, 700)
self.win.add(vboxall)
self.win.show_all()
def _on_drag_data_received(self, *args):
if not self.drag_finished:
return
else:
self.drag_finished = False
index_source = self.get_selected_index()
self.index_source = index_source
self.source_uri = self.store[index_source][1]
def _drag_finished(self, *args):
if self.index_source is None:
return
for i, row in enumerate(self.store):
if row[1] == self.source_uri:
index_drop = i
break
index_source = self.index_source
self.index_source = None
self.index_drop = None
if self.playlist_counter is not None:
if self.playlist_counter == index_source:
self.store[index_source][0] = None
self.store[index_drop][0] = self.playimage
self.sorted_index = index_drop
elif index_source < self.playlist_counter and index_drop >= self.playlist_counter:
self.store[self.playlist_counter][0] = None
self.store[self.playlist_counter-1][0] = self.playimage
self.sorted_index = self.playlist_counter -1
elif index_source > self.playlist_counter and index_drop <= self.playlist_counter:
self.store[self.playlist_counter][0] = None
self.store[self.playlist_counter+1][0] = self.playimage
self.sorted_index = self.playlist_counter + 1
popped = self.play_uri.pop(index_source)
self.play_uri.insert(index_drop, popped)
self.selection_index = index_drop
self.playlist_changed = True
self.treeView.set_cursor(index_drop)
def _drag_dropped(self, *args):
self.remove_sort_indicator()
self.drag_finished = True
self.index_source = None
self.index_drop = None
def _on_delete_clicked(self, *args):
if len(self.store) == 1:
self.play_uri = []
self.delete_at_index(0)
self.playlist_changed = True
return
index = self.get_selected_index()
if self.playlist_counter is not None:
plc = self.playlist_counter + self.number_clicked
if plc == index and self.show_image:
self.number_clicked += -1
elif index < plc:
self.number_clicked += -1
self.delete_at_index(index)
if plc == index and self.show_image:
self.show_image = False
self.selection_index = index - 1
popped = self.play_uri.pop(index)
self.playlist_changed = True
self.remove_sort_indicator()
def _on_up_clicked(self, *args):
index = self.get_selected_index()
if self.playlist_counter is not None:
plc = self.playlist_counter + self.number_clicked
else:
plc = None
if not index == 0:
if self.playlist_counter is not None:
if plc == index:
self.number_clicked += -1
elif plc == index - 1:
self.number_clicked += 1
self.move_item_up()
if plc == index:
self.store[index][0] = None
self.store[index-1][0] = self.playimage
elif plc == index - 1:
self.store[index-1][0] = None
self.store[index][0] = self.playimage
self.selection_index = index - 1
popped = self.play_uri.pop(index)
self.play_uri.insert(index-1, popped)
self.playlist_changed = True
self.remove_sort_indicator()
def _on_down_clicked(self, *args):
index = self.get_selected_index()
if self.playlist_counter is not None:
plc = self.playlist_counter + self.number_clicked
else:
plc = None
if not index == len(self.store)-1:
if self.playlist_counter is not None:
if plc == index:
self.number_clicked += 1
elif plc == index + 1:
self.number_clicked += -1
self.move_item_down()
if plc == index:
self.store[index][0] = None
self.store[index+1][0] = self.playimage
elif plc == index + 1:
self.store[index+1][0] = None
self.store[index][0] = self.playimage
self.selection_index = index + 1
popped = self.play_uri.pop(index)
self.play_uri.insert(index+1, popped)
self.playlist_changed = True
self.remove_sort_indicator()
def _on_top_clicked(self, *args):
index = self.get_selected_index()
if self.playlist_counter is not None:
plc = self.playlist_counter + self.number_clicked
else:
plc = None
if not index == 0:
if self.playlist_counter is not None:
if plc == index:
self.number_clicked += -plc
elif index > plc:
self.number_clicked += 1
self.move_item_top()
if plc == index:
self.store[plc][0] = None
self.store[0][0] = self.playimage
elif plc and index > plc:
self.store[plc][0] = None
self.store[plc+1][0] = self.playimage
self.selection_index = 0
popped = self.play_uri.pop(index)
self.play_uri.insert(0, popped)
self.playlist_changed = True
self.remove_sort_indicator()
def _on_bottom_clicked(self, *args):
index = self.get_selected_index()
if self.playlist_counter is not None:
plc = self.playlist_counter + self.number_clicked
else:
plc = None
if not index == len(self.store)-1:
if self.playlist_counter is not None:
if plc == index:
self.number_clicked += len(self.store) - plc - 1
elif index < plc:
self.number_clicked += -1
self.move_item_bottom()
if plc == index:
self.store[plc][0] = None
self.store[-1][0] = self.playimage
elif plc and index < plc:
self.store[plc][0] = None
self.store[plc-1][0] = self.playimage
self.selection_index = len(self.store)-1
popped = self.play_uri.pop(index)
self.play_uri.append(popped)
self.playlist_changed = True
self.remove_sort_indicator()
def _double_clicked(self, *args):
index = args[1].get_indices()[0]
self.double_clicked_index = index
self.double_clicked = True
self.show_image = True
def _on_file_clicked(self, *args):
win = FileChooserWindow()
ret = win.main()
playlist = self.play_uri.copy()
if ret:
if ret[1] == 1:
self.play_now = True
self.play_uri = []
for i,u in enumerate(ret[0]):
self.play_uri.append(helpers.decode_local_uri(u, self.transcoder, self.probe, self.preferred_transcoder))
else:
for i, u in enumerate(ret[0]):
self.play_uri.append(helpers.decode_local_uri(u, self.transcoder, self.probe, self.preferred_transcoder))
self.playlist_changed = True
self.remove_sort_indicator()
def _on_net_stream_clicked(self, *args):
win = NetworkStream()
ret = win.main()
playlist = self.play_uri.copy()
if ret:
if ret[1] == 1:
self.play_now = True
self.play_uri = []
n = helpers.decode_network_uri(ret[0])
if n:
self.play_uri.append(n)
else:
n = helpers.decode_network_uri(ret[0])
if n:
self.play_uri.append(n)
self.playlist_changed = True
self.remove_sort_indicator()
def _on_column_clicked(self, *args):
column = args[0]
column_index = args[1]
index = self.playlist_counter
order = column.get_sort_order()
self.sort_rows(column, column_index, order)
uri_win = []
item = self.store.get_iter_first()
while (item != None):
uri_win.append(self.store.get_value(item, 1))
item = self.store.iter_next(item)
player_uri = [pl[0] for pl in self.play_uri]
indices = []
for uri in player_uri:
indices.append(uri_win.index(uri))
l = [x for (y,x) in sorted(zip(indices,self.play_uri))]
if index is not None:
self.store[index][0] = None
new_index = indices[index]
self.store[new_index][0] = self.playimage
self.sorted_index = new_index
self.play_uri = l
self.playlist_changed = True
column.set_sort_indicator(True)
def sort_rows(self, column, index, sortorder):
""" Sort the rows based on the given column """
self.remove_sort_indicator()
rows = [tuple(r) + (i,) for i, r in enumerate(self.store)]
if sortorder == Gtk.SortType.ASCENDING:
sortorder = Gtk.SortType.DESCENDING
reverse = False
else:
sortorder = Gtk.SortType.ASCENDING
reverse = True
rows.sort(key=lambda x: x[index], reverse=reverse)
self.store.reorder([r[-1] for r in rows])
column.set_sort_order(sortorder)
def remove_sort_indicator(self):
for k in self.sort_columns:
k[0].set_sort_indicator(False)
def create_model(self, playlist):
self.store.clear()
self.play_uri = playlist[:]
if playlist:
for k in playlist:
self.store.append([None] + self.add_to_playlist(k))
if self.selection_index:
self.treeView.set_cursor(self.selection_index)
self.selection_index = None
def create_columns(self, treeView):
rendererPixbuf = Gtk.CellRendererPixbuf()
pixcolumn = Gtk.TreeViewColumn(None, rendererPixbuf, pixbuf=0)
pixcolumn.set_fixed_width(20)
pixcolumn.set_resizable(False)
treeView.append_column(pixcolumn)
self.sort_columns = [[pixcolumn, 0]]
rendererText = Gtk.CellRendererText()
column = Gtk.TreeViewColumn("URI", rendererText, text=1)
column.set_fixed_width(180)
column.set_resizable(True)
column.set_clickable(True)
column.connect("clicked", self._on_column_clicked, 1)
treeView.append_column(column)
self.sort_columns.append([column, 0])
rendererText = Gtk.CellRendererText()
column = Gtk.TreeViewColumn("Title", rendererText, text=2)
column.set_fixed_width(180)
column.set_resizable(True)
column.set_clickable(True)
column.connect("clicked", self._on_column_clicked, 2)
treeView.append_column(column)
self.sort_columns.append([column, 0])
rendererText = Gtk.CellRendererText()
column = Gtk.TreeViewColumn("Nr", rendererText, text=3)
column.set_fixed_width(40)
column.set_resizable(True)
column.set_clickable(True)
column.connect("clicked", self._on_column_clicked, 3)
treeView.append_column(column)
self.sort_columns.append([column, 0])
rendererText = Gtk.CellRendererText()
column = Gtk.TreeViewColumn("CD", rendererText, text=4)
column.set_fixed_width(40)
column.set_resizable(True)
column.set_clickable(True)
column.connect("clicked", self._on_column_clicked, 4)
treeView.append_column(column)
self.sort_columns.append([column, 0])
rendererText = Gtk.CellRendererText()
column = Gtk.TreeViewColumn("Album", rendererText, text=5)
column.set_fixed_width(180)
column.set_resizable(True)
column.set_clickable(True)
column.connect("clicked", self._on_column_clicked, 5)
treeView.append_column(column)
self.sort_columns.append([column, 0])
rendererText = Gtk.CellRendererText()
column = Gtk.TreeViewColumn("Artist", rendererText, text=6)
column.set_fixed_width(180)
column.set_resizable(True)
column.set_clickable(True)
column.connect("clicked", self._on_column_clicked, 6)
treeView.append_column(column)
self.sort_columns.append([column, 0])
rendererText = Gtk.CellRendererText()
column = Gtk.TreeViewColumn("AlbumArtist", rendererText, text=7)
column.set_fixed_width(180)
column.set_resizable(True)
column.set_clickable(True)
column.connect("clicked", self._on_column_clicked, 7)
treeView.append_column(column)
self.sort_columns.append([column, 0])
rendererText = Gtk.CellRendererText()
column = Gtk.TreeViewColumn("Composer", rendererText, text=8)
column.set_fixed_width(180)
column.set_resizable(True)
column.set_clickable(True)
column.connect("clicked", self._on_column_clicked, 8)
treeView.append_column(column)
self.sort_columns.append([column, 0])
def get_selected_index(self):
sel = self.treeView.get_selection()
model, i = sel.get_selected()
res = model[i].path.get_indices()
return res[0]
def delete_at_index(self, index):
for row in self.store:
if row.path.get_indices()[0] == index:
self.store.remove(row.iter)
break
def move_item_down(self):
selection = self.treeView.get_selection()
selections, model = selection.get_selected_rows()
for row in selections:
if selection.iter_is_selected(row.iter) and row.next:
self.store.swap(row.iter, row.next.iter)
break
def move_item_up(self):
selection = self.treeView.get_selection()
selections, model = selection.get_selected_rows()
for row in selections:
if selection.iter_is_selected(row.iter) and row.previous:
self.store.swap(row.iter, row.previous.iter)
break
def move_item_top(self):
selection = self.treeView.get_selection()
selections, model = selection.get_selected_rows()
for row in selections:
if selection.iter_is_selected(row.iter):
self.store.move_after(row.iter)
def move_item_bottom(self):
selection = self.treeView.get_selection()
selections, model = selection.get_selected_rows()
for row in selections:
if selection.iter_is_selected(row.iter):
self.store.move_before(row.iter)
def add_to_playlist(self, data):
uri = data[0]
metadata = data[4]
title = None
album = None
artist = None
albumartist = None
composer = None
track = None
cdnumber = None
if metadata:
if 'title' in metadata.keys():
title = metadata['title']
if 'artist' in metadata.keys():
artist = metadata['artist']
if 'albumArtist' in metadata.keys():
albumartist = metadata['albumArtist']
if 'composer' in metadata.keys():
composer = metadata['composer']
if 'albumName' in metadata.keys():
album = metadata['albumName']
if 'trackNumber' in metadata.keys():
track = metadata['trackNumber']
if 'cdNumber' in metadata.keys():
cdnumber = metadata['cdNumber']
return [uri, title, track, cdnumber, album, artist, albumartist, composer]
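# Minimal standalone usage sketch (assumed constructor arguments; in the player the
# manager receives the current playlist and the running transcoder settings).
def _example_show_manager():
    manager = PlaylistManager([], enable_web=True, transcoder=None, probe=None,
                              preferred_transcoder=None, counter=None)
    manager.main()
    Gtk.main()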
| gpl-3.0 | 2,185,413,142,309,863,700 | 36.072269 | 125 | 0.582102 | false | 3.781588 | false | false | false |
aa403/betfair.py | betfair/exceptions.py | 1 | 1112 | # -*- coding: utf-8 -*-
import bf_logging
class BetfairError(Exception):
def __init__(self, message):
bf_logging.main_logger.exception(message)
# pass
pass
class BetfairLoginError(BetfairError):
def __init__(self, response, data):
self.response = response
self.message = data.get('loginStatus', 'UNKNOWN')
super(BetfairLoginError, self).__init__(self.message)
class BetfairAuthError(BetfairError):
def __init__(self, response, data):
self.response = response
self.message = data.get('error', 'UNKNOWN')
super(BetfairAuthError, self).__init__(self.message)
class BetfairAPIError(BetfairError):
def __init__(self, response, data):
self.response = response
try:
error_data = data['error']['data']['APINGException']
self.message = error_data.get('errorCode', 'UNKNOWN')
self.details = error_data.get('errorDetails')
except KeyError:
self.message = 'UNKNOWN'
self.details = None
super(BetfairAPIError, self).__init__(self.message)
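# Minimal usage sketch (hypothetical response/data values) showing how a caller
# might surface a failed login as one of the exceptions above.
def _example_check_login(response, data):
    if data.get('loginStatus') != 'SUCCESS':
        raise BetfairLoginError(response, data)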
| mit | 5,364,621,063,707,154,000 | 28.263158 | 65 | 0.616906 | false | 3.821306 | false | false | false |
pstreck/django-videokit | example/media/migrations/0001_initial.py | 1 | 1517 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-19 21:15
from __future__ import unicode_literals
from django.db import migrations, models
import media.models
import videokit.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='MediaItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('video', videokit.models.VideoField(duration_field='video_duration', height_field='video_height', mimetype_field='video_mimetype', rotation_field='video_rotation', thumbnail_field='video_thumbnail', upload_to=media.models.upload_to, width_field='video_width')),
('video_width', models.IntegerField(blank=True, null=True)),
('video_height', models.IntegerField(blank=True, null=True)),
('video_rotation', models.FloatField(blank=True, null=True)),
('video_mimetype', models.CharField(blank=True, max_length=32, null=True)),
('video_duration', models.IntegerField(blank=True, null=True)),
('video_thumbnail', models.ImageField(blank=True, null=True, upload_to=b'')),
('video_mp4', videokit.models.VideoSpecField(blank=True, null=True, upload_to=b'')),
('video_ogg', videokit.models.VideoSpecField(blank=True, null=True, upload_to=b'')),
],
),
]
| mit | 3,036,916,101,428,625,400 | 44.969697 | 278 | 0.626895 | false | 3.860051 | false | false | false |
pypa/virtualenv | src/virtualenv/activation/via_template.py | 4 | 2372 | from __future__ import absolute_import, unicode_literals
import os
import sys
from abc import ABCMeta, abstractmethod
from six import add_metaclass
from virtualenv.util.six import ensure_text
from .activator import Activator
if sys.version_info >= (3, 7):
from importlib.resources import read_binary
else:
from importlib_resources import read_binary
@add_metaclass(ABCMeta)
class ViaTemplateActivator(Activator):
@abstractmethod
def templates(self):
raise NotImplementedError
def generate(self, creator):
dest_folder = creator.bin_dir
replacements = self.replacements(creator, dest_folder)
generated = self._generate(replacements, self.templates(), dest_folder, creator)
if self.flag_prompt is not None:
creator.pyenv_cfg["prompt"] = self.flag_prompt
return generated
def replacements(self, creator, dest_folder):
return {
"__VIRTUAL_PROMPT__": "" if self.flag_prompt is None else self.flag_prompt,
"__VIRTUAL_ENV__": ensure_text(str(creator.dest)),
"__VIRTUAL_NAME__": creator.env_name,
"__BIN_NAME__": ensure_text(str(creator.bin_dir.relative_to(creator.dest))),
"__PATH_SEP__": ensure_text(os.pathsep),
}
def _generate(self, replacements, templates, to_folder, creator):
generated = []
for template in templates:
text = self.instantiate_template(replacements, template, creator)
dest = to_folder / self.as_name(template)
# use write_bytes to avoid platform specific line normalization (\n -> \r\n)
dest.write_bytes(text.encode("utf-8"))
generated.append(dest)
return generated
def as_name(self, template):
return template.name
def instantiate_template(self, replacements, template, creator):
# read content as binary to avoid platform specific line normalization (\n -> \r\n)
binary = read_binary(self.__module__, str(template))
text = binary.decode("utf-8", errors="strict")
for key, value in replacements.items():
value = self._repr_unicode(creator, value)
text = text.replace(key, value)
return text
@staticmethod
def _repr_unicode(creator, value):
# by default we just let it be unicode
return value
| mit | 8,950,317,815,485,758,000 | 34.402985 | 91 | 0.64629 | false | 4.19823 | false | false | false |
Frikster/Mesoscale-Brain-Explorer | src/plugins/scatterplot.py | 1 | 2703 | #!/usr/bin/env python3
import numpy as np
import pyqtgraph.opengl as gl
from pyqtgraph.Qt import QtCore
w = gl.GLViewWidget()
w.opts['distance'] = 20
g = gl.GLGridItem()
w.addItem(g)
##
## First example is a set of points with pxMode=False
## These demonstrate the ability to have points with real size down to a very small scale
##
pos = np.empty((53, 3))
size = np.empty((53))
color = np.empty((53, 4))
pos[0] = (1,0,0); size[0] = 0.5; color[0] = (1.0, 0.0, 0.0, 0.5)
pos[1] = (0,1,0); size[1] = 0.2; color[1] = (0.0, 0.0, 1.0, 0.5)
pos[2] = (0,0,1); size[2] = 2./3.; color[2] = (0.0, 1.0, 0.0, 0.5)
z = 0.5
d = 6.0
for i in range(3,53):
pos[i] = (0,0,z)
size[i] = 2./d
color[i] = (0.0, 1.0, 0.0, 0.5)
z *= 0.5
d *= 2.0
sp1 = gl.GLScatterPlotItem(pos=pos, size=size, color=color, pxMode=False)
sp1.translate(5,5,0)
w.addItem(sp1)
##
## Second example shows a volume of points with rapidly updating color
## and pxMode=True
##
pos = np.random.random(size=(100000,3))
pos *= [10,-10,10]
pos[0] = (0,0,0)
color = np.ones((pos.shape[0], 4))
d2 = (pos**2).sum(axis=1)**0.5
size = np.random.random(size=pos.shape[0])*10
sp2 = gl.GLScatterPlotItem(pos=pos, color=(1,1,1,1), size=size)
phase = 0.
w.addItem(sp2)
##
## Third example shows a grid of points with rapidly updating position
## and pxMode = False
##
pos3 = np.zeros((100, 100, 3))
pos3[:,:,:2] = np.mgrid[:100, :100].transpose(1,2,0) * [-0.1,0.1]
pos3 = pos3.reshape(10000, 3)
d3 = (pos3**2).sum(axis=1)**0.5
sp3 = gl.GLScatterPlotItem(pos=pos3, color=(1,1,1,.3), size=0.1, pxMode=False)
w.addItem(sp3)
def update():
## update volume colors
global phase, sp2, d2
s = -np.cos(d2*2+phase)
color = np.empty((len(d2),4), dtype=np.float32)
color[:, 3] = np.clip(s * 0.1, 0, 1)
color[:, 0] = np.clip(s * 3.0, 0, 1)
color[:, 1] = np.clip(s * 1.0, 0, 1)
color[:, 2] = np.clip(s ** 3, 0, 1)
sp2.setData(color=color)
phase -= 0.1
## update surface positions and colors
global sp3, d3, pos3
z = -np.cos(d3*2+phase)
pos3[:, 2] = z
color = np.empty((len(d3),4), dtype=np.float32)
color[:, 3] = 0.3
color[:, 0] = np.clip(z * 3.0, 0, 1)
color[:, 1] = np.clip(z * 1.0, 0, 1)
color[:, 2] = np.clip(z ** 3, 0, 1)
sp3.setData(pos=pos3, color=color)
t = QtCore.QTimer()
t.timeout.connect(update)
class MyPlugin:
def __init__(self, project, plugin_position):
self.name = 'OpenGL Showcase'
self.widget = w
self.widget.setWhatsThis("This is just a quick showcase that 3D visualization is possible for future developers "
"looking to extend the application's functionality")
t.start(50)
def run(self):
t.start(50)
def stop(self):
t.stop()
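# Minimal sketch of how the host application drives this plugin (assumed project
# object and position; the real values come from the plugin loader).
def _example_run_plugin():
    plugin = MyPlugin(project=None, plugin_position=0)
    plugin.run()
    return plugin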
| mit | -5,207,972,922,242,495,000 | 23.572727 | 117 | 0.606363 | false | 2.383598 | false | false | false |
openplans/shareabouts-api | src/sa_api_v2/migrations/0001_initial.py | 1 | 20388 | # -*- coding: utf-8 -*-
from django.db import models, migrations
import sa_api_v2.models.caching
import django.contrib.gis.db.models.fields
import sa_api_v2.models.core
import django.utils.timezone
import sa_api_v2.models.mixins
from django.conf import settings
import sa_api_v2.apikey.models
import django.core.files.storage
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, max_length=30, verbose_name='username', validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username.', 'invalid')])),
('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
('email', models.EmailField(max_length=75, verbose_name='email address', blank=True)),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
],
options={
'db_table': 'auth_user',
},
bases=(sa_api_v2.models.caching.CacheClearingModel, models.Model),
),
migrations.CreateModel(
name='Action',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_datetime', models.DateTimeField(default=django.utils.timezone.now, db_index=True, blank=True)),
('updated_datetime', models.DateTimeField(auto_now=True, db_index=True)),
('action', models.CharField(default='create', max_length=16)),
('source', models.TextField(null=True, blank=True)),
],
options={
'ordering': ['-created_datetime'],
'db_table': 'sa_api_activity',
},
bases=(sa_api_v2.models.caching.CacheClearingModel, models.Model),
),
migrations.CreateModel(
name='ApiKey',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('key', models.CharField(default=sa_api_v2.apikey.models.generate_unique_api_key, unique=True, max_length=32)),
('logged_ip', models.IPAddressField(null=True, blank=True)),
('last_used', models.DateTimeField(default=django.utils.timezone.now, blank=True)),
],
options={
'db_table': 'apikey_apikey',
},
bases=(sa_api_v2.models.mixins.CloneableModelMixin, models.Model),
),
migrations.CreateModel(
name='Attachment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_datetime', models.DateTimeField(default=django.utils.timezone.now, db_index=True, blank=True)),
('updated_datetime', models.DateTimeField(auto_now=True, db_index=True)),
('file', models.FileField(storage=django.core.files.storage.FileSystemStorage(), upload_to=sa_api_v2.models.core.timestamp_filename)),
('name', models.CharField(max_length=128, null=True, blank=True)),
],
options={
'db_table': 'sa_api_attachment',
},
bases=(sa_api_v2.models.caching.CacheClearingModel, models.Model),
),
migrations.CreateModel(
name='DataIndex',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('attr_name', models.CharField(max_length=100, verbose_name='Attribute name', db_index=True)),
('attr_type', models.CharField(default='string', max_length=10, verbose_name='Attribute type', choices=[('string', 'String')])),
],
options={
},
bases=(sa_api_v2.models.mixins.CloneableModelMixin, models.Model),
),
migrations.CreateModel(
name='DataSet',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('display_name', models.CharField(max_length=128)),
('slug', models.SlugField(default='', max_length=128)),
('owner', models.ForeignKey(on_delete=models.CASCADE, related_name='datasets', to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'sa_api_dataset',
},
bases=(sa_api_v2.models.mixins.CloneableModelMixin, sa_api_v2.models.caching.CacheClearingModel, models.Model),
),
migrations.CreateModel(
name='DataSetPermission',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('submission_set', models.CharField(help_text='Either the name of a submission set (e.g., "comments"), or "places". Leave blank to refer to all things.', max_length=128, blank=True)),
('can_retrieve', models.BooleanField(default=True)),
('can_create', models.BooleanField(default=False)),
('can_update', models.BooleanField(default=False)),
('can_destroy', models.BooleanField(default=False)),
('priority', models.PositiveIntegerField(blank=True)),
('dataset', models.ForeignKey(on_delete=models.CASCADE, related_name='permissions', to='sa_api_v2.DataSet')),
],
options={
},
bases=(sa_api_v2.models.mixins.CloneableModelMixin, sa_api_v2.models.caching.CacheClearingModel, models.Model),
),
migrations.CreateModel(
name='DataSnapshot',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('json', models.TextField()),
('csv', models.TextField()),
],
options={
'db_table': 'sa_api_datasnapshot',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='DataSnapshotRequest',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('submission_set', models.CharField(max_length=128)),
('include_private', models.BooleanField(default=False)),
('include_invisible', models.BooleanField(default=False)),
('include_submissions', models.BooleanField(default=False)),
('requested_at', models.DateTimeField(auto_now_add=True)),
('status', models.TextField(default='', blank=True)),
('fulfilled_at', models.DateTimeField(null=True)),
('guid', models.TextField(default='', unique=True, blank=True)),
('dataset', models.ForeignKey(on_delete=models.CASCADE, to='sa_api_v2.DataSet')),
('requester', models.ForeignKey(on_delete=models.CASCADE, to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'db_table': 'sa_api_datasnapshotrequest',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Group',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(help_text='What is the name of the group to which users with this group belong? For example: "judges", "administrators", "winners", ...', max_length=32)),
('dataset', models.ForeignKey(on_delete=models.CASCADE, related_name='groups', to='sa_api_v2.DataSet', help_text='Which dataset does this group apply to?')),
('submitters', models.ManyToManyField(related_name='_groups', to=settings.AUTH_USER_MODEL, blank=True)),
],
options={
'db_table': 'sa_api_group',
},
bases=(sa_api_v2.models.mixins.CloneableModelMixin, models.Model),
),
migrations.CreateModel(
name='GroupPermission',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('submission_set', models.CharField(help_text='Either the name of a submission set (e.g., "comments"), or "places". Leave blank to refer to all things.', max_length=128, blank=True)),
('can_retrieve', models.BooleanField(default=True)),
('can_create', models.BooleanField(default=False)),
('can_update', models.BooleanField(default=False)),
('can_destroy', models.BooleanField(default=False)),
('priority', models.PositiveIntegerField(blank=True)),
('group', models.ForeignKey(on_delete=models.CASCADE, related_name='permissions', to='sa_api_v2.Group')),
],
options={
},
bases=(sa_api_v2.models.mixins.CloneableModelMixin, sa_api_v2.models.caching.CacheClearingModel, models.Model),
),
migrations.CreateModel(
name='IndexedValue',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('value', models.CharField(max_length=100, null=True, db_index=True)),
('index', models.ForeignKey(on_delete=models.CASCADE, related_name='values', to='sa_api_v2.DataIndex')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='KeyPermission',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('submission_set', models.CharField(help_text='Either the name of a submission set (e.g., "comments"), or "places". Leave blank to refer to all things.', max_length=128, blank=True)),
('can_retrieve', models.BooleanField(default=True)),
('can_create', models.BooleanField(default=False)),
('can_update', models.BooleanField(default=False)),
('can_destroy', models.BooleanField(default=False)),
('priority', models.PositiveIntegerField(blank=True)),
('key', models.ForeignKey(on_delete=models.CASCADE, related_name='permissions', to='sa_api_v2.ApiKey')),
],
options={
},
bases=(sa_api_v2.models.mixins.CloneableModelMixin, sa_api_v2.models.caching.CacheClearingModel, models.Model),
),
migrations.CreateModel(
name='Origin',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('pattern', models.CharField(help_text='The origin pattern, e.g., https://*.github.io, http://localhost:*, http*://map.phila.gov', max_length=100)),
('logged_ip', models.IPAddressField(null=True, blank=True)),
('last_used', models.DateTimeField(default=django.utils.timezone.now, blank=True)),
('dataset', models.ForeignKey(on_delete=models.CASCADE, related_name='origins', blank=True, to='sa_api_v2.DataSet')),
],
options={
'db_table': 'cors_origin',
},
bases=(sa_api_v2.models.mixins.CloneableModelMixin, models.Model),
),
migrations.CreateModel(
name='OriginPermission',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('submission_set', models.CharField(help_text='Either the name of a submission set (e.g., "comments"), or "places". Leave blank to refer to all things.', max_length=128, blank=True)),
('can_retrieve', models.BooleanField(default=True)),
('can_create', models.BooleanField(default=False)),
('can_update', models.BooleanField(default=False)),
('can_destroy', models.BooleanField(default=False)),
('priority', models.PositiveIntegerField(blank=True)),
('origin', models.ForeignKey(on_delete=models.CASCADE, related_name='permissions', to='sa_api_v2.Origin')),
],
options={
},
bases=(sa_api_v2.models.mixins.CloneableModelMixin, sa_api_v2.models.caching.CacheClearingModel, models.Model),
),
migrations.CreateModel(
name='SubmittedThing',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_datetime', models.DateTimeField(default=django.utils.timezone.now, db_index=True, blank=True)),
('updated_datetime', models.DateTimeField(auto_now=True, db_index=True)),
('data', models.TextField(default='{}')),
('visible', models.BooleanField(default=True, db_index=True)),
],
options={
'db_table': 'sa_api_submittedthing',
},
bases=(sa_api_v2.models.mixins.CloneableModelMixin, sa_api_v2.models.caching.CacheClearingModel, models.Model),
),
migrations.CreateModel(
name='Submission',
fields=[
('submittedthing_ptr', models.OneToOneField(on_delete=models.CASCADE, parent_link=True, auto_created=True, primary_key=True, serialize=False, to='sa_api_v2.SubmittedThing')),
('set_name', models.TextField(db_index=True)),
],
options={
'ordering': ['-updated_datetime'],
'db_table': 'sa_api_submission',
},
bases=('sa_api_v2.submittedthing',),
),
migrations.CreateModel(
name='Place',
fields=[
('submittedthing_ptr', models.OneToOneField(on_delete=models.CASCADE, parent_link=True, auto_created=True, primary_key=True, serialize=False, to='sa_api_v2.SubmittedThing')),
(
('geometry', django.contrib.gis.db.models.fields.GeometryField(srid=4326))
if settings.USE_GEODB else
('geometry', models.TextField())
),
],
options={
'ordering': ['-updated_datetime'],
'db_table': 'sa_api_place',
},
bases=('sa_api_v2.submittedthing',),
),
migrations.CreateModel(
name='Webhook',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_datetime', models.DateTimeField(default=django.utils.timezone.now, db_index=True, blank=True)),
('updated_datetime', models.DateTimeField(auto_now=True, db_index=True)),
('submission_set', models.CharField(max_length=128)),
('event', models.CharField(default='add', max_length=128, choices=[('add', 'On add')])),
('url', models.URLField(max_length=2048)),
('dataset', models.ForeignKey(on_delete=models.CASCADE, related_name='webhooks', to='sa_api_v2.DataSet')),
],
options={
'db_table': 'sa_api_webhook',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='submittedthing',
name='dataset',
field=models.ForeignKey(on_delete=models.CASCADE, related_name='things', blank=True, to='sa_api_v2.DataSet'),
preserve_default=True,
),
migrations.AddField(
model_name='submittedthing',
name='submitter',
field=models.ForeignKey(on_delete=models.CASCADE, related_name='things', blank=True, to=settings.AUTH_USER_MODEL, null=True),
preserve_default=True,
),
migrations.AddField(
model_name='submission',
name='place',
field=models.ForeignKey(on_delete=models.CASCADE, related_name='submissions', to='sa_api_v2.Place'),
preserve_default=True,
),
migrations.AddField(
model_name='indexedvalue',
name='thing',
field=models.ForeignKey(on_delete=models.CASCADE, related_name='indexed_values', to='sa_api_v2.SubmittedThing'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='group',
unique_together=set([('name', 'dataset')]),
),
migrations.AddField(
model_name='datasnapshot',
name='request',
field=models.OneToOneField(on_delete=models.CASCADE, related_name='fulfillment', to='sa_api_v2.DataSnapshotRequest'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='dataset',
unique_together=set([('owner', 'slug')]),
),
migrations.AddField(
model_name='dataindex',
name='dataset',
field=models.ForeignKey(on_delete=models.CASCADE, related_name='indexes', to='sa_api_v2.DataSet'),
preserve_default=True,
),
migrations.AddField(
model_name='attachment',
name='thing',
field=models.ForeignKey(on_delete=models.CASCADE, related_name='attachments', to='sa_api_v2.SubmittedThing'),
preserve_default=True,
),
migrations.AddField(
model_name='apikey',
name='dataset',
field=models.ForeignKey(on_delete=models.CASCADE, related_name='keys', blank=True, to='sa_api_v2.DataSet'),
preserve_default=True,
),
migrations.AddField(
model_name='action',
name='thing',
field=models.ForeignKey(on_delete=models.CASCADE, related_name='actions', db_column='data_id', to='sa_api_v2.SubmittedThing'),
preserve_default=True,
),
migrations.AddField(
model_name='user',
name='groups',
field=models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of his/her group.', verbose_name='groups'),
preserve_default=True,
),
migrations.AddField(
model_name='user',
name='user_permissions',
field=models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions'),
preserve_default=True,
),
]
| gpl-3.0 | 7,301,804,444,198,530,000 | 53.368 | 289 | 0.578428 | false | 4.260815 | false | false | false |
andrewyoung1991/abjad | abjad/tools/documentationtools/test/test_documentationtools_GraphvizGraph_pickle.py | 1 | 1221 | # -*- encoding: utf-8 -*-
import pickle
from abjad import *
def test_documentationtools_GraphvizGraph_pickle_01():
graph = documentationtools.GraphvizGraph()
graph.append(documentationtools.GraphvizSubgraph())
graph[0].append(documentationtools.GraphvizNode())
graph[0].append(documentationtools.GraphvizNode())
graph[0].append(documentationtools.GraphvizNode())
graph[0].append(documentationtools.GraphvizSubgraph())
graph[0][-1].append(documentationtools.GraphvizNode())
graph.append(documentationtools.GraphvizNode())
edge = documentationtools.GraphvizEdge()(graph[0][1], graph[1])
edge = documentationtools.GraphvizEdge()(graph[0][0], graph[0][-1][0])
assert str(graph) == systemtools.TestManager.clean_string(
r'''
digraph G {
subgraph cluster_0 {
node_0_0;
node_0_1;
node_0_2;
subgraph cluster_0_3 {
node_0_3_0;
}
node_0_0 -> node_0_3_0;
}
node_1;
node_0_1 -> node_1;
}
'''
)
pickled = pickle.loads(pickle.dumps(graph))
assert str(graph) == str(pickled) | gpl-3.0 | -7,160,647,781,701,008,000 | 30.333333 | 74 | 0.588862 | false | 3.815625 | false | false | false |
roofit-dev/parallel-roofit-scripts | scaling/unbinned_scaling_analysis.py | 1 | 1300 | # -*- coding: utf-8 -*-
# @Author: Patrick Bos
# @Date: 2016-11-16 16:23:55
# @Last Modified by: Patrick Bos
# @Last Modified time: 2016-11-21 18:10:00
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# fn = "timings.json"
# fn = "timings_stbc_20161117.json"
fn = "timings_stbc_20161121.json"
with open(fn, 'r') as fh:
json_array_inside_text = fh.read()
json_array_text = "[" + json_array_inside_text[:-2] + "]" # :-1 removes ,\n
df = pd.read_json(json_array_text)
df.groupby([u'N_events', u'N_gaussians', u'N_parameters', u'num_cpu', u'parallel_interleave']).mean().timing_ns/1e9
# almost perfectly linear:
df.groupby(['N_events']).mean().timing_ns.plot()
plt.show()
# 20161117: very strange, maxes out, then drops again
# 20161121: strangeness gone, just goes up. Maybe faster than linear.
df.groupby(['N_parameters']).mean().plot(y='timing_ns')
plt.show()
# need to do something with a pivot here or so...
df.groupby(['N_events','N_parameters','num_cpu']).mean().timing_ns.plot()
plt.show()
# need to do something with a pivot here or so...
df[df.N_events == 10000].groupby(['num_cpu']).mean().timing_ns.plot()
plt.show()
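# A minimal sketch of the pivot hinted at in the comments above: spread num_cpu over the
# columns so each N_events row holds the mean wall time (in seconds) per core count.
pivoted = df.pivot_table(values='timing_ns', index='N_events',
                         columns='num_cpu', aggfunc='mean') / 1e9
pivoted.plot()
plt.show()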
### WITH WOUTER, 21 Nov 2016
t = np.array( [115.835, 67.6071, 51.3018, 44.8939, 31.6365, 33.413, 28.5969, 24.7553])
t_ideal = 115.835 / np.arange(1,9)
c = range(1,9)
plt.plot(c,t,c,t_ideal)
plt.show()
| apache-2.0 | 3,840,109,117,953,163,300 | 26.083333 | 115 | 0.656923 | false | 2.480916 | false | true | false |
MrJohz/snooble | snooble/oauth.py | 1 | 8536 | from . import utils
from .utils import cbc
from requests.auth import HTTPBasicAuth
from urllib.parse import urljoin
__all__ = [
# constants
'SCRIPT_KIND', 'EXPLICIT_KIND', 'IMPLICIT_KIND', 'APPLICATION_INSTALLED_KIND',
'APPLICATION_EXPLICIT_KIND', 'ALL_SCOPES',
# Classes
'OAuth', 'Authorization'
]
SCRIPT_KIND = "script"
EXPLICIT_KIND = "explicit"
IMPLICIT_KIND = "implicit"
APPLICATION_INSTALLED_KIND = "application/installed"
APPLICATION_EXPLICIT_KIND = "application/explicit"
ALL_KINDS = (SCRIPT_KIND, EXPLICIT_KIND, IMPLICIT_KIND,
APPLICATION_EXPLICIT_KIND, APPLICATION_INSTALLED_KIND)
ALL_SCOPES = ()
REVERSE_KINDS = {
SCRIPT_KIND: "SCRIPT_KIND",
EXPLICIT_KIND: "EXPLICIT_KIND",
IMPLICIT_KIND: "IMPLICIT_KIND",
APPLICATION_INSTALLED_KIND: "APPLICATION_INSTALLED_KIND",
APPLICATION_EXPLICIT_KIND: "APPLICATION_EXPLICIT_KIND"
}
# Different kinds of authentication require different parameters. This is a mapping of
# kind to required parameter keys for use in OAuth's __init__ method.
KIND_PARAMETER_MAPPING = {
SCRIPT_KIND: ('client_id', 'secret_id', 'username', 'password'),
EXPLICIT_KIND: ('client_id', 'secret_id', 'redirect_uri'),
APPLICATION_EXPLICIT_KIND: ('client_id', 'secret_id'),
IMPLICIT_KIND: ('client_id', 'redirect_uri'),
APPLICATION_INSTALLED_KIND: ('client_id',)
}
class OAuth(object):
"""Class representing a set of OAuth credentials. May be authorized.
This class is used to represent a complete set of credentials to log in to Reddit's
OAuth API using one of the script, explicit, implicit, or application authentication
forms. An object of this kind can be passed to the :class:`~snooble.Snooble`
intializer, or via the :meth:`~snooble.Snooble.oauth` method. An OAuth object may
also be returned by the :meth:`~snooble.Snooble.oauth` method.
.. seealso::
:meth:`~snooble.oauth.OAuth.__init__`:
All arguments passed in to this class will also be available as
attributes to read and modify.
"""
    def __init__(self, kind, scopes, **kwargs):
        """Initialise the object with the correct keyword arguments.
Arguments:
kind (str): This should be one of the five kind strings. These are all
available as constants in this module - use these constants! If this
kind is wrong, initialisation will fail with a ValueError
scopes (list[str]): A list of all of the requested scopes from the API. For
your convenience, the constant `ALL_SCOPES` is made available in this
module, which will provide the correct scopes for all possible API
requests.
client_id (str): Always needed. Client ID as provided on the apps
preferences page.
secret_id (str): Needed for script kind, explicit kind, and
application/explicit kind. As provided on the apps preferences page.
username/password (str): Only needed for script kind. Username and password
of the user to log in to.
redirect_uri (str): Needed for explicit and implicit kinds. When the user
has authenticated with Reddit, they will be sent to this uri. *Must* be
the same as provided on the apps preferences page.
mobile (bool): If ``True``, for explicit and implicit kinds, this will cause
any generated authentication links to use Reddit's mobile-friendly page.
Defaults to ``False``.
duration (str): One of ``'temporary'`` or ``'permanent'``. Only applicable
for explicit authentication kinds. Defaults to ``'temporary'``.
device_id (str): A unique string to identify a user, used to help Reddit
track unique users and improve their analytics. If the user does not
want to be tracked, use ``'DO_NOT_TRACK_THIS_USER'``. Defaults to
``'DO_NOT_TRACK_THIS_USER'``.
"""
if kind not in ALL_KINDS:
raise ValueError("Invalid oauth kind {kind}".format(kind=kind))
self.kind = kind
self.scopes = scopes
self.mobile = kwargs.pop('mobile', False)
self.duration = kwargs.pop('duration', 'temporary')
self.device_id = kwargs.pop('device_id', 'DO_NOT_TRACK_THIS_USER')
utils.assign_parameters(self, kwargs, KIND_PARAMETER_MAPPING[self.kind])
self.authorization = None
"""The details of this account's authorization request, or ``None``.
Will be ``None`` by default. If an authorization request has been successfully
completed, the :class:`~snooble.Snooble` class will set this to the
corresponding :class:`~snooble.oauth.Authorization` object.
"""
def __repr__(self):
cls = self.__class__.__name__
kind = REVERSE_KINDS.get(self.kind)
args = ((k, v) for k, v in self.__dict__.items() if k != 'kind')
args = ", ".join("{k}={v!r}".format(k=k, v=v) for k, v in args)
return '{cls}({kind}, {args})'.format(cls=cls, kind=kind, args=args)
@property
def authorized(self):
"""True if this instance has an authorization property.
Does not fully check the validity of the authorization property,
only that it exists.
"""
return self.authorization is not None
class Authorization(object):
"""A class containing the details of a successful authorization attempt.
Contains the :attr:`~.token_type`, and the :attr:`~.token`. It also stores the time
the token was :attr:`~.recieved`, and the :attr:`~.length` that this token will last.
Note that these last two attributes are not currently used by Snooble, but may be
useful in future, or to users.
"""
def __init__(self, token_type, token, recieved, length):
self.token_type = token_type
"*(str)* Should always be the string ``'bearer'``."
self.token = token
"*(str)* A Reddit session token."
self.recieved = recieved
"*(int)* When the token was recieved in seconds since the epoch. (Always UTC)."
self.length = length
"*(int)* The length of time the token will last in seconds."
def __repr__(self):
cls = self.__class__.__name__
args = ("{k}={v}".format(k=k, v=v) for k, v in self.__dict__.items())
return "{cls}({args})".format(cls=cls, args=", ".join(args))
def __eq__(self, other):
if type(self) == type(other):
return self.__dict__ == other.__dict__
return False
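# Example usage (illustrative sketch -- the credentials are placeholders and the scope
# names are whatever scopes the application actually needs):
#
#     oauth = OAuth(SCRIPT_KIND, scopes=['identity', 'read'],
#                   client_id='CLIENT_ID', secret_id='SECRET_ID',
#                   username='USERNAME', password='PASSWORD')
#     oauth.authorized   # False until a Snooble instance fills in oauth.authorization
#
# Each kind requires the keyword arguments listed in KIND_PARAMETER_MAPPING above.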
class AUTHORIZATION_METHODS(cbc.CallbackClass):
@cbc.CallbackClass.key(SCRIPT_KIND)
def authorize_script(snoo, auth, session, code):
client_auth = HTTPBasicAuth(auth.client_id, auth.secret_id)
post_data = {"scope": ",".join(auth.scopes), "grant_type": "password",
"username": auth.username, "password": auth.password}
url = urljoin(snoo.domain.www, 'api/v1/access_token')
return session.post(url, auth=client_auth, data=post_data)
@cbc.CallbackClass.key(EXPLICIT_KIND)
def authorize_explicit(snoo, auth, session, code):
client_auth = HTTPBasicAuth(auth.client_id, auth.secret_id)
post_data = {"grant_type": "authorization_code", "code": code,
"redirect_uri": auth.redirect_uri}
url = urljoin(snoo.domain.www, 'api/v1/access_token')
return session.post(url, auth=client_auth, data=post_data)
@cbc.CallbackClass.key(IMPLICIT_KIND)
def authorize_implicit(snoo, auth, session, code):
return None
@cbc.CallbackClass.key(APPLICATION_EXPLICIT_KIND)
def authorize_application_explicit(snoo, auth, session, code):
client_auth = HTTPBasicAuth(auth.client_id, auth.secret_id)
post_data = {"grant_type": "client_credentials"}
url = urljoin(snoo.domain.www, 'api/v1/access_token')
return session.post(url, auth=client_auth, data=post_data)
@cbc.CallbackClass.key(APPLICATION_INSTALLED_KIND)
def authorize_application_implicit(snoo, auth, session, code):
client_auth = HTTPBasicAuth(auth.client_id, '')
post_data = {"grant_type": "https://oauth.reddit.com/grants/installed_client",
"device_id": auth.device_id}
url = urljoin(snoo.domain.www, 'api/v1/access_token')
return session.post(url, auth=client_auth, data=post_data)
| mit | -891,335,432,208,975,000 | 43 | 89 | 0.63859 | false | 3.826087 | false | false | false |
markkness/ColorPy | colorpy/figures.py | 1 | 2481 | '''
figures.py - Create all the ColorPy sample figures.
Description:
Creates the sample figures.
This can also create the figures with some non-default initialization conditions.
Functions:
figures() -
Create all the sample figures.
figures_clip_clamp_to_zero () -
Adjust the color clipping method, and create the sample figures.
figures_gamma_245 () -
Adjust the gamma correction to a power law gamma = 2.45 and create samples.
figures_white_A () -
Adjust the white point (for Luv/Lab) and create sample figures.
License:
Copyright (C) 2008 Mark Kness
Author - Mark Kness - [email protected]
This file is part of ColorPy.
ColorPy is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ColorPy is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with ColorPy. If not, see <http://www.gnu.org/licenses/>.
'''
import colormodels
import illuminants
import plots
import blackbody
import rayleigh
import thinfilm
import misc
def figures ():
'''Create all the ColorPy sample figures.'''
# no figures for colormodels and ciexyz
colormodels.init() # default
illuminants.figures()
plots.figures()
blackbody.figures()
rayleigh.figures()
thinfilm.figures()
misc.figures()
def figures_clip_clamp_to_zero ():
'''Adjust the color clipping method, and create the sample figures.'''
colormodels.init()
colormodels.init_clipping (colormodels.CLIP_CLAMP_TO_ZERO)
figures()
def figures_gamma_245 ():
'''Adjust the gamma correction to a power law gamma = 2.45 and create samples.'''
colormodels.init()
colormodels.init_gamma_correction (
display_from_linear_function = colormodels.simple_gamma_invert,
linear_from_display_function = colormodels.simple_gamma_correct,
gamma = 2.45)
figures()
def figures_white_A ():
'''Adjust the white point (for Luv/Lab) and create sample figures.'''
colormodels.init()
colormodels.init_Luv_Lab_white_point (colormodels.WhiteA)
figures()
if __name__ == '__main__':
figures()
| lgpl-2.1 | -6,055,264,009,291,869,000 | 27.517241 | 85 | 0.727529 | false | 3.664697 | false | false | false |
oostende/dvbapp2-gui-egami | tests/events.py | 80 | 1670 | import time
import tests
recorded_events = [ ]
def event(self, name, args, kwargs):
global recorded_events
print "*EVENT*", time.time(), self, name, args, kwargs
recorded_events.append((time.time(), self, name, args, kwargs))
def eventfnc(f):
name = f.__name__
def wrapper(self, *args, **kwargs):
event(self, name, args, kwargs)
return f(self, *args, **kwargs)
return wrapper
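# Example (sketch): decorating a method records every call together with its arguments,
# so a test can later compare the recorded events against an expected .results file.
#
#     class Service:
#         @eventfnc
#         def tune(self, frequency):
#             return frequency
#
#     Service().tune(11362)  # appends (time, <Service instance>, "tune", (11362,), {})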
def get_events():
global recorded_events
r = recorded_events
recorded_events = [ ]
return r
def start_log():
global base_time
base_time = time.time()
def end_log(test_name):
global base_time
results = ""
for (t, self, method, args, kwargs) in get_events():
results += "%s T+%f: %s::%s(%s, *%s, *%s)\n" % (time.ctime(t), t - base_time, str(self.__class__), method, self, args, kwargs)
expected = None
try:
f = open(test_name + ".results", "rb")
expected = f.read()
f.close()
except:
print "NO TEST RESULT FOUND, creating new"
f = open(test_name + ".new_results", "wb")
f.write(results)
f.close()
print results
if expected is not None:
print "expected:"
if expected != results:
f = open(test_name + ".bogus_results", "wb")
f.write(results)
f.close()
raise tests.TestError("test data does not match")
else:
print "test compared ok"
else:
print "no test data to compare with."
def log(fnc, base_time = 0, test_name = "test", *args, **kwargs):
import fake_time
fake_time.setTime(base_time)
start_log()
try:
fnc(*args, **kwargs)
event(None, "test_completed", [], {"test_name": test_name})
except tests.TestError,c:
event(None, "test_failed", [], {"test_name": test_name, "reason": str(c)})
end_log(test_name)
| gpl-2.0 | 433,176,637,897,280,260 | 22.194444 | 129 | 0.642515 | false | 2.751236 | true | false | false |
mbaumBielefeld/popkin | popkin/visualization/PlaceFieldVisualizer.py | 1 | 2395 | import matplotlib.pyplot as plt
class PlaceFieldVisualizer:
def __init__(self, fields):
self.fields=fields
self.callbacks={}
fig=plt.figure()
        cid = fig.canvas.mpl_connect('button_press_event', self.onClick)
cid_k=fig.canvas.mpl_connect('key_press_event', self.onKeyPress)
#layout_rows=2
#layout_cols=max(len(self.arm.fields_sm),len(self.arm.fields_hidden)+1)
#layout_cols=max(layout_cols,4)
layout_rows=2
layout_cols=2
panel_layout=layout_rows*100+10*layout_cols
self.subplot_activation={}
for i,f in enumerate(fields):
self.subplot_activation[f]=plt.subplot(panel_layout+i+1)
self.subplot_activation[f].set_title(f.name)
f.plot_activation('green')
'''
self.subplot_arm_schematic=plt.subplot(panel_layout+len(self.arm.fields_sm)+1)
self.subplot_arm_schematic.set_xlim(-self.arm.field_ee.radius_max,self.arm.field_ee.radius_max)
self.subplot_arm_schematic.set_ylim(-self.arm.field_ee.radius_max,self.arm.field_ee.radius_max)
self.subplot_arm_target_line=plt.Line2D([0.0,0.0],[0.0,0.0])
self.subplot_arm_schematic.add_line(self.subplot_arm_target_line)
'''
'''
self.subplot_poses={}
for i,hidden in enumerate(self.arm.fields_hidden):
field_sum,field_summand0,field_summand1=hidden.fields_sm
radius_max=field_sum.radius_max
plot_id=len(self.arm.fields_sm)+2+i
self.subplot_poses[hidden]=plt.subplot(layout_rows,layout_cols,plot_id)
self.subplot_poses[hidden].set_xlim(-radius_max,radius_max)
self.subplot_poses[hidden].set_ylim(-radius_max,radius_max)
self.subplot_poses[hidden].set_title(hidden.name)
'''
plt.show()
def onClick(self, event):
pass
def onKeyPress(self, event):
        if event.key in self.callbacks:
            self.callbacks[event.key]()
self.refreshPlot()
def addCallback(self, key, callback):
self.callbacks[key]=callback
def refreshPlot(self):
for field, subplot in self.subplot_activation.items():
subplot.cla()
subplot.set_title(field.name)
field.plot_activation('green',subplot)
plt.draw()
| gpl-2.0 | 8,130,452,641,855,284,000 | 30.513158 | 103 | 0.608351 | false | 3.481105 | false | false | false |
thinkle/gourmet | gourmet/plugins/field_editor/__init__.py | 1 | 1147 | from gourmet.plugin import ToolPlugin
from . import fieldEditor
from gi.repository import Gtk
from gettext import gettext as _
class FieldEditorPlugin (ToolPlugin):
menu_items = '''<placeholder name="DataTool">
<menuitem action="FieldEditor"/>
</placeholder>
'''
def setup_action_groups (self):
self.action_group = Gtk.ActionGroup(name='FieldEditorPluginActionGroup')
self.action_group.add_actions([
('FieldEditor',None,_('Field Editor'),
None,_('Edit fields across multiple recipes at a time.'),self.show_field_editor
),
])
self.action_groups.append(self.action_group)
def show_field_editor (self, *args):
from gourmet.GourmetRecipeManager import get_application
self.app = get_application()
self.field_editor = fieldEditor.FieldEditor(self.app.rd, self.app)
self.field_editor.valueDialog.connect('response',self.response_cb)
self.field_editor.show()
def response_cb (self, d, r):
if r==Gtk.ResponseType.APPLY:
self.app.update_attribute_models()
plugins = [FieldEditorPlugin]
| gpl-2.0 | 2,432,208,913,031,276,500 | 30.861111 | 92 | 0.660854 | false | 3.928082 | false | false | false |
mrocklin/unification | unification/utils.py | 1 | 2844 | from functools import partial
from toolz.compatibility import range, map
def hashable(x):
try:
hash(x)
return True
except TypeError:
return False
def transitive_get(key, d):
""" Transitive dict.get
>>> d = {1: 2, 2: 3, 3: 4}
>>> d.get(1)
2
>>> transitive_get(1, d)
4
"""
while hashable(key) and key in d:
key = d[key]
return key
def raises(err, lamda):
try:
lamda()
return False
except err:
return True
# Taken from theano/theano/gof/sched.py
# Avoids licensing issues because this was written by Matthew Rocklin
def _toposort(edges):
    """ Topological sort algorithm by Kahn [1] - O(nodes + edges)
inputs:
edges - a dict of the form {a: {b, c}} where b and c depend on a
outputs:
L - an ordered list of nodes that satisfy the dependencies of edges
>>> _toposort({1: (2, 3), 2: (3, )})
[1, 2, 3]
Closely follows the wikipedia page [2]
[1] Kahn, Arthur B. (1962), "Topological sorting of large networks",
Communications of the ACM
[2] http://en.wikipedia.org/wiki/Toposort#Algorithms
"""
incoming_edges = reverse_dict(edges)
incoming_edges = dict((k, set(val)) for k, val in incoming_edges.items())
S = set((v for v in edges if v not in incoming_edges))
L = []
while S:
n = S.pop()
L.append(n)
for m in edges.get(n, ()):
assert n in incoming_edges[m]
incoming_edges[m].remove(n)
if not incoming_edges[m]:
S.add(m)
if any(incoming_edges.get(v, None) for v in edges):
raise ValueError("Input has cycles")
return L
def reverse_dict(d):
"""Reverses direction of dependence dict
>>> d = {'a': (1, 2), 'b': (2, 3), 'c':()}
>>> reverse_dict(d) # doctest: +SKIP
{1: ('a',), 2: ('a', 'b'), 3: ('b',)}
    :note: dict order is not deterministic. As we iterate over the
    input dict, the output of this function depends on the dict
    order. So the output order of this function should be considered
    nondeterministic.
"""
result = {}
for key in d:
for val in d[key]:
result[val] = result.get(val, tuple()) + (key, )
return result
def xfail(func):
    try:
        func()
    except Exception:
        return
    raise Exception("XFailed test passed")  # pragma:nocover
def freeze(d):
""" Freeze container to hashable form
>>> freeze(1)
1
>>> freeze([1, 2])
(1, 2)
>>> freeze({1: 2}) # doctest: +SKIP
frozenset([(1, 2)])
"""
if isinstance(d, dict):
return frozenset(map(freeze, d.items()))
if isinstance(d, set):
return frozenset(map(freeze, d))
if isinstance(d, (tuple, list)):
return tuple(map(freeze, d))
return d
| bsd-3-clause | -734,053,786,170,655,100 | 23.101695 | 77 | 0.565401 | false | 3.45565 | false | false | false |
jnez71/demos | estimation/recursive_bayes.py | 1 | 2129 | #!/usr/bin/env python2
"""
Recursive Bayes for POMDP belief-state tracking.
Max-a-posteriori estimation.
"""
# Dependencies
from __future__ import division
import numpy as np; npl = np.linalg
import matplotlib.pyplot as plt
# State, action, measurement, and time cardinalities
nS = 3; nA = 2; nM = 2; nT = 100
# Transition conditional probability matrix, A by S by S'
P = np.array([[[ 1, 0, 0],
[ 1, 0, 0],
[ 0, 0.3, 0.7]],
[[0.4, 0, 0.6],
[0.1, 0.6, 0.3],
[ 0, 0.1, 0.9]]], dtype=np.float64)
# Sensor conditional probability matrix, A by S by O
Qr = 0.5
Q = np.array([[[ 1, 0],
[ 1, 0],
[1-Qr, Qr]],
[[ 1, 0],
[ 1, 0],
[ 0, 1]]], dtype=np.float64)
# Cost function, c(a,x)
c = np.array([[-1, -1, -3],
[ 0, 0, -2]], dtype=np.float64)
# State, estimate, measurement, belief, and cost histories
x = np.zeros(nT, dtype=np.int64)
xh = np.zeros(nT, dtype=np.int64)
y = np.zeros(nT, dtype=np.int64)
b = np.zeros((nT, nS), dtype=np.float64)
cost = np.zeros(nT, dtype=np.float64)
# Initial conditions
x[0] = 0
b[0] = [1, 0, 0]
# Function for randomly sampling with a given discrete probability density
sample_from = lambda p: np.argwhere(np.random.sample() < np.cumsum(p))[0][0]
# Simulation
time = np.arange(nT)
for t in time[1:]:
# Estimate state as the posterior maximizer
xh[t-1] = np.argmax(b[t-1])
# Randomly choose action, accept cost
u = sample_from([0.5, 0.5])
cost[t] = cost[t-1] + c[u, x[t-1]]
# Advance state, obtain measurement
x[t] = sample_from(P[u, x[t-1]])
y[t] = sample_from(Q[u, x[t]])
# Update belief
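    # Recursive Bayes: propagate the previous belief through the transition model P[u],
    # weight by the measurement likelihood Q[u][:, y[t]], then renormalize to sum to one.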
b[t] = (b[t-1].dot(P[u]))*Q[u, :, y[t]]
b[t] = b[t] / np.sum(b[t])
# Plot estimation error
print("Accuracy: {}%".format(100*len(np.argwhere(np.logical_not(np.abs(x - xh))))/nT))
plt.title("Estimation Error", fontsize=22)
plt.ylabel("x - xh", fontsize=22)
plt.xlabel("Time (iteration)", fontsize=22)
plt.scatter(time, x-xh)
plt.xlim([0, nT])
plt.grid(True)
plt.show()
| mit | 4,669,991,582,916,422,000 | 26.649351 | 86 | 0.565993 | false | 2.586877 | false | false | false |
rogeriopradoj/SublimeLinter-phplint | linter.py | 1 | 1574 | #
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Aparajita Fishman
# Copyright (c) 2015 The SublimeLinter Community
#
# License: MIT
#
"""This module exports the PHPLint plugin class."""
from SublimeLinter.lint import Linter
class PHPLint(Linter):
"""Provides an interface to the phplint executable."""
syntax = ('php', 'html')
cmd = 'phpl --php-version 5 --print-path relative --print-column-number --tab-size 4 --no-overall'
version_args = '--version'
version_re = r'PHPLint (?P<version>\d+\.\d+)'
version_requirement = '>= 2.0'
regex = (
r'(?i)^(?:'
r'\t.*?\r?\n)?'
r'==== (?P<line>\d+):(?P<col>.*): '
r'(?:(?P<error>error)|(?P<warning>warning|notice)): '
r'(?P<message>[^`\r\n]*(?:`(?P<near>[^\']+)\')?[^\r\n]*)'
)
multiline = True
tempfile_suffix = 'php'
def split_match(self, match):
"""Return the match with ` quotes transformed to '."""
match, line, col, error, warning, message, near = super().split_match(match)
if message == 'no PHP code found at all':
match = None
else:
message = message.replace('`', '\'')
# If the message contains a complaint about a function
# and near looks like a function reference, remove the trailing
# () so it can be found.
if 'function \'' in message and near and near.endswith('()'):
near = near[:-2]
return match, line, col, error, warning, message, near
| mit | 3,513,506,060,172,176,000 | 30.48 | 102 | 0.570521 | false | 3.553047 | false | false | false |
bswartz/manila | manila/share/drivers/netapp/dataontap/cluster_mode/drv_single_svm.py | 1 | 12154 | # Copyright (c) 2015 Clinton Knight. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
NetApp Data ONTAP cDOT single-SVM storage driver.
This driver requires a Data ONTAP (Cluster-mode) storage system with
installed CIFS and/or NFS licenses, as well as a FlexClone license. This
driver does not manage share servers, meaning it uses a single Data ONTAP
storage virtual machine (i.e. 'vserver') as defined in manila.conf to
provision shares. This driver supports NFS & CIFS protocols.
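
A minimal, illustrative manila.conf stanza for this driver (a sketch only: option values
are placeholders and the option names follow the NetApp cDOT driver configuration
reference):

    [cdotSingleSVM]
    share_backend_name = cdotSingleSVM
    share_driver = manila.share.drivers.netapp.common.NetAppDriver
    driver_handles_share_servers = False
    netapp_storage_family = ontap_cluster
    netapp_server_hostname = cluster-mgmt-hostname
    netapp_login = admin
    netapp_password = password
    netapp_vserver = svm_name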
"""
from manila.share import driver
from manila.share.drivers.netapp.dataontap.cluster_mode import lib_single_svm
class NetAppCmodeSingleSvmShareDriver(driver.ShareDriver):
"""NetApp Cluster-mode single-SVM share driver."""
DRIVER_NAME = 'NetApp_Cluster_SingleSVM'
def __init__(self, *args, **kwargs):
super(NetAppCmodeSingleSvmShareDriver, self).__init__(
False, *args, **kwargs)
self.library = lib_single_svm.NetAppCmodeSingleSVMFileStorageLibrary(
self.DRIVER_NAME, **kwargs)
def do_setup(self, context):
self.library.do_setup(context)
def check_for_setup_error(self):
self.library.check_for_setup_error()
def get_pool(self, share):
return self.library.get_pool(share)
def create_share(self, context, share, **kwargs):
return self.library.create_share(context, share, **kwargs)
def create_share_from_snapshot(self, context, share, snapshot, **kwargs):
return self.library.create_share_from_snapshot(context, share,
snapshot, **kwargs)
def create_snapshot(self, context, snapshot, **kwargs):
return self.library.create_snapshot(context, snapshot, **kwargs)
def revert_to_snapshot(self, context, snapshot, share_access_rules,
snapshot_access_rules, **kwargs):
return self.library.revert_to_snapshot(context, snapshot, **kwargs)
def delete_share(self, context, share, **kwargs):
self.library.delete_share(context, share, **kwargs)
def delete_snapshot(self, context, snapshot, **kwargs):
self.library.delete_snapshot(context, snapshot, **kwargs)
def extend_share(self, share, new_size, **kwargs):
self.library.extend_share(share, new_size, **kwargs)
def shrink_share(self, share, new_size, **kwargs):
self.library.shrink_share(share, new_size, **kwargs)
def ensure_share(self, context, share, **kwargs):
pass
def manage_existing(self, share, driver_options):
return self.library.manage_existing(share, driver_options)
def unmanage(self, share):
self.library.unmanage(share)
def manage_existing_snapshot(self, snapshot, driver_options):
return self.library.manage_existing_snapshot(snapshot, driver_options)
def unmanage_snapshot(self, snapshot):
self.library.unmanage_snapshot(snapshot)
def update_access(self, context, share, access_rules, add_rules,
delete_rules, **kwargs):
self.library.update_access(context, share, access_rules, add_rules,
delete_rules, **kwargs)
def _update_share_stats(self, data=None):
data = self.library.get_share_stats(
filter_function=self.get_filter_function(),
goodness_function=self.get_goodness_function())
super(NetAppCmodeSingleSvmShareDriver, self)._update_share_stats(
data=data)
def get_default_filter_function(self):
return self.library.get_default_filter_function()
def get_default_goodness_function(self):
return self.library.get_default_goodness_function()
def get_share_server_pools(self, share_server):
return self.library.get_share_server_pools(share_server)
def get_network_allocations_number(self):
return self.library.get_network_allocations_number()
def get_admin_network_allocations_number(self):
return self.library.get_admin_network_allocations_number()
def _setup_server(self, network_info, metadata=None):
return self.library.setup_server(network_info, metadata)
def _teardown_server(self, server_details, **kwargs):
self.library.teardown_server(server_details, **kwargs)
def create_replica(self, context, replica_list, replica, access_rules,
replica_snapshots, **kwargs):
return self.library.create_replica(context, replica_list, replica,
access_rules, replica_snapshots,
**kwargs)
def delete_replica(self, context, replica_list, replica_snapshots, replica,
**kwargs):
self.library.delete_replica(context, replica_list, replica,
replica_snapshots, **kwargs)
def promote_replica(self, context, replica_list, replica, access_rules,
share_server=None):
return self.library.promote_replica(context, replica_list, replica,
access_rules,
share_server=share_server)
def update_replica_state(self, context, replica_list, replica,
access_rules, replica_snapshots,
share_server=None):
return self.library.update_replica_state(context,
replica_list,
replica,
access_rules,
replica_snapshots,
share_server=share_server)
def create_replicated_snapshot(self, context, replica_list,
replica_snapshots, share_server=None):
return self.library.create_replicated_snapshot(
context, replica_list, replica_snapshots,
share_server=share_server)
def delete_replicated_snapshot(self, context, replica_list,
replica_snapshots, share_server=None):
return self.library.delete_replicated_snapshot(
context, replica_list, replica_snapshots,
share_server=share_server)
def update_replicated_snapshot(self, context, replica_list,
share_replica, replica_snapshots,
replica_snapshot, share_server=None):
return self.library.update_replicated_snapshot(
replica_list, share_replica, replica_snapshots, replica_snapshot,
share_server=share_server)
def revert_to_replicated_snapshot(self, context, active_replica,
replica_list, active_replica_snapshot,
replica_snapshots, share_access_rules,
snapshot_access_rules,
**kwargs):
return self.library.revert_to_replicated_snapshot(
context, active_replica, replica_list, active_replica_snapshot,
replica_snapshots, **kwargs)
def migration_check_compatibility(self, context, source_share,
destination_share, share_server=None,
destination_share_server=None):
return self.library.migration_check_compatibility(
context, source_share, destination_share,
share_server=share_server,
destination_share_server=destination_share_server)
def migration_start(self, context, source_share, destination_share,
source_snapshots, snapshot_mappings,
share_server=None, destination_share_server=None):
return self.library.migration_start(
context, source_share, destination_share,
source_snapshots, snapshot_mappings, share_server=share_server,
destination_share_server=destination_share_server)
def migration_continue(self, context, source_share, destination_share,
source_snapshots, snapshot_mappings,
share_server=None, destination_share_server=None):
return self.library.migration_continue(
context, source_share, destination_share,
source_snapshots, snapshot_mappings, share_server=share_server,
destination_share_server=destination_share_server)
def migration_get_progress(self, context, source_share,
destination_share, source_snapshots,
snapshot_mappings, share_server=None,
destination_share_server=None):
return self.library.migration_get_progress(
context, source_share, destination_share,
source_snapshots, snapshot_mappings, share_server=share_server,
destination_share_server=destination_share_server)
def migration_cancel(self, context, source_share, destination_share,
source_snapshots, snapshot_mappings,
share_server=None, destination_share_server=None):
return self.library.migration_cancel(
context, source_share, destination_share,
source_snapshots, snapshot_mappings, share_server=share_server,
destination_share_server=destination_share_server)
def migration_complete(self, context, source_share, destination_share,
source_snapshots, snapshot_mappings,
share_server=None, destination_share_server=None):
return self.library.migration_complete(
context, source_share, destination_share,
source_snapshots, snapshot_mappings, share_server=share_server,
destination_share_server=destination_share_server)
def create_share_group_snapshot(self, context, snap_dict,
share_server=None):
fallback_create = super(NetAppCmodeSingleSvmShareDriver,
self).create_share_group_snapshot
return self.library.create_group_snapshot(context, snap_dict,
fallback_create,
share_server)
def delete_share_group_snapshot(self, context, snap_dict,
share_server=None):
fallback_delete = super(NetAppCmodeSingleSvmShareDriver,
self).delete_share_group_snapshot
return self.library.delete_group_snapshot(context, snap_dict,
fallback_delete,
share_server)
def create_share_group_from_share_group_snapshot(
self, context, share_group_dict, snapshot_dict,
share_server=None):
fallback_create = super(
NetAppCmodeSingleSvmShareDriver,
self).create_share_group_from_share_group_snapshot
return self.library.create_group_from_snapshot(context,
share_group_dict,
snapshot_dict,
fallback_create,
share_server)
def get_configured_ip_versions(self):
return self.library.get_configured_ip_versions()
| apache-2.0 | 1,042,782,491,344,359,300 | 46.662745 | 79 | 0.60153 | false | 4.621293 | false | false | false |
jdfekete/progressivis | tests/test_00_file_buffer.py | 1 | 1924 | from progressivis.core.utils import filepath_to_buffer
from . import ProgressiveTest, skip, skipIf
import requests, tempfile, os
HTTP_URL = ('http://s3.amazonaws.com/h2o-release/h2o/master'
'/1193/docs-website/resources/publicdata.html')
S3_URL = ('s3://h2o-release/h2o/master/1193/docs-website'
'/resources/publicdata.html')
@skipIf(os.getenv('TRAVIS'),'skipped on Travis=>avoids: "certificate verify failed: IP address mismatch, certificate is not valid"')
class TestFileBuffer(ProgressiveTest):
def setUp(self):
req = requests.get(HTTP_URL)
_, self.tmp_file = tempfile.mkstemp(prefix='p10s_', suffix='.html')
with open(self.tmp_file, 'wb') as f:
f.write(req.content)
def tearDown(self):
os.remove(self.tmp_file)
def test_file_buffer(self):
reader_http, _, _, size_http = filepath_to_buffer(
HTTP_URL
)
self.assertGreater(size_http, 0)
reader_s3, _, _, size_s3 = filepath_to_buffer(
S3_URL
)
self.assertEqual(size_http, size_s3)
reader_file, _, _, size_file = filepath_to_buffer(self.tmp_file)
self.assertEqual(size_file, size_s3)
n1 = 12
n2 = 34
n3 = 56
_ = reader_http.read(n1)
_ = reader_http.read(n2)
_ = reader_http.read(n3)
self.assertEqual(reader_http.tell(), n1 + n2 + n3)
_ = reader_s3.read(n1)
_ = reader_s3.read(n2)
_ = reader_s3.read(n3)
self.assertEqual(reader_s3.tell(), n1 + n2 + n3)
_ = reader_file.read(n1)
_ = reader_file.read(n2)
_ = reader_file.read(n3)
self.assertEqual(reader_file.tell(), n1 + n2 + n3)
try:
reader_s3.close()
except:
pass
try:
reader_file.close()
except:
pass
if __name__ == '__main__':
ProgressiveTest.main()
| bsd-2-clause | 7,882,744,153,001,346,000 | 33.981818 | 132 | 0.573285 | false | 3.206667 | false | false | false |
bgshih/tf_resnet_cifar | src/model_utils.py | 1 | 2183 | import tensorflow as tf
def shape_probe(tensor):
return tf.Print(tensor, [tf.shape(tensor)], message='Shape=', summarize=10)
def min_max_probe(tensor):
return tf.Print(tensor, [tf.reduce_min(tensor), tf.reduce_max(tensor)], message='Min, max=', summarize=10)
def conv_map_montage(conv_maps):
"""
Montage of convolutional feature maps.
Args:
conv_maps: 4D tensor [B x H x W x C]
maxWidth: maximum output width
maxHeight: maximum output height
Return:
montage: [B x H' x W']
"""
raise NotImplementedError
# shape = tf.shape(conv_maps)
# B, H, W, C = shape[0], shape[1], shape[2], shape[3]
# maps = tf.transpose(conv_maps, [0,3,1,2])
# tf.gather(maps, )
def activation_summary(x):
tensor_name = x.op.name
tf.histogram_summary('activations/' + tensor_name, x)
# tf.scalar_summary(tensor_name + '/max', tf.reduce_max(x))
# tf.scalar_summary(tensor_name + '/min', tf.reduce_min(x))
# tf.scalar_summary(tensor_name + '/mean', tf.reduce_mean(x))
tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def histogram_summary_for_all_variables():
for var in tf.trainable_variables():
tf.histogram_summary(var.op.name, var)
def add_loss_summaries(total_loss):
"""Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summmary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.scalar_summary(l.op.name +' (raw)', l)
tf.scalar_summary(l.op.name, loss_averages.average(l))
return loss_averages_op
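# Typical TF1-style usage (sketch; `optimizer` and `global_step` are assumed to exist in
# the calling training script):
#
#     loss_averages_op = add_loss_summaries(total_loss)
#     with tf.control_dependencies([loss_averages_op]):
#         train_op = optimizer.minimize(total_loss, global_step=global_step)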
| mit | -4,850,249,143,528,783,000 | 35.383333 | 108 | 0.694457 | false | 3.263079 | false | false | false |
Caranarq/01_Dmine | Scripts/VarInt/VarInt.py | 1 | 2170 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 30 17:21:27 2017
@author: carlos.arana
Description:
    Processes the parameter data in order to return the integrity variable.
    If the integrity variable is of type:
        Type 1 (Binary), the function looks for empty cells in a single column and returns 0
        if the cell is empty or 1 if the cell contains data.
        Type 2, the function looks for empty cells across several columns and computes an
        integrity percentage in the range 0 to 1, where zero means no data at all (0%)
        and one means all data present (100%).
        Type 3, the function assumes the dataset is complete and assigns integrity 1 to
        every record. Used for parameters.
Input:
    par_dataset: [pandas dataframe] indexed by CVE_MUN, a single column with the data used to build the parameter
    dataset: [Pandas dataframe] standard dataset, indexed by CVE_MUN, contains the whole series of available data,
        only columns with parameter data; avoid description columns.
"""
import pandas as pd
def VarInt(par_dataset, dataset, tipo = 1):
    # integrity type: [1] for Binary, [2] for Series
if tipo == 1:
par_dataset['EXISTE'] = ~par_dataset.isnull() # el simbolo ~ es para invertir los valores de true / false
par_dataset['VAR_INTEGRIDAD'] = par_dataset['EXISTE'].astype(int)
if tipo == 2:
        par_dataset['NUM_REGISTROS'] = len(list(dataset))  # How many records should each case have?
        par_dataset['REGISTROS_EXISTEN'] = dataset.notnull().sum(axis=1)  # How many records actually contain data?
par_dataset['VAR_INTEGRIDAD'] = par_dataset['REGISTROS_EXISTEN'] / par_dataset['NUM_REGISTROS']
    if tipo == 3:  # Additionally, for this case the integrity is reassigned in the compiler
                   # during the integrity review, after running SUN_Integridad
par_dataset['EXISTE'] = True
par_dataset['VAR_INTEGRIDAD'] = par_dataset['EXISTE'].astype(int)
variables_par_dataset = list(par_dataset)
par_dataset['CVE_MUN'] = dataset.index
return par_dataset, variables_par_dataset
| gpl-3.0 | 7,854,610,695,471,103,000 | 48.090909 | 116 | 0.7 | false | 2.954856 | false | false | false |
msteghofer/CSnake | src/csnVisualStudio2010.py | 2 | 2413 | ## @package csnVisualStudio2010
# Definition of the csnVisualStudio2010 compilers.
# \ingroup compiler
import csnCompiler
import os
class Compiler(csnCompiler.Compiler):
""" Abstract Visual Studio 2010 compiler. """
def __init__(self):
csnCompiler.Compiler.__init__(self)
self.postProcessor = PostProcessor()
def GetCompileFlags(self):
return [""]
def IsForPlatform(self, _WIN32, _NOT_WIN32):
return _WIN32 or (not _WIN32 and not _NOT_WIN32)
def GetOutputSubFolder(self, _configuration = "${CMAKE_CFG_INTDIR}"):
"""
Returns the folder where the compiler should place binaries for _configuration.
The default value for _configuration returns the output folder for the current configuration.
for storing binaries.
"""
if _configuration == "DebugAndRelease":
return "bin"
else:
return "bin/%s" % (_configuration)
def GetBuildSubFolder(self, _projectType, _projectName):
return "%s/%s" % (_projectType, _projectName)
def GetThirdPartySubFolder(self):
return ""
def GetThirdPartyCMakeParameters(self):
return []
def GetProjectCMakeParameters(self):
return []
def GetAllowedConfigurations(self):
return ["DebugAndRelease"]
def GetPostProcessor(self):
return self.postProcessor
def TargetIsMac(self):
return False
def TargetIsLinux(self):
return False
class Compiler32(Compiler):
""" Visual Studio 2010 32bits compiler. """
def GetName(self):
return "Visual Studio 10"
def TargetIs32Bits(self):
return True
def TargetIs64Bits(self):
return False
class Compiler64(Compiler):
""" Visual Studio 2010 64bits compiler. """
def GetName(self):
return "Visual Studio 10 Win64"
def TargetIs32Bits(self):
return False
def TargetIs64Bits(self):
return True
class PostProcessor:
def Do(self, _project):
"""
Post processes the vcproj file generated for _project.
"""
# vc proj to patch
if not _project.dependenciesManager.isTopLevel:
slnFilename = "%s/%s.sln" % (_project.GetBuildFolder(), _project.name)
if os.path.exists(slnFilename):
os.remove(slnFilename)
| bsd-3-clause | -2,766,193,454,900,575,700 | 27.05814 | 101 | 0.62039 | false | 4.371377 | true | false | false |
mozilla/pulseguardian | pulseguardian/model/queue.py | 2 | 1025 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String
from sqlalchemy.orm import relationship
from pulseguardian.model.base import Base
from pulseguardian.model.binding import Binding
class Queue(Base):
__tablename__ = 'queues'
name = Column(String(255), primary_key=True)
owner_id = Column(Integer, ForeignKey('pulse_users.id'), nullable=True)
size = Column(Integer)
# whether the queue can grow beyond the deletion size without being deleted
unbounded = Column(Boolean, default=False)
warned = Column(Boolean)
durable = Column(Boolean, nullable=False, default=False)
bindings = relationship(Binding, cascade='save-update, merge, delete')
def __repr__(self):
return "<Queue(name='{0}', owner='{1}')>".format(self.name, self.owner)
__str__ = __repr__
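# --- Hedged usage sketch (not part of the original module) -------------------
# ``session`` is assumed to be a SQLAlchemy session bound to the pulseguardian
# database and ``owner_id`` the id of an existing PulseUser row; both are
# hypothetical arguments used for illustration only.
def _example_create_queue(session, owner_id):
    queue = Queue(name='queue/example/builds', owner_id=owner_id,
                  size=0, unbounded=False, warned=False, durable=True)
    session.add(queue)
    session.commit()
    return queue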
| mpl-2.0 | -5,574,268,999,701,741,000 | 34.344828 | 79 | 0.707317 | false | 3.853383 | false | false | false |
butala/pyrsss | pyrsss/emission/v6300.py | 1 | 2194 | from __future__ import division
import math
from datetime import datetime
from pyglow.pyglow import Point
# using back-port for python2 (enum34 package) --- this is in the
# python3 standard library (>=3.4)
from enum import Enum
class OplusType(Enum):
ne = 1
charge_neutrality = 2
"""
The following (alpha1 through BETA_1D) are from Table 2.2 from
Makela's dissertation. They are in turn from:
Link and Cogger, A reexamination of the O I 6300 \AA nightglow,
Journal of Geophysical Research, vol. 93, pp. 9883--9892, 1988.
"""
def alpha1(Te):
"""[cm^3 / s]"""
return 1.95e-7 * (Te / 300.)**(-0.7)
def alpha2(Te):
"""[cm^3 / s]"""
return 4.00e-7 * (Te / 300.)**(-0.9)
def k1(Ti, exp=math.exp):
"""[cm^3 / s]"""
return 3.23e-12 * exp(3.72/(Ti/300) - 1.87/(Ti/300)**2)
def k2(Ti, exp=math.exp):
"""[cm^3 / s]"""
return 2.78e-13 * exp(2.07/(Ti/300) - 0.61/(Ti/300)**2)
def k3(Tn, exp=math.exp):
"""[cm^3 / s]"""
return 2.0e-11 * exp(111.8/Tn)
def k4(Tn, exp=math.exp):
"""[cm^3 / s]"""
return 2.9e-11 * exp(67.5/Tn)
def k5(Tn):
"""[cm^3 / s]"""
return 1.6e-12 * Tn**(0.91)
A_1D = 7.45e-3
"""[1/s]"""
A_6300 = 5.63e-3
"""[1/s]"""
BETA_1D = 1.1
def Oplus_simple(ne):
"""
"""
return ne
def Oplus(ne,
Te,
Ti,
O2,
N2,
exp=math.exp):
"""
"""
return ne / (1 \
+ k1(Ti, exp=exp) * O2 / (alpha1(Te) * ne) \
+ k2(Ti, exp=exp) * N2 / (alpha2(Te) * ne))
def emission_v6300(ne,
Te,
Ti,
Tn,
O2,
N2,
oplus_type=OplusType.charge_neutrality,
exp=math.exp):
"""
"""
if oplus_type == OplusType.ne:
oplus = Oplus_simple(ne)
elif oplus_type == OplusType.charge_neutrality:
oplus = Oplus(ne, Te, Ti, O2, N2, exp=exp)
else:
        raise NotImplementedError('oplus_type = ' + str(oplus_type))
N = (A_1D / A_6300) * BETA_1D * k1(Ti, exp=exp) * O2 * oplus
D = 1 + (k3(Tn, exp=exp) * N2 + k4(Tn, exp=exp) * O2 + k5(Tn) * ne) / A_1D
return N / D
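# --- Hedged usage sketch (not part of the original module) -------------------
# Evaluates the 630.0 nm volume emission rate for one set of plasma and neutral
# parameters.  The numerical values are made up for illustration only
# (densities in cm^-3, temperatures in K).
if __name__ == '__main__':
    ne = 5e5    # electron density
    Te = 1200.  # electron temperature
    Ti = 1000.  # ion temperature
    Tn = 900.   # neutral temperature
    O2 = 1e8    # molecular oxygen density
    N2 = 1e9    # molecular nitrogen density
    V = emission_v6300(ne, Te, Ti, Tn, O2, N2)
    print('V_6300 = {:.3e} photons / (cm^3 s)'.format(V))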
| mit | -2,333,410,579,308,403,700 | 19.314815 | 78 | 0.503646 | false | 2.510297 | false | false | false |
dimasad/ceacoest | examples/circular_orbit_min_time.py | 2 | 5096 | """Limited-thrust circular orbit transfer."""
import functools
import numpy as np
import sympy
from sympy import sin, cos
from scipy import constants, integrate, interpolate
import sym2num.model
import sym2num.utils
import sym2num.var
from ceacoest import oc
from ceacoest.modelling import symoc
@symoc.collocate(order=2)
class CircularOrbit:
"""Symbolic limited-thrust circular orbit transfer optimal control model."""
@sym2num.utils.classproperty
@functools.lru_cache()
def variables(cls):
"""Model variables definition."""
consts = ['mu', 've', 'T_max', 'R_final']
obj = sym2num.var.SymbolObject(
'self',
sym2num.var.SymbolArray('consts', consts)
)
vars = [obj,
sym2num.var.SymbolArray('x', ['X', 'Y', 'vx', 'vy', 'm']),
sym2num.var.SymbolArray('u', ['T', 'Txn', 'Tyn']),
sym2num.var.SymbolArray('p', ['tf'])]
return sym2num.var.make_dict(vars)
@sym2num.model.collect_symbols
def f(self, x, u, p, *, s):
"""ODE function."""
R3 = (s.X**2 + s.Y**2) ** 1.5
gx = - s.mu * s.X / R3
gy = - s.mu * s.Y / R3
Tx = s.T * s.Txn * s.T_max
Ty = s.T * s.Tyn * s.T_max
ax = gx + Tx / s.m
ay = gy + Ty / s.m
mdot = - s.T * s.T_max / s.ve
f = [s.vx, s.vy, ax, ay, mdot]
return sympy.Array(f) * s.tf
@sym2num.model.collect_symbols
def g(self, x, u, p, *, s):
"""Path constraints."""
return sympy.Array([s.Txn**2 + s.Tyn**2 - 1])
@sym2num.model.collect_symbols
def h(self, xe, p, *, s):
"""Endpoint constraints."""
R_error = (s.X_final ** 2 + s.Y_final ** 2)/s.R_final - s.R_final
v_dot_r = (s.X_final * s.vx_final + s.Y_final * s.vy_final) / s.R_final
r_cross_v = s.X_final * s.vy_final - s.Y_final * s.vx_final
V = sympy.sqrt(s.vx_final**2 + s.vy_final**2)
V_error = r_cross_v * V - s.mu
return sympy.Array([R_error, v_dot_r, V_error])
@sym2num.model.collect_symbols
def M(self, xe, p, *, s):
"""Mayer (endpoint) cost."""
return sympy.Array(s.tf)
@sym2num.model.collect_symbols
def L(self, x, u, p, *, s):
"""Lagrange (running) cost."""
return sympy.Array(0)
if __name__ == '__main__':
symb_mdl = CircularOrbit()
GeneratedCircularOrbit = sym2num.model.compile_class(symb_mdl)
mu = 1
ve = 50
T_max = 0.025
R_final = 2
mdl_consts = dict(mu=mu, ve=ve, T_max=T_max, R_final=R_final)
mdl = GeneratedCircularOrbit(**mdl_consts)
t = np.linspace(0, 1, 500)
problem = oc.Problem(mdl, t)
tc = problem.tc
dec_bounds = np.repeat([[-np.inf], [np.inf]], problem.ndec, axis=-1)
dec_L, dec_U = dec_bounds
problem.set_decision_item('tf', 0, dec_L)
#problem.set_decision_item('tf', 10, dec_U)
problem.set_decision_item('m', 0, dec_L)
problem.set_decision_item('T', 0, dec_L)
problem.set_decision_item('T', 1, dec_U)
problem.set_decision_item('Txn', -1.5, dec_L)
problem.set_decision_item('Txn', 1.5, dec_U)
problem.set_decision_item('Tyn', -1.5, dec_L)
problem.set_decision_item('Tyn', 1.5, dec_U)
problem.set_decision_item('X_initial', 1, dec_L)
problem.set_decision_item('X_initial', 1, dec_U)
problem.set_decision_item('Y_initial', 0, dec_L)
problem.set_decision_item('Y_initial', 0, dec_U)
problem.set_decision_item('vx_initial', 0, dec_L)
problem.set_decision_item('vx_initial', 0, dec_U)
problem.set_decision_item('vy_initial', 1, dec_L)
problem.set_decision_item('vy_initial', 1, dec_U)
problem.set_decision_item('m_initial', 1, dec_L)
problem.set_decision_item('m_initial', 1, dec_U)
constr_bounds = np.zeros((2, problem.ncons))
constr_L, constr_U = constr_bounds
dec_scale = np.ones(problem.ndec)
problem.set_decision_item('m', 1, dec_scale)
constr_scale = np.ones(problem.ncons)
problem.set_constraint('h', 10, constr_scale)
problem.set_defect_scale('m', 1, dec_scale)
obj_scale = 1
dec0 = np.zeros(problem.ndec)
problem.set_decision_item('m', 1, dec0)
problem.set_decision_item('tf', 2*np.pi, dec0)
problem.set_decision_item('X', np.cos(2*np.pi*tc), dec0)
problem.set_decision_item('Y', np.sin(2*np.pi*tc), dec0)
problem.set_decision_item('vx', -np.sin(2*np.pi*tc), dec0)
problem.set_decision_item('vy', np.cos(2*np.pi*tc), dec0)
problem.set_decision_item('Txn', 1, dec0)
with problem.ipopt(dec_bounds, constr_bounds) as nlp:
nlp.add_str_option('linear_solver', 'ma57')
nlp.add_num_option('tol', 1e-6)
nlp.add_int_option('max_iter', 3000)
nlp.set_scaling(obj_scale, dec_scale, constr_scale)
decopt, info = nlp.solve(dec0)
opt = problem.variables(decopt)
xopt = opt['x']
uopt = opt['u']
Topt = opt['p']
iopt = mdl.g(xopt, uopt, Topt)
topt = problem.tc * Topt
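    # --- Hedged post-processing sketch (not part of the original script) -----
    # Print two scalar results from the optimal solution; the indexing follows
    # the variable layout above (states x = [X, Y, vx, vy, m], parameter p = [tf]).
    print('optimal transfer time tf =', float(Topt[0]))
    print('final spacecraft mass  m =', float(xopt[-1, -1]))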
| mit | 7,281,658,488,657,289,000 | 32.526316 | 80 | 0.575746 | false | 2.81547 | false | false | false |
django/djangosnippets.org | cab/tests/tests.py | 1 | 26406 | from django.contrib.auth.models import AnonymousUser, User
from django.template import Context, Template
from django.test import SimpleTestCase, TestCase
from django.urls import reverse
from rest_framework import status
from ..api.serializers import SnippetSerializer
from ..models import Bookmark, Language, Snippet
from ..templatetags.markup import safe_markdown
# @skip("These tests don't test production code.")
# @override_settings(ROOT_URLCONF='cab.tests.urls')
class BaseCabTestCase(TestCase):
def setUp(self):
"""
        Tags and ratings use GFKs, which require content-type ids, so all of
        this fixture setup is done here in setUp().
"""
self.user_a = User.objects.create_user('a', 'a', 'a')
self.user_b = User.objects.create_user('b', 'b', 'b')
self.python = Language.objects.create(
name='Python',
slug='python',
language_code='python',
mime_type='text/x-python',
file_extension='py')
self.sql = Language.objects.create(
name='SQL',
slug='sql',
language_code='sql',
mime_type='text/x-sql',
file_extension='sql')
self.snippet1 = Snippet.objects.create(
title='Hello world',
language=self.python,
author=self.user_a,
description='A greeting\n==========',
code='print "Hello, world"')
self.snippet1.tags.add('hello', 'world')
self.snippet2 = Snippet.objects.create(
title='Goodbye world',
language=self.python,
author=self.user_b,
description='A farewell\n==========',
code='print "Goodbye, world"')
self.snippet2.tags.add('goodbye', 'world')
self.snippet3 = Snippet.objects.create(
title='One of these things is not like the others',
language=self.sql,
author=self.user_a,
description='Haxor some1z db',
code='DROP TABLE accounts;')
self.snippet3.tags.add('haxor')
self.bookmark1 = Bookmark.objects.create(snippet=self.snippet1,
user=self.user_a)
self.bookmark2 = Bookmark.objects.create(snippet=self.snippet1,
user=self.user_b)
self.bookmark3 = Bookmark.objects.create(snippet=self.snippet3,
user=self.user_a)
self.snippet1.ratings.rate(self.user_a, 1)
self.snippet1.ratings.rate(self.user_b, 1)
self.snippet2.ratings.rate(self.user_a, -1)
self.snippet2.ratings.rate(self.user_b, -1)
self.snippet3.ratings.rate(self.user_a, 1)
self.snippet3.ratings.rate(self.user_b, -1)
self.snippet1 = Snippet.objects.get(pk=self.snippet1.pk)
self.snippet2 = Snippet.objects.get(pk=self.snippet2.pk)
self.snippet3 = Snippet.objects.get(pk=self.snippet3.pk)
def ensure_login_required(self, url, username, password):
"""
A little shortcut that will hit a url, check for a login required
redirect, then after logging in return the logged-in response
"""
self.client.logout()
resp = self.client.get(url)
self.assertRedirects(resp, '/accounts/login/?next=%s' % url, fetch_redirect_response=False)
self.client.login(username=username, password=password)
resp = self.client.get(url)
self.client.logout()
return resp
class ManagerTestCase(BaseCabTestCase):
"""
Tests covering manager methods -- currently most "popular" this or that
are handled by managers.
"""
def test_top_languages(self):
top_languages = Language.objects.top_languages()
self.assertEqual(top_languages[0], self.python)
self.assertEqual(top_languages[1], self.sql)
self.assertEqual(top_languages[0].score, 2)
self.assertEqual(top_languages[1].score, 1)
def test_top_authors(self):
top_authors = Snippet.objects.top_authors()
self.assertEqual(top_authors[0], self.user_a)
self.assertEqual(top_authors[1], self.user_b)
self.assertEqual(top_authors[0].score, 2)
self.assertEqual(top_authors[1].score, 1)
def test_top_tags(self):
top_tags = Snippet.objects.top_tags()
self.assertEqual(top_tags[0].name, 'world')
self.assertEqual(top_tags[0].num_times, 2)
self.assertEqual(top_tags[1].name, 'goodbye')
self.assertEqual(top_tags[2].name, 'haxor')
self.assertEqual(top_tags[3].name, 'hello')
def test_top_rated(self):
top_rated = Snippet.objects.top_rated()
self.assertEqual(top_rated[0], self.snippet1)
self.assertEqual(top_rated[1], self.snippet3)
self.assertEqual(top_rated[2], self.snippet2)
def test_most_bookmarked(self):
most_bookmarked = Snippet.objects.most_bookmarked()
self.assertEqual(most_bookmarked[0], self.snippet1)
self.assertEqual(most_bookmarked[1], self.snippet3)
self.assertEqual(most_bookmarked[2], self.snippet2)
class ModelTestCase(BaseCabTestCase):
"""
Tests to make sure that custom model signal handlers, denormalized fields,
work as expected
"""
def test_snippet_escaping(self):
self.snippet1.description = '<script>alert("hacked");</script>'
self.snippet1.save()
self.assertEqual(
self.snippet1.description_html,
            '&lt;script&gt;alert("hacked");&lt;/script&gt;'
)
def test_ratings_hooks(self):
# setUp() will actually fire off most of these hooks
self.assertEqual(self.snippet1.rating_score, 2)
# calling the hooks manually doesn't affect the results
self.snippet1.update_rating()
self.assertEqual(self.snippet1.rating_score, 2)
# check the other snippets
self.assertEqual(self.snippet2.rating_score, -2)
self.assertEqual(self.snippet3.rating_score, 0)
self.snippet1.ratings.rate(self.user_a, -1)
# refresh from the db
self.snippet1 = Snippet.objects.get(pk=self.snippet1.pk)
self.assertEqual(self.snippet1.rating_score, 0)
self.snippet3.ratings.rate(self.user_a, -1)
# refresh from the db
self.snippet3 = Snippet.objects.get(pk=self.snippet3.pk)
self.assertEqual(self.snippet3.rating_score, -2)
def test_bookmark_hooks(self):
self.assertEqual(self.snippet1.bookmark_count, 2)
# update_bookmark_count() doesn't screw things up
self.snippet1.update_bookmark_count()
self.assertEqual(self.snippet1.bookmark_count, 2)
self.assertEqual(self.snippet2.bookmark_count, 0)
self.assertEqual(self.snippet3.bookmark_count, 1)
# create a new bookmark and check that the count got updated
b = Bookmark.objects.create(user=self.user_b, snippet=self.snippet2)
# refresh from the db
self.snippet2 = Snippet.objects.get(pk=self.snippet2.pk)
self.assertEqual(self.snippet2.bookmark_count, 1)
# delete a bookmark and check that the count got updated
b.delete()
# refresh from the db
self.snippet2 = Snippet.objects.get(pk=self.snippet2.pk)
self.assertEqual(self.snippet2.bookmark_count, 0)
def test_snippet_description(self):
# these may be pointless, but make sure things get marked-down on save
self.assertEqual(self.snippet1.description_html, '<h1>A greeting</h1>')
self.snippet1.description = '**Booyakasha**'
self.snippet1.save()
self.assertTrue('<strong>Booyakasha</strong>' in
self.snippet1.description_html)
def test_tag_string(self):
# yes. test a list comprehension
self.assertEqual(self.snippet1.get_tagstring(), 'hello, world')
self.assertEqual(self.snippet2.get_tagstring(), 'goodbye, world')
self.assertEqual(self.snippet3.get_tagstring(), 'haxor')
class ViewTestCase(BaseCabTestCase):
def test_bookmark_views(self):
# gotta have it
user_bookmarks = reverse('cab_user_bookmarks')
self.assertEqual(user_bookmarks, '/bookmarks/')
# test for the login-required bits
resp = self.ensure_login_required(user_bookmarks, 'a', 'a')
self.assertCountEqual(resp.context['object_list'], [self.bookmark1, self.bookmark3])
resp = self.ensure_login_required(user_bookmarks, 'b', 'b')
self.assertCountEqual(resp.context['object_list'], [self.bookmark2])
add_bookmark = reverse('cab_bookmark_add', args=[self.snippet2.pk])
self.assertEqual(add_bookmark, '/bookmarks/add/%d/' % self.snippet2.pk)
# add a bookmark -- this does *not* require a POST for some reason so
# this test will need to be amended when I get around to fixing this
resp = self.ensure_login_required(add_bookmark, 'a', 'a')
self.assertRedirects(resp, '/snippets/%d/' % self.snippet2.pk)
new_bookmark = Bookmark.objects.get(user=self.user_a,
snippet=self.snippet2)
resp = self.ensure_login_required(user_bookmarks, 'a', 'a')
self.assertCountEqual(resp.context['object_list'], [self.bookmark1, self.bookmark3, new_bookmark])
# make sure we have to log in to delete a bookmark
delete_bookmark = reverse('cab_bookmark_delete',
args=[self.snippet2.pk])
self.assertEqual(delete_bookmark,
'/bookmarks/delete/%d/' % self.snippet2.pk)
resp = self.ensure_login_required(delete_bookmark, 'a', 'a')
# login and post to delete the bookmark
self.client.login(username='a', password='a')
resp = self.client.post(delete_bookmark)
self.assertRedirects(resp, '/snippets/%d/' % self.snippet2.pk)
# the bookmark is gone!
self.assertRaises(Bookmark.DoesNotExist, Bookmark.objects.get,
user=self.user_a, snippet=self.snippet2)
# check the bookmark list view and make sure
resp = self.ensure_login_required(user_bookmarks, 'a', 'a')
self.assertCountEqual(resp.context['object_list'], [self.bookmark1, self.bookmark3])
def test_language_views(self):
# where would we be without you
language_url = reverse('cab_language_list')
self.assertEqual(language_url, '/languages/')
resp = self.client.get(language_url)
self.assertEqual(resp.status_code, 200)
self.assertCountEqual(resp.context['object_list'], [self.python, self.sql])
language_detail = reverse('cab_language_detail', args=['python'])
self.assertEqual(language_detail, '/languages/python/')
resp = self.client.get(language_detail)
self.assertEqual(resp.status_code, 200)
self.assertCountEqual(resp.context['object_list'], [self.snippet1, self.snippet2])
self.assertEqual(resp.context['language'], self.python)
def test_popular_views(self):
top_authors = reverse('cab_top_authors')
self.assertEqual(top_authors, '/users/')
resp = self.client.get(top_authors)
self.assertEqual(resp.status_code, 200)
user_a, user_b = resp.context['object_list']
self.assertEqual(user_a, self.user_a)
self.assertEqual(user_b, self.user_b)
top_languages = reverse('cab_top_languages')
self.assertEqual(top_languages, '/popular/languages/')
resp = self.client.get(top_languages)
self.assertEqual(resp.status_code, 200)
python, sql = resp.context['object_list']
self.assertEqual(python, self.python)
self.assertEqual(sql, self.sql)
top_tags = reverse('cab_top_tags')
self.assertEqual(top_tags, '/tags/')
resp = self.client.get(top_tags)
self.assertEqual(resp.status_code, 200)
tag_names = [tag.name for tag in resp.context['object_list']]
self.assertEqual(tag_names, ['world', 'goodbye', 'haxor', 'hello'])
top_bookmarked = reverse('cab_top_bookmarked')
self.assertEqual(top_bookmarked, '/popular/bookmarked/')
resp = self.client.get(top_bookmarked)
self.assertEqual(resp.status_code, 200)
s1, s3, s2 = resp.context['object_list']
self.assertEqual(s1, self.snippet1)
self.assertEqual(s3, self.snippet3)
self.assertEqual(s2, self.snippet2)
top_rated = reverse('cab_top_rated')
self.assertEqual(top_rated, '/popular/rated/')
resp = self.client.get(top_rated)
self.assertEqual(resp.status_code, 200)
s1, s3, s2 = resp.context['object_list']
self.assertEqual(s1, self.snippet1)
self.assertEqual(s3, self.snippet3)
self.assertEqual(s2, self.snippet2)
def test_tag_detail(self):
tag_detail = reverse('cab_snippet_matches_tag', args=['world'])
self.assertEqual(tag_detail, '/tags/world/')
resp = self.client.get(tag_detail)
self.assertEqual(resp.status_code, 200)
self.assertCountEqual(resp.context['object_list'], [self.snippet1, self.snippet2])
def test_author_detail(self):
author_detail = reverse('cab_author_snippets', args=['a'])
self.assertEqual(author_detail, '/users/a/')
resp = self.client.get(author_detail)
self.assertEqual(resp.status_code, 200)
self.assertCountEqual(resp.context['object_list'], [self.snippet1, self.snippet3])
def test_feeds(self):
        # Keep these feed tests shallow and only assert on status codes, since
        # the feed response bodies are awkward to inspect in detail.
resp = self.client.get('/feeds/latest/')
self.assertEqual(resp.status_code, 200)
resp = self.client.get('/feeds/author/a/')
self.assertEqual(resp.status_code, 200)
resp = self.client.get('/feeds/author/c/')
self.assertEqual(resp.status_code, 404)
resp = self.client.get('/feeds/tag/world/')
self.assertEqual(resp.status_code, 200)
resp = self.client.get('/feeds/tag/nothing/')
self.assertEqual(resp.status_code, 404)
resp = self.client.get('/feeds/language/python/')
self.assertEqual(resp.status_code, 200)
resp = self.client.get('/feeds/language/java/')
self.assertEqual(resp.status_code, 404)
class SnippetViewsTestCase(BaseCabTestCase):
def test_index(self):
snippet_index = reverse('cab_snippet_list')
self.assertEqual(snippet_index, '/snippets/')
resp = self.client.get(snippet_index)
self.assertEqual(resp.status_code, 200)
self.assertCountEqual(resp.context['object_list'], [self.snippet1, self.snippet2, self.snippet3])
def test_snippet_detail(self):
snippet_detail = reverse('cab_snippet_detail', args=[self.snippet1.pk])
self.assertEqual(snippet_detail, '/snippets/%d/' % self.snippet1.pk)
resp = self.client.get(snippet_detail)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.context['object'], self.snippet1)
def test_snippet_download(self):
snippet_download = reverse('cab_snippet_download',
args=[self.snippet1.pk])
self.assertEqual(snippet_download,
'/snippets/%d/download/' % self.snippet1.pk)
resp = self.client.get(snippet_download)
self.assertEqual(resp['content-type'], 'text/x-python')
self.assertEqual(resp.content, b'print "Hello, world"')
def test_snippet_rate(self):
self.snippet1.ratings.clear()
self.snippet1 = Snippet.objects.get(pk=self.snippet1.pk)
self.assertEqual(self.snippet1.rating_score, 0)
self.assertEqual(self.snippet1.ratings.count(), 0)
snippet_rate = reverse('cab_snippet_rate', args=[self.snippet1.pk])
self.assertEqual(snippet_rate, '/snippets/%d/rate/' % self.snippet1.pk)
resp = self.client.get(snippet_rate + '?score=up')
self.assertEqual(resp.status_code, 302)
self.assertTrue('accounts/login' in resp['location'])
self.client.login(username='a', password='a')
resp = self.client.get(snippet_rate + '?score=NaN')
self.assertEqual(self.snippet1.ratings.count(), 0)
resp = self.client.get(snippet_rate + '?score=up')
self.assertEqual(self.snippet1.ratings.count(), 1)
self.assertEqual(self.snippet1.ratings.cumulative_score(), 1)
resp = self.client.get(snippet_rate + '?score=down')
self.assertEqual(self.snippet1.ratings.count(), 1)
self.assertEqual(self.snippet1.ratings.cumulative_score(), -1)
def test_snippet_unrate_up(self):
"""
Sending the score "reset" should remove a user's vote.
"""
self.snippet1.ratings.clear()
self.snippet1 = Snippet.objects.get(pk=self.snippet1.pk)
snippet_rate = reverse('cab_snippet_rate', args=[self.snippet1.pk])
self.assertEqual(self.snippet1.rating_score, 0)
self.assertEqual(self.snippet1.ratings.count(), 0)
self.client.login(username='a', password='a')
self.client.get(snippet_rate + '?score=up')
self.assertEqual(self.snippet1.ratings.count(), 1)
self.snippet1.update_rating()
self.assertEqual(self.snippet1.rating_score, 1)
self.client.get(snippet_rate + '?score=reset')
self.assertEqual(self.snippet1.ratings.count(), 0)
self.snippet1.update_rating()
self.assertEqual(self.snippet1.rating_score, 0)
def test_snippet_edit(self):
snippet_edit = reverse('cab_snippet_edit', args=[self.snippet1.pk])
self.assertEqual(snippet_edit, '/snippets/%d/edit/' % self.snippet1.pk)
resp = self.client.get(snippet_edit)
self.assertEqual(resp.status_code, 302)
self.assertTrue('accounts/login' in resp['location'])
self.client.login(username='b', password='b')
resp = self.client.get(snippet_edit)
self.assertEqual(resp.status_code, 403)
self.client.login(username='a', password='a')
resp = self.client.get(snippet_edit)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.context['form'].instance, self.snippet1)
payload = {'title': 'Hi', 'version': '1.1',
'language': str(self.python.pk),
'description': 'wazzah\n======',
'code': 'print "Hi"', 'tags': 'hi, world'}
resp = self.client.post(snippet_edit, payload)
snippet1 = Snippet.objects.get(pk=self.snippet1.pk)
self.assertEqual(snippet1.title, 'Hi')
self.assertEqual(snippet1.description_html, '<h1>wazzah</h1>')
self.assertEqual(snippet1.code, 'print "Hi"')
self.assertEqual([t.name for t in snippet1.tags.all()], ['world', 'hi'])
self.assertRedirects(resp, '/snippets/%d/' % snippet1.pk)
def test_snippet_edit_no_tags(self):
"""
The user should be able to create/edit a snippet and remove all tags or create it without any.
"""
snippet_edit = reverse('cab_snippet_edit', args=[self.snippet1.pk])
self.assertEqual(snippet_edit, '/snippets/%d/edit/' % self.snippet1.pk)
resp = self.client.get(snippet_edit)
self.assertEqual(resp.status_code, 302)
self.assertTrue('accounts/login' in resp['location'])
self.client.login(username='a', password='a')
resp = self.client.get(snippet_edit)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.context['form'].instance, self.snippet1)
payload = {'title': 'Hi', 'version': '1.1',
'language': str(self.python.pk),
'description': 'wazzah\n======',
'code': 'print "Hi"'}
resp = self.client.post(snippet_edit, payload)
snippet1 = Snippet.objects.get(pk=self.snippet1.pk)
self.assertEqual(snippet1.title, 'Hi')
self.assertEqual(snippet1.description_html, '<h1>wazzah</h1>')
self.assertEqual(snippet1.code, 'print "Hi"')
self.assertEqual(0, snippet1.tags.count())
self.assertRedirects(resp, '/snippets/%d/' % snippet1.pk)
def test_snippet_add(self):
snippet_add = reverse('cab_snippet_add')
self.assertEqual(snippet_add, '/snippets/add/')
resp = self.ensure_login_required(snippet_add, 'a', 'a')
self.client.login(username='a', password='a')
payload = {'title': 'Hi', 'version': '1.1',
'language': str(self.python.pk),
'description': 'wazzah\n======',
'code': 'print "Hi"', 'tags': 'hi, world'}
resp = self.client.post(snippet_add, payload)
new_snippet = Snippet.objects.get(title='Hi')
self.assertEqual(new_snippet.title, 'Hi')
self.assertEqual(new_snippet.description_html, '<h1>wazzah</h1>')
self.assertEqual(new_snippet.code, 'print "Hi"')
self.assertEqual([t.name for t in new_snippet.tags.all()],
['world', 'hi'])
self.assertRedirects(resp, '/snippets/%d/' % new_snippet.pk)
class TemplatetagTestCase(BaseCabTestCase):
def test_cab_tags(self):
t = Template("""{% load cab_tags %}{% if snippet|is_bookmarked:user %}Y{% else %}N{% endif %}""")
c = Context({'snippet': self.snippet1, 'user': self.user_a})
rendered = t.render(c)
self.assertEqual(rendered, 'Y')
Bookmark.objects.filter(user=self.user_a,
snippet=self.snippet1).delete()
rendered = t.render(c)
self.assertEqual(rendered, 'N')
c = Context({'snippet': self.snippet1, 'user': AnonymousUser()})
rendered = t.render(c)
self.assertEqual(rendered, 'N')
def test_core_tags(self):
t = Template('''{% load core_tags %}{% for s in "cab.snippet"|latest:2 %}{{ s.title }}|{% endfor %}''')
rendered = t.render(Context({}))
self.assertEqual(rendered,
'%s|%s|' % (self.snippet3.title, self.snippet2.title))
t = Template(
'{% load core_tags %}{% for t in "cab.snippet"|call_manager:"top_tags"|slice:":2" %}'
'{{ t.name }}|{% endfor %}'
)
rendered = t.render(Context({}))
self.assertEqual(rendered, 'world|goodbye|')
class MarkupTests(SimpleTestCase):
def test_safe_markdown(self):
self.assertEqual(safe_markdown('<p>foo</p>'), '<p>foo</p>')
self.assertEqual(safe_markdown('<pre>foo</pre>'), '<pre>foo</pre>')
class SearchViewsTestCase(BaseCabTestCase):
def test_index(self):
search_index = reverse('cab_search')
self.assertEqual(search_index, '/search/')
resp = self.client.get(search_index)
self.assertEqual(resp.status_code, 200)
self.assertCountEqual(resp.context['object_list'], [self.snippet1, self.snippet2, self.snippet3])
def test_q_search(self):
search_index = reverse('cab_search')
resp = self.client.get(search_index + '?q=greeting')
self.assertCountEqual(resp.context['object_list'], [self.snippet1])
resp = self.client.get(search_index + '?q=doesnotexistforsure')
self.assertCountEqual(resp.context['object_list'], [])
class ApiTestCase(TestCase):
def setUp(self):
"""
        Tags and ratings use GFKs, which require content-type ids, so all of
        this fixture setup is done here in setUp().
"""
self.user_a = User.objects.create_user('a', 'a', 'a')
self.user_b = User.objects.create_user('b', 'b', 'b')
self.python = Language.objects.create(
name='Python',
slug='python',
language_code='python',
mime_type='text/x-python',
file_extension='py')
self.sql = Language.objects.create(
name='SQL',
slug='sql',
language_code='sql',
mime_type='text/x-sql',
file_extension='sql')
self.snippet1 = Snippet.objects.create(
title='Hello world',
language=self.python,
author=self.user_a,
description='A greeting\n==========',
code='print "Hello, world"')
self.snippet1.tags.add('hello', 'world')
self.snippet2 = Snippet.objects.create(
title='Goodbye world',
language=self.python,
author=self.user_b,
description='A farewell\n==========',
code='print "Goodbye, world"')
self.snippet2.tags.add('goodbye', 'world')
self.snippet3 = Snippet.objects.create(
title='One of these things is not like the others',
language=self.sql,
author=self.user_a,
description='Haxor some1z db',
code='DROP TABLE accounts;')
self.snippet3.tags.add('haxor')
self.bookmark1 = Bookmark.objects.create(snippet=self.snippet1,
user=self.user_a)
self.bookmark2 = Bookmark.objects.create(snippet=self.snippet1,
user=self.user_b)
self.bookmark3 = Bookmark.objects.create(snippet=self.snippet3,
user=self.user_a)
self.snippet1.ratings.rate(self.user_a, 1)
self.snippet1.ratings.rate(self.user_b, 1)
self.snippet2.ratings.rate(self.user_a, -1)
self.snippet2.ratings.rate(self.user_b, -1)
self.snippet3.ratings.rate(self.user_a, 1)
self.snippet3.ratings.rate(self.user_b, -1)
self.snippet1 = Snippet.objects.get(pk=self.snippet1.pk)
self.snippet2 = Snippet.objects.get(pk=self.snippet2.pk)
self.snippet3 = Snippet.objects.get(pk=self.snippet3.pk)
def test_get_all_snippets(self):
# get API response
response = self.client.get(reverse('api_snippet_list'))
# get data from db
snippets = Snippet.objects.all()
serializer = SnippetSerializer(snippets, many=True)
self.assertEqual(response.data, serializer.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
| bsd-3-clause | 3,212,347,291,658,060,000 | 39.376147 | 111 | 0.616564 | false | 3.698319 | true | false | false |
gforsyth/doctr_testing | doctr/tests/test_travis.py | 1 | 4183 | """
So far, very little is actually tested here, because there aren't many
functions that can be tested outside of actually running them on Travis.
"""
import tempfile
import os
from os.path import join
import pytest
from ..travis import sync_from_log
@pytest.mark.parametrize("src", ["src"])
@pytest.mark.parametrize("dst", ['.', 'dst'])
def test_sync_from_log(src, dst):
with tempfile.TemporaryDirectory() as dir:
try:
old_curdir = os.path.abspath(os.curdir)
os.chdir(dir)
# Set up a src directory with some files
os.makedirs(src)
with open(join(src, 'test1'), 'w') as f:
f.write('test1')
os.makedirs(join(src, 'testdir'))
with open(join(src, 'testdir', 'test2'), 'w') as f:
f.write('test2')
# Test that the sync happens
added, removed = sync_from_log(src, dst, 'logfile')
assert added == [
join(dst, 'test1'),
join(dst, 'testdir', 'test2'),
'logfile',
]
assert removed == []
with open(join(dst, 'test1')) as f:
assert f.read() == 'test1'
with open(join(dst, 'testdir', 'test2')) as f:
assert f.read() == 'test2'
with open('logfile') as f:
assert f.read() == '\n'.join([
join(dst, 'test1'),
join(dst, 'testdir', 'test2'),
])
# Create a new file
with open(join(src, 'test3'), 'w') as f:
f.write('test3')
added, removed = sync_from_log(src, dst, 'logfile')
assert added == [
join(dst, 'test1'),
join(dst, 'test3'),
join(dst, 'testdir', 'test2'),
'logfile',
]
assert removed == []
with open(join(dst, 'test1')) as f:
assert f.read() == 'test1'
with open(join(dst, 'testdir', 'test2')) as f:
assert f.read() == 'test2'
with open(join(dst, 'test3')) as f:
assert f.read() == 'test3'
with open('logfile') as f:
assert f.read() == '\n'.join([
join(dst, 'test1'),
join(dst, 'test3'),
join(dst, 'testdir', 'test2'),
])
# Delete a file
os.remove(join(src, 'test3'))
added, removed = sync_from_log(src, dst, 'logfile')
assert added == [
join(dst, 'test1'),
join(dst, 'testdir', 'test2'),
'logfile',
]
assert removed == [
join(dst, 'test3'),
]
with open(join(dst, 'test1')) as f:
assert f.read() == 'test1'
with open(join(dst, 'testdir', 'test2')) as f:
assert f.read() == 'test2'
assert not os.path.exists(join(dst, 'test3'))
with open('logfile') as f:
assert f.read() == '\n'.join([
join(dst, 'test1'),
join(dst, 'testdir', 'test2'),
])
# Change a file
with open(join(src, 'test1'), 'w') as f:
f.write('test1 modified')
added, removed = sync_from_log(src, dst, 'logfile')
assert added == [
join(dst, 'test1'),
join(dst, 'testdir', 'test2'),
'logfile',
]
assert removed == []
with open(join(dst, 'test1')) as f:
assert f.read() == 'test1 modified'
with open(join(dst, 'testdir', 'test2')) as f:
assert f.read() == 'test2'
assert not os.path.exists(join(dst, 'test3'))
with open('logfile') as f:
assert f.read() == '\n'.join([
join(dst, 'test1'),
join(dst, 'testdir', 'test2'),
])
finally:
os.chdir(old_curdir)
| mit | -4,881,216,847,673,967,000 | 28.048611 | 72 | 0.434616 | false | 4.045455 | true | false | false |
h2oai/h2o-3 | h2o-py/h2o/automl/autoh2o.py | 2 | 39006 | # -*- encoding: utf-8 -*-
import functools as ft
from inspect import getdoc
import re
import h2o
from h2o.automl._base import H2OAutoMLBaseMixin
from h2o.automl._h2o_automl_output import H2OAutoMLOutput
from h2o.base import Keyed
from h2o.estimators import H2OEstimator
from h2o.exceptions import H2OResponseError, H2OValueError
from h2o.frame import H2OFrame
from h2o.job import H2OJob
from h2o.utils.shared_utils import check_id
from h2o.utils.typechecks import assert_is_type, is_type, numeric
_params_doc_ = dict() # holds the doc per param extracted from H2OAutoML constructor
def _extract_params_doc(docstr):
pat = re.compile(r"^:param (\w+ )?(?P<name>\w+):\s?(?P<doc>.*)") # match param doc-start in Sphinx format ":param type name: description"
lines = docstr.splitlines()
param, doc = None, None
for l in lines:
m = pat.match(l)
if m:
if param:
_params_doc_[param] = "\n".join(doc)
param = m.group('name')
doc = [m.group('doc')]
elif param:
doc.append(l)
def _aml_property(param_path, name=None, types=None, validate_fn=None, freezable=False, set_input=True):
path = param_path.split('.')
name = name or path[-1]
def attr_name(self, attr):
return ("_"+self.__class__.__name__+attr) if attr.startswith('__') and not attr.endswith('__') else attr
def _fget(self):
_input = getattr(self, attr_name(self, '__input'))
return _input.get(name)
def _fset(self, value):
if freezable and getattr(self, attr_name(self, '__frozen'), False):
raise H2OValueError("Param ``%s`` can not be modified after the first call to ``train``." % name, name)
if types is not None:
assert_is_type(value, *types)
input_val = value
if validate_fn:
value = validate_fn(self, value)
_input = getattr(self, attr_name(self, '__input'))
_input[name] = input_val if set_input else value
group = getattr(self, attr_name(self, path[0]))
if group is None:
group = {}
setattr(self, attr_name(self, path[0]), group)
obj = group
for t in path[1:-1]:
tmp = obj.get(t)
if tmp is None:
tmp = obj[t] = {}
obj = tmp
obj[path[-1]] = value
return property(fget=_fget, fset=_fset, doc=_params_doc_.get(name, None))
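# --- Hedged illustration (not part of the original module) -------------------
# Shows how a property built with ``_aml_property`` mirrors user input into the
# nested backend parameter dicts.  The ``Toy`` class is a hypothetical stand-in
# used only for this demonstration.
def _aml_property_example():
    class Toy(object):
        seed = _aml_property('build_control.stopping_criteria.seed', types=(None, int))
        def __init__(self):
            self.__input = dict()     # read back by the property getter
            self.build_control = None
    t = Toy()
    t.seed = 42
    assert t.seed == 42
    assert t.build_control == {'stopping_criteria': {'seed': 42}}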
class H2OAutoML(H2OAutoMLBaseMixin, Keyed):
"""
Automatic Machine Learning
The Automatic Machine Learning (AutoML) function automates the supervised machine learning model training process.
The current version of AutoML trains and cross-validates the following algorithms (in the following order):
three pre-specified XGBoost GBM (Gradient Boosting Machine) models,
a fixed grid of GLMs,
a default Random Forest (DRF),
five pre-specified H2O GBMs,
a near-default Deep Neural Net,
an Extremely Randomized Forest (XRT),
a random grid of XGBoost GBMs,
a random grid of H2O GBMs,
and a random grid of Deep Neural Nets.
In some cases, there will not be enough time to complete all the algorithms, so some may be missing from the
leaderboard. AutoML then trains two Stacked Ensemble models, one of all the models, and one of only the best
models of each kind.
:examples:
>>> import h2o
>>> from h2o.automl import H2OAutoML
>>> h2o.init()
>>> # Import a sample binary outcome train/test set into H2O
>>> train = h2o.import_file("https://s3.amazonaws.com/erin-data/higgs/higgs_train_10k.csv")
>>> test = h2o.import_file("https://s3.amazonaws.com/erin-data/higgs/higgs_test_5k.csv")
>>> # Identify the response and set of predictors
>>> y = "response"
>>> x = list(train.columns) #if x is defined as all columns except the response, then x is not required
>>> x.remove(y)
>>> # For binary classification, response should be a factor
>>> train[y] = train[y].asfactor()
>>> test[y] = test[y].asfactor()
>>> # Run AutoML for 30 seconds
>>> aml = H2OAutoML(max_runtime_secs = 30)
>>> aml.train(x = x, y = y, training_frame = train)
>>> # Print Leaderboard (ranked by xval metrics)
>>> aml.leaderboard
>>> # (Optional) Evaluate performance on a test set
>>> perf = aml.leader.model_performance(test)
>>> perf.auc()
"""
def __init__(self,
nfolds=5,
balance_classes=False,
class_sampling_factors=None,
max_after_balance_size=5.0,
max_runtime_secs=None,
max_runtime_secs_per_model=None,
max_models=None,
stopping_metric="AUTO",
stopping_tolerance=None,
stopping_rounds=3,
seed=None,
project_name=None,
exclude_algos=None,
include_algos=None,
exploitation_ratio=0,
modeling_plan=None,
preprocessing=None,
monotone_constraints=None,
keep_cross_validation_predictions=False,
keep_cross_validation_models=False,
keep_cross_validation_fold_assignment=False,
sort_metric="AUTO",
export_checkpoints_dir=None,
verbosity="warn",
**kwargs):
"""
Create a new H2OAutoML instance.
:param int nfolds: Number of folds for k-fold cross-validation.
Use ``0`` to disable cross-validation; this will also disable Stacked Ensemble (thus decreasing the overall model performance).
Defaults to ``5``.
:param bool balance_classes: Balance training data class counts via over/under-sampling (for imbalanced data).
Defaults to ``False``.
:param class_sampling_factors: Desired over/under-sampling ratios per class (in lexicographic order).
If not specified, sampling factors will be automatically computed to obtain class balance during training.
:param float max_after_balance_size: Maximum relative size of the training data after balancing class counts (can be less than 1.0).
Requires ``balance_classes``.
Defaults to ``5.0``.
:param int max_runtime_secs: Specify the maximum time that the AutoML process will run for, prior to training the final Stacked Ensemble models.
            If neither ``max_runtime_secs`` nor ``max_models`` is specified by the user, then ``max_runtime_secs``
            defaults to 3600 seconds (1 hour).
:param int max_runtime_secs_per_model: Controls the max time the AutoML run will dedicate to each individual model.
Defaults to ``0`` (disabled: no time limit).
:param int max_models: Specify the maximum number of models to build in an AutoML run, excluding the Stacked Ensemble models.
Defaults to ``0`` (disabled: no limitation).
:param str stopping_metric: Specifies the metric to use for early stopping.
The available options are:
``"AUTO"`` (This defaults to ``"logloss"`` for classification, ``"deviance"`` for regression),
``"deviance"``, ``"logloss"``, ``"mse"``, ``"rmse"``, ``"mae"``, ``"rmsle"``, ``"auc"``, ``aucpr``, ``"lift_top_group"``,
``"misclassification"``, ``"mean_per_class_error"``, ``"r2"``.
Defaults to ``"AUTO"``.
:param float stopping_tolerance: Specify the relative tolerance for the metric-based stopping to stop the AutoML run if the improvement is less than this value.
Defaults to ``0.001`` if the dataset is at least 1 million rows;
otherwise it defaults to a value determined by the size of the dataset and the non-NA-rate, in which case the value is computed as 1/sqrt(nrows * non-NA-rate).
:param int stopping_rounds: Stop training new models in the AutoML run when the option selected for
stopping_metric doesn't improve for the specified number of models, based on a simple moving average.
To disable this feature, set it to ``0``.
Defaults to ``3`` and must be an non-negative integer.
:param int seed: Set a seed for reproducibility.
AutoML can only guarantee reproducibility if ``max_models`` or early stopping is used because ``max_runtime_secs`` is resource limited,
meaning that if the resources are not the same between runs, AutoML may be able to train more models on one run vs another.
Defaults to ``None``.
:param str project_name: Character string to identify an AutoML project.
Defaults to ``None``, which means a project name will be auto-generated based on the training frame ID.
More models can be trained on an existing AutoML project by specifying the same project name in multiple calls to the AutoML function
(as long as the same training frame, or a sample, is used in subsequent runs).
:param exclude_algos: List the algorithms to skip during the model-building phase.
The full list of options is:
``"DRF"`` (Random Forest and Extremely-Randomized Trees),
``"GLM"``,
``"XGBoost"``,
``"GBM"``,
``"DeepLearning"``,
``"StackedEnsemble"``.
Defaults to ``None``, which means that all appropriate H2O algorithms will be used, if the search stopping criteria allow. Optional.
Usage example: ``exclude_algos = ["GLM", "DeepLearning", "DRF"]``.
:param include_algos: List the algorithms to restrict to during the model-building phase.
This can't be used in combination with `exclude_algos` param.
Defaults to ``None``, which means that all appropriate H2O algorithms will be used, if the search stopping criteria allow. Optional.
:param exploitation_ratio: The budget ratio (between 0 and 1) dedicated to the exploitation (vs exploration) phase.
By default, the exploitation phase is disabled (exploitation_ratio=0) as this is still experimental;
to activate it, it is recommended to try a ratio around 0.1.
Note that the current exploitation phase only tries to fine-tune the best XGBoost and the best GBM found during exploration.
:param modeling_plan: List of modeling steps to be used by the AutoML engine (they may not all get executed, depending on other constraints).
Defaults to None (Expert usage only).
:param preprocessing: List of preprocessing steps to run. Only 'target_encoding' is currently supported.
:param monotone_constraints: Dict representing monotonic constraints.
Use +1 to enforce an increasing constraint and -1 to specify a decreasing constraint.
:param keep_cross_validation_predictions: Whether to keep the predictions of the cross-validation predictions.
This needs to be set to ``True`` if running the same AutoML object for repeated runs because CV predictions are required to build
additional Stacked Ensemble models in AutoML.
Defaults to ``False``.
:param keep_cross_validation_models: Whether to keep the cross-validated models.
Keeping cross-validation models may consume significantly more memory in the H2O cluster.
Defaults to ``False``.
:param keep_cross_validation_fold_assignment: Whether to keep fold assignments in the models.
Deleting them will save memory in the H2O cluster.
Defaults to ``False``.
:param sort_metric: Metric to sort the leaderboard by.
For binomial classification choose between ``"auc"``, ``"aucpr"``, ``"logloss"``, ``"mean_per_class_error"``, ``"rmse"``, ``"mse"``.
For multinomial classification choose between ``"mean_per_class_error"``, ``"logloss"``, ``"rmse"``, ``"mse"``.
For regression choose between ``"deviance"``, ``"rmse"``, ``"mse"``, ``"mae"``, ``"rmlse"``.
Defaults to ``"AUTO"`` (This translates to ``"auc"`` for binomial classification, ``"mean_per_class_error"`` for multinomial classification, ``"deviance"`` for regression).
:param export_checkpoints_dir: Path to a directory where every model will be stored in binary form.
:param verbosity: Verbosity of the backend messages printed during training.
Available options are None (live log disabled), ``"debug"``, ``"info"`` or ``"warn"``.
Defaults to ``"warn"``.
"""
# early validate kwargs, extracting hidden parameters:
algo_parameters = {}
for k in kwargs:
if k == 'algo_parameters':
algo_parameters = kwargs[k] or {}
else:
raise TypeError("H2OAutoML got an unexpected keyword argument '%s'" % k)
# Check if H2O jar contains AutoML
try:
h2o.api("GET /3/Metadata/schemas/AutoMLV99")
except h2o.exceptions.H2OResponseError as e:
print(e)
print("*******************************************************************\n" \
"*Please verify that your H2O jar has the proper AutoML extensions.*\n" \
"*******************************************************************\n" \
"\nVerbose Error Message:")
self._job = None
self._leader_id = None
self._leaderboard = None
self._verbosity = verbosity
self._event_log = None
self._training_info = None
self._state_json = None
self._build_resp = None # contains all the actual parameters used on backend
self.__frozen = False
self.__input = dict() # contains all the input params as entered by the user
# Make bare minimum params containers
self.build_control = dict()
self.build_models = dict()
self.input_spec = dict()
self.project_name = project_name
self.nfolds = nfolds
self.balance_classes = balance_classes
self.class_sampling_factors = class_sampling_factors
self.max_after_balance_size = max_after_balance_size
self.keep_cross_validation_models = keep_cross_validation_models
self.keep_cross_validation_fold_assignment = keep_cross_validation_fold_assignment
self.keep_cross_validation_predictions = keep_cross_validation_predictions
self.export_checkpoints_dir = export_checkpoints_dir
self.max_runtime_secs = max_runtime_secs
self.max_runtime_secs_per_model = max_runtime_secs_per_model
self.max_models = max_models
self.stopping_metric = stopping_metric
self.stopping_tolerance = stopping_tolerance
self.stopping_rounds = stopping_rounds
self.seed = seed
self.exclude_algos = exclude_algos
self.include_algos = include_algos
self.exploitation_ratio = exploitation_ratio
self.modeling_plan = modeling_plan
self.preprocessing = preprocessing
if monotone_constraints is not None:
algo_parameters['monotone_constraints'] = monotone_constraints
self._algo_parameters = algo_parameters
self.sort_metric = sort_metric
#---------------------------------------------------------------------------
# AutoML params
#---------------------------------------------------------------------------
def __validate_not_set(self, val, prop=None, message=None):
assert val is None or getattr(self, prop, None) is None, message
return val
def __validate_project_name(self, project_name):
check_id(project_name, "H2OAutoML")
return project_name
def __validate_nfolds(self, nfolds):
assert nfolds == 0 or nfolds > 1, "nfolds set to %s; use nfolds >=2 if you want cross-validated metrics and Stacked Ensembles or use nfolds = 0 to disable." % nfolds
return nfolds
def __validate_modeling_plan(self, modeling_plan):
if modeling_plan is None:
return None
supported_aliases = ['all', 'defaults', 'grids']
def assert_is_step_def(sd):
assert 'name' in sd, "each definition must have a 'name' key"
assert 0 < len(sd) < 3, "each definition must have only 1 or 2 keys: name, name+alias or name+steps"
assert len(sd) == 1 or 'alias' in sd or 'steps' in sd, "steps definitions support only the following keys: name, alias, steps"
assert 'alias' not in sd or sd['alias'] in supported_aliases, "alias must be one of %s" % supported_aliases
assert 'steps' not in sd or (is_type(sd['steps'], list) and all(assert_is_step(s) for s in sd['steps']))
def assert_is_step(s):
assert is_type(s, dict), "each step must be a dict with an 'id' key and an optional 'weight' key"
assert 'id' in s, "each step must have an 'id' key"
assert len(s) == 1 or ('weight' in s and is_type(s['weight'], int)), "weight must be an integer"
return True
plan = []
for step_def in modeling_plan:
assert_is_type(step_def, dict, tuple, str)
if is_type(step_def, dict):
assert_is_step_def(step_def)
plan.append(step_def)
elif is_type(step_def, str):
plan.append(dict(name=step_def))
else:
assert 0 < len(step_def) < 3
assert_is_type(step_def[0], str)
name = step_def[0]
if len(step_def) == 1:
plan.append(dict(name=name))
else:
assert_is_type(step_def[1], str, list)
ids = step_def[1]
if is_type(ids, str):
assert_is_type(ids, *supported_aliases)
plan.append(dict(name=name, alias=ids))
else:
plan.append(dict(name=name, steps=[dict(id=i) for i in ids]))
return plan
def __validate_preprocessing(self, preprocessing):
if preprocessing is None:
return
assert all(p in ['target_encoding'] for p in preprocessing)
return [dict(type=p.replace("_", "")) for p in preprocessing]
def __validate_monotone_constraints(self, monotone_constraints):
if monotone_constraints is None:
self._algo_parameters.pop('monotone_constraints', None)
else:
self._algo_parameters['monotone_constraints'] = monotone_constraints
return self.__validate_algo_parameters(self._algo_parameters)
def __validate_algo_parameters(self, algo_parameters):
if algo_parameters is None:
return None
algo_parameters_json = []
for k, v in algo_parameters.items():
scope, __, name = k.partition('__')
if len(name) == 0:
name, scope = scope, 'any'
value = [dict(key=k, value=v) for k, v in v.items()] if isinstance(v, dict) else v # we can't use stringify_dict here as this will be converted into a JSON string
algo_parameters_json.append(dict(scope=scope, name=name, value=value))
return algo_parameters_json
def __validate_frame(self, fr, name=None, required=False):
return H2OFrame._validate(fr, name, required=required)
_extract_params_doc(getdoc(__init__))
project_name = _aml_property('build_control.project_name', types=(None, str), freezable=True,
validate_fn=__validate_project_name)
nfolds = _aml_property('build_control.nfolds', types=(int,), freezable=True,
validate_fn=__validate_nfolds)
balance_classes = _aml_property('build_control.balance_classes', types=(bool,), freezable=True)
class_sampling_factors = _aml_property('build_control.class_sampling_factors', types=(None, [numeric]), freezable=True)
max_after_balance_size = _aml_property('build_control.max_after_balance_size', types=(None, numeric), freezable=True)
keep_cross_validation_models = _aml_property('build_control.keep_cross_validation_models', types=(bool,), freezable=True)
keep_cross_validation_fold_assignment = _aml_property('build_control.keep_cross_validation_fold_assignment', types=(bool,), freezable=True)
keep_cross_validation_predictions = _aml_property('build_control.keep_cross_validation_predictions', types=(bool,), freezable=True)
export_checkpoints_dir = _aml_property('build_control.export_checkpoints_dir', types=(None, str), freezable=True)
max_runtime_secs = _aml_property('build_control.stopping_criteria.max_runtime_secs', types=(None, int), freezable=True)
max_runtime_secs_per_model = _aml_property('build_control.stopping_criteria.max_runtime_secs_per_model', types=(None, int), freezable=True)
max_models = _aml_property('build_control.stopping_criteria.max_models', types=(None, int), freezable=True)
stopping_metric = _aml_property('build_control.stopping_criteria.stopping_metric', types=(None, str), freezable=True)
stopping_tolerance = _aml_property('build_control.stopping_criteria.stopping_tolerance', types=(None, numeric), freezable=True)
stopping_rounds = _aml_property('build_control.stopping_criteria.stopping_rounds', types=(None, int), freezable=True)
seed = _aml_property('build_control.stopping_criteria.seed', types=(None, int), freezable=True)
exclude_algos = _aml_property('build_models.exclude_algos', types=(None, [str]), freezable=True,
validate_fn=ft.partial(__validate_not_set, prop='include_algos',
message="Use either `exclude_algos` or `include_algos`, not both."))
include_algos = _aml_property('build_models.include_algos', types=(None, [str]), freezable=True,
validate_fn=ft.partial(__validate_not_set, prop='exclude_algos',
message="Use either `exclude_algos` or `include_algos`, not both."))
exploitation_ratio = _aml_property('build_models.exploitation_ratio', types=(None, numeric), freezable=True)
modeling_plan = _aml_property('build_models.modeling_plan', types=(None, list), freezable=True,
validate_fn=__validate_modeling_plan)
preprocessing = _aml_property('build_models.preprocessing', types=(None, [str]), freezable=True,
validate_fn=__validate_preprocessing)
monotone_constraints = _aml_property('build_models.algo_parameters', name='monotone_constraints', types=(None, dict), freezable=True,
validate_fn=__validate_monotone_constraints)
_algo_parameters = _aml_property('build_models.algo_parameters', types=(None, dict), freezable=True,
validate_fn=__validate_algo_parameters)
sort_metric = _aml_property('input_spec.sort_metric', types=(None, str))
fold_column = _aml_property('input_spec.fold_column', types=(None, int, str))
weights_column = _aml_property('input_spec.weights_column', types=(None, int, str))
training_frame = _aml_property('input_spec.training_frame', set_input=False,
validate_fn=ft.partial(__validate_frame, name='training_frame', required=True))
validation_frame = _aml_property('input_spec.validation_frame', set_input=False,
validate_fn=ft.partial(__validate_frame, name='validation_frame'))
leaderboard_frame = _aml_property('input_spec.leaderboard_frame', set_input=False,
validate_fn=ft.partial(__validate_frame, name='leaderboard_frame'))
blending_frame = _aml_property('input_spec.blending_frame', set_input=False,
validate_fn=ft.partial(__validate_frame, name='blending_frame'))
response_column = _aml_property('input_spec.response_column', types=(str,))
#---------------------------------------------------------------------------
# Basic properties
#---------------------------------------------------------------------------
@property
def key(self):
return self._job.dest_key if self._job else self.project_name
@property
def leader(self):
return None if self._leader_id is None else h2o.get_model(self._leader_id)
@property
def leaderboard(self):
return H2OFrame([]) if self._leaderboard is None else self._leaderboard
@property
def event_log(self):
return H2OFrame([]) if self._event_log is None else self._event_log
@property
def training_info(self):
return dict() if self._training_info is None else self._training_info
@property
def modeling_steps(self):
"""
        Expose the modeling steps effectively used by the AutoML run.
This executed plan can be directly reinjected as the `modeling_plan` property of a new AutoML instance
to improve reproducibility across AutoML versions.
:return: a list of dictionaries representing the effective modeling plan.
"""
# removing alias key to be able to reinject result to a new AutoML instance
return list(map(lambda sdef: dict(name=sdef['name'], steps=sdef['steps']), self._state_json['modeling_steps']))
#---------------------------------------------------------------------------
# Training AutoML
#---------------------------------------------------------------------------
def train(self, x=None, y=None, training_frame=None, fold_column=None,
weights_column=None, validation_frame=None, leaderboard_frame=None, blending_frame=None):
"""
Begins an AutoML task, a background task that automatically builds a number of models
with various algorithms and tracks their performance in a leaderboard. At any point
in the process you may use H2O's performance or prediction functions on the resulting
models.
:param x: A list of column names or indices indicating the predictor columns.
:param y: An index or a column name indicating the response column.
:param fold_column: The name or index of the column in training_frame that holds per-row fold
assignments.
:param weights_column: The name or index of the column in training_frame that holds per-row weights.
:param training_frame: The H2OFrame having the columns indicated by x and y (as well as any
additional columns specified by fold_column or weights_column).
:param validation_frame: H2OFrame with validation data. This argument is ignored unless the user sets
nfolds = 0. If cross-validation is turned off, then a validation frame can be specified and used
for early stopping of individual models and early stopping of the grid searches. By default and
when nfolds > 1, cross-validation metrics will be used for early stopping and thus validation_frame will be ignored.
:param leaderboard_frame: H2OFrame with test data for scoring the leaderboard. This is optional and
if this is set to None (the default), then cross-validation metrics will be used to generate the leaderboard
rankings instead.
        :param blending_frame: H2OFrame used to train the metalearning algorithm in Stacked Ensembles (instead of relying on cross-validated predicted values).
This is optional, but when provided, it is also recommended to disable cross validation
by setting `nfolds=0` and to provide a leaderboard frame for scoring purposes.
:returns: An H2OAutoML object.
:examples:
>>> # Set up an H2OAutoML object
>>> aml = H2OAutoML(max_runtime_secs=30)
>>> # Launch an AutoML run
>>> aml.train(y=y, training_frame=train)
"""
# Minimal required arguments are training_frame and y (response)
self.training_frame = training_frame
ncols = self.training_frame.ncols
names = self.training_frame.names
if y is None and self.response_column is None:
raise H2OValueError('The response column (y) is not set; please set it to the name of the column that you are trying to predict in your data.')
elif y is not None:
assert_is_type(y, int, str)
if is_type(y, int):
if not (-ncols <= y < ncols):
raise H2OValueError("Column %d does not exist in the training frame" % y)
y = names[y]
else:
if y not in names:
raise H2OValueError("Column %s does not exist in the training frame" % y)
self.response_column = y
self.fold_column = fold_column
self.weights_column = weights_column
self.validation_frame = validation_frame
self.leaderboard_frame = leaderboard_frame
self.blending_frame = blending_frame
if x is not None:
assert_is_type(x, list)
xset = set()
if is_type(x, int, str): x = [x]
for xi in x:
if is_type(xi, int):
if not (-ncols <= xi < ncols):
raise H2OValueError("Column %d does not exist in the training frame" % xi)
xset.add(names[xi])
else:
if xi not in names:
raise H2OValueError("Column %s not in the training frame" % xi)
xset.add(xi)
ignored_columns = set(names) - xset
for col in [y, fold_column, weights_column]:
if col is not None and col in ignored_columns:
ignored_columns.remove(col)
if ignored_columns is not None:
self.input_spec['ignored_columns'] = list(ignored_columns)
def clean_params(params):
return ({k: clean_params(v) for k, v in params.items() if v is not None} if isinstance(params, dict)
else H2OEstimator._keyify(params))
automl_build_params = clean_params(dict(
build_control=self.build_control,
build_models=self.build_models,
input_spec=self.input_spec,
))
resp = self._build_resp = h2o.api('POST /99/AutoMLBuilder', json=automl_build_params)
if 'job' not in resp:
raise H2OResponseError("Backend failed to build the AutoML job: {}".format(resp))
if not self.project_name:
self.project_name = resp['build_control']['project_name']
self.__frozen = True
self._job = H2OJob(resp['job'], "AutoML")
poll_updates = ft.partial(self._poll_training_updates, verbosity=self._verbosity, state={})
try:
self._job.poll(poll_updates=poll_updates)
finally:
poll_updates(self._job, 1)
self._fetch()
return self.leader
#---------------------------------------------------------------------------
# Predict with AutoML
#---------------------------------------------------------------------------
def predict(self, test_data):
leader = self.leader
if leader is None:
self._fetch()
leader = self.leader
if leader is not None:
return leader.predict(test_data)
print("No model built yet...")
#-------------------------------------------------------------------------------------------------------------------
# Overrides
#-------------------------------------------------------------------------------------------------------------------
def detach(self):
self.__frozen = False
self.project_name = None
h2o.remove(self.leaderboard)
h2o.remove(self.event_log)
#-------------------------------------------------------------------------------------------------------------------
# Private
#-------------------------------------------------------------------------------------------------------------------
def _fetch(self):
state = H2OAutoML._fetch_state(self.key)
self._leader_id = state['leader_id']
self._leaderboard = state['leaderboard']
self._event_log = el = state['event_log']
self._training_info = { r[0]: r[1]
for r in el[el['name'] != '', ['name', 'value']]
.as_data_frame(use_pandas=False, header=False)
}
self._state_json = state['json']
return self._leader_id is not None
def _poll_training_updates(self, job, bar_progress=0, verbosity=None, state=None):
"""
the callback function used to print verbose info when polling AutoML job.
"""
levels = ['Debug', 'Info', 'Warn']
if verbosity is None or verbosity.capitalize() not in levels:
return
levels = levels[levels.index(verbosity.capitalize()):]
try:
if job.progress > state.get('last_job_progress', 0):
# print("\nbar_progress={}, job_progress={}".format(bar_progress, job.progress))
events = H2OAutoML._fetch_state(job.dest_key, properties=['event_log'])['event_log']
events = events[events['level'].isin(levels), :]
last_nrows = state.get('last_events_nrows', 0)
if events.nrows > last_nrows:
fr = events[last_nrows:, ['timestamp', 'message']].as_data_frame(use_pandas=False, header=False)
print('')
for r in fr:
print("{}: {}".format(r[0], r[1]))
print('')
state['last_events_nrows'] = events.nrows
state['last_job_progress'] = job.progress
except Exception as e:
print("Failed polling AutoML progress log: {}".format(e))
@staticmethod
def _fetch_leaderboard(aml_id, extensions=None):
assert_is_type(extensions, None, str, [str])
extensions = ([] if extensions is None
else [extensions] if is_type(extensions, str)
else extensions)
resp = h2o.api("GET /99/Leaderboards/%s" % aml_id, data=dict(extensions=extensions))
dest_key = resp['project_name'].split('@', 1)[0]+"_custom_leaderboard"
lb = H2OAutoML._fetch_table(resp['table'], key=dest_key, progress_bar=False)
return h2o.assign(lb[1:], dest_key)
@staticmethod
def _fetch_table(table, key=None, progress_bar=True):
try:
# Intentionally mask the progress bar here since showing multiple progress bars is confusing to users.
# If any failure happens, revert back to user's original setting for progress and display the error message.
ori_progress_state = H2OJob.__PROGRESS_BAR__
H2OJob.__PROGRESS_BAR__ = progress_bar
# Parse leaderboard H2OTwoDimTable & return as an H2OFrame
return h2o.H2OFrame(table.cell_values, destination_frame=key, column_names=table.col_header, column_types=table.col_types)
finally:
H2OJob.__PROGRESS_BAR__ = ori_progress_state
@staticmethod
def _fetch_state(aml_id, properties=None):
state_json = h2o.api("GET /99/AutoML/%s" % aml_id)
project_name = state_json["project_name"]
if project_name is None:
raise H2OValueError("No AutoML instance with id {}.".format(aml_id))
leaderboard_list = [key["name"] for key in state_json['leaderboard']['models']]
leader_id = leaderboard_list[0] if (leaderboard_list is not None and len(leaderboard_list) > 0) else None
should_fetch = lambda prop: properties is None or prop in properties
leader = None
if should_fetch('leader'):
leader = h2o.get_model(leader_id) if leader_id is not None else None
leaderboard = None
if should_fetch('leaderboard'):
leaderboard = H2OAutoML._fetch_table(state_json['leaderboard_table'], key=project_name+"_leaderboard", progress_bar=False)
leaderboard = h2o.assign(leaderboard[1:], project_name+"_leaderboard") # removing index and reassign id to ensure persistence on backend
event_log = None
if should_fetch('event_log'):
event_log = H2OAutoML._fetch_table(state_json['event_log_table'], key=project_name+"_eventlog", progress_bar=False)
event_log = h2o.assign(event_log[1:], project_name+"_eventlog") # removing index and reassign id to ensure persistence on backend
return dict(
project_name=project_name,
json=state_json,
leader_id=leader_id,
leader=leader,
leaderboard=leaderboard,
event_log=event_log,
)
def get_automl(project_name):
"""
Retrieve information about an AutoML instance.
:param str project_name: A string indicating the project_name of the automl instance to retrieve.
:returns: A dictionary containing the project_name, leader model, leaderboard, event_log.
"""
state = H2OAutoML._fetch_state(project_name)
return H2OAutoMLOutput(state)
def get_leaderboard(aml, extra_columns=None):
"""
Retrieve the leaderboard from the AutoML instance.
Contrary to the default leaderboard attached to the automl instance, this one can return columns other than the metrics.
:param H2OAutoML aml: the instance for which to return the leaderboard.
:param extra_columns: a string or a list of string specifying which optional columns should be added to the leaderboard. Defaults to None.
Currently supported extensions are:
- 'ALL': adds all columns below.
- 'training_time_ms': column providing the training time of each model in milliseconds (doesn't include the training of cross validation models).
- 'predict_time_per_row_ms`: column providing the average prediction time by the model for a single row.
- 'algo': column providing the algorithm name for each model.
:return: An H2OFrame representing the leaderboard.
:examples:
>>> aml = H2OAutoML(max_runtime_secs=30)
>>> aml.train(y=y, training_frame=train)
>>> lb_all = h2o.automl.get_leaderboard(aml, 'ALL')
>>> lb_custom = h2o.automl.get_leaderboard(aml, ['predict_time_per_row_ms', 'training_time_ms'])
>>> lb_custom_sorted = lb_custom.sort(by='predict_time_per_row_ms')
"""
assert_is_type(aml, H2OAutoML, H2OAutoMLOutput)
return H2OAutoML._fetch_leaderboard(aml.key, extra_columns)
| apache-2.0 | 7,905,688,082,203,474,000 | 52.727273 | 184 | 0.602446 | false | 4.130241 | false | false | false |
hirunatan/anillo | anillo/middlewares/json.py | 1 | 3579 | import json
import functools
from cgi import parse_header
def wrap_json(func=None, *, encoder=json.JSONEncoder):
"""
A middleware that parses the body of json requests and
encodes the json responses.
NOTE: this middleware exists just for backward compatibility,
but it has some limitations in terms of response body encoding
because it only accept list or dictionary outputs and json
specification allows store other values also.
It is recommended use the `wrap_json_body` and wrap_json_response`
instead of this.
"""
if func is None:
return functools.partial(wrap_json, encoder=encoder)
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
ctype, pdict = parse_header(request.headers.get('Content-Type', ''))
if ctype == "application/json":
request.body = json.loads(request.body.decode("utf-8")) if request.body else None
response = func(request, *args, **kwargs)
if "Content-Type" in response.headers and response.headers['Content-Type'] is not None:
ctype, pdict = parse_header(response.headers.get('Content-Type', ''))
if ctype == "application/json" and (isinstance(response.body, dict) or isinstance(response.body, list)):
response.body = json.dumps(response.body, cls=encoder)
return response
return wrapper
def wrap_json_body(func=None, *, preserve_raw_body=False):
"""
A middleware that parses the body of json requests and
add it to the request under the `body` attribute (replacing
the previous value). Can preserve the original value in
a new attribute `raw_body` if you give preserve_raw_body=True.
"""
if func is None:
return functools.partial(wrap_json_body, preserve_raw_body=preserve_raw_body)
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
ctype, pdict = parse_header(request.headers.get('Content-Type', ''))
if preserve_raw_body:
request.raw_body = request.body
if ctype == "application/json":
request.body = json.loads(request.body.decode("utf-8")) if request.body else None
return func(request, *args, **kwargs)
return wrapper
def wrap_json_params(func):
"""
A middleware that parses the body of json requests and
add it to the request under the `params` key.
"""
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
ctype, pdict = parse_header(request.headers.get('Content-Type', ''))
if ctype == "application/json":
request.params = json.loads(request.body.decode("utf-8")) if request.body else None
return func(request, *args, **kwargs)
return wrapper
def wrap_json_response(func=None, *, encoder=json.JSONEncoder):
"""
A middleware that encodes in json the response body in case
of that the "Content-Type" header is "application/json".
This middlware accepts and optional `encoder` parameter, that
allow to the user specify its own json encoder class.
"""
if func is None:
return functools.partial(wrap_json_response, encoder=encoder)
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
response = func(request, *args, **kwargs)
if not response.body:
return response
ctype, pdict = parse_header(response.headers.get('Content-Type', ''))
if ctype == "application/json":
response.body = json.dumps(response.body, cls=encoder)
return response
return wrapper
| bsd-2-clause | -8,240,903,876,236,988 | 35.896907 | 116 | 0.663593 | false | 4.10907 | false | false | false |
charettes/django-mutant | mutant/apps.py | 1 | 2824 | from __future__ import unicode_literals
from django.apps import AppConfig
from django.db import models
from django.utils.module_loading import import_string
from . import settings
class MutantConfig(AppConfig):
name = 'mutant'
def ready(self):
self.state_handler = import_string(settings.STATE_HANDLER)()
from . import management
ModelDefinition = self.get_model('ModelDefinition')
models.signals.post_save.connect(
management.model_definition_post_save,
sender=ModelDefinition,
dispatch_uid='mutant.management.model_definition_post_save',
)
models.signals.pre_delete.connect(
management.model_definition_pre_delete,
sender=ModelDefinition,
dispatch_uid='mutant.management.model_definition_pre_delete',
)
models.signals.post_delete.connect(
management.model_definition_post_delete,
sender=ModelDefinition,
dispatch_uid='mutant.management.model_definition_post_delete',
)
BaseDefinition = self.get_model('BaseDefinition')
models.signals.post_save.connect(
management.base_definition_post_save,
sender=BaseDefinition,
dispatch_uid='mutant.management.base_definition_post_save',
)
models.signals.pre_delete.connect(
management.base_definition_pre_delete,
sender=BaseDefinition,
dispatch_uid='mutant.management.base_definition_pre_delete',
)
models.signals.post_delete.connect(
management.base_definition_post_delete,
sender=BaseDefinition,
dispatch_uid='mutant.management.base_definition_post_delete',
)
UniqueTogetherDefinition = self.get_model('UniqueTogetherDefinition')
models.signals.m2m_changed.connect(
management.unique_together_field_defs_changed,
sender=UniqueTogetherDefinition.field_defs.through,
dispatch_uid='mutant.management.unique_together_field_defs_changed',
)
FieldDefinition = self.get_model('FieldDefinition')
models.signals.post_save.connect(
management.raw_field_definition_proxy_post_save,
sender=FieldDefinition,
dispatch_uid='mutant.management.raw_field_definition_proxy_post_save',
)
models.signals.pre_delete.connect(
management.field_definition_pre_delete,
sender=FieldDefinition,
dispatch_uid='mutant.management.field_definition_pre_delete',
)
models.signals.post_delete.connect(
management.field_definition_post_delete,
sender=FieldDefinition,
dispatch_uid='mutant.management.field_definition_post_delete',
)
| mit | 4,812,761,891,276,211,000 | 37.162162 | 82 | 0.65262 | false | 4.36476 | false | false | false |
rafaelvasco/SpriteMator | src/model/application_settings.py | 1 | 1601 | from PyQt5.QtCore import QSettings
class SettingData(object):
def __init__(self, name, value, write_to_disk=False):
self._name = name
self._value = value
self._writeToDisk = write_to_disk
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
@property
def write_to_disk(self):
return self._writeToDisk
@write_to_disk.setter
def write_to_disk(self, value):
self._writeToDisk = value
class ApplicationSettings(object):
def __init__(self):
self._settings = {}
self._settingsClient = None
QSettings.setPath(QSettings.IniFormat, QSettings.UserScope, "settings")
QSettings.setDefaultFormat(QSettings.IniFormat)
@property
def settings_map(self):
return self._settings
def load_settings(self):
self._settingsClient = QSettings()
# SettingData: (Name, Value, WriteToDisk)
self._settings["last_folder_path"] = SettingData("last_folder_path",
self._settingsClient.value
("last_folder_path", None))
def write_settings(self):
for _, setting in self._settings.items():
if setting.write_to_disk and setting.value is not None:
self._settingsClient.setValue(setting.name, setting.value)
| apache-2.0 | -4,443,230,125,401,103,000 | 22.895522 | 84 | 0.580262 | false | 4.23545 | false | false | false |
jplitza/kino | kino/controller.py | 1 | 5284 | from flask import render_template, make_response, request, redirect, url_for, Response, g, jsonify
from datetime import datetime
from operator import attrgetter
import json
from . import app, db
from .model import *
@app.before_request
def before_request():
"""Ensures that user is authenticated and fills some global variables"""
try:
user = request.authorization.username
if user:
g.user = User.query.filter_by(name=user).first()
if not g.user:
g.user = User(name=user)
db.session.add(g.user)
db.session.commit()
else:
return login()
except AttributeError:
return login()
g.events = Event.query \
.filter_by(canceled=False) \
.filter(Event.date >= datetime.now()) \
.order_by(Event.date.asc())
g.now = datetime.now()
@app.route('/login')
def login():
"""Sends a 401 response that enables basic auth"""
return Response('You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
@app.route('/')
def index():
return render_template('index.html')
@app.route('/event/<int:id>')
def event(id):
"""Shows the voted movies for an event"""
event = Event.query.filter_by(id=id).first()
if not event:
return make_response(
render_template('error.html', errormsg='The event you requested was not found.'),
404
)
event.movies = {}
voted_movies = [vote.movie for vote in Vote.query.filter_by(user=g.user, event=event)]
for vote in event.votes:
if event.movies.has_key(vote.movie.id):
event.movies[vote.movie.id].count += 1
else:
event.movies[vote.movie.id] = vote.movie
event.movies[vote.movie.id].voted = vote.movie in voted_movies
event.movies[vote.movie.id].count = 1
event.movies = sorted(event.movies.values(), key=attrgetter('count'), reverse=True)
event.voted = len(voted_movies) > 0
return render_template('event.html', event=event)
@app.route('/find_movie')
def find_film():
"""Searches for movies using a partial movie name"""
movies = Movie.query.filter(Movie.name.like('%%%s%%' % request.args['term'])).all()
return Response(
json.dumps([{'id': movie.id, 'value': movie.name + ' (' + movie.year + ')'} for movie in movies]),
200,
None,
'application/json'
)
@app.route('/movie/<int:id>')
def movie_info(id):
"""Gives detailed information about a movie"""
movie = Movie.query.filter_by(id=id).first()
if not movie:
return jsonify({})
return jsonify(movie.serialize)
@app.route('/movie/next_winning')
def next_winning_movie_info():
"""Gives detailed information about the currently winning movie of the next event"""
# to get the currently running event if some event is running, we ask for
# the next event after today's mitdnight
event = Event.query \
.filter_by(canceled=False) \
.filter(Event.date >= datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)) \
.order_by(Event.date.asc()) \
.first()
if not event:
return jsonify({})
if not event.votes:
return jsonify({})
event.movies = {}
for vote in event.votes:
if vote.movie.id in event.movies:
event.movies[vote.movie.id].count += 1
else:
event.movies[vote.movie.id] = vote.movie
event.movies[vote.movie.id].count = 1
event.movies = sorted(event.movies.values(), key=attrgetter('count'), reverse=True)
return movie_info(event.movies[0].id)
@app.route('/vote', methods=['POST'])
def vote():
"""Votes for a set of movies for an event. Can update previous votes."""
event_id = request.form['event_id']
event = Event.query.filter_by(id=event_id).first()
if not event:
return make_response(
render_template('error.html', errormsg='The event you voted for doesn\'t exist!'),
404
)
if event.date < datetime.now():
return make_response(
render_template('error.html', errormsg='Voting for an event in the past isn\'t possible!'),
403
)
if event.canceled:
return make_response(
render_template('error.html', errormsg='Voting for a canceled event isn\'t possible!'),
403
)
votes = Vote.query.filter_by(user=g.user, event=event)
voted_movies = dict((vote.movie.id, vote) for vote in votes)
for movie_id in request.form.getlist('movies[]'):
movie = Movie.query.filter_by(id=movie_id)
if movie:
if movie_id in voted_movies.keys():
# change nothing about this vote and remove it from the list
votes.remove(voted_movies[movie_id])
else:
vote = Vote(user=g.user, event=event, movie_id=movie_id)
db.session.add(vote)
# the votes remaining in the list are no longer voted, so remove them
for vote in votes:
db.session.delete(vote)
db.session.commit()
return redirect(url_for('event', id=event_id))
| mit | 9,070,815,388,826,939,000 | 35.951049 | 106 | 0.604845 | false | 3.739561 | false | false | false |
patriczek/faf | src/pyfaf/storage/externalfaf.py | 2 | 1039 | # Copyright (C) 2014 ABRT Team
# Copyright (C) 2014 Red Hat, Inc.
#
# This file is part of faf.
#
# faf is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# faf is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with faf. If not, see <http://www.gnu.org/licenses/>.
from . import Column
from . import GenericTable
from . import Integer
from . import String
class ExternalFafInstance(GenericTable):
__tablename__ = "externalfafinstances"
id = Column(Integer, primary_key=True)
name = Column(String(256), nullable=False, index=True)
baseurl = Column(String(1024), nullable=False)
| gpl-3.0 | 3,819,824,388,280,074,000 | 33.633333 | 70 | 0.739172 | false | 3.764493 | false | false | false |
superchilli/webapp | venv/lib/python2.7/site-packages/httpie/downloads.py | 4 | 13054 | # coding=utf-8
"""
Download mode implementation.
"""
from __future__ import division
import os
import re
import sys
import mimetypes
import threading
from time import sleep, time
from mailbox import Message
from .output import RawStream
from .models import HTTPResponse
from .utils import humanize_bytes
from .compat import urlsplit
PARTIAL_CONTENT = 206
CLEAR_LINE = '\r\033[K'
PROGRESS = (
'{percentage: 6.2f} %'
' {downloaded: >10}'
' {speed: >10}/s'
' {eta: >8} ETA'
)
PROGRESS_NO_CONTENT_LENGTH = '{downloaded: >10} {speed: >10}/s'
SUMMARY = 'Done. {downloaded} in {time:0.5f}s ({speed}/s)\n'
SPINNER = '|/-\\'
class ContentRangeError(ValueError):
pass
def parse_content_range(content_range, resumed_from):
"""
Parse and validate Content-Range header.
<http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html>
:param content_range: the value of a Content-Range response header
eg. "bytes 21010-47021/47022"
:param resumed_from: first byte pos. from the Range request header
:return: total size of the response body when fully downloaded.
"""
if content_range is None:
raise ContentRangeError('Missing Content-Range')
pattern = (
'^bytes (?P<first_byte_pos>\d+)-(?P<last_byte_pos>\d+)'
'/(\*|(?P<instance_length>\d+))$'
)
match = re.match(pattern, content_range)
if not match:
raise ContentRangeError(
'Invalid Content-Range format %r' % content_range)
content_range_dict = match.groupdict()
first_byte_pos = int(content_range_dict['first_byte_pos'])
last_byte_pos = int(content_range_dict['last_byte_pos'])
instance_length = (
int(content_range_dict['instance_length'])
if content_range_dict['instance_length']
else None
)
# "A byte-content-range-spec with a byte-range-resp-spec whose
# last- byte-pos value is less than its first-byte-pos value,
# or whose instance-length value is less than or equal to its
# last-byte-pos value, is invalid. The recipient of an invalid
# byte-content-range- spec MUST ignore it and any content
# transferred along with it."
if (first_byte_pos >= last_byte_pos
or (instance_length is not None
and instance_length <= last_byte_pos)):
raise ContentRangeError(
'Invalid Content-Range returned: %r' % content_range)
if (first_byte_pos != resumed_from
or (instance_length is not None
and last_byte_pos + 1 != instance_length)):
# Not what we asked for.
raise ContentRangeError(
'Unexpected Content-Range returned (%r)'
' for the requested Range ("bytes=%d-")'
% (content_range, resumed_from)
)
return last_byte_pos + 1
def filename_from_content_disposition(content_disposition):
"""
Extract and validate filename from a Content-Disposition header.
:param content_disposition: Content-Disposition value
:return: the filename if present and valid, otherwise `None`
"""
# attachment; filename=jkbr-httpie-0.4.1-20-g40bd8f6.tar.gz
msg = Message('Content-Disposition: %s' % content_disposition)
filename = msg.get_filename()
if filename:
# Basic sanitation.
filename = os.path.basename(filename).lstrip('.').strip()
if filename:
return filename
def filename_from_url(url, content_type):
fn = urlsplit(url).path.rstrip('/')
fn = os.path.basename(fn) if fn else 'index'
if '.' not in fn and content_type:
content_type = content_type.split(';')[0]
if content_type == 'text/plain':
# mimetypes returns '.ksh'
ext = '.txt'
else:
ext = mimetypes.guess_extension(content_type)
if ext == '.htm': # Python 3
ext = '.html'
if ext:
fn += ext
return fn
def get_unique_filename(fn, exists=os.path.exists):
attempt = 0
while True:
suffix = '-' + str(attempt) if attempt > 0 else ''
if not exists(fn + suffix):
return fn + suffix
attempt += 1
class Download(object):
def __init__(self, output_file=None,
resume=False, progress_file=sys.stderr):
"""
:param resume: Should the download resume if partial download
already exists.
:type resume: bool
:param output_file: The file to store response body in. If not
provided, it will be guessed from the response.
:type output_file: file
:param progress_file: Where to report download progress.
:type progress_file: file
"""
self._output_file = output_file
self._resume = resume
self._resumed_from = 0
self.finished = False
self.status = Status()
self._progress_reporter = ProgressReporterThread(
status=self.status,
output=progress_file
)
def pre_request(self, request_headers):
"""Called just before the HTTP request is sent.
Might alter `request_headers`.
:type request_headers: dict
"""
# Disable content encoding so that we can resume, etc.
request_headers['Accept-Encoding'] = None
if self._resume:
bytes_have = os.path.getsize(self._output_file.name)
if bytes_have:
# Set ``Range`` header to resume the download
# TODO: Use "If-Range: mtime" to make sure it's fresh?
request_headers['Range'] = 'bytes=%d-' % bytes_have
self._resumed_from = bytes_have
def start(self, response):
"""
Initiate and return a stream for `response` body with progress
callback attached. Can be called only once.
:param response: Initiated response object with headers already fetched
:type response: requests.models.Response
:return: RawStream, output_file
"""
assert not self.status.time_started
try:
total_size = int(response.headers['Content-Length'])
except (KeyError, ValueError, TypeError):
total_size = None
if self._output_file:
if self._resume and response.status_code == PARTIAL_CONTENT:
total_size = parse_content_range(
response.headers.get('Content-Range'),
self._resumed_from
)
else:
self._resumed_from = 0
try:
self._output_file.seek(0)
self._output_file.truncate()
except IOError:
pass # stdout
else:
# TODO: Should the filename be taken from response.history[0].url?
# Output file not specified. Pick a name that doesn't exist yet.
fn = None
if 'Content-Disposition' in response.headers:
fn = filename_from_content_disposition(
response.headers['Content-Disposition'])
if not fn:
fn = filename_from_url(
url=response.url,
content_type=response.headers.get('Content-Type'),
)
self._output_file = open(get_unique_filename(fn), mode='a+b')
self.status.started(
resumed_from=self._resumed_from,
total_size=total_size
)
stream = RawStream(
msg=HTTPResponse(response),
with_headers=False,
with_body=True,
on_body_chunk_downloaded=self.chunk_downloaded,
chunk_size=1024 * 8
)
self._progress_reporter.output.write(
'Downloading %sto "%s"\n' % (
(humanize_bytes(total_size) + ' '
if total_size is not None
else ''),
self._output_file.name
)
)
self._progress_reporter.start()
return stream, self._output_file
def finish(self):
assert not self.finished
self.finished = True
self.status.finished()
def failed(self):
self._progress_reporter.stop()
@property
def interrupted(self):
return (
self.finished
and self.status.total_size
and self.status.total_size != self.status.downloaded
)
def chunk_downloaded(self, chunk):
"""
A download progress callback.
:param chunk: A chunk of response body data that has just
been downloaded and written to the output.
:type chunk: bytes
"""
self.status.chunk_downloaded(len(chunk))
class Status(object):
"""Holds details about the downland status."""
def __init__(self):
self.downloaded = 0
self.total_size = None
self.resumed_from = 0
self.time_started = None
self.time_finished = None
def started(self, resumed_from=0, total_size=None):
assert self.time_started is None
if total_size is not None:
self.total_size = total_size
self.downloaded = self.resumed_from = resumed_from
self.time_started = time()
def chunk_downloaded(self, size):
assert self.time_finished is None
self.downloaded += size
@property
def has_finished(self):
return self.time_finished is not None
def finished(self):
assert self.time_started is not None
assert self.time_finished is None
self.time_finished = time()
class ProgressReporterThread(threading.Thread):
"""
Reports download progress based on its status.
Uses threading to periodically update the status (speed, ETA, etc.).
"""
def __init__(self, status, output, tick=.1, update_interval=1):
"""
:type status: Status
:type output: file
"""
super(ProgressReporterThread, self).__init__()
self.status = status
self.output = output
self._tick = tick
self._update_interval = update_interval
self._spinner_pos = 0
self._status_line = ''
self._prev_bytes = 0
self._prev_time = time()
self._should_stop = threading.Event()
def stop(self):
"""Stop reporting on next tick."""
self._should_stop.set()
def run(self):
while not self._should_stop.is_set():
if self.status.has_finished:
self.sum_up()
break
self.report_speed()
sleep(self._tick)
def report_speed(self):
now = time()
if now - self._prev_time >= self._update_interval:
downloaded = self.status.downloaded
try:
speed = ((downloaded - self._prev_bytes)
/ (now - self._prev_time))
except ZeroDivisionError:
speed = 0
if not self.status.total_size:
self._status_line = PROGRESS_NO_CONTENT_LENGTH.format(
downloaded=humanize_bytes(downloaded),
speed=humanize_bytes(speed),
)
else:
try:
percentage = downloaded / self.status.total_size * 100
except ZeroDivisionError:
percentage = 0
if not speed:
eta = '-:--:--'
else:
s = int((self.status.total_size - downloaded) / speed)
h, s = divmod(s, 60 * 60)
m, s = divmod(s, 60)
eta = '{0}:{1:0>2}:{2:0>2}'.format(h, m, s)
self._status_line = PROGRESS.format(
percentage=percentage,
downloaded=humanize_bytes(downloaded),
speed=humanize_bytes(speed),
eta=eta,
)
self._prev_time = now
self._prev_bytes = downloaded
self.output.write(
CLEAR_LINE
+ ' '
+ SPINNER[self._spinner_pos]
+ ' '
+ self._status_line
)
self.output.flush()
self._spinner_pos = (self._spinner_pos + 1
if self._spinner_pos + 1 != len(SPINNER)
else 0)
def sum_up(self):
actually_downloaded = (self.status.downloaded
- self.status.resumed_from)
time_taken = self.status.time_finished - self.status.time_started
self.output.write(CLEAR_LINE)
self.output.write(SUMMARY.format(
downloaded=humanize_bytes(actually_downloaded),
total=(self.status.total_size
and humanize_bytes(self.status.total_size)),
speed=humanize_bytes(actually_downloaded / time_taken),
time=time_taken,
))
self.output.flush()
| mit | -292,126,573,933,683,100 | 29.571429 | 79 | 0.558909 | false | 4.242444 | false | false | false |
jedi22/osquery | tools/deployment/getfiles.py | 6 | 1617 | #!/usr/bin/env python
# Copyright (c) 2014-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under both the Apache 2.0 license (found in the
# LICENSE file in the root directory of this source tree) and the GPLv2 (found
# in the COPYING file in the root directory of this source tree).
# You may select, at your option, one of the above-listed licenses.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import os
import sys
try:
import argparse
except ImportError:
print("Cannot import argparse.")
exit(1)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=(
"List files from compile_commands.json."
))
parser.add_argument(
"--build", metavar="PATH",
help="Path to osquery build (./build/<sys>/) directory"
)
parser.add_argument(
"--base", metavar="PATH", default="",
help="Real path of source base."
)
args = parser.parse_args()
commands_path = os.path.join(args.build, "compile_commands.json")
if not os.path.exists(commands_path):
print("Cannot find '%s'" % (commands_path))
exit(1)
with open(commands_path, 'r') as fh: content = fh.read()
data = json.loads(content)
for file in data:
if file['file'].find("_tests.cpp") > 0 or file['file'].find("_benchmark") > 0:
continue
if file['file'].find("gtest") > 0:
continue
print(file['file'].replace(args.base, ""))
pass | bsd-3-clause | 3,401,102,705,293,106,700 | 28.418182 | 86 | 0.63389 | false | 3.786885 | false | false | false |
lpryszcz/bin | plot_2d.py | 1 | 4538 | #!/usr/bin/env python
desc="""2D plot"""
epilog="""Author:
[email protected]
Barcelona, 10/05/2013
"""
import argparse, math, os, sys
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
def plot_2d(inputs, output, title, xlab, ylab, xmax, xmin, log, ndivide=20):
"""
"""
#define number of subplots
bi = math.sqrt(len(inputs))
bi = round(bi)
bj = len(inputs)/bi
if bj < round(bj):
bj = round(bj)+1
else:
bj = round(bj)+1
print(len(inputs),bi,bj)
#get figure
plt.figure(figsize=(bj*4, bi*4))
plt.subplots_adjust(hspace = .3, wspace = .3)
#process inputs
sys.stderr.write("Loading data...\n")
for ii, handle in enumerate(inputs, 1):
#load data
x, y = [], []
for l in handle:
l = l[:-1]
if not l or l.startswith('#'):
continue
i, c = l.split()[:2]
i, c = int(i), int(c)
if xmin <= i <= xmax:
x.append(i)
y.append(c/10**3)
maxy10 = max(y[xmax/ndivide:])
xi = y.index(maxy10)
freqk = x[xi]
gsize = maxy10*freqk/10.0**3
sys.stderr.write("[%s] Max freq: %s @ k-mer freq: %s\nEstimated genome size: %s Mb\n" %(input.name, maxy10, freqk, gsize))
plt.subplot(bi,bj,ii)
plt.plot(x, y, linewidth=2.0)
#add title and axis labels
if input.name!="<stdin>":
plt.title(input.name.split('.')[0])
elif title:
plt.title(input.name)
#plot x-axis label only on bottom plots
if ii/bi>bj-1:
plt.xlabel(xlab)
#plot y-axis label only on left-most plots
if ii%bj==1:
plt.ylabel(ylab)
plt.ylim(0,1.5*maxy10)
#plt.grid(True)
#add local max
plt.annotate("~%.2f Mb\n(%s, %sK)" % (gsize, freqk, maxy10), xy=(freqk*1.01, maxy10*1.01), xytext=(freqk*1.2, maxy10*1.2),arrowprops=dict(facecolor='black', shrink=0.05))
#plt.text(freqk, maxy10*1.1, 'Genome size: ~%.2f Mb\n(%s, %s)' % (gsize, freqk, maxy10))
#show plot if not outfile provided
if output.name=="<stdout>":
plt.show()
else:
fpath = output.name #"%s.%s" % (output.name, format)
format = fpath.split('.')[-1]
plt.savefig(fpath, dpi=200, facecolor='w', edgecolor='w',\
orientation='landscape', format=format, transparent=False)
def main():
usage = "%(prog)s [options] -v"
parser = argparse.ArgumentParser( usage=usage,description=desc,epilog=epilog )
parser.add_argument("-v", dest="verbose", default=False, action="store_true", help="verbose")
parser.add_argument('--version', action='version', version='1.0')
parser.add_argument("-i", dest="input", default=[sys.stdin,], type=argparse.FileType("r"), nargs="+",
help="input stream [stdin]")
parser.add_argument("-o", dest="output", default=sys.stdout, type=argparse.FileType("w"),
help="output stream [stdout]")
parser.add_argument("-c", dest="col", default=0, type=int,
help="column to use [%(default)s]")
parser.add_argument("-t", dest="title", default="",
help="histogram title [%(default)s]")
parser.add_argument("-x", dest="xlab", default="k-mer frequency",
help="x-axis label [%(default)s]")
parser.add_argument("-y", dest="ylab", default="k-mers with this frequency [10e3]",
help="y-axis label [%(default)s]")
parser.add_argument("-n", dest="ndivide", default=20, type=int,
help="discard 1/n first [%(default)s]")
parser.add_argument("--log", dest="log", default=False, action="store_true",
help="log scale [%(default)s]")
parser.add_argument("--xmax", dest="xmax", default=100, type=int,
help="max x value [%(default)s]")
parser.add_argument("--xmin", dest="xmin", default=0, type=int,
help="min x value [%(default)s]")
o = parser.parse_args()
if o.verbose:
sys.stderr.write( "Options: %s\n" % str(o) )
plot_2d(o.input, o.output, o.title, o.xlab, o.ylab, o.xmax, o.xmin, o.log, o.ndivide)
if __name__=='__main__':
t0 = datetime.now()
main()
dt = datetime.now()-t0
sys.stderr.write( "#Time elapsed: %s\n" % dt )
| gpl-3.0 | 5,600,041,490,826,155,000 | 38.46087 | 178 | 0.538784 | false | 3.260057 | false | false | false |
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/traits-4.3.0-py2.7-macosx-10.10-x86_64.egg/traits/trait_base.py | 1 | 19017 | #------------------------------------------------------------------------------
#
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
# Author: David C. Morrill
# Date: 06/21/2002
#
# Refactored into a separate module: 07/04/2003
#
#------------------------------------------------------------------------------
""" Defines common, low-level capabilities needed by the Traits package.
"""
#-------------------------------------------------------------------------------
# Imports:
#-------------------------------------------------------------------------------
from __future__ import absolute_import
import os
import sys
from os import getcwd
from os.path import dirname, exists, join
from string import lowercase, uppercase
from types import (ListType, TupleType, DictType, StringType, UnicodeType,
IntType, LongType, FloatType, ComplexType, ClassType, TypeType)
# Set the Python version being used:
vi = sys.version_info
python_version = vi[0] + (float( vi[1] ) / 10.0)
try:
from traits.etsconfig.api import ETSConfig
except:
# If the ETSConfig package is not available, fake it:
class ETSConfig ( object ):
#-----------------------------------------------------------------------
# 'object' interface:
#-----------------------------------------------------------------------
def __init__ ( self ):
""" Constructor.
Note that this constructor can only ever be called from within
this module, since we don't expose the class.
"""
# Shadow attributes for properties:
self._application_data = None
self._toolkit = None
return
#-----------------------------------------------------------------------
# 'ETSConfig' interface:
#-----------------------------------------------------------------------
#-- Property Implementations -------------------------------------------
def _get_application_data ( self ):
""" Property getter.
This is a directory that applications and packages can safely
write non-user accessible data to i.e. configuration
information, preferences etc.
Do not put anything in here that the user might want to navigate
to (e.g. projects, user data files, etc).
The actual location differs between operating systems.
"""
if self._application_data is None:
self._application_data = self._initialize_application_data()
return self._application_data
def _set_application_data ( self, application_data ):
""" Property setter.
"""
self._application_data = application_data
application_data = property( _get_application_data,
_set_application_data )
def _get_toolkit ( self ):
"""
Property getter for the GUI toolkit. The value returned is, in
order of preference: the value set by the application; the value
passed on the command line using the '-toolkit' option; the value
specified by the 'ETS_TOOLKIT' environment variable; otherwise the
empty string.
"""
if self._toolkit is None:
self._toolkit = self._initialize_toolkit()
return self._toolkit
def _set_toolkit ( self, toolkit ):
"""
Property setter for the GUI toolkit. The toolkit can be set more
than once, but only if it is the same one each time. An application
that is written for a particular toolkit can explicitly set it
before any other module that gets the value is imported.
"""
if self._toolkit and (self._toolkit != toolkit):
raise ValueError( 'Cannot set toolkit to %s because it has '
'already been set to %s' % ( toolkit, self._toolkit ) )
self._toolkit = toolkit
return
toolkit = property( _get_toolkit, _set_toolkit )
#-- Private Methods ----------------------------------------------------
def _initialize_application_data ( self ):
""" Initializes the (default) application data directory.
"""
if sys.platform == 'win32':
environment_variable = 'APPDATA'
directory_name = 'Enthought'
else:
environment_variable = 'HOME'
directory_name = '.enthought'
# Lookup the environment variable:
parent_directory = os.environ.get( environment_variable, None )
if parent_directory is None:
raise ValueError( 'Environment variable "%s" not set' %
environment_variable )
application_data = os.path.join( parent_directory, directory_name )
# If a file already exists with this name then make sure that it is
# a directory!
if os.path.exists( application_data ):
if not os.path.isdir( application_data ):
raise ValueError( 'File "%s" already exists' %
application_data )
# Otherwise, create the directory:
else:
os.makedirs( application_data )
return application_data
def _initialize_toolkit ( self ):
""" Initializes the toolkit.
"""
# We handle the command line option even though it doesn't have the
# highest precedence because we always want to remove it from the
# command line:
if '-toolkit' in sys.argv:
opt_idx = sys.argv.index( '-toolkit' )
try:
opt_toolkit = sys.argv[ opt_idx + 1 ]
except IndexError:
raise ValueError( 'The -toolkit command line argument must '
'be followed by a toolkit name' )
# Remove the option:
del sys.argv[ opt_idx: opt_idx + 1 ]
else:
opt_toolkit = None
if self._toolkit is not None:
toolkit = self._toolkit
elif opt_toolkit is not None:
toolkit = opt_toolkit
else:
toolkit = os.environ.get( 'ETS_TOOLKIT', '' )
return toolkit
ETSConfig = ETSConfig()
#-------------------------------------------------------------------------------
# Provide Python 2.3+ compatible definitions (if necessary):
#-------------------------------------------------------------------------------
try:
from types import BooleanType
except ImportError:
BooleanType = IntType
def _enumerate ( seq ):
for i in xrange( len( seq) ):
yield i, seq[i]
try:
enumerate = enumerate
except:
enumerate = _enumerate
del _enumerate
#-------------------------------------------------------------------------------
# Constants:
#-------------------------------------------------------------------------------
ClassTypes = ( ClassType, TypeType )
SequenceTypes = ( ListType, TupleType )
ComplexTypes = ( float, int )
TypeTypes = ( StringType, UnicodeType, IntType, LongType, FloatType,
ComplexType, ListType, TupleType, DictType, BooleanType )
TraitNotifier = '__trait_notifier__'
# The standard Traits property cache prefix:
TraitsCache = '_traits_cache_'
#-------------------------------------------------------------------------------
# Singleton 'Uninitialized' object:
#-------------------------------------------------------------------------------
Uninitialized = None
class _Uninitialized(object):
""" The singleton value of this class represents the uninitialized state
of a trait and is specified as the 'old' value in the trait change
notification that occurs when the value of a trait is read before being
set.
"""
def __new__(cls):
if Uninitialized is not None:
return Uninitialized
else:
self = object.__new__(cls)
return self
def __repr__(self):
return '<uninitialized>'
def __reduce_ex__(self, protocol):
return (_Uninitialized, ())
#: When the first reference to a trait is a 'get' reference, the default value of
#: the trait is implicitly assigned and returned as the value of the trait.
#: Because of this implicit assignment, a trait change notification is
#: generated with the Uninitialized object as the 'old' value of the trait, and
#: the default trait value as the 'new' value. This allows other parts of the
#: traits package to recognize the assignment as the implicit default value
#: assignment, and treat it specially.
Uninitialized = _Uninitialized()
#-------------------------------------------------------------------------------
# Singleton 'Undefined' object (used as undefined trait name and/or value):
#-------------------------------------------------------------------------------
Undefined = None
class _Undefined(object):
""" Singleton 'Undefined' object (used as undefined trait name and/or value)
"""
def __new__(cls):
if Undefined is not None:
return Undefined
else:
self = object.__new__(cls)
return self
def __repr__(self):
return '<undefined>'
def __reduce_ex__(self, protocol):
return (_Undefined, ())
def __eq__(self, other):
return type(self) is type(other)
def __ne__(self, other):
return type(self) is not type(other)
#: Singleton object that indicates that a trait attribute has not yet had a
#: value set (i.e., its value is undefined). This object is used instead of
#: None, because None often has other meanings, such as that a value is not
#: used. When a trait attribute is first assigned a value, and its associated
#: trait notification handlers are called, Undefined is passed as the *old*
#: parameter, to indicate that the attribute previously had no value.
Undefined = _Undefined()
# Tell the C-base code about singleton 'Undefined' and 'Uninitialized' objects:
from . import ctraits
ctraits._undefined( Undefined, Uninitialized )
#-------------------------------------------------------------------------------
# Singleton 'Missing' object (used as missing method argument marker):
#-------------------------------------------------------------------------------
class Missing ( object ):
""" Singleton 'Missing' object (used as missing method argument marker).
"""
def __repr__ ( self ):
return '<missing>'
#: Singleton object that indicates that a method argument is missing from a
#: type-checked method signature.
Missing = Missing()
#-------------------------------------------------------------------------------
# Singleton 'Self' object (used as object reference to current 'object'):
#-------------------------------------------------------------------------------
class Self ( object ):
""" Singleton 'Self' object (used as object reference to current 'object').
"""
def __repr__ ( self ):
return '<self>'
#: Singleton object that references the current 'object'.
Self = Self()
#-------------------------------------------------------------------------------
# Define a special 'string' coercion function:
#-------------------------------------------------------------------------------
def strx ( arg ):
""" Wraps the built-in str() function to raise a TypeError if the
argument is not of a type in StringTypes.
"""
if type( arg ) in StringTypes:
return str( arg )
raise TypeError
#-------------------------------------------------------------------------------
# Constants:
#-------------------------------------------------------------------------------
StringTypes = ( StringType, UnicodeType, IntType, LongType, FloatType,
ComplexType )
#-------------------------------------------------------------------------------
# Define a mapping of coercable types:
#-------------------------------------------------------------------------------
# Mapping of coercable types.
CoercableTypes = {
LongType: ( 11, long, int ),
FloatType: ( 11, float, int ),
ComplexType: ( 11, complex, float, int ),
UnicodeType: ( 11, unicode, str )
}
#-------------------------------------------------------------------------------
# Return a string containing the class name of an object with the correct
# article (a or an) preceding it (e.g. 'an Image', 'a PlotValue'):
#-------------------------------------------------------------------------------
def class_of ( object ):
""" Returns a string containing the class name of an object with the
correct indefinite article ('a' or 'an') preceding it (e.g., 'an Image',
'a PlotValue').
"""
if isinstance( object, basestring ):
return add_article( object )
return add_article( object.__class__.__name__ )
#-------------------------------------------------------------------------------
# Return a string containing the right article (i.e. 'a' or 'an') prefixed to
# a specified string:
#-------------------------------------------------------------------------------
def add_article ( name ):
""" Returns a string containing the correct indefinite article ('a' or 'an')
prefixed to the specified string.
"""
if name[:1].lower() in 'aeiou':
return 'an ' + name
return 'a ' + name
#----------------------------------------------------------------------------
# Return a 'user-friendly' name for a specified trait:
#----------------------------------------------------------------------------
def user_name_for ( name ):
""" Returns a "user-friendly" version of a string, with the first letter
capitalized and with underscore characters replaced by spaces. For example,
``user_name_for('user_name_for')`` returns ``'User name for'``.
"""
name = name.replace( '_', ' ' )
result = ''
last_lower = False
for c in name:
if (c in uppercase) and last_lower:
result += ' '
last_lower = (c in lowercase)
result += c
return result.capitalize()
#-------------------------------------------------------------------------------
# Gets the path to the traits home directory:
#-------------------------------------------------------------------------------
_traits_home = None
def traits_home ( ):
""" Gets the path to the Traits home directory.
"""
global _traits_home
if _traits_home is None:
_traits_home = verify_path( join( ETSConfig.application_data,
'traits' ) )
return _traits_home
#-------------------------------------------------------------------------------
# Verify that a specified path exists, and try to create it if it doesn't:
#-------------------------------------------------------------------------------
def verify_path ( path ):
""" Verify that a specified path exists, and try to create it if it
does not exist.
"""
if not exists( path ):
try:
os.mkdir( path )
except:
pass
return path
#-------------------------------------------------------------------------------
# Returns the name of the module the caller's caller is located in:
#-------------------------------------------------------------------------------
def get_module_name ( level = 2 ):
""" Returns the name of the module that the caller's caller is located in.
"""
return sys._getframe( level ).f_globals.get( '__name__', '__main__' )
#-------------------------------------------------------------------------------
# Returns a resource path calculated from the caller's stack:
#-------------------------------------------------------------------------------
def get_resource_path ( level = 2 ):
"""Returns a resource path calculated from the caller's stack.
"""
module = sys._getframe( level ).f_globals.get( '__name__', '__main__' )
if module != '__main__':
# Return the path to the module:
try:
return dirname( getattr( sys.modules.get( module ), '__file__' ) )
except:
# Apparently 'module' is not a registered module...treat it like
# '__main__':
pass
# '__main__' is not a real module, so we need a work around:
for path in [ dirname( sys.argv[0] ), getcwd() ]:
if exists( path ):
break
return path
#-------------------------------------------------------------------------------
# Returns the value of an extended object attribute name of the form:
# name[.name2[.name3...]]:
#-------------------------------------------------------------------------------
def xgetattr( object, xname, default = Undefined ):
""" Returns the value of an extended object attribute name of the form:
name[.name2[.name3...]].
"""
names = xname.split( '.' )
for name in names[:-1]:
if default is Undefined:
object = getattr( object, name )
else:
object = getattr( object, name, None )
if object is None:
return default
if default is Undefined:
return getattr( object, names[-1] )
return getattr( object, names[-1], default )
#-------------------------------------------------------------------------------
# Sets the value of an extended object attribute name of the form:
# name[.name2[.name3...]]:
#-------------------------------------------------------------------------------
def xsetattr( object, xname, value ):
""" Sets the value of an extended object attribute name of the form:
name[.name2[.name3...]].
"""
names = xname.split( '.' )
for name in names[:-1]:
object = getattr( object, name )
setattr( object, names[-1], value )
#-------------------------------------------------------------------------------
# Traits metadata selection functions:
#-------------------------------------------------------------------------------
def is_none ( value ):
return (value is None)
def not_none ( value ):
return (value is not None)
def not_false ( value ):
return (value is not False)
def not_event ( value ):
return (value != 'event')
def is_str ( value ):
return isinstance( value, basestring )
| gpl-2.0 | 5,830,377,283,160,393,000 | 34.813559 | 81 | 0.482989 | false | 5.132794 | true | false | false |
google/clif | clif/testing/python/classes_test.py | 1 | 2595 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for clif.testing.python.classes."""
from absl.testing import absltest
from absl.testing import parameterized
from clif.testing.python import classes
# TODO: Restore simple import after OSS setup includes pybind11.
# pylint: disable=g-import-not-at-top
try:
from clif.testing.python import classes_pybind11
except ImportError:
classes_pybind11 = None
# pylint: enable=g-import-not-at-top
@parameterized.named_parameters([
np for np in zip(('c_api', 'pybind11'), (classes, classes_pybind11))
if np[1] is not None
])
class ClassesTest(absltest.TestCase):
def testKlass(self, wrapper_lib):
self.assertEqual(wrapper_lib.Klass.C2(), 3)
k = wrapper_lib.Klass(3)
self.assertEqual(k.i, 3)
self.assertEqual(k.i2, 9)
self.assertEqual(k.Int1(), 4)
k.i = 0
self.assertEqual(k.i, 0)
# AttributeError on CPython; TypeError on PyPy.
with self.assertRaises((AttributeError, TypeError)):
k.i2 = 0
def testDerivedClassDocstring(self, wrapper_lib):
# Nothing special about this being a derived class; that is just the
# one our test .clif file has a docstring on.
self.assertIn('class also has a docstring.\n\n',
wrapper_lib.Derived.__doc__)
self.assertIn('spans multiple lines', wrapper_lib.Derived.__doc__)
self.assertIn(wrapper_lib.Derived.__doc__,
wrapper_lib.Derived.__doc__.strip())
def testPythonDerived(self, wrapper_lib):
class PyK(wrapper_lib.Klass):
pass
k = PyK(4)
self.assertEqual(k.i, 4)
self.assertEqual(k.Int1(), 5)
def testDerived(self, wrapper_lib):
# k = wrapper_lib.Derived()
k = wrapper_lib.Derived.Init(0, 0)
self.assertEqual(k.i, 0)
self.assertEqual(k.j, 0)
self.assertNotIn(2, k)
with self.assertRaises(TypeError):
wrapper_lib.Derived(1)
def testDerivedInit(self, wrapper_lib):
k = wrapper_lib.Derived.Init(1, 2)
self.assertEqual(k.i, 1)
self.assertEqual(k.j, 2)
if __name__ == '__main__':
absltest.main()
| apache-2.0 | 1,566,296,594,970,974,700 | 31.4375 | 74 | 0.6921 | false | 3.405512 | true | false | false |
CI-WATER/gsshapy | gsshapy/modeling/model.py | 1 | 14479 | # -*- coding: utf-8 -*-
#
# model.py
# GSSHApy
#
# Created by Alan D Snow, 2016.
# BSD 3-Clause
from datetime import timedelta
import logging
import uuid
import os
from gazar.grid import GDALGrid
import geopandas as gpd
from .event import EventMode, LongTermMode
from ..orm import WatershedMaskFile, ElevationGridFile, MapTableFile
from ..lib import db_tools as dbt
from ..util.context import tmp_chdir
log = logging.getLogger(__name__)
class GSSHAModel(object):
"""
This class manages the generation and modification of
models for GSSHA.
Parameters:
project_directory(str): Directory to write GSSHA project files to.
project_name(Optional[str]): Name of GSSHA project. Required for new model.
mask_shapefile(Optional[str]): Path to watershed boundary shapefile. Required for new model.
auto_clean_mask_shapefile(Optional[bool]): Chooses the largest region if the input is a multipolygon. Default is False.
grid_cell_size(Optional[str]): Cell size of model (meters). Required for new model.
elevation_grid_path(Optional[str]): Path to elevation raster used for GSSHA grid. Required for new model.
simulation_timestep(Optional[float]): Overall model timestep (seconds). Sets TIMESTEP card. Required for new model.
out_hydrograph_write_frequency(Optional[str]): Frequency of writing to hydrograph (minutes). Sets HYD_FREQ card. Required for new model.
        roughness(Optional[float]): Value of uniform Manning's n roughness for grid. Mutually exclusive with land use roughness. Required for new model.
        land_use_grid(Optional[str]): Path to land use grid to use for roughness. Mutually exclusive with roughness. Required for new model.
        land_use_grid_id(Optional[str]): ID of default grid supported in GSSHApy. Mutually exclusive with roughness. Required for new model.
        land_use_to_roughness_table(Optional[str]): Path to land use to roughness table. Use if not using land_use_grid_id. Mutually exclusive with roughness. Required for new model.
        load_rasters_to_db(Optional[bool]): If True, it will load the created rasters into the database. If you are generating a large model, it is recommended to set this to False. Default is True.
db_session(Optional[database session]): Active database session object. Required for existing model.
project_manager(Optional[ProjectFile]): Initialized ProjectFile object. Required for existing model.
Model Generation Example:
.. code:: python
from datetime import datetime, timedelta
from gsshapy.modeling import GSSHAModel
model = GSSHAModel(project_name="gssha_project",
project_directory="/path/to/gssha_project",
mask_shapefile="/path/to/watershed_boundary.shp",
auto_clean_mask_shapefile=True,
grid_cell_size=1000,
elevation_grid_path="/path/to/elevation.tif",
simulation_timestep=10,
out_hydrograph_write_frequency=15,
land_use_grid='/path/to/land_use.tif',
land_use_grid_id='glcf',
load_rasters_to_db=False,
)
model.set_event(simulation_start=datetime(2017, 2, 28, 14, 33),
simulation_duration=timedelta(seconds=180*60),
rain_intensity=2.4,
rain_duration=timedelta(seconds=30*60),
)
model.write()
"""
def __init__(self,
project_directory,
project_name=None,
mask_shapefile=None,
auto_clean_mask_shapefile=False,
grid_cell_size=None,
elevation_grid_path=None,
simulation_timestep=30,
out_hydrograph_write_frequency=10,
roughness=None,
land_use_grid=None,
land_use_grid_id=None,
land_use_to_roughness_table=None,
load_rasters_to_db=True,
db_session=None,
project_manager=None,
):
self.project_directory = project_directory
self.db_session = db_session
self.project_manager = project_manager
self.load_rasters_to_db = load_rasters_to_db
if project_manager is not None and db_session is None:
raise ValueError("'db_session' is required to edit existing model if 'project_manager' is given.")
if project_manager is None and db_session is None:
if project_name is not None and mask_shapefile is None and elevation_grid_path is None:
self.project_manager, db_sessionmaker = \
dbt.get_project_session(project_name, self.project_directory)
self.db_session = db_sessionmaker()
self.project_manager.readInput(directory=self.project_directory,
projectFileName="{0}.prj".format(project_name),
session=self.db_session)
else:
# generate model
if None in (project_name, mask_shapefile, elevation_grid_path):
raise ValueError("Need to set project_name, mask_shapefile, "
"and elevation_grid_path to generate "
"a new GSSHA model.")
self.project_manager, db_sessionmaker = \
dbt.get_project_session(project_name, self.project_directory, map_type=0)
self.db_session = db_sessionmaker()
self.db_session.add(self.project_manager)
self.db_session.commit()
# ADD BASIC REQUIRED CARDS
# see http://www.gsshawiki.com/Project_File:Required_Inputs
self.project_manager.setCard('TIMESTEP',
str(simulation_timestep))
self.project_manager.setCard('HYD_FREQ',
str(out_hydrograph_write_frequency))
# see http://www.gsshawiki.com/Project_File:Output_Files_%E2%80%93_Required
self.project_manager.setCard('SUMMARY',
'{0}.sum'.format(project_name),
add_quotes=True)
self.project_manager.setCard('OUTLET_HYDRO',
'{0}.otl'.format(project_name),
add_quotes=True)
# ADD REQUIRED MODEL GRID INPUT
if grid_cell_size is None:
                # calculate cell size from elevation grid if not given
# as input from the user
ele_grid = GDALGrid(elevation_grid_path)
utm_bounds = ele_grid.bounds(as_utm=True)
x_cell_size = (utm_bounds[1] - utm_bounds[0])/ele_grid.x_size
y_cell_size = (utm_bounds[3] - utm_bounds[2])/ele_grid.y_size
grid_cell_size = min(x_cell_size, y_cell_size)
ele_grid = None
log.info("Calculated cell size is {grid_cell_size}"
.format(grid_cell_size=grid_cell_size))
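                # Note: taking the smaller of the two UTM cell dimensions keeps the
                # generated model grid at least as fine as the source elevation
                # raster in both directions.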
if auto_clean_mask_shapefile:
mask_shapefile = self.clean_boundary_shapefile(mask_shapefile)
self.set_mask_from_shapefile(mask_shapefile, grid_cell_size)
self.set_elevation(elevation_grid_path, mask_shapefile)
self.set_roughness(roughness=roughness,
land_use_grid=land_use_grid,
land_use_grid_id=land_use_grid_id,
land_use_to_roughness_table=land_use_to_roughness_table,
)
@staticmethod
def clean_boundary_shapefile(shapefile_path):
"""
        Cleans the boundary shapefile so that only one main polygon remains.
:param shapefile_path:
:return:
"""
wfg = gpd.read_file(shapefile_path)
first_shape = wfg.iloc[0].geometry
if hasattr(first_shape, 'geoms'):
log.warning("MultiPolygon found in boundary. "
"Picking largest area ...")
# pick largest shape to be the watershed boundary
# and assume the other ones are islands to be removed
max_area = -9999.0
main_geom = None
for geom in first_shape.geoms:
if geom.area > max_area:
main_geom = geom
max_area = geom.area
# remove self intersections
if not main_geom.is_valid:
log.warning("Invalid geometry found in boundary. "
"Attempting to self clean ...")
main_geom = main_geom.buffer(0)
wfg.loc[0, 'geometry'] = main_geom
out_cleaned_boundary_shapefile = \
os.path.splitext(shapefile_path)[0] +\
str(uuid.uuid4()) +\
'.shp'
wfg.to_file(out_cleaned_boundary_shapefile)
log.info("Cleaned boundary shapefile written to:"
"{}".format(out_cleaned_boundary_shapefile))
return out_cleaned_boundary_shapefile
return shapefile_path
def set_mask_from_shapefile(self, shapefile_path, cell_size):
"""
Adds a mask from a shapefile
"""
# make sure paths are absolute as the working directory changes
shapefile_path = os.path.abspath(shapefile_path)
# ADD MASK
with tmp_chdir(self.project_directory):
mask_name = '{0}.msk'.format(self.project_manager.name)
msk_file = WatershedMaskFile(project_file=self.project_manager,
session=self.db_session)
msk_file.generateFromWatershedShapefile(shapefile_path,
cell_size=cell_size,
out_raster_path=mask_name,
load_raster_to_db=self.load_rasters_to_db)
def set_elevation(self, elevation_grid_path, mask_shapefile):
"""
Adds elevation file to project
"""
# ADD ELEVATION FILE
ele_file = ElevationGridFile(project_file=self.project_manager,
session=self.db_session)
ele_file.generateFromRaster(elevation_grid_path,
mask_shapefile,
load_raster_to_db=self.load_rasters_to_db)
def set_outlet(self, latitude, longitude, outslope):
"""
Adds outlet point to project
"""
self.project_manager.setOutlet(latitude=latitude, longitude=longitude,
outslope=outslope)
def set_roughness(self,
roughness=None,
land_use_grid=None,
land_use_grid_id=None,
land_use_to_roughness_table=None):
"""
ADD ROUGHNESS FROM LAND COVER
See: http://www.gsshawiki.com/Project_File:Overland_Flow_%E2%80%93_Required
"""
if roughness is not None:
self.project_manager.setCard('MANNING_N', str(roughness))
elif land_use_grid is not None and (land_use_grid_id is not None \
or land_use_to_roughness_table is not None):
# make sure paths are absolute as the working directory changes
land_use_grid = os.path.abspath(land_use_grid)
if land_use_to_roughness_table is not None:
land_use_to_roughness_table = os.path.abspath(land_use_to_roughness_table)
mapTableFile = MapTableFile(project_file=self.project_manager)
mapTableFile.addRoughnessMapFromLandUse("roughness",
self.db_session,
land_use_grid,
land_use_to_roughness_table=land_use_to_roughness_table,
land_use_grid_id=land_use_grid_id)
else:
raise ValueError("Need to either set 'roughness', or need "
"to set values from land use grid ...")
def set_event(self,
simulation_start=None,
simulation_duration=None,
simulation_end=None,
rain_intensity=2,
rain_duration=timedelta(seconds=30*60),
event_type='EVENT',
):
"""
Initializes event for GSSHA model
"""
        # ADD TEMPORAL EVENT INFORMATION
if event_type == 'LONG_TERM':
self.event = LongTermMode(self.project_manager,
self.db_session,
self.project_directory,
simulation_start=simulation_start,
simulation_end=simulation_end,
simulation_duration=simulation_duration,
)
else: # 'EVENT'
self.event = EventMode(self.project_manager,
self.db_session,
self.project_directory,
simulation_start=simulation_start,
simulation_duration=simulation_duration,
)
self.event.add_uniform_precip_event(intensity=rain_intensity,
duration=rain_duration)
def write(self):
"""
Write project to directory
"""
# write data
self.project_manager.writeInput(session=self.db_session,
directory=self.project_directory,
name=self.project_manager.name)
| bsd-3-clause | 7,239,281,014,911,218,000 | 47.915541 | 199 | 0.537123 | false | 4.427829 | false | false | false |
naure/YaP | tests/test_lib.py | 1 | 2568 | #!/usr/bin/env python3
import unittest
import sys
sys.path.append('.')
from yap import expand_env_soft
from yap import call_lib
escape_sh = None
exec(call_lib)
from yap import missing_lib
exec(missing_lib)
def B(s):
" Avoid quoting backslashes all the time "
return s.replace('B', '\\').replace('S', "'").replace('D', '"')
class Test(unittest.TestCase):
def test_test(self):
self.assertEqual(B('B S DB'), '\\ \' "\\')
def test_escape_sh(self):
data = [
('nothing', 'nothing'),
('with spaces', 'withB spaces'),
('with Bs', 'withB BBs'),
('keep DquotesD and SquotesS', 'keepB DquotesDB andB SquotesS'),
('with BDs', 'withB BBDs'),
('', ''),
]
for raw, escaped in data:
self.assertEqual(
escape_sh(B(raw)),
B(escaped),
)
def test_expand_env_soft(self):
class O(object):
pass
# Arguments
sys = O()
sys.argv = ['zero', 'un']
self.assertEqual(eval(
expand_env_soft('bool($1)')), True
)
self.assertEqual(eval(
expand_env_soft('$1 == "un"')), True
)
self.assertEqual(eval(
expand_env_soft('bool($2)')), False
)
self.assertEqual(eval(
expand_env_soft('$2 == "deux"')), False
)
self.assertEqual(eval(
expand_env_soft('$2 == $2')), False
)
with self.assertRaises(KeyError):
eval(expand_env_soft('"error: {}".format($2)'))
with self.assertRaises(KeyError):
eval(expand_env_soft('$2[0]'))
with self.assertRaises(KeyError):
eval(expand_env_soft('$2[-3:]'))
with self.assertRaises(KeyError):
eval(expand_env_soft('$2.attr'))
with self.assertRaises(KeyError):
eval(expand_env_soft('$2 + "nope"'))
with self.assertRaises(KeyError):
eval(expand_env_soft('int($2)'))
# Environment variables
os = O()
os.environ = {'env': 'ENV!', 'empty': ''}
self.assertEqual(eval(
expand_env_soft('$env')), 'ENV!'
)
self.assertEqual(eval(
expand_env_soft('$empty')), ''
)
self.assertEqual(eval(
expand_env_soft('bool($missing)')), False
)
with self.assertRaises(TypeError):
eval(expand_env_soft('"error: " + $missing'))
if __name__ == '__main__':
unittest.main(verbosity=2)
| apache-2.0 | 4,114,828,465,400,979,500 | 26.913043 | 76 | 0.507399 | false | 3.832836 | true | false | false |
Jgarcia-IAS/SAT | openerp/addons-extra/res_partner_fiscal_document/res_partner.py | 3 | 6596 | # -*- encoding: utf-8 -*-
# #############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) Odoo Colombia (Community).
# Author David Arnold (devCO)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api # , _
class ResPartner(models.Model):
_inherit = 'res.partner'
dom = "['|', " \
" ('on_contact' ,'!=', is_company )," \
" '|', " \
" '&', " \
" ('on_company' , '=', is_company )," \
" ('on_company' , '=', is_company )," \
" '&', " \
" ('on_merchant', '=', is_company )," \
" ('on_merchant', '=', is_company )]"
fiscal_id_type = fields.Many2one(
'res.partner.idtype',
string=u'Document Type',
domain=dom,
)
fiscal_id = fields.Char(
string=u'Document ID',
# compute='validateformatcopy',
)
fiscal_id_doc = fields.Binary(
string=u'Document Scan',
help="Upload the supporting Document "
"preferably as size-optimized PDF. "
"This might "
"help save disk space and PDF allows "
"you to concatenate multiple documents."
)
@api.one
@api.onchange(
'fiscal_id_type',
'fiscal_id',
'is_company',
)
def validateformatcopy(self):
# CASE: Current ID Type is not applicable on Merchant
if self.is_company:
if not self.fiscal_id_type.on_merchant:
# Get the first valid ID type (remember: ordered by sequence)
self.fiscal_id_type = self.env['res.partner.idtype'].search(
[('on_merchant', '=', True)], limit=1).id
self.fiscal_id = None # Reset ID value
# CASE: Current ID Type is not applicable on Company
if self.is_company:
if not self.fiscal_id_type.on_company:
# Get the first valid ID type (remember: ordered by sequence)
self.fiscal_id_type = self.env['res.partner.idtype'].search(
[('on_company', '=', True)], limit=1).id
self.fiscal_id = None # Reset ID value
# CASE: Current ID Type is not applicable on contact
if not self.is_company:
if not self.fiscal_id_type.on_contact:
# Get the first valid ID type (remember: ordered by sequence)
self.fiscal_id_type = self.env['res.partner.idtype'].search(
[('on_contact', '=', True)], limit=1).id
self.fiscal_id = None # Reset ID value
# If everything is fine, call subclasses
if self.fiscal_id_type and self.fiscal_id:
# Function for String Operations
res = self._validateandformatid()
if res['output_type'] and res['output_id']:
self.fiscal_id_type = res['output_type']
self.fiscal_id = res['output_id']
# Procedure for Copying
self._copyid()
def _validateandformatid(self):
"""
Hook method to be inherited for custom validation methods.
:param input_type: the value of the field fiscal_id_type (id); passed
on by onchange decorator
:param input_id: the value of the field fiscal_id (string); passed on
by onchange decorator
:return: must return a dict with validated and formatted values
Hint:
you might not alter the output_type unless you might want to build
some kind of fiscal_id_type recognition
based on the input pattern into your hook method. CO###.###.###-#
CO-VAT (NIT) for example.
Find below a suggested basic outline.
"""
return {'output_type': self.fiscal_id_type, 'output_id': self.fiscal_id}
"""
f_type = self.fiscal_id_type
f_id = self.fiscal_id
is_company = self.is_company
def default():
return {'output_type': f_type, 'output_id': f_id}
return {
# Define your cases
# The index to match is self.fiscal_id_type.code
# Note: You can change this index below.
# Example assignation using two functions
# {'output_type': func_type1(), 'output_id': funct_id1()}
'CODE1': { "put your assignation here" },
'CODE2': { "put your assignation here" },
}.get(self.fiscal_id_type.code, default())
"""
def _copyid(self):
"""
Hook Method to be inherited for custom copy methods based on the
document type (id)
Example Use Case: Copy some local VAT number into the VAT-Field in
it's international format for compatibility.
:return: It is a Procedure and therefore has no return value.
Find below a suggested basic outline.
"""
"""
f_type = self.fiscal_id_type
f_id = self.fiscal_id
is_company = self.is_company
def stringop_def(s): return s
def stringop_1(s): return re.match('\\d|\\w*', s)
# Define other Docstringoperatios if necessary
def default():
self.vat_subjected = True
# self.vat is a Boolean until base_vat is installed.
# self.vat = self.country_id.code + sringop_def(f_id)
{
# Some examples to consider...
            # self.vat_subjected: True,
            # self.vat: self.country_id.code + stringop_1(f_id)
            'CODE1': { "put your statements here" },
            'CODE2': { "put your statements here" },
}.get(self.fiscal_id_type.code, default())
"""
| agpl-3.0 | -1,840,030,141,748,945,200 | 38.261905 | 80 | 0.541692 | false | 4.164141 | false | false | false |
jaufrec/ssn_names | import_names.py | 1 | 3856 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import psycopg2
import sys
import csv
import glob
import re
def load_names():
"""Import names from SSN data dump into database"""
con = psycopg2.connect(database='ssn_names')
cur = con.cursor()
# for filename in glob.iglob('*1880.txt'):
for filename in glob.iglob('*.txt'):
year = int(re.findall('\d+', filename)[0])
print('starting {0}'.format(year))
try:
with open(filename, 'r') as f:
reader = csv.reader(f, delimiter=',', quotechar='"')
for row in reader:
data = tuple(row[0:3]) + (year,)
load_statement = "INSERT INTO raw_names " + \
"(name, gender, count, year) " + \
"VALUES ('%s', '%s', '%s', '%s')" % data
cur.execute(load_statement)
except psycopg2.DatabaseError as e:
print('Error {0}'.format(e))
sys.exit(1)
finally:
if con:
con.commit()
def calculate_rank():
rank_statement = """
UPDATE raw_names
set rank = r.rnk
FROM (
SELECT name, year, gender, rank() OVER (partition by year, gender ORDER BY count DESC) AS rnk
FROM raw_names
) r
WHERE raw_names.name = r.name and raw_names.gender = r.gender and raw_names.year = r.year
"""
con = psycopg2.connect(database='ssn_names')
cur = con.cursor()
cur.execute(rank_statement)
con.commit()
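# Note on the window function in rank_statement above: rank() numbers the names
# within each (year, gender) partition by descending count, and ties share a rank
# with gaps after them (dense_rank() would be the gap-free alternative).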
def generate_list():
unique_names_statement_1 = """
INSERT INTO names(name, gender)
SELECT name,
gender
FROM raw_names
GROUP BY name, gender
"""
unique_names_statement_2 = """
SELECT name,
gender,
count(name) as num
INTO temp_max
FROM raw_names
WHERE rank <= 50
GROUP by name, gender
"""
unique_names_statement_3 = """
UPDATE names n
SET num_above_max = (SELECT num
FROM temp_max t
WHERE n.name = t.name
AND n.gender = t.gender)
"""
unique_names_statement_4 = """
drop table temp_max
"""
unique_names_statement_5 = """
SELECT name,
gender,
count(name) as num
INTO temp_min
FROM raw_names
WHERE rank > 500
GROUP by name, gender
"""
unique_names_statement_6 = """
UPDATE names n
SET num_below_min = (SELECT num
FROM temp_min t
WHERE n.name = t.name
AND n.gender = t.gender)
"""
unique_names_statement_7 = """
drop table temp_min
"""
unique_names_statement_8 = """
SELECT name,
gender,
count(name)
INTO temp_count
FROM raw_names
GROUP by name, gender
"""
unique_names_statement_9 = """
UPDATE names n
SET total_count = t.count
FROM temp_count t
WHERE t.name = n.name
AND t.gender = n.gender
"""
unique_names_statement_10 = """
UPDATE names
SET total_count_below = (136 - (total_count)) + num_below_min
"""
unique_names_statement_11 = """
drop table temp_count
"""
con = psycopg2.connect(database='ssn_names')
cur = con.cursor()
cur.execute(unique_names_statement_1)
cur.execute(unique_names_statement_2)
cur.execute(unique_names_statement_3)
cur.execute(unique_names_statement_4)
cur.execute(unique_names_statement_5)
cur.execute(unique_names_statement_6)
cur.execute(unique_names_statement_7)
cur.execute(unique_names_statement_8)
cur.execute(unique_names_statement_9)
cur.execute(unique_names_statement_10)
cur.execute(unique_names_statement_11)
con.commit()
def main():
# load_names()
print('calculating rank')
calculate_rank()
print('generating list')
generate_list()
if __name__ == "__main__":
main()
| gpl-2.0 | -8,039,289,289,064,200,000 | 23.1 | 99 | 0.567427 | false | 3.689952 | false | false | false |
dhenrygithub/QGIS | python/plugins/processing/algs/qgis/ExtractByAttribute.py | 1 | 5209 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ExtractByAttribute.py
---------------------
Date : May 2010
Copyright : (C) 2010 by Michael Minn
Email : pyqgis at michaelminn dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Michael Minn'
__date__ = 'May 2010'
__copyright__ = '(C) 2010, Michael Minn'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.PyQt.QtCore import QVariant
from qgis.core import QgsExpression, QgsFeatureRequest
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterTableField
from processing.core.parameters import ParameterSelection
from processing.core.parameters import ParameterString
from processing.core.outputs import OutputVector
from processing.tools import dataobjects
class ExtractByAttribute(GeoAlgorithm):
INPUT = 'INPUT'
FIELD = 'FIELD'
OPERATOR = 'OPERATOR'
VALUE = 'VALUE'
OUTPUT = 'OUTPUT'
OPERATORS = ['=',
'!=',
'>',
'>=',
'<',
'<=',
'begins with',
'contains'
]
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Extract by attribute')
self.group, self.i18n_group = self.trAlgorithm('Vector selection tools')
self.i18n_operators = ['=',
'!=',
'>',
'>=',
'<',
'<=',
self.tr('begins with'),
self.tr('contains')]
self.addParameter(ParameterVector(self.INPUT,
self.tr('Input Layer'), [ParameterVector.VECTOR_TYPE_ANY]))
self.addParameter(ParameterTableField(self.FIELD,
self.tr('Selection attribute'), self.INPUT))
self.addParameter(ParameterSelection(self.OPERATOR,
self.tr('Operator'), self.i18n_operators))
self.addParameter(ParameterString(self.VALUE, self.tr('Value')))
self.addOutput(OutputVector(self.OUTPUT, self.tr('Extracted (attribute)')))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(self.getParameterValue(self.INPUT))
fieldName = self.getParameterValue(self.FIELD)
operator = self.OPERATORS[self.getParameterValue(self.OPERATOR)]
value = self.getParameterValue(self.VALUE)
fields = layer.pendingFields()
writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(fields,
layer.wkbType(), layer.crs())
idx = layer.fieldNameIndex(fieldName)
fieldType = fields[idx].type()
if fieldType != QVariant.String and operator in self.OPERATORS[-2:]:
op = ''.join(['"%s", ' % o for o in self.OPERATORS[-2:]])
raise GeoAlgorithmExecutionException(
self.tr('Operators %s can be used only with string fields.' % op))
if fieldType in [QVariant.Int, QVariant.Double, QVariant.UInt, QVariant.LongLong, QVariant.ULongLong]:
expr = '"%s" %s %s' % (fieldName, operator, value)
elif fieldType == QVariant.String:
if operator not in self.OPERATORS[-2:]:
expr = """"%s" %s '%s'""" % (fieldName, operator, value)
elif operator == 'begins with':
expr = """"%s" LIKE '%s%%'""" % (fieldName, value)
elif operator == 'contains':
expr = """"%s" LIKE '%%%s%%'""" % (fieldName, value)
elif fieldType in [QVariant.Date, QVariant.DateTime]:
expr = """"%s" %s '%s'""" % (fieldName, operator, value)
else:
raise GeoAlgorithmExecutionException(
self.tr('Unsupported field type "%s"' % fields[idx].typeName()))
expression = QgsExpression(expr)
if not expression.hasParserError():
req = QgsFeatureRequest(expression)
else:
raise GeoAlgorithmExecutionException(expression.parserErrorString())
for f in layer.getFeatures(req):
writer.addFeature(f)
del writer
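        # For reference, the filter expressions built above take forms like the
        # following (field names and values are purely illustrative):
        #   "POP" > 1000              numeric comparison
        #   "NAME" LIKE 'San%'        'begins with' on a string field
        #   "NAME" LIKE '%San%'       'contains' on a string field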
| gpl-2.0 | 3,841,475,093,909,727,700 | 41.696721 | 110 | 0.525821 | false | 4.881912 | false | false | false |
ricket1978/ggplot | ggplot/components/__init__.py | 12 | 1402 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .aes import aes
from . import colors, shapes, size, linetypes, alphas
def assign_visual_mapping(data, aes, gg):
"""Assigns the visual mapping to the given data and adds the right legend
Parameters
----------
data : DataFrame
dataframe which should have aesthetic mappings assigned to
aes : aesthetic
mapping, visual value to variable
gg : ggplot object,
It holds global configuration values needed by
some of the mapping functions
Returns
-------
data : DataFrame
the changed dataframe with visual values added
legend : dict
A legend as specified in `components.legend`
"""
legend = {}
data, legend['color'] = colors.assign_colors(data, aes, gg, 'color')
data, legend['fill'] = colors.assign_colors(data, aes, gg, 'fill')
data, legend['size'] = size.assign_sizes(data, aes)
data, legend['linetype'] = linetypes.assign_linetypes(data, aes)
data, legend['shape'] = shapes.assign_shapes(data, aes)
data, legend['alpha'] = alphas.assign_alphas(data, aes)
# Delete empty entries in the legend
for _aes_name in ('color', 'fill', 'size', 'linetype', 'shape', 'alpha'):
if not legend[_aes_name]:
del legend[_aes_name]
return data, legend
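# Rough usage sketch (the column names are illustrative, not taken from this repo):
#   data, legend = assign_visual_mapping(df, aes(x='wt', y='mpg', color='cyl'), gg)
# after which data carries the computed visual columns (e.g. 'color') and legend
# holds the per-aesthetic entries that were not empty.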
| bsd-2-clause | -4,193,330,693,419,276,000 | 34.05 | 77 | 0.639087 | false | 3.938202 | false | false | false |
peterdougstuart/PCWG | pcwg/core/benchmark.py | 2 | 5616 | import numpy as np
from analysis import Analysis
from corrections import PowerCalculator
from corrections import TurbulencePowerCalculator
from ..core.status import Status
class BenchmarkAnalysis(Analysis):
def __init__(self, analysis_config, baseLineMode):
self.basePower = "Base Power"
self.baseLineMode = baseLineMode
Status.add("Baseline Mode: %s" % self.baseLineMode)
Analysis.__init__(self, analysis_config)
self.calculateBase()
self.calculateHubBenchmark()
self.calculateREWSBenchmark()
self.calculateTurbRenormBenchmark()
self.calculationCombinedBenchmark()
self.calculatePowerDeviationMatrixBenchmark()
self.calculateProductionByHeightBenchmark()
#self.dataFrame.to_csv("debug.dat")
def calculate_sensitivity_analysis(self):
#speed optimisation (sensitivity analysis not required for benchmark)
pass
def calculate_scatter_metric(self):
#speed optimisation (scatter metric not required for benchmark)
pass
def get_base_filter(self):
base_filter = Analysis.get_base_filter(self)
if self.baseLineMode == "Hub":
return base_filter & self.dataFrame[self.baseline.wind_speed_column].notnull()
elif self.baseLineMode == "Measured":
return base_filter
else:
raise Exception("Unrecognised baseline mode: %s" % self.baseLineMode)
def calculateBase(self):
if self.baseLineMode == "Hub":
if self.powerCurve is None:
exc_str = "%s Power Curve has not been calculated successfully." % self.powerCurveMode
if self.powerCurveMode == 'InnerMeasured':
exc_str += " Check Inner Range settings."
raise Exception(exc_str)
self.dataFrame[self.basePower] = self.dataFrame.apply(PowerCalculator(self.powerCurve, self.baseline.wind_speed_column).power, axis=1)
elif self.baseLineMode == "Measured":
if self.hasActualPower:
self.dataFrame[self.basePower] = self.dataFrame[self.actualPower]
else:
raise Exception("You must specify a measured power data column if using the 'Measured' baseline mode")
else:
raise Exception("Unkown baseline mode: % s" % self.baseLineMode)
self.baseYield = self.dataFrame[self.get_base_filter()][self.basePower].sum() * self.timeStampHours
def calculateHubBenchmark(self):
self.hubPower = "Hub Power"
self.dataFrame[self.hubPower] = self.dataFrame.apply(PowerCalculator(self.powerCurve, self.baseline.wind_speed_column).power, axis=1)
self.hubYield = self.dataFrame[self.get_base_filter()][self.baseline.power_column].sum() * self.timeStampHours
self.hubYieldCount = self.dataFrame[self.get_base_filter()][self.hubPower].count()
self.hubDelta = self.hubYield / self.baseYield - 1.0
Status.add("Hub Delta: %.3f%% (%d)" % (self.hubDelta * 100.0, self.hubYieldCount))
def get_rews(self):
Status.add("Locating REWS from {0} corrections".format(len(self.corrections)), verbosity=3)
for correction in self.corrections:
if self.corrections[correction].rews_applied() and not self.corrections[correction].turbulence_applied():
Status.add("Match: {0}".format(correction))
return correction
else:
Status.add("No match: {0}".format(correction))
raise Exception("Could not locate REWS correction")
def calculateREWSBenchmark(self):
if self.rewsActive:
self.rewsYield, self.rewsYieldCount, self.rewsDelta = self.calculate_benchmark_for_correction(self.get_rews())
def calculateTurbRenormBenchmark(self):
if self.turbRenormActive:
self.turbulenceYield, self.turbulenceYieldCount, self.turbulenceDelta = self.calculate_benchmark_for_correction("Turbulence")
if self.hasActualPower:
self.dataFrame[self.measuredTurbulencePower] = (self.dataFrame[self.actualPower] - self.dataFrame[self.corrections["Turbulence"].power_column] + self.dataFrame[self.basePower]).astype('float')
def calculationCombinedBenchmark(self):
if self.rewsActive and self.turbRenormActive:
self.combinedYield, self.combinedYieldCount, self.combinedDelta = self.calculate_benchmark_for_correction("{0} & Turbulence".format(self.get_rews()))
def calculatePowerDeviationMatrixBenchmark(self):
if self.powerDeviationMatrixActive:
self.powerDeviationMatrixYield, self.powerDeviationMatrixYieldCount, self.powerDeviationMatrixDelta = self.calculate_benchmark_for_correction("2D Power Deviation Matrix")
def calculateProductionByHeightBenchmark(self):
if self.productionByHeightActive:
self.productionByHeightYield, self.productionByHeightYieldCount, self.productionByHeightDelta = self.calculate_benchmark_for_correction("Production by Height")
def calculate_benchmark_for_correction(self, correction):
power_column = self.corrections[correction].power_column
energy = self.dataFrame[self.get_base_filter()][power_column].sum() * self.timeStampHours
count = self.dataFrame[self.get_base_filter()][power_column].count()
delta = energy / self.baseYield - 1.0
Status.add("%s Delta: %f%% (%d)" % (correction, delta * 100.0, count))
return (energy, count, delta)
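    # Note: delta is the fractional change relative to the baseline yield, e.g. a
    # value of 0.025 corresponds to the "+2.5%" figure written to the status log.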
| mit | 5,308,416,167,202,533,000 | 40.6 | 208 | 0.67183 | false | 4.028694 | false | false | false |
Mischback/django-oweb | oweb/views/tools.py | 1 | 4779 | """Provides some general, account-related tools"""
# Python imports
from math import ceil
# Django imports
from django.core.urlresolvers import reverse
from django.http import Http404
from django.shortcuts import redirect, render
# app imports
from oweb.exceptions import OWebDoesNotExist, OWebAccountAccessViolation
from oweb.models.planet import Planet
from oweb.models.building import Supply12
from oweb.models.research import Research113
from oweb.libs.production import get_fusion_production
from oweb.libs.costs import costs_onepointeight_total, costs_two_total
from oweb.libs.queue import get_mse
from oweb.libs.shortcuts import get_list_or_404, get_object_or_404
def tools_energy(req, account_id, energy_level=None, fusion_level=None):
"""Shows some energy related information"""
    # This is the non-decorator version of the login_required decorator:
    # it checks whether the user is authenticated and redirects to the login
    # page if not. The decorator could not handle the reverse URL resolution.
if not req.user.is_authenticated():
return redirect(reverse('oweb:app_login'))
# fetch the account and the current planet
try:
planets = Planet.objects.select_related('account').filter(account_id=account_id)
account = planets.first().account
except Planet.DoesNotExist:
raise OWebDoesNotExist
except AttributeError:
raise OWebDoesNotExist
# checks, if this account belongs to the authenticated user
if not req.user.id == account.owner_id:
raise OWebAccountAccessViolation
planet_ids = planets.values_list('id', flat=True)
if not fusion_level:
fusion_list = get_list_or_404(Supply12, astro_object_id__in=planet_ids)
# determine the average fusion reactor and maximum fusion reactor
max_fusion = 0
average_fusion = 0
for f in fusion_list:
average_fusion += f.level
if f.level > max_fusion:
max_fusion = f.level
fusion_level = ceil(average_fusion / len(planet_ids))
fusion_base_cost = f.base_cost
else:
fusion_base_cost = Supply12.base_cost
fusion_level = int(fusion_level)
if not energy_level:
energy = get_object_or_404(Research113, account_id=account_id)
energy_level = energy.level
energy_base_cost = energy.base_cost
else:
energy_level = int(energy_level)
energy_base_cost = Research113.base_cost
# calculate the costs of the current fusion plant
current_fusion_cost = costs_onepointeight_total(fusion_base_cost, fusion_level)
current_fusion_cost = get_mse(current_fusion_cost, (account.trade_metal, account.trade_crystal, account.trade_deut))
# calculate the costs of the current energy technology
current_energy_cost = costs_two_total(energy_base_cost, energy_level)
current_energy_cost = get_mse(current_energy_cost, (account.trade_metal, account.trade_crystal, account.trade_deut))
# calculate the production of the fusion plant
this_prod = int(get_fusion_production(fusion_level, energy=energy_level)[3])
fusion_matrix = []
for i in range(0, 5):
f = fusion_level + i
# calculate the costs of this fusion plant
f_cost = costs_onepointeight_total(fusion_base_cost, f)
f_cost = get_mse(f_cost, (account.trade_metal, account.trade_crystal, account.trade_deut)) - current_fusion_cost
et_range = []
for j in range(0, 5):
et = energy_level + j
# calculate the costs of this energy tech
et_cost = costs_two_total(energy_base_cost, et)
et_cost = (get_mse(et_cost, (account.trade_metal, account.trade_crystal, account.trade_deut)) - current_energy_cost) / len(planet_ids)
# total costs of this combination
next_cost = f_cost + et_cost
# calculate the production of this combination
next_prod = int(get_fusion_production(f, energy=et)[3])
next_prod_gain = int(next_prod - this_prod)
# calculate the "score" of this combination
# COSTS / PRODUCTION_GAIN
if next_prod_gain != 0:
next_ratio = next_cost / next_prod_gain
else:
next_ratio = 0
et_range.append((
et,
next_prod,
next_prod_gain,
next_cost,
next_ratio
))
fusion_matrix.append((int(f), et_range))
return render(req, 'oweb/tools_energy.html',
{
'account': account,
'planets': planets,
'fusion_matrix': fusion_matrix,
'energy_level': energy_level,
'fusion_level': fusion_level,
}
)
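# Reading the matrix built above: each row pairs a fusion-plant level with a range of
# energy-technology levels, and next_ratio is the upgrade cost (converted via get_mse)
# divided by the extra fusion production gained, so smaller ratios mean cheaper gains.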
| mit | 3,612,048,991,356,591,000 | 37.853659 | 146 | 0.650345 | false | 3.724864 | false | false | false |
nprapps/app-template | fabfile/flat.py | 3 | 3778 | #!/usr/bin/env python
# _*_ coding:utf-8 _*_
import copy
from cStringIO import StringIO
from fnmatch import fnmatch
import gzip
import hashlib
import logging
import mimetypes
import os
from boto.s3.key import Key
import app_config
import utils
logging.basicConfig(format=app_config.LOG_FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(app_config.LOG_LEVEL)
GZIP_FILE_TYPES = ['.html', '.js', '.json', '.css', '.xml']
def deploy_file(bucket, src, dst, headers={}, public=True):
"""
Deploy a single file to S3, if the local version is different.
"""
k = bucket.get_key(dst)
s3_md5 = None
if k:
s3_md5 = k.etag.strip('"')
else:
k = Key(bucket)
k.key = dst
file_headers = copy.copy(headers)
if 'Content-Type' not in headers:
file_headers['Content-Type'] = mimetypes.guess_type(src)[0]
if file_headers['Content-Type'] == 'text/html':
# Force character encoding header
file_headers['Content-Type'] = '; '.join([
file_headers['Content-Type'],
'charset=utf-8'])
# Define policy
if public:
policy = 'public-read'
else:
policy = 'private'
# Gzip file
if os.path.splitext(src)[1].lower() in GZIP_FILE_TYPES:
file_headers['Content-Encoding'] = 'gzip'
with open(src, 'rb') as f_in:
contents = f_in.read()
output = StringIO()
f_out = gzip.GzipFile(filename=dst, mode='wb', fileobj=output, mtime=0)
f_out.write(contents)
f_out.close()
local_md5 = hashlib.md5()
local_md5.update(output.getvalue())
local_md5 = local_md5.hexdigest()
if local_md5 == s3_md5:
logger.info('Skipping %s (has not changed)' % src)
else:
logger.info('Uploading %s --> %s (gzipped)' % (src, dst))
k.set_contents_from_string(output.getvalue(), file_headers, policy=policy)
# Non-gzip file
else:
with open(src, 'rb') as f:
local_md5 = hashlib.md5()
local_md5.update(f.read())
local_md5 = local_md5.hexdigest()
if local_md5 == s3_md5:
logger.info('Skipping %s (has not changed)' % src)
else:
logger.info('Uploading %s --> %s' % (src, dst))
k.set_contents_from_filename(src, file_headers, policy=policy)
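# Minimal usage sketch (bucket name, paths and header values are hypothetical):
#   bucket = utils.get_bucket('apps.example.org')
#   deploy_file(bucket, 'www/index.html', 'my-app/index.html',
#               headers={'Cache-Control': 'max-age=20'})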
def deploy_folder(bucket_name, src, dst, headers={}, ignore=[]):
"""
Deploy a folder to S3, checking each file to see if it has changed.
"""
to_deploy = []
for local_path, subdirs, filenames in os.walk(src, topdown=True):
rel_path = os.path.relpath(local_path, src)
for name in filenames:
if name.startswith('.'):
continue
src_path = os.path.join(local_path, name)
skip = False
for pattern in ignore:
if fnmatch(src_path, pattern):
skip = True
break
if skip:
continue
if rel_path == '.':
dst_path = os.path.join(dst, name)
else:
dst_path = os.path.join(dst, rel_path, name)
to_deploy.append((src_path, dst_path))
if bucket_name == app_config.STAGING_S3_BUCKET:
public = False
else:
public = True
bucket = utils.get_bucket(bucket_name)
logger.info(dst)
for src, dst in to_deploy:
deploy_file(bucket, src, dst, headers, public=public)
def delete_folder(bucket_name, dst):
"""
Delete a folder from S3.
"""
bucket = utils.get_bucket(bucket_name)
for key in bucket.list(prefix='%s/' % dst):
logger.info('Deleting %s' % (key.key))
key.delete()
| mit | 7,508,140,029,491,824,000 | 26.376812 | 86 | 0.561673 | false | 3.591255 | false | false | false |
drepetto/chiplotle | chiplotle/hpgl/abstract/arc.py | 1 | 1046 | from chiplotle.hpgl.abstract.positional import _Positional
from chiplotle.hpgl.abstract.hpglprimitive import _HPGLPrimitive
class _Arc(_Positional):
def __init__(self, xy, angle, chordtolerance=None):
self.angle = angle
self.chordtolerance = chordtolerance
_Positional.__init__(self, xy)
@apply
def angle( ):
def fget(self):
return self._angle
def fset(self, arg):
if abs(arg) > 360:
raise ValueError('angle must be between -360 and 360.')
self._angle = arg
return property(**locals( ))
@property
def format(self):
if isinstance(self.x, int) and isinstance(self.y, int):
coordinates = '%i,%i' % (self.x, self.y)
else:
coordinates = '%.2f,%.2f' % (self.x, self.y)
result = '%s%s,%.2f' % (self._name, coordinates, self.angle)
if self.chordtolerance:
result += ',%.2f' % self.chordtolerance
result += _HPGLPrimitive._terminator
return result
| gpl-3.0 | -3,849,194,101,199,761,000 | 33.866667 | 71 | 0.578394 | false | 3.70922 | false | false | false |
macky360/Bitcoin-Related_Addresses | relatedaddresses.py | 1 | 7720 | #!/usr/bin/env python
import sys, os
PY_VERSION = sys.version_info
if PY_VERSION.major != 2 and PY_VERSION.minor != 7:
print("This application requires Python 2.7. You are using Python %d.%d."
% (PY_VERSION.major, PY_VERSION.minor))
sys.exit(1)
from datetime import datetime
from lib.blockchainquery import core as bq
from lib.bitcoinvalidation import addressvalidation as bv
EXAMPLE_ADDRESS = '18WaqDnNRbXpbfgGAv5bC7spb366c4CCfX'
def generate_related_report(recursive, indent, suppresszero, includechangeinputs, maxresult, parallel, *addresses):
'''Uses various techniques to identify addresses related and generates a report
'''
os.system('cls' if os.name == 'nt' else 'clear')
if recursive:
print("Recursively identifying addresses related to:")
else:
print("Identifying addresses related to:")
print("-"*70)
for count, addr in enumerate(addresses):
print ('{:>3}. {:<39}'.format(count+1,addr))
print("-"*70)
print('')
print("Please wait...")
related_addr_dict = bq.getRelatedAddresses(recursive, includechangeinputs, maxresult, parallel, None, *addresses)
running_balance = 0
#Generate text report
os.system('cls' if os.name == 'nt' else 'clear')
NonZeroAccount = 0
if(suppresszero):
print("Non Zero Related Accounts")
else:
print("Related Accounts")
print("-"*70)
resultsshown = print_audit_report_body(related_addr_dict,indent,suppresszero)
if(len(related_addr_dict) == maxresult):
print(' ...Maximum Limit Reached...')
    if(resultsshown < len(related_addr_dict)):
        print(' ...{:d} Zero Balance Results Suppressed...'.format(len(related_addr_dict) - resultsshown))
print("-"*70)
# Running balance
for addr in related_addr_dict:
running_balance = running_balance + float(bq.getAddressInfo(addr)[0]['final_balance']) / bq.SATOSHIS_IN_A_BITCOIN()
print("Total BTC {:>60f}".format(running_balance))
def print_audit_report_body(related_addr_dict, indent,suppresszero, parent_addr = None, depth=0, line_num = 0):
'''Outputs the audit report body. The function returns the number of lines printed'''
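    # The walk is recursive: root addresses (relationtype == 'root') are printed first,
    # then every address whose 'relation' field points at the current parent is printed
    # one indentation level deeper, with the running line count threaded back up.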
if(parent_addr == None):
for outer_addr, outer_value in related_addr_dict.iteritems():
if outer_value['relationtype'] == 'root':
ref = outer_value['txhash']
balance = float(bq.getAddressInfo(outer_addr)[0]['final_balance']) / bq.SATOSHIS_IN_A_BITCOIN()
line_num +=1
print ('{:>3}. {:<49}{:>16f}'.format(line_num, outer_addr,balance))
# Now we print any address related to the root
line_num = print_audit_report_body(related_addr_dict, indent, suppresszero, outer_addr, depth+1, line_num)
else:
# Now we print any address related to the parent
for addr, value in related_addr_dict.iteritems():
if(value['relation']==parent_addr):
balance = float(bq.getAddressInfo(addr)[0]['final_balance']) / bq.SATOSHIS_IN_A_BITCOIN()
MAX_DEPTH = 17
if(indent):
if(depth<MAX_DEPTH):
indents = ' ' * (depth-1) + ('=' if value['relationtype'] == 'fellow' else '>' if value['relationtype']=='change' else '?')
else:
prefix = ' d+' + str(depth-MAX_DEPTH+1)
indents = prefix + ' ' * (MAX_DEPTH-len(prefix)-2) + ('=' if value['relationtype'] == 'fellow' else '>' if value['relationtype']=='change' else '?')
else:
indents=''
if not suppresszero or balance>0:
if(not suppresszero or balance>0):
line_num += 1
print ('{:>3}. {:<49}{:>16f}'.format(line_num ,indents + addr,balance))
line_num = print_audit_report_body(related_addr_dict, indent, suppresszero, addr, depth+1, line_num)
return line_num
def show_help():
'''Prints the commandline help'''
filename = os.path.basename(__file__)
print('Reports the balances of any related bitcoin addresses.')
print('')
print('{} [-r][-s][-d][-t][-m][-p] Address1 Address2 ...'.format(filename.upper()))
print('')
print(' -r Recursively scan for related addresses')
print(' -s Suppress addresses with a zero balance')
print(' -i Indent to show relationships; useful when doing a recursive scan')
print(' -t Test addresses {0} used for scan'.format(EXAMPLE_ADDRESS))
print(' -e Calls made to external servers are reported')
print(' -c Includes inputs that appear to be using a related addr to store change')
print(' -m Max results, enter as -m300 to limit results to 300 [Default:50]')
print(' -p Use Parallel queries to Blockchain.info to increase speed. [Experimental]')
print('')
print('eg. {0} -r -s {1}'.format(filename.upper(),EXAMPLE_ADDRESS))
print('')
if __name__ == '__main__':
showhelp = False
parallel = False
recurse = False
usetestaddress = False
suppresszero = False
indent = False
reportcalls = False
includechangeinputs = False
showtime = False
addresses = []
unknownflags = []
maxresults = 50
startTime = datetime.now()
if len(sys.argv) ==1: showhelp = True
else:
for flag in sys.argv[1:]:
if flag == '-?': showhelp = True
elif flag == '-t': usetestaddress = True
elif flag == '-r': recurse = True
elif flag == '-s': suppresszero = True
elif flag == '-i': indent = True
elif flag == '-e': reportcalls = True
elif flag == '-c': includechangeinputs = True
elif flag == '-p': parallel = True
elif flag.startswith('-m'):
try:
maxresults = int(flag[2:])
except:
showhelp = True
if maxresults < 1:
showhelp = True
elif bv.check_bitcoin_address(flag):
addresses.append(flag)
else:
unknownflags.append(flag)
if len(unknownflags)>0:
for flag in unknownflags:
print("This argument is not understood: {0}".format(flag))
print('')
show_help()
elif showhelp:
show_help()
elif usetestaddress:
generate_related_report(recurse, indent, suppresszero, includechangeinputs, maxresults, parallel, EXAMPLE_ADDRESS)
showtime = True
else :
generate_related_report(recurse, indent, suppresszero, includechangeinputs, maxresults, parallel, *addresses)
showtime = True
if indent:
print('')
print('Address Prefix Key')
print('------------------')
print('None: Root address, this is one of the keys you searched for')
print('= : Fellow input address of its parent')
print('> : Used as a change address by its parent')
if includechangeinputs:
print('? : Used its parent as a change address. {May be unreliable}')
if reportcalls:
print('')
print('Call report')
print('-----------')
print('')
print('Calls to blockchain.info requesting information on addresses: ' + str(bq._get_address_info_cache_misses))
print('Calls to blockchain.info requesting information on blocks: ' + str(bq._get_block_info_cache_misses))
if showtime:
print('')
print('Report took {} seconds to generate'.format((datetime.now()-startTime).total_seconds()))
| gpl-2.0 | 6,022,440,774,097,053,000 | 40.505376 | 173 | 0.587824 | false | 3.981434 | false | false | false |
sfu-fas/coursys | otp/models.py | 1 | 5414 |
from . import auth_checks
from django.db import models
from django.contrib.sessions.models import Session
from django.contrib.sessions.backends.db import SessionStore
from django.contrib.auth.signals import user_logged_in, user_logged_out
from django.db.models.signals import post_save
from django.utils import timezone
from django_otp.plugins.otp_totp.models import TOTPDevice
from django_otp.plugins.otp_static.models import StaticDevice
from django.conf import settings
from six.moves.urllib.parse import quote, urlencode
import base64
ALL_DEVICE_CLASSES = [TOTPDevice, StaticDevice]
# This could be configurable from settings. It isn't at the moment.
check_auth = auth_checks.check_auth
needs_2fa = auth_checks.needs_2fa
def all_otp_devices(user, confirmed=True):
for Dev in ALL_DEVICE_CLASSES:
devs = Dev.objects.devices_for_user(user, confirmed=confirmed)
for d in devs: # could be a python3 'yield from'
yield d
def totpauth_url(totp_dev):
# https://github.com/google/google-authenticator/wiki/Key-Uri-Format
label = totp_dev.user.username.encode('utf8')
# We need two separate issuers, otherwise deploying in prod will override our authenticator token from
# dev
if settings.DEPLOY_MODE == 'production':
issuer = b'CourSys'
else:
issuer = b'CourSys-DEV'
query = [
('secret', base64.b32encode(totp_dev.bin_key)),
('digits', totp_dev.digits),
('issuer', issuer)
]
return b'otpauth://totp/%s?%s' % (label, urlencode(query).encode('ascii'))
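# Illustration only (the secret shown is made up): the returned URI has the shape
#   otpauth://totp/alice?secret=JBSWY3DPEHPK3PXP&digits=6&issuer=CourSys
# which is typically rendered as a QR code for scanning by an authenticator app.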
# based on http://stackoverflow.com/a/4631504/1236542
class SessionInfo(models.Model):
'''
Meta-information about Sessions, so we can record when authentications happened.
'''
session = models.OneToOneField(Session, on_delete=models.CASCADE)
created = models.DateTimeField(auto_now_add=True)
last_auth = models.DateTimeField(null=True)
last_2fa = models.DateTimeField(null=True)
@classmethod
def for_session(cls, session, save_new=True):
'Retrieve or create a SessionInfo for this Session.'
assert isinstance(session, Session)
try:
si = cls.objects.get(session=session)
except (SessionInfo.DoesNotExist):
si = SessionInfo(session=session)
if save_new:
si.save()
return si
@classmethod
def for_sessionstore(cls, sessionstore, save_new=True):
'Retrieve or create a SessionInfo for this SessionStore.'
assert isinstance(sessionstore, SessionStore)
try:
si = cls.objects.get(session__session_key=sessionstore.session_key)
except (SessionInfo.DoesNotExist):
si = SessionInfo(session=Session.objects.get(session_key=sessionstore.session_key))
if save_new:
si.save()
return si
@classmethod
def for_request(cls, request, save_new=True, user=None):
'Retrieve the SessionInfo for this request, if it has an active session.'
if hasattr(request, 'session_info') and request.session_info is not None:
# already have it.
return request.session_info
if request.session.session_key is None:
# no session: no point in looking.
request.session_info = None
else:
try:
si = cls.for_sessionstore(request.session, save_new=save_new)
except (Session.DoesNotExist):
request.session_info = None
return
request.session_info = si
return request.session_info
@classmethod
def just_logged_in(cls, request):
'Records that the session associated with this request just logged in (by django auth).'
si = cls.for_request(request, save_new=False)
if si is None:
return
si.last_auth = timezone.now()
si.save()
return si
@classmethod
def just_logged_out(cls, request):
'Records that the session associated with this request just logged out.'
si = cls.for_request(request, save_new=False)
if si is None:
return
si.last_auth = None
si.save()
return si
@classmethod
def just_2fa(cls, request):
'Records that the session associated with this request just completed 2FA.'
si = cls.for_request(request, save_new=False)
si.last_2fa = timezone.now()
si.save()
return si
def __str__(self):
return '%s@%s' % (self.session_id, self.created)
def okay_auth(self, request, user):
'''
Is the auth okay for this request/user?
Hook here to allow apps to customize behaviour. Returns a boolean pair:
Is standard Django auth okay?
Is 2FA okay?
May assume that Django auth *and* OTP auth have said yes. Only need to restrict further.
'''
return check_auth(self, request, user)
def logged_in_listener(request, **kwargs):
SessionInfo.just_logged_in(request)
def logged_out_listener(request, **kwargs):
SessionInfo.just_logged_out(request)
user_logged_in.connect(logged_in_listener)
user_logged_out.connect(logged_out_listener)
def session_create_listener(instance, **kwargs):
instance.session_info = SessionInfo.for_session(instance)
post_save.connect(session_create_listener, sender=Session) | gpl-3.0 | -6,296,844,271,298,977,000 | 32.018293 | 106 | 0.65737 | false | 3.889368 | false | false | false |
flopezag/fiware-management-scripts | HelpDesk/stackoverflowsync.py | 1 | 8004 | #!/usr/bin/env <PATH_HELPDESK>/env/bin/python
# -*- encoding: utf-8 -*-
##
# Copyright 2017 FIWARE Foundation, e.V.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
##
from datetime import datetime
from HelpDesk.platforms.servers import StackExchange
from HelpDesk.desks.helpdeskImporter import HelpDeskImporter
from logging import error, exception, info, debug
from logging import _nameToLevel as nameToLevel
from argparse import ArgumentParser
from sys import exc_info
from random import choice
from Common.logging_conf import LoggingConf
from Config.settings import JIRA_URL
__author__ = "Fernando López <[email protected]"
class StackOverflowSync(LoggingConf):
def __init__(self, loglevel):
"""
Initialize the script and fix the log level.
:return: Nothing.
"""
super(StackOverflowSync, self).__init__(loglevel=loglevel, log_file='stackoverflow.log')
info('\n\n---- StackOverflow Synchronization----\n')
# Tell urlib3 to use the pyOpenSSL
# urllib3.contrib.pyopenssl.inject_into_urllib3()
# Create a PoolManager that verifies certificates when performing requests
# http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
info("Getting the HelpDesk monitors data")
self.help_desk = HelpDeskImporter()
self.stack = StackExchange()
self.list_questions = None
try:
self.help_desk.get_monitors()
except Exception as e:
error(e)
exception('No connection to JIRA https://{}'.format(JIRA_URL))
exception("Unexpected error: {}".format(exc_info()[0]))
exit()
def get_stack_monitor(self):
"""
Get the list of questions in StackOverflow and the relation of questions already monitored in the system.
:return: The StackOverflow data.
"""
info("Getting the StackOverflow data")
try:
# raise Exception
self.stack.get_questions()
except Exception as e:
error(e)
info('Failed to get questions from server')
finally:
self.stack.match(self.help_desk.monitors)
def questions_with_no_answer(self, partition_date):
"""
Get the list of questions in StackOverflow with no response.
:param partition_date: Date from which we check the new questions.
:return: Nothing
"""
# Firstly: Get the list of monitored and unmonitored questions with no answer
info("Obtaining list of questions with no answer")
list_questions = filter(lambda x: not x.answer_count, self.stack.questions)
self.help_desk.update_with(list_questions)
answered_questions = filter(lambda x: x.answer_count > 0 and not x.is_answered, self.stack.questions)
new_questions = filter(lambda x: x.added_at >= partition_date, answered_questions)
self.help_desk.update_with(new_questions)
old_questions = filter(lambda x: x.added_at < partition_date, answered_questions)
mon_old_questions = filter(lambda x: x.monitor, old_questions)
self.help_desk.update_with_time(mon_old_questions)
unmon_old_questions = list(filter(lambda x: not x.monitor, old_questions))
if len(unmon_old_questions) > 0:
self.help_desk.update_with_time([choice(unmon_old_questions)])
else:
info('NOT available answered questions for synchronization with help desk')
def questions_with_answers(self, partition_date):
"""
        Get the list of questions with an answer that is not yet reflected in Jira.
:param partition_date: Date from which we check the new questions.
:return: The list of questions that need to be monitored.
"""
# Secondly: Get the list of questions answered to check if they are monitored
info("Obtaining list of questions answers")
accepted_questions = filter(lambda x: x.is_answered, self.stack.questions)
new_questions = filter(lambda x: x.added_at >= partition_date, accepted_questions)
self.help_desk.update_with(new_questions)
old_questions = filter(lambda x: x.added_at < partition_date, accepted_questions)
mon_old_questions = list(filter(lambda x: x.monitor, old_questions))
unmon_old_questions = list(filter(lambda x: not x.monitor, old_questions))
list_questions = mon_old_questions
if len(unmon_old_questions) > 0:
list_questions.append(choice(unmon_old_questions))
else:
info('NOT available questions with accepted answer for synchronization with help desk')
self.list_questions = list_questions
def get_answers(self):
"""
:return:
"""
info("Getting the final list of StackOverflow questions")
try:
self.stack.get_answers(self.list_questions)
except Exception as e:
error(e)
exception('Failed to get answers from server')
exception("Unexpected error: {}".format(exc_info()[0]))
else:
self.help_desk.update_with_time(self.list_questions)
def report(self):
def pq(a_question):
result = 'None'
if a_question.monitor:
result = a_question.monitor.fields.status
return result
for question in self.list_questions:
debug('{}, monitor={}, monitor status={}, question url={}'
.format(question, question.monitor, pq(question), question.url))
def get_number_issues_created(self):
return self.help_desk.n_monitors
def get_number_transitions(self):
return self.help_desk.n_transitions
def get_number_assignments(self):
return self.help_desk.n_assigments
def get_questions(self):
return len(self.stack.questions)
def process(self, year, month, day):
self.get_stack_monitor()
dividing_day = datetime(year=year, month=month, day=day)
self.questions_with_no_answer(partition_date=dividing_day)
self.questions_with_answers(partition_date=dividing_day)
self.get_answers()
info('helpdesk: # issues created = {}'.format(self.get_number_issues_created()))
info('helpdesk: # issues transitions = {}'.format(self.get_number_transitions()))
info('helpdesk: # issues assignments = {}'.format(self.get_number_assignments()))
info('stackoverflow questions= {}'.format(self.get_questions()))
self.close()
if __name__ == "__main__":
# Create the scripts arguments to execute the scripts
parser = ArgumentParser(prog='StackOverflow', description='StackOverflow synchronising script')
parser.add_argument('-l', '--log',
default='INFO',
help='The logging level to be used.')
args = parser.parse_args()
loglevel = None
try:
loglevel = nameToLevel[args.log.upper()]
except Exception as e1:
print('Invalid log level: {}'.format(args.log))
print('Please use one of the following values:')
print(' * CRITICAL')
print(' * ERROR')
print(' * WARNING')
print(' * INFO')
print(' * DEBUG')
print(' * NOTSET')
exit()
stackoverflowSync = StackOverflowSync(loglevel=loglevel)
stackoverflowSync.process(year=2015, month=9, day=21)
| apache-2.0 | -4,770,620,268,501,498,000 | 35.04955 | 113 | 0.645383 | false | 4.110426 | false | false | false |
thebravoman/software_engineering_2016 | hm_term2_rest_api_projects/11a_03_11/gpus/migrations/0001_initial.py | 1 | 1229 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-27 07:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='GPU',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
('manufacturer', models.CharField(max_length=128)),
('GPU_manufacturer', models.CharField(max_length=128)),
('video_memory', models.IntegerField()),
('memory_clock', models.IntegerField()),
('core_speed', models.IntegerField()),
('boost_speed', models.IntegerField()),
('memory_type', models.CharField(max_length=128)),
('motherboard_connection', models.CharField(max_length=128)),
('power_supply', models.IntegerField()),
('picture', models.CharField(max_length=999999)),
('price', models.IntegerField()),
],
),
]
| mit | -6,903,305,332,872,117,000 | 35.147059 | 114 | 0.554109 | false | 4.585821 | false | false | false |
LarryHillyer/PoolHost | PoolHost/nfl/division/viewmodels.py | 1 | 12639 | from datetime import datetime
from django.db import models
from app.models import SiteUser, NFL_Division, NFL_Conference, League, Sport, SuperUser
from app.models import NFL_Conference_Choices
from app.mixins import HelperMixins
from nfl.division.forms import NFL_DivisionForm_Create, NFL_DivisionForm_Edit
class Layout_View(object):
def __init__(self, site_user, title):
self.viewmodel = {'site_user':site_user, # app/layout.html params
'title': title,
'year': datetime.now().year,}
class Index_Body_View(Layout_View):
def __init__(self, site_user, title, modelstate, modelsuccess_bool, conferences, filter,
conference_id):
super().__init__(site_user, title)
self.viewmodel['partial_view_id'] = 'division-id'
self.viewmodel['conference_id'] = conference_id
self.viewmodel['filter' ] = filter
self.viewmodel['index_url'] = 'nfl:division:index'
self.viewmodel['pagination_routing_html'] = 'nfl/nfl_pagination_routing.html'
self.viewmodel['conference_pagination_list_html'] = 'division/conference_pagination_list.html'
self.viewmodel['shared_conference_pagination_list_html'] = 'nfl/shared_conference_pagination_list.html'
self.viewmodel['shared_division_pagination_list_html'] = 'nfl/shared_division_pagination_list.html'
self.viewmodel['conference_pagination_link_html'] = 'division/conference_pagination_link.html'
self.viewmodel['division_pagination_link_html'] = 'division/division_pagination_link.html'
self.viewmodel['conferences'] = conferences
self.viewmodel['modelsuccess_bool'] = modelsuccess_bool
self.viewmodel['modelstate'] = modelstate
self.viewmodel['modelstate_html'] = 'app/modelstatus.html'
self.viewmodel['create_url'] = 'nfl:division:create'
self.viewmodel['create_link_name'] = 'Create Division'
self.viewmodel['create_link_html'] = 'division/create_link.html'
self.viewmodel['shared_create_link_html'] = 'app/shared_create_link.html'
self.viewmodel['index_table_html'] = 'division/index_table.html'
self.viewmodel['home_url'] = 'nfl:home'
self.viewmodel['scripts'] = ['app/scripts/Client/TableStripping.js']
class Form_Body_View(Layout_View):
def __init__(self, site_user, title, modelstate, modelsuccess_bool, form, filter,
conference_id):
super().__init__(site_user, title)
self.viewmodel['partial_view_id'] = 'division-id'
self.viewmodel['conference_id'] = conference_id
self.viewmodel['filter'] = filter
self.viewmodel['form'] = form
self.viewmodel['form_label_name'] = 'Division'
self.viewmodel['form_label_conference'] = 'Conference'
self.viewmodel['modelsuccess_bool'] = modelsuccess_bool
self.viewmodel['modelstate'] = modelstate
self.viewmodel['modelstate_html'] = 'app/modelstatus.html'
self.viewmodel['index_url'] = 'nfl:division:index'
self.viewmodel['index_link_html'] = 'division/index_link.html'
self.viewmodel['shared_index_link_html'] = 'app/shared_index_link.html'
self.viewmodel['scripts'] = ['app/scripts/jquery.validate.js']
class Details_Delete_Body_View(Layout_View):
def __init__(self, site_user, title, modelstate, modelsuccess_bool,
division, filter, conference_id):
super().__init__(site_user, title)
self.viewmodel['partial_view_id'] = 'division-id'
self.viewmodel['division_id'] = division.id
self.viewmodel['conference_id'] = conference_id
self.viewmodel['filter'] = filter
self.viewmodel['descriptive_list'] = 'division/descriptive_list.html'
self.viewmodel['modelsuccess_bool'] = modelsuccess_bool
self.viewmodel['modelstate'] = modelstate
self.viewmodel['modelstate_html'] = 'app/modelstatus.html'
self.viewmodel['index_url'] = 'nfl:division:index'
class Table_View(Index_Body_View):
def __init__(self, site_user, title, modelstate, modelsuccess_bool, divisions, conferences,
filter, conference_id):
super().__init__(site_user, title, modelstate, modelsuccess_bool, conferences, filter,
conference_id)
self.viewmodel['items'] = divisions
self.viewmodel['header_label_item'] = 'Division'
self.viewmodel['header_label_conference'] = 'Conference'
self.viewmodel['item_url'] = 'nfl:division:index'
self.viewmodel['edit_url'] = 'nfl:division:edit'
self.viewmodel['details_url'] = 'nfl:division:details'
self.viewmodel['delete_url'] = 'nfl:division:delete'
class DescriptiveList_View(Details_Delete_Body_View):
def __init__(self, site_user, title, modelstate, modelsuccess_bool,
division, filter, conference_id):
super().__init__(site_user, title, modelstate, modelsuccess_bool,
division, filter, conference_id)
self.viewmodel['item'] = division
self.viewmodel['item_label_name'] = 'Conference'
self.viewmodel['item_label_league_name'] = 'League'
self.viewmodel['item_label_sport_name'] = 'Sport'
class Create_View(Form_Body_View):
def __init__(self, site_user, title, modelstate, modelsuccess_bool, form, filter,
conference_id):
super().__init__(site_user, title, modelstate, modelsuccess_bool, form, filter,
conference_id)
self.viewmodel['form_template_html'] = 'division/create_form.html'
self.viewmodel['form_create_html'] = 'app/shared_create_form.html'
self.viewmodel['form_html'] = 'division/division_form.html'
self.viewmodel['form_url'] = 'nfl:division:create'
self.viewmodel['form_label_submit'] = 'Create'
class Edit_View(Form_Body_View):
def __init__(self, site_user, title, modelstate, modelsuccess_bool, form, filter,
division_id, conference_id):
super().__init__(site_user, title, modelstate, modelsuccess_bool, form, filter,
conference_id)
self.viewmodel['division_id'] = division_id
self.viewmodel['form_template_html'] = 'division/edit_form.html'
self.viewmodel['form_edit_html'] = 'app/shared_edit_form.html'
self.viewmodel['form_html'] = 'division/division_form.html'
self.viewmodel['form_url'] = 'nfl:division:edit'
self.viewmodel['form_label_submit'] = 'Edit'
class Details_View(DescriptiveList_View):
def __init__(self, site_user, title, modelstate, modelsuccess_bool,
division, filter, conference_id):
super().__init__(site_user, title, modelstate, modelsuccess_bool,
division, filter, conference_id)
self.viewmodel['details_links_html'] = 'division/details_links.html'
self.viewmodel['edit_url'] = 'nfl:division:edit'
class Delete_View(DescriptiveList_View):
def __init__(self, site_user, title, modelstate, modelsuccess_bool,
division, filter, conference_id):
super().__init__(site_user, title, modelstate, modelsuccess_bool,
division, filter, conference_id)
self.viewmodel['delete_form'] = 'division/delete_form.html'
self.viewmodel['delete_url'] = 'nfl:division:delete'
self.viewmodel['shared_delete_form_html'] = 'app/shared_delete_form.html'
class SuperUser_Index(Table_View):
def __init__(self, site_user, title, modelstate, modelsuccess_bool, divisions, conferences,
filter, conference_id):
super().__init__(site_user, title, modelstate, modelsuccess_bool, divisions, conferences,
filter, conference_id)
self.viewmodel['use_pagination'] = True
@classmethod
def get_index_viewmodel(cls, site_user, title, modelstate, filter, conference_id):
modelstate, modelsuccess_bool = League.get_modelstate(modelstate)
conference_id, divisions = SuperUser_Index.get_viewmodel_parameters_by_state(filter, conference_id)
conferences = NFL_Conference.get_all_items(NFL_Conference)
viewmodel = SuperUser_Index(site_user, title, modelstate, modelsuccess_bool,
divisions, conferences, filter, conference_id).viewmodel
return viewmodel
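    # Sketch of how a Django view might consume this classmethod; the view
    # function name, template path and render() call are assumptions, not
    # part of this module:
    #   def index(request, filter=0, conference_id=0):
    #       viewmodel = SuperUser_Index.get_index_viewmodel(
    #           site_user, 'NFL Divisions', '', int(filter), int(conference_id))
    #       return render(request, 'division/index.html', viewmodel)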
@classmethod
def get_viewmodel_parameters_by_state(cls, filter, conference_id):
if filter == 0:
conference_id = 0
divisions = NFL_Division.get_all_items(NFL_Division)
elif filter == 1:
conferences = NFL_Conference.get_all_items(NFL_Conference)
if conferences.count() == 0:
divisions = []
return conference_id, divisions
if conference_id == 0:
conference_id = NFL_Conference.get_conference_id_if_needed_and_possible(conferences, conference_id)
divisions = NFL_Division.get_items_by_conference_id(NFL_Division, conference_id)
return conference_id, divisions
class SuperUser_Create(Create_View):
def __init__(self, site_user, title, modelstate, modelsuccess_bool, form, filter,
conference_id):
super().__init__(site_user, title, modelstate, modelsuccess_bool, form, filter,
conference_id)
@classmethod
def get_create_viewmodel(cls, site_user, title, modelstate, filter, conference_id, form):
modelstate, modelsuccess_bool = League.get_modelstate(modelstate)
conferences = NFL_Conference.get_all_items(NFL_Conference)
NFL_Conference_Choices.get_choices_by_conferences(conferences)
if form == None:
form = NFL_DivisionForm_Create(initial={'conference_id': conference_id,
'filter' : filter})
viewmodel = SuperUser_Create(site_user, title, modelstate, modelsuccess_bool, form, filter,
conference_id).viewmodel
return viewmodel
class SuperUser_Edit(Edit_View):
def __init__(self, site_user, title, modelstate, modelsuccess_bool, form, filter,
division_id, conference_id):
super().__init__(site_user, title, modelstate, modelsuccess_bool, form, filter,
division_id, conference_id)
@classmethod
def get_edit_viewmodel(cls, site_user, title, modelstate, filter, division_id, conference_id,
form):
modelstate, modelsuccess_bool = NFL_Division.get_modelstate(modelstate)
division = NFL_Division.get_item_by_id(NFL_Division, division_id)
if form == None:
form = NFL_DivisionForm_Edit(initial = {'id': division.id,
'name': division.name,
'conference_id': division.conference_id,
'filter':filter})
viewmodel = SuperUser_Edit(site_user, title, modelstate, modelsuccess_bool, form, filter,
division_id, conference_id).viewmodel
return viewmodel
class SuperUser_Details(Details_View):
def __init__(self, site_user, title, modelstate, modelsuccess_bool,
division, filter, conference_id):
super().__init__(site_user, title, modelstate, modelsuccess_bool,
division, filter, conference_id)
@classmethod
def get_details_viewmodel(cls, site_user, title, modelstate, filter, division_id,
conference_id):
modelstate, modelsuccess_bool = League.get_modelstate(modelstate)
division = NFL_Division.get_item_by_id(NFL_Division, division_id)
viewmodel = SuperUser_Details(site_user, title, modelstate, modelsuccess_bool,
division, filter, conference_id).viewmodel
return viewmodel
class User_Delete(Delete_View):
def __init__(self, site_user, title, modelstate, modelsuccess_bool,
division, filter, conference_id):
super().__init__(site_user, title, modelstate, modelsuccess_bool,
division, filter, conference_id)
@classmethod
def get_delete_viewmodel(cls, site_user, title, modelstate, filter,
division_id, conference_id):
modelstate, modelsuccess_bool = League.get_modelstate(modelstate)
division = NFL_Division.get_item_by_id(NFL_Division, division_id)
viewmodel = User_Delete(site_user, title, modelstate, modelsuccess_bool,
division, filter, conference_id).viewmodel
return viewmodel
| gpl-3.0 | 2,366,950,305,864,144,000 | 37.18429 | 122 | 0.638658 | false | 3.71626 | false | false | false |
gopal1cloud/neutron | neutron/db/migration/alembic_migrations/versions/33dd0a9fa487_embrane_lbaas_driver.py | 5 | 1871 | # Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""embrane_lbaas_driver
Revision ID: 33dd0a9fa487
Revises: 19180cf98af6
Create Date: 2014-02-25 00:15:35.567111
"""
# revision identifiers, used by Alembic.
revision = '33dd0a9fa487'
down_revision = '19180cf98af6'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.services.loadbalancer.plugin.LoadBalancerPlugin'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.create_table(
u'embrane_pool_port',
sa.Column(u'pool_id', sa.String(length=36), nullable=False),
sa.Column(u'port_id', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['pool_id'], [u'pools.id'],
name=u'embrane_pool_port_ibfk_1'),
sa.ForeignKeyConstraint(['port_id'], [u'ports.id'],
name=u'embrane_pool_port_ibfk_2'),
sa.PrimaryKeyConstraint(u'pool_id'))
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.drop_table(u'embrane_pool_port')
| apache-2.0 | -558,871,082,599,062,300 | 30.711864 | 78 | 0.687867 | false | 3.550285 | false | false | false |
rbaumg/trac | trac/wiki/intertrac.py | 1 | 4800 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2019 Edgewall Software
# Copyright (C) 2005-2006 Christian Boos <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christian Boos <[email protected]>
import re
from trac.config import ConfigSection
from trac.core import *
from trac.util.html import Element, Fragment, find_element, tag
from trac.util.translation import N_, _, tag_
from trac.web.api import IRequestHandler
from trac.wiki.api import IWikiMacroProvider
from trac.wiki.formatter import extract_link
class InterTracDispatcher(Component):
"""InterTrac dispatcher."""
implements(IRequestHandler, IWikiMacroProvider)
is_valid_default_handler = False
intertrac_section = ConfigSection('intertrac',
"""This section configures InterTrac prefixes. Option names in
this section that contain a `.` are of the format
`<name>.<attribute>`. Option names that don't contain a `.` define
an alias.
The `.url` attribute is mandatory and is used for locating the
other Trac. This can be a relative path when the other Trac
environment is located on the same server.
The `.title` attribute is used for generating a tooltip when the
cursor is hovered over an InterTrac link.
Example configuration:
{{{#!ini
[intertrac]
# -- Example of setting up an alias:
t = trac
# -- Link to an external Trac:
genshi.title = Edgewall's Trac for Genshi
genshi.url = http://genshi.edgewall.org
}}}
""")
# IRequestHandler methods
def match_request(self, req):
match = re.match(r'^/intertrac/(.*)', req.path_info)
if match:
if match.group(1):
req.args['link'] = match.group(1)
return True
def process_request(self, req):
link = req.args.get('link', '')
parts = link.split(':', 1)
if len(parts) > 1:
resolver, target = parts
if target[:1] + target[-1:] not in ('""', "''"):
link = '%s:"%s"' % (resolver, target)
from trac.web.chrome import web_context
link_frag = extract_link(self.env, web_context(req), link)
if isinstance(link_frag, (Element, Fragment)):
elt = find_element(link_frag, 'href')
if elt is None:
raise TracError(
_("Can't view %(link)s. Resource doesn't exist or "
"you don't have the required permission.", link=link))
href = elt.attrib.get('href')
else:
href = req.href(link.rstrip(':'))
req.redirect(href)
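    # Example (sketch): with the [intertrac] configuration shown in the section
    # docstring above, a request to /intertrac/genshi:ticket:123 is resolved via
    # extract_link() against the "genshi" prefix and redirected to the matching
    # page under http://genshi.edgewall.org; the ticket number is illustrative.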
# IWikiMacroProvider methods
def get_macros(self):
yield 'InterTrac'
def get_macro_description(self, name):
return 'messages', N_("Provide a list of known InterTrac prefixes.")
def expand_macro(self, formatter, name, content):
intertracs = {}
for key, value in self.intertrac_section.options():
idx = key.rfind('.')
if idx > 0: # 0 itself doesn't help much: .xxx = ...
prefix, attribute = key[:idx], key[idx+1:]
intertrac = intertracs.setdefault(prefix, {})
intertrac[attribute] = value
else:
intertracs[key] = value # alias
if 'trac' not in intertracs:
intertracs['trac'] = {'title': _('The Trac Project'),
'url': 'http://trac.edgewall.org'}
def generate_prefix(prefix):
intertrac = intertracs[prefix]
if isinstance(intertrac, basestring):
yield tag.tr(tag.td(tag.strong(prefix)),
tag.td(tag_("Alias for %(name)s",
name=tag.strong(intertrac))))
else:
url = intertrac.get('url', '')
if url:
title = intertrac.get('title', url)
yield tag.tr(tag.td(tag.a(tag.strong(prefix),
href=url + '/timeline')),
tag.td(tag.a(title, href=url)))
return tag.table(class_="wiki intertrac")(
tag.tr(tag.th(tag.em(_("Prefix"))),
tag.th(tag.em(_("Trac Site")))),
[generate_prefix(p) for p in sorted(intertracs)])
| bsd-3-clause | -6,706,267,280,268,340,000 | 36.5 | 76 | 0.575625 | false | 4.047218 | false | false | false |
arve0/leicascanningtemplate | leicascanningtemplate/template.py | 1 | 13128 | import time, re
from lxml import objectify, etree
from copy import deepcopy
class ScanningTemplate(object):
"""Python object of Leica LAS Matrix Screener Scanning Template XML.
Provides easy access to elements via attributes:
>>> tmpl = ScanningTemplate('{ScanningTemplate}tmpl.xml')
>>> # attributes of MatrixScreenerTemplate/ScanningTemplate/Properties
>>> print(tmpl.properties.attrib)
Parameters
----------
filename : str
XML to load.
Attributes
----------
filename : str
Path XML-filename.
root : lxml.objectify.ObjectifiedElement
Objectified root of loaded XML.
See http://lxml.de/objectify.html#the-lxml-objectify-api
"""
def __init__(self, filename):
self.filename = filename
tree = objectify.parse(filename)
self.root = tree.getroot()
@property
def properties(self):
"Short hand for ``self.root.ScanningTemplate.Properties``"
return self.root.ScanningTemplate.Properties
# WELLS
@property
def well_array(self):
"Short hand for ``self.root.ScanWellArray``"
return self.root.ScanWellArray
@property
def wells(self):
"""All ScanWellData elements.
Returns
-------
list of objectify.ObjectifiedElement
"""
try:
return self.root.ScanWellArray.ScanWellData[:]
except AttributeError:
return []
def well_fields(self, well_x=1, well_y=1):
"""All ScanFieldData elements of given well.
Parameters
----------
well_x : int
well_y : int
Returns
-------
list of lxml.objectify.ObjectifiedElement
All ScanFieldData elements of given well.
"""
xpath = './ScanFieldArray/ScanFieldData'
xpath += _xpath_attrib('WellX', well_x)
xpath += _xpath_attrib('WellY', well_y)
return self.root.findall(xpath)
def well(self, well_x=1, well_y=1):
"""ScanWellData of specific well.
Parameters
----------
well_x : int
well_y : int
Returns
-------
lxml.objectify.ObjectifiedElement
"""
xpath = './ScanWellData'
xpath += _xpath_attrib('WellX', well_x)
xpath += _xpath_attrib('WellY', well_y)
# assume we find only one
return self.well_array.find(xpath)
def well_attrib(self, well_x=1, well_y=1):
"""Attributes of specific well.
Parameters
----------
well_x : int
well_y : int
Returns
-------
dict
Attributes of ScanWellArray/ScanWellData.
"""
return self.well(well_x, well_y).attrib
# FIELDS
@property
def field_array(self):
"Short hand for ``self.root.ScanFieldArray``"
return self.root.ScanFieldArray
@property
def fields(self):
"""All ScanFieldData elements.
Returns
-------
list of objectify.ObjectifiedElement
"""
try:
return self.root.ScanFieldArray.ScanFieldData[:]
except AttributeError:
return []
def field(self, well_x=1, well_y=1, field_x=1, field_y=1):
"""ScanFieldData of specified field.
Parameters
----------
well_x : int
well_y : int
field_x : int
field_y : int
Returns
-------
lxml.objectify.ObjectifiedElement
ScanFieldArray/ScanFieldData element.
"""
xpath = './ScanFieldArray/ScanFieldData'
xpath += _xpath_attrib('WellX', well_x)
xpath += _xpath_attrib('WellY', well_y)
xpath += _xpath_attrib('FieldX', field_x)
xpath += _xpath_attrib('FieldY', field_y)
# assume we find only one
return self.root.find(xpath)
def update_start_position(self):
"Set start position of experiment to position of first field."
x_start = self.field_array.ScanFieldData.FieldXCoordinate
y_start = self.field_array.ScanFieldData.FieldYCoordinate
# empty template have all fields positions set to zero
# --> avoid overwriting start position
if x_start != 0 and y_start != 0:
self.properties.ScanFieldStageStartPositionX = int(x_start * 1e6) # in um
self.properties.ScanFieldStageStartPositionY = int(y_start * 1e6)
def update_well_positions(self):
"""Set ``well_attrib['FieldXStartCoordinate']`` and
``well_attrib['FieldYStartCoordinate']`` to FieldXCoordinate and
FieldYCoordinate of first field in well.
"""
for well in self.wells:
well_x = well.attrib['WellX']
well_y = well.attrib['WellY']
first_field = self.well_fields(well_x, well_y)[0]
x_start = first_field.FieldXCoordinate.text
y_start = first_field.FieldYCoordinate.text
well.attrib['FieldXStartCoordinate'] = x_start
well.attrib['FieldYStartCoordinate'] = y_start
@property
def count_of_wells(self):
"""Number of wells in x/y-direction of template.
Returns
-------
tuple
(xs, ys) number of wells in x and y direction.
"""
xs = set([w.attrib['WellX'] for w in self.wells])
ys = set([w.attrib['WellY'] for w in self.wells])
return (len(xs), len(ys))
@property
def count_of_assigned_jobs(self):
"Number of fields that have attrib['JobAssigned'] set to true."
assigned = len([x.attrib['JobAssigned'] for x in self.fields
if x.attrib['JobAssigned'] == 'true'])
return assigned
def update_counts(self):
"Update counts of fields and wells."
# Properties.attrib['TotalCountOfFields']
fields = str(len(self.fields))
self.properties.attrib['TotalCountOfFields'] = fields
# Properties.CountOfWellsX/Y
wx, wy = (str(x) for x in self.count_of_wells)
self.properties.CountOfWellsX = wx
self.properties.CountOfWellsY = wy
# Properties.attrib['TotalCountOfWells']
wells = str(len(self.wells))
self.properties.attrib['TotalCountOfWells'] = wells
# Properties.attrib['TotalAssignedJobs']
self.properties.attrib['TotalAssignedJobs'] = str(self.count_of_assigned_jobs)
def remove_well(self, well_x, well_y):
"""Remove well and associated scan fields.
Parameters
----------
well_x : int
well_y : int
Raises
------
AttributeError
If well not found.
"""
well = self.well(well_x, well_y)
if well == None:
raise AttributeError('Well not found')
self.well_array.remove(well)
# remove associated fields
fields = self.well_fields(well_x, well_y)
for f in fields:
self.field_array.remove(f)
def well_exists(self, well_x, well_y):
"Check if well exists in ScanWellArray."
return self.well(well_x, well_y) != None
def field_exists(self, well_x, well_y, field_x, field_y):
"Check if field exists ScanFieldArray."
return self.field(well_x, well_y, field_x, field_y) != None
def add_well(self, well_x, well_y, start_x, start_y):
"""Add well with associated scan fields. ``self.wells[0]`` and
``self.fields[0]`` will be used as base. ScanWellData will be added to
ScanWellArray and ScanFieldData to ScanFieldArray. The amount of fields
added is decided by Properties/CountOfScanFields.
Parameters
----------
well_x : int
well_y : int
start_x : int
In meters. FieldXCoordinate of first field in well.
start_y : int
In meters. FieldYCoordinate of first field in well.
Raises
------
ValueError
If well or fields already exists.
"""
# raise ValueError if well already exists
if self.well_exists(well_x, well_y):
raise ValueError('Well already exists in ScanWellArray')
if len(self.well_fields(well_x, well_y)) != 0:
raise ValueError('Fields belonging to well already exists in ScanFieldArray')
base_well = deepcopy(self.wells[0])
# append well to ScanWellArray
base_well.attrib['WellX'] = str(well_x)
base_well.attrib['WellY'] = str(well_y)
base_well.attrib['FieldXStartCoordinate'] = str(start_x)
base_well.attrib['FieldYStartCoordinate'] = str(start_y)
self.well_array.append(base_well)
# append fields to ScanFieldArray
x_fields = int(self.properties.CountOfScanFieldsX)
y_fields = int(self.properties.CountOfScanFieldsY)
x_dist = float(self.properties.ScanFieldStageDistanceX) * 1e-6 # in um
y_dist = float(self.properties.ScanFieldStageDistanceY) * 1e-6
x_label = str(self.properties.TextWellPlateHorizontal[well_x - 1])
y_label = str(self.properties.TextWellPlateVertical[well_y - 1])
for i in range(x_fields):
for j in range(y_fields):
base_field = deepcopy(self.fields[0])
base_field.FieldXCoordinate = start_x + i*x_dist
base_field.FieldYCoordinate = start_y + j*y_dist
base_field.attrib['WellX'] = str(well_x)
base_field.attrib['WellY'] = str(well_y)
base_field.attrib['FieldX'] = str(i+1)
base_field.attrib['FieldY'] = str(j+1)
base_field.attrib['LabelX'] = x_label
base_field.attrib['LabelY'] = y_label
self.field_array.append(base_field)
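    # Example (sketch): indices and coordinates below are made up; start_x and
    # start_y are in meters, like FieldXCoordinate/FieldYCoordinate.
    #   tmpl = ScanningTemplate('{ScanningTemplate}tmpl.xml')
    #   if not tmpl.well_exists(2, 3):
    #       tmpl.add_well(2, 3, start_x=0.0125, start_y=0.0093)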
def move_well(self, well_x, well_y, start_x, start_y):
"""Move well and associated scan fields. Spacing between
fields will be what Properties/ScanFieldStageDistance is set to.
Parameters
----------
well_x : int
well_y : int
start_x : int
In meters. FieldXCoordinate of first field in well.
start_y : int
In meters. FieldYCoordinate of first field in well.
Raises
------
ValueError
If specified well or associated fields does not exist.
"""
# raise ValueError if well or fields doesnt exist
if not self.well_exists(well_x, well_y):
raise ValueError('Well not found in ScanWellArray')
fields = self.well_fields(well_x, well_y)
if len(fields) == 0:
raise ValueError('Fields belonging to well not found in ScanFieldArray')
well = self.well(well_x, well_y)
# update well start coordinate
well.attrib['FieldXStartCoordinate'] = str(start_x)
well.attrib['FieldYStartCoordinate'] = str(start_y)
# update fields coordinates
x_dist = float(self.properties.ScanFieldStageDistanceX) * 1e-6 # in um
y_dist = float(self.properties.ScanFieldStageDistanceY) * 1e-6
for field in fields:
i = int(field.attrib['FieldX'])
j = int(field.attrib['FieldY'])
field.FieldXCoordinate = start_x + (i - 1)*x_dist
field.FieldYCoordinate = start_y + (j - 1)*y_dist
def write(self, filename=None):
"""Save template to xml. Before saving template will update
date, start position, well positions, and counts.
Parameters
----------
filename : str
If not set, XML will be written to self.filename.
"""
if not filename:
filename = self.filename
# update time
self.properties.CurrentDate = _current_time()
# set rubber band to true
self.properties.EnableRubberBand = 'true'
# update start position
self.update_start_position()
# update well postions
self.update_well_positions()
# update counts
self.update_counts()
# remove py:pytype attributes
objectify.deannotate(self.root)
# remove namespaces added by lxml
for child in self.root.iterchildren():
etree.cleanup_namespaces(child)
xml = etree.tostring(self.root, encoding='utf8',
xml_declaration=True, pretty_print=True)
# fix format quirks
# add carriage return character
xml = u'\r\n'.join(l.decode(encoding='utf8') for l in xml.splitlines())
# add space at "end/>" --> "end />"
xml = re.sub(r'(["a-z])/>', r'\1 />', xml)
xml = xml.replace("version='1.0' encoding='utf8'", 'version="1.0"')
with open(filename, 'wb') as f:
f.write(xml.encode('utf8'))
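# Typical round trip (sketch; filenames and coordinates are hypothetical):
#   tmpl = ScanningTemplate('{ScanningTemplate}tmpl.xml')
#   tmpl.move_well(1, 1, start_x=0.01, start_y=0.01)
#   tmpl.write('{ScanningTemplate}moved.xml')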
def _current_time():
"Time formatted as `Monday, February 09, 2015 | 8:12 PM`"
return time.strftime('%A, %B %d, %Y | %I:%M %p')
def _xpath_attrib(attrib, value):
"""Returns string ``[@attrib="value"]``.
"""
return '[@' + str(attrib) + '="' + str(value) + '"]'
| mit | -6,438,428,709,144,034,000 | 29.744731 | 89 | 0.58082 | false | 3.90598 | false | false | false |
Nithanaroy/DistributedDataPartitioning | MetaDataDAO.py | 1 | 2144 | """
Has the MetaData about the partitioning
"""
import Globals
TABLENAME = 'patitionmeta'
def create(conn):
"""
Create a MetaData table if it does not exist
:param conn: open connection to DB
:return:None
"""
with conn.cursor() as cur:
cur.execute("""
CREATE TABLE IF NOT EXISTS {0}(
KEY VARCHAR(50),
VALUE VARCHAR(50)
)
""".format(TABLENAME))
if Globals.DEBUG and Globals.DATABASE_QUERIES_DEBUG: Globals.printquery(cur.query)
def upsert(conn, key, value):
"""
Inserts a given (key, value) pair into meta data table if not present, else updates the value of the key
:param conn: open connection to DB
:param key: Key to insert / update
:param value: Value to insert / update
:return:None
"""
with conn.cursor() as cur:
cur.execute("SELECT value FROM {0} WHERE KEY = '{1}'".format(TABLENAME, key))
keyvalue = cur.fetchone()
if keyvalue is None:
cur.execute("INSERT INTO {0} VALUES ('{1}', '{2}')".format(TABLENAME, key, value))
if Globals.DEBUG and Globals.DATABASE_QUERIES_DEBUG: Globals.printquery(cur.query)
else:
cur.execute("UPDATE {0} SET VALUE = '{1}' WHERE KEY = '{2}'".format(TABLENAME, value, key))
if Globals.DEBUG and Globals.DATABASE_QUERIES_DEBUG: Globals.printquery(cur.query)
def select(conn, key):
"""
Fetches the value of a given key from meta data table
:param conn: open connection to DB
:param key: Key to fetch
:return:value of key if present, else None
"""
with conn.cursor() as cur:
cur.execute("SELECT value FROM {0} WHERE KEY = '{1}'".format(TABLENAME, key))
if Globals.DEBUG and Globals.DATABASE_QUERIES_DEBUG: Globals.printquery(cur.query)
keyvalue = cur.fetchone()
if keyvalue is not None: return keyvalue[0]
return None
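# Usage sketch (assumes an open psycopg2-style connection ``conn``; the key and
# value below are made up):
#   create(conn)
#   upsert(conn, 'partition-method', 'range')
#   method = select(conn, 'partition-method')  # -> 'range', or None if missing
#   conn.commit()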
def drop(conn):
"""
Drops the table
:param conn: open connection to DB
:return:None
"""
with conn.cursor() as cur:
cur.execute('drop table if exists {0};'.format(TABLENAME)) | mit | -7,550,101,559,104,451,000 | 31.014925 | 108 | 0.621269 | false | 3.702936 | false | false | false |
gasserma/dwr | strategies/hebeler_autopilot.py | 1 | 2496 | from strategies.strategy_base import StrategyBase
# https://www.irs.gov/publications/p590b/index.html#en_US_2014_publink1000231236
# age................0.....1.....2.....3.....4.....5.....6.....7.....8.....9
lifeExpectancy = [82.4, 81.6, 80.6, 79.7, 78.7, 77.7, 76.7, 75.8, 74.8, 73.8, # x 0
72.8, 71.8, 70.8, 69.9, 68.9, 67.9, 66.9, 66.0, 65.0, 64.0, # x 10
63.0, 62.1, 61.1, 60.1, 59.1, 58.2, 57.2, 56.2, 55.3, 54.3, # x 20
53.3, 52.4, 51.4, 50.4, 49.4, 48.5, 47.5, 46.5, 45.6, 44.6, # x 30
43.6, 42.7, 41.7, 40.7, 39.8, 38.8, 37.9, 37.0, 36.0, 35.1, # x 40
34.2, 33.3, 32.3, 31.4, 30.5, 29.6, 28.7, 27.9, 27.0, 26.1, # x 50
25.2, 24.4, 23.5, 22.7, 21.8, 21.0, 20.2, 19.4, 18.6, 17.8, # x 60
17.0, 16.3, 15.5, 14.8, 14.1, 13.4, 12.7, 12.1, 11.4, 10.8, # x 70
10.2, 9.7, 9.1, 8.6, 8.1, 7.6, 7.1, 6.7, 6.3, 5.9, # x 80
5.5, 5.2, 4.9, 4.6, 4.3, 4.1, 3.8, 3.6, 3.4, 3.1, # x 90
2.9, 2.7, 2.5, 2.3, 2.1, 1.9, 1.7, 1.5, 1.4, 1.2, # x 10
1.1, 1.0] # x 110
'''
http://www.marketwatch.com/story/put-retirement-savings-withdrawals-on-autopilot-2013-07-24
'''
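# The rule implemented below blends two amounts each year: half of the
# inflation-adjusted first-year withdrawal (4% of the starting portfolio) and
# half of an RMD-style amount, last year's balance divided by the IRS life
# expectancy for the current age. Worked example with made-up numbers: with a
# $500,000 start (initial withdrawal $20,000), a cumulative inflation factor
# of 1.02 and a $480,000 balance at age 65 (life expectancy 21.0), the next
# withdrawal is 0.5*1.02*20,000 + 0.5*480,000/21.0 = 10,200 + 11,429 ~= $21,629.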
from strategies.strategy_base import YearlyStrategyBase
class HebelerAuto(YearlyStrategyBase):
def __init__(self, age):
self.resetAge = age
def getInitialWithDrawal(self):
return self.initialAmount
def getCurrentWithdrawalAmount(self):
return self.lastYearsWithdrawal # using the parlance of the hebeler paper.
def yearBaseReset(self, portfolio):
self.portfolio = portfolio
self.initialAmount = self.portfolio.value * .04
self.lastYearsWithdrawal = self.initialAmount
self.lastYearsAmount = self.portfolio.value
self.age = self.resetAge
def yearWithdraw(self, inflationRate):
withdrawal = .5 * inflationRate * self.getInitialWithDrawal()
withdrawal += .5 * self.lastYearsAmount / lifeExpectancy[self.age]
self.lastYearsWithdrawal = withdrawal
self.age += 1
w = self.portfolio.withdraw(withdrawal)
self.lastYearsAmount = self.yearGetPortfolioValue()
return w
def yearGetPortfolioValue(self):
return self.portfolio.value
def yearGrow(self, yearGrowth):
self.portfolio.grow(yearGrowth) | gpl-3.0 | -2,088,315,451,977,527,000 | 45.240741 | 91 | 0.542468 | false | 2.570546 | false | false | false |
Danielhiversen/home-assistant | homeassistant/components/melissa.py | 1 | 1118 | """
Support for Melissa climate.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/melissa/
"""
import logging
import voluptuous as vol
from homeassistant.const import CONF_USERNAME, CONF_PASSWORD
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.discovery import load_platform
REQUIREMENTS = ["py-melissa-climate==1.0.6"]
_LOGGER = logging.getLogger(__name__)
DOMAIN = "melissa"
DATA_MELISSA = 'MELISSA'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
}),
}, extra=vol.ALLOW_EXTRA)
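# Example configuration.yaml entry (sketch; credentials are placeholders):
#
# melissa:
#   username: [email protected]
#   password: YOUR_PASSWORD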
def setup(hass, config):
"""Set up the Melissa Climate component."""
import melissa
conf = config[DOMAIN]
username = conf.get(CONF_USERNAME)
password = conf.get(CONF_PASSWORD)
api = melissa.Melissa(username=username, password=password)
hass.data[DATA_MELISSA] = api
load_platform(hass, 'sensor', DOMAIN, {}, config)
load_platform(hass, 'climate', DOMAIN, {}, config)
return True
| mit | 6,959,869,314,066,660,000 | 24.409091 | 74 | 0.711986 | false | 3.49375 | true | false | false |
janvc/utility-scripts | scripts/therm_crosscorr.py | 1 | 5411 | #!/usr/bin/env python3
# Copyright 2017 Jan von Cosel
#
# This file is part of utility-scripts.
#
# utility-scripts is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# utility-scripts is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with utility-scripts. If not, see <http://www.gnu.org/licenses/>.
#
#
# create wavefunctions for specific vibrational states and use them to
# calculate cross-correlation functions for IVR analysis
# ----- RPTWF version -----
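# usage sketch (argument order taken from main() below):
#   therm_crosscorr.py <psi-name-dir> <ref-single-layer.inp> <ref-multi-layer.inp> <print-threshold> [-c]
# the optional -c flag (re)computes the excited-state wavefunctions and the
# cross-correlation files; the gnuplot plot file is always regenerated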
import sys
import re
import multiprocessing as mp
import subprocess
import os
import shutil
mctdhExe = "/home/jvcosel/mctdh85.5/bin/binary/x86_64/mctdh85"
corrExe = "/home/jvcosel/mctdh85.5/bin/binary/x86_64/crosscorr85"
def main():
nameDir = sys.argv[1]
refSLInput = sys.argv[2]
refMLInput = sys.argv[3]
prtThres = float(sys.argv[4])
refSLFile = open(refSLInput, "r")
refMLFile = open(refMLInput, "r")
refSLData = refSLFile.readlines()
refMLData = refMLFile.readlines()
refSLFile.close()
refMLFile.close()
# get the dimensionality of the system from the propagation:
with open(nameDir + "/op.log") as opFile:
for line in opFile:
if "ndof =" in line:
nDof = int(line.split()[2])
if len(sys.argv) == 6 and sys.argv[5] == "-c":
create_corrs(nDof, refSLData, refMLData, nameDir, refMLInput)
# create a gnuplot file to show the correlation functions:
gnuplotName = "thermcorrelations.plt"
gnuplotFile = open(gnuplotName, "w")
gnuplotFile.write("plot 'thermcorr_gs.dat' u 1:4 w l")
for i in range(nDof):
for j in range(2,6):
currCorrName = "thermcorr_" + str(i+1).zfill(3) + "_" + str(j) + ".dat"
maxCorrVal = 0.0
with open(currCorrName) as corrFile:
for line in corrFile:
if not "#" in line:
if float(line.split()[3]) > maxCorrVal:
maxCorrVal = float(line.split()[3])
if maxCorrVal > prtThres:
writeString = ", '" + currCorrName + "' u 1:4 w l"
gnuplotFile.write(writeString)
gnuplotFile.write("\n")
gnuplotFile.close()
def create_corrs(nModes, refSLData, refMLData, nameDir, refMLInput):
pool = mp.Pool(processes=4)
for i in range(nModes):
for j in range(2,6):
pool.apply_async(func=run_calc, args=(i+1, j, refSLData, refMLData, nameDir))
pool.close()
pool.join()
# do the calculation with the global ground state:
refMLDir = os.path.splitext(refMLInput)[0]
MLgencommand = [mctdhExe, "-mnd", refMLInput]
subprocess.check_call(MLgencommand)
shutil.copy2(refMLDir + "/restart", "calc_gs.rst")
corrcommand = [corrExe, "-f", nameDir + "/psi", "-o", "thermcorr_gs.dat", "-r", "calc_gs.rst"]
shutil.rmtree(refMLDir)
subprocess.check_call(corrcommand)
os.remove("calc_gs.rst")
def run_calc(mode, state, refSLData, refMLData, psiDir):
newSLData = refSLData[:]
newMLData = refMLData[:]
# get the name-directory for the reference calculations:
for i in range(len(refSLData)):
if ("name" in refSLData[i] and "=" in refSLData[i] and not "opname" in refSLData[i]):
dirLine = i
if "file" in refSLData[i] and "=" in refSLData[i]:
excLine = i
baseName = "thermcalc_" + str(mode).zfill(3) + "_" + str(state)
corrName = "thermcorr_" + str(mode).zfill(3) + "_" + str(state) + ".dat"
SLinputFileName = baseName + "_sl.inp"
MLinputFileName = baseName + "_ml.inp"
SLinputWF = baseName + "_sl.rst"
MLinputWF = baseName + "_ml.rst"
newSLData[dirLine] = " name = " + baseName + "\n"
excString = " operate = excite_" + str(mode).zfill(3) + "\n"
for i in range(state-1):
newSLData.insert(excLine + 1,excString)
SLinputFile = open(SLinputFileName, "w")
for item in newSLData:
SLinputFile.write(item)
SLinputFile.close()
os.mkdir(baseName)
SLgencommand = [mctdhExe, "-w", SLinputFileName]
subprocess.check_call(SLgencommand)
shutil.copy2(baseName + "/restart", SLinputWF)
for i in range(len(refMLData)):
if "file" in refMLData[i] and "=" in refMLData[i]:
rstLine = i
break
newMLData[dirLine] = " name = " + baseName + "\n"
newMLData[rstLine] = " file = " + SLinputWF + "\n"
MLinputFile = open(MLinputFileName, "w")
for item in newMLData:
MLinputFile.write(item)
MLinputFile.close()
MLgencommand = [mctdhExe, "-w", MLinputFileName]
subprocess.check_call(MLgencommand)
shutil.copy2(baseName + "/restart", MLinputWF)
shutil.rmtree(baseName)
corrcommand = [corrExe, "-f", psiDir + "/psi", "-o", corrName, "-r", MLinputWF]
subprocess.check_call(corrcommand)
os.remove(SLinputWF)
os.remove(MLinputWF)
os.remove(SLinputFileName)
os.remove(MLinputFileName)
if __name__ == "__main__":
main()
| gpl-3.0 | -3,756,362,510,976,841,000 | 32.81875 | 98 | 0.634633 | false | 3.205569 | false | false | false |
francisc0garcia/autonomous_bicycle | src/classes/handlers/Vector3Handler.py | 1 | 1848 | import numpy as np
import rospy
from geometry_msgs.msg import Vector3Stamped
class Vector3Handler(object):
"""
Handler for ROS topics of type: geometry_msgs/Vector3Stamped
Args:
topic_name: Name of ROS topic to be subscribed
buffer_size: Variable buffer, depend on frame rate of topic, default: 500
queue_size: Subscriber queue_size
"""
def __init__(self, topic_name, buffer_size=500, queue_size=10):
self.vector_data = Vector3Stamped()
[self.vector_x, self.vector_y, self.vector_z] = [0.0, 0.0, 0.0]
self.topic_name = topic_name
self.queue_size = queue_size
self.buffer_size = buffer_size
self.counter = 0
self.buffer = np.zeros([self.buffer_size, 3])
self.sub = rospy.Subscriber(self.topic_name, Vector3Stamped, self.callback,
queue_size=self.queue_size)
def callback(self, msg):
self.vector_data = msg
if self.counter < self.buffer_size:
self.buffer[self.counter] = [self.vector_data.vector.x,
self.vector_data.vector.y,
self.vector_data.vector.z]
else:
rospy.loginfo("Vector3Handler for: " + self.topic_name + " has reach buffer size.")
self.counter += 1
def get_value(self):
if self.counter > 0:
self.vector_x = np.sum(self.buffer[:, 0]) / self.counter
self.vector_y = np.sum(self.buffer[:, 1]) / self.counter
self.vector_z = np.sum(self.buffer[:, 2]) / self.counter
else:
[self.vector_x, self.vector_y, self.vector_z] = [0.0, 0.0, 0.0]
self.buffer = np.zeros([self.buffer_size, 3])
self.counter = 0
return [self.vector_x, self.vector_y, self.vector_z]
| apache-2.0 | -3,546,464,600,652,254,700 | 34.538462 | 95 | 0.576299 | false | 3.581395 | false | false | false |
soh-i/Ivy | src/Ivy/cli/edit_bench_cli_opts.py | 1 | 2832 | from argparse import ArgumentParser
import os.path
import sys
from Ivy.version import __version__
from Ivy.analysis_settings import EDIT_BENCH_SETTINGS
__program__ = 'ivy_benchmark'
__author__ = 'Soh Ishiguro <[email protected]>'
__license__ = ''
__status__ = 'development'
def parse_bench_opts():
desc = "Benchmarking test for detected RNA editing sites based on HTSeq data to evaluate detection params."
parser = ArgumentParser(description=desc,
prog=__program__,
)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--vcf',
dest='vcf_file',
action='store',
nargs='+',
metavar='',
help='VCF file(s).')
group.add_argument('--csv',
dest='csv_file',
action='store',
nargs='+',
metavar='',
help='CSV file(s), for ***debug mode***.')
parser.add_argument('--source',
required=False,
default=EDIT_BENCH_SETTINGS['APP']['SOURCE'],
dest='source',
action='store',
metavar='',
help='To use specific sample/tissue/cell line. [default: {0}]'.format(
EDIT_BENCH_SETTINGS['APP']['SOURCE']))
parser.add_argument('--sp',
required=True,
default=EDIT_BENCH_SETTINGS['APP']['SP'],
dest='sp',
metavar='species',
action='store',
help='Species + genome version. (eg. human_hg19)')
parser.add_argument('--plot',
required=False,
default=EDIT_BENCH_SETTINGS['APP']['PLOT'],
action='store_true',
help='Make a precision-recall plot. [default: {0}]'.format(
EDIT_BENCH_SETTINGS['APP']['PLOT']))
parser.add_argument('--out',
dest='out',
default=EDIT_BENCH_SETTINGS['APP']['OUT'],
required=False,
action='store',
metavar='out',
help='Output file name. [default: {0}]'.format(
EDIT_BENCH_SETTINGS['APP']['OUT']))
parser.add_argument('--version',
action='version',
help='Show program version number and exit.',
version=__version__)
return parser.parse_args()
if __name__ == '__main__':
parse_bench_opts()
| gpl-2.0 | 5,171,551,590,040,019,000 | 40.647059 | 111 | 0.447034 | false | 4.994709 | false | false | false |
flavour/eden | modules/s3/s3track.py | 1 | 35210 | # -*- coding: utf-8 -*-
""" Simple Generic Location Tracking System
@copyright: 2011-2019 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from datetime import datetime, timedelta
from gluon import current, HTTP, FORM, INPUT, LABEL, TABLE
from gluon.storage import Storage
from s3dal import Table, Rows, Row
from .s3rest import S3Method
__all__ = ("S3Trackable",
"S3Tracker",
"S3CheckInMethod",
"S3CheckOutMethod",
)
UID = "uuid" # field name for UIDs
TRACK_ID = "track_id" # field name for track ID
LOCATION_ID = "location_id" # field name for base location
LOCATION = "gis_location" # location tablename
PRESENCE = "sit_presence" # presence tablename
# =============================================================================
class S3Trackable(object):
"""
Trackable types instance(s)
"""
def __init__(self, table=None, tablename=None, record=None, query=None,
record_id=None, record_ids=None, rtable=None):
"""
Constructor:
@param table: a Table object
@param tablename: a Str tablename
@param record: a Row object
@param query: a Query object
@param record_id: a record ID (if object is a Table)
@param record_ids: a list of record IDs (if object is a Table)
- these should be in ascending order
@param rtable: the resource table (for the recursive calls)
"""
db = current.db
s3db = current.s3db
self.records = []
self.table = s3db.sit_trackable
self.rtable = rtable
# if isinstance(trackable, (Table, str)):
# if hasattr(trackable, "_tablename"):
# table = trackable
# tablename = table._tablename
# else:
# table = s3db[trackable]
# tablename = trackable
# fields = self.__get_fields(table)
# if not fields:
# raise SyntaxError("Table %s is not a trackable type" % table._tablename)
# query = (table._id > 0)
# if uid is None:
# if record_id is not None:
# if isinstance(record_id, (list, tuple)):
# query = (table._id.belongs(record_id))
# else:
# query = (table._id == record_id)
# elif UID in table.fields:
# if not isinstance(uid, (list, tuple)):
# query = (table[UID].belongs(uid))
# else:
# query = (table[UID] == uid)
# fields = [table[f] for f in fields]
# rows = db(query).select(*fields)
if table or tablename:
if table:
tablename = table._tablename
else:
table = s3db[tablename]
fields = self.__get_fields(table)
if not fields:
raise SyntaxError("Not a trackable type: %s" % tablename)
if record_ids:
query = (table._id.belongs(record_ids))
limitby = (0, len(record_ids))
orderby = table._id
elif record_id:
query = (table._id == record_id)
limitby = (0, 1)
orderby = None
else:
query = (table._id > 0)
limitby = None
orderby = table._id
fields = [table[f] for f in fields]
rows = db(query).select(limitby=limitby, orderby=orderby, *fields)
# elif isinstance(trackable, Row):
# fields = self.__get_fields(trackable)
# if not fields:
# raise SyntaxError("Required fields not present in the row")
# rows = Rows(records=[trackable], compact=False)
elif record:
fields = self.__get_fields(record)
if not fields:
raise SyntaxError("Required fields not present in the row")
rows = Rows(records=[record], compact=False)
# elif isinstance(trackable, Rows):
# rows = [r for r in trackable if self.__get_fields(r)]
# fail = len(trackable) - len(rows)
# if fail:
# raise SyntaxError("Required fields not present in %d of the rows" % fail)
# rows = Rows(records=rows, compact=False)
# elif isinstance(trackable, (Query, Expression)):
# tablename = db._adapter.get_table(trackable)
# self.rtable = s3db[tablename]
# fields = self.__get_fields(self.rtable)
# if not fields:
# raise SyntaxError("Table %s is not a trackable type" % table._tablename)
# query = trackable
# fields = [self.rtable[f] for f in fields]
# rows = db(query).select(*fields)
elif query:
tablename = db._adapter.get_table(query)
self.rtable = s3db[tablename]
fields = self.__get_fields(self.rtable)
if not fields:
raise SyntaxError("Table %s is not a trackable type" % table._tablename)
fields = [self.rtable[f] for f in fields]
rows = db(query).select(*fields)
# elif isinstance(trackable, Set):
# query = trackable.query
# tablename = db._adapter.get_table(query)
# table = s3db[tablename]
# fields = self.__get_fields(table)
# if not fields:
# raise SyntaxError("Table %s is not a trackable type" % table._tablename)
# fields = [table[f] for f in fields]
# rows = trackable.select(*fields)
else:
raise SyntaxError("Invalid parameters")
records = []
for r in rows:
if self.__super_entity(r):
table = s3db[r.instance_type]
fields = self.__get_fields(table, super_entity=False)
if not fields:
raise SyntaxError("Table %s is not a trackable type" % table._tablename)
fields = [table[f] for f in fields]
row = db(table[UID] == r[UID]).select(limitby=(0, 1),
*fields).first()
if row:
records.append(row)
else:
records.append(r)
self.records = Rows(records=records, compact=False)
# -------------------------------------------------------------------------
@staticmethod
def __super_entity(trackable):
"""
Check whether a trackable is a super-entity
@param trackable: the trackable object
"""
if hasattr(trackable, "fields"):
keys = trackable.fields
else:
keys = trackable
return "instance_type" in keys
# -------------------------------------------------------------------------
@classmethod
def __get_fields(cls, trackable, super_entity=True):
"""
Check a trackable for presence of required fields
@param: the trackable object
"""
fields = []
if hasattr(trackable, "fields"):
keys = trackable.fields
else:
keys = trackable
if super_entity and \
cls.__super_entity(trackable) and UID in keys:
return ("instance_type", UID)
if LOCATION_ID in keys:
fields.append(LOCATION_ID)
if TRACK_ID in keys:
fields.append(TRACK_ID)
return fields
elif hasattr(trackable, "update_record") or \
isinstance(trackable, (Table, Row)):
return fields
return None
# -------------------------------------------------------------------------
def get_location(self,
timestmp=None,
_fields=None,
_filter=None,
as_rows=False,
exclude=None,
empty = True):
"""
Get the current location of the instance(s) (at the given time)
@param timestmp: last datetime for presence (defaults to current time)
@param _fields: fields to retrieve from the location records (None for ALL)
@param _filter: filter for the locations
@param as_rows: return the result as Rows object
@param exclude: interlocks to break at (avoids circular check-ins)
@param empty: return None if no locations (set to False by gis.get_location_data())
@return: a location record, or a list of location records (if multiple)
@ToDo: Also show Timestamp of when seen there
"""
if exclude is None:
exclude = []
db = current.db
s3db = current.s3db
ptable = s3db[PRESENCE]
ltable = s3db[LOCATION]
if timestmp is None:
timestmp = datetime.utcnow()
locations = []
for r in self.records:
location = None
if TRACK_ID in r:
query = ((ptable.deleted == False) & \
(ptable[TRACK_ID] == r[TRACK_ID]) & \
(ptable.timestmp <= timestmp))
presence = db(query).select(orderby=~ptable.timestmp,
limitby=(0, 1)).first()
if presence:
if presence.interlock:
exclude = [r[TRACK_ID]] + exclude
tablename, record_id = presence.interlock.split(",", 1)
trackable = S3Trackable(tablename=tablename, record_id=record_id)
record = trackable.records.first()
if TRACK_ID not in record or \
record[TRACK_ID] not in exclude:
location = trackable.get_location(timestmp=timestmp,
exclude=exclude,
_fields=_fields,
as_rows=True).first()
elif presence.location_id:
query = (ltable.id == presence.location_id)
if _filter is not None:
query = query & _filter
if _fields is None:
location = db(query).select(ltable.ALL,
limitby=(0, 1)).first()
else:
location = db(query).select(limitby=(0, 1),
*_fields).first()
if not location:
if len(self.records) > 1:
trackable = S3Trackable(record=r, rtable=self.rtable)
else:
trackable = self
location = trackable.get_base_location(_fields=_fields)
if location:
locations.append(location)
elif not empty:
# Ensure we return an entry for gis.get_location_data() so that indexes match
locations.append(Row({"lat": None, "lon": None}))
if as_rows:
return Rows(records=locations, compact=False)
if not locations:
return None
else:
return locations
# -------------------------------------------------------------------------
def set_location(self, location, timestmp=None):
"""
Set the current location of instance(s) (at the given time)
@param location: the location (as Row or record ID)
@param timestmp: the datetime of the presence (defaults to current time)
@return: location
"""
ptable = current.s3db[PRESENCE]
if timestmp is None:
timestmp = datetime.utcnow()
if isinstance(location, S3Trackable):
location = location.get_base_location()
if isinstance(location, Rows):
location = location.first()
if isinstance(location, Row):
if "location_id" in location:
location = location.location_id
else:
location = location.id
# Log even a set of no location
#if not location:
# return
#else:
data = dict(location_id=location, timestmp=timestmp)
for r in self.records:
if TRACK_ID not in r:
# No track ID => set base location
if len(self.records) > 1:
trackable = S3Trackable(record=r)
else:
trackable = self
trackable.set_base_location(location)
elif r[TRACK_ID]:
data.update({TRACK_ID:r[TRACK_ID]})
ptable.insert(**data)
self.__update_timestamp(r[TRACK_ID], timestmp)
return location
# -------------------------------------------------------------------------
def check_in(self, table, record, timestmp=None):
"""
Bind the presence of the instance(s) to another instance
@param table: table name of the other resource
@param record: record in the other resource (as Row or record ID)
@param timestmp: datetime of the check-in
@return: nothing
"""
db = current.db
s3db = current.s3db
ptable = s3db[PRESENCE]
if isinstance(table, str):
table = s3db[table]
fields = self.__get_fields(table)
if not fields:
raise SyntaxError("No location data in %s" % table._tablename)
interlock = None
if isinstance(record, Rows):
record = record.first()
if not isinstance(record, Row):
if not self.__super_entity(table):
fields = (table._id,)
record = db(table._id == record).select(limitby=(0, 1), *fields).first()
if self.__super_entity(record):
# Get the instance table
table = s3db[record.instance_type]
if not self.__get_fields(table, super_entity=False):
raise SyntaxError("No trackable type: %s" % table._tablename)
# Get the instance record
query = (table[UID] == record[UID])
record = db(query).select(table._id, limitby=(0, 1), *fields).first()
try:
record_id = record[table._id] if record else None
except AttributeError:
record_id = None
if record_id:
interlock = "%s,%s" % (table, record_id)
else:
raise SyntaxError("No record specified for %s" % table._tablename)
if interlock:
if timestmp is None:
timestmp = datetime.utcnow()
data = {"location_id": None,
"timestmp": timestmp,
"interlock": interlock,
}
q = (ptable.timestmp <= timestmp) & \
(ptable.deleted == False)
for r in self.records:
if TRACK_ID not in r:
# Cannot check-in a non-trackable
continue
track_id = r[TRACK_ID]
query = (ptable[TRACK_ID] == track_id) & q
presence = db(query).select(ptable.interlock,
orderby = ~ptable.timestmp,
limitby = (0, 1),
).first()
if presence and presence.interlock == interlock:
# Already checked-in to the same instance
continue
data[TRACK_ID] = track_id
ptable.insert(**data)
self.__update_timestamp(track_id, timestmp)
# -------------------------------------------------------------------------
def check_out(self, table=None, record=None, timestmp=None):
"""
Make the last log entry before timestmp independent from
the referenced entity (if any)
@param timestmp: the date/time of the check-out, defaults
to current time
"""
db = current.db
s3db = current.s3db
ptable = s3db[PRESENCE]
if timestmp is None:
timestmp = datetime.utcnow()
interlock = None
if table is not None:
if isinstance(table, str):
table = s3db[table]
if isinstance(record, Rows):
record = record.first()
if self.__super_entity(table):
if not isinstance(record, Row):
record = table[record]
table = s3db[record.instance_type]
fields = self.__get_fields(table, super_entity=False)
if not fields:
raise SyntaxError("No trackable type: %s" % table._tablename)
query = table[UID] == record[UID]
record = db(query).select(limitby=(0, 1)).first()
if isinstance(record, Row) and table._id.name in record:
record = record[table._id.name]
if record:
interlock = "%s,%s" % (table, record)
else:
return
q = ((ptable.deleted == False) & (ptable.timestmp <= timestmp))
for r in self.records:
if TRACK_ID not in r:
# Cannot check-out a non-trackable
continue
query = q & (ptable[TRACK_ID] == r[TRACK_ID])
presence = db(query).select(orderby=~ptable.timestmp,
limitby=(0, 1)).first()
if presence and presence.interlock:
if interlock and presence.interlock != interlock:
continue
elif not interlock and table and \
not presence.interlock.startswith("%s" % table):
continue
tablename, record_id = presence.interlock.split(",", 1)
trackable = S3Trackable(tablename=tablename, record_id=record_id)
location = trackable.get_location(_fields=["id"],
timestmp=timestmp,
as_rows=True).first()
if timestmp - presence.timestmp < timedelta(seconds=1):
timestmp = timestmp + timedelta(seconds=1)
data = dict(location_id=location.id,
timestmp=timestmp,
interlock=None)
data.update({TRACK_ID:r[TRACK_ID]})
ptable.insert(**data)
self.__update_timestamp(r[TRACK_ID], timestmp)
# -------------------------------------------------------------------------
def remove_location(self, location=None):
"""
Remove a location from the presence log of the instance(s)
@todo: implement
"""
raise NotImplementedError
# -------------------------------------------------------------------------
def get_base_location(self,
_fields=None,
_filter=None,
as_rows=False,
empty=True):
"""
Get the base location of the instance(s)
@param _fields: fields to retrieve from the location records (None for ALL)
@param _filter: filter for the locations
@param as_rows: return the result as Rows object
@param empty: return None if no locations (set to False by gis.get_location_data())
@return: the base location(s) of the current instance
"""
db = current.db
s3db = current.s3db
ltable = s3db[LOCATION]
rtable = self.rtable
locations = []
for r in self.records:
location = None
query = None
if LOCATION_ID in r:
query = (ltable.id == r[LOCATION_ID])
if rtable:
query = query & (rtable[LOCATION_ID] == ltable.id)
if TRACK_ID in r:
query = query & (rtable[TRACK_ID] == r[TRACK_ID])
elif TRACK_ID in r:
q = (self.table[TRACK_ID] == r[TRACK_ID])
trackable = db(q).select(limitby=(0, 1)).first()
table = s3db[trackable.instance_type]
if LOCATION_ID in table.fields:
query = ((table[TRACK_ID] == r[TRACK_ID]) &
(table[LOCATION_ID] == ltable.id))
if query:
if _filter is not None:
query = query & _filter
if not _fields:
location = db(query).select(ltable.ALL,
limitby=(0, 1)).first()
else:
location = db(query).select(limitby=(0, 1),
*_fields).first()
if location:
locations.append(location)
elif not empty:
# Ensure we return an entry for gis.get_location_data() so that indexes match
locations.append(Row({"lat": None, "lon": None}))
if as_rows:
return Rows(records=locations, compact=False)
if not locations:
return None
elif len(locations) == 1:
return locations[0]
else:
return locations
# -------------------------------------------------------------------------
def set_base_location(self, location=None):
"""
Set the base location of the instance(s)
@param location: the location for the base location as Row or record ID
@return: nothing
@note: instance tables without a location_id field will be ignored
"""
if isinstance(location, S3Trackable):
location = location.get_base_location()
if isinstance(location, Rows):
location = location.first()
if isinstance(location, Row):
            location = location.get("id", None)
if not location or not str(location).isdigit():
# Location not found
return
else:
data = {LOCATION_ID:location}
# Update records without track ID
for r in self.records:
if TRACK_ID in r:
continue
elif LOCATION_ID in r:
if hasattr(r, "update_record"):
r.update_record(**data)
else:
raise SyntaxError("Cannot relate record to a table.")
db = current.db
s3db = current.s3db
# Update records with track ID
        # => this can be done table-wise (= fewer queries)
track_ids = [r[TRACK_ID] for r in self.records if TRACK_ID in r]
rows = db(self.table[TRACK_ID].belongs(track_ids)).select()
tables = []
append = tables.append
types = set()
seen = types.add
for r in rows:
instance_type = r.instance_type
if instance_type not in types:
seen(instance_type)
table = s3db[instance_type]
if instance_type not in tables and LOCATION_ID in table.fields:
append(table)
else:
# No location ID in this type => ignore gracefully
continue
# Location specified => update all base locations
for table in tables:
db(table[TRACK_ID].belongs(track_ids)).update(**data)
# Refresh records
for r in self.records:
if LOCATION_ID in r:
r[LOCATION_ID] = location
return location
# -------------------------------------------------------------------------
def __update_timestamp(self, track_id, timestamp):
"""
Update the timestamp of a trackable
@param track_id: the trackable ID (super-entity key)
@param timestamp: the timestamp
"""
if track_id:
if timestamp is None:
timestamp = datetime.utcnow()
current.db(self.table.track_id == track_id).update(track_timestmp=timestamp)
# =============================================================================
class S3Tracker(object):
"""
S3 Tracking system, can be instantiated once as global 's3tracker' object
"""
def __init__(self):
"""
Constructor
"""
# -------------------------------------------------------------------------
def __call__(self, table=None, record_id=None, record_ids=None,
tablename=None, record=None, query=None):
"""
Get a tracking interface for a record or set of records
@param table: a Table object
@param record_id: a record ID (together with Table or tablename)
@param record_ids: a list/tuple of record IDs (together with Table or tablename)
@param tablename: a Str object
@param record: a Row object
@param query: a Query object
@return: a S3Trackable instance for the specified record(s)
"""
return S3Trackable(table=table,
tablename=tablename,
record_id=record_id,
record_ids=record_ids,
record=record,
query=query,
)
# -------------------------------------------------------------------------
def get_all(self, entity,
location=None,
bbox=None,
timestmp=None):
"""
Get all instances of the given entity at the given location and time
"""
raise NotImplementedError
# -------------------------------------------------------------------------
def get_checked_in(self, table, record,
instance_type=None,
timestmp=None):
"""
Get all trackables of the given type that are checked-in
to the given instance at the given time
"""
raise NotImplementedError
# =============================================================================
class S3CheckInMethod(S3Method):
"""
Custom Method to allow a trackable resource to check-in
"""
# -------------------------------------------------------------------------
@staticmethod
def apply_method(r, **attr):
"""
Apply method.
@param r: the S3Request
@param attr: controller options for this request
"""
if r.representation == "html":
T = current.T
s3db = current.s3db
response = current.response
table = r.table
tracker = S3Trackable(table, record_id=r.id)
title = T("Check-In")
get_vars = r.get_vars
# Are we being passed a location_id?
location_id = get_vars.get("location_id", None)
if not location_id:
# Are we being passed a lat and lon?
lat = get_vars.get("lat", None)
if lat is not None:
lon = get_vars.get("lon", None)
if lon is not None:
form_vars = Storage(lat = float(lat),
lon = float(lon),
)
form = Storage(vars=form_vars)
s3db.gis_location_onvalidation(form)
location_id = s3db.gis_location.insert(**form_vars)
form = None
if not location_id:
# Give the user a form to check-in
# Test the formstyle
formstyle = current.deployment_settings.get_ui_formstyle()
row = formstyle("test", "test", "test", "test")
if isinstance(row, tuple):
# Formstyle with separate row for label (e.g. default Eden formstyle)
tuple_rows = True
else:
# Formstyle with just a single row (e.g. Bootstrap, Foundation or DRRPP)
tuple_rows = False
form_rows = []
comment = ""
_id = "location_id"
label = LABEL("%s:" % T("Location"))
from .s3widgets import S3LocationSelector
field = table.location_id
#value = tracker.get_location(_fields=["id"],
# as_rows=True).first().id
value = None # We always want to create a new Location, not update the existing one
widget = S3LocationSelector(show_latlon = True)(field, value)
row = formstyle("%s__row" % _id, label, widget, comment)
if tuple_rows:
form_rows.append(row[0])
form_rows.append(row[1])
else:
form_rows.append(row)
_id = "submit"
label = ""
widget = INPUT(_type="submit", _value=T("Check-In"))
row = formstyle("%s__row" % _id, label, widget, comment)
if tuple_rows:
form_rows.append(row[0])
form_rows.append(row[1])
else:
form_rows.append(row)
if tuple_rows:
# Assume TRs
form = FORM(TABLE(*form_rows))
else:
form = FORM(*form_rows)
if form.accepts(current.request.vars, current.session):
location_id = form.vars.get("location_id", None)
if location_id:
# We're not Checking-in in S3Track terms (that's about interlocking with another object)
#tracker.check_in()
#timestmp = form.vars.get("timestmp", None)
#if timestmp:
# # @ToDo: Convert from string
# pass
#tracker.set_location(location_id, timestmp=timestmp)
tracker.set_location(location_id)
response.confirmation = T("Checked-In successfully!")
response.view = "check-in.html"
output = dict(form = form,
title = title,
)
return output
# @ToDo: JSON representation for check-in from mobile devices
else:
raise HTTP(415, current.ERROR.BAD_FORMAT)
# =============================================================================
class S3CheckOutMethod(S3Method):
"""
Custom Method to allow a trackable resource to check-out
"""
# -------------------------------------------------------------------------
@staticmethod
def apply_method(r, **attr):
"""
Apply method.
@param r: the S3Request
@param attr: controller options for this request
"""
if r.representation == "html":
T = current.T
response = current.response
tracker = S3Trackable(r.table, record_id=r.id)
title = T("Check-Out")
# Give the user a form to check-out
# Test the formstyle
formstyle = current.deployment_settings.get_ui_formstyle()
row = formstyle("test", "test", "test", "test")
if isinstance(row, tuple):
# Formstyle with separate row for label (e.g. default Eden formstyle)
tuple_rows = True
else:
# Formstyle with just a single row (e.g. Bootstrap, Foundation or DRRPP)
tuple_rows = False
form_rows = []
comment = ""
_id = "submit"
label = ""
widget = INPUT(_type="submit", _value=T("Check-Out"))
row = formstyle("%s__row" % _id, label, widget, comment)
if tuple_rows:
form_rows.append(row[0])
form_rows.append(row[1])
else:
form_rows.append(row)
if tuple_rows:
# Assume TRs
form = FORM(TABLE(*form_rows))
else:
form = FORM(*form_rows)
if form.accepts(current.request.vars, current.session):
# Check-Out
# We're not Checking-out in S3Track terms (that's about removing an interlock with another object)
# What we're doing is saying that we're now back at our base location
#tracker.check_out()
#timestmp = form_vars.get("timestmp", None)
#if timestmp:
# # @ToDo: Convert from string
# pass
#tracker.set_location(r.record.location_id, timestmp=timestmp)
tracker.set_location(r.record.location_id)
response.confirmation = T("Checked-Out successfully!")
response.view = "check-in.html"
output = dict(form = form,
title = title,
)
return output
# @ToDo: JSON representation for check-out from mobile devices
else:
raise HTTP(415, current.ERROR.BAD_FORMAT)
# END =========================================================================
| mit | 7,700,843,623,012,400,000 | 36.497338 | 114 | 0.480858 | false | 4.747843 | false | false | false |
kervi/kervi | kervi-devices/kervi/devices/sensors/LSM9DS1.py | 1 | 16858 | # The MIT License (MIT)
#
# Copyright (c) 2017 Tony DiCola for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#This driver is derived from Tony DoCola's work and adapted to the api of the kervi framework.
import time
try:
import struct
except ImportError:
import ustruct as struct
from kervi.hal import get_i2c, SensorDeviceDriver
# Internal constants and register values:
# pylint: disable=bad-whitespace
_LSM9DS1_ADDRESS_ACCELGYRO = 0x6B
_LSM9DS1_ADDRESS_MAG = 0x1E
_LSM9DS1_XG_ID = 0b01101000
_LSM9DS1_MAG_ID = 0b00111101
_LSM9DS1_ACCEL_MG_LSB_2G = 0.061
_LSM9DS1_ACCEL_MG_LSB_4G = 0.122
_LSM9DS1_ACCEL_MG_LSB_8G = 0.244
_LSM9DS1_ACCEL_MG_LSB_16G = 0.732
_LSM9DS1_MAG_MGAUSS_4GAUSS = 0.14
_LSM9DS1_MAG_MGAUSS_8GAUSS = 0.29
_LSM9DS1_MAG_MGAUSS_12GAUSS = 0.43
_LSM9DS1_MAG_MGAUSS_16GAUSS = 0.58
_LSM9DS1_GYRO_DPS_DIGIT_245DPS = 0.00875
_LSM9DS1_GYRO_DPS_DIGIT_500DPS = 0.01750
_LSM9DS1_GYRO_DPS_DIGIT_2000DPS = 0.07000
_LSM9DS1_TEMP_LSB_DEGREE_CELSIUS = 8 # 1°C = 8, 25° = 200, etc.
_LSM9DS1_REGISTER_WHO_AM_I_XG = 0x0F
_LSM9DS1_REGISTER_CTRL_REG1_G = 0x10
_LSM9DS1_REGISTER_CTRL_REG2_G = 0x11
_LSM9DS1_REGISTER_CTRL_REG3_G = 0x12
_LSM9DS1_REGISTER_TEMP_OUT_L = 0x15
_LSM9DS1_REGISTER_TEMP_OUT_H = 0x16
_LSM9DS1_REGISTER_STATUS_REG = 0x17
_LSM9DS1_REGISTER_OUT_X_L_G = 0x18
_LSM9DS1_REGISTER_OUT_X_H_G = 0x19
_LSM9DS1_REGISTER_OUT_Y_L_G = 0x1A
_LSM9DS1_REGISTER_OUT_Y_H_G = 0x1B
_LSM9DS1_REGISTER_OUT_Z_L_G = 0x1C
_LSM9DS1_REGISTER_OUT_Z_H_G = 0x1D
_LSM9DS1_REGISTER_CTRL_REG4 = 0x1E
_LSM9DS1_REGISTER_CTRL_REG5_XL = 0x1F
_LSM9DS1_REGISTER_CTRL_REG6_XL = 0x20
_LSM9DS1_REGISTER_CTRL_REG7_XL = 0x21
_LSM9DS1_REGISTER_CTRL_REG8 = 0x22
_LSM9DS1_REGISTER_CTRL_REG9 = 0x23
_LSM9DS1_REGISTER_CTRL_REG10 = 0x24
_LSM9DS1_REGISTER_OUT_X_L_XL = 0x28
_LSM9DS1_REGISTER_OUT_X_H_XL = 0x29
_LSM9DS1_REGISTER_OUT_Y_L_XL = 0x2A
_LSM9DS1_REGISTER_OUT_Y_H_XL = 0x2B
_LSM9DS1_REGISTER_OUT_Z_L_XL = 0x2C
_LSM9DS1_REGISTER_OUT_Z_H_XL = 0x2D
_LSM9DS1_REGISTER_WHO_AM_I_M = 0x0F
_LSM9DS1_REGISTER_CTRL_REG1_M = 0x20
_LSM9DS1_REGISTER_CTRL_REG2_M = 0x21
_LSM9DS1_REGISTER_CTRL_REG3_M = 0x22
_LSM9DS1_REGISTER_CTRL_REG4_M = 0x23
_LSM9DS1_REGISTER_CTRL_REG5_M = 0x24
_LSM9DS1_REGISTER_STATUS_REG_M = 0x27
_LSM9DS1_REGISTER_OUT_X_L_M = 0x28
_LSM9DS1_REGISTER_OUT_X_H_M = 0x29
_LSM9DS1_REGISTER_OUT_Y_L_M = 0x2A
_LSM9DS1_REGISTER_OUT_Y_H_M = 0x2B
_LSM9DS1_REGISTER_OUT_Z_L_M = 0x2C
_LSM9DS1_REGISTER_OUT_Z_H_M = 0x2D
_LSM9DS1_REGISTER_CFG_M = 0x30
_LSM9DS1_REGISTER_INT_SRC_M = 0x31
_MAGTYPE = True
_XGTYPE = False
_SENSORS_GRAVITY_STANDARD = 9.80665
# User facing constants/module globals.
ACCELRANGE_2G = (0b00 << 3)
ACCELRANGE_16G = (0b01 << 3)
ACCELRANGE_4G = (0b10 << 3)
ACCELRANGE_8G = (0b11 << 3)
MAGGAIN_4GAUSS = (0b00 << 5) # +/- 4 gauss
MAGGAIN_8GAUSS = (0b01 << 5) # +/- 8 gauss
MAGGAIN_12GAUSS = (0b10 << 5) # +/- 12 gauss
MAGGAIN_16GAUSS = (0b11 << 5) # +/- 16 gauss
GYROSCALE_245DPS = (0b00 << 3) # +/- 245 degrees/s rotation
GYROSCALE_500DPS = (0b01 << 3) # +/- 500 degrees/s rotation
GYROSCALE_2000DPS = (0b11 << 3) # +/- 2000 degrees/s rotation
# pylint: enable=bad-whitespace
def _twos_comp(val, bits):
    # Convert an unsigned integer in 2's complement form of the specified bit
# length to its signed integer value and return it.
if val & (1 << (bits - 1)) != 0:
return val - (1 << bits)
return val
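# For example, _twos_comp(0xFFF, 12) == -1 and _twos_comp(0x7FF, 12) == 2047,
# matching the 12-bit signed temperature value decoded in read_temp_raw() below.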
class _LSM9DS1():
"""Driver for the LSM9DS1 accelerometer, magnetometer, gyroscope."""
def __init__(self):
self._BUFFER = bytearray(6)
# soft reset & reboot accel/gyro
self._write_u8(_XGTYPE, _LSM9DS1_REGISTER_CTRL_REG8, 0x05)
# soft reset & reboot magnetometer
self._write_u8(_MAGTYPE, _LSM9DS1_REGISTER_CTRL_REG2_M, 0x0C)
time.sleep(0.01)
# Check ID registers.
if self._read_u8(_XGTYPE, _LSM9DS1_REGISTER_WHO_AM_I_XG) != _LSM9DS1_XG_ID or \
self._read_u8(_MAGTYPE, _LSM9DS1_REGISTER_WHO_AM_I_M) != _LSM9DS1_MAG_ID:
raise RuntimeError('Could not find LSM9DS1, check wiring!')
# enable gyro continuous
self._write_u8(_XGTYPE, _LSM9DS1_REGISTER_CTRL_REG1_G, 0xC0) # on XYZ
        # Enable the accelerometer continuous
self._write_u8(_XGTYPE, _LSM9DS1_REGISTER_CTRL_REG5_XL, 0x38)
self._write_u8(_XGTYPE, _LSM9DS1_REGISTER_CTRL_REG6_XL, 0xC0)
# enable mag continuous
self._write_u8(_MAGTYPE, _LSM9DS1_REGISTER_CTRL_REG3_M, 0x00)
# Set default ranges for the various sensors
self._accel_mg_lsb = None
self._mag_mgauss_lsb = None
self._gyro_dps_digit = None
self.accel_range = ACCELRANGE_2G
self.mag_gain = MAGGAIN_4GAUSS
self.gyro_scale = GYROSCALE_245DPS
@property
def accel_range(self):
"""The accelerometer range. Must be a value of:
- ACCELRANGE_2G
- ACCELRANGE_4G
- ACCELRANGE_8G
- ACCELRANGE_16G
"""
reg = self._read_u8(_XGTYPE, _LSM9DS1_REGISTER_CTRL_REG6_XL)
return (reg & 0b00011000) & 0xFF
@accel_range.setter
def accel_range(self, val):
assert val in (ACCELRANGE_2G, ACCELRANGE_4G, ACCELRANGE_8G,
ACCELRANGE_16G)
reg = self._read_u8(_XGTYPE, _LSM9DS1_REGISTER_CTRL_REG6_XL)
reg = (reg & ~(0b00011000)) & 0xFF
reg |= val
self._write_u8(_XGTYPE, _LSM9DS1_REGISTER_CTRL_REG6_XL, reg)
if val == ACCELRANGE_2G:
self._accel_mg_lsb = _LSM9DS1_ACCEL_MG_LSB_2G
elif val == ACCELRANGE_4G:
self._accel_mg_lsb = _LSM9DS1_ACCEL_MG_LSB_4G
elif val == ACCELRANGE_8G:
self._accel_mg_lsb = _LSM9DS1_ACCEL_MG_LSB_8G
elif val == ACCELRANGE_16G:
self._accel_mg_lsb = _LSM9DS1_ACCEL_MG_LSB_16G
@property
def mag_gain(self):
"""The magnetometer gain. Must be a value of:
- MAGGAIN_4GAUSS
- MAGGAIN_8GAUSS
- MAGGAIN_12GAUSS
- MAGGAIN_16GAUSS
"""
reg = self._read_u8(_MAGTYPE, _LSM9DS1_REGISTER_CTRL_REG2_M)
return (reg & 0b01100000) & 0xFF
@mag_gain.setter
def mag_gain(self, val):
assert val in (MAGGAIN_4GAUSS, MAGGAIN_8GAUSS, MAGGAIN_12GAUSS,
MAGGAIN_16GAUSS)
reg = self._read_u8(_MAGTYPE, _LSM9DS1_REGISTER_CTRL_REG2_M)
reg = (reg & ~(0b01100000)) & 0xFF
reg |= val
self._write_u8(_MAGTYPE, _LSM9DS1_REGISTER_CTRL_REG2_M, reg)
if val == MAGGAIN_4GAUSS:
self._mag_mgauss_lsb = _LSM9DS1_MAG_MGAUSS_4GAUSS
elif val == MAGGAIN_8GAUSS:
self._mag_mgauss_lsb = _LSM9DS1_MAG_MGAUSS_8GAUSS
elif val == MAGGAIN_12GAUSS:
self._mag_mgauss_lsb = _LSM9DS1_MAG_MGAUSS_12GAUSS
elif val == MAGGAIN_16GAUSS:
self._mag_mgauss_lsb = _LSM9DS1_MAG_MGAUSS_16GAUSS
@property
def gyro_scale(self):
"""The gyroscope scale. Must be a value of:
- GYROSCALE_245DPS
- GYROSCALE_500DPS
- GYROSCALE_2000DPS
"""
reg = self._read_u8(_XGTYPE, _LSM9DS1_REGISTER_CTRL_REG1_G)
return (reg & 0b00011000) & 0xFF
@gyro_scale.setter
def gyro_scale(self, val):
assert val in (GYROSCALE_245DPS, GYROSCALE_500DPS, GYROSCALE_2000DPS)
reg = self._read_u8(_XGTYPE, _LSM9DS1_REGISTER_CTRL_REG1_G)
reg = (reg & ~(0b00011000)) & 0xFF
reg |= val
self._write_u8(_XGTYPE, _LSM9DS1_REGISTER_CTRL_REG1_G, reg)
if val == GYROSCALE_245DPS:
self._gyro_dps_digit = _LSM9DS1_GYRO_DPS_DIGIT_245DPS
elif val == GYROSCALE_500DPS:
self._gyro_dps_digit = _LSM9DS1_GYRO_DPS_DIGIT_500DPS
elif val == GYROSCALE_2000DPS:
self._gyro_dps_digit = _LSM9DS1_GYRO_DPS_DIGIT_2000DPS
def read_accel_raw(self):
"""Read the raw accelerometer sensor values and return it as a
        3-tuple of X, Y, Z axis values that are 16-bit signed values. If you
        want the acceleration in nice units you probably want to use the
        acceleration property!
"""
# Read the accelerometer
self._read_bytes(_XGTYPE, 0x80 | _LSM9DS1_REGISTER_OUT_X_L_XL, 6, self._BUFFER)
raw_x, raw_y, raw_z = struct.unpack_from('<hhh', self._BUFFER[0:6])
return (raw_x, raw_y, raw_z)
@property
def acceleration(self):
"""The accelerometer X, Y, Z axis values as a 3-tuple of
m/s^2 values.
"""
raw = self.read_accel_raw()
return map(lambda x: x * self._accel_mg_lsb / 1000.0 * _SENSORS_GRAVITY_STANDARD,
raw)
def read_mag_raw(self):
"""Read the raw magnetometer sensor values and return it as a
        3-tuple of X, Y, Z axis values that are 16-bit signed values. If you
        want the magnetic field in nice units you probably want to use the
        magnetic property!
"""
# Read the magnetometer
self._read_bytes(_MAGTYPE, 0x80 | _LSM9DS1_REGISTER_OUT_X_L_M, 6, self._BUFFER)
raw_x, raw_y, raw_z = struct.unpack_from('<hhh', self._BUFFER[0:6])
return (raw_x, raw_y, raw_z)
@property
def magnetic(self):
"""The magnetometer X, Y, Z axis values as a 3-tuple of
gauss values.
"""
raw = self.read_mag_raw()
return map(lambda x: x * self._mag_mgauss_lsb / 1000.0, raw)
def read_gyro_raw(self):
"""Read the raw gyroscope sensor values and return it as a
        3-tuple of X, Y, Z axis values that are 16-bit signed values. If you
        want the rotation rate in nice units you probably want to use the
        gyro property!
"""
# Read the gyroscope
self._read_bytes(_XGTYPE, 0x80 | _LSM9DS1_REGISTER_OUT_X_L_G, 6, self._BUFFER)
raw_x, raw_y, raw_z = struct.unpack_from('<hhh', self._BUFFER[0:6])
return (raw_x, raw_y, raw_z)
@property
def gyro(self):
"""The gyroscope X, Y, Z axis values as a 3-tuple of
degrees/second values.
"""
raw = self.read_gyro_raw()
return map(lambda x: x * self._gyro_dps_digit, raw)
def read_temp_raw(self):
"""Read the raw temperature sensor value and return it as a 12-bit
signed value. If you want the temperature in nice units you probably
want to use the temperature property!
"""
# Read temp sensor
self._read_bytes(_XGTYPE, 0x80 | _LSM9DS1_REGISTER_TEMP_OUT_L, 2, self._BUFFER)
temp = ((self._BUFFER[1] << 8) | self._BUFFER[0]) >> 4
return _twos_comp(temp, 12)
@property
def temperature(self):
"""The temperature of the sensor in degrees Celsius."""
        # This is just a guess since the starting point (27.5C here) isn't documented :(
# See discussion from:
# https://github.com/kriswiner/LSM9DS1/issues/3
temp = self.read_temp_raw()
temp = 27.5 + temp/16
return temp
def _read_u8(self, sensor_type, address):
# Read an 8-bit unsigned value from the specified 8-bit address.
# The sensor_type boolean should be _MAGTYPE when talking to the
# magnetometer, or _XGTYPE when talking to the accel or gyro.
# MUST be implemented by subclasses!
raise NotImplementedError()
def _read_bytes(self, sensor_type, address, count, buf):
# Read a count number of bytes into buffer from the provided 8-bit
# register address. The sensor_type boolean should be _MAGTYPE when
# talking to the magnetometer, or _XGTYPE when talking to the accel or
# gyro. MUST be implemented by subclasses!
raise NotImplementedError()
def _write_u8(self, sensor_type, address, val):
# Write an 8-bit unsigned value to the specified 8-bit address.
# The sensor_type boolean should be _MAGTYPE when talking to the
# magnetometer, or _XGTYPE when talking to the accel or gyro.
# MUST be implemented by subclasses!
raise NotImplementedError()
class _LSM9DS1_I2C(_LSM9DS1):
"""Driver for the LSM9DS1 connect over I2C."""
def __init__(self, acclgyro_address=_LSM9DS1_ADDRESS_ACCELGYRO, mag_address=_LSM9DS1_ADDRESS_MAG, bus=None):
self._mag_device = get_i2c(mag_address, bus)
self._xg_device = get_i2c(acclgyro_address, bus)
super().__init__()
def _read_u8(self, sensor_type, address):
if sensor_type == _MAGTYPE:
device = self._mag_device
else:
device = self._xg_device
return device.read_U8(address)
def _read_bytes(self, sensor_type, address, count, buf):
if sensor_type == _MAGTYPE:
device = self._mag_device
else:
device = self._xg_device
r= device.read_list(address, count)
buf[:] = r
def _write_u8(self, sensor_type, address, val):
if sensor_type == _MAGTYPE:
device = self._mag_device
else:
device = self._xg_device
device.write8(address, val)
class LSM9DS1AccelerationDeviceDriver(SensorDeviceDriver):
def __init__(self, accel_range=ACCELRANGE_2G, acclgyro_address=_LSM9DS1_ADDRESS_ACCELGYRO, mag_address=_LSM9DS1_ADDRESS_MAG, bus=None):
SensorDeviceDriver.__init__(self)
self._device = _LSM9DS1_I2C(acclgyro_address,mag_address, bus)
self._device.accel_range = accel_range
@property
def dimensions(self):
return 3
@property
def value_type(self):
return "number"
@property
def dimension_labels(self):
return ["x","y", "z"]
@property
def type(self):
return "acceleration"
@property
def unit(self):
return "m/s^2"
def read_value(self):
x,y,z = self._device.acceleration
return [x, y, z]
class LSM9DS1GyroDeviceDriver(SensorDeviceDriver):
def __init__(self, gyro_scale=GYROSCALE_245DPS, acclgyro_address=_LSM9DS1_ADDRESS_ACCELGYRO, mag_address=_LSM9DS1_ADDRESS_MAG, bus=None):
SensorDeviceDriver.__init__(self)
self._device = _LSM9DS1_I2C(acclgyro_address,mag_address, bus)
self._device.gyro_scale = gyro_scale
@property
def value_type(self):
return "number"
@property
def dimensions(self):
return 3
@property
def dimension_labels(self):
return ["x","y", "z"]
@property
def type(self):
return "gyro"
@property
def unit(self):
return "degrees/second"
def read_value(self):
x,y,z = self._device.gyro
return [x, y, z]
class LSM9DS1MagneticDeviceDriver(SensorDeviceDriver):
def __init__(self, gain=MAGGAIN_4GAUSS, acclgyro_address=_LSM9DS1_ADDRESS_ACCELGYRO, mag_address=_LSM9DS1_ADDRESS_MAG, bus=None):
SensorDeviceDriver.__init__(self)
self._device = _LSM9DS1_I2C(acclgyro_address,mag_address, bus)
self._device.mag_gain = gain
@property
def dimensions(self):
return 3
@property
def dimension_labels(self):
return ["x","y", "z"]
@property
def type(self):
return "magnetic"
@property
def unit(self):
return "gauss"
@property
def value_type(self):
return "number"
def read_value(self):
x,y,z = self._device.magnetic
return [x, y, z]
| mit | 6,231,677,979,859,471,000 | 36.793722 | 141 | 0.617288 | false | 2.871061 | false | false | false |
lebabouin/CouchPotatoServer-develop | libs/tmdb3/cache_file.py | 10 | 13285 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------
# Name: cache_file.py
# Python Library
# Author: Raymond Wagner
# Purpose: Persistant file-backed cache using /tmp/ to share data
# using flock or msvcrt.locking to allow safe concurrent
# access.
#-----------------------
import struct
import errno
import json
import os
import io
from cStringIO import StringIO
from tmdb_exceptions import *
from cache_engine import CacheEngine, CacheObject
####################
# Cache File Format
#------------------
# cache version (2) unsigned short
# slot count (2) unsigned short
# slot 0: timestamp (8) double
# slot 0: lifetime (4) unsigned int
# slot 0: seek point (4) unsigned int
# slot 1: timestamp
# slot 1: lifetime index slots are IDd by their query date and
# slot 1: seek point are filled incrementally forwards. lifetime
# .... is how long after query date before the item
# .... expires, and seek point is the location of the
# slot N-2: timestamp start of data for that entry. 256 empty slots
# slot N-2: lifetime are pre-allocated, allowing fast updates.
# slot N-2: seek point when all slots are filled, the cache file is
# slot N-1: timestamp rewritten from scratch to add more slots.
# slot N-1: lifetime
# slot N-1: seek point
# block 1 (?) ASCII
# block 2
# .... blocks are just simple ASCII text, generated
# .... as independent objects by the JSON encoder
# block N-2
# block N-1
#
####################
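# Worked example of the layout above, using the structs defined further down:
# the header struct.Struct('HH') packs to 4 bytes and each slot
# struct.Struct('dII') packs to 16 bytes, so slot i starts at byte offset
# 4 + 16*i, the same offset arithmetic used by FileEngine._write() below.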
def _donothing(*args, **kwargs):
pass
try:
import fcntl
class Flock( object ):
"""
Context manager to flock file for the duration the object exists.
Referenced file will be automatically unflocked as the interpreter
exits the context.
Supports an optional callback to process the error and optionally
suppress it.
"""
LOCK_EX = fcntl.LOCK_EX
LOCK_SH = fcntl.LOCK_SH
def __init__(self, fileobj, operation, callback=None):
self.fileobj = fileobj
self.operation = operation
self.callback = callback
def __enter__(self):
fcntl.flock(self.fileobj, self.operation)
def __exit__(self, exc_type, exc_value, exc_tb):
suppress = False
if callable(self.callback):
suppress = self.callback(exc_type, exc_value, exc_tb)
fcntl.flock(self.fileobj, fcntl.LOCK_UN)
return suppress
def parse_filename(filename):
if '$' in filename:
# replace any environmental variables
filename = os.path.expandvars(filename)
if filename.startswith('~'):
# check for home directory
return os.path.expanduser(filename)
elif filename.startswith('/'):
# check for absolute path
return filename
# return path with temp directory prepended
return '/tmp/' + filename
except ImportError:
import msvcrt
class Flock( object ):
LOCK_EX = msvcrt.LK_LOCK
LOCK_SH = msvcrt.LK_LOCK
def __init__(self, fileobj, operation, callback=None):
self.fileobj = fileobj
self.operation = operation
self.callback = callback
def __enter__(self):
self.size = os.path.getsize(self.fileobj.name)
msvcrt.locking(self.fileobj.fileno(), self.operation, self.size)
def __exit__(self, exc_type, exc_value, exc_tb):
suppress = False
if callable(self.callback):
suppress = self.callback(exc_type, exc_value, exc_tb)
msvcrt.locking(self.fileobj.fileno(), msvcrt.LK_UNLCK, self.size)
return suppress
def parse_filename(filename):
if '%' in filename:
# replace any environmental variables
filename = os.path.expandvars(filename)
if filename.startswith('~'):
# check for home directory
return os.path.expanduser(filename)
        elif (ord(filename[0]) in (range(65,91)+range(97,123))) \
and (filename[1:3] == ':\\'):
# check for absolute drive path (e.g. C:\...)
return filename
elif (filename.count('\\') >= 3) and (filename.startswith('\\\\')):
# check for absolute UNC path (e.g. \\server\...)
return filename
# return path with temp directory prepended
return os.path.expandvars(os.path.join('%TEMP%',filename))
class FileCacheObject( CacheObject ):
_struct = struct.Struct('dII') # double and two ints
# timestamp, lifetime, position
@classmethod
def fromFile(cls, fd):
dat = cls._struct.unpack(fd.read(cls._struct.size))
obj = cls(None, None, dat[1], dat[0])
obj.position = dat[2]
return obj
def __init__(self, *args, **kwargs):
self._key = None
self._data = None
self._size = None
self._buff = StringIO()
super(FileCacheObject, self).__init__(*args, **kwargs)
@property
def size(self):
if self._size is None:
self._buff.seek(0,2)
size = self._buff.tell()
if size == 0:
if (self._key is None) or (self._data is None):
raise RuntimeError
json.dump([self.key, self.data], self._buff)
self._size = self._buff.tell()
self._size = size
return self._size
@size.setter
def size(self, value): self._size = value
@property
def key(self):
if self._key is None:
try:
self._key, self._data = json.loads(self._buff.getvalue())
except:
pass
return self._key
@key.setter
def key(self, value): self._key = value
@property
def data(self):
if self._data is None:
self._key, self._data = json.loads(self._buff.getvalue())
return self._data
@data.setter
def data(self, value): self._data = value
def load(self, fd):
fd.seek(self.position)
self._buff.seek(0)
self._buff.write(fd.read(self.size))
def dumpslot(self, fd):
pos = fd.tell()
fd.write(self._struct.pack(self.creation, self.lifetime, self.position))
def dumpdata(self, fd):
self.size
fd.seek(self.position)
fd.write(self._buff.getvalue())
class FileEngine( CacheEngine ):
"""Simple file-backed engine."""
name = 'file'
_struct = struct.Struct('HH') # two shorts for version and count
_version = 2
def __init__(self, parent):
super(FileEngine, self).__init__(parent)
self.configure(None)
def configure(self, filename, preallocate=256):
self.preallocate = preallocate
self.cachefile = filename
self.size = 0
self.free = 0
self.age = 0
def _init_cache(self):
# only run this once
self._init_cache = _donothing
if self.cachefile is None:
raise TMDBCacheError("No cache filename given.")
self.cachefile = parse_filename(self.cachefile)
try:
# attempt to read existing cache at filename
# handle any errors that occur
self._open('r+b')
# seems to have read fine, make sure we have write access
if not os.access(self.cachefile, os.W_OK):
raise TMDBCacheWriteError(self.cachefile)
except IOError as e:
if e.errno == errno.ENOENT:
# file does not exist, create a new one
try:
self._open('w+b')
self._write([])
except IOError as e:
if e.errno == errno.ENOENT:
# directory does not exist
raise TMDBCacheDirectoryError(self.cachefile)
elif e.errno == errno.EACCES:
# user does not have rights to create new file
raise TMDBCacheWriteError(self.cachefile)
else:
# let the unhandled error continue through
raise
            elif e.errno == errno.EACCES:
# file exists, but we do not have permission to access it
raise TMDBCacheReadError(self.cachefile)
else:
# let the unhandled error continue through
raise
def get(self, date):
self._init_cache()
self._open('r+b')
with Flock(self.cachefd, Flock.LOCK_SH): # lock for shared access
# return any new objects in the cache
return self._read(date)
def put(self, key, value, lifetime):
self._init_cache()
self._open('r+b')
with Flock(self.cachefd, Flock.LOCK_EX): # lock for exclusive access
newobjs = self._read(self.age)
newobjs.append(FileCacheObject(key, value, lifetime))
# this will cause a new file object to be opened with the proper
# access mode, however the Flock should keep the old object open
# and properly locked
self._open('r+b')
self._write(newobjs)
return newobjs
def _open(self, mode='r+b'):
# enforce binary operation
try:
if self.cachefd.mode == mode:
# already opened in requested mode, nothing to do
self.cachefd.seek(0)
return
except: pass # catch issue of no cachefile yet opened
self.cachefd = io.open(self.cachefile, mode)
def _read(self, date):
try:
self.cachefd.seek(0)
version, count = self._struct.unpack(\
self.cachefd.read(self._struct.size))
if version != self._version:
                # old version, break out and we'll rewrite when finished
raise Exception
self.size = count
cache = []
while count:
# loop through storage definitions
obj = FileCacheObject.fromFile(self.cachefd)
cache.append(obj)
count -= 1
except:
# failed to read information, so just discard it and return empty
self.size = 0
self.free = 0
return []
# get end of file
self.cachefd.seek(0,2)
position = self.cachefd.tell()
newobjs = []
emptycount = 0
# walk backward through all, collecting new content and populating size
while len(cache):
obj = cache.pop()
if obj.creation == 0:
# unused slot, skip
emptycount += 1
elif obj.expired:
# object has passed expiration date, no sense processing
continue
elif obj.creation > date:
# used slot with new data, process
obj.size, position = position - obj.position, obj.position
newobjs.append(obj)
# update age
self.age = max(self.age, obj.creation)
elif len(newobjs):
# end of new data, break
break
# walk forward and load new content
for obj in newobjs:
obj.load(self.cachefd)
self.free = emptycount
return newobjs
def _write(self, data):
if self.free and (self.size != self.free):
# we only care about the last data point, since the rest are
# already stored in the file
data = data[-1]
# determine write position of data in cache
self.cachefd.seek(0,2)
end = self.cachefd.tell()
data.position = end
# write incremental update to free slot
self.cachefd.seek(4 + 16*(self.size-self.free))
data.dumpslot(self.cachefd)
data.dumpdata(self.cachefd)
else:
# rewrite cache file from scratch
# pull data from parent cache
data.extend(self.parent()._data.values())
data.sort(key=lambda x: x.creation)
# write header
size = len(data) + self.preallocate
self.cachefd.seek(0)
self.cachefd.truncate()
self.cachefd.write(self._struct.pack(self._version, size))
# write storage slot definitions
prev = None
for d in data:
if prev == None:
d.position = 4 + 16*size
else:
d.position = prev.position + prev.size
d.dumpslot(self.cachefd)
prev = d
# fill in allocated slots
            for i in range(self.preallocate):
self.cachefd.write(FileCacheObject._struct.pack(0, 0, 0))
# write stored data
for d in data:
d.dumpdata(self.cachefd)
self.cachefd.flush()
def expire(self, key):
pass
| gpl-3.0 | 894,537,346,810,222,000 | 32.976982 | 80 | 0.54332 | false | 4.348609 | false | false | false |
xesscorp/skidl | skidl/libs/siliconi_sklib.py | 1 | 2245 | from skidl import SKIDL, TEMPLATE, Part, Pin, SchLib
SKIDL_lib_version = '0.0.1'
siliconi = SchLib(tool=SKIDL).add_parts(*[
Part(name='D469',dest=TEMPLATE,tool=SKIDL,keywords='High-Current Driver',description='Quad High-Current Power Driver',ref_prefix='U',num_units=4,do_erc=True,pins=[
Pin(num='7',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='14',name='V+',func=Pin.PWRIN,do_erc=True),
Pin(num='1',name='IN1',do_erc=True),
Pin(num='2',name='IN',do_erc=True),
Pin(num='13',name='OUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='3',name='IN1',do_erc=True),
Pin(num='4',name='IN',do_erc=True),
Pin(num='12',name='OUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='5',name='IN1',do_erc=True),
Pin(num='6',name='IN',do_erc=True),
Pin(num='11',name='OUT',func=Pin.OUTPUT,do_erc=True),
Pin(num='8',name='IN1',do_erc=True),
Pin(num='9',name='IN',do_erc=True),
Pin(num='10',name='OUT',func=Pin.OUTPUT,do_erc=True)]),
Part(name='DG411',dest=TEMPLATE,tool=SKIDL,keywords='CMOS Analog Switche',description='Monolithic Quad SPST, CMOS Analog Switches',ref_prefix='U',num_units=4,do_erc=True,pins=[
Pin(num='4',name='V-',func=Pin.PWRIN,do_erc=True),
Pin(num='5',name='GND',func=Pin.PWRIN,do_erc=True),
Pin(num='12',name='VCC',func=Pin.PWRIN,do_erc=True),
Pin(num='13',name='V+',func=Pin.PWRIN,do_erc=True),
Pin(num='1',name='SW',do_erc=True),
Pin(num='2',name='IN',func=Pin.PASSIVE,do_erc=True),
Pin(num='3',name='OUT',func=Pin.PASSIVE,do_erc=True),
Pin(num='6',name='OUT',func=Pin.PASSIVE,do_erc=True),
Pin(num='7',name='IN',func=Pin.PASSIVE,do_erc=True),
Pin(num='8',name='SW',do_erc=True),
Pin(num='9',name='SW',do_erc=True),
Pin(num='10',name='IN',func=Pin.PASSIVE,do_erc=True),
Pin(num='11',name='OUT',func=Pin.PASSIVE,do_erc=True),
Pin(num='14',name='OUT',func=Pin.PASSIVE,do_erc=True),
Pin(num='15',name='IN',func=Pin.PASSIVE,do_erc=True),
Pin(num='16',name='SW',do_erc=True)])])
| mit | -4,694,984,533,232,278,000 | 59.675676 | 184 | 0.567929 | false | 2.669441 | false | true | false |
Samweli/inasafe | safe/utilities/i18n.py | 2 | 2466 | # coding=utf-8
"""
InaSAFE Disaster risk assessment tool by AusAid -**Internationalisation
utilities.**
The module provides utilities function to convert between unicode and byte
string for Python 2.x. When we move to Python 3, this module and its usage
should be removed as string in Python 3 is already stored in unicode.
Contact : [email protected]
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
# This import is to enable SIP API V2
# noinspection PyUnresolvedReferences
import qgis # pylint: disable=unused-import
# noinspection PyPackageRequirements
from PyQt4.QtCore import QCoreApplication, QSettings, QLocale
from safe.utilities.unicode import get_unicode
__author__ = '[email protected]'
__revision__ = '$Format:%H$'
__date__ = '02/24/15'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
def tr(text, context='@default'):
"""We define a tr() alias here since the utilities implementation below
is not a class and does not inherit from QObject.
.. note:: see http://tinyurl.com/pyqt-differences
:param text: String to be translated
:type text: str, unicode
    :param context: A context for the translation, since the same text can be
        translated differently depending on the context.
:type context: str
:returns: Translated version of the given string if available, otherwise
the original string.
:rtype: str, unicode
"""
# Ensure it's in unicode
text = get_unicode(text)
# noinspection PyCallByClass,PyTypeChecker,PyArgumentList
return QCoreApplication.translate(context, text)
def locale():
"""Get the name of the currently active locale.
:returns: Name of hte locale e.g. 'id'
:rtype: stre
"""
override_flag = QSettings().value(
'locale/overrideFlag', True, type=bool)
if override_flag:
locale_name = QSettings().value('locale/userLocale', 'en_US', type=str)
else:
# noinspection PyArgumentList
locale_name = QLocale.system().name()
# NOTES: we split the locale name because we need the first two
# character i.e. 'id', 'af, etc
locale_name = str(locale_name).split('_')[0]
return locale_name
| gpl-3.0 | -7,313,340,112,255,717,000 | 32.324324 | 79 | 0.698297 | false | 3.9456 | false | false | false |
onepas/kodi-addons | plugin.video.vietmediaplay/uuid.py | 16 | 17742 | r"""UUID objects (universally unique identifiers) according to RFC 4122.
This module provides immutable UUID objects (class UUID) and the functions
uuid1(), uuid3(), uuid4(), uuid5() for generating version 1, 3, 4, and 5
UUIDs as specified in RFC 4122.
If all you want is a unique ID, you should probably call uuid1() or uuid4().
Note that uuid1() may compromise privacy since it creates a UUID containing
the computer's network address. uuid4() creates a random UUID.
Typical usage:
>>> import uuid
# make a UUID based on the host ID and current time
>>> uuid.uuid1()
UUID('a8098c1a-f86e-11da-bd1a-00112444be1e')
# make a UUID using an MD5 hash of a namespace UUID and a name
>>> uuid.uuid3(uuid.NAMESPACE_DNS, 'python.org')
UUID('6fa459ea-ee8a-3ca4-894e-db77e160355e')
# make a random UUID
>>> uuid.uuid4()
UUID('16fd2706-8baf-433b-82eb-8c7fada847da')
# make a UUID using a SHA-1 hash of a namespace UUID and a name
>>> uuid.uuid5(uuid.NAMESPACE_DNS, 'python.org')
UUID('886313e1-3b8a-5372-9b90-0c9aee199e5d')
# make a UUID from a string of hex digits (braces and hyphens ignored)
>>> x = uuid.UUID('{00010203-0405-0607-0809-0a0b0c0d0e0f}')
# convert a UUID to a string of hex digits in standard form
>>> str(x)
'00010203-0405-0607-0809-0a0b0c0d0e0f'
# get the raw 16 bytes of the UUID
>>> x.bytes
'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f'
# make a UUID from a 16-byte string
>>> uuid.UUID(bytes=x.bytes)
UUID('00010203-0405-0607-0809-0a0b0c0d0e0f')
"""
__author__ = 'Ka-Ping Yee <[email protected]>'
__date__ = '$Date: 2006/06/12 23:15:40 $'.split()[1].replace('/', '-')
__version__ = '$Revision: 1.30 $'.split()[1]
RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [
'reserved for NCS compatibility', 'specified in RFC 4122',
'reserved for Microsoft compatibility', 'reserved for future definition']
class UUID(object):
"""Instances of the UUID class represent UUIDs as specified in RFC 4122.
UUID objects are immutable, hashable, and usable as dictionary keys.
Converting a UUID to a string with str() yields something in the form
'12345678-1234-1234-1234-123456789abc'. The UUID constructor accepts
four possible forms: a similar string of hexadecimal digits, or a
string of 16 raw bytes as an argument named 'bytes', or a tuple of
six integer fields (with 32-bit, 16-bit, 16-bit, 8-bit, 8-bit, and
48-bit values respectively) as an argument named 'fields', or a single
128-bit integer as an argument named 'int'.
UUIDs have these read-only attributes:
bytes the UUID as a 16-byte string
fields a tuple of the six integer fields of the UUID,
which are also available as six individual attributes
and two derived attributes:
time_low the first 32 bits of the UUID
time_mid the next 16 bits of the UUID
time_hi_version the next 16 bits of the UUID
clock_seq_hi_variant the next 8 bits of the UUID
clock_seq_low the next 8 bits of the UUID
node the last 48 bits of the UUID
time the 60-bit timestamp
clock_seq the 14-bit sequence number
hex the UUID as a 32-character hexadecimal string
int the UUID as a 128-bit integer
urn the UUID as a URN as specified in RFC 4122
variant the UUID variant (one of the constants RESERVED_NCS,
RFC_4122, RESERVED_MICROSOFT, or RESERVED_FUTURE)
version the UUID version number (1 through 5, meaningful only
when the variant is RFC_4122)
"""
def __init__(self, hex=None, bytes=None, fields=None, int=None,
version=None):
r"""Create a UUID from either a string of 32 hexadecimal digits,
a string of 16 bytes as the 'bytes' argument, a tuple of six
integers (32-bit time_low, 16-bit time_mid, 16-bit time_hi_version,
8-bit clock_seq_hi_variant, 8-bit clock_seq_low, 48-bit node) as
the 'fields' argument, or a single 128-bit integer as the 'int'
argument. When a string of hex digits is given, curly braces,
hyphens, and a URN prefix are all optional. For example, these
expressions all yield the same UUID:
UUID('{12345678-1234-5678-1234-567812345678}')
UUID('12345678123456781234567812345678')
UUID('urn:uuid:12345678-1234-5678-1234-567812345678')
UUID(bytes='\x12\x34\x56\x78'*4)
UUID(fields=(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678))
UUID(int=0x12345678123456781234567812345678)
Exactly one of 'hex', 'bytes', 'fields', or 'int' must be given.
The 'version' argument is optional; if given, the resulting UUID
will have its variant and version number set according to RFC 4122,
overriding bits in the given 'hex', 'bytes', 'fields', or 'int'.
"""
if [hex, bytes, fields, int].count(None) != 3:
raise TypeError('need just one of hex, bytes, fields, or int')
if hex is not None:
hex = hex.replace('urn:', '').replace('uuid:', '')
hex = hex.strip('{}').replace('-', '')
if len(hex) != 32:
raise ValueError('badly formed hexadecimal UUID string')
int = long(hex, 16)
if bytes is not None:
if len(bytes) != 16:
raise ValueError('bytes is not a 16-char string')
int = long(('%02x'*16) % tuple(map(ord, bytes)), 16)
if fields is not None:
if len(fields) != 6:
raise ValueError('fields is not a 6-tuple')
(time_low, time_mid, time_hi_version,
clock_seq_hi_variant, clock_seq_low, node) = fields
if not 0 <= time_low < 1<<32L:
raise ValueError('field 1 out of range (need a 32-bit value)')
if not 0 <= time_mid < 1<<16L:
raise ValueError('field 2 out of range (need a 16-bit value)')
if not 0 <= time_hi_version < 1<<16L:
raise ValueError('field 3 out of range (need a 16-bit value)')
if not 0 <= clock_seq_hi_variant < 1<<8L:
raise ValueError('field 4 out of range (need an 8-bit value)')
if not 0 <= clock_seq_low < 1<<8L:
raise ValueError('field 5 out of range (need an 8-bit value)')
if not 0 <= node < 1<<48L:
raise ValueError('field 6 out of range (need a 48-bit value)')
clock_seq = (clock_seq_hi_variant << 8L) | clock_seq_low
int = ((time_low << 96L) | (time_mid << 80L) |
(time_hi_version << 64L) | (clock_seq << 48L) | node)
if int is not None:
if not 0 <= int < 1<<128L:
raise ValueError('int is out of range (need a 128-bit value)')
if version is not None:
if not 1 <= version <= 5:
raise ValueError('illegal version number')
# Set the variant to RFC 4122.
int &= ~(0xc000 << 48L)
int |= 0x8000 << 48L
# Set the version number.
int &= ~(0xf000 << 64L)
int |= version << 76L
self.__dict__['int'] = int
def __cmp__(self, other):
if isinstance(other, UUID):
return cmp(self.int, other.int)
return NotImplemented
def __hash__(self):
return hash(self.int)
def __int__(self):
return self.int
def __repr__(self):
return 'UUID(%r)' % str(self)
def __setattr__(self, name, value):
raise TypeError('UUID objects are immutable')
def __str__(self):
hex = '%032x' % self.int
return '%s-%s-%s-%s-%s' % (
hex[:8], hex[8:12], hex[12:16], hex[16:20], hex[20:])
def get_bytes(self):
bytes = ''
for shift in range(0, 128, 8):
bytes = chr((self.int >> shift) & 0xff) + bytes
return bytes
bytes = property(get_bytes)
def get_fields(self):
return (self.time_low, self.time_mid, self.time_hi_version,
self.clock_seq_hi_variant, self.clock_seq_low, self.node)
fields = property(get_fields)
def get_time_low(self):
return self.int >> 96L
time_low = property(get_time_low)
def get_time_mid(self):
return (self.int >> 80L) & 0xffff
time_mid = property(get_time_mid)
def get_time_hi_version(self):
return (self.int >> 64L) & 0xffff
time_hi_version = property(get_time_hi_version)
def get_clock_seq_hi_variant(self):
return (self.int >> 56L) & 0xff
clock_seq_hi_variant = property(get_clock_seq_hi_variant)
def get_clock_seq_low(self):
return (self.int >> 48L) & 0xff
clock_seq_low = property(get_clock_seq_low)
def get_time(self):
return (((self.time_hi_version & 0x0fffL) << 48L) |
(self.time_mid << 32L) | self.time_low)
time = property(get_time)
def get_clock_seq(self):
return (((self.clock_seq_hi_variant & 0x3fL) << 8L) |
self.clock_seq_low)
clock_seq = property(get_clock_seq)
def get_node(self):
return self.int & 0xffffffffffff
node = property(get_node)
def get_hex(self):
return '%032x' % self.int
hex = property(get_hex)
def get_urn(self):
return 'urn:uuid:' + str(self)
urn = property(get_urn)
def get_variant(self):
if not self.int & (0x8000 << 48L):
return RESERVED_NCS
elif not self.int & (0x4000 << 48L):
return RFC_4122
elif not self.int & (0x2000 << 48L):
return RESERVED_MICROSOFT
else:
return RESERVED_FUTURE
variant = property(get_variant)
def get_version(self):
# The version bits are only meaningful for RFC 4122 UUIDs.
if self.variant == RFC_4122:
return int((self.int >> 76L) & 0xf)
version = property(get_version)
def _ifconfig_getnode():
"""Get the hardware address on Unix by running ifconfig."""
import os
for dir in ['', '/sbin/', '/usr/sbin']:
try:
pipe = os.popen(os.path.join(dir, 'ifconfig'))
except IOError:
continue
for line in pipe:
words = line.lower().split()
for i in range(len(words)):
if words[i] in ['hwaddr', 'ether']:
return int(words[i + 1].replace(':', ''), 16)
def _ipconfig_getnode():
"""Get the hardware address on Windows by running ipconfig.exe."""
import os, re
dirs = ['', r'c:\windows\system32', r'c:\winnt\system32']
try:
import ctypes
buffer = ctypes.create_string_buffer(300)
ctypes.windll.kernel32.GetSystemDirectoryA(buffer, 300)
dirs.insert(0, buffer.value.decode('mbcs'))
except:
pass
for dir in dirs:
try:
pipe = os.popen(os.path.join(dir, 'ipconfig') + ' /all')
except IOError:
continue
for line in pipe:
value = line.split(':')[-1].strip().lower()
if re.match('([0-9a-f][0-9a-f]-){5}[0-9a-f][0-9a-f]', value):
return int(value.replace('-', ''), 16)
def _netbios_getnode():
"""Get the hardware address on Windows using NetBIOS calls.
See http://support.microsoft.com/kb/118623 for details."""
import win32wnet, netbios
ncb = netbios.NCB()
ncb.Command = netbios.NCBENUM
ncb.Buffer = adapters = netbios.LANA_ENUM()
adapters._pack()
if win32wnet.Netbios(ncb) != 0:
return
adapters._unpack()
for i in range(adapters.length):
ncb.Reset()
ncb.Command = netbios.NCBRESET
ncb.Lana_num = ord(adapters.lana[i])
if win32wnet.Netbios(ncb) != 0:
continue
ncb.Reset()
ncb.Command = netbios.NCBASTAT
ncb.Lana_num = ord(adapters.lana[i])
ncb.Callname = '*'.ljust(16)
ncb.Buffer = status = netbios.ADAPTER_STATUS()
if win32wnet.Netbios(ncb) != 0:
continue
status._unpack()
bytes = map(ord, status.adapter_address)
return ((bytes[0]<<40L) + (bytes[1]<<32L) + (bytes[2]<<24L) +
(bytes[3]<<16L) + (bytes[4]<<8L) + bytes[5])
# Thanks to Thomas Heller for ctypes and for his help with its use here.
# If ctypes is available, use it to find system routines for UUID generation.
_uuid_generate_random = _uuid_generate_time = _UuidCreate = None
try:
import ctypes, ctypes.util
_buffer = ctypes.create_string_buffer(16)
# The uuid_generate_* routines are provided by libuuid on at least
# Linux and FreeBSD, and provided by libc on Mac OS X.
for libname in ['uuid', 'c']:
try:
lib = ctypes.CDLL(ctypes.util.find_library(libname))
except:
continue
if hasattr(lib, 'uuid_generate_random'):
_uuid_generate_random = lib.uuid_generate_random
if hasattr(lib, 'uuid_generate_time'):
_uuid_generate_time = lib.uuid_generate_time
# On Windows prior to 2000, UuidCreate gives a UUID containing the
# hardware address. On Windows 2000 and later, UuidCreate makes a
# random UUID and UuidCreateSequential gives a UUID containing the
# hardware address. These routines are provided by the RPC runtime.
try:
lib = ctypes.windll.rpcrt4
except:
lib = None
_UuidCreate = getattr(lib, 'UuidCreateSequential',
getattr(lib, 'UuidCreate', None))
except:
pass
def _unixdll_getnode():
"""Get the hardware address on Unix using ctypes."""
_uuid_generate_time(_buffer)
return UUID(bytes=_buffer.raw).node
def _windll_getnode():
"""Get the hardware address on Windows using ctypes."""
if _UuidCreate(_buffer) == 0:
return UUID(bytes=_buffer.raw).node
def _random_getnode():
"""Get a random node ID, with eighth bit set as suggested by RFC 4122."""
import random
return random.randrange(0, 1<<48L) | 0x010000000000L
_node = None
def getnode():
"""Get the hardware address as a 48-bit integer. The first time this
runs, it may launch a separate program, which could be quite slow. If
all attempts to obtain the hardware address fail, we choose a random
48-bit number with its eighth bit set to 1 as recommended in RFC 4122."""
global _node
if _node is not None:
return _node
import sys
if sys.platform == 'win32':
getters = [_windll_getnode, _netbios_getnode, _ipconfig_getnode]
else:
getters = [_unixdll_getnode, _ifconfig_getnode]
for getter in getters + [_random_getnode]:
try:
_node = getter()
except:
continue
if _node is not None:
return _node
def uuid1(node=None, clock_seq=None):
"""Generate a UUID from a host ID, sequence number, and the current time.
If 'node' is not given, getnode() is used to obtain the hardware
address. If 'clock_seq' is given, it is used as the sequence number;
otherwise a random 14-bit sequence number is chosen."""
# When the system provides a version-1 UUID generator, use it (but don't
# use UuidCreate here because its UUIDs don't conform to RFC 4122).
if _uuid_generate_time and node is clock_seq is None:
_uuid_generate_time(_buffer)
return UUID(bytes=_buffer.raw)
import time
nanoseconds = int(time.time() * 1e9)
# 0x01b21dd213814000 is the number of 100-ns intervals between the
# UUID epoch 1582-10-15 00:00:00 and the Unix epoch 1970-01-01 00:00:00.
timestamp = int(nanoseconds/100) + 0x01b21dd213814000L
if clock_seq is None:
import random
clock_seq = random.randrange(1<<14L) # instead of stable storage
time_low = timestamp & 0xffffffffL
time_mid = (timestamp >> 32L) & 0xffffL
time_hi_version = (timestamp >> 48L) & 0x0fffL
clock_seq_low = clock_seq & 0xffL
clock_seq_hi_variant = (clock_seq >> 8L) & 0x3fL
if node is None:
node = getnode()
return UUID(fields=(time_low, time_mid, time_hi_version,
clock_seq_hi_variant, clock_seq_low, node), version=1)
def uuid3(namespace, name):
"""Generate a UUID from the MD5 hash of a namespace UUID and a name."""
import md5
hash = md5.md5(namespace.bytes + name).digest()
return UUID(bytes=hash[:16], version=3)
def uuid4():
"""Generate a random UUID."""
# When the system provides a version-4 UUID generator, use it.
if _uuid_generate_random:
_uuid_generate_random(_buffer)
return UUID(bytes=_buffer.raw)
# Otherwise, get randomness from urandom or the 'random' module.
try:
import os
return UUID(bytes=os.urandom(16), version=4)
except:
import random
bytes = [chr(random.randrange(256)) for i in range(16)]
return UUID(bytes=bytes, version=4)
def uuid5(namespace, name):
"""Generate a UUID from the SHA-1 hash of a namespace UUID and a name."""
import sha
hash = sha.sha(namespace.bytes + name).digest()
return UUID(bytes=hash[:16], version=5)
# The following standard UUIDs are for use with uuid3() or uuid5().
NAMESPACE_DNS = UUID('6ba7b810-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_URL = UUID('6ba7b811-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_OID = UUID('6ba7b812-9dad-11d1-80b4-00c04fd430c8')
NAMESPACE_X500 = UUID('6ba7b814-9dad-11d1-80b4-00c04fd430c8')
| gpl-2.0 | -4,095,845,845,809,557,000 | 36.273109 | 78 | 0.60416 | false | 3.526535 | true | false | false |
carlos-ferras/Sequence-ToolKit | controller/stk/assistant.py | 1 | 2990 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import pickle
from PyQt5 import QtWidgets, QtCore, QtNetwork
class Assistant(QtWidgets.QApplication):
timeout = 1000
def __init__(self, argv):
QtWidgets.QApplication.__init__(self, argv)
self.socket_filename = os.path.expanduser("~/.stk_assistant_share_memory")
self.shared_memory = QtCore.QSharedMemory()
self.shared_memory.setKey(self.socket_filename)
self.is_running = self.shared_memory.attach()
self.process = None
if not self.is_running:
if not self.shared_memory.create(1):
return
self.process = QtCore.QProcess()
self.process.finished.connect(self.quit)
self.server = QtNetwork.QLocalServer(self)
self.server.newConnection.connect(self.receiveMessage)
self.server.listen(self.socket_filename)
def start(self):
if not self.process:
self.process = QtCore.QProcess()
if self.process.state() != QtCore.QProcess.Running:
app = "assistant "
args = "-collectionFile resources/help/stk_collection.qhc -enableRemoteControl"
self.process.start(app + args)
if len(self.arguments()) == 2 and \
self.arguments()[1] in ['stk', 'gensec', 'genrep', 'genvis']:
self.handleMessage(self.arguments()[1])
return True
def sendMessage(self, message):
if not self.is_running:
raise Exception("Client cannot connect to the server. Not running.")
socket = QtNetwork.QLocalSocket(self)
socket.connectToServer(self.socket_filename, QtCore.QIODevice.WriteOnly)
if not socket.waitForConnected(self.timeout):
raise Exception(str(socket.errorString()))
socket.write(pickle.dumps(message))
if not socket.waitForBytesWritten(self.timeout):
raise Exception(str(socket.errorString()))
socket.disconnectFromServer()
def receiveMessage(self):
socket = self.server.nextPendingConnection()
if not socket.waitForReadyRead(self.timeout):
return
byte_array = socket.readAll()
self.handleMessage(pickle.loads(byte_array))
def handleMessage(self, content):
msg = QtCore.QByteArray()
msg.append('show index;')
msg.append('activateKeyword ' + content + ';')
msg.append('setSource ' + 'qthelp://com.sequence-toolkit.help-assistant/doc/html/' + content + '.html\n')
self.process.write(msg)
def quit(self):
self.shared_memory.detach()
os.remove(self.socket_filename)
if self.process is not None:
if self.process.state() == QtCore.QProcess.Running:
self.process.terminate()
self.process.waitForFinished(3000)
self.process.deleteLater()
QtWidgets.QApplication.quit()
| gpl-3.0 | -3,806,706,311,854,687,700 | 34.595238 | 113 | 0.625084 | false | 4.084699 | false | false | false |
acrellin/mltsp | cesium/features/tests/util.py | 5 | 1685 | import dask
import numpy as np
from cesium.features import generate_dask_graph
def generate_features(t, m, e, features_to_use):
"""Utility function that generates features from a dask DAG."""
graph = generate_dask_graph(t, m, e)
values = dask.get(graph, features_to_use)
return dict(zip(features_to_use, values))
def irregular_random(seed=0, size=50):
"""Generate random test data at irregularly-sampled times."""
state = np.random.RandomState(seed)
times = np.sort(state.uniform(0, 10, size))
values = state.normal(1, 1, size)
errors = state.exponential(0.1, size)
return times, values, errors
def regular_periodic(freqs, amplitudes, phase, size=501):
"""Generate periodic test data sampled at regular intervals: superposition
of multiple sine waves, each with multiple harmonics.
"""
times = np.linspace(0, 2, size)
values = np.zeros(size)
for (i,j), amplitude in np.ndenumerate(amplitudes):
values += amplitude * np.sin(2*np.pi*times*freqs[i]*(j+1) + phase)
errors = 1e-4*np.ones(size)
return times, values, errors
def irregular_periodic(freqs, amplitudes, phase, seed=0, size=501):
"""Generate periodic test data sampled at randomly-spaced intervals:
superposition of multiple sine waves, each with multiple harmonics.
"""
state = np.random.RandomState(seed)
times = np.sort(state.uniform(0, 2, size))
values = np.zeros(size)
for i in range(freqs.shape[0]):
for j in range(amplitudes.shape[1]):
values += amplitudes[i,j] * np.sin(2*np.pi*times*freqs[i]*(j+1)+phase)
errors = state.exponential(1e-2, size)
return times, values, errors
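# Illustrative usage (assumption, not part of the original module): these helpers are
# typically combined to exercise periodic features, e.g.
#
#   freqs = np.array([1.0, 2.5])                  # two base frequencies
#   amps = np.array([[1.0, 0.5], [0.3, 0.1]])     # two harmonics per frequency
#   t, m, e = irregular_periodic(freqs, amps, phase=0.1)
#   feats = generate_features(t, m, e, ['freq1_freq'])
#
# The feature name 'freq1_freq' is only an example; valid names depend on the cesium
# feature registry.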
| bsd-3-clause | -2,430,628,174,755,432,400 | 34.104167 | 82 | 0.678932 | false | 3.303922 | false | false | false |
fy0/my-leetcode | 126.word-ladder-ii/main.py | 1 | 3073 | class Solution:
def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:
        # First, build a map from each character (at each position) to the wordList entries containing it.
word_dict = {}
wordListSet = set(wordList)
if endWord not in wordListSet:
return []
for val in wordList:
for index, j in enumerate(val):
word_dict.setdefault(j, {})
word_dict[j].setdefault(index, set())
word_dict[j][index].add(val)
convert_table = {}
def build_table(start):
convert = set()
            # Try replacing each character position in turn.
for index0, i in enumerate(start):
                # Drop the current character and collect the words the remaining characters can still form.
ret = wordListSet
for index, j in enumerate(start):
if index == index0:
continue
                    # If any remaining character is missing from the dictionary, this candidate set cannot continue.
if j not in word_dict:
ret = set()
break
if index not in word_dict[j]:
ret = set()
break
                    # Keep only the conversions that remain possible.
ret = ret & word_dict[j][index]
convert |= ret
if start in convert:
convert.remove(start)
return convert
for i in wordList:
convert_table[i] = build_table(i)
convert_table[beginWord] = build_table(beginWord)
        # Optimization (left unimplemented): if the input word has two or more characters missing from the dictionary, return an empty result immediately.
unknown = 0
# for i in start:
# pass
best = []
best_len = len(wordList)
cache = {}
        # Exhaustive search from the input word, trying to replace one character at a time.
def solve(path, start, limit):
nonlocal best, best_len
# path = path[:]
# print('solve', path, start)
            # Termination condition: the end word has been reached.
if start == endWord:
# print(path)
return path
if len(path) > limit:
return
            # If the path already reaches the best recorded length, there is no point in continuing.
if len(path) >= best_len:
return
next_round = convert_table[start] - set(path)
for j in next_round:
path2 = path[:]
path2.append(j)
r = solve(path2, j, limit)
if r:
if len(r) < best_len:
best = [r]
best_len = len(r)
elif best_len == len(r):
if r not in best:
best.append(r)
for i in range(1, len(wordList) + 1):
print(i)
solve([beginWord], beginWord, i)
if best:
break
return best
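# Illustrative usage (not part of the original submission):
#
#   s = Solution()
#   s.findLadders("hit", "cog", ["hot", "dot", "dog", "lot", "log", "cog"])
#   # -> [["hit","hot","dot","dog","cog"], ["hit","hot","lot","log","cog"]] (order may vary)
#
# The outer loop performs iterative deepening: solve() is retried with an increasing
# path-length limit until at least one shortest ladder is found.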
| apache-2.0 | -8,953,778,943,092,849,000 | 27.635417 | 96 | 0.427792 | false | 3.385468 | false | false | false |
david-mateo/marabunta | marabunta/models/HeadingConsensusRobot.py | 1 | 2310 | from marabunta import BaseRobot
from math import sin,cos,pi
class HeadingConsensusRobot(BaseRobot):
"""Robot model for heading consensus.
By iteratively calling the update() method,
this robot will communicate with the rest
of the swarm and align its heading to the
swarm's mean heading.
Obstacle avoidance (implemented in BaseRobot)
will take precence over consensus reaching.
"""
#def __init__(self, body, network):
# BaseRobot.__init__(self, body, network)
# return
def heading_target(self):
"""Get the other agent's state and
compute the mean heading. Note that
for periodic quantities such as the
heading, the mean is defined as
< x_i > = atan( sum_i sin(x_i)/sum_i cos(x_i) )
Returns a vector pointing to the
mean heading. If no agents are
detected, returns None.
"""
neis = self.get_agents().values()
if neis:
sint = sum( [sin(nei[2]) for nei in neis])
cost = sum( [cos(nei[2]) for nei in neis])
target = [cost, sint]
else:
target = None
return target
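    # Illustrative check (not part of the original class): the circular mean used in
    # heading_target() can be reproduced directly with atan2, e.g.
    #
    #   from math import sin, cos, atan2
    #   headings = [0.1, 0.2, 0.3]
    #   s = sum(sin(h) for h in headings)
    #   c = sum(cos(h) for h in headings)
    #   mean_heading = atan2(s, c)   # ~0.2 rad, robust to angle wrap-around
    #
    # Returning the vector [c, s] instead of the angle lets correct_target() and
    # align() work directly on a direction vector.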
def move_to_target(self, target, deltat, v):
"""Align the robot to *target* and
move forward for *deltat* at a speed *v*.
"""
self.align(target)
self.move_forward(deltat, v)
return
def update(self, deltat, v=None):
"""Perform one step of the consensus
protocol. This is the main "behavior"
of the robot. It consists of 4 steps:
1. Broadcast its state.
2. Perform swarming. In practice,
this means computing the desired
target direction of motion.
(in this case, perform heading
consensus)
3. Correct the desired target
in order to avoid obstacles.
4. Move in the desired target direction.
"""
self.broadcast_state()
# Perform swarming
target = self.heading_target()
if not target:
            h = self.body.get_heading()
            target = [cos(h), sin(h)]
# Avoid obstacles
target = self.correct_target(target)
self.move_to_target(target, deltat, v)
return
| gpl-3.0 | -5,241,920,178,329,342,000 | 32 | 59 | 0.576623 | false | 4.074074 | false | false | false |
ksrajkumar/openerp-6.1 | openerp/addons/base_crypt/crypt.py | 2 | 9810 | # Notice:
# ------
#
# Implements encrypting functions.
#
# Copyright (c) 2008, F S 3 Consulting Inc.
#
# Maintainer:
# Alec Joseph Rivera (agi<at>fs3.ph)
#
#
# Warning:
# -------
#
# This program as such is intended to be used by professional programmers
# who take the whole responsibility of assessing all potential consequences
# resulting from its eventual inadequacies and bugs. End users who are
# looking for a ready-to-use solution with commercial guarantees and
# support are strongly adviced to contract a Free Software Service Company.
#
# This program is Free Software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the:
#
# Free Software Foundation, Inc.
# 59 Temple Place - Suite 330
# Boston, MA 02111-1307
# USA.
from random import seed, sample
from string import ascii_letters, digits
from osv import fields,osv
import pooler
from tools.translate import _
from service import security
magic_md5 = '$1$'
def gen_salt( length=8, symbols=ascii_letters + digits ):
seed()
return ''.join( sample( symbols, length ) )
# The encrypt_md5 is based on Mark Johnson's md5crypt.py, which in turn is
# based on FreeBSD src/lib/libcrypt/crypt.c (1.2) by Poul-Henning Kamp.
# Mark's port can be found in ActiveState ASPN Python Cookbook. Kudos to
# Poul and Mark. -agi
#
# Original license:
#
# * "THE BEER-WARE LICENSE" (Revision 42):
# *
# * <[email protected]> wrote this file. As long as you retain this
# * notice you can do whatever you want with this stuff. If we meet some
# * day, and you think this stuff is worth it, you can buy me a beer in
# * return.
# *
# * Poul-Henning Kamp
#TODO: py>=2.6: from hashlib import md5
import hashlib
def encrypt_md5( raw_pw, salt, magic=magic_md5 ):
raw_pw = raw_pw.encode('utf-8')
salt = salt.encode('utf-8')
hash = hashlib.md5()
hash.update( raw_pw + magic + salt )
st = hashlib.md5()
st.update( raw_pw + salt + raw_pw)
stretch = st.digest()
for i in range( 0, len( raw_pw ) ):
hash.update( stretch[i % 16] )
i = len( raw_pw )
while i:
if i & 1:
hash.update('\x00')
else:
hash.update( raw_pw[0] )
i >>= 1
saltedmd5 = hash.digest()
for i in range( 1000 ):
hash = hashlib.md5()
if i & 1:
hash.update( raw_pw )
else:
hash.update( saltedmd5 )
if i % 3:
hash.update( salt )
if i % 7:
hash.update( raw_pw )
if i & 1:
hash.update( saltedmd5 )
else:
hash.update( raw_pw )
saltedmd5 = hash.digest()
itoa64 = './0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
rearranged = ''
for a, b, c in ((0, 6, 12), (1, 7, 13), (2, 8, 14), (3, 9, 15), (4, 10, 5)):
v = ord( saltedmd5[a] ) << 16 | ord( saltedmd5[b] ) << 8 | ord( saltedmd5[c] )
for i in range(4):
rearranged += itoa64[v & 0x3f]
v >>= 6
v = ord( saltedmd5[11] )
for i in range( 2 ):
rearranged += itoa64[v & 0x3f]
v >>= 6
return magic + salt + '$' + rearranged
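# Illustrative usage (not part of the original module): encrypt_md5 produces the classic
# "$1$<salt>$<digest>" form used by FreeBSD md5crypt, e.g.
#
#   salt = gen_salt()                       # 8 random characters
#   hashed = encrypt_md5('secret', salt)    # -> '$1$' + salt + '$' + 22-char digest
#   assert hashed.startswith(magic_md5 + salt + '$')
#
# _login() later recovers the salt by slicing the stored value between the '$1$' prefix
# and position 11, which is why the salt length is fixed at 8 characters.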
class users(osv.osv):
_name="res.users"
_inherit="res.users"
# agi - 022108
# Add handlers for 'input_pw' field.
def set_pw(self, cr, uid, id, name, value, args, context):
if not value:
raise osv.except_osv(_('Error'), _("Please specify the password !"))
obj = pooler.get_pool(cr.dbname).get('res.users')
if not hasattr(obj, "_salt_cache"):
obj._salt_cache = {}
salt = obj._salt_cache[id] = gen_salt()
encrypted = encrypt_md5(value, salt)
cr.execute('update res_users set password=%s where id=%s',
(encrypted.encode('utf-8'), int(id)))
cr.commit()
del value
def get_pw( self, cr, uid, ids, name, args, context ):
cr.execute('select id, password from res_users where id in %s', (tuple(map(int, ids)),))
stored_pws = cr.fetchall()
res = {}
for id, stored_pw in stored_pws:
res[id] = stored_pw
return res
_columns = {
# The column size could be smaller as it is meant to store a hash, but
# an existing column cannot be downsized; thus we use the original
# column size.
'password': fields.function(get_pw, fnct_inv=set_pw, type='char',
size=64, string='Password', invisible=True,
store=True),
}
def login(self, db, login, password):
if not password:
return False
if db is False:
raise RuntimeError("Cannot authenticate to False db!")
cr = None
try:
cr = pooler.get_db(db).cursor()
return self._login(cr, db, login, password)
except Exception:
import logging
logging.getLogger('netsvc').exception('Could not authenticate')
return Exception('Access Denied')
finally:
if cr is not None:
cr.close()
def _login(self, cr, db, login, password):
cr.execute( 'SELECT password, id FROM res_users WHERE login=%s AND active',
(login.encode('utf-8'),))
if cr.rowcount:
stored_pw, id = cr.fetchone()
else:
# Return early if no one has a login name like that.
return False
stored_pw = self.maybe_encrypt(cr, stored_pw, id)
if not stored_pw:
# means couldn't encrypt or user is not active!
return False
# Calculate an encrypted password from the user-provided
# password.
obj = pooler.get_pool(db).get('res.users')
if not hasattr(obj, "_salt_cache"):
obj._salt_cache = {}
salt = obj._salt_cache[id] = stored_pw[len(magic_md5):11]
encrypted_pw = encrypt_md5(password, salt)
# Check if the encrypted password matches against the one in the db.
cr.execute("""UPDATE res_users
SET date=now() AT TIME ZONE 'UTC'
WHERE id=%s AND password=%s AND active
RETURNING id""",
(int(id), encrypted_pw.encode('utf-8')))
res = cr.fetchone()
cr.commit()
if res:
return res[0]
else:
return False
def check(self, db, uid, passwd):
if not passwd:
# empty passwords disallowed for obvious security reasons
raise security.ExceptionNoTb('AccessDenied')
# Get a chance to hash all passwords in db before using the uid_cache.
obj = pooler.get_pool(db).get('res.users')
if not hasattr(obj, "_salt_cache"):
obj._salt_cache = {}
self._uid_cache.get(db, {}).clear()
cached_pass = self._uid_cache.get(db, {}).get(uid)
if (cached_pass is not None) and cached_pass == passwd:
return True
cr = pooler.get_db(db).cursor()
try:
if uid not in self._salt_cache.get(db, {}):
# If we don't have cache, we have to repeat the procedure
# through the login function.
cr.execute( 'SELECT login FROM res_users WHERE id=%s', (uid,) )
stored_login = cr.fetchone()
if stored_login:
stored_login = stored_login[0]
res = self._login(cr, db, stored_login, passwd)
if not res:
raise security.ExceptionNoTb('AccessDenied')
else:
salt = self._salt_cache[db][uid]
cr.execute('SELECT COUNT(*) FROM res_users WHERE id=%s AND password=%s AND active',
(int(uid), encrypt_md5(passwd, salt)))
res = cr.fetchone()[0]
finally:
cr.close()
if not bool(res):
raise security.ExceptionNoTb('AccessDenied')
if res:
if self._uid_cache.has_key(db):
ulist = self._uid_cache[db]
ulist[uid] = passwd
else:
self._uid_cache[db] = {uid: passwd}
return bool(res)
def maybe_encrypt(self, cr, pw, id):
""" Return the password 'pw', making sure it is encrypted.
If the password 'pw' is not encrypted, then encrypt all active passwords
in the db. Returns the (possibly newly) encrypted password for 'id'.
"""
if not pw.startswith(magic_md5):
cr.execute("SELECT id, password FROM res_users " \
"WHERE active=true AND password NOT LIKE '$%'")
# Note that we skip all passwords like $.., in anticipation for
# more than md5 magic prefixes.
res = cr.fetchall()
for i, p in res:
encrypted = encrypt_md5(p, gen_salt())
cr.execute('UPDATE res_users SET password=%s where id=%s',
(encrypted, i))
if i == id:
encrypted_res = encrypted
cr.commit()
return encrypted_res
return pw
users()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 3,774,015,161,033,898,500 | 31.919463 | 100 | 0.566667 | false | 3.70049 | false | false | false |
RedHenLab/Audio | GSoC2015/CommercialDetection/src/web/output/views.py | 1 | 6235 | from django.shortcuts import render, render_to_response
from django.http import HttpResponse
from django.template import Template, Context, RequestContext
from django.template.loader import get_template
from web.settings import BASE_DIR
import sys
sys.path.append(BASE_DIR + "/../") #Shift one higher up the parent directory to reach src/
import os
from constants import *
from generate import Generate
import fileHandler
import timeFunc
from django.views.decorators.csrf import csrf_exempt
import simplejson
lines = {} #dictionary, with key as start_secs
def get_dict(labels):
"""
Given labels(output from the labels object in fileHandler.py), it creates a dictionary of the form {key:value}
Where,
key = start time in seconds = start_secs
value = line corresponding to that start time = [start, end, name, start_secs, end_secs]
Returns: Dictionary
"""
d = {}
for item in labels:
#Things are extracted based on labels format
name = item[2]
#We obtain the start and end time in seconds
start_secs = timeFunc.get_seconds(item[0])
end_secs = timeFunc.get_seconds(item[1])
item.append(start_secs)
item.append(end_secs)
#Create the dictionary
d[start_secs] = item
return d
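# Illustrative sketch (assumption about the time format, not part of the original file):
# a labels row such as ['00:01:05', '00:01:30', 'commercial'] becomes
#
#   {65: ['00:01:05', '00:01:30', 'commercial', 65, 90]}
#
# i.e. each row is keyed by its start time in seconds, with start/end seconds appended,
# so the browser can address every label by start_secs alone.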
@csrf_exempt
def index(request):
"""
The url /output maps to this function.
Displays the video and the corresponding labels file on the browser.
This page is called in two ways:
normal way: When the user visits the site
Ajax way: When user makes some changes to the labels, the page is reloaded with the help of this function
"""
global lines
t = get_template('output/index.html')
    # If the page was requested by the user (and not via ajax), we have to read the labels file
if request.is_ajax() == False:
#Page was requested by the user
labels = fileHandler.LabelsFile(infile=BASE_DIR + "/../" + WEB_LABELS).read_lables(skip=False)
lines = get_dict(labels)
    # Since dict keys are unordered, we sort them.
keys = lines.keys()
keys.sort()
#Now we have start_secs in increasing order, store this value in values.
values = [lines[key] for key in keys]
html = t.render(Context({'video_path': WEB_VIDEO_NAME, 'item_list': values}))
return HttpResponse(html)
@csrf_exempt
def update(request):
"""
The url /output/update is mapped to this function.
This function is always called through ajax
When the user edits any label on the browser, this function is called to reflect the changes in "lines" dictionary
"""
global lines
#Obtain the start_secs of the label which the user just edited
start = int(request.POST.get(u'start'))
#Obtain the new text
text = str(request.POST.get(u'text'))
#Update the text
l = lines[start]
l[2] = text
lines.update({start:l})
return HttpResponse(simplejson.dumps({'server_response': '1' }))
@csrf_exempt
def save(request):
"""
The url /output/save/ is mapped to this function.
This function is called with the click of the "Save changes" button.
The function writes the "lines" dictionary back into the labels file and ends the program.
"""
global lines
labels = fileHandler.LabelsFile(outfile=BASE_DIR + "/../" + WEB_LABELS)
keys = lines.keys()
keys.sort()
lines_list = [lines[key] for key in keys]
for line in lines_list:
l = [line[i] for i in range(3)]
labels.write_labels(l)
return HttpResponse('Successfully updated :-)')
@csrf_exempt
def delete(request):
"""
The url /output/delete/ maps to this function.
This function is called by the click of the button '-'.
    It deletes the label selected by the user.
    When a label is deleted, the following operations take place:
        - the end time of the deleted label is written onto the end time of the label preceding it.
        - the deleted label is removed from the lines dictionary.
"""
global lines
keys = lines.keys()
keys.sort()
start = int(request.POST.get(u'start_sec'))
end = int(request.POST.get(u'end_sec'))
#Now we find the preceeding label
for i in range(len(keys)):
if keys[i] == start:
break
#This will be the label, just above the label to be deleted
old_start = keys[i - 1]
#Performing the operations
#We assign the endtime of this to the previous start
lines[old_start][1] = timeFunc.get_time_string(end)
lines[old_start][-1] = end
del lines[start]
return HttpResponse(simplejson.dumps({'server_response': '1' }))
@csrf_exempt
def add(request):
"""
The url /output/add/ maps to this function.
The function is called by the click of the button '+'.
It is used to add another label.
    When the function is called, the following operations are performed:
        - Obtain the new start time in seconds of the new label.
        - Make the end time of the new label equal to the end time of the original label (where '+' was clicked).
        - Change the end time of the previous label (the label whose '+' was clicked) to the new start time.
"""
global lines
actual_start = int(request.POST.get(u'actual_start'))
start = int(request.POST.get(u'start_sec'))
end = int(request.POST.get(u'end_sec'))
if start in lines.keys():
#If already in the dictionary don't update
return HttpResponse(simplejson.dumps({'server_response': '1' }))
#Now we add the value in lines as well
lines.update({start: [timeFunc.get_time_string(start), timeFunc.get_time_string(end), UNCLASSIFIED_CONTENT, start, end]})
#We change the "end" of the previous start
lines[actual_start][1] = timeFunc.get_time_string(start)
print len(lines[start]), len(lines[actual_start])
return HttpResponse(simplejson.dumps({'server_response': '1' }))
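# Illustrative client-side contract (assumption, not part of the original file): the '+'
# button is expected to POST the surrounding label's timing, i.e. the equivalent of
#
#   requests.post('/output/add/', data={'actual_start': 65,   # label being split
#                                       'start_sec': 70,      # start of the new label
#                                       'end_sec': 90})       # inherited end time
#
# after which the label starting at 65 ends at 70 and a new UNCLASSIFIED_CONTENT label
# covers 70-90.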
| gpl-2.0 | 2,963,164,279,370,545,700 | 33.071038 | 125 | 0.643304 | false | 4.038212 | false | false | false |
njbbaer/unicorn-remote | app/programs/hd/candle.py | 1 | 2458 | #!/usr/bin/env python
import colorsys
import math
import time
from random import randint
import unicornhathd
def run(params):
width, height = unicornhathd.get_shape()
# buffer to contain candle "heat" data
candle = [0] * 256
# create a palette for mapping heat values onto colours
palette = [0] * 256
for i in range(0, 256):
h = i / 5.0
h /= 360.0
s = (1.0 / (math.sqrt(i / 50.0) + 0.01))
s = min(1.0, s)
s = max(0.0, s)
v = i / 200.0
if i < 60:
v = v / 2
v = min(1.0, v)
v = max(0.0, v)
r, g, b = colorsys.hsv_to_rgb(h, s, v)
palette[i] = (int(r * 255.0), int(g * 255.0), int(b * 255.0))
def set_pixel(b, x, y, v):
b[y * 16 + x] = int(v)
def get_pixel(b, x, y):
# out of range sample lookup
if x < 0 or y < 0 or x >= 16 or y >= 16:
return 0
# subpixel sample lookup
if isinstance(x, float) and x < 15:
f = x - int(x)
return (b[int(y) * 16 + int(x)] * (1.0 - f)) + (b[int(y) * 16 + int(x) + 1] * (f))
# fixed pixel sample lookup
return b[int(y) * 16 + int(x)]
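    # Illustrative note (not part of the original script): the float branch above is a
    # linear interpolation between two horizontal neighbours; e.g. with b[0] == 10 and
    # b[1] == 30, get_pixel(b, 0.25, 0) returns 10*0.75 + 30*0.25 == 15.0.  This is what
    # lets the sine-based offset `s` below sample the flame at sub-pixel positions and
    # produce a smooth waving motion.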
step = 0
try:
while True:
# step for waving animation, adds some randomness
step += randint(0, 15)
# clone the current candle
temp = candle[:]
# seed new heat
v = 500
set_pixel(candle, 6, 15, v)
set_pixel(candle, 7, 15, v)
set_pixel(candle, 8, 15, v)
set_pixel(candle, 9, 15, v)
set_pixel(candle, 6, 14, v)
set_pixel(candle, 7, 14, v)
set_pixel(candle, 8, 14, v)
set_pixel(candle, 9, 14, v)
# blur, wave, and shift up one step
for x in range(0, 16):
for y in range(0, 16):
s = math.sin((y / 30.0) + (step / 10.0)) * ((16 - y) / 20.0)
v = 0
for i in range(0, 3):
for j in range(0, 3):
#r = randint(0, 2) - 1
v += get_pixel(candle, x + i + s - 1, y + j)
v /= 10
set_pixel(temp, x, y, v)
candle = temp
# copy candle into UHHD with palette
for x in range(0, 16):
for y in range(0, 16):
r, g, b = palette[max(0, min(255, get_pixel(candle, x, y)))]
unicornhathd.set_pixel(x, y, r, g, b)
unicornhathd.show()
except KeyboardInterrupt:
unicornhathd.off()
| mit | 8,055,438,462,381,919,000 | 24.340206 | 88 | 0.478438 | false | 2.874854 | false | false | false |
ktok07b6/polyphony | polyphony/compiler/deadcode.py | 1 | 1652 | from .env import env
from .ir import *
from logging import getLogger
logger = getLogger(__name__)
class DeadCodeEliminator(object):
def process(self, scope):
if scope.is_namespace() or scope.is_class():
return
usedef = scope.usedef
for blk in scope.traverse_blocks():
dead_stms = []
for stm in blk.stms:
if stm.is_a([MOVE, PHIBase]):
if stm.is_a(MOVE) and stm.src.is_a([TEMP, ATTR]) and stm.src.symbol().is_param():
continue
if stm.is_a(MOVE) and stm.src.is_a(CALL):
continue
defvars = usedef.get_vars_defined_at(stm)
for var in defvars:
if not var.is_a(TEMP):
break
if stm.block.path_exp.is_a([TEMP, ATTR]) and stm.block.path_exp.symbol() is var.symbol():
break
uses = usedef.get_stms_using(var.symbol())
if uses:
break
else:
dead_stms.append(stm)
for stm in dead_stms:
blk.stms.remove(stm)
logger.debug('removed dead code: ' + str(stm))
if stm.is_a(MOVE):
var = stm.dst
elif stm.is_a(PHIBase):
var = stm.var
if var.is_a([TEMP, ATTR]) and var.symbol().typ.is_seq():
memnode = var.symbol().typ.get_memnode()
env.memref_graph.remove_node(memnode) | mit | -2,885,627,664,183,240,700 | 39.317073 | 113 | 0.452785 | false | 3.980723 | false | false | false |
ThomasYeoLab/CBIG | stable_projects/predict_phenotypes/Nguyen2020_RNNAD/cbig/Nguyen2020/hord_base.py | 1 | 1312 | #!/usr/bin/env python
from __future__ import print_function
import csv
import os
import threading
class OptimBase(object):
def __init__(self, executable, fixed_args, log_file):
self.executable = executable
self.fixed_args = fixed_args
self.log_file = log_file
        assert not os.path.isfile(self.log_file), '%s exists. Choose a different name' % self.log_file
self.best_result = 100
self.lock = threading.Lock()
self.f_eval_count = 0
self.hyper_map = None
self.dim = 0
self.func = []
self.init_param_list()
with open(self.log_file, 'w') as fhandler:
header = ['bestScore', 'score', 'nb_eval', 'fname'] + self.hyper_map.keys()
csv.writer(fhandler).writerow(header)
def init_param_list(self):
raise NotImplementedError()
def param_vec2dict(self, params):
return {k: self.func[v](params[v]) for k, v in self.hyper_map.items()}
def log(self, result, nb_eval, fname, var_args):
row = [self.best_result, result, nb_eval, fname]
row += [var_args[k] for k in self.hyper_map.keys()]
with open(self.log_file, 'a') as fhandler:
csv.writer(fhandler).writerow(row)
def execute(self, var_args):
raise NotImplementedError()
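# Illustrative sketch (assumption, not part of the original file): a concrete subclass
# would define the search space in init_param_list() and launch the training run in
# execute(), roughly:
#
#   class OptimRNN(OptimBase):
#       def init_param_list(self):
#           self.hyper_map = {'lr': 0, 'h_size': 1}    # hyper-parameter -> vector index
#           self.func = [lambda x: 10 ** x, int]       # decode each vector component
#           self.dim = len(self.hyper_map)
#
#       def execute(self, var_args):
#           return run_training(self.executable, self.fixed_args, **var_args)
#
# where run_training is a placeholder for whatever invokes `self.executable`.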
| mit | 2,659,299,299,542,736,000 | 31 | 101 | 0.60747 | false | 3.517426 | false | false | false |