Dataset columns (one row per source file; the `content` field of each row is reproduced below):

| column | dtype | values |
|---|---|---|
| repo_name | string | lengths 5-92 |
| path | string | lengths 4-232 |
| copies | string | 19 classes |
| size | string | lengths 4-7 |
| content | string | lengths 721-1.04M |
| license | string | 15 classes |
| hash | int64 | -9,223,277,421,539,062,000 to 9,223,102,107B |
| line_mean | float64 | 6.51-99.9 |
| line_max | int64 | 15-997 |
| alpha_frac | float64 | 0.25-0.97 |
| autogenerated | bool | 1 class |
repo_name: urq/awssql | path: shellsql.py | copies: 1 | size: 2032

import os
import pickle
import sys
import click
from shellsql import aws
from shellsql.models import Dataset
from shellsql.cache import TTLCache, FileCache
from shellsql import utils
@click.group()
def cli():
pass
@cli.command()
def hello():
click.echo("hello world!")
@cli.command()
@click.argument('columns', nargs=-1)
def select(columns):
data = Dataset.from_file(sys.stdin)
data.select(columns).write(sys.stdout)
@cli.command()
def headers():
print '\n'.join(sorted(Dataset.from_file(sys.stdin).headers))
@cli.command()
@click.option('--region', default='us-east-1')
@click.option('--cache-dir', default='~/.shellsql-cache')
@click.option('--cache-ttl', default=60)
@click.argument('profiles', nargs=-1)
def get(profiles, cache_dir, cache_ttl, region):
# if not os.path.exists(os.path.expanduser(cache_dir)):
# os.mkdir(os.path.expanduser(cache_dir))
# cache = TTLCache(FileCache(os.path.expanduser(cache_dir)))
# data = cache.get(profile)
# if data is None:
# data = aws.get_instances_as_table(profile, region)
# print 'inserting into cache'
# cache.insert(profile, pickle.dumps(data))
# else:
# data = pickle.loads(data)
data = reduce(lambda x, y: x.merge(y),
[aws.get_instances_as_table(profile, region)
for profile in profiles])
data.write(sys.stdout)
#@cli.command()
#@click.argument('predicate')
#def map(columns, predicate):
#data = Dataset.from_file(sys.stdin)
#def func(row): exec predicate + '; return row' in globals(), locals()
#data = data.map(func)
#data.write(sys.stdout)
@cli.command()
@click.argument('predicate')
def filter(predicate):
data = Dataset.from_file(sys.stdin)
utils.import_execution_env()
print "predicate: {}".format(predicate)
func_str = """filter_func = lambda row: {}""".format(predicate)
print func_str
exec(func_str)
data = data.filter(filter_func)
data.write(sys.stdout)
if __name__ == '__main__':
cli()
license: apache-2.0 | hash: -1,326,841,644,984,561,400 | line_mean: 27.222222 | line_max: 74 | alpha_frac: 0.656004 | autogenerated: false
repo_name: vijayendrabvs/ssl-neutron | path: neutron/plugins/nec/ofc_driver_base.py | copies: 1 | size: 5143

# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Ryota MIBU
# @author: Akihiro MOTOKI
from abc import ABCMeta, abstractmethod
import six
@six.add_metaclass(ABCMeta)
class OFCDriverBase(object):
"""OpenFlow Controller (OFC) Driver Specification.
OFCDriverBase defines the minimum set of methods required by this plugin.
It would be better that other methods like update_* are implemented.
"""
@abstractmethod
def create_tenant(self, description, tenant_id=None):
"""Create a new tenant at OpenFlow Controller.
:param description: A description of this tenant.
:param tenant_id: A hint of OFC tenant ID.
A driver could use this id as a OFC id or ignore it.
:returns: ID of the tenant created at OpenFlow Controller.
:raises: neutron.plugin.nec.common.exceptions.OFCException
"""
pass
@abstractmethod
def delete_tenant(self, ofc_tenant_id):
"""Delete a tenant at OpenFlow Controller.
:raises: neutron.plugin.nec.common.exceptions.OFCException
"""
pass
@abstractmethod
def create_network(self, ofc_tenant_id, description, network_id=None):
"""Create a new network on specified OFC tenant at OpenFlow Controller.
:param ofc_tenant_id: a OFC tenant ID in which a new network belongs.
:param description: A description of this network.
:param network_id: A hint of an ID of OFC network.
:returns: ID of the network created at OpenFlow Controller.
ID returned must be unique in the OpenFlow Controller.
If a network is identified in conjunction with other information
such as a tenant ID, such information should be included in the ID.
:raises: neutron.plugin.nec.common.exceptions.OFCException
"""
pass
@abstractmethod
def delete_network(self, ofc_network_id):
"""Delete a netwrok at OpenFlow Controller.
:raises: neutron.plugin.nec.common.exceptions.OFCException
"""
pass
@abstractmethod
def create_port(self, ofc_network_id, portinfo,
port_id=None, filters=None):
"""Create a new port on specified network at OFC.
:param ofc_network_id: an OFC network ID in which a new port belongs.
:param portinfo: An OpenFlow information of this port.
{'datapath_id': Switch ID that a port connected.
'port_no': Port Number that a port connected on a Switch.
'vlan_id': VLAN ID that a port tagging.
'mac': Mac address.
}
:param port_id: A hint of an ID of OFC port.
ID returned must be unique in the OpenFlow Controller.
If a port is identified in combination with a network or
a tenant, such information should be included in the ID.
:param filters: A list of packet filter associated with the port.
Each element is a tuple (neutron ID, OFC ID)
:returns: ID of the port created at OpenFlow Controller.
:raises: neutron.plugin.nec.common.exceptions.OFCException
"""
pass
@abstractmethod
def delete_port(self, ofc_port_id):
"""Delete a port at OpenFlow Controller.
:raises: neutron.plugin.nec.common.exceptions.OFCException
"""
pass
@abstractmethod
def convert_ofc_tenant_id(self, context, ofc_tenant_id):
"""Convert old-style ofc tenand id to new-style one.
:param context: neutron context object
:param ofc_tenant_id: ofc_tenant_id to be converted
"""
pass
@abstractmethod
def convert_ofc_network_id(self, context, ofc_network_id,
tenant_id):
"""Convert old-style ofc network id to new-style one.
:param context: neutron context object
:param ofc_network_id: ofc_network_id to be converted
:param tenant_id: neutron tenant_id of the network
"""
pass
@abstractmethod
def convert_ofc_port_id(self, context, ofc_port_id,
tenant_id, network_id):
"""Convert old-style ofc port id to new-style one.
:param context: neutron context object
:param ofc_port_id: ofc_port_id to be converted
:param tenant_id: neutron tenant_id of the port
:param network_id: neutron network_id of the port
"""
pass
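
# --- Illustrative sketch (not part of the original module) ---
# The class docstring above says a concrete OFC driver only needs to supply
# the methods below.  The hypothetical in-memory driver here shows the
# expected signatures and return shapes; its name and dict-based behaviour
# are made up for illustration, and a real driver would talk to an OpenFlow
# controller instead.
class _SketchOFCDriver(OFCDriverBase):
    def __init__(self):
        self._tenants = {}

    def create_tenant(self, description, tenant_id=None):
        ofc_tenant_id = tenant_id or description
        self._tenants[ofc_tenant_id] = description
        return ofc_tenant_id

    def delete_tenant(self, ofc_tenant_id):
        self._tenants.pop(ofc_tenant_id, None)

    def create_network(self, ofc_tenant_id, description, network_id=None):
        # include the tenant id so the returned id is unique on the OFC
        return "%s/networks/%s" % (ofc_tenant_id, network_id or description)

    def delete_network(self, ofc_network_id):
        pass

    def create_port(self, ofc_network_id, portinfo, port_id=None, filters=None):
        return "%s/ports/%s" % (ofc_network_id, port_id or portinfo['mac'])

    def delete_port(self, ofc_port_id):
        pass

    def convert_ofc_tenant_id(self, context, ofc_tenant_id):
        return ofc_tenant_id

    def convert_ofc_network_id(self, context, ofc_network_id, tenant_id):
        return ofc_network_id

    def convert_ofc_port_id(self, context, ofc_port_id, tenant_id, network_id):
        return ofc_port_id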
license: apache-2.0 | hash: 7,191,177,591,923,439,000 | line_mean: 36.268116 | line_max: 79 | alpha_frac: 0.642815 | autogenerated: false
repo_name: Lujeni/matterllo | path: core/models.py | copies: 1 | size: 3323

# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from ast import literal_eval
from django.db import models
from django.conf import settings
class Board(models.Model):
name = models.CharField(max_length=100)
webhook_activate = models.BooleanField(default=False)
trello_board_id = models.CharField(max_length=100)
trello_token = models.CharField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
ordering = ["name"]
verbose_name_plural = "boards"
def __str__(self):
return self.name
def __unicode__(self):
return "{}".format(self.name)
class Webhook(models.Model):
name = models.CharField(max_length=50)
incoming_webhook_url = models.CharField(max_length=300, unique=True)
icon_url = models.CharField(
max_length=250,
default="http://maffrigby.com/wp-content/uploads/2015/05/trello-icon.png",
)
username = models.CharField(max_length=30, default="Matterllo")
board = models.ManyToManyField(Board)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
ordering = ["name"]
verbose_name_plural = "webhooks"
def __str__(self):
return "{} :: {}".format(self.name, self.incoming_webhook_url)
def __unicode__(self):
return "{} :: {}".format(self.name, self.incoming_webhook_url)
class Bridge(models.Model):
EVENT_CHOICES = (
# card
("addAttachmentToCard", "addAttachmentToCard"),
("addLabelToCard", "addLabelToCard"),
("addMemberToCard", "addMemberToCard"),
("commentCard", "commentCard"),
("copyCard", "copyCard"),
("createCard", "createCard"),
("emailCard", "emailCard"),
("moveCardFromBoard", "moveCardFromBoard"),
("moveCardToBoard", "moveCardToBoard"),
("removeLabelFromCard", "removeLabelFromCard"),
("removeMemberFromCard", "removeMemberFromCard"),
(
"updateCard",
"updateCard (include moveCardToList, renameCard, renameCardDesc, updateCardDueDate, removeCardDueDate, archiveCard, unarchiveCard)",
),
# checklist
("addChecklistToCard", "addChecklistToCard"),
("createCheckItem", "createCheckItem"),
("updateCheckItemStateOnCard", "updateCheckItemStateOnCard"),
# list
("archiveList", "archiveList"),
("createList", "createList"),
("moveListFromBoard", "moveCardFromBoard"),
("moveListToBoard", "moveListToBoard"),
("renameList", "renameList"),
("updateList", "updateList"),
)
webhook = models.ForeignKey(Webhook, on_delete=models.CASCADE)
board = models.ForeignKey(Board, on_delete=models.CASCADE)
events = models.CharField(max_length=700)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
verbose_name_plural = "bridges"
def __str__(self):
return "{}::{}".format(self.board, self.webhook)
def __unicode__(self):
return "{}::{}".format(self.board, self.webhook)
def events_as_list(self):
return literal_eval(self.events)
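
# Illustrative note (assumption, not part of the original file): "events" stores
# a stringified Python list, so a round trip with a hypothetical instance looks
# like:
#   bridge.events = "['createCard', 'commentCard']"
#   bridge.events_as_list()  # -> ['createCard', 'commentCard']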
license: mit | hash: 9,003,355,092,251,951,000 | line_mean: 30.647619 | line_max: 144 | alpha_frac: 0.63858 | autogenerated: false
repo_name: solus-project/evolve-sc | path: solus_sc/main_window.py | copies: 2 | size: 8096

#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# This file is part of solus-sc
#
# Copyright © 2013-2018 Ikey Doherty <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
from .appsystem import AppSystem
from .groups import ScGroupsView
from .installed_view import ScInstalledView
from .sidebar import ScSidebar
from .updates_view import ScUpdatesView
from .basket import BasketView
from .search import ScSearchView
from .thirdparty import ThirdPartyView
from .settings_view import ScSettingsView
from gi.repository import Gtk, Gdk, GLib, Gio
import sys
import threading
class ScPlaceholderBox(Gtk.VBox):
""" So we don't show empty boxes :) """
def __init__(self):
Gtk.VBox.__init__(self)
lab = Gtk.Label("Sorry, this page is not yet implemented.")
self.add(lab)
class ScMainWindow(Gtk.ApplicationWindow):
groups_view = None
main_layout = None
sidebar = None
stack = None
sidebar_revealer = None
appsystem = None
# Pages
installed_view = None
updates_view = None
search_view = None
third_party = None
settings_view = None
prev_button = None
app = None
# Default open mode
mode_open = None
action_bar = None
did_map_once = False
def show_updates(self):
""" Switch to updates view """
self.sidebar.preselect_row("updates")
def show_search(self):
""" Switch to search view """
self.sidebar.preselect_row("search")
def do_delete_event(self, event, udata=None):
""" For now just propagate the event """
return False
def do_back(self):
""" Handle back navigation """
nom = self.stack.get_visible_child_name()
if nom == "installed":
self.installed_view.handle_back()
elif nom == "home":
self.groups_view.handle_back()
elif nom == "search":
self.search_view.handle_back()
else:
print("Shouldn't be happening boss")
def handle_back(self, btn, udata=None):
self.do_back()
def set_can_back(self, can_back):
self.prev_button.set_sensitive(can_back)
def update_back(self, nom):
""" Update back navigation """
sensitive = False
if nom == "installed":
sensitive = self.installed_view.can_back()
elif nom == "home":
sensitive = self.groups_view.can_back()
elif nom == "search":
sensitive = self.search_view.can_back()
self.set_can_back(sensitive)
def init_children(self):
self.installed_view.init_view()
# If we're not allowed to refresh on metered connections, only
# show the cached results on startup
settings = Gio.Settings.new("com.solus-project.software-center")
mon = Gio.NetworkMonitor.get_default()
if mon is not None:
can_net = settings.get_boolean("update-on-metered")
if not can_net and mon.get_network_metered():
self.updates_view.init_view()
return
GLib.idle_add(self.updates_view.external_refresh)
def init_view(self):
""" Our first ever show """
self.sidebar_revealer.set_reveal_child(True)
self.sidebar.preselect_row(self.mode_open)
self.stack.set_visible_child_name(self.mode_open)
return False
def on_mapped(self, w, udata=None):
if self.did_map_once:
return
self.did_map_once = True
GLib.timeout_add(200, self.init_view)
def on_button_press_event(self, widget, event):
if event.button == 8: # Back button
self.do_back()
def on_key_press_event(self, widget, event):
# check event modifiers
ctrl = (event.state & Gdk.ModifierType.CONTROL_MASK)
# check if search view hotkey was pressed
if ctrl and event.keyval == Gdk.keyval_from_name('f'):
self.show_search()
def __init__(self, app):
Gtk.ApplicationWindow.__init__(self, application=app)
self.app = app
self.mode_open = "home"
self.appsystem = AppSystem()
self.set_icon_name("system-software-install")
# Set up the headerbar. Because GNOME n stuff.
headerbar = Gtk.HeaderBar()
headerbar.set_show_close_button(True)
self.set_titlebar(headerbar)
self.prev_button = Gtk.Button.new_from_icon_name(
"go-previous-symbolic", Gtk.IconSize.BUTTON)
headerbar.pack_start(self.prev_button)
self.prev_button.connect("clicked", self.handle_back)
# Window title
self.set_title(_("Software Center"))
self.get_style_context().add_class("solus-sc")
self.set_position(Gtk.WindowPosition.CENTER)
self.set_default_size(950, 650)
self.stack = Gtk.Stack()
self.stack.get_style_context().add_class("main-view")
self.set_can_back(False)
# We'll add view switching later
try:
self.init_first()
except Exception as e:
print(e)
sys.exit(1)
def init_first(self):
self.basket = BasketView(self)
self.groups_view = ScGroupsView(self)
# Main horizontal layout (Sidebar|VIEW)
self.main_layout = Gtk.HBox(0)
self.add(self.main_layout)
self.sidebar = ScSidebar(self, self.stack)
self.sidebar_revealer = Gtk.Revealer()
self.sidebar_revealer.add(self.sidebar)
self.sidebar_revealer.set_reveal_child(False)
self.main_layout.pack_start(self.sidebar_revealer, False, False, 0)
sep = Gtk.Separator()
sep.set_orientation(Gtk.Orientation.VERTICAL)
sep.get_style_context().add_class("sidebar-separator")
self.main_layout.pack_start(sep, False, False, 0)
tmpvbox = Gtk.VBox(0)
tmpvbox.pack_start(self.stack, True, True, 0)
tmpvbox.pack_start(self.basket, False, False, 0)
self.main_layout.pack_start(tmpvbox, True, True, 0)
# Dummy view for first time showing the application
self.dummy_widget = Gtk.EventBox()
# Supported views
self.stack.add_titled(self.dummy_widget, "empty", "empty")
# Main view, primary view, when opening the software center
self.stack.add_titled(self.groups_view, "home", _("Home"))
self.updates_view = ScUpdatesView(self.basket, self.appsystem)
# The page where updates are displayed
self.stack.add_titled(self.updates_view, "updates", _("Updates"))
# Package view for installed page
self.installed_view = ScInstalledView(self, self.basket, self.appsystem)
# This page shows the locally installed items
self.stack.add_titled(self.installed_view, "installed", _("Installed"))
self.third_party = ThirdPartyView(self)
# Software made available from outside the Solus software repos
self.stack.add_titled(self.third_party, "3rd-party", _("Third Party"))
# Search view
self.search_view = ScSearchView(self)
# The search page
self.stack.add_titled(self.search_view, "search", _("Search"))
self.settings_view = ScSettingsView(self)
# The settings page
self.stack.add_titled(self.settings_view, "settings", _("Settings"))
# set up intro animation
self.stack.set_visible_child_name("empty")
self.stack.set_transition_type(Gtk.StackTransitionType.SLIDE_UP)
revel = Gtk.RevealerTransitionType.SLIDE_RIGHT
self.sidebar_revealer.set_transition_type(revel)
self.connect("map-event", self.on_mapped)
self.connect("button-press-event", self.on_button_press_event)
self.connect("key-press-event", self.on_key_press_event)
t = threading.Thread(target=self.init_children)
t.start()
self.show_all()
license: gpl-2.0 | hash: -6,569,056,492,320,678,000 | line_mean: 32.17623 | line_max: 80 | alpha_frac: 0.627548 | autogenerated: false
repo_name: pluyckx/kam | path: kam/modules/plugins/core/base.py | copies: 1 | size: 2017

##\package base
# \brief The base class for a core plugin.
#
# Core plugins are plugins that do not check parameters to keep the machine alive.
# They just execute some code to get a wanted behaviour.
#
# \author Philip Luyckx
# \copyright GNU Public License
# This file is part of Keep Alive Monitor (kam).
#
# Keep Alive Monitor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Keep Alive Monitor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Keep Alive Monitor. If not, see <http://www.gnu.org/licenses/>.
from kam.modules.exceptions.exceptions import KamFunctionNotImplemented
class CoreBase:
## \brief The constructor
def __init__(self):
self._enabled = False
## \brief Check if the core plugin is enabled, and call the _execute function if so.
#
# \public
def execute(self):
if self._enabled:
self._execute()
## \brief The actual implementation to execute when execute() is called
#
# Subclasses must override this function.
def _execute(self):
raise KamFunctionNotImplemented("_execute not implemented in class {0}".format(\
self.__class__.__name__))
## \brief Enable the plugin
#
# \protected
def _enable(self):
self._enabled = True
## \brief disable the plugin
#
# \protected
def _disable(self):
self._enabled = False
## \brief Check if the plugin is enabled
#
# \public
def isEnabled(self):
return self._enabled
## \brief Load the configuration
#
# \public
#
# \param config The config file in the form of a \e configparser object.
def loadConfig(self, config):
pass
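
## --- Illustrative sketch, not part of the original module ---
# The comments at the top of this file describe core plugins as code that is
# simply executed on every monitoring cycle, without checking keep-alive
# parameters.  A hypothetical subclass therefore only has to override
# _execute() and decide when to enable itself; the class name, the
# "heartbeat" config section and the log path below are made up for the
# example.
class HeartbeatLogger(CoreBase):
    ## \brief Append a line to a log file every time kam runs its main loop.
    def _execute(self):
        with open("/tmp/kam_heartbeat.log", "a") as f:
            f.write("kam is alive\n")

    ## \brief Enable the plugin only when a [heartbeat] section is configured.
    def loadConfig(self, config):
        if config.has_section("heartbeat"):
            self._enable()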
license: gpl-2.0 | hash: -6,864,702,130,165,024,000 | line_mean: 27.814286 | line_max: 85 | alpha_frac: 0.714923 | autogenerated: false
repo_name: bundgus/python-playground | path: sqlalchemy-playground/sqlalchemy-playground.py | copies: 1 | size: 4317

import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey
from sqlalchemy.sql import select
from sqlalchemy.sql import and_, or_, not_
from sqlalchemy.sql import text
from sqlalchemy import func, desc
print(sqlalchemy.__version__)
engine = create_engine('sqlite:///:memory:', echo=True)
metadata = MetaData()
users = Table('users', metadata,
Column('id', Integer, primary_key=True),
Column('name', String(50)),
Column('fullname', String(50)),
)
addresses = Table('addresses', metadata,
Column('id', Integer, primary_key=True),
Column('user_id', None, ForeignKey('users.id')),
Column('email_address', String, nullable=False)
)
metadata.create_all(engine)
conn = engine.connect()
ins = users.insert()
conn.execute(ins, name='jack', fullname='Jack Jones')
conn.execute(ins, id=2, name='wendy', fullname='Wendy Williams')
conn.execute(addresses.insert(), [
{'user_id': 1, 'email_address' : '[email protected]'},
{'user_id': 1, 'email_address' : '[email protected]'},
{'user_id': 2, 'email_address' : '[email protected]'},
{'user_id': 2, 'email_address' : '[email protected]'},
])
'''
s = select([users])
result = conn.execute(s)
for row in result:
print(row)
result = conn.execute(s)
row = result.fetchone()
print("name:", row['name'], "; fullname:", row['fullname'])
print("name:", row[1], "; fullname:", row[2])
for row in conn.execute(s):
print("name:", row[users.c.name], "; fullname:", row[users.c.fullname])
s = select([users.c.name, users.c.fullname])
result = conn.execute(s)
for row in result:
print (row)
for row in conn.execute(select([users, addresses]).where(users.c.id == addresses.c.user_id)):
print (row)
print(and_(
users.c.name.like('j%'), users.c.id == addresses.c.user_id, \
or_(
addresses.c.email_address == '[email protected]',
addresses.c.email_address == '[email protected]'
), \
not_(users.c.id > 5)))
s = select([(users.c.fullname +
", " + addresses.c.email_address).
label('title')]).\
where(
and_(
users.c.id == addresses.c.user_id,
users.c.name.between('m', 'z'),
or_(
addresses.c.email_address.like('%@aol.com'),
addresses.c.email_address.like('%@msn.com')
)
)
)
result = conn.execute(s)
for row in result:
print (row)
s = text(
"SELECT users.fullname || ', ' || addresses.email_address AS title "
"FROM users, addresses "
"WHERE users.id = addresses.user_id "
"AND users.name BETWEEN :x AND :y "
"AND (addresses.email_address LIKE :e1 "
"OR addresses.email_address LIKE :e2)")
result = conn.execute(s, x='m', y='z', e1='%@aol.com', e2='%@msn.com')
for row in result:
print (row)
stmt = select([
addresses.c.user_id,
func.count(addresses.c.id).label('num_addresses')]).\
order_by(desc("num_addresses"))
result = conn.execute(stmt)
for row in result:
print (row)
print (users.join(addresses))
print(users.join(addresses))
print(users.join(addresses,
addresses.c.email_address.like(users.c.name + '%')
))
s = select([users.c.fullname]).select_from(
users.join(addresses,
addresses.c.email_address.like(users.c.name + '%'))
)
result = conn.execute(s)
for row in result:
print(row)
s = select([users.c.fullname]).select_from(users.outerjoin(addresses))
result = conn.execute(s)
for row in result:
print(row)
stmt = select([users.c.name]).order_by(users.c.name)
result = conn.execute(stmt)
for row in result:
print(row)
stmt = select([users.c.name]).order_by(users.c.name.desc())
result = conn.execute(stmt)
for row in result:
print(row)
stmt = select([users.c.name, func.count(addresses.c.id)]).\
select_from(users.join(addresses)).\
group_by(users.c.name)
result = conn.execute(stmt)
for row in result:
print(row)
'''
stmt = users.update().\
values(fullname="Fullname: " + users.c.name)
result = conn.execute(stmt)
conn.execute(users.delete().where(users.c.name > 'm'))
# result.close()
license: mit | hash: -1,070,303,591,381,381,500 | line_mean: 26.496815 | line_max: 93 | alpha_frac: 0.604587 | autogenerated: false
repo_name: Horniman/Horniman_Hack | path: satalite/models.py | copies: 1 | size: 1056

"""Satalite date models"""
from __future__ import division, absolute_import, print_function, unicode_literals
from django.db import models
from db import models as db_models
class LogTempLive(models.Model):
"""Messages from Buoy"""
sensor = models.ForeignKey(db_models.Sensor)
processed = models.BooleanField(default = False)
data = models.CharField(max_length = 48)
time = models.DateTimeField()
#class LogTempBackup(models.Model):
# """Backup data"""
# sensor = models.ForeignKey(db_models.Sensor)
# byyy = models.CharField(max_length=4)
# bm = models.CharField(max_length=2)
# bd = models.CharField(max_length=2)
# bh = models.CharField(max_length=2)
# eyyy = models.CharField(max_length=4)
# em = models.CharField(max_length=2)
# ed = models.CharField(max_length=2)
# eh = models.CharField(max_length=2)
# sst = models.FloatField()
# sstanom = models.FloatField()
# hotspot = models.FloatField()
# dhw = models.FloatField()
# lat = models.FloatField()
# long = models.FloatField()
license: gpl-2.0 | hash: 3,972,958,697,875,750,400 | line_mean: 34.2 | line_max: 82 | alpha_frac: 0.686553 | autogenerated: false
repo_name: ElementalAlchemist/txircd | path: txircd/modules/rfc/cmd_topic.py | copies: 1 | size: 4733

from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd.config import ConfigValidationError
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from txircd.utils import timestamp
from zope.interface import implements
from datetime import datetime
irc.RPL_TOPICWHOTIME = "333"
class TopicCommand(ModuleData):
implements(IPlugin, IModuleData)
name = "TopicCommand"
core = True
def actions(self):
return [ ("topic", 1, self.onTopic),
("join", 2, self.sendChannelTopic),
("buildisupport", 1, self.buildISupport) ]
def userCommands(self):
return [ ("TOPIC", 1, UserTopic(self.ircd, self)) ]
def serverCommands(self):
return [ ("TOPIC", 1, ServerTopic(self.ircd)) ]
def verifyConfig(self, config):
if "topic_length" in config:
if not isinstance(config["topic_length"], int) or config["topic_length"] < 0:
raise ConfigValidationError("topic_length", "invalid number")
elif config["topic_length"] > 326:
config["topic_length"] = 326
self.ircd.logConfigValidationWarning("topic_length", "value is too large", 326)
def onTopic(self, channel, setter, oldTopic):
userSource = setter in self.ircd.users
if userSource:
sourceUser = self.ircd.users[setter]
conditionalTags = {}
self.ircd.runActionStandard("sendingusertags", sourceUser, conditionalTags)
for user in channel.users.iterkeys():
if user.uuid[:3] == self.ircd.serverID:
tags = {}
if userSource:
tags = user.filterConditionalTags(conditionalTags)
user.sendMessage("TOPIC", channel.topic, to=channel.name, prefix=channel.topicSetter, tags=tags)
sourceServer = None
if userSource and setter[:3] == self.ircd.serverID:
if sourceUser not in channel.users:
tags = sourceUser.filterConditionalTags(conditionalTags)
sourceUser.sendMessage("TOPIC", channel.topic, to=channel.name, prefix=channel.topicSetter, tags=tags)
elif setter != self.ircd.serverID:
sourceServer = self.ircd.servers[setter[:3]]
while sourceServer.nextClosest != self.ircd.serverID:
sourceServer = self.ircd.servers[sourceServer.nextClosest]
self.ircd.broadcastToServers(sourceServer, "TOPIC", channel.name, str(timestamp(channel.existedSince)), str(timestamp(channel.topicTime)), channel.topic, prefix=setter)
def sendChannelTopic(self, channel, user):
if not channel.topic:
user.sendMessage(irc.RPL_NOTOPIC, channel.name, "No topic is set")
else:
user.sendMessage(irc.RPL_TOPIC, channel.name, channel.topic)
user.sendMessage(irc.RPL_TOPICWHOTIME, channel.name, channel.topicSetter, str(timestamp(channel.topicTime)))
def buildISupport(self, data):
data["TOPICLEN"] = self.ircd.config.get("topic_length", 326)
class UserTopic(Command):
implements(ICommand)
def __init__(self, ircd, module):
self.ircd = ircd
self.module = module
def parseParams(self, user, params, prefix, tags):
if not params:
user.sendSingleError("TopicCmd", irc.ERR_NEEDMOREPARAMS, "TOPIC", "Not enough parameters")
return None
if params[0] not in self.ircd.channels:
user.sendSingleError("TopicCmd", irc.ERR_NOSUCHCHANNEL, params[0], "No such channel")
return None
channel = self.ircd.channels[params[0]]
if len(params) == 1:
return {
"channel": channel
}
topic = params[1][:self.ircd.config.get("topic_length", 326)]
return {
"channel": channel,
"topic": topic
}
def affectedChannels(self, user, data):
return [ data["channel"] ]
def execute(self, user, data):
if "topic" in data:
data["channel"].setTopic(data["topic"], user.uuid)
else:
self.module.sendChannelTopic(data["channel"], user)
return True
class ServerTopic(Command):
implements(ICommand)
def __init__(self, ircd):
self.ircd = ircd
def parseParams(self, server, params, prefix, tags):
if len(params) != 4:
return None
if params[0] not in self.ircd.channels:
if params[0] in self.ircd.recentlyDestroyedChannels:
return {
"lostchannel": True
}
return None
return {
"source": prefix,
"channel": self.ircd.channels[params[0]],
"chantime": datetime.utcfromtimestamp(int(params[1])),
"topictime": datetime.utcfromtimestamp(int(params[2])),
"topic": params[3]
}
def execute(self, server, data):
if "lostchannel" in data:
return True
channel = data["channel"]
if data["chantime"] > channel.existedSince: # Don't set the topic when our channel overrides
return True # Assume handled by our ignoring of it
if channel.topic and data["topictime"] <= channel.topicTime:
return True # Don't set the topic when our topic overrides
if channel.setTopic(data["topic"], data["source"]):
return True
return None
topicCommand = TopicCommand()

license: bsd-3-clause | hash: -808,310,799,892,986,600 | line_mean: 33.057554 | line_max: 170 | alpha_frac: 0.71498 | autogenerated: false
repo_name: masaohamanaka/mbed | path: workspace_tools/targets.py | copies: 1 | size: 59025

"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
CORE_LABELS = {
"ARM7TDMI-S": ["ARM7"],
"Cortex-M0" : ["M0", "CORTEX_M"],
"Cortex-M0+": ["M0P", "CORTEX_M"],
"Cortex-M1" : ["M1", "CORTEX_M"],
"Cortex-M3" : ["M3", "CORTEX_M"],
"Cortex-M4" : ["M4", "CORTEX_M"],
"Cortex-M4F" : ["M4", "CORTEX_M"],
"Cortex-M7" : ["M7", "CORTEX_M"],
"Cortex-M7F" : ["M7", "CORTEX_M"],
"Cortex-A9" : ["A9", "CORTEX_A"]
}
import os
import binascii
import struct
import shutil
from workspace_tools.patch import patch
from paths import TOOLS_BOOTLOADERS
class Target:
def __init__(self):
# ARM Core
self.core = None
# Is the disk provided by the interface chip of this board virtual?
self.is_disk_virtual = False
# list of toolchains that are supported by the mbed SDK for this target
self.supported_toolchains = None
# list of extra specific labels
self.extra_labels = []
# list of macros (-D)
self.macros = []
# Default online compiler:
self.default_toolchain = "ARM"
self.name = self.__class__.__name__
# Code used to determine the device's platform
# This code is the prefix of the URL link provided in mbed.htm (on the mbed disk)
self.detect_code = []
def program_cycle_s(self):
return 4 if self.is_disk_virtual else 1.5
def get_labels(self):
return [self.name] + CORE_LABELS[self.core] + self.extra_labels
def init_hooks(self, hook, toolchain_name):
pass
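
# Illustrative note (assumption, not part of the original file): the label list
# returned by get_labels() is what the build tools typically turn into TARGET_*
# defines.  For example, with the LPC1768 definition further below:
#
#     LPC1768().get_labels()
#     # -> ['LPC1768', 'M3', 'CORTEX_M', 'NXP', 'LPC176X', 'MBED_LPC1768']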
### MCU Support ###
class CM4_UARM(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4"
self.supported_toolchains = ["uARM"]
self.default_toolchain = "uARM"
class CM4_ARM(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4"
self.supported_toolchains = ["ARM"]
self.default_toolchain = "ARM"
class CM4F_UARM(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.supported_toolchains = ["uARM"]
self.default_toolchain = "uARM"
class CM4F_ARM(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.supported_toolchains = ["ARM"]
self.default_toolchain = "ARM"
### NXP ###
# This class implements the post-link patching step needed by LPC targets
class LPCTarget(Target):
def __init__(self):
Target.__init__(self)
def init_hooks(self, hook, toolchain_name):
hook.hook_add_binary("post", self.lpc_patch)
@staticmethod
def lpc_patch(t_self, resources, elf, binf):
t_self.debug("LPC Patch: %s" % os.path.split(binf)[1])
patch(binf)
class LPC11C24(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11XX_11CXX', 'LPC11CXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
class LPC1114(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11XX_11CXX', 'LPC11XX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR", "IAR"]
self.default_toolchain = "uARM"
class LPC11U24(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX', 'LPC11U24_401']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.detect_code = ["1040"]
class OC_MBUINO(LPC11U24):
def __init__(self):
LPC11U24.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX']
self.macros = ['TARGET_LPC11U24']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
class LPC11U24_301(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
class LPC11U34_421(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM"]
self.default_toolchain = "uARM"
class MICRONFCBOARD(LPC11U34_421):
def __init__(self):
LPC11U34_421.__init__(self)
self.macros = ['LPC11U34_421', 'APPNEARME_MICRONFCBOARD']
self.extra_labels = ['NXP', 'LPC11UXX', 'APPNEARME_MICRONFCBOARD']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM"]
self.default_toolchain = "uARM"
class LPC11U35_401(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR", "IAR"]
self.default_toolchain = "uARM"
class LPC11U35_501(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX', 'MCU_LPC11U35_501']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR" , "IAR"]
self.default_toolchain = "uARM"
class LPC11U35_501_IBDAP(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX', 'MCU_LPC11U35_501']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR" , "IAR"]
self.default_toolchain = "uARM"
class XADOW_M0(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX', 'MCU_LPC11U35_501']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR", "IAR"]
self.default_toolchain = "uARM"
class LPC11U35_Y5_MBUG(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX', 'MCU_LPC11U35_501']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR" , "IAR"]
self.default_toolchain = "uARM"
class LPC11U37_501(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR", "IAR"]
self.default_toolchain = "uARM"
class LPCCAPPUCCINO(LPC11U37_501):
def __init__(self):
LPC11U37_501.__init__(self)
class ARCH_GPRS(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX', 'LPC11U37_501']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
class LPC11U68(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['NXP', 'LPC11U6X']
self.supported_toolchains = ["ARM", "uARM", "GCC_CR", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
self.detect_code = ["1168"]
class LPC1347(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC13XX']
self.supported_toolchains = ["ARM", "GCC_ARM","IAR"]
class LPC1549(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC15XX']
self.supported_toolchains = ["uARM", "GCC_CR", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
self.detect_code = ["1549"]
class LPC1768(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC176X', 'MBED_LPC1768']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CS", "GCC_CR", "IAR"]
self.detect_code = ["1010"]
class ARCH_PRO(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC176X']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CS", "GCC_CR", "IAR"]
self.macros = ['TARGET_LPC1768']
self.supported_form_factors = ["ARDUINO"]
class UBLOX_C027(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC176X']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CS", "GCC_CR", "IAR"]
self.macros = ['TARGET_LPC1768']
self.supported_form_factors = ["ARDUINO"]
class XBED_LPC1768(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC176X', 'XBED_LPC1768']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CS", "GCC_CR", "IAR"]
self.macros = ['TARGET_LPC1768']
self.detect_code = ["1010"]
class LPC2368(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "ARM7TDMI-S"
self.extra_labels = ['NXP', 'LPC23XX']
self.supported_toolchains = ["ARM", "GCC_ARM", "GCC_CR"]
class LPC2460(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "ARM7TDMI-S"
self.extra_labels = ['NXP', 'LPC2460']
self.supported_toolchains = ["GCC_ARM"]
class LPC810(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['NXP', 'LPC81X']
self.supported_toolchains = ["uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.is_disk_virtual = True
class LPC812(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['NXP', 'LPC81X']
self.supported_toolchains = ["uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
self.detect_code = ["1050"]
class LPC824(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['NXP', 'LPC82X']
self.supported_toolchains = ["uARM", "GCC_ARM","GCC_CR", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
class SSCI824(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['NXP', 'LPC82X']
self.supported_toolchains = ["uARM", "GCC_ARM"]
self.default_toolchain = "uARM"
self.is_disk_virtual = True
class LPC4088(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['NXP', 'LPC408X']
self.supported_toolchains = ["ARM", "GCC_CR", "GCC_ARM", "IAR"]
self.is_disk_virtual = True
def init_hooks(self, hook, toolchain_name):
if toolchain_name in ['ARM_STD', 'ARM_MICRO']:
hook.hook_add_binary("post", self.binary_hook)
@staticmethod
def binary_hook(t_self, resources, elf, binf):
if not os.path.isdir(binf):
# Regular binary file, nothing to do
LPCTarget.lpc_patch(t_self, resources, elf, binf)
return
outbin = open(binf + ".temp", "wb")
partf = open(os.path.join(binf, "ER_IROM1"), "rb")
# Pad the first part (internal flash) with 0xFF to 512k
data = partf.read()
outbin.write(data)
outbin.write('\xFF' * (512*1024 - len(data)))
partf.close()
# Read and append the second part (external flash) in chunks of fixed size
chunksize = 128 * 1024
partf = open(os.path.join(binf, "ER_IROM2"), "rb")
while True:
data = partf.read(chunksize)
outbin.write(data)
if len(data) < chunksize:
break
partf.close()
outbin.close()
# Remove the directory with the binary parts and rename the temporary
# file to 'binf'
shutil.rmtree(binf, True)
os.rename(binf + '.temp', binf)
t_self.debug("Generated custom binary file (internal flash + SPIFI)")
LPCTarget.lpc_patch(t_self, resources, elf, binf)
class LPC4088_DM(LPC4088):
pass
class LPC4330_M4(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['NXP', 'LPC43XX', 'LPC4330']
self.supported_toolchains = ["ARM", "GCC_CR", "IAR", "GCC_ARM"]
class LPC4330_M0(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC43XX', 'LPC4330']
self.supported_toolchains = ["ARM", "GCC_CR", "IAR"]
class LPC4337(LPCTarget):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['NXP', 'LPC43XX', 'LPC4337']
self.supported_toolchains = ["ARM"]
class LPC1800(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['NXP', 'LPC43XX']
self.supported_toolchains = ["ARM", "GCC_CR", "IAR"]
class LPC11U37H_401(LPCTarget):
def __init__(self):
LPCTarget.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['NXP', 'LPC11UXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "GCC_CR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
### Freescale ###
class KL05Z(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Freescale', 'KLXX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
class KL25Z(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Freescale', 'KLXX']
self.supported_toolchains = ["ARM", "GCC_CW_EWL", "GCC_CW_NEWLIB", "GCC_ARM","IAR"]
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
self.detect_code = ["0200"]
class KL26Z(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Freescale', 'KLXX']
self.supported_toolchains = ["ARM","GCC_ARM","IAR"]
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
class KL43Z(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Freescale', 'KLXX']
self.supported_toolchains = ["GCC_ARM", "ARM"]
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
class KL46Z(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Freescale', 'KLXX']
self.supported_toolchains = ["GCC_ARM", "ARM", "IAR"]
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
self.detect_code = ["0220"]
class K20D50M(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4"
self.extra_labels = ['Freescale', 'K20XX']
self.supported_toolchains = ["GCC_ARM", "ARM", "IAR"]
self.is_disk_virtual = True
self.detect_code = ["0230"]
class TEENSY3_1(Target):
OUTPUT_EXT = 'hex'
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4"
self.extra_labels = ['Freescale', 'K20XX', 'K20DX256']
self.supported_toolchains = ["GCC_ARM", "ARM"]
self.is_disk_virtual = True
self.detect_code = ["0230"]
def init_hooks(self, hook, toolchain_name):
if toolchain_name in ['ARM_STD', 'ARM_MICRO', 'GCC_ARM']:
hook.hook_add_binary("post", self.binary_hook)
@staticmethod
def binary_hook(t_self, resources, elf, binf):
from intelhex import IntelHex
binh = IntelHex()
binh.loadbin(binf, offset = 0)
with open(binf.replace(".bin", ".hex"), "w") as f:
binh.tofile(f, format='hex')
class K22F(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['Freescale', 'KPSDK_MCUS', 'KPSDK_CODE']
self.macros = ["CPU_MK22FN512VLH12", "FSL_RTOS_MBED"]
self.supported_toolchains = ["ARM", "GCC_ARM", "IAR"]
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
self.detect_code = ["0201"]
class K64F(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['Freescale', 'KPSDK_MCUS', 'KPSDK_CODE', 'MCU_K64F', 'FRDM']
self.macros = ["CPU_MK64FN1M0VMD12", "FSL_RTOS_MBED"]
self.supported_toolchains = ["ARM", "GCC_ARM", "IAR"]
self.supported_form_factors = ["ARDUINO"]
self.is_disk_virtual = True
self.default_toolchain = "ARM"
self.detect_code = ["0240"]
class MTS_GAMBIT(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['Freescale', 'KPSDK_MCUS', 'KPSDK_CODE', 'MCU_K64F']
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.macros = ["CPU_MK64FN1M0VMD12", "FSL_RTOS_MBED", "TARGET_K64F"]
self.is_disk_virtual = True
self.default_toolchain = "ARM"
### STMicro ###
class NUCLEO_F030R8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['STM', 'STM32F0', 'STM32F030R8']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0725"]
class NUCLEO_F070RB(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['STM', 'STM32F0', 'STM32F070RB']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0755"]
class NUCLEO_F072RB(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['STM', 'STM32F0', 'STM32F072RB']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0730"]
class NUCLEO_F091RC(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['STM', 'STM32F0', 'STM32F091RC']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0750"]
class NUCLEO_F103RB(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['STM', 'STM32F1', 'STM32F103RB']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0700"]
class NUCLEO_F302R8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F3', 'STM32F302R8']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0705"]
class NUCLEO_F303RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F3', 'STM32F303RE']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0745"]
class NUCLEO_F334R8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F3', 'STM32F334R8']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0735"]
class NUCLEO_F401RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F401RE']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0720"]
class NUCLEO_F411RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F411RE']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0740"]
class NUCLEO_F446RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F446RE']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0777"]
class NUCLEO_L053R8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['STM', 'STM32L0', 'STM32L053R8']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0715"]
class NUCLEO_L073RZ(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['STM', 'STM32L0', 'STM32L073RZ']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0760"]
class NUCLEO_L152RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['STM', 'STM32L1', 'STM32L152RE']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO", "MORPHO"]
self.detect_code = ["0710"]
class STM32F3XX(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4"
self.extra_labels = ['STM', 'STM32F3XX']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM"]
self.default_toolchain = "uARM"
class STM32F407(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F4XX']
self.supported_toolchains = ["ARM", "GCC_ARM", "IAR"]
class ARCH_MAX(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F407', 'STM32F407VG']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM"]
self.supported_form_factors = ["ARDUINO"]
self.macros = ['LSI_VALUE=32000']
def program_cycle_s(self):
return 2
class DISCO_F051R8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['STM', 'STM32F0', 'STM32F051', 'STM32F051R8']
self.supported_toolchains = ["GCC_ARM"]
self.default_toolchain = "uARM"
class DISCO_F100RB(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['STM', 'STM32F1', 'STM32F100RB']
self.supported_toolchains = ["GCC_ARM"]
self.default_toolchain = "uARM"
class DISCO_F303VC(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F3', 'STM32F303', 'STM32F303VC']
self.supported_toolchains = ["GCC_ARM"]
self.default_toolchain = "uARM"
class DISCO_F334C8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F3', 'STM32F334C8']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.detect_code = ["0810"]
class DISCO_F407VG(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F407', 'STM32F407VG']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM"]
class DISCO_F429ZI(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F429', 'STM32F429ZI']
self.supported_toolchains = ["GCC_ARM", "IAR"]
self.default_toolchain = "GCC_ARM"
class DISCO_L053C8(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['STM', 'STM32L0', 'STM32L053C8']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
class DISCO_F746NG(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M7"
self.extra_labels = ['STM', 'STM32F7', 'STM32F746', 'STM32F746NG']
self.supported_toolchains = ["ARM", "uARM", "IAR"]
self.default_toolchain = "uARM"
self.detect_code = ["0815"]
class DISCO_L476VG(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32L4', 'STM32L476VG']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.detect_code = ["0820"]
class MTS_MDOT_F405RG(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F405RG']
self.macros = ['HSE_VALUE=26000000', 'OS_CLOCK=48000000']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.is_disk_virtual = True
self.default_toolchain = "ARM"
class MTS_MDOT_F411RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F411RE']
self.macros = ['HSE_VALUE=26000000', 'OS_CLOCK=96000000', 'USE_PLL_HSE_EXTC=0', 'VECT_TAB_OFFSET=0x00010000']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "ARM"
def init_hooks(self, hook, toolchain_name):
if toolchain_name in ['GCC_ARM', 'ARM_STD', 'ARM_MICRO']:
hook.hook_add_binary("post", self.combine_bins)
# combine application binary with bootloader
# bootloader + padding to 64kB + application + crc32 (4 bytes)
@staticmethod
def combine_bins(t_self, resources, elf, binf):
loader = os.path.join(TOOLS_BOOTLOADERS, "MTS_MDOT_F411RE", "bootloader.bin")
target = binf + ".tmp"
if not os.path.exists(loader):
print "Can't find bootloader binary: " + loader
return
outbin = open(target, 'w+b')
part = open(loader, 'rb')
data = part.read()
outbin.write(data)
outbin.write('\xFF' * (64*1024 - len(data)))
part.close()
part = open(binf, 'rb')
data = part.read()
outbin.write(data)
part.close()
outbin.seek(0, 0)
data = outbin.read()
outbin.seek(0, 1)
crc = struct.pack('<I', binascii.crc32(data) & 0xFFFFFFFF)
outbin.write(crc)
outbin.close()
os.remove(binf)
os.rename(target, binf)
class MTS_DRAGONFLY_F411RE(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F411RE']
self.macros = ['HSE_VALUE=26000000', 'VECT_TAB_OFFSET=0x08010000']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "ARM"
def init_hooks(self, hook, toolchain_name):
if toolchain_name in ['GCC_ARM', 'ARM_STD', 'ARM_MICRO']:
hook.hook_add_binary("post", self.combine_bins)
# combine application binary with bootloader
# bootloader + padding to 64kB + application + crc32 (4 bytes)
@staticmethod
def combine_bins(t_self, resources, elf, binf):
loader = os.path.join(TOOLS_BOOTLOADERS, "MTS_DRAGONFLY_F411RE", "bootloader.bin")
target = binf + ".tmp"
if not os.path.exists(loader):
print "Can't find bootloader binary: " + loader
return
outbin = open(target, 'w+b')
part = open(loader, 'rb')
data = part.read()
outbin.write(data)
outbin.write('\xFF' * (64*1024 - len(data)))
part.close()
part = open(binf, 'rb')
data = part.read()
outbin.write(data)
part.close()
outbin.seek(0, 0)
data = outbin.read()
outbin.seek(0, 1)
crc = struct.pack('<I', binascii.crc32(data) & 0xFFFFFFFF)
outbin.write(crc)
outbin.close()
os.remove(binf)
os.rename(target, binf)
class MOTE_L152RC(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['STM', 'STM32L1', 'STM32L152RC']
self.supported_toolchains = ["ARM", "uARM", "IAR", "GCC_ARM"]
self.default_toolchain = "uARM"
self.detect_code = ["4100"]
class DISCO_F401VC(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F401', 'STM32F401VC']
self.supported_toolchains = ["GCC_ARM"]
self.default_toolchain = "GCC_ARM"
class UBLOX_C029(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['STM', 'STM32F4', 'STM32F439', 'STM32F439ZI']
self.macros = ['HSE_VALUE=24000000', 'HSE_STARTUP_TIMEOUT=5000']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM", "IAR"]
self.default_toolchain = "uARM"
self.supported_form_factors = ["ARDUINO"]
class NZ32SC151(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['STM', 'STM32L1', 'STM32L151RC']
self.supported_toolchains = ["ARM", "uARM", "GCC_ARM"]
self.default_toolchain = "uARM"
# After flashing device, how long to delay until we assume program is running
def program_cycle_s(self):
return 1.5
### Nordic ###
class MCU_NRF51(Target):
# the following is a list of possible Nordic softdevices in decreasing order
# of preference.
EXPECTED_SOFTDEVICES_WITH_OFFSETS = [
{
'name' : 's130_nrf51_1.0.0_softdevice.hex',
'boot' : 's130_nrf51_1.0.0_bootloader.hex',
'offset' : 0x1C000
},
{
'name' : 's110_nrf51822_8.0.0_softdevice.hex',
'boot' : 's110_nrf51822_8.0.0_bootloader.hex',
'offset' : 0x18000
},
{
'name' : 's110_nrf51822_7.1.0_softdevice.hex',
'boot' : 's110_nrf51822_7.1.0_bootloader.hex',
'offset' : 0x16000
},
{
'name' : 's110_nrf51822_7.0.0_softdevice.hex',
'boot' : 's110_nrf51822_7.0.0_bootloader.hex',
'offset' : 0x16000
},
{
'name' : 's110_nrf51822_6.0.0_softdevice.hex',
'boot' : 's110_nrf51822_6.0.0_bootloader.hex',
'offset' : 0x14000
}
]
OVERRIDE_BOOTLOADER_FILENAME = "nrf51822_bootloader.hex"
OUTPUT_EXT = 'hex'
MERGE_SOFT_DEVICE = True
MERGE_BOOTLOADER = False
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ["NORDIC", "MCU_NRF51", "MCU_NRF51822"]
self.macros = ['NRF51', 'TARGET_NRF51822']
self.supported_toolchains = ["ARM", "GCC_ARM", "IAR"]
self.is_disk_virtual = True
self.detect_code = ["1070"]
def program_cycle_s(self):
return 6
def init_hooks(self, hook, toolchain_name):
if toolchain_name in ['ARM_STD', 'ARM_MICRO', 'GCC_ARM', 'IAR']:
hook.hook_add_binary("post", self.binary_hook)
@staticmethod
def binary_hook(t_self, resources, elf, binf):
# Scan to find the actual paths of soft device
sdf = None
for softdeviceAndOffsetEntry in t_self.target.EXPECTED_SOFTDEVICES_WITH_OFFSETS:
for hexf in resources.hex_files:
if hexf.find(softdeviceAndOffsetEntry['name']) != -1:
t_self.debug("SoftDevice file found %s." % softdeviceAndOffsetEntry['name'])
sdf = hexf
if sdf is not None: break
if sdf is not None: break
if sdf is None:
t_self.debug("Hex file not found. Aborting.")
return
# Look for bootloader file that matches this soft device or bootloader override image
blf = None
if t_self.target.MERGE_BOOTLOADER is True:
for hexf in resources.hex_files:
if hexf.find(t_self.target.OVERRIDE_BOOTLOADER_FILENAME) != -1:
t_self.debug("Bootloader file found %s." % t_self.target.OVERRIDE_BOOTLOADER_FILENAME)
blf = hexf
break
elif hexf.find(softdeviceAndOffsetEntry['boot']) != -1:
t_self.debug("Bootloader file found %s." % softdeviceAndOffsetEntry['boot'])
blf = hexf
break
# Merge user code with softdevice
from intelhex import IntelHex
binh = IntelHex()
binh.loadbin(binf, offset=softdeviceAndOffsetEntry['offset'])
if t_self.target.MERGE_SOFT_DEVICE is True:
t_self.debug("Merge SoftDevice file %s" % softdeviceAndOffsetEntry['name'])
sdh = IntelHex(sdf)
binh.merge(sdh)
if t_self.target.MERGE_BOOTLOADER is True and blf is not None:
t_self.debug("Merge BootLoader file %s" % blf)
blh = IntelHex(blf)
binh.merge(blh)
with open(binf.replace(".bin", ".hex"), "w") as f:
binh.tofile(f, format='hex')
# 16KB Nordic targets are tight on SRAM using S130 (default) so we
# introduce two possible options:
# 1) Use S130 (default) - for this derive from MCU_NRF51_16K
# 2) Use S110 - for this derive from MCU_NRF51_16K_S110
# Note that the 'default' option will track the default choice
# for other Nordic targets, and so can take advantage of other
# future SoftDevice improvements
# The *_BASE targets should *not* be inherited from, as they do not
# specify enough for building a target
# 16KB MCU version, e.g. Nordic nRF51822, Seeed Arch BLE, etc.
class MCU_NRF51_16K_BASE(MCU_NRF51):
def __init__(self):
MCU_NRF51.__init__(self)
self.extra_labels += ['MCU_NORDIC_16K', 'MCU_NRF51_16K']
self.macros += ['TARGET_MCU_NORDIC_16K', 'TARGET_MCU_NRF51_16K']
# derivative class used to create softdevice+bootloader enabled images
class MCU_NRF51_16K_BOOT_BASE(MCU_NRF51_16K_BASE):
def __init__(self):
MCU_NRF51_16K_BASE.__init__(self)
self.extra_labels += ['MCU_NRF51_16K_BOOT']
self.macros += ['TARGET_MCU_NRF51_16K_BOOT', 'TARGET_OTA_ENABLED']
self.MERGE_SOFT_DEVICE = True
self.MERGE_BOOTLOADER = True
# derivative class used to create program only images for use with FOTA
class MCU_NRF51_16K_OTA_BASE(MCU_NRF51_16K_BASE):
def __init__(self):
MCU_NRF51_16K_BASE.__init__(self)
self.extra_labels += ['MCU_NRF51_16K_OTA']
self.macros += ['TARGET_MCU_NRF51_16K_OTA', 'TARGET_OTA_ENABLED']
self.MERGE_SOFT_DEVICE = False
class MCU_NRF51_16K(MCU_NRF51_16K_BASE):
def __init__(self):
MCU_NRF51_16K_BASE.__init__(self)
self.extra_labels += ['MCU_NRF51_16K_S130']
self.macros += ['TARGET_MCU_NRF51_16K_S130']
class MCU_NRF51_16K_S110(MCU_NRF51_16K_BASE):
def __init__(self):
MCU_NRF51_16K_BASE.__init__(self)
self.extra_labels += ['MCU_NRF51_16K_S110']
self.macros += ['TARGET_MCU_NRF51_16K_S110']
class MCU_NRF51_16K_BOOT(MCU_NRF51_16K_BOOT_BASE):
def __init__(self):
MCU_NRF51_16K_BOOT_BASE.__init__(self)
self.extra_labels += ['MCU_NRF51_16K_S130']
self.macros += ['TARGET_MCU_NRF51_16K_S130']
class MCU_NRF51_16K_BOOT_S110(MCU_NRF51_16K_BOOT_BASE):
def __init__(self):
MCU_NRF51_16K_BOOT_BASE.__init__(self)
self.extra_labels += ['MCU_NRF51_16K_S110']
self.macros += ['TARGET_MCU_NRF51_16K_S110']
class MCU_NRF51_16K_OTA(MCU_NRF51_16K_OTA_BASE):
def __init__(self):
MCU_NRF51_16K_OTA_BASE.__init__(self)
self.extra_labels += ['MCU_NRF51_16K_S130']
self.macros += ['TARGET_MCU_NRF51_16K_S130']
class MCU_NRF51_16K_OTA_S110(MCU_NRF51_16K_OTA_BASE):
def __init__(self):
MCU_NRF51_16K_OTA_BASE.__init__(self)
self.extra_labels += ['MCU_NRF51_16K_S110']
self.macros += ['TARGET_MCU_NRF51_16K_S110']
# 32KB MCU version, e.g. Nordic nRF51-DK, nRF51-Dongle, etc.
class MCU_NRF51_32K(MCU_NRF51):
def __init__(self):
MCU_NRF51.__init__(self)
self.extra_labels += ['MCU_NORDIC_32K', 'MCU_NRF51_32K']
self.macros += ['TARGET_MCU_NORDIC_32K', 'TARGET_MCU_NRF51_32K']
class MCU_NRF51_32K_BOOT(MCU_NRF51_32K):
def __init__(self):
MCU_NRF51_32K.__init__(self)
self.extra_labels += ['MCU_NRF51_32K_BOOT']
self.macros += ['TARGET_MCU_NRF51_32K_BOOT', 'TARGET_OTA_ENABLED']
self.MERGE_SOFT_DEVICE = True
self.MERGE_BOOTLOADER = True
class MCU_NRF51_32K_OTA(MCU_NRF51_32K):
def __init__(self):
MCU_NRF51_32K.__init__(self)
self.extra_labels += ['MCU_NRF51_32K_OTA']
self.macros += ['TARGET_MCU_NRF51_32K_OTA', 'TARGET_OTA_ENABLED']
self.MERGE_SOFT_DEVICE = False
#
# nRF51 based development kits
#
# This one is special for legacy reasons
class NRF51822(MCU_NRF51_16K):
def __init__(self):
MCU_NRF51_16K.__init__(self)
self.extra_labels += ['NRF51822', 'NRF51822_MKIT']
self.macros += ['TARGET_NRF51822_MKIT']
class NRF51822_BOOT(MCU_NRF51_16K_BOOT):
def __init__(self):
MCU_NRF51_16K_BOOT.__init__(self)
self.extra_labels += ['NRF51822', 'NRF51822_MKIT']
self.macros += ['TARGET_NRF51822_MKIT']
class NRF51822_OTA(MCU_NRF51_16K_OTA):
def __init__(self):
MCU_NRF51_16K_OTA.__init__(self)
self.extra_labels += ['NRF51822', 'NRF51822_MKIT']
self.macros += ['TARGET_NRF51822_MKIT']
class ARCH_BLE(MCU_NRF51_16K):
def __init__(self):
MCU_NRF51_16K.__init__(self)
self.supported_form_factors = ["ARDUINO"]
class ARCH_BLE_BOOT(MCU_NRF51_16K_BOOT):
def __init__(self):
MCU_NRF51_16K_BOOT.__init__(self)
self.extra_labels += ['ARCH_BLE']
self.macros += ['TARGET_ARCH_BLE']
self.supported_form_factors = ["ARDUINO"]
class ARCH_BLE_OTA(MCU_NRF51_16K_OTA):
def __init__(self):
MCU_NRF51_16K_OTA.__init__(self)
self.extra_labels += ['ARCH_BLE']
self.macros += ['TARGET_ARCH_BLE']
self.supported_form_factors = ["ARDUINO"]
class ARCH_LINK(MCU_NRF51_16K):
def __init__(self):
MCU_NRF51_16K.__init__(self)
self.extra_labels += ['ARCH_BLE']
self.macros += ['TARGET_ARCH_BLE']
self.supported_form_factors = ["ARDUINO"]
class ARCH_LINK_BOOT(MCU_NRF51_16K_BOOT):
def __init__(self):
MCU_NRF51_16K_BOOT.__init__(self)
self.extra_labels += ['ARCH_BLE', 'ARCH_LINK']
self.macros += ['TARGET_ARCH_BLE', 'TARGET_ARCH_LINK']
self.supported_form_factors = ["ARDUINO"]
class ARCH_LINK_OTA(MCU_NRF51_16K_OTA):
def __init__(self):
MCU_NRF51_16K_OTA.__init__(self)
self.extra_labels += ['ARCH_BLE', 'ARCH_LINK']
self.macros += ['TARGET_ARCH_BLE', 'TARGET_ARCH_LINK']
self.supported_form_factors = ["ARDUINO"]
class SEEED_TINY_BLE(MCU_NRF51_16K):
def __init__(self):
MCU_NRF51_16K.__init__(self)
class SEEED_TINY_BLE_BOOT(MCU_NRF51_16K_BOOT):
def __init__(self):
MCU_NRF51_16K_BOOT.__init__(self)
self.extra_labels += ['SEEED_TINY_BLE']
self.macros += ['TARGET_SEEED_TINY_BLE']
class SEEED_TINY_BLE_OTA(MCU_NRF51_16K_OTA):
def __init__(self):
MCU_NRF51_16K_OTA.__init__(self)
self.extra_labels += ['SEEED_TINY_BLE']
self.macros += ['TARGET_SEEED_TINY_BLE']
class HRM1017(MCU_NRF51_16K):
def __init__(self):
MCU_NRF51_16K.__init__(self)
self.macros += ['TARGET_NRF_LFCLK_RC']
class HRM1017_BOOT(MCU_NRF51_16K_BOOT):
def __init__(self):
MCU_NRF51_16K_BOOT.__init__(self)
self.extra_labels += ['HRM1017']
self.macros += ['TARGET_HRM1017', 'TARGET_NRF_LFCLK_RC']
class HRM1017_OTA(MCU_NRF51_16K_OTA):
def __init__(self):
MCU_NRF51_16K_OTA.__init__(self)
self.extra_labels += ['HRM1017']
self.macros += ['TARGET_HRM1017', 'TARGET_NRF_LFCLK_RC']
class RBLAB_NRF51822(MCU_NRF51_16K):
def __init__(self):
MCU_NRF51_16K.__init__(self)
self.supported_form_factors = ["ARDUINO"]
class RBLAB_NRF51822_BOOT(MCU_NRF51_16K_BOOT):
def __init__(self):
MCU_NRF51_16K_BOOT.__init__(self)
self.extra_labels += ['RBLAB_NRF51822']
self.macros += ['TARGET_RBLAB_NRF51822']
self.supported_form_factors = ["ARDUINO"]
class RBLAB_NRF51822_OTA(MCU_NRF51_16K_OTA):
def __init__(self):
MCU_NRF51_16K_OTA.__init__(self)
self.extra_labels += ['RBLAB_NRF51822']
self.macros += ['TARGET_RBLAB_NRF51822']
self.supported_form_factors = ["ARDUINO"]
class RBLAB_BLENANO(MCU_NRF51_16K):
def __init__(self):
MCU_NRF51_16K.__init__(self)
class RBLAB_BLENANO_BOOT(MCU_NRF51_16K_BOOT):
def __init__(self):
MCU_NRF51_16K_BOOT.__init__(self)
self.extra_labels += ['RBLAB_BLENANO']
self.macros += ['TARGET_RBLAB_BLENANO']
class RBLAB_BLENANO_OTA(MCU_NRF51_16K_OTA):
def __init__(self):
MCU_NRF51_16K_OTA.__init__(self)
self.extra_labels += ['RBLAB_BLENANO']
self.macros += ['TARGET_RBLAB_BLENANO']
class NRF51822_Y5_MBUG(MCU_NRF51_16K):
def __init__(self):
MCU_NRF51_16K.__init__(self)
class WALLBOT_BLE(MCU_NRF51_16K):
def __init__(self):
MCU_NRF51_16K.__init__(self)
class WALLBOT_BLE_BOOT(MCU_NRF51_16K_BOOT):
def __init__(self):
MCU_NRF51_16K_BOOT.__init__(self)
self.extra_labels += ['WALLBOT_BLE']
self.macros += ['TARGET_WALLBOT_BLE']
class WALLBOT_BLE_OTA(MCU_NRF51_16K_OTA):
def __init__(self):
MCU_NRF51_16K_OTA.__init__(self)
self.extra_labels += ['WALLBOT_BLE']
self.macros += ['TARGET_WALLBOT_BLE']
class DELTA_DFCM_NNN40(MCU_NRF51_32K):
def __init__(self):
MCU_NRF51_32K.__init__(self)
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.macros += ['TARGET_NRF_LFCLK_RC']
def program_cycle_s(self):
return 10
class DELTA_DFCM_NNN40_BOOT(MCU_NRF51_32K_BOOT):
def __init__(self):
MCU_NRF51_32K_BOOT.__init__(self)
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.extra_labels += ['DELTA_DFCM_NNN40']
self.macros += ['TARGET_DELTA_DFCM_NNN40', 'TARGET_NRF_LFCLK_RC']
def program_cycle_s(self):
return 10
class DELTA_DFCM_NNN40_OTA(MCU_NRF51_32K_OTA):
def __init__(self):
MCU_NRF51_32K_OTA.__init__(self)
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.extra_labels += ['DELTA_DFCM_NNN40']
self.macros += ['TARGET_DELTA_DFCM_NNN40', 'TARGET_NRF_LFCLK_RC']
def program_cycle_s(self):
return 10
class NRF51_DK(MCU_NRF51_32K):
def __init__(self):
MCU_NRF51_32K.__init__(self)
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.supported_form_factors = ["ARDUINO"]
class NRF51_DK_BOOT(MCU_NRF51_32K_BOOT):
def __init__(self):
MCU_NRF51_32K_BOOT.__init__(self)
self.extra_labels = ['NRF51_DK']
self.macros += ['TARGET_NRF51_DK']
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.supported_form_factors = ["ARDUINO"]
class NRF51_DK_OTA(MCU_NRF51_32K_OTA):
def __init__(self):
MCU_NRF51_32K_OTA.__init__(self)
self.extra_labels = ['NRF51_DK']
self.macros += ['TARGET_NRF51_DK']
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.supported_form_factors = ["ARDUINO"]
class NRF51_DONGLE(MCU_NRF51_32K):
def __init__(self):
MCU_NRF51_32K.__init__(self)
class NRF51_DONGLE_BOOT(MCU_NRF51_32K_BOOT):
def __init__(self):
MCU_NRF51_32K_BOOT.__init__(self)
self.extra_labels = ['NRF51_DONGLE']
self.macros += ['TARGET_NRF51_DONGLE']
class NRF51_DONGLE_OTA(MCU_NRF51_32K_OTA):
def __init__(self):
MCU_NRF51_32K_OTA.__init__(self)
self.extra_labels = ['NRF51_DONGLE']
self.macros += ['TARGET_NRF51_DONGLE']
class NRF51_MICROBIT(MCU_NRF51_16K_S110):
def __init__(self):
MCU_NRF51_16K_S110.__init__(self)
self.EXPECTED_SOFTDEVICES_WITH_OFFSETS = [
{
'name' : 's110_nrf51822_8.0.0_softdevice.hex',
'boot' : 's110_nrf51822_8.0.0_bootloader.hex',
'offset' : 0x18000
},
{
'name' : 's110_nrf51822_7.1.0_softdevice.hex',
'boot' : 's110_nrf51822_7.1.0_bootloader.hex',
'offset' : 0x16000
}
]
self.macros += ['TARGET_NRF_LFCLK_RC']
class NRF51_MICROBIT_BOOT(MCU_NRF51_16K_BOOT_S110):
def __init__(self):
MCU_NRF51_16K_BOOT_S110.__init__(self)
self.extra_labels += ['NRF51_MICROBIT']
self.macros += ['TARGET_NRF51_MICROBIT', 'TARGET_NRF_LFCLK_RC']
class NRF51_MICROBIT_OTA(MCU_NRF51_16K_OTA_S110):
def __init__(self):
MCU_NRF51_16K_OTA_S110.__init__(self)
self.extra_labels += ['NRF51_MICROBIT']
self.macros += ['TARGET_NRF51_MICROBIT', 'TARGET_NRF_LFCLK_RC']
class NRF51_MICROBIT_B(MCU_NRF51_16K):
def __init__(self):
MCU_NRF51_16K.__init__(self)
self.extra_labels += ['NRF51_MICROBIT']
self.macros += ['TARGET_NRF51_MICROBIT', 'TARGET_NRF_LFCLK_RC']
class NRF51_MICROBIT_B_BOOT(MCU_NRF51_16K_BOOT):
def __init__(self):
MCU_NRF51_16K_BOOT.__init__(self)
self.extra_labels += ['NRF51_MICROBIT']
self.macros += ['TARGET_NRF51_MICROBIT', 'TARGET_NRF_LFCLK_RC']
class NRF51_MICROBIT_B_OTA(MCU_NRF51_16K_OTA):
def __init__(self):
MCU_NRF51_16K_OTA.__init__(self)
self.extra_labels += ['NRF51_MICROBIT']
self.macros += ['TARGET_NRF51_MICROBIT', 'TARGET_NRF_LFCLK_RC']
### ARM ###
class ARM_MPS2_Target(Target):
def __init__(self):
Target.__init__(self)
self.OUTPUT_EXT = 'axf'
def init_hooks(self, hook, toolchain_name):
hook.hook_add_binary("replace", self.output_axf)
@staticmethod
def output_axf(t_self, resources, elf, bin):
shutil.copy(elf, bin)
t_self.debug("Passing ELF file %s" % bin)
class ARM_MPS2_M0(ARM_MPS2_Target):
def __init__(self):
ARM_MPS2_Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['ARM_SSG', 'MPS2', 'MPS2_M0']
self.macros = ['CMSDK_CM0']
self.supported_toolchains = ["ARM"]
self.default_toolchain = "ARM"
class ARM_MPS2_M0P(ARM_MPS2_Target):
def __init__(self):
ARM_MPS2_Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['ARM_SSG', 'MPS2', 'MPS2_M0P']
self.macros = ['CMSDK_CM0plus']
self.supported_toolchains = ["ARM"]
self.default_toolchain = "ARM"
class ARM_MPS2_M1(ARM_MPS2_Target):
def __init__(self):
ARM_MPS2_Target.__init__(self)
self.core = "Cortex-M1"
self.extra_labels = ['ARM_SSG', 'MPS2', 'MPS2_M1']
self.macros = ['CMSDK_CM1']
self.supported_toolchains = ["ARM"]
self.default_toolchain = "ARM"
class ARM_MPS2_M3(ARM_MPS2_Target):
def __init__(self):
ARM_MPS2_Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['ARM_SSG', 'MPS2', 'MPS2_M3']
self.macros = ['CMSDK_CM3']
self.supported_toolchains = ["ARM"]
self.default_toolchain = "ARM"
class ARM_MPS2_M4(ARM_MPS2_Target):
def __init__(self):
ARM_MPS2_Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['ARM_SSG', 'MPS2', 'MPS2_M4']
self.macros = ['CMSDK_CM4']
self.supported_toolchains = ["ARM"]
self.default_toolchain = "ARM"
class ARM_MPS2_M7(ARM_MPS2_Target):
def __init__(self):
ARM_MPS2_Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['ARM_SSG', 'MPS2', 'MPS2_M7']
self.macros = ['CMSDK_CM7']
self.supported_toolchains = ["ARM"]
self.default_toolchain = "ARM"
class ARM_MPS2(ARM_MPS2_M4):
pass
### Renesas ###
class RZ_A1H(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-A9"
self.extra_labels = ['RENESAS', 'MBRZA1H']
self.supported_toolchains = ["ARM", "GCC_ARM"]
self.supported_form_factors = ["ARDUINO"]
self.default_toolchain = "ARM"
def program_cycle_s(self):
return 2
### Maxim Integrated ###
class MAXWSNENV(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['Maxim', 'MAX32610']
self.macros = ['__SYSTEM_HFX=24000000']
self.supported_toolchains = ["GCC_ARM", "IAR", "ARM"]
self.default_toolchain = "ARM"
class MAX32600MBED(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['Maxim', 'MAX32600']
self.macros = ['__SYSTEM_HFX=24000000']
self.supported_toolchains = ["GCC_ARM", "IAR", "ARM"]
self.default_toolchain = "ARM"
### Silicon Labs ###
class EFM32GG_STK3700(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['Silicon_Labs', 'EFM32']
self.macros = ['EFM32GG990F1024']
self.supported_toolchains = ["GCC_ARM", "ARM", "uARM"]
self.default_toolchain = "ARM"
class EFM32LG_STK3600(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M3"
self.extra_labels = ['Silicon_Labs', 'EFM32']
self.macros = ['EFM32LG990F256']
self.supported_toolchains = ["GCC_ARM", "ARM", "uARM"]
self.default_toolchain = "ARM"
class EFM32WG_STK3800(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M4F"
self.extra_labels = ['Silicon_Labs', 'EFM32']
self.macros = ['EFM32WG990F256']
self.supported_toolchains = ["GCC_ARM", "ARM", "uARM"]
self.default_toolchain = "ARM"
class EFM32ZG_STK3200(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Silicon_Labs', 'EFM32']
self.macros = ['EFM32ZG222F32']
self.supported_toolchains = ["GCC_ARM", "uARM"]
self.default_toolchain = "uARM"
class EFM32HG_STK3400(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Silicon_Labs', 'EFM32']
self.macros = ['EFM32HG322F64']
self.supported_toolchains = ["GCC_ARM", "uARM"]
self.default_toolchain = "uARM"
##WIZnet
class WIZWIKI_W7500(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0"
self.extra_labels = ['WIZNET', 'W7500x', 'WIZwiki_W7500']
self.supported_toolchains = ["uARM", "ARM"]
self.default_toolchain = "ARM"
self.supported_form_factors = ["ARDUINO"]
class SAMR21G18A(Target):
def __init__(self):
Target.__init__(self)
self.core = "Cortex-M0+"
self.extra_labels = ['Atmel', 'SAM_CortexM0+', 'SAMR21']
self.macros = ['__SAMR21G18A__', 'I2C_MASTER_CALLBACK_MODE=true', 'EXTINT_CALLBACK_MODE=true', 'USART_CALLBACK_MODE=true', 'TC_ASYNC=true']
self.supported_toolchains = ["GCC_ARM"]
self.default_toolchain = "GCC_ARM"
# Get a single instance for each target
TARGETS = [
### NXP ###
LPC11C24(),
LPC11U24(),
OC_MBUINO(), # LPC11U24
LPC11U24_301(),
LPC11U34_421(),
MICRONFCBOARD(), # LPC11U34_421
LPC11U35_401(),
LPC11U35_501(), # LPC11U35_501
LPC11U35_501_IBDAP(), # LPC11U35_501
XADOW_M0(), # LPC11U35_501
LPC11U35_Y5_MBUG(), # LPC11U35_501
LPC11U37_501(),
LPCCAPPUCCINO(), # LPC11U37_501
ARCH_GPRS(), # LPC11U37_501
LPC11U68(),
LPC1114(),
LPC1347(),
LPC1549(),
LPC1768(), # LPC1768
ARCH_PRO(), # LPC1768
UBLOX_C027(), # LPC1768
XBED_LPC1768(), # LPC1768
LPC2368(),
LPC2460(),
LPC810(),
LPC812(),
LPC824(),
SSCI824(), # LPC824
LPC4088(),
LPC4088_DM(),
LPC4330_M4(),
LPC4330_M0(),
LPC4337(),
LPC11U37H_401(),
### Freescale ###
KL05Z(),
KL25Z(),
KL26Z(),
KL43Z(),
KL46Z(),
K20D50M(),
TEENSY3_1(),
K22F(),
K64F(),
MTS_GAMBIT(), # FRDM K64F
### STMicro ###
NUCLEO_F030R8(),
NUCLEO_F070RB(),
NUCLEO_F072RB(),
NUCLEO_F091RC(),
NUCLEO_F103RB(),
NUCLEO_F302R8(),
NUCLEO_F303RE(),
NUCLEO_F334R8(),
NUCLEO_F401RE(),
NUCLEO_F411RE(),
NUCLEO_F446RE(),
NUCLEO_L053R8(),
NUCLEO_L073RZ(),
NUCLEO_L152RE(),
STM32F3XX(),
STM32F407(),
DISCO_F051R8(),
DISCO_F100RB(),
DISCO_F303VC(),
DISCO_F334C8(),
DISCO_F746NG(),
DISCO_F407VG(), # STM32F407
ARCH_MAX(), # STM32F407
DISCO_F429ZI(),
DISCO_L053C8(),
DISCO_L476VG(),
MTS_MDOT_F405RG(),
MTS_MDOT_F411RE(),
MOTE_L152RC(),
MTS_DRAGONFLY_F411RE(),
DISCO_F401VC(),
UBLOX_C029(), # STM32F439
NZ32SC151(), # STM32L151
### Nordic ###
NRF51822(), # nRF51_16K
NRF51822_BOOT(), # nRF51_16K
NRF51822_OTA(), # nRF51_16K
ARCH_BLE(), # nRF51_16K
ARCH_BLE_BOOT(), # nRF51_16K
ARCH_BLE_OTA(), # nRF51_16K
ARCH_LINK(), # nRF51_16K
ARCH_LINK_BOOT(), # nRF51_16K
ARCH_LINK_OTA(), # nRF51_16K
SEEED_TINY_BLE(), # nRF51_16K
SEEED_TINY_BLE_BOOT(), # nRF51_16K
SEEED_TINY_BLE_OTA(), # nRF51_16K
HRM1017(), # nRF51_16K
HRM1017_BOOT(), # nRF51_16K
HRM1017_OTA(), # nRF51_16K
RBLAB_NRF51822(), # nRF51_16K
RBLAB_NRF51822_BOOT(), # nRF51_16K
RBLAB_NRF51822_OTA(), # nRF51_16K
RBLAB_BLENANO(), # nRF51_16K
RBLAB_BLENANO_BOOT(), # nRF51_16K
RBLAB_BLENANO_OTA(), # nRF51_16K
NRF51822_Y5_MBUG(), # nRF51_16K
WALLBOT_BLE(), # nRF51_16K
WALLBOT_BLE_BOOT(), # nRF51_16K
WALLBOT_BLE_OTA(), # nRF51_16K
DELTA_DFCM_NNN40(), # nRF51_16K
DELTA_DFCM_NNN40_BOOT(),# nRF51_16K
DELTA_DFCM_NNN40_OTA(), # nRF51_16K
NRF51_DK(), # nRF51_32K
NRF51_DK_BOOT(), # nRF51_32K
NRF51_DK_OTA(), # nRF51_32K
NRF51_DONGLE(), # nRF51_32K
NRF51_DONGLE_BOOT(), # nRF51_32K
NRF51_DONGLE_OTA(), # nRF51_32K
NRF51_MICROBIT(), # nRF51_16K - S110
NRF51_MICROBIT_B(), # nRF51_16K - default
### ARM ###
ARM_MPS2_M0(),
ARM_MPS2_M0P(),
ARM_MPS2_M1(),
ARM_MPS2_M3(),
ARM_MPS2_M4(),
ARM_MPS2_M7(),
ARM_MPS2(),
### Renesas ###
RZ_A1H(),
### Maxim Integrated ###
MAXWSNENV(),
MAX32600MBED(),
### Silicon Labs ###
EFM32GG_STK3700(),
EFM32LG_STK3600(),
EFM32WG_STK3800(),
EFM32ZG_STK3200(),
EFM32HG_STK3400(),
### WIZnet ###
WIZWIKI_W7500(),
SAMR21G18A(),
]
# Map each target name to its unique instance
TARGET_MAP = {}
for t in TARGETS:
TARGET_MAP[t.name] = t
TARGET_NAMES = TARGET_MAP.keys()
# Some targets with different names have the same exporters
EXPORT_MAP = { }
# Detection APIs
def get_target_detect_codes():
""" Returns dictionary mapping detect_code -> platform_name
"""
result = {}
for target in TARGETS:
for detect_code in target.detect_code:
result[detect_code] = target.name
return result
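# --- Illustrative usage (editor's addition, not part of the original module) ---
# A minimal sketch of how the registry defined above might be queried from a
# build script. TARGET_MAP, TARGET_NAMES and get_target_detect_codes() are the
# real module-level objects defined above; the chosen platform name 'K64F' is
# only an example and any platform key could be used instead.
if __name__ == '__main__':
    print "Known platforms: %d" % len(TARGET_NAMES)
    example = TARGET_MAP.get('K64F')
    if example is not None:
        print "K64F supported toolchains:", example.supported_toolchains
    # map each registered detect code back to its platform name
    print get_target_detect_codes()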
| apache-2.0 | 8,006,120,062,554,410,000 | 32.902929 | 147 | 0.573469 | false |
myarjunar/QGIS | tests/src/python/test_qgsproject.py | 1 | 5476 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsProject.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
from builtins import chr
from builtins import range
__author__ = 'Sebastian Dietrich'
__date__ = '19/11/2015'
__copyright__ = 'Copyright 2015, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import qgis # NOQA
from qgis.core import QgsProject, QgsApplication, QgsUnitTypes, QgsCoordinateReferenceSystem
from qgis.testing import start_app, unittest
from utilities import (unitTestDataPath)
start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsProject(unittest.TestCase):
def __init__(self, methodName):
"""Run once on class initialization."""
unittest.TestCase.__init__(self, methodName)
self.messageCaught = False
def test_makeKeyTokens_(self):
# see http://www.w3.org/TR/REC-xml/#d0e804 for a list of valid characters
invalidTokens = []
validTokens = []
# all test tokens will be generated by prepending or inserting characters to this token
validBase = "valid"
# some invalid characters, not allowed anywhere in a token
# note that '/' must not be added here because it is taken as a separator by makeKeyTokens_()
invalidChars = "+*,;<>|!$%()=?#\x01"
# generate the characters that are allowed at the start of a token (and at every other position)
validStartChars = ":_"
charRanges = [
(ord('a'), ord('z')),
(ord('A'), ord('Z')),
(0x00F8, 0x02FF),
(0x0370, 0x037D),
(0x037F, 0x1FFF),
(0x200C, 0x200D),
(0x2070, 0x218F),
(0x2C00, 0x2FEF),
(0x3001, 0xD7FF),
(0xF900, 0xFDCF),
(0xFDF0, 0xFFFD),
# (0x10000, 0xEFFFF), while actually valid, these are not yet accepted by makeKeyTokens_()
]
for r in charRanges:
for c in range(r[0], r[1]):
validStartChars += chr(c)
# generate the characters that are only allowed inside a token, not at the start
validInlineChars = "-.\xB7"
charRanges = [
(ord('0'), ord('9')),
(0x0300, 0x036F),
(0x203F, 0x2040),
]
for r in charRanges:
for c in range(r[0], r[1]):
validInlineChars += chr(c)
# test forbidden start characters
for c in invalidChars + validInlineChars:
invalidTokens.append(c + validBase)
# test forbidden inline characters
for c in invalidChars:
invalidTokens.append(validBase[:4] + c + validBase[4:])
# test each allowed start character
for c in validStartChars:
validTokens.append(c + validBase)
# test each allowed inline character
for c in validInlineChars:
validTokens.append(validBase[:4] + c + validBase[4:])
logger = QgsApplication.messageLog()
logger.messageReceived.connect(self.catchMessage)
prj = QgsProject.instance()
for token in validTokens:
self.messageCaught = False
prj.readEntry("test", token)
myMessage = "valid token '%s' not accepted" % (token)
assert not self.messageCaught, myMessage
for token in invalidTokens:
self.messageCaught = False
prj.readEntry("test", token)
myMessage = "invalid token '%s' accepted" % (token)
assert self.messageCaught, myMessage
logger.messageReceived.disconnect(self.catchMessage)
def catchMessage(self):
self.messageCaught = True
def testCrs(self):
prj = QgsProject.instance()
prj.clear()
self.assertFalse(prj.crs().isValid())
prj.setCrs(QgsCoordinateReferenceSystem.fromOgcWmsCrs('EPSG:3111'))
self.assertEqual(prj.crs().authid(), 'EPSG:3111')
def testEllipsoid(self):
prj = QgsProject.instance()
prj.clear()
prj.setCrs(QgsCoordinateReferenceSystem.fromOgcWmsCrs('EPSG:3111'))
prj.setEllipsoid('WGS84')
self.assertEqual(prj.ellipsoid(), 'WGS84')
# if project has NO crs, then ellipsoid should always be none
prj.setCrs(QgsCoordinateReferenceSystem())
self.assertEqual(prj.ellipsoid(), 'NONE')
def testDistanceUnits(self):
prj = QgsProject.instance()
prj.clear()
prj.setDistanceUnits(QgsUnitTypes.DistanceFeet)
self.assertEqual(prj.distanceUnits(), QgsUnitTypes.DistanceFeet)
def testAreaUnits(self):
prj = QgsProject.instance()
prj.clear()
prj.setAreaUnits(QgsUnitTypes.AreaSquareFeet)
self.assertEqual(prj.areaUnits(), QgsUnitTypes.AreaSquareFeet)
def testReadEntry(self):
prj = QgsProject.instance()
prj.read(os.path.join(TEST_DATA_DIR, 'labeling/test-labeling.qgs'))
# valid key, valid int value
self.assertEqual(prj.readNumEntry("SpatialRefSys", "/ProjectionsEnabled", -1)[0], 0)
# invalid key
self.assertEqual(prj.readNumEntry("SpatialRefSys", "/InvalidKey", -1)[0], -1)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 3,873,158,816,957,129,700 | 32.595092 | 104 | 0.620161 | false |
eirnym/aiopg | examples/notify.py | 1 | 1061 | import asyncio
import aiopg
dsn = 'dbname=aiopg user=aiopg password=passwd host=127.0.0.1'
async def notify(conn):
async with conn.cursor() as cur:
for i in range(5):
msg = "message {}".format(i)
print('Send ->', msg)
await cur.execute("NOTIFY channel, '{}'".format(msg))
await cur.execute("NOTIFY channel, 'finish'")
async def listen(conn):
async with conn.cursor() as cur:
await cur.execute("LISTEN channel")
while True:
msg = await conn.notifies.get()
if msg.payload == 'finish':
return
else:
print('Receive <-', msg.payload)
async def main():
async with aiopg.create_pool(dsn) as pool:
async with pool.acquire() as conn1:
listener = listen(conn1)
async with pool.acquire() as conn2:
notifier = notify(conn2)
await asyncio.gather(listener, notifier)
print("ALL DONE")
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| bsd-2-clause | 5,568,833,685,149,267,000 | 26.205128 | 65 | 0.572102 | false |
loveisbug/liveshow-sh | live.py | 1 | 3864 | # -*- coding: utf-8 -*-
import urllib2
import HTMLParser
from bs4 import BeautifulSoup
import sys
from datetime import *
import re
import smtplib
from email.MIMEText import MIMEText
def sendmail(subject, content):
email_host = 'smtp host'
email_user = 'sender email'
email_pwd = 'sender pwd'
maillist = ['[email protected]']
me = email_user
msg = MIMEText(content, 'html', 'utf-8')
msg['Subject'] = subject
msg['From'] = me
msg['To'] = ', '.join(maillist)
try:
smtp = smtplib.SMTP(email_host)
smtp.login(email_user, email_pwd)
smtp.sendmail(me, maillist, msg.as_string())
smtp.quit()
print 'email send success.'
except Exception, e:
print e
print 'email send failed.'
livedict = {
'MAO' : 'maosh/1441569/1', # https://site.douban.com/maosh/widget/events/1441569/
'YYT' : 'yuyintang_h/1217192/1',
'QSW' : '187956/11298220/1', # https://site.douban.com/187956/widget/events/11298220/
'OST' : '176416/10189365/1',
'JZC' : 'jzclub/1357869/1',
'HAL' : '273062/191469274/1', # https://site.douban.com/273062/widget/events/191469274/
'MSL' : '290170/192970720/2', # https://site.douban.com/290170/widget/events/192970720/
'696' : 'livebar696/1381481/1', # https://site.douban.com/livebar696/widget/events/1381481/
'YGS' : 'yugongyishan/1431074/2', # https://site.douban.com/yugongyishan/widget/events/1431074/
'MOG' : 'moguspace/191972683/1', # https://site.douban.com/moguspace/widget/events/191972683/
'DDC' : '237627/16619636/2' # https://site.douban.com/237627/widget/events/16619636/
}
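# (Editor's note, added for clarity) Each value above packs three fields as
# 'douban-site-id/events-widget-id/list-style'; fetchliveshow() below splits
# the string on '/' to rebuild the widget URL and to choose which HTML layout
# ('events-list-s' for style 1, 'events-list' for style 2) to parse.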
def fetchliveshow(livehouse):
baseurl = 'https://site.douban.com/' + livedict[livehouse].split('/')[0] + '/widget/events/' + livedict[livehouse].split('/')[1] + '/?start='
liststyle = int(livedict[livehouse].split('/')[2])
pagedepth = 10
pagecnt = 0
urlrequest = urllib2.Request(baseurl + str(pagecnt))
html_src = urllib2.urlopen(urlrequest).read()
parser = BeautifulSoup(html_src, "html.parser")
try:
eventcnt = re.findall(r'\d+', parser.find('span', 'count').text)
except:
eventcnt = ['0']
if len(eventcnt):
pagecnt = int(eventcnt[0]) / pagedepth + 1
print pagecnt
text = ''
for i in range(0, pagecnt):
urlrequest = urllib2.Request(baseurl + str(i * pagedepth))
html_src = urllib2.urlopen(urlrequest).read()
parser = BeautifulSoup(html_src, "html.parser")
# liststyle 1: 'events-list-s', 'class':'item close' and 'class':'item '
# liststyle 2: 'events-list', 'class':'item'
if liststyle == 1:
elist = parser.find('div', {'class' : 'events-list-s'}).findAll('li', {'class' : 'item '})
elif liststyle == 2:
elist = parser.find('div', {'class' : 'events-list'}).findAll('li', {'class' : 'item'})
else:
elist = []
print len(elist), i
for event in elist:
if event.findNext('span').text.find(u'已结束') != -1:
elist = []
break
eventurl = event.findNext('a')['href']
urlrequest = urllib2.Request(eventurl)
html_src = urllib2.urlopen(urlrequest).read()
parser = BeautifulSoup(html_src, "html.parser")
title = parser.find('h1', {'itemprop' : 'summary'}).contents[0].strip()
try:
datetime = parser.find('li', 'calendar-str-item').text.strip()
except AttributeError:
datetime = next(parser.find('ul', 'calendar-strs ').findNext('li').children).strip()
except:
datetime = ''
prices = parser.findAll('span', 'tickets-info-price')
price = prices[-1].text.strip() if len(prices) else ' '
text += '<b>' + datetime + ' ' + price + '</b><br>' + '<a href="' + eventurl + '">' + title + '</a><br><br>'
if len(elist) < pagedepth:
break
sendmail(livehouse + ' Liveshow - ' + str(date.today()), text)
def main(argv):
if len(argv) > 1:
fetchliveshow(argv[1])
return 0
else:
print "Please input the livehouse: MAO, YYT, QSW, OST."
return 1
if __name__ == '__main__':
sys.exit(main(sys.argv))
| mit | 4,368,484,991,240,511,500 | 35.742857 | 142 | 0.653447 | false |
SamuelToh/pixelated-user-agent | service/test/unit/test_welcome_mail.py | 1 | 2330 | #
# Copyright (c) 2014 ThoughtWorks, Inc.
#
# Pixelated is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pixelated is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Pixelated. If not, see <http://www.gnu.org/licenses/>.
import os
import unittest
from mockito import verify, mock
from mockito.matchers import Matcher
from email import message_from_file
from pixelated.config.leap import add_welcome_mail
from pixelated.adapter.model.mail import InputMail
class TestWelcomeMail(unittest.TestCase):
def test_add_welcome_mail(self):
mail_store = mock()
input_mail = self._get_welcome_mail()
add_welcome_mail(mail_store)
capture = WelcomeMailCapture()
verify(mail_store).add_mail('INBOX', capture)
capture.assert_mail(input_mail.raw)
def _get_welcome_mail(self):
current_path = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(current_path,
'..',
'..',
'pixelated',
'assets',
'welcome.mail')) as mail_template_file:
mail_template = message_from_file(mail_template_file)
return InputMail.from_python_mail(mail_template)
class WelcomeMailCapture(Matcher):
def matches(self, arg):
self.value = arg
return True
def assert_mail(self, mail):
captured_mail = self._format(self.value)
expected_mail = self._format(mail)
assert captured_mail == expected_mail
def _format(self, mail):
splitter = '\n'
arr = mail.split(splitter)
arr = self._remove_variable_value(arr)
return splitter.join(arr)
def _remove_variable_value(self, arr):
arr.pop(0)
arr.pop(6)
arr.pop(44)
return arr
| agpl-3.0 | 6,749,108,892,892,384,000 | 30.917808 | 77 | 0.639056 | false |
Diyago/Machine-Learning-scripts | DEEP LEARNING/segmentation/Segmentation pipeline/get dataset.py | 1 | 2616 | # code from https://github.com/BBarbosa/tflearn-image-recognition-toolkit/blob/4a0528dcfb206b1e45997f2fbc097aafacfa0fa0/scripts/html_link_parser.py
import re
import argparse
from PIL import Image
from io import BytesIO
from bs4 import BeautifulSoup
from skimage import io as skio
from urllib.request import urlopen
import os
def html_url_parser(url, save_dir, show=False, wait=False):
"""
HTML parser to download images from URL.
Params:\n
`url` - Image url\n
`save_dir` - Directory to save extracted images\n
`show` - Show downloaded image\n
`wait` - Press key to continue executing
"""
website = urlopen(url)
html = website.read()
soup = BeautifulSoup(html, "html5lib")
for image_id, link in enumerate(soup.find_all("a", href=True)):
if image_id == 0:
continue
img_url = link["href"]
try:
if os.path.isfile(save_dir + "img-%d.png" % image_id) == False:
print("[INFO] Downloading image from URL:", link["href"])
image = Image.open(urlopen(img_url))
image.save(save_dir + "img-%d.png" % image_id, "PNG")
if show:
image.show()
else:
print("skipped")
except KeyboardInterrupt:
print("[EXCEPTION] Pressed 'Ctrl+C'")
break
except Exception as image_exception:
print("[EXCEPTION]", image_exception)
continue
if wait:
key = input("[INFO] Press any key to continue ('q' to exit)... ")
if key.lower() == "q":
break
# ///////////////////////////////////////////////////
# Main method
# ///////////////////////////////////////////////////
if __name__ == "__main__":
URL_TRAIN_IMG = (
"https://www.cs.toronto.edu/~vmnih/data/mass_roads/train/sat/index.html"
)
URL_TRAIN_GT = (
"https://www.cs.toronto.edu/~vmnih/data/mass_roads/train/map/index.html"
)
URL_TEST_IMG = (
"https://www.cs.toronto.edu/~vmnih/data/mass_roads/valid/sat/index.html"
)
URL_TEST_GT = (
"https://www.cs.toronto.edu/~vmnih/data/mass_roads/valid/map/index.html"
)
html_url_parser(url=URL_TRAIN_IMG, save_dir="./road_segmentation/training/input/")
html_url_parser(url=URL_TRAIN_GT, save_dir="./road_segmentation/training/output/")
html_url_parser(url=URL_TEST_IMG, save_dir="./road_segmentation/testing/input/")
html_url_parser(url=URL_TEST_GT, save_dir="./road_segmentation/testing/output/")
print("[INFO] All done!")
| apache-2.0 | -718,070,879,443,201,700 | 31.296296 | 147 | 0.574541 | false |
zhlinh/leetcode | 0057.Insert Interval/solution.py | 1 | 1441 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
*****************************************
Author: zhlinh
Email: [email protected]
Version: 0.0.1
Created Time: 2016-02-22
Last_modify: 2016-02-22
******************************************
'''
'''
Given a set of non-overlapping intervals,
insert a new interval into the intervals (merge if necessary).
You may assume that the intervals were initially sorted
according to their start times.
Example 1:
Given intervals [1,3],[6,9], insert and merge [2,5] in as [1,5],[6,9].
Example 2:
Given [1,2],[3,5],[6,7],[8,10],[12,16],
insert and merge [4,9] in as [1,2],[3,10],[12,16].
This is because the new interval [4,9] overlaps with [3,5],[6,7],[8,10].
'''
# Definition for an interval.
class Interval(object):
def __init__(self, s=0, e=0):
self.start = s
self.end = e
class Solution(object):
def insert(self, intervals, newInterval):
"""
:type intervals: List[Interval]
:type newInterval: Interval
:rtype: List[Interval]
"""
ns, ne = newInterval.start, newInterval.end
left, right = [], []
for x in intervals:
if x.end < ns:
left.append(x)
elif x.start > ne:
right.append(x)
else:
ns = min(x.start, ns)
ne = max(x.end, ne)
return left + [Interval(ns, ne)] + right
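# A minimal self-check (editor's addition, not part of the original solution):
# it runs the two examples quoted in the problem statement above.
if __name__ == "__main__":
    def make(pairs):
        # helper to build Interval objects from (start, end) tuples
        return [Interval(s, e) for s, e in pairs]
    sol = Solution()
    # Example 1: [1,3],[6,9] + [2,5] -> [1,5],[6,9]
    merged = sol.insert(make([(1, 3), (6, 9)]), Interval(2, 5))
    print([(i.start, i.end) for i in merged])
    # Example 2: [1,2],[3,5],[6,7],[8,10],[12,16] + [4,9] -> [1,2],[3,10],[12,16]
    merged = sol.insert(make([(1, 2), (3, 5), (6, 7), (8, 10), (12, 16)]), Interval(4, 9))
    print([(i.start, i.end) for i in merged])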
| apache-2.0 | -6,815,767,327,999,001,000 | 26.711538 | 72 | 0.529493 | false |
psychopy/versions | psychopy/experiment/components/mouse/__init__.py | 1 | 28995 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2020 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
from __future__ import absolute_import, print_function
from builtins import super # provides Py3-style super() using python-future
from os import path
from psychopy.experiment.components import BaseComponent, Param, _translate
import re
# the absolute path to the folder containing this path
thisFolder = path.abspath(path.dirname(__file__))
iconFile = path.join(thisFolder, 'mouse.png')
tooltip = _translate('Mouse: query mouse position and buttons')
# only use _localized values for label values, nothing functional:
_localized = {'saveMouseState': _translate('Save mouse state'),
'forceEndRoutineOnPress': _translate('End Routine on press'),
'timeRelativeTo': _translate('Time relative to'),
'Clickable stimuli': _translate('Clickable stimuli'),
'Store params for clicked': _translate('Store params for clicked'),
'New clicks only': _translate('New clicks only')}
class MouseComponent(BaseComponent):
"""An event class for checking the mouse location and buttons
at given timepoints
"""
categories = ['Responses']
targets = ['PsychoPy', 'PsychoJS']
def __init__(self, exp, parentName, name='mouse',
startType='time (s)', startVal=0.0,
stopType='duration (s)', stopVal=1.0,
startEstim='', durationEstim='',
save='final', forceEndRoutineOnPress="any click",
timeRelativeTo='mouse onset'):
super(MouseComponent, self).__init__(
exp, parentName, name=name,
startType=startType, startVal=startVal,
stopType=stopType, stopVal=stopVal,
startEstim=startEstim, durationEstim=durationEstim)
self.type = 'Mouse'
self.url = "http://www.psychopy.org/builder/components/mouse.html"
self.exp.requirePsychopyLibs(['event'])
self.categories = ['Inputs']
self.order += [
'forceEndRoutineOnPress',
'saveMouseState', 'timeRelativeTo',
'newClicksOnly', 'clickable', 'saveParamsClickable']
# params
msg = _translate(
"How often should the mouse state (x,y,buttons) be stored? "
"On every video frame, every click or just at the end of the "
"Routine?")
self.params['saveMouseState'] = Param(
save, valType='str',
allowedVals=['final', 'on click', 'every frame', 'never'],
hint=msg,
label=_localized['saveMouseState'])
msg = _translate("Should a button press force the end of the routine"
" (e.g end the trial)?")
if forceEndRoutineOnPress is True:
forceEndRoutineOnPress = 'any click'
elif forceEndRoutineOnPress is False:
forceEndRoutineOnPress = 'never'
self.params['forceEndRoutineOnPress'] = Param(
forceEndRoutineOnPress, valType='str',
allowedVals=['never', 'any click', 'valid click'],
updates='constant',
hint=msg,
label=_localized['forceEndRoutineOnPress'])
msg = _translate("What should the values of mouse.time should be "
"relative to?")
self.params['timeRelativeTo'] = Param(
timeRelativeTo, valType='str',
allowedVals=['mouse onset', 'experiment', 'routine'],
updates='constant',
hint=msg,
label=_localized['timeRelativeTo'])
msg = _translate('If the mouse button is already down when we start '
'checking then wait for it to be released before '
'recording as a new click.'
)
self.params['newClicksOnly'] = Param(
True, valType='bool',
updates='constant',
hint=msg,
label=_localized['New clicks only'])
msg = _translate('A comma-separated list of your stimulus names that '
'can be "clicked" by the participant. '
'e.g. target, foil'
)
self.params['clickable'] = Param(
'', valType='code',
updates='constant',
hint=msg,
label=_localized['Clickable stimuli'])
msg = _translate('The params (e.g. name, text), for which you want '
                         'to store the current value, for the stimulus that was '
'"clicked" by the mouse. Make sure that all the '
'clickable objects have all these params.'
)
self.params['saveParamsClickable'] = Param(
'name,', valType='code',
updates='constant', allowedUpdates=[],
hint=msg,
label=_localized['Store params for clicked'])
@property
def _clickableParamsList(self):
# convert clickableParams (str) to a list
params = self.params['saveParamsClickable'].val
paramsList = re.findall(r"[\w']+", params)
return paramsList or ['name']
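    # (Editor's note, illustrative only) e.g. a saveParamsClickable value of
    # 'name, image' yields ['name', 'image'] here; an empty value falls back
    # to ['name'] so at least the stimulus name is always recorded.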
def _writeClickableObjectsCode(self, buff):
# code to check if clickable objects were clicked
code = (
"# check if the mouse was inside our 'clickable' objects\n"
"gotValidClick = False\n"
"for obj in [%(clickable)s]:\n"
" if obj.contains(%(name)s):\n"
" gotValidClick = True\n")
buff.writeIndentedLines(code % self.params)
buff.setIndentLevel(+2, relative=True)
code = ''
for paramName in self._clickableParamsList:
code += "%s.clicked_%s.append(obj.%s)\n" %(self.params['name'],
paramName, paramName)
buff.writeIndentedLines(code % self.params)
buff.setIndentLevel(-2, relative=True)
def _writeClickableObjectsCodeJS(self, buff):
# code to check if clickable objects were clicked
code = (
"// check if the mouse was inside our 'clickable' objects\n"
"gotValidClick = false;\n"
"for (const obj of [{clickable}]) {{\n"
" if (obj.contains({name})) {{\n"
" gotValidClick = true;\n")
buff.writeIndentedLines(code.format(name=self.params['name'],
clickable=self.params['clickable'].val))
buff.setIndentLevel(+2, relative=True)
dedent = 2
code = ''
for paramName in self._clickableParamsList:
code += "%s.clicked_%s.push(obj.%s)\n" % (self.params['name'],
paramName, paramName)
buff.writeIndentedLines(code % self.params)
for dents in range(dedent):
buff.setIndentLevel(-1, relative=True)
buff.writeIndented('}\n')
def writeInitCode(self, buff):
code = ("%(name)s = event.Mouse(win=win)\n"
"x, y = [None, None]\n"
"%(name)s.mouseClock = core.Clock()\n")
buff.writeIndentedLines(code % self.params)
def writeInitCodeJS(self, buff):
code = ("%(name)s = new core.Mouse({\n"
" win: psychoJS.window,\n"
"});\n"
"%(name)s.mouseClock = new util.Clock();\n")
buff.writeIndentedLines(code % self.params)
def writeRoutineStartCode(self, buff):
"""Write the code that will be called at the start of the routine
"""
# create some lists to store recorded values positions and events if
# we need more than one
code = ("# setup some python lists for storing info about the "
"%(name)s\n")
if self.params['saveMouseState'].val in ['every frame', 'on click']:
code += ("%(name)s.x = []\n"
"%(name)s.y = []\n"
"%(name)s.leftButton = []\n"
"%(name)s.midButton = []\n"
"%(name)s.rightButton = []\n"
"%(name)s.time = []\n")
if self.params['clickable'].val:
for clickableObjParam in self._clickableParamsList:
code += "%(name)s.clicked_{} = []\n".format(clickableObjParam)
code += "gotValidClick = False # until a click is received\n"
if self.params['timeRelativeTo'].val.lower() == 'routine':
code += "%(name)s.mouseClock.reset()\n"
buff.writeIndentedLines(code % self.params)
def writeRoutineStartCodeJS(self, buff):
"""Write the code that will be called at the start of the routine"""
code = ("// setup some python lists for storing info about the %(name)s\n")
if self.params['saveMouseState'].val in ['every frame', 'on click']:
code += ("// current position of the mouse:\n"
"%(name)s.x = [];\n"
"%(name)s.y = [];\n"
"%(name)s.leftButton = [];\n"
"%(name)s.midButton = [];\n"
"%(name)s.rightButton = [];\n"
"%(name)s.time = [];\n")
if self.params['clickable'].val:
for clickableObjParam in self._clickableParamsList:
code += "%s.clicked_%s = [];\n" % (self.params['name'], clickableObjParam)
code += "gotValidClick = false; // until a click is received\n"
if self.params['timeRelativeTo'].val.lower() == 'routine':
code += "%(name)s.mouseClock.reset();\n"
buff.writeIndentedLines(code % self.params)
def writeFrameCode(self, buff):
"""Write the code that will be called every frame"""
forceEnd = self.params['forceEndRoutineOnPress'].val
# get a clock for timing
timeRelative = self.params['timeRelativeTo'].val.lower()
if timeRelative == 'experiment':
self.clockStr = 'globalClock'
elif timeRelative in ['routine', 'mouse onset']:
self.clockStr = '%s.mouseClock' % self.params['name'].val
# only write code for cases where we are storing data as we go (each
# frame or each click)
# might not be saving clicks, but want it to force end of trial
if (self.params['saveMouseState'].val not in
['every frame', 'on click'] and forceEnd == 'never'):
return
buff.writeIndented("# *%s* updates\n" % self.params['name'])
# writes an if statement to determine whether to draw etc
self.writeStartTestCode(buff)
code = ("%(name)s.status = STARTED\n")
if self.params['timeRelativeTo'].val.lower() == 'mouse onset':
code += "%(name)s.mouseClock.reset()\n"
if self.params['newClicksOnly']:
code += (
"prevButtonState = %(name)s.getPressed()"
" # if button is down already this ISN'T a new click\n")
else:
code += (
"prevButtonState = [0, 0, 0]"
" # if now button is down we will treat as 'new' click\n")
buff.writeIndentedLines(code % self.params)
# to get out of the if statement
buff.setIndentLevel(-1, relative=True)
# test for stop (only if there was some setting for duration or stop)
if self.params['stopVal'].val not in ['', None, -1, 'None']:
# writes an if statement to determine whether to draw etc
self.writeStopTestCode(buff)
buff.writeIndented("%(name)s.status = FINISHED\n" % self.params)
# to get out of the if statement
buff.setIndentLevel(-2, relative=True)
# if STARTED and not FINISHED!
code = ("if %(name)s.status == STARTED: "
"# only update if started and not finished!\n") % self.params
buff.writeIndented(code)
buff.setIndentLevel(1, relative=True) # to get out of if statement
dedentAtEnd = 1 # keep track of how far to dedent later
def _buttonPressCode(buff, dedent):
"""Code compiler for mouse button events"""
code = ("buttons = %(name)s.getPressed()\n"
"if buttons != prevButtonState: # button state changed?")
buff.writeIndentedLines(code % self.params)
buff.setIndentLevel(1, relative=True)
dedent += 1
buff.writeIndented("prevButtonState = buttons\n")
code = ("if sum(buttons) > 0: # state changed to a new click\n")
buff.writeIndentedLines(code % self.params)
buff.setIndentLevel(1, relative=True)
dedent += 1
if self.params['clickable'].val:
self._writeClickableObjectsCode(buff)
return buff, dedent
# No mouse tracking, end routine on any or valid click
if self.params['saveMouseState'].val == 'never' and forceEnd in ['any click', 'valid click']:
buff, dedentAtEnd = _buttonPressCode(buff, dedentAtEnd)
if forceEnd == 'valid click':
# does valid response end the trial?
code = ("if gotValidClick: # abort routine on response\n"
" continueRoutine = False\n")
buff.writeIndentedLines(code)
buff.setIndentLevel(-dedentAtEnd, relative=True)
else:
buff.writeIndented('continueRoutine = False')
buff.setIndentLevel(-dedentAtEnd, relative=True)
elif self.params['saveMouseState'].val != 'never':
mouseCode = ("x, y = {name}.getPos()\n"
"{name}.x.append(x)\n"
"{name}.y.append(y)\n"
"buttons = {name}.getPressed()\n"
"{name}.leftButton.append(buttons[0])\n"
"{name}.midButton.append(buttons[1])\n"
"{name}.rightButton.append(buttons[2])\n"
"{name}.time.append({clockStr}.getTime())\n".format(name=self.params['name'],
clockStr=self.clockStr))
# Continuous mouse tracking
if self.params['saveMouseState'].val in ['every frame']:
buff.writeIndentedLines(mouseCode)
# Continuous mouse tracking for all button press
if forceEnd == 'never' and self.params['saveMouseState'].val in ['on click']:
buff, dedentAtEnd = _buttonPressCode(buff, dedentAtEnd)
buff.writeIndentedLines(mouseCode)
# Mouse tracking for events that end routine
elif forceEnd in ['any click', 'valid click']:
buff, dedentAtEnd = _buttonPressCode(buff, dedentAtEnd)
# Save all mouse events on button press
if self.params['saveMouseState'].val in ['on click']:
buff.writeIndentedLines(mouseCode)
# also write code about clicked objects if needed.
if self.params['clickable'].val:
# does valid response end the trial?
if forceEnd == 'valid click':
code = ("if gotValidClick: # abort routine on response\n"
" continueRoutine = False\n")
buff.writeIndentedLines(code)
# does any response end the trial?
if forceEnd == 'any click':
code = ("# abort routine on response\n"
"continueRoutine = False\n")
buff.writeIndentedLines(code)
else:
pass # forceEnd == 'never'
# 'if' statement of the time test and button check
buff.setIndentLevel(-dedentAtEnd, relative=True)
def writeFrameCodeJS(self, buff):
"""Write the code that will be called every frame"""
forceEnd = self.params['forceEndRoutineOnPress'].val
# get a clock for timing
timeRelative = self.params['timeRelativeTo'].val.lower()
if timeRelative == 'experiment':
self.clockStr = 'globalClock'
elif timeRelative in ['routine', 'mouse onset']:
self.clockStr = '%s.mouseClock' % self.params['name'].val
# only write code for cases where we are storing data as we go (each
# frame or each click)
# might not be saving clicks, but want it to force end of trial
if (self.params['saveMouseState'].val not in
['every frame', 'on click'] and forceEnd == 'never'):
return
buff.writeIndented("// *%s* updates\n" % self.params['name'])
# writes an if statement to determine whether to draw etc
self.writeStartTestCodeJS(buff)
code = "%(name)s.status = PsychoJS.Status.STARTED;\n"
if self.params['timeRelativeTo'].val.lower() == 'mouse onset':
code += "%(name)s.mouseClock.reset();\n" % self.params
if self.params['newClicksOnly']:
code += (
"prevButtonState = %(name)s.getPressed();"
" // if button is down already this ISN'T a new click\n")
else:
code += (
"prevButtonState = [0, 0, 0];"
" // if now button is down we will treat as 'new' click\n")
code+=("}\n")
buff.writeIndentedLines(code % self.params)
# to get out of the if statement
buff.setIndentLevel(-1, relative=True)
# test for stop (only if there was some setting for duration or stop)
if self.params['stopVal'].val not in ['', None, -1, 'None']:
# writes an if statement to determine whether to draw etc
self.writeStopTestCodeJS(buff)
buff.writeIndented("%(name)s.status = PsychoJS.Status.FINISHED;\n"
" }\n" % self.params)
# to get out of the if statement
buff.setIndentLevel(-1, relative=True)
# if STARTED and not FINISHED!
code = ("if (%(name)s.status === PsychoJS.Status.STARTED) { "
"// only update if started and not finished!\n")
buff.writeIndented(code % self.params)
buff.setIndentLevel(1, relative=True) # to get out of if statement
dedentAtEnd = 1 # keep track of how far to dedent later
# write param checking code
if (self.params['saveMouseState'].val == 'on click'
or forceEnd in ['any click', 'valid click']):
code = ("_mouseButtons = %(name)s.getPressed();\n")
buff.writeIndentedLines(code % self.params)
# buff.setIndentLevel(1, relative=True)
# dedentAtEnd += 1
code = "if (!_mouseButtons.every( (e,i,) => (e == prevButtonState[i]) )) { // button state changed?\n"
buff.writeIndented(code)
buff.setIndentLevel(1, relative=True)
dedentAtEnd += 1
buff.writeIndented("prevButtonState = _mouseButtons;\n")
code = ("if (_mouseButtons.reduce( (e, acc) => (e+acc) ) > 0) { // state changed to a new click\n")
buff.writeIndentedLines(code % self.params)
buff.setIndentLevel(1, relative=True)
dedentAtEnd += 1
elif self.params['saveMouseState'].val == 'every frame':
code = "_mouseButtons = %(name)s.getPressed();\n" % self.params
buff.writeIndented(code)
# only do this if buttons were pressed
if self.params['saveMouseState'].val in ['on click', 'every frame']:
code = ("_mouseXYs = %(name)s.getPos();\n"
"%(name)s.x.push(_mouseXYs[0]);\n"
"%(name)s.y.push(_mouseXYs[1]);\n"
"%(name)s.leftButton.push(_mouseButtons[0]);\n"
"%(name)s.midButton.push(_mouseButtons[1]);\n"
"%(name)s.rightButton.push(_mouseButtons[2]);\n" %
self.params)
code += ("%s.time.push(%s.getTime());\n" % (self.params['name'], self.clockStr))
buff.writeIndentedLines(code)
# also write code about clicked objects if needed.
if self.params['clickable'].val:
self._writeClickableObjectsCodeJS(buff)
# does the response end the trial?
if forceEnd == 'any click':
code = ("// abort routine on response\n"
"continueRoutine = false;\n")
buff.writeIndentedLines(code)
elif forceEnd == 'valid click':
code = ("if (gotValidClick === true) { // abort routine on response\n"
" continueRoutine = false;\n"
"}\n")
buff.writeIndentedLines(code)
else:
pass # forceEnd == 'never'
for thisDedent in range(dedentAtEnd):
buff.setIndentLevel(-1, relative=True)
buff.writeIndentedLines('}')
def writeRoutineEndCode(self, buff):
# some shortcuts
name = self.params['name']
# do this because the param itself is not a string!
store = self.params['saveMouseState'].val
if store == 'nothing':
return
forceEnd = self.params['forceEndRoutineOnPress'].val
if len(self.exp.flow._loopList):
currLoop = self.exp.flow._loopList[-1] # last (outer-most) loop
else:
currLoop = self.exp._expHandler
if currLoop.type == 'StairHandler':
code = ("# NB PsychoPy doesn't handle a 'correct answer' for "
"mouse events so doesn't know how to handle mouse with "
"StairHandler\n")
else:
code = ("# store data for %s (%s)\n" %
(currLoop.params['name'], currLoop.type))
buff.writeIndentedLines(code)
if store == 'final': # for the o
# buff.writeIndented("# get info about the %(name)s\n"
# %(self.params))
code = ("x, y = {name}.getPos()\n"
"buttons = {name}.getPressed()\n").format(name=self.params['name'])
# also write code about clicked objects if needed.
buff.writeIndentedLines(code)
if self.params['clickable'].val:
buff.writeIndented("if sum(buttons):\n")
buff.setIndentLevel(+1, relative=True)
self._writeClickableObjectsCode(buff)
buff.setIndentLevel(-1, relative=True)
if currLoop.type != 'StairHandler':
code = (
"{loopName}.addData('{name}.x', x)\n"
"{loopName}.addData('{name}.y', y)\n"
"{loopName}.addData('{name}.leftButton', buttons[0])\n"
"{loopName}.addData('{name}.midButton', buttons[1])\n"
"{loopName}.addData('{name}.rightButton', buttons[2])\n"
)
buff.writeIndentedLines(
code.format(loopName=currLoop.params['name'],
name=name))
# then add `trials.addData('mouse.clicked_name',.....)`
if self.params['clickable'].val:
for paramName in self._clickableParamsList:
code = (
"if len({name}.clicked_{param}):\n"
" {loopName}.addData('{name}.clicked_{param}', "
"{name}.clicked_{param}[0])\n"
)
buff.writeIndentedLines(
code.format(loopName=currLoop.params['name'],
name=name,
param=paramName))
elif store != 'never':
# buff.writeIndented("# save %(name)s data\n" %(self.params))
mouseDataProps = ['x', 'y', 'leftButton', 'midButton',
'rightButton', 'time']
# possibly add clicked params if we have clickable objects
if self.params['clickable'].val:
for paramName in self._clickableParamsList:
mouseDataProps.append("clicked_{}".format(paramName))
# use that set of properties to create set of addData commands
for property in mouseDataProps:
if store == 'every frame' or forceEnd == "never":
code = ("%s.addData('%s.%s', %s.%s)\n" %
(currLoop.params['name'], name,
property, name, property))
buff.writeIndented(code)
else:
# we only had one click so don't return a list
code = ("if len(%s.%s): %s.addData('%s.%s', %s.%s[0])\n" %
(name, property,
currLoop.params['name'], name,
property, name, property))
buff.writeIndented(code)
# get parent to write code too (e.g. store onset/offset times)
super().writeRoutineEndCode(buff)
if currLoop.params['name'].val == self.exp._expHandler.name:
buff.writeIndented("%s.nextEntry()\n" % self.exp._expHandler.name)
def writeRoutineEndCodeJS(self, buff):
"""Write code at end of routine"""
# some shortcuts
name = self.params['name']
# do this because the param itself is not a string!
store = self.params['saveMouseState'].val
if store == 'nothing':
return
forceEnd = self.params['forceEndRoutineOnPress'].val
if len(self.exp.flow._loopList):
currLoop = self.exp.flow._loopList[-1] # last (outer-most) loop
else:
currLoop = self.exp._expHandler
if currLoop.type == 'StairHandler':
code = ("/*NB PsychoPy doesn't handle a 'correct answer' for "
"mouse events so doesn't know how to handle mouse with "
"StairHandler*/\n")
else:
code = ("// store data for %s (%s)\n" %
(currLoop.params['name'], currLoop.type))
buff.writeIndentedLines(code)
if store == 'final':
code = ("_mouseXYs = {name}.getPos();\n"
"_mouseButtons = {name}.getPressed();\n")
if currLoop.type != 'StairHandler':
code += (
"psychoJS.experiment.addData('{name}.x', _mouseXYs[0]);\n"
"psychoJS.experiment.addData('{name}.y', _mouseXYs[1]);\n"
"psychoJS.experiment.addData('{name}.leftButton', _mouseButtons[0]);\n"
"psychoJS.experiment.addData('{name}.midButton', _mouseButtons[1]);\n"
"psychoJS.experiment.addData('{name}.rightButton', _mouseButtons[2]);\n"
)
buff.writeIndentedLines(code.format(name=name))
# For clicked objects...
if self.params['clickable'].val:
for paramName in self._clickableParamsList:
code = (
"if ({name}.clicked_{param}.length > 0) {{\n"
" psychoJS.experiment.addData('{name}.clicked_{param}', "
"{name}.clicked_{param}[0]);}}\n".format(name=name,
param=paramName))
buff.writeIndentedLines(code)
elif store != 'never':
# buff.writeIndented("# save %(name)s data\n" %(self.params))
mouseDataProps = ['x', 'y', 'leftButton', 'midButton',
'rightButton', 'time']
# possibly add clicked params if we have clickable objects
if self.params['clickable'].val:
for paramName in self._clickableParamsList:
mouseDataProps.append("clicked_{}".format(paramName))
# use that set of properties to create set of addData commands
for property in mouseDataProps:
if store == 'every frame' or forceEnd == "never":
code = ("psychoJS.experiment.addData('%s.%s', %s.%s);\n" %
(name, property, name, property))
buff.writeIndented(code)
else:
# we only had one click so don't return a list
code = ("if (%s.%s) {"
" psychoJS.experiment.addData('%s.%s', %s.%s[0])};\n"
% (name, property, name, property, name, property))
buff.writeIndented(code)
buff.writeIndentedLines("\n")
| gpl-3.0 | -2,335,225,837,905,101,000 | 45.02381 | 114 | 0.534989 | false |
shootstar/novatest | nova/api/openstack/compute/plugins/v3/cells.py | 1 | 14865 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The cells extension."""
from oslo.config import cfg
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.cells import rpc_driver
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import api as compute
from nova import db
from nova import exception
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.import_opt('capabilities', 'nova.cells.opts', group='cells')
ALIAS = "os-cells"
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
def make_cell(elem):
elem.set('name')
elem.set('username')
elem.set('type')
elem.set('rpc_host')
elem.set('rpc_port')
caps = xmlutil.SubTemplateElement(elem, 'capabilities',
selector='capabilities')
cap = xmlutil.SubTemplateElement(caps, xmlutil.Selector(0),
selector=xmlutil.get_items)
cap.text = 1
make_capacity(elem)
def make_capacity(cell):
def get_units_by_mb(capacity_info):
return capacity_info['units_by_mb'].items()
capacity = xmlutil.SubTemplateElement(cell, 'capacities',
selector='capacities')
ram_free = xmlutil.SubTemplateElement(capacity, 'ram_free',
selector='ram_free')
ram_free.set('total_mb', 'total_mb')
unit_by_mb = xmlutil.SubTemplateElement(ram_free, 'unit_by_mb',
selector=get_units_by_mb)
unit_by_mb.set('mb', 0)
unit_by_mb.set('unit', 1)
disk_free = xmlutil.SubTemplateElement(capacity, 'disk_free',
selector='disk_free')
disk_free.set('total_mb', 'total_mb')
unit_by_mb = xmlutil.SubTemplateElement(disk_free, 'unit_by_mb',
selector=get_units_by_mb)
unit_by_mb.set('mb', 0)
unit_by_mb.set('unit', 1)
cell_nsmap = {None: wsgi.XMLNS_V10}
class CellTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('cell', selector='cell')
make_cell(root)
return xmlutil.MasterTemplate(root, 1, nsmap=cell_nsmap)
class CellsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('cells')
elem = xmlutil.SubTemplateElement(root, 'cell', selector='cells')
make_cell(elem)
return xmlutil.MasterTemplate(root, 1, nsmap=cell_nsmap)
class CellDeserializer(wsgi.XMLDeserializer):
"""Deserializer to handle xml-formatted cell create requests."""
def _extract_capabilities(self, cap_node):
caps = {}
for cap in cap_node.childNodes:
cap_name = cap.tagName
caps[cap_name] = self.extract_text(cap)
return caps
def _extract_cell(self, node):
cell = {}
cell_node = self.find_first_child_named(node, 'cell')
extract_fns = {
'capabilities': self._extract_capabilities,
'rpc_port': lambda child: int(self.extract_text(child)),
}
for child in cell_node.childNodes:
name = child.tagName
extract_fn = extract_fns.get(name, self.extract_text)
cell[name] = extract_fn(child)
return cell
def default(self, string):
"""Deserialize an xml-formatted cell create request."""
node = xmlutil.safe_minidom_parse_string(string)
return {'body': {'cell': self._extract_cell(node)}}
def _filter_keys(item, keys):
"""
Filters all model attributes except for keys
item is a dict
"""
return dict((k, v) for k, v in item.iteritems() if k in keys)
def _fixup_cell_info(cell_info, keys):
"""
If the transport_url is present in the cell, derive username,
rpc_host, and rpc_port from it.
"""
if 'transport_url' not in cell_info:
return
# Disassemble the transport URL
transport_url = cell_info.pop('transport_url')
try:
transport = rpc_driver.parse_transport_url(transport_url)
except ValueError:
# Just go with None's
for key in keys:
cell_info.setdefault(key, None)
return cell_info
transport_field_map = {'rpc_host': 'hostname', 'rpc_port': 'port'}
for key in keys:
if key in cell_info:
continue
transport_field = transport_field_map.get(key, key)
cell_info[key] = transport[transport_field]
def _scrub_cell(cell, detail=False):
keys = ['name', 'username', 'rpc_host', 'rpc_port']
if detail:
keys.append('capabilities')
cell_info = _filter_keys(cell, keys + ['transport_url'])
_fixup_cell_info(cell_info, keys)
cell_info['type'] = 'parent' if cell['is_parent'] else 'child'
return cell_info
class CellsController(object):
"""Controller for Cell resources."""
def __init__(self):
self.compute_api = compute.API()
self.cells_rpcapi = cells_rpcapi.CellsAPI()
def _get_cells(self, ctxt, req, detail=False):
"""Return all cells."""
# Ask the CellsManager for the most recent data
items = self.cells_rpcapi.get_cell_info_for_neighbors(ctxt)
items = common.limited(items, req)
items = [_scrub_cell(item, detail=detail) for item in items]
return dict(cells=items)
@wsgi.serializers(xml=CellsTemplate)
def index(self, req):
"""Return all cells in brief."""
ctxt = req.environ['nova.context']
authorize(ctxt)
return self._get_cells(ctxt, req)
@wsgi.serializers(xml=CellsTemplate)
def detail(self, req):
"""Return all cells in detail."""
ctxt = req.environ['nova.context']
authorize(ctxt)
return self._get_cells(ctxt, req, detail=True)
@wsgi.serializers(xml=CellTemplate)
def info(self, req):
"""Return name and capabilities for this cell."""
context = req.environ['nova.context']
authorize(context)
cell_capabs = {}
my_caps = CONF.cells.capabilities
for cap in my_caps:
key, value = cap.split('=')
cell_capabs[key] = value
cell = {'name': CONF.cells.name,
'type': 'self',
'rpc_host': None,
'rpc_port': 0,
'username': None,
'capabilities': cell_capabs}
return dict(cell=cell)
@wsgi.serializers(xml=CellTemplate)
def capacities(self, req, id=None):
"""Return capacities for a given cell or all cells."""
# TODO(kaushikc): return capacities as a part of cell info and
# cells detail calls in v3, along with capabilities
context = req.environ['nova.context']
authorize(context)
try:
capacities = self.cells_rpcapi.get_capacities(context,
cell_name=id)
except exception.CellNotFound:
msg = (_("Cell %(id)s not found.") % {'id': id})
raise exc.HTTPNotFound(explanation=msg)
return dict(cell={"capacities": capacities})
@wsgi.serializers(xml=CellTemplate)
def show(self, req, id):
"""Return data about the given cell name. 'id' is a cell name."""
context = req.environ['nova.context']
authorize(context)
try:
cell = db.cell_get(context, id)
except exception.CellNotFound:
raise exc.HTTPNotFound()
return dict(cell=_scrub_cell(cell))
def delete(self, req, id):
"""Delete a child or parent cell entry. 'id' is a cell name."""
context = req.environ['nova.context']
authorize(context)
num_deleted = db.cell_delete(context, id)
if num_deleted == 0:
raise exc.HTTPNotFound()
return {}
def _validate_cell_name(self, cell_name):
"""Validate cell name is not empty and doesn't contain '!' or '.'."""
if not cell_name:
msg = _("Cell name cannot be empty")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
if '!' in cell_name or '.' in cell_name:
msg = _("Cell name cannot contain '!' or '.'")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
def _validate_cell_type(self, cell_type):
"""Validate cell_type is 'parent' or 'child'."""
if cell_type not in ['parent', 'child']:
msg = _("Cell type must be 'parent' or 'child'")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
def _normalize_cell(self, cell, existing=None):
"""
Normalize input cell data. Normalizations include:
* Converting cell['type'] to is_parent boolean.
* Merging existing transport URL with transport information.
"""
# Start with the cell type conversion
if 'type' in cell:
self._validate_cell_type(cell['type'])
cell['is_parent'] = cell['type'] == 'parent'
del cell['type']
else:
cell['is_parent'] = False
# Now we disassemble the existing transport URL...
transport = {}
if existing and 'transport_url' in existing:
transport = rpc_driver.parse_transport_url(
existing['transport_url'])
# Copy over the input fields
transport_field_map = {
'username': 'username',
'password': 'password',
'hostname': 'rpc_host',
'port': 'rpc_port',
'virtual_host': 'rpc_virtual_host',
}
for key, input_field in transport_field_map.items():
# Set the default value of the field; using setdefault()
# lets us avoid overriding the existing transport URL
transport.setdefault(key, None)
# Only override the value if we're given an override
if input_field in cell:
transport[key] = cell.pop(input_field)
# Now set the transport URL
cell['transport_url'] = rpc_driver.unparse_transport_url(transport)
@wsgi.serializers(xml=CellTemplate)
@wsgi.deserializers(xml=CellDeserializer)
def create(self, req, body):
"""Create a child cell entry."""
context = req.environ['nova.context']
authorize(context)
if 'cell' not in body:
msg = _("No cell information in request")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
cell = body['cell']
if 'name' not in cell:
msg = _("No cell name in request")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
self._validate_cell_name(cell['name'])
self._normalize_cell(cell)
cell = db.cell_create(context, cell)
return dict(cell=_scrub_cell(cell))
@wsgi.serializers(xml=CellTemplate)
@wsgi.deserializers(xml=CellDeserializer)
def update(self, req, id, body):
"""Update a child cell entry. 'id' is the cell name to update."""
context = req.environ['nova.context']
authorize(context)
if 'cell' not in body:
msg = _("No cell information in request")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
cell = body['cell']
cell.pop('id', None)
if 'name' in cell:
self._validate_cell_name(cell['name'])
try:
# NOTE(Vek): There is a race condition here if multiple
# callers are trying to update the cell
# information simultaneously. Since this
# operation is administrative in nature, and
# will be going away in the future, I don't see
# it as much of a problem...
existing = db.cell_get(context, id)
except exception.CellNotFound:
raise exc.HTTPNotFound()
self._normalize_cell(cell, existing)
try:
cell = db.cell_update(context, id, cell)
except exception.CellNotFound:
raise exc.HTTPNotFound()
return dict(cell=_scrub_cell(cell))
def sync_instances(self, req, body):
"""Tell all cells to sync instance info."""
context = req.environ['nova.context']
authorize(context)
project_id = body.pop('project_id', None)
deleted = body.pop('deleted', False)
updated_since = body.pop('updated_since', None)
if body:
msg = _("Only 'updated_since' and 'project_id' are understood.")
raise exc.HTTPBadRequest(explanation=msg)
if updated_since:
try:
timeutils.parse_isotime(updated_since)
except ValueError:
msg = _('Invalid changes-since value')
raise exc.HTTPBadRequest(explanation=msg)
self.cells_rpcapi.sync_instances(context, project_id=project_id,
updated_since=updated_since, deleted=deleted)
class Cells(extensions.V3APIExtensionBase):
"""Enables cells-related functionality such as adding neighbor cells,
listing neighbor cells, and getting the capabilities of the local cell.
"""
name = "Cells"
alias = ALIAS
namespace = "http://docs.openstack.org/compute/ext/cells/api/v3"
version = 1
def get_resources(self):
coll_actions = {
'detail': 'GET',
'info': 'GET',
'sync_instances': 'POST',
'capacities': 'GET',
}
memb_actions = {
'capacities': 'GET',
}
res = extensions.ResourceExtension(ALIAS, CellsController(),
collection_actions=coll_actions,
member_actions=memb_actions)
return [res]
def get_controller_extensions(self):
return []
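# Illustrative request sketch (added for clarity, not part of the original source): creating a
# child cell through this extension would POST a body like the one below to the os-cells
# resource; the host, port and credential values are hypothetical placeholders.
#
#   POST /v3/os-cells
#   {"cell": {"name": "child1", "type": "child",
#             "rpc_host": "10.0.0.2", "rpc_port": 5673,
#             "username": "guest", "password": "guest"}}
#
# create() validates the name and type, folds the rpc_* fields into a transport_url via
# _normalize_cell(), and stores the record with db.cell_create().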
| apache-2.0 | 837,730,049,796,058,100 | 34.141844 | 78 | 0.592398 | false |
appleseedhq/cortex | python/IECoreNuke/UndoManagers.py | 5 | 3349 | ##########################################################################
#
# Copyright (c) 2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import nuke
## A context object intended for use with python's "with" block. It ensures
# that all operations in the block are performed with nuke's undo in a
# particular state (enabled or disabled) and that the previous state is correctly
# restored on exit from the block.
class UndoState :
## state should be True to enable undo, and False to disable it.
def __init__( self, state ) :
self.__state = state
def __enter__( self ) :
self.__prevState = not nuke.Undo.disabled()
if self.__state :
nuke.Undo.enable()
else :
nuke.Undo.disable()
def __exit__( self, type, value, traceBack ) :
if self.__prevState :
nuke.Undo.enable()
else :
nuke.Undo.disable()
## A context object intended for use with python's "with" block. It ensures
# that all operations in the block are performed with undo disabled, and that
# undo is reenabled if necessary upon exit from the block.
class UndoDisabled( UndoState ) :
def __init__( self ) :
UndoState.__init__( self, False )
## A context object intended for use with python's "with" block. It ensures
# that all operations in the block are performed with undo enabled, and that
# undo is disabled if necessary upon exit from the block.
class UndoEnabled( UndoState ) :
def __init__( self ) :
UndoState.__init__( self, True )
## A context object intended for use with python's "with" block. It groups a
# series of actions into a single Nuke undo.
class UndoBlock :
def __enter__( self ) :
nuke.Undo.begin()
def __exit__( self, type, value, traceBack ) :
nuke.Undo.end()
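# Minimal usage sketch (added for illustration; assumes the classes are imported from this
# module, and the node/knob names are hypothetical):
#
#   from UndoManagers import UndoDisabled, UndoBlock
#
#   with UndoDisabled() :
#       nuke.toNode( "Grade1" )["white"].setValue( 2.0 )  # change is not recorded for undo
#
#   with UndoBlock() :
#       buildNodeGraph()                                  # grouped into a single undo entry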
| bsd-3-clause | 6,144,218,958,561,458,000 | 35.402174 | 81 | 0.692744 | false |
Upstream-Research/csv-tools | csv_tools/csv_prepend.py | 1 | 11652 | ## Copyright (c) 2016-2017 Upstream Research, Inc. All Rights Reserved. ##
## Subject to an 'MIT' License. See LICENSE file in top-level directory ##
## #python-3.x
## python 2 does not work due mostly to issues with csv and io modules with unicode data
help_text = (
"CSV-PREPEND tool version 20170918\n"
"Insert a header row into a CSV stream\n"
"\n"
"csv-prepend [OPTIONS] ColumnValueList [InputFile]\n"
"\n"
"OPTIONS\n"
" -E {E} Input file text encoding (e.g. 'utf-8', 'windows-1252')\n"
" -e {E} Output file text encoding (e.g. 'utf-8', 'windows-1252')\n"
" -K {N} Number of rows to skip from the input (default=0)\n"
" -N {N} Maximum number of rows to read (default=ALL)\n"
" -n {N} Maximum number of rows to write (default=ALL)\n"
" -o {F} Output file name\n"
" -S {S} Input file field delimiter (default ',')\n"
" -s {S} Output file field delimiter (default ',')\n"
"\n"
"ColumnValueList is a comma separated list of values to be inserted as \n"
"the first row.\n"
"It is possible to replace the header row using the -K option.\n"
)
import sys
import csv
import io
from ._csv_helpers import (
decode_delimiter_name
,decode_charset_name
,decode_newline
)
def main(arg_list, stdin, stdout, stderr):
in_io = stdin
out_io = stdout
err_io = stderr
show_help = False
input_file_name = None
output_file_name = None
input_delimiter = ','
output_delimiter = ','
# 'std' will be translated to the standard line break decided by csv_helpers.decode_newline
input_row_terminator = 'std'
output_row_terminator = 'std'
input_charset_name = 'utf_8_sig'
output_charset_name = 'utf_8'
output_charset_error_mode = 'strict' # 'strict' | 'ignore' | 'replace' | 'backslashreplace'
input_charset_error_mode = 'strict' # 'strict' | 'ignore' | 'replace' | 'backslashreplace'
csv_cell_width_limit = 4*1024*1024 # python default is 131072 = 0x00020000
input_row_start_offset = 0
input_row_count_max = None
output_row_count_max = None
head_row_str = None
# [20160916 [db] I avoided using argparse in order to retain some flexibility for command syntax]
arg_count = len(arg_list)
arg_index = 1
while (arg_index < arg_count):
arg = arg_list[arg_index]
if (arg == "--help"
or arg == "-?"
):
show_help = True
elif (arg == "-o"
or arg == "--output"
):
if (arg_index < arg_count):
arg_index += 1
arg = arg_list[arg_index]
output_file_name = arg
elif (arg == "-E"
or arg == "--charset-in"
or arg == "--encoding-in"
):
if (arg_index < arg_count):
arg_index += 1
arg = arg_list[arg_index]
input_charset_name = arg
elif (arg == "-e"
or arg == "--charset-out"
or arg == "--encoding-out"
):
if (arg_index < arg_count):
arg_index += 1
arg = arg_list[arg_index]
output_charset_name = arg
elif (arg == "--charset-in-error-mode"
):
if (arg_index < arg_count):
arg_index += 1
arg = arg_list[arg_index]
input_charset_error_mode = arg
elif (arg == "--charset-out-error-mode"
):
if (arg_index < arg_count):
arg_index += 1
arg = arg_list[arg_index]
output_charset_error_mode = arg
elif (arg == "--charset-error-mode"
):
if (arg_index < arg_count):
arg_index += 1
arg = arg_list[arg_index]
input_charset_error_mode = arg
output_charset_error_mode = arg
elif (arg == "-S"
or arg == "--separator-in"
or arg == "--delimiter-in"
):
if (arg_index < arg_count):
arg_index += 1
arg = arg_list[arg_index]
input_delimiter = arg
elif (arg == "-s"
or arg == "--separator-out"
or arg == "--delimiter-out"
):
if (arg_index < arg_count):
arg_index += 1
arg = arg_list[arg_index]
output_delimiter = arg
elif (arg == "-W"
or arg == "--terminator-in"
or arg == "--newline-in"
or arg == "--endline-in"
):
if (arg_index < arg_count):
arg_index += 1
arg = arg_list[arg_index]
input_row_terminator = arg
elif (arg == "-w"
or arg == "--terminator-out"
or arg == "--newline-out"
or arg == "--endline-out"
):
if (arg_index < arg_count):
arg_index += 1
arg = arg_list[arg_index]
output_row_terminator = arg
elif (arg == "--cell-width-limit"
):
if (arg_index < arg_count):
arg_index += 1
arg = arg_list[arg_index]
csv_cell_width_limit = int(arg)
elif (arg == "-K"
or arg == "--row-offset-in"
or arg == "--offset"
or arg == "--skip"
):
if (arg_index < arg_count):
arg_index += 1
arg = arg_list[arg_index]
input_row_start_offset = int(arg)
elif (arg == "-N"
or arg == "--row-count-in"
):
if (arg_index < arg_count):
arg_index += 1
arg = arg_list[arg_index]
if ('ALL' == arg.upper()):
input_row_count_max = None
else:
input_row_count_max = int(arg)
elif (arg == "-n"
or arg == "--row-count-out"
):
if (arg_index < arg_count):
arg_index += 1
arg = arg_list[arg_index]
if ('ALL' == arg.upper()):
output_row_count_max = None
else:
output_row_count_max = int(arg)
elif (None != arg
and 0 < len(arg)
):
if (None == head_row_str):
head_row_str = arg
elif (None == input_file_name):
input_file_name = arg
arg_index += 1
head_row = None
if (None != head_row_str):
head_row = head_row_str.split(',')
if (None == head_row):
show_help = True
if (show_help):
out_io.write(help_text)
else:
input_charset_name = decode_charset_name(input_charset_name)
output_charset_name = decode_charset_name(output_charset_name)
input_row_terminator = decode_newline(input_row_terminator)
output_row_terminator = decode_newline(output_row_terminator)
input_delimiter = decode_delimiter_name(input_delimiter)
output_delimiter = decode_delimiter_name(output_delimiter)
in_file = None
out_file = None
try:
read_text_io_mode = 'rt'
#in_newline_mode = '' # don't translate newline chars
in_newline_mode = input_row_terminator
in_file_id = input_file_name
should_close_in_file = True
if (None == in_file_id):
in_file_id = in_io.fileno()
should_close_in_file = False
in_io = io.open(
in_file_id
,mode=read_text_io_mode
,encoding=input_charset_name
,newline=in_newline_mode
,errors=input_charset_error_mode
,closefd=should_close_in_file
)
if (should_close_in_file):
in_file = in_io
write_text_io_mode = 'wt'
out_newline_mode='' # don't translate newline chars
#out_newline_mode = output_row_terminator
out_file_id = output_file_name
should_close_out_file = True
if (None == out_file_id):
out_file_id = out_io.fileno()
should_close_out_file = False
out_io = io.open(
out_file_id
,mode=write_text_io_mode
,encoding=output_charset_name
,newline=out_newline_mode
,errors=output_charset_error_mode
,closefd=should_close_out_file
)
if (should_close_out_file):
out_file = out_io
in_csv = csv.reader(
in_io
,delimiter=input_delimiter
,lineterminator=input_row_terminator
)
out_csv = csv.writer(
out_io
,delimiter=output_delimiter
,lineterminator=output_row_terminator
)
execute(
in_csv
,out_csv
,input_row_terminator
,output_row_terminator
,input_row_start_offset
,input_row_count_max
,output_row_count_max
,head_row
)
except BrokenPipeError:
pass
finally:
if (None != in_file):
in_file.close()
in_file = None
if (None != out_file):
out_file.close()
out_file = None
def execute(
in_csv
,out_csv
,input_row_terminator
,output_row_terminator
,in_row_offset_start
,in_row_count_max
,out_row_count_max
,new_head_row
):
# first write the new row
out_csv.writerow(new_head_row)
# then write the output using the csv-translate code
# [20170918 [db] This is just a copy of the code from -csv-translate;
# it is a bit overkill to include all of this here]
end_row = None
cr_newline = '\r'
lf_newline = '\n'
crlf_newline = '\r\n'
out_newline = output_row_terminator
in_row_count = 0
out_row_count = 0
in_row = next(in_csv, end_row)
while (end_row != in_row
and (None == in_row_count_max or in_row_count < in_row_count_max)
and (None == out_row_count_max or out_row_count < out_row_count_max)
):
in_row_count += 1
if (in_row_offset_start < in_row_count):
out_row = list(in_row)
column_count = len(out_row)
column_position = 0
while (column_position < column_count):
cell_value = out_row[column_position]
# fix newline characters in the data
# (some tools - like postgres - can't handle mixed newline chars)
if (None != cell_value):
# replace crlf with lf, then we will replace lf's with the output newline,
# this prevents us from turning a crlf into a double newline
cell_value = cell_value.replace(crlf_newline, lf_newline)
cell_value = cell_value.replace(cr_newline, lf_newline)
cell_value = cell_value.replace(lf_newline, out_newline)
out_row[column_position] = cell_value
column_position += 1
out_csv.writerow(out_row)
out_row_count += 1
in_row = next(in_csv, end_row)
def console_main():
main(sys.argv, sys.stdin, sys.stdout, sys.stderr)
if __name__ == "__main__":
console_main()
| mit | -3,902,079,733,962,294,300 | 34.202417 | 101 | 0.495795 | false |
Oleh-Hrebchuk/OpenVPN-TryFalse | vpn/models.py | 1 | 3592 | from django.db import models
# Create your models here.
class General(models.Model):
class Meta:
db_table = "general"
permissions = (('admins', "admins manage all settings openvpn"),)
general_vpn_name = models.TextField(max_length=200)
general_project_name = models.TextField(max_length=200)
general_server_ip = models.GenericIPAddressField()
general_server_port = models.IntegerField()
general_project_status = models.TextField(max_length=20)
def __unicode__(self):
return '%s %s' % (self.general_server_port, self.general_vpn_name)
class PathsVPN(models.Model):
class Meta:
db_table = "pathsvpn"
pathsvpn_vpn_path = models.TextField(max_length=200)
pathsvpn_general = models.ForeignKey(General)
def __unicode__(self):
return self.pathsvpn_vpn_path
class Certs(models.Model):
class Meta:
db_table = 'certs'
certs_user_name = models.TextField(max_length=200)
certs_general = models.ForeignKey(General)
class Revoke(models.Model):
class Meta:
db_table = 'revoke'
certs_revoke_name = models.TextField(max_length=200)
certs_revoke_status = models.TextField(max_length=200)
certs_general = models.ForeignKey(General)
class ProgressBarCheckReq(models.Model):
class Meta:
db_table = 'progress_bar_check_req'
progress_percents = models.TextField(max_length=100,default=0)
class ProgressBarInstall(models.Model):
class Meta:
db_table = 'progress_bar_install'
progress_percents = models.TextField(max_length=100,default=0)
class MailConfiguration(models.Model):
class Meta:
db_table = 'mail_configuration'
mail_name = models.TextField(max_length=30)
mail_smtp_server = models.TextField(max_length=30)
mail_smtp_helo = models.TextField(max_length=30)
mail_smtp_email = models.TextField(max_length=30)
mail_port = models.IntegerField()
mail_tls = models.TextField(max_length=30)
mail_pass = models.TextField(max_length=30)
class RouteVPN(models.Model):
class Meta:
db_table = 'route_vpn'
route_name = models.TextField(max_length=100)
status = models.TextField(max_length=100)
user = models.ForeignKey('Users',related_name='route')
class Users(models.Model):
class Meta:
db_table = 'users'
user_routes = models.TextField(max_length=100)
class RouteAccessList(models.Model):
class Meta:
db_table = 'route_access_list'
route_name = models.TextField(max_length=100)
route_general = models.ForeignKey(General)
access = models.TextField(max_length=100)
user = models.ForeignKey('Revoke',related_name='route')
user_name = models.TextField(max_length=100)
class RouteList(models.Model):
class Meta:
db_table = 'route_list'
route_name = models.TextField(max_length=100)
route_general = models.ForeignKey(General)
class Groups(models.Model):
class Meta:
db_table = 'groups'
name_group = models.TextField(max_length=100)
route_general = models.ForeignKey(General)
class GroupsAcl(models.Model):
class Meta:
db_table = 'groups_acl'
acl_name = models.TextField(max_length=100)
acl_general = models.ForeignKey(General)
acl_group = models.ForeignKey(Groups)
class GroupsUsersAcl(models.Model):
class Meta:
db_table = 'groups_users_acl'
group_name = models.TextField(max_length=100)
group_general = models.ForeignKey(General)
group_user_name = models.TextField(max_length=100)
| gpl-3.0 | 8,188,553,040,697,901,000 | 23.772414 | 74 | 0.679287 | false |
jhh/puka | puka/middleware/debug.py | 1 | 1769 | import os
from django.conf import settings
from django.db import connection
def terminal_width():
"""
Function to compute the terminal width.
WARNING: This is not my code, but I've been using it forever and
I don't remember where it came from.
"""
width = 0
try:
import fcntl
import struct
import termios
s = struct.pack("HHHH", 0, 0, 0, 0)
x = fcntl.ioctl(1, termios.TIOCGWINSZ, s)
width = struct.unpack("HHHH", x)[1]
except:
pass
if width <= 0:
try:
width = int(os.environ["COLUMNS"])
except:
pass
if width <= 0:
width = 80
return width
class SqlPrintingMiddleware:
"""
Middleware which prints out a list of all SQL queries done
for each view that is processed. This is only useful for debugging.
"""
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
response = self.get_response(request)
indentation = 2
pad = " " * indentation
if len(connection.queries) > 0 and settings.DEBUG:
width = terminal_width()
total_time = 0.0
for query in connection.queries:
nice_sql = query["sql"].replace('"', "")
sql = f"\033[1;31m[{query['time']}]\033[0m {nice_sql}"
total_time = total_time + float(query["time"])
while len(sql) > width - indentation:
print(f"{pad}{sql[: width - indentation]}")
sql = sql[width - indentation :]
print(f"{pad}{sql}\n")
print(f"{pad}\033[1;32m[TOTAL TIME: {str(total_time)} seconds]\033[0m")
return response
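# Hedged usage note (added for illustration): middleware like this is normally enabled by
# listing its dotted path in settings.MIDDLEWARE; the exact path below is an assumption based
# on this file's location.
#
#   MIDDLEWARE = [
#       # ...
#       "puka.middleware.debug.SqlPrintingMiddleware",
#   ]
#
# It only prints when settings.DEBUG is True, since connection.queries stays empty otherwise.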
| mit | 3,804,612,522,956,817,400 | 28.983051 | 83 | 0.549463 | false |
ashtonteng/squad_exp | SelfMatchingLayer.py | 1 | 6757 | import tensorflow as tf
from tensorflow.contrib import rnn
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"]="2"
class SelfMatchingLayer():
def __init__(self, args, inputs, scope):
print("building self-matching layer", scope)
batch_size = args.batch_size
vocab_size = args.vocab_size
hidden_size = args.SelfMatchingLayer_size
model = args.model
num_layers = args.num_layers
training = args.training
#inputs = #batch_size x seq_length x hidden_size
max_seq_length = tf.shape(inputs)[1]
def compute_lengths(inputs):
used = tf.sign(tf.reduce_max(tf.abs(inputs), reduction_indices=2))
lengths = tf.reduce_sum(used, reduction_indices=1)
lengths = tf.cast(lengths, tf.int32) #lengths must be integers
return lengths
seq_lengths = compute_lengths(inputs)
# dropout beta testing: double check which one should affect next line
#if training and output_keep_prob < 1.0:
# inputs = tf.nn.dropout(inputs, output_keep_prob)
if model == 'rnn':
cell_fn = rnn.BasicRNNCell
elif model == 'gru':
cell_fn = rnn.GRUCell
elif model == 'lstm':
cell_fn = rnn.BasicLSTMCell
elif model == 'nas':
cell_fn = rnn.NASCell
else:
raise Exception("model type not supported: {}".format(model))
"""
1) W_vP * v_jP = how important is the jth p word to the t'th p word
2) W_vP2 * v_tP = how important is the t'th p word just by itself
"""
        with tf.variable_scope(scope):  # variables used in the pointer network
W_vP = tf.get_variable("W_vP", [hidden_size, hidden_size])#, initializer=tf.random_normal_initializer)
W_vP2 = tf.get_variable("W_vP2", [hidden_size, hidden_size])#, initializer=tf.random_normal_initializer)
W_g = tf.get_variable("W_g", [2*hidden_size, 2*hidden_size])#, initializer=tf.random_normal_initializer)
v = tf.get_variable("v", [hidden_size, 1])
tf.summary.histogram("W_vP_self", W_vP)
tf.summary.histogram("W_g_self", W_g)
tf.summary.histogram("W_vP2_self", W_vP)
tf.summary.histogram("v_self", v)
W_vP_tiled = tf.tile(tf.expand_dims(W_vP, 0), [batch_size, 1, 1]) #batch_size x hidden_size x hidden_size
W_vP2_tiled = tf.tile(tf.expand_dims(W_vP2, 0), [batch_size, 1, 1]) #batch_size x hidden_size x hidden_size
v_tiled = tf.tile(tf.expand_dims(v, 0), [batch_size, 1, 1]) #batch_size x hidden_size x 1
weighted_inputs = tf.matmul(inputs, W_vP_tiled) #batch_size x seq_length x hidden_size
weighted_inputs2 = tf.matmul(inputs, W_vP2_tiled) #batch_size x seq_length x hidden_size
#weighted_inputs2_tiled = tf.tile(tf.expand_dims(weighted_inputs2, 1), [1, max_seq_length, 1, 1]) #batch_size x seq_length x seq_length x hidden_size
#tf.matmul(tf.tanh(tf.add(tf.expand_dims(weighted_inputs, 1), weighted_inputs2_tiled)), v_tiled) #batch_size x seq_length x
#create TensorArray of length seq_length, containing tensors of size batch_size x 2*hidden_size, to be populated by tf.while_loop
initial_ta = tf.TensorArray(tf.float32, size=max_seq_length)
def condition(time, input_ta):
#elements_finished = (time >= seq_lengths) #this operation produces boolean tensor of [batch_size] defining if corresponding sequence has ended
#finished = tf.reduce_all(elements_finished) #AND operation over all batches. True if all batches finished.
return tf.less(time, max_seq_length)
def body(time, input_ta):
time_index = tf.stack([tf.constant(0, dtype=tf.int32), time, tf.constant(0, dtype=tf.int32)], axis=0)
inputs_slice = tf.slice(inputs, time_index, [-1, 1, -1]) #batch_size x 1 x hidden_size
weighted_inputs_slice = tf.matmul(inputs_slice, W_vP2_tiled) #batch_size x 1 x hidden_size
#time_index = tf.stack([tf.constant(0, dtype=tf.int32), time, tf.constant(0, dtype=tf.int32)], axis=0)
#weighted_inputs2_slice = tf.slice(weighted_inputs2, time_index, [-1, 1, -1]) #batch_size x 1 x hidden_size
logits = tf.matmul(tf.tanh(tf.add(weighted_inputs, weighted_inputs_slice)), v_tiled) #batch_size x seq_length x hidden_size * batch_size x hidden_size x 1 = #batch_size x seq_length x 1
attention_over_passage = tf.nn.softmax(logits, dim=1) # batch_size x seq_length x 1
weighted_passage = tf.reduce_sum(tf.multiply(attention_over_passage, inputs), axis=1) #batch_size x hidden_size
weighted_passage_with_inputs = tf.concat([tf.squeeze(inputs_slice, axis=1), weighted_passage], axis=1)
gate = tf.sigmoid(tf.matmul(weighted_passage_with_inputs, W_g)) #batch_size x hidden_size
output_ta = input_ta.write(time, tf.multiply(gate, weighted_passage_with_inputs))
return time + 1, output_ta
time = tf.constant(0)
time, output_ta = tf.while_loop(condition, body, [time, initial_ta])
BiRNN_inputs_stacked = tf.reshape(output_ta.stack(), [batch_size, max_seq_length, 2*hidden_size])
def compute_lengths(inputs):
used = tf.sign(tf.reduce_max(tf.abs(inputs), reduction_indices=2))
lengths = tf.reduce_sum(used, reduction_indices=1)
lengths = tf.cast(lengths, tf.int32) #lengths must be integers
return lengths
seq_lengths = compute_lengths(inputs)
cells_fw = []
for _ in range(num_layers):
cell = cell_fn(2*hidden_size)
if training and (output_keep_prob < 1.0 or input_keep_prob < 1.0):
cell = rnn.DropoutWrapper(cell, input_keep_prob=input_keep_prob, output_keep_prob=output_keep_prob)
cells_fw.append(cell) #cells is num_layers of cell stacked together
cells_bw = []
for _ in range(num_layers):
cell = cell_fn(2*hidden_size)
if training and (output_keep_prob < 1.0 or input_keep_prob < 1.0):
cell = rnn.DropoutWrapper(cell, input_keep_prob=input_keep_prob, output_keep_prob=output_keep_prob)
cells_bw.append(cell)
initial_states_fw = [cells_fw[i].zero_state(batch_size, tf.float32) for i in range(num_layers)]
initial_states_bw = [cells_bw[i].zero_state(batch_size, tf.float32) for i in range(num_layers)]
outputs, output_states_fw, output_states_bw = rnn.stack_bidirectional_dynamic_rnn(cells_fw, cells_bw, BiRNN_inputs_stacked, initial_states_fw, initial_states_bw, dtype=tf.float32, sequence_length=seq_lengths, scope=scope)
        self.outputs = outputs
| mit | -9,153,670,270,058,485,000 | 57.258621 | 229 | 0.631789 | false |
google/nitroml | examples/config.py | 1 | 1213 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# Lint as: python3
"""NitroML config.
This file defines environments for nitroml.
"""
import os
USE_KUBEFLOW = True
PIPELINE_NAME = 'examples'
GCS_BUCKET_NAME = 'artifacts.nitroml-brain-xgcp.appspot.com'
PIPELINE_ROOT = os.path.join('gs://', GCS_BUCKET_NAME, PIPELINE_NAME)
TF_DOWNLOAD_DIR = os.path.join('gs://', GCS_BUCKET_NAME, 'tensorflow-datasets')
OTHER_DOWNLOAD_DIR = os.path.join('gs://', GCS_BUCKET_NAME, 'other-datasets')
ENDPOINT = '38070e0315a0e15-dot-us-east1.pipelines.googleusercontent.com'
TFX_IMAGE = 'tensorflow/tfx:0.23.0.dev20200716'
| apache-2.0 | 6,816,562,970,263,589,000 | 38.129032 | 79 | 0.700742 | false |
scopenco/hagent | lib/modules/ip/IpAddr.py | 1 | 3971 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
Set of functions/classes for IP administration
Author: Andrey Scopenco [email protected]
'''
import sys
sys.path.insert(0, '/usr/local/hagent/lib')
import logging
from cli import Output
from hagent_db import add_record, del_record, update_record, \
get_record_attr, get_account_resources
class IpAddr(object):
'''Base class for ip administration '''
def __init__(self, options, ip):
self.options = options
self.ip = ip
self.db = self.options.get('db_file')
self.output = {'status': 0}
self.service_attr = {}
if not self.ip:
self.output['status'] = 1
self.output['status_msg'] = 'argument <ip> not specified'
def create(self, account, shared):
'''Create ip and assign to user.'''
try:
if self.output['status']:
raise Output
# check if some ip exist
check_attr = get_record_attr(self.db, 'Ip', self.ip)
if not check_attr['status']:
self.output['status'] = 1
self.output['status_msg'] = 'Ip %s exist' % self.ip
raise Output
if shared == 'on':
self.service_attr['shared'] = shared
else:
if account:
self.service_attr['account'] = account
self.output.update(add_record(self.db, 'Ip',
self.ip, self.service_attr))
raise Output
except Output:
return self.output
def delete(self):
'''Delete virtual domain.'''
try:
if self.output['status']:
raise Output
#TODO
# add check if ip assigned to one of users
# if so show error
self.output.update(del_record(self.db, 'Ip', self.ip))
raise Output
except Output:
return self.output
def update(self, account, shared='off', free='off', restart=1):
'''Change account for ip'''
restart = str(restart)
if restart != str(0):
restart = 1
try:
if not account and shared == 'off' and free == 'off':
self.output['status'] = 1
self.output['status_msg'] = 'argument <account> not specified'
if self.output['status']:
raise Output
# check if ip exist
check_attr = get_record_attr(self.db, 'Ip', self.ip)
if check_attr['status']:
self.output.update(check_attr)
raise Output
self.service_attr.update(check_attr)
del(self.service_attr['status'])
if free == 'on':
# TODO
# remove ip from all domains
if 'account' in self.service_attr:
del(self.service_attr['account'])
if 'shared' in self.service_attr:
del(self.service_attr['shared'])
else:
if shared == 'on':
# TODO
# remove ip from account and assign to shared
self.service_attr['shared'] = shared
if 'account' in self.service_attr:
del(self.service_attr['account'])
else:
# TODO
# remove from shared and assign to account
# if shared add is only one, show error
self.service_attr['account'] = account
if 'shared' in self.service_attr:
del(self.service_attr['shared'])
self.output.update(update_record(
self.db, 'Ip', self.ip, self.service_attr, remove_attr=True))
raise Output
except Output:
return self.output
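# Illustrative usage (added; the db path, account name and IP address are made-up values):
#
#   ip = IpAddr({'db_file': '/usr/local/hagent/var/hagent.db'}, '192.0.2.10')
#   result = ip.create(account='client1', shared='off')
#   if result['status']:
#       print result['status_msg']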
if __name__ == "__main__":
print __doc__
| gpl-2.0 | 5,762,475,317,647,996,000 | 30.267717 | 78 | 0.498363 | false |
KBNLresearch/iromlab | iromlab/kbapi/sru.py | 1 | 12625 | #! /usr/bin/env python
"""
Python API for KB SRU
"""
import sys
import urllib
import requests
from lxml import etree
SRU_BASEURL = 'http://jsru.kb.nl/sru/sru'
SRU_BASEURL += '?version=1.2&maximumRecords=%i'
SRU_BASEURL += '&operation=searchRetrieve'
SRU_BASEURL += '&startRecord=%i'
SRU_BASEURL += '&recordSchema=%s'
SRU_BASEURL += '&x-collection=%s&query=%s'
SETS = {'ANP': {'collection': 'ANP',
'description_en': 'Radio Bulletins ANP Press Agency',
'description_nl': 'ANP Radiobulletins Digitaal',
'metadataPrefix': 'didl',
'recordschema': 'dcx',
'setname': 'anp',
'time_period': [1937, 1989]},
'DPO': {'collection': 'DPO_boekdeel',
'description_en': 'Early Dutch Books Online',
'description_nl': 'Early Dutch Books Online',
'metadataPrefix': 'didl',
'recordschema': 'ddd',
'setname': 'DPO',
'time_period': [1781, 1800]},
'BYVANCK': {'description_en': 'Medieval Illuminated Manuscripts',
'description_nl': 'Middeleeuwse Verluchte Handschriften',
'metadataPrefix': 'dcx',
'setname': 'BYVANCK',
'time_period': [500, 1500]},
'SGD': {'description_en': 'States General Digital',
'description_nl': 'Staten-Generaal Digitaal',
'metadataPrefix': 'dcx',
'setname': 'sgd:register',
'time_period': [1962, 1994]},
'GGC': {'collection': 'GGC',
'description_en': 'General Catalogue KB',
'description_nl': 'Algemene Catalogus KB',
'metadataPrefix': 'dcx',
'recordschema': 'dcx',
'setname': 'ggc',
'time_period': [1937, 2021]}} # No idea what to use here?
# Name spaces in GGC records
srw_ns = 'http://www.loc.gov/zing/srw/'
tel_ns = 'http://krait.kb.nl/coop/tel/handbook/telterms.html'
xsi_ns = 'http://www.w3.org/2001/XMLSchema-instance'
dc_ns = 'http://purl.org/dc/elements/1.1/'
dcterms_ns = 'http://purl.org/dc/terms/'
dcx_ns = 'http://krait.kb.nl/coop/tel/handbook/telterms.html'
NSMAPGGC = {"srw": srw_ns,
"tel": tel_ns,
"xsi": xsi_ns,
"dc": dc_ns,
"dcterms": dcterms_ns,
"dcx": dcx_ns}
class response():
def __init__(self, record_data, sru):
self.record_data = record_data
self.sru = sru
def getElementText(self, tagName, attributeName, attributeValue):
# Returns text content of all elements for which tag matches tagName,
# and attribute value equals attributeValue. Set attributeName to empty
# string to get all tagName matches.
textFields = []
for r in self.record_data.iter():
if r.tag == tagName:
if attributeName != '':
try:
if r.attrib[attributeName] == attributeValue:
textFields.append(r.text)
except KeyError:
pass
else:
textFields.append(r.text)
return textFields
@property
def records(self):
if self.sru.nr_of_records == 0:
record_data = "<xml></xml>"
else:
ns = {'zs': 'http://www.loc.gov/zing/srw/'}
record_data = self.record_data.xpath("zs:records/zs:record",
namespaces=ns)[0]
return record(record_data, self.sru)
# Below property functions all return a list with all instances that satisfy
# criteria
@property
def typesDutch(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}type',
'{http://www.w3.org/XML/1998/namespace}lang',
'nl'))
@property
def typesDCMI(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}type',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'DCMIType'))
@property
def identifiersISBN(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}identifier',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcterms:ISBN'))
@property
def identifiersBrinkman(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}identifier',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcx:Brinkman'))
@property
def identifiersURI(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}identifier',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcterms:URI'))
@property
def identifiersOCLC(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}identifier',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'OCLC'))
@property
def languagesDutch(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}language',
'{http://www.w3.org/XML/1998/namespace}lang',
'nl'))
@property
def languagesEnglish(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}language',
'{http://www.w3.org/XML/1998/namespace}lang',
'en'))
@property
def languagesFrench(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}language',
'{http://www.w3.org/XML/1998/namespace}lang',
'fr'))
@property
def languagesISO639(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}language',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcterms:ISO639-2'))
@property
def dates(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}date',
'',
''))
@property
def extents(self):
return(self.getElementText('{http://purl.org/dc/terms/}extent',
'',
''))
@property
def creators(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}creator',
'',
''))
@property
def contributors(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}contributor',
'',
''))
@property
def titles(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}title',
'',
''))
@property
def titlesMain(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}title',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcx:maintitle'))
@property
def titlesIntermediate(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}title',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcx:intermediatetitle'))
@property
def publishers(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}publisher',
'',
''))
@property
def countries(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}country',
'',
''))
@property
def subjectsBrinkman(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}subject',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcx:Brinkman'))
@property
def subjectsISO9707(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}subject',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'ISO_9707_[Brinkman]'))
@property
def subjectsUNESCO(self):
return(self.getElementText('{http://purl.org/dc/elements/1.1/}subject',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'UNESCO'))
@property
def collectionIdentifiers(self):
return(self.getElementText('{http://purl.org/dc/terms/}isPartOf',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcx:collectionIdentifier'))
@property
def recordIdentifiersURI(self):
return(self.getElementText('{http://krait.kb.nl/coop/tel/handbook/telterms.html}recordIdentifier',
'{http://www.w3.org/2001/XMLSchema-instance}type',
'dcterms:URI'))
@property
def annotations(self):
        # Note that annotations sometimes contain language or itemID attributes;
# ignored for now (collect everything).
return(self.getElementText('{http://krait.kb.nl/coop/tel/handbook/telterms.html}annotation',
'',
''))
class record():
def __init__(self, record_data, sru):
self.record_data = record_data
self.sru = sru
def __iter__(self):
return self
# This works under Python 2.7
def next(self):
if self.sru.nr_of_records == 0:
raise StopIteration
if self.sru.startrecord < self.sru.nr_of_records + 1:
record_data = self.sru.run_query()
self.sru.startrecord += 1
return response(record_data, self.sru)
else:
raise StopIteration
# This works under Python 3
def __next__(self):
if self.sru.nr_of_records == 0:
raise StopIteration
if self.sru.startrecord < self.sru.nr_of_records + 1:
record_data = self.sru.run_query()
self.sru.startrecord += 1
return response(record_data, self.sru)
else:
raise StopIteration
class sru():
DEBUG = False
collection = False
maximumrecords = 50
nr_of_records = 0
query = ""
recordschema = False
sru_collections = SETS
startrecord = 0
def search(self, query, collection=False,
startrecord=1, maximumrecords=1, recordschema=False):
self.maximumrecords = maximumrecords
if sys.version.startswith('3'):
self.query = urllib.parse.quote_plus(query)
elif sys.version.startswith('2'):
self.query = urllib.quote_plus(query)
self.startrecord = startrecord
if collection not in self.sru_collections:
raise Exception('Unknown collection')
self.collection = self.sru_collections[collection]['collection']
if not self.collection:
raise Exception('Error, no collection specified')
if not recordschema:
self.recordschema = self.sru_collections[collection]['recordschema']
else:
self.recordschema = recordschema
record_data = self.run_query()
nr_of_records = [i.text for i in record_data.iter() if
i.tag.endswith('numberOfRecords')][0]
self.nr_of_records = int(nr_of_records)
if self.nr_of_records > 0:
return response(record_data, self)
return False
def run_query(self):
url = SRU_BASEURL % (self.maximumrecords, self.startrecord,
self.recordschema, self.collection, self.query)
if self.DEBUG:
sys.stdout.write(url)
r = requests.get(url)
if not r.status_code == 200:
raise Exception('Error while getting data from %s' % url)
record_data = etree.fromstring(r.content)
return record_data
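# Illustrative usage (added; the CQL query string is only an example):
#
#   client = sru()
#   result = client.search('dc.title all "rembrandt"', collection='GGC', maximumrecords=3)
#   if result:
#       for rec in result.records:
#           print(rec.titles, rec.creators, rec.identifiersISBN)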
| apache-2.0 | -5,763,885,260,459,784,000 | 35.594203 | 106 | 0.515644 | false |
klen/fquest | base/fquest/celery.py | 1 | 2029 | from __future__ import absolute_import
from sqlalchemy.exc import IntegrityError, DataError
from datetime import datetime, timedelta
from celery import Celery
from celery.utils.log import get_task_logger
from flask import current_app as app
from ..app import create_app
logger = get_task_logger('fquest')
if not app:
app = create_app()
ctx = app.test_request_context()
ctx.push()
celery = Celery('fquest')
celery.config_from_object(dict(
BROKER_URL=app.config.get('BROKER_URL'),
CELERYBEAT_SCHEDULE={
'fquest-beat': {
'task': 'base.fquest.celery.beat',
'schedule': app.config.get('BEAT_SCHEDULE'),
},
}
))
@celery.task(ignore_result=True)
def beat():
" Fetch character progress. "
from .models import Character, db, Event
from ..ext import cache
last_synced = cache.get('fquest.last_synced')
logger.info('BEAT')
if last_synced:
characters = Character.query.filter(Character.facebook_synced <= last_synced - timedelta(minutes=10)).limit(10).all()
else:
characters = [Character.query.order_by(Character.facebook_synced.desc()).first()]
cache.set('fquest.last_synced', datetime.now(), timeout=300)
for character in characters:
try:
if Event.fire(character):
db.session.commit()
except (IntegrityError, DataError):
db.session.rollback()
@celery.task
def publish(token, level, ignore_result=True):
" Async action publush. "
from facepy import GraphAPI, FacepyError
graph = GraphAPI(token)
try:
logger.info(level, token)
graph.session.request('POST', '%s/me/fquest-klen:raised' % graph.url, data=dict(
access_token=token,
level="http://fquest.node42.org%s" % level
))
# graph.post('/me/fquest-klen:raised', data=dict(
# level="http://fquest.node42.org%s" % level
# ))
except FacepyError, e:
logger.error(str(e))
# pymode:lint_ignore=E061
| bsd-3-clause | -30,655,614,917,419,030 | 25.012821 | 125 | 0.637753 | false |
shubhamjain0594/OthelloReinforcementLearning | nn.py | 1 | 8833 |
import neurolab as nl
import game2
import othello
import ntuplesystematic as nts
import time
import random
class nn:
def __init__(self):
self.x = [[-1,1] for x in range(64)]
self.net = nl.net.newff(self.x,[1])
#self.net.trainf = nl.train.train_gd
self.moveb = 0
self.movew = 0
self.last_vb = 0
self.last_vw = 0
self.fin_v = []
self.fin_val = []
def play_move(self,game,epsilon = 0):
moves = game.generate_moves()
num = random.uniform(0,1)
if(num <= epsilon):
temp = game.copy()
if(game.player==-1):
if(self.moveb == 0):
move = random.choice(moves)
temp.play_move(move)
v=[]
for k in range(8):
for l in range(8):
v.append(temp.get_color([k,l]))
v2 = [v]
v1 = self.net.sim(v2)
self.moveb = self.moveb+1
self.last_vb = v
return (v1[0][0], move)
else:
if(moves[0]==None):
v = []
for k in range(8):
for l in range(8):
#print temp.get_color([k,l])
v.append(game.get_color([k,l]))
v2 = [v]
v1 = self.net.sim(v2)
v1 = v1[0][0]
v1 = [v1]
#print 0
#print self.last_vb
self.fin_v.append(self.last_vb)
self.fin_val.append(v1)
self.last_vb = v
return (0,None)
else:
move = random.choice(moves)
reward = 0
temp.play_move(move)
if(temp.terminal_test()):
if(temp.score()>0):
reward=-1
elif(temp.score()<0):
reward = 1
v=[]
for k in range(8):
for l in range(8):
v.append(temp.get_color([k,l]))
v2 = [v]
v1 = self.net.sim(v2)
v1 = v1[0][0]
v1 = reward + v1
v1 = [v1]
#print 1
#print self.last_vb
self.fin_v.append(self.last_vb)
self.fin_val.append(v1)
self.last_vb = v
return (v1[0],move)
else:
if(self.movew == 0):
move = random.choice(moves)
temp.play_move(move)
v=[]
for k in range(8):
for l in range(8):
v.append(temp.get_color([k,l]))
v2 = [v]
v1 = self.net.sim(v2)
self.movew = self.movew+1
self.last_vw = v
return (v1[0][0], move)
else:
if(moves[0]==None):
v = []
for k in range(8):
for l in range(8):
#print temp.get_color([k,l])
v.append(game.get_color([k,l]))
v2 = [v]
v1 = self.net.sim(v2)
v1 = v1[0][0]
v1 = [v1]
#print 2
#print self.last_vw
self.fin_v.append(self.last_vw)
self.fin_val.append(v1)
self.last_vw = v
return (0,None)
else:
move = random.choice(moves)
reward = 0
temp.play_move(move)
if(temp.terminal_test()):
if(temp.score()>0):
reward=-1
elif(temp.score()<0):
reward = 1
v=[]
for k in range(8):
for l in range(8):
v.append(temp.get_color([k,l]))
v2 = [v]
v1 = self.net.sim(v2)
v1 = v1[0][0]
v1 = reward + v1
v1 = [v1]
#print 3
#print self.last_vw
self.fin_v.append(self.last_vw)
self.fin_val.append(v1)
self.last_vw = v
return (v1[0],move)
else:
if(game.player == -1):
if(self.moveb==0):
j=0
max1 = 0
best_v = 0
best_move = None
for move in moves:
temp = game.copy()
temp.play_move(move)
v = []
for k in range(8):
for l in range(8):
#print temp.get_color([k,l])
v.append(temp.get_color([k,l]))
#print v
v2 = [v]
v1 = self.net.sim(v2)
if(j==0):
max1 = v1[0][0]
best_v = v
best_move = move
elif(v1[0][0]>max1):
max1 = v1[0][0]
best_move = move
best_v =v
j = j+1
self.moveb = self.moveb+1
self.last_vb = best_v
return (max1, best_move)
else:
if(moves[0]==None):
v = []
for k in range(8):
for l in range(8):
#print temp.get_color([k,l])
v.append(game.get_color([k,l]))
v2 = [v]
v1 = self.net.sim(v2)
v1 = v1[0][0]
v1 = [v1]
#print 4
#print self.last_vb
self.fin_v.append(self.last_vb)
self.fin_val.append(v1)
self.last_vb = v
return (0,None)
else:
j=0
max1 = 0
best_v = 0
best_move = 0
for move in moves:
temp = game.copy()
temp.play_move(move)
v = []
for k in range(8):
for l in range(8):
#print temp.get_color([k,l])
v.append(temp.get_color([k,l]))
#print v
v2 = [v]
v1 = self.net.sim(v2)
if(j==0):
max1 = v1[0][0]
best_v = v
best_move = move
elif(v1[0][0]>max1):
max1 = v1[0][0]
best_move = move
best_v =v
j = j+1
temp = game.copy()
reward = 0
temp.play_move(best_move)
if(temp.terminal_test()):
if(temp.score()>0):
reward=-1
elif(temp.score()<0):
reward = 1
v2 = [best_v]
v1 = self.net.sim(v2)
v1 = v1[0][0]
v1 = reward + v1
v1 = [v1]
#print 5
#print self.last_vw
self.fin_v.append(self.last_vb)
self.fin_val.append(v1)
self.last_vb = best_v
return (max1,best_move)
else:
if(self.movew==0):
j=0
max1 = 0
best_v = 0
best_move = 0
for move in moves:
temp = game.copy()
temp.play_move(move)
v = []
for k in range(8):
for l in range(8):
#print temp.get_color([k,l])
v.append(temp.get_color([k,l]))
#print v
v2 = [v]
v1 = self.net.sim(v2)
if(j==0):
max1 = v1[0][0]
best_v = v
best_move = move
elif(v1[0][0]<max1):
max1 = v1[0][0]
best_move = move
best_v =v
j = j+1
self.movew = self.movew+1
self.last_vw = best_v
return (max1,best_move)
else:
if(moves[0]==None):
v = []
for k in range(8):
for l in range(8):
#print temp.get_color([k,l])
v.append(game.get_color([k,l]))
v2 = [v]
v1 = self.net.sim(v2)
v1 = v1[0][0]
v1 = [v1]
#print 6
#print self.last_vw
self.fin_v.append(self.last_vw)
self.fin_val.append(v1)
self.last_vw = v
return (0,None)
else:
j=0
max1 = 0
best_v = 0
best_move = 0
for move in moves:
temp = game.copy()
temp.play_move(move)
v = []
for k in range(8):
for l in range(8):
#print temp.get_color([k,l])
v.append(temp.get_color([k,l]))
#print v
v2 = [v]
v1 = self.net.sim(v2)
if(j==0):
max1 = v1[0][0]
best_v = v
best_move = move
elif(v1[0][0]<max1):
max1 = v1[0][0]
best_move = move
best_v =v
j = j+1
temp = game.copy()
reward = 0
temp.play_move(best_move)
if(temp.terminal_test()):
if(temp.score()>0):
reward=-1
elif(temp.score()<0):
reward = 1
v2 = [best_v]
v1 = self.net.sim(v2)
v1 = v1[0][0]
v1 = reward + v1
v1 = [v1]
#print 7
#print self.last_vw
self.fin_v.append(self.last_vw)
self.fin_val.append(v1)
self.last_vw = best_v
return (max1,best_move)
def reset(self):
#print self.fin_v
#print self.fin_val
error = self.net.train(self.fin_v,self.fin_val,epochs=5,show=1)
self.moveb = 0
self.movew = 0
self.last_vb = 0
self.last_vw = 0
self.fin_v = []
self.fin_val = []
def reset_without_train(self):
self.moveb = 0
self.movew = 0
self.last_vb = 0
self.last_vw = 0
self.fin_v = []
self.fin_val = []
if __name__ == "__main__":
"""
Creates a main player
"""
playernew = nn()
nTuplesSystematicObject = nts.nTuplesSystematic()
game2.play(othello.game(), game2.player(lambda x: playernew.play_move(x)),game2.player(lambda x: nTuplesSystematicObject.play_next_move(x)), True)
playernew.reset_without_train()
time.sleep(5)
k = 100
for i in range(k):
print(i)
game2.play(othello.game(), game2.player(lambda x: playernew.play_move(x,0.3)),game2.player(lambda x: playernew.play_move(x,0.3)), False)
playernew.reset()
wins = [0, 0]
for i in range(100):
winner = game2.play(othello.game(), game2.player_epsilon(lambda x: playernew.play_move(x)),game2.player_epsilon(lambda x: nTuplesSystematicObject.play_next_move(x)), False)
if winner == 1:
wins[0] += 1
elif winner == 2:
wins[1] += 1
winner = game2.play(othello.game(),game2.player_epsilon(lambda x: nTuplesSystematicObject.play_next_move(x)), game2.player_epsilon(lambda x: playernew.play_move(x)), False)
if winner == 2:
wins[0] += 1
elif winner == 1:
wins[1] += 1
print wins
f = open('results','a')
val = (k,0.001,'epsilon',wins)
val = str(val)
| gpl-2.0 | -5,274,029,948,232,391,000 | 22.744624 | 174 | 0.510246 | false |
tjcsl/ion | intranet/apps/templatetags/math.py | 1 | 1143 | import logging
from django import template
register = template.Library()
logger = logging.getLogger(__name__)
@register.filter
def round_num(number, precision=0):
"""Rounds a number to a given precision in decimal digits (default 0 digits) and returns the
    rounded value as a float.
Precision may be negative. A precision of 1 will round to the tenths
place and a precision of -1 will round to the tens place.
Returns:
Float
"""
return round(number, precision)
@register.filter
def to_int(num):
"""Converts a number to an integer."""
return int(num)
@register.filter
def divide(dividend, divisor):
"""Returns the quotient of the arguments as a float."""
try:
return 1.0 * dividend / divisor
except ZeroDivisionError:
return 0.0
@register.filter
def multiply(num1, num2):
"""Returns the product of the arguments."""
return num1 * num2
@register.filter
def minimum(num1, num2):
"""Returns smaller of two numbers."""
return min(num1, num2)
@register.filter
def maximum(num1, num2):
"""Returns smaller of two numbers."""
return max(num1, num2)
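# --- Editor's illustrative example (not part of the original file) ---
# These filters are plain functions, so they can be sanity-checked directly:
#   round_num(3.14159, 2)          -> 3.14
#   divide(7, 2)                   -> 3.5
#   minimum(4, 9), maximum(4, 9)   -> 4, 9
# In a template they would be chained, e.g. {{ points|divide:total|round_num:2 }}
# (the template variable names in that line are made up).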
| gpl-2.0 | 6,612,513,036,553,923,000 | 18.05 | 96 | 0.677165 | false |
cjdinsmore/slack-karma-bot | sqlite_helper.py | 1 | 2956 | import sqlite3
from models import DbMessage, DbUser, ReactionNames
class SqliteHelper(object):
"""
This class manages interfacing with the SQLite database. It stores DbUser and DbMessage
objects (see: models.py).
"""
def __init__(self, db_file):
self.connection = sqlite3.connect(db_file)
self.cursor = self.connection.cursor()
def add_users(self, users):
"""
Adds users to the database.
"""
query = 'INSERT INTO User VALUES (?, ?, ?, ?)'
users_as_rows = []
for user in users:
users_as_rows.append(user.to_row())
self._execute_many_query(query, users_as_rows)
return self.cursor.fetchall()
def get_votes_for_user(self, user_id):
"""
        Fetches the user's vote totals, returning a tuple (upvotes, downvotes).
"""
query = 'SELECT sum(upvotes), sum(downvotes) FROM Message WHERE user_id=?'
args = (user_id,)
self._execute_query(query, args)
return self.cursor.fetchone()
def get_user_by_id(self, user_id):
"""
Self-explanatory.
"""
query = 'SELECT * FROM User WHERE slack_id=?'
args = (user_id,)
row = self._execute_query(query, args=args)
if row:
return DbUser(row[0])
def get_messages_for_user(self, user_id):
"""
Fetches all messages in the database for a given user.
Returns an array of DbMessage objects (models.py)
"""
messages = []
args = (user_id,)
query = "SELECT * FROM Message WHERE user_id=?"
self._execute_query(query, args)
rows = self.cursor.fetchall()
for row in rows:
messages.append(DbMessage(row))
return messages
def get_latest_message_timestamp(self):
"""
Gets the timestamp for the most recent message.
"""
query = 'SELECT timestamp FROM Message ORDER BY timestamp DESC'
self._execute_query(query)
return self.cursor.fetchone()[0]
def add_messages(self, messages):
"""
Adds messages to the database.
"""
query = 'INSERT INTO Message VALUES (NULL, ?, ?, ?, ?, ?)'
messages_as_rows = []
for db_message in messages:
messages_as_rows.append(db_message.to_row())
self._execute_many_query(query, messages_as_rows)
return self.cursor.fetchall()
def _execute_query(self, query, args=None):
"""
Protected method that executes a database query.
`args` represents arguments for the WHERE clause, like user_id and such.
"""
if args:
self.cursor.execute(query, args)
else:
self.cursor.execute(query)
def _execute_many_query(self, query, args):
with self.connection:
self.cursor.executemany(query, args)
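# --- Editor's illustrative sketch (not part of the original file). ---
# Minimal demonstration of the helper API above; the database file name and the Slack
# user id are assumptions, and the User/Message tables must already exist.
if __name__ == '__main__':
    helper = SqliteHelper('slack_karma.db')
    upvotes, downvotes = helper.get_votes_for_user('U0XXXXXXX') or (None, None)
    print('upvotes=%s downvotes=%s' % (upvotes, downvotes))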
| mit | -4,904,182,541,421,417,000 | 32.213483 | 95 | 0.569012 | false |
torch2424/LanguageProcessingLearning | pythonIsItMeme/memeML.py | 1 | 3017 | import nltk
from textblob.classifiers import NaiveBayesClassifier
#Utf 8 support
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
print "Welcom 2 meme guesser!"
print "Plz wait we is being loaded..."
#Our testing data features
memeFeatures = []
noMemeFeatures = []
#Read in our meme training data
memeTrainList = [];
with open('trainingDataMeme', 'r') as f:
for line in f:
line = line.strip().lower()
memeTrainList.append([line, 'meme'])
#Add the memes to the features
memeFeatures.extend(memeTrainList)
#Read in our non-meme training data
noMemeTrainList = [];
with open('trainingDataNoMeme', 'r') as f:
for line in f:
line = line.strip().lower()
noMemeTrainList.append([line, 'nomeme'])
#Add the nomemes to the features
noMemeFeatures.extend(noMemeTrainList)
#Train a classifier iteratively, and use the most accurate
classifierDivide = 23
maxSize = 0
if len(memeFeatures) > len(noMemeFeatures):
maxSize = len(noMemeFeatures)
else:
maxSize = len(memeFeatures)
#Make sure maxSize is at least classifierDivide entries
if maxSize < classifierDivide:
print "plz add some moar memes, need moar than 10 ;)"
#Find how many chunks of size classifierDivide we can train over
featureIteration = maxSize / classifierDivide
#Create our features array, and classifiers
classifierFeatures = []
trainedClassifiers = []
for i in range(0, featureIteration):
#Extend the needed features
classifierFeatures.extend(memeFeatures[(i * featureIteration): ((i + 1) * featureIteration)])
classifierFeatures.extend(noMemeFeatures[(i * featureIteration): ((i + 1) * featureIteration)])
#Train a classifier to be added to our array
trainedClassifiers.append(NaiveBayesClassifier(classifierFeatures))
#Next find the most accurate classifier
#Find our accuracy
testingAccuracy = [];
with open('testingData', 'r') as f:
for line in f:
line = line.strip().lower()
testingAccuracy.append([line.split('%')[0], line.split('%')[1]])
#Set the classifier to the first classifier
bClassifier = trainedClassifiers[0]
for i in range(1, len(trainedClassifiers)):
    if trainedClassifiers[i].accuracy(testingAccuracy) > bClassifier.accuracy(testingAccuracy):
bClassifier = trainedClassifiers[i]
print("More accurate index: " + str(i))
print("Meme Accuracy index " + str(i) + ": " + str(bClassifier.accuracy(testingAccuracy) * 100) + "%")
#Print the number of classifiers we tested
print("Tested Classifiers: " + str(len(trainedClassifiers)))
#Times 100 for percent, print our accuracy
print("Meme Accuracy: " + str(bClassifier.accuracy(testingAccuracy) * 100) + "%")
#Read in our testing
testingList = [];
with open('testingData', 'r') as f:
for line in f:
line = line.strip().lower()
testingList.append(line.split('%')[0])
#Print our testing data
for i in range(0, len(testingList)):
print testingList[i] + " - this haz " + bClassifier.classify(testingList[i])
#Show our most informative features
bClassifier.show_informative_features(15)
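# --- Editor's illustrative sketch (not part of the original script) ---
# The same TextBlob API with inline data instead of the training files; the sample
# sentences are invented and only show the expected (text, label) shape:
#
#   tiny_clf = NaiveBayesClassifier([("such doge very wow", "meme"),
#                                    ("quarterly report attached", "nomeme")])
#   tiny_clf.classify("much wow")   # -> 'meme' (toy data, result not guaranteed)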
| apache-2.0 | -6,739,082,548,951,831,000 | 28.871287 | 106 | 0.71528 | false |
VertNet/api | Download/CountHandler.py | 1 | 3989 | # This file is part of VertNet: https://github.com/VertNet/webapp
#
# VertNet is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# VertNet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with VertNet. If not, see: http://www.gnu.org/licenses
"""Download service.
Get parameters from request
Get record count from vnsearch.query_rec_counter
Send email to user with result
"""
import os
import json
import logging
from datetime import datetime
from google.appengine.api import search, taskqueue, mail
import webapp2
import Search.search as vnsearch
from config import OPTIMUM_CHUNK_SIZE
LAST_UPDATED = '2016-05-20T12:37:29+CEST'
IS_DEV = os.environ.get('SERVER_SOFTWARE', '').startswith('Development')
if IS_DEV:
QUEUE_NAME = 'default'
else:
QUEUE_NAME = 'apitracker'
class CountHandler(webapp2.RequestHandler):
def post(self):
# Get parameters from request
q = json.loads(self.request.get('q'))
latlon = self.request.get('latlon')
country = self.request.get('country')
user_agent = self.request.get('user_agent')
requesttime = self.request.get('requesttime')
reccount = int(self.request.get('reccount'))
fromapi = self.request.get('fromapi')
source = self.request.get('source')
cursor = self.request.get('cursor')
email = self.request.get('email')
if cursor:
curs = search.Cursor(web_safe_string=cursor)
else:
curs = ''
records, next_cursor = vnsearch.query_rec_counter(
q, OPTIMUM_CHUNK_SIZE, curs=curs
)
logging.info("Got %d records this round" % records)
# Update the total number of records retrieved
reccount = reccount+records
if next_cursor:
curs = next_cursor.web_safe_string
else:
curs = None
if curs:
countparams = dict(
q=self.request.get('q'), cursor=curs, reccount=reccount,
requesttime=requesttime, fromapi=fromapi, source=source,
latlon=latlon, email=email, country=country,
user_agent=user_agent)
logging.info('Record counter. Count: %s Email: %s Query: %s'
' Cursor: %s Version: %s' %
(reccount, email, q, next_cursor, fromapi))
# Keep counting
taskqueue.add(
url='/service/download/count',
params=countparams
)
else:
# Finished counting. Log the results and send email.
apitracker_params = dict(
latlon=latlon,
country=country,
user_agent=user_agent,
query=q,
type='count',
api_version=fromapi,
request_source=source,
count=reccount,
downloader=email
)
taskqueue.add(
url='/apitracker',
payload=json.dumps(apitracker_params),
queue_name=QUEUE_NAME
)
resulttime = datetime.utcnow().isoformat()
mail.send_mail(
sender="VertNet Counts <[email protected]>",
to=email,
subject="Your VertNet count is ready!",
body="""Your query found %s matching records.
Query: %s
Request submitted: %s
Request fulfilled: %s
""" % (reccount, q, requesttime, resulttime))
logging.info("Successfully sent mail to user")
| gpl-2.0 | -9,069,144,461,149,763,000 | 31.169355 | 72 | 0.601655 | false |
UMD-DRASTIC/drastic-web | webdav/resources.py | 1 | 6514 | from os import path as ospath
from datetime import datetime
from djangodav.fs.resources import BaseFSDavResource
from djangodav.utils import url_join
from drastic.models import Collection, Resource, DataObject
import logging
logging.warn('WEBDAV has been loaded')
CHUNK_SIZE = 1048576
def chunkstring(string, length):
return (string[0+i:length+i] for i in range(0, len(string), length))
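# Editor's note (added for clarity): chunkstring lazily yields successive fixed-size slices,
# e.g. list(chunkstring('abcdefg', 3)) == ['abc', 'def', 'g']; write() below relies on it to
# split an uploaded body into CHUNK_SIZE pieces before storing them as DataObject chunks.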
class DrasticDavResource(BaseFSDavResource):
root = '/'
node = None
notfound = False
def me(self):
if self.node is not None or self.notfound:
return self.node
try:
self.node = Collection.find(self.get_abs_path())
except Exception:
logging.exception('Cannot fetch drastic collection for {}'.format(self.path))
if self.node is None:
try:
self.node = Resource.find(self.get_abs_path())
except Exception:
logging.exception("Cannot find drastic file resource for {}"
.format(self.path))
if self.node is None:
self.notfound = True
return self.node
def get_abs_path(self):
"""Return the absolute path of the resource. Used internally to interface with
an actual file system. If you override all other methods, this one will not
be used."""
return ospath.join(self.root, *self.path)
@property
def getcontentlength(self):
"""Return the size of the resource in bytes."""
if self.is_collection:
return 0
else:
return self.me().get_size()
def get_created(self):
"""Return the create time as datetime object."""
return self.me().get_create_ts()
def get_modified(self):
"""Return the modified time as datetime object."""
return self.me().get_modified_ts()
@property
def is_root(self):
if self.path is None or len(self.path) == 0:
return True
else:
return False
@property
def displayname(self):
if self.is_root:
return '/'
else:
return super(DrasticDavResource, self).displayname
@property
def is_collection(self):
"""Return True if this resource is a directory (collection in WebDAV parlance)."""
return isinstance(self.me(), Collection)
@property
def is_object(self):
"""Return True if this resource is a file (resource in WebDAV parlance)."""
return not self.is_collection
@property
def exists(self):
"""Return True if this resource exists."""
return self.me() is not None
@property
def getetag(self):
return self.me().uuid
def get_children(self):
"""Return an iterator of all direct children of this resource."""
if self.is_collection:
child_c, child_r = self.me().get_child()
child_c = [u"{}/".format(c) for c in child_c]
child_c.extend(child_r)
for child in child_c:
yield self.clone(url_join(*(self.path + [child])))
def read(self):
data = []
for chk in self.me().chunk_content():
data.append(chk)
return data
def write(self, request):
"""Write this data object from HTTP request."""
# Note that all permission checks happen in DAVView
# TODO Can be optimized with Cassandra LWT
# Check if the resource already exists
content = request.body
# md5sum = md5(content).hexdigest()
mimetype = "application/octet-stream"
logging.warn(str(dir(request)))
if hasattr(request, 'content_type'):
tmp = request.content_type.split("; ")
mimetype = tmp[0]
resource = Resource.find(self.get_abs_path())
if resource:
# NOTE For now WEBDAV updates are not supported.
# TODO WEBDAV updates were resulting in empty files. Compare with CDMIResource
raise NotImplementedError()
# Update value
# Delete old blobs
old_meta = resource.get_metadata()
old_acl = resource.get_acl()
create_ts = resource.get_create_ts()
resource.delete_blobs()
uuid = None
seq_num = 0
for chk in chunkstring(content, CHUNK_SIZE):
if uuid is None:
uuid = DataObject.create(chk,
metadata=old_meta,
acl=old_acl,
create_ts=create_ts).uuid
else:
DataObject.append_chunk(uuid, chk, seq_num, False)
seq_num += 1
url = "cassandra://{}".format(uuid)
resource.update(url=url,
mimetype=mimetype)
else: # Create resource
uuid = None
seq_num = 0
create_ts = datetime.now()
for chk in chunkstring(content, CHUNK_SIZE):
if uuid is None:
uuid = DataObject.create(chk, False,
create_ts=create_ts).uuid
else:
DataObject.append_chunk(uuid, chk, seq_num, False)
seq_num += 1
if uuid is None: # Content is null
uuid = self.create_empty_data_object()
url = "cassandra://{}".format(uuid)
resource = Resource.create(name=self.displayname,
container=self.get_parent_path()[:-1],
url=url,
mimetype=mimetype,
size=len(content))
def delete(self):
"""Delete the resource, recursive is implied."""
self.me().delete()
def create_collection(self):
"""Create a directory in the location of this resource."""
# TODO needs checks from CDMIView
container = None
if self.get_parent_path() == '' or self.get_parent_path() == '/':
container = '/'
else:
container = self.get_parent_path()[:-1]
Collection.create(name=self.displayname, container=container)
def copy_object(self, destination, depth=0):
raise NotImplementedError
def move_object(self, destination):
raise NotImplementedError
| agpl-3.0 | -4,060,378,795,736,630,300 | 32.927083 | 90 | 0.548204 | false |
chrisRubiano/django_reportes | config/settings/local.py | 1 | 2241 | # -*- coding: utf-8 -*-
"""
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
"""
import socket
import os
from .base import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env('DJANGO_SECRET_KEY', default='-og<m&!U(b$2D.+^D-9LvG{,-Bdk%F[pE@Q>@26QB9}0EeTuj`')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_PORT = 1025
EMAIL_HOST = 'localhost'
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
default='django.core.mail.backends.console.EmailBackend')
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware', ]
INSTALLED_APPS += ['debug_toolbar', ]
INTERNAL_IPS = ['127.0.0.1', '10.0.2.2', ]
# tricks to have debug toolbar when developing with docker
if os.environ.get('USE_DOCKER') == 'yes':
ip = socket.gethostbyname(socket.gethostname())
INTERNAL_IPS += [ip[:-1] + '1']
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['django_extensions', ]
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
| mit | 205,409,767,345,248,540 | 29.283784 | 99 | 0.481481 | false |
DatapuntAmsterdam/datapunt_geosearch | web/geosearch/create_import_index.py | 1 | 6572 | # -*- coding: utf-8 -*-
"""
Geosearch master index
Create a master index that is usable for all geosearches.
With this master index we can do one search and find all items
within a specific radius or that contain a specific point.
Current counts for different datasets:
kadastraal_object 582551
pand 183659
beperking 45202
meetbout 12904
monument 9420
bouwblok 8597
openbareruimte 6116
ligplaats 2914
peilmerk 874
bominslag 865
verdachtgebied 489
buurt 481
standplaats 321
grondexploitatie 183
uitgevoerdonderzoek 136
buurtcombinatie 99
biz 49
grootstedelijkgebied 36
tellus 28
gebiedsgerichtwerken 22
gevrijwaardgebied 19
stadsdeel 8
unesco 2
"""
import json
import re
from collections import Counter
from datapunt_geosearch.config import DSN_BAG, DSN_MONUMENTEN, DSN_VARIOUS_SMALL_DATASETS, DSN_MILIEU, DSN_NAP, \
DSN_GRONDEXPLOITATIE
from datapunt_geosearch.datasource import BagDataSource, dbconnection
from datapunt_geosearch.datasource import BominslagMilieuDataSource
from datapunt_geosearch.datasource import MunitieMilieuDataSource
from datapunt_geosearch.datasource import NapMeetboutenDataSource
# from datapunt_geosearch.datasource import TellusDataSource
from datapunt_geosearch.datasource import MonumentenDataSource
from datapunt_geosearch.datasource import GrondExploitatieDataSource
from datapunt_geosearch.datasource import get_dataset_class, get_all_dataset_names
import psycopg2.extras
sources = {
"bag": {
'ds': BagDataSource,
'config': DSN_BAG
},
"monumenten": {
'ds': MonumentenDataSource,
'config': DSN_MONUMENTEN
},
"bominslagmilieu": {
'ds': BominslagMilieuDataSource,
'config': DSN_MILIEU
},
"munitiemilieu": {
'ds': MunitieMilieuDataSource,
'config': DSN_MILIEU
},
"nap": {
'ds': NapMeetboutenDataSource,
'config': DSN_NAP
},
"grondexploitatie": {
'ds': GrondExploitatieDataSource,
'config': DSN_GRONDEXPLOITATIE
},
# "tellus": {
# 'ds': TellusDataSource,
# 'config': DSN_TELLUS
# },
}
# Mapping from item to authorization scope
required_scopes = {
'grondexploitatie': 'GREX/R'
}
# URI can be generated
# type can be generated
master_index_table_name = 'geo_master'
def create_index_table(conn):
with conn.transaction_cursor() as cur:
# If something went wrong the previous time
cur.execute(f'''DROP TABLE IF EXISTS {master_index_table_name}_new''')
cur.execute(f'''
CREATE TABLE {master_index_table_name}_new (
dataset char varying(36) NOT NULL,
id char varying(36) NOT NULL,
display char varying(128),
wkb_geometry Geometry(Geometry,28992),
data JSONB,
PRIMARY KEY(dataset, id)
)
''')
cur.execute(f'''
CREATE INDEX ON {master_index_table_name}_new USING gist (wkb_geometry)
''')
def rename_index_table(conn):
with conn.transaction_cursor() as cur:
cur.execute(f'''
ALTER TABLE IF EXISTS {master_index_table_name} rename to {master_index_table_name}_old
''')
cur.execute(f'''
ALTER TABLE {master_index_table_name}_new rename to {master_index_table_name}
''')
cur.execute(f'''
DROP TABLE IF EXISTS {master_index_table_name}_old
''')
def get_index_data(sources, conn):
count = Counter()
with conn.transaction_cursor() as write_cursor:
for key, value in sources.items():
print(f"Process {key}")
ds = value['ds'](value['config'])
operator = ds.meta['operator']
geofield = ds.meta['geofield']
if 'fields' in ds.meta:
fields = ','.join(ds.meta['fields'])
for field in ds.meta['fields']:
m = re.match(f'''{geofield} as (.*)$''', field)
if m:
geofield = m.group(1)
else:
fields = '*'
with ds.dbconn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as read_cur:
for dataset_name, datasets in ds.meta['datasets'].items():
for dataset_ident, table in datasets.items():
print(f"Processing dataset: {dataset_ident}")
if dataset_ident in required_scopes:
scope = "'" + required_scopes[dataset_ident] + "'"
else:
scope = 'NULL'
query = """SELECT {} FROM {}""".format(
fields,
table)
read_cur.execute(query)
for record in read_cur:
count[dataset_ident] += 1
if count[dataset_ident] > 0 and (count[dataset_ident] % 1000) == 0:
print(f"Processing item {count[dataset_ident]} in {dataset_ident}")
uri = record.pop('uri') # uri can be recreated
if 'id' not in record:
m = re.search(r'/([0-9\-a-fA-F]+)/$', uri)
id1 = m.group(1)
else:
id1 = record.pop('id')
id1 = str(id1)
display = record.pop('display')
wkb_geometry = record.pop(geofield)
record.pop('type', None) # type can be recreated on the fly
json_data = None if len(record) == 0 else json.dumps(record, default=str)
write_cursor.execute(f'''
INSERT INTO {master_index_table_name}_new(dataset, id, display, wkb_geometry, data)
VALUES(%s, %s, %s, %s, %s)
''', (dataset_ident, id1, display, wkb_geometry, json_data))
for key, item in sorted(count.items(), key=lambda x: x[1], reverse=True):
print(key, item)
if __name__ == '__main__':
# First add all data for all generic datasets to the sources
dataset_names = get_all_dataset_names(dsn=DSN_VARIOUS_SMALL_DATASETS)
for dataset_name in dataset_names:
ds_class = get_dataset_class(dataset_name)
sources[dataset_name] = {
'ds': ds_class,
'config': DSN_VARIOUS_SMALL_DATASETS
}
conn = dbconnection(DSN_VARIOUS_SMALL_DATASETS)
create_index_table(conn)
get_index_data(sources, conn)
rename_index_table(conn)
| mpl-2.0 | 3,499,150,070,914,391,600 | 32.530612 | 113 | 0.583993 | false |
roboime/pyroboime | roboime/core/skills/orientto.py | 1 | 2436 | #
# Copyright (C) 2013-2015 RoboIME
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
from numpy import pi, sign, array
from numpy.linalg import norm
#from ...utils.mathutils import sqrt
from ...utils.pidcontroller import PidController
from .. import Skill
class OrientTo(Skill):
"""
    This skill will orient the robot around a given point to look at another given point.
"""
angle_tolerance = 0.5
distance_tolerance = 0.11
walkspeed = 0.1
def __init__(self, robot, lookpoint=None, minpower=0.0, maxpower=1.0, **kwargs):
"""
"""
super(OrientTo, self).__init__(robot, deterministic=True, **kwargs)
self.lookpoint = lookpoint
self.minpower = minpower
self.maxpower = maxpower
self.angle_controller = PidController(kp=1.8, ki=0, kd=0, integ_max=687.55, output_max=360)
self.distance_controller = PidController(kp=1.8, ki=0, kd=0, integ_max=687.55, output_max=360)
@property
def final_target(self):
return self.lookpoint
def good_position(self):
good_distance = self.robot.kicker.distance(self.ball) <= self.distance_tolerance
good_angle = abs(self.delta_angle()) < self.angle_tolerance
return good_distance and good_angle
def delta_angle(self):
delta = self.robot.angle - self.ball.angle_to_point(self.lookpoint)
return (180 + delta) % 360 - 180
def _step(self):
delta_angle = self.delta_angle()
self.angle_controller.input = delta_angle
self.angle_controller.feedback = 0.0
self.angle_controller.step()
#d = self.robot.front_cut + self.ball.radius
d = norm(array(self.robot) - array(self.ball))
r = self.robot.radius
w = self.angle_controller.output
max_w = 180.0 * self.robot.max_speed / r / pi
if abs(w) > max_w:
w = sign(w) * max_w
v = pi * w * d / 180.0
self.robot.action.speeds = (0.0, v, -w)
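# Editor's note (added for clarity, not in the original file): in _step() above, w comes out
# of the PID in degrees per second, so v = pi * w * d / 180 is simply omega_rad * d -- the
# tangential speed of a point at radius d -- which is why the command pairs the lateral
# speed v with the opposite rotation -w to make the robot circle the ball while turning.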
| agpl-3.0 | 2,438,896,582,819,734,000 | 33.309859 | 102 | 0.65353 | false |
matthagy/Jamenson | jamenson/transform/globals.py | 1 | 4950 | '''Transform operation on globally scoped symbols to
operations on symbol_cell mapping.
'''
from __future__ import absolute_import
from __future__ import with_statement
from ..runtime.symbol import get_symbol_cells_map, gensym
from ..compiler import ir as I
from ..compiler import bind
from ..compiler.walk import IRWalker, propigate_location
from ..compiler.translate import state as translation_state
class GlobalSymbolTransformer(IRWalker):
def __init__(self, symbol_map_sym, top_scope):
IRWalker.__init__(self)
self.symbol_map_sym = symbol_map_sym
self.current_scope = top_scope
@staticmethod
def is_global(binding):
return bind.get_binding_use_type(binding) == bind.BND_GLOBAL
@staticmethod
def replace(old, new, skips=[]):
propigate_location(old, new, skips)
I.replace_child(old, new)
def visit_function(self, func):
for child in func.defaults:
self.visit(child)
old_scope = self.current_scope
self.current_scope = func.scope
self.visit(func.body)
self.current_scope = old_scope
def make_read_map(self):
return I.make_read_binding(self.current_scope.use_symbol(self.symbol_map_sym))
def visit_read_binding(self, rb):
if not self.is_global(rb.binding):
return
self.replace(rb, I.make_getitem(self.make_read_map(),
I.make_constant(rb.binding.symbol)))
def make_set(self, binding, value_ir):
return I.make_setitem(self.make_read_map(),
I.make_constant(binding.symbol),
value_ir)
def visit_write_binding(self, wb):
value = wb.value
if self.is_global(wb.binding):
del value.continuation
self.replace(wb, self.make_set(wb.binding, value),
skips=[value])
self.visit(value)
def visit_delete_binding(self, db):
if not self.is_global(db.binding):
return
self.replace(db, I.make_delitem(self.make_read_map(),
I.make_constant(db.binding.symbol)))
def visit_foriter(self, fi):
itr = fi.iter
if self.is_global(fi.binding):
old_binding = fi.binding
del fi.binding
sym = gensym('foriter-tmp')
self.current_scope.register_local(sym)
del itr.continuation
self.replace(fi, I.make_progn([
I.make_foriter(tag=fi.tag,
binding=self.current_scope.use_symbol(sym),
iter=itr),
self.make_set(old_binding, I.make_read_binding(self.current_scope.use_symbol(sym)))
]),
skips=[itr])
del fi.tag
self.visit(itr)
def visit_unpack_seq(self, us):
new_bindings = []
copies = []
for binding in us.places:
if not self.is_global(binding):
new_bindings.append(binding)
else:
gs = gensym('unpack-tmp')
new_bindings.append(self.current_scope.register_and_use_local(gs))
copies.append([gs, binding])
seq = us.seq
if copies:
del seq.continuation
del us.places
self.replace(us, I.make_progn([
I.make_unpack_seq(seq, new_bindings)
] + [self.make_set(binding, I.make_read_binding(self.current_scope.use_symbol(gs)))
for gs,binding in copies]),
skips=[seq])
self.visit(seq)
def transform_global_symbol_use(top):
assert isinstance(top, I.toplevel)
top_scope = top.scope
assert not top_scope.parent
symbol_map_sym = gensym('symbol-cells-map')
symbol_map_binding = top_scope.register_local(symbol_map_sym)
GlobalSymbolTransformer(symbol_map_sym, top_scope).visit(top.expression)
if not len(symbol_map_binding.uses):
top_scope.unregister_binding(symbol_map_binding)
return top
expression = top.expression
del expression.continuation
when = None
if isinstance(expression, I.evalwhen):
when = expression.when
expression = expression.expression
del expression.continuation
new_ir = I.make_progn([I.make_write_binding(
top_scope.use_symbol(symbol_map_sym),
I.make_call(callee=I.make_constant(get_symbol_cells_map),
args=[], kwd_names=[], kwd_values=[],
star_args=None, star_kwds=None)),
expression])
if when is not None:
new_ir = I.make_evalwhen(when=when, expression=new_ir)
new_top = I.make_toplevel(new_ir, top_scope)
propigate_location(top, new_top, [expression])
return new_top
| apache-2.0 | -3,855,520,096,124,097,500 | 36.218045 | 99 | 0.573131 | false |
ari-zah/gaiasky | assets/scripts/tests/camera-path-sync.py | 1 | 1065 | # This script tests the synchronous camera file playing.
# Created by Toni Sagrista
import time, os
from py4j.java_gateway import JavaGateway, GatewayParameters
gateway = JavaGateway(gateway_parameters=GatewayParameters(auto_convert=True))
gs = gateway.entry_point
# Prints to both Gaia Sky and Python logs
def printall(string):
# print to gaia sky log
gs.print(string)
# print to python log
print(string)
gs.disableInput()
gs.cameraStop()
gs.minimizeInterfaceWindow()
fname = os.path.abspath("./camera-path-test.gsc")
printall("(1/2) Starting synchronous camera file execution: %s" % fname)
t0 = time.time()
gs.runCameraPath(fname, True)
t1 = time.time()
printall("Sync exec: script regained control after %.4f seconds" % (t1 - t0))
printall("(2/2) Starting asynchronous camera file execution: %s" % fname)
t0 = time.time()
gs.runCameraPath(fname)
t1 = time.time()
printall("Async exec: script regained control after %.4f seconds" % (t1 - t0))
gs.maximizeInterfaceWindow()
gs.enableInput()
printall("Script finishes")
gateway.close()
| lgpl-3.0 | -7,236,454,278,470,441,000 | 23.204545 | 78 | 0.738028 | false |
rob-nn/motus | gait_loader.py | 1 | 2787 | from numpy import *
class DataLoader(object):
def __init__(self, file_name):
self._data = None
self._file_name = file_name
self._load_data()
self._data_descs =[]
self._generate_data_descs()
def _load_data(self):
f = open(self._file_name)
data = f.readlines()
f.close()
j = 0
data_list = []
for i in range(len(data)):
line = data[j]
if len(line) <=1 or line[0] == '#':
data.pop(j)
j = j -1
else:
words = line.split()
temp = []
for word in words:
temp.append(float(word))
data_list.append(temp)
j = j + 1
self._data = array(data_list)
def _generate_data_descs(self):
self._data_descs.append(self._generate_data_desc(0, 'Left angular velocities'))
self._data_descs.append(self._generate_data_desc(1, 'Right angular velocities'))
self._data_descs.append(self._generate_data_desc(2, 'Left angles'))
self._data_descs.append(self._generate_data_desc(3, 'Right angles'))
        self._data_descs.append(self._generate_data_desc(4, 'Left angular accelerations'))
self._data_descs.append(self._generate_data_desc(5, 'Right angular accelerations'))
self._data_descs.append(self._generate_data_desc(6, 'Left x velocities'))
self._data_descs.append(self._generate_data_desc(7, 'Left y velocities'))
self._data_descs.append(self._generate_data_desc(8, 'Left z velocities'))
self._data_descs.append(self._generate_data_desc(9, 'Right x velocities'))
self._data_descs.append(self._generate_data_desc(10, 'Right y velocities'))
self._data_descs.append(self._generate_data_desc(11, 'Right z velocities'))
def _generate_data_desc(self, index, desc):
column = self.data[:, index]
return DataDesc(index, desc, column.min(), column.max())
@property
def data(self):
return self._data
@property
def data_descs(self):
return self._data_descs
def normalize(self, index):
return array((self.data[:, index] - self.data_descs[index].min_val) / \
(self.data_descs[index].max_val - self.data_descs[index].min_val))
def normalize_all(self):
new_data = array([])
for i in range(self.data.shape[1]):
new_data = concatenate((new_data, self.normalize(i)))
return reshape(new_data, self.data.shape)
class DataDesc(object):
def __init__(self, index, desc, min_val, max_val):
self._index = index
self._min_val = min_val
self._max_val = max_val
self._desc = desc
@property
def index(self):
return self._index
@property
def min_val(self):
return self._min_val
@property
def max_val(self):
return self._max_val
@property
def desc(self):
return self._desc
def loadWalk(value):
return DataLoader('./dynamics_data/dynamics_walk' + str(value) + '.mat')
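# --- Editor's illustrative example (not part of the original file; the walk index and
# data directory are assumptions). ---
if __name__ == '__main__':
    walk = loadWalk(1)                    # expects ./dynamics_data/dynamics_walk1.mat
    for d in walk.data_descs:
        print('%d: %s [%g, %g]' % (d.index, d.desc, d.min_val, d.max_val))
    normalized = walk.normalize_all()     # every column rescaled to the [0, 1] range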
| gpl-2.0 | -6,392,789,390,617,272,000 | 30.314607 | 88 | 0.642268 | false |
mariocesar/django-tricks | django_tricks/models/abstract.py | 1 | 2638 | from uuid import uuid4
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _
from .mixins import MPAwareModel
treebeard = True
try:
from treebeard.mp_tree import MP_Node
except ImportError:
treebeard = False
class UniqueTokenModel(models.Model):
token = models.CharField(max_length=32, unique=True, blank=True)
class Meta:
abstract = True
def get_token(self):
return str(uuid4().hex)
def save(self, **kwargs):
if not self.token:
self.token = self.get_token()
super().save(**kwargs)
if treebeard:
class MaterializedPathNode(MPAwareModel, MP_Node):
slug = models.SlugField(max_length=255, db_index=True, unique=False, blank=True)
        node_order_by = ['numval', 'strval']
class Meta:
abstract = True
class MutableModelManager(models.QuerySet):
def by_type(self, model_class):
return self.filter(specific_type=ContentType.objects.get_for_model(model_class))
class MutableModel(models.Model):
"""A Model that if inherited from will store the specific class reference in self."""
specific_type = models.ForeignKey(
ContentType,
verbose_name=_('specific type'),
related_name='+',
editable=False,
on_delete=models.PROTECT)
class Meta:
abstract = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not self.pk and not self.specific_type_id:
# this model is being newly created rather than retrieved from the db;
# set content type to correctly represent the model class that this was
# created as
self.specific_type = ContentType.objects.get_for_model(self)
@cached_property
def specific(self):
"""Return this page in its most specific subclassed form."""
specific_type = ContentType.objects.get_for_id(self.specific_type_id)
model_class = specific_type.model_class()
if model_class is None:
return self
elif isinstance(self, model_class):
return self
else:
return specific_type.get_object_for_this_type(id=self.id)
@cached_property
def specific_class(self):
"""Return the class that this page would be if instantiated in its most specific form."""
specific_type = ContentType.objects.get_for_id(self.specific_type_id)
return specific_type.model_class()
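# --- Editor's illustrative sketch (not part of the original file; model names are made up) ---
# A concrete pair of models built on MutableModel: rows fetched through the base class can be
# re-materialised as the subclass they were created as via the `specific` property.
#
#   class Animal(MutableModel):
#       name = models.CharField(max_length=50)
#
#   class Dog(Animal):
#       pass
#
#   Dog.objects.create(name='Rex')
#   Animal.objects.first().specific      # -> the Dog instance, not a bare Animal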
| isc | 4,206,543,305,950,345,000 | 29.321839 | 97 | 0.655042 | false |
jamielennox/requests-mock | requests_mock/adapter.py | 1 | 10629 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import weakref
from requests.adapters import BaseAdapter
from requests.utils import requote_uri
import six
from six.moves.urllib import parse as urlparse
from requests_mock import exceptions
from requests_mock.request import _RequestObjectProxy
from requests_mock.response import _MatcherResponse
import logging
logger = logging.getLogger(__name__)
try:
import purl
purl_types = (purl.URL,)
except ImportError:
purl = None
purl_types = ()
ANY = object()
class _RequestHistoryTracker(object):
def __init__(self):
self.request_history = []
def _add_to_history(self, request):
self.request_history.append(request)
@property
def last_request(self):
"""Retrieve the latest request sent"""
try:
return self.request_history[-1]
except IndexError:
return None
@property
def called(self):
return self.call_count > 0
@property
def called_once(self):
return self.call_count == 1
@property
def call_count(self):
return len(self.request_history)
def reset(self):
self.request_history = []
class _RunRealHTTP(Exception):
"""A fake exception to jump out of mocking and allow a real request.
This exception is caught at the mocker level and allows it to execute this
request through the real requests mechanism rather than the mocker.
It should never be exposed to a user.
"""
class _Matcher(_RequestHistoryTracker):
"""Contains all the information about a provided URL to match."""
def __init__(self, method, url, responses, complete_qs, request_headers,
additional_matcher, real_http, case_sensitive):
"""
:param bool complete_qs: Match the entire query string. By default URLs
match if all the provided matcher query arguments are matched and
extra query arguments are ignored. Set complete_qs to true to
require that the entire query string needs to match.
"""
super(_Matcher, self).__init__()
self._method = method
self._url = url
self._responses = responses
self._complete_qs = complete_qs
self._request_headers = request_headers
self._real_http = real_http
self._additional_matcher = additional_matcher
# url can be a regex object or ANY so don't always run urlparse
if isinstance(url, six.string_types):
url_parts = urlparse.urlparse(url)
self._scheme = url_parts.scheme.lower()
self._netloc = url_parts.netloc.lower()
self._path = requote_uri(url_parts.path or '/')
self._query = url_parts.query
if not case_sensitive:
self._path = self._path.lower()
self._query = self._query.lower()
elif isinstance(url, purl_types):
self._scheme = url.scheme()
self._netloc = url.netloc()
self._path = url.path()
self._query = url.query()
if not case_sensitive:
self._path = self._path.lower()
self._query = self._query.lower()
else:
self._scheme = None
self._netloc = None
self._path = None
self._query = None
def _match_method(self, request):
if self._method is ANY:
return True
if request.method.lower() == self._method.lower():
return True
return False
def _match_url(self, request):
if self._url is ANY:
return True
# regular expression matching
if hasattr(self._url, 'search'):
return self._url.search(request.url) is not None
# scheme is always matched case insensitive
if self._scheme and request.scheme.lower() != self._scheme:
return False
# netloc is always matched case insensitive
if self._netloc and request.netloc.lower() != self._netloc:
return False
if (request.path or '/') != self._path:
return False
# construct our own qs structure as we remove items from it below
request_qs = urlparse.parse_qs(request.query, keep_blank_values=True)
matcher_qs = urlparse.parse_qs(self._query, keep_blank_values=True)
for k, vals in six.iteritems(matcher_qs):
for v in vals:
try:
request_qs.get(k, []).remove(v)
except ValueError:
return False
if self._complete_qs:
for v in six.itervalues(request_qs):
if v:
return False
return True
def _match_headers(self, request):
for k, vals in six.iteritems(self._request_headers):
try:
header = request.headers[k]
except KeyError:
# NOTE(jamielennox): This seems to be a requests 1.2/2
# difference, in 2 they are just whatever the user inputted in
# 1 they are bytes. Let's optionally handle both and look at
# removing this when we depend on requests 2.
if not isinstance(k, six.text_type):
return False
try:
header = request.headers[k.encode('utf-8')]
except KeyError:
return False
if header != vals:
return False
return True
def _match_additional(self, request):
if callable(self._additional_matcher):
return self._additional_matcher(request)
if self._additional_matcher is not None:
raise TypeError("Unexpected format of additional matcher.")
return True
def _match(self, request):
return (self._match_method(request) and
self._match_url(request) and
self._match_headers(request) and
self._match_additional(request))
def __call__(self, request):
if not self._match(request):
return None
# doing this before _add_to_history means real requests are not stored
# in the request history. I'm not sure what is better here.
if self._real_http:
raise _RunRealHTTP()
if len(self._responses) > 1:
response_matcher = self._responses.pop(0)
else:
response_matcher = self._responses[0]
self._add_to_history(request)
return response_matcher.get_response(request)
class Adapter(BaseAdapter, _RequestHistoryTracker):
"""A fake adapter than can return predefined responses.
"""
def __init__(self, case_sensitive=False):
super(Adapter, self).__init__()
self._case_sensitive = case_sensitive
self._matchers = []
def send(self, request, **kwargs):
request = _RequestObjectProxy(request,
case_sensitive=self._case_sensitive,
**kwargs)
self._add_to_history(request)
for matcher in reversed(self._matchers):
try:
resp = matcher(request)
except Exception:
request._matcher = weakref.ref(matcher)
raise
if resp is not None:
request._matcher = weakref.ref(matcher)
resp.connection = self
logger.debug('{} {} {}'.format(request._request.method,
request._request.url,
resp.status_code))
return resp
raise exceptions.NoMockAddress(request)
def close(self):
pass
def register_uri(self, method, url, response_list=None, **kwargs):
"""Register a new URI match and fake response.
:param str method: The HTTP method to match.
:param str url: The URL to match.
"""
complete_qs = kwargs.pop('complete_qs', False)
additional_matcher = kwargs.pop('additional_matcher', None)
request_headers = kwargs.pop('request_headers', {})
real_http = kwargs.pop('_real_http', False)
if response_list and kwargs:
raise RuntimeError('You should specify either a list of '
'responses OR response kwargs. Not both.')
elif real_http and (response_list or kwargs):
raise RuntimeError('You should specify either response data '
'OR real_http. Not both.')
elif not response_list:
response_list = [] if real_http else [kwargs]
# NOTE(jamielennox): case_sensitive is not present as a kwarg because i
# think there would be an edge case where the adapter and register_uri
# had different values.
# Ideally case_sensitive would be a value passed to match() however
# this would change the contract of matchers so we pass ito to the
# proxy and the matcher separately.
responses = [_MatcherResponse(**k) for k in response_list]
matcher = _Matcher(method,
url,
responses,
case_sensitive=self._case_sensitive,
complete_qs=complete_qs,
additional_matcher=additional_matcher,
request_headers=request_headers,
real_http=real_http)
self.add_matcher(matcher)
return matcher
def add_matcher(self, matcher):
"""Register a custom matcher.
A matcher is a callable that takes a `requests.Request` and returns a
`requests.Response` if it matches or None if not.
:param callable matcher: The matcher to execute.
"""
self._matchers.append(matcher)
def reset(self):
super(Adapter, self).reset()
for matcher in self._matchers:
matcher.reset()
__all__ = ['Adapter']
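# --- Editor's illustrative example (not part of the original file) ---
# Typical standalone use of the Adapter: mount it on a requests Session under a dedicated
# scheme so matching URLs are answered from registered stubs instead of the network.
#
#   import requests
#   session = requests.Session()
#   adapter = Adapter()
#   session.mount('mock://', adapter)
#   adapter.register_uri('GET', 'mock://test.example/ping', json={'ok': True})
#   assert session.get('mock://test.example/ping').json() == {'ok': True}
#   assert adapter.called_once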
| apache-2.0 | -4,611,116,238,172,673,500 | 32.215625 | 79 | 0.581428 | false |
netinept/plog | plog/storages/settings_s3boto.py | 1 | 1207 | # S3Boto storage settings for photologue example project.
import os
DEFAULT_FILE_STORAGE = 'plog.storages.s3utils.MediaS3BotoStorage'
STATICFILES_STORAGE = 'plog.storages.s3utils.StaticS3BotoStorage'
try:
# If you want to test the example_project with S3, you'll have to configure the
# environment variables as specified below.
# (Secret keys are stored in environment variables for security - you don't want to
# accidentally commit and push them to a public repository).
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
AWS_STORAGE_BUCKET_NAME = os.environ['AWS_STORAGE_BUCKET_NAME']
except KeyError:
raise KeyError('Need to define AWS environment variables: ' +
'AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, and AWS_STORAGE_BUCKET_NAME')
# Default Django Storage API behavior - don't overwrite files with same name
AWS_S3_FILE_OVERWRITE = False
MEDIA_ROOT = '/media/'
MEDIA_URL = 'http://%s.s3.amazonaws.com/media/' % AWS_STORAGE_BUCKET_NAME
STATIC_ROOT = '/static/'
STATIC_URL = 'http://%s.s3.amazonaws.com/static/' % AWS_STORAGE_BUCKET_NAME
ADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/' | apache-2.0 | -7,196,048,099,957,750,000 | 40.655172 | 91 | 0.727423 | false |
Bhare8972/LOFAR-LIM | LIM_scripts/utilities.py | 1 | 11289 | #!/usr/bin/env python3
##ON APP MACHINE
import sys
from os import listdir, mkdir
from os.path import isdir, dirname, abspath
import os
import subprocess
import weakref
from scipy import fftpack
import numpy as np
## some global variables, this needs to be fixed at some point
default_raw_data_loc = None#"/exp_app2/appexp1/public/raw_data"
default_processed_data_loc = None#"/home/brian/processed_files"
MetaData_directory = dirname(abspath(__file__)) + '/data' ## change this if antenna_response_model is in a folder different from this module
#### constants
C = 299792458.0
RTD = 180.0/3.1415926 ##radians to degrees
n_air = 1.000293
v_air = C/n_air
latlonCS002 = np.array([52.91512249, 6.869837540]) ## latitude and longitude of CS002 in degrees
#### log data to screen and to a file
class logger(object):
class std_writer(object):
def __init__(self, logger):
self.logger_ref = weakref.ref(logger)
def write(self, msg):
logger=self.logger_ref()
logger.out_file.write(msg)
if logger.to_screen:
logger.old_stdout.write(msg)
def flush(self):
logger=self.logger_ref()
logger.out_file.flush()
def __init__(self):
self.has_stderr = False
self.has_stdout = False
self.old_stderr = sys.stderr
self.old_stdout = sys.stdout
self.set("out_log")
def set(self, fname, to_screen=True):
self.out_file = open(fname, 'w')
self.set_to_screen( to_screen )
def __call__(self, *args):
for a in args:
if self.to_screen:
self.old_stdout.write(str(a))
self.old_stdout.write(" ")
self.out_file.write(str(a))
self.out_file.write(" ")
self.out_file.write("\n")
if self.to_screen:
self.old_stdout.write("\n")
self.out_file.flush()
self.old_stdout.flush()
def set_to_screen(self, to_screen=True):
self.to_screen = to_screen
def take_stdout(self):
if not self.has_stdout:
sys.stdout = self.std_writer(self)
self.has_stdout = True
def take_stderr(self):
if not self.has_stderr:
sys.stderr = self.std_writer(self)
self.has_stderr = True
def restore_stdout(self):
if self.has_stdout:
sys.stdout = self.old_stdout
self.has_stdout = False
def restore_stderr(self):
if self.has_stderr:
sys.stderr = self.old_stderr
self.has_stderr = False
def flush(self):
self.out_file.flush()
# def __del__(self):
# self.restore_stderr()
# self.restore_stdout()
#log = logger()
def iterate_pairs(list_one, list_two, list_one_avoid=[], list_two_avoid=[]):
"""returns an iterator that loops over all pairs of the two lists"""
for item_one in list_one:
if item_one in list_one_avoid:
continue
for item_two in list_two:
if item_two in list_two_avoid:
continue
yield (item_one, item_two)
import re
natural_regex_pattern = re.compile('([0-9]+)')
def natural_sort( l ):
""" Sort the given iterable in the way that humans expect. Usefull for sorting station names."""
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [ convert(c) for c in natural_regex_pattern.split(key) ]
return sorted(l, key = alphanum_key)
#### some file utils
def Fname_data(Fpath):
""" takes both pulse data file names and h5 file names and returns UTC_time, station_name, Fpath"""
Fname = Fpath.split('/')[-1]
data = Fname.split('_')
timeID = data[1]
station_name = data[2]
if len(data[3][1:])==0:
file_number = 0
else:
file_number = int(data[3][1:])
return timeID, station_name, Fpath, file_number
##note that timeID is a string representing the datetime of a LOFAR trigger. such as: D20130619T094846.507Z
## the timeID is used to uniquely identify triggers
def get_timeID(fname):
data=fname.split("_")
return data[1]
def year_from_timeID(timeID):
return timeID[1:5]
def raw_data_dir(timeID, data_loc=None):
"""gives path to the raw data folder for a particular timeID, given location of data structure. Defaults to default_raw_data_loc"""
if data_loc is None:
data_loc = default_raw_data_loc
if default_raw_data_loc is None:
print("ERROR: 'default_raw_data_loc' in utilities is not set.")
quit()
path = data_loc + '/' + year_from_timeID(timeID)+"/"+timeID
return path
def processed_data_dir(timeID, data_loc=None):
"""gives path to the analysis folders for a particular timeID, given location of data structure. Defaults to default_processed_data_loc
makes the directory if it doesn't exist"""
if data_loc is None:
data_loc = default_processed_data_loc
if default_processed_data_loc is None:
print("ERROR: 'default_processed_data_loc' in utilities is not set.")
quit()
path=data_loc + "/" + year_from_timeID(timeID)+"/"+timeID
if not isdir(path):
mkdir(path)
return path
## a python list where the keys are the number of a station and the values are the station name
SId_to_Sname = [None]*209 #just to pre-initialize list, so syntax below is possible
SId_to_Sname[1] = "CS001"
SId_to_Sname[2] = "CS002"
SId_to_Sname[3] = "CS003"
SId_to_Sname[4] = "CS004"
SId_to_Sname[5] = "CS005"
SId_to_Sname[6] = "CS006"
SId_to_Sname[7] = "CS007"
#SId_to_Sname[8] = "CS008"
#SId_to_Sname[9] = "CS009"
#SId_to_Sname[10] = "CS010"
SId_to_Sname[11] = "CS011"
#SId_to_Sname[12] = "CS012"
SId_to_Sname[13] = "CS013"
#SId_to_Sname[14] = "CS014"
#SId_to_Sname[15] = "CS015"
#SId_to_Sname[16] = "CS016"
SId_to_Sname[17] = "CS017"
#SId_to_Sname[18] = "CS018"
#SId_to_Sname[19] = "CS019"
#SId_to_Sname[20] = "CS020"
SId_to_Sname[21] = "CS021"
#SId_to_Sname[22] = "CS022"
#SId_to_Sname[23] = "CS023"
SId_to_Sname[24] = "CS024"
#SId_to_Sname[25] = "CS025"
SId_to_Sname[26] = "CS026"
#SId_to_Sname[27] = "CS027"
SId_to_Sname[28] = "CS028"
#SId_to_Sname[29] = "CS029"
SId_to_Sname[30] = "CS030"
SId_to_Sname[31] = "CS031"
SId_to_Sname[32] = "CS032"
SId_to_Sname[101] = "CS101"
#SId_to_Sname[102] = "CS102"
SId_to_Sname[103] = "CS103"
SId_to_Sname[121] = "CS201"
SId_to_Sname[141] = "CS301"
SId_to_Sname[142] = "CS302"
SId_to_Sname[161] = "CS401"
SId_to_Sname[181] = "CS501"
#SId_to_Sname[104] = "RS104"
#SId_to_Sname[105] = "RS105"
SId_to_Sname[106] = "RS106"
#SId_to_Sname[107] = "RS107"
#SId_to_Sname[108] = "RS108"
#SId_to_Sname[109] = "RS109"
#SId_to_Sname[122] = "RS202"
#SId_to_Sname[123] = "RS203"
#SId_to_Sname[124] = "RS204"
SId_to_Sname[125] = "RS205"
#SId_to_Sname[126] = "RS206"
#SId_to_Sname[127] = "RS207"
SId_to_Sname[128] = "RS208"
#SId_to_Sname[129] = "RS209"
SId_to_Sname[130] = "RS210"
#SId_to_Sname[143] = "RS303"
#SId_to_Sname[144] = "RS304"
SId_to_Sname[145] = "RS305"
SId_to_Sname[146] = "RS306"
SId_to_Sname[147] = "RS307"
#SId_to_Sname[148] = "RS308"
#SId_to_Sname[149] = "RS309"
SId_to_Sname[150] = "RS310"
SId_to_Sname[166] = "RS406"
SId_to_Sname[167] = "RS407"
SId_to_Sname[169] = "RS409"
SId_to_Sname[183] = "RS503"
SId_to_Sname[188] = "RS508"
SId_to_Sname[189] = "RS509"
SId_to_Sname[201] = "DE601"
SId_to_Sname[202] = "DE602"
SId_to_Sname[203] = "DE603"
SId_to_Sname[204] = "DE604"
SId_to_Sname[205] = "DE605"
SId_to_Sname[206] = "FR606"
SId_to_Sname[207] = "SE607"
SId_to_Sname[208] = "UK608"
## this just "inverts" the previous list, discarding unused values
Sname_to_SId_dict = {name:ID for ID,name in enumerate(SId_to_Sname) if name is not None}
def even_antName_to_odd(even_ant_name):
even_num = int(even_ant_name)
odd_num = even_num + 1
return str( odd_num ).zfill( 9 )
def antName_is_even(ant_name):
return not int(ant_name)%2
def odd_antName_to_even(odd_ant_name):
odd_num = int(odd_ant_name)
    even_num = odd_num - 1
return str( even_num ).zfill( 9 )
#### plotting utilities ####
def set_axes_equal(ax):
'''Make axes of 3D plot have equal scale so that spheres appear as spheres,
cubes as cubes, etc.. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().
'''
x_limits = ax.get_xlim3d()
y_limits = ax.get_ylim3d()
z_limits = ax.get_zlim3d()
x_range = abs(x_limits[1] - x_limits[0])
x_middle = np.mean(x_limits)
y_range = abs(y_limits[1] - y_limits[0])
y_middle = np.mean(y_limits)
z_range = abs(z_limits[1] - z_limits[0])
z_middle = np.mean(z_limits)
# The plot bounding box is a sphere in the sense of the infinity
# norm, hence I call half the max range the plot radius.
plot_radius = 0.5*max([x_range, y_range, z_range])
ax.set_xlim3d([x_middle - plot_radius, x_middle + plot_radius])
ax.set_ylim3d([y_middle - plot_radius, y_middle + plot_radius])
ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius])
### some math functions? ###
def normalize_angle_radians( angle_radians ):
"""For an angle in radians, return the equivalent angle that is garunteed be between -pi and pi"""
while angle_radians > np.pi:
angle_radians -= 2.0*np.pi
while angle_radians < -np.pi:
angle_radians += 2.0*np.pi
return angle_radians
def BoundingBox_collision(BB1, BB2):
""" return true if two N-D bounding boxes collide, False otherwise"""
for B1, B2 in zip(BB1,BB2):
if (B1[1] < B2[0]) or (B2[1] < B1[0]):
return False
return True
### some build tools ####
def GSL_include():
"""return directory for location of GSL headers, useful when combining GSL and cython"""
try:
gsl_include = subprocess.check_output('gsl-config --cflags', shell=True).decode('utf-8')[2:-1]
except subprocess.CalledProcessError:
gsl_include = os.getenv('LIB_GSL')
if gsl_include is None:
# Environmental variable LIB_GSL not set, use hardcoded path.
gsl_include = r"c:\Program Files\GnuWin32\include"
else:
gsl_include += "/include"
assert gsl_include != '', "Couldn't find gsl. Make sure it's installed and in the path."
return gsl_include
def GSL_library_dir():
"""return directory for location of GSL binaries, useful when combining GSL and cython"""
try:
lib_gsl_dir = subprocess.check_output('gsl-config --libs', shell=True).decode('utf-8').split()[0][2:]
except subprocess.CalledProcessError:
lib_gsl_dir = os.getenv('LIB_GSL')
if lib_gsl_dir is None:
# Environmental variable LIB_GSL not set, use hardcoded path.
lib_gsl_dir = r"c:\Program Files\GnuWin32\lib"
else:
lib_gsl_dir += "/lib"
return lib_gsl_dir
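# --- Editor's illustrative sketch (not part of the original file; the module and file
# names are made up) ---
# The two helpers above are meant for build scripts that compile Cython extensions
# against GSL, e.g. in a setup.py:
#
#   from distutils.core import setup, Extension
#   ext = Extension('lim_ext', sources=['lim_ext.pyx'],
#                   include_dirs=[GSL_include()],
#                   library_dirs=[GSL_library_dir()],
#                   libraries=['gsl', 'gslcblas'])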
| mit | -6,915,917,853,648,077,000 | 29.349462 | 141 | 0.60776 | false |
fintech-circle/edx-platform | openedx/core/djangoapps/auth_exchange/forms.py | 1 | 4033 | """
Forms to support third-party to first-party OAuth 2.0 access token exchange
"""
import provider.constants
from django.contrib.auth.models import User
from django.forms import CharField
from edx_oauth2_provider.constants import SCOPE_NAMES
from oauth2_provider.models import Application
from provider.forms import OAuthForm, OAuthValidationError
from provider.oauth2.forms import ScopeChoiceField, ScopeMixin
from provider.oauth2.models import Client
from requests import HTTPError
from social.backends import oauth as social_oauth
from social.exceptions import AuthException
from third_party_auth import pipeline
class AccessTokenExchangeForm(ScopeMixin, OAuthForm):
"""Form for access token exchange endpoint"""
access_token = CharField(required=False)
scope = ScopeChoiceField(choices=SCOPE_NAMES, required=False)
client_id = CharField(required=False)
def __init__(self, request, oauth2_adapter, *args, **kwargs):
super(AccessTokenExchangeForm, self).__init__(*args, **kwargs)
self.request = request
self.oauth2_adapter = oauth2_adapter
def _require_oauth_field(self, field_name):
"""
Raise an appropriate OAuthValidationError error if the field is missing
"""
field_val = self.cleaned_data.get(field_name)
if not field_val:
raise OAuthValidationError(
{
"error": "invalid_request",
"error_description": "{} is required".format(field_name),
}
)
return field_val
def clean_access_token(self):
"""
Validates and returns the "access_token" field.
"""
return self._require_oauth_field("access_token")
def clean_client_id(self):
"""
Validates and returns the "client_id" field.
"""
return self._require_oauth_field("client_id")
def clean(self):
if self._errors:
return {}
backend = self.request.backend
if not isinstance(backend, social_oauth.BaseOAuth2):
raise OAuthValidationError(
{
"error": "invalid_request",
"error_description": "{} is not a supported provider".format(backend.name),
}
)
self.request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_LOGIN_API
client_id = self.cleaned_data["client_id"]
try:
client = self.oauth2_adapter.get_client(client_id=client_id)
except (Client.DoesNotExist, Application.DoesNotExist):
raise OAuthValidationError(
{
"error": "invalid_client",
"error_description": "{} is not a valid client_id".format(client_id),
}
)
if client.client_type not in [provider.constants.PUBLIC, Application.CLIENT_PUBLIC]:
raise OAuthValidationError(
{
# invalid_client isn't really the right code, but this mirrors
# https://github.com/edx/django-oauth2-provider/blob/edx/provider/oauth2/forms.py#L331
"error": "invalid_client",
"error_description": "{} is not a public client".format(client_id),
}
)
self.cleaned_data["client"] = client
user = None
try:
user = backend.do_auth(self.cleaned_data.get("access_token"), allow_inactive_user=True)
except (HTTPError, AuthException):
pass
if user and isinstance(user, User):
self.cleaned_data["user"] = user
else:
# Ensure user does not re-enter the pipeline
self.request.social_strategy.clean_partial_pipeline()
raise OAuthValidationError(
{
"error": "invalid_grant",
"error_description": "access_token is not valid",
}
)
return self.cleaned_data
| agpl-3.0 | -3,508,841,061,231,892,500 | 36 | 106 | 0.598562 | false |
nicolaselie/pykuli | keyboard/mac.py | 1 | 5816 | #Copyright 2013 Paul Barton
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
from Quartz import *
from AppKit import NSEvent
from .base import KeyboardMeta, KeyboardEventMeta
# Taken from events.h
# /System/Library/Frameworks/Carbon.framework/Versions/A/Frameworks/HIToolbox.framework/Versions/A/Headers/Events.h
character_translate_table = {
'a': 0x00,
's': 0x01,
'd': 0x02,
'f': 0x03,
'h': 0x04,
'g': 0x05,
'z': 0x06,
'x': 0x07,
'c': 0x08,
'v': 0x09,
'b': 0x0b,
'q': 0x0c,
'w': 0x0d,
'e': 0x0e,
'r': 0x0f,
'y': 0x10,
't': 0x11,
'1': 0x12,
'2': 0x13,
'3': 0x14,
'4': 0x15,
'6': 0x16,
'5': 0x17,
'=': 0x18,
'9': 0x19,
'7': 0x1a,
'-': 0x1b,
'8': 0x1c,
'0': 0x1d,
']': 0x1e,
'o': 0x1f,
'u': 0x20,
'[': 0x21,
'i': 0x22,
'p': 0x23,
'l': 0x25,
'j': 0x26,
'\'': 0x27,
'k': 0x28,
';': 0x29,
'\\': 0x2a,
',': 0x2b,
'/': 0x2c,
'n': 0x2d,
'm': 0x2e,
'.': 0x2f,
'`': 0x32,
' ': 0x31,
'\r': 0x24,
'\t': 0x30,
'shift': 0x38
}
# Taken from ev_keymap.h
# http://www.opensource.apple.com/source/IOHIDFamily/IOHIDFamily-86.1/IOHIDSystem/IOKit/hidsystem/ev_keymap.h
special_key_translate_table = {
'KEYTYPE_SOUND_UP': 0,
'KEYTYPE_SOUND_DOWN': 1,
'KEYTYPE_BRIGHTNESS_UP': 2,
'KEYTYPE_BRIGHTNESS_DOWN': 3,
'KEYTYPE_CAPS_LOCK': 4,
'KEYTYPE_HELP': 5,
'POWER_KEY': 6,
'KEYTYPE_MUTE': 7,
'UP_ARROW_KEY': 8,
'DOWN_ARROW_KEY': 9,
'KEYTYPE_NUM_LOCK': 10,
'KEYTYPE_CONTRAST_UP': 11,
'KEYTYPE_CONTRAST_DOWN': 12,
'KEYTYPE_LAUNCH_PANEL': 13,
'KEYTYPE_EJECT': 14,
'KEYTYPE_VIDMIRROR': 15,
'KEYTYPE_PLAY': 16,
'KEYTYPE_NEXT': 17,
'KEYTYPE_PREVIOUS': 18,
'KEYTYPE_FAST': 19,
'KEYTYPE_REWIND': 20,
'KEYTYPE_ILLUMINATION_UP': 21,
'KEYTYPE_ILLUMINATION_DOWN': 22,
'KEYTYPE_ILLUMINATION_TOGGLE': 23
}
class Keyboard(KeyboardMeta):
def press_key(self, key):
if key in special_key_translate_table:
self._press_special_key(key, True)
else:
self._press_normal_key(key, True)
def release_key(self, key):
if key in special_key_translate_table:
self._press_special_key(key, False)
else:
self._press_normal_key(key, False)
def special_key_assignment(self):
self.volume_mute_key = 'KEYTYPE_MUTE'
self.volume_down_key = 'KEYTYPE_SOUND_DOWN'
self.volume_up_key = 'KEYTYPE_SOUND_UP'
self.media_play_pause_key = 'KEYTYPE_PLAY'
# Doesn't work :(
# self.media_next_track_key = 'KEYTYPE_NEXT'
# self.media_prev_track_key = 'KEYTYPE_PREVIOUS'
def _press_normal_key(self, key, down):
try:
if self.is_char_shifted(key):
key_code = character_translate_table[key.lower()]
event = CGEventCreateKeyboardEvent(None,
character_translate_table['shift'], down)
CGEventPost(kCGHIDEventTap, event)
# Tiny sleep to let OS X catch up on us pressing shift
time.sleep(.01)
else:
key_code = character_translate_table[key]
event = CGEventCreateKeyboardEvent(None, key_code, down)
CGEventPost(kCGHIDEventTap, event)
except KeyError:
raise RuntimeError("Key {} not implemented.".format(key))
def _press_special_key(self, key, down):
""" Helper method for special keys.
Source: http://stackoverflow.com/questions/11045814/emulate-media-key-press-on-mac
"""
key_code = special_key_translate_table[key]
ev = NSEvent.otherEventWithType_location_modifierFlags_timestamp_windowNumber_context_subtype_data1_data2_(
NSSystemDefined, # type
(0,0), # location
0xa00 if down else 0xb00, # flags
0, # timestamp
0, # window
0, # ctx
8, # subtype
(key_code << 16) | ((0xa if down else 0xb) << 8), # data1
-1 # data2
)
CGEventPost(0, ev.CGEvent())
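    # Worked example (illustrative): for 'KEYTYPE_MUTE' (key_code 7) pressed down,
    # data1 = (7 << 16) | (0xa << 8) = 0x70a00, i.e. the key code sits in the high
    # word and the press (0xa) / release (0xb) flag in the second byte.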
class KeyboardEvent(KeyboardEventMeta):
def run(self):
tap = CGEventTapCreate(
kCGSessionEventTap,
kCGHeadInsertEventTap,
kCGEventTapOptionDefault,
CGEventMaskBit(kCGEventKeyDown) |
CGEventMaskBit(kCGEventKeyUp),
self.handler,
None)
loopsource = CFMachPortCreateRunLoopSource(None, tap, 0)
loop = CFRunLoopGetCurrent()
CFRunLoopAddSource(loop, loopsource, kCFRunLoopDefaultMode)
CGEventTapEnable(tap, True)
while self.state:
CFRunLoopRunInMode(kCFRunLoopDefaultMode, 5, False)
def handler(self, proxy, type, event, refcon):
key = CGEventGetIntegerValueField(event, kCGKeyboardEventKeycode)
if type == kCGEventKeyDown:
self.key_press(key)
elif type == kCGEventKeyUp:
self.key_release(key)
if self.capture:
CGEventSetType(event, kCGEventNull)
return event
| gpl-3.0 | -165,510,983,904,530,620 | 27.935323 | 115 | 0.585798 | false |
rgayon/plaso | plaso/cli/status_view.py | 1 | 18822 | # -*- coding: utf-8 -*-
"""The status view."""
from __future__ import unicode_literals
import ctypes
import sys
import time
try:
import win32api
import win32console
except ImportError:
win32console = None
from dfvfs.lib import definitions as dfvfs_definitions
import plaso
from plaso.cli import tools
from plaso.cli import views
class StatusView(object):
"""Processing status view."""
MODE_LINEAR = 'linear'
MODE_WINDOW = 'window'
_SOURCE_TYPES = {
dfvfs_definitions.SOURCE_TYPE_DIRECTORY: 'directory',
dfvfs_definitions.SOURCE_TYPE_FILE: 'single file',
dfvfs_definitions.SOURCE_TYPE_STORAGE_MEDIA_DEVICE: (
'storage media device'),
dfvfs_definitions.SOURCE_TYPE_STORAGE_MEDIA_IMAGE: (
'storage media image')}
_UNITS_1024 = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'EiB', 'ZiB', 'YiB']
_WINAPI_STD_OUTPUT_HANDLE = -11
_WINAPI_ENABLE_PROCESSED_INPUT = 1
_WINAPI_ENABLE_LINE_INPUT = 2
_WINAPI_ENABLE_ECHO_INPUT = 4
_WINAPI_ANSI_CONSOLE_MODE = (
_WINAPI_ENABLE_PROCESSED_INPUT | _WINAPI_ENABLE_LINE_INPUT |
_WINAPI_ENABLE_ECHO_INPUT)
def __init__(self, output_writer, tool_name):
"""Initializes a status view.
Args:
output_writer (OutputWriter): output writer.
      tool_name (str): name of the tool.
"""
super(StatusView, self).__init__()
self._artifact_filters = None
self._filter_file = None
self._have_ansi_support = not win32console
self._mode = self.MODE_WINDOW
self._output_writer = output_writer
self._source_path = None
self._source_type = None
self._stdout_output_writer = isinstance(
output_writer, tools.StdoutOutputWriter)
self._storage_file_path = None
self._tool_name = tool_name
if win32console:
kernel32 = ctypes.windll.kernel32
stdout_handle = kernel32.GetStdHandle(self._WINAPI_STD_OUTPUT_HANDLE)
result = kernel32.SetConsoleMode(
stdout_handle, self._WINAPI_ANSI_CONSOLE_MODE)
self._have_ansi_support = result != 0
def _AddsAnalysisProcessStatusTableRow(self, process_status, table_view):
"""Adds an analysis process status table row.
Args:
process_status (ProcessStatus): processing status.
table_view (CLITabularTableView): table view.
"""
used_memory = self._FormatSizeInUnitsOf1024(process_status.used_memory)
events = ''
if (process_status.number_of_consumed_events is not None and
process_status.number_of_consumed_events_delta is not None):
events = '{0:d} ({1:d})'.format(
process_status.number_of_consumed_events,
process_status.number_of_consumed_events_delta)
event_tags = ''
if (process_status.number_of_produced_event_tags is not None and
process_status.number_of_produced_event_tags_delta is not None):
event_tags = '{0:d} ({1:d})'.format(
process_status.number_of_produced_event_tags,
process_status.number_of_produced_event_tags_delta)
reports = ''
if (process_status.number_of_produced_reports is not None and
process_status.number_of_produced_reports_delta is not None):
reports = '{0:d} ({1:d})'.format(
process_status.number_of_produced_reports,
process_status.number_of_produced_reports_delta)
table_view.AddRow([
process_status.identifier, process_status.pid, process_status.status,
used_memory, events, event_tags, reports])
def _AddExtractionProcessStatusTableRow(self, process_status, table_view):
"""Adds an extraction process status table row.
Args:
process_status (ProcessStatus): processing status.
table_view (CLITabularTableView): table view.
"""
used_memory = self._FormatSizeInUnitsOf1024(process_status.used_memory)
sources = ''
if (process_status.number_of_produced_sources is not None and
process_status.number_of_produced_sources_delta is not None):
sources = '{0:d} ({1:d})'.format(
process_status.number_of_produced_sources,
process_status.number_of_produced_sources_delta)
events = ''
if (process_status.number_of_produced_events is not None and
process_status.number_of_produced_events_delta is not None):
events = '{0:d} ({1:d})'.format(
process_status.number_of_produced_events,
process_status.number_of_produced_events_delta)
# TODO: shorten display name to fit in 80 chars and show the filename.
table_view.AddRow([
process_status.identifier, process_status.pid, process_status.status,
used_memory, sources, events, process_status.display_name])
def _ClearScreen(self):
"""Clears the terminal/console screen."""
if self._have_ansi_support:
# ANSI escape sequence to clear screen.
self._output_writer.Write('\033[2J')
# ANSI escape sequence to move cursor to top left.
self._output_writer.Write('\033[H')
elif win32console:
# This version of Windows cmd.exe does not support ANSI escape codes, thus
# instead we fill the console screen buffer with spaces. The downside of
# this approach is an annoying flicker.
top_left_coordinate = win32console.PyCOORDType(0, 0)
screen_buffer = win32console.GetStdHandle(win32api.STD_OUTPUT_HANDLE)
screen_buffer_information = screen_buffer.GetConsoleScreenBufferInfo()
screen_buffer_attributes = screen_buffer_information['Attributes']
screen_buffer_size = screen_buffer_information['Size']
console_size = screen_buffer_size.X * screen_buffer_size.Y
screen_buffer.FillConsoleOutputCharacter(
' ', console_size, top_left_coordinate)
screen_buffer.FillConsoleOutputAttribute(
screen_buffer_attributes, console_size, top_left_coordinate)
screen_buffer.SetConsoleCursorPosition(top_left_coordinate)
# TODO: remove update flicker. For win32console we could set the cursor
# top left, write the table, clean the remainder of the screen buffer
# and set the cursor at the end of the table.
def _FormatSizeInUnitsOf1024(self, size):
"""Represents a number of bytes in units of 1024.
Args:
size (int): size in bytes.
Returns:
str: human readable string of the size.
"""
magnitude_1024 = 0
used_memory_1024 = float(size)
while used_memory_1024 >= 1024:
used_memory_1024 /= 1024
magnitude_1024 += 1
if 0 < magnitude_1024 <= 7:
return '{0:.1f} {1:s}'.format(
used_memory_1024, self._UNITS_1024[magnitude_1024])
return '{0:d} B'.format(size)
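  # Example (illustrative): 1536 bytes -> '1.5 KiB' and 3 * 1024**3 bytes -> '3.0 GiB',
  # while anything below 1024 bytes falls through to the plain '{n} B' form.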
def _FormatProcessingTime(self, processing_status):
"""Formats the processing time.
Args:
processing_status (ProcessingStatus): processing status.
Returns:
str: processing time formatted as: "5 days, 12:34:56".
"""
processing_time = 0
if processing_status:
processing_time = time.time() - processing_status.start_time
processing_time, seconds = divmod(int(processing_time), 60)
processing_time, minutes = divmod(processing_time, 60)
days, hours = divmod(processing_time, 24)
if days == 0:
days_string = ''
elif days == 1:
days_string = '1 day, '
else:
days_string = '{0:d} days, '.format(days)
return '{0:s}{1:02d}:{2:02d}:{3:02d}'.format(
days_string, hours, minutes, seconds)
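  # Example (illustrative): 93784 seconds of processing time is rendered as
  # '1 day, 02:03:04'; durations under one day drop the leading days prefix.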
def _PrintAnalysisStatusHeader(self, processing_status):
"""Prints the analysis status header.
Args:
processing_status (ProcessingStatus): processing status.
"""
self._output_writer.Write(
'Storage file\t\t: {0:s}\n'.format(self._storage_file_path))
processing_time = self._FormatProcessingTime(processing_status)
self._output_writer.Write(
'Processing time\t\t: {0:s}\n'.format(processing_time))
if processing_status and processing_status.events_status:
self._PrintEventsStatus(processing_status.events_status)
self._output_writer.Write('\n')
def _PrintAnalysisStatusUpdateLinear(self, processing_status):
"""Prints an analysis status update in linear mode.
Args:
processing_status (ProcessingStatus): processing status.
"""
processing_time = self._FormatProcessingTime(processing_status)
self._output_writer.Write(
'Processing time: {0:s}\n'.format(processing_time))
status_line = (
'{0:s} (PID: {1:d}) status: {2:s}, events consumed: {3:d}\n').format(
processing_status.foreman_status.identifier,
processing_status.foreman_status.pid,
processing_status.foreman_status.status,
processing_status.foreman_status.number_of_consumed_events)
self._output_writer.Write(status_line)
for worker_status in processing_status.workers_status:
status_line = (
'{0:s} (PID: {1:d}) status: {2:s}, events consumed: {3:d}\n').format(
worker_status.identifier, worker_status.pid, worker_status.status,
worker_status.number_of_consumed_events)
self._output_writer.Write(status_line)
self._output_writer.Write('\n')
def _PrintAnalysisStatusUpdateWindow(self, processing_status):
"""Prints an analysis status update in window mode.
Args:
processing_status (ProcessingStatus): processing status.
"""
if self._stdout_output_writer:
self._ClearScreen()
output_text = 'plaso - {0:s} version {1:s}\n\n'.format(
self._tool_name, plaso.__version__)
self._output_writer.Write(output_text)
self._PrintAnalysisStatusHeader(processing_status)
table_view = views.CLITabularTableView(column_names=[
'Identifier', 'PID', 'Status', 'Memory', 'Events', 'Tags',
'Reports'], column_sizes=[23, 7, 15, 15, 15, 15, 0])
self._AddsAnalysisProcessStatusTableRow(
processing_status.foreman_status, table_view)
for worker_status in processing_status.workers_status:
self._AddsAnalysisProcessStatusTableRow(worker_status, table_view)
table_view.Write(self._output_writer)
self._output_writer.Write('\n')
if processing_status.aborted:
self._output_writer.Write(
'Processing aborted - waiting for clean up.\n\n')
if self._stdout_output_writer:
# We need to explicitly flush stdout to prevent partial status updates.
sys.stdout.flush()
def _PrintExtractionStatusUpdateLinear(self, processing_status):
"""Prints an extraction status update in linear mode.
Args:
processing_status (ProcessingStatus): processing status.
"""
processing_time = self._FormatProcessingTime(processing_status)
self._output_writer.Write(
'Processing time: {0:s}\n'.format(processing_time))
status_line = (
'{0:s} (PID: {1:d}) status: {2:s}, events produced: {3:d}, file: '
'{4:s}\n').format(
processing_status.foreman_status.identifier,
processing_status.foreman_status.pid,
processing_status.foreman_status.status,
processing_status.foreman_status.number_of_produced_events,
processing_status.foreman_status.display_name)
self._output_writer.Write(status_line)
for worker_status in processing_status.workers_status:
status_line = (
'{0:s} (PID: {1:d}) status: {2:s}, events produced: {3:d}, file: '
'{4:s}\n').format(
worker_status.identifier, worker_status.pid, worker_status.status,
worker_status.number_of_produced_events,
worker_status.display_name)
self._output_writer.Write(status_line)
self._output_writer.Write('\n')
def _PrintExtractionStatusUpdateWindow(self, processing_status):
"""Prints an extraction status update in window mode.
Args:
processing_status (ProcessingStatus): processing status.
"""
if self._stdout_output_writer:
self._ClearScreen()
output_text = 'plaso - {0:s} version {1:s}\n\n'.format(
self._tool_name, plaso.__version__)
self._output_writer.Write(output_text)
self.PrintExtractionStatusHeader(processing_status)
table_view = views.CLITabularTableView(column_names=[
'Identifier', 'PID', 'Status', 'Memory', 'Sources', 'Events',
'File'], column_sizes=[15, 7, 15, 15, 15, 15, 0])
self._AddExtractionProcessStatusTableRow(
processing_status.foreman_status, table_view)
for worker_status in processing_status.workers_status:
self._AddExtractionProcessStatusTableRow(worker_status, table_view)
table_view.Write(self._output_writer)
self._output_writer.Write('\n')
if processing_status.aborted:
self._output_writer.Write(
'Processing aborted - waiting for clean up.\n\n')
# TODO: remove update flicker. For win32console we could set the cursor
# top left, write the table, clean the remainder of the screen buffer
# and set the cursor at the end of the table.
if self._stdout_output_writer:
# We need to explicitly flush stdout to prevent partial status updates.
sys.stdout.flush()
def _PrintEventsStatus(self, events_status):
"""Prints the status of the events.
Args:
events_status (EventsStatus): events status.
"""
if events_status:
table_view = views.CLITabularTableView(
column_names=['Events:', 'Filtered', 'In time slice', 'Duplicates',
'MACB grouped', 'Total'],
column_sizes=[15, 15, 15, 15, 15, 0])
table_view.AddRow([
'', events_status.number_of_filtered_events,
events_status.number_of_events_from_time_slice,
events_status.number_of_duplicate_events,
events_status.number_of_macb_grouped_events,
events_status.total_number_of_events])
self._output_writer.Write('\n')
table_view.Write(self._output_writer)
def _PrintTasksStatus(self, processing_status):
"""Prints the status of the tasks.
Args:
processing_status (ProcessingStatus): processing status.
"""
if processing_status and processing_status.tasks_status:
tasks_status = processing_status.tasks_status
table_view = views.CLITabularTableView(
column_names=['Tasks:', 'Queued', 'Processing', 'Merging',
'Abandoned', 'Total'],
column_sizes=[15, 7, 15, 15, 15, 0])
table_view.AddRow([
'', tasks_status.number_of_queued_tasks,
tasks_status.number_of_tasks_processing,
tasks_status.number_of_tasks_pending_merge,
tasks_status.number_of_abandoned_tasks,
tasks_status.total_number_of_tasks])
self._output_writer.Write('\n')
table_view.Write(self._output_writer)
def GetAnalysisStatusUpdateCallback(self):
"""Retrieves the analysis status update callback function.
Returns:
function: status update callback function or None if not available.
"""
if self._mode == self.MODE_LINEAR:
return self._PrintAnalysisStatusUpdateLinear
if self._mode == self.MODE_WINDOW:
return self._PrintAnalysisStatusUpdateWindow
return None
def GetExtractionStatusUpdateCallback(self):
"""Retrieves the extraction status update callback function.
Returns:
function: status update callback function or None if not available.
"""
if self._mode == self.MODE_LINEAR:
return self._PrintExtractionStatusUpdateLinear
if self._mode == self.MODE_WINDOW:
return self._PrintExtractionStatusUpdateWindow
return None
# TODO: refactor to protected method.
def PrintExtractionStatusHeader(self, processing_status):
"""Prints the extraction status header.
Args:
processing_status (ProcessingStatus): processing status.
"""
self._output_writer.Write(
'Source path\t\t: {0:s}\n'.format(self._source_path))
self._output_writer.Write(
'Source type\t\t: {0:s}\n'.format(self._source_type))
if self._artifact_filters:
artifacts_string = ', '.join(self._artifact_filters)
self._output_writer.Write('Artifact filters\t: {0:s}\n'.format(
artifacts_string))
if self._filter_file:
self._output_writer.Write('Filter file\t\t: {0:s}\n'.format(
self._filter_file))
processing_time = self._FormatProcessingTime(processing_status)
self._output_writer.Write(
'Processing time\t\t: {0:s}\n'.format(processing_time))
self._PrintTasksStatus(processing_status)
self._output_writer.Write('\n')
def PrintExtractionSummary(self, processing_status):
"""Prints a summary of the extraction.
Args:
processing_status (ProcessingStatus): processing status.
"""
if not processing_status:
self._output_writer.Write(
'WARNING: missing processing status information.\n')
elif not processing_status.aborted:
if processing_status.error_path_specs:
self._output_writer.Write('Processing completed with errors.\n')
else:
self._output_writer.Write('Processing completed.\n')
number_of_warnings = (
processing_status.foreman_status.number_of_produced_warnings)
if number_of_warnings:
output_text = '\n'.join([
'',
('Number of warnings generated while extracting events: '
'{0:d}.').format(number_of_warnings),
'',
'Use pinfo to inspect warnings in more detail.',
''])
self._output_writer.Write(output_text)
if processing_status.error_path_specs:
output_text = '\n'.join([
'',
'Path specifications that could not be processed:',
''])
self._output_writer.Write(output_text)
for path_spec in processing_status.error_path_specs:
self._output_writer.Write(path_spec.comparable)
self._output_writer.Write('\n')
self._output_writer.Write('\n')
def SetMode(self, mode):
"""Sets the mode.
Args:
mode (str): status view mode.
"""
self._mode = mode
def SetSourceInformation(
self, source_path, source_type, artifact_filters=None, filter_file=None):
"""Sets the source information.
Args:
source_path (str): path of the source.
source_type (str): source type.
artifact_filters (Optional[list[str]]): names of artifact definitions to
use as filters.
filter_file (Optional[str]): filter file.
"""
self._artifact_filters = artifact_filters
self._filter_file = filter_file
self._source_path = source_path
self._source_type = self._SOURCE_TYPES.get(source_type, 'UNKNOWN')
def SetStorageFileInformation(self, storage_file_path):
"""Sets the storage file information.
Args:
storage_file_path (str): path to the storage file.
"""
self._storage_file_path = storage_file_path
| apache-2.0 | -2,176,070,529,547,022,800 | 33.855556 | 80 | 0.660663 | false |
hoogenm/compose | compose/project.py | 1 | 23763 | from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
import logging
import operator
from functools import reduce
import enum
from docker.errors import APIError
from . import parallel
from .config import ConfigurationError
from .config.config import V1
from .config.sort_services import get_container_name_from_network_mode
from .config.sort_services import get_service_name_from_network_mode
from .const import IMAGE_EVENTS
from .const import LABEL_ONE_OFF
from .const import LABEL_PROJECT
from .const import LABEL_SERVICE
from .container import Container
from .network import build_networks
from .network import get_networks
from .network import ProjectNetworks
from .service import BuildAction
from .service import ContainerNetworkMode
from .service import ContainerPidMode
from .service import ConvergenceStrategy
from .service import NetworkMode
from .service import PidMode
from .service import Service
from .service import ServiceNetworkMode
from .service import ServicePidMode
from .utils import microseconds_from_time_nano
from .volume import ProjectVolumes
log = logging.getLogger(__name__)
@enum.unique
class OneOffFilter(enum.Enum):
include = 0
exclude = 1
only = 2
@classmethod
def update_labels(cls, value, labels):
if value == cls.only:
labels.append('{0}={1}'.format(LABEL_ONE_OFF, "True"))
elif value == cls.exclude:
labels.append('{0}={1}'.format(LABEL_ONE_OFF, "False"))
elif value == cls.include:
pass
else:
raise ValueError("Invalid value for one_off: {}".format(repr(value)))
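    # Illustrative example (the label keys come from compose.const and are assumed
    # here): update_labels(OneOffFilter.exclude, labels) appends something like
    # 'com.docker.compose.oneoff=False', while OneOffFilter.include leaves the
    # label list untouched.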
class Project(object):
"""
A collection of services.
"""
def __init__(self, name, services, client, networks=None, volumes=None, config_version=None):
self.name = name
self.services = services
self.client = client
self.volumes = volumes or ProjectVolumes({})
self.networks = networks or ProjectNetworks({}, False)
self.config_version = config_version
def labels(self, one_off=OneOffFilter.exclude):
labels = ['{0}={1}'.format(LABEL_PROJECT, self.name)]
OneOffFilter.update_labels(one_off, labels)
return labels
@classmethod
def from_config(cls, name, config_data, client):
"""
Construct a Project from a config.Config object.
"""
use_networking = (config_data.version and config_data.version != V1)
networks = build_networks(name, config_data, client)
project_networks = ProjectNetworks.from_services(
config_data.services,
networks,
use_networking)
volumes = ProjectVolumes.from_config(name, config_data, client)
project = cls(name, [], client, project_networks, volumes, config_data.version)
for service_dict in config_data.services:
service_dict = dict(service_dict)
if use_networking:
service_networks = get_networks(service_dict, networks)
else:
service_networks = {}
service_dict.pop('networks', None)
links = project.get_links(service_dict)
network_mode = project.get_network_mode(
service_dict, list(service_networks.keys())
)
pid_mode = project.get_pid_mode(service_dict)
volumes_from = get_volumes_from(project, service_dict)
if config_data.version != V1:
service_dict['volumes'] = [
volumes.namespace_spec(volume_spec)
for volume_spec in service_dict.get('volumes', [])
]
secrets = get_secrets(
service_dict['name'],
service_dict.pop('secrets', None) or [],
config_data.secrets)
project.services.append(
Service(
service_dict.pop('name'),
client=client,
project=name,
use_networking=use_networking,
networks=service_networks,
links=links,
network_mode=network_mode,
volumes_from=volumes_from,
secrets=secrets,
pid_mode=pid_mode,
**service_dict)
)
return project
@property
def service_names(self):
return [service.name for service in self.services]
def get_service(self, name):
"""
Retrieve a service by name. Raises NoSuchService
if the named service does not exist.
"""
for service in self.services:
if service.name == name:
return service
raise NoSuchService(name)
def validate_service_names(self, service_names):
"""
Validate that the given list of service names only contains valid
services. Raises NoSuchService if one of the names is invalid.
"""
valid_names = self.service_names
for name in service_names:
if name not in valid_names:
raise NoSuchService(name)
def get_services(self, service_names=None, include_deps=False):
"""
Returns a list of this project's services filtered
by the provided list of names, or all services if service_names is None
or [].
If include_deps is specified, returns a list including the dependencies for
service_names, in order of dependency.
Preserves the original order of self.services where possible,
reordering as needed to resolve dependencies.
Raises NoSuchService if any of the named services do not exist.
"""
if service_names is None or len(service_names) == 0:
service_names = self.service_names
unsorted = [self.get_service(name) for name in service_names]
services = [s for s in self.services if s in unsorted]
if include_deps:
services = reduce(self._inject_deps, services, [])
uniques = []
[uniques.append(s) for s in services if s not in uniques]
return uniques
def get_services_without_duplicate(self, service_names=None, include_deps=False):
services = self.get_services(service_names, include_deps)
for service in services:
service.remove_duplicate_containers()
return services
def get_links(self, service_dict):
links = []
if 'links' in service_dict:
for link in service_dict.get('links', []):
if ':' in link:
service_name, link_name = link.split(':', 1)
else:
service_name, link_name = link, None
try:
links.append((self.get_service(service_name), link_name))
except NoSuchService:
raise ConfigurationError(
'Service "%s" has a link to service "%s" which does not '
'exist.' % (service_dict['name'], service_name))
del service_dict['links']
return links
def get_network_mode(self, service_dict, networks):
network_mode = service_dict.pop('network_mode', None)
if not network_mode:
if self.networks.use_networking:
return NetworkMode(networks[0]) if networks else NetworkMode('none')
return NetworkMode(None)
service_name = get_service_name_from_network_mode(network_mode)
if service_name:
return ServiceNetworkMode(self.get_service(service_name))
container_name = get_container_name_from_network_mode(network_mode)
if container_name:
try:
return ContainerNetworkMode(Container.from_id(self.client, container_name))
except APIError:
raise ConfigurationError(
"Service '{name}' uses the network stack of container '{dep}' which "
"does not exist.".format(name=service_dict['name'], dep=container_name))
return NetworkMode(network_mode)
def get_pid_mode(self, service_dict):
pid_mode = service_dict.pop('pid', None)
if not pid_mode:
return PidMode(None)
service_name = get_service_name_from_network_mode(pid_mode)
if service_name:
return ServicePidMode(self.get_service(service_name))
container_name = get_container_name_from_network_mode(pid_mode)
if container_name:
try:
return ContainerPidMode(Container.from_id(self.client, container_name))
except APIError:
raise ConfigurationError(
"Service '{name}' uses the PID namespace of container '{dep}' which "
"does not exist.".format(name=service_dict['name'], dep=container_name)
)
return PidMode(pid_mode)
def start(self, service_names=None, **options):
containers = []
def start_service(service):
service_containers = service.start(quiet=True, **options)
containers.extend(service_containers)
services = self.get_services(service_names)
def get_deps(service):
return {
(self.get_service(dep), config)
for dep, config in service.get_dependency_configs().items()
}
parallel.parallel_execute(
services,
start_service,
operator.attrgetter('name'),
'Starting',
get_deps,
)
return containers
def stop(self, service_names=None, one_off=OneOffFilter.exclude, **options):
containers = self.containers(service_names, one_off=one_off)
def get_deps(container):
            # actually returning inverted dependencies
return {(other, None) for other in containers
if container.service in
self.get_service(other.service).get_dependency_names()}
parallel.parallel_execute(
containers,
self.build_container_operation_with_timeout_func('stop', options),
operator.attrgetter('name'),
'Stopping',
get_deps,
)
def pause(self, service_names=None, **options):
containers = self.containers(service_names)
parallel.parallel_pause(reversed(containers), options)
return containers
def unpause(self, service_names=None, **options):
containers = self.containers(service_names)
parallel.parallel_unpause(containers, options)
return containers
def kill(self, service_names=None, **options):
parallel.parallel_kill(self.containers(service_names), options)
def remove_stopped(self, service_names=None, one_off=OneOffFilter.exclude, **options):
parallel.parallel_remove(self.containers(
service_names, stopped=True, one_off=one_off
), options)
def down(self, remove_image_type, include_volumes, remove_orphans=False):
self.stop(one_off=OneOffFilter.include)
self.find_orphan_containers(remove_orphans)
self.remove_stopped(v=include_volumes, one_off=OneOffFilter.include)
self.networks.remove()
if include_volumes:
self.volumes.remove()
self.remove_images(remove_image_type)
def remove_images(self, remove_image_type):
for service in self.get_services():
service.remove_image(remove_image_type)
def restart(self, service_names=None, **options):
containers = self.containers(service_names, stopped=True)
parallel.parallel_execute(
containers,
self.build_container_operation_with_timeout_func('restart', options),
operator.attrgetter('name'),
'Restarting',
)
return containers
def build(self, service_names=None, no_cache=False, pull=False, force_rm=False, build_args=None):
for service in self.get_services(service_names):
if service.can_be_built():
service.build(no_cache, pull, force_rm, build_args)
else:
log.info('%s uses an image, skipping' % service.name)
def create(
self,
service_names=None,
strategy=ConvergenceStrategy.changed,
do_build=BuildAction.none,
):
services = self.get_services_without_duplicate(service_names, include_deps=True)
for svc in services:
svc.ensure_image_exists(do_build=do_build)
plans = self._get_convergence_plans(services, strategy)
for service in services:
service.execute_convergence_plan(
plans[service.name],
detached=True,
start=False)
def events(self, service_names=None):
def build_container_event(event, container):
time = datetime.datetime.fromtimestamp(event['time'])
time = time.replace(
microsecond=microseconds_from_time_nano(event['timeNano']))
return {
'time': time,
'type': 'container',
'action': event['status'],
'id': container.id,
'service': container.service,
'attributes': {
'name': container.name,
'image': event['from'],
},
'container': container,
}
service_names = set(service_names or self.service_names)
for event in self.client.events(
filters={'label': self.labels()},
decode=True
):
# The first part of this condition is a guard against some events
# broadcasted by swarm that don't have a status field.
# See https://github.com/docker/compose/issues/3316
if 'status' not in event or event['status'] in IMAGE_EVENTS:
# We don't receive any image events because labels aren't applied
# to images
continue
# TODO: get labels from the API v1.22 , see github issue 2618
try:
# this can fail if the container has been removed
container = Container.from_id(self.client, event['id'])
except APIError:
continue
if container.service not in service_names:
continue
yield build_container_event(event, container)
def up(self,
service_names=None,
start_deps=True,
strategy=ConvergenceStrategy.changed,
do_build=BuildAction.none,
timeout=None,
detached=False,
remove_orphans=False,
scale_override=None,
rescale=True):
warn_for_swarm_mode(self.client)
self.initialize()
self.find_orphan_containers(remove_orphans)
if scale_override is None:
scale_override = {}
services = self.get_services_without_duplicate(
service_names,
include_deps=start_deps)
for svc in services:
svc.ensure_image_exists(do_build=do_build)
plans = self._get_convergence_plans(services, strategy)
def do(service):
return service.execute_convergence_plan(
plans[service.name],
timeout=timeout,
detached=detached,
scale_override=scale_override.get(service.name),
rescale=rescale
)
def get_deps(service):
return {
(self.get_service(dep), config)
for dep, config in service.get_dependency_configs().items()
}
results, errors = parallel.parallel_execute(
services,
do,
operator.attrgetter('name'),
None,
get_deps,
)
if errors:
raise ProjectError(
'Encountered errors while bringing up the project.'
)
return [
container
for svc_containers in results
if svc_containers is not None
for container in svc_containers
]
def initialize(self):
self.networks.initialize()
self.volumes.initialize()
def _get_convergence_plans(self, services, strategy):
plans = {}
for service in services:
updated_dependencies = [
name
for name in service.get_dependency_names()
if name in plans and
plans[name].action in ('recreate', 'create')
]
if updated_dependencies and strategy.allows_recreate:
log.debug('%s has upstream changes (%s)',
service.name,
", ".join(updated_dependencies))
plan = service.convergence_plan(ConvergenceStrategy.always)
else:
plan = service.convergence_plan(strategy)
plans[service.name] = plan
return plans
def pull(self, service_names=None, ignore_pull_failures=False, parallel_pull=False, silent=False):
services = self.get_services(service_names, include_deps=False)
if parallel_pull:
def pull_service(service):
service.pull(ignore_pull_failures, True)
_, errors = parallel.parallel_execute(
services,
pull_service,
operator.attrgetter('name'),
'Pulling',
limit=5,
)
if len(errors):
raise ProjectError(b"\n".join(errors.values()))
else:
for service in services:
service.pull(ignore_pull_failures, silent=silent)
def push(self, service_names=None, ignore_push_failures=False):
for service in self.get_services(service_names, include_deps=False):
service.push(ignore_push_failures)
def _labeled_containers(self, stopped=False, one_off=OneOffFilter.exclude):
return list(filter(None, [
Container.from_ps(self.client, container)
for container in self.client.containers(
all=stopped,
filters={'label': self.labels(one_off=one_off)})])
)
def containers(self, service_names=None, stopped=False, one_off=OneOffFilter.exclude):
if service_names:
self.validate_service_names(service_names)
else:
service_names = self.service_names
containers = self._labeled_containers(stopped, one_off)
def matches_service_names(container):
return container.labels.get(LABEL_SERVICE) in service_names
return [c for c in containers if matches_service_names(c)]
def find_orphan_containers(self, remove_orphans):
def _find():
containers = self._labeled_containers()
for ctnr in containers:
service_name = ctnr.labels.get(LABEL_SERVICE)
if service_name not in self.service_names:
yield ctnr
orphans = list(_find())
if not orphans:
return
if remove_orphans:
for ctnr in orphans:
log.info('Removing orphan container "{0}"'.format(ctnr.name))
ctnr.kill()
ctnr.remove(force=True)
else:
log.warning(
'Found orphan containers ({0}) for this project. If '
'you removed or renamed this service in your compose '
'file, you can run this command with the '
'--remove-orphans flag to clean it up.'.format(
', '.join(["{}".format(ctnr.name) for ctnr in orphans])
)
)
def _inject_deps(self, acc, service):
dep_names = service.get_dependency_names()
if len(dep_names) > 0:
dep_services = self.get_services(
service_names=list(set(dep_names)),
include_deps=True
)
else:
dep_services = []
dep_services.append(service)
return acc + dep_services
def build_container_operation_with_timeout_func(self, operation, options):
def container_operation_with_timeout(container):
if options.get('timeout') is None:
service = self.get_service(container.service)
options['timeout'] = service.stop_timeout(None)
return getattr(container, operation)(**options)
return container_operation_with_timeout
def get_volumes_from(project, service_dict):
volumes_from = service_dict.pop('volumes_from', None)
if not volumes_from:
return []
def build_volume_from(spec):
if spec.type == 'service':
try:
return spec._replace(source=project.get_service(spec.source))
except NoSuchService:
pass
if spec.type == 'container':
try:
container = Container.from_id(project.client, spec.source)
return spec._replace(source=container)
except APIError:
pass
raise ConfigurationError(
"Service \"{}\" mounts volumes from \"{}\", which is not the name "
"of a service or container.".format(
service_dict['name'],
spec.source))
return [build_volume_from(vf) for vf in volumes_from]
def get_secrets(service, service_secrets, secret_defs):
secrets = []
for secret in service_secrets:
secret_def = secret_defs.get(secret.source)
if not secret_def:
raise ConfigurationError(
"Service \"{service}\" uses an undefined secret \"{secret}\" "
.format(service=service, secret=secret.source))
if secret_def.get('external_name'):
log.warn("Service \"{service}\" uses secret \"{secret}\" which is external. "
"External secrets are not available to containers created by "
"docker-compose.".format(service=service, secret=secret.source))
continue
if secret.uid or secret.gid or secret.mode:
log.warn(
"Service \"{service}\" uses secret \"{secret}\" with uid, "
"gid, or mode. These fields are not supported by this "
"implementation of the Compose file".format(
service=service, secret=secret.source
)
)
secrets.append({'secret': secret, 'file': secret_def.get('file')})
return secrets
def warn_for_swarm_mode(client):
info = client.info()
if info.get('Swarm', {}).get('LocalNodeState') == 'active':
if info.get('ServerVersion', '').startswith('ucp'):
# UCP does multi-node scheduling with traditional Compose files.
return
log.warn(
"The Docker Engine you're using is running in swarm mode.\n\n"
"Compose does not use swarm mode to deploy services to multiple nodes in a swarm. "
"All containers will be scheduled on the current node.\n\n"
"To deploy your application across the swarm, "
"use `docker stack deploy`.\n"
)
class NoSuchService(Exception):
def __init__(self, name):
self.name = name
self.msg = "No such service: %s" % self.name
def __str__(self):
return self.msg
class ProjectError(Exception):
def __init__(self, msg):
self.msg = msg
| apache-2.0 | -5,603,012,117,800,324,000 | 34.361607 | 102 | 0.58124 | false |
tpltnt/hackerspace-api-bot | bot.py | 1 | 3642 | #!/usr/bin/env python3
"""
Make your hackerspace an XMPP buddy.
"""
from configparser import ConfigParser
from optparse import OptionParser
import logging
import requests
import sleekxmpp
import sys
import time
import urllib.request
import json
class HackerspaceApiBot(sleekxmpp.ClientXMPP):
"""
A SleekXMPP based bot that will mimic the on-/offline
    status of a given hackerspace. It also authorizes others
automatically to allow status tracking.
"""
def __init__(self, jid, password):
super(HackerspaceApiBot, self).__init__(jid, password, plugin_whitelist=['xep_0199'])
self.auto_authorize = True
self.add_event_handler('session_start', self.start)
def start(self, event):
self.send_presence("hello world")
self.get_roster()
if __name__ == '__main__':
# Setup the command line arguments.
optp = OptionParser()
# JID and password as arguments
optp.add_option("-c", "--config", dest="config",
help="configuration file (ini-format) to use")
optp.add_option("-j", "--jid", dest="jid",
help="JID to use")
optp.add_option("-p", "--password", dest="password",
help="XMPP account password to use")
optp.add_option("-u", "--url", dest="jsonurl",
help="URL to load hackerspace API json file from")
opts, args = optp.parse_args()
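    # A minimal example config file (hypothetical values; the keys match what is
    # read from each section further down):
    # [myhackerspace]
    # jid = [email protected]
    # url = https://status.example.org/spaceapi.json
    # password = supersecret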
if None == opts.config:
if None == opts.jid:
print("no JID given ...")
sys.exit(1)
if None == opts.jsonurl:
print("no URL given ...")
sys.exit(1)
if None == opts.password:
print("no XMPP account password given ...")
sys.exit(1)
else:
configfile = ConfigParser()
configfile.read(opts.config)
if 1 != len(configfile.sections()):
print("can not process more than on section/hackerspace currently ...")
sys.exit(2)
# iterate over all sections (hackerspace configs)
for section in configfile:
# skip default
if "DEFAULT" == section:
continue
print("using \"" + section + "\" configuration")
opts.jid = configfile[section]['jid']
opts.jsonurl = configfile[section]['url']
opts.password = configfile[section]['password']
# set up logging (for debugging)
#logging.basicConfig(level=logging.INFO,
# format='%(levelname)-8s %(message)s')
# validate TLS Certificates
#xmpp.ca_certs = "path/to/ca/cert"
# set up the bot
xmpp = HackerspaceApiBot(opts.jid, opts.password)
xmpp.register_plugin('xep_0030') # Service Discovery
xmpp.register_plugin('xep_0004') # Data Forms
xmpp.register_plugin('xep_0060') # PubSub
xmpp.register_plugin('xep_0199') # XMPP Ping
# just run in an endless loop and check from time to time
while(True):
try:
jsondata = requests.get(opts.jsonurl)
        except (AttributeError, requests.exceptions.RequestException):
print("reading from URL failed")
continue
if 200 != jsondata.status_code:
print("reading from URL failed")
continue
spacestate = json.loads(jsondata.text)
if spacestate['open']:
# connect to server and set status
if xmpp.connect(use_tls=True, use_ssl=False):
xmpp.process(block=True)
else:
print("unable to connect to server")
else:
xmpp.disconnect(wait=True)
# wait 5 minutes before checking again
time.sleep(5*60)
| agpl-3.0 | -2,070,123,706,832,758,800 | 33.358491 | 93 | 0.589237 | false |
ktneely/ir-scripts | VulnMgmt/Vuln-tickets.py | 1 | 9151 | #!/usr/bin/python3
# This takes an XML report extracted from an OpenVAS VA scanner and
# creates issue tickets on ServiceNow and Redmine systems for tracking
# purposes.
#
# Most parameters are specified in the 'ov_prefs.txt' file, however,
# the XML report file may be specified on the command line. If
# specified this way, the script will ignore that line in the
# preferences file, however, the line must still exist!
# version 0.5
#modules
import os
import sys
import csv
import json
import socket
import requests
from redmine import Redmine
import xml.etree.ElementTree as ET
## Configure your environment through preferences file
# load prefs from ~/.incmgmt/prefs.txt
# The parameters should be in the following format
# DO NOT use comments or blank lines.
# Redmine Project
# Redmine URL
# Redmine API key
# ServiceNow URL
# ServiceNow username
# Servicenow password
# severity level
# OpenVAS XML report file
# Preamble: general info you want included in every ticket created
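# A hypothetical ov_prefs.txt matching the order above (all values are placeholders):
# VulnMgmt
# https://redmine.example.org
# 0123456789abcdef
# https://example.service-now.com/api/now/table/incident
# svc_vulnmgmt
# secret
# 4.0
# /tmp/openvas_report.xml
# Ticket auto-created from OpenVAS scan results.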
os.chdir(os.path.expanduser("~") + "/.incmgmt/")
prefs = []
for line in open('ov_prefs.txt'):
prefs.append(line)
redmine_project = prefs[0].rstrip()
redmine_server = prefs[1].rstrip()
redmine_key = prefs[2].rstrip()
sn_server = prefs[3].rstrip()
user = prefs[4].rstrip()
pwd = prefs[5].rstrip()
severity_filter = prefs[6].rstrip()
if len(sys.argv) == 1: # test for command line arguments
ov_report = prefs[7].rstrip()
else:
ov_report = sys.argv[1]
preamble = prefs[8].rstrip()
# Define service now headers
headers = {"Content-Type":"application/json","Accept":"application/json"}
# Input the vulnerability report and parse the XML
root = ET.parse(ov_report)
## determine criticality factors
# impact and urgency are used for Service Now
# priority is used for Redmine
def criticality(cvss):
global impact
global urgency
global priority
if float(cvss) > 7:
impact = 2
urgency = 1
priority = 5
elif float(cvss) < 4:
impact = 3
urgency = 3
priority = 3
else:
impact = 2
urgency = 2
priority = 4
return impact, urgency, priority
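# Worked example (illustrative): criticality('8.1') sets impact=2, urgency=1,
# priority=5; criticality('3.2') gives 3/3/3; anything from 4.0 through 7.0
# falls through to the default 2/2/4 branch.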
def reverse_lookup(ip):
try:
hostname = socket.gethostbyaddr(ip)[0]
except socket.herror:
hostname = " "
return hostname
## determine category
""" Redmine reference
0 nothing
53 Database
54 Networking
56 Server - Unix
55 Server - Windows
57 Web Application """
## Function to categorize the issue for all ticketing systems
# category is used for redmine, and subcategory is used for
# ServiceNow because it has a default high-level category for vulns
def categorize(family):
if family == "Web application abuses" or "Web Servers":
category = 57
subcategory = "Internal Application"
elif family == "Databases":
category = 53
subcategory = "Internal Application"
elif family == "General":
category = 56
subcategory = "UNIX"
elif "CentOS" in family:
category = 56
subcategory = "UNIX"
elif "Windows" in family:
category = 55
subcategory = "Windows"
else:
category = 0
subcategory = " "
return category, subcategory
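# Worked example (illustrative): categorize("Databases") returns
# (53, "Internal Application"), and any family string containing "Windows"
# maps to (55, "Windows").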
#Specify Redmine server params
redmine = Redmine(redmine_server, requests={'verify': False}, key=redmine_key, version='2.5.1')
def redmine_issue(priority, subject, body, category):
## Create an issue in Redmine to track the vulnerability
# and return information regarding the created ticket
new_issue = redmine.issue.create(project_id = redmine_project, \
priority_id = priority, subject = subject, description = body,\
tracker_id=19, category_id = category)
redmine_issue_id = str(new_issue.id)
redmine_url = redmine_server + "/issues/" + redmine_issue_id
print("redmine ticket created")
return redmine_url, redmine_issue_id
def sn_issue(subject, redmine_url, subcategory, impact, urgency):
## Create the incident in ServiceNow
# Construct the incident JSON object
incident_data = '{' + \
'"short_description":' + '"' + subject + '",' + \
'"description":' + '"For more information, see: ' + redmine_url + '",' + \
'"u_category":' + '"Vulnerability Management",' + \
'"u_subcategory":' + '"' + subcategory + '",' + \
'"impact":' + '"' + str(impact) + '",' + \
'"urgency":' + '"' + str(urgency) + '",' + \
'"contact_type":"Alert"' + '}'
# Create the incident on the Service Now system
response = requests.post(sn_server, auth=(user, pwd), \
headers=headers, data=incident_data)
# Capture the ticket number and unique identifier
sn_ticket = response.json()['result']['number']
sys_id = response.json()['result']['sys_id']
print("service now ticket created")
return sn_ticket, sys_id
# Update the Service Now ticket with a comment
def sn_update(sys_id, comment):
sn_url = sn_server + '/' + sys_id # REST URL for the ticket
update = requests.patch(sn_url, auth=(user, pwd), headers=headers,\
data='{"comments":"' + comment +'"}')
if update.status_code != 200:
        print('Status:', update.status_code, 'Headers:',\
              update.headers, 'Error Response:', update.json())
exit()
print("Updated Service Now ticket" + " " + sys_id) # user output
# checks for a ticket with the exact same "subject" or "short
# description" on the Redmine system.
def CheckTickets(subject):
i = 0
project = redmine.project.get(redmine_project)
while i < len(project.issues):
# print("Checking: " + str(project.issues[i]))
if str(project.issues[i]) == subject:
incident_id = project.issues[i].id
opentix_log = csv.reader(open('opentix.csv'))
# Generate a dictionary of the known open tickets. This
# should really be performed at the beginning so it
            # doesn't run every time, but meh!
tix_dict = {}
for row in opentix_log:
tix_dict[row[0]]=row[2]
sn_sysid = tix_dict[str(incident_id)]
print("Found match: " + tix_dict[str(incident_id)] + " " + str(project.issues[i])) # debug
return sn_sysid # return a value for test
i += 1
return None # if the test fails, return nothing
def log(redmine_issue_id, sn_ticket, sys_id, redmine_url):
# Write log file of tickets created
ticket_log = open('ticketlog.csv','a')
opentix_log = open('opentix.csv','a')
ticket_log.write(redmine_issue_id + ',' + sn_ticket + ',' + \
sys_id + ',' + redmine_url + ',' + '\n')
opentix_log.write(redmine_issue_id + ',' + sn_ticket + ',' + \
sys_id + '\n')
ticket_log.close()
opentix_log.close()
## Main program. Extract the data, then call functions
# Extract elements from the XML for use in creating the ticket
for result in root.findall("./report/results/result"):
# only process vulnerabilities of a certain severity or higher
if result.find('overrides/override/new_severity') is not None:
cvss = result.find('overrides/override/new_severity').text
else:
cvss = result.find('severity').text
if float(cvss) >= float(severity_filter):
# Extract the elements from the XML
host_ip = result.find('host').text
severity = result.find('severity').text
if result.find('description').text is not None:
description = result.find('description').text
else:
description = "no extended description available"
short_desc = result.find('nvt/name').text
cvss = result.find('nvt/cvss_base').text
cve = result.find('nvt/cve').text
        system_type = result.find('nvt/family').text
# get some additional info based on extracted values
hostname = reverse_lookup(host_ip) # perform name lookup
impact, urgency, priority = criticality(severity)
category, subcategory = categorize(system_type)
full_desc = result.find('nvt/tags').text
criticality(cvss) # calc criticality levels
subject = short_desc + " detected on " + hostname + " " + host_ip
# Create the body of the ticket by combining multiple elements from
# the report file.
body = preamble + "\n \n" + full_desc + "\n \n CVEs:" + cve +\
"\n \n Description: \n" + description
        # Check for a currently active ticket for the same issue.
previous = CheckTickets(subject)
# Create a new ticket if one does not exist.
if previous is not None:
sn_update(previous, "Please provide an update for this ticket")
else:
# create the issues in redmine and return info
redmine_url, redmine_issue_id = redmine_issue(priority, \
subject, body, category)
# create the issues in ServiceNow and return info
sn_ticket, sys_id = sn_issue(subject, redmine_url, \
subcategory, impact, urgency)
log (redmine_issue_id, sn_ticket, sys_id, redmine_url)
| gpl-3.0 | 3,450,388,158,392,082,000 | 35.899194 | 103 | 0.635013 | false |
easyw/kicad-3d-models-in-freecad | cadquery/FCAD_script_generator/4UCON_17809/cq_models/conn_4ucon_17809.py | 1 | 10895 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# CadQuery script to generate connector models
## requirements
## freecad (v1.5 and v1.6 have been tested)
## cadquery FreeCAD plugin (v0.3.0 and v0.2.0 have been tested)
## https://github.com/jmwright/cadquery-freecad-module
## This script can be run from within the cadquery module of freecad.
## To generate VRML/STEP files, use the export_conn_jst_xh
## script in the parent directory.
#* This is a cadquery script for the generation of MCAD Models. *
#* *
#* Copyright (c) 2016 *
#* Rene Poeschl https://github.com/poeschlr *
#* All trademarks within this guide belong to their legitimate owners. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU General Public License (GPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., *
#* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA *
#* *
#* The models generated with this script add the following exception: *
#* As a special exception, if you create a design which uses this symbol, *
#* and embed this symbol or unaltered portions of this symbol into the *
#* design, this symbol does not by itself cause the resulting design to *
#* be covered by the GNU General Public License. This exception does not *
#* however invalidate any other reasons why the design itself might be *
#* covered by the GNU General Public License. If you modify this symbol, *
#* you may extend this exception to your version of the symbol, but you *
#* are not obligated to do so. If you do not wish to do so, delete this *
#* exception statement from your version. *
#****************************************************************************
__title__ = "model description for 4UCON 17809 series connectors"
__author__ = "hackscribble"
__Comment__ = 'model description for 4UCON 17809 series connectors using cadquery'
___ver___ = "0.3 18/06/2020"
import cadquery as cq
from Helpers import show
from collections import namedtuple
import FreeCAD
from conn_4ucon_17809_params import *
from ribbon import Ribbon
def generate_straight_pin(params, pin_1_side):
foot_height = seriesParams.foot_height
pin_width=seriesParams.pin_width
pin_depth=seriesParams.pin_depth
pin_height=seriesParams.pin_height
pin_inside_distance=seriesParams.pin_inside_distance
pin_thickness = seriesParams.pin_thickness
chamfer_long = seriesParams.pin_chamfer_long
chamfer_short = seriesParams.pin_chamfer_short
sign = 1 if pin_1_side else -1
pin=cq.Workplane("YZ").workplane(offset=-pin_width/2.0)\
.moveTo(0, foot_height)\
.line(sign*pin_thickness/2,0)\
.line(sign*1.27,-foot_height)\
.line(0, -2.54)\
.line(sign*-pin_thickness,0)\
.line(0, 2.54)\
.line(sign*-1.27, foot_height)\
.line(0,1)\
.close()\
.extrude(pin_width).edges("|X").fillet(0.07)
return pin
def generate_2_pin_group(params, pin_1_side):
pin_pitch=params.pin_pitch
pin_y_pitch=params.pin_y_pitch
num_pins=params.num_pins
pin_a = generate_straight_pin(params, pin_1_side).translate((0, -pin_y_pitch/2, 0))
pin_b = pin_a.translate((0, -2 * pin_y_pitch, 0))
pin_group = pin_a.union(pin_b)
return pin_group
def generate_pins(params):
pin_pitch=params.pin_pitch
num_pins=params.num_pins
pins = generate_2_pin_group(params, pin_1_side=True)
for i in range(1, num_pins // 2):
pins = pins.union(generate_2_pin_group(params, i % 2 == 0).translate((i*pin_pitch,0,0)))
return pins
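# Illustrative note (assuming params provides num_pins, pin_pitch and pin_y_pitch as
# used above): for a 20-pin variant with pin_pitch = 2.54 the loop unions ten two-pin
# columns at x = 0, 2.54, ..., 22.86, flipping the bent foot side on alternate columns.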
def generate_2_contact_group(params):
pin_y_pitch=params.pin_y_pitch
foot_height = seriesParams.foot_height
pin_thickness = seriesParams.pin_thickness
pin_width=seriesParams.pin_width
y_offset = -(2*pin_y_pitch)
c_list = [
('start', {'position': (pin_y_pitch, foot_height), 'direction': 90.0, 'width':pin_thickness}),
('line', {'length': 4.5}),
('arc', {'radius': 0.2, 'angle': 35.0}),
('line', {'length': 3}),
('arc', {'radius': 2.0, 'angle': -70.0}),
('line', {'length': 2}),
('arc', {'radius': 0.2, 'angle': 35.0}),
('line', {'length': 2.8}),
]
ribbon = Ribbon(cq.Workplane("YZ").workplane(offset=-pin_width/2.0), c_list)
contact1 = ribbon.drawRibbon().extrude(pin_width)
contact2 = contact1.mirror("XZ")
contact1 = contact1.union(contact2).translate((0,-3*pin_y_pitch/2.0,0))
return contact1
def generate_contacts(params):
num_pins=params.num_pins
pin_pitch=params.pin_pitch
pair = generate_2_contact_group(params)
contacts = pair
for i in range(0, num_pins // 2):
contacts = contacts.union(pair.translate((i*pin_pitch,0,0)))
return contacts
def generate_body(params, calc_dim):
pin_inside_distance = seriesParams.pin_inside_distance
pin_width = seriesParams.pin_width
num_pins = params.num_pins
pin_pitch = params.pin_pitch
pin_y_pitch=params.pin_y_pitch
body_length = calc_dim.length
body_width = seriesParams.body_width
body_height = seriesParams.body_height
body_fillet_radius = seriesParams.body_fillet_radius
marker_x_inside = seriesParams.marker_x_inside
marker_y_inside = seriesParams.marker_y_inside
marker_size = seriesParams.marker_size
marker_depth = seriesParams.marker_depth
foot_height = seriesParams.foot_height
foot_width = seriesParams.foot_width
foot_length = seriesParams.foot_length
foot_inside_distance = seriesParams.foot_inside_distance
slot_length = calc_dim.slot_length
slot_outside_pin = seriesParams.slot_outside_pin
slot_width = seriesParams.slot_width
slot_depth = seriesParams.slot_depth
slot_chamfer = seriesParams.slot_chamfer
hole_width = seriesParams.hole_width
hole_length = seriesParams.hole_length
hole_offset = seriesParams.hole_offset
hole_depth = seriesParams.hole_depth
top_void_depth = seriesParams.top_void_depth
top_void_width = seriesParams.top_void_width
bottom_void_width = calc_dim.bottom_void_width
recess_depth = seriesParams.recess_depth
recess_large_width = seriesParams.recess_large_width
recess_small_width = seriesParams.recess_small_width
recess_height = seriesParams.recess_height
x_offset = (((num_pins // 2) - 1)*pin_pitch)/2.0
y_offset = -(1.5*pin_y_pitch)
# body
body = cq.Workplane("XY").workplane(offset=foot_height).moveTo(x_offset, y_offset)\
.rect(body_length, body_width).extrude(body_height)\
.edges("|Z").fillet(body_fillet_radius).edges(">Z").fillet(body_fillet_radius)
# pin 1 marker
body = body.faces(">Z").workplane().moveTo(-(body_length/2)+marker_x_inside, (body_width/2)-marker_y_inside)\
.line(-marker_size,-marker_size/2).line(0, marker_size).close().cutBlind(-marker_depth)
# foot
foot = cq.Workplane("YZ").workplane(offset=(body_length/2)-foot_inside_distance)\
.moveTo(y_offset - foot_length/2, 0)\
.line(foot_length*0.2,0)\
.line(0,foot_height/2)\
.line(foot_length*0.6,0)\
.line(0,-foot_height/2)\
.line(foot_length*0.2,0)\
.line(0,foot_height)\
.line(-foot_length,0)\
.close()\
.extrude(-foot_width)
foot_mirror = foot.mirror("YZ")
foot = foot.union(foot_mirror).translate((x_offset, 0, 0))
body = body.union(foot)
# slot
body = body.faces(">Z").workplane().rect(slot_length, slot_width).cutBlind(-slot_depth)
chamfer = cq.Workplane("XY").workplane(offset=foot_height+body_height).moveTo(x_offset, y_offset) \
.rect(slot_length+2*slot_chamfer, slot_width+2*slot_chamfer) \
.workplane(offset=-slot_chamfer).rect(slot_length, slot_width) \
.loft(combine=True)
body = body.cut(chamfer)
# contact holes
body = body.faces(">Z").workplane().center(0, hole_offset)\
.rarray(pin_pitch, 1, (num_pins//2), 1).rect(hole_width, hole_length)\
.center(0, -2*hole_offset)\
.rarray(pin_pitch, 1, (num_pins//2), 1).rect(hole_width, hole_length)\
.cutBlind(-2)
# internal void
body = body.faces(">Z").workplane(offset=-hole_depth)\
.rarray(pin_pitch, 1, (num_pins//2), 1).rect(hole_width, top_void_width)\
.cutBlind(-(top_void_depth-hole_depth))
body = body.faces(">Z").workplane(offset=-top_void_depth)\
.rarray(pin_pitch, 1, (num_pins//2), 1).rect(hole_width, bottom_void_width)\
.cutBlind(-(body_height-top_void_depth))
# body end recesses
body = body.faces(">Z").workplane().center(body_length/2.0-recess_depth/2.0, 0)\
.rect(recess_depth, recess_small_width).cutBlind(-recess_height)
recess = cq.Workplane("XY").workplane(offset=foot_height+body_height).center(x_offset-body_length/2.0+recess_depth/2.0, y_offset)\
.rect(recess_depth, recess_large_width).extrude(-recess_height).edges(">X").edges("|Z").fillet(0.3)
body = body.cut(recess)
return body
def generate_part(part_key):
params = all_params[part_key]
calc_dim = dimensions(params)
pins = generate_pins(params)
body = generate_body(params, calc_dim)
contacts = generate_contacts(params)
return (pins, body, contacts)
# opened from within freecad
if "module" in __name__:
part_to_build = 'ucon_17809_02x10_1.27mm'
FreeCAD.Console.PrintMessage("Started from CadQuery: building " +
part_to_build + "\n")
(pins, body, contacts) = generate_part(part_to_build)
show(pins)
show(body)
show(contacts)
| gpl-2.0 | 7,542,962,034,964,685,000 | 38.908425 | 134 | 0.616246 | false |
lavizhao/keyword | data_analysis/ana_words.py | 1 | 1087 | #coding: utf-8
import sys
def morethan(keyword,n):
"""
Arguments:
- `keyword`:
- `n`:
"""
ans = 0
for line in keyword:
if len(line.split()) - 1 <= n :
ans += 1
print "少余%s的词占总的百分比为%s"%(n,1.0*ans/len(keyword))
def aw(kf,nc):
"""
"""
f = open(kf)
print kf
keyword = f.readlines()
print "总关键词长度:",len(keyword)
morethan(keyword,1000)
morethan(keyword,100)
morethan(keyword,10)
morethan(keyword,5)
morethan(keyword,2)
twf = open(nc,"w")
a = 0
for line in keyword:
if len(line.split()) - 1 <= 200 :
twf.write(line)
a += 1
print "处理后词表长度",a
twf.close()
def usage():
"""
"""
print '''
    counting file:
python ana_words.py ../data/counting.txt ../data/new_counting.txt
'''
sys.exit(1)
if __name__ == '__main__':
if len(sys.argv)!= 3:
usage()
kf = sys.argv[1]
nc = sys.argv[2]
#analysis words
aw(kf,nc)
| apache-2.0 | -7,799,968,741,795,487,000 | 16.40678 | 71 | 0.484907 | false |
a10networks/a10sdk-python | a10sdk/core/waf/waf_wsdl.py | 2 | 1276 | from a10sdk.common.A10BaseClass import A10BaseClass
class Wsdl(A10BaseClass):
"""Class Description::
Manage Web Services Definition Language files.
Class wsdl supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param max_filesize: {"description": "Set maximum WSDL file size (Maximum file size in KBytes, default is 32K)", "partition-visibility": "shared", "default": 32, "optional": true, "format": "number", "maximum": 256, "minimum": 16, "type": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/waf/wsdl`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "wsdl"
self.a10_url="/axapi/v3/waf/wsdl"
self.DeviceProxy = ""
self.uuid = ""
self.max_filesize = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
| apache-2.0 | 6,820,183,366,621,144,000 | 33.486486 | 252 | 0.631661 | false |
jasonrbriggs/stomp.py | tests/test_basic.py | 1 | 8361 | import signal
from time import monotonic
import stomp
from stomp.listener import TestListener
from .testutils import *
@pytest.fixture()
def testlistener():
yield TestListener("123", print_to_log=True)
@pytest.fixture()
def conn(testlistener):
conn = stomp.Connection11(get_default_host())
conn.set_listener("testlistener", testlistener)
conn.connect(get_default_user(), get_default_password(), wait=True)
yield conn
conn.disconnect(receipt=None)
@pytest.fixture()
def invalidconn(testlistener):
conn = stomp.Connection([("192.0.2.0", 60000)], timeout=5, reconnect_attempts_max=1)
conn.set_listener("testlistener", testlistener)
yield conn
class TestBasic(object):
def test_subscribe_and_send(self, conn, testlistener):
queuename = "/queue/test1-%s" % testlistener.timestamp
conn.subscribe(destination=queuename, id=1, ack="auto")
conn.send(body='{"val": "this is a test"}', destination=queuename,
content_type="application/json", receipt="123")
validate_send(conn)
(headers, body) = testlistener.get_latest_message()
assert "content-type" in headers
assert headers["content-type"] == "application/json"
def test_default_to_localhost(self):
conn = stomp.Connection()
listener = TestListener("123", print_to_log=True)
queuename = "/queue/test1-%s" % listener.timestamp
conn.set_listener("testlistener", listener)
conn.connect(get_rabbitmq_user(), get_rabbitmq_password(), wait=True)
conn.send(body="this is a test", destination=queuename, receipt="123")
conn.disconnect(receipt=None)
def test_commit(self, conn):
timestamp = time.strftime("%Y%m%d%H%M%S")
queuename = "/queue/test2-%s" % timestamp
conn.subscribe(destination=queuename, id=1, ack="auto")
trans_id = conn.begin()
conn.send(body="this is a test1", destination=queuename, transaction=trans_id)
conn.send(body="this is a test2", destination=queuename, transaction=trans_id)
conn.send(body="this is a test3", destination=queuename, transaction=trans_id, receipt="123")
time.sleep(3)
listener = conn.get_listener("testlistener")
assert listener.connections == 1, "should have received 1 connection acknowledgement"
assert listener.messages == 0, "should not have received any messages"
conn.commit(transaction=trans_id)
listener.wait_for_message()
time.sleep(3)
assert listener.messages == 3, "should have received 3 messages"
assert listener.errors == 0, "should not have received any errors"
def test_abort(self, conn):
timestamp = time.strftime("%Y%m%d%H%M%S")
queuename = "/queue/test3-%s" % timestamp
conn.subscribe(destination=queuename, id=1, ack="auto")
trans_id = conn.begin()
conn.send(body="this is a test1", destination=queuename, transaction=trans_id)
conn.send(body="this is a test2", destination=queuename, transaction=trans_id)
conn.send(body="this is a test3", destination=queuename, transaction=trans_id)
time.sleep(3)
listener = conn.get_listener("testlistener")
assert listener.connections == 1, "should have received 1 connection acknowledgement"
assert listener.messages == 0, "should not have received any messages"
conn.abort(transaction=trans_id)
time.sleep(3)
assert listener.messages == 0, "should not have received any messages"
assert listener.errors == 0, "should not have received any errors"
def test_timeout(self, invalidconn):
ms = monotonic()
try:
invalidconn.connect("test", "test")
pytest.fail("shouldn't happen")
except stomp.exception.ConnectFailedException:
pass # success!
ms = monotonic() - ms
assert ms > 5.0, "connection timeout should have been at least 5 seconds"
def test_childinterrupt(self, conn):
def childhandler(signum, frame):
print("received child signal")
oldhandler = signal.signal(signal.SIGCHLD, childhandler)
timestamp = time.strftime("%Y%m%d%H%M%S")
queuename = "/queue/test5-%s" % timestamp
conn.subscribe(destination=queuename, id=1, ack="auto", receipt="123")
listener = conn.get_listener("testlistener")
listener.wait_on_receipt()
conn.send(body="this is an interrupt test 1", destination=queuename)
print("causing signal by starting child process")
os.system("sleep 1")
time.sleep(1)
signal.signal(signal.SIGCHLD, oldhandler)
print("completed signal section")
conn.send(body="this is an interrupt test 2", destination=queuename, receipt="123")
listener.wait_for_message()
assert listener.connections == 1, "should have received 1 connection acknowledgment"
assert listener.errors == 0, "should not have received any errors"
assert conn.is_connected(), "should still be connected to STOMP provider"
def test_clientack(self, conn):
timestamp = time.strftime("%Y%m%d%H%M%S")
queuename = "/queue/testclientack-%s" % timestamp
conn.subscribe(destination=queuename, id=1, ack="client")
conn.send(body="this is a test", destination=queuename, receipt="123")
listener = conn.get_listener("testlistener")
listener.wait_for_message()
(headers, _) = listener.get_latest_message()
message_id = headers["message-id"]
subscription = headers["subscription"]
conn.ack(message_id, subscription)
def test_clientnack(self, conn):
timestamp = time.strftime("%Y%m%d%H%M%S")
queuename = "/queue/testclientnack-%s" % timestamp
conn.subscribe(destination=queuename, id=1, ack="client")
conn.send(body="this is a test", destination=queuename, receipt="123")
listener = conn.get_listener("testlistener")
listener.wait_for_message()
(headers, _) = listener.get_latest_message()
message_id = headers["message-id"]
subscription = headers["subscription"]
conn.nack(message_id, subscription)
def test_specialchars(self, conn):
timestamp = time.strftime("%Y%m%d%H%M%S")
queuename = "/queue/testspecialchars-%s" % timestamp
conn.subscribe(destination=queuename, id=1, ack="client")
hdrs = {
"special-1": "test with colon : test",
"special-2": "test with backslash \\ test",
"special-3": "test with newline \n"
}
conn.send(body="this is a test", headers=hdrs, destination=queuename, receipt="123")
listener = conn.get_listener("testlistener")
listener.wait_for_message()
(headers, _) = listener.get_latest_message()
_ = headers["message-id"]
_ = headers["subscription"]
assert "special-1" in headers
assert "test with colon : test" == headers["special-1"]
assert "special-2" in headers
assert "test with backslash \\ test" == headers["special-2"]
assert "special-3" in headers
assert "test with newline \n" == headers["special-3"]
def test_host_bind_port(self):
conn = stomp.Connection(bind_host_port=("localhost", next_free_port()))
listener = TestListener("981", print_to_log=True)
queuename = "/queue/testbind-%s" % listener.timestamp
conn.set_listener("testlistener", listener)
conn.connect(get_rabbitmq_user(), get_rabbitmq_password(), wait=True)
conn.send(body="this is a test using local bind port", destination=queuename, receipt="981")
conn.disconnect(receipt=None)
class TestConnectionErrors(object):
def test_connect_wait_error(self):
conn = stomp.Connection(get_default_host())
try:
conn.connect("invalid", "user", True)
pytest.fail("Shouldn't happen")
except:
pass
def test_connect_nowait_error(self):
conn = stomp.Connection(get_default_host())
try:
conn.connect("invalid", "user", False)
assert not conn.is_connected(), "Should not be connected"
except:
pytest.fail("Shouldn't happen")
| apache-2.0 | -6,242,501,988,846,949,000 | 35.995575 | 101 | 0.640115 | false |
phiros/nepi | src/nepi/resources/linux/ns3/ccn/ns3ccnrdceapplication.py | 1 | 9256 | #
# NEPI, a framework to manage network experiments
# Copyright (C) 2014 INRIA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Alina Quereilhac <[email protected]>
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.resource import clsinit_copy, ResourceState
from nepi.resources.linux.ns3.ccn.ns3ccndceapplication import LinuxNS3CCNDceApplication
@clsinit_copy
class LinuxNS3DceCCNR(LinuxNS3CCNDceApplication):
_rtype = "linux::ns3::dce::CCNR"
@classmethod
def _register_attributes(cls):
max_fanout = Attribute("maxFanout",
"Sets the CCNR_BTREE_MAX_FANOUT environmental variable. ",
flags = Flags.Design)
max_leaf_entries = Attribute("maxLeafEntries",
"Sets the CCNR_BTREE_MAX_LEAF_ENTRIES environmental variable. ",
flags = Flags.Design)
max_node_bytes = Attribute("maxNodeBytes",
"Sets the CCNR_BTREE_MAX_NODE_BYTES environmental variable. ",
flags = Flags.Design)
max_node_pool = Attribute("maxNodePool",
"Sets the CCNR_BTREE_MAX_NODE_POOL environmental variable. ",
flags = Flags.Design)
content_cache = Attribute("contentCache",
"Sets the CCNR_CONTENT_CACHE environmental variable. ",
flags = Flags.Design)
debug = Attribute("debug",
"Sets the CCNR_DEBUG environmental variable. "
"Logging level for ccnr. Defaults to WARNING.",
type = Types.Enumerate,
allowed = [
"NONE",
"SEVERE",
"ERROR",
"WARNING",
"INFO",
"FINE, FINER, FINEST"],
flags = Flags.Design)
directory = Attribute("directory",
"Sets the CCNR_DIRECTORY environmental variable. ",
flags = Flags.Design)
global_prefix = Attribute("globalPrefix",
"Sets the CCNR_GLOBAL_PREFIX environmental variable. ",
flags = Flags.Design)
listen_on = Attribute("listenOn",
"Sets the CCNR_LISTEN_ON environmental variable. ",
flags = Flags.Design)
min_send_bufsize = Attribute("minSendBufsize",
"Sets the CCNR_MIN_SEND_BUFSIZE environmental variable. ",
flags = Flags.Design)
proto = Attribute("proto",
"Sets the CCNR_PROTO environmental variable. ",
flags = Flags.Design)
status_port = Attribute("statusPort",
"Sets the CCNR_STATUS_PORT environmental variable. ",
flags = Flags.Design)
start_write_scope_limit = Attribute("startWriteScopeLimit",
"Sets the CCNR_START_WRITE_SCOPE_LIMIT environmental variable. ",
flags = Flags.Design)
ccns_debug = Attribute("ccnsDebug",
"Sets the CCNS_DEBUG environmental variable. ",
flags = Flags.Design)
ccns_enable = Attribute("ccnsEnable",
"Sets the CCNS_ENABLE environmental variable. ",
flags = Flags.Design)
ccns_faux_error = Attribute("ccnsFauxError",
"Sets the CCNS_FAUX_ERROR environmental variable. ",
flags = Flags.Design)
ccns_heartbeat_micros = Attribute("ccnsHeartBeatMicros",
"Sets the CCNS_HEART_BEAT_MICROS environmental variable. ",
flags = Flags.Design)
ccns_max_compares_busy = Attribute("ccnsMaxComparesBusy",
"Sets the CCNS_MAX_COMPARES_BUSY environmental variable. ",
flags = Flags.Design)
ccns_max_fetch_busy = Attribute("ccnsMaxFetchBusy",
"Sets the CCNS_MAX_FETCH_BUSY environmental variable. ",
flags = Flags.Design)
ccns_node_fetch_lifetime = Attribute("ccnsNodeFetchLifetime",
"Sets the CCNS_NODE_FETCH_LIFETIME environmental variable. ",
flags = Flags.Design)
ccns_note_err = Attribute("ccnsNoteErr",
"Sets the CCNS_NOTE_ERR environmental variable. ",
flags = Flags.Design)
ccns_repo_store = Attribute("ccnsRepoStore",
"Sets the CCNS_REPO_STORE environmental variable. ",
flags = Flags.Design)
ccns_root_advise_fresh = Attribute("ccnsRootAdviseFresh",
"Sets the CCNS_ROOT_ADVISE_FRESH environmental variable. ",
flags = Flags.Design)
ccns_root_advise_lifetime = Attribute("ccnsRootAdviseLifetime",
"Sets the CCNS_ROOT_ADVISE_LIFETIME environmental variable. ",
flags = Flags.Design)
ccns_stable_enabled = Attribute("ccnsStableEnabled",
"Sets the CCNS_STABLE_ENABLED environmental variable. ",
flags = Flags.Design)
ccns_sync_scope = Attribute("ccnsSyncScope",
"Sets the CCNS_SYNC_SCOPE environmental variable. ",
flags = Flags.Design)
repo_file = Attribute("repoFile1",
"The Repository uses $CCNR_DIRECTORY/repoFile1 for "
"persistent storage of CCN Content Objects",
flags = Flags.Design)
cls._register_attribute(max_fanout)
cls._register_attribute(max_leaf_entries)
cls._register_attribute(max_node_bytes)
cls._register_attribute(max_node_pool)
cls._register_attribute(content_cache)
cls._register_attribute(debug)
cls._register_attribute(directory)
cls._register_attribute(global_prefix)
cls._register_attribute(listen_on)
cls._register_attribute(min_send_bufsize)
cls._register_attribute(proto)
cls._register_attribute(status_port)
cls._register_attribute(start_write_scope_limit)
cls._register_attribute(ccns_debug)
cls._register_attribute(ccns_enable)
cls._register_attribute(ccns_faux_error)
cls._register_attribute(ccns_heartbeat_micros)
cls._register_attribute(ccns_max_compares_busy)
cls._register_attribute(ccns_max_fetch_busy)
cls._register_attribute(ccns_node_fetch_lifetime)
cls._register_attribute(ccns_note_err)
cls._register_attribute(ccns_repo_store)
cls._register_attribute(ccns_root_advise_fresh)
cls._register_attribute(ccns_root_advise_lifetime)
cls._register_attribute(ccns_stable_enabled)
cls._register_attribute(ccns_sync_scope)
cls._register_attribute(repo_file)
def _instantiate_object(self):
if not self.get("binary"):
self.set("binary", "ccnr")
if not self.get("environment"):
self.set("environment", self._environment)
repoFile1 = self.get("repoFile1")
if repoFile1:
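            # The repository file is shipped to the DCE node as /REPO/repoFile1 and
            # CCNR_DIRECTORY is pointed at /REPO/ so ccnr uses it as its persistent store.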
env = "CCNR_DIRECTORY=/REPO/"
environment = self.get("environment")
if environment:
env += ";" + environment
self.set("environment", env)
self.set("files", "%s=/REPO/repoFile1" % repoFile1)
super(LinuxNS3DceCCNR, self)._instantiate_object()
@property
def _environment(self):
envs = dict({
"maxFanout": "CCNR_BTREE_MAX_FANOUT",
"maxLeafEntries": "CCNR_BTREE_MAX_LEAF_ENTRIES",
"maxNodeBytes": "CCNR_BTREE_MAX_NODE_BYTES",
"maxNodePool": "CCNR_BTREE_MAX_NODE_POOL",
"contentCache": "CCNR_CONTENT_CACHE",
"debug": "CCNR_DEBUG",
"directory": "CCNR_DIRECTORY",
"globalPrefix": "CCNR_GLOBAL_PREFIX",
"listenOn": "CCNR_LISTEN_ON",
"minSendBufsize": "CCNR_MIN_SEND_BUFSIZE",
"proto": "CCNR_PROTO",
"statusPort": "CCNR_STATUS_PORT",
"startWriteScopeLimit": "CCNR_START_WRITE_SCOPE_LIMIT",
"ccnsDebug": "CCNS_DEBUG",
"ccnsEnable": "CCNS_ENABLE",
"ccnsFauxError": "CCNS_FAUX_ERROR",
"ccnsHeartBeatMicros": "CCNS_HEART_BEAT_MICROS",
"ccnsMaxComparesBusy": "CCNS_MAX_COMPARES_BUSY",
"ccnsMaxFetchBusy": "CCNS_MAX_FETCH_BUSY",
"ccnsNodeFetchLifetime": "CCNS_NODE_FETCH_LIFETIME",
"ccnsNoteErr": "CCNS_NOTE_ERR",
"ccnsRepoStore": "CCNS_REPO_STORE",
"ccnsRootAdviseFresh": "CCNS_ROOT_ADVISE_FRESH",
"ccnsRootAdviseLifetime": "CCNS_ROOT_ADVISE_LIFETIME",
"ccnsStableEnabled": "CCNS_STABLE_ENABLED",
"ccnsSyncScope": "CCNS_SYNC_SCOPE",
})
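        # Only attributes that have been explicitly set are exported, rendered as
        # ';'-separated KEY=VALUE pairs for the DCE application's environment.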
env = ";".join(map(lambda k: "%s=%s" % (envs.get(k), str(self.get(k))),
[k for k in envs.keys() if self.get(k)]))
return env
| gpl-3.0 | 6,438,432,219,293,200,000 | 39.243478 | 87 | 0.611927 | false |
cswaney/hfttools | prickle/core.py | 1 | 65651 | import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import struct
import h5py
import time
import os
class Database():
"""Connection to an HDF5 database storing message and order book data.
Parameters
----------
path : string
Specifies location of the HDF5 file
names : list
Contains the stock tickers to include in the database
nlevels : int
Specifies the number of levels to include in the order book data
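    method : string
        Specifies the storage backend ('hdf5' or 'csv')

    Examples
    --------
    Create a Database (the file path here is only illustrative)::

    >> db = pk.Database(path='hft.hdf5', names=['GOOG', 'AAPL'], nlevels=10, method='hdf5')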
"""
def __init__(self, path, names, nlevels, method):
self.method = method
if self.method == 'hdf5':
try:
self.file = h5py.File(path, 'r+') # read/write, file must exist
print('Appending existing HDF5 file.')
for name in names:
if name in self.file['messages'].keys():
print('Overwriting message data for {}'.format(name))
del self.file['messages'][name]
if name in self.file['orderbooks'].keys():
print('Overwriting orderbook data for {}'.format(name))
del self.file['orderbooks'][name]
if name in self.file['trades'].keys():
print('Overwriting trades data for {}'.format(name))
del self.file['trades'][name]
if name in self.file['noii'].keys():
print('Overwriting noii data for {}'.format(name))
del self.file['noii'][name]
except OSError as e:
print('HDF5 file does not exist. Creating a new one.')
self.file = h5py.File(path, 'x') # create file, fail if exists
self.messages = self.file.require_group('messages')
self.orderbooks = self.file.require_group('orderbooks')
self.trades = self.file.require_group('trades')
self.noii = self.file.require_group('noii')
for name in names:
self.messages.require_dataset(name,
shape=(0, 8),
maxshape=(None, None),
dtype='i')
self.orderbooks.require_dataset(name,
shape=(0, 4 * nlevels + 2),
maxshape=(None, None),
dtype='i')
self.trades.require_dataset(name,
shape=(0, 5),
maxshape=(None, None),
dtype='i')
self.noii.require_dataset(name,
shape=(0, 14),
maxshape=(None, None),
dtype='i')
elif self.method == 'csv':
if os.path.exists('{}'.format(path)):
response = input('A database with that path already exists. Are you sure you want to proceed? [Y/N] ')
if response == 'Y':
proceed = True
for item in os.listdir('{}/messages/'.format(path)):
os.remove('{}/messages/{}'.format(path, item))
for item in os.listdir('{}/books/'.format(path)):
os.remove('{}/books/{}'.format(path, item))
for item in os.listdir('{}/trades/'.format(path)):
os.remove('{}/trades/{}'.format(path, item))
for item in os.listdir('{}/noii/'.format(path)):
os.remove('{}/noii/{}'.format(path, item))
os.rmdir('{}/messages/'.format(path))
os.rmdir('{}/books/'.format(path))
os.rmdir('{}/trades/'.format(path))
os.rmdir('{}/noii/'.format(path))
for item in os.listdir('{}'.format(path)):
os.remove('{}/{}'.format(path, item))
os.rmdir('{}'.format(path))
else:
# TODO: Need to exit the program
proceed = False
print('Process cancelled.')
else:
proceed = True
if proceed:
print('Creating a new database in directory: {}/'.format(path))
self.messages_path = '{}/messages/'.format(path)
self.books_path = '{}/books/'.format(path)
self.trades_path = '{}/trades/'.format(path)
self.noii_path = '{}/noii/'.format(path)
os.makedirs(path)
os.makedirs(self.messages_path)
os.makedirs(self.books_path)
os.makedirs(self.trades_path)
os.makedirs(self.noii_path)
columns = ['sec', 'nano', 'name']
columns.extend(['bidprc{}'.format(i) for i in range(nlevels)])
columns.extend(['askprc{}'.format(i) for i in range(nlevels)])
columns.extend(['bidvol{}'.format(i) for i in range(nlevels)])
columns.extend(['askvol{}'.format(i) for i in range(nlevels)])
for name in names:
with open(self.messages_path + 'messages_{}.txt'.format(name), 'w') as messages_file:
messages_file.write('sec,nano,name,type,refno,side,shares,price,mpid\n')
with open(self.books_path + 'books_{}.txt'.format(name), 'w') as books_file:
books_file.write(','.join(columns) + '\n')
with open(self.trades_path + 'trades_{}.txt'.format(name), 'w') as trades_file:
trades_file.write('sec,nano,name,side,shares,price\n')
with open(self.noii_path + 'noii_{}.txt'.format(name), 'w') as noii_file:
noii_file.write('sec,nano,name,type,cross,shares,price,paired,imb,dir,far,near,curr\n')
def close(self):
if self.method == 'hdf5':
self.file.close()
else:
pass
class Message():
"""A class representing out-going messages from the NASDAQ system.
Parameters
----------
sec : int
Seconds
nano : int
Nanoseconds
type : string
Message type
event : string
System event
name : string
Stock ticker
buysell : string
Trade position
price : int
Trade price
shares : int
Shares
refno : int
Unique reference number of order
newrefno : int
Replacement reference number
mpid: string
MPID attribution
"""
def __init__(self, date='.', sec=-1, nano=-1, type='.', event='.', name='.',
buysell='.', price=-1, shares=0, refno=-1, newrefno=-1, mpid='.'):
self.date = date
self.name = name
self.sec = sec
self.nano = nano
self.type = type
self.event = event
self.buysell = buysell
self.price = price
self.shares = shares
self.refno = refno
self.newrefno = newrefno
self.mpid = mpid
def __str__(self):
sep = ', '
line = ['sec=' + str(self.sec),
'nano=' + str(self.nano),
'type=' + str(self.type),
'event=' + str(self.event),
'name=' + str(self.name),
'buysell=' + str(self.buysell),
'price=' + str(self.price),
'shares=' + str(self.shares),
'refno=' + str(self.refno),
'newrefno=' + str(self.newrefno),
'mpid= {}'.format(self.mpid)]
return sep.join(line)
def __repr__(self):
sep = ', '
line = ['sec: ' + str(self.sec),
'nano: ' + str(self.nano),
'type: ' + str(self.type),
'event: ' + str(self.event),
'name: ' + str(self.name),
'buysell: ' + str(self.buysell),
'price: ' + str(self.price),
'shares: ' + str(self.shares),
'refno: ' + str(self.refno),
'newrefno: ' + str(self.newrefno),
'mpid: {}'.format(self.mpid)]
return 'Message(' + sep.join(line) + ')'
def split(self):
"""Converts a replace message to an add and a delete."""
assert self.type == 'U', "ASSERT-ERROR: split method called on non-replacement message."
if self.type == 'U':
new_message = Message(date=self.date,
sec=self.sec,
nano=self.nano,
type='U',
price=self.price,
shares=self.shares,
refno=self.refno,
newrefno=self.newrefno)
del_message = Message(date=self.date,
sec=self.sec,
nano=self.nano,
type='D',
refno=self.refno,
newrefno=-1)
add_message = Message(date=self.date,
sec=self.sec,
nano=self.nano,
type='U+',
price=self.price,
shares=self.shares,
refno=self.refno,
newrefno=self.newrefno)
return (new_message, del_message, add_message)
def to_list(self):
"""Returns message as a list."""
values = []
values.append(str(self.date))
values.append(str(self.name))
values.append(int(self.sec))
values.append(int(self.nano))
values.append(str(self.type))
values.append(str(self.event))
values.append(str(self.buysell))
values.append(int(self.price))
values.append(int(self.shares))
values.append(int(self.refno))
values.append(int(self.newrefno))
        values.append(str(self.mpid))
return values
def to_array(self):
"""Returns message as an np.array of integers."""
if self.type == 'P':
if self.buysell == 'B':
side = -1
else:
side = 1
values = [self.sec, self.nano, side, self.price, self.shares]
return np.array(values)
else:
if self.type == 'A': # add
type = 0
elif self.type == 'F': # add w/mpid
type = 1
elif self.type == 'X': # cancel
type = 2
elif self.type == 'D': # delete
type = 3
elif self.type == 'E': # execute
type = 4
elif self.type == 'C': # execute w/price
type = 5
elif self.type == 'U': # replace
type = 6
else:
type = -1
if self.buysell == 'B': # bid
side = 1
elif self.buysell == 'S': # ask
side = -1
else:
side = 0
values = [self.sec,
self.nano,
type,
side,
self.price,
np.abs(self.shares),
self.refno,
self.newrefno]
return np.array(values)
def to_txt(self, path=None):
if self.type in ('S', 'H'):
sep = ','
line = [str(self.sec),
str(self.nano),
str(self.name),
str(self.event)]
elif self.type in ('A', 'F', 'E', 'C', 'X', 'D', 'U'):
sep = ','
line = [str(self.sec),
str(self.nano),
str(self.name),
str(self.type),
str(self.refno),
str(self.buysell),
str(self.shares),
str(self.price / 10 ** 4),
str(self.mpid)]
elif self.type == 'P':
sep = ','
line = [str(self.sec),
str(self.nano),
str(self.name),
str(self.buysell),
str(self.shares),
str(self.price / 10 ** 4)]
if path is None:
return sep.join(line) + '\n'
else:
with open(path, 'a') as fout:
fout.write(sep.join(line) + '\n')
class NOIIMessage():
"""A class representing out-going messages from the NASDAQ system.
This class is specific to net order imbalance indicator messages and
cross trade messages.
Parameters
----------
sec: int
Seconds
nano: int
Nanoseconds
name: string
Stock ticker
type: string
Message type
cross: string
Cross type
buysell: string
Trade position
price: int
Trade price
shares: int
Shares
matchno: int
Unique reference number of trade
paired: int
Shares paired
imbalance: int
Shares imbalance
direction: string
Imbalance direction
far: int
Far price
near: int
Near price
current: int
Current refernce price
"""
def __init__(self, date='.', sec=-1, nano=-1, name='.', type='.', cross='.',
buysell='.', price=-1, shares=0, matchno=-1, paired=-1,
imbalance=-1, direction='.', far=-1, near=-1, current=-1):
self.date = date
self.sec = sec
self.nano = nano
self.name = name
self.type = type
self.cross = cross
self.buysell = buysell
self.price = price
self.shares = shares
self.matchno = matchno
self.paired = paired
self.imbalance = imbalance
self.direction = direction
self.far = far
self.near = near
self.current = current
def __str__(self):
sep = ', '
line = ['date=' + str(self.date),
'sec=' + str(self.sec),
'nano=' + str(self.nano),
'name=' + str(self.name),
'type=' + str(self.type),
'cross=' + str(self.cross),
'buysell=' + str(self.buysell),
'price=' + str(self.price),
'shares=' + str(self.shares),
'matchno=' + str(self.matchno),
'paired=' + str(self.paired),
'imbalance=' + str(self.imbalance),
'direction=' + str(self.direction),
'far=' + str(self.far),
'near=' + str(self.near),
'current=' + str(self.current)]
return sep.join(line)
def __repr__(self):
sep = ', '
line = ['date=' + str(self.date),
'sec=' + str(self.sec),
'nano=' + str(self.nano),
'name=' + str(self.name),
'type=' + str(self.type),
'cross=' + str(self.cross),
'buysell=' + str(self.buysell),
'price=' + str(self.price),
'shares=' + str(self.shares),
'matchno=' + str(self.matchno),
'paired=' + str(self.paired),
'imbalance=' + str(self.imbalance),
'direction=' + str(self.direction),
'far=' + str(self.far),
'near=' + str(self.near),
'current=' + str(self.current)]
return 'Message(' + sep.join(line) + ')'
def to_list(self):
"""Returns message as a list."""
values = []
values.append(str(self.date))
values.append(int(self.sec))
values.append(int(self.nano))
values.append(str(self.name))
values.append(str(self.type))
values.append(str(self.cross))
values.append(str(self.buysell))
values.append(int(self.price))
values.append(int(self.shares))
values.append(int(self.matchno))
values.append(int(self.paired))
values.append(int(self.imbalance))
        values.append(str(self.direction))
values.append(int(self.far))
values.append(int(self.near))
values.append(int(self.current))
return values
def to_array(self):
"""Returns message as an np.array of integers."""
if self.type == 'Q': # cross trade
type = 0
elif self.type == 'I': # noii
type = 1
else:
type = -1
print('Unexpected NOII message type: {}'.format(self.type))
if self.cross == 'O': # opening cross
cross = 0
elif self.cross == 'C': # closing cross
cross = 1
elif self.cross == 'H': # halted cross
cross = 2
elif self.cross == 'I': # intraday cross
cross = 3
else:
cross = -1
print('Unexpected cross type: {}'.format(self.cross))
if self.buysell == 'B': # bid
side = 1
elif self.buysell == 'S': # ask
side = -1
else:
side = 0
if self.direction == 'B': # bid
dir = 1
elif self.direction == 'S': # ask
dir = -1
else:
dir = 0
values = [self.sec,
self.nano,
type,
cross,
side,
self.price,
self.shares,
self.matchno,
self.paired,
self.imbalance,
dir,
self.far,
self.near,
self.current]
return np.array(values)
def to_txt(self, path=None):
sep = ','
if self.type == 'Q':
line = [str(self.sec),
str(self.nano),
str(self.name),
str(self.type),
str(self.cross),
str(self.shares),
str(self.price / 10 ** 4),
str(self.paired),
str(self.imbalance),
str(self.direction),
str(self.far),
str(self.near),
str(self.current)]
elif self.type == 'I':
line = [str(self.sec),
str(self.nano),
str(self.name),
str(self.type),
str(self.cross),
str(self.shares),
str(self.price),
str(self.paired),
str(self.imbalance),
str(self.direction),
str(self.far / 10 ** 4),
str(self.near / 10 ** 4),
str(self.current / 10 ** 4)]
if path is None:
return sep.join(line) + '\n'
else:
with open(path, 'a') as fout:
fout.write(sep.join(line) + '\n')
class Trade():
"""A class representing trades on the NASDAQ system.
Parameters
----------
date: int
Date
sec : int
Seconds
nano : int
Nanoseconds
name : string
Stock ticker
side : string
Buy or sell
price : int
Trade price
shares : int
Shares
"""
def __init__(self, date='.', sec=-1, nano=-1, name='.', side='.', price=-1, shares=0):
self.date = date
self.name = name
self.sec = sec
self.nano = nano
self.side = side
self.price = price
self.shares = shares
def __str__(self):
sep = ', '
line = ['sec: ' + str(self.sec),
'nano: ' + str(self.nano),
'name: ' + str(self.name),
                'side: ' + str(self.side),
'price: ' + str(self.price),
'shares: ' + str(self.shares)]
return sep.join(line)
def __repr__(self):
sep = ', '
line = ['sec: ' + str(self.sec),
'nano: ' + str(self.nano),
'name: ' + str(self.name),
                'side: ' + str(self.side),
'price: ' + str(self.price),
'shares: ' + str(self.shares)]
return 'Trade(' + sep.join(line) + ')'
def to_list(self):
"""Returns message as a list."""
values = []
values.append(str(self.date))
values.append(str(self.name))
values.append(int(self.sec))
values.append(int(self.nano))
values.append(str(self.side))
values.append(int(self.price))
values.append(int(self.shares))
return values
def to_array(self):
"""Returns message as an np.array of integers."""
if self.side == 'B':
side = -1
else:
side = 1
return np.array([self.sec, self.nano, side, self.price, self.shares])
def to_txt(self, path=None):
sep = ','
line = [str(self.sec),
str(self.nano),
str(self.name),
str(self.side),
str(self.shares),
str(self.price / 10 ** 4)]
if path is None:
return sep.join(line) + '\n'
else:
with open(path, 'a') as fout:
fout.write(sep.join(line) + '\n')
class Messagelist():
"""A class to store messages.
    Provides methods for writing to HDF5 and CSV databases.
Parameters
----------
date : string
Date to be assigned to data
names : list
Contains the stock tickers to include in the database
Attributes
----------
messages : dict
Contains a Message objects for each name in names
Examples
--------
Create a MessageList::
>> msglist = pk.Messagelist(date='112013', names=['GOOG', 'AAPL'])
"""
def __init__(self, date, names):
self.messages = {}
self.date = date
for name in names:
self.messages[name] = []
def add(self, message):
"""Add a message to the list."""
try:
self.messages[message.name].append(message)
except KeyError as e:
print("KeyError: Could not find {} in the message list".format(message.name))
def to_hdf5(self, name, db, grp):
"""Write messages to HDF5 file."""
assert db.method == 'hdf5', 'Attempted to write to non-HDF5 database'
m = self.messages[name]
if len(m) > 0:
listed = [message.to_array() for message in m]
array = np.array(listed)
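            # Datasets are created with zero rows and maxshape=(None, None), so each
            # flush resizes the dataset and writes this block onto the end.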
if grp == 'messages':
db_size, db_cols = db.messages[name].shape # rows
array_size, array_cols = array.shape
db_resize = db_size + array_size
db.messages[name].resize((db_resize, db_cols))
db.messages[name][db_size:db_resize, :] = array
if grp == 'trades':
db_size, db_cols = db.trades[name].shape # rows
array_size, array_cols = array.shape
db_resize = db_size + array_size
db.trades[name].resize((db_resize, db_cols))
db.trades[name][db_size:db_resize, :] = array
if grp == 'noii':
db_size, db_cols = db.noii[name].shape # rows
array_size, array_cols = array.shape
db_resize = db_size + array_size
db.noii[name].resize((db_resize, db_cols))
db.noii[name][db_size:db_resize, :] = array
self.messages[name] = [] # reset
print('wrote {} messages to dataset (name={}, group={})'.format(len(m), name, grp))
def to_txt(self, name, db, grp):
assert db.method == 'csv', 'Attempted to write to non-CSV database'
message_list = self.messages[name]
if len(message_list) > 0:
texted = [message.to_txt() for message in message_list]
if grp == 'messages':
with open('{}/messages_{}.txt'.format(db.messages_path, name), 'a') as fout:
fout.writelines(texted)
if grp == 'trades':
with open('{}/trades_{}.txt'.format(db.trades_path, name), 'a') as fout:
fout.writelines(texted)
if grp == 'noii':
with open('{}/noii_{}.txt'.format(db.noii_path, name), 'a') as fout:
fout.writelines(texted)
self.messages[name] = []
print('wrote {} messages to dataset (name={}, group={})'.format(len(message_list), name, grp))
class Order():
"""A class to represent limit orders.
Stores essential message data for order book reconstruction.
Attributes
----------
name : string
Stock ticker
buysell : string
Trade position
price : int
Trade price
shares : int
Shares
"""
def __init__(self, name='.', buysell='.', price='.', shares='.'):
self.name = name
self.buysell = buysell
self.price = price
self.shares = shares
def __str__(self):
sep = ', '
line = ['name=' + str(self.name),
'buysell=' + str(self.buysell),
'price=' + str(self.price),
'shares=' + str(self.shares)]
return sep.join(line)
def __repr__(self):
sep = ', '
line = ['name=' + str(self.name),
'buysell=' + str(self.buysell),
'price=' + str(self.price),
'shares=' + str(self.shares)]
return 'Order(' + sep.join(line) + ')'
class Orderlist():
"""A class to store existing orders and process incoming messages.
    This class handles the matching of messages to standing orders. Incoming
    messages are first matched to standing orders so that missing message data
    can be completed, and then the referenced order is updated based on the message.
Attributes
----------
orders : dict
Keys are reference numbers, values are Orders.
"""
def __init__(self):
self.orders = {}
def __str__(self):
sep = '\n'
line = []
for key in self.orders.keys():
line.append(str(key) + ': ' + str(self.orders[key]))
return sep.join(line)
# updates message by reference.
def complete_message(self, message):
"""Look up Order for Message and fill in missing data."""
if message.refno in self.orders.keys():
# print('complete_message received message: {}'.format(message.type))
ref_order = self.orders[message.refno]
if message.type == 'U':
message.name = ref_order.name
message.buysell = ref_order.buysell
elif message.type == 'U+': # ADD from a split REPLACE order
message.type = 'A'
message.name = ref_order.name
message.buysell = ref_order.buysell
message.refno = message.newrefno
message.newrefno = -1
elif message.type in ('E', 'C', 'X'):
message.name = ref_order.name
message.buysell = ref_order.buysell
message.price = ref_order.price
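                # Executions and cancels remove liquidity, so shares are recorded as a
                # negative quantity; Book.update can then simply add them to the level.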
message.shares = -message.shares
elif message.type == 'D':
message.name = ref_order.name
message.buysell = ref_order.buysell
message.price = ref_order.price
message.shares = -ref_order.shares
def add(self, message):
"""Add a new Order to the list."""
order = Order()
order.name = message.name
order.buysell = message.buysell
order.price = message.price
order.shares = message.shares
self.orders[message.refno] = order
def update(self, message):
"""Update an existing Order based on incoming Message."""
if message.refno in self.orders.keys():
if message.type == 'E': # execute
self.orders[message.refno].shares += message.shares
elif message.type == 'X': # execute w/ price
self.orders[message.refno].shares += message.shares
elif message.type == 'C': # cancel
self.orders[message.refno].shares += message.shares
elif message.type == 'D': # delete
self.orders.pop(message.refno)
else:
pass
class Book():
"""A class to represent an order book.
This class provides a method for updating the state of an order book from an
incoming message.
Attributes
----------
bids : dict
Keys are prices, values are shares
asks : dict
Keys are prices, values are shares
levels : int
Number of levels of the the order book to track
sec : int
Seconds
nano : int
Nanoseconds
"""
def __init__(self, date, name, levels):
self.bids = {}
self.asks = {}
self.min_bid = -np.inf
self.max_ask = np.inf
self.levels = levels
self.sec = -1
self.nano = -1
self.date = date
self.name = name
def __str__(self):
sep = ', '
sorted_bids = sorted(self.bids.keys(), reverse=True) # high-to-low
sorted_asks = sorted(self.asks.keys()) # low-to-high
bid_list = []
ask_list = []
nbids = len(self.bids)
nasks = len(self.asks)
for i in range(0, self.levels):
if i < nbids:
bid_list.append(str(self.bids[sorted_bids[i]]) + '@' + str(sorted_bids[i]))
else:
pass
if i < nasks:
ask_list.append(str(self.asks[sorted_asks[i]]) + '@' + str(sorted_asks[i]))
else:
pass
return 'bids: ' + sep.join(bid_list) + '\n' + 'asks: ' + sep.join(ask_list)
def __repr__(self):
sep = ', '
sorted_bids = sorted(self.bids.keys(), reverse=True) # high-to-low
sorted_asks = sorted(self.asks.keys()) # low-to-high
bid_list = []
ask_list = []
nbids = len(self.bids)
nasks = len(self.asks)
for i in range(0, self.levels):
if i < nbids:
bid_list.append(str(self.bids[sorted_bids[i]]) + '@' + str(sorted_bids[i]))
else:
pass
if i < nasks:
ask_list.append(str(self.asks[sorted_asks[i]]) + '@' + str(sorted_asks[i]))
else:
pass
return 'Book( \n' + 'bids: ' + sep.join(bid_list) + '\n' + 'asks: ' + sep.join(ask_list) + ' )'
def update(self, message):
"""Update Book using incoming Message data."""
self.sec = message.sec
self.nano = message.nano
updated = False
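        # Adds carry positive share counts and executes/cancels/deletes carry negative
        # ones, so a level update is a simple addition; levels that reach zero are removed.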
if message.buysell == 'B':
if message.price in self.bids.keys():
self.bids[message.price] += message.shares
if self.bids[message.price] == 0:
self.bids.pop(message.price)
elif message.type in ('A', 'F'):
self.bids[message.price] = message.shares
elif message.buysell == 'S':
if message.price in self.asks.keys():
self.asks[message.price] += message.shares
if self.asks[message.price] == 0:
self.asks.pop(message.price)
elif message.type in ('A', 'F'):
self.asks[message.price] = message.shares
return self
def to_list(self):
"""Return Order as a list."""
values = []
values.append(self.date)
values.append(self.name)
values.append(int(self.sec))
values.append(int(self.nano))
sorted_bids = sorted(self.bids.keys(), reverse=True)
sorted_asks = sorted(self.asks.keys())
for i in range(0, self.levels): # bid price
if i < len(self.bids):
values.append(sorted_bids[i])
else:
values.append(-1)
for i in range(0, self.levels): # ask price
if i < len(self.asks):
values.append(sorted_asks[i])
else:
values.append(-1)
for i in range(0, self.levels): # bid depth
if i < len(self.bids):
values.append(self.bids[sorted_bids[i]])
else:
values.append(0)
for i in range(0, self.levels): # ask depth
if i < len(self.asks):
values.append(self.asks[sorted_asks[i]])
else:
values.append(0)
return values
def to_array(self):
'''Return Order as numpy array.'''
values = []
values.append(int(self.sec))
values.append(int(self.nano))
sorted_bids = sorted(self.bids.keys(), reverse=True)
sorted_asks = sorted(self.asks.keys())
for i in range(0, self.levels): # bid price
if i < len(self.bids):
values.append(sorted_bids[i])
else:
values.append(-1)
for i in range(0, self.levels): # ask price
if i < len(self.asks):
values.append(sorted_asks[i])
else:
values.append(-1)
for i in range(0, self.levels): # bid depth
if i < len(self.bids):
values.append(self.bids[sorted_bids[i]])
else:
values.append(0)
for i in range(0, self.levels): # ask depth
if i < len(self.asks):
values.append(self.asks[sorted_asks[i]])
else:
values.append(0)
return np.array(values)
def to_txt(self):
values = []
values.append(int(self.sec))
values.append(int(self.nano))
values.append(self.name)
sorted_bids = sorted(self.bids.keys(), reverse=True)
sorted_asks = sorted(self.asks.keys())
for i in range(0, self.levels): # bid price
if i < len(self.bids):
values.append(sorted_bids[i] / 10 ** 4)
else:
values.append(-1)
for i in range(0, self.levels): # ask price
if i < len(self.asks):
values.append(sorted_asks[i] / 10 ** 4)
else:
values.append(-1)
for i in range(0, self.levels): # bid depth
if i < len(self.bids):
values.append(self.bids[sorted_bids[i]])
else:
values.append(0)
for i in range(0, self.levels): # ask depth
if i < len(self.asks):
values.append(self.asks[sorted_asks[i]])
else:
values.append(0)
return ','.join([str(v) for v in values]) + '\n'
class Booklist():
"""A class to store Books.
Provides methods for writing to external databases.
Examples
--------
Create a Booklist::
>> booklist = pk.BookList(['GOOG', 'AAPL'], levels=10)
Attributes
----------
books : list
A list of Books
method : string
        Specifies the type of database to create ('hdf5' or 'csv')
"""
def __init__(self, date, names, levels, method):
self.books = {}
self.method = method
for name in names:
self.books[name] = {'hist': [], 'cur': Book(date, name, levels)}
def update(self, message):
"""Update Book data from message."""
b = self.books[message.name]['cur'].update(message)
if self.method == 'hdf5':
self.books[message.name]['hist'].append(b.to_array())
if self.method == 'csv':
self.books[message.name]['hist'].append(b.to_txt())
def to_hdf5(self, name, db):
"""Write Book data to HDF5 file."""
hist = self.books[name]['hist']
if len(hist) > 0:
array = np.array(hist)
db_size, db_cols = db.orderbooks[name].shape # rows
array_size, array_cols = array.shape
db_resize = db_size + array_size
db.orderbooks[name].resize((db_resize, db_cols))
db.orderbooks[name][db_size:db_resize, :] = array
self.books[name]['hist'] = [] # reset
print('wrote {} books to dataset (name={})'.format(len(hist), name))
def to_txt(self, name, db):
hist = self.books[name]['hist']
if len(hist) > 0:
with open('{}/books_{}.txt'.format(db.books_path, name), 'a') as fout:
fout.writelines(hist)
self.books[name]['hist'] = [] # reset
print('wrote {} books to dataset (name={})'.format(len(hist), name))
def get_message_size(size_in_bytes):
"""Return number of bytes in binary message as an integer."""
(message_size,) = struct.unpack('>H', size_in_bytes)
return message_size
def get_message_type(type_in_bytes):
"""Return the type of a binary message as a string."""
return type_in_bytes.decode('ascii')
def get_message(message_bytes, message_type, date, time, version):
"""Return binary message data as a Message."""
if message_type in ('T', 'S', 'H', 'A', 'F', 'E', 'C', 'X', 'D', 'U', 'P', 'Q', 'I'):
message = protocol(message_bytes, message_type, time, version)
if version == 5.0:
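        # ITCH 5.0 embeds a single 48-bit nanosecond timestamp, so derive whole
        # seconds and the nanosecond remainder from it here.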
message.sec = int(message.nano / 10 ** 9)
message.nano = message.nano % 10 ** 9
message.date = date
return message
else:
return None
def protocol(message_bytes, message_type, time, version):
"""Decode binary message data and return as a Message."""
if message_type in ('T', 'S', 'H', 'A', 'F', 'E', 'C', 'X', 'D', 'U', 'P'):
message = Message()
elif message_type in ('Q', 'I'):
message = NOIIMessage()
# elif message_type in ('H'):
# message = TradingActionMessage()
message.type = message_type
if version == 4.0:
if message.type == 'T': # time
temp = struct.unpack('>I', message_bytes)
message.sec = temp[0]
message.nano = 0
elif message_type == 'S': # systems
temp = struct.unpack('>Is', message_bytes)
message.sec = time
message.nano = temp[0]
message.event = temp[1].decode('ascii')
elif message_type == 'H': # trade-action
temp = struct.unpack('>I6sss4s', message_bytes)
message.sec = time
message.nano = temp[0]
message.name = temp[1].decode('ascii').rstrip(' ')
message.event = temp[2].decode('ascii')
elif message.type == 'A': # add
temp = struct.unpack('>IQsI6sI', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.buysell = temp[2].decode('ascii')
message.shares = temp[3]
message.name = temp[4].decode('ascii').rstrip(' ')
message.price = temp[5]
elif message.type == 'F': # add w/mpid
temp = struct.unpack('>IQsI6sI4s', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.buysell = temp[2].decode('ascii')
message.shares = temp[3]
message.name = temp[4].decode('ascii').rstrip(' ')
message.price = temp[5]
elif message.type == 'E': # execute
temp = struct.unpack('>IQIQ', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.shares = temp[2]
elif message.type == 'C': # execute w/price (actually don't need price...)
temp = struct.unpack('>IQIQsI', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.shares = temp[2]
message.price = temp[5]
elif message.type == 'X': # cancel
temp = struct.unpack('>IQI', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.shares = temp[2]
elif message.type == 'D': # delete
temp = struct.unpack('>IQ', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
elif message.type == 'U': # replace
temp = struct.unpack('>IQQII', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.newrefno = temp[2]
message.shares = temp[3]
message.price = temp[4]
elif message.type == 'Q':
temp = struct.unpack('>IQ6sIQs', message_bytes)
message.sec = time
message.nano = temp[0]
message.shares = temp[1]
message.name = temp[2].decode('ascii').rstrip(' ')
message.price = temp[3]
message.event = temp[5].decode('ascii')
return message
elif version == 4.1:
if message.type == 'T': # time
temp = struct.unpack('>I', message_bytes)
message.sec = temp[0]
message.nano = 0
elif message.type == 'S': # systems
temp = struct.unpack('>Is', message_bytes)
message.sec = time
message.nano = temp[0]
message.name = '.'
message.event = temp[1].decode('ascii')
elif message.type == 'H': # trade-action
temp = struct.unpack('>I8sss4s', message_bytes)
message.sec = time
message.nano = temp[0]
message.name = temp[1].decode('ascii').rstrip(' ')
message.event = temp[2].decode('ascii')
elif message.type == 'A': # add
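            # fields: nanosecond offset, order reference number, buy/sell indicator,
            # shares, 8-character stock ticker, price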
temp = struct.unpack('>IQsI8sI', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.buysell = temp[2].decode('ascii')
message.shares = temp[3]
message.name = temp[4].decode('ascii').rstrip(' ')
message.price = temp[5]
elif message.type == 'F': # add w/mpid
temp = struct.unpack('>IQsI8sI4s', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.buysell = temp[2].decode('ascii')
message.shares = temp[3]
message.name = temp[4].decode('ascii').rstrip(' ')
message.price = temp[5]
message.mpid = temp[6].decode('ascii').rstrip(' ')
elif message.type == 'E': # execute
temp = struct.unpack('>IQIQ', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.shares = temp[2]
elif message.type == 'C': # execute w/price
temp = struct.unpack('>IQIQsI', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.shares = temp[2]
message.price = temp[5]
elif message.type == 'X': # cancel
temp = struct.unpack('>IQI', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.shares = temp[2]
elif message.type == 'D': # delete
temp = struct.unpack('>IQ', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
elif message.type == 'U': # replace
temp = struct.unpack('>IQQII', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.newrefno = temp[2]
message.shares = temp[3]
message.price = temp[4]
elif message.type == 'Q': # cross-trade
temp = struct.unpack('>IQ8sIQs', message_bytes)
message.sec = time
message.nano = temp[0]
message.shares = temp[1]
message.name = temp[2].decode('ascii').rstrip(' ')
message.price = temp[3]
message.event = temp[5].decode('ascii')
elif message.type == 'P': # trade message
temp = struct.unpack('>IQsI8sIQ', message_bytes)
message.sec = time
message.nano = temp[0]
message.refno = temp[1]
message.buysell = temp[2].decode('ascii')
message.shares = temp[3]
message.name = temp[4].decode('ascii').rstrip(' ')
message.price = temp[5]
# message.matchno = temp[6]
elif message.type == 'I':
temp = struct.unpack('>IQQs8sIIIss', message_bytes)
message.sec = time
message.nano = temp[0]
message.paired = temp[1]
message.imbalance = temp[2]
message.direction = temp[3].decode('ascii')
message.name = temp[4].decode('ascii').rstrip(' ')
message.far = temp[5]
message.near = temp[6]
message.current = temp[7]
message.cross = temp[8].decode('ascii')
# message.pvar = temp[9].decode('ascii'])
return message
elif version == 5.0:
if message.type == 'T': # time
raise ValueError('Time messages not supported in ITCHv5.0.')
elif message_type == 'S': # systems
temp = struct.unpack('>HHHIs', message_bytes)
message.sec = time
            message.nano = (temp[2] << 32) | temp[3]
message.event = temp[4].decode('ascii')
elif message.type == 'H':
temp = struct.unpack('>HHHI8sss4s', message_bytes)
message.sec = time
            message.nano = (temp[2] << 32) | temp[3]
message.name = temp[4].decode('ascii').rstrip(' ')
message.event = temp[5].decode('ascii')
elif message.type == 'A': # add
temp = struct.unpack('>HHHIQsI8sI', message_bytes)
message.sec = time
            message.nano = (temp[2] << 32) | temp[3]
message.refno = temp[4]
message.buysell = temp[5].decode('ascii')
message.shares = temp[6]
message.name = temp[7].decode('ascii').rstrip(' ')
message.price = temp[8]
elif message.type == 'F': # add w/mpid
temp = struct.unpack('>HHHIQsI8sI4s', message_bytes)
message.sec = time
            message.nano = (temp[2] << 32) | temp[3]
message.refno = temp[4]
message.buysell = temp[5].decode('ascii')
message.shares = temp[6]
message.name = temp[7].decode('ascii').rstrip(' ')
message.price = temp[8]
elif message.type == 'E': # execute
temp = struct.unpack('>HHHIQIQ', message_bytes)
message.sec = time
            message.nano = (temp[2] << 32) | temp[3]
message.refno = temp[4]
message.shares = temp[5]
elif message.type == 'C': # execute w/price
temp = struct.unpack('>HHHIQIQsI', message_bytes)
message.sec = time
            message.nano = (temp[2] << 32) | temp[3]
message.refno = temp[4]
message.shares = temp[5]
message.price = temp[8]
elif message.type == 'X': # cancel
temp = struct.unpack('>HHHIQI', message_bytes)
message.sec = time
            message.nano = (temp[2] << 32) | temp[3]
message.refno = temp[4]
message.shares = temp[5]
elif message.type == 'D': # delete
temp = struct.unpack('>HHHIQ', message_bytes)
message.sec = time
            message.nano = (temp[2] << 32) | temp[3]
message.refno = temp[4]
elif message.type == 'U': # replace
temp = struct.unpack('>HHHIQQII', message_bytes)
message.sec = time
            message.nano = (temp[2] << 32) | temp[3]
message.refno = temp[4]
message.newrefno = temp[5]
message.shares = temp[6]
message.price = temp[7]
elif message.type == 'Q': # cross-trade
temp = struct.unpack('>HHHIQ8sIQ1s', message_bytes)
message.sec = time
            message.nano = (temp[2] << 32) | temp[3]
message.shares = temp[4]
message.name = temp[5].decode('ascii').rstrip(' ')
message.price = temp[6]
message.event = temp[8].decode('ascii')
return message
else:
raise ValueError('ITCH version ' + str(version) + ' is not supported')
def unpack(fin, ver, date, nlevels, names, method='csv', fout=None, host=None, user=None):
"""Read ITCH data file, construct LOB, and write to database.
    This method reads binary data from an ITCH data file, converts it into
    human-readable data, then saves time series of out-going messages as well as
    reconstructed order book snapshots to a research database.
The version number of the ITCH data is specified as a float. Supported versions are: 4.1.
"""
BUFFER_SIZE = 10 ** 4
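    # Messages, trades, NOII records and book snapshots are buffered per ticker and
    # flushed to the database whenever a buffer reaches BUFFER_SIZE entries.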
orderlist = Orderlist()
booklist = Booklist(date, names, nlevels, method)
messagelist = Messagelist(date, names)
tradeslist = Messagelist(date, names)
noiilist = Messagelist(date, names)
if method == 'hdf5':
db = Database(path=fout, names=names, nlevels=nlevels, method='hdf5')
log_path = os.path.abspath('{}/../system.log'.format(fout))
with open(log_path, 'w') as system_file:
system_file.write('sec,nano,name,event\n')
elif method == 'csv':
db = Database(path=fout, names=names, nlevels=nlevels, method='csv')
log_path = '{}/system.log'.format(fout)
with open(log_path, 'w') as system_file:
system_file.write('sec,nano,name,event\n')
data = open(fin, 'rb')
message_reads = 0
message_writes = 0
trade_writes = 0
noii_writes = 0
reading = True
clock = 0
start = time.time()
while reading:
# read message
message_size = get_message_size(data.read(2))
message_type = get_message_type(data.read(1))
message_bytes = data.read(message_size - 1)
message = get_message(message_bytes, message_type, date, clock, ver)
message_reads += 1
# update clock
if message_type == 'T':
if message.sec % 1800 == 0:
print('TIME={}'.format(message.sec))
clock = message.sec
# update system
if message_type == 'S':
print('SYSTEM MESSAGE: {}'.format(message.event))
message.to_txt(log_path)
if message.event == 'C': # end messages
reading = False
if message_type == 'H':
if message.name in names:
print('TRADING MESSAGE ({}): {}'.format(message.name, message.event))
message.to_txt(log_path)
# TODO: What to do about halts?
if message.event == 'H': # halted (all US)
pass
elif message.event == 'P': # paused (all US)
pass
elif message.event == 'Q': # quotation only
pass
elif message.event == 'T': # trading on nasdaq
pass
# complete message
if message_type == 'U':
message, del_message, add_message = message.split()
orderlist.complete_message(message)
orderlist.complete_message(del_message)
orderlist.complete_message(add_message)
if message.name in names:
message_writes += 1
orderlist.update(del_message)
booklist.update(del_message)
orderlist.add(add_message)
booklist.update(add_message)
messagelist.add(message)
# print('ORDER MESSAGE <REPLACE>')
elif message_type in ('E', 'C', 'X', 'D'):
orderlist.complete_message(message)
if message.name in names:
message_writes += 1
orderlist.update(message)
booklist.update(message)
messagelist.add(message)
# print('ORDER MESSAGE')
elif message_type in ('A', 'F'):
if message.name in names:
message_writes += 1
orderlist.add(message)
booklist.update(message)
messagelist.add(message)
# print('ORDER MESSAGE')
elif message_type == 'P':
if message.name in names:
trade_writes += 1
tradeslist.add(message)
# print('TRADE MESSAGE')
elif message_type in ('Q', 'I'):
if message.name in names:
noii_writes += 1
noiilist.add(message)
# print('NOII MESSAGE')
# write message
if method == 'hdf5':
if message_type in ('U', 'A', 'F', 'E', 'C', 'X', 'D'):
if message.name in names:
if len(messagelist.messages[message.name]) == BUFFER_SIZE:
messagelist.to_hdf5(name=message.name, db=db, grp='messages')
if len(booklist.books[message.name]['hist']) == BUFFER_SIZE:
booklist.to_hdf5(name=message.name, db=db)
elif message_type == 'P':
if message.name in names:
if len(tradeslist.messages[message.name]) == BUFFER_SIZE:
tradeslist.to_hdf5(name=message.name, db=db, grp='trades')
elif message_type in ('Q', 'I'):
if message.name in names:
if len(noiilist.messages[message.name]) == BUFFER_SIZE:
noiilist.to_hdf5(name=message.name, db=db, grp='noii')
elif method == 'csv':
if message_type in ('U', 'A', 'F', 'E', 'C', 'X', 'D'):
if message.name in names:
if len(messagelist.messages[message.name]) == BUFFER_SIZE:
messagelist.to_txt(name=message.name, db=db, grp='messages')
if len(booklist.books[message.name]['hist']) == BUFFER_SIZE:
booklist.to_txt(name=message.name, db=db)
elif message_type == 'P':
if message.name in names:
if len(tradeslist.messages[message.name]) == BUFFER_SIZE:
tradeslist.to_txt(name=message.name, db=db, grp='trades')
elif message_type in ('Q', 'I'):
if message.name in names:
if len(noiilist.messages[message.name]) == BUFFER_SIZE:
noiilist.to_txt(name=message.name, db=db, grp='noii')
# clean up
print('Cleaning up...')
for name in names:
if method == 'hdf5':
messagelist.to_hdf5(name=name, db=db, grp='messages')
booklist.to_hdf5(name=name, db=db)
tradeslist.to_hdf5(name=name, db=db, grp='trades')
noiilist.to_hdf5(name=name, db=db, grp='noii')
elif method == 'csv':
messagelist.to_txt(name=name, db=db, grp='messages')
booklist.to_txt(name=name, db=db)
tradeslist.to_txt(name=name, db=db, grp='trades')
noiilist.to_txt(name=name, db=db, grp='noii')
stop = time.time()
data.close()
db.close()
print('Elapsed time: {} seconds'.format(stop - start))
print('Messages read: {}'.format(message_reads))
print('Messages written: {}'.format(message_writes))
print('Trades written: {}'.format(trade_writes))
print('NOII written: {}'.format(noii_writes))
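# Illustrative sketch (not part of the original module): one plausible call to
# unpack() for a single trading day. The input file, date, tickers, and output
# directory below are hypothetical placeholders, not values from this project.
def _example_unpack_usage():
    unpack(fin='data/S010113-v41.txt',
           ver=4.1,
           date='010113',
           nlevels=10,
           names=['AAPL', 'GOOG'],
           method='csv',
           fout='out')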
def load_hdf5(db, name, grp):
"""Read data from database and return pd.DataFrames."""
if grp == 'messages':
try:
with h5py.File(db, 'r') as f:
try:
messages = f['/messages/' + name]
data = messages[:, :]
T, N = data.shape
columns = ['sec',
'nano',
'type',
'side',
'price',
'shares',
'refno',
'newrefno']
df = pd.DataFrame(data, index=np.arange(0, T), columns=columns)
return df
except KeyError as e:
print('Could not find name {} in messages'.format(name))
except OSError as e:
            print('Could not find file {}'.format(db))
if grp == 'books':
try:
with h5py.File(db, 'r') as f:
try:
data = f['/orderbooks/' + name]
nlevels = int((data.shape[1] - 2) / 4)
pidx = list(range(2, 2 + nlevels))
pidx.extend(list(range(2 + nlevels, 2 + 2 * nlevels)))
vidx = list(range(2 + 2 * nlevels, 2 + 3 * nlevels))
vidx.extend(list(range(2 + 3 * nlevels, 2 + 4 * nlevels)))
timestamps = data[:, 0:2]
prices = data[:, pidx]
volume = data[:, vidx]
base_columns = [str(i) for i in list(range(1, nlevels + 1))]
price_columns = ['bidprc.' + i for i in base_columns]
volume_columns = ['bidvol.' + i for i in base_columns]
price_columns.extend(['askprc.' + i for i in base_columns])
volume_columns.extend(['askvol.' + i for i in base_columns])
df_time = pd.DataFrame(timestamps, columns=['sec', 'nano'])
df_price = pd.DataFrame(prices, columns=price_columns)
df_volume = pd.DataFrame(volume, columns=volume_columns)
df_price = pd.concat([df_time, df_price], axis=1)
df_volume = pd.concat([df_time, df_volume], axis=1)
return df_price, df_volume
except KeyError as e:
print('Could not find name {} in orderbooks'.format(name))
except OSError as e:
            print('Could not find file {}'.format(db))
if grp == 'trades':
try:
with h5py.File(db, 'r') as f:
try:
messages = f['/trades/' + name]
data = messages[:, :]
T, N = data.shape
columns = ['sec',
'nano',
'side',
'price',
'shares']
df = pd.DataFrame(data, index=np.arange(0, T), columns=columns)
return df
except KeyError as e:
print('Could not find name {} in messages'.format(name))
except OSError as e:
            print('Could not find file {}'.format(db))
if grp == 'noii':
try:
with h5py.File(db, 'r') as f:
try:
messages = f['/noii/' + name]
data = messages[:, :]
T, N = data.shape
columns = ['sec',
'nano',
'type',
'cross',
'side',
'price',
'shares',
'matchno',
'paired',
'imb',
'dir',
'far',
'near',
'current']
df = pd.DataFrame(data, index=np.arange(0, T), columns=columns)
return df
except KeyError as e:
print('Could not find name {} in messages'.format(name))
except OSError as e:
            print('Could not find file {}'.format(db))
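# Illustrative sketch (not part of the original module): reading one name back
# out of an HDF5 research database produced by unpack(). The file name and
# ticker are hypothetical placeholders.
def _example_load_hdf5_usage():
    messages = load_hdf5('out/itch-010113.hdf5', 'GOOG', grp='messages')
    prices, volume = load_hdf5('out/itch-010113.hdf5', 'GOOG', grp='books')
    return messages, prices, volume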
def interpolate(data, tstep):
"""Interpolate limit order data.
Uses left-hand interpolation, and assumes that the data is indexed by timestamp.
"""
T, N = data.shape
timestamps = data.index
t0 = timestamps[0] - (timestamps[0] % tstep) # 34200
tN = timestamps[-1] - (timestamps[-1] % tstep) + tstep # 57600
timestamps_new = np.arange(t0 + tstep, tN + tstep, tstep) # [34200, ..., 57600]
X = np.zeros((len(timestamps_new), N)) # np.array
X[-1, :] = data.values[-1, :]
t = timestamps_new[0] # keeps track of time in NEW sampling frequency
for i in np.arange(0, T): # observations in data...
if timestamps[i] > t:
s = timestamps[i] - (timestamps[i] % tstep)
tidx = int((t - t0) / tstep - 1)
sidx = int((s - t0) / tstep) # plus one for python indexing (below)
X[tidx:sidx, :] = data.values[i - 1, :]
t = s + tstep
else:
pass
return pd.DataFrame(X,
index=timestamps_new,
columns=data.columns)
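# Illustrative sketch (not part of the original module): putting book snapshots
# onto a uniform one-second grid with interpolate(). The input frame is assumed
# to carry 'sec' and 'nano' columns as produced by load_hdf5(); variable names
# are hypothetical.
def _example_interpolate_usage(df_price):
    prices = df_price.drop(['sec', 'nano'], axis=1)
    prices.index = df_price['sec'] + df_price['nano'] / 10 ** 9
    return interpolate(prices, tstep=1)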
def imshow(data, which, levels):
"""
Display order book data as an image, where order book data is either of
`df_price` or `df_volume` returned by `load_hdf5` or `load_postgres`.
"""
if which == 'prices':
idx = ['askprc.' + str(i) for i in range(levels, 0, -1)]
idx.extend(['bidprc.' + str(i) for i in range(1, levels + 1, 1)])
elif which == 'volumes':
idx = ['askvol.' + str(i) for i in range(levels, 0, -1)]
idx.extend(['bidvol.' + str(i) for i in range(1, levels + 1, 1)])
plt.imshow(data.loc[:, idx].T, interpolation='nearest', aspect='auto')
plt.yticks(range(0, levels * 2, 1), idx)
plt.colorbar()
plt.tight_layout()
plt.show()
def reorder(data, columns):
"""Reorder the columns of order data.
The resulting columns will be asks (high-to-low) followed by bids (low-to-high).
"""
levels = int((data.shape[1] - 2) / 2)
    if columns == 'volume' or columns == 'v':
idx = ['askvol.' + str(i) for i in range(levels, 0, -1)]
idx.extend(['bidvol.' + str(i) for i in range(1, levels + 1, 1)])
    elif columns == 'price' or columns == 'p':
idx = ['askprc.' + str(i) for i in range(levels, 0, -1)]
idx.extend(['bidprc.' + str(i) for i in range(1, levels + 1, 1)])
    return data.loc[:, idx]
def find_trades(messages, eps=10 ** -6):
if 'time' not in messages.columns:
messages['time'] = messages['sec'] + messages['nano'] / 10 ** 9
if 'type' in messages.columns:
messages = messages[messages.type == 'E']
trades = []
i = 0
while i < len(messages):
time = messages.iloc[i].time
side = messages.iloc[i].side
shares = messages.iloc[i].shares
vwap = messages.iloc[i].price
hit = 0
i += 1
if i == len(messages):
break
while messages.iloc[i].time <= time + eps and messages.iloc[i].side == side:
shares += messages.iloc[i].shares
if messages.iloc[i].price != vwap:
hit = 1
vwap = messages.iloc[i].price * messages.iloc[i].shares / shares + vwap * (
shares - messages.iloc[i].shares) / shares
i += 1
if i == len(messages):
break
# print('TRADE (time={}, side={}, shares={}, vwap={}, hit={})'.format(time, side, shares, vwap, hit))
trades.append([time, side, shares, vwap, hit])
return pd.DataFrame(trades, columns=['time', 'side', 'shares', 'vwap', 'hit'])
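# Illustrative sketch (not part of the original module): summarising the trades
# reconstructed by find_trades(). Following plot_trades() below, executions
# against resting 'B' orders are treated as aggressive sells and vice versa.
def _example_find_trades_usage(messages):
    trades = find_trades(messages, eps=10 ** -6)
    sells = trades[trades.side == 'B']
    buys = trades[trades.side == 'S']
    return len(buys), len(sells), trades.shares.sum()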
def plot_trades(trades):
sells = trades[trades.side == 'B']
buys = trades[trades.side == 'S']
plt.hist(sells.shares, bins=np.arange(-1000, 100, 100), edgecolor='white', color='C0', alpha=0.5)
plt.hist(-buys.shares, bins=np.arange(1, 1100, 100), edgecolor='white', color='C1', alpha=0.5)
plt.show()
plt.clf()
def nodups(books, messages):
    """Return messages and books with rows removed for orders that didn't change the book."""
assert books.shape[0] == messages.shape[0], "books and messages do not have the same number of rows"
subset = books.columns.drop(['sec', 'nano', 'name'])
dups = books.duplicated(subset=subset)
return books[~dups].reset_index(), messages[~dups].reset_index()
def combine(messages, hidden):
"""Combine hidden executions with message data."""
messages = messages.drop(['index', 'sec', 'nano', 'name', 'refno', 'mpid'], axis=1)
hidden['type'] = 'H'
hidden = hidden.drop(['hit'], axis=1)
hidden = hidden.rename(columns={'vwap': 'price'})
combined = pd.concat([messages, hidden])
return combined.sort_values(by='time', axis=0)
| mit | 8,576,444,294,337,752,000 | 36.861015 | 232 | 0.489086 | false |
conejoninja/pelisalacarta | python/main-classic/channels/vepelis.py | 1 | 18213 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Channel for VePelis
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import logger
from core import config
from core import scrapertools
from core.item import Item
from servers import servertools
__channel__ = "vepelis"
__category__ = "F"
__type__ = "generic"
__title__ = "VePelis"
__language__ = "ES"
__creationdate__ = "20130528"
DEBUG = config.get_setting("debug")
def isGeneric():
return True
def mainlist(item):
logger.info("[vepelis.py] mainlist")
itemlist = []
itemlist.append( Item(channel=__channel__, title="Ultimas Agregadas", action="listado2" , url="http://www.vepelis.com/pelicula/ultimas-peliculas" , extra="http://www.vepelis.com/pelicula/ultimas-peliculas"))
itemlist.append( Item(channel=__channel__, title="Estrenos en DVD" , action="listado2" , url="http://www.vepelis.com/pelicula/ultimas-peliculas/estrenos-dvd" , extra="http://www.vepelis.com/pelicula/ultimas-peliculas/estrenos-dvd"))
itemlist.append( Item(channel=__channel__, title="Peliculas en Cartelera", action="listado2" , url="http://www.vepelis.com/pelicula/ultimas-peliculas/cartelera" , extra="http://www.vepelis.com/pelicula/ultimas-peliculas/cartelera"))
itemlist.append( Item(channel=__channel__, title="Ultimas Actualizadas" , action="listado2" , url="http://www.vepelis.com/pelicula/ultimas-peliculas/ultimas/actualizadas" , extra="http://www.vepelis.com/pelicula/ultimas-peliculas/ultimas/actualizadas"))
itemlist.append( Item(channel=__channel__, title="Por Genero" , action="generos" , url="http://www.vepelis.com/"))
itemlist.append( Item(channel=__channel__, title="Por Orden Alfabetico" , action="alfabetico" , url="http://www.vepelis.com/"))
itemlist.append( Item(channel=__channel__, title="Buscar" , action="search" , url="http://www.vepelis.com/"))
return itemlist
def listarpeliculas(item):
logger.info("[vepelis.py] listarpeliculas")
    # Download the page
data = scrapertools.cachePage(item.url)
extra = item.extra
    # Extract the entries from the selected page
'''<td class="DarkText" align="center" valign="top" width="100px" height="160px" style="background-color:#1e1e1e;" onmouseover="this.style.backgroundColor='#000000'" onmouseout="this.style.backgroundColor='#1e1e1e'"><p style="margin-bottom: 3px;border-bottom:#ABABAB 1px solid">
<a href="http://www.peliculasaudiolatino.com/movies/Larry_Crowne.html"><img src="http://www.peliculasaudiolatino.com/poster/85x115/peliculas/movieimg/movie1317696842.jpg" alt="Larry Crowne" border="0" height="115" width="85"></a>'''
patron = '<td class=.*?<a '
patron += 'href="([^"]+)"><img src="([^"]+)" alt="([^"]+)"'
matches = re.compile(patron,re.DOTALL).findall(data)
if DEBUG: scrapertools.printMatches(matches)
itemlist = []
for match in matches:
scrapedurl = match[0]
scrapedtitle = match[2]
scrapedtitle = unicode( scrapedtitle, "iso-8859-1" , errors="replace" ).encode("utf-8")
scrapedthumbnail = match[1]
scrapedplot = ""
logger.info(scrapedtitle)
        # Add the entry to the list
itemlist.append( Item(channel=__channel__, action="findvideos", title=scrapedtitle , fulltitle=scrapedtitle, url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , extra=extra , folder=True) )
    # Extract the next-page marker
patron = 'Anterior.*? :: <a href="/../../.*?/page/([^"]+)">Siguiente '
matches = re.compile(patron,re.DOTALL).findall(data)
if DEBUG: scrapertools.printMatches(matches)
for match in matches:
if len(matches)>0:
scrapedurl = extra+match
scrapedtitle = "!Pagina Siguiente"
scrapedthumbnail = ""
scrapedplot = ""
itemlist.append( Item(channel=__channel__, action="listarpeliculas", title=scrapedtitle , fulltitle=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , extra=extra , folder=True) )
return itemlist
def findvideos(item):
logger.info("[vepelis.py] videos")
    # Download the page
data = scrapertools.cachePage(item.url)
title = item.title
scrapedthumbnail = item.thumbnail
itemlist = []
patron = '<li><a href="#ms.*?">([^"]+)</a></li>.*?<iframe src="(.*?)"'
matches = re.compile(patron,re.DOTALL).findall(data)
#itemlist.append( Item(channel=__channel__, action="play", title=title , fulltitle=item.fulltitle, url=item.url , thumbnail=scrapedthumbnail , folder=False) )
if (DEBUG): scrapertools.printMatches(matches)
for match in matches:
url = match[1]
title = "SERVIDOR: " + match[0]
title = unicode( title, "iso-8859-1" , errors="replace" ).encode("utf-8")
itemlist.append( Item(channel=__channel__, action="play", title=title , fulltitle=item.fulltitle, url=url , thumbnail=scrapedthumbnail , folder=False) )
return itemlist
def play(item):
logger.info("[vepelis.py] play")
itemlist=[]
from servers import servertools
itemlist = servertools.find_video_items(data=item.url)
for videoitem in itemlist:
videoitem.channel=__channel__
videoitem.action="play"
videoitem.folder=False
return itemlist
#data2 = scrapertools.cache_page(item.url)
#data2 = data2.replace("http://www.peliculasaudiolatino.com/show/mv.php?url=","http://www.megavideo.com/?v=")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/show/videobb.php?url=","http://www.videobb.com/watch_video.php?v=")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/show/vidbux.php?url=","http://www.vidbux.com/")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/show/vidxden.php?url=","http://www.vidxden.com/")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/show/videozer.php?url=","http://www.videozer.com/video/")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/v/pl/play.php?url=","http://www.putlocker.com/embed/")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/v/mv/play.php?url=","http://www.modovideo.com/frame.php?v=")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/v/ss/play.php?url=","http://www.sockshare.com/embed/")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/v/vb/play.php?url=","http://vidbull.com/")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/show/sockshare.php?url=","http://www.sockshare.com/embed/")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/show/moevide.php?url=","http://moevideo.net/?page=video&uid=")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/show/novamov.php?url=","http://www.novamov.com/video/")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/show/movshare.php?url=","http://www.movshare.net/video/")
#data2 = data2.replace("http://www.peliculasaudiolatino.com/show/divxstage.php?url=","http://www.divxstage.net/video/")
#listavideos = servertools.findvideos(data2)
#for video in listavideos:
# invalid = video[1]
# invalid = invalid[0:8]
# if invalid!= "FN3WE43K" and invalid!="9CC3F8&e":
# scrapedtitle = item.title+video[0]
# videourl = item.url
# server = video[2]
# if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+videourl+"]")
#logger.info("url=" + item.url)
    # Add to the XBMC list
#itemlist.append( Item(channel=__channel__, action="play", title=scrapedtitle , fulltitle=item.fulltitle, url=videourl , server=server , folder=False) )
# itemlist.append( Item(channel=__channel__, action="play" , title=item.title , url=item.url, thumbnail="", plot="", server=item.url))
# return itemlist
def generos(item):
logger.info("[vepelis.py] generos")
itemlist = []
    # Download the page
data = scrapertools.cachePage(item.url)
patron = '>.*?<li><a title="(.*?)" href="(.*?)"'
matches = re.compile(patron,re.DOTALL).findall(data)
if (DEBUG): scrapertools.printMatches(matches)
for match in matches:
scrapedurl = urlparse.urljoin("",match[1])
scrapedurl = scrapedurl.replace(".html","/page/0.html")
extra = scrapedurl.replace ("/page/0.html","/page/")
scrapedtitle = match[0]
#scrapedtitle = scrapedtitle.replace("","")
scrapedthumbnail = ""
scrapedplot = ""
logger.info(scrapedtitle)
if scrapedtitle=="Eroticas +18":
if config.get_setting("adult_mode") == "true":
itemlist.append( Item(channel=__channel__, action="listado2", title="Eroticas +18" , url="http://www.myhotamateurvideos.com" , thumbnail=scrapedthumbnail , plot=scrapedplot , extra="" , folder=True) )
else:
if scrapedtitle <> "" and len(scrapedtitle) < 20 and scrapedtitle <> "Iniciar Sesion":
itemlist.append( Item(channel=__channel__, action="listado2", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , extra=extra, folder=True) )
itemlist = sorted(itemlist, key=lambda Item: Item.title)
return itemlist
def alfabetico(item):
    logger.info("[vepelis.py] alfabetico")
extra = item.url
itemlist = []
itemlist.append( Item(channel=__channel__, action="listado2" , title="0-9", url="http://www.vepelis.com/letra/09.html", extra="http://www.vepelis.com/letra/09.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="A" , url="http://www.vepelis.com/letra/a.html", extra="http://www.vepelis.com/letra/a.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="B" , url="http://www.vepelis.com/letra/b.html", extra="http://www.vepelis.com/letra/b.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="C" , url="http://www.vepelis.com/letra/c.html", extra="http://www.vepelis.com/letra/c.html"))
    itemlist.append( Item(channel=__channel__, action="listado2" , title="D" , url="http://www.vepelis.com/letra/d.html", extra="http://www.vepelis.com/letra/d.html"))
    itemlist.append( Item(channel=__channel__, action="listado2" , title="E" , url="http://www.vepelis.com/letra/e.html", extra="http://www.vepelis.com/letra/e.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="F" , url="http://www.vepelis.com/letra/f.html", extra="http://www.vepelis.com/letra/f.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="G" , url="http://www.vepelis.com/letra/g.html", extra="http://www.vepelis.com/letra/g.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="H" , url="http://www.vepelis.com/letra/h.html", extra="http://www.vepelis.com/letra/h.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="I" , url="http://www.vepelis.com/letra/i.html", extra="http://www.vepelis.com/letra/i.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="J" , url="http://www.vepelis.com/letra/j.html", extra="http://www.vepelis.com/letra/j.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="K" , url="http://www.vepelis.com/letra/k.html", extra="http://www.vepelis.com/letra/k.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="L" , url="http://www.vepelis.com/letra/l.html", extra="http://www.vepelis.com/letra/l.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="M" , url="http://www.vepelis.com/letra/m.html", extra="http://www.vepelis.com/letra/m.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="N" , url="http://www.vepelis.com/letra/n.html", extra="http://www.vepelis.com/letra/n.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="O" , url="http://www.vepelis.com/letra/o.html", extra="http://www.vepelis.com/letra/o.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="P" , url="http://www.vepelis.com/letra/p.html", extra="http://www.vepelis.com/letra/p.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="Q" , url="http://www.vepelis.com/letra/q.html", extra="http://www.vepelis.com/letra/q.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="R" , url="http://www.vepelis.com/letra/r.html", extra="http://www.vepelis.com/letra/r.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="S" , url="http://www.vepelis.com/letra/s.html", extra="http://www.vepelis.com/letra/s.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="T" , url="http://www.vepelis.com/letra/t.html", extra="http://www.vepelis.com/letra/t.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="U" , url="http://www.vepelis.com/letra/u.html", extra="http://www.vepelis.com/letra/u.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="V" , url="http://www.vepelis.com/letra/v.html", extra="http://www.vepelis.com/letra/v.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="W" , url="http://www.vepelis.com/letra/w.html", extra="http://www.vepelis.com/letra/w.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="X" , url="http://www.vepelis.com/letra/x.html", extra="http://www.vepelis.com/letra/x.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="Y" , url="http://www.vepelis.com/letra/y.html", extra="http://www.vepelis.com/letra/y.html"))
itemlist.append( Item(channel=__channel__, action="listado2" , title="Z" , url="http://www.vepelis.com/letra/z.html", extra="http://www.vepelis.com/letra/z.html"))
return itemlist
def listado2(item):
logger.info("[vepelis.py] listado2")
extra = item.extra
itemlist = []
    # Download the page
data = scrapertools.cachePage(item.url)
patron = '<h2 class="titpeli.*?<a href="([^"]+)" title="([^"]+)".*?peli_img_img">.*?<img src="([^"]+)".*?<strong>Idioma</strong>:.*?/>([^"]+)</div>.*?<strong>Calidad</strong>: ([^"]+)</div>'
matches = re.compile(patron,re.DOTALL).findall(data)
if (DEBUG): scrapertools.printMatches(matches)
for match in matches:
scrapedurl = match[0] #urlparse.urljoin("",match[0])
scrapedtitle = match[1] + ' - ' + match[4]
scrapedtitle = unicode( scrapedtitle, "iso-8859-1" , errors="replace" ).encode("utf-8")
scrapedthumbnail = match[2]
#scrapedplot = match[0]
#itemlist.append( Item(channel=__channel__, action="findvideos", title=scrapedtitle , fulltitle=scrapedtitle, url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
itemlist.append( Item(channel=__channel__, action="findvideos", title=scrapedtitle, fulltitle=scrapedtitle, url=scrapedurl , thumbnail=scrapedthumbnail , folder=True) )
#if extra<>"":
    # Extract the next-page marker
#patron = 'page=(.*?)"><span><b>'
patron = '<span><b>(.*?)</b></span>'
matches = re.compile(patron,re.DOTALL).findall(data)
#if DEBUG: scrapertools.printMatches(matches)
for match in matches:
#if len(matches)>0:
nu = int(match[0]) + 1
scrapedurl = extra + "?page=" + str(nu)
scrapedtitle = "!Pagina Siguiente ->"
scrapedthumbnail = ""
scrapedplot = ""
itemlist.append( Item(channel=__channel__, action="listado2", title=scrapedtitle , fulltitle=scrapedtitle, url=scrapedurl , thumbnail=scrapedthumbnail , extra=extra , folder=True) )
return itemlist
def search(item,texto):
logger.info("[vepelis.py] search")
itemlist = []
texto = texto.replace(" ","+")
try:
# Series
item.url="http://www.vepelis.com/buscar/?q=%s"
item.url = item.url % texto
item.extra = ""
itemlist.extend(listado2(item))
itemlist = sorted(itemlist, key=lambda Item: Item.title)
return itemlist
    # Catch the exception so the global search is not interrupted if a channel fails
except:
import sys
for line in sys.exc_info():
logger.error( "%s" % line )
return []
'''url = "http://www.peliculasaudiolatino.com/series-anime"
data = scrapertools.cachePage(url)
# Extrae las entradas de todas series
patronvideos = '<li>[^<]+'
patronvideos += '<a.+?href="([\D]+)([\d]+)">[^<]+'
patronvideos += '.*?/>(.*?)</a>'
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
scrapedtitle = match[2].strip()
# Realiza la busqueda
if scrapedtitle.lower()==texto.lower() or texto.lower() in scrapedtitle.lower():
logger.info(scrapedtitle)
scrapedurl = urlparse.urljoin(url,(match[0]+match[1]))
scrapedthumbnail = urlparse.urljoin("http://www.peliculasaudiolatino.com/images/series/",(match[1]+".png"))
scrapedplot = ""
# Añade al listado
itemlist.append( Item(channel=__channel__, action="listacapitulos", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
return itemlist'''
# Automatic channel check: this function must return "True" if the channel is OK.
def test():
from servers import servertools
# mainlist
mainlist_items = mainlist(Item())
    # Accept the channel if any of the "Novedades" videos returns mirrors
novedades_items = listado2(mainlist_items[0])
bien = False
for novedades_item in novedades_items:
mirrors = servertools.find_video_items( item=novedades_item )
if len(mirrors)>0:
bien = True
break
return bien
| gpl-3.0 | 6,332,740,761,873,854,000 | 55.691589 | 283 | 0.652599 | false |
smn/malva | malva/utils.py | 1 | 1658 | # -*- test-case-name: malva.tests.test_utils -*-
from twisted.internet.serialport import SerialPort
from twisted.internet.defer import DeferredList, Deferred
from twisted.internet import reactor
from txgsm import txgsm
from serial.tools import list_ports
class ModemProbe(object):
protocol = txgsm.TxGSMProtocol
serial_port_class = SerialPort
def __init__(self, verbose):
self.verbose = verbose
def available_ports(self):
return list_ports.comports()
def probe_ports(self, timeout=2):
dl = [self.probe_port(port, timeout)
for port, _, _ in self.available_ports()]
return DeferredList(dl, consumeErrors=True)
def setup_protocol(self, port):
# separate function for easier stubbing in a test
proto = self.protocol()
proto.verbose = self.verbose
self.serial_port_class(proto, port, reactor)
return proto
def probe_port(self, port, timeout):
def get_results(probe_result):
(_, imsi, _, manufacturer, _) = probe_result
return (port, imsi, manufacturer)
d = self.get_modem(port, timeout)
d.addCallback(lambda modem: modem.probe())
d.addCallback(get_results)
return d
def get_modems(self, timeout=2):
dl = [self.get_modem(port, timeout)
for port, _, _ in self.available_ports()]
return DeferredList(dl, consumeErrors=True)
def get_modem(self, port, timeout):
d = Deferred()
d.addCallback(self.setup_protocol)
reactor.callLater(timeout, d.cancel)
reactor.callLater(0, d.callback, port)
return d
| bsd-3-clause | 5,435,535,710,121,342,000 | 28.607143 | 57 | 0.638721 | false |
codycollier/netropy | netropy/record.py | 1 | 2678 | """record
The record data structure and helper functions.
"""
import collections
import hashlib
# -----------------------------------------------------------------------------
# The main data structure
# -----------------------------------------------------------------------------
record_fields = ['version', 'frequency', 'timeStamp',
'seedValue', 'previousOutputValue',
'signatureValue', 'outputValue', 'statusCode']
record_field_ints = ('frequency', 'timeStamp')
Record = collections.namedtuple('Record', record_fields)
# -----------------------------------------------------------------------------
# Parsing helpers
# -----------------------------------------------------------------------------
def _extract_value(field_name, raw_xml):
"""Extract a value from raw xml
Simplistic string parsing version...
"""
val = raw_xml.split("%s>" % field_name)[1].rstrip('</')
return val
def parse_record_xml(record_xml):
"""Parse record xml and return a dictionary
Simplistic string parsing version...
"""
rec = {}
for field_name in record_fields:
val = _extract_value(field_name, record_xml)
if field_name in record_field_ints:
val = int(val)
rec[field_name] = val
return rec
# -----------------------------------------------------------------------------
# Record validation
# -----------------------------------------------------------------------------
def verify_record(record):
"""Verify a record is internally consistent
signatureValue - This can't be verified as there is no public key
outputValue - This should be a hash of the signatureValue
From the schema file info for outputValue:
The SHA-512 hash of the signatureValue as a 64 byte hex string
reminder:
The outputValue hash is a hash of the signatureValue byte string, not
the signatureValue hex string. See decode('hex').
"""
signature_value = record['signatureValue']
output_value = record['outputValue']
sv_hash = hashlib.sha512(signature_value.decode('hex')).hexdigest().upper()
return sv_hash == output_value
def verify_pair(record1, record2):
"""Verify two records which are chained together
Any given record (except the first) should be chained to the previous
by a matching hash in previousOutputValue.
From the schema file info for outputValue:
The SHA-512 hash value for the previous record - 64 byte hex string
"""
rec1_output_value = record1['outputValue']
rec2_previous_output_value = record2['previousOutputValue']
return rec1_output_value == rec2_previous_output_value
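# Illustrative sketch (not part of the original module): building a minimal
# record pair and checking it with the helpers above. The hex signature below
# is a made-up placeholder, not real beacon output.
def _example_verification():
    sig = 'AB' * 64
    out = hashlib.sha512(sig.decode('hex')).hexdigest().upper()
    rec1 = {'signatureValue': sig, 'outputValue': out}
    rec2 = {'previousOutputValue': out}
    return verify_record(rec1) and verify_pair(rec1, rec2)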
| mit | -1,230,551,760,263,086,600 | 28.428571 | 79 | 0.558626 | false |
jerpat/csmake | csmake-manifest/CsmakeModules/CsversionHLinuxConfigApt.py | 1 | 3468 | # <copyright>
# (c) Copyright 2017 Hewlett Packard Enterprise Development LP
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# </copyright>
from Csmake.CsmakeAspect import CsmakeAspect
import urlparse
from datetime import datetime
import sys
class CsversionHLinuxConfigApt(CsmakeAspect):
"""Purpose: To capture the information provided to a HLinuxConfigApt
section.
Options: tag - Provides a context name for the information
for example - cs-mgmt-base-image or cs-mgmt-sources
Joinpoints: end__build - captures the metadata from the section
Creates Environment:
__Csversion__ - A dictionary where product metadata is stored under
'product'. 'product' is product info keyed off of
the type of data stored, in this case 'apt'.
The same metadata/tag combination will be overwritten
if pulled twice.
The structure of the product dictionary is
a dictionary of tags from builds with apt
{ 'product' : { <tag> : { 'apt' : { <apt options>} } } }
"""
REQUIRED_OPTIONS = ['tag']
def end__build(self, phase, options, hlinuxsection, hlinuxoptions):
if '__Csversion__' not in self.env.env:
self.env.env['__Csversion__'] = {}
self.log.debug("__Csversion__ not found creating new")
if 'product' not in self.env.env['__Csversion__']:
self.env.env['__Csversion__']['product'] = {}
self.log.debug("product not found, creating new")
versdict = self.env.env['__Csversion__']['product']
if 'apt' not in versdict:
versdict['apt'] = {}
self.log.debug("build data not found, creating new")
else:
if options['tag'] in versdict['apt']:
self.log.warning("apt, Tag: %s :: Overwriting %s",
options['tag'],
str(versdict['apt'][options['tag']]) )
versdict['apt'][options['tag']] = dict(hlinuxoptions)
self.log.passed()
return True
| gpl-3.0 | 2,895,465,660,196,402,700 | 45.24 | 80 | 0.647636 | false |
cemoody/lda2vec | lda2vec/negative_sampling.py | 1 | 7611 | import numpy
import six
from chainer import cuda
from chainer import function
from chainer.utils import type_check
class NegativeSamplingFunction(function.Function):
ignore_label = -1
def __init__(self, sampler, sample_size):
self.sampler = sampler
self.sample_size = sample_size
def _make_samples(self, t):
if hasattr(self, 'samples'):
return self.samples # for testing
size = int(t.shape[0])
# first one is the positive, and others are sampled negatives
samples = self.sampler((size, self.sample_size + 1))
samples[:, 0] = t
self.samples = samples
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 3)
x_type, t_type, w_type = in_types
type_check.expect(
x_type.dtype == numpy.float32,
x_type.ndim == 2,
t_type.dtype == numpy.int32,
t_type.ndim == 1,
x_type.shape[0] == t_type.shape[0],
w_type.dtype == numpy.float32,
w_type.ndim == 2,
)
def forward_cpu(self, inputs):
x, t, W = inputs
self.ignore_mask = (t != self.ignore_label)
self._make_samples(t)
loss = numpy.float32(0.0)
for i, (ix, k) in enumerate(six.moves.zip(x[self.ignore_mask],
self.samples[self.ignore_mask])):
w = W[k]
f = w.dot(ix)
f[0] *= -1 # positive sample
loss += numpy.sum(numpy.logaddexp(f, 0))
return numpy.array(loss, numpy.float32),
def forward_gpu(self, inputs):
x, t, W = inputs
self.ignore_mask = (t != self.ignore_label)
n_in = x.shape[1]
self._make_samples(t)
self.wx = cuda.elementwise(
'raw T W, raw T x, bool mask, S k, int32 c, int32 m', 'T wx',
'''
T f = 0;
if (mask == 1){
for (int j = 0; j < c; ++j) {
int x_ind[] = {(i / m), j};
int w_ind[] = {k, j};
f += x[x_ind] * W[w_ind];
}
}
wx = f;
''',
'negative_sampling_wx'
)(W, x, self.ignore_mask[:, None], self.samples, n_in,
self.sample_size + 1)
y = cuda.elementwise(
'T wx, int32 c, int32 m', 'T y',
'''
T f = wx;
if (i % m == 0) {
f = -f;
}
T loss;
if (f < 0) {
loss = __logf(1 + __expf(f));
} else {
loss = f + __logf(1 + __expf(-f));
}
y = loss;
''',
'negative_sampling_forward'
)(self.wx, n_in, self.sample_size + 1)
# TODO(okuta): merge elementwise
loss = cuda.cupy.sum(y * self.ignore_mask[:, None].astype('float32'))
return loss,
def backward_cpu(self, inputs, grads):
x, t, W = inputs
gloss, = grads
gx = numpy.zeros_like(x)
gW = numpy.zeros_like(W)
for i, (ix, k) in enumerate(six.moves.zip(x[self.ignore_mask],
self.samples[self.ignore_mask])):
w = W[k]
f = w.dot(ix)
# g == -y * gloss / (1 + exp(yf))
f[0] *= -1
g = gloss / (1 + numpy.exp(-f))
g[0] *= -1
gx[i] = g.dot(w)
for ik, ig in six.moves.zip(k, g):
gW[ik] += ig * ix
return gx, None, gW
def backward_gpu(self, inputs, grads):
cupy = cuda.cupy
x, t, W = inputs
gloss, = grads
n_in = x.shape[1]
g = cuda.elementwise(
'T wx, raw T gloss, int32 m', 'T g',
'''
T y;
if (i % m == 0) {
y = 1;
} else {
y = -1;
}
g = -y * gloss[0] / (1.0f + __expf(wx * y));
''',
'negative_sampling_calculate_g'
)(self.wx, gloss, self.sample_size + 1)
gx = cupy.zeros_like(x)
cuda.elementwise(
'raw T g, raw T W, bool mask, raw S k, int32 c, int32 m', 'T gx',
'''
int d = i / c;
T w = 0;
if (mask == 1){
for (int j = 0; j < m; ++j) {
w += g[d * m + j] * W[k[d * m + j] * c + i % c];
}
}
gx = w;
''',
'negative_sampling_calculate_gx'
)(g, W, self.ignore_mask[:, None], self.samples, n_in,
self.sample_size + 1, gx)
gW = cupy.zeros_like(W)
cuda.elementwise(
'T g, raw T x, S k, bool mask, int32 c, int32 m',
'raw T gW',
'''
T gi = g;
if (mask == 1) {
for (int j = 0; j < c; ++j) {
atomicAdd(&gW[k * c + j], gi * x[(i / m) * c + j]);
}
}
''',
'negative_sampling_calculate_gw'
)(g, x, self.samples, self.ignore_mask[:, None], n_in,
self.sample_size + 1, gW)
return gx, None, gW
def negative_sampling(x, t, W, sampler, sample_size):
"""Negative sampling loss function.
In natural language processing, especially language modeling, the number of
words in a vocabulary can be very large.
Therefore, you need to spend a lot of time calculating the gradient of the
embedding matrix.
By using the negative sampling trick you only need to calculate the
gradient for a few sampled negative examples.
The objective function is below:
.. math::
f(x, p) = \\log \\sigma(x^\\top w_p) + \\
k E_{i \\sim P(i)}[\\log \\sigma(- x^\\top w_i)],
where :math:`\sigma(\cdot)` is a sigmoid function, :math:`w_i` is the
weight vector for the word :math:`i`, and :math:`p` is a positive example.
    It is approximated with :math:`k` examples :math:`N` sampled from
probability :math:`P(i)`, like this:
.. math::
f(x, p) \\approx \\log \\sigma(x^\\top w_p) + \\
\\sum_{n \\in N} \\log \\sigma(-x^\\top w_n).
Each sample of :math:`N` is drawn from the word distribution :math:`P(w)`.
This is calculated as :math:`P(w) = \\frac{1}{Z} c(w)^\\alpha`, where
:math:`c(w)` is the unigram count of the word :math:`w`, :math:`\\alpha` is
a hyper-parameter, and :math:`Z` is the normalization constant.
Args:
x (~chainer.Variable): Batch of input vectors.
t (~chainer.Variable): Vector of groundtruth labels.
W (~chainer.Variable): Weight matrix.
sampler (function): Sampling function. It takes a shape and returns an
integer array of the shape. Each element of this array is a sample
from the word distribution. A :class:`~chainer.utils.WalkerAlias`
object built with the power distribution of word frequency is
recommended.
sample_size (int): Number of samples.
See: `Distributed Representations of Words and Phrases and their\
Compositionality <http://arxiv.org/abs/1310.4546>`_
.. seealso:: :class:`~chainer.links.NegativeSampling`.
"""
return NegativeSamplingFunction(sampler, sample_size)(x, t, W)
# Monkey-patch the chainer code to replace the negative sampling
# with the one used here
import chainer.links as L
import chainer.functions as F
negative_sampling.patched = True
L.NegativeSampling.negative_sampling = negative_sampling
F.negative_sampling = negative_sampling
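# Illustrative sketch (not part of the original module): wiring the loss up with
# the WalkerAlias sampler recommended in the docstring. The vocabulary counts,
# batch size, and dimensions are hypothetical placeholders.
def _example_negative_sampling_usage():
    from chainer.utils import WalkerAlias
    counts = numpy.array([5., 4., 3., 2., 1.], dtype=numpy.float32)
    sampler = WalkerAlias(numpy.power(counts, 0.75))
    x = numpy.random.rand(2, 3).astype(numpy.float32)
    t = numpy.array([0, 1], dtype=numpy.int32)
    W = numpy.random.rand(5, 3).astype(numpy.float32)
    return negative_sampling(x, t, W, sampler.sample, sample_size=2)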
| mit | -4,506,923,810,108,634,600 | 31.665236 | 79 | 0.496518 | false |
seraphln/onedrop | crawler/api_proxy.py | 1 | 5434 | # coding=utf8
#
"""
Common logic for interacting with the remote API server.
"""
import os
import json
import socket
import urllib
import datetime
import requests
import traceback
import config
API_HOST = "http://180.76.149.212:8083"
GRAPHQL_HOST = "%s/graphql?query=%%s" % API_HOST
def to_url_params(params):
"""
    URL-encode the request's GET parameters.
    @param params: GET parameters
@type params: Dict
:return: urllib.urlencode(params)
"""
if not params:
return ""
new_params = {}
for k,v in params.items():
if isinstance(v, unicode):
new_params[k] = v.encode('utf-8')
elif isinstance(v, str):
new_params[k] = v
else:
            raise TypeError('unsupported GET parameter value type: %r' % type(v))
return urllib.urlencode(new_params)
def request(method, url, params=None):
"""
    A wrapper around a request to the remote server.
    @param method: HTTP method of the request
    @type method: String
    @param url: PATH of the request
    @type url: String
    @param params: parameters sent with the request
    @type params: Dict
    @param ak: access_key sent with the request
@type ak: String
:return: api_response
"""
start_time = str(datetime.datetime.now())
headers = {}
headers['X-Auth-Access-Key'] = config.API_ACCESS_KEY
query_dict = {"headers": headers,
"verify": False,
"timeout": 60}
data = None if not params else params
if method == 'GET' and params:
url += '?' + to_url_params(params)
data = None
else:
if data:
query_dict["data"] = data
status_code = 0
try:
resp = requests.request(method, url, **query_dict)
status_code = resp.status_code
resp = resp.json()
except:
resp = {'success':False, 'result':traceback.format_exc()}
resp['status_code'] = status_code
resp['time'] = resp.get('time', {})
resp['time']['api_start'] = start_time
resp['time']['api_end'] = str(datetime.datetime.now())
return resp
def get_crawler_seed():
"""
获取采集的种子
:return: {"data": {"tasks": cseeds[0].get("node")}}
"""
query_str = ''' query fetchCrawlerSeeds{allCrawlerSeeds(source: "pcbaby") {
edges {
node {
id,
name,
source,
url,
status
}
}
}}
'''
data = request("GET", GRAPHQL_HOST % query_str)
cseeds = data.get("data", {}).get("allCrawlerSeeds", {}).get("edges", [{}])
print cseeds
if not cseeds or cseeds[0].get("status") == "finished":
return {}
else:
return {"data": {"seeds": cseeds[0].get("node")}}
def get_crawler_task(source):
"""
    Request a monitoring task from the remote anduin service.
    @param source: the source that the crawler task corresponds to
@type source: String
:return: {"data": {"tasks": ctasks[0].get("node")}}
"""
query_str = ''' query fetchCrawlerTasks{allCrawlerTasks(source: "%s") {
edges {
node {
id,
name,
url,
status,
category,
ttype
}
}
}}
'''
query_str = query_str % source
data = request("GET", GRAPHQL_HOST % query_str)
ctasks = data.get("data", {}).get("allCrawlerTasks", {}).get("edges", [{}])
if not ctasks or ctasks[0].get("status") == "finished":
return {}
else:
return {"data": {"tasks": ctasks[0].get("node")}}
def update_crawler_task_by_rest_api(task_result):
"""
    Update the collected data on the remote server via a POST request.
    @param task_result: the result collected by the crawler task
@type task_result: Dict
:return: {}
"""
url = "%s/update_crawler_task/" % API_HOST
data = {"task_result": task_result}
return request("POST", url, params=data)
def register_crawler_node(task_result):
    """ Register the current crawler node with the server. """
query_str = '''
mutation MTMutation {
cnodes(input: {nodeInfo: "%s"}) {
cnode {
id,
name,
remoteAddr,
status
}
}
}
'''
query_str = query_str % str(task_result)
url = GRAPHQL_HOST % query_str
return request("POST", url)
def update_crawler_task(task_result):
    """ Update the data on the remote server. """
query_str = '''
mutation MTMutation {
ctasks(input: {taskResult: "%s"}) {
ctask {
id,
status
}
}
}
'''
query_str = query_str % str(task_result)
url = GRAPHQL_HOST % query_str
return request("POST", url)
if __name__ == "__main__":
#import json
#import base64
#task_result = {"name": "%s-%s" % (socket.gethostname(), os.getpid())}
#print register_crawler_node(base64.urlsafe_b64encode(json.dumps(task_result)))
print get_crawler_seed() | gpl-3.0 | 1,172,086,396,405,874,000 | 23.528571 | 83 | 0.481359 | false |
pytn/pytn | pytn/proposals/models.py | 1 | 1067 | from django.db import models
from symposion.proposals.models import ProposalBase
class Proposal(ProposalBase):
AUDIENCE_LEVEL_NOVICE = 1
AUDIENCE_LEVEL_EXPERIENCED = 2
AUDIENCE_LEVEL_INTERMEDIATE = 3
AUDIENCE_LEVELS = [
(AUDIENCE_LEVEL_NOVICE, "Novice"),
(AUDIENCE_LEVEL_INTERMEDIATE, "Intermediate"),
(AUDIENCE_LEVEL_EXPERIENCED, "Experienced"),
]
audience_level = models.IntegerField(choices=AUDIENCE_LEVELS)
recording_release = models.BooleanField(
default=True,
help_text="By submitting your proposal, you agree to give permission to the conference organizers to record, edit, and release audio and/or video of your presentation. If you do not agree to this, please uncheck this box."
)
def __unicode__(self):
return self.title
class Meta:
abstract = True
class TalkProposal(Proposal):
class Meta:
verbose_name = "talk proposal"
class TutorialProposal(Proposal):
class Meta:
verbose_name = "tutorial proposal"
| mit | 4,846,354,523,018,538,000 | 26.358974 | 230 | 0.677601 | false |
OpenIxia/ixnetwork_client_python | ixnetwork/samples/emulation_host/emulation_host_demo.py | 1 | 2859 | import sys
import os
path = os.path.realpath(__file__)
sys.path.insert(0, path[0: path.rfind('ixnetwork')])
from ixnetwork.IxnHttp import IxnHttp
from ixnetwork.IxnConfigManagement import IxnConfigManagement
from ixnetwork.IxnPortManagement import IxnPortManagement
from ixnetwork.IxnStatManagement import IxnStatManagement
from ixnetwork.IxnEmulationHosts import IxnIgmpHostEmulation, IxnIpv4Emulation
import os
import time
import json
# connect to an existing session
ixnhttp = IxnHttp('10.200.22.48', rest_port=12345)
ixnhttp.current_session = ixnhttp.sessions()[0]
# print system information
print(ixnhttp.system_info)
# load a binary configuration
config_mgmt = IxnConfigManagement(ixnhttp)
config_filename = '%s/emulation-host-demo.ixncfg' % os.path.dirname(os.path.realpath(__file__))
config_mgmt.load_config(config_filename, upload=True, remove_chassis=True)
# get a list of vports and change the type to ethernet
query_result = ixnhttp.root.query \
.node('vport', properties=['type', 'connectedTo']) \
.go()
for vport in query_result.vport:
vport.operations.unassignports({'arg1': [vport.href], 'arg2': False})
vport.attributes.type.value = 'ethernet'
vport.attributes.connectedTo.value = 'null'
vport.update()
# assign hardware ports to virtual ports
port_mgmt = IxnPortManagement(ixnhttp)
port_mgmt.map('PE2-6/5', '10.200.109.21', '1', '3') \
.map('PE2-6/8', '10.200.109.21', '1', '4') \
.apply()
# find igmp emulation host session(s) by vport_name and mac addresses
igmp = IxnIgmpHostEmulation(ixnhttp)
igmp.find(vport_name='PE2-6/5', versionType='version2')
print(igmp.session_ids)
# find ipv4 emulation host session(s) by vport_name
ipv4 = IxnIpv4Emulation(ixnhttp)
ipv4.find(vport_name='PE2-6/8')
print(ipv4.session_ids)
# low level API start all protocols
ixnhttp.root.operations.startallprotocols()
# wait for ipv4 and igmp emulation sessions to be in an up state
ipv4.wait_until(IxnIpv4Emulation.STATE_UP, timeout=90)
igmp.wait_until(IxnIgmpHostEmulation.STATE_UP, timeout=90)
# stop the ipv4 and igmp emulation sessions
igmp.stop(IxnIgmpHostEmulation.STATE_NOTSTARTED, timeout=90)
ipv4.stop(IxnIpv4Emulation.STATE_NOTSTARTED, timeout=90)
# print statistics
time.sleep(5)
stat_mgmt = IxnStatManagement(ixnhttp)
views = stat_mgmt.get_views()
print(views)
port_summary_page = stat_mgmt.get_view_page('Port Summary')
stat_mgmt.print_view_page(port_summary_page, column_captions=[
'Port', 'Sessions Total', 'Sessions Up', 'Sessions Down', 'Sessions Not Started'])
protocols_summary_page = stat_mgmt.get_view_page('Protocols Summary')
stat_mgmt.print_view_page(protocols_summary_page, column_captions=[
'Protocol Type', 'Sessions Total', 'Sessions Up', 'Sessions Down', 'Sessions Not Started'])
| mit | 4,229,875,265,070,379,500 | 31.244186 | 95 | 0.733123 | false |
PhilHarnish/forge | src/puzzle/problems/crossword/cryptic_problem.py | 1 | 10923 | import collections
from data import chain, crossword, warehouse
from data.alphabets import cryptic_keywords
from puzzle.problems.crossword import _base_crossword_problem
class CrypticProblem(_base_crossword_problem._BaseCrosswordProblem):
def __init__(self, name, lines, **kwargs):
super(CrypticProblem, self).__init__(name, lines, **kwargs)
self._plan = None
self._tokens = None
def _init(self):
if self._plan is None and self._tokens is None:
parsed, plan = _compile(self.lines[0])
self._tokens = chain.Chain(parsed)
self._plan = plan
@staticmethod
def score(lines):
if len(lines) > 1:
return 0
line = lines[0]
parts = line.split()
if any(part in cryptic_keywords.ALL_INDICATORS for part in parts):
return 1
return _base_crossword_problem.score(lines) * .9 # Lower than normal.
def _solve(self):
self._init()
solutions = _Solutions(self._notes, self._min_length, self._max_length)
_visit(self._tokens, self._plan, solutions)
return solutions
def _compile(clue):
words_api = warehouse.get('/api/words')
result = []
indicators_seen = collections.defaultdict(list)
for i, token in enumerate(crossword.tokenize_clue(clue)):
indicator_token = token
base_form = words_api.base_form(token)
if base_form in cryptic_keywords.ALL_INDICATORS:
indicator_token = base_form
if indicator_token in cryptic_keywords.ALL_INDICATORS:
for indicator in cryptic_keywords.ALL_INDICATORS[indicator_token]:
indicators_seen[indicator].append(i)
result.append([token])
plan = sorted(indicators_seen.items(), key=lambda i: _VISIT_ORDER[i[0]])
return result, plan
def _visit(tokens, plan, solutions):
words_api = warehouse.get('/api/words')
# First pass: perform any necessary expansions.
for _, words in tokens.items():
source = words[0]
if source in cryptic_keywords.SHORTHAND_CONVERSIONS:
words.extend(cryptic_keywords.SHORTHAND_CONVERSIONS[source])
words.extend(words_api.expand(source).keys())
for indicator, positions in plan:
try:
_VISIT_MAP[indicator](tokens, positions, solutions)
except NotImplementedError:
print('Indicator for "%s" not implemented' % indicator)
raise NotImplementedError('Indicator for "%s" not implemented' % indicator)
except Exception:
print('Error visiting %s for %s' % (
indicator, ' '.join(words[0] for words in tokens)
))
raise
if not solutions:
# Attempt to find the solution from pieces of the expanded words.
_visit_concatenate(tokens, [], solutions)
if not solutions:
# Finally, attempt to find the solution from just 1 expanded word.
_visit_edge_words(tokens, [], solutions)
def _visit_initial(tokens, positions, solutions):
del solutions # Initial indicator produces more tokens.
for position in positions:
tokens.pop(position)
for _, words in tokens.items():
source = words[0]
words.append(source[0])
for position in reversed(positions):
tokens.restore(position)
def _visit_edge_words(tokens, positions, solutions):
del positions
top_words = warehouse.get('/words/unigram')
for edge in (tokens[0], tokens[-1]):
for token in edge[1:]: # Skip first word.
if token in top_words:
solutions.add(token, .33, 'synonym for edge word "%s"', [[edge[0]]])
def _visit_word_edges(tokens, positions, solutions):
del solutions # Edge indicator produces more tokens.
for position in positions:
tokens.pop(position)
for _, words in tokens.items():
source = words[0]
words.append(source[0] + source[-1])
for position in reversed(positions):
tokens.restore(position)
def _visit_reversal(tokens, positions, solutions):
del solutions # Initial indicator produces more tokens.
for position in positions:
tokens.pop(position)
for _, words in tokens.items():
source = words[0]
words.append(''.join(reversed(source)))
for position in reversed(positions):
tokens.restore(position)
def _visit_embedded(tokens, positions, solutions):
min_length = solutions.min_length
max_length = solutions.max_length
acc = []
pos_map = []
start_map = []
for pos, expanded in tokens.items():
source = expanded[0]
acc.append(source)
for i in range(len(source)):
pos_map.append(pos)
start_map.append(i == 0)
search_text = ''.join(acc)
trie = warehouse.get('/words/unigram/trie')
interesting_threshold = trie.interesting_threshold()
end = len(search_text) - min_length
ignored = set(acc) # Ignore words from clue itself.
for offset in range(end + 1): # End should be inclusive.
for result, weight in trie.walk(search_text[offset:]):
if result in ignored:
continue
result_length = len(result)
if result_length >= min_length and result_length <= max_length:
base_weight = min(1, weight / interesting_threshold)
# Demote scores for start-of-word.
if start_map[offset]:
base_weight *= .9
# Score = % of word not banned by `positions`.
score = base_weight * (
sum(pos_map[i] not in positions for i in
range(offset, offset + result_length))
) / result_length
start_pos = pos_map[offset]
end_pos = pos_map[offset + result_length - 1] + 1
embedded_slice = tokens[start_pos:end_pos]
solutions.add(result, score, 'embedded in %s', embedded_slice)
def _visit_anagram(tokens, positions, solutions):
end = len(tokens)
min_length = solutions.min_length
max_length = solutions.max_length
anagram_positions = set(positions)
anagram_index = warehouse.get('/words/unigram/anagram_index')
trie = warehouse.get('/words/unigram/trie')
interesting_threshold = trie.interesting_threshold()
banned_max = len(anagram_positions)
def _add(acc, banned_max):
parts = []
banned_matches = 0
for word, pos in acc:
parts.append(word)
if pos in anagram_positions:
banned_matches += 1
elif word in cryptic_keywords.CONCATENATE_INDICATORS:
# Special case for concatenate keywords which frequently join two
# chunks of an anagram.
banned_matches += 1
banned_max += 1
solution = ''.join(parts)
if solution not in anagram_index:
return
anagrams = anagram_index[solution]
    # Score is 0 if all acc are from positions; .5 if 1/2 are, etc.
if not anagram_positions:
score = 1
else:
score = 1 - (banned_matches / banned_max)
for anagram in anagrams:
if anagram != solution:
base_weight = min(1, trie[anagram] / interesting_threshold)
solutions.add(anagram, base_weight * score, 'anagram of %s', acc)
def _crawl(pos, acc, acc_length):
# Try to form total word from all remaining words.
for i in range(pos, end):
words = tokens[i]
for word in words:
word_length = len(word)
new_length = acc_length + word_length
if new_length > max_length:
continue
acc_length = new_length
acc.append((word, i))
if min_length <= new_length <= max_length:
_add(acc, banned_max)
elif new_length < max_length:
_crawl(i + 1, acc, acc_length)
acc_length -= word_length
acc.pop()
_crawl(0, [], 0)
def _visit_concatenate(tokens, positions, solutions):
end = len(tokens)
min_length = solutions.min_length
max_length = solutions.max_length
concatenate_positions = set(positions)
trie = warehouse.get('/words/unigram/trie')
interesting_threshold = trie.interesting_threshold()
def _add(acc):
if len(acc) == 1:
return # Ignore complete words in input.
parts = []
banned_matches = 0
for word, pos in acc:
parts.append(word)
if pos in concatenate_positions:
banned_matches += 1
solution = ''.join(parts)
if solution not in trie:
return
    # Score is 0 if all acc are from positions; .5 if 1/2 are, etc.
if not concatenate_positions:
score = 1
else:
score = 1 - (banned_matches / len(concatenate_positions))
base_weight = min(1, trie[solution] / interesting_threshold)
solutions.add(solution, base_weight * score, 'concatenation of %s', acc)
def _crawl(pos, acc, acc_length):
if pos in concatenate_positions and pos + 1 < end:
# Optionally, skip ahead to next position using current acc.
_crawl(pos + 1, acc, acc_length)
# Try to form total word from all remaining starting points.
for i in range(pos, end):
words = tokens[i]
for word in words:
word_length = len(word)
new_length = acc_length + word_length
if new_length > max_length:
continue
acc_length = new_length
acc.append((word, i))
        if min_length <= new_length <= max_length:
_add(acc)
elif new_length < max_length and trie.has_keys_with_prefix(
''.join(a[0] for a in acc)):
_crawl(i + 1, acc, acc_length)
acc_length -= word_length
acc.pop()
_crawl(0, [], 0)
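# Note on _crawl above: positions listed in concatenate_positions (the
# indicator words) may be skipped via the recursive call at the top, and
# trie.has_keys_with_prefix() prunes accumulations that can no longer be
# extended into any dictionary word.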
def _visit_homophone(tokens, positions, solutions):
del tokens, positions
if not solutions:
raise NotImplementedError('Homophones not implemented')
def _visit_insert(tokens, positions, solutions):
if not solutions:
# "INSERT" indicator is usually a subset of functionality provided by
# "ANAGRAM".
_visit_anagram(tokens, positions, solutions)
if not solutions:
    raise NotImplementedError('Insert indicator not fully implemented')
class _Solutions(dict):
def __init__(self, notes, min_length, max_length):
super(_Solutions, self).__init__()
self._notes = notes
self.min_length = min_length
self.max_length = max_length
def add(self, solution, weight, note, ingredients):
if solution not in self or weight > self[solution]:
self[solution] = weight
self._notes[solution].clear()
if note:
self._notes[solution].append(
note % ', '.join(words[0] for words in ingredients))
_VISIT_MAP = collections.OrderedDict([
# Embedded clues only use original words.
(cryptic_keywords.EMBEDDED_INDICATORS, _visit_embedded),
# Producers.
(cryptic_keywords.INITIAL_INDICATORS, _visit_initial),
(cryptic_keywords.EDGES_INDICATORS, _visit_word_edges),
(cryptic_keywords.REVERSAL_INDICATORS, _visit_reversal),
# Reducers.
(cryptic_keywords.ANAGRAM_INDICATORS, _visit_anagram),
(cryptic_keywords.CONCATENATE_INDICATORS, _visit_concatenate),
# TODO: Incomplete implementation. Redundant with anagram indicator.
(cryptic_keywords.INSERT_INDICATORS, _visit_insert),
# TODO: Incomplete implementation. This should be up with "producers".
(cryptic_keywords.HOMOPHONE_INDICATORS, _visit_homophone),
])
_VISIT_ORDER = dict([(indicator, i) for i, indicator in enumerate(_VISIT_MAP)])
| mit | -3,694,395,314,970,705,000 | 33.457413 | 81 | 0.657786 | false |
adykstra/mne-python | mne/decoding/base.py | 1 | 20408 | """Base class copy from sklearn.base."""
# Authors: Gael Varoquaux <[email protected]>
# Romain Trachel <[email protected]>
# Alexandre Gramfort <[email protected]>
# Jean-Remi King <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import time
import numbers
from ..parallel import parallel_func
from ..fixes import BaseEstimator, is_classifier
from ..utils import check_version, logger, warn
class LinearModel(BaseEstimator):
"""Compute and store patterns from linear models.
The linear model coefficients (filters) are used to extract discriminant
neural sources from the measured data. This class computes the
corresponding patterns of these linear filters to make them more
interpretable [1]_.
Parameters
----------
model : object | None
A linear model from scikit-learn with a fit method
that updates a ``coef_`` attribute.
If None the model will be LogisticRegression.
Attributes
----------
filters_ : ndarray, shape ([n_targets], n_features)
If fit, the filters used to decompose the data.
patterns_ : ndarray, shape ([n_targets], n_features)
If fit, the patterns used to restore M/EEG signals.
Notes
-----
.. versionadded:: 0.10
See Also
--------
CSP
mne.preprocessing.ICA
mne.preprocessing.Xdawn
References
----------
.. [1] Haufe, S., Meinecke, F., Gorgen, K., Dahne, S., Haynes, J.-D.,
           Blankertz, B., & Biessmann, F. (2014). On the interpretation of
weight vectors of linear models in multivariate neuroimaging.
NeuroImage, 87, 96-110.
"""
def __init__(self, model=None): # noqa: D102
if model is None:
from sklearn.linear_model import LogisticRegression
if check_version('sklearn', '0.20'):
model = LogisticRegression(solver='liblinear')
else:
model = LogisticRegression()
self.model = model
self._estimator_type = getattr(model, "_estimator_type", None)
def fit(self, X, y, **fit_params):
"""Estimate the coefficients of the linear model.
        Save the coefficients in the attribute ``filters_`` and
        compute the attribute ``patterns_``.
Parameters
----------
X : array, shape (n_samples, n_features)
The training input samples to estimate the linear coefficients.
y : array, shape (n_samples, [n_targets])
The target values.
**fit_params : dict of string -> object
Parameters to pass to the fit method of the estimator.
Returns
-------
self : instance of LinearModel
Returns the modified instance.
"""
X, y = np.asarray(X), np.asarray(y)
if X.ndim != 2:
raise ValueError('LinearModel only accepts 2-dimensional X, got '
'%s instead.' % (X.shape,))
if y.ndim > 2:
raise ValueError('LinearModel only accepts up to 2-dimensional y, '
'got %s instead.' % (y.shape,))
# fit the Model
self.model.fit(X, y, **fit_params)
# Computes patterns using Haufe's trick: A = Cov_X . W . Precision_Y
inv_Y = 1.
X = X - X.mean(0, keepdims=True)
if y.ndim == 2 and y.shape[1] != 1:
y = y - y.mean(0, keepdims=True)
inv_Y = np.linalg.pinv(np.cov(y.T))
self.patterns_ = np.cov(X.T).dot(self.filters_.T.dot(inv_Y)).T
return self
@property
def filters_(self):
if not hasattr(self.model, 'coef_'):
raise ValueError('model does not have a `coef_` attribute.')
filters = self.model.coef_
if filters.ndim == 2 and filters.shape[0] == 1:
filters = filters[0]
return filters
def transform(self, X):
"""Transform the data using the linear model.
Parameters
----------
X : array, shape (n_samples, n_features)
The data to transform.
Returns
-------
y_pred : array, shape (n_samples,)
The predicted targets.
"""
return self.model.transform(X)
def fit_transform(self, X, y):
"""Fit the data and transform it using the linear model.
Parameters
----------
X : array, shape (n_samples, n_features)
The training input samples to estimate the linear coefficients.
y : array, shape (n_samples,)
The target values.
Returns
-------
y_pred : array, shape (n_samples,)
The predicted targets.
"""
return self.fit(X, y).transform(X)
def predict(self, X):
"""Compute predictions of y from X.
Parameters
----------
X : array, shape (n_samples, n_features)
The data used to compute the predictions.
Returns
-------
y_pred : array, shape (n_samples,)
The predictions.
"""
return self.model.predict(X)
def predict_proba(self, X):
"""Compute probabilistic predictions of y from X.
Parameters
----------
X : array, shape (n_samples, n_features)
The data used to compute the predictions.
Returns
-------
y_pred : array, shape (n_samples, n_classes)
The probabilities.
"""
return self.model.predict_proba(X)
def decision_function(self, X):
"""Compute distance from the decision function of y from X.
Parameters
----------
X : array, shape (n_samples, n_features)
The data used to compute the predictions.
Returns
-------
y_pred : array, shape (n_samples, n_classes)
The distances.
"""
return self.model.decision_function(X)
def score(self, X, y):
"""Score the linear model computed on the given test data.
Parameters
----------
X : array, shape (n_samples, n_features)
The data to transform.
y : array, shape (n_samples,)
The target values.
Returns
-------
score : float
Score of the linear model
"""
return self.model.score(X, y)
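# Minimal usage sketch for LinearModel (editor's illustration with synthetic
# data; assumes scikit-learn is installed and is not part of the original
# module):
#
#     >>> import numpy as np
#     >>> from sklearn.linear_model import LogisticRegression
#     >>> rng = np.random.RandomState(0)
#     >>> X = rng.randn(100, 20)           # 100 samples, 20 "channels"
#     >>> y = (X[:, 0] > 0).astype(int)    # label driven by the first channel
#     >>> model = LinearModel(LogisticRegression(solver='liblinear'))
#     >>> model = model.fit(X, y)
#     >>> model.filters_.shape, model.patterns_.shape
#     ((20,), (20,))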
def _set_cv(cv, estimator=None, X=None, y=None):
"""Set the default CV depending on whether clf is classifier/regressor."""
# Detect whether classification or regression
if estimator in ['classifier', 'regressor']:
est_is_classifier = estimator == 'classifier'
else:
est_is_classifier = is_classifier(estimator)
# Setup CV
if check_version('sklearn', '0.18'):
from sklearn import model_selection as models
from sklearn.model_selection import (check_cv, StratifiedKFold, KFold)
if isinstance(cv, (int, np.int)):
XFold = StratifiedKFold if est_is_classifier else KFold
cv = XFold(n_splits=cv)
elif isinstance(cv, str):
if not hasattr(models, cv):
raise ValueError('Unknown cross-validation')
cv = getattr(models, cv)
cv = cv()
cv = check_cv(cv=cv, y=y, classifier=est_is_classifier)
else:
from sklearn import cross_validation as models
from sklearn.cross_validation import (check_cv, StratifiedKFold, KFold)
if isinstance(cv, (int, np.int)):
if est_is_classifier:
cv = StratifiedKFold(y=y, n_folds=cv)
else:
cv = KFold(n=len(y), n_folds=cv)
elif isinstance(cv, str):
if not hasattr(models, cv):
raise ValueError('Unknown cross-validation')
cv = getattr(models, cv)
if cv.__name__ not in ['KFold', 'LeaveOneOut']:
raise NotImplementedError('CV cannot be defined with str for'
' sklearn < .017.')
cv = cv(len(y))
cv = check_cv(cv=cv, X=X, y=y, classifier=est_is_classifier)
# Extract train and test set to retrieve them at predict time
if hasattr(cv, 'split'):
cv_splits = [(train, test) for train, test in
cv.split(X=np.zeros_like(y), y=y)]
else:
# XXX support sklearn.cross_validation cv
cv_splits = [(train, test) for train, test in cv]
if not np.all([len(train) for train, _ in cv_splits]):
raise ValueError('Some folds do not have any train epochs.')
return cv, cv_splits
def _check_estimator(estimator, get_params=True):
"""Check whether an object has the methods required by sklearn."""
valid_methods = ('predict', 'transform', 'predict_proba',
'decision_function')
if (
(not hasattr(estimator, 'fit')) or
(not any(hasattr(estimator, method) for method in valid_methods))
):
        raise ValueError('estimator must be a scikit-learn transformer or '
                         'an estimator with a fit method and a predict-like '
                         '(e.g. predict_proba) or a transform method.')
if get_params and not hasattr(estimator, 'get_params'):
raise ValueError('estimator must be a scikit-learn transformer or an '
'estimator with the get_params method that allows '
'cloning.')
def _get_inverse_funcs(estimator, terminal=True):
"""Retrieve the inverse functions of an pipeline or an estimator."""
inverse_func = [False]
if hasattr(estimator, 'steps'):
# if pipeline, retrieve all steps by nesting
inverse_func = list()
for _, est in estimator.steps:
inverse_func.extend(_get_inverse_funcs(est, terminal=False))
elif hasattr(estimator, 'inverse_transform'):
# if not pipeline attempt to retrieve inverse function
inverse_func = [estimator.inverse_transform]
    # If terminal node, check that the last estimator is a classifier,
# and remove it from the transformers.
if terminal:
last_is_estimator = inverse_func[-1] is False
all_invertible = not(False in inverse_func[:-1])
if last_is_estimator and all_invertible:
# keep all inverse transformation and remove last estimation
inverse_func = inverse_func[:-1]
else:
inverse_func = list()
return inverse_func
def get_coef(estimator, attr='filters_', inverse_transform=False):
"""Retrieve the coefficients of an estimator ending with a Linear Model.
This is typically useful to retrieve "spatial filters" or "spatial
patterns" of decoding models [1]_.
Parameters
----------
estimator : object | None
An estimator from scikit-learn.
attr : str
The name of the coefficient attribute to retrieve, typically
``'filters_'`` (default) or ``'patterns_'``.
inverse_transform : bool
If True, returns the coefficients after inverse transforming them with
the transformer steps of the estimator.
Returns
-------
coef : array
The coefficients.
References
----------
.. [1] Haufe, S., Meinecke, F., Gorgen, K., Dahne, S., Haynes, J.-D.,
Blankertz, B., & Biessmann, F. (2014). On the interpretation of weight
vectors of linear models in multivariate neuroimaging. NeuroImage, 87,
96-110. doi:10.1016/j.neuroimage.2013.10.067.
"""
# Get the coefficients of the last estimator in case of nested pipeline
est = estimator
while hasattr(est, 'steps'):
est = est.steps[-1][1]
# If SlidingEstimator, loop across estimators
if hasattr(est, 'estimators_'):
coef = list()
for this_est in est.estimators_:
coef.append(get_coef(this_est, attr, inverse_transform))
coef = np.transpose(coef)
elif not hasattr(est, attr):
raise ValueError('This estimator does not have a %s '
'attribute.' % attr)
else:
coef = getattr(est, attr)
# inverse pattern e.g. to get back physical units
if inverse_transform:
if not hasattr(estimator, 'steps') and not hasattr(est, 'estimators_'):
raise ValueError('inverse_transform can only be applied onto '
'pipeline estimators.')
# The inverse_transform parameter will call this method on any
# estimator contained in the pipeline, in reverse order.
for inverse_func in _get_inverse_funcs(estimator)[::-1]:
coef = inverse_func(np.array([coef]))[0]
return coef
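# Typical use of get_coef (editor's sketch with a hypothetical pipeline):
# after fitting, e.g.,
#     pipe = make_pipeline(StandardScaler(),
#                          LinearModel(LogisticRegression()))
# on data of shape (n_samples, n_channels), calling
#     get_coef(pipe, 'patterns_', inverse_transform=True)
# returns the patterns mapped back through the scaler's inverse_transform.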
def cross_val_multiscore(estimator, X, y=None, groups=None, scoring=None,
cv=None, n_jobs=1, verbose=0, fit_params=None,
pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation.
Parameters
----------
estimator : instance of sklearn.base.BaseEstimator
The object to use to fit the data.
Must implement the 'fit' method.
X : array-like, shape (n_samples, n_dimensional_features,)
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, shape (n_samples, n_targets,)
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/test set.
scoring : string, callable | None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
Note that when using an estimator which inherently returns
multidimensional output - in particular, SlidingEstimator
or GeneralizingEstimator - you should set the scorer
there, not here.
cv : int, cross-validation generator | iterable
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a ``(Stratified)KFold``,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
n_jobs : int, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : int, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape (n_splits,) | shape (n_splits, n_scores)
Array of scores of the estimator for each run of the cross validation.
"""
# This code is copied from sklearn
from sklearn.base import clone
from sklearn.utils import indexable
from sklearn.metrics.scorer import check_scoring
from sklearn.model_selection._split import check_cv
X, y, groups = indexable(X, y, groups)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
cv_iter = list(cv.split(X, y, groups))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
# Note: this parallelization is implemented using MNE Parallel
parallel, p_func, n_jobs = parallel_func(_fit_and_score, n_jobs,
pre_dispatch=pre_dispatch)
scores = parallel(p_func(clone(estimator), X, y, scorer, train, test,
verbose, None, fit_params)
for train, test in cv_iter)
return np.array(scores)[:, 0, ...] # flatten over joblib output.
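# Minimal usage sketch (editor's illustration; assumes scikit-learn is
# installed and uses plain tabular data rather than M/EEG epochs):
#
#     >>> from sklearn.datasets import make_classification
#     >>> from sklearn.linear_model import LogisticRegression
#     >>> X, y = make_classification(n_samples=100, n_features=10,
#     ...                            random_state=0)
#     >>> clf = LogisticRegression(solver='liblinear')
#     >>> scores = cross_val_multiscore(clf, X, y, cv=5)
#     >>> scores.shape
#     (5,)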
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, return_n_test_samples=False,
return_times=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split."""
# This code is adapted from sklearn
from sklearn.model_selection._validation import _index_param_value
from sklearn.utils.metaestimators import _safe_split
from sklearn.utils.validation import _num_samples
if verbose > 1:
if parameters is None:
msg = ''
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = {k: _index_param_value(X, v, train)
for k, v in fit_params.items()}
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
# Note fit time as time until error
fit_time = time.time() - start_time
score_time = 0.0
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e))
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)")
else:
fit_time = time.time() - start_time
test_score = _score(estimator, X_test, y_test, scorer)
score_time = time.time() - start_time - fit_time
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
total_time = score_time + fit_time
end_msg = "%s, total=%s" % (msg, logger.short_format_time(total_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score, test_score] if return_train_score else [test_score]
if return_n_test_samples:
ret.append(_num_samples(X_test))
if return_times:
ret.extend([fit_time, score_time])
if return_parameters:
ret.append(parameters)
return ret
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set.
This code is the same as sklearn.model_selection._validation._score
but accepts to output arrays instead of floats.
"""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if hasattr(score, 'item'):
try:
# e.g. unwrap memmapped scalars
score = score.item()
except ValueError:
# non-scalar?
pass
return score
| bsd-3-clause | 8,974,590,610,377,032,000 | 35.771171 | 79 | 0.59315 | false |
mpetyx/energagement | energagement/myapp/migrations/0014_auto_20150823_1721.py | 1 | 4241 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('myapp', '0013_auto_20150731_0044'),
]
operations = [
migrations.RemoveField(
model_name='building',
name='ape_kwh',
),
migrations.RemoveField(
model_name='building',
name='co2_lt_m2',
),
migrations.RemoveField(
model_name='building',
name='co2_tn_m2',
),
migrations.RemoveField(
model_name='building',
name='cosf',
),
migrations.RemoveField(
model_name='building',
name='euro_forecast',
),
migrations.RemoveField(
model_name='building',
name='euro_m2_electricity',
),
migrations.RemoveField(
model_name='building',
name='euro_m2_liquidfuel',
),
migrations.RemoveField(
model_name='building',
name='euro_m2_monthly',
),
migrations.RemoveField(
model_name='building',
name='kwh_m2',
),
migrations.RemoveField(
model_name='building',
name='kwh_m2_cooling',
),
migrations.RemoveField(
model_name='building',
name='kwh_m2_heating',
),
migrations.RemoveField(
model_name='building',
name='kwh_m2_lighting',
),
migrations.RemoveField(
model_name='building',
name='kwh_m2_usagehours',
),
migrations.RemoveField(
model_name='building',
name='kwh_m2_user',
),
migrations.RemoveField(
model_name='building',
name='lt_m2',
),
migrations.RemoveField(
model_name='electricvehicle',
name='co2_tn_user',
),
migrations.RemoveField(
model_name='electricvehicle',
name='euro_forecast',
),
migrations.RemoveField(
model_name='electricvehicle',
name='euro_m2_monthly',
),
migrations.RemoveField(
model_name='electricvehicle',
name='euro_user',
),
migrations.RemoveField(
model_name='electricvehicle',
name='kwh_user',
),
migrations.RemoveField(
model_name='streetlighting',
name='ape_kwh',
),
migrations.RemoveField(
model_name='streetlighting',
name='co2_lt_m2',
),
migrations.RemoveField(
model_name='streetlighting',
name='co2_tn_km',
),
migrations.RemoveField(
model_name='streetlighting',
name='cosf',
),
migrations.RemoveField(
model_name='streetlighting',
name='euro_forecast',
),
migrations.RemoveField(
model_name='streetlighting',
name='euro_line',
),
migrations.RemoveField(
model_name='streetlighting',
name='euro_monthly',
),
migrations.RemoveField(
model_name='streetlighting',
name='kwh_km',
),
migrations.RemoveField(
model_name='streetlighting',
name='kwh_light',
),
migrations.RemoveField(
model_name='streetlighting',
name='kwh_line',
),
migrations.RemoveField(
model_name='streetlighting',
name='operatinglights_percentage',
),
migrations.AddField(
model_name='building',
name='co2_lt',
field=models.ManyToManyField(related_name='co2_lt_b', blank=True, null=True, to='myapp.Value'),
preserve_default=True,
),
migrations.AddField(
model_name='building',
name='lt',
field=models.ManyToManyField(related_name='lt', blank=True, null=True, to='myapp.Value'),
preserve_default=True,
),
]
| mit | -554,861,424,052,546,050 | 27.273333 | 107 | 0.500354 | false |
KDE/kcmgrub2 | kcmgrub2.py | 1 | 41045 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Grub2 Control Module
#
# Copyright 2011 Alberto Mattea <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
try:
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4 import uic
except:
print("No python Qt bindings found")
try:
from PyKDE4.kdecore import *
from PyKDE4.kdeui import *
from PyKDE4.kio import *
except:
print("No python KDE bindings found")
import os, locale, re
import pbkdf2
class PyKcm(KCModule):
def __init__(self, component_data, parent):
KCModule.__init__(self, component_data, parent)
self.ready = False
self.language = locale.getlocale(locale.LC_MESSAGES)
self.encoding = locale.getlocale(locale.LC_CTYPE)
appName = "kcmgrub2"
catalogue = "kcmgrub2"
programName = ki18n("Bootloader configuration")
version = "1.3"
description = ki18n("Grub2 configuration tool")
license = KAboutData.License_GPL_V3
copyright = ki18n("(c) 2011 Alberto Mattea")
text = KLocalizedString()
homePage = "http://kde-apps.org/content/show.php?content=137886"
bugEmail = ""
uifile = KStandardDirs.locate("data", "kcmgrub2/kcmgrub2.ui")
userdiagfile = KStandardDirs.locate("data", "kcmgrub2/userdiag.ui")
groupdiagfile = KStandardDirs.locate("data", "kcmgrub2/groupdiag.ui")
resdiagfile = KStandardDirs.locate("data", "kcmgrub2/resdiag.ui")
self.aboutData=KAboutData(appName, catalogue, programName, version, description, license, copyright, text, homePage, bugEmail)
self.aboutData.addAuthor(ki18n("Alberto Mattea"), ki18n("Maintainer"))
self.setAboutData(self.aboutData)
self.ui=uic.loadUi(uifile, self)
self.userDiagWidget=QDialog()
self.userDiag=uic.loadUi(userdiagfile, self.userDiagWidget)
self.groupDiagWidget=QDialog()
self.groupDiag=uic.loadUi(groupdiagfile, self.groupDiagWidget)
self.resDiagWidget=QDialog()
self.resDiag=uic.loadUi(resdiagfile, self.resDiagWidget)
self.setButtons(KCModule.Buttons(KCModule.Apply|KCModule.Default))
self.connectUiElements()
self.setNeedsAuthorization(True)
self.defFileOptions={"GRUB_DEFAULT": "0", "GRUB_SAVEDEFAULT": "false", "GRUB_HIDDEN_TIMEOUT": "0", "GRUB_TIMEOUT": "3", "GRUB_HIDDEN_TIMEOUT_QUIET": "true", "GRUB_DISTRIBUTOR": "`lsb_release -i -s 2> /dev/null || echo Debian`", "GRUB_CMDLINE_LINUX_DEFAULT": "\"quiet splash\"", "GRUB_TERMINAL": "", "GRUB_GFXMODE": "640x480", "GRUB_DISABLE_LINUX_UUID": "false", "GRUB_DISABLE_LINUX_RECOVERY": "\"false\"", "GRUB_BACKGROUND": "", "GRUB_DISABLE_OS_PROBER": "false", "GRUB_INIT_TUNE": ""}
self.defOtherOptions={"memtest": "true", "memtestpath": "/etc/grub.d/" + self.findMemtest() if self.findMemtest() != None else "none"}
self.defCurrentColors={"normal": ["white", "black"], "highlight": ["black", "light-gray"]}
self.errTable=(i18n("unknown"), i18n("cannot open /etc/default/grub for writing"), i18n("cannot chdir to /etc/grub.d"), i18n("cannot open files in /etc/grub.d for writing"), i18n("cannot change the execution bit for memtest"), i18n("calling update-grub failed"), i18n("cannot set permissions on grub.cfg"), i18n("calling grub-install failed"), i18n("cannot change the execution bit for the colors script"))
self.colorsList=("black", "blue", "brown", "cyan", "dark-gray", "green", "light-cyan", "light-blue", "light-green", "light-gray", "light-magenta", "light-red", "magenta", "red", "white", "yellow")
self.tcolorsNames=(i18n("Black"), i18n("Blue"), i18n("Brown"), i18n("Cyan"), i18n("Dark grey"), i18n("Green"), i18n("Light cyan"), i18n("Light blue"), i18n("Light green"), i18n("Light gray"), i18n("Light magenta"), i18n("Light red"), i18n("Magenta"), i18n("Red"), i18n("White"), i18n("Yellow"))
self.bcolorsNames=(i18n("Transparent"), i18n("Blue"), i18n("Brown"), i18n("Cyan"), i18n("Dark grey"), i18n("Green"), i18n("Light cyan"), i18n("Light blue"), i18n("Light green"), i18n("Light gray"), i18n("Light magenta"), i18n("Light red"), i18n("Magenta"), i18n("Red"), i18n("White"), i18n("Yellow"))
self.fileOptions=self.defFileOptions.copy()
self.otherOptions=self.defOtherOptions.copy()
def changed(self):
self.emit(SIGNAL("changed(bool)"), True)
def save(self):
self.setEnabled(False)
outFile=self.generateCfgfile()
self.updateGrubd()
self.action=self.authAction()
self.action.watcher().progressStep.connect(self.showProgress)
args={"cfgFile": outFile, "memtestOn": self.otherOptions["memtest"], "memtestPath": self.otherOptions["memtestpath"], "grubd": self.grubd, "grubinst": self.selDevices}
self.action.setArguments(args)
self.authSuccessful=False
reply=self.action.execute()
if not self.authSuccessful: KMessageBox.error(self, i18n("Unable to authenticate."))
if reply.failed() and self.authSuccessful:
KMessageBox.error(self, i18n("There was an error while executing the action: " + self.errTable[reply.errorCode()]))
else: self.load()
self.action.watcher().progressStep.disconnect(self.showProgress)
self.setEnabled(True)
def load(self):
self.ready=False
self.setEnabled(False)
try:
self.fileOptions.update(self.getOptionsFromFile())
self.otherOptions.update(self.getOtherOptions())
self.getGrubCfg()
self.parseGrubd()
self.getInfo()
self.loadSettings()
self.setEnabled(True)
except:
KMessageBox.error(self, i18n("Error: cannot open Grub configuration files. Make sure Grub is installed correctly."))
raise
self.ready=True
def defaults(self):
self.fileOptions=self.defFileOptions.copy()
self.otherOptions=self.defOtherOptions.copy()
self.currentColors=self.defCurrentColors.copy()
self.loadSettings()
self.ui.secEnabled.setChecked(False)
def showProgress(self, state):
if state==1:
self.authSuccessful=True
self.prg=KProgressDialog(self, i18n("Bootloader"), i18n("Updating grub configuration..."))
self.prg.setModal(True)
self.prg.setAllowCancel(False)
self.prg.progressBar().setMaximum(0)
self.prg.setMinimumDuration(0)
elif state==2:
self.prg.setLabelText(i18n("Installing grub on the selected devices..."))
elif state==3:
self.prg.close()
def getOptionsFromFile(self):
cfg={}
self.cfgFile=open("/etc/default/grub").readlines()
for line in self.cfgFile:
tokens=line.split("=", 1)
if len(tokens)==2:
setting=tokens[0].strip()
value=tokens[1].strip()
if setting in self.fileOptions.keys(): cfg[setting]=value
return cfg
def getOtherOptions(self):
cfg={}
memtest=self.findMemtest()
if memtest != None:
cfg["memtestpath"]="/etc/grub.d/" + memtest
cfg["memtest"]="true" if os.access(cfg["memtestpath"], os.F_OK | os.R_OK | os.X_OK) else "false"
else:
cfg["memtestpath"]="none"
cfg["memtest"]="false"
return cfg
def findMemtest(self):
candidates=os.listdir("/etc/grub.d/")
for candidate in candidates:
if "memtest86+" in candidate: return candidate
return None
def getGrubCfg(self):
self.readAction=KAuth.Action("org.kde.kcontrol.kcmgrub2.readcfg")
self.readAction.setHelperID("org.kde.kcontrol.kcmgrub2")
reply=self.readAction.execute()
if reply.failed(): raise OSError
else:
self.grubCfg=unicode(reply.data()[QString(u'contents')].toString(), "utf-8")
self.currentItems=self.getCurrentItems()
self.currentColors=self.getCurrentColors()
def getCurrentItems(self):
lines=self.grubCfg.splitlines()
entries=list()
for line in lines:
tokens=line.split()
if len(tokens)>1 and tokens[0]=="menuentry":
osname=""
i=1
while tokens[i][-1] != "'" and tokens[i][-1] != "\"":
osname=osname+tokens[i]+" "
i+=1
osname+=tokens[i]
entries.append(osname.strip("\"'"))
return entries
def getCurrentColors(self):
mcn_reg=re.compile(r"set menu_color_normal=(.+)/(.+)")
mch_reg=re.compile(r"set menu_color_highlight=(.+)/(.+)")
cn_reg=re.compile(r"set color_normal=(.+)/(.+)")
ch_reg=re.compile(r"set color_highlight=(.+)/(.+)")
colors=self.defCurrentColors.copy()
mcngroups=mcn_reg.findall(self.grubCfg)
mchgroups=mch_reg.findall(self.grubCfg)
cngroups=cn_reg.findall(self.grubCfg)
chgroups=ch_reg.findall(self.grubCfg)
if len(mcngroups)>0: colors["normal"]=list(mcngroups[-1])
elif len(cngroups)>0: colors["normal"]=list(cngroups[-1])
if len(mchgroups)>0: colors["highlight"]=list(mchgroups[-1])
elif len(chgroups)>0: colors["highlight"]=list(chgroups[-1])
return colors
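  # The regexes above match theme lines that a grub.cfg typically contains,
  # e.g. (hypothetical excerpt):
  #   set menu_color_normal=white/black
  #   set menu_color_highlight=black/light-gray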
def loadSettings(self):
self.setWhatsThis()
### General ###
ght=self.fileOptions["GRUB_HIDDEN_TIMEOUT"]
gt=self.fileOptions["GRUB_TIMEOUT"]
gb=self.fileOptions["GRUB_BACKGROUND"]
if self.fileOptions["GRUB_HIDDEN_TIMEOUT_QUIET"]=="false": self.ui.showCountdown.setChecked(True)
else: self.ui.showCountdown.setChecked(False)
if ght=="": self.ui.noHidden.setChecked(True)
elif ght.isdigit():
self.ui.noHidden.setChecked(False)
self.ui.autoStart.setChecked(True)
self.ui.autoStartTimeout.setValue(int(ght))
if gt.isdigit() and int(gt)>=0:
self.ui.autoStart.setChecked(True)
if ght=="": self.ui.autoStartTimeout.setValue(int(gt))
elif ght=="": self.ui.autoStart.setChecked(False)
if gb != "":
self.ui.showBgImage.setChecked(True)
self.ui.bgImage.setText(gb)
else: self.ui.showBgImage.setChecked(False)
self.ui.autoStartTimeout.setEnabled(self.ui.autoStart.isChecked())
self.ui.bgImage.setEnabled(self.ui.showBgImage.isChecked())
self.ui.noHidden.setEnabled(self.ui.autoStart.isChecked())
self.ui.showCountdown.setEnabled(self.ui.autoStart.isChecked())
if "splash" in self.fileOptions["GRUB_CMDLINE_LINUX_DEFAULT"]: self.ui.showSplash.setChecked(True)
else: self.ui.showSplash.setChecked(False)
if "quiet" in self.fileOptions["GRUB_CMDLINE_LINUX_DEFAULT"]: self.ui.quietBoot.setChecked(True)
else: self.ui.quietBoot.setChecked(False)
self.generateBootList()
self.generateColorsList()
### Advanced ###
self.ui.distributor.setText(self.fileOptions["GRUB_DISTRIBUTOR"])
self.ui.gfxMode.setText(self.fileOptions["GRUB_GFXMODE"])
self.ui.cmdlineLinuxDefault.setText(self.fileOptions["GRUB_CMDLINE_LINUX_DEFAULT"].strip("\" "))
if self.fileOptions["GRUB_TERMINAL"]=="console": self.ui.disableGfxterm.setChecked(True)
else: self.ui.disableGfxterm.setChecked(False)
if self.fileOptions["GRUB_DISABLE_LINUX_UUID"]=="true": self.ui.disableLinuxUUID.setChecked(True)
else: self.ui.disableLinuxUUID.setChecked(False)
if self.fileOptions["GRUB_DISABLE_LINUX_RECOVERY"]=="\"true\"": self.ui.disableLinuxRecovery.setChecked(True)
else: self.ui.disableLinuxRecovery.setChecked(False)
if self.fileOptions["GRUB_DISABLE_OS_PROBER"]=="true": self.ui.disableOsprober.setChecked(True)
else: self.ui.disableOsprober.setChecked(False)
if self.otherOptions["memtest"]=="false": self.ui.disableMemtest.setChecked(True)
else: self.ui.disableMemtest.setChecked(False)
if self.otherOptions["memtestpath"]=="none": self.ui.disableMemtest.setEnabled(False)
else: self.ui.disableMemtest.setEnabled(True)
self.ui.gfxMode.setEnabled(not self.ui.disableGfxterm.isChecked())
self.ui.label_3.setEnabled(not self.ui.disableGfxterm.isChecked())
self.ui.initTune.setText(self.fileOptions["GRUB_INIT_TUNE"].strip("\" "))
self.ui.tunePresets.clear()
self.ui.tunePresets.addItems((i18n("Choose preset..."), i18n("440 Hz beep"), i18n("Broken chord")))
self.resDiag.curRes.setText("{0}x{1}x{2}".format(*self.getScreenResolution()))
### Security ###
self.populateUsersTable()
self.populateGroupsTable()
if len(self.security["superusers"])>0: self.ui.secEnabled.setChecked(True)
self.ui.users.setHorizontalHeaderLabels((i18n("Name"), i18n("Superuser"), i18n("Password type")))
self.ui.groups.setHorizontalHeaderLabels((i18n("Name"), i18n("Locked"), i18n("Allowed users")))
self.ui.usersGroup.setEnabled(self.ui.secEnabled.isChecked())
self.ui.groupsGroup.setEnabled(self.ui.secEnabled.isChecked())
self.updateButtons()
### Tools ###
self.ui.pkgName.setText(self.info["pkgName"])
self.ui.pkgVersion.setText(self.info["pkgVersion"])
self.ui.hostOS.setText(self.info["hostOS"])
self.ui.devices.clear()
for item in self.parts:
curItem=QListWidgetItem(item)
curItem.setCheckState(Qt.Unchecked)
self.ui.devices.addItem(curItem)
self.selDevices=list()
def getInfo(self):
for f in os.environ["PATH"].split(":"):
cur=f + "/" + "grub-mkconfig"
if os.path.exists(cur): break
else:
cur=f + "/" + "grub2-mkconfig"
if os.path.exists(cur): break
else: return
source=open(cur).read()
pkgName=re.compile(r"PACKAGE_NAME=(.*)", re.IGNORECASE)
pkgVersion=re.compile(r"PACKAGE_VERSION=(.*)", re.IGNORECASE)
hostOS=re.compile(r"HOST_OS=(.*)", re.IGNORECASE)
self.info=dict()
if pkgName.search(source): self.info["pkgName"]=pkgName.search(source).groups()[0]
else: self.info["pkgName"]=""
if pkgVersion.search(source): self.info["pkgVersion"]=pkgVersion.search(source).groups()[0]
else: self.info["pkgVersion"]=""
if hostOS.search(source): self.info["hostOS"]=hostOS.search(source).groups()[0]
else: self.info["hostOS"]=""
self.parts=list()
if os.path.exists("/proc/partitions"):
lines=open("/proc/partitions").readlines()
for line in lines[2:]: self.parts.append(line.split()[3])
def populateUsersTable(self):
self.ui.users.setRowCount(0)
for item in self.security["users"].items():
self.ui.users.insertRow(self.ui.users.rowCount())
for x in range(3):
self.ui.users.setCellWidget(self.ui.users.rowCount()-1, x, QLabel())
self.ui.users.cellWidget(self.ui.users.rowCount()-1, x).setAlignment(Qt.AlignCenter)
self.ui.users.cellWidget(self.ui.users.rowCount()-1, 0).setText(item[0])
self.ui.users.cellWidget(self.ui.users.rowCount()-1, 1).setText(i18n("Yes") if item[0] in self.security["superusers"] else i18n("No"))
self.ui.users.cellWidget(self.ui.users.rowCount()-1, 2).setText(i18n("Crypted") if item[1][0] else i18n("Plaintext"))
def populateGroupsTable(self):
self.ui.groups.setRowCount(0)
for item in sorted(self.security["groups"].items(), key=lambda x: x[0]):
self.ui.groups.insertRow(self.ui.groups.rowCount())
for x in range(3):
self.ui.groups.setCellWidget(self.ui.groups.rowCount()-1, x, QLabel())
self.ui.groups.cellWidget(self.ui.groups.rowCount()-1, x).setAlignment(Qt.AlignCenter)
self.ui.groups.cellWidget(self.ui.groups.rowCount()-1, 0).setText(item[0])
self.ui.groups.cellWidget(self.ui.groups.rowCount()-1, 1).setText(i18n("Yes") if item[1][0] else i18n("No"))
self.ui.groups.cellWidget(self.ui.groups.rowCount()-1, 2).setText(",".join(item[1][1]) if item[1][0] else i18n("Everyone"))
if self.ui.groups.cellWidget(self.ui.groups.rowCount()-1, 2).text()=="":
self.ui.groups.cellWidget(self.ui.groups.rowCount()-1, 2).setText(i18n("Superusers only"))
def generateBootList(self):
gd=self.fileOptions["GRUB_DEFAULT"]
self.defItem.clear()
for item in self.currentItems: self.defItem.addItem(item)
self.defItem.addItem(i18n("Last used"))
if gd.isdigit():
if int(gd)<self.defItem.count(): self.defItem.setCurrentIndex(int(gd))
else:
gd="0"
self.defItem.setCurrentIndex(0)
self.defItem.emit(SIGNAL("currentIndexChanged(int)"), int(gd))
elif gd=="saved": self.defItem.setCurrentIndex(self.defItem.count()-1)
elif gd.strip("\"'") in self.currentItems: self.defItem.setCurrentIndex(self.defItem.findText(gd.strip("\"'")))
def generateColorsList(self):
self.ui.ntCol.addItems(self.tcolorsNames)
self.ui.htCol.addItems(self.tcolorsNames)
self.ui.nbCol.addItems(self.bcolorsNames)
self.ui.hbCol.addItems(self.bcolorsNames)
self.ui.ntCol.setCurrentIndex(self.colorsList.index(self.currentColors["normal"][0]))
self.ui.nbCol.setCurrentIndex(self.colorsList.index(self.currentColors["normal"][1]))
self.ui.htCol.setCurrentIndex(self.colorsList.index(self.currentColors["highlight"][0]))
self.ui.hbCol.setCurrentIndex(self.colorsList.index(self.currentColors["highlight"][1]))
def parseGrubd(self):
items=os.listdir("/etc/grub.d/")
self.grubd=dict()
for item in items:
if item != "README": self.grubd[item]=open("/etc/grub.d/" + item).read()
self.security=dict()
self.security["superusers"]=self.getSuperUsers()
self.security["users"]=self.getUsers()
self.security["groups"]=self.getGroups()
def getSuperUsers(self):
superusers=list()
regex=re.compile(r'set superusers ?= ?"?([a-zA-Z0-9,]{1,})"?')
for candidate in self.grubd.items():
curusers=list()
if regex.search(candidate[1]):
curusersgroups=regex.findall(candidate[1])
for curusersgroup in curusersgroups: curusers.extend(curusersgroup.split(","))
superusers.extend(curusers)
return superusers
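  # The regex above matches lines such as (hypothetical /etc/grub.d excerpt):
  #   set superusers="root,admin"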
def getUsers(self):
users=dict()
cryptoreg=re.compile(r"password_pbkdf2 ([a-zA-Z0-9]{1,}) ([^\n #]+)")
plainreg=re.compile(r"password ([a-zA-Z0-9]{1,}) ([^\n #]+)")
for candidate in self.grubd.items():
cryptolist=cryptoreg.findall(candidate[1])
plainlist=plainreg.findall(candidate[1])
for item in cryptolist: users[item[0]]=[True, item[1]]
for item in plainlist: users[item[0]]=[False, item[1]]
return users
def getGroups(self):
groups=dict()
lockedregex=re.compile(r'menuentry.+--users "?[a-zA-Z0-9,]{0,}"? .*{')
unlockedregex=re.compile(r"menuentry.+{")
usersregex=re.compile(r'--users "?([a-zA-Z0-9,]{0,})"? {')
for candidate in self.grubd.items():
users=list()
if lockedregex.search(candidate[1]):
if usersregex.search(candidate[1]):
usergroups=usersregex.findall(candidate[1])
for usergroup in usergroups: users.extend(usergroup.split(","))
groups[candidate[0]]=[True, list(set(users))] # Remove duplicates on-the-go
elif unlockedregex.search(candidate[1]): groups[candidate[0]]=[False, list()]
return groups
def populateUsersConfig(self, group):
self.groupDiag.users.selectedListWidget().clear()
self.groupDiag.users.availableListWidget().clear()
self.groupDiag.users.selectedListWidget().addItems(self.security["groups"][group][1])
for item in self.security["users"].keys():
if item not in self.security["groups"][group][1]: self.groupDiag.users.availableListWidget().addItem(item)
def updateDefItem(self, state):
if self.ready:
if state==self.defItem.count()-1:
self.fileOptions["GRUB_DEFAULT"]="saved"
self.fileOptions["GRUB_SAVEDEFAULT"]="true"
else:
if "Linux" in self.defItem.itemText(state): self.fileOptions["GRUB_DEFAULT"]=str(state) # No full names with Linux (version changes)
else: self.fileOptions["GRUB_DEFAULT"]="'"+str(self.defItem.itemText(state))+"'" # Use full names to avoid reordering problems
self.fileOptions["GRUB_SAVEDEFAULT"]="false"
self.changed()
def updateCmdlineFromCheckbox1(self, state):
if self.ready:
if self.ui.showSplash.isChecked():
if "splash" not in self.fileOptions["GRUB_CMDLINE_LINUX_DEFAULT"]:
self.fileOptions["GRUB_CMDLINE_LINUX_DEFAULT"]="\""+str(self.fileOptions["GRUB_CMDLINE_LINUX_DEFAULT"].strip("\" ") + " splash").strip()+"\""
else:
self.fileOptions["GRUB_CMDLINE_LINUX_DEFAULT"]=self.fileOptions["GRUB_CMDLINE_LINUX_DEFAULT"].strip("\" ").replace("splash", "")
self.fileOptions["GRUB_CMDLINE_LINUX_DEFAULT"]="\""+self.fileOptions["GRUB_CMDLINE_LINUX_DEFAULT"].strip()+"\""
self.ui.cmdlineLinuxDefault.setText(self.fileOptions["GRUB_CMDLINE_LINUX_DEFAULT"].strip("\" "))
self.changed()
def updateCmdlineFromCheckbox2(self, state):
if self.ready:
if self.ui.quietBoot.isChecked():
if "quiet" not in self.fileOptions["GRUB_CMDLINE_LINUX_DEFAULT"]:
self.fileOptions["GRUB_CMDLINE_LINUX_DEFAULT"]="\""+str(self.fileOptions["GRUB_CMDLINE_LINUX_DEFAULT"].strip("\" ") + " quiet").strip()+"\""
else:
self.fileOptions["GRUB_CMDLINE_LINUX_DEFAULT"]=self.fileOptions["GRUB_CMDLINE_LINUX_DEFAULT"].strip("\" ").replace("quiet", "")
self.fileOptions["GRUB_CMDLINE_LINUX_DEFAULT"]="\""+self.fileOptions["GRUB_CMDLINE_LINUX_DEFAULT"].strip()+"\""
self.ui.cmdlineLinuxDefault.setText(self.fileOptions["GRUB_CMDLINE_LINUX_DEFAULT"].strip("\" "))
self.changed()
def updateCmdlineLinuxDefault(self, state):
if self.ready:
self.fileOptions["GRUB_CMDLINE_LINUX_DEFAULT"]=str("\""+state+"\"")
if "splash" in state: self.ui.showSplash.setChecked(True)
else: self.ui.showSplash.setChecked(False)
if "quiet" in state: self.ui.quietBoot.setChecked(True)
else: self.ui.quietBoot.setChecked(False)
self.changed()
def updateAutoStart(self, state):
if self.ready:
self.ui.autoStartTimeout.setEnabled(state)
if state:
self.ui.noHidden.setEnabled(True)
self.ui.showCountdown.setEnabled(True)
if self.ui.noHidden.isChecked():
self.fileOptions["GRUB_TIMEOUT"]=str(self.ui.autoStartTimeout.value())
else:
self.fileOptions["GRUB_TIMEOUT"]="3"
self.fileOptions["GRUB_HIDDEN_TIMEOUT"]=str(self.ui.autoStartTimeout.value())
else:
self.fileOptions["GRUB_TIMEOUT"]="-1"
self.fileOptions["GRUB_HIDDEN_TIMEOUT"]=""
self.ui.noHidden.setChecked(True)
self.ui.noHidden.setEnabled(False)
self.ui.showCountdown.setChecked(False)
self.ui.showCountdown.setEnabled(False)
self.changed()
def updateShowBgImage(self, state):
if self.ready:
self.ui.bgImage.setEnabled(state)
if state: self.fileOptions["GRUB_BACKGROUND"]=str(self.ui.bgImage.text())
else: self.fileOptions["GRUB_BACKGROUND"]=""
self.changed()
def updateShowCountdown(self, state):
if self.ready:
self.fileOptions["GRUB_HIDDEN_TIMEOUT_QUIET"]="false" if state else "true"
self.changed()
def updateNoHidden(self, state):
if self.ready:
if state:
self.fileOptions["GRUB_HIDDEN_TIMEOUT"]=""
self.fileOptions["GRUB_TIMEOUT"]=str(self.ui.autoStartTimeout.value())
else: self.fileOptions["GRUB_HIDDEN_TIMEOUT"]=str(self.ui.autoStartTimeout.value())
self.changed()
def updateDisableGfxterm(self, state):
if self.ready:
if state:
self.ui.label_3.setEnabled(False)
self.ui.gfxMode.setEnabled(False)
else:
self.ui.label_3.setEnabled(True)
self.ui.gfxMode.setEnabled(True)
self.fileOptions["GRUB_TERMINAL"]="console" if state else ""
self.changed()
def updateDisableLinuxUUID(self, state):
if self.ready:
self.fileOptions["GRUB_DISABLE_LINUX_UUID"]="true" if state else "false"
self.changed()
def updateDisableLinuxRecovery(self, state):
if self.ready:
self.fileOptions["GRUB_DISABLE_LINUX_RECOVERY"]="\"true\"" if state else "false"
self.changed()
def updateDisableMemtest(self, state):
if self.ready:
self.otherOptions["memtest"]="false" if state else "true"
self.changed()
def updateDisableOsprober(self, state):
if self.ready:
self.fileOptions["GRUB_DISABLE_OS_PROBER"]="true" if state else "false"
self.changed()
def updateDistributor(self, state):
if self.ready:
self.fileOptions["GRUB_DISTRIBUTOR"]=str(self.ui.distributor.text())
self.changed()
def updateInitTune(self, state):
if self.ready:
self.fileOptions["GRUB_INIT_TUNE"]=str("\""+state+"\"")
self.changed()
def updateTunePresets(self, state):
if self.ready:
if state==1: self.fileOptions["GRUB_INIT_TUNE"]="\"480 440 1\""
elif state==2: self.fileOptions["GRUB_INIT_TUNE"]="\"180 440 1 554 1 659 1\""
self.initTune.setText(self.fileOptions["GRUB_INIT_TUNE"].strip("\""))
self.changed()
def updateGfxBox(self, state):
self.fileOptions["GRUB_GFXMODE"]=str(self.resDiag.vbeModes.currentItem().text()).split()[1]
self.ui.gfxMode.setText(self.fileOptions["GRUB_GFXMODE"])
self.changed()
def updateGfxMode(self, state):
if self.ready:
self.fileOptions["GRUB_GFXMODE"]=str(self.ui.gfxMode.text())
self.changed()
def updateAutoStartTimeout(self, state):
if self.ready:
if self.fileOptions["GRUB_HIDDEN_TIMEOUT"]=="":
self.fileOptions["GRUB_TIMEOUT"]=str(self.ui.autoStartTimeout.value())
else:
self.fileOptions["GRUB_HIDDEN_TIMEOUT"]=str(self.ui.autoStartTimeout.value())
self.changed()
def updateBgImage(self, state):
if self.ready:
if type(state)==KUrl: self.fileOptions["GRUB_BACKGROUND"]=str(state.path())
else: self.fileOptions["GRUB_BACKGROUND"]=str(state)
self.changed()
def updateSecEnabled(self, state):
if self.ready:
self.ui.usersGroup.setEnabled(state)
self.ui.groupsGroup.setEnabled(state)
self.ui.userDel.setEnabled(self.ui.users.rowCount()>0)
self.ui.userMod.setEnabled(self.ui.users.rowCount()>0)
self.changed()
def updateLocked(self, state):
self.groupDiag.users.setEnabled(state)
def updateButtons(self):
if self.ui.users.rowCount()==0 or len(self.ui.users.selectedRanges())==0:
self.ui.userDel.setEnabled(False)
self.ui.userMod.setEnabled(False)
else:
self.ui.userDel.setEnabled(True)
self.ui.userMod.setEnabled(True)
if self.ui.groups.rowCount()==0 or len(self.ui.groups.selectedRanges())==0: self.ui.groupMod.setEnabled(False)
else: self.ui.groupMod.setEnabled(True)
def updateNtCol(self, state):
if self.ready:
self.currentColors["normal"][0]=self.colorsList[state]
self.changed()
def updateNbCol(self, state):
if self.ready:
self.currentColors["normal"][1]=self.colorsList[state]
self.changed()
def updateHtCol(self, state):
if self.ready:
self.currentColors["highlight"][0]=self.colorsList[state]
self.changed()
def updateHbCol(self, state):
if self.ready:
self.currentColors["highlight"][1]=self.colorsList[state]
self.changed()
def updateDevices(self):
if self.ready:
self.selDevices=list()
for x in range(self.ui.devices.count()):
if self.ui.devices.item(x).checkState()==Qt.Checked: self.selDevices.append("/dev/"+ str(self.ui.devices.item(x).text()))
self.changed()
def generateCfgfile(self):
out=list()
usedsettings=list()
for line in self.cfgFile:
l=line.strip()
if len(l)>0:
if l[0]=="#": st=l[1:].split("=")[0].strip()
else: st=l.split("=")[0].strip()
if st in self.fileOptions.keys():
out.append(st+"="+self.fileOptions[st])
usedsettings.append(st)
else: out.append(l)
else: out.append("")
out.append("")
for setting in self.fileOptions.keys():
if setting not in usedsettings: out.append(setting+"="+self.fileOptions[setting]+"\n")
return "\n".join(out)
def updateGrubd(self):
items=sorted(self.grubd.items(), key=lambda x: x[0])
outitems=list()
for item in items: outitems.append([item[0], list()])
regex1=re.compile(r'set superusers ?= ?"?[a-zA-Z0-9,]{1,}"?')
regex2=re.compile(r"password_pbkdf2 [a-zA-Z0-9]{1,} [^\n #]+")
regex3=re.compile(r"password [a-zA-Z0-9]{1,} [^\n #]+")
for line in items[0][1].splitlines():
if not (regex1.search(line) or regex2.search(line) or regex3.search(line)): outitems[0][1].append(line)
if self.ui.secEnabled.isChecked():
if outitems[0][1][-1].strip()!="EOF": outitems[0][1].append("cat <<EOF")
else: del outitems[0][1][-1]
outitems[0][1].append('set superusers="{0}"'.format(",".join(self.security["superusers"])))
for user in self.security["users"].items():
outitems[0][1].append("password{0} {1} {2}".format("_pbkdf2" if user[1][0] else "", user[0], user[1][1]))
outitems[0][1].append("EOF")
entryregex1=re.compile(r"(menuentry.+)--users.*{")
entryregex2=re.compile(r"(menuentry.+){")
entryregex3=re.compile(r'(printf.+"menuentry.+)--users.*{\\n')
entryregex4=re.compile(r'(printf.+"menuentry.+){\\n')
for x in range(1, len(outitems)):
for line in items[x][1].splitlines():
if not (entryregex1.search(line) or entryregex2.search(line) or entryregex3.search(line) or entryregex4.search(line)): toappend=line
elif self.security["groups"][outitems[x][0]][0] and self.ui.secEnabled.isChecked():
users='"'+",".join(self.security["groups"][outitems[x][0]][1])+'"'
if entryregex3.search(line):
toappend=entryregex3.sub(r"\1--users {0} {{\\n".format(users), line)
elif entryregex4.search(line):
toappend=entryregex4.sub(r"\1--users {0} {{\\n".format(users), line)
elif entryregex1.search(line):
toappend=entryregex1.sub(r"\1--users {0} {{".format(users), line)
elif entryregex2.search(line):
toappend=entryregex2.sub(r"\1--users {0} {{".format(users), line)
else:
if entryregex3.search(line):
toappend=entryregex3.sub(r"\1{\\n", line)
elif entryregex4.search(line):
toappend=entryregex4.sub(r"\1{\\n", line)
elif entryregex1.search(line):
toappend=entryregex1.sub(r"\1{", line)
elif entryregex2.search(line):
toappend=entryregex2.sub(r"\1{", line)
outitems[x][1].append(toappend)
for item in outitems: self.grubd[item[0]]="\n".join(item[1])
self.grubd["09_colors"]="#! /bin/sh\nset -e\ncat <<EOF\nset menu_color_normal={0}/{1}\nset menu_color_highlight={2}/{3}\nset color_normal={0}/{1}\nset color_highlight={2}/{3}\nEOF\n".format(self.currentColors["normal"][0], self.currentColors["normal"][1], self.currentColors["highlight"][0], self.currentColors["highlight"][1])
def showAddUser(self):
self.userDiag.userConfirm.setEnabled(False)
self.userDiag.show()
def showModUser(self):
self.userDiag.userConfirm.setEnabled(False)
username=str(self.ui.users.cellWidget(self.ui.users.currentRow(), 0).text())
self.userDiag.userName.setText(username)
self.userDiag.superUser.setChecked(True if username in self.security["superusers"] else False)
self.userDiag.show()
def showModGroup(self):
groupname=str(self.ui.groups.cellWidget(self.ui.groups.currentRow(), 0).text())
self.groupDiag.locked.setChecked(True if self.security["groups"][groupname][0] else False)
self.groupDiag.users.setEnabled(self.groupDiag.locked.isChecked())
self.populateUsersConfig(groupname)
self.groupDiag.show()
def modUser(self):
username=str(self.userDiag.userName.text())
if self.userDiag.cryptPass.isChecked():
self.worker=WorkThread(username, str(self.userDiag.password.text()))
self.worker.started.connect(self.showCryptProgress)
self.worker.finished.connect(self.completeModUser1)
self.worker.start()
else:
password=str(self.userDiag.password.text())
self.completeModUser2(username, password)
def completeModUser1(self, salt, crypt, username):
self.worker.started.disconnect(self.showCryptProgress)
self.worker.finished.disconnect(self.completeModUser1)
password="grub.pbkdf2.sha512.10000.{0}.{1}".format(salt, crypt)
self.completeModUser2(username, password)
self.cprg.close()
def completeModUser2(self, username, password):
self.security["users"][username]=(self.userDiag.cryptPass.isChecked(), password)
if self.userDiag.superUser.isChecked() and (username not in self.security["superusers"]): self.security["superusers"].append(str(username))
elif (not self.userDiag.superUser.isChecked()) and (username in self.security["superusers"]): self.security["superusers"].remove(username)
self.populateUsersTable()
self.updateButtons()
self.userDiag.userName.clear()
self.userDiag.password.clear()
self.userDiag.passwordConfirm.clear()
self.userDiag.superUser.setChecked(False)
self.userDiag.cryptPass.setChecked(False)
self.changed()
def delUser(self):
username=str(self.ui.users.cellWidget(self.ui.users.currentRow(), 0).text())
self.ui.users.removeRow(self.ui.users.currentRow())
del self.security["users"][username]
if username in self.security["superusers"]: self.security["superusers"].remove(username)
for item in self.security["groups"].keys():
if username in self.security["groups"][item][1]: self.security["groups"][item][1].remove(username)
self.populateGroupsTable()
self.updateButtons()
self.changed()
def modGroup(self):
groupname=str(self.ui.groups.cellWidget(self.ui.groups.currentRow(), 0).text())
if self.groupDiag.locked.isChecked():
self.security["groups"][groupname][0]=True
self.security["groups"][groupname][1]=list()
for item in xrange(self.groupDiag.users.selectedListWidget().count()):
self.security["groups"][groupname][1].append(str(self.groupDiag.users.selectedListWidget().item(item).text()))
else:
self.security["groups"][groupname][0]=False
self.security["groups"][groupname][1]=list()
self.populateGroupsTable()
self.updateButtons()
self.changed()
def showCryptProgress(self):
self.cprg=KProgressDialog(self, i18n("Bootloader"), i18n("Crypting password..."))
self.cprg.setMinimumDuration(0)
self.cprg.setModal(True)
self.cprg.setAllowCancel(False)
self.cprg.progressBar().setMaximum(0)
def dataCheck(self):
user=str(self.userDiag.userName.text()).strip()
password1=str(self.userDiag.password.text()).strip()
password2=str(self.userDiag.passwordConfirm.text()).strip()
if password1==password2 and password1!="" and user!="": self.userDiag.userConfirm.setEnabled(True)
else: self.userDiag.userConfirm.setEnabled(False)
def doProbeVbe(self):
args={"vbetest": KStandardDirs.locate("data", "kcmgrub2/vbetest")}
self.probeAction=KAuth.Action("org.kde.kcontrol.kcmgrub2.probevbe")
self.probeAction.setHelperID("org.kde.kcontrol.kcmgrub2")
self.probeAction.setArguments(args)
reply=self.probeAction.execute()
if reply.failed(): KMessageBox.error(self, i18n("Cannot probe the video card"))
else:
res=str(reply.data()[QString(u'contents')].toString()).split("\n")[:-1]
self.resDiag.vbeModes.clear()
self.resDiag.vbeModes.addItems(res)
self.resDiag.show()
def getScreenResolution(self):
return QApplication.desktop().screenGeometry().width(), QApplication.desktop().screenGeometry().height(), QApplication.desktop().depth()
def connectUiElements(self):
self.ui.defItem.currentIndexChanged.connect(self.updateDefItem)
self.ui.showSplash.stateChanged.connect(self.updateCmdlineFromCheckbox1)
self.ui.quietBoot.stateChanged.connect(self.updateCmdlineFromCheckbox2)
self.ui.cmdlineLinuxDefault.textEdited.connect(self.updateCmdlineLinuxDefault)
self.ui.autoStart.stateChanged.connect(self.updateAutoStart)
self.ui.showBgImage.stateChanged.connect(self.updateShowBgImage)
self.ui.showCountdown.stateChanged.connect(self.updateShowCountdown)
self.ui.noHidden.stateChanged.connect(self.updateNoHidden)
self.ui.disableGfxterm.stateChanged.connect(self.updateDisableGfxterm)
self.ui.disableLinuxUUID.stateChanged.connect(self.updateDisableLinuxUUID)
self.ui.disableLinuxRecovery.stateChanged.connect(self.updateDisableLinuxRecovery)
self.ui.secEnabled.stateChanged.connect(self.updateSecEnabled)
self.ui.disableMemtest.stateChanged.connect(self.updateDisableMemtest)
self.ui.disableOsprober.stateChanged.connect(self.updateDisableOsprober)
self.ui.distributor.textEdited.connect(self.updateDistributor)
self.ui.initTune.textEdited.connect(self.updateInitTune)
self.ui.tunePresets.currentIndexChanged.connect(self.updateTunePresets)
self.ui.gfxMode.textEdited.connect(self.updateGfxMode)
self.ui.autoStartTimeout.valueChanged.connect(self.updateAutoStartTimeout)
self.ui.bgImage.textChanged.connect(self.updateBgImage)
self.ui.bgImage.urlSelected.connect(self.updateBgImage)
self.ui.userAdd.clicked.connect(self.showAddUser)
self.ui.userDel.clicked.connect(self.delUser)
self.ui.userMod.clicked.connect(self.showModUser)
self.ui.groupMod.clicked.connect(self.showModGroup)
self.userDiag.userName.textEdited.connect(self.dataCheck)
self.userDiag.password.textEdited.connect(self.dataCheck)
self.userDiag.passwordConfirm.textEdited.connect(self.dataCheck)
self.userDiag.userConfirm.clicked.connect(self.userDiag.close)
self.userDiag.userConfirm.clicked.connect(self.modUser)
self.userDiag.userCancel.clicked.connect(self.userDiag.close)
self.groupDiag.groupConfirm.clicked.connect(self.groupDiag.close)
self.groupDiag.groupConfirm.clicked.connect(self.modGroup)
self.groupDiag.groupCancel.clicked.connect(self.groupDiag.close)
self.groupDiag.locked.stateChanged.connect(self.updateLocked)
self.ui.users.clicked.connect(self.updateButtons)
self.ui.groups.clicked.connect(self.updateButtons)
self.ui.devices.clicked.connect(self.updateDevices)
self.ui.ntCol.currentIndexChanged.connect(self.updateNtCol)
self.ui.nbCol.currentIndexChanged.connect(self.updateNbCol)
self.ui.htCol.currentIndexChanged.connect(self.updateHtCol)
self.ui.hbCol.currentIndexChanged.connect(self.updateHbCol)
self.ui.probeVbe.clicked.connect(self.doProbeVbe)
self.resDiag.buttonBox.button(QDialogButtonBox.Ok).clicked.connect(self.updateGfxBox)
self.resDiag.buttonBox.button(QDialogButtonBox.Ok).clicked.connect(self.resDiag.close)
self.resDiag.buttonBox.button(QDialogButtonBox.Cancel).clicked.connect(self.resDiag.close)
def setWhatsThis(self):
self.ui.defItem.setWhatsThis(i18n("Select the item that will be highlighted at startup and booted after the specified timeout"))
self.ui.nbCol.setWhatsThis(i18n("Choose the menu background color. If you choose 'transparent' and don't set a background image, you'll get a black background"))
self.ui.quietBoot.setWhatsThis(i18n("If this is checked, linux startup messages will not be displayed"))
self.ui.distributor.setWhatsThis(i18n("This will be used as a title for linux boot items. If you want it to be the output of a command, enclose it in reverse quotes (`)"))
self.ui.initTune.setWhatsThis(i18n("Set a tune that will be played on startup using the system speaker. You can either choose one of the presets or write your own with the following syntax: a single number meaning tempo, then pairs of numbers where the first is the frequency and the second is the duration"))
self.ui.usersGroup.setWhatsThis(i18n("Here you can manage existing users and add new ones. Superusers can boot every item, edit menu entries and get a commandline. There should be at least one superuser."))
self.ui.groupsGroup.setWhatsThis(i18n("Here you can manage menu groups. An unlocked group can be booted by everyone, while locked ones can be booted only by the specified users and by superusers."))
self.ui.devices.setWhatsThis(i18n("The bootloader will be installed on the selected devices, overwriting other bootloaders. Be very careful or you may lose access to other operating systems!"))
class WorkThread(QThread):
started=pyqtSignal()
finished=pyqtSignal(str, str, str)
def __init__(self, username, clearpw):
self.username=username
self.clearpw=clearpw
QThread.__init__(self)
def run(self):
self.emit(SIGNAL('started()'))
salt, crypt=pbkdf2.pbkdf2(self.clearpw)
self.emit(SIGNAL('finished(QString, QString, QString)'), salt, crypt, self.username)
return
def CreatePlugin(widget_parent, parent, component_data):
KGlobal.locale().insertCatalog("kcmgrub2")
return PyKcm(component_data, widget_parent)
| gpl-3.0 | -3,379,527,373,034,329,600 | 47.174883 | 489 | 0.691388 | false |
davidraleigh/cxxtest | python/cxxtest/cxxtest_misc.py | 1 | 2691 | #!/usr/bin/python
#-------------------------------------------------------------------------
# CxxTest: A lightweight C++ unit testing library.
# Copyright (c) 2008 Sandia Corporation.
# This software is distributed under the LGPL License v2.1
# For more information, see the COPYING file in the top CxxTest directory.
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
#-------------------------------------------------------------------------
import sys
import os
def abort( problem ):
'''Print error message and exit'''
sys.stderr.write( '\n' )
sys.stderr.write( problem )
sys.stderr.write( '\n\n' )
sys.exit(2)
def resolve_symlinks(orig_path):
drive,tmp = os.path.splitdrive(os.path.normpath(orig_path))
if not drive:
drive = os.path.sep
parts = tmp.split(os.path.sep)
actual_path = [drive]
while parts:
actual_path.append(parts.pop(0))
if not os.path.islink(os.path.join(*actual_path)):
continue
actual_path[-1] = os.readlink(os.path.join(*actual_path))
tmp_drive, tmp_path = os.path.splitdrive(
                resolve_symlinks(os.path.join(*actual_path)) )
if tmp_drive:
drive = tmp_drive
actual_path = [drive] + tmp_path.split(os.path.sep)
return os.path.join(*actual_path)
def relpath(path, start=None):
"""Return a relative version of a path.
(provides compatibility with Python < 2.6)"""
# Some notes on implementation:
# - We rely on resolve_symlinks to correctly resolve any symbolic
# links that may be present in the paths
    # - The explicit handling of the drive name is critical for proper
# function on Windows (because os.path.join('c:','foo') yields
# "c:foo"!).
if not start:
start = os.getcwd()
ref_drive, ref_path = os.path.splitdrive(
resolve_symlinks(os.path.abspath(start)) )
if not ref_drive:
ref_drive = os.path.sep
start = [ref_drive] + ref_path.split(os.path.sep)
while '' in start:
start.remove('')
pth_drive, pth_path = os.path.splitdrive(
resolve_symlinks(os.path.abspath(path)) )
if not pth_drive:
pth_drive = os.path.sep
path = [pth_drive] + pth_path.split(os.path.sep)
while '' in path:
path.remove('')
i = 0
max = min(len(path), len(start))
while i < max and path[i] == start[i]:
i += 1
if i < 2:
return os.path.join(*path)
else:
rel = ['..']*(len(start)-i) + path[i:]
if rel:
return os.path.join(*rel)
else:
return '.'
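# Illustrative sketch (not part of the original module): expected behaviour of
# the relpath() compatibility helper above on a POSIX-like layout. The paths
# are made up for illustration only.
def _example_relpath_usage():
    below = relpath('/home/user/project/tests/unit', '/home/user/project')
    sibling = relpath('/home/user/other', '/home/user/project')
    return below, sibling  # e.g. ('tests/unit', '../other')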
| lgpl-2.1 | 1,456,833,161,039,569,400 | 33.5 | 74 | 0.578595 | false |
liviu-/ding | ding/ding.py | 1 | 4914 | #!/usr/bin/env python
"""Simple CLI beep tool"""
from __future__ import unicode_literals
from __future__ import print_function
import re
import os
import sys
import time
import datetime
import argparse
VERSION = '2.1.0'
N_BEEPS = 4
WAIT_BEEPS = 0.15
def relative_time(arg):
"""Validate user provided relative time"""
    if not re.match(r'\d+[smh]( +\d+[smh])*', arg):
raise argparse.ArgumentTypeError("Invalid time format: {}".format(arg))
return arg
def absolute_time(arg):
"""Validate user provided absolute time"""
if not all([t.isdigit() for t in arg.split(':')]):
raise argparse.ArgumentTypeError("Invalid time format: {}".format(arg))
# Valid time (e.g. hour must be between 0..23)
try:
datetime.time(*map(int, arg.split(':')))
except ValueError as e:
raise argparse.ArgumentTypeError("Invalid time format: {}".format(e))
return arg
def get_args(args):
"""Parse commandline arguments"""
parent_parser = argparse.ArgumentParser(
add_help=False, description='Lightweight time management CLI tool')
parent_parser.add_argument(
'-n', '--no-timer', action='store_true', help='Hide the countdown timer')
parent_parser.add_argument(
'-c', '--command', type=str, help='Use a custom command instead of the default beep')
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--version', action='version', version=VERSION)
subparsers = parser.add_subparsers(dest='mode')
subparsers.required = True
parser_in = subparsers.add_parser('in', parents=[parent_parser])
parser_in.add_argument('time', nargs='+', type=relative_time,
help='relative time \d+[smh]( +\d+[smh])* (e.g. 1h 30m)')
parser_every = subparsers.add_parser('every', parents=[parent_parser])
parser_every.add_argument('time', nargs='+', type=relative_time,
help='relative time \d+[smh]( +\d+[smh])* (e.g. 2m 15s)')
parser_at = subparsers.add_parser('at', parents=[parent_parser])
parser_at.add_argument('time', type=absolute_time, help='absolute time [hh:[mm[:ss]]]')
return parser.parse_args(args)
class TimeParser():
"""Class helping with parsing user provided time into seconds"""
time_map = {
's': 1,
'm': 60,
'h': 60 * 60,
}
def __init__(self, time, relative):
self.time = time
self.relative = relative
def get_seconds(self):
return self._get_seconds_relative() if self.relative else self._get_seconds_absolute()
def _get_seconds_relative(self):
return sum([self.time_map[t[-1]] * int(t[:-1]) for t in self.time])
def _get_seconds_absolute(self):
now = datetime.datetime.now()
user_time = (datetime.datetime.combine(datetime.date.today(),
datetime.time(*map(int, self.time.split(':')))))
return ((user_time - now).seconds if user_time > now
else (user_time + datetime.timedelta(days=1) - now).seconds)
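# Illustrative sketch (not part of the original script): how TimeParser turns
# the CLI time arguments into seconds. The values below are examples only.
def _example_timeparser_usage():
    # relative times ("in"/"every" modes) are lists of tokens like "1h 30m"
    rel = TimeParser(['1h', '30m'], relative=True).get_seconds()  # 5400
    # absolute times ("at" mode) are a single "hh:mm[:ss]" string; the result
    # depends on the current clock, so only the type is predictable here
    abs_secs = TimeParser('07:30', relative=False).get_seconds()
    return rel, abs_secs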
def countdown(seconds, notimer=False):
"""Countdown for `seconds`, printing values unless `notimer`"""
if not notimer:
os.system('cls' if os.name == 'nt' else 'clear') # initial clear
while seconds > 0:
start = time.time()
# print the time without a newline or carriage return
# this leaves the cursor at the end of the time while visible
if not notimer:
print(datetime.timedelta(seconds=seconds), end='')
sys.stdout.flush()
seconds -= 1
time.sleep(1 - time.time() + start)
# emit a carriage return
# this moves the cursor back to the beginning of the line
# so the next time overwrites the current time
if not notimer:
print(end='\r')
def beep(seconds, command):
"""Make the beep noise"""
for _ in range(N_BEEPS):
if command:
os.system(command)
else:
sys.stdout.write('\a')
sys.stdout.flush()
time.sleep(WAIT_BEEPS)
def parse_time(args):
"""Figure out the number of seconds to wait"""
relative = args.mode == 'in' or args.mode == "every"
parser = TimeParser(args.time, relative)
return parser.get_seconds()
def main(args=sys.argv[1:]):
args = get_args(args)
while True:
try:
seconds = parse_time(args)
countdown(seconds, args.no_timer)
beep(seconds, args.command)
# doing `if` here so there just can't be any stack printed for an interrupt
if args.mode != "every":
break
except KeyboardInterrupt:
print() # ending current line
break # without printing useless stack...
if __name__ == '__main__':
main()
| mit | -4,648,106,790,088,834,000 | 32.202703 | 98 | 0.59361 | false |
keithadavidson/ansible-mezzanine | deploy/scripts/set_mezzanine_settings.py | 1 | 1811 | #!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2015 Keith Davidson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# A script to set the Twitter API settings for the Mezzanine site
# Assumes the following environment variables:
#
# PROJECT_DIR: the project directory (e.g., ~/projname)
# TWITTER_ACCESS_TOKEN_KEY, TWITTER_ACCESS_TOKEN_SECRET,
# TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET: the Twitter API credentials
import os
import sys
# Add the project directory to system path
project_dir = os.path.expanduser(os.environ['PROJECT_DIR'])
sys.path.append(project_dir)
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from mezzanine.conf import settings
settings.TWITTER_ACCESS_TOKEN_KEY = os.environ['TWITTER_ACCESS_TOKEN_KEY']
settings.TWITTER_ACCESS_TOKEN_SECRET = os.environ['TWITTER_ACCESS_TOKEN_SECRET']
settings.TWITTER_CONSUMER_KEY = os.environ['TWITTER_CONSUMER_KEY']
settings.TWITTER_CONSUMER_SECRET = os.environ['TWITTER_CONSUMER_SECRET']
| mit | -3,355,165,800,266,858,500 | 39.244444 | 80 | 0.773606 | false |
Crompulence/cpl-library | utils/design_topology/CFD.py | 1 | 4796 | import numpy as np
import matplotlib.pyplot as plt
from mpi4py import MPI
from cplpy import CPL
from draw_grid import draw_grid
class CFD():
def __init__(self, npxyz, xyzL, xyz_orig, ncxyz):
#initialise MPI and CPL
self.comm = MPI.COMM_WORLD
self.CPL = CPL()
self.CFD_COMM = self.CPL.init(CPL.CFD_REALM)
self.nprocs_realm = self.CFD_COMM.Get_size()
# Parameters of the cpu topology (cartesian grid)
self.npxyz = np.array(npxyz, order='F', dtype=np.int32)
self.NProcs = np.product(npxyz)
self.xyzL = np.array(xyzL, order='F', dtype=np.float64)
self.xyz_orig = np.array(xyz_orig, order='F', dtype=np.float64)
self.ncxyz = np.array(ncxyz, order='F', dtype=np.int32)
if (self.nprocs_realm != self.NProcs):
print("Non-coherent number of processes in CFD ", self.nprocs_realm,
" no equal to ", self.npxyz[0], " X ", self.npxyz[1], " X ", self.npxyz[2])
MPI.Abort(errorcode=1)
#Setup coupled simulation
self.cart_comm = self.CFD_COMM.Create_cart([self.npxyz[0], self.npxyz[1], self.npxyz[2]])
self.CPL.setup_cfd(self.cart_comm, self.xyzL, self.xyz_orig, self.ncxyz)
#Get limits of overlap region
self.olap_limits = self.CPL.get_olap_limits()
self.portion = self.CPL.my_proc_portion(self.olap_limits)
[self.ncxl, self.ncyl, self.nczl] = self.CPL.get_no_cells(self.portion)
self.dx = self.CPL.get("xl_cfd")/float(self.CPL.get("ncx"))
self.dy = self.CPL.get("yl_cfd")/float(self.CPL.get("ncy"))
self.dz = self.CPL.get("zl_cfd")/float(self.CPL.get("ncz"))
self.ioverlap = (self.CPL.get("icmax_olap")-self.CPL.get("icmin_olap")+1)
self.joverlap = (self.CPL.get("jcmax_olap")-self.CPL.get("jcmin_olap")+1)
self.koverlap = (self.CPL.get("kcmax_olap")-self.CPL.get("kcmin_olap")+1)
self.xoverlap = self.ioverlap*self.dx
self.yoverlap = self.joverlap*self.dy
self.zoverlap = self.koverlap*self.dz
def recv_CPL_data(self):
# recv data to plot
self.recv_array = np.zeros((1, self.ncxl, self.ncyl, self.nczl), order='F', dtype=np.float64)
self.recv_array, ierr = self.CPL.recv(self.recv_array, self.olap_limits)
def plot_grid(self, ax):
#Plot CFD and coupler Grid
draw_grid(ax,
nx=self.CPL.get("ncx"),
ny=self.CPL.get("ncy"),
nz=self.CPL.get("ncz"),
px=self.CPL.get("npx_cfd"),
py=self.CPL.get("npy_cfd"),
pz=self.CPL.get("npz_cfd"),
xmin=self.CPL.get("x_orig_cfd"),
ymin=self.CPL.get("y_orig_cfd"),
zmin=self.CPL.get("z_orig_cfd"),
xmax=(self.CPL.get("icmax_olap")+1)*self.dx,
ymax=self.CPL.get("yl_cfd"),
zmax=(self.CPL.get("kcmax_olap")+1)*self.dz,
lc = 'r',
label='CFD')
#Plot MD domain
draw_grid(ax, nx=1, ny=1, nz=1,
px=self.CPL.get("npx_md"),
py=self.CPL.get("npy_md"),
pz=self.CPL.get("npz_md"),
xmin=self.CPL.get("x_orig_md"),
ymin=-self.CPL.get("yl_md")+self.yoverlap,
zmin=self.CPL.get("z_orig_md"),
xmax=(self.CPL.get("icmax_olap")+1)*self.dx,
ymax=self.yoverlap,
zmax=(self.CPL.get("kcmax_olap")+1)*self.dz,
label='MD')
def plot_data(self, ax):
# === Plot both grids ===
#Plot x component on grid
x = np.linspace(self.CPL.get("x_orig_cfd")+.5*self.dx,
self.xoverlap-.5*self.dx,self.ioverlap)
z = np.linspace(self.CPL.get("z_orig_cfd")+.5*self.dz,
self.zoverlap-.5*self.dz,self.koverlap)
try:
for j in range(self.joverlap):
ax.plot(x, 0.5*self.dy*(self.recv_array[0,:,j,0]+1.+2*j), 's-')
except ValueError:
print("Arrays not equal:", x.shape, z.shape, self.recv_array.shape)
def finalise(self):
self.CPL.finalize()
MPI.Finalize()
if __name__ == '__main__':
#Get input file
import inpututils
ip = inpututils.InputMod("./CFD.in")
npxyz = ip.read_input("npxyz")
xyzL = ip.read_input("xyzL")
xyz_orig = ip.read_input("xyz_orig")
ncxyz = ip.read_input("ncxyz")
cfd = CFD(npxyz=npxyz,
xyzL = xyzL,
xyz_orig = xyz_orig,
ncxyz = ncxyz)
cfd.recv_CPL_data()
fig, ax = plt.subplots(1,1)
cfd.plot_grid(ax)
cfd.plot_data(ax)
plt.show()
cfd.finalise()
| gpl-3.0 | -1,381,466,288,594,465,300 | 34.791045 | 101 | 0.541076 | false |
caterinaurban/Typpete | typpete/unittests/inference/generic_test.py | 1 | 1346 | # type_params {'generic_tolist': ['GTL'], 'flatten': ['FL'], 'flatten_dict': ['DK','DV']}
def generic_tolist(a):
return [a]
u = generic_tolist(1.2)
u[0] = 2.4
v = generic_tolist(True)
v2 = v[v[0]]
def flatten(lists):
"""
Flattens a list of lists into a flat list
"""
return [item for sublist in lists for item in sublist]
def flatten_dict(dicts,
defaults):
"""
Flattens a dict of lists, i.e., concatenates all lists for the same keys.
"""
result = {}
for key in defaults:
result[key] = []
for d in dicts:
for key, value in d.items():
if key in result:
result[key].extend(value)
else:
result[key] = value
return result
a = flatten([[1,2], [1,2], [True, False]])
a2 = flatten([["hi"], ['yo', 'sup']])
a4 = a[a[0]]
b = [{1:[2]}, {True: [True]}, {5: [1.2, 2]}]
c = b[0][1]
d = flatten_dict(b, [True, 1])
e = flatten_dict([{1.2: ['hi']}], [3, 5])
class A:
def bar(self):
return 1
class B(A):
pass
ff = flatten_dict([{'hi': [A()]}, {'sup': [A()], 'hey': [B(), A()]}], ['asd', 'erer'])
ff['hi'][0].bar()
# flatten := Callable[[List[List[FL]]], List[FL]]
# flatten_dict := Callable[[List[Dict[DV, List[DK]]], List[DV]], Dict[DV, List[DK]]]
# generic_tolist := Callable[[GTL], List[GTL]] | mpl-2.0 | 5,171,939,318,511,768,000 | 21.830508 | 89 | 0.520802 | false |
hedvig/project-config | zuul/openstack_functions.py | 1 | 2255 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
def set_log_url(item, job, params):
if hasattr(item.change, 'refspec'):
path = "%s/%s/%s/%s/" % (
params['ZUUL_CHANGE'][-2:], params['ZUUL_CHANGE'],
params['ZUUL_PATCHSET'], params['ZUUL_PIPELINE'])
elif hasattr(item.change, 'ref'):
path = "%s/%s/%s/" % (
params['ZUUL_NEWREV'][:2], params['ZUUL_NEWREV'],
params['ZUUL_PIPELINE'])
else:
path = params['ZUUL_PIPELINE'] + '/'
params['BASE_LOG_PATH'] = path
params['LOG_PATH'] = path + '%s/%s/' % (job.name,
params['ZUUL_UUID'][:7])
def reusable_node(item, job, params):
if 'OFFLINE_NODE_WHEN_COMPLETE' in params:
del params['OFFLINE_NODE_WHEN_COMPLETE']
def set_node_options(item, job, params):
# Force tox to pass through ZUUL_ variables
zuul_params = [x for x in params.keys() if x.startswith('ZUUL_')]
params['TOX_TESTENV_PASSENV'] = ' '.join(zuul_params)
# Set up log url parameter for all jobs
set_log_url(item, job, params)
    # Default to single use node. Potentially overridden below.
# Select node to run job on.
params['OFFLINE_NODE_WHEN_COMPLETE'] = '1'
proposal_re = r'^.*(merge-release-tags|(propose|upstream)-(.*?)-(constraints-.*|updates?|update-liberty))$' # noqa
release_re = r'^.*-(forge|jenkinsci|mavencentral|pypi-(both|wheel)|npm)-upload$'
hook_re = r'^hook-(.*?)-(rtfd)$'
# jobs run on the persistent proposal and release workers
if (re.match(proposal_re, job.name) or re.match(release_re, job.name) or
re.match(hook_re, job.name)):
reusable_node(item, job, params)
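# Illustrative sketch (not part of the original file): calling
# set_node_options() with stand-in objects. The Zuul item/job/params shapes
# below are assumptions for illustration only.
def _example_set_node_options():
    class _Change(object):
        refspec = 'refs/changes/23/123456/7'
    class _Item(object):
        change = _Change()
    class _Job(object):
        name = 'gate-example-python27'
    params = {
        'ZUUL_CHANGE': '123456',
        'ZUUL_PATCHSET': '7',
        'ZUUL_PIPELINE': 'gate',
        'ZUUL_UUID': 'abcdef0123456789',
    }
    set_node_options(_Item(), _Job(), params)
    # params now also carries TOX_TESTENV_PASSENV, BASE_LOG_PATH, LOG_PATH and
    # OFFLINE_NODE_WHEN_COMPLETE; the job name does not match the proposal,
    # release or hook patterns, so the node is not marked reusable.
    return params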
| apache-2.0 | -4,446,275,520,323,971,000 | 40.759259 | 119 | 0.636807 | false |
linearregression/socorro | socorro/external/es/supersearch.py | 1 | 19711 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import datetime
import re
from elasticsearch_dsl import Search, A, F, Q
from elasticsearch.exceptions import NotFoundError
from socorro.external import (
BadArgumentError,
)
from socorro.external.es.super_search_fields import SuperSearchFields
from socorro.lib import datetimeutil
from socorro.lib.search_common import SearchBase
BAD_INDEX_REGEX = re.compile('\[\[(.*)\] missing\]')
class SuperSearch(SearchBase):
def __init__(self, *args, **kwargs):
self.config = kwargs.get('config')
self.es_context = self.config.elasticsearch.elasticsearch_class(
self.config.elasticsearch
)
self.all_fields = SuperSearchFields(config=self.config).get_fields()
# Create a map to associate a field's name in the database to its
# exposed name (in the results and facets).
self.database_name_to_field_name_map = dict(
(x['in_database_name'], x['name'])
for x in self.all_fields.values()
)
kwargs.update(fields=self.all_fields)
super(SuperSearch, self).__init__(
*args, **kwargs
)
def get_connection(self):
with self.es_context() as conn:
return conn
def generate_list_of_indices(self, from_date, to_date, es_index=None):
"""Return the list of indices to query to access all the crash reports
that were processed between from_date and to_date.
The naming pattern for indices in elasticsearch is configurable, it is
possible to have an index per day, per week, per month...
Parameters:
* from_date datetime object
* to_date datetime object
"""
if es_index is None:
es_index = self.config.elasticsearch_index
indices = []
current_date = from_date
while current_date <= to_date:
index = current_date.strftime(es_index)
            # Make sure no index appears twice in the list
# (for weekly or monthly indices for example)
if index not in indices:
indices.append(index)
current_date += datetime.timedelta(days=1)
return indices
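    # Illustrative sketch (not part of the original class): how a date-based
    # index naming pattern expands into index names, mirroring the loop in
    # generate_list_of_indices() above. The weekly pattern and dates below are
    # examples only.
    @staticmethod
    def _example_index_expansion():
        import datetime
        es_index = 'socorro%Y%W'  # hypothetical weekly index pattern
        from_date = datetime.datetime(2015, 1, 1)
        to_date = datetime.datetime(2015, 1, 20)
        indices = []
        current_date = from_date
        while current_date <= to_date:
            index = current_date.strftime(es_index)
            if index not in indices:
                indices.append(index)
            current_date += datetime.timedelta(days=1)
        return indices  # e.g. ['socorro201500', 'socorro201501', ...]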
def get_indices(self, dates):
"""Return the list of indices to use for given dates. """
start_date = None
end_date = None
for date in dates:
if '>' in date.operator:
start_date = date.value
if '<' in date.operator:
end_date = date.value
return self.generate_list_of_indices(start_date, end_date)
def format_field_names(self, hit):
"""Return a hit with each field's database name replaced by its
exposed name. """
new_hit = {}
for field in hit:
new_field = field
if '.' in new_field:
# Remove the prefix ("processed_crash." or "raw_crash.").
new_field = new_field.split('.')[-1]
new_field = self.database_name_to_field_name_map.get(
new_field, new_field
)
new_hit[new_field] = hit[field]
return new_hit
def format_fields(self, hit):
"""Return a well formatted document.
Elasticsearch returns values as lists when using the `fields` option.
This function removes the list when it contains zero or one element.
It also calls `format_field_names` to correct all the field names.
"""
hit = self.format_field_names(hit)
for field in hit:
if isinstance(hit[field], (list, tuple)):
if len(hit[field]) == 0:
hit[field] = None
elif len(hit[field]) == 1:
hit[field] = hit[field][0]
return hit
def get_field_name(self, value, full=True):
try:
field_ = self.all_fields[value]
except KeyError:
raise BadArgumentError(
value,
msg='Unknown field "%s"' % value
)
if not field_['is_returned']:
# Returning this field is not allowed.
raise BadArgumentError(
value,
msg='Field "%s" is not allowed to be returned' % value
)
field_name = '%s.%s' % (
field_['namespace'],
field_['in_database_name']
)
if full and field_['has_full_version']:
# If the param has a full version, that means what matters
# is the full string, and not its individual terms.
field_name += '.full'
return field_name
def format_aggregations(self, aggregations):
"""Return aggregations in a form that looks like facets.
We used to expose the Elasticsearch facets directly. This is thus
needed for backwards compatibility.
"""
aggs = aggregations.to_dict()
for agg in aggs:
for i, bucket in enumerate(aggs[agg]['buckets']):
sub_aggs = {}
for key in bucket:
# Go through all sub aggregations. Those are contained in
# all the keys that are not 'key' or 'count'.
if key in ('key', 'key_as_string', 'doc_count'):
continue
sub_aggs[key] = [
{
# For date data, Elasticsearch exposes a timestamp
# in 'key' and a human-friendly string in
# 'key_as_string'. We thus check if the later
# exists to expose it, and return the normal
# 'key' if not.
'term': x.get('key_as_string', x['key']),
'count': x['doc_count'],
}
for x in bucket[key]['buckets']
]
aggs[agg]['buckets'][i] = {
'term': bucket.get('key_as_string', bucket['key']),
'count': bucket['doc_count'],
}
if sub_aggs:
aggs[agg]['buckets'][i]['facets'] = sub_aggs
aggs[agg] = aggs[agg]['buckets']
return aggs
def get(self, **kwargs):
"""Return a list of results and aggregations based on parameters.
The list of accepted parameters (with types and default values) is in
the database and can be accessed with the super_search_fields service.
"""
# Filter parameters and raise potential errors.
params = self.get_parameters(**kwargs)
# Find the indices to use to optimize the elasticsearch query.
indices = self.get_indices(params['date'])
# Create and configure the search object.
search = Search(
using=self.get_connection(),
index=indices,
doc_type=self.config.elasticsearch.elasticsearch_doctype,
)
# Create filters.
filters = None
for field, sub_params in params.items():
sub_filters = None
for param in sub_params:
if param.name.startswith('_'):
# By default, all param values are turned into lists,
# even when they have and can have only one value.
# For those we know there can only be one value,
# so we just extract it from the made-up list.
if param.name == '_results_offset':
results_from = param.value[0]
elif param.name == '_results_number':
results_number = param.value[0]
elif param.name == '_facets_size':
facets_size = param.value[0]
elif param.name == '_histogram_interval.date':
histogram_interval_date = param.value[0]
# Don't use meta parameters in the query.
continue
field_data = self.all_fields[param.name]
name = '%s.%s' % (
field_data['namespace'],
field_data['in_database_name']
)
if param.data_type in ('date', 'datetime'):
param.value = datetimeutil.date_to_string(param.value)
elif param.data_type == 'enum':
param.value = [x.lower() for x in param.value]
elif param.data_type == 'str' and not param.operator:
param.value = [x.lower() for x in param.value]
args = {}
filter_type = 'term'
filter_value = None
if not param.operator:
# contains one of the terms
if len(param.value) == 1:
val = param.value[0]
if not isinstance(val, basestring) or (
isinstance(val, basestring) and ' ' not in val
):
filter_value = val
# If the term contains white spaces, we want to perform
# a phrase query. Thus we do nothing here and let this
# value be handled later.
else:
filter_type = 'terms'
filter_value = param.value
elif param.operator == '=':
# is exactly
if field_data['has_full_version']:
name = '%s.full' % name
filter_value = param.value
elif param.operator == '>':
# greater than
filter_type = 'range'
filter_value = {
'gt': param.value
}
elif param.operator == '<':
# lower than
filter_type = 'range'
filter_value = {
'lt': param.value
}
elif param.operator == '>=':
# greater than or equal to
filter_type = 'range'
filter_value = {
'gte': param.value
}
elif param.operator == '<=':
# lower than or equal to
filter_type = 'range'
filter_value = {
'lte': param.value
}
elif param.operator == '__null__':
# is null
filter_type = 'missing'
args['field'] = name
if filter_value is not None:
args[name] = filter_value
if args:
if param.operator_not:
new_filter = ~F(filter_type, **args)
else:
new_filter = F(filter_type, **args)
if sub_filters is None:
sub_filters = new_filter
elif param.data_type == 'enum':
sub_filters |= new_filter
else:
sub_filters &= new_filter
continue
# These use a wildcard and thus need to be in a query
# instead of a filter.
operator_wildcards = {
'~': '*%s*', # contains
'$': '%s*', # starts with
'^': '*%s' # ends with
}
if param.operator in operator_wildcards:
if field_data['has_full_version']:
name = '%s.full' % name
query_type = 'wildcard'
args[name] = (
operator_wildcards[param.operator] % param.value
)
elif not param.operator:
# This is a phrase that was passed down.
query_type = 'simple_query_string'
args['query'] = param.value[0]
args['fields'] = [name]
args['default_operator'] = 'and'
if args:
query = Q(query_type, **args)
if param.operator_not:
query = ~query
search = search.query(query)
else:
# If we reach this point, that means the operator is
# not supported, and we should raise an error about that.
raise NotImplementedError(
'Operator %s is not supported' % param.operator
)
if filters is None:
filters = sub_filters
elif sub_filters is not None:
filters &= sub_filters
search = search.filter(filters)
# Restricting returned fields.
fields = []
for param in params['_columns']:
for value in param.value:
if not value:
continue
field_name = self.get_field_name(value, full=False)
fields.append(field_name)
search = search.fields(fields)
# Sorting.
sort_fields = []
for param in params['_sort']:
for value in param.value:
if not value:
continue
# Values starting with a '-' are sorted in descending order.
# In order to retrieve the database name of the field, we
# must first remove the '-' part and add it back later.
# Example: given ['product', '-version'], the results will be
# sorted by ascending product and descending version.
desc = False
if value.startswith('-'):
desc = True
value = value[1:]
field_name = self.get_field_name(value, full=False)
if desc:
# The underlying library understands that '-' means
# sorting in descending order.
field_name = '-' + field_name
sort_fields.append(field_name)
search = search.sort(*sort_fields)
# Pagination.
results_to = results_from + results_number
search = search[results_from:results_to]
# Create facets.
for param in params['_facets']:
for value in param.value:
if not value:
continue
field_name = self.get_field_name(value)
search.aggs.bucket(
value,
'terms',
field=field_name,
size=facets_size,
)
# Create signature aggregations.
if params.get('_aggs.signature'):
sig_bucket = A(
'terms',
field=self.get_field_name('signature'),
size=facets_size,
)
for param in params['_aggs.signature']:
for value in param.value:
if not value:
continue
field_name = self.get_field_name(value)
sig_bucket.bucket(
value,
'terms',
field=field_name,
size=facets_size,
)
search.aggs.bucket('signature', sig_bucket)
# Create date histograms.
if params.get('_histogram.date'):
date_bucket = A(
'date_histogram',
field=self.get_field_name('date'),
interval=histogram_interval_date,
)
for param in params['_histogram.date']:
for value in param.value:
if not value:
continue
field_name = self.get_field_name(value)
val_bucket = A(
'terms',
field=field_name,
size=facets_size,
)
date_bucket.bucket(value, val_bucket)
search.aggs.bucket('histogram_date', date_bucket)
# Query and compute results.
hits = []
if params['_return_query'][0].value[0]:
# Return only the JSON query that would be sent to elasticsearch.
return {
'query': search.to_dict(),
'indices': indices,
}
# We call elasticsearch with a computed list of indices, based on
# the date range. However, if that list contains indices that do not
# exist in elasticsearch, an error will be raised. We thus want to
# remove all failing indices until we either have a valid list, or
# an empty list in which case we return no result.
while True:
try:
results = search.execute()
for hit in results:
hits.append(self.format_fields(hit.to_dict()))
total = search.count()
aggregations = self.format_aggregations(results.aggregations)
break # Yay! Results!
except NotFoundError, e:
missing_index = re.findall(BAD_INDEX_REGEX, e.error)[0]
if missing_index in indices:
del indices[indices.index(missing_index)]
else:
# Wait what? An error caused by an index that was not
# in the request? That should never happen, but in case
# it does, better know it.
raise
if indices:
# Update the list of indices and try again.
# Note: we need to first empty the list of indices before
# updating it, otherwise the removed indices never get
# actually removed.
search = search.index().index(*indices)
else:
# There is no index left in the list, return an empty
# result.
hits = []
total = 0
aggregations = {}
break
return {
'hits': hits,
'total': total,
'facets': aggregations,
}
# For backwards compatibility with the previous elasticsearch module.
# All those methods used to live in this file, but have been moved to
# the super_search_fields.py file now. Since the configuration of the
# middleware expect those to still be here, we bind them for now.
def get_fields(self, **kwargs):
return SuperSearchFields(config=self.config).get_fields(**kwargs)
def create_field(self, **kwargs):
return SuperSearchFields(config=self.config).create_field(**kwargs)
def update_field(self, **kwargs):
return SuperSearchFields(config=self.config).update_field(**kwargs)
def delete_field(self, **kwargs):
return SuperSearchFields(config=self.config).delete_field(**kwargs)
def get_missing_fields(self):
return SuperSearchFields(config=self.config).get_missing_fields()
| mpl-2.0 | 1,042,928,458,989,342,600 | 35.981238 | 79 | 0.488205 | false |
fabian0010/Blaze | pyasn1/type/namedval.py | 1 | 2701 | #
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2017, Ilya Etingof <[email protected]>
# License: http://pyasn1.sf.net/license.html
#
# ASN.1 named integers
#
from pyasn1 import error
__all__ = ['NamedValues']
class NamedValues(object):
def __init__(self, *namedValues):
self.nameToValIdx = {}
self.valToNameIdx = {}
self.namedValues = ()
automaticVal = 1
for namedValue in namedValues:
if isinstance(namedValue, tuple):
name, val = namedValue
else:
name = namedValue
val = automaticVal
if name in self.nameToValIdx:
raise error.PyAsn1Error('Duplicate name %s' % (name,))
self.nameToValIdx[name] = val
if val in self.valToNameIdx:
raise error.PyAsn1Error('Duplicate value %s=%s' % (name, val))
self.valToNameIdx[val] = name
self.namedValues = self.namedValues + ((name, val),)
automaticVal += 1
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, ', '.join([repr(x) for x in self.namedValues]))
def __str__(self):
return str(self.namedValues)
def __eq__(self, other):
return tuple(self) == tuple(other)
def __ne__(self, other):
return tuple(self) != tuple(other)
def __lt__(self, other):
return tuple(self) < tuple(other)
def __le__(self, other):
return tuple(self) <= tuple(other)
def __gt__(self, other):
return tuple(self) > tuple(other)
def __ge__(self, other):
return tuple(self) >= tuple(other)
def __hash__(self):
return hash(tuple(self))
def getName(self, value):
if value in self.valToNameIdx:
return self.valToNameIdx[value]
def getValue(self, name):
if name in self.nameToValIdx:
return self.nameToValIdx[name]
def getValues(self, *names):
try:
return [self.nameToValIdx[name] for name in names]
except KeyError:
raise error.PyAsn1Error(
'Unknown bit identifier(s): %s' % (set(names).difference(self.nameToValIdx),)
)
# TODO support by-name subscription
def __getitem__(self, i):
return self.namedValues[i]
def __len__(self):
return len(self.namedValues)
def __add__(self, namedValues):
return self.__class__(*self.namedValues + namedValues)
def __radd__(self, namedValues):
return self.__class__(*namedValues + tuple(self))
def clone(self, *namedValues):
return self.__class__(*tuple(self) + namedValues)
# XXX clone/subtype?
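# Illustrative sketch (not part of the original module): a small example of
# the NamedValues mapping defined above. The names and numbers are made up.
def _exampleNamedValuesUsage():
    flags = NamedValues(('enable', 1), ('disable', 2), 'other')  # 'other' -> 3
    return (flags.getValue('enable'),            # 1
            flags.getName(2),                    # 'disable'
            flags.getValues('enable', 'other'),  # [1, 3]
            len(flags))                          # 3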
| mit | 7,432,628,266,542,734,000 | 27.431579 | 99 | 0.569789 | false |
jmeyers314/jtrace | batoid/coordTransform.py | 1 | 3059 | from . import _batoid
from .coordSys import CoordSys
import numpy as np
class CoordTransform:
"""Transformation between two coordinate systems.
Parameters
----------
fromSys : CoordSys
        Origin coordinate system.
    toSys : CoordSys
        Destination coordinate system.
"""
def __init__(self, fromSys, toSys):
self.fromSys = fromSys
self.toSys = toSys
self.dr = fromSys.rot.T@(toSys.origin - fromSys.origin)
self.drot = [email protected]
def __getstate__(self):
return self.fromSys, self.toSys
def __setstate__(self, d):
self.__init__(*d)
def __eq__(self, rhs):
if not isinstance(rhs, CoordTransform): return False
return (
self.fromSys == rhs.fromSys and
self.toSys == rhs.toSys
)
def __ne__(self, rhs):
return not (self == rhs)
def applyForward(self, rv):
"""Apply forward-direction transformation to RayVector.
Parameters
----------
rv : RayVector
Rays to transform.
Returns
-------
transformed : RayVector
Reference to input RayVector transformed in place.
"""
from .trace import applyForwardTransform
return applyForwardTransform(self, rv)
def applyReverse(self, rv):
"""Apply reverse-direction transformation to RayVector.
Parameters
----------
rv : RayVector
Rays to transform.
Returns
-------
transformed : RayVector
Reference to input RayVector transformed in place.
"""
from .trace import applyReverseTransform
return applyReverseTransform(self, rv)
def applyForwardArray(self, x, y, z):
"""Apply forward-direction transformation to ndarrays.
Parameters
----------
x, y, z : ndarray
Coordinates to transform.
Returns
-------
xyz : ndarray
Transformed coordinates.
Notes
-----
Unlike applyForward, this method does not transform in-place, but
returns a newly created ndarray.
"""
        # build an (N, 3) array of points, shift, then rotate; return as (3, N)
        r = np.array([x, y, z], dtype=float).T
        r -= self.dr
        return ([email protected]).T
def applyReverseArray(self, x, y, z):
"""Apply reverse-direction transformation to ndarrays.
Parameters
----------
x, y, z : ndarray
Coordinates to transform.
Returns
-------
xyz : ndarray
Transformed coordinates.
Notes
-----
Unlike applyReverse, this method does not transform in-place, but
returns a newly created ndarray.
"""
r = np.array([x, y, z], dtype=float)
r = (self.drot@r).T
r += self.dr
return r.T
def __repr__(self):
return f"CoordTransform({self.fromSys!r}, {self.toSys!r})"
def __hash__(self):
return hash(("CoordTransform", self.fromSys, self.toSys))
| bsd-2-clause | -2,283,730,100,674,697,700 | 24.705882 | 73 | 0.552141 | false |
tparks5/tor-stem | test/integ/installation.py | 1 | 5352 | """
Tests installation of our library.
"""
import glob
import os
import shutil
import sys
import tarfile
import threading
import unittest
import stem
import stem.util.system
import test.util
from test.util import only_run_once
INSTALL_MISMATCH_MSG = "Running 'python setup.py sdist' doesn't match our git contents in the following way. The manifest in our setup.py may need to be updated...\n\n"
BASE_INSTALL_PATH = '/tmp/stem_test'
DIST_PATH = os.path.join(test.util.STEM_BASE, 'dist')
SETUP_THREAD, INSTALL_FAILURE, INSTALL_PATH, SDIST_FAILURE = None, None, None, None
def setup():
"""
Performs setup our tests will need. This mostly just needs disk iops so it
can happen asynchronously with other tests.
"""
global SETUP_THREAD
def _setup():
global INSTALL_FAILURE, INSTALL_PATH, SDIST_FAILURE
original_cwd = os.getcwd()
try:
os.chdir(test.util.STEM_BASE)
try:
os.chdir(test.util.STEM_BASE)
stem.util.system.call('%s setup.py install --prefix %s' % (sys.executable, BASE_INSTALL_PATH), timeout = 60)
stem.util.system.call('%s setup.py clean --all' % sys.executable, timeout = 60) # tidy up the build directory
site_packages_paths = glob.glob('%s/lib*/*/site-packages' % BASE_INSTALL_PATH)
if len(site_packages_paths) != 1:
raise AssertionError('We should only have a single site-packages directory, but instead had: %s' % site_packages_paths)
INSTALL_PATH = site_packages_paths[0]
except Exception as exc:
INSTALL_FAILURE = AssertionError("Unable to install with 'python setup.py install': %s" % exc)
if not os.path.exists(DIST_PATH):
try:
stem.util.system.call('%s setup.py sdist' % sys.executable, timeout = 60)
except Exception as exc:
SDIST_FAILURE = exc
else:
SDIST_FAILURE = AssertionError("%s already exists, maybe you manually ran 'python setup.py sdist'?" % DIST_PATH)
finally:
os.chdir(original_cwd)
if SETUP_THREAD is None:
SETUP_THREAD = threading.Thread(target = _setup)
SETUP_THREAD.start()
return SETUP_THREAD
def clean():
if os.path.exists(BASE_INSTALL_PATH):
shutil.rmtree(BASE_INSTALL_PATH)
if os.path.exists(DIST_PATH):
shutil.rmtree(DIST_PATH)
def _assert_has_all_files(path):
"""
Check that all the files in the stem directory are present in the
installation. This is a very common gotcha since our setup.py
requires us to remember to add new modules and non-source files.
:raises: **AssertionError** files don't match our content
"""
expected, installed = set(), set()
for root, dirnames, filenames in os.walk(os.path.join(test.util.STEM_BASE, 'stem')):
for filename in filenames:
file_format = filename.split('.')[-1]
if file_format not in test.util.IGNORED_FILE_TYPES:
expected.add(os.path.join(root, filename)[len(test.util.STEM_BASE) + 1:])
for root, dirnames, filenames in os.walk(path):
for filename in filenames:
if not filename.endswith('.pyc') and not filename.endswith('egg-info'):
installed.add(os.path.join(root, filename)[len(path) + 1:])
missing = expected.difference(installed)
extra = installed.difference(expected)
if missing:
raise AssertionError("The following files were expected to be in our installation but weren't. Maybe our setup.py needs to be updated?\n\n%s" % '\n'.join(missing))
elif extra:
raise AssertionError("The following files weren't expected to be in our installation.\n\n%s" % '\n'.join(extra))
class TestInstallation(unittest.TestCase):
@only_run_once
def test_install(self):
"""
Installs with 'python setup.py install' and checks we can use what we
install.
"""
if not INSTALL_PATH:
setup().join()
if INSTALL_FAILURE:
raise INSTALL_FAILURE
self.assertEqual(stem.__version__, stem.util.system.call([sys.executable, '-c', "import sys;sys.path.insert(0, '%s');import stem;print(stem.__version__)" % INSTALL_PATH])[0])
_assert_has_all_files(INSTALL_PATH)
@only_run_once
def test_sdist(self):
"""
Creates a source distribution tarball with 'python setup.py sdist' and
checks that it matches the content of our git repository. This primarily is
meant to test that our MANIFEST.in is up to date.
"""
if not stem.util.system.is_available('git'):
self.skipTest('(git unavailable)')
return
setup().join()
if SDIST_FAILURE:
raise SDIST_FAILURE
git_contents = [line.split()[-1] for line in stem.util.system.call('git ls-tree --full-tree -r HEAD')]
    # tarball has a prefix 'stem-[version]' directory so stripping that out
dist_tar = tarfile.open(os.path.join(DIST_PATH, 'stem-dry-run-%s.tar.gz' % stem.__version__))
tar_contents = ['/'.join(info.name.split('/')[1:]) for info in dist_tar.getmembers() if info.isfile()]
issues = []
for path in git_contents:
if path not in tar_contents and path not in ['.gitignore']:
issues.append(' * %s is missing from our release tarball' % path)
for path in tar_contents:
if path not in git_contents and path not in ['MANIFEST.in', 'PKG-INFO']:
issues.append(" * %s isn't expected in our release tarball" % path)
if issues:
self.fail(INSTALL_MISMATCH_MSG + '\n'.join(issues))
| lgpl-3.0 | 474,349,562,360,036,700 | 31.634146 | 178 | 0.674327 | false |
alq666/sre-kpi | monitors.py | 1 | 1349 | """Summarizes the monitor reports from Datadog into key metrics
"""
import csv
import os
import sqlite3
import sys
# Prepare the sqlite file for queries
# A denormalized version of the csv
try:
os.remove('monitors.sqlite')
except OSError:
pass
conn = sqlite3.connect('monitors.sqlite')
c = conn.cursor()
c.execute("""
create table monitors
(
day date,
hour integer,
source_type text,
alert_type text,
priority integer,
hostname text,
device text,
alert_name text,
user text,
cnt integer
)
""")
# Consume the csv
reader = csv.reader(sys.stdin)
headers = reader.next()
for l in reader:
# yyyy-mm-dd hh24
day, hour = l[headers.index('hour')].split()
src = l[headers.index('source_type_name')]
alty = l[headers.index('alert_type')]
prio = int(l[headers.index('priority')])
host = l[headers.index('host_name')]
dev = l[headers.index('device_name')]
alnm = l[headers.index('alert_name')]
usrs = l[headers.index('user')].split()
cnt = int(l[headers.index('cnt')])
# In the case of multiple users, denormalize
for usr in usrs:
stmt = """insert into monitors
(day, hour, source_type, alert_type, priority, hostname, device, alert_name, user, cnt) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"""
c.execute(stmt, [day, hour, src, alty, prio, host, dev, alnm, usr, cnt])
conn.commit()
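# Illustrative sketch (not part of the original script): the kind of summary
# query the denormalized table above makes possible. The grouping below is an
# example only.
def _example_summary_query():
    import sqlite3
    c = sqlite3.connect('monitors.sqlite').cursor()
    c.execute("""select day, alert_type, sum(cnt)
                 from monitors
                 group by day, alert_type
                 order by day""")
    return c.fetchall()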
| mit | 1,948,408,615,343,619,800 | 23.527273 | 136 | 0.650852 | false |
OzFlux/PyFluxPro | scripts/pfp_gfALT.py | 1 | 64336 | # standard modules
import logging
import os
import traceback
# 3rd party modules
import dateutil
import numpy
import matplotlib.pyplot as plt
import pylab
import scipy
import statsmodels.api as sm
# PFP modules
from scripts import constants as c
from scripts import pfp_io
from scripts import pfp_ts
from scripts import pfp_utils
logger = logging.getLogger("pfp_log")
# functions for GapFillFromAlternate
def GapFillFromAlternate(main_gui, ds4, ds_alt, l4_info, called_by):
'''
This is the gap fill from alternate data GUI.
The alternate data gap fill GUI is displayed separately from the main OzFluxQC GUI.
It consists of text to display the start and end datetime of the file,
two entry boxes for the start and end datetimes of the alternate data gap fill and
a button to insert the gap fill data ("Run") and a button to exit ("Done")
the GUI when we are done. On exit, the OzFluxQC main GUI continues
and eventually writes the gap filled data to file.
'''
# set the default return code
ds4.returncodes["message"] = "normal"
# get the alternate data information
if l4_info[called_by]["info"]["call_mode"] == "interactive":
# put up a plot of the data coverage at L3
gfalternate_plotcoveragelines(ds4, l4_info, called_by)
# call the GapFillFromAlternate GUI
gfalternate_gui(main_gui, ds4, ds_alt, l4_info, called_by)
else:
# ["gui"] settings dictionary done in pfp_gf.ParseL4ControlFile()
gfalternate_run(ds4, ds_alt, l4_info, called_by)
def gfalternate_gui(main_gui, ds4, ds_alt, l4_info, called_by):
# put up the start and end dates
main_gui.l4_ui.ds4 = ds4
main_gui.l4_ui.ds_alt = ds_alt
main_gui.l4_ui.l4_info = l4_info
main_gui.l4_ui.called_by = called_by
main_gui.l4_ui.edit_cfg = main_gui.tabs.tab_dict[main_gui.tabs.tab_index_running]
start_date = ds4.series["DateTime"]["Data"][0].strftime("%Y-%m-%d %H:%M")
end_date = ds4.series["DateTime"]["Data"][-1].strftime("%Y-%m-%d %H:%M")
main_gui.l4_ui.label_DataStartDate_value.setText(start_date)
main_gui.l4_ui.label_DataEndDate_value.setText(end_date)
main_gui.l4_ui.show()
main_gui.l4_ui.exec_()
def gfalternate_autocomplete(ds_tower, ds_alt, l4_info, called_by, mode="verbose"):
"""
Purpose:
Gap fill using alternate data with gaps identified automatically.
Usage:
This routine is usually called after an initial gap filling process, either manual
or automatic monthly or number of days, has been done. It is intended to detect
remaining gaps, figure out the period either side of the gaps needed to get the
minimum number of good points and run the gap filling using alternate data on that
period.
Side effects:
Author: PRI
Date: April 2015
"""
# needs a re-write to improve the logic and simplify the code
# - alt_series_list needs to be ordered by decreasing correlation,
# as currently written the first alternate variable with the numbers
# is chosen
# - gfalternate_main is called from here AFTER we have figured out
# the "best" alternate variable to use but without passing the
# alternate variable name, gfalternate_main then figures out the
# "best" alternate variable by a different method
# - there is duplication of functionality between this routine and
# gfalternate_main
# - there are logical inconsistencies between this routine and
# gfalternate_main
l4a = l4_info[called_by]
mode = "quiet" #"verbose" #"quiet"
if not l4a["gui"]["auto_complete"]:
return
dt_tower = ds_tower.series["DateTime"]["Data"]
nRecs = len(dt_tower)
ts = int(float(ds_tower.globalattributes["time_step"]))
si_tower = pfp_utils.GetDateIndex(dt_tower, l4a["gui"]["startdate"], ts=ts, default=0)
ei_tower = pfp_utils.GetDateIndex(dt_tower, l4a["gui"]["enddate"], ts=ts, default=nRecs-1)
ldt_tower = dt_tower[si_tower: ei_tower + 1]
nRecs_gui = len(ldt_tower)
label_tower_list = l4a["gui"]["series_list"]
for label_tower in label_tower_list:
data_all = {}
label_composite = label_tower + "_composite"
not_enough_points = False
data_composite, _, _ = pfp_utils.GetSeriesasMA(ds_tower, label_composite, si=si_tower, ei=ei_tower)
data_tower, _, _ = pfp_utils.GetSeriesasMA(ds_tower, label_tower, si=si_tower, ei=ei_tower)
mask_composite = numpy.ma.getmaskarray(data_composite)
gapstartend = pfp_utils.contiguous_regions(mask_composite)
if len(gapstartend) == 0:
if mode.lower() != "quiet":
msg = " autocomplete: composite " + label_composite + " has no gaps to fill, skipping ..."
logger.info(msg)
continue
# now check all of the alternate data sources to see if they have anything to contribute
gotdataforgap = [False]*len(gapstartend)
label_output_list = gfalternate_getlabeloutputlist(l4_info, label_tower)
for label_output in label_output_list:
alt_filename = l4a["outputs"][label_output]["file_name"]
ds_alternate = ds_alt[alt_filename]
dt_alternate = ds_alternate.series["DateTime"]["Data"]
si_alternate = pfp_utils.GetDateIndex(dt_alternate, l4a["gui"]["startdate"], ts=ts, default=0)
ei_alternate = pfp_utils.GetDateIndex(dt_alternate, l4a["gui"]["enddate"], ts=ts, default=nRecs-1)
alt_series_list = [item for item in list(ds_alternate.series.keys()) if "_QCFlag" not in item]
alt_series_list = [item for item in alt_series_list if l4a["outputs"][label_output]["target"] in item]
for label_alternate in alt_series_list:
data_alt, _, _ = pfp_utils.GetSeriesasMA(ds_alternate, label_alternate, si=si_alternate, ei=ei_alternate)
data_all[label_alternate] = data_alt
for n, gap in enumerate(gapstartend):
min_points = max([int(((gap[1]-gap[0])+1)*l4a["gui"]["min_percent"]/100),3*l4a["gui"]["nperhr"]])
if numpy.ma.count(data_alt[gap[0]: gap[1]]) >= min_points:
if mode.lower() != "quiet":
msg = " autocomplete: " + label_tower + str(ldt_tower[gap[0]]) + str(ldt_tower[gap[1]]) + " got data to fill gap"
logger.info(msg)
gotdataforgap[n] = True
if numpy.ma.count_masked(data_tower[gap[0]: gap[1]]) == 0:
if mode.lower() != "quiet":
msg = " autocomplete: "+label_tower + str(ldt_tower[gap[0]]) + str(ldt_tower[gap[1]]) + " no gap to fill"
logger.info(msg)
gotdataforgap[n] = False
# finished checking all alternate data sources for data to fill remaining gaps
if mode.lower() != "quiet":
logger.info(" autocomplete: variable %s has %s gaps", label_tower, str(len(gapstartend)))
logger.info(" Auto-complete gap filling for %s (%s gaps)", label_tower, str(gotdataforgap.count(True)))
for n, gap in enumerate(gapstartend):
l4a["gui"]["autoforce"] = False
if not gotdataforgap[n]:
if mode.lower() != "quiet":
gap_startdate = ldt_tower[gap[0]].strftime("%Y-%m-%d %H:%M")
gap_enddate = ldt_tower[gap[1]].strftime("%Y-%m-%d %H:%M")
msg = " autocomplete: no alternate data for " + gap_startdate + " to " + gap_enddate
logger.info(msg)
continue
si = max([0, gap[0]])
ei = min([len(ldt_tower) - 1, gap[1]])
gap_startdate = ldt_tower[si].strftime("%Y-%m-%d %H:%M")
gap_enddate = ldt_tower[ei].strftime("%Y-%m-%d %H:%M")
if mode.lower() != "quiet":
msg = " autocomplete: gap is " + gap_startdate + " to " + gap_enddate
logger.info(msg)
min_points = max([int(((gap[1]-gap[0])+1)*l4a["gui"]["min_percent"]/100), 3*l4a["gui"]["nperhr"]])
num_good_points = 0
num_points_list = list(data_all.keys())
for label in list(data_all.keys()):
if numpy.ma.count(data_all[label][gap[0]:gap[1]]) < min_points:
num_points_list.remove(label)
continue
ngpts = gfalternate_getnumgoodpoints(data_tower[gap[0]:gap[1]], data_all[label][gap[0]:gap[1]])
#ngpts = int(len(data_tower[gap[0]:gap[1]+1])*l4a["gui"]["min_percent"]/100)
num_good_points = max([num_good_points, ngpts])
while num_good_points < min_points:
gap[0] = max(0, gap[0] - l4a["gui"]["nperday"])
gap[1] = min(nRecs_gui - 1, gap[1] + l4a["gui"]["nperday"])
if gap[0] == 0 and gap[1] == nRecs_gui - 1:
msg = " Unable to find enough good points in data set for " + label_tower
logger.warning(msg)
msg = " Replacing missing tower data with unmodified alternate data"
logger.warning(msg)
gap[0] = 0; gap[1] = -1
l4a["gui"]["autoforce"] = True
not_enough_points = True
if not_enough_points: break
min_points = max([int(((gap[1]-gap[0])+1)*l4a["gui"]["min_percent"]/100), 3*l4a["gui"]["nperhr"]])
for label in num_points_list:
ngpts = gfalternate_getnumgoodpoints(data_tower[gap[0]:gap[1]+1], data_all[label][gap[0]:gap[1]+1])
#ngpts = int(len(data_tower[gap[0]:gap[1]+1])*l4a["gui"]["min_percent"]/100)
if ngpts > num_good_points:
num_good_points = ngpts
gapfillperiod_startdate = ldt_tower[gap[0]].strftime("%Y-%m-%d %H:%M")
gapfillperiod_enddate = ldt_tower[gap[1]].strftime("%Y-%m-%d %H:%M")
if mode.lower() != "quiet":
msg = " autocomplete: gap fill period is " + gapfillperiod_startdate + " to " + gapfillperiod_enddate
logger.info(msg)
l4a["run"]["startdate"] = ldt_tower[gap[0]].strftime("%Y-%m-%d %H:%M")
l4a["run"]["enddate"] = ldt_tower[gap[1]].strftime("%Y-%m-%d %H:%M")
gfalternate_main(ds_tower, ds_alt, l4_info, called_by, label_tower_list=[label_tower])
if l4a["info"]["call_mode"] == "interactive":
gfalternate_plotcoveragelines(ds_tower, l4_info, called_by)
if not_enough_points: break
def gfalternate_createdataandstatsdict(ldt_tower, data_tower, attr_tower, l4a):
"""
Purpose:
Creates the data_dict and stat_dict to hold data and statistics during gap filling from
alternate data sources.
Usage:
Side effects:
Called by:
Calls:
Author: PRI
Date: May 2015
"""
data_dict = {}
stat_dict = {}
label_tower = l4a["run"]["label_tower"]
label_composite = l4a["run"]["label_composite"]
data_dict["DateTime"] = {"data": ldt_tower}
data_dict[label_tower] = {"attr": attr_tower,
"output_list": [label_tower, label_composite],
"data": data_tower}
data_dict[label_composite] = {"data": numpy.ma.masked_all_like(data_tower),
"fitcorr": numpy.ma.masked_all_like(data_tower),
"attr": attr_tower}
stat_dict[label_tower] = {"startdate": l4a["run"]["startdate"],
"enddate": l4a["run"]["enddate"]}
stat_dict[label_composite] = {"startdate": l4a["run"]["startdate"],
"enddate":l4a["run"]["enddate"]}
return data_dict, stat_dict
def gfalternate_done(alt_gui):
"""
Purpose:
Finishes up after gap filling from alternate data:
- destroy the GapFillFromAlternate GUI
- plot the summary statistics
- write the summary statistics to an Excel file
Usage:
Side effects:
Author: PRI
Date: August 2014
"""
# plot the summary statistics
#gfalternate_plotsummary(ds,alternate_info)
# close any open plots
if len(plt.get_fignums()) != 0:
for i in plt.get_fignums():
plt.close(i)
# destroy the alternate GUI
alt_gui.close()
# write Excel spreadsheet with fit statistics
pfp_io.xl_write_AlternateStats(alt_gui.ds4, alt_gui.l4_info)
# put the return code into ds.returncodes
alt_gui.ds4.returncodes["message"] = "normal"
def gfalternate_getalternatevaratmaxr(ds_tower, ds_alternate, l4a, mode="verbose"):
"""
Purpose:
Get a list of alternate variable names that are sorted based on correlation
with the tower data.
Usage:
Side effects:
Author: PRI
Date: August 2014
"""
# get a list of alternate variables for this tower variable
label_tower = l4a["run"]["label_tower"]
label_output = l4a["run"]["label_output"]
startdate = l4a["run"]["startdate"]
enddate = l4a["run"]["enddate"]
ts = int(float(ds_tower.globalattributes["time_step"]))
ldt_tower = ds_tower.series["DateTime"]["Data"]
si_tower = pfp_utils.GetDateIndex(ldt_tower, startdate, ts=ts)
ei_tower = pfp_utils.GetDateIndex(ldt_tower, enddate, ts=ts)
data_tower, _, _ = pfp_utils.GetSeriesasMA(ds_tower, label_tower, si=si_tower, ei=ei_tower)
# local pointers to the start and end indices
ldt_alternate = ds_alternate.series["DateTime"]["Data"]
si_alternate = pfp_utils.GetDateIndex(ldt_alternate, startdate, ts=ts)
ei_alternate = pfp_utils.GetDateIndex(ldt_alternate, enddate, ts=ts)
# create an array for the correlations and a list for the alternate variables in order of decreasing correlation
if "usevars" not in l4a["outputs"][label_output]:
altvar_list = gfalternate_getalternatevarlist(ds_alternate, l4a["run"]["label_tower"])
else:
altvar_list = l4a["outputs"][label_output]["usevars"]
r = numpy.zeros(len(altvar_list))
# loop over the variables in the alternate file
for idx, var in enumerate(altvar_list):
# get the alternate data
data_alternate, _, _ = pfp_utils.GetSeriesasMA(ds_alternate, var, si=si_alternate, ei=ei_alternate)
l4a["run"]["gotminpoints_alternate"] = gfalternate_gotminpoints(data_alternate, l4a,
label_tower, mode="quiet")
if numpy.ma.count(data_alternate) > l4a["run"]["min_points"]:
# check the lengths of the tower and alternate data are the same
if len(data_alternate) != len(data_tower):
msg = "gfalternate_getalternatevaratmaxr: alternate data length is " + str(len(data_alternate))
logger.info(msg)
msg = "gfalternate_getalternatevaratmaxr: tower data length is " + str(len(data_tower))
logger.info(msg)
raise ValueError('gfalternate_getalternatevaratmaxr: data_tower and data_alternate lengths differ')
# put the correlation into the r array
rval = numpy.ma.corrcoef(data_tower, data_alternate)[0, 1]
if rval == "nan": rval = float(0)
else:
if mode!="quiet":
msg = " getalternatevaratmaxr: not enough good data in alternate "+var
logger.error(msg)
rval = float(0)
r[idx] = numpy.ma.filled(rval, float(c.missing_value))
# save the correlation array for later plotting
l4a["run"]["r"] = r
# sort the correlation array and the alternate variable list
idx = numpy.flipud(numpy.argsort(r))
altvar_list_sorted = [altvar_list[j] for j in list(idx)]
# return the name of the alternate variable that has the highest correlation with the tower data
if l4a["outputs"][label_output]["source"].lower() == "access":
altvar_list_sorted = altvar_list_sorted[0:1]
return altvar_list_sorted
def gfalternate_getalternatevarlist(ds_alternate, label):
"""
Purpose:
Get a list of alternate variable names from the alternate data structure.
Usage:
Side effects:
Author: PRI
Date: August 2014
"""
alternate_var_list = [item for item in list(ds_alternate.series.keys()) if label in item]
# remove any extraneous Fn labels (alternate has Fn_lw and Fn_sw)
if label=="Fn":
alternate_var_list = [item for item in alternate_var_list if "lw" not in item]
alternate_var_list = [item for item in alternate_var_list if "sw" not in item]
# check the series in the alternate data
if len(alternate_var_list)==0:
logger.error("gfalternate_getalternatevarlist: series %s not in alternate data file", label)
return alternate_var_list
def gfalternate_getdataas2d(odt, data, l4a):
"""
Purpose:
     Return data, a 1D array, as a 2D array with days along axis=0 and time of day
     (hours) along axis=1
Usage:
Side effects:
The 1D array, data, is truncated at the start and end to make whole days.
Author: PRI
Date: August 2014
"""
ts = l4a["info"]["time_step"]
nperday = l4a["gui"]["nperday"]
si = 0
while abs(odt[si].hour + float(odt[si].minute)/60 - float(ts)/60) > c.eps:
si = si + 1
ei = len(odt) - 1
while abs(odt[ei].hour + float(odt[ei].minute)/60) > c.eps:
ei = ei - 1
data_wholedays = data[si: ei + 1]
ndays = len(data_wholedays)//nperday
return numpy.ma.reshape(data_wholedays, [ndays, nperday])
def gfalternate_getdielaverage(data_dict, l4a):
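    # calculate the average diel (hour of day) cycle of the data and fitted series for each output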
odt = data_dict["DateTime"]["data"]
label_tower = l4a["run"]["label_tower"]
output_list = list(data_dict[label_tower]["output_list"])
diel_avg = {}
for label_output in output_list:
diel_avg[label_output] = {}
if "data" in list(data_dict[label_output].keys()):
data_2d = gfalternate_getdataas2d(odt, data_dict[label_output]["data"], l4a)
diel_avg[label_output]["data"] = numpy.ma.average(data_2d, axis=0)
if "fitcorr" in list(data_dict[label_output].keys()):
data_2d = gfalternate_getdataas2d(odt, data_dict[label_output]["fitcorr"], l4a)
diel_avg[label_output]["fitcorr"] = numpy.ma.average(data_2d, axis=0)
return diel_avg
def gfalternate_getfitcorrecteddata(data_dict, stat_dict, l4a):
"""
Wrapper for the various methods of fitting the alternate data to the tower data.
"""
if l4a["run"]["fit_type"].lower() == "ols":
gfalternate_getolscorrecteddata(data_dict, stat_dict, l4a)
if l4a["run"]["fit_type"].lower() == "ols_thru0":
gfalternate_getolscorrecteddata(data_dict, stat_dict, l4a)
if l4a["run"]["fit_type"].lower() == "mrev":
gfalternate_getmrevcorrected(data_dict, stat_dict, l4a)
if l4a["run"]["fit_type"].lower() == "replace":
gfalternate_getreplacedata(data_dict, stat_dict, l4a)
if l4a["run"]["fit_type"].lower() == "rma":
gfalternate_getrmacorrecteddata(data_dict, stat_dict, l4a)
if l4a["run"]["fit_type"].lower() == "odr":
gfalternate_getodrcorrecteddata(data_dict, stat_dict, l4a)
def gfalternate_getlabeloutputlist(l4_info, label_tower):
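    # return the output labels that gap fill this tower variable, in the order given by the merge series source list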
l4a = l4_info["GapFillFromAlternate"]
l4m = l4_info["MergeSeries"]
olist = [item for item in list(l4a["outputs"].keys()) if l4a["outputs"][item]["target"] == label_tower]
for item in list(l4m.keys()):
if label_tower in list(l4m[item].keys()):
mlist = l4m[item][label_tower]["source"]
label_output_list = []
for item in mlist:
if item in olist: label_output_list.append(item)
return label_output_list
def gfalternate_getcorrecteddata(ds_alternate, data_dict, stat_dict, l4a):
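    # decide how the alternate data is corrected depending on whether the tower series has gaps
    # and whether there are enough points common to both series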
label_output = l4a["run"]["label_output"]
label_alternate = l4a["run"]["label_alternate"]
if l4a["run"]["nogaps_tower"]:
# tower data has no gaps
stat_dict[label_output][label_alternate]["nLags"] = int(0)
data_dict[label_output][label_alternate]["lagcorr"] = numpy.ma.copy(data_dict[label_output][label_alternate]["data"])
data_dict[label_output][label_alternate]["fitcorr"] = numpy.ma.copy(data_dict[label_output][label_alternate]["data"])
stat_dict[label_output][label_alternate]["slope"] = float(0)
stat_dict[label_output][label_alternate]["offset"] = float(0)
stat_dict[label_output][label_alternate]["eqnstr"] = "No gaps in tower"
elif not l4a["run"]["nogaps_tower"] and l4a["run"]["gotminpoints_both"]:
# got enough good points common to both data series
gfalternate_getlagcorrecteddata(ds_alternate, data_dict, stat_dict, l4a)
gfalternate_getfitcorrecteddata(data_dict, stat_dict, l4a)
elif not l4a["run"]["nogaps_tower"] and not l4a["run"]["gotminpoints_both"]:
stat_dict[label_output][label_alternate]["nLags"] = int(0)
data_dict[label_output][label_alternate]["lagcorr"] = numpy.ma.copy(data_dict[label_output][label_alternate]["data"])
if l4a["run"]["fit_type"].lower() == "replace":
gfalternate_getfitcorrecteddata(data_dict, stat_dict, l4a)
else:
data_dict[label_output][label_alternate]["fitcorr"] = numpy.ma.masked_all_like(data_dict[label_output][label_alternate]["data"])
stat_dict[label_output][label_alternate]["slope"] = float(0)
stat_dict[label_output][label_alternate]["offset"] = float(0)
stat_dict[label_output][label_alternate]["eqnstr"] = "Too few points"
else:
msg = "getcorrecteddata: Unrecognised combination of logical tests"
logger.error(msg)
def gfalternate_getlagcorrecteddata(ds_alternate, data_dict, stat_dict, l4a):
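    # if lag correction is requested, shift the alternate data by the lag that maximises the
    # cross-correlation with the tower data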
label_tower = l4a["run"]["label_tower"]
label_output = l4a["run"]["label_output"]
label_alternate = l4a["run"]["label_alternate"]
data_tower = data_dict[label_tower]["data"]
data_alternate = data_dict[label_output][label_alternate]["data"]
ldt_alternate = ds_alternate.series["DateTime"]["Data"]
startdate = l4a["run"]["startdate"]
enddate = l4a["run"]["enddate"]
ts = l4a["info"]["time_step"]
si_alternate = pfp_utils.GetDateIndex(ldt_alternate, startdate, ts=ts)
ei_alternate = pfp_utils.GetDateIndex(ldt_alternate, enddate, ts=ts)
if l4a["run"]["lag"].lower() == "yes":
maxlags = l4a["gui"]["max_lags"]
_, corr = pfp_ts.get_laggedcorrelation(data_tower, data_alternate, maxlags)
nLags = numpy.argmax(corr) - l4a["gui"]["max_lags"]
if nLags > l4a["gui"]["nperhr"]*6:
logger.error("getlagcorrecteddata: lag is more than 6 hours for %s", label_tower)
si_alternate = si_alternate - nLags
ei_alternate = ei_alternate - nLags
data_alternate, _, _ = pfp_utils.GetSeriesasMA(ds_alternate, label_alternate, si=si_alternate, ei=ei_alternate, mode="mirror")
data_dict[label_output][label_alternate]["lagcorr"] = data_alternate
stat_dict[label_output][label_alternate]["nLags"] = nLags
else:
data_dict[label_output][label_alternate]["lagcorr"] = numpy.ma.copy(data_dict[label_output][label_alternate]["data"])
stat_dict[label_output][label_alternate]["nLags"] = int(0)
def gfalternate_getmrevcorrected(data_dict, stat_dict, l4a):
"""
Fit alternate data to tower data by replacing means and equalising variance.
"""
odt = data_dict["DateTime"]["data"]
label_tower = l4a["run"]["label_tower"]
label_output = l4a["run"]["label_output"]
label_alternate = l4a["run"]["label_alternate"]
# local copies of the data
data_tower = numpy.ma.copy(data_dict[label_tower]["data"])
data_alternate = numpy.ma.copy(data_dict[label_output][label_alternate]["data"])
data_2d = gfalternate_getdataas2d(odt, data_tower, l4a)
data_twr_hravg = numpy.ma.average(data_2d, axis=0)
data_2d = gfalternate_getdataas2d(odt, data_alternate, l4a)
data_alt_hravg = numpy.ma.average(data_2d, axis=0)
# calculate the means
mean_tower = numpy.ma.mean(data_tower)
mean_alternate = numpy.ma.mean(data_alternate)
# calculate the variances
var_twr_hravg = numpy.ma.var(data_twr_hravg)
var_alt_hravg = numpy.ma.var(data_alt_hravg)
var_ratio = var_twr_hravg/var_alt_hravg
# correct the alternate data
data_dict[label_output][label_alternate]["fitcorr"] = ((data_alternate - mean_alternate)*var_ratio) + mean_tower
stat_dict[label_output][label_alternate]["eqnstr"] = "Mean replaced, equal variance"
stat_dict[label_output][label_alternate]["slope"] = float(0)
stat_dict[label_output][label_alternate]["offset"] = float(0)
def gfalternate_getnumgoodpoints(data_tower, data_alternate):
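    # return the number of elements where both the tower and alternate data are unmasked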
mask = numpy.ma.mask_or(data_tower.mask, data_alternate.mask, copy=True, shrink=False)
return len(numpy.where(mask == False)[0])
def gfalternate_getodrcorrecteddata(data_dict, stat_dict, l4a):
"""
Calculate the orthogonal distance regression fit between 2 1D arrays.
"""
label_tower = l4a["run"]["label_tower"]
label_output = l4a["run"]["label_output"]
label_alternate = l4a["run"]["label_alternate"]
y_in = numpy.ma.copy(data_dict[label_tower]["data"])
x_in = numpy.ma.copy(data_dict[label_output][label_alternate]["lagcorr"])
mask = numpy.ma.mask_or(x_in.mask, y_in.mask, copy=True, shrink=False)
x = numpy.ma.compressed(numpy.ma.array(x_in ,mask=mask, copy=True))
y = numpy.ma.compressed(numpy.ma.array(y_in, mask=mask, copy=True))
# attempt an ODR fit
linear = scipy.odr.Model(pfp_utils.linear_function)
mydata = scipy.odr.Data(x, y)
myodr = scipy.odr.ODR(mydata, linear, beta0=[1, 0])
myoutput = myodr.run()
odr_slope = myoutput.beta[0]
odr_offset = myoutput.beta[1]
data_dict[label_output][label_alternate]["fitcorr"] = odr_slope * x_in + odr_offset
stat_dict[label_output][label_alternate]["slope"] = odr_slope
stat_dict[label_output][label_alternate]["offset"] = odr_offset
stat_dict[label_output][label_alternate]["eqnstr"] = "y = %.3fx + %.3f"%(odr_slope, odr_offset)
def gfalternate_getolscorrecteddata(data_dict, stat_dict, l4a):
"""
Calculate the ordinary least squares fit between 2 1D arrays.
"""
label_tower = l4a["run"]["label_tower"]
label_output = l4a["run"]["label_output"]
label_alternate = l4a["run"]["label_alternate"]
y_in = numpy.ma.copy(data_dict[label_tower]["data"])
x_in = numpy.ma.copy(data_dict[label_output][label_alternate]["lagcorr"])
mask = numpy.ma.mask_or(x_in.mask,y_in.mask, copy=True, shrink=False)
x = numpy.ma.compressed(numpy.ma.array(x_in, mask=mask, copy=True))
y = numpy.ma.compressed(numpy.ma.array(y_in, mask=mask, copy=True))
# attempt an OLS fit
if l4a["run"]["fit_type"].lower() == "ols_thru0":
resols = sm.OLS(y, x).fit()
data_dict[label_output][label_alternate]["fitcorr"] = resols.params[0]*x_in
stat_dict[label_output][label_alternate]["slope"] = resols.params[0]
stat_dict[label_output][label_alternate]["offset"] = float(0)
stat_dict[label_output][label_alternate]["eqnstr"] = "y = %.3fx"%(resols.params[0])
else:
resols = sm.OLS(y, sm.add_constant(x, prepend=False)).fit()
if resols.params.shape[0] == 2:
data_dict[label_output][label_alternate]["fitcorr"] = resols.params[0]*x_in+resols.params[1]
stat_dict[label_output][label_alternate]["slope"] = resols.params[0]
stat_dict[label_output][label_alternate]["offset"] = resols.params[1]
stat_dict[label_output][label_alternate]["eqnstr"] = "y = %.3fx + %.3f"%(resols.params[0], resols.params[1])
else:
data_dict[label_output][label_alternate]["fitcorr"] = numpy.ma.copy(x_in)
stat_dict[label_output][label_alternate]["slope"] = float(0)
stat_dict[label_output][label_alternate]["offset"] = float(0)
stat_dict[label_output][label_alternate]["eqnstr"] = "OLS error, replaced"
def gfalternate_getoutputstatistics(data_dict, stat_dict, l4a):
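    # calculate comparison statistics (fit, r, RMSE, bias etc) between the tower data and the
    # fitted alternate data for each output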
label_tower = l4a["run"]["label_tower"]
output_list = list(data_dict[label_tower]["output_list"])
if label_tower in output_list:
output_list.remove(label_tower)
for label in output_list:
# OLS slope and offset
if l4a["run"]["fit_type"] != "replace":
x_in = numpy.ma.copy(data_dict[label]["fitcorr"])
y_in = numpy.ma.copy(data_dict[label_tower]["data"])
mask = numpy.ma.mask_or(x_in.mask, y_in.mask, copy=True, shrink=False)
x = numpy.ma.compressed(numpy.ma.array(x_in, mask=mask, copy=True))
y = numpy.ma.compressed(numpy.ma.array(y_in, mask=mask, copy=True))
# get the array lengths
nx = len(x)
# attempt an OLS fit
if nx >= l4a["run"]["min_points"]:
if l4a["run"]["fit_type"].lower() == "ols":
resols = sm.OLS(y, sm.add_constant(x, prepend=False)).fit()
if resols.params.shape[0] == 2:
stat_dict[label]["slope"] = resols.params[0]
stat_dict[label]["offset"] = resols.params[1]
stat_dict[label]["eqnstr"] = "y = %.3fx + %.3f"%(resols.params[0], resols.params[1])
else:
stat_dict[label]["slope"] = float(0)
stat_dict[label]["offset"] = float(0)
stat_dict[label]["eqnstr"] = "OLS error"
else:
resols = sm.OLS(y, x).fit()
stat_dict[label]["slope"] = resols.params[0]
stat_dict[label]["offset"] = float(0)
stat_dict[label]["eqnstr"] = "y = %.3fx"%(resols.params[0])
else:
stat_dict[label]["slope"] = float(0)
stat_dict[label]["offset"] = float(0)
stat_dict[label]["eqnstr"] = "Too few points"
else:
stat_dict[label]["slope"] = float(1)
stat_dict[label]["offset"] = float(0)
stat_dict[label]["eqnstr"] = "Data replaced"
# number of points
stat_dict[label]["No. points"] = len(data_dict[label_tower]["data"])
num = numpy.ma.count(data_dict[label]["fitcorr"])-numpy.ma.count(data_dict[label_tower]["data"])
if num < 0: num = 0
stat_dict[label]["No. filled"] = trap_masked_constant(num)
# correlation coefficient
r = numpy.ma.corrcoef(data_dict[label_tower]["data"], data_dict[label]["fitcorr"])
stat_dict[label]["r"] = trap_masked_constant(r[0,1])
# means
avg = numpy.ma.mean(data_dict[label_tower]["data"])
stat_dict[label]["Avg (Tower)"] = trap_masked_constant(avg)
avg = numpy.ma.mean(data_dict[label]["fitcorr"])
stat_dict[label]["Avg (Alt)"] = trap_masked_constant(avg)
# variances
var_tower = numpy.ma.var(data_dict[label_tower]["data"])
stat_dict[label]["Var (Tower)"] = trap_masked_constant(var_tower)
var_alt = numpy.ma.var(data_dict[label]["fitcorr"])
stat_dict[label]["Var (Alt)"] = trap_masked_constant(var_alt)
if var_alt != 0:
stat_dict[label]["Var ratio"] = trap_masked_constant(var_tower/var_alt)
else:
stat_dict[label]["Var ratio"] = float(c.missing_value)
# RMSE & NMSE
error = (data_dict[label_tower]["data"]-data_dict[label]["fitcorr"])
rmse = numpy.ma.sqrt(numpy.ma.average(error*error))
stat_dict[label]["RMSE"] = trap_masked_constant(rmse)
data_range = numpy.ma.max(data_dict[label_tower]["data"])-numpy.ma.min(data_dict[label_tower]["data"])
data_range = numpy.maximum(data_range, 1)
if numpy.ma.is_masked(data_range) or abs(data_range) < c.eps:
nmse = float(c.missing_value)
else:
nmse = rmse/data_range
stat_dict[label]["NMSE"] = trap_masked_constant(nmse)
# bias & fractional bias
stat_dict[label]["Bias"] = trap_masked_constant(numpy.ma.average(error))
norm_error = (error)/(0.5*(data_dict[label_tower]["data"]+data_dict[label]["fitcorr"]))
stat_dict[label]["Frac Bias"] = trap_masked_constant(numpy.ma.average(norm_error))
def gfalternate_getreplacedata(data_dict, stat_dict, l4a):
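    # use the lag corrected alternate data as-is (slope of 1, offset of 0, no fit applied)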
label_output = l4a["run"]["label_output"]
label_alternate = l4a["run"]["label_alternate"]
data_alternate = data_dict[label_output][label_alternate]["lagcorr"]
data_dict[label_output][label_alternate]["fitcorr"] = numpy.ma.copy(data_alternate)
stat_dict[label_output][label_alternate]["slope"] = float(1)
stat_dict[label_output][label_alternate]["offset"] = float(0)
stat_dict[label_output][label_alternate]["eqnstr"] = "No OLS, replaced"
def gfalternate_getrmacorrecteddata(data_dict, stat_dict, l4a):
"""
    Calculate the reduced major axis (RMA) regression fit between 2 1D arrays.
"""
label_tower = l4a["run"]["label_tower"]
label_output = l4a["run"]["label_output"]
label_alternate = l4a["run"]["label_alternate"]
y_in = numpy.ma.copy(data_dict[label_tower]["data"])
x_in = numpy.ma.copy(data_dict[label_output][label_alternate]["lagcorr"])
mask = numpy.ma.mask_or(x_in.mask, y_in.mask, copy=True, shrink=False)
x = numpy.ma.compressed(numpy.ma.array(x_in, mask=mask, copy=True))
y = numpy.ma.compressed(numpy.ma.array(y_in, mask=mask, copy=True))
# attempt an OLS fit
if l4a["run"]["fit_type"].lower() == "ols_thru0":
resols = sm.OLS(y, x).fit()
rma_slope = resols.params[0]/numpy.sqrt(resols.rsquared)
rma_offset = numpy.mean(y) - rma_slope * numpy.mean(x)
data_dict[label_output][label_alternate]["fitcorr"] = rma_slope*x_in
stat_dict[label_output][label_alternate]["slope"] = rma_slope
stat_dict[label_output][label_alternate]["offset"] = float(0)
stat_dict[label_output][label_alternate]["eqnstr"] = "y = %.3fx"%(rma_slope)
else:
resols = sm.OLS(y, sm.add_constant(x, prepend=False)).fit()
if resols.params.shape[0] == 2:
rma_slope = resols.params[0]/numpy.sqrt(resols.rsquared)
rma_offset = numpy.mean(y) - rma_slope * numpy.mean(x)
data_dict[label_output][label_alternate]["fitcorr"] = rma_slope*x_in+rma_offset
stat_dict[label_output][label_alternate]["slope"] = rma_slope
stat_dict[label_output][label_alternate]["offset"] = rma_offset
stat_dict[label_output][label_alternate]["eqnstr"] = "y = %.3fx + %.3f"%(rma_slope, rma_offset)
else:
data_dict[label_output][label_alternate]["fitcorr"] = numpy.ma.copy(x_in)
stat_dict[label_output][label_alternate]["slope"] = float(0)
stat_dict[label_output][label_alternate]["offset"] = float(0)
stat_dict[label_output][label_alternate]["eqnstr"] = "RMA error, replaced"
def gfalternate_gotdataforgaps(data, data_alternate, l4a, mode="verbose"):
"""
Returns true if the alternate series has data where the composite series has gaps.
"""
return_code = True
ind = numpy.where((numpy.ma.getmaskarray(data) == True) & (numpy.ma.getmaskarray(data_alternate) == False))[0]
if len(ind) == 0:
if mode == "verbose":
label_alternate = l4a["run"]["label_alternate"]
msg = " Alternate series " + label_alternate + " has nothing to contribute"
logger.info(msg)
return_code = False
return return_code
def gfalternate_gotnogaps(data, label, mode="verbose"):
"""
Returns true if the data series has no gaps, false if there are gaps
"""
return_code = True
if numpy.ma.count_masked(data) == 0:
if mode == "verbose":
msg = " No gaps in " + label
logger.info(msg)
return_code = True
else:
return_code = False
return return_code
def gfalternate_gotminpoints(data, l4a, label, mode="verbose"):
"""
Returns true if data contains more than the minimum number of points required
or data contains less than the minimum number but the fit type is replace.
"""
return_code = True
if numpy.ma.count(data) < l4a["run"]["min_points"]:
if mode == "verbose":
msg = " Less than " + str(l4a["gui"]["min_percent"]) + " % data in series "
msg = msg + label + ", skipping ..."
logger.info(msg)
msg = "gotminpoints: " + label + " " + str(numpy.ma.count(data))
msg = msg + " " + str(l4a["run"]["min_points"])
logger.info(msg)
return_code = False
return return_code
def gfalternate_gotminpointsboth(data_tower, data_alternate, l4a, label_tower, label_alternate, mode="verbose"):
return_code = True
mask = numpy.ma.mask_or(numpy.ma.getmaskarray(data_tower), numpy.ma.getmaskarray(data_alternate),
copy=True, shrink=False)
if len(numpy.where(mask == False)[0]) < l4a["run"]["min_points"]:
if mode != "quiet":
msg = " Less than " + str(l4a["run"]["min_percent"]) + " % good data common to both series "
logger.info(msg)
msg = "gotminpointsboth: " + label_tower + " " + str(numpy.ma.count(data_tower))
msg = msg + " " + str(l4a["run"]["min_points"])
logger.info(msg)
msg = "gotminpointsboth: " + label_alternate + " " + str(numpy.ma.count(data_alternate))
msg = msg + " " + str(l4a["run"]["min_points"])
logger.info(msg)
return_code = False
return return_code
def gfalternate_initplot(data_dict, l4a, **kwargs):
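    # set up the plot layout dictionary (margins and axis positions) used by gfalternate_plotcomposite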
pd = {"margin_bottom":0.075, "margin_top":0.05, "margin_left":0.075, "margin_right":0.05,
"xy_height":0.25, "xy_width":0.20, "xyts_space":0.05, "xyxy_space":0.05, "ts_width":0.9,
"text_left":0.675, "num_left":0.825, "row_bottom":0.35, "row_space":0.030}
# calculate bottom of the first time series and the height of the time series plots
label_tower = l4a["run"]["label_tower"]
label_composite = l4a["run"]["label_composite"]
output_list = list(data_dict[label_tower]["output_list"])
for item in [label_tower, label_composite]:
if item in output_list: output_list.remove(item)
nts = len(output_list) + 1
pd["ts_bottom"] = pd["margin_bottom"] + pd["xy_height"] + pd["xyts_space"]
pd["ts_height"] = (1.0 - pd["margin_top"] - pd["ts_bottom"])/nts
for key, value in kwargs.items():
pd[key] = value
return pd
def gfalternate_loadoutputdata(ds_tower, data_dict, l4a):
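    # copy the gap filled data into the output and composite series in the data dictionary and
    # into the tower data structure, respecting the overwrite setting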
ldt_tower = ds_tower.series["DateTime"]["Data"]
label_output = l4a["run"]["label_output"]
flag_code = l4a["outputs"][label_output]["flag_code"]
label_composite = l4a["run"]["label_composite"]
label_alternate = l4a["run"]["label_alternate"]
ts = l4a["info"]["time_step"]
si = pfp_utils.GetDateIndex(ldt_tower, l4a["run"]["startdate"], ts=ts, default=0)
ei = pfp_utils.GetDateIndex(ldt_tower, l4a["run"]["enddate"], ts=ts, default=len(ldt_tower))
if l4a["gui"]["overwrite"]:
ind1 = numpy.where(numpy.ma.getmaskarray(data_dict[label_output][label_alternate]["data"]) == False)[0]
else:
ind1 = numpy.where((numpy.ma.getmaskarray(data_dict[label_output]["data"]) == True)&
(numpy.ma.getmaskarray(data_dict[label_output][label_alternate]["data"]) == False))[0]
data_dict[label_output]["data"][ind1] = data_dict[label_output][label_alternate]["data"][ind1]
if l4a["gui"]["overwrite"]:
ind2 = numpy.where(numpy.ma.getmaskarray(data_dict[label_output][label_alternate]["fitcorr"]) == False)[0]
else:
ind2 = numpy.where((numpy.ma.getmaskarray(data_dict[label_output]["fitcorr"]) == True)&
(numpy.ma.getmaskarray(data_dict[label_output][label_alternate]["fitcorr"]) == False))[0]
data_dict[label_output]["fitcorr"][ind2] = data_dict[label_output][label_alternate]["fitcorr"][ind2]
if l4a["gui"]["overwrite"]:
ind3 = numpy.where(numpy.ma.getmaskarray(data_dict[label_output][label_alternate]["data"]) == False)[0]
else:
ind3 = numpy.where((numpy.ma.getmaskarray(data_dict[label_composite]["data"]) == True)&
(numpy.ma.getmaskarray(data_dict[label_output][label_alternate]["data"]) == False))[0]
data_dict[label_composite]["data"][ind3] = data_dict[label_output][label_alternate]["data"][ind3]
if l4a["gui"]["overwrite"]:
ind4 = numpy.where(numpy.ma.getmaskarray(data_dict[label_output][label_alternate]["fitcorr"]) == False)[0]
else:
ind4 = numpy.where((numpy.ma.getmaskarray(data_dict[label_composite]["fitcorr"]) == True)&
(numpy.ma.getmaskarray(data_dict[label_output][label_alternate]["fitcorr"]) == False))[0]
data_dict[label_composite]["fitcorr"][ind4] = data_dict[label_output][label_alternate]["fitcorr"][ind4]
if l4a["gui"]["overwrite"]:
ind5 = numpy.where(numpy.ma.getmaskarray(data_dict[label_output][label_alternate]["fitcorr"]) == False)[0]
else:
ind5 = numpy.where((abs(ds_tower.series[label_composite]["Data"][si:ei+1]-float(c.missing_value)) < c.eps)&
(numpy.ma.getmaskarray(data_dict[label_output][label_alternate]["fitcorr"]) == False))[0]
ds_tower.series[label_composite]["Data"][si:ei+1][ind5] = numpy.ma.filled(data_dict[label_output][label_alternate]["fitcorr"][ind5], c.missing_value)
ds_tower.series[label_composite]["Flag"][si:ei+1][ind5] = numpy.int32(flag_code)
if l4a["gui"]["overwrite"]:
ind6 = numpy.where(numpy.ma.getmaskarray(data_dict[label_output][label_alternate]["fitcorr"]) == False)[0]
else:
ind6 = numpy.where((abs(ds_tower.series[label_output]["Data"][si:ei+1]-float(c.missing_value)) < c.eps)&
(numpy.ma.getmaskarray(data_dict[label_output][label_alternate]["fitcorr"]) == False))[0]
ds_tower.series[label_output]["Data"][si:ei+1][ind6] = numpy.ma.filled(data_dict[label_output][label_alternate]["fitcorr"][ind6], c.missing_value)
ds_tower.series[label_output]["Flag"][si:ei+1][ind6] = numpy.int32(flag_code)
def gfalternate_main(ds_tower, ds_alt, l4_info, called_by, label_tower_list=None):
"""
This is the main routine for using alternate data to gap fill drivers.
"""
l4a = l4_info[called_by]
mode = "quiet" #"quiet" #"verbose"
ts = int(float(ds_tower.globalattributes["time_step"]))
startdate = l4a["run"]["startdate"]
enddate = l4a["run"]["enddate"]
logger.info(" Gap fill with alternate: " + startdate + " to " + enddate)
# get local pointer to the datetime series
dt_tower = ds_tower.series["DateTime"]["Data"]
si_tower = pfp_utils.GetDateIndex(dt_tower, startdate, ts=ts, default=0)
ei_tower = pfp_utils.GetDateIndex(dt_tower, enddate, ts=ts, default=len(dt_tower)-1)
ldt_tower = dt_tower[si_tower:ei_tower + 1]
# now loop over the variables to be gap filled using the alternate data
if label_tower_list == None:
label_tower_list = l4a["gui"]["series_list"]
for label_tower in label_tower_list:
l4a["run"]["label_tower"] = label_tower
label_composite = label_tower + "_composite"
l4a["run"]["label_composite"] = label_composite
# read the tower data and check for gaps
data_tower, _, attr_tower = pfp_utils.GetSeriesasMA(ds_tower, label_tower, si=si_tower, ei=ei_tower)
l4a["run"]["min_points"] = int(len(data_tower)*l4a["gui"]["min_percent"]/100)
# check to see if we have any gaps to fill
l4a["run"]["nogaps_tower"] = gfalternate_gotnogaps(data_tower, label_tower, mode=mode)
# check to see if we have more than the minimum number of points
l4a["run"]["gotminpoints_tower"] = gfalternate_gotminpoints(data_tower, l4a, label_tower, mode=mode)
# initialise a dictionary to hold the data
data_dict, stat_dict = gfalternate_createdataandstatsdict(ldt_tower, data_tower, attr_tower, l4a)
# get a list of the output names for this tower series
label_output_list = gfalternate_getlabeloutputlist(l4_info, label_tower)
# loop over the outputs for this tower series
for label_output in label_output_list:
l4a["run"]["label_output"] = label_output
l4a["run"]["alternate_name"] = l4a["outputs"][label_output]["alternate_name"]
# update the alternate_info dictionary
gfalternate_update_alternate_info(l4a)
# update the dictionaries
stat_dict[label_output] = {"startdate": startdate,
"enddate": enddate}
data_dict[label_output] = {"data": numpy.ma.masked_all_like(data_tower),
"fitcorr": numpy.ma.masked_all_like(data_tower),
"attr": attr_tower,
"source": l4a["outputs"][label_output]["source"]}
# get a local pointer to the alternate data structure
ds_alternate = ds_alt[l4a["outputs"][label_output]["file_name"]]
ldt_alternate = ds_alternate.series["DateTime"]["Data"]
            # start and end indices for this time range in the alternate data
si_alternate = pfp_utils.GetDateIndex(ldt_alternate, startdate, ts=ts, default=0)
ei_alternate = pfp_utils.GetDateIndex(ldt_alternate, enddate, ts=ts, default=len(ldt_alternate)-1)
# get the alternate series that has the highest correlation with the tower data
label_alternate_list = gfalternate_getalternatevaratmaxr(ds_tower, ds_alternate, l4a, mode=mode)
# loop over alternate variables
for label_alternate in label_alternate_list:
l4a["run"]["label_alternate"] = label_alternate
# get the raw alternate data
data_alternate, _, attr_alternate = pfp_utils.GetSeriesasMA(ds_alternate, label_alternate, si=si_alternate, ei=ei_alternate)
# check this alternate variable to see if there are enough points
l4a["run"]["gotminpoints_alternate"] = gfalternate_gotminpoints(data_alternate, l4a, label_alternate, mode=mode)
l4a["run"]["gotdataforgaps_alternate"] = gfalternate_gotdataforgaps(data_dict[label_output]["data"], data_alternate, l4a, mode=mode)
l4a["run"]["gotminpoints_both"] = gfalternate_gotminpointsboth(data_tower, data_alternate, l4a, label_tower, label_alternate, mode=mode)
                # update the data and stat dictionaries
stat_dict[label_output][label_alternate] = {"startdate": startdate,
"enddate": enddate}
if label_output not in data_dict[label_tower]["output_list"]:
data_dict[label_tower]["output_list"].append(label_output)
data_dict[label_output][label_alternate] = {"data": data_alternate,
"attr": attr_alternate}
gfalternate_getcorrecteddata(ds_alternate, data_dict, stat_dict, l4a)
gfalternate_loadoutputdata(ds_tower, data_dict, l4a)
# check to see if we have alternate data for this whole period, if so there is no reason to continue
ind_tower = numpy.where(abs(ds_tower.series[label_output]["Data"][si_tower:ei_tower+1]-float(c.missing_value)) < c.eps)[0]
if len(ind_tower) == 0:
break
# we have completed the loop over the alternate data for this output
# now do the statistics, diurnal average and daily averages for this output
gfalternate_getoutputstatistics(data_dict, stat_dict, l4a)
for label_output in label_output_list:
for result in l4a["outputs"][label_output]["results"]:
l4a["outputs"][label_output]["results"][result].append(stat_dict[label_output][result])
if l4a["run"]["nogaps_tower"]:
if l4a["gui"]["show_all"]:
pass
else:
continue
# plot the gap filled data
pd = gfalternate_initplot(data_dict, l4a)
diel_avg = gfalternate_getdielaverage(data_dict, l4a)
# reserve figure number 0 for the coverage lines/progress plot
gfalternate_plotcomposite(data_dict, stat_dict, diel_avg, l4a, pd)
def gfalternate_plotcomposite(data_dict, stat_dict, diel_avg, l4a, pd):
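    # plot the scatter, diel average and time series comparison of the tower and alternate data
    # and save a hard copy of the figure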
# set up some local pointers
label_tower = l4a["run"]["label_tower"]
label_composite = l4a["run"]["label_composite"]
time_step = l4a["info"]["time_step"]
points_test = numpy.ma.count(data_dict[label_tower]["data"]) < l4a["run"]["min_points"]
fit_test = l4a["run"]["fit_type"] != "replace"
if points_test and fit_test: return
# turn on interactive plotting
if l4a["gui"]["show_plots"]:
plt.ion()
else:
plt.ioff()
# create the figure canvas or re-use existing
if plt.fignum_exists(1):
fig = plt.figure(1)
plt.clf()
else:
fig = plt.figure(1, figsize=(13, 8))
fig.canvas.set_window_title(label_tower)
# get the plot title string
title = l4a["info"]["site_name"] + " : Comparison of tower and alternate data for " + label_tower
plt.figtext(0.5, 0.96, title, ha='center', size=16)
# bottom row of XY plots: scatter plot of 30 minute data
rect1 = [0.10, pd["margin_bottom"], pd["xy_width"], pd["xy_height"]]
xyscatter = plt.axes(rect1)
xyscatter.set_ylabel("Tower (" + data_dict[label_tower]["attr"]["units"] + ")")
xyscatter.set_xlabel("Alt (" + data_dict[label_composite]["attr"]["units"] + ")")
text = str(time_step) + " minutes"
xyscatter.text(0.6, 0.075, text, fontsize=10, horizontalalignment="left",
transform=xyscatter.transAxes)
xyscatter.plot(data_dict[label_composite]["fitcorr"], data_dict[label_tower]["data"], 'b.')
    # trap cases where all fitted, corrected data is masked
mamin = numpy.ma.min(data_dict[label_composite]["fitcorr"])
mamax = numpy.ma.max(data_dict[label_composite]["fitcorr"])
if not numpy.ma.is_masked(mamin) and not numpy.ma.is_masked(mamax):
xfit = numpy.array([mamin,mamax])
yfit = xfit*stat_dict[label_composite]["slope"] + stat_dict[label_composite]["offset"]
xyscatter.plot(xfit, yfit, 'g--', linewidth=3)
xyscatter.text(0.5, 0.9, stat_dict[label_composite]["eqnstr"], fontsize=8,
horizontalalignment='center', transform=xyscatter.transAxes, color='green')
# bottom row of XY plots: scatter plot of diurnal averages
ind = numpy.arange(l4a["gui"]["nperday"])/float(l4a["gui"]["nperhr"])
rect2 = [0.40, pd["margin_bottom"], pd["xy_width"], pd["xy_height"]]
diel_axes = plt.axes(rect2)
diel_axes.plot(ind, diel_avg[label_composite]["fitcorr"], 'g-', label="Alt (fit)")
diel_axes.plot(ind, diel_avg[label_composite]["data"], 'b-', label="Alt")
diel_axes.set_ylabel(label_tower + " (" + data_dict[label_tower]["attr"]["units"] + ")")
diel_axes.set_xlim(0, 24)
diel_axes.xaxis.set_ticks([0, 6, 12, 18, 24])
diel_axes.set_xlabel('Hour')
diel_axes.plot(ind, diel_avg[label_tower]["data"], 'ro', label="Tower")
diel_axes.legend(loc='upper right', frameon=False, prop={'size':8})
# top row: time series
ts_axes = []
rect3 = [pd["margin_left"], pd["ts_bottom"], pd["ts_width"], pd["ts_height"]]
ts_axes.append(plt.axes(rect3))
ts_axes[0].plot(data_dict["DateTime"]["data"], data_dict[label_tower]["data"], 'ro', label="Tower")
ts_axes[0].plot(data_dict["DateTime"]["data"], data_dict[label_composite]["fitcorr"], 'g-', label="Alt (fitted)")
ts_axes[0].set_xlim(data_dict["DateTime"]["data"][0], data_dict["DateTime"]["data"][-1])
ts_axes[0].legend(loc='upper right', frameon=False, prop={'size':10})
ts_axes[0].set_ylabel(label_tower + " (" + data_dict[label_tower]["attr"]["units"] + ")")
output_list = list(data_dict[label_tower]["output_list"])
for item in [label_tower, label_composite]:
if item in output_list: output_list.remove(item)
for n, label_output in enumerate(output_list):
n = n + 1
source = data_dict[label_output]["source"]
this_bottom = pd["ts_bottom"] + n*pd["ts_height"]
rect = [pd["margin_left"], this_bottom, pd["ts_width"], pd["ts_height"]]
ts_axes.append(plt.axes(rect, sharex=ts_axes[0]))
ts_axes[n].plot(data_dict["DateTime"]["data"], data_dict[label_output]["data"], 'b-', label=source)
plt.setp(ts_axes[n].get_xticklabels(), visible=False)
ts_axes[n].legend(loc='upper right', frameon=False, prop={'size':10})
ts_axes[n].set_ylabel(label_tower + " (" + data_dict[label_tower]["attr"]["units"] + ")")
# write the comparison statistics
stats_list = ["Var (Alt)", "Var (Tower)", "RMSE", "Bias", "r", "No. filled", "No. points"]
for n, item in enumerate(stats_list):
row_posn = pd["margin_bottom"] + n*pd["row_space"]
plt.figtext(pd["text_left"], row_posn, item)
plt.figtext(pd["num_left"], row_posn, '%.4g'%(stat_dict[label_composite][item]))
# save a hard copy of the plot
sdt = data_dict["DateTime"]["data"][0].strftime("%Y%m%d")
edt = data_dict["DateTime"]["data"][-1].strftime("%Y%m%d")
figname = l4a["info"]["site_name"].replace(" ", "") + "_Alternate_" + label_tower
figname = figname + "_" + sdt + "_" + edt + '.png'
figname = os.path.join(l4a["info"]["plot_path"], figname)
fig.savefig(figname, format='png')
# draw the plot on the screen
if l4a["gui"]["show_plots"]:
plt.draw()
pfp_utils.mypause(1)
plt.ioff()
else:
plt.ion()
def gfalternate_plotcoveragelines(ds_tower, l4_info, called_by):
"""
Purpose:
Plot a line representing the coverage of variables being gap filled.
Usage:
Author: PRI
Date: Back in the day
"""
# local pointer to l4_info["GapFillFromAlternate"]
l4a = l4_info[called_by]
# local pointer to datetime
ldt = ds_tower.series["DateTime"]["Data"]
# get the site name and the start and end date
site_name = ds_tower.globalattributes["site_name"]
start_date = ldt[0].strftime("%Y-%m-%d")
end_date = ldt[-1].strftime("%Y-%m-%d")
# list of targets to plot
targets = [l4a["outputs"][output]["target"] for output in list(l4a["outputs"].keys())]
targets = list(set(targets))
ylabel_list = [""] + targets + [""]
ylabel_right_list = [""]
colors = ["blue", "red", "green", "yellow", "magenta", "black", "cyan", "brown"]
xsize = 15.0
ysize = max([len(targets)*0.2, 1])
if l4a["gui"]["show_plots"]:
plt.ion()
else:
plt.ioff()
if plt.fignum_exists(0):
fig = plt.figure(0)
plt.clf()
ax1 = plt.subplot(111)
else:
fig = plt.figure(0, figsize=(xsize, ysize))
ax1 = plt.subplot(111)
title = "Coverage: " + site_name + " " + start_date + " to " + end_date
fig.canvas.set_window_title(title)
plt.ylim([0, len(targets) + 1])
plt.xlim([ldt[0], ldt[-1]])
for label, n in zip(targets, list(range(1, len(targets) + 1))):
data_series, _, _ = pfp_utils.GetSeriesasMA(ds_tower, label)
percent = 100*numpy.ma.count(data_series)/len(data_series)
ylabel_right_list.append("{0:.0f}%".format(percent))
ind_series = numpy.ma.ones(len(data_series))*float(n)
ind_series = numpy.ma.masked_where(numpy.ma.getmaskarray(data_series) == True, ind_series)
plt.plot(ldt, ind_series, color=colors[numpy.mod(n, 8)], linewidth=1)
if label+"_composite" in list(ds_tower.series.keys()):
data_composite, _, _ = pfp_utils.GetSeriesasMA(ds_tower, label+"_composite")
ind_composite = numpy.ma.ones(len(data_composite))*float(n)
ind_composite = numpy.ma.masked_where(numpy.ma.getmaskarray(data_composite) == True, ind_composite)
plt.plot(ldt, ind_composite, color=colors[numpy.mod(n,8)], linewidth=4)
ylabel_posn = list(range(0, len(targets)+2))
pylab.yticks(ylabel_posn, ylabel_list)
ylabel_right_list.append("")
ax2 = ax1.twinx()
pylab.yticks(ylabel_posn, ylabel_right_list)
fig.tight_layout()
if l4a["gui"]["show_plots"]:
plt.draw()
pfp_utils.mypause(1)
plt.ioff()
else:
plt.ion()
def gfalternate_quit(alt_gui):
""" Quit the GapFillFromAlternate GUI."""
# put the return code into ds.returncodes
alt_gui.ds4.returncodes["message"] = "quit"
alt_gui.ds4.returncodes["value"] = 1
# destroy the alternate GUI
alt_gui.close()
def gfalternate_run_interactive(alt_gui):
"""
Purpose:
Gets settings from the GapFillFromAlternate GUI and loads them
into the l4_info["gui"] dictionary
Usage:
Called when the "Run" button is clicked.
Side effects:
Loads settings into the l4_info["gui"] dictionary.
Author: PRI
Date: Re-written July 2019
"""
# local pointers to useful things
try:
ds_tower = alt_gui.ds4
ds_alt = alt_gui.ds_alt
called_by = alt_gui.called_by
l4_info = alt_gui.l4_info
l4a = l4_info[called_by]
# populate the l4_info["gui"] dictionary with things that will be useful
ts = int(float(ds_tower.globalattributes["time_step"]))
l4a["gui"]["nperhr"] = int(float(60)/ts + 0.5)
l4a["gui"]["nperday"] = int(float(24)*l4a["gui"]["nperhr"] + 0.5)
l4a["gui"]["max_lags"] = int(float(12)*l4a["gui"]["nperhr"] + 0.5)
# window period length
if str(alt_gui.radioButtons.checkedButton().text()) == "Manual":
l4a["gui"]["period_option"] = 1
elif str(alt_gui.radioButtons.checkedButton().text()) == "Months":
l4a["gui"]["period_option"] = 2
l4a["gui"]["number_months"] = int(alt_gui.lineEdit_NumberMonths.text())
elif str(alt_gui.radioButtons.checkedButton().text()) == "Days":
l4a["gui"]["period_option"] = 3
l4a["gui"]["number_days"] = int(alt_gui.lineEdit_NumberDays.text())
# plot settings
l4a["gui"]["overwrite"] = alt_gui.checkBox_Overwrite.isChecked()
l4a["gui"]["show_plots"] = alt_gui.checkBox_ShowPlots.isChecked()
l4a["gui"]["show_all"] = alt_gui.checkBox_PlotAll.isChecked()
# auto-complete settings
l4a["gui"]["auto_complete"] = alt_gui.checkBox_AutoComplete.isChecked()
l4a["gui"]["autoforce"] = False
# minimum percentage of good data required
l4a["gui"]["min_percent"] = max(int(str(alt_gui.lineEdit_MinPercent.text())),1)
# get the start and end datetimes entered in the alternate GUI
if len(str(alt_gui.lineEdit_StartDate.text())) != 0:
l4a["gui"]["startdate"] = str(alt_gui.lineEdit_StartDate.text())
else:
l4a["gui"]["startdate"] = l4a["info"]["startdate"]
if len(str(alt_gui.lineEdit_EndDate.text())) != 0:
l4a["gui"]["enddate"] = str(alt_gui.lineEdit_EndDate.text())
else:
l4a["gui"]["enddate"] = l4a["info"]["enddate"]
# now do the work
gfalternate_run(ds_tower, ds_alt, l4_info, called_by)
except Exception:
msg = " Error running L4, see below for details ..."
logger.error(msg)
error_message = traceback.format_exc()
logger.error(error_message)
return
def gfalternate_run(ds_tower, ds_alt, l4_info, called_by):
"""
Purpose:
Run the main routine for gap filling meteorological data.
Usage:
Side effects:
Author: PRI
Date: Re-written in August 2019
"""
l4a = l4_info[called_by]
# get a list of target variables
series_list = [l4a["outputs"][item]["target"] for item in list(l4a["outputs"].keys())]
l4a["gui"]["series_list"] = sorted(list(set(series_list)))
logger.info(" Gap filling %s using alternate data", l4a["gui"]["series_list"])
# initialise the l4_info["run"] dictionary
l4a["run"] = {"startdate": l4a["gui"]["startdate"],
"enddate": l4a["gui"]["enddate"]}
# run the main gap filling routine depending on window period
if l4a["gui"]["period_option"] == 1:
# manual run, window specified in GUI start and end datetime boxes
logger.info(" Starting manual run ...")
gfalternate_main(ds_tower, ds_alt, l4_info, called_by)
if l4a["info"]["call_mode"] == "interactive":
gfalternate_plotcoveragelines(ds_tower, l4_info, called_by)
logger.info(" Finished manual run ...")
elif l4a["gui"]["period_option"] == 2:
# automated run with window length in months
logger.info(" Starting auto (months) run ...")
startdate = dateutil.parser.parse(l4a["run"]["startdate"])
enddate = startdate + dateutil.relativedelta.relativedelta(months=l4a["gui"]["number_months"])
enddate = min([dateutil.parser.parse(l4a["info"]["enddate"]), enddate])
l4a["run"]["enddate"] = enddate.strftime("%Y-%m-%d %H:%M")
while startdate < enddate:
gfalternate_main(ds_tower, ds_alt, l4_info, called_by)
if l4a["info"]["call_mode"] == "interactive":
gfalternate_plotcoveragelines(ds_tower, l4_info, called_by)
startdate = enddate
l4a["run"]["startdate"] = startdate.strftime("%Y-%m-%d %H:%M")
enddate = startdate + dateutil.relativedelta.relativedelta(months=l4a["gui"]["number_months"])
enddate = min([dateutil.parser.parse(l4a["info"]["enddate"]), enddate])
l4a["run"]["enddate"] = enddate.strftime("%Y-%m-%d %H:%M")
# fill long gaps with autocomplete
gfalternate_autocomplete(ds_tower, ds_alt, l4_info, called_by)
logger.info(" Finished auto (months) run ...")
elif l4a["gui"]["period_option"] == 3:
# automated run with window length in days
logger.info(" Starting auto (days) run ...")
# get the start datetime entered in the alternate GUI
startdate = dateutil.parser.parse(l4a["run"]["startdate"])
# get the end datetime from the start datetime
enddate = startdate + dateutil.relativedelta.relativedelta(days=l4a["gui"]["number_days"])
# clip end datetime to last datetime in tower file
enddate = min([dateutil.parser.parse(l4a["info"]["enddate"]), enddate])
l4a["run"]["enddate"] = enddate.strftime("%Y-%m-%d %H:%M")
while startdate < enddate:
gfalternate_main(ds_tower, ds_alt, l4_info, called_by)
if l4a["info"]["call_mode"] == "interactive":
gfalternate_plotcoveragelines(ds_tower, l4_info, called_by)
startdate = enddate
l4a["run"]["startdate"] = startdate.strftime("%Y-%m-%d %H:%M")
enddate = startdate + dateutil.relativedelta.relativedelta(days=l4a["gui"]["number_days"])
enddate = min([dateutil.parser.parse(l4a["info"]["enddate"]), enddate])
l4a["run"]["enddate"] = enddate.strftime("%Y-%m-%d %H:%M")
gfalternate_autocomplete(ds_tower, ds_alt, l4_info, called_by)
logger.info(" Finished auto (days) run ...")
else:
logger.error("GapFillFromAlternate: unrecognised period option")
def gfalternate_update_alternate_info(l4a):
"""Update the l4_info dictionary."""
label_output = l4a["run"]["label_output"]
l4a["run"]["fit_type"] = l4a["outputs"][label_output]["fit_type"]
l4a["run"]["lag"] = l4a["outputs"][label_output]["lag"]
# autoforce is set true in gfalternate_autocomplete if there is not enough good points
# in the tower data for the whole time series, in this case we will use the alternate
# data "as is" by forcing a "replace" with no lag correction.
if l4a["gui"]["autoforce"]:
l4a["run"]["min_points"] = 0
l4a["run"]["fit_type"] = "replace"
l4a["run"]["lag"] = "no"
def trap_masked_constant(num):
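    # replace a masked constant with the missing data value so it can be written out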
if numpy.ma.is_masked(num):
num = float(c.missing_value)
return num
| bsd-3-clause | 3,962,994,394,770,090,000 | 52.792642 | 153 | 0.613529 | false |
sravel/scripts | local/make_ldhatfiles.py | 1 | 14611 | #!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# @package make_ldhatfiles.py
# @author Lea Picard, Sebastien RAVEL
"""
The make_ldhatfiles script
==========================
:author: Sebastien Ravel, Lea Picard
:contact: [email protected]
:date: 08/07/2016
:version: 0.1
Script description
------------------
This Program takes a tab file and returns LDhat .sites and .locs files
Example
-------
>>> make_ldhatfiles.py -wd outPath -t SNP_table.tab -st chomosomeSize.txt
Help Programm
-------------
optional arguments:
- \-h, --help
show this help message and exit
- \-v, --version
display make_ldhatfiles.py version number and exit
Input mandatory infos for running:
- \-wd <path>, --workdir <path>
Path of the directory where files will be created
- \-t <filename>, --tab <filename>
Name of tab file in (input whole path if file is not
in the current working directory
- \-st <filename>, --size_tab <filename>
Name of a tab file containing the identifiers of the
subunits of division (chromosome/scaffold/contig) and
their total size. If some scaffolds are not wanted,
comment the line.
Input infos for running with default values:
- \-dt <int>, --datatype <int>
1 for haplotypic data (default), 2 for genotypic
- \-m <char>, --methode <char>
rhomap or interval (default)
- \-f <char>, --flag <char>
L for CO (default), C pour gene conversion
"""
##################################################
## Modules
##################################################
## Python modules
from sys import version_info, version
try:
assert version_info <= (3,0)
except AssertionError:
print("You are using version %s but version 2.7.x is require for this script!\n" % version.split(" ")[0])
exit(1)
#Import MODULES_SEB
import sys, os
current_dir = os.path.dirname(os.path.abspath(__file__))+"/"
sys.path.insert(1,current_dir+'../modules/')
from MODULES_SEB import directory, relativeToAbsolutePath, dictDict2txt, existant_file
import argparse
try:
import egglib3 as egglib # USE EGGLIB_3
if int(egglib.version.split(".")[0]) != 3 :
print("You are using not use egglib V3!\n" )
exit(1)
except ImportError:
print("You are not able to load egglib V3!\n" )
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
##################################################
## Variables Globales
version = "0.1"
VERSION_DATE = '15/03/2016'
completeLDhatPATH = "completeLDhat"
#intervalLDhatPATH = "interval"
##intervalLDhatPATH = "rhomap"
statLDhatPATH = "statLDhat"
##################################################
## Functions
def build_sites(paramfilename, dataType):
"""fonction adaptée du script build_concensusV2.py pour fichier .sites LDHAT"""
fichier = open(paramfilename,"r")
outfile = open("temps1.tab", "w")
head = fichier.readline()
outfile.write(head.replace("CHROM\t",""))
dictListPositions = {}
for ligne in fichier:
ligne = ligne.rstrip()
lligne = ligne.split("\t")
nameChro = lligne[0]
posSNP = lligne[1]
		dictListPositions.setdefault(nameChro, []).append(posSNP)
#if nameChro not in dictListPositions.keys():
#dictListPositions[nameChro] = [posSNP]
#else:
#dictListPositions[nameChro].append(posSNP)
ligneoutput="\t".join(lligne[1:2])
ref=lligne[2]
souchestr="\t".join(lligne[3:]).replace("R",ref)
ligneoutput+="\t"+lligne[2]+"\t"+souchestr+"\n"
outfile.write(ligneoutput)
outfile.close()
############################################################################################
	# transpose the matrix so each strain becomes a row
############################################################################################
fichier = open("temps1.tab","r")
outfile = open("temps.tab", "w")
A = []
for ligne in fichier:
tabligne = ligne.rstrip().split("\t")
A.append(tabligne)
#print(A)
for ligne in list(zip(*A)):
outfile.write("\t".join(ligne)+"\n")
outfile.close()
############################################################################################
	# Build per-strain consensus sequences and write the .sites and .fasta files
############################################################################################
	# open the transposed temporary tab file
genometab = "temps.tab"
if paramfilename.count(".") > 1:
outputfilenameSite = ".".join(paramfilename.split(".")[0:-1])+".sites"
outputfilenameFasta = ".".join(paramfilename.split(".")[0:-1])+".fasta"
else:
outputfilenameSite = paramfilename.split(".")[0]+".sites"
outputfilenameFasta = paramfilename.split(".")[0]+".fasta"
outputfileSite = open(outputfilenameSite, 'w')
outputfileFasta = open(outputfilenameFasta, 'w')
	# use the tab file to build one sequence per strain
TabFile = open(genometab, "r")
dictseq={}
head=TabFile.readline()
orderlist=[]
for tabline in TabFile:
ltab=tabline.rstrip().split("\t")
souche=ltab[0]
if souche not in dictseq.keys():
dictseq[souche]=""
orderlist.append(souche)
#get nb of sequences to add to file header
nbInd = len(orderlist)
seqreftab=ltab[1:]
dictseq[souche]="".join(seqreftab)
#get nb of SNPs in fasta sequence
nbSNP = len(dictseq[souche])
outputfileSite.write("%i %i %i\n" %(nbInd, nbSNP, dataType))
for souche in orderlist:
IDname = souche
seq = dictseq[souche]
record = SeqRecord(Seq(seq),id=IDname,name=IDname, description="")
SeqIO.write(record,outputfileSite, "fasta")
SeqIO.write(record,outputfileFasta, "fasta")
outputfileSite.close()
outputfileFasta.close()
####### Remove temps file
os.remove(genometab)
os.remove("temps1.tab")
return nbSNP, dictListPositions[nameChro], str(outputfileFasta.name), nbInd
############
## Main code
############
if __name__ == "__main__":
##
# parameters recovery
##
parser = argparse.ArgumentParser(prog='make_ldhatfiles.py', description='''This Program takes a tab file and returns LDhat .sites and .locs files''')
parser.add_argument('-v', '--version', action='version', version='You are using %(prog)s version: ' + version, help=\
'display make_ldhatfiles version number and exit')
filesreq = parser.add_argument_group('Input mandatory infos for running')
filesreq.add_argument('-wd', '--workdir', metavar="<path>",type=directory, required=True, dest = 'workdir', help = 'Path of the directory where files will be created')
filesreq.add_argument('-t', '--tab', metavar="<filename>",type=existant_file, required=True, dest = 'tabFile', help = 'Name of tab file in (input whole path if file is not in the current working directory')
filesreq.add_argument('-st', '--size_tab', metavar="<filename>",type=existant_file, required=True, dest = 'sizeTab', help = 'Name of a tab file containing the identifiers of the subunits of division (chromosome/scaffold/contig) and their total size. If some scaffolds are not wanted, comment the line.')
files = parser.add_argument_group('Input infos for running with default values')
files.add_argument('-dt', '--datatype', metavar="<int>", default=1, type=int, choices=[1,2], dest = 'datatype', help = '1 for haplotypic data (default), 2 for genotypic')
files.add_argument('-m', '--methode', metavar="<char>", default="interval", choices=["interval","rhomap"], dest = 'methode', help = 'rhomap or interval (default)')
files.add_argument('-f', '--flag', metavar="<char>", default="L", choices=["L","C"], dest = 'flag', help = 'L for CO (default), C pour gene conversion')
# check parameters
args = parser.parse_args()
#Welcome message
print("#################################################################")
print("# Welcome in make_ldhatfiles (Version " + version + ") #")
print("#################################################################")
# get arguments
workingObjDir = args.workdir
tabFile = relativeToAbsolutePath(args.tabFile)
sizeTab = relativeToAbsolutePath(args.sizeTab)
dataType = args.datatype
intervalLDhatPATH = args.methode
flag = args.flag
print("\t - Workink Directory: %s" % workingObjDir.pathDirectory)
print("\t - Input Path matrice is: %s" % tabFile)
print("\t - Input Path size is: %s" % sizeTab)
print("\t - dataType is : %s" % dataType)
print("\t - Working with : %s" % intervalLDhatPATH)
print("\t - flag is: %s\n\n" % flag)
#exit()
##
# code
##
# get basename to build the rest of the filenames
basename = tabFile.split("/")[-1].split(".")[0]
# build dictionary of scaffolds for the file to be split into
dictSizes = {}
with open(sizeTab, "r") as sizeTabOpen:
for sizeLine in sizeTabOpen:
# keys = IDs - values = total size
checkChro = sizeLine.split("\t")[0]
sizeChro = sizeLine.rstrip().split("\t")[1]
dictSizes[checkChro] = sizeChro
listRange = dictSizes.keys()
## split by specified subunits (scaffold/chromosome/contig etc)
# keys = subunits to be split, values = files to be written in
dictFilesOut = {}
with open(tabFile, "r") as tabOpen:
# get the header of the original tab file to rebuild split tab file
header = tabOpen.readline()
# start from second line
for line in tabOpen:
# chro = identifier of the subunit in the first column
chro = line.rstrip().split("\t")[0]
# if chro considered belongs to user-defined range
if chro in listRange:
# create subdirectory for the current scaffold
subdir = workingObjDir.pathDirectory+basename+"/"+chro
if not os.path.exists(subdir):
os.makedirs(subdir)
outputName = workingObjDir.pathDirectory+basename+"/"+chro+"/"+basename+"_"+chro+".tab"
# if chro not encountered yet, create file add header and current line
if chro not in dictFilesOut.keys():
dictFilesOut[chro] = open(outputName, "w")
dictFilesOut[chro].write(header)
dictFilesOut[chro].write(line)
# otherwise just add current line to relevant file
else:
dictFilesOut[chro].write(line)
# keys = names of split files, values = nb of SNPs in said file
dictNbSNP = {}
dictListPos = {}
listFasta = []
# for each split file
for fileOut in dictFilesOut.values():
name = fileOut.name
chroName = name.split("/")[-1].split(".")[0].replace(basename+"_","")
fileOut.close()
# create corresponding .sites file and associate Nb of SNPs
dictNbSNP[chroName], listPos, fasta, nbInd = build_sites(name, dataType)
listFasta.append(fasta)
dictListPos[chroName] = listPos
# for each subunit and its list of SNP positions
for checkChro, listPos in dictListPos.items():
if checkChro in dictFilesOut.keys():
outputLocsName = workingObjDir.pathDirectory+basename+"/"+checkChro+"/"+basename+"_"+checkChro+".locs"
# create .locs file
outputLocs = open(outputLocsName, "w")
# write header as NbSNP ScaffSize Flag
txt = "%i %s %s\n" %(dictNbSNP[checkChro], dictSizes[checkChro], flag)
outputLocs.write(txt)
# write SNP positions underneath
txtLocs = " ".join(dictListPos[checkChro])+"\n"
outputLocs.write(txtLocs)
outputLocs.close()
## calculate Pi and Theta values
dictThetaInfo = {}
cs = egglib.stats.ComputeStats()
cs.add_stats('Pi','thetaW')
# load alignement
for nameFasta in listFasta:
scaffold = nameFasta.split("/")[-1].replace(".fasta","").replace(basename+"_","")
# use egglib
align = egglib.io.from_fasta(nameFasta, groups=False)
stats = cs.process_align(align) # extract polymorphism data
# get number of SNPs in file
nbSNP = align.ls
#nbSNP = stats['ls_o']
# print results
if scaffold not in dictThetaInfo:
dictThetaInfo[scaffold] = { "Theta_SNP":stats['thetaW']/align.ls,
"Pi":stats['Pi']/align.ls,
"Nb_SNPs":nbSNP,
"Theta_allSNPs":stats['thetaW'],
"Theta_scaffold":stats['thetaW']/int(dictSizes[scaffold])
}
dicoMeanTheta = {}
sommeTheta,sommeSize = 0, 0
for scaffold, dico in dictThetaInfo.iteritems():
sommeTheta += dico["Theta_allSNPs"]
sommeSize += int(dictSizes[scaffold])
thetaCoreGenome = sommeTheta/sommeSize
with open(workingObjDir.pathDirectory+basename+"/"+basename+"_ThetaValues.tab", "w") as ThetaTab:
ThetaTab.write(dictDict2txt(dictThetaInfo))
ThetaTab.write("\nthetaCoreGenome\t%.4f" % thetaCoreGenome)
#MAKE sh script to run LDhat
objDir = directory(workingObjDir.pathDirectory+basename) # list all directory and files in the path
#nbInd = 13
#thetaCoreGenome = 0.007
cmdLoadR = "module load compiler/gcc/4.9.2 bioinfo/geos/3.4.2 bioinfo/gdal/1.9.2 mpi/openmpi/1.6.5 bioinfo/R/3.2.2"
cmdLookTable = completeLDhatPATH+" -n "+str(nbInd)+" -rhomax 100 -n_pts 201 -theta "+str(thetaCoreGenome)+" -prefix "+objDir.pathDirectory+basename
with open(workingObjDir.pathDirectory+basename+"/runLDhat_"+basename+".sh", "w") as runSHFile:
runSHFile.write("%s\n" % cmdLoadR)
runSHFile.write("%s\n" % cmdLookTable)
for scaff in sorted(objDir.listDir):
scaffObjDir = directory(scaff)
			print(repr(scaffObjDir))
			# there should be exactly one .sites and one .locs file per scaffold directory
			siteFile = [s for s in scaffObjDir.listFiles if ".site" in s][0]
			locsFile = [s for s in scaffObjDir.listFiles if ".locs" in s][0]
			basenameScaff = siteFile.split("/")[-1].split(".")[0]
#print basename
cmdCD = "cd "+scaff
if "rhomap" in intervalLDhatPATH:
cmdInterval = intervalLDhatPATH+" -seq "+siteFile+" -loc "+locsFile+" -lk "+objDir.pathDirectory+basename+"new_lk.txt -its 5000000 -bpen 10 -burn 100000 -samp 5000 -prefix "+scaffObjDir.pathDirectory+basenameScaff
if "interval" in intervalLDhatPATH:
cmdInterval = intervalLDhatPATH+" -seq "+siteFile+" -loc "+locsFile+" -lk "+objDir.pathDirectory+basename+"new_lk.txt -its 5000000 -bpen 10 -samp 5000 -prefix "+scaffObjDir.pathDirectory+basenameScaff
cmdStat = statLDhatPATH+" -input "+scaffObjDir.pathDirectory+basenameScaff+"rates.txt -prefix "+scaffObjDir.pathDirectory+basenameScaff
cmdGraph = "makeLDhatgraphs.R -f "+scaffObjDir.pathDirectory+basenameScaff+"rates.txt -o "+scaffObjDir.pathDirectory+basenameScaff+""
#print "%s\n%s\n%s\n%s\n" % (cmdCD,cmdLookTable,cmdInterval,cmdStat)
runSHFile.write("%s\n%s\n%s\n%s\n" % (cmdCD,cmdInterval,cmdStat,cmdGraph))
os.system("chmod 755 "+workingObjDir.pathDirectory+basename+"/runLDhat_"+basename+".sh")
cmdQsub = "qsub -V -q long.q -N "+basename+" -b Y -pe parallel_smp 4 "+workingObjDir.pathDirectory+basename+"/runLDhat_"+basename+".sh"
print(cmdQsub)
#print("\n\nExecution summary:")
#print(" - Outputting \n\
#- %s\n\
#- %s\n\
#- %s\n\n" % (tabFileOut.name,listKeepFile.name,correspondingCDSDir) )
print("#################################################################")
print("# End of execution #")
print("#################################################################")
| gpl-3.0 | 7,221,963,836,876,002,000 | 31.826966 | 304 | 0.64629 | false |
project-rig/rig | rig/place_and_route/place/rcm.py | 1 | 6479 | """Reverse Cuthill-McKee based placement.
"""
from collections import defaultdict, deque
from six import itervalues
from rig.place_and_route.place.sequential import place as sequential_place
from rig.links import Links
from rig.netlist import Net
def _get_vertices_neighbours(nets):
"""Generate a listing of each vertex's immedate neighbours in an undirected
interpretation of a graph.
Returns
-------
{vertex: {vertex: weight, ...}), ...}
"""
zero_fn = (lambda: 0)
vertices_neighbours = defaultdict(lambda: defaultdict(zero_fn))
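    # add each net's weight to both endpoints so the graph is treated as undirected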
for net in nets:
if net.weight != 0:
for sink in net.sinks:
vertices_neighbours[net.source][sink] += net.weight
vertices_neighbours[sink][net.source] += net.weight
return vertices_neighbours
def _dfs(vertex, vertices_neighbours):
"""Generate all the vertices connected to the supplied vertex in
depth-first-search order.
"""
visited = set()
to_visit = deque([vertex])
while to_visit:
vertex = to_visit.pop()
if vertex not in visited:
yield vertex
visited.add(vertex)
to_visit.extend(vertices_neighbours[vertex])
def _get_connected_subgraphs(vertices, vertices_neighbours):
"""Break a graph containing unconnected subgraphs into a list of connected
subgraphs.
Returns
-------
[set([vertex, ...]), ...]
"""
remaining_vertices = set(vertices)
subgraphs = []
while remaining_vertices:
subgraph = set(_dfs(remaining_vertices.pop(), vertices_neighbours))
remaining_vertices.difference_update(subgraph)
subgraphs.append(subgraph)
return subgraphs
def _cuthill_mckee(vertices, vertices_neighbours):
"""Yield the Cuthill-McKee order for a connected, undirected graph.
`Wikipedia
<https://en.wikipedia.org/wiki/Cuthill%E2%80%93McKee_algorithm>`_ provides
a good introduction to the Cuthill-McKee algorithm. The RCM algorithm
attempts to order vertices in a graph such that their adjacency matrix's
bandwidth is reduced. In brief the RCM algorithm is a breadth-first search
with the following tweaks:
* The search starts from the vertex with the lowest degree.
* Vertices discovered in each layer of the search are sorted by ascending
order of their degree in the output.
.. warning::
This function must not be called on a disconnected or empty graph.
Returns
-------
[vertex, ...]
"""
vertices_degrees = {v: sum(itervalues(vertices_neighbours[v]))
for v in vertices}
peripheral_vertex = min(vertices, key=(lambda v: vertices_degrees[v]))
visited = set([peripheral_vertex])
cm_order = [peripheral_vertex]
previous_layer = set([peripheral_vertex])
while len(cm_order) < len(vertices):
adjacent = set()
for vertex in previous_layer:
adjacent.update(vertices_neighbours[vertex])
adjacent.difference_update(visited)
visited.update(adjacent)
cm_order.extend(sorted(adjacent, key=(lambda v: vertices_degrees[v])))
previous_layer = adjacent
return cm_order
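# A small illustration of the ordering this helper produces, using an example
# path graph a - b - c - d with unit edge weights: the search seeds from a
# lowest-degree vertex ('a' here) and walks the path breadth-first, e.g.
#
#     >>> neighbours = {'a': {'b': 1}, 'b': {'a': 1, 'c': 1},
#     ...               'c': {'b': 1, 'd': 1}, 'd': {'c': 1}}
#     >>> _cuthill_mckee(['a', 'b', 'c', 'd'], neighbours)
#     ['a', 'b', 'c', 'd']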
def rcm_vertex_order(vertices_resources, nets):
"""A generator which iterates over the vertices in Reverse-Cuthill-McKee
order.
For use as a vertex ordering for the sequential placer.
"""
vertices_neighbours = _get_vertices_neighbours(nets)
for subgraph_vertices in _get_connected_subgraphs(vertices_resources,
vertices_neighbours):
cm_order = _cuthill_mckee(subgraph_vertices, vertices_neighbours)
for vertex in reversed(cm_order):
yield vertex
def rcm_chip_order(machine):
"""A generator which iterates over a set of chips in a machine in
Reverse-Cuthill-McKee order.
For use as a chip ordering for the sequential placer.
"""
# Convert the Machine description into a placement-problem-style-graph
# where the vertices are chip coordinate tuples (x, y) and each net
# represents the links leaving each chip. This allows us to re-use the
# rcm_vertex_order function above to generate an RCM ordering of chips in
# the machine.
vertices = list(machine)
nets = []
for (x, y) in vertices:
neighbours = []
for link in Links:
if (x, y, link) in machine:
dx, dy = link.to_vector()
neighbour = ((x + dx) % machine.width,
(y + dy) % machine.height)
                # In principle if the link to a chip is marked as working, that
# chip should be working. In practice this might not be the
# case (especially for carelessly hand-defined Machine
# objects).
if neighbour in machine:
neighbours.append(neighbour)
nets.append(Net((x, y), neighbours))
return rcm_vertex_order(vertices, nets)
def place(vertices_resources, nets, machine, constraints):
"""Assigns vertices to chips in Reverse-Cuthill-McKee (RCM) order.
The `RCM <https://en.wikipedia.org/wiki/Cuthill%E2%80%93McKee_algorithm>`_
algorithm (in graph-centric terms) is a simple breadth-first-search-like
heuristic which attempts to yield an ordering of vertices which would yield
a 1D placement with low network congestion. Placement is performed by
sequentially assigning vertices in RCM order to chips, also iterated over
in RCM order.
This simple placement scheme is described by Torsten Hoefler and Marc Snir
in their paper entitled 'Generic topology mapping strategies for
large-scale parallel architectures' published in the Proceedings of the
international conference on Supercomputing, 2011.
This is a thin wrapper around the :py:func:`sequential
<rig.place_and_route.place.sequential.place>` placement algorithm which
uses an RCM ordering for iterating over chips and vertices.
"""
return sequential_place(vertices_resources, nets,
machine, constraints,
rcm_vertex_order(vertices_resources, nets),
rcm_chip_order(machine))
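# A minimal usage sketch.  ``machine`` stands for a rig ``Machine`` describing
# the target system and ``Cores`` for its core-count resource type; both are
# assumed names here (their import location has moved between rig releases)
# and the exact chip assignment returned depends on the machine supplied:
#
#     >>> from rig.netlist import Net
#     >>> vertices_resources = {"v0": {Cores: 1}, "v1": {Cores: 1}}
#     >>> nets = [Net("v0", ["v1"])]
#     >>> placements = place(vertices_resources, nets, machine, [])
#     >>> sorted(placements)
#     ['v0', 'v1']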
| gpl-2.0 | -5,137,749,699,306,870,000 | 34.994444 | 79 | 0.656737 | false |
electionleaflets/electionleaflets | electionleaflets/apps/boundaries/views.py | 1 | 4341 | import math
from PIL import Image, ImageDraw
from django.http import HttpResponse, Http404
from django.shortcuts import render_to_response
from boundaries.models import Boundary
from parties.models import Party
from leaflets.models import Leaflet
google_dist = 20037508.34
def leaflet_polygon_options(boundary):
n = Leaflet.objects.filter(leafletconstituency__constituency__boundary = boundary).count()
return {"fill": leaflet_colour(n), "outline": (0,0,0,170)}
def leaflet_popup(boundary):
party_list = [(
p, Leaflet.objects.filter(
leafletconstituency__constituency__boundary=boundary,
publisher_party = p))
for p in Party.objects.filter(
leaflet__leafletconstituency__constituency__boundary=boundary)\
.distinct().order_by('name')]
unclassified_leaflets = Leaflet.objects.filter(
leafletconstituency__constituency__boundary=boundary,
publisher_party = None)
if unclassified_leaflets:
party_list = party_list + [({"name": "Uncategorised"}, unclassified_leaflets)]
return "boundaries/leaflets.html", {"constituency": boundary.constituency,
"party_list": party_list
}
def leaflet_colour(n):
r = math.log((n+1), 2)
return (int(50 + r * 16), int(255 - r * 32), int(100 + r * 16), int(32 + r * 32))
def leaflet_keyvalues():
return [0,2,5,10,20,50,100,200]
maps = {"leaflets": {"polygon_options": leaflet_polygon_options,
"template": leaflet_popup,
"colour": leaflet_colour,
"keyvalues": leaflet_keyvalues()}
}
def getDBzoom(z):
if int(z) > 10:
return 10
else:
return int(z)
def view_key(request, mapname=None, n=None, x=None, y=None):
image = Image.new("RGBA", (int(x), int(y)), maps[mapname]["colour"](int(n)))
response = HttpResponse(mimetype="image/png")
image.save(response, "PNG")
return response
def view_map(request, mapname):
from django.conf import settings
return render_to_response("boundaries/map.html", {"MEDIA_URL":settings.MEDIA_URL, "mapname": mapname, "keyvalues":maps[mapname]["keyvalues"]})
def tile(request, mapname, tz=None, tx=None, ty=None, tilex=256, tiley = 256):
options = maps[str(mapname)]
west, south, east, north = getTileRect(tx, ty, tz)
zoom = 2 ** float(tz)
tx = float(tx)
ty = float(ty)
image = Image.new("RGBA", (256, 256), (0, 0, 0, 0))
draw = ImageDraw.Draw(image)
dbz = getDBzoom(tz)
boundaries_within = Boundary.objects.filter(zoom=dbz, south__lt=north, north__gt=south, east__gt=west, west__lt=east)
for boundary in boundaries_within:
polygon_options = options["polygon_options"](boundary)
coords = eval(boundary.boundary)
l = []
for lng, lat in coords:
x = 256 * (lng - west) / (east - west)
y = 256 * (lat - north) / (south - north)
l.append((int(x), int(y)))
draw.polygon(l, **polygon_options)
del draw
response = HttpResponse(mimetype="image/png")
image.save(response, "PNG")
return response
def popup(request, mapname, x=None, y=None, z=None):
options = maps[str(mapname)]
x = float(x)
y = float(y)
dbz = getDBzoom(z)
possible_boundaries = Boundary.objects.filter(zoom=int(dbz), south__lt=y, north__gt=y, east__gt=x, west__lt=x)
for boundary in possible_boundaries:
coords = eval(boundary.boundary)
inside = False
        # Even-odd ray casting: count crossings of a horizontal ray from
        # (x, y) with the polygon's edges; an odd count means "inside".
        for (vx0, vy0), (vx1, vy1) in zip(coords, coords[1:] + coords[:1]):
            if ((vy0>y) != (vy1>y)) and (x < (vx1-vx0) * (y-vy0) / (vy1-vy0) + vx0):
                inside = not(inside)
if inside:
return render_to_response(*options["template"](boundary))
raise Http404
def to_google(x, tilesAtThisZoom):
return google_dist * (1 - 2 * float(x) / tilesAtThisZoom)
def getTileRect(xt, yt, zoomt):
zoom = int(zoomt)
x = int(xt)
y = int(yt)
tilesAtThisZoom = 2 ** zoom
return (-to_google(x, tilesAtThisZoom),
to_google(y + 1, tilesAtThisZoom),
-to_google(x + 1, tilesAtThisZoom),
to_google(y, tilesAtThisZoom))
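# Worked example of the tile arithmetic above (zoom 0 has a single tile that
# covers the whole Web Mercator square):
#
#     >>> getTileRect(0, 0, 0)
#     (-20037508.34, -20037508.34, 20037508.34, 20037508.34)
#
# i.e. (west, south, east, north) spans +/- google_dist metres; each extra
# zoom level subdivides that square into 2**zoom tiles per axis.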
| mit | 428,706,416,601,348,300 | 36.102564 | 146 | 0.601705 | false |
wright-group/WrightTools | tests/kit/diff.py | 1 | 1245 | """Test diff."""
# --- import -------------------------------------------------------------------------------------
import numpy as np
import WrightTools as wt
# --- test ---------------------------------------------------------------------------------------
def test_ascending_1():
x = np.linspace(0, 10, 1000)
y = np.sin(x)
d = wt.kit.diff(x, y)
assert np.all((np.abs(d - np.cos(x)) < 0.0001)[:-1])
def test_ascending_2():
x = np.linspace(0, 10, 1000)
y = np.sin(x)
d = wt.kit.diff(x, y, 2)
assert np.all((np.abs(d + np.sin(x)) < 0.0001)[1:-2])
def test_ascending_3():
x = np.linspace(0, 10, 1000)
y = np.sin(x)
d = wt.kit.diff(x, y, 3)
assert np.all((np.abs(d + np.cos(x)) < 0.0001)[2:-3])
def test_ascending_4():
x = np.linspace(0, 10, 1000)
y = np.sin(x)
d = wt.kit.diff(x, y, 4)
assert np.all((np.abs(d - np.sin(x)) < 0.0001)[3:-4])
def test_descending_1():
x = np.linspace(10, 0, 1000)
y = np.sin(x)
d = wt.kit.diff(x, y)
assert np.all((np.abs(d - np.cos(x)) < 0.0001)[1:-1])
def test_descending_3():
x = np.linspace(10, 0, 1000)
y = np.sin(x)
d = wt.kit.diff(x, y, 3)
assert np.all((np.abs(d + np.cos(x)) < 0.0001)[3:-3])
| mit | -850,052,172,316,749,000 | 22.055556 | 98 | 0.452209 | false |
openstack/ironic | ironic/tests/unit/drivers/modules/ibmc/test_management.py | 1 | 12202 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for iBMC Management interface."""
import itertools
from unittest import mock
from oslo_utils import importutils
from ironic.common import boot_devices
from ironic.common import boot_modes
from ironic.common import exception
from ironic.conductor import task_manager
from ironic.drivers.modules.ibmc import mappings
from ironic.drivers.modules.ibmc import utils
from ironic.tests.unit.drivers.modules.ibmc import base
constants = importutils.try_import('ibmc_client.constants')
ibmc_client = importutils.try_import('ibmc_client')
ibmc_error = importutils.try_import('ibmc_client.exceptions')
class IBMCManagementTestCase(base.IBMCTestCase):
def test_get_properties(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
properties = task.driver.get_properties()
for prop in utils.COMMON_PROPERTIES:
self.assertIn(prop, properties)
@mock.patch.object(utils, 'parse_driver_info', autospec=True)
def test_validate(self, mock_parse_driver_info):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.management.validate(task)
mock_parse_driver_info.assert_called_once_with(task.node)
@mock.patch.object(ibmc_client, 'connect', autospec=True)
def test_get_supported_boot_devices(self, connect_ibmc):
conn = self.mock_ibmc_conn(connect_ibmc)
# mock return value
_supported_boot_devices = list(mappings.GET_BOOT_DEVICE_MAP)
conn.system.get.return_value = mock.Mock(
boot_source_override=mock.Mock(
supported_boot_devices=_supported_boot_devices
)
)
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
supported_boot_devices = (
task.driver.management.get_supported_boot_devices(task))
connect_ibmc.assert_called_once_with(**self.ibmc)
expect = sorted(list(mappings.GET_BOOT_DEVICE_MAP.values()))
self.assertEqual(expect, sorted(supported_boot_devices))
@mock.patch.object(ibmc_client, 'connect', autospec=True)
def test_set_boot_device(self, connect_ibmc):
conn = self.mock_ibmc_conn(connect_ibmc)
# mock return value
conn.system.set_boot_source.return_value = None
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
device_mapping = [
(boot_devices.PXE, constants.BOOT_SOURCE_TARGET_PXE),
(boot_devices.DISK, constants.BOOT_SOURCE_TARGET_HDD),
(boot_devices.CDROM, constants.BOOT_SOURCE_TARGET_CD),
(boot_devices.BIOS,
constants.BOOT_SOURCE_TARGET_BIOS_SETUP),
('floppy', constants.BOOT_SOURCE_TARGET_FLOPPY),
]
persistent_mapping = [
(True, constants.BOOT_SOURCE_ENABLED_CONTINUOUS),
(False, constants.BOOT_SOURCE_ENABLED_ONCE)
]
data_source = list(itertools.product(device_mapping,
persistent_mapping))
for (device, persistent) in data_source:
task.driver.management.set_boot_device(
task, device[0], persistent=persistent[0])
connect_ibmc.assert_called_once_with(**self.ibmc)
conn.system.set_boot_source.assert_called_once_with(
device[1],
enabled=persistent[1])
# Reset mocks
connect_ibmc.reset_mock()
conn.system.set_boot_source.reset_mock()
@mock.patch.object(ibmc_client, 'connect', autospec=True)
def test_set_boot_device_fail(self, connect_ibmc):
conn = self.mock_ibmc_conn(connect_ibmc)
# mock return value
conn.system.set_boot_source.side_effect = (
ibmc_error.IBMCClientError
)
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaisesRegex(
exception.IBMCError, 'set iBMC boot device',
task.driver.management.set_boot_device, task,
boot_devices.PXE)
connect_ibmc.assert_called_once_with(**self.ibmc)
conn.system.set_boot_source.assert_called_once_with(
constants.BOOT_SOURCE_TARGET_PXE,
enabled=constants.BOOT_SOURCE_ENABLED_ONCE)
@mock.patch.object(ibmc_client, 'connect', autospec=True)
def test_get_boot_device(self, connect_ibmc):
conn = self.mock_ibmc_conn(connect_ibmc)
# mock return value
conn.system.get.return_value = mock.Mock(
boot_source_override=mock.Mock(
target=constants.BOOT_SOURCE_TARGET_PXE,
enabled=constants.BOOT_SOURCE_ENABLED_CONTINUOUS
)
)
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
result_boot_device = task.driver.management.get_boot_device(task)
conn.system.get.assert_called_once()
connect_ibmc.assert_called_once_with(**self.ibmc)
expected = {'boot_device': boot_devices.PXE,
'persistent': True}
self.assertEqual(expected, result_boot_device)
def test_get_supported_boot_modes(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
supported_boot_modes = (
task.driver.management.get_supported_boot_modes(task))
self.assertEqual(list(mappings.SET_BOOT_MODE_MAP),
supported_boot_modes)
@mock.patch.object(ibmc_client, 'connect', autospec=True)
def test_set_boot_mode(self, connect_ibmc):
conn = self.mock_ibmc_conn(connect_ibmc)
# mock system boot source override return value
conn.system.get.return_value = mock.Mock(
boot_source_override=mock.Mock(
target=constants.BOOT_SOURCE_TARGET_PXE,
enabled=constants.BOOT_SOURCE_ENABLED_CONTINUOUS
)
)
conn.system.set_boot_source.return_value = None
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
expected_values = [
(boot_modes.LEGACY_BIOS, constants.BOOT_SOURCE_MODE_BIOS),
(boot_modes.UEFI, constants.BOOT_SOURCE_MODE_UEFI)
]
for ironic_boot_mode, ibmc_boot_mode in expected_values:
task.driver.management.set_boot_mode(task,
mode=ironic_boot_mode)
conn.system.get.assert_called_once()
connect_ibmc.assert_called_once_with(**self.ibmc)
conn.system.set_boot_source.assert_called_once_with(
constants.BOOT_SOURCE_TARGET_PXE,
enabled=constants.BOOT_SOURCE_ENABLED_CONTINUOUS,
mode=ibmc_boot_mode)
# Reset
connect_ibmc.reset_mock()
conn.system.set_boot_source.reset_mock()
conn.system.get.reset_mock()
@mock.patch.object(ibmc_client, 'connect', autospec=True)
def test_set_boot_mode_fail(self, connect_ibmc):
conn = self.mock_ibmc_conn(connect_ibmc)
# mock system boot source override return value
conn.system.get.return_value = mock.Mock(
boot_source_override=mock.Mock(
target=constants.BOOT_SOURCE_TARGET_PXE,
enabled=constants.BOOT_SOURCE_ENABLED_CONTINUOUS
)
)
conn.system.set_boot_source.side_effect = (
ibmc_error.IBMCClientError
)
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
expected_values = [
(boot_modes.LEGACY_BIOS, constants.BOOT_SOURCE_MODE_BIOS),
(boot_modes.UEFI, constants.BOOT_SOURCE_MODE_UEFI)
]
for ironic_boot_mode, ibmc_boot_mode in expected_values:
self.assertRaisesRegex(
exception.IBMCError, 'set iBMC boot mode',
task.driver.management.set_boot_mode, task,
ironic_boot_mode)
conn.system.set_boot_source.assert_called_once_with(
constants.BOOT_SOURCE_TARGET_PXE,
enabled=constants.BOOT_SOURCE_ENABLED_CONTINUOUS,
mode=ibmc_boot_mode)
conn.system.get.assert_called_once()
connect_ibmc.assert_called_once_with(**self.ibmc)
# Reset
connect_ibmc.reset_mock()
conn.system.set_boot_source.reset_mock()
conn.system.get.reset_mock()
@mock.patch.object(ibmc_client, 'connect', autospec=True)
def test_get_boot_mode(self, connect_ibmc):
conn = self.mock_ibmc_conn(connect_ibmc)
# mock system boot source override return value
conn.system.get.return_value = mock.Mock(
boot_source_override=mock.Mock(
target=constants.BOOT_SOURCE_TARGET_PXE,
enabled=constants.BOOT_SOURCE_ENABLED_CONTINUOUS,
mode=constants.BOOT_SOURCE_MODE_BIOS,
)
)
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
response = task.driver.management.get_boot_mode(task)
conn.system.get.assert_called_once()
connect_ibmc.assert_called_once_with(**self.ibmc)
expected = boot_modes.LEGACY_BIOS
self.assertEqual(expected, response)
def test_get_sensors_data(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(NotImplementedError,
task.driver.management.get_sensors_data, task)
@mock.patch.object(ibmc_client, 'connect', autospec=True)
def test_inject_nmi(self, connect_ibmc):
conn = self.mock_ibmc_conn(connect_ibmc)
# mock system boot source override return value
conn.system.reset.return_value = None
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.management.inject_nmi(task)
connect_ibmc.assert_called_once_with(**self.ibmc)
conn.system.reset.assert_called_once_with(constants.RESET_NMI)
@mock.patch.object(ibmc_client, 'connect', autospec=True)
def test_inject_nmi_fail(self, connect_ibmc):
conn = self.mock_ibmc_conn(connect_ibmc)
# mock system boot source override return value
conn.system.reset.side_effect = (
ibmc_error.IBMCClientError
)
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaisesRegex(
exception.IBMCError, 'inject iBMC NMI',
task.driver.management.inject_nmi, task)
connect_ibmc.assert_called_once_with(**self.ibmc)
conn.system.reset.assert_called_once_with(constants.RESET_NMI)
| apache-2.0 | 3,788,170,899,793,567,000 | 43.210145 | 77 | 0.597935 | false |
py-eww/eww | eww/ioproxy.py | 1 | 3215 | # -*- coding: utf-8 -*-
"""
eww.ioproxy
~~~~~~~~~~~
We replace ``sys.std[in, out, err]`` with instances of ``IOProxy``.
``IOProxy`` provides a thread-local proxy to whatever we want to use
for IO.
It is worth mentioning that this is *not* a perfect proxy. Specifically,
it doesn't proxy any magic methods. There are lots of ways to fix that,
but so far it hasn't been needed.
If you want to make modification to sys.std[in, out, err], any changes you
make prior to calling embed will be respected and handled correctly. If
you change the IO files after calling embed, everything will break. Ooof.
Fortunately, that's a rare use case. In the event you want to though, you
can use the register() and unregister() public APIs. Check out the
:ref:`troubleshooting` page for more information.
"""
import logging
import threading
LOGGER = logging.getLogger(__name__)
class IOProxy(object):
"""IOProxy provides a proxy object meant to replace sys.std[in, out, err].
It does not proxy magic methods. It is used by calling the object's
register and unregister methods.
"""
def __init__(self, original_file):
"""Creates the thread local and registers the original file.
Args:
original_file (file): Since IOProxy is used to replace an
existing file, ``original_file`` should be
the file you're replacing.
"""
self.io_routes = threading.local()
self.original_file = original_file
self.register(original_file)
def register(self, io_file):
"""Used to register a file for use in a particular thread.
Args:
io_file (file): ``io_file`` will override the existing file, but
only in the thread ``register`` is called in.
Returns:
None
"""
self.io_routes.io_file = io_file
def unregister(self):
"""Used to unregister a file for use in a particular thread.
Returns:
None
"""
try:
del self.io_routes.io_file
except AttributeError:
LOGGER.debug('unregister() called, but no IO_file registered.')
def write(self, data, *args, **kwargs):
"""Modify the write method to force a flush after each write so our
sockets work correctly.
Args:
data (str): A string to be written to the file being proxied.
Returns:
None
"""
try:
self.io_routes.io_file.write(data, *args, **kwargs)
self.io_routes.io_file.flush()
except AttributeError as exception:
LOGGER.debug('Error calling IOProxy.write: ' + str(exception)
+ ' Msg: ' + str(data))
except IOError as exception:
# This can happen when a console thread is forcibly stopped
LOGGER.debug('Caught error while writing: ' + str(exception))
def __getattr__(self, name):
"""All other methods and attributes lookups go to the original
file.
"""
return getattr(self.io_routes.io_file, name)
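# A minimal sketch of how the proxy is meant to be driven (eww's embed()
# normally performs this wiring itself; the StringIO buffer below is just an
# illustrative stand-in for a console's file-like object):
#
#     import sys
#     from io import StringIO
#
#     proxy = IOProxy(sys.stdout)           # remembers and proxies real stdout
#     sys.stdout = proxy
#     captured = StringIO()
#     proxy.register(captured)              # this thread now writes to captured
#     print('routed to the buffer')
#     proxy.register(proxy.original_file)   # route back to the real stdout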
| mit | 1,519,809,513,948,798,000 | 33.202128 | 78 | 0.598445 | false |
sajuptpm/neutron-ipam | neutron/tests/unit/metaplugin/fake_plugin.py | 1 | 2680 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.db import db_base_plugin_v2
from neutron.db import external_net_db
from neutron.db import l3_gwmode_db
class Fake1(db_base_plugin_v2.NeutronDbPluginV2,
external_net_db.External_net_db_mixin,
l3_gwmode_db.L3_NAT_db_mixin):
supported_extension_aliases = ['external-net', 'router']
def fake_func(self):
return 'fake1'
def create_network(self, context, network):
session = context.session
with session.begin(subtransactions=True):
net = super(Fake1, self).create_network(context, network)
self._process_l3_create(context, net, network['network'])
return net
def update_network(self, context, id, network):
session = context.session
with session.begin(subtransactions=True):
net = super(Fake1, self).update_network(context, id,
network)
self._process_l3_update(context, net, network['network'])
return net
def delete_network(self, context, id):
return super(Fake1, self).delete_network(context, id)
def create_port(self, context, port):
port = super(Fake1, self).create_port(context, port)
return port
def create_subnet(self, context, subnet):
subnet = super(Fake1, self).create_subnet(context, subnet)
return subnet
def update_port(self, context, id, port):
port = super(Fake1, self).update_port(context, id, port)
return port
def delete_port(self, context, id, l3_port_check=True):
if l3_port_check:
self.prevent_l3_port_deletion(context, id)
self.disassociate_floatingips(context, id)
return super(Fake1, self).delete_port(context, id)
class Fake2(Fake1):
def fake_func(self):
return 'fake2'
def fake_func2(self):
return 'fake2'
def start_rpc_listener(self):
# return value is only used to confirm this method was called.
return 'OK'
| apache-2.0 | 3,253,998,093,902,717,000 | 34.263158 | 78 | 0.65 | false |
rwatson/chromium-capsicum | tools/grit/grit/xtb_reader.py | 1 | 3972 | #!/usr/bin/python2.4
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Fast and efficient parser for XTB files.
'''
import sys
import xml.sax
import xml.sax.handler
class XtbContentHandler(xml.sax.handler.ContentHandler):
'''A content handler that calls a given callback function for each
translation in the XTB file.
'''
def __init__(self, callback, debug=False):
self.callback = callback
self.debug = debug
# 0 if we are not currently parsing a translation, otherwise the message
# ID of that translation.
self.current_id = 0
# Empty if we are not currently parsing a translation, otherwise the
# parts we have for that translation - a list of tuples
# (is_placeholder, text)
self.current_structure = []
# Set to the language ID when we see the <translationbundle> node.
self.language = ''
# Keep track of the if block we're inside. We can't nest ifs.
self.if_expr = None
def startElement(self, name, attrs):
if name == 'translation':
assert self.current_id == 0 and len(self.current_structure) == 0, (
"Didn't expect a <translation> element here.")
self.current_id = attrs.getValue('id')
elif name == 'ph':
assert self.current_id != 0, "Didn't expect a <ph> element here."
self.current_structure.append((True, attrs.getValue('name')))
elif name == 'translationbundle':
self.language = attrs.getValue('lang')
elif name == 'if':
assert self.if_expr is None, "Can't nest <if> in xtb files"
self.if_expr = attrs.getValue('expr')
def endElement(self, name):
if name == 'translation':
assert self.current_id != 0
# If we're in an if block, only call the callback (add the translation)
# if the expression is True.
should_run_callback = True
if self.if_expr:
should_run_callback = eval(self.if_expr, {}, {'os': sys.platform})
if should_run_callback:
self.callback(self.current_id, self.current_structure)
self.current_id = 0
self.current_structure = []
elif name == 'if':
assert self.if_expr is not None
self.if_expr = None
def characters(self, content):
if self.current_id != 0:
# We are inside a <translation> node so just add the characters to our
# structure.
#
# This naive way of handling characters is OK because in the XTB format,
# <ph> nodes are always empty (always <ph name="XXX"/>) and whitespace
# inside the <translation> node should be preserved.
self.current_structure.append((False, content))
class XtbErrorHandler(xml.sax.handler.ErrorHandler):
def error(self, exception):
pass
def fatalError(self, exception):
raise exception
def warning(self, exception):
pass
def Parse(xtb_file, callback_function, debug=False):
'''Parse xtb_file, making a call to callback_function for every translation
in the XTB file.
The callback function must have the signature as described below. The 'parts'
parameter is a list of tuples (is_placeholder, text). The 'text' part is
either the raw text (if is_placeholder is False) or the name of the placeholder
(if is_placeholder is True).
Args:
xtb_file: file('fr.xtb')
callback_function: def Callback(msg_id, parts): pass
Return:
The language of the XTB, e.g. 'fr'
'''
# Start by advancing the file pointer past the DOCTYPE thing, as the TC
# uses a path to the DTD that only works in Unix.
# TODO(joi) Remove this ugly hack by getting the TC gang to change the
# XTB files somehow?
front_of_file = xtb_file.read(1024)
xtb_file.seek(front_of_file.find('<translationbundle'))
handler = XtbContentHandler(callback=callback_function, debug=debug)
xml.sax.parse(xtb_file, handler)
assert handler.language != ''
return handler.language
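# A minimal usage sketch (the file name is a placeholder; the callback has the
# signature described in Parse's docstring above):
#
#     translations = {}
#
#     def Collect(msg_id, parts):
#       # Concatenate literal text, keeping placeholder names in braces.
#       translations[msg_id] = ''.join(
#           ('{%s}' % text) if is_placeholder else text
#           for (is_placeholder, text) in parts)
#
#     lang = Parse(open('fr.xtb'), Collect)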
| bsd-3-clause | -5,619,635,609,388,487,000 | 33.241379 | 81 | 0.674723 | false |
mindis/canteen | canteen/core/meta.py | 1 | 9304 | # -*- coding: utf-8 -*-
'''
canteen meta core
~~~~~~~~~~~~~~~~~
metaclass tools and APIs.
:author: Sam Gammon <[email protected]>
:copyright: (c) Keen IO, 2013
:license: This software makes use of the MIT Open Source License.
A copy of this license is included as ``LICENSE.md`` in
the root of the project.
'''
# utils
from ..util import struct
from ..util import decorators
## Globals
_owner_map = {}
# ``grab`` unwraps bound/static/class methods to the underlying function.
grab = lambda x: x.__func__ if hasattr(x, '__func__') else x
# ``owner`` resolves (and interns) the owning name for a class.
owner = lambda x: intern(x.__owner__ if hasattr(x, '__owner__') else x.__name__)
# ``construct`` builds a class object directly via ``type.__new__``.
construct = lambda cls, name, bases, properties: type.__new__(cls, name, bases, properties)
# ``metachain`` constructs a class, records it on the metachain and returns it.
metachain = lambda cls, n, b, p: cls.__metachain__.append(construct(cls, n, b, p)) or cls.__metachain__[-1]
class MetaFactory(type):
''' '''
__owner__, __metachain__, __root__ = "BaseMeta", [], True
def __new__(cls, name=None, bases=None, properties=None):
''' '''
if not name or not bases or not isinstance(properties, dict): # pragma: nocover
raise NotImplementedError('`MetaFactory` is meta-abstract and cannot be constructed directly.')
# get ready to construct, do so immediately for ``MetaFactory`` itself and other explicit roots
if '__root__' in properties and properties['__root__']:
del properties['__root__'] # treat as a root - init directly and continue
return construct(cls, name, bases, properties)
# construct, yo. then unconditionally apply it to the metachain and return also, defer to the class'
# ``initialize``, or any of its bases if they have ``initialize`, for constructing the actual class.
return ((grab(properties['initialize'] if 'initialize' in properties else
getattr((x for x in bases if hasattr(x, 'initialize')).next(), 'initialize')))(*(
cls, name, bases, properties))) if (
'initialize' in properties or any((hasattr(b, 'initialize') for b in bases))
) else metachain(cls, name, bases, properties)
def mro(cls):
''' '''
# override metaclass MRO to make them superimposable on each other
if not cls.__metachain__:
return type.mro(cls)
# make sure to enforce MRO semantics
seen, tree, order = set(), [], type.mro(cls)
for group in ([order[0]], order[1:-2], cls.__metachain__, order[-2:]):
for base in group:
if base not in seen: seen.add(base), tree.append(base)
return tuple(tree)
__repr__ = lambda cls: "<meta '%s.%s'>" % (cls.__owner__, cls.__name__)
class Base(type):
''' '''
__owner__, __metaclass__, __root__ = "Base", MetaFactory, True
class Proxy(object):
''' '''
class Factory(Base):
''' '''
__hooks__ = []
def initialize(cls, name, bases, properties):
''' '''
def metanew(_cls, _name, _bases, _properties):
''' '''
# if this metaclass implements the ``Proxy.Register`` class,
# defer to _cls.register directly after construction
if issubclass(_cls, Proxy.Registry):
return grab(_cls.register)(_cls, construct(_cls, _name, _bases, _properties))
return construct(_cls, _name, _bases, _properties) # pragma: nocover
# drop down if we already have a metachain for this tree
if cls.__metachain__: properties['__new__'] = metanew
# construct, yo. then unconditionally apply it to the metachain and return
return metachain(cls, name, bases, properties)
class Registry(Factory):
''' '''
__chain__ = {}
def iter_children(cls):
''' '''
for obj in cls.__chain__[owner(cls)]:
if obj is cls: continue # skip the parent class
yield obj
def children(cls):
''' '''
# remember to filter-out weakrefs that have died
return [child for child in cls.iter_children()]
@staticmethod
def register(meta, target):
''' '''
_owner = owner(target)
# check to see if bases are only roots, if it is a root create a new metabucket
if not any(((False if x in (object, type) else True) for x in target.__bases__)):
meta.__chain__[_owner] = []
return target
# resolve owner and construct
for base in target.__bases__:
if not base in (object, type):
if _owner not in meta.__chain__: meta.__chain__[_owner] = []
meta.__chain__[_owner].append(target)
return target
class Component(Registry):
''' '''
__target__ = None
__binding__ = None
__injector_cache__ = {}
__map__ = {} # holds map of all platform instances
@decorators.classproperty
def singleton_map(cls):
''' '''
return cls.__map__
@classmethod
def reset_cache(cls):
''' '''
cls.__injector_cache__ = {}
cls.__class__.__injector_cache__ = {}
return
@staticmethod
def collapse(cls, spec=None):
''' '''
# try the injector cache
if (cls, spec) not in Proxy.Component.__injector_cache__:
# otherwise, collapse and build one
property_bucket = {}
for metabucket in Proxy.Registry.__chain__.iterkeys():
for concrete in filter(lambda x: issubclass(x.__class__, Proxy.Component), Proxy.Component.__chain__[metabucket]):
namespace = ''
responder, properties = concrete.inject(concrete, cls.__target__, cls.__delegate__) or (None, {})
if not responder: continue # filter out classes that opt-out of injection
if hasattr(concrete, '__binding__'):
def do_pluck(klass, obj):
''' '''
def pluck(property_name):
''' '''
# dereference property aliases
if hasattr(klass, '__aliases__') and property_name in klass.__aliases__:
return getattr(obj, klass.__aliases__[property_name])
return getattr(obj, property_name) # pragma: nocover
setattr(pluck, 'target', klass)
return pluck
if concrete.__binding__:
property_bucket[concrete.__binding__.__alias__] = struct.CallbackProxy(do_pluck(concrete, responder))
if concrete.__binding__.__namespace__:
namespace = concrete.__binding__.__alias__
for bundle in properties:
# clear vars
prop, alias, _global = None, None, False
if not isinstance(bundle, tuple):
property_bucket['.'.join((namespace, bundle)) if namespace else bundle] = (responder, bundle)
continue
prop, alias, _global = bundle
if _global:
property_bucket['.'.join((namespace, alias)) if namespace else alias] = (responder, prop)
continue
property_bucket[alias] = (responder, prop)
# if it's empty, don't cache
if not property_bucket: return {}
# set in cache, unless empty
Proxy.Component.__injector_cache__[(cls, spec)] = property_bucket
# return from cache
return Proxy.Component.__injector_cache__[(cls, spec)]
@staticmethod
def inject(cls, requestor, delegate):
''' '''
# allow class to "prepare" itself (potentially instantiating a singleton)
concrete = cls.__class__.prepare(cls) if hasattr(cls.__class__, 'prepare') else cls
# allow class to indicate it does not wish to inject
if concrete is None: return
# gather injectable attributes
_injectable = set()
if hasattr(cls, '__bindings__'):
for iterator in (cls.__dict__.iteritems(), cls.__class__.__dict__.iteritems()):
for prop, value in iterator:
if cls.__bindings__:
if prop in cls.__bindings__:
func = cls.__dict__[prop] if not isinstance(cls.__dict__[prop], (staticmethod, classmethod)) else cls.__dict__[prop].__func__
do_namespace = func.__binding__.__namespace__ if cls.__binding__.__namespace__ else False
_injectable.add((prop, func.__binding__.__alias__ or prop, do_namespace))
continue
else:
# if no bindings are in use, bind all non-special stuff
if not prop.startswith('__'):
_injectable.add(prop)
# return bound injectables or the whole set
return concrete, _injectable or set(filter(lambda x: not x.startswith('__'), concrete.__dict__.iterkeys()))
@classmethod
def prepare(cls, target):
''' '''
if (not hasattr(target, '__binding__')) or target.__binding__ is None: return
# resolve name, instantiate and register instance singleton
alias = target.__binding__.__alias__ if (hasattr(target.__binding__, '__alias__') and isinstance(target.__binding__, basestring)) else target.__name__
if hasattr(target, '__singleton__') and target.__singleton__:
# if we already have a singleton, give that
if alias in cls.__map__: return cls.__map__[alias]
# otherwise, startup a new singleton
cls.__map__[alias] = target()
return cls.__map__[alias]
return target # pragma: nocover
__all__ = (
'MetaFactory',
'Base',
'Proxy'
)
| mit | -6,218,907,525,932,880,000 | 30.013333 | 156 | 0.576849 | false |
pypa/twine | tests/test_main.py | 1 | 1168 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import colorama
from twine import __main__ as dunder_main
def test_exception_handling(monkeypatch):
monkeypatch.setattr(sys, "argv", ["twine", "upload", "missing.whl"])
message = "InvalidDistribution: Cannot find file (or expand pattern): 'missing.whl'"
assert dunder_main.main() == colorama.Fore.RED + message + colorama.Style.RESET_ALL
def test_no_color_exception(monkeypatch):
monkeypatch.setattr(sys, "argv", ["twine", "--no-color", "upload", "missing.whl"])
message = "InvalidDistribution: Cannot find file (or expand pattern): 'missing.whl'"
assert dunder_main.main() == message
| apache-2.0 | -8,956,839,330,193,934,000 | 39.275862 | 88 | 0.735445 | false |
gilliM/MFQ | ModisFromQgis/help/source/conf.py | 1 | 7118 | # -*- coding: utf-8 -*-
#
# ModisFromQgis documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 12 17:11:03 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo', 'sphinx.ext.pngmath', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ModisFromQgis'
copyright = u'2013, Gillian Milani / RSL (UZH)'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ModisFromQgisdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ModisFromQgis.tex', u'ModisFromQgis Documentation',
u'Gillian Milani / RSL (UZH)', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'modisfromqgis', u'ModisFromQgis Documentation',
[u'Gillian Milani / RSL (UZH)'], 1)
]
| gpl-2.0 | 1,405,499,702,715,317,000 | 31.953704 | 80 | 0.709609 | false |
SavinaRoja/Kerminal | kerminal/telemachus_api.py | 1 | 13310 | # encoding: utf-8
#The information in this module was gleaned from DataLinkHandlers.cs
#https://github.com/richardbunt/Telemachus/blob/master/Telemachus/src/DataLinkHandlers.cs
#Actions are sent to server, result in one action per message
mj_actions = ['mj.smartassoff', # Smart ASS Off
'mj.node', # Node
'mj.prograde', # Prograde
'mj.retrograde', # Retrograde
'mj.normalplus', # Normal Plus
'mj.normalminus', # Normal Minus
'mj.radialplus', # Radial Plus
'mj.radialminus', # Radial Minus
'mj.targetplus', # Target Plus
'mj.targetminus', # Target Minus
'mj.relativeplus', # Relative Plus
'mj.relativeminus', # Relative Minus
'mj.parallelplus', # Parallel Plus
'mj.parallelminus', # Parallel Minus
'mj.surface', # Surface [float heading, float pitch]
'mj.surface2', # Surface [double heading, double pitch]
]
#FlyByWire Stuff
vessel_actions = ['v.setYaw', # Yaw [float yaw]
'v.setPitch', # Pitch [float pitch]
'v.setRoll', # Roll [float roll]
'v.setFbW', # Set Fly by Wire On or Off [bool state]
'v.setPitchYawRollXYZ', # Set pitch, yaw, roll, X, Y and Z [float pitch, yaw, roll, x, y, z]
]
flight_actions = ['f.stage', # Stage
'f.setThrottle', # Set Throttle [float magnitude]
#'f.throttle', # Throttle (plotable)
'f.throttleUp', # Throttle Up
'f.throttleZero', # Throttle Zero
'f.throttleFull', # Throttle Full
'f.throttleDown', # Throttle Down
'f.rcs', # RCS [optional bool on/off]
'f.sas', # SAS [optional bool on/off]
'f.light', # Light [optional bool on/off]
'f.gear', # Gear [optional bool on/off]
'f.brake', # Brake [optional bool on/off]
'f.abort', # Abort [optional bool on/off]
'f.ag1', # Action Group 1 [optional bool on/off]
'f.ag2', # Action Group 2 [optional bool on/off]
'f.ag3', # Action Group 3 [optional bool on/off]
'f.ag4', # Action Group 4 [optional bool on/off]
'f.ag5', # Action Group 5 [optional bool on/off]
'f.ag6', # Action Group 6 [optional bool on/off]
'f.ag7', # Action Group 7 [optional bool on/off]
'f.ag8', # Action Group 8 [optional bool on/off]
'f.ag9', # Action Group 9 [optional bool on/off]
'f.ag10', # Action Group 10 [optional bool on/off]
]
time_warp_actions = ['t.timeWarp', # Time Warp [int rate]
]
#MapView here refers to the in-game orbital map, not the google maps hook
mapview_actions = ['m.toggleMapView', # Toggle Map View
'm.enterMapView', # Enter Map View
'm.exitMapView', # Exit Map View
]
#Plotables are things you can subscribe to; will be sent at each pulse
flight_plotables = ['f.throttle', # Throttle
'v.rcsValue', # Query RCS value
'v.sasValue', # Query SAS value
'v.lightValue', # Query light value
'v.brakeValue', # Query brake value
'v.gearValue', # Query gear value
]
target_plotables = ['tar.o.sma', # Target Semimajor Axis
'tar.o.lan', # Target Longitude of Ascending Node
'tar.o.maae', # Target Mean Anomaly at Epoch
'tar.name', # Target Name
'tar.type', # Target Type
'tar.distance', # Target Distance
'tar.o.velocity', # Target Velocity
'tar.o.PeA', # Target Periapsis
'tar.o.ApA', # Target Apoapsis
'tar.o.timeToAp', # Target Time to Apoapsis
'tar.o.timeToPe', # Target Time to Periapsis
'tar.o.inclination', # Target Inclination
'tar.o.eccentricity', # Target Eccentricity
'tar.o.period', # Target Orbital Period
'tar.o.relativeVelocity', # Target Relative Velocity
#Sends improperly encoded text back!
#'tar.o.trueAnomaly', # Target True Anomaly
'tar.o.orbitingBody', # Target Orbiting Body
'tar.o.argumentOfPeriapsis', # Target Argument of Periapsis
'tar.o.timeToTransition1', # Target Time to Transition 1
'tar.o.timeToTransition2', # Target Time to Transition 2
'tar.o.timeOfPeriapsisPassage', # Target Time of Periapsis Passage
]
docking_plotables = ['dock.ax', # Docking x Angle
'dock.ay', # Relative Pitch Angle
'dock.az', # Docking z Angle
'dock.x', # Target x Distance
'dock.y', # Target y Distance
]
#In my tests, none of these can be used. Breaks the connection
#body_plotables = ['b.name', # Body Name
#'b.maxAtmosphere', # Body Max Atmosphere
#'b.radius', # Body Radius
#'b.number', # Number of Bodies
#'b.o.gravParameter', # Body Gravitational Parameter
#'b.o.relativeVelocity', # Relative Velocity
#'b.o.PeA', # Periapsis
#'b.o.ApA', # Apoapsis
#'b.o.timeToAp', # Time to Apoapsis
#'b.o.timeToPe', # Time to Periapsis
#'b.o.inclination', # Inclination
#'b.o.eccentricity', # Eccentricity
#'b.o.period', # Orbital Period
#'b.o.argumentOfPeriapsis', # Argument of Periapsis
#'b.o.timeToTransition1', # Time to Transition 1
#'b.o.timeToTransition2', # Time to Transition 2
#'b.o.sma', # Semimajor Axis
#'b.o.lan', # Longitude of Ascending Node
#'b.o.maae', # Mean Anomaly at Epoch
#'b.o.timeOfPeriapsisPassage', # Time of Periapsis Passage
#'b.o.trueAnomaly', # True Anomaly
#'b.o.phaseAngle', # Phase Angle
#]
navball_plotables = ['n.heading', # Heading
'n.pitch', # Pitch
'n.roll', # Roll
'n.rawheading', # Raw Heading
'n.rawpitch', # Raw Pitch
'n.rawroll', # Raw Roll
]
vessel_plotables = ['v.altitude', # Altitude
'v.heightFromTerrain', # Height from Terrain
'v.terrainHeight', # Terrain Height
'v.missionTime', # Mission Time
'v.surfaceVelocity', # Surface Velocity
'v.surfaceVelocityx', # Surface Velocity x
'v.surfaceVelocityy', # Surface Velocity y
'v.surfaceVelocityz', # Surface Velocity z
'v.angularVelocity', # Angular Velocity
'v.orbitalVelocity', # Orbital Velocity
'v.surfaceSpeed', # Surface Speed
'v.verticalSpeed', # Vertical Speed
'v.geeForce', # G-Force
'v.atmosphericDensity', # Atmospheric Density
'v.long', # Longitude
'v.lat', # Latitude
'v.dynamicPressure', # Dynamic Pressure
'v.name', # Name
'v.body', # Body Name
'v.angleToPrograde', # Angle to Prograde
]
orbit_plotables = ['o.relativeVelocity', # Relative Velocity
'o.PeA', # Periapsis
'o.ApA', # Apoapsis
'o.timeToAp', # Time to Apoapsis
'o.timeToPe', # Time to Periapsis
'o.inclination', # Inclination
'o.eccentricity', # Eccentricity
'o.epoch', # Epoch
'o.period', # Orbital Period
'o.argumentOfPeriapsis', # Argument of Periapsis
'o.timeToTransition1', # Time to Transition 1
'o.timeToTransition2', # Time to Transition 2
'o.sma', # Semimajor Axis
'o.lan', # Longitude of Ascending Node
'o.maae', # Mean Anomaly at Epoch
'o.timeOfPeriapsisPassage', # Time of Periapsis Passage
'o.trueAnomaly', # True Anomaly'
]
orbit_plots_names = {'o.relativeVelocity': 'Relative Velocity',
'o.PeA': 'Periapsis',
'o.ApA': 'Apoapsis',
'o.timeToAp': 'Time to Apoapsis',
'o.timeToPe': 'Time to Periapsis',
'o.inclination': 'Inclination',
'o.eccentricity': 'Eccentricity',
'o.epoch': 'Epoch',
'o.period': 'Orbital Period',
'o.argumentOfPeriapsis': 'Argument of Periapsis',
'o.timeToTransition1': 'Time to Transition 1',
'o.timeToTransition2': 'Time to Transition 2',
'o.sma': 'Semimajor Axis',
'o.lan': 'Longitude of Ascending Node',
'o.maae': 'Mean Anomaly at Epoch',
'o.timeOfPeriapsisPassage': 'Time of Periapsis Passage',
'o.trueAnomaly': 'True Anomaly',
}
sensor_plotables = [#'s.sensor', # Sensor Information [string sensor type]
's.sensor.temp', # Temperature sensor information
's.sensor.pres', # Pressure sensor information
's.sensor.grav', # Gravity sensor information
's.sensor.acc', # Acceleration sensor information
]
paused_plotables = ['p.paused', # Paused
]
api_plotables = ['a.version', # Telemachus Version
]
time_warp_plotables = ['t.universalTime', # Universal Time
]
resource_plotables = ['r.resourceMax[ElectricCharge]',
'r.resourceCurrent[ElectricCharge]',
'r.resource[ElectricCharge]',
'r.resourceMax[LiquidFuel]',
'r.resourceCurrent[LiquidFuel]',
'r.resource[LiquidFuel]',
'r.resourceMax[Oxidizer]',
'r.resourceCurrent[Oxidizer]',
'r.resource[Oxidizer]',
'r.resourceCurrent[MonoPropellant]',
'r.resource[MonoPropellant]',
'r.resourceMax[XenonGas]',
'r.resourceCurrent[XenonGas]',
'r.resource[XenonGas]',
'r.resourceMax[IntakeAir]',
'r.resourceCurrent[IntakeAir]',
'r.resource[IntakeAir]']
#These constitute the safe set of plotable values to work with
plotables = flight_plotables + target_plotables + docking_plotables + \
navball_plotables + vessel_plotables + orbit_plotables + \
sensor_plotables + api_plotables + time_warp_plotables + \
resource_plotables
resources = ['r.resource', # Resource Information [string resource type]
'r.resourceCurrent', # Resource Information for Current Stage [string resource type]
'r.resourceMax', # Max Resource Information [string resource type]
]
apis = ['a.api', # API Listing
'a.ip', # IP Addresses
'a.apiSubSet', # Subset of the API Listing [string api1, string api2, ... , string apiN]
'a.version', # Telemachus Version
]
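#Sketch of how these strings are typically used over the Telemachus WebSocket
#(not defined in this module; the message keys below follow the usual
#Telemachus convention: '+'/'-' subscribe and unsubscribe to plotables,
#'run' fires actions, 'rate' sets the pulse interval in milliseconds):
#
#    import json
#    subscribe = json.dumps({'+': ['v.altitude', 'o.ApA'], 'rate': 500})
#    unsubscribe = json.dumps({'-': ['o.ApA']})
#    stage_now = json.dumps({'run': ['f.stage']})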
| gpl-3.0 | 2,619,396,659,809,475,000 | 52.24 | 111 | 0.456123 | false |
YoungKwonJo/mlxtend | mlxtend/plotting/remove_chartchunk.py | 1 | 1051 | # Sebastian Raschka 08/13/2014
# mlxtend Machine Learning Library Extensions
# matplotlib utilities for removing chartchunk
def remove_borders(axes, left=False, bottom=False, right=True, top=True):
"""
A function to remove chartchunk from matplotlib plots, such as axes
spines, ticks, and labels.
Keyword arguments:
axes: An iterable containing plt.gca() or plt.subplot() objects, e.g. [plt.gca()].
left, bottom, right, top: Boolean to specify which plot axes to hide.
"""
for ax in axes:
ax.spines["top"].set_visible(not top)
ax.spines["right"].set_visible(not right)
ax.spines["bottom"].set_visible(not bottom)
ax.spines["left"].set_visible(not left)
if bottom:
ax.tick_params(bottom="off", labelbottom="off")
if top:
ax.tick_params(top="off")
if left:
ax.tick_params(left="off", labelleft="off")
if right:
ax.tick_params(right="off")
| bsd-3-clause | -849,386,927,102,821,400 | 35.241379 | 94 | 0.591817 | false |
eunchong/build | scripts/slave/recipe_modules/chromium_tests/chromium_chromiumos.py | 1 | 6899 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from . import steps
SPEC = {
'settings': {
'build_gs_bucket': 'chromium-chromiumos-archive',
# WARNING: src-side runtest.py is only tested with chromium CQ builders.
# Usage not covered by chromium CQ is not supported and can break
# without notice.
'src_side_runtest_py': True,
},
'builders': {
'Linux ChromiumOS Full': {
'chromium_config': 'chromium',
'chromium_apply_config': ['chromeos', 'ninja_confirm_noop'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'bot_type': 'builder_tester',
'compile_targets': [
'app_list_unittests',
'aura_builder',
'base_unittests',
'browser_tests',
'cacheinvalidation_unittests',
'chromeos_unittests',
'components_unittests',
'compositor_unittests',
'content_browsertests',
'content_unittests',
'crypto_unittests',
'dbus_unittests',
'device_unittests',
'gcm_unit_tests',
'google_apis_unittests',
'gpu_unittests',
'interactive_ui_tests',
'ipc_tests',
'jingle_unittests',
'media_unittests',
'message_center_unittests',
'nacl_loader_unittests',
'net_unittests',
'ppapi_unittests',
'printing_unittests',
'remoting_unittests',
'sandbox_linux_unittests',
'sql_unittests',
'sync_unit_tests',
'ui_base_unittests',
'unit_tests',
'url_unittests',
'views_unittests',
],
'tests': [
steps.ArchiveBuildStep(
'chromium-browser-snapshots',
gs_acl='public-read',
),
],
'testing': {
'platform': 'linux',
},
},
'Linux ChromiumOS Builder': {
'chromium_config': 'chromium',
'chromium_apply_config': ['chromeos', 'ninja_confirm_noop', 'mb'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'bot_type': 'builder',
'compile_targets': [
'aura_builder',
],
'testing': {
'platform': 'linux',
},
'enable_swarming': True,
'use_isolate': True,
},
'Linux ChromiumOS Tests (1)': {
'chromium_config': 'chromium',
'chromium_apply_config': ['chromeos', 'ninja_confirm_noop'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'bot_type': 'tester',
'test_generators': [
steps.generate_gtest,
steps.generate_script,
steps.generate_isolated_script,
],
'parent_buildername': 'Linux ChromiumOS Builder',
'testing': {
'platform': 'linux',
},
'enable_swarming': True,
},
'Linux ChromiumOS GN': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_PLATFORM': 'chromeos',
},
'test_generators': [
steps.generate_gtest,
steps.generate_script,
steps.generate_isolated_script,
],
'enable_swarming': True,
'testing': {
'platform': 'linux',
},
},
'Linux ChromiumOS Ozone Builder': {
'chromium_config': 'chromium',
'chromium_apply_config': ['chromeos', 'ozone', 'ninja_confirm_noop'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'bot_type': 'builder',
'compile_targets': [
'aura_builder',
],
'testing': {
'platform': 'linux',
},
'enable_swarming': True,
'use_isolate': True,
},
'Linux ChromiumOS Ozone Tests (1)': {
'chromium_config': 'chromium',
'chromium_apply_config': ['chromeos', 'ozone', 'ninja_confirm_noop'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': 64,
},
'bot_type': 'tester',
'test_generators': [
steps.generate_gtest,
steps.generate_script,
steps.generate_isolated_script,
],
'parent_buildername': 'Linux ChromiumOS Ozone Builder',
'testing': {
'platform': 'linux',
},
'enable_swarming': True,
'use_isolate': True,
},
'Linux ChromiumOS Builder (dbg)': {
'chromium_config': 'chromium',
'chromium_apply_config': ['chromeos', 'ninja_confirm_noop'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 64,
},
'bot_type': 'builder',
'compile_targets': [
'aura_builder',
],
'testing': {
'platform': 'linux',
},
'enable_swarming': True,
'use_isolate': True,
},
'Linux ChromiumOS Tests (dbg)(1)': {
'chromium_config': 'chromium',
'chromium_apply_config': ['chromeos', 'ninja_confirm_noop'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_BITS': 64,
},
'bot_type': 'tester',
'test_generators': [
steps.generate_gtest,
steps.generate_script,
steps.generate_isolated_script,
],
'parent_buildername': 'Linux ChromiumOS Builder (dbg)',
'testing': {
'platform': 'linux',
},
'enable_swarming': True,
},
'Linux ChromiumOS GN (dbg)': {
'chromium_config': 'chromium',
'chromium_apply_config': ['mb'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Debug',
'TARGET_PLATFORM': 'chromeos',
},
'test_generators': [
steps.generate_gtest,
steps.generate_script,
steps.generate_isolated_script,
],
'enable_swarming': True,
'testing': {
'platform': 'linux',
},
},
},
}
# Simple Chrome compile-only builders.
for board in ('x86-generic', 'amd64-generic', 'daisy'):
SPEC['builders']['ChromiumOS %s Compile' % (board,)] = {
'chromium_config': 'chromium',
'chromium_apply_config': ['chromeos', 'ninja_confirm_noop'],
'gclient_config': 'chromium',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_PLATFORM': 'chromeos',
'TARGET_CROS_BOARD': board,
},
'bot_type': 'builder',
'compile_targets': [
'chromiumos_preflight',
],
'testing': {
'platform': 'linux',
},
}
| bsd-3-clause | -4,032,269,570,872,546,000 | 27.27459 | 76 | 0.541963 | false |
johnaparker/dynamics | examples/old/leap_semistable.py | 1 | 8602 | import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import animation
from scipy import constants
def rotation_transform(axis, angle, ax = None):
if ax is None: ax = plt.gca()
t_scale = ax.transData
t_rotate = mpl.transforms.Affine2D().rotate_deg_around(axis[0], axis[1], angle*180/np.pi)
return t_rotate + t_scale
def animate(spheres, radius, T):
r = radius*1e9
for sphere in spheres:
sphere.x *= 1e9
sphere.y *= 1e9
xmin = np.inf; xmax = -np.inf
ymin = np.inf; ymax = -np.inf
fig,axes = plt.subplots(1,2)
plt.subplots_adjust(hspace=0.3)
plt.subplot(axes[0])
circles = []
lines = []
for i,sphere in enumerate(spheres):
xmin = min(np.min(sphere.x), xmin)
xmax = max(np.max(sphere.x), xmax)
ymin = min(np.min(sphere.y), ymin)
ymax = max(np.max(sphere.y), ymax)
circles.append(plt.Circle([sphere.x[0], sphere.y[0]], r, animated=True, edgecolor='C{}'.format(i), fc='white', lw=2))
lines.append(plt.Line2D([sphere.x[0]-r, sphere.x[0]+r], [sphere.y[0], sphere.y[0]], lw=1.5, color='black', animated=True))
plt.gca().add_artist(circles[-1])
plt.gca().add_line(lines[-1])
plt.xlim([xmin-r, xmax+r])
plt.ylim([ymin-r, ymax+r])
plt.xlabel("x (nm)")
plt.ylabel("y (nm)")
plt.gca().set_aspect('equal')
ax = plt.gca()
title = ax.text(.97,0.03, r"{0:.2f} $\mu$s".format(0.0), transform=ax.transAxes, horizontalalignment='right', fontsize=13, animated=True)
def update(t):
for i,sphere in enumerate(spheres):
circles[i].center = (sphere.x[t], sphere.y[t])
lines[i].set_data([sphere.x[t]-r, sphere.x[t]+r], [sphere.y[t], sphere.y[t]])
lines[i].set_transform(rotation_transform([sphere.x[t], sphere.y[t]], sphere.phi[t], ax=ax))
title.set_text(r"{0:.2f} $\mu$s".format(dt*t*1.e6))
return circles + lines + [title]
anim = animation.FuncAnimation(plt.gcf(), update, frames=np.arange(0,len(spheres[0].x),1), interval=6, blit=True, repeat=True)
# plt.figure()
# plt.plot(time, phi)
# Writer = animation.writers['ffmpeg']
# writer = Writer(fps=30, bitrate=7800)
# anim.save('test.mp4', writer=writer, dpi=200)
# plt.plot(x, y)
# plt.figure()
# rad_data = np.array([(sphere.x**2 + sphere.y**2)**0.5 for sphere in spheres])
# plt.hist(rad_data.flatten())
# plt.figure()
# phi_data = np.array([sphere.phi[4000:] % 2*np.pi for sphere in spheres])
# plt.hist(phi_data.flatten())
plt.subplot(axes[1])
plt.plot(spheres[0].x, spheres[0].y)
plt.plot(spheres[1].x, spheres[1].y)
plt.xlabel("x (nm)")
plt.ylabel("y (nm)")
plt.xlim([xmin-r, xmax+r])
plt.ylim([ymin-r, ymax+r])
plt.gca().set_aspect('equal')
plt.suptitle(r"time step: {0:.1f} ns, T = {1} K, $\mu$ = {2:.2f} mPa$\cdot$s".format(dt*1e9, T, mu*1e3))
plt.figure()
kT = constants.k*T
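    # Equipartition check: each quadratic degree of freedom (vx, vy, omega)
    # should average kT/2, so the kinetic energies below are plotted in kT units.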
KE_x = 0.5*spheres[0].mass*spheres[0].vx**2/kT
KE_y = 0.5*spheres[0].mass*spheres[0].vy**2/kT
KE_r = 0.5*spheres[0].Iz*spheres[0].omega**2/kT
plt.hist(KE_r[np.isfinite(KE_r)], color = 'C2', bins=np.linspace(0,2.5,80), alpha=0.5)
plt.hist(KE_x[np.isfinite(KE_x)], color = 'C0', bins=np.linspace(0,2.5,80), alpha=0.5)
plt.hist(KE_y[np.isfinite(KE_y)], color = 'C1', bins=np.linspace(0,2.5,80), alpha=0.5)
plt.axvline(x = 0.5*constants.k*T/kT, color='black')
plt.figtext(.85,.8, r"$\frac{{1}}{{2}}kT$", horizontalalignment='right', verticalalignment='top', fontsize=14)
plt.figtext(.85,.70, r"$\left< \frac{{1}}{{2}}mv_x^2 \right>$: {0:.3f} $kT$".format(np.average(KE_x)) , horizontalalignment='right', verticalalignment='top', fontsize=14, color='C0')
plt.figtext(.85,.6, r"$\left< \frac{{1}}{{2}}mv_y^2 \right>$: {0:.3f} $kT$".format(np.average(KE_y)) , horizontalalignment='right', verticalalignment='top', fontsize=14, color='C1')
plt.figtext(.85,.5, r"$\left< \frac{{1}}{{2}}I_z \omega_z^2 \right>$: {0:.3f} $kT$".format(np.average(KE_r)) , horizontalalignment='right', verticalalignment='top', fontsize=14, color='C2')
plt.xlim([0,3*0.5])
plt.xlabel("energy (kT)")
plt.ylabel("count")
plt.suptitle(r"time step: {0:.1f} ns, T = {1} K, $\mu$ = {2:.2f} mPa$\cdot$s".format(dt*1e9, T, mu*1e3))
plt.figure()
plt.plot(time*1e3, spheres[0].phi*180/np.pi)
plt.plot(time*1e3, spheres[1].phi*180/np.pi)
plt.xlabel("time (ms)")
plt.ylabel("angle (deg.)")
plt.show()
class rigid_body:
def __init__(self, pos, mass, Iz):
self.pos = pos
self.angle = 0
self.mass = mass
self.Iz = Iz
self.velocity = np.zeros(2)
self.angular_velocity = 0
self.F = np.zeros(2)
self.prevF = np.zeros(2)
self.T = 0
self.prevT = 0
self.predicted_velocity = np.zeros(2)
self.predicted_angular_velocity = 0
self.x = []
self.y = []
self.vx = []
self.vy = []
self.omega = []
self.phi = []
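    # predict()/correct() implement a velocity-Verlet (kick-drift-kick) update:
    # predict applies half a kick with the previous force/torque and drifts the
    # position, correct applies the second half-kick once the new force and
    # torque for the step have been evaluated.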
def predict(self, dt):
self.velocity += (self.prevF/self.mass)*dt/2
self.angular_velocity += (self.prevT/self.Iz)*dt/2
self.pos += self.velocity*dt
self.angle += self.angular_velocity*dt
def correct(self, F, T, dt):
self.velocity += (F/self.mass)*dt/2
self.angular_velocity += (T/self.Iz)*dt/2
self.prevF = F
self.prevT = T
def push(self, F, dt):
self.velocity += (F/self.mass)*dt
self.pos += self.velocity*dt
def twist(self, T, dt):
self.angular_velocity += (T/self.Iz)*dt
self.angle += self.angular_velocity*dt
# final time and time step
tf = 300e-6
dt = 172e-9
tf = 100000*dt
time = np.arange(0,tf,dt)
# sphere properties
radius = 150e-9
density = 10490
mass = 4./3.*np.pi*radius**3*density
Iz = 2./5.*mass*radius**2
# initial conditions
spheres = [ rigid_body(np.array([-200e-9,0.0]), mass, Iz),
rigid_body(np.array([200e-9,0.0]), mass, Iz) ]
# spheres = [rigid_body(100e-9*np.array([x,0.0]), mass, Iz) for x in np.arange(-8,8,4)]
# spheres = [rigid_body(100e-9*np.array([x,4.0]), mass, Iz) for x in np.arange(-8,8,4)]
# spheres.extend([rigid_body(100e-9*np.array([x,-4.0]), mass, Iz) for x in np.arange(-8,8,4)])
# spheres.extend([rigid_body(100e-9*np.array([x,-8.0]), mass, Iz) for x in np.arange(-8,8,4)])
# fluid properties
mu = 0.6e-3 # liquid viscosity
temp = 320 # temperature
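# Stokes drag coefficients for a sphere (translation, rotation) and the matching
# per-step random force/torque amplitudes from the fluctuation-dissipation
# theorem, so the Langevin bath thermalizes the spheres at temperature `temp`.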
alpha_T = 6*np.pi*mu*radius
alpha_R = 8*np.pi*mu*radius**3
beta_T = (2*alpha_T*constants.k*temp/dt)**0.5
beta_R = (2*alpha_R*constants.k*temp/dt)**0.5
# Electrostatic repulsion
estatic = 0
# beam properties
r0 = 400e-9 # radius
I = 0e-12 # intensity
l = 0e-12 # angular drive
center = np.array([0,-200e-9]) # beam center
for t in time:
# Fadd = np.zeros((len(spheres),2))
# for i in range(0,len(spheres)):
# for j in range(i+1,len(spheres)):
# d = spheres[i].pos - spheres[j].pos
# r = np.linalg.norm(d)
# Fadd[i] += estatic*d/r**3
# Fadd[j] += -estatic*d/r**3
# if r < 2*radius:
# dv = spheres[i].velocity - spheres[j].velocity
# spheres[i].velocity -= np.dot(dv, d)/r**2 * d
# spheres[j].velocity += np.dot(dv, d)/r**2 * d
for i,sphere in enumerate(spheres):
sphere.predict(dt)
F = -alpha_T*sphere.velocity + beta_T*np.random.normal(size=2)
n = sphere.pos - center
dist = np.linalg.norm(n)
n /= dist
that = np.array([n[1], -n[0]])
# F += I*1*dist*np.exp(dist**2/r0**2)*(1 - dist**2/r0**2)*n
# F += l*that# *np.sin(t/59)
# F += Fadd[i]
T = -alpha_R*sphere.angular_velocity + beta_R*np.random.normal(size=1)[0]
sphere.correct(F,T,dt)
sphere.x.append(sphere.pos[0])
sphere.y.append(sphere.pos[1])
sphere.vx.append(sphere.velocity[0])
sphere.vy.append(sphere.velocity[1])
sphere.omega.append(sphere.angular_velocity)
sphere.phi.append(sphere.angle)
for sphere in spheres:
sphere.x = np.asarray(sphere.x)
sphere.y = np.asarray(sphere.y)
sphere.vx = np.asarray(sphere.vx)
sphere.vy = np.asarray(sphere.vy)
sphere.phi = np.asarray(sphere.phi)
sphere.omega = np.asarray(sphere.omega)
animate(spheres, radius=radius, T=temp)
# plt.plot(1e9*spheres[0].x, 1e9*spheres[0].y, '.-')
# plt.plot(1e9*spheres[1].x, 1e9*spheres[1].y, '.-')
plt.show()
| mit | 8,537,241,411,429,405,000 | 33.270916 | 194 | 0.58126 | false |
bendk/thesquirrel | docs/forms.py | 1 | 1624 | # thesquirrel.org
#
# Copyright (C) 2015 Flying Squirrel Community Space
#
# thesquirrel.org is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
# thesquirrel.org is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public
# License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with thesquirrel.org. If not, see <http://www.gnu.org/licenses/>.
from django import forms
from django.utils.translation import ugettext as _
from docs.models import Document
class DocumentForm(forms.ModelForm):
public = forms.ChoiceField(label=_('Access'), choices=(
(False, 'Members Only'),
(True, 'Public'),
))
slug = forms.CharField(label=_('URL'), widget=forms.TextInput(attrs={
'data-slug-for': 'title'
}))
class Meta:
model = Document
fields = ( 'title', 'slug', 'public', 'body', )
labels = {
'body': '',
}
def __init__(self, author, *args, **kwargs):
self.author = author
super(DocumentForm, self).__init__(*args, **kwargs)
def save(self, commit=True):
document = super(DocumentForm, self).save(commit=False)
document.author = self.author
if commit:
document.save()
return document
| agpl-3.0 | -4,625,286,531,913,107,000 | 33.553191 | 78 | 0.666256 | false |
kenik/pyrcp | pyrcp/user.py | 1 | 2065 | # -* coding: utf-8 -*-
from hashlib import md5
from pyrcp import *
import pyrcp.db as pydb
from flask import Flask, request, redirect, render_template
from flask_login import (LoginManager, login_required, login_user,
current_user, logout_user, UserMixin, AnonymousUserMixin)
from itsdangerous import URLSafeTimedSerializer
login_serializer = URLSafeTimedSerializer(app.secret_key)
class User(UserMixin):
def __init__(self, userid, user_pass, account_id):
self.id = userid
self.user_pass = user_pass
self.account_id = account_id
def get_auth_token(self):
"""
Encode a secure token for cookie
"""
data = [str(self.id), self.user_pass]
return login_serializer.dumps(data)
@staticmethod
def get(userid):
"""
Static method to search the database and see if userid exists. If it
does exist then return a User Object. If not then return None as
required by Flask-Login.
"""
#For this example the USERS database is a list consisting of
#(user,hased_password) of users.
db = pydb.get_db()
cursor = db.cursor()
sql = "SELECT `userid`, `user_pass`, `account_id` FROM `login` WHERE `userid`='%s'"
cur = cursor.execute(sql % (userid))
users = cursor.fetchall()
for user in users:
if user[0] == userid:
return User(user[0], user[1], user[2])
return None
def get_acc_id(self):
return self.account_id
def get_id(self):
return self.id
def is_anonymous(self):
return False
def is_authenticated(self):
return True
class AnonymousUser(AnonymousUserMixin):
def __init__(self):
self.id = 'Guest'
def get_id(self):
return self.id
def is_anonymous(self):
return True
def is_authenticated(self):
return False
def hash_pass(password):
return md5(password).hexdigest()
def main():
pass
if __name__ == '__main__':
main()
| gpl-2.0 | 2,577,444,483,230,044,700 | 25.818182 | 91 | 0.607264 | false |
NoNameYet07/421_521_final_project | BEERdunio_modules_kg1_sp.py | 1 | 6285 | # bin/usr/python
# Setting up GPIO pins
from time import sleep
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BOARD) # Identifies the pin numbers to the pi
GPIO.setwarnings(False)
GPIO.setup(3, GPIO.OUT) # Sets pin #3 as an output
GPIO.output(3, GPIO.LOW) # Turns the initial output for pin 3 off
import time
timestr = time.strftime("%Y%m%d %H%M%S")
# Import functions to analyze license validity from CheckLicense.py
from CheckLicense import check_license, calc_BAC
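# check_license is expected to return (valid_license, first_name, last_name,
# DL_num) for a swiped license string; calc_BAC estimates the blood alcohol
# content from the same swipe data.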
import getpass
import sys
import re
# Operating modes
mode = ""  # no mode is selected until a valid password is entered
while True:
#try:
mode_req=raw_input("Enter Mode(normal, party, barkeep): ")
if mode_req=="party":
passwd=getpass.getpass("Enter password: ")
if passwd=="admin":
mode="party"
if mode_req=="normal":
passwd=getpass.getpass("Enter password: ")
if passwd=="admin":
mode="normal"
if mode_req=="barkeep":
passwd=getpass.getpass("Enter password: ")
if passwd=="admin":
mode="barkeep"
#Normal mode operations--------------------------------------------------------------------------------------------
while mode=='normal':
#try:
print '{0} mode!' .format(mode)
raw_text=getpass.getpass('Swipe card now: ').strip()
check_license_out=check_license(raw_text)
valid_license=check_license_out[0]
first_name=check_license_out[1]
last_name=check_license_out[2]
DL_num=check_license_out[3]
# Check to see if person is registered user
users=open("users_list.txt", 'r')
hit=0
print DL_num
if valid_license=='Yes':
for line in users:
if re.search(DL_num, line, re.IGNORECASE):
hit=hit+1
if hit>=1:
valid_license='Yes'
else:
print 'Not registered user'
valid_license='No'
# Calculating the BAC
BAC=calc_BAC(raw_text)
print BAC
# Opening the solenoid
if valid_license=='Yes':
            GPIO.output(3, GPIO.HIGH)
print 'Beer time!'
sleep(2);
            GPIO.output(3, GPIO.LOW)
with open("swipes.txt", "a") as myfile:
myfile.write(last_name+","+first_name+" ")
myfile.write(DL_num+" ")
myfile.write(mode+" ")
myfile.write(time.strftime("%Y-%m-%d")+" ")
myfile.write(str(time.time())+"\n")
# except (NameError, IndexError, ValueError):
# print "error"
# continue
#Party mode operations--------------------------------------------------------------------------------------------
while mode=="party":
try:
print '{0} mode!' .format(mode)
raw_license_text=getpass.getpass('Swipe card now: ').strip()
check_license_out=check_license(raw_license_text)
valid_license=check_license_out[0]
first_name=check_license_out[1]
last_name=check_license_out[2]
# Opening the solenoid
if valid_license=='Yes':
                GPIO.output(3, GPIO.HIGH)
print 'Beer time!'
sleep(2);
                GPIO.output(3, GPIO.LOW)
with open("swipes_normal.txt", "a") as myfile:
myfile.write(last_name)
myfile.write(",")
myfile.write(first_name)
myfile.write(",")
myfile.write(time.strftime("%Y%m%d%H%M%S\n"))
except (NameError, IndexError, ValueError):
print "error"
continue
#Barkeep mode operations-------------------------------------------------------------------------------------------
while mode=="barkeep":
try:
print '{0} mode!' .format(mode)
check_license_out=check_license(getpass.getpass('Swipe card now: ').strip())
valid_license=check_license_out[0]
first_name=check_license_out[1]
last_name=check_license_out[2]
#usr_chksum = #chksum(firstname_lastname)
#'{0}beer_score' .format(usr_chksum)
#Check to see if person is blacklisted
blacklist=open("blacklist.txt", 'r')
hit=0
if valid_license=='Yes':
for line in blacklist:
if re.search(last_name, line, re.IGNORECASE):
hit=hit+1
if re.search(first_name, line, re.IGNORECASE):
hit=hit+1
if hit>=2:
print "We don't serve your kind here."
blacklisted='Yes'
else:
blacklisted='No'
#Calculate BAC
#Opening the solenoid
if blacklisted=='No':
if BAC < intoxicated:
                    GPIO.output(3, GPIO.HIGH)
print 'Beer time!'
print BAC
sleep(2);
                    GPIO.output(3, GPIO.LOW)
with open("swipes_barkeep.txt", "a") as myfile:
myfile.write(last_name)
myfile.write(",")
myfile.write(first_name)
myfile.write("_")
myfile.write(time.strftime("%Y-%m-%d %H:%M%S\n"))
else:
print 'Your BAC is {0}' .format(BAC)
print "You are too drunk, beer time is over"
except (NameError, IndexError, ValueError):
print "error"
continue
# except (NameError, IndexError, ValueError):
print "error"
# continue
#end ---------------------------------------------------------------------------
| gpl-2.0 | -4,553,099,314,612,198,000 | 32.972973 | 119 | 0.449642 | false |
oblalex/gnuplot.py-py3k | gp_macosx.py | 1 | 4576 | # $Id: gp_macosx.py 291 2006-03-03 08:58:48Z mhagger $
# Copyright (C) 1998-2003 Michael Haggerty <[email protected]>
#
# This file is licensed under the GNU Lesser General Public License
# (LGPL). See LICENSE.txt for details.
"""gp_macosx -- an interface to the command line version of gnuplot
used under Mac OS X.
The only difference between this interface and gp_unix is that
default_term is 'aqua'.
This file implements a low-level interface to gnuplot. This file
should be imported through gp.py, which in turn should be imported via
'import Gnuplot' rather than using these low-level interfaces
directly.
"""
# ############ Configuration variables: ################################
class GnuplotOpts:
"""The configuration options for gnuplot on Mac OS X.
See the gp_unix.py for documentation on all of the parameters.
"""
gnuplot_command = 'gnuplot'
recognizes_persist = None # test automatically on first use
prefer_persist = 0
recognizes_binary_splot = 1
prefer_inline_data = 0
# os.mkfifo should be supported on Mac OS X. Let me know if I'm
# wrong.
support_fifo = 1
prefer_fifo_data = 1
default_term = 'aqua'
default_lpr = '| lpr'
prefer_enhanced_postscript = 1
# ############ End of configuration options ############################
from os import popen
def test_persist():
"""Determine whether gnuplot recognizes the option '-persist'.
If the configuration variable 'recognizes_persist' is set (i.e.,
to something other than 'None'), return that value. Otherwise,
try to determine whether the installed version of gnuplot
recognizes the -persist option. (If it doesn't, it should emit an
error message with '-persist' in the first line.) Then set
'recognizes_persist' accordingly for future reference.
"""
    if GnuplotOpts.recognizes_persist is None:
        g = popen('echo | %s -persist 2>&1' % GnuplotOpts.gnuplot_command, 'r')
        response = g.readlines()
        g.close()
        GnuplotOpts.recognizes_persist = (
            (not response) or ('-persist' not in response[0]))
return GnuplotOpts.recognizes_persist
class GnuplotProcess:
"""Unsophisticated interface to a running gnuplot program.
This represents a running gnuplot program and the means to
communicate with it at a primitive level (i.e., pass it commands
or data). When the object is destroyed, the gnuplot program exits
(unless the 'persist' option was set). The communication is
one-way; gnuplot's text output just goes to stdout with no attempt
to check it for error messages.
Members:
'gnuplot' -- the pipe to the gnuplot command.
Methods:
'__init__' -- start up the program.
'__call__' -- pass an arbitrary string to the gnuplot program,
followed by a newline.
'write' -- pass an arbitrary string to the gnuplot program.
'flush' -- cause pending output to be written immediately.
'close' -- close the connection to gnuplot.
"""
def __init__(self, persist=None):
"""Start a gnuplot process.
Create a 'GnuplotProcess' object. This starts a gnuplot
program and prepares to write commands to it.
Keyword arguments:
'persist=1' -- start gnuplot with the '-persist' option,
(which leaves the plot window on the screen even after
the gnuplot program ends, and creates a new plot window
each time the terminal type is set to 'x11'). This
option is not available on older versions of gnuplot.
"""
if persist is None:
persist = GnuplotOpts.prefer_persist
if persist:
if not test_persist():
raise Exception('-persist does not seem to be supported '
'by your version of gnuplot!')
self.gnuplot = popen('%s -persist' % GnuplotOpts.gnuplot_command,
'w')
else:
self.gnuplot = popen(GnuplotOpts.gnuplot_command, 'w')
# forward write and flush methods:
self.write = self.gnuplot.write
self.flush = self.gnuplot.flush
def close(self):
if self.gnuplot is not None:
self.gnuplot.close()
self.gnuplot = None
def __del__(self):
self.close()
def __call__(self, s):
"""Send a command string to gnuplot, followed by newline."""
self.write(s + '\n')
self.flush()
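# --- Illustrative usage sketch (editor's addition, not part of the original
# module). It assumes a 'gnuplot' binary on the PATH and an aqua-capable
# build; normally this module is reached indirectly via 'import Gnuplot'.
if __name__ == '__main__':
    gp = GnuplotProcess()
    gp('set terminal %s' % GnuplotOpts.default_term)
    gp('plot sin(x) title "demo"')
    gp.close()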
| lgpl-2.1 | 5,807,611,642,525,777,000 | 30.342466 | 79 | 0.629808 | false |
laterpay/djtranslationchecker | setup.py | 1 | 1271 | # -*- coding: UTF-8 -*-
from setuptools import find_packages, setup
import codecs
import os
#import time
#_version = "0.10.dev%s" % int(time.time())
_version = "0.10.0"
_packages = find_packages('.', exclude=["*.tests", "*.tests.*", "tests.*", "tests"])
if os.path.exists('README.rst'):
_long_description = codecs.open('README.rst', 'r', 'utf-8').read()
else:
_long_description = ""
setup(
name='djtranslationchecker',
version=_version,
description="Check your Django translation files",
long_description=_long_description,
author="LaterPay GmbH",
author_email="[email protected]",
url="https://github.com/laterpay/djtranslationchecker",
license='MIT',
keywords="Django translation check gettext",
#test_suite="tests",
packages=_packages,
classifiers=(
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Topic :: Software Development :: Libraries :: Python Modules",
),
)
| mit | 5,567,271,756,062,893,000 | 27.886364 | 84 | 0.626279 | false |
grnet/agkyra | agkyra/syncer/file_client.py | 1 | 1872 | # Copyright (C) 2015 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
logger = logging.getLogger(__name__)
from agkyra.syncer import common, messaging
class FileClient(object):
def list_candidate_files(self, archive):
raise NotImplementedError
def start_probing_file(self, objname, old_state, ref_state, callback=None):
raise NotImplementedError
def stage_file(self, source_state):
raise NotImplementedError
def prepare_target(self, state):
raise NotImplementedError
def start_pulling_file(self, source_handle, target_state, sync_state,
callback=None):
synced_source_state, synced_target_state = \
self._start(source_handle, target_state, sync_state)
if callback is not None:
callback(synced_source_state, synced_target_state)
def _start(self, source_handle, target_state, sync_state):
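        # Prepare the target, pull the staged source file into it, and capture
        # the synced source/target states; the source is unstaged even if the
        # pull fails (see the finally clause).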
try:
target_handle = self.prepare_target(target_state)
synced_target_state = target_handle.pull(source_handle, sync_state)
synced_source_state = source_handle.get_synced_state()
return synced_source_state, synced_target_state
finally:
source_handle.unstage_file()
| gpl-3.0 | -1,186,452,690,488,314,600 | 36.44 | 79 | 0.69391 | false |
telefonicaid/fiware-puppetwrapper | acceptance_tests/component/delete_module/features/steps.py | 1 | 1740 | __author__ = 'arobres'
# -*- coding: utf-8 -*-
from commons.rest_utils import RestUtils
from nose.tools import assert_true, assert_false
import commons.assertions as Assertions
import commons.fabric_utils as Fabutils
from commons.constants import URL, MODULE_NAME, REPOSITORY
from lettuce import step, world, before
api_utils = RestUtils()
@before.each_scenario
def setup(scenario):
world.software_downloaded = []
@step(u'Given a downloaded module from repository')
def given_a_downloaded_module_from_repository(step):
for examples in step.hashes:
url = examples[URL]
module_name = examples[MODULE_NAME]
repository = examples[REPOSITORY]
response = api_utils.download_module(software_name=module_name, repository=repository, url=url)
Assertions.assert_response_ok(response)
assert_true(Fabutils.execute_assert_download(module_name))
world.software_downloaded.append(module_name)
@step(u'When I delete the module "([^"]*)"')
def when_i_delete_the_module_group1(step, module_name):
world.module_name = module_name
world.response = api_utils.delete_module(software_name=module_name)
@step(u'Then the module is deleted from the system')
def then_the_module_is_deleted_from_the_system(step):
Assertions.assert_response_ok(world.response)
assert_false(Fabutils.execute_assert_download(world.module_name))
@step(u'Then the module is not deleted from the system')
def then_the_module_is_not_deleted_from_the_system(step):
Assertions.assert_response_ok(world.response)
assert_false(Fabutils.execute_assert_download(world.module_name))
for module in world.software_downloaded:
assert_true(Fabutils.execute_assert_download(module))
| apache-2.0 | -1,643,073,405,117,868,800 | 30.636364 | 103 | 0.737931 | false |