Dataset schema: repo_name (string, 5-92 chars), path (string, 4-221 chars), copies (string, 19 classes), size (string, 4-6 chars), content (string, 766-896k chars), license (string, 15 classes), hash (int64), line_mean (float64, 6.51-99.9), line_max (int64, 32-997), alpha_frac (float64, 0.25-0.96), autogenerated (bool, 1 class), ratio (float64, 1.5-13.6), config_test (bool, 2 classes), has_no_keywords (bool, 2 classes), few_assignments (bool, 1 class).

repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
mgeorgehansen/FIFE_Technomage | engine/python/fife/extensions/fife_settings.py | 1 | 15915 | # -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2010 by the FIFE team
# http://www.fifengine.net
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
"""
Settings
==================================
This module provides a nice framework for loading and saving game settings.
It is by no means complete but it does provide a good starting point.
"""
import shutil
import os
from StringIO import StringIO
from fife.extensions import pychan
from fife.extensions.fife_utils import getUserDataDirectory
from fife.extensions.serializers.simplexml import SimpleXMLSerializer
SETTINGS_GUI_XML="""\
<Window name="Settings" title="Settings">
<Label text="Settings menu!" />
<HBox>
<VBox>
<Label text="Resolution:" />
<Label text="Renderer:" />
<Label text="Light Model:" />
</VBox>
<VBox min_size="120,60">
<DropDown name="screen_resolution" min_size="120,0" />
<DropDown name="render_backend" min_size="120,0" />
<DropDown name="lighting_model" min_size="120,0" />
</VBox>
</HBox>
<CheckBox name="enable_fullscreen" text="Use the full screen mode" />
<CheckBox name="enable_sound" text="Enable sound" />
<HBox>
<Spacer />
<Button name="cancelButton" text="Cancel" />
<Button name="okButton" text="Ok" />
<Button name="defaultButton" text="Defaults" />
</HBox>
</Window>
"""
CHANGES_REQUIRE_RESTART="""\
<Window title="Changes require restart">
<Label text="Some of your changes require you to restart." />
<HBox>
<Spacer />
<Button name="closeButton" text="Ok" />
</HBox>
</Window>
"""
FIFE_MODULE = "FIFE"
class Setting(object):
"""
This class manages loading and saving of game settings.
Usage::
from fife.extensions.fife_settings import Setting
settings = Setting(app_name="myapp")
screen_width = settings.get("FIFE", "ScreenWidth", 1024)
screen_height = settings.get("FIFE", "ScreenHeight", 768)
"""
def __init__(self, app_name="", settings_file="", default_settings_file= "settings-dist.xml", settings_gui_xml="", changes_gui_xml="", copy_dist=True, serializer=None):
"""
Initializes the Setting object.
@param app_name: The application's name. If this parameter is provided
alone, it will try to read the settings file from the user's home directory.
On Windows this will be something like: C:\Documents and Settings\user\Application Data\fife
@type app_name: C{string}
@param settings_file: The name of the settings file. If this parameter is
provided it will look for the settings file as you specify it, first looking
in the working directory. It will NOT look in the user's home directory.
@type settings_file: C{string}
@param default_settings_file: The name of the default settings file. If the settings_file
does not exist this file will be copied into the place of the settings_file. This file
must exist in the root directory of your project!
@type default_settings_file: C{string}
@param settings_gui_xml: If you specify this parameter you can customize the look
of the settings dialog box.
@param copy_dist: Copies the default settings file to the settings_file location. If
this is False it will create a new empty setting file.
@param serializer: Overrides the default XML serializer
@type serializer: C{SimpleSerializer}
"""
self._app_name = app_name
self._settings_file = settings_file
self._default_settings_file = default_settings_file
self._settings_gui_xml = settings_gui_xml
self._changes_gui_xml = changes_gui_xml
self.OptionsDlg = None
# Holds SettingEntries
self._entries = {}
if self._settings_file == "":
self._settings_file = "settings.xml"
self._appdata = getUserDataDirectory("fife", self._app_name)
else:
self._appdata = os.path.dirname(self._settings_file)
self._settings_file = os.path.basename(self._settings_file)
if self._settings_gui_xml == "":
self._settings_gui_xml = SETTINGS_GUI_XML
if self._changes_gui_xml == "":
self._changes_gui_xml = CHANGES_REQUIRE_RESTART
if not os.path.exists(os.path.join(self._appdata, self._settings_file)):
if os.path.exists(self._default_settings_file) and copy_dist:
shutil.copyfile(self._default_settings_file, os.path.join(self._appdata, self._settings_file))
#default settings
self._resolutions = ['640x480', '800x600', '1024x768', '1280x800', '1440x900']
self._renderbackends = ['OpenGL', 'SDL']
self._lightingmodels = [0, 1, 2]
#Used to stylize the options gui
self._gui_style = "default"
#Initialize the serializer
if serializer:
self._serializer = serializer
else:
self._serializer = SimpleXMLSerializer()
self.initSerializer()
self._initDefaultSettingEntries()
def initSerializer(self):
self._serializer.load(os.path.join(self._appdata, self._settings_file))
def _initDefaultSettingEntries(self):
"""Initializes the default fife setting entries. Not to be called from
outside this class."""
self.createAndAddEntry(FIFE_MODULE, "PlaySounds", "enable_sound",
requiresrestart=True)
self.createAndAddEntry(FIFE_MODULE, "FullScreen", "enable_fullscreen",
requiresrestart=True)
self.createAndAddEntry(FIFE_MODULE, "ScreenResolution", "screen_resolution", initialdata = self._resolutions,
requiresrestart=True)
self.createAndAddEntry(FIFE_MODULE, "RenderBackend", "render_backend", initialdata = self._renderbackends,
requiresrestart=True)
self.createAndAddEntry(FIFE_MODULE, "Lighting", "lighting_model", initialdata = self._lightingmodels,
requiresrestart=True)
def createAndAddEntry(self, module, name, widgetname, applyfunction=None, initialdata=None, requiresrestart=False):
""""
@param module: The Setting module this Entry belongs to
@type module: C{String}
@param name: The Setting's name
@type name: C{String}
@param widgetname: The name of the widget that is used to change this
setting
@type widgetname: C{String}
@param applyfunction: function that makes the changes when the Setting is
saved
@type applyfunction: C{function}
@param initialdata: If the widget supports the setInitialData() function
this can be used to set the initial data
@type initialdata: C{String} or C{Boolean}
@param requiresrestart: Whether or not the changing of this setting
requires a restart
@type requiresrestart: C{Boolean}
"""
entry = SettingEntry(module, name, widgetname, applyfunction, initialdata, requiresrestart)
self.addEntry(entry)
def addEntry(self, entry):
"""Adds a new C{SettingEntry} to the Settting
@param entry: A new SettingEntry that is to be added
@type entry: C{SettingEntry}
"""
if entry.module not in self._entries:
self._entries[entry.module] = {}
self._entries[entry.module][entry.name] = entry
# Make sure the new entry is available
if self.get(entry.module, entry.name) is None:
print "Updating", self._settings_file, "to the default, it is missing the entry:"\
, entry.name ,"for module", entry.module
self.setDefaults()
if self.get(entry.module, entry.name) is None:
print "WARNING:", entry.module, ":", entry.name, "still not found!"
def saveSettings(self, filename=""):
""" Writes the settings to the settings file
@param filename: Specifies the file to save the settings to. If it is not specified
the original settings file is used.
@type filename: C{string}
"""
if self._serializer:
if filename == "":
self._serializer.save(os.path.join(self._appdata, self._settings_file))
else:
self._serializer.save(filename)
def get(self, module, name, defaultValue=None):
""" Gets the value of a specified setting
@param module: Name of the module to get the setting from
@param name: Setting name
@param defaultValue: Specifies the default value to return if the setting is not found
@type defaultValue: C{str} or C{unicode} or C{int} or C{float} or C{bool} or C{list} or C{dict}
"""
if self._serializer:
return self._serializer.get(module, name, defaultValue)
else:
return None
def set(self, module, name, value, extra_attrs={}):
"""
Sets a setting to specified value.
@param module: Module where the setting should be set
@param name: Name of setting
@param value: Value to assign to setting
@type value: C{str} or C{unicode} or C{int} or C{float} or C{bool} or C{list} or C{dict}
@param extra_attrs: Extra attributes to be stored in the XML-file
@type extra_attrs: C{dict}
"""
if self._serializer:
self._serializer.set(module, name, value, extra_attrs)
def setGuiStyle(self, style):
""" Set a custom gui style used for the option dialog.
@param style: Pychan style to be used
@type style: C{string}
"""
self._gui_style = style
def onOptionsPress(self):
"""
Opens the options dialog box. Usually you would bind this to a button.
"""
self.changesRequireRestart = False
self.isSetToDefault = False
if not self.OptionsDlg:
self.loadSettingsDialog()
self.fillWidgets()
self.OptionsDlg.show()
def loadSettingsDialog(self):
"""
Load up the settings xml and return the widget.
"""
self.OptionsDlg = self._loadWidget(self._settings_gui_xml)
self.OptionsDlg.stylize(self._gui_style)
self.OptionsDlg.mapEvents({
'okButton' : self.applySettings,
'cancelButton' : self.OptionsDlg.hide,
'defaultButton' : self.setDefaults
})
return self.OptionsDlg
def _loadWidget(self, dialog):
"""Loads a widget. Can load both files and pure xml strings"""
if os.path.isfile(self._settings_gui_xml):
return pychan.loadXML(dialog)
else:
return pychan.loadXML(StringIO(dialog))
def fillWidgets(self):
for module in self._entries.itervalues():
for entry in module.itervalues():
widget = self.OptionsDlg.findChildByName(entry.settingwidgetname)
value = self.get(entry.module, entry.name)
if type(entry.initialdata) is list:
try:
value = entry.initialdata.index(value)
except ValueError:
raise ValueError("\"" + value + "\" is not a valid value for " + entry.name + ". Valid options: " + str(entry.initialdata))
entry.initializeWidget(widget, value)
def applySettings(self):
"""
Writes the settings file. If a change requires a restart of the engine
it notifies you with a small dialog box.
"""
for module in self._entries.itervalues():
for entry in module.itervalues():
widget = self.OptionsDlg.findChildByName(entry.settingwidgetname)
data = widget.getData()
# If the data is a list we need to get the correct selected data
# from the list. This is needed for e.g. dropdowns or list boxes.
if type(entry.initialdata) is list:
data = entry.initialdata[data]
# only take action if something really changed
if data != self.get(entry.module, entry.name):
self.set(entry.module, entry.name, data)
entry.onApply(data)
if entry.requiresrestart:
self.changesRequireRestart = True
self.saveSettings()
self.OptionsDlg.hide()
if self.changesRequireRestart:
self._showChangeRequireRestartDialog()
def _showChangeRequireRestartDialog(self):
"""Shows a dialog that informes the user that a restart is required
to perform the changes."""
RestartDlg = self._loadWidget(self._changes_gui_xml)
RestartDlg.stylize(self._gui_style)
RestartDlg.mapEvents({ 'closeButton' : RestartDlg.hide })
RestartDlg.show()
def setAvailableScreenResolutions(self, reslist):
"""
A list of valid default screen resolutions. This should be called once
right after you instantiate Settings.
Valid screen resolutions must be strings in the form of: WIDTHxHEIGHT
Example:
settings.setAvailableScreenResolutions(["800x600", "1024x768"])
"""
self._resolutions = reslist
def setDefaults(self):
"""
Overwrites the setting file with the default settings file.
"""
shutil.copyfile(self._default_settings_file, os.path.join(self._appdata, self._settings_file))
self.changesRequireRestart = True
self.initSerializer()
#update all widgets with the new data
self.fillWidgets()
def _getEntries(self):
return self._entries
def _setEntries(self, entries):
self._entries = entries
def _getSerializer(self):
return self._serializer
entries = property(_getEntries, _setEntries)
serializer = property(_getSerializer)
class SettingEntry(object):
def __init__(self, module, name, widgetname, applyfunction=None, initialdata=None, requiresrestart=False):
"""
@param module: The Setting module this Entry belongs to
@type module: C{String}
@param name: The Setting's name
@type name: C{String}
@param widgetname: The name of the widget that is used to change this
setting
@type widgetname: C{String}
@param applyfunction: function that makes the changes when the Setting is
saved
@type applyfunction: C{function}
@param initialdata: If the widget supports the setInitialData() function
this can be used to set the initial data
@type initialdata: C{String} or C{Boolean}
@param requiresrestart: Whether or not the changing of this setting
requires a restart
@type requiresrestart: C{Boolean}
"""
self._module = module
self._name = name
self._settingwidgetname = widgetname
self._requiresrestart = requiresrestart
self._initialdata = initialdata
self._applyfunction = applyfunction
def initializeWidget(self, widget, currentValue):
"""Initialize the widget with needed data"""
if self._initialdata is not None:
widget.setInitialData(self._initialdata)
widget.setData(currentValue)
def onApply(self, data):
"""Implement actions that need to be taken when the setting is changed
here.
"""
if self._applyfunction is not None:
self._applyfunction(data)
def _getModule(self):
return self._module
def _setModule(self, module):
self._module = module
def _getName(self):
return self._name
def _setName(self, name):
self._name = name
def _getSettingWidgetName(self):
return self._settingwidgetname
def _setSettingWidgetName(self, settingwidgetname):
self._settingwidgetname = settingwidgetname
def _getRequiresRestart(self):
return self._requiresrestart
def _setRequiresRestart(self, requiresrestart):
self._requiresrestart = requiresrestart
def _getInitialData(self):
return self._initialdata
def _setInitialData(self, initialdata):
self._initialdata = initialdata
def _getApplyFunction(self):
return self._applyfunction
def _setApplyFunction(self, applyfunction):
self._applyfunction = applyfunction
module = property(_getModule, _setModule)
name = property(_getName, _setName)
settingwidgetname = property(_getSettingWidgetName, _setSettingWidgetName)
requiresrestart = property(_getRequiresRestart, _setRequiresRestart)
initialdata = property(_getInitialData, _setInitialData)
applyfunction = property(_getApplyFunction, _setApplyFunction)
def __str__(self):
return "SettingEntry: " + self.name + " Module: " + self.module + " Widget: " + \
self.settingwidgetname + " requiresrestart: " + str(self.requiresrestart) + \
" initialdata: " + str(self.initialdata)
| lgpl-2.1 | 3,252,405,455,924,976,600 | 32.861702 | 169 | 0.711781 | false | 3.47565 | false | false | false |
aenon/OnlineJudge | leetcode/5.BitManipulation/477.TotalHammingDistance.py | 1 | 1100 | # 477. Total Hamming Distance
# The Hamming distance between two integers is the number of positions at which the corresponding bits are different.
# Now your job is to find the total Hamming distance between all pairs of the given numbers.
# Example:
# Input: 4, 14, 2
# Output: 6
# Explanation: In binary representation, the 4 is 0100, 14 is 1110, and 2 is 0010 (just
# showing the four bits relevant in this case). So the answer will be:
# HammingDistance(4, 14) + HammingDistance(4, 2) + HammingDistance(14, 2) = 2 + 2 + 2 = 6.
# Note:
# Elements of the given array are in the range of 0 to 10^9
# Length of the array will not exceed 10^4.
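# Approach used below: for each of the 32 bit positions, count how many of the n numbers
# have a 1 at that position (k); every pairing of a 1 with a 0 differs there, so that
# position contributes k * (n - k) to the total. For the example above (4, 14, 2), bits
# 1, 2 and 3 each split the three numbers 2-vs-1, contributing 2 each: 2 + 2 + 2 = 6.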
class Solution(object):
def totalHammingDistance(self, nums):
"""
:type nums: List[int]
:rtype: int
loop through all the digits
"""
result = 0
for i in xrange(32):
counts = [0] * 2 # the number of 0's and 1's in the ith digit
for number in nums:
counts[number>>i & 1] += 1
result += counts[0] * counts[1]
return result | mit | -7,870,830,576,520,058,000 | 31.382353 | 117 | 0.626364 | false | 3.536977 | false | false | false |
f-prettyland/angr | angr/engines/vex/statements/loadg.py | 1 | 2392 | from .... import sim_options as o
from ....state_plugins.sim_action_object import SimActionObject
from ....state_plugins.sim_action import SimActionData
from . import SimIRStmt, SimStatementError
class SimIRStmt_LoadG(SimIRStmt):
def _execute(self):
addr = self._translate_expr(self.stmt.addr)
alt = self._translate_expr(self.stmt.alt)
guard = self._translate_expr(self.stmt.guard)
read_type, converted_type = self.stmt.cvt_types
read_size = self.size_bytes(read_type)
converted_size = self.size_bytes(converted_type)
read_expr = self.state.memory.load(addr.expr, read_size, endness=self.stmt.end)
if read_size == converted_size:
converted_expr = read_expr
elif "S" in self.stmt.cvt:
converted_expr = read_expr.sign_extend(converted_size*self.state.arch.byte_width -
read_size*self.state.arch.byte_width)
elif "U" in self.stmt.cvt:
converted_expr = read_expr.zero_extend(converted_size*self.state.arch.byte_width -
read_size*self.state.arch.byte_width)
else:
raise SimStatementError("Unrecognized IRLoadGOp %s!" % self.stmt.cvt)
read_expr = self.state.se.If(guard.expr != 0, converted_expr, alt.expr)
if o.ACTION_DEPS in self.state.options:
reg_deps = addr.reg_deps() | alt.reg_deps() | guard.reg_deps()
tmp_deps = addr.tmp_deps() | alt.tmp_deps() | guard.tmp_deps()
else:
reg_deps = None
tmp_deps = None
self.state.scratch.store_tmp(self.stmt.dst, read_expr, reg_deps, tmp_deps)
if o.TRACK_MEMORY_ACTIONS in self.state.options:
data_ao = SimActionObject(converted_expr)
alt_ao = SimActionObject(alt.expr, reg_deps=alt.reg_deps(), tmp_deps=alt.tmp_deps())
addr_ao = SimActionObject(addr.expr, reg_deps=addr.reg_deps(), tmp_deps=addr.tmp_deps())
guard_ao = SimActionObject(guard.expr, reg_deps=guard.reg_deps(), tmp_deps=guard.tmp_deps())
size_ao = SimActionObject(self.size_bits(converted_type))
r = SimActionData(self.state, self.state.memory.id, SimActionData.READ, addr=addr_ao, data=data_ao, condition=guard_ao, size=size_ao, fallback=alt_ao)
self.actions.append(r)
| bsd-2-clause | -9,205,185,699,880,460,000 | 49.893617 | 162 | 0.621237 | false | 3.441727 | false | false | false |
moozilla/dvcticker | dvcticker/main.py | 1 | 12328 | #todo: raise exceptions, then catch them to generate error images
import webapp2
from google.appengine.api import urlfetch
import json
from PIL import Image, ImageDraw, ImageFont
from google.appengine.api import memcache
import StringIO
import jinja2
import os
from decimal import * #used fixed point math for better accuracy
from google.appengine import runtime # for catching DeadlineExceededError
from google.appengine.api import urlfetch_errors # "
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
#imgFont = ImageFont.load('static/font/ncenB12.pil') # for testing locally, can't get truetype to work locally
imgFont = ImageFont.truetype('static/font/tahoma_bold.ttf', 14, encoding='unic')
def urlfetch_cache(url,exchange):
# fetches a url, but using memcache to not hammer the exchanges server
data = memcache.get(url)
if data is not None:
return process_json(data, exchange)
else:
try:
result = urlfetch.fetch(url,deadline=30) #timeout after 30 sec
if result.status_code == 200:
value = process_json(result.content, exchange)
memcache.add(url, result.content, 30) #cache for 30 sec
memcache.add('longcache'+url, result.content, 3000) #also cache for 5min in case of timeouts
return value
else:
return 'Error: '+exchange+' status code '+str(result.status_code) #'Error accessing Vircurex API'
except runtime.DeadlineExceededError: #raised if the overall request times out
data = memcache.get('longcache'+url)
if data is not None: return process_json(data, exchange)
else: return 'Error: '+exchange+' timeout'
except runtime.apiproxy_errors.DeadlineExceededError: #raised if an RPC exceeded its deadline (set)
data = memcache.get('longcache'+url)
if data is not None: return process_json(data, exchange)
else: return 'Error: '+exchange+' timeout'
except urlfetch_errors.DeadlineExceededError: #raised if the URLFetch times out
data = memcache.get('longcache'+url)
if data is not None: return process_json(data, exchange)
else: return 'Error: '+exchange+' timeout'
except urlfetch.Error: #catch DownloadError
data = memcache.get('longcache'+url)
if data is not None: return process_json(data, exchange)
else: return 'Error: '+exchange+' timeout'
def process_json(txt, exchange):
#should probably add error handling in case bad json is passed
if exchange == 'vircurex':
if txt == '"Unknown currency"': return 'Error: bad Vircurex API result'
obj = json.loads(txt)
return obj['value']
elif exchange == 'mtgox_bid':
obj = json.loads(txt)
if obj['result'] == 'success':
return obj['return']['buy']['value']
else:
return 'Error: bad MTGox API result'
elif exchange == 'mtgox_ask':
obj = json.loads(txt)
if obj['result'] == 'success':
return obj['return']['sell']['value']
else:
return 'Error: bad MTGox API result'
elif exchange == 'btce_bid':
obj = json.loads(txt)
if not any('error' in s for s in obj):
return str(obj['ticker']['buy'])
else:
return 'Error: bad BTC-E API result'
elif exchange == 'btce_ask':
obj = json.loads(txt)
if not any('error' in s for s in obj):
return str(obj['ticker']['sell'])
else:
return 'Error: bad BTC-E API result'
elif exchange == 'campbx_bid':
obj = json.loads(txt)
# need to check for error
return obj['Best Bid']
elif exchange == 'campbx_ask':
obj = json.loads(txt)
# need to check for error
return obj['Best Ask']
else:
return 'Error: invalid exchange'
def get_campbx_value(base,alt,amount):
url = 'http://campbx.com/api/xticker.php'
reverse = False
if base == 'btc':
if alt != 'usd': return 'Error: only BTC/USD valid on CampBX'
exch = 'campbx_bid'
elif base == 'usd':
if alt != 'btc': return 'Error: only BTC/USD valid on CampBX'
exch = 'campbx_ask'
reverse = True
else:
return 'Error: only BTC/USD valid on CampBX'
value = urlfetch_cache(url,exch)
if value.startswith('Error'): return value
if reverse: return str((Decimal(amount) / Decimal(value)).quantize(Decimal('.00000001'), rounding=ROUND_DOWN)) # need to round to a certain number
else: return str(Decimal(amount) * Decimal(value))
def get_mtgox_value(base,alt,amount):
cur = ['usd', 'aud', 'cad', 'chf', 'cny', 'dkk',
'eur', 'gbp', 'hkd', 'jpy', 'nzd', 'pln', 'rub', 'sek', 'sgd', 'thb']
reverse = False # true if going from cur-> btc
if base == 'btc':
if not any(alt in s for s in cur):
return 'Error: invalid destination currency'
url = 'http://data.mtgox.com/api/1/btc'+alt+'/ticker'
exch = 'mtgox_bid'
elif any(base in s for s in cur):
if alt != 'btc':
return 'Error: destination currency must be BTC'
url = 'http://data.mtgox.com/api/1/btc'+base+'/ticker' #mtgox api always has btc first
exch = 'mtgox_ask'
reverse = True
else:
return 'Error: invalid base currency'
value = urlfetch_cache(url,exch)
if value.startswith('Error'): return value
if reverse: return str((Decimal(amount) / Decimal(value)).quantize(Decimal('.00000001'), rounding=ROUND_DOWN)) # need to round to a certain number
else: return str(Decimal(amount) * Decimal(value))
def get_btce_value(base,alt,amount):
# in BTC-e currencies must be traded in pairs, we also support going in reverse (buying)
cur_fwd = {'btc':['usd','rur','eur'], 'ltc':['btc','usd','rur'], 'nmc':['btc'], 'usd':['rur'], 'eur':['usd'], 'nvc':['btc'], 'trc':['btc'], 'ppc':['btc'], 'ftc':['btc'], 'cnc':['btc']}
cur_rev = {'btc':['ltc','nmc','nvc','trc','ppc','ftc','cnc'], 'usd':['btc','ltc'], 'rur':['btc','usd'], 'eur':['btc']}
reverse = False # if going from cur-> btc
if any(base in s for s in cur_fwd) and any(alt in s for s in cur_fwd[base]):
#if not any(alt in s for s in cur_fwd[base]):
#return 'Error: invalid destination currency' # can't return here because some can be base or alt
url = 'https://btc-e.com/api/2/'+base+'_'+alt+'/ticker' #https://btc-e.com/api/2/nmc_btc/ticker
exch = 'btce_bid'
else:
if any(base in s for s in cur_rev):
if not any(alt in s for s in cur_rev[base]):
return 'Error: invalid currency pair'
url = 'https://btc-e.com/api/2/'+alt+'_'+base+'/ticker'
exch = 'btce_ask'
reverse = True
else:
return 'Error: invalid currency pair'
value = urlfetch_cache(url,exch)
if value.startswith('Error'): return value
if reverse: return str((Decimal(amount) / Decimal(value)).quantize(Decimal('.00000001'), rounding=ROUND_DOWN)) # need to round to a certain number
else: return str(Decimal(amount) * Decimal(value))
def get_vircurex_value(type, base, alt, amount):
# gets json from vircurex about bid/ask prices
# eg. https://vircurex.com/api/get_highest_bid.json?base=BTC&alt=NMC
if type == 'bid':
url = 'https://vircurex.com/api/get_highest_bid.json'
elif type == 'ask':
url = 'https://vircurex.com/api/get_lowest_ask.json'
else:
return 'Error: Type must be either "bid" or "ask"'
cur = ['btc', 'dvc', 'ixc', 'ltc', 'nmc', 'ppc', 'trc', 'usd', 'eur', 'ftc', 'frc', 'cnc']
if not any(base in s for s in cur): return 'Error: invalid currency'
if not any(alt in s for s in cur): return 'Error: invalid currency'
url += '?base=' + base + '&alt=' + alt
value = urlfetch_cache(url,'vircurex')
if value.startswith('Error'): return value
return str(Decimal(amount)*Decimal(value)) # return amount * value
def get_bid(exchange, amount, base, alt):
if exchange == 'vircurex':
return get_vircurex_value('bid',base,alt,amount)
elif exchange == 'mtgox':
return get_mtgox_value(base,alt,amount)
elif exchange == 'btc-e':
return get_btce_value(base,alt,amount)
elif exchange == 'campbx':
return get_campbx_value(base,alt,amount)
else:
return 'Error: bad exchange'
def get_text_width(str):
img = Image.new("RGBA", (1,1)) # just used to calculate the text size, size doesn't matter
draw = ImageDraw.Draw(img)
w, h = draw.textsize(str, imgFont) # calculate width font will take up
return w
# returns text, with optional coin icon, in string encoded form so it can be written to HTTP response
def make_img(str, text_pos, coinimg=None):
img = Image.new("RGBA", (get_text_width(str) + text_pos, 20))
draw = ImageDraw.Draw(img) # set draw to new image
if coinimg != None:
img.paste(coinimg, (0,2)) #paste the coin image into the generated image
draw.text((text_pos,1), str, font=imgFont, fill='#555555')
output = StringIO.StringIO()
img.save(output, format='png')
img_to_serve = output.getvalue()
output.close()
return img_to_serve
class MainHandler(webapp2.RequestHandler):
def get(self):
#base = self.request.get('base','dvc')
#alt = self.request.get('alt','btc')
#value = get_vircurex_value('bid',base,alt)
#template_values = {
# 'value': value
#}
template = JINJA_ENVIRONMENT.get_template('index.html')
self.response.write(template.render())#template_values))
class ImageHandler(webapp2.RequestHandler):
def get(self,exchange,amount,base,alt):
if amount == '': amount = '1' # default amount is 1
exchange = exchange.lower() # make sure everything is lowercase
base = base.lower()
if alt == None:
if base == 'btc': alt = 'usd' # btc.png just shows btc value in usd
else: alt = 'btc' # if no alt specified, default to BTC
alt = alt.lower()
value = get_bid(exchange,amount,base,alt)
#if bid.startswith('Error'): value = bid
#else: value = str(Decimal(amount)*Decimal(bid))
text_pos = 19 # 3 px after coin image (all are 16x16)
if value.startswith('Error'):
text_pos = 0
elif alt == 'usd':
# round down to 2 decimal places
value = '$ '+str(Decimal(value).quantize(Decimal('.01'), rounding=ROUND_DOWN))
text_pos = 2
elif alt == 'eur':
# euro symbol in unicode (only works with truetype fonts)
value = u'\u20AC '+str(Decimal(value).quantize(Decimal('.01'), rounding=ROUND_DOWN))
text_pos = 2 # have to position euro symbol so it doesn't cut off
elif any(alt in s for s in ['aud', 'cad', 'chf', 'cny', 'dkk',
'gbp', 'hkd', 'jpy', 'nzd', 'pln', 'rub', 'sek', 'sgd', 'thb', 'rur', 'nvc']):
value = alt.upper() + ' ' + value
text_pos = 2
#text_pos 0 = error
if text_pos!=0 and any(alt in s for s in ['btc', 'dvc', 'ixc', 'ltc', 'nmc', 'ppc', 'trc', 'ftc', 'frc', 'cnc']):
coinimg = Image.open('static/img/'+alt+'.png')
else: coinimg = None
img_to_serve = make_img(value, text_pos, coinimg)
self.response.headers['Content-Type'] = 'image/png'
self.response.out.write(img_to_serve)
class ErrorHandler(webapp2.RequestHandler):
def get(self):
img_to_serve = make_img('Error: Malformed URL', 0)
self.response.headers['Content-Type'] = 'image/png'
self.response.out.write(img_to_serve)
app = webapp2.WSGIApplication([
('/', MainHandler),
('/([^/]+)/(\d*\.?\d*)([A-Za-z]+)(?:/([A-Za-z]+))?(?:\.png)?', ImageHandler),
('/.*', ErrorHandler)
], debug=True)
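# Illustrative URLs matched by ImageHandler (values are arbitrary examples):
#   /vircurex/100dvc/btc.png  -> value of 100 DVC in BTC via Vircurex
#   /mtgox/1btc.png           -> 1 BTC with the default alt currency (USD)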
| mit | 2,237,923,900,706,341,600 | 43.666667 | 188 | 0.589066 | false | 3.539477 | false | false | false |
seanjtaylor/out-for-justice | scripts/test_optimize.py | 1 | 1921 |
import random
import pickle
import numpy as np
import networkx as nx
from app.optim import slow_compute_loss, step
def main(input_file, num_police, num_steps, prob_step):
"""
Parameters
----------
num_police : the number of police to use
num_steps : the number of steps to take
prob_step : the probability of taking a step if it doesn't improve loss
"""
with open(input_file) as f:
graph = pickle.load(f)
graph = nx.convert_node_labels_to_integers(graph)
N = graph.number_of_nodes()
# compute random starting places
starting_positions = np.zeros(N)
places = random.sample(xrange(N), num_police)
starting_positions[places] = 1
# one outcome that is uniformly distributed
risks = np.ones(N).reshape((-1, 1))
import time
start = time.time()
# initialize the optimization
positions = [starting_positions]
losses = [slow_compute_loss(graph, positions[-1], risks)]
current = positions[-1]
tried = set()
for i in range(num_steps):
new_position = step(graph, current)
pos_id = tuple(new_position.nonzero()[0])
if pos_id in tried:
continue
tried.add(pos_id)
positions.append(new_position)
losses.append(slow_compute_loss(graph, new_position, risks))
if (losses[-1] < losses[-2]) or (random.random() < prob_step):
current = new_position
print time.time() - start
print sorted(losses)[:10]
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('input_file')
parser.add_argument('--num_police', type=int, default=1)
parser.add_argument('--num_steps', type=int, default=100)
parser.add_argument('--prob_step', type=float, default=0.25)
args = parser.parse_args()
main(args.input_file, args.num_police, args.num_steps, args.prob_step)
| mit | 3,145,182,075,389,221,000 | 25.680556 | 75 | 0.63925 | false | 3.63138 | false | false | false |
dothiko/mypaint | lib/layer/test.py | 1 | 1433 | # This file is part of MyPaint.
# Copyright (C) 2011-2015 by Andrew Chadwick <[email protected]>
# Copyright (C) 2007-2012 by Martin Renold <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
def make_test_stack():
"""Makes a simple test RootLayerStack (2 branches of 3 leaves each)
:return: The root stack, and a list of its leaves.
:rtype: tuple
"""
import lib.layer.group
import lib.layer.data
import lib.layer.tree
root = lib.layer.tree.RootLayerStack(doc=None)
layer0 = lib.layer.group.LayerStack(name='0')
root.append(layer0)
layer00 = lib.layer.data.PaintingLayer(name='00')
layer0.append(layer00)
layer01 = lib.layer.data.PaintingLayer(name='01')
layer0.append(layer01)
layer02 = lib.layer.data.PaintingLayer(name='02')
layer0.append(layer02)
layer1 = lib.layer.group.LayerStack(name='1')
root.append(layer1)
layer10 = lib.layer.data.PaintingLayer(name='10')
layer1.append(layer10)
layer11 = lib.layer.data.PaintingLayer(name='11')
layer1.append(layer11)
layer12 = lib.layer.data.PaintingLayer(name='12')
layer1.append(layer12)
return (root, [layer00, layer01, layer02, layer10, layer11, layer12])
| gpl-2.0 | 8,603,394,478,543,778,000 | 35.74359 | 73 | 0.707606 | false | 3.220225 | false | false | false |
delimitry/ascii_clock | asciicanvas.py | 1 | 6119 | #-*- coding: utf-8 -*-
#-----------------------------------------------------------------------
# Author: delimitry
#-----------------------------------------------------------------------
class AsciiCanvas(object):
"""
ASCII canvas for drawing in console using ASCII chars
"""
def __init__(self, cols, lines, fill_char=' '):
"""
Initialize ASCII canvas
"""
if cols < 1 or cols > 1000 or lines < 1 or lines > 1000:
raise Exception('Canvas cols/lines must be in range [1..1000]')
self.cols = cols
self.lines = lines
if not fill_char:
fill_char = ' '
elif len(fill_char) > 1:
fill_char = fill_char[0]
self.fill_char = fill_char
self.canvas = [[fill_char] * (cols) for _ in range(lines)]
def clear(self):
"""
Fill canvas with empty chars
"""
self.canvas = [[self.fill_char] * (self.cols) for _ in range(self.lines)]
def print_out(self):
"""
Print out canvas to console
"""
print(self.get_canvas_as_str())
def add_line(self, x0, y0, x1, y1, fill_char='o'):
"""
Add ASCII line (x0, y0 -> x1, y1) to the canvas, fill line with `fill_char`
"""
if not fill_char:
fill_char = 'o'
elif len(fill_char) > 1:
fill_char = fill_char[0]
if x0 > x1:
# swap A and B
x1, x0 = x0, x1
y1, y0 = y0, y1
# get delta x, y
dx = x1 - x0
dy = y1 - y0
# if the length of the line is zero, just add a point
if dx == 0 and dy == 0:
if self.check_coord_in_range(x0, y0):
self.canvas[y0][x0] = fill_char
return
# when dx >= dy use fill by x-axis, and use fill by y-axis otherwise
if abs(dx) >= abs(dy):
for x in range(x0, x1 + 1):
y = y0 if dx == 0 else y0 + int(round((x - x0) * dy / float((dx))))
if self.check_coord_in_range(x, y):
self.canvas[y][x] = fill_char
else:
if y0 < y1:
for y in range(y0, y1 + 1):
x = x0 if dy == 0 else x0 + int(round((y - y0) * dx / float((dy))))
if self.check_coord_in_range(x, y):
self.canvas[y][x] = fill_char
else:
for y in range(y1, y0 + 1):
x = x0 if dy == 0 else x1 + int(round((y - y1) * dx / float((dy))))
if self.check_coord_in_range(x, y):
self.canvas[y][x] = fill_char
def add_text(self, x, y, text):
"""
Add text to canvas at position (x, y)
"""
for i, c in enumerate(text):
if self.check_coord_in_range(x + i, y):
self.canvas[y][x + i] = c
def add_rect(self, x, y, w, h, fill_char=' ', outline_char='o'):
"""
Add rectangle filled with `fill_char` and outline with `outline_char`
"""
if not fill_char:
fill_char = ' '
elif len(fill_char) > 1:
fill_char = fill_char[0]
if not outline_char:
outline_char = 'o'
elif len(outline_char) > 1:
outline_char = outline_char[0]
for px in range(x, x + w):
for py in range(y, y + h):
if self.check_coord_in_range(px, py):
if px == x or px == x + w - 1 or py == y or py == y + h - 1:
self.canvas[py][px] = outline_char
else:
self.canvas[py][px] = fill_char
def add_nine_patch_rect(self, x, y, w, h, outline_3x3_chars=None):
"""
Add nine-patch rectangle
"""
default_outline_3x3_chars = (
'.', '-', '.',
'|', ' ', '|',
'`', '-', "'"
)
if not outline_3x3_chars:
outline_3x3_chars = default_outline_3x3_chars
# filter chars
filtered_outline_3x3_chars = []
for index, char in enumerate(outline_3x3_chars[0:9]):
if not char:
char = default_outline_3x3_chars[index]
elif len(char) > 1:
char = char[0]
filtered_outline_3x3_chars.append(char)
for px in range(x, x + w):
for py in range(y, y + h):
if self.check_coord_in_range(px, py):
if px == x and py == y:
self.canvas[py][px] = filtered_outline_3x3_chars[0]
elif px == x and y < py < y + h - 1:
self.canvas[py][px] = filtered_outline_3x3_chars[3]
elif px == x and py == y + h - 1:
self.canvas[py][px] = filtered_outline_3x3_chars[6]
elif x < px < x + w - 1 and py == y:
self.canvas[py][px] = filtered_outline_3x3_chars[1]
elif x < px < x + w - 1 and py == y + h - 1:
self.canvas[py][px] = filtered_outline_3x3_chars[7]
elif px == x + w - 1 and py == y:
self.canvas[py][px] = filtered_outline_3x3_chars[2]
elif px == x + w - 1 and y < py < y + h - 1:
self.canvas[py][px] = filtered_outline_3x3_chars[5]
elif px == x + w - 1 and py == y + h - 1:
self.canvas[py][px] = filtered_outline_3x3_chars[8]
else:
self.canvas[py][px] = filtered_outline_3x3_chars[4]
def check_coord_in_range(self, x, y):
"""
Check that coordinate (x, y) is in range, to prevent out of range error
"""
return 0 <= x < self.cols and 0 <= y < self.lines
def get_canvas_as_str(self):
"""
Return canvas as a string
"""
return '\n'.join([''.join(col) for col in self.canvas])
def __str__(self):
"""
Return canvas as a string
"""
return self.get_canvas_as_str()
| mit | 5,868,139,019,117,371,000 | 36.771605 | 87 | 0.440758 | false | 3.588856 | false | false | false |
m4nh/roars | scripts/nodes/examples/arp_detector_example.py | 1 | 2688 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from roars.rosutils.rosnode import RosNode
from roars.vision.cameras import CameraRGB
from roars.vision.arucoutils import MarkerDetector
from roars.vision.arp import ARP
import roars.vision.cvutils as cvutils
import cv2
import numpy as np
import os
import json
#⬢⬢⬢⬢⬢➤ NODE
node = RosNode("rosnode_example")
#⬢⬢⬢⬢⬢➤ Sets HZ from parameters
node.setHz(node.setupParameter("hz", 30))
#⬢⬢⬢⬢⬢➤ Creates Camera Proxy
camera_topic = node.setupParameter(
"camera_topic",
"/camera/rgb/image_raw/compressed"
)
camera_file = node.getFileInPackage(
'roars',
'data/camera_calibrations/asus_xtion.yml'
)
camera = CameraRGB(
configuration_file=camera_file,
rgb_topic=camera_topic,
compressed_image="compressed" in camera_topic
)
#⬢⬢⬢⬢⬢➤ ARP
arp_configuration = node.getFileInPackage(
'roars',
'data/arp_configurations/prototype_configuration.json'
)
arp = ARP(configuration_file=arp_configuration, camera_file=camera_file)
#⬢⬢⬢⬢⬢➤ Points storage
points_per_object = node.setupParameter("points_per_object", 6)
collected_points = []
output_file = node.setupParameter("output_file", "/tmp/arp_objects.json")
#⬢⬢⬢⬢⬢➤ Camera Callback
def cameraCallback(frame):
#⬢⬢⬢⬢⬢➤ Grabs image from Frame
img = frame.rgb_image.copy()
arp_pose = arp.detect(img, debug_draw=True)
if arp_pose:
img_points = cvutils.reproject3DPoint(
arp_pose.p.x(),
arp_pose.p.y(),
arp_pose.p.z(),
camera=camera
)
cv2.circle(
img,
(int(img_points[0]), int(img_points[1])),
5,
(0, 0, 255),
-1
)
#⬢⬢⬢⬢⬢➤ Show
cv2.imshow("output", img)
c = cv2.waitKey(1)
if c == 113:
node.close()
if c == 32 and arp_pose != None:
print("New Point Added", arp_pose.p)
collected_points.append([
arp_pose.p.x(), arp_pose.p.y(), arp_pose.p.z()
])
if len(collected_points) % points_per_object == 0:
print("New Object Stored")
camera.registerUserCallabck(cameraCallback)
#⬢⬢⬢⬢⬢➤ Main Loop
while node.isActive():
node.tick()
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
probable_objects = list(chunks(collected_points, points_per_object))
objects = []
for o in probable_objects:
if len(o) == points_per_object:
objects.append(o)
with open(output_file, 'w') as handle:
handle.write(json.dumps(objects, indent=4))
| gpl-3.0 | 4,086,218,510,693,855,000 | 23.571429 | 73 | 0.625581 | false | 2.724393 | true | false | false |
IFAEControl/pirelay | pirelay/server.py | 1 | 1591 | #!/usr/bin/env python3
import time
from concurrent import futures
import grpc
from .protos import pirelay_pb2
from .protos import pirelay_pb2_grpc
from .relay import RelaysArray
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
PINS = [21]
class PiRelayServer(pirelay_pb2_grpc.PiRelayServicer):
def __init__(self, bcm_pins=[]):
self._relays = RelaysArray(bcm_pins=bcm_pins)
def Enable(self, request, context):
try:
self._relays.enable(request.channel)
except Exception as ex:
return pirelay_pb2.PiRelaysAnswer(type=pirelay_pb2.Error,
message=str(ex))
else:
return pirelay_pb2.PiRelaysAnswer(type=pirelay_pb2.Ok,
message="")
def Disable(self, request, context):
try:
self._relays.disable(request.channel)
except Exception as ex:
return pirelay_pb2.PiRelaysAnswer(type=pirelay_pb2.Error,
message=str(ex))
else:
return pirelay_pb2.PiRelaysAnswer(type=pirelay_pb2.Ok,
message="")
def serve():
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
pirelay_pb2_grpc.add_PiRelayServicer_to_server(PiRelayServer(PINS), server)
server.add_insecure_port('[::]:50051')
server.start()
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
if __name__ == '__main__':
serve()
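# A hedged client-side sketch: the stub class and request message names below are
# assumptions inferred from the servicer (check the actual pirelay.proto / generated code):
#   channel = grpc.insecure_channel('localhost:50051')
#   stub = pirelay_pb2_grpc.PiRelayStub(channel)
#   answer = stub.Enable(pirelay_pb2.RelayRequest(channel=0))
#   print(answer.type, answer.message)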
| lgpl-3.0 | -4,084,407,837,345,929,700 | 25.966102 | 79 | 0.574481 | false | 3.649083 | false | false | false |
tensorflow/model-optimization | tensorflow_model_optimization/g3doc/tools/build_docs.py | 1 | 3663 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tool to generate open source api_docs for tensorflow_model_optimization.
To use:
1. Install the tensorflow docs package, which is only compatible with Python 3:
python3 -m pip install git+https://github.com/tensorflow/docs
2. Install TensorFlow Model Optimization. The API docs are generated from
`tfmot` from the import of the tfmot package below, based on what is exposed
under
https://github.com/tensorflow/model-optimization/tree/master/tensorflow_model_optimization/python/core/api.
See https://www.tensorflow.org/model_optimization/guide/install.
3. Run build_docs.py.
python3 build_docs.py --output_dir=/tmp/model_optimization_api
4. View the generated markdown files on a viewer. One option is to fork
https://github.com/tensorflow/model-optimization/, push a change that
copies the files to tensorflow_model_optimization/g3doc, and then
view the files on Github.
Note:
If duplicate or spurious docs are generated (e.g. internal names), consider
blacklisting them via the `private_map` argument below.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from tensorflow_docs.api_generator import generate_lib
import tensorflow_model_optimization as tfmot
flags.DEFINE_string("output_dir", "/tmp/model_optimization_api",
"Where to output the docs")
flags.DEFINE_string(
"code_url_prefix",
("https://github.com/tensorflow/model-optimization/blob/master/"
"tensorflow_model_optimization"),
"The url prefix for links to code.")
flags.DEFINE_bool("search_hints", True,
"Include metadata search hints in the generated files")
flags.DEFINE_string("site_path", "model_optimization/api_docs/python",
"Path prefix in the _toc.yaml")
FLAGS = flags.FLAGS
def main(unused_argv):
doc_generator = generate_lib.DocGenerator(
root_title="TensorFlow Model Optimization",
py_modules=[("tfmot", tfmot)],
base_dir=os.path.dirname(tfmot.__file__),
code_url_prefix=FLAGS.code_url_prefix,
search_hints=FLAGS.search_hints,
site_path=FLAGS.site_path,
# TODO(tfmot): remove this once the next release after 0.3.0 happens.
# This is needed in the interim because the API docs reflect
# the latest release and the current release still wildcard imports
# all of the classes below.
private_map={
"tfmot.sparsity.keras": [
# List of internal classes which get exposed when imported.
"InputLayer",
"custom_object_scope",
"pruning_sched",
"pruning_wrapper",
"absolute_import",
"division",
"print_function",
"compat"
]
},
)
doc_generator.build(output_dir=FLAGS.output_dir)
if __name__ == "__main__":
app.run(main)
| apache-2.0 | 1,166,959,432,661,376,000 | 33.556604 | 110 | 0.677041 | false | 4.097315 | false | false | false |
owlabs/incubator-airflow | airflow/executors/__init__.py | 1 | 3891 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.executors.base_executor import BaseExecutor # noqa
from airflow.executors.local_executor import LocalExecutor
from airflow.executors.sequential_executor import SequentialExecutor
DEFAULT_EXECUTOR = None
def _integrate_plugins():
"""Integrate plugins to the context."""
from airflow.plugins_manager import executors_modules
for executors_module in executors_modules:
sys.modules[executors_module.__name__] = executors_module
globals()[executors_module._name] = executors_module
def get_default_executor():
"""Creates a new instance of the configured executor if none exists and returns it"""
global DEFAULT_EXECUTOR
if DEFAULT_EXECUTOR is not None:
return DEFAULT_EXECUTOR
executor_name = conf.get('core', 'EXECUTOR')
DEFAULT_EXECUTOR = _get_executor(executor_name)
log = LoggingMixin().log
log.info("Using executor %s", executor_name)
return DEFAULT_EXECUTOR
class Executors:
LocalExecutor = "LocalExecutor"
SequentialExecutor = "SequentialExecutor"
CeleryExecutor = "CeleryExecutor"
DaskExecutor = "DaskExecutor"
MesosExecutor = "MesosExecutor"
KubernetesExecutor = "KubernetesExecutor"
DebugExecutor = "DebugExecutor"
def _get_executor(executor_name):
"""
Creates a new instance of the named executor.
In case the executor name is not known in airflow,
look for it in the plugins
"""
if executor_name == Executors.LocalExecutor:
return LocalExecutor()
elif executor_name == Executors.SequentialExecutor:
return SequentialExecutor()
elif executor_name == Executors.CeleryExecutor:
from airflow.executors.celery_executor import CeleryExecutor
return CeleryExecutor()
elif executor_name == Executors.DaskExecutor:
from airflow.executors.dask_executor import DaskExecutor
return DaskExecutor()
elif executor_name == Executors.MesosExecutor:
from airflow.contrib.executors.mesos_executor import MesosExecutor
return MesosExecutor()
elif executor_name == Executors.KubernetesExecutor:
from airflow.contrib.executors.kubernetes_executor import KubernetesExecutor
return KubernetesExecutor()
elif executor_name == Executors.DebugExecutor:
from airflow.executors.debug_executor import DebugExecutor
return DebugExecutor()
else:
# Loading plugins
_integrate_plugins()
executor_path = executor_name.split('.')
if len(executor_path) != 2:
raise AirflowException(
"Executor {0} not supported: "
"please specify in format plugin_module.executor".format(executor_name))
if executor_path[0] in globals():
return globals()[executor_path[0]].__dict__[executor_path[1]]()
else:
raise AirflowException("Executor {0} not supported.".format(executor_name))
| apache-2.0 | -8,116,916,838,794,192,000 | 36.776699 | 89 | 0.72038 | false | 4.347486 | false | false | false |
suutari/shoop | shuup/notify/template.py | 1 | 3011 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.utils.encoding import force_text
from jinja2.sandbox import SandboxedEnvironment
class NoLanguageMatches(Exception):
pass
def render_in_context(context, template_text, html_intent=False):
"""
Render the given Jinja2 template text in the script context.
:param context: Script context.
:type context: shuup.notify.script.Context
:param template_text: Jinja2 template text.
:type template_text: str
:param html_intent: Is the template text intended for HTML output?
This currently turns on autoescaping.
:type html_intent: bool
:return: Rendered template text
:rtype: str
:raises: Whatever Jinja2 might happen to raise
"""
# TODO: Add some filters/globals into this environment?
env = SandboxedEnvironment(autoescape=html_intent)
template = env.from_string(template_text)
return template.render(context.get_variables())
class Template(object):
def __init__(self, context, data):
"""
:param context: Script context
:type context: shuup.notify.script.Context
:param data: Template data dictionary
:type data: dict
"""
self.context = context
self.data = data
def _get_language_data(self, language):
return self.data.get(force_text(language).lower(), {})
def has_language(self, language, fields):
data = self._get_language_data(language)
return set(data.keys()) >= set(fields)
def render(self, language, fields):
"""
Render this template in the given language,
returning the given fields.
:param language: Language code (ISO 639-1 or ISO 639-2)
:type language: str
:param fields: Desired fields to render.
:type fields: list[str]
:return: Dict of field -> rendered content.
:rtype: dict[str, str]
"""
data = self._get_language_data(language)
rendered = {}
for field in fields:
field_template = data.get(field)
if field_template: # pragma: no branch
rendered[field] = render_in_context(self.context, field_template, html_intent=False)
return rendered
def render_first_match(self, language_preferences, fields):
# TODO: Document
for language in language_preferences:
if self.has_language(language, fields):
rendered = self.render(language=language, fields=fields)
rendered["_language"] = language
return rendered
raise NoLanguageMatches("No language in template matches any of languages %r for fields %r" % (
language_preferences, fields
))
| agpl-3.0 | -5,120,846,759,464,826,000 | 32.831461 | 103 | 0.645965 | false | 4.23488 | false | false | false |
danic96/Practica1 | Practica1/Aplicacio/views.py | 1 | 4321 | # from django.shortcuts import render
# Create your views here.
# from django.http import HttpResponse
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.views.generic.edit import CreateView, UpdateView
from django.views.generic import DetailView, DeleteView
from django.core.exceptions import PermissionDenied  # needed by CheckIsOwnerMixin below
from rest_framework import generics
from models import Movie, Character, Team, Power, Location
from forms import MovieForm, CharacterForm, TeamForm, PowerForm, LocationForm
from Practica1.serializers import MovieSerializer
# Security Mixins
class LoginRequiredMixin(object):
@method_decorator(login_required())
def dispatch(self, *args, **kwargs):
return super(LoginRequiredMixin, self).dispatch(*args, **kwargs)
class CheckIsOwnerMixin(object):
def get_object(self, *args, **kwargs):
obj = super(CheckIsOwnerMixin, self).get_object(*args, **kwargs)
if not obj.user == self.request.user:
raise PermissionDenied
return obj
class LoginRequiredCheckIsOwnerUpdateView(LoginRequiredMixin, CheckIsOwnerMixin, UpdateView):
template_name = 'Aplicacio/form.html'
class MovieCreate(LoginRequiredMixin, CreateView):
model = Movie
template_name = 'Aplicacio/form.html'
form_class = MovieForm
def form_valid(self, form):
form.instance.user = self.request.user
return super(MovieCreate, self).form_valid(form)
class CharacterCreate(LoginRequiredMixin, CreateView):
model = Character
template_name = 'Aplicacio/form.html'
form_class = CharacterForm
def form_valid(self, form):
form.instance.user = self.request.user
return super(CharacterCreate, self).form_valid(form)
class TeamCreate(LoginRequiredMixin, CreateView):
model = Team
template_name = 'Aplicacio/form.html'
form_class = TeamForm
def form_valid(self, form):
form.instance.user = self.request.user
return super(TeamCreate, self).form_valid(form)
class PowerCreate(LoginRequiredMixin, CreateView):
model = Power
template_name = 'Aplicacio/form.html'
form_class = PowerForm
def form_valid(self, form):
form.instance.user = self.request.user
return super(PowerCreate, self).form_valid(form)
class LocationCreate(LoginRequiredMixin, CreateView):
model = Location
template_name = 'Aplicacio/form.html'
form_class = LocationForm
def form_valid(self, form):
form.instance.user = self.request.user
return super(LocationCreate, self).form_valid(form)
"""
class LocationDelete(LoginRequiredMixin, CreateView):
model = Location
template_name = 'Aplicacio/form.html'
form_class = LocationForm
def form_valid(self, form):
form.instance.user = self.request.user
return super(LocationDelete, self).form_valid(form)
"""
"""
class Delete(DeleteView):
model = Location
success_url = reverse_lazy('all_locations') # This is where this view will
# redirect the user
template_name = 'Aplicacio/delete_location.html'
"""
class MovieDetail(DetailView):
model = Movie
template_name = 'Aplicacio/movie_detail.html'
"""
def get_context_data(self, **kwargs):
context = super(MovieDetail, self).get_context_data(**kwargs)
context['RATING_CHOICES'] = RestaurantReview.RATING_CHOICES
return context
"""
class CharacterDetail(DetailView):
model = Character
template_name = 'Aplicacio/character_detail.html'
class TeamDetail(DetailView):
model = Team
template_name = 'Aplicacio/team_detail.html'
class PowerDetail(DetailView):
model = Power
template_name = 'Aplicacio/power_detail.html'
class LocationDetail(DetailView):
model = Location
template_name = 'Aplicacio/location_detail.html'
def form_valid(self, form):
form.instance.user = self.request.user
return super(CharacterCreate, self).form_valid(form)
### RESTful API views ###
class APIMovieList(generics.ListCreateAPIView):
model = Movie
queryset = Movie.objects.all()
serializer_class = MovieSerializer
class APIMovieDetail(generics.RetrieveUpdateDestroyAPIView):
model = Movie
queryset = Movie.objects.all()
serializer_class = MovieSerializer
| mit | 6,965,548,810,274,474,000 | 27.058442 | 93 | 0.707938 | false | 3.889289 | false | false | false |
CorundumGames/Invasodado | game/ufo.py | 1 | 3605 | from math import sin
from random import choice, uniform, expovariate
from pygame import Rect
from core import color
from core import config
from core.particles import ParticleEmitter
from game.block import get_block
from game.gameobject import GameObject
from game import gamedata
### Constants ##################################################################
AVG_WAIT = 9000 #Expected time in frames between UFO appearance
DEATH = config.load_sound('ufo_explosion.wav')
FRAMES = tuple(
Rect(64 * (i % 4), 192 + 32 * (i // 4), 64, 32)
for i in range(10, -1, -1)
)
INVADE = config.load_sound('ufo.wav')
START_POS = (640, 16)
UFO_FRAMES = color.get_colored_objects(FRAMES)
UFO_STATES = ('IDLE', 'APPEARING', 'ACTIVE', 'DYING', 'LEAVING', 'LOWERING', 'GAMEOVER')
################################################################################
class UFO(GameObject):
STATES = config.Enum(*UFO_STATES)
GROUP = None
BLOCK_GROUP = None
def __init__(self):
super().__init__()
self._anim = 0.0
self.column = None
self.current_frame_list = UFO_FRAMES
self.image = config.get_sprite(FRAMES[0])
self.odds = expovariate(AVG_WAIT)
self.position = list(START_POS)
self.rect = Rect(START_POS, self.image.get_size())
self.state = UFO.STATES.IDLE
self.emitter = ParticleEmitter(color.random_color_particles, self.rect)
del self.acceleration
def appear(self):
'''
Appear on-screen, but not for very long!
'''
INVADE.play(-1)
self.position = list(START_POS)
self.rect.topleft = list(START_POS)
self.change_state(UFO.STATES.ACTIVE)
self.velocity[0] = -2.0
def move(self):
'''
Move left on the screen, and oscillate up and down.
'''
position = self.position
rect = self.rect
self._anim += 0.5
        self.image = UFO_FRAMES[id(choice(color.LIST))][int(self._anim) % len(FRAMES)]
position[0] += self.velocity[0]
position[1] += sin(self._anim/4)
rect.topleft = (position[0] + .5, position[1] + .5)
if rect.right < 0:
#If we've gone past the left edge of the screen...
self.change_state(UFO.STATES.LEAVING)
def die(self):
'''
Vanish and release a special Block that clears lots of other Blocks.
'''
self.emitter.rect = self.rect
self.emitter.burst(30)
DEATH.play()
UFO.BLOCK_GROUP.add(get_block((self.rect.centerx, 0), special=True))
gamedata.score += 90
self.change_state(UFO.STATES.LEAVING)
def leave(self):
INVADE.stop()
self.velocity[0] = 0
self.position = list(START_POS)
self.rect.topleft = START_POS
self.change_state(UFO.STATES.IDLE)
def wait(self):
'''
Wait off-screen, and only come back with a specific probability.
'''
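        # Note: self.odds is drawn from an exponential distribution with rate
        # AVG_WAIT (mean 1/AVG_WAIT), so the per-frame check below makes the UFO
        # reappear roughly once every AVG_WAIT frames on average.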
if uniform(0, 1) < self.odds:
#With a certain probability...
self.odds = expovariate(AVG_WAIT)
self.change_state(UFO.STATES.APPEARING)
actions = {
STATES.IDLE : 'wait' ,
STATES.APPEARING: 'appear',
STATES.ACTIVE : 'move' ,
STATES.DYING : 'die' ,
STATES.LEAVING : 'leave' ,
STATES.GAMEOVER : None ,
} | gpl-3.0 | -1,058,903,097,034,391,700 | 32.700935 | 88 | 0.530929 | false | 3.583499 | false | false | false |
parksandwildlife/wastd | occurrence/migrations/0006_auto_20181129_1812.py | 1 | 1084 | # Generated by Django 2.0.8 on 2018-11-29 10:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('occurrence', '0005_auto_20181025_1720'),
]
operations = [
migrations.AlterField(
model_name='areaencounter',
name='source',
field=models.PositiveIntegerField(choices=[(0, 'Direct entry'), (1, 'Manual entry from paper datasheet'), (2, 'Digital data capture (ODK)'), (10, 'Threatened Fauna'), (11, 'Threatened Flora'), (12, 'Threatened Communities'), (13, 'Threatened Communities Boundaries'), (14, 'Threatened Communities Buffers'), (15, 'Threatened Communities Sites'), (20, 'Turtle Tagging Database WAMTRAM2'), (21, 'Ningaloo Turtle Program'), (22, 'Broome Turtle Program'), (23, 'Pt Hedland Turtle Program'), (24, 'Gnaraloo Turtle Program'), (25, 'Eco Beach Turtle Program'), (30, 'Cetacean Strandings Database'), (31, 'Pinniped Strandings Database')], default=0, help_text='Where was this record captured initially?', verbose_name='Data Source'),
),
]
| mit | -6,071,923,042,693,717,000 | 59.222222 | 738 | 0.671587 | false | 3.519481 | false | false | false |
csdms/dakota | dakotathon/tests/test_plugin_hydrotrend_run.py | 1 | 3466 | #!/usr/bin/env python
#
# Test running the dakota.plugin.hydrotrend module.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper ([email protected])
import os
import shutil
# import filecmp
import glob
from nose.tools import with_setup, assert_true
from dakotathon.dakota import Dakota
from dakotathon.plugins.hydrotrend import is_installed as is_hydrotrend_installed
from dakotathon.utils import is_dakota_installed
from . import start_dir, data_dir
# Global variables -----------------------------------------------------
run_dir = os.getcwd()
config_file = os.path.join(run_dir, "dakota.yaml")
known_config_file = os.path.join(data_dir, "dakota.yaml")
# known_dat_file = os.path.join(data_dir, 'dakota.dat')
# Fixtures -------------------------------------------------------------
def setup_module():
"""Called before any tests are performed."""
print("\n*** " + __name__)
def setup():
"""Called at start of any test using it @with_setup()"""
pass
def teardown():
"""Called at end of any test using it @with_setup()"""
if os.path.exists(config_file):
os.remove(config_file)
if os.path.exists("dakota.in"):
os.remove("dakota.in")
if os.path.exists("run.log"):
os.remove("run.log")
if os.path.exists("stderr.log"):
os.remove("stderr.log")
if is_hydrotrend_installed():
for dname in glob.glob("HYDRO_*"):
shutil.rmtree(dname)
if is_dakota_installed():
for dname in glob.glob("run.*"):
shutil.rmtree(dname)
for fname in ["dakota." + ext for ext in ["dat", "out", "rst"]]:
if os.path.exists(fname):
os.remove(fname)
def teardown_module():
"""Called after all tests have completed."""
pass
# Tests ----------------------------------------------------------------
@with_setup(setup, teardown)
def test_run_by_setting_attributes():
"""Test running a HydroTrend simulation."""
d = Dakota(method="vector_parameter_study", plugin="hydrotrend")
d.template_file = os.path.join(data_dir, "HYDRO.IN.dtmpl")
d.auxiliary_files = os.path.join(data_dir, "HYDRO0.HYPS")
d.variables.descriptors = [
"starting_mean_annual_temperature",
"total_annual_precipitation",
]
d.variables.initial_point = [10.0, 1.5]
d.method.final_point = [20.0, 2.5]
d.method.n_steps = 5
d.responses.response_descriptors = ["Qs_median", "Q_mean"]
d.responses.response_files = ["HYDROASCII.QS", "HYDROASCII.Q"]
d.responses.response_statistics = ["median", "mean"]
d.setup()
assert_true(os.path.exists(d.input_file))
if is_dakota_installed() and is_hydrotrend_installed():
d.run()
assert_true(os.path.exists(d.output_file))
# assert_true(filecmp.cmp(known_dat_file, d.environment.data_file))
@with_setup(setup, teardown)
def test_run_from_config_file():
"""Test running a HydroTrend simulation from a config file."""
d = Dakota.from_file_like(known_config_file)
d.run_directory = run_dir
d.template_file = os.path.join(data_dir, "HYDRO.IN.dtmpl")
d.auxiliary_files = os.path.join(data_dir, "HYDRO0.HYPS")
d.serialize(config_file)
d.write_input_file()
assert_true(os.path.exists(d.input_file))
if is_dakota_installed() and is_hydrotrend_installed():
d.run()
assert_true(os.path.exists(d.output_file))
# assert_true(filecmp.cmp(known_dat_file, d.environment.data_file))
| mit | -8,400,592,787,912,912,000 | 31.092593 | 81 | 0.617426 | false | 3.16819 | true | false | false |
kysolvik/reservoir-id | reservoir-id/classifier_train.py | 1 | 6974 | #!/usr/bin/env python
"""
Train XGBoost classifier
Inputs: CSV from build_att_table, small area cutoff
Outputs: Packaged up XGBoost model
@authors: Kylen Solvik
Date Create: 3/17/17
"""
# Load libraries
import pandas as pd
from sklearn import model_selection
from sklearn import preprocessing
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.externals import joblib
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import *
import numpy as np
import sys
import argparse
import os
import xgboost as xgb
# Parse arguments
parser = argparse.ArgumentParser(description='Train XGBoost classifier.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('prop_csv',
help='Path to attribute table (from build_att_table.py).',
type=str)
parser.add_argument('xgb_pkl',
                    help='Path to save XGBoost model as .pkl.',
type=str)
parser.add_argument('--area_lowbound',
help='Lower area bound. All regions <= in size will be ignored',
default=2,
type=int)
parser.add_argument('--path_prefix',
help='To be placed at beginnings of all other path args',
type=str,default='')
args = parser.parse_args()
def select_training_obs(full_csv_path):
"""Takes full csv and selects only the training observations.
Writes out to csv for further use"""
training_csv_path = full_csv_path.replace('.csv','_trainonly.csv')
if not os.path.isfile(training_csv_path):
dataset = pd.read_csv(full_csv_path,header=0)
training_dataset = dataset.loc[dataset['class'] > 0]
training_dataset.to_csv(training_csv_path,header=True,index=False)
return(training_csv_path)
def main():
# Set any attributes to exclude for this run
exclude_att_patterns = []
# Load dataset
training_csv = select_training_obs(args.path_prefix + args.prop_csv)
dataset = pd.read_csv(training_csv,header=0)
dataset_acut = dataset.loc[dataset['area'] > args.area_lowbound]
# Exclude attributes matching user input patterns, or if they are all nans
exclude_atts = []
for pattern in exclude_att_patterns:
col_list = [col for col in dataset_acut.columns if pattern in col]
exclude_atts.extend(col_list)
for att in dataset.columns[1:]:
if sum(np.isfinite(dataset[att])) == 0:
exclude_atts.append(att)
for att in list(set(exclude_atts)):
del dataset_acut[att]
(ds_y,ds_x) = dataset_acut.shape
print(ds_y,ds_x)
# Convert dataset to array
feature_names = dataset_acut.columns[2:]
array = dataset_acut.values
X = array[:,2:ds_x].astype(float)
Y = array[:,1].astype(int)
Y = Y-1 # Convert from 1s and 2s to 0-1
# Set nans to 0
X = np.nan_to_num(X)
# Separate test data
test_size = 0.2
seed = 5
X_train, X_test, Y_train, Y_test = model_selection.train_test_split(
X, Y, test_size=test_size,
random_state=seed)
# Convert data to xgboost matrices
d_train = xgb.DMatrix(X_train,label=Y_train)
# d_test = xgb.DMatrix(X_test,label=Y_test)
#----------------------------------------------------------------------
    # Parameter tuning
# Step 1: Find approximate n_estimators to use
early_stop_rounds = 40
n_folds = 5
xgb_model = xgb.XGBClassifier(
learning_rate =0.1,
n_estimators=1000,
max_depth=5,
min_child_weight=1,
gamma=0,
subsample=0.8,
colsample_bytree=0.8,
objective= 'binary:logistic',
seed=27)
xgb_params = xgb_model.get_xgb_params()
cvresult = xgb.cv(xgb_params, d_train,
num_boost_round=xgb_params['n_estimators'], nfold=n_folds,
metrics='auc', early_stopping_rounds=early_stop_rounds,
)
n_est_best = (cvresult.shape[0] - early_stop_rounds)
print('Best number of rounds = {}'.format(n_est_best))
# Step 2: Tune hyperparameters
xgb_model = xgb.XGBClassifier()
params = {'max_depth': range(5,10,2),
'learning_rate': [0.1],
'gamma':[0,0.5,1],
'silent': [1],
'objective': ['binary:logistic'],
'n_estimators' : [n_est_best],
'subsample' : [0.7, 0.8,1],
'min_child_weight' : range(1,4,2),
'colsample_bytree':[0.7,0.8,1],
}
clf = GridSearchCV(xgb_model,params,n_jobs = 1,
cv = StratifiedKFold(Y_train,
n_folds=5, shuffle=True),
scoring = 'roc_auc',
verbose = 2,
refit = True)
clf.fit(X_train,Y_train)
best_parameters,score,_ = max(clf.grid_scores_,key=lambda x: x[1])
print('Raw AUC score:',score)
for param_name in sorted(best_parameters.keys()):
print("%s: %r" % (param_name, best_parameters[param_name]))
# Step 3: Decrease learning rate and up the # of trees
#xgb_finalcv = XGBClassifier()
tuned_params = clf.best_params_
tuned_params['n_estimators'] = 10000
tuned_params['learning_rate'] = 0.01
cvresult = xgb.cv(tuned_params, d_train,
num_boost_round=tuned_params['n_estimators'], nfold=n_folds,
metrics='auc', early_stopping_rounds=early_stop_rounds,
)
# Train model with cv results and predict on test set For test accuracy
n_est_final = int((cvresult.shape[0] - early_stop_rounds) / (1 - 1 / n_folds))
tuned_params['n_estimators'] = n_est_final
print(tuned_params)
xgb_train = xgb.XGBClassifier()
xgb_train.set_params(**tuned_params)
xgb_train.fit(X_train,Y_train)
bst_preds = xgb_train.predict(X_test)
print("Xgboost Test acc = " + str(accuracy_score(Y_test, bst_preds)))
print(confusion_matrix(Y_test, bst_preds))
print(classification_report(Y_test, bst_preds))
# Export cv classifier
joblib.dump(cvresult, args.path_prefix + args.xgb_pkl + 'cv')
# Export classifier trained on full data set
xgb_full = xgb.XGBClassifier()
xgb_full.set_params(**tuned_params)
xgb_full.fit(X,Y)
joblib.dump(xgb_full, args.path_prefix + args.xgb_pkl)
if __name__ == '__main__':
main()
| gpl-3.0 | -8,813,554,001,576,761,000 | 37.10929 | 88 | 0.57571 | false | 3.680211 | true | false | false |
ambitioninc/django-user-guide | user_guide/templatetags/user_guide_tags.py | 1 | 2767 | """
Template tag for displaying user guides.
"""
import re
from django import template
from django.conf import settings
from django.template import loader
from django.template.defaulttags import CsrfTokenNode
from user_guide.models import GuideInfo
register = template.Library()
# The maximum number of guides to show per page
USER_GUIDE_SHOW_MAX = getattr(settings, 'USER_GUIDE_SHOW_MAX', 10)
# Use cookies to determine if guides should be shown
USER_GUIDE_USE_COOKIES = getattr(settings, 'USER_GUIDE_USE_COOKIES', False)
# The url to any custom CSS
USER_GUIDE_CSS_URL = getattr(
settings,
'USER_GUIDE_CSS_URL',
None
)
# The url to any custom JS
USER_GUIDE_JS_URL = getattr(
settings,
'USER_GUIDE_JS_URL',
None
)
@register.simple_tag(takes_context=True)
def user_guide(context, *args, **kwargs):
"""
Creates html items for all appropriate user guides.
Kwargs:
guide_name: A string name of a specific guide.
guide_tags: An array of string guide tags.
        limit: An integer maximum number of guides to show at a single time.
Returns:
An html string containing the user guide scaffolding and any guide html.
"""
user = context['request'].user if 'request' in context and hasattr(context['request'], 'user') else None
    if user and user.is_authenticated():  # Only render guides for authenticated users
limit = kwargs.get('limit', USER_GUIDE_SHOW_MAX)
filters = {
'user': user,
'is_finished': False
}
# Handle special filters
if kwargs.get('guide_name'):
filters['guide__guide_name'] = kwargs.get('guide_name')
if kwargs.get('guide_tags'):
filters['guide__guide_tag__in'] = kwargs.get('guide_tags')
# Set the html
html = ''.join((
'<div data-guide="{0}" class="django-user-guide-item">{1}</div>'.format(
guide_info.id,
guide_info.guide.html
) for guide_info in GuideInfo.objects.select_related('guide').filter(**filters).only('guide')[:limit]
))
# Return the rendered template with the guide html
return loader.render_to_string('user_guide/window.html', {
'html': re.sub(r'\{\s*static\s*\}', settings.STATIC_URL, html),
'css_href': '{0}user_guide/build/django-user-guide.css'.format(settings.STATIC_URL),
'js_src': '{0}user_guide/build/django-user-guide.js'.format(settings.STATIC_URL),
'custom_css_href': USER_GUIDE_CSS_URL,
'custom_js_src': USER_GUIDE_JS_URL,
'use_cookies': str(USER_GUIDE_USE_COOKIES).lower(),
'csrf_node': CsrfTokenNode().render(context)
})
else:
return ''
| mit | -4,151,106,416,245,228,000 | 31.940476 | 113 | 0.62378 | false | 3.650396 | false | false | false |
praekelt/django-ultracache | bin/cache-purge-consumer.py | 1 | 3973 | """Subscribe to RabbitMQ and listen for purge instructions continuously. Manage
this script through e.g. supervisor."""
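# A minimal example config (assumed shape; these are the only keys read below):
#   logfile: stdout
#   rabbit-url: amqp://guest:[email protected]:5672/%2F
#   host: www.example.com
#   proxy-address: 127.0.0.1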
import json
import traceback
from multiprocessing.pool import ThreadPool
from optparse import OptionParser
from time import sleep
import pika
import requests
import yaml
class Consumer:
channel = None
connection = None
def __init__(self):
self.pool = ThreadPool()
parser = OptionParser()
parser.add_option("-c", "--config", dest="config",
help="Configuration file", metavar="FILE")
(options, args) = parser.parse_args()
config_file = options.config
self.config = {}
if config_file:
self.config = yaml.load(open(config_file)) or {}
def log(self, msg):
name = self.config.get("logfile", None)
if not name:
return
if name == "stdout":
print(msg)
return
fp = open(name, "a")
try:
fp.write(msg + "\n")
finally:
fp.close()
def connect(self):
parameters = pika.URLParameters(
self.config.get(
"rabbit-url",
"amqp://guest:[email protected]:5672/%2F"
)
)
self.connection = pika.BlockingConnection(parameters)
self.channel = self.connection.channel()
self.channel.exchange_declare(
exchange="purgatory", exchange_type="fanout"
)
queue = self.channel.queue_declare(exclusive=True)
queue_name = queue.method.queue
self.channel.queue_bind(exchange="purgatory", queue=queue_name)
self.channel.basic_qos(prefetch_count=1)
self.channel.basic_consume(
self.on_message, queue=queue_name, no_ack=False, exclusive=True
)
def on_message(self, channel, method_frame, header_frame, body):
self.pool.apply_async(self.handle_message, (body,))
channel.basic_ack(delivery_tag=method_frame.delivery_tag)
def handle_message(self, body):
if body:
try:
di = json.loads(body)
except ValueError:
path = body
headers = {}
else:
path = di["path"]
headers = di["headers"]
self.log("Purging %s with headers %s" % (path, str(headers)))
host = self.config.get("host", None)
try:
if host:
final_headers = {"Host": host}
final_headers.update(headers)
response = requests.request(
"PURGE", "http://" \
+ self.config.get("proxy-address", "127.0.0.1") + path,
headers=final_headers,
timeout=10
)
else:
response = requests.request(
"PURGE", "http://" \
+ self.config.get("proxy-address", "127.0.0.1") + path,
timeout=10,
headers=headers
)
except Exception as exception:
msg = traceback.format_exc()
self.log("Error purging %s: %s" % (path, msg))
else:
content = response.content
def consume(self):
loop = True
while loop:
try:
if self.channel is None:
raise pika.exceptions.ConnectionClosed()
self.channel.start_consuming()
except KeyboardInterrupt:
loop = False
self.channel.stop_consuming()
except pika.exceptions.ConnectionClosed:
try:
self.connect()
except pika.exceptions.ConnectionClosed:
sleep(1)
self.connection.close()
consumer = Consumer()
consumer.consume()
| bsd-3-clause | -856,625,678,671,990,400 | 31.300813 | 83 | 0.511956 | false | 4.489266 | true | false | false |
myriasofo/CLRS_exercises | algos/testSuite.py | 1 | 6292 | ''' WHAT: Simple test framework for checking algorithms
TASK:
*Handle output that's an object, eg. bst that gets modified
*option3: optional param - Class (accept input/output as arrays and TURN INTO object)
(option1: optional param - comparison function (instead of simple "!=")
(option2: optional param - Class (automatically deconstruct objects in arrays)
'''
import copy
def init(*args, **kwargs):
return TestSuite(*args, **kwargs)
class TestSuite:
def __init__(self, tests, dataStructures=None):
self.tests = tests
self.converter = DataStructureConverter(dataStructures) if dataStructures is not None else None
def test(self, function):
print('FUNCTION: {}'.format(function.__name__))
tests = copy.deepcopy(self.tests)
for i, test in enumerate(tests):
params, expected = test
try:
actual = self.runFunction(function, params)
if actual != expected:
self.printError(i+1, params, expected, actual)
return
except Exception as error:
self.printError(i+1, params, expected, 'ERROR')
raise error
def printError(self, iteration, params, expected, actual):
print()
print('ERROR: Iteration {}'.format(iteration))
print()
stringifiedParams = ', '.join([str(param) for param in params])
print('input: {}'.format(stringifiedParams))
print('ouptut expected: {}'.format(expected))
print('output actual: {}'.format(actual))
print()
def runFunction(self, function, params):
if self.converter is not None:
params = self.converter.convertInput(params)
params = copy.deepcopy(params)
actual = function(*params)
if self.converter is not None:
actual = self.converter.convertOuptut(actual)
return actual
class DataStructureConverter:
def __init__(self, config):
self.config = config
self.arrayToDs = {
'SinglyLinkedList': self.createSinglyLinkedList,
#'DoublyLinkedList': createSinglyLinkedList,
#'BinaryTree': createBinaryTree,
#'Queue': createQueue,
#'Stack': createStack,
}
self.dsToArray = {
'SinglyLinkedList': self.createArrayFromSinglyLinkedList,
#'DoublyLinkedList': createSinglyLinkedList,
#'BinaryTree': createBinaryTree,
#'Queue': createQueue,
#'Stack': createStack,
}
def convertInput(self, params):
if isinstance(self.config, str):
converted = []
for param in params:
ds = self.convertArrayToDs(param, self.config)
converted.append(ds)
return converted
elif isinstance(self.config, dict):
converted = []
for param, dsName in zip(params, self.config['input']):
if not isinstance(dsName, str):
converted.append(param)
else:
ds = self.convertArrayToDs(param, dsName)
converted.append(ds)
return converted
else:
raise Exception('ERROR: This is not the right format for dataStructure: {}'.format(self.config))
def convertOuptut(self, output):
if isinstance(self.config, str):
return self.convertDsToArray(output, self.config)
elif isinstance(self.config, dict):
return self.convertDsToArray(output, self.config['output'])
else:
raise Exception('ERROR: This is not the right format for dataStructure: {}'.format(self.ds))
def convertArrayToDs(self, array, dsName):
if dsName not in self.arrayToDs:
raise Exception('ERROR: Name of dataStructure not supported: {}'.format(dsName))
dsConstructor = self.arrayToDs[dsName]
ds = dsConstructor(array)
return ds
def convertDsToArray(self, ds, dsName):
if dsName not in self.dsToArray:
raise Exception('ERROR: Name of dataStructure not supported: {}'.format(dsName))
arrayConstructor = self.dsToArray[dsName]
array = arrayConstructor(ds)
return array
class Node:
# spec determined by leetcode
def __init__(self, val):
self.val = val
self.next = None
def createSinglyLinkedList(self, array, storeInArray=False):
if storeInArray:
container = []
head = None
curr = None
for elem in array:
node = self.Node(elem)
if storeInArray:
container.append(node)
if head is None:
head = node
curr = node
continue
curr.next = node
curr = node
if storeInArray:
return container
return head
def createArrayFromSinglyLinkedList(self, head):
array = []
while head is not None:
array.append(head.val)
head = head.next
return array
# custom
def createIntersectingLinkedLists(self, nA, nB, nIntersection):
headA = self.createSinglyLinkedList(range(nA))
headB = self.createSinglyLinkedList(range(nA, nA+nB))
if nIntersection is None or nIntersection == 0:
return headA, headB, None
headI = self.createSinglyLinkedList(range(nA+nB, nA+nB+nIntersection))
if headA is None:
headA = headI
else:
self.getEndofList(headA).next = headI
if headB is None:
headB = headI
else:
self.getEndofList(headB).next = headI
return headA, headB, headI
def getEndofList(self, head):
while head is not None and head.next is not None:
head = head.next
return head
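# Illustrative: passing dataStructures='SinglyLinkedList' to testSuite.init makes
# the suite convert list-shaped params into node chains before calling the function
# and convert the returned chain back to a list before comparing with `expected`.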
### Example usage
def main():
import sys
sys.path.append('/Users/Abe/my/codingPractice/algos')
import testSuite
tests = [
([[3,2,1,5,6,4], 2], 5),
([[3,2,3,1,2,4,5,5,6], 4], 4)
]
t = testSuite.init(tests)
t.test(v1)
t.test(v2)
t.test(v3)
| mit | -4,231,904,569,753,463,300 | 30.148515 | 108 | 0.578512 | false | 4.205882 | true | false | false |
tis-intern-apparel/ApparelStrategy | server/dialogue_system/module/database.py | 1 | 5633 | # -*- coding: utf-8 -*-
import os
import codecs
class Personal:
point_id = ''
user_name = ''
user_pronoun = ''
sex = ''
phone = ''
email = ''
address = ''
class Cloth:
cloth_name = ''
color_code = ''
small_type = ''
price = ''
image_url = ''
big_type = ''
cloth_code = ''
cloth_describe = ''
class Evaluate:
clothes = []
osyaredo = 0
class DataBaseManager:
def __init__(self,data_dir):
self.data_dir = data_dir
self.clothes_path = os.path.join(data_dir,'clothes.csv')
self.evaluate_path = os.path.join(data_dir,'evaluate.csv')
self.personal_path = os.path.join(data_dir,'personal.csv')
def __split_csvline(self,line):
return line.replace('\n','').replace('"','').split(',')
def __struct_personal(self,line):
cols = self.__split_csvline(line)
personal = Personal()
personal.point_id = cols[0]
personal.user_name = cols[1]
personal.user_pronoun = cols[2]
personal.sex = cols[3]
personal.phone = cols[4]
personal.email = cols[5]
personal.address = cols[6]
personal.age = cols[7]
return personal
def __struct_cloth(self,line):
cols = self.__split_csvline(line)
cloth = Cloth()
cloth.cloth_name = cols[0]
cloth.color_code = cols[1]
cloth.small_type = cols[2]
cloth.price = cols[3]
cloth.image_url = cols[4]
cloth.big_type = cols[5]
cloth.cloth_code = cols[6]
cloth.cloth_describe = cols[7]
return cloth
def __struct_evaluate(self,line):
cols = self.__split_csvline(line)
osyare = Evaluate()
osyare.clothes = []
for c in cols:
if c == 'null':
break
else:
osyare.clothes.append(c)
osyare.osyaredo = cols[3]
return osyare
def get_personal_from_id(self,point_id):
"""
read personal data from point id
:param point_id: search point id
:return: personal object
"""
with codecs.open(self.personal_path,'r','utf-8') as f:
for line in f:
personal = self.__struct_personal(line)
if personal.point_id == point_id:
return personal
return None
def get_clothes_from_code(self, cloth_code):
"""
read cloth data from cloth_code
:param cloth_code: cloth code for searching
:return: cloth object
"""
with codecs.open(self.clothes_path, 'r', 'utf-8') as f:
for line in f:
cloth = self.__struct_cloth(line)
if cloth.cloth_code == cloth_code:
return cloth
return None
def get_evaluate_from_code(self, cloth_code):
"""
read evaluate(osyaredo) from cloth code
:param cloth_code: cloth code for searching evaluate
:return: evaluate object list
"""
result = []
with codecs.open(self.evaluate_path, 'r', 'utf-8') as f:
for line in f:
ev = self.__struct_evaluate(line)
if ev.clothes.count(cloth_code) > 0:
result.append(ev)
if len(result) > 0:
return result
else:
return None
def get_evaluate_from_codelist(self, cloth_codelist):
"""
read evaluate(osyaredo) from cloth code
:param cloth_code: cloth code for searching evaluate
:return: evaluate object list
"""
result = []
with codecs.open(self.evaluate_path, 'r', 'utf-8') as f:
for line in f:
ev = self.__struct_evaluate(line)
isContain = True
for cloth in cloth_codelist:
if not cloth.cloth_code in ev.clothes:
isContain = False
break
if isContain:
result.append(ev)
if len(result) > 0:
return result
else:
return None
def get_clothes_from_name(self, contains_name):
"""
read cloth data from keyword that contains cloth name
:param contains_name: key contains cloth name
:return: cloth object list
"""
result = []
with codecs.open(self.clothes_path, 'r', 'utf-8') as f:
for line in f:
cloth = self.__struct_cloth(line)
if cloth.cloth_name.count(contains_name) > 0:
result.append(cloth)
if len(result) > 0:
return result
else:
return None
def get_clothes_from_keys(self, season,price = None):
"""
read cloth data from keyword that contains cloth name
:param contains_name: key contains cloth name
:return: cloth object list
"""
result = []
with codecs.open(self.clothes_path, 'r', 'utf-8') as f:
for line in f:
cloth = self.__struct_cloth(line)
if cloth.cloth_describe.count(season) > 0 or cloth.cloth_name.count(season) > 0:
result.append(cloth)
if len(result) > 0:
return result
else:
return None
if __name__ == '__main__':
script_dir = os.path.dirname(__file__)
data_path = os.path.join(script_dir,'../../data')
manager = DataBaseManager(data_path)
personal = manager.get_clothes_from_name('ズボン')
for p in personal:
print(p.cloth_name)
| mit | 3,080,065,227,675,096,000 | 28.005155 | 96 | 0.52568 | false | 3.709295 | false | false | false |
agoose77/hivesystem | tutorial/layers/layer17/layers.py | 1 | 2334 | from __future__ import print_function
# import the main and action components
from maincomponent import maincomponent
from action1 import action1component
from action2 import action2component
from action3 import action3component
#import manager components
from action3components import animationmanager
from action3components import soundmanager
#keyboard mainloop
from keycodes import ascii_to_keycode
from getch import getch, kbhit, change_termios, restore_termios
def mainloop(keyfunc=None):
change_termios()
while True:
while not kbhit(): continue
key = getch()
if isinstance(key, bytes) and bytes != str: key = key.decode()
if key not in ascii_to_keycode: continue
keycode = ascii_to_keycode[key]
if keycode == "ESCAPE": break
if keyfunc is not None: keyfunc(keycode)
restore_termios()
#define a generic pseudo-hive class
import libcontext
class pseudohive(object):
components = {}
def __init__(self):
for componentname, componentclass in self.components.items():
component = componentclass()
setattr(self, componentname, component)
def build(self, contextname):
self._contextname = contextname
self._context = libcontext.context(self._contextname)
def place(self):
libcontext.push(self._contextname)
for componentname, componentclass in self.components.items():
component = getattr(self, componentname)
component.place()
libcontext.pop()
def close(self):
self._context.close()
#define the main (pseudo-)hive
class mainhive(pseudohive):
components = {
#action3 manager components
"animationmanager": animationmanager,
"soundmanager": soundmanager,
#main component and action components
"maincomponent": maincomponent,
"action1": action1component,
"action2": action2component,
"action3": action3component,
}
#Set up the main hive and run it
#Give us a new mainhive instance
main = mainhive()
#Build a context named "main"
main.build("main")
#Declare sockets and plugins
main.place()
#Build all connections, and validate the connection network
main.close()
#Run the main loop
main.maincomponent.start()
mainloop(main.maincomponent.keypress)
| bsd-2-clause | 2,937,745,632,804,940,000 | 25.522727 | 70 | 0.696658 | false | 4.116402 | false | false | false |
DrSkippy/Gravitational-Three-Body-Symmetric | sim_pendulum.py | 1 | 1975 | #!/usr/bin/env python
import csv
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('ggplot')
# arg 1 = w init
# arg 2 = n periods
# arg 3 = n ratio
# time step
dt = np.float64(0.00010)
# constants
L_0 = np.float64(1.0) # unstretched length
g = np.float64(9.81) # gravitation
n = np.float64(sys.argv[3])
K_over_M = (n*n - 1)*g/L_0
# initial conditions
theta = np.float64(0)
L = L_0 + g/K_over_M # equilibrium length with gravity
# 2mgl = 1/2 m l^2 w^2
w_sep = np.sqrt(4.*g/L)
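# (Separatrix energy balance: (1/2)*m*L^2*w^2 = 2*m*g*L, i.e. the kinetic energy at
#  the bottom just reaches the inverted position, hence w_sep = sqrt(4*g/L).)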
w_0 = np.float64(sys.argv[1])
w = w_0
#
v_l_0 = 0
v_l = v_l_0
# periods
T_p = 2.*np.pi/np.sqrt(g/L)
T_k = 2.*np.pi/np.sqrt(K_over_M)
# record some stuff
print "Tp = {} T/dt = {}".format(T_p, T_p/dt)
print "Tk = {} T/dt = {}".format(T_k, T_k/dt)
print "Tk/Tp = {}".format(T_k/T_p)
print "w_esc = {}".format(w_sep)
t = np.float64(0.0)
theta_last = theta
# keep some records
data = []
t_s = []
theta += w*dt/2.
L += v_l*dt/2.
for i in range(int(sys.argv[2])*int(T_p/dt)):
w += -dt*g*np.sin(theta)/L
    v_l += dt*(-K_over_M*(L-L_0) + g*np.cos(theta) + w*w*L)
theta += w*dt
theta = np.fmod(theta, 2.*np.pi)
L += v_l*dt
t += dt
data.append([t, theta, w, L, v_l])
if theta_last < 0 and theta > 0:
t_s.append(t)
theta_last = theta
# periods by measure
t_s = [t_s[i] - t_s[i-1] for i in range(1,len(t_s)) ]
print "avg period = {} std periods = {}".format(np.average(t_s), np.std(t_s))
# plots
df = pd.DataFrame().from_records(data)
df.columns = ["t", "theta", "omega", "l", "v_l"]
df.set_index("t")
ax = df.plot(kind="scatter", x="theta", y="omega", marker=".")
fig = ax.get_figure()
fig.savefig("phase1.png")
ax = df.plot(kind="scatter", x="l", y="v_l", marker=".")
fig = ax.get_figure()
fig.savefig("phase2.png")
# config space
df["y_c"] = -df["l"]
df["x_c"] = df["l"] * np.sin(df["theta"])
ax = df.plot(kind="scatter", x="x_c", y="y_c", marker=".")
fig = ax.get_figure()
fig.savefig("config.png")
| cc0-1.0 | 3,132,915,243,086,640,600 | 20.944444 | 77 | 0.578734 | false | 2.214126 | false | false | false |
metpy/SHARPpy | sharppy/version.py | 1 | 1931 | import os.path
import subprocess
release = False
__version__ = '0.2'
_repository_path = os.path.split(__file__)[0]
_git_file_path = os.path.join(_repository_path, '__git_version__.py')
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
return out
def get_git_hash():
'''
Gets the last GIT commit hash and date for the repository, using the
path to this file.
'''
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except:
GIT_REVISION = None
return GIT_REVISION
def get_git_revision():
hash = get_git_hash()
if hash :
rev = '.dev.' + hash[:7]
try:
cmd = ['git', 'show', '%s' % (hash), '--date=short',
'--format="(%ad)"']
date = _minimal_ext_cmd(cmd).split('"')[1]
rev += date
except:
pass
else:
rev = ".dev.Unknown"
return rev
def write_git_version():
'Write the GIT revision to a file.'
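    # The generated __git_version__.py holds a single line such as
    #   rev = ".dev.1a2b3c4(2015-06-01)"   (example hash/date, not real values)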
rev = get_git_revision()
if rev == ".dev.Unknown":
if os.path.isfile(_git_file_path):
return
gitfile = open(_git_file_path, 'w')
gitfile.write('rev = "%s"\n' % rev)
gitfile.close()
def get_version():
'''
Get the version of the package, including the GIT revision if this
is an actual release.
'''
version = __version__
if not release:
try:
import __git_version__
version += __git_version__.rev
except ImportError:
version += get_git_revision()
return version
| bsd-3-clause | 6,243,977,662,548,874,000 | 23.75641 | 81 | 0.539617 | false | 3.504537 | false | false | false |
brian-o/CS-CourseWork | CS491/Program2/testForks.py | 1 | 2677 | ############################################################
'''
testForks.py
    Written by: Brian O'Dell, September 2017
    A program to run each program 500 times per thread count.
    Then uses the data collected to make graphs and tables that
    are useful for evaluating the programs' running time.
'''
############################################################
from subprocess import *
from numba import jit
import numpy as np
import csv as csv
import pandas as pd
from pandas.plotting import table
import matplotlib.pyplot as plt
'''
Call the C program multiple times with variable arguments to gather data
The name of the executable should exist before running
'''
@jit
def doCount(name):
j = 0
while (j < 1025):
for i in range(0,501):
call([name,"-t",str(j), "-w"])
if (j == 0):
j = 1
else:
j = 2*j;
'''
Turn the data into something meaningful.
Takes all the data and computes the average and standard deviation for each
number of threads. Then plots a graph based on it. Also makes
a csv with the avg and stddev.
'''
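# Assumed input layout: data/<name>.csv written by the timed runs, with a header
# row containing at least 'NumThreads' and 'ExecTime' columns (one row per run).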
@jit
def exportData(name):
DF = pd.read_csv("data/"+name+".csv")
f = {'ExecTime':['mean','std']}
#group by the number of threads in the csv and
#apply the mean and standard deviation functions to the groups
avgDF = DF.groupby('NumThreads').agg(f)
avgTable = DF.groupby('NumThreads', as_index=False).agg(f)
#When the data csv was saved we used 0 to indicate serial execution
#this was so the rows would be in numerical order instead of Alphabetical
#Now rename index 0 to Serial to be an accurate representation
indexList = avgDF.index.tolist()
indexList[0] = 'Serial'
avgDF.index = indexList
#make the bar chart and set the axes
avgPlot = avgDF.plot(kind='bar',
title=('Run Times Using '+ name), legend='False', figsize=(15,8))
avgPlot.set_xlabel("Number of Forks")
avgPlot.set_ylabel("Run Time (seconds)")
#put the data values on top of the bars for clarity
avgPlot.legend(['mean','std deviation'])
for p in avgPlot.patches:
avgPlot.annotate((str(p.get_height())[:6]),
(p.get_x()-.01, p.get_height()), fontsize=9)
#save the files we need
plt.savefig('data/'+name+'Graph.png')
avgTable.to_csv('data/'+name+'Table.csv', index=False, encoding='utf-8')
def main():
doCount("./forkedSemaphor")
doCount("./forkedPrivateCount")
doCount("./forkedPrivateCount32")
exportData("forkedSemaphor")
exportData("forkedPrivateCount")
exportData("forkedPrivateCount32")
if __name__ == '__main__':
main()
| gpl-3.0 | 5,887,714,014,429,846,000 | 30.494118 | 82 | 0.623086 | false | 3.851799 | false | false | false |
PennyDreadfulMTG/Penny-Dreadful-Tools | modo_bugs/fetcher.py | 1 | 4118 | import os
import sys
from typing import Dict, List, Optional, Tuple
from bs4 import BeautifulSoup
from bs4.element import Tag
from shared import fetch_tools, lazy
def search_scryfall(query: str) -> Tuple[int, List[str], List[str]]:
"""Returns a tuple. First member is an integer indicating how many cards match the query total,
second member is a list of card names up to the maximum that could be fetched in a timely fashion."""
if query == '':
return 0, [], []
print(f'Searching scryfall for `{query}`')
result_json = fetch_tools.fetch_json('https://api.scryfall.com/cards/search?q=' + fetch_tools.escape(query), character_encoding='utf-8')
if 'code' in result_json.keys(): # The API returned an error
if result_json['status'] == 404: # No cards found
return 0, [], []
print('Error fetching scryfall data:\n', result_json)
return 0, [], []
for warning in result_json.get('warnings', []): # scryfall-provided human-readable warnings
print(warning)
result_data = result_json['data']
result_data.sort(key=lambda x: x['legalities']['penny'])
def get_frontside(scr_card: Dict) -> str:
"""If card is transform, returns first name. Otherwise, returns name.
This is to make sure cards are later found in the database"""
# not sure how to handle meld cards
if scr_card['layout'] in ['transform', 'flip', 'modal_dfc']:
return scr_card['card_faces'][0]['name']
return scr_card['name']
result_cardnames = [get_frontside(obj) for obj in result_data]
return result_json['total_cards'], result_cardnames, result_json.get('warnings', [])
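# Illustrative use (the query string is only an example):
#   total, names, warnings = search_scryfall('banned:penny')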
def catalog_cardnames() -> List[str]:
result_json = fetch_tools.fetch_json('https://api.scryfall.com/catalog/card-names')
names: List[str] = result_json['data']
for n in names:
if ' // ' in n:
names.extend(n.split(' // '))
return names
def update_redirect(file: str, title: str, redirect: str, **kwargs: str) -> bool:
text = '---\ntitle: {title}\nredirect_to:\n - {url}\n'.format(title=title, url=redirect)
for key, value in kwargs.items():
text += f'{key}: {value}\n'
text = text + '---\n'
fname = f'{file}.md'
if not os.path.exists(fname):
bb_jekyl = open(fname, mode='w')
bb_jekyl.write('')
bb_jekyl.close()
bb_jekyl = open(fname, mode='r')
orig = bb_jekyl.read()
bb_jekyl.close()
if orig != text:
print(f'New {file} update!')
bb_jekyl = open(fname, mode='w')
bb_jekyl.write(text)
bb_jekyl.close()
return True
if 'always-scrape' in sys.argv:
return True
return False
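# For reference, update_redirect writes a small Jekyll front-matter stub shaped like:
#   ---
#   title: <title>
#   redirect_to:
#     - <url>
#   <extra key>: <value>
#   ---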
def find_bug_blog() -> Tuple[Optional[str], bool]:
bug_blogs = [a for a in get_article_archive() if str(a[0].string).startswith('Magic Online Bug Blog')]
if not bug_blogs:
return (None, False)
(title, link) = bug_blogs[0]
print('Found: {0} ({1})'.format(title, link))
new = update_redirect('bug_blog', title.text, link)
return (link, new)
def find_announcements() -> Tuple[str, bool]:
articles = [a for a in get_article_archive() if str(a[0].string).startswith('Magic Online Announcements')]
(title, link) = articles[0]
print('Found: {0} ({1})'.format(title, link))
bn = 'Build Notes' in fetch_tools.fetch(link)
new = update_redirect('announcements', title.text, link, has_build_notes=str(bn))
return (link, new)
def parse_article_item_extended(a: Tag) -> Tuple[Tag, str]:
title = a.find_all('h3')[0]
link = 'http://magic.wizards.com' + a.find_all('a')[0]['href']
return (title, link)
@lazy.lazy_property
def get_article_archive() -> List[Tuple[Tag, str]]:
try:
html = fetch_tools.fetch('http://magic.wizards.com/en/articles/archive/184956')
except fetch_tools.FetchException:
html = fetch_tools.fetch('http://magic.wizards.com/en/articles/archive/')
soup = BeautifulSoup(html, 'html.parser')
return [parse_article_item_extended(a) for a in soup.find_all('div', class_='article-item-extended')]
| gpl-3.0 | -1,526,794,542,128,501,000 | 41.020408 | 140 | 0.629917 | false | 3.278662 | false | false | false |
heldergg/webpymail | webpymail/sabapp/models.py | 1 | 2844 | # -*- coding: utf-8 -*-
# sabapp - Simple Address Book Application
# Copyright (C) 2008 Helder Guerreiro
# This file is part of sabapp.
#
# sabapp is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# sabapp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with sabapp. If not, see <http://www.gnu.org/licenses/>.
#
# Helder Guerreiro <[email protected]>
#
from django.contrib.auth.models import User
from django.db import models
from django.db.models import Q
from django.utils.translation import gettext_lazy as _
# Models:
ADDRESSBOOKTYPE = (
(1, _('User address book')),
(2, _('Server address book')),
(3, _('Site address book')),
)
class AddressManager(models.Manager):
def for_request(self, request):
'''Addresses available for request'''
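        # Union of the user's own entries on this IMAP server (type 1), the
        # server-wide address book for this host (type 2), and the site-wide book (type 3).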
host = request.session['host']
return super(AddressManager, self).get_queryset().filter(
Q(user__exact=request.user, imap_server__exact=host,
ab_type__exact=1) |
Q(imap_server__exact=host, ab_type__exact=2) |
Q(ab_type__exact=3))
def have_addr(self, request, addr):
address = self.for_request(request).filter(email__iexact=addr)
return bool(address)
class Address(models.Model):
user = models.ForeignKey(User, null=True)
imap_server = models.CharField(_('IMAP server'), max_length=128)
nickname = models.CharField(max_length=64, blank=True)
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=64, blank=True)
email = models.EmailField(_('e-mail address'))
    additional_info = models.CharField(_('additional information'),
max_length=128, blank=True)
ab_type = models.IntegerField(choices=ADDRESSBOOKTYPE)
objects = AddressManager()
class Meta:
verbose_name = _('Address')
verbose_name_plural = _('Addresses')
db_table = 'address_book'
ordering = ['first_name', 'last_name', 'email']
def full_name(self):
return ('%s %s' % (self.first_name, self.last_name)).strip()
def mail_addr(self):
name = ('%s %s' % (self.first_name, self.last_name)).strip()
if name:
return '"%s" <%s>' % (name, self.email)
else:
return self.email
def __str__(self):
return self.mail_addr()
| gpl-3.0 | -5,009,104,855,560,248,000 | 31.318182 | 77 | 0.645921 | false | 3.707953 | false | false | false |
silentfuzzle/calibre | src/calibre/devices/kobo/driver.py | 1 | 147621 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import division
__license__ = 'GPL v3'
__copyright__ = '2010-2012, Timothy Legge <[email protected]>, Kovid Goyal <[email protected]> and David Forrester <[email protected]>'
__docformat__ = 'restructuredtext en'
'''
Driver for Kobo ereaders. Supports all e-ink devices.
Originally developed by Timothy Legge <[email protected]>.
Extended to support Touch firmware 2.0.0 and later and newer devices by David Forrester <[email protected]>
'''
import os, time, shutil
from contextlib import closing
from calibre.devices.usbms.books import BookList
from calibre.devices.usbms.books import CollectionsBookList
from calibre.devices.kobo.books import KTCollectionsBookList
from calibre.devices.kobo.books import Book
from calibre.devices.kobo.books import ImageWrapper
from calibre.devices.mime import mime_type_ext
from calibre.devices.usbms.driver import USBMS, debug_print
from calibre import prints, fsync
from calibre.ptempfile import PersistentTemporaryFile
from calibre.constants import DEBUG
from calibre.utils.config_base import prefs
EPUB_EXT = '.epub'
KEPUB_EXT = '.kepub'
# Implementation of QtQHash for strings. This doesn't seem to be in the Python implementation.
def qhash(inputstr):
instr = b""
if isinstance(inputstr, bytes):
instr = inputstr
elif isinstance(inputstr, unicode):
instr = inputstr.encode("utf8")
else:
return -1
h = 0x00000000
for x in bytearray(instr):
h = (h << 4) + x
h ^= (h & 0xf0000000) >> 23
h &= 0x0fffffff
return h
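# Note: unicode input is UTF-8 encoded first, so e.g. qhash(u"abc") == qhash(b"abc");
# unsupported input types return -1 and results are truncated to 28 bits.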
class DummyCSSPreProcessor(object):
def __call__(self, data, add_namespace=False):
return data
class KOBO(USBMS):
name = 'Kobo Reader Device Interface'
gui_name = 'Kobo Reader'
description = _('Communicate with the Kobo Reader')
author = 'Timothy Legge and David Forrester'
version = (2, 1, 8)
dbversion = 0
fwversion = 0
supported_dbversion = 120
has_kepubs = False
supported_platforms = ['windows', 'osx', 'linux']
booklist_class = CollectionsBookList
book_class = Book
# Ordered list of supported formats
FORMATS = ['kepub', 'epub', 'pdf', 'txt', 'cbz', 'cbr']
CAN_SET_METADATA = ['collections']
VENDOR_ID = [0x2237]
BCD = [0x0110, 0x0323, 0x0326]
ORIGINAL_PRODUCT_ID = [0x4165]
WIFI_PRODUCT_ID = [0x4161, 0x4162]
PRODUCT_ID = ORIGINAL_PRODUCT_ID + WIFI_PRODUCT_ID
VENDOR_NAME = ['KOBO_INC', 'KOBO']
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['.KOBOEREADER', 'EREADER']
EBOOK_DIR_MAIN = ''
SUPPORTS_SUB_DIRS = True
SUPPORTS_ANNOTATIONS = True
# "kepubs" do not have an extension. The name looks like a GUID. Using an empty string seems to work.
VIRTUAL_BOOK_EXTENSIONS = frozenset(['kobo', ''])
EXTRA_CUSTOMIZATION_MESSAGE = [
_('The Kobo supports several collections including ')+
'Read, Closed, Im_Reading. ' +
_('Create tags for automatic management'),
_('Upload covers for books (newer readers)') +
':::'+_('Normally, the KOBO readers get the cover image from the'
' ebook file itself. With this option, calibre will send a '
'separate cover image to the reader, useful if you '
'have modified the cover.'),
_('Upload Black and White Covers'),
_('Show expired books') +
':::'+_('A bug in an earlier version left non kepubs book records'
' in the database. With this option Calibre will show the '
'expired records and allow you to delete them with '
'the new delete logic.'),
_('Show Previews') +
':::'+_('Kobo previews are included on the Touch and some other versions'
' by default they are no longer displayed as there is no good reason to '
'see them. Enable if you wish to see/delete them.'),
_('Show Recommendations') +
':::'+_('Kobo now shows recommendations on the device. In some cases these have '
'files but in other cases they are just pointers to the web site to buy. '
'Enable if you wish to see/delete them.'),
_('Attempt to support newer firmware') +
':::'+_('Kobo routinely updates the firmware and the '
'database version. With this option calibre will attempt '
'to perform full read-write functionality - Here be Dragons!! '
'Enable only if you are comfortable with restoring your kobo '
'to factory defaults and testing software'),
]
EXTRA_CUSTOMIZATION_DEFAULT = [
', '.join(['tags']),
True,
True,
True,
False,
False,
False
]
OPT_COLLECTIONS = 0
OPT_UPLOAD_COVERS = 1
OPT_UPLOAD_GRAYSCALE_COVERS = 2
OPT_SHOW_EXPIRED_BOOK_RECORDS = 3
OPT_SHOW_PREVIEWS = 4
OPT_SHOW_RECOMMENDATIONS = 5
OPT_SUPPORT_NEWER_FIRMWARE = 6
def initialize(self):
USBMS.initialize(self)
self.dbversion = 7
def device_database_path(self):
return self.normalize_path(self._main_prefix + '.kobo/KoboReader.sqlite')
def books(self, oncard=None, end_session=True):
from calibre.ebooks.metadata.meta import path_to_ext
dummy_bl = BookList(None, None, None)
if oncard == 'carda' and not self._card_a_prefix:
self.report_progress(1.0, _('Getting list of books on device...'))
return dummy_bl
elif oncard == 'cardb' and not self._card_b_prefix:
self.report_progress(1.0, _('Getting list of books on device...'))
return dummy_bl
elif oncard and oncard != 'carda' and oncard != 'cardb':
self.report_progress(1.0, _('Getting list of books on device...'))
return dummy_bl
prefix = self._card_a_prefix if oncard == 'carda' else \
self._card_b_prefix if oncard == 'cardb' \
else self._main_prefix
# Determine the firmware version
try:
with open(self.normalize_path(self._main_prefix + '.kobo/version'),
'rb') as f:
self.fwversion = f.readline().split(',')[2]
except:
self.fwversion = 'unknown'
if self.fwversion != '1.0' and self.fwversion != '1.4':
self.has_kepubs = True
debug_print('Version of driver: ', self.version, 'Has kepubs:', self.has_kepubs)
debug_print('Version of firmware: ', self.fwversion, 'Has kepubs:', self.has_kepubs)
self.booklist_class.rebuild_collections = self.rebuild_collections
# get the metadata cache
bl = self.booklist_class(oncard, prefix, self.settings)
need_sync = self.parse_metadata_cache(bl, prefix, self.METADATA_CACHE)
# make a dict cache of paths so the lookup in the loop below is faster.
bl_cache = {}
for idx,b in enumerate(bl):
bl_cache[b.lpath] = idx
def update_booklist(prefix, path, title, authors, mime, date, ContentType, ImageID, readstatus, MimeType, expired, favouritesindex, accessibility):
changed = False
try:
lpath = path.partition(self.normalize_path(prefix))[2]
if lpath.startswith(os.sep):
lpath = lpath[len(os.sep):]
lpath = lpath.replace('\\', '/')
# debug_print("LPATH: ", lpath, " - Title: " , title)
playlist_map = {}
if lpath not in playlist_map:
playlist_map[lpath] = []
if readstatus == 1:
playlist_map[lpath].append('Im_Reading')
elif readstatus == 2:
playlist_map[lpath].append('Read')
elif readstatus == 3:
playlist_map[lpath].append('Closed')
# Related to a bug in the Kobo firmware that leaves an expired row for deleted books
# this shows an expired Collection so the user can decide to delete the book
if expired == 3:
playlist_map[lpath].append('Expired')
# A SHORTLIST is supported on the touch but the data field is there on most earlier models
if favouritesindex == 1:
playlist_map[lpath].append('Shortlist')
# Label Previews
if accessibility == 6:
playlist_map[lpath].append('Preview')
elif accessibility == 4:
playlist_map[lpath].append('Recommendation')
path = self.normalize_path(path)
# print "Normalized FileName: " + path
idx = bl_cache.get(lpath, None)
if idx is not None:
bl_cache[lpath] = None
if ImageID is not None:
imagename = self.normalize_path(self._main_prefix + '.kobo/images/' + ImageID + ' - NickelBookCover.parsed')
if not os.path.exists(imagename):
# Try the Touch version if the image does not exist
imagename = self.normalize_path(self._main_prefix + '.kobo/images/' + ImageID + ' - N3_LIBRARY_FULL.parsed')
# print "Image name Normalized: " + imagename
if not os.path.exists(imagename):
debug_print("Strange - The image name does not exist - title: ", title)
if imagename is not None:
bl[idx].thumbnail = ImageWrapper(imagename)
if (ContentType != '6' and MimeType != 'Shortcover'):
if os.path.exists(self.normalize_path(os.path.join(prefix, lpath))):
if self.update_metadata_item(bl[idx]):
# print 'update_metadata_item returned true'
changed = True
else:
debug_print(" Strange: The file: ", prefix, lpath, " does mot exist!")
if lpath in playlist_map and \
playlist_map[lpath] not in bl[idx].device_collections:
bl[idx].device_collections = playlist_map.get(lpath,[])
else:
if ContentType == '6' and MimeType == 'Shortcover':
book = Book(prefix, lpath, title, authors, mime, date, ContentType, ImageID, size=1048576)
else:
try:
if os.path.exists(self.normalize_path(os.path.join(prefix, lpath))):
book = self.book_from_path(prefix, lpath, title, authors, mime, date, ContentType, ImageID)
else:
debug_print(" Strange: The file: ", prefix, lpath, " does mot exist!")
title = "FILE MISSING: " + title
book = Book(prefix, lpath, title, authors, mime, date, ContentType, ImageID, size=1048576)
except:
debug_print("prefix: ", prefix, "lpath: ", lpath, "title: ", title, "authors: ", authors,
"mime: ", mime, "date: ", date, "ContentType: ", ContentType, "ImageID: ", ImageID)
raise
# print 'Update booklist'
book.device_collections = playlist_map.get(lpath,[]) # if lpath in playlist_map else []
if bl.add_book(book, replace_metadata=False):
changed = True
except: # Probably a path encoding error
import traceback
traceback.print_exc()
return changed
import sqlite3 as sqlite
with closing(sqlite.connect(
self.normalize_path(self._main_prefix +
'.kobo/KoboReader.sqlite'))) as connection:
# return bytestrings if the content cannot the decoded as unicode
connection.text_factory = lambda x: unicode(x, "utf-8", "ignore")
cursor = connection.cursor()
cursor.execute('select version from dbversion')
result = cursor.fetchone()
self.dbversion = result[0]
debug_print("Database Version: ", self.dbversion)
opts = self.settings()
if self.dbversion >= 33:
query= ('select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, '
'ImageID, ReadStatus, ___ExpirationStatus, FavouritesIndex, Accessibility, IsDownloaded from content where '
'BookID is Null %(previews)s %(recomendations)s and not ((___ExpirationStatus=3 or ___ExpirationStatus is Null) %(expiry)s') % dict(expiry=' and ContentType = 6)'
if opts.extra_customization[self.OPT_SHOW_EXPIRED_BOOK_RECORDS] else ')',
previews=' and Accessibility <> 6'
if opts.extra_customization[self.OPT_SHOW_PREVIEWS] == False else '',
recomendations=' and IsDownloaded in (\'true\', 1)'
if opts.extra_customization[self.OPT_SHOW_RECOMMENDATIONS] == False else '')
elif self.dbversion >= 16 and self.dbversion < 33:
query= ('select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, '
'ImageID, ReadStatus, ___ExpirationStatus, FavouritesIndex, Accessibility, "1" as IsDownloaded from content where '
'BookID is Null and not ((___ExpirationStatus=3 or ___ExpirationStatus is Null) %(expiry)s') % dict(expiry=' and ContentType = 6)'
if opts.extra_customization[self.OPT_SHOW_EXPIRED_BOOK_RECORDS] else ')')
elif self.dbversion < 16 and self.dbversion >= 14:
query= ('select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, '
'ImageID, ReadStatus, ___ExpirationStatus, FavouritesIndex, "-1" as Accessibility, "1" as IsDownloaded from content where '
'BookID is Null and not ((___ExpirationStatus=3 or ___ExpirationStatus is Null) %(expiry)s') % dict(expiry=' and ContentType = 6)'
if opts.extra_customization[self.OPT_SHOW_EXPIRED_BOOK_RECORDS] else ')')
elif self.dbversion < 14 and self.dbversion >= 8:
query= ('select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, '
'ImageID, ReadStatus, ___ExpirationStatus, "-1" as FavouritesIndex, "-1" as Accessibility, "1" as IsDownloaded from content where '
'BookID is Null and not ((___ExpirationStatus=3 or ___ExpirationStatus is Null) %(expiry)s') % dict(expiry=' and ContentType = 6)'
if opts.extra_customization[self.OPT_SHOW_EXPIRED_BOOK_RECORDS] else ')')
else:
query= 'select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, ' \
'ImageID, ReadStatus, "-1" as ___ExpirationStatus, "-1" as FavouritesIndex, "-1" as Accessibility, "1" as IsDownloaded from content where BookID is Null'
try:
cursor.execute(query)
except Exception as e:
err = str(e)
if not ('___ExpirationStatus' in err or 'FavouritesIndex' in err or
'Accessibility' in err or 'IsDownloaded' in err):
raise
query= ('select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, '
'ImageID, ReadStatus, "-1" as ___ExpirationStatus, "-1" as '
'FavouritesIndex, "-1" as Accessibility from content where '
'BookID is Null')
cursor.execute(query)
changed = False
for i, row in enumerate(cursor):
# self.report_progress((i+1) / float(numrows), _('Getting list of books on device...'))
if not hasattr(row[3], 'startswith') or row[3].startswith("file:///usr/local/Kobo/help/"):
# These are internal to the Kobo device and do not exist
continue
path = self.path_from_contentid(row[3], row[5], row[4], oncard)
mime = mime_type_ext(path_to_ext(path)) if path.find('kepub') == -1 else 'application/epub+zip'
# debug_print("mime:", mime)
if oncard != 'carda' and oncard != 'cardb' and not row[3].startswith("file:///mnt/sd/"):
changed = update_booklist(self._main_prefix, path, row[0], row[1], mime, row[2], row[5], row[6], row[7], row[4], row[8], row[9], row[10])
# print "shortbook: " + path
elif oncard == 'carda' and row[3].startswith("file:///mnt/sd/"):
changed = update_booklist(self._card_a_prefix, path, row[0], row[1], mime, row[2], row[5], row[6], row[7], row[4], row[8], row[9], row[10])
if changed:
need_sync = True
cursor.close()
# Remove books that are no longer in the filesystem. Cache contains
# indices into the booklist if book not in filesystem, None otherwise
# Do the operation in reverse order so indices remain valid
for idx in sorted(bl_cache.itervalues(), reverse=True):
if idx is not None:
need_sync = True
del bl[idx]
# print "count found in cache: %d, count of files in metadata: %d, need_sync: %s" % \
# (len(bl_cache), len(bl), need_sync)
if need_sync: # self.count_found_in_bl != len(bl) or need_sync:
if oncard == 'cardb':
self.sync_booklists((None, None, bl))
elif oncard == 'carda':
self.sync_booklists((None, bl, None))
else:
self.sync_booklists((bl, None, None))
self.report_progress(1.0, _('Getting list of books on device...'))
return bl
def filename_callback(self, path, mi):
# debug_print("Kobo:filename_callback:Path - {0}".format(path))
idx = path.rfind('.')
ext = path[idx:]
if ext == KEPUB_EXT:
path = path + EPUB_EXT
# debug_print("Kobo:filename_callback:New path - {0}".format(path))
return path
def delete_via_sql(self, ContentID, ContentType):
# Delete Order:
# 1) shortcover_page
        #    2) volume_shortcovers
        #    3) content
import sqlite3 as sqlite
debug_print('delete_via_sql: ContentID: ', ContentID, 'ContentType: ', ContentType)
with closing(sqlite.connect(self.normalize_path(self._main_prefix +
'.kobo/KoboReader.sqlite'))) as connection:
# return bytestrings if the content cannot the decoded as unicode
connection.text_factory = lambda x: unicode(x, "utf-8", "ignore")
cursor = connection.cursor()
t = (ContentID,)
cursor.execute('select ImageID from content where ContentID = ?', t)
ImageID = None
for row in cursor:
# First get the ImageID to delete the images
ImageID = row[0]
cursor.close()
cursor = connection.cursor()
if ContentType == 6 and self.dbversion < 8:
# Delete the shortcover_pages first
cursor.execute('delete from shortcover_page where shortcoverid in (select ContentID from content where BookID = ?)', t)
# Delete the volume_shortcovers second
cursor.execute('delete from volume_shortcovers where volumeid = ?', t)
# Delete the rows from content_keys
if self.dbversion >= 8:
cursor.execute('delete from content_keys where volumeid = ?', t)
# Delete the chapters associated with the book next
t = (ContentID,)
# Kobo does not delete the Book row (ie the row where the BookID is Null)
# The next server sync should remove the row
cursor.execute('delete from content where BookID = ?', t)
if ContentType == 6:
try:
cursor.execute('update content set ReadStatus=0, FirstTimeReading = \'true\', ___PercentRead=0, ___ExpirationStatus=3 '
'where BookID is Null and ContentID =?',t)
except Exception as e:
if 'no such column' not in str(e):
raise
try:
cursor.execute('update content set ReadStatus=0, FirstTimeReading = \'true\', ___PercentRead=0 '
'where BookID is Null and ContentID =?',t)
except Exception as e:
if 'no such column' not in str(e):
raise
cursor.execute('update content set ReadStatus=0, FirstTimeReading = \'true\' '
'where BookID is Null and ContentID =?',t)
else:
cursor.execute('delete from content where BookID is Null and ContentID =?',t)
connection.commit()
cursor.close()
if ImageID is None:
print "Error condition: ImageID was not found"
print "You likely tried to delete a book that the Kobo has not yet added to the database"
# If all this succeeds we need to delete the images files via the ImageID
return ImageID
def delete_images(self, ImageID, book_path):
if ImageID is not None:
path_prefix = '.kobo/images/'
path = self._main_prefix + path_prefix + ImageID
file_endings = (' - iPhoneThumbnail.parsed', ' - bbMediumGridList.parsed', ' - NickelBookCover.parsed', ' - N3_LIBRARY_FULL.parsed',
' - N3_LIBRARY_GRID.parsed', ' - N3_LIBRARY_LIST.parsed', ' - N3_SOCIAL_CURRENTREAD.parsed', ' - N3_FULL.parsed',)
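# These endings correspond to the cached cover image variants stored for each
# ImageID; each one is removed below, if present, when the book is deleted.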
for ending in file_endings:
fpath = path + ending
fpath = self.normalize_path(fpath)
if os.path.exists(fpath):
# print 'Image File Exists: ' + fpath
os.unlink(fpath)
def delete_books(self, paths, end_session=True):
if self.modify_database_check("delete_books") == False:
return
for i, path in enumerate(paths):
self.report_progress((i+1) / float(len(paths)), _('Removing books from device...'))
path = self.normalize_path(path)
# print "Delete file normalized path: " + path
extension = os.path.splitext(path)[1]
ContentType = self.get_content_type_from_extension(extension) if extension != '' else self.get_content_type_from_path(path)
ContentID = self.contentid_from_path(path, ContentType)
ImageID = self.delete_via_sql(ContentID, ContentType)
# print " We would now delete the Images for" + ImageID
self.delete_images(ImageID, path)
if os.path.exists(path):
# Delete the ebook
# print "Delete the ebook: " + path
os.unlink(path)
filepath = os.path.splitext(path)[0]
for ext in self.DELETE_EXTS:
if os.path.exists(filepath + ext):
# print "Filename: " + filename
os.unlink(filepath + ext)
if os.path.exists(path + ext):
# print "Filename: " + filename
os.unlink(path + ext)
if self.SUPPORTS_SUB_DIRS:
try:
# print "removed"
os.removedirs(os.path.dirname(path))
except:
pass
self.report_progress(1.0, _('Removing books from device...'))
def remove_books_from_metadata(self, paths, booklists):
if self.modify_database_check("remove_books_from_metatata") == False:
return
for i, path in enumerate(paths):
self.report_progress((i+1) / float(len(paths)), _('Removing books from device metadata listing...'))
for bl in booklists:
for book in bl:
# print "Book Path: " + book.path
if path.endswith(book.path):
# print " Remove: " + book.path
bl.remove_book(book)
self.report_progress(1.0, _('Removing books from device metadata listing...'))
def add_books_to_metadata(self, locations, metadata, booklists):
metadata = iter(metadata)
for i, location in enumerate(locations):
self.report_progress((i+1) / float(len(locations)), _('Adding books to device metadata listing...'))
info = metadata.next()
blist = 2 if location[1] == 'cardb' else 1 if location[1] == 'carda' else 0
# Extract the correct prefix from the pathname. To do this correctly,
# we must ensure that both the prefix and the path are normalized
# so that the comparison will work. Book's __init__ will fix up
# lpath, so we don't need to worry about that here.
path = self.normalize_path(location[0])
if self._main_prefix:
prefix = self._main_prefix if \
path.startswith(self.normalize_path(self._main_prefix)) else None
if not prefix and self._card_a_prefix:
prefix = self._card_a_prefix if \
path.startswith(self.normalize_path(self._card_a_prefix)) else None
if not prefix and self._card_b_prefix:
prefix = self._card_b_prefix if \
path.startswith(self.normalize_path(self._card_b_prefix)) else None
if prefix is None:
prints('in add_books_to_metadata. Prefix is None!', path,
self._main_prefix)
continue
# print "Add book to metatdata: "
# print "prefix: " + prefix
lpath = path.partition(prefix)[2]
if lpath.startswith('/') or lpath.startswith('\\'):
lpath = lpath[1:]
# print "path: " + lpath
book = self.book_class(prefix, lpath, other=info)
if book.size is None or book.size == 0:
book.size = os.stat(self.normalize_path(path)).st_size
b = booklists[blist].add_book(book, replace_metadata=True)
if b:
b._new_book = True
self.report_progress(1.0, _('Adding books to device metadata listing...'))
def contentid_from_path(self, path, ContentType):
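# Maps a filesystem path back to the ContentID recorded in the Kobo database.
# Illustrative examples (actual prefixes depend on the device mount point):
#   <main prefix>/book.epub        -> file:///mnt/onboard/book.epub  (ContentType 16)
#   <main prefix>/page.html        -> /mnt/onboard/page.html         (ContentType 999)
#   <main prefix>/.kobo/kepub/<id> -> <id>                           (ContentType 6, kepub)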
if ContentType == 6:
extension = os.path.splitext(path)[1]
if extension == '.kobo':
ContentID = os.path.splitext(path)[0]
# Remove the prefix on the file. it could be either
ContentID = ContentID.replace(self._main_prefix, '')
else:
ContentID = path
ContentID = ContentID.replace(self._main_prefix + self.normalize_path('.kobo/kepub/'), '')
if self._card_a_prefix is not None:
ContentID = ContentID.replace(self._card_a_prefix, '')
elif ContentType == 999: # HTML Files
ContentID = path
ContentID = ContentID.replace(self._main_prefix, "/mnt/onboard/")
if self._card_a_prefix is not None:
ContentID = ContentID.replace(self._card_a_prefix, "/mnt/sd/")
else: # ContentType = 16
ContentID = path
ContentID = ContentID.replace(self._main_prefix, "file:///mnt/onboard/")
if self._card_a_prefix is not None:
ContentID = ContentID.replace(self._card_a_prefix, "file:///mnt/sd/")
ContentID = ContentID.replace("\\", '/')
return ContentID
def get_content_type_from_path(self, path):
# Strictly speaking the ContentType could be 6 or 10;
# however, newspapers have the same storage format
if path.find('kepub') >= 0:
ContentType = 6
return ContentType
def get_content_type_from_extension(self, extension):
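# ContentType codes as used by this driver: 6 for Kobo-native books
# (.kobo / kepub), 16 for sideloaded epub and pdf, and 999 or 901 for
# text-like formats depending on the firmware version (see below).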
if extension == '.kobo':
# Kobo books do not have book files. They do have some images though
# print "kobo book"
ContentType = 6
elif extension == '.pdf' or extension == '.epub':
# print "ePub or pdf"
ContentType = 16
elif extension == '.rtf' or extension == '.txt' or extension == '.htm' or extension == '.html':
# print "txt"
if self.fwversion == '1.0' or self.fwversion == '1.4' or self.fwversion == '1.7.4':
ContentType = 999
else:
ContentType = 901
else: # if extension == '.html' or extension == '.txt':
ContentType = 901 # Yet another hack: to get around Kobo changing how ContentID is stored
return ContentType
def path_from_contentid(self, ContentID, ContentType, MimeType, oncard):
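# Inverse of contentid_from_path: turns a ContentID from the database back
# into a filesystem path. Illustrative examples:
#   file:///mnt/onboard/book.epub           -> <main prefix>/book.epub
#   file:///mnt/sd/book.epub (oncard=carda) -> <card prefix>/book.epub
#   <id> with MimeType application/x-kobo-epub+zip -> <main prefix>/.kobo/kepub/<id>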
path = ContentID
if oncard == 'cardb':
print 'path from_contentid cardb'
elif oncard == 'carda':
path = path.replace("file:///mnt/sd/", self._card_a_prefix)
# print "SD Card: " + path
else:
if ContentType == "6" and MimeType == 'Shortcover':
# This is a hack as the kobo files do not exist
# but the path is required to make a unique id
# for calibre's reference
path = self._main_prefix + path + '.kobo'
# print "Path: " + path
elif (ContentType == "6" or ContentType == "10") and MimeType == 'application/x-kobo-epub+zip':
if path.startswith("file:///mnt/onboard/"):
path = self._main_prefix + path.replace("file:///mnt/onboard/", '')
else:
path = self._main_prefix + '.kobo/kepub/' + path
# print "Internal: " + path
else:
# if path.startswith("file:///mnt/onboard/"):
path = path.replace("file:///mnt/onboard/", self._main_prefix)
path = path.replace("/mnt/onboard/", self._main_prefix)
# print "Internal: " + path
return path
def modify_database_check(self, function):
# Checks to see whether the database version is supported
# and whether the user has chosen to support the firmware version
if self.dbversion > self.supported_dbversion:
# Unsupported database
opts = self.settings()
if not opts.extra_customization[self.OPT_SUPPORT_NEWER_FIRMWARE]:
debug_print('The database has been upgraded past supported version')
self.report_progress(1.0, _('Removing books from device...'))
from calibre.devices.errors import UserFeedback
raise UserFeedback(_("Kobo database version unsupported - See details"),
_('Your Kobo is running an updated firmware/database version.'
' As calibre does not know about this updated firmware,'
' database editing is disabled, to prevent corruption.'
' You can still send books to your Kobo with calibre, '
' but deleting books and managing collections is disabled.'
' If you are willing to experiment and know how to reset'
' your Kobo to Factory defaults, you can override this'
' check by right clicking the device icon in calibre and'
' selecting "Configure this device" and then the '
' "Attempt to support newer firmware" option.'
' Doing so may require you to perform a factory reset of'
' your Kobo.') + ((
'\nDevice database version: %s.'
'\nDevice firmware version: %s') % (self.dbversion, self.fwversion))
, UserFeedback.WARN)
return False
else:
# The user chose to edit the database anyway
return True
else:
# Supported database version
return True
def get_file(self, path, *args, **kwargs):
tpath = self.munge_path(path)
extension = os.path.splitext(tpath)[1]
if extension == '.kobo':
from calibre.devices.errors import UserFeedback
raise UserFeedback(_("Not Implemented"),
_('".kobo" files do not exist on the device as books; '
'instead they are rows in the sqlite database. '
'Currently they cannot be exported or viewed.'),
UserFeedback.WARN)
return USBMS.get_file(self, path, *args, **kwargs)
@classmethod
def book_from_path(cls, prefix, lpath, title, authors, mime, date, ContentType, ImageID):
# debug_print("KOBO:book_from_path - title=%s"%title)
from calibre.ebooks.metadata import MetaInformation
if cls.settings().read_metadata or cls.MUST_READ_METADATA:
mi = cls.metadata_from_path(cls.normalize_path(os.path.join(prefix, lpath)))
else:
from calibre.ebooks.metadata.meta import metadata_from_filename
mi = metadata_from_filename(cls.normalize_path(os.path.basename(lpath)),
cls.build_template_regexp())
if mi is None:
mi = MetaInformation(os.path.splitext(os.path.basename(lpath))[0],
[_('Unknown')])
size = os.stat(cls.normalize_path(os.path.join(prefix, lpath))).st_size
book = cls.book_class(prefix, lpath, title, authors, mime, date, ContentType, ImageID, size=size, other=mi)
return book
def get_device_paths(self):
paths = {}
for prefix, path, source_id in [
('main', 'metadata.calibre', 0),
('card_a', 'metadata.calibre', 1),
('card_b', 'metadata.calibre', 2)
]:
prefix = getattr(self, '_%s_prefix'%prefix)
if prefix is not None and os.path.exists(prefix):
paths[source_id] = os.path.join(prefix, *(path.split('/')))
return paths
def reset_readstatus(self, connection, oncard):
cursor = connection.cursor()
# Reset Im_Reading list in the database
if oncard == 'carda':
query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ContentID like \'file:///mnt/sd/%\''
elif oncard != 'carda' and oncard != 'cardb':
query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ContentID not like \'file:///mnt/sd/%\''
try:
cursor.execute(query)
except:
debug_print(' Database Exception: Unable to reset ReadStatus list')
raise
else:
connection.commit()
# debug_print(' Commit: Reset ReadStatus list')
cursor.close()
def set_readstatus(self, connection, ContentID, ReadStatus):
cursor = connection.cursor()
t = (ContentID,)
cursor.execute('select DateLastRead from Content where BookID is Null and ContentID = ?', t)
result = cursor.fetchone()
if result is None:
datelastread = '1970-01-01T00:00:00'
else:
datelastread = result[0] if result[0] is not None else '1970-01-01T00:00:00'
t = (ReadStatus,datelastread,ContentID,)
try:
cursor.execute('update content set ReadStatus=?,FirstTimeReading=\'false\',DateLastRead=? where BookID is Null and ContentID = ?', t)
except:
debug_print('    Database Exception: Unable to update ReadStatus')
raise
else:
connection.commit()
# debug_print(' Commit: Setting ReadStatus List')
cursor.close()
def reset_favouritesindex(self, connection, oncard):
# Reset FavouritesIndex list in the database
if oncard == 'carda':
query= 'update content set FavouritesIndex=-1 where BookID is Null and ContentID like \'file:///mnt/sd/%\''
elif oncard != 'carda' and oncard != 'cardb':
query= 'update content set FavouritesIndex=-1 where BookID is Null and ContentID not like \'file:///mnt/sd/%\''
cursor = connection.cursor()
try:
cursor.execute(query)
except Exception as e:
debug_print(' Database Exception: Unable to reset Shortlist list')
if 'no such column' not in str(e):
raise
else:
connection.commit()
# debug_print(' Commit: Reset FavouritesIndex list')
def set_favouritesindex(self, connection, ContentID):
cursor = connection.cursor()
t = (ContentID,)
try:
cursor.execute('update content set FavouritesIndex=1 where BookID is Null and ContentID = ?', t)
except Exception as e:
debug_print('    Database Exception: Unable to set book as Shortlist')
if 'no such column' not in str(e):
raise
else:
connection.commit()
# debug_print(' Commit: Set FavouritesIndex')
def update_device_database_collections(self, booklists, collections_attributes, oncard):
debug_print("Kobo:update_device_database_collections - oncard='%s'"%oncard)
if self.modify_database_check("update_device_database_collections") == False:
return
# Only process categories in this list
supportedcategories = {
"Im_Reading":1,
"Read":2,
"Closed":3,
"Shortlist":4,
# "Preview":99, # Unsupported as we don't want to change it
}
# Define lists for the ReadStatus
readstatuslist = {
"Im_Reading":1,
"Read":2,
"Closed":3,
}
accessibilitylist = {
"Preview":6,
"Recommendation":4,
}
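# supportedcategories lists the only collection names acted on below;
# readstatuslist maps a category to the ReadStatus value written to the
# database, while accessibilitylist entries are recognised but deliberately
# left untouched.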
# debug_print('Starting update_device_database_collections', collections_attributes)
# Force collections_attributes to be 'tags' as no other is currently supported
# debug_print('KOBO: overriding the provided collections_attributes:', collections_attributes)
collections_attributes = ['tags']
collections = booklists.get_collections(collections_attributes)
# debug_print('Kobo:update_device_database_collections - Collections:', collections)
# Create a connection to the sqlite database
# Needs to be outside books collection as in the case of removing
# the last book from the collection the list of books is empty
# and the removal of the last book would not occur
import sqlite3 as sqlite
with closing(sqlite.connect(self.normalize_path(self._main_prefix +
'.kobo/KoboReader.sqlite'))) as connection:
# return bytestrings if the content cannot be decoded as unicode
connection.text_factory = lambda x: unicode(x, "utf-8", "ignore")
if collections:
# Need to reset the collections outside the particular loops
# otherwise the last item will not be removed
self.reset_readstatus(connection, oncard)
if self.dbversion >= 14:
self.reset_favouritesindex(connection, oncard)
# Process any collections that exist
for category, books in collections.items():
if category in supportedcategories:
# debug_print("Category: ", category, " id = ", readstatuslist.get(category))
for book in books:
# debug_print(' Title:', book.title, 'category: ', category)
if category not in book.device_collections:
book.device_collections.append(category)
extension = os.path.splitext(book.path)[1]
ContentType = self.get_content_type_from_extension(extension) if extension != '' else self.get_content_type_from_path(book.path)
ContentID = self.contentid_from_path(book.path, ContentType)
if category in readstatuslist.keys():
# Manage ReadStatus
self.set_readstatus(connection, ContentID, readstatuslist.get(category))
elif category == 'Shortlist' and self.dbversion >= 14:
# Manage FavouritesIndex/Shortlist
self.set_favouritesindex(connection, ContentID)
elif category in accessibilitylist.keys():
# Do not manage the Accessibility List
pass
else: # No collections
# Since no collections exist the ReadStatus needs to be reset to 0 (Unread)
debug_print("No Collections - reseting ReadStatus")
self.reset_readstatus(connection, oncard)
if self.dbversion >= 14:
debug_print("No Collections - reseting FavouritesIndex")
self.reset_favouritesindex(connection, oncard)
# debug_print('Finished update_device_database_collections', collections_attributes)
def get_collections_attributes(self):
collections = []
opts = self.settings()
if opts.extra_customization and len(opts.extra_customization[self.OPT_COLLECTIONS]) > 0:
collections = [x.lower().strip() for x in opts.extra_customization[self.OPT_COLLECTIONS].split(',')]
return collections
def sync_booklists(self, booklists, end_session=True):
debug_print('KOBO:sync_booklists - start')
paths = self.get_device_paths()
blists = {}
for i in paths:
try:
if booklists[i] is not None:
#debug_print('Booklist: ', i)
blists[i] = booklists[i]
except IndexError:
pass
collections = self.get_collections_attributes()
#debug_print('KOBO: collection fields:', collections)
for i, blist in blists.items():
if i == 0:
oncard = 'main'
else:
oncard = 'carda'
self.update_device_database_collections(blist, collections, oncard)
USBMS.sync_booklists(self, booklists, end_session=end_session)
debug_print('KOBO:sync_booklists - end')
def rebuild_collections(self, booklist, oncard):
collections_attributes = []
self.update_device_database_collections(booklist, collections_attributes, oncard)
def upload_cover(self, path, filename, metadata, filepath):
'''
Upload the book cover to the device.
:param path: The full path to the directory where the associated book is located.
:param filename: The name of the book file without the extension.
:param metadata: metadata belonging to the book. Use metadata.thumbnail
for the cover.
:param filepath: The full path to the ebook file.
'''
opts = self.settings()
if not opts.extra_customization[self.OPT_UPLOAD_COVERS]:
# Building thumbnails disabled
debug_print('KOBO: not uploading cover')
return
if not opts.extra_customization[self.OPT_UPLOAD_GRAYSCALE_COVERS]:
uploadgrayscale = False
else:
uploadgrayscale = True
debug_print('KOBO: uploading cover')
try:
self._upload_cover(path, filename, metadata, filepath, uploadgrayscale)
except:
debug_print('FAILED to upload cover', filepath)
def _upload_cover(self, path, filename, metadata, filepath, uploadgrayscale):
from calibre.utils.magick.draw import save_cover_data_to
if metadata.cover:
cover = self.normalize_path(metadata.cover.replace('/', os.sep))
if os.path.exists(cover):
# Get ContentID for Selected Book
extension = os.path.splitext(filepath)[1]
ContentType = self.get_content_type_from_extension(extension) if extension != '' else self.get_content_type_from_path(filepath)
ContentID = self.contentid_from_path(filepath, ContentType)
import sqlite3 as sqlite
with closing(sqlite.connect(self.normalize_path(self._main_prefix +
'.kobo/KoboReader.sqlite'))) as connection:
# return bytestrings if the content cannot be decoded as unicode
connection.text_factory = lambda x: unicode(x, "utf-8", "ignore")
cursor = connection.cursor()
t = (ContentID,)
cursor.execute('select ImageId from Content where BookID is Null and ContentID = ?', t)
result = cursor.fetchone()
if result is None:
debug_print("No rows exist in the database - cannot upload")
return
else:
ImageID = result[0]
# debug_print("ImageId: ", result[0])
cursor.close()
if ImageID != None:
path_prefix = '.kobo/images/'
path = self._main_prefix + path_prefix + ImageID
file_endings = {' - iPhoneThumbnail.parsed':(103,150),
' - bbMediumGridList.parsed':(93,135),
' - NickelBookCover.parsed':(500,725),
' - N3_LIBRARY_FULL.parsed':(355,530),
' - N3_LIBRARY_GRID.parsed':(149,233),
' - N3_LIBRARY_LIST.parsed':(60,90),
' - N3_FULL.parsed':(600,800),
' - N3_SOCIAL_CURRENTREAD.parsed':(120,186)}
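# Each ending maps to the (width, height) the cover is resized to for that
# cached variant before being written back to the device.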
for ending, resize in file_endings.items():
fpath = path + ending
fpath = self.normalize_path(fpath.replace('/', os.sep))
if os.path.exists(fpath):
with open(cover, 'rb') as f:
data = f.read()
# Return the data resized and in Grayscale if
# required
data = save_cover_data_to(data, 'dummy.jpg',
grayscale=uploadgrayscale,
resize_to=resize, return_data=True)
with open(fpath, 'wb') as f:
f.write(data)
fsync(f)
else:
debug_print("ImageID could not be retreived from the database")
def prepare_addable_books(self, paths):
'''
The Kobo supports an encrypted epub referred to as a kepub.
Unfortunately Kobo decided to put these files on the device
with no file extension. I just hope that decision causes
them as much grief as it does me :-)
This has to make a temporary copy of the book files with an
epub extension to allow calibre's normal processing to
deal with the file appropriately.
'''
for idx, path in enumerate(paths):
if path.find('kepub') >= 0:
with closing(open(path, 'rb')) as r:
tf = PersistentTemporaryFile(suffix='.epub')
shutil.copyfileobj(r, tf)
# tf.write(r.read())
paths[idx] = tf.name
return paths
def create_annotations_path(self, mdata, device_path=None):
if device_path:
return device_path
return USBMS.create_annotations_path(self, mdata)
def get_annotations(self, path_map):
from calibre.devices.kobo.bookmark import Bookmark
EPUB_FORMATS = [u'epub']
epub_formats = set(EPUB_FORMATS)
def get_storage():
storage = []
if self._main_prefix:
storage.append(os.path.join(self._main_prefix, self.EBOOK_DIR_MAIN))
if self._card_a_prefix:
storage.append(os.path.join(self._card_a_prefix, self.EBOOK_DIR_CARD_A))
if self._card_b_prefix:
storage.append(os.path.join(self._card_b_prefix, self.EBOOK_DIR_CARD_B))
return storage
def resolve_bookmark_paths(storage, path_map):
pop_list = []
book_ext = {}
for id in path_map:
file_fmts = set()
for fmt in path_map[id]['fmts']:
file_fmts.add(fmt)
bookmark_extension = None
if file_fmts.intersection(epub_formats):
book_extension = list(file_fmts.intersection(epub_formats))[0]
bookmark_extension = 'epub'
if bookmark_extension:
for vol in storage:
bkmk_path = path_map[id]['path']
if os.path.exists(bkmk_path):
path_map[id] = bkmk_path
book_ext[id] = book_extension
break
else:
pop_list.append(id)
else:
pop_list.append(id)
# Remove non-existent bookmark templates
for id in pop_list:
path_map.pop(id)
return path_map, book_ext
storage = get_storage()
path_map, book_ext = resolve_bookmark_paths(storage, path_map)
bookmarked_books = {}
for id in path_map:
extension = os.path.splitext(path_map[id])[1]
ContentType = self.get_content_type_from_extension(extension) if extension != '' else self.get_content_type_from_path(path_map[id])
ContentID = self.contentid_from_path(path_map[id], ContentType)
debug_print("get_annotations - ContentID: ", ContentID, "ContentType: ", ContentType)
bookmark_ext = extension
db_path = self.normalize_path(self._main_prefix + '.kobo/KoboReader.sqlite')
myBookmark = Bookmark(db_path, ContentID, path_map[id], id, book_ext[id], bookmark_ext)
bookmarked_books[id] = self.UserAnnotation(type='kobo_bookmark', value=myBookmark)
# This returns as job.result in gui2.ui.annotations_fetched(self,job)
return bookmarked_books
def generate_annotation_html(self, bookmark):
import calendar
from calibre.ebooks.BeautifulSoup import BeautifulSoup, Tag, NavigableString
# Returns <div class="user_annotations"> ... </div>
#last_read_location = bookmark.last_read_location
#timestamp = bookmark.timestamp
percent_read = bookmark.percent_read
debug_print("Date: ", bookmark.last_read)
if bookmark.last_read is not None:
try:
last_read = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(calendar.timegm(time.strptime(bookmark.last_read, "%Y-%m-%dT%H:%M:%S"))))
except:
try:
last_read = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(calendar.timegm(time.strptime(bookmark.last_read, "%Y-%m-%dT%H:%M:%S.%f"))))
except:
last_read = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(calendar.timegm(time.strptime(bookmark.last_read, "%Y-%m-%dT%H:%M:%SZ"))))
else:
#self.datetime = time.gmtime()
last_read = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
# debug_print("Percent read: ", percent_read)
ka_soup = BeautifulSoup()
dtc = 0
divTag = Tag(ka_soup,'div')
divTag['class'] = 'user_annotations'
# Add the last-read location
spanTag = Tag(ka_soup, 'span')
spanTag['style'] = 'font-weight:normal'
if bookmark.book_format == 'epub':
spanTag.insert(0,NavigableString(
_("<hr /><b>Book Last Read:</b> %(time)s<br /><b>Percentage Read:</b> %(pr)d%%<hr />") %
dict(time=last_read,
# loc=last_read_location,
pr=percent_read)))
else:
spanTag.insert(0,NavigableString(
_("<hr /><b>Book Last Read:</b> %(time)s<br /><b>Percentage Read:</b> %(pr)d%%<hr />") %
dict(time=last_read,
# loc=last_read_location,
pr=percent_read)))
divTag.insert(dtc, spanTag)
dtc += 1
divTag.insert(dtc, Tag(ka_soup,'br'))
dtc += 1
if bookmark.user_notes:
user_notes = bookmark.user_notes
annotations = []
# Add the annotations sorted by location
for location in sorted(user_notes):
if user_notes[location]['type'] == 'Bookmark':
annotations.append(
_('<b>Chapter %(chapter)d:</b> %(chapter_title)s<br /><b>%(typ)s</b><br /><b>Chapter Progress:</b> %(chapter_progress)s%%<br />%(annotation)s<br /><hr />') %
dict(chapter=user_notes[location]['chapter'],
dl=user_notes[location]['displayed_location'],
typ=user_notes[location]['type'],
chapter_title=user_notes[location]['chapter_title'],
chapter_progress=user_notes[location]['chapter_progress'],
annotation=user_notes[location]['annotation'] if user_notes[location]['annotation'] is not None else ""))
elif user_notes[location]['type'] == 'Highlight':
annotations.append(
_('<b>Chapter %(chapter)d:</b> %(chapter_title)s<br /><b>%(typ)s</b><br /><b>Chapter Progress:</b> %(chapter_progress)s%%<br /><b>Highlight:</b> %(text)s<br /><hr />') %
dict(chapter=user_notes[location]['chapter'],
dl=user_notes[location]['displayed_location'],
typ=user_notes[location]['type'],
chapter_title=user_notes[location]['chapter_title'],
chapter_progress=user_notes[location]['chapter_progress'],
text=user_notes[location]['text']))
elif user_notes[location]['type'] == 'Annotation':
annotations.append(
_('<b>Chapter %(chapter)d:</b> %(chapter_title)s<br /><b>%(typ)s</b><br /><b>Chapter Progress:</b> %(chapter_progress)s%%<br /><b>Highlight:</b> %(text)s<br /><b>Notes:</b> %(annotation)s<br /><hr />') %
dict(chapter=user_notes[location]['chapter'],
dl=user_notes[location]['displayed_location'],
typ=user_notes[location]['type'],
chapter_title=user_notes[location]['chapter_title'],
chapter_progress=user_notes[location]['chapter_progress'],
text=user_notes[location]['text'],
annotation=user_notes[location]['annotation']))
else:
annotations.append(
_('<b>Chapter %(chapter)d:</b> %(chapter_title)s<br /><b>%(typ)s</b><br /><b>Chapter Progress:</b> %(chapter_progress)s%%<br /><b>Highlight:</b> %(text)s<br /><b>Notes:</b> %(annotation)s<br /><hr />') %
dict(chapter=user_notes[location]['chapter'],
dl=user_notes[location]['displayed_location'],
typ=user_notes[location]['type'],
chapter_title=user_notes[location]['chapter_title'],
chapter_progress=user_notes[location]['chapter_progress'],
text=user_notes[location]['text'],
annotation=user_notes[location]['annotation']))
for annotation in annotations:
divTag.insert(dtc, annotation)
dtc += 1
ka_soup.insert(0,divTag)
return ka_soup
def add_annotation_to_library(self, db, db_id, annotation):
from calibre.ebooks.BeautifulSoup import Tag
bm = annotation
ignore_tags = set(['Catalog', 'Clippings'])
if bm.type == 'kobo_bookmark':
mi = db.get_metadata(db_id, index_is_id=True)
debug_print("KOBO:add_annotation_to_library - Title: ", mi.title)
user_notes_soup = self.generate_annotation_html(bm.value)
if mi.comments:
a_offset = mi.comments.find('<div class="user_annotations">')
ad_offset = mi.comments.find('<hr class="annotations_divider" />')
if a_offset >= 0:
mi.comments = mi.comments[:a_offset]
if ad_offset >= 0:
mi.comments = mi.comments[:ad_offset]
if set(mi.tags).intersection(ignore_tags):
return
if mi.comments:
hrTag = Tag(user_notes_soup,'hr')
hrTag['class'] = 'annotations_divider'
user_notes_soup.insert(0, hrTag)
mi.comments += unicode(user_notes_soup.prettify())
else:
mi.comments = unicode(user_notes_soup.prettify())
# Update library comments
db.set_comment(db_id, mi.comments)
# Add bookmark file to db_id
# NOTE: As it is, this copied the book from the device back to the library. That meant it replaced the
# existing file. Taking this out for that reason, but some books have an ANNOT file that could be
# copied.
# db.add_format_with_hooks(db_id, bm.value.bookmark_extension,
# bm.value.path, index_is_id=True)
class KOBOTOUCH(KOBO):
name = 'KoboTouch'
gui_name = 'Kobo Touch/Glo/Mini/Aura HD'
author = 'David Forrester'
description = 'Communicate with the Kobo Touch, Glo, Mini and Aura HD ereaders. Based on the existing Kobo driver by %s.' % (KOBO.author)
# icon = I('devices/kobotouch.jpg')
supported_dbversion = 120
min_supported_dbversion = 53
min_dbversion_series = 65
min_dbversion_externalid = 65
min_dbversion_archive = 71
min_dbversion_images_on_sdcard = 77
min_dbversion_activity = 77
min_dbversion_keywords = 82
max_supported_fwversion = (3, 15, 1)
# The following document firmware versions where new functions or devices were added.
# Not all are used, but this seems a good place to record them.
min_fwversion_shelves = (2, 0, 0)
min_fwversion_images_on_sdcard = (2, 4, 1)
min_fwversion_images_tree = (2, 9, 0) # Cover images stored in tree under .kobo-images
min_aurah2o_fwversion = (3, 7, 0)
min_reviews_fwversion = (3, 12, 0)
min_glohd_fwversion = (3, 14, 0)
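# These dbversion/firmware thresholds presumably back the supports_*() and
# has_*() feature checks used later in the driver (e.g. supports_series(),
# supports_kobo_archive(), has_externalid()).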
has_kepubs = True
booklist_class = KTCollectionsBookList
book_class = Book
MAX_PATH_LEN = 185 # 250 - (len(" - N3_LIBRARY_SHELF.parsed") + len("F:\.kobo\images\"))
KOBO_EXTRA_CSSFILE = 'kobo_extra.css'
EXTRA_CUSTOMIZATION_MESSAGE = [
_('The Kobo from firmware V2.0.0 supports bookshelves.'
' These are created on the Kobo. ' +
'Specify a tags type column for automatic management.'),
_('Create Bookshelves') +
':::'+_('Create new bookshelves on the Kobo if they do not exist. This is only for firmware V2.0.0 or later.'),
_('Delete Empty Bookshelves') +
':::'+_('Delete any empty bookshelves from the Kobo when syncing is finished. This is only for firmware V2.0.0 or later.'),
_('Upload covers for books') +
':::'+_('Upload cover images from the calibre library when sending books to the device.'),
_('Upload Black and White Covers'),
_('Keep cover aspect ratio') +
':::'+_('When uploading covers, do not change the aspect ratio when resizing for the device.'
' This is for firmware versions 2.3.1 and later.'),
_('Show archived books') +
':::'+_('Archived books are listed on the device but need to be downloaded to read.'
' Use this option to show these books and match them with books in the calibre library.'),
_('Show Previews') +
_('Kobo previews are included on the Touch and some other versions.'
' By default they are no longer displayed as there is no good reason to '
'see them. Enable if you wish to see/delete them.'),
_('Show Recommendations') +
':::'+_('Kobo shows recommendations on the device. In some cases these have '
'files but in other cases they are just pointers to the web site to buy. '
'Enable if you wish to see/delete them.'),
_('Set Series information') +
':::'+_('The book lists on the Kobo devices can display series information. '
'This is not read by the device from the sideloaded books. '
'Series information can only be added to the device after the book has been processed by the device. '
'Enable if you wish to set series information.'),
_('Modify CSS') +
':::'+_('This allows addition of user CSS rules and removal of some CSS. '
'When sending a book, the driver adds the contents of {0} to all stylesheets in the ePub. '
'This file is searched for in the root directory of the main memory of the device. '
'As well as this, if the file contains settings for the "orphans" or "widows", '
'these are removed for all styles in the original stylesheet.').format(KOBO_EXTRA_CSSFILE),
_('Attempt to support newer firmware') +
':::'+_('Kobo routinely updates the firmware and the '
'database version. With this option Calibre will attempt '
'to perform full read-write functionality - Here be Dragons!! '
'Enable only if you are comfortable with restoring your kobo '
'to factory defaults and testing software. '
'This driver supports firmware V2.x.x and DBVersion up to ') + unicode(supported_dbversion),
_('Title to test when debugging') +
':::'+_('Part of title of a book that can be used when doing some tests for debugging. '
'The test is to see if the string is contained in the title of a book. '
'The better the match, the less extraneous output.'),
]
EXTRA_CUSTOMIZATION_DEFAULT = [
u'',
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
u''
]
OPT_COLLECTIONS = 0
OPT_CREATE_BOOKSHELVES = 1
OPT_DELETE_BOOKSHELVES = 2
OPT_UPLOAD_COVERS = 3
OPT_UPLOAD_GRAYSCALE_COVERS = 4
OPT_KEEP_COVER_ASPECT_RATIO = 5
OPT_SHOW_ARCHIVED_BOOK_RECORDS = 6
OPT_SHOW_PREVIEWS = 7
OPT_SHOW_RECOMMENDATIONS = 8
OPT_UPDATE_SERIES_DETAILS = 9
OPT_MODIFY_CSS = 10
OPT_SUPPORT_NEWER_FIRMWARE = 11
OPT_DEBUGGING_TITLE = 12
opts = None
TIMESTAMP_STRING = "%Y-%m-%dT%H:%M:%SZ"
AURA_PRODUCT_ID = [0x4203]
AURA_HD_PRODUCT_ID = [0x4193]
AURA_H2O_PRODUCT_ID = [0x4213]
GLO_PRODUCT_ID = [0x4173]
GLO_HD_PRODUCT_ID = [0x4223]
MINI_PRODUCT_ID = [0x4183]
TOUCH_PRODUCT_ID = [0x4163]
PRODUCT_ID = AURA_PRODUCT_ID + AURA_HD_PRODUCT_ID + AURA_H2O_PRODUCT_ID + \
GLO_PRODUCT_ID + GLO_HD_PRODUCT_ID + \
MINI_PRODUCT_ID + TOUCH_PRODUCT_ID
BCD = [0x0110, 0x0326]
# Image file name endings. Each entry is made up of: image size, min_dbversion, max_dbversion, isFullSize.
# Note: "200" is simply a number much larger than any current version; it is a lazy
# way of making the range open ended.
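# Example (illustrative): ' - N3_FULL.parsed':[(600,800),0, 200,True,] means a
# 600x800 full-size image, valid from dbversion 0 up to the open-ended 200.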
COVER_FILE_ENDINGS = {
' - N3_FULL.parsed':[(600,800),0, 200,True,], # Used for screensaver, home screen
' - N3_LIBRARY_FULL.parsed':[(355,473),0, 200,False,], # Used for Details screen before FW2.8.1, then for current book tile on home screen
' - N3_LIBRARY_GRID.parsed':[(149,198),0, 200,False,], # Used for library lists
' - N3_LIBRARY_LIST.parsed':[(60,90),0, 53,False,],
' - AndroidBookLoadTablet_Aspect.parsed':[(355,473), 82, 200,False,], # Used for Details screen from FW2.8.1
# ' - N3_LIBRARY_SHELF.parsed': [(40,60),0, 52,],
}
GLO_COVER_FILE_ENDINGS = { # Glo and Aura share resolution, so the image sizes should be the same.
' - N3_FULL.parsed':[(758,1024),0, 200,True,], # Used for screensaver, home screen
' - N3_LIBRARY_FULL.parsed':[(355,479),0, 200,False,], # Used for Details screen before FW2.8.1, then for current book tile on home screen
' - N3_LIBRARY_GRID.parsed':[(149,201),0, 200,False,], # Used for library lists
' - AndroidBookLoadTablet_Aspect.parsed':[(355,479), 88, 200,False,], # Used for Details screen from FW2.8.1
}
AURA_HD_COVER_FILE_ENDINGS = {
' - N3_FULL.parsed': [(1080,1440), 0, 200,True,], # Used for screensaver, home screen
' - N3_LIBRARY_FULL.parsed':[(355, 471), 0, 200,False,], # Used for Details screen before FW2.8.1, then for current book tile on home screen
' - N3_LIBRARY_GRID.parsed':[(149, 198), 0, 200,False,], # Used for library lists
' - AndroidBookLoadTablet_Aspect.parsed':[(355, 471), 88, 200,False,], # Used for Details screen from FW2.8.1
}
# Following are the sizes used with pre2.1.4 firmware
# COVER_FILE_ENDINGS = {
# ' - N3_LIBRARY_FULL.parsed':[(355,530),0, 99,], # Used for Details screen
# ' - N3_LIBRARY_FULL.parsed':[(600,800),0, 99,],
# ' - N3_LIBRARY_GRID.parsed':[(149,233),0, 99,], # Used for library lists
# ' - N3_LIBRARY_LIST.parsed':[(60,90),0, 53,],
# ' - N3_LIBRARY_SHELF.parsed': [(40,60),0, 52,],
# ' - N3_FULL.parsed':[(600,800),0, 99,], # Used for screensaver if "Full screen" is checked.
# }
def initialize(self):
super(KOBOTOUCH, self).initialize()
self.bookshelvelist = []
def get_device_information(self, end_session=True):
self.set_device_name()
return super(KOBOTOUCH, self).get_device_information(end_session)
def books(self, oncard=None, end_session=True):
debug_print("KoboTouch:books - oncard='%s'"%oncard)
from calibre.ebooks.metadata.meta import path_to_ext
dummy_bl = self.booklist_class(None, None, None)
if oncard == 'carda' and not self._card_a_prefix:
self.report_progress(1.0, _('Getting list of books on device...'))
debug_print("KoboTouch:books - Asked to process 'carda', but do not have one!")
return dummy_bl
elif oncard == 'cardb' and not self._card_b_prefix:
self.report_progress(1.0, _('Getting list of books on device...'))
debug_print("KoboTouch:books - Asked to process 'cardb', but do not have one!")
return dummy_bl
elif oncard and oncard != 'carda' and oncard != 'cardb':
self.report_progress(1.0, _('Getting list of books on device...'))
debug_print("KoboTouch:books - unknown card")
return dummy_bl
prefix = self._card_a_prefix if oncard == 'carda' else \
self._card_b_prefix if oncard == 'cardb' \
else self._main_prefix
debug_print("KoboTouch:books - oncard='%s', prefix='%s'"%(oncard, prefix))
# Determine the firmware version
try:
with open(self.normalize_path(self._main_prefix + '.kobo/version'), 'rb') as f:
self.fwversion = f.readline().split(',')[2]
self.fwversion = tuple((int(x) for x in self.fwversion.split('.')))
except:
self.fwversion = (0,0,0)
debug_print('Kobo device: %s' % self.gui_name)
debug_print('Version of driver:', self.version, 'Has kepubs:', self.has_kepubs)
debug_print('Version of firmware:', self.fwversion, 'Has kepubs:', self.has_kepubs)
debug_print('Firmware supports cover image tree:', self.fwversion >= self.min_fwversion_images_tree)
self.booklist_class.rebuild_collections = self.rebuild_collections
# get the metadata cache
bl = self.booklist_class(oncard, prefix, self.settings)
opts = self.settings()
debug_print("KoboTouch:books - opts.extra_customization=", opts.extra_customization)
debug_print("KoboTouch:books - prefs['manage_device_metadata']=", prefs['manage_device_metadata'])
if opts.extra_customization:
debugging_title = opts.extra_customization[self.OPT_DEBUGGING_TITLE]
debug_print("KoboTouch:books - set_debugging_title to '%s'" % debugging_title)
bl.set_debugging_title(debugging_title)
debug_print("KoboTouch:books - length bl=%d"%len(bl))
need_sync = self.parse_metadata_cache(bl, prefix, self.METADATA_CACHE)
debug_print("KoboTouch:books - length bl after sync=%d"%len(bl))
# make a dict cache of paths so the lookup in the loop below is faster.
bl_cache = {}
for idx,b in enumerate(bl):
bl_cache[b.lpath] = idx
def update_booklist(prefix, path, title, authors, mime, date, ContentID, ContentType, ImageID, readstatus, MimeType, expired, favouritesindex, accessibility, isdownloaded, series, seriesnumber, userid, bookshelves):
show_debug = self.is_debugging_title(title)
# show_debug = authors == 'L. Frank Baum'
if show_debug:
debug_print("KoboTouch:update_booklist - title='%s'"%title, "ContentType=%s"%ContentType, "isdownloaded=", isdownloaded)
debug_print(
" prefix=%s, mime=%s, date=%s, readstatus=%d, MimeType=%s, expired=%d, favouritesindex=%d, accessibility=%d, isdownloaded=%s"%
(prefix, mime, date, readstatus, MimeType, expired, favouritesindex, accessibility, isdownloaded,))
changed = False
try:
lpath = path.partition(self.normalize_path(prefix))[2]
if lpath.startswith(os.sep):
lpath = lpath[len(os.sep):]
lpath = lpath.replace('\\', '/')
# debug_print("LPATH: ", lpath, " - Title: " , title)
playlist_map = {}
if lpath not in playlist_map:
playlist_map[lpath] = []
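# playlist_map collects, per lpath, the device collection names derived from
# this row: reading status, Expired/Shortlist/Deleted/Archived markers and,
# when shelves are allowed, the Kobo bookshelves the book sits on.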
allow_shelves = True
if readstatus == 1:
playlist_map[lpath].append('Im_Reading')
elif readstatus == 2:
playlist_map[lpath].append('Read')
elif readstatus == 3:
playlist_map[lpath].append('Closed')
# Related to a bug in the Kobo firmware that leaves an expired row for deleted books
# this shows an expired Collection so the user can decide to delete the book
if expired == 3:
playlist_map[lpath].append('Expired')
allow_shelves = False
# A SHORTLIST is supported on the Touch, but the data field is there on most earlier models
if favouritesindex == 1:
playlist_map[lpath].append('Shortlist')
# The following is in flux:
# - FW2.0.0, DBVersion 53,55 accessibility == 1
# - FW2.1.2 beta, DBVersion == 56, accessibility == -1:
# So, the following should be OK
if isdownloaded == 'false':
if (self.dbversion < 56 and accessibility <= 1) or (self.dbversion >= 56 and accessibility == -1):
playlist_map[lpath].append('Deleted')
allow_shelves = False
if show_debug:
debug_print("KoboTouch:update_booklist - have a deleted book")
elif self.supports_kobo_archive() and (accessibility == 1 or accessibility == 2):
playlist_map[lpath].append('Archived')
allow_shelves = True
# Label Previews and Recommendations
if accessibility == 6:
if userid == '':
playlist_map[lpath].append('Recommendation')
allow_shelves = False
else:
playlist_map[lpath].append('Preview')
allow_shelves = False
elif accessibility == 4: # Pre 2.x.x firmware
playlist_map[lpath].append('Recommendation')
allow_shelves = False
kobo_collections = playlist_map[lpath][:]
if allow_shelves:
# debug_print('KoboTouch:update_booklist - allowing shelves - title=%s' % title)
if len(bookshelves) > 0:
playlist_map[lpath].extend(bookshelves)
if show_debug:
debug_print('KoboTouch:update_booklist - playlist_map=', playlist_map)
path = self.normalize_path(path)
# print "Normalized FileName: " + path
idx = bl_cache.get(lpath, None)
if idx is not None: # and not (accessibility == 1 and isdownloaded == 'false'):
if show_debug:
self.debug_index = idx
debug_print("KoboTouch:update_booklist - idx=%d"%idx)
debug_print("KoboTouch:update_booklist - lpath=%s"%lpath)
debug_print('KoboTouch:update_booklist - bl[idx].device_collections=', bl[idx].device_collections)
debug_print('KoboTouch:update_booklist - playlist_map=', playlist_map)
debug_print('KoboTouch:update_booklist - bookshelves=', bookshelves)
debug_print('KoboTouch:update_booklist - kobo_collections=', kobo_collections)
debug_print('KoboTouch:update_booklist - series="%s"' % bl[idx].series)
debug_print('KoboTouch:update_booklist - the book=', bl[idx])
debug_print('KoboTouch:update_booklist - the authors=', bl[idx].authors)
debug_print('KoboTouch:update_booklist - application_id=', bl[idx].application_id)
bl_cache[lpath] = None
if ImageID is not None:
imagename = self.imagefilename_from_imageID(prefix, ImageID)
if imagename is not None:
bl[idx].thumbnail = ImageWrapper(imagename)
if (ContentType == '6' and MimeType != 'application/x-kobo-epub+zip'):
if os.path.exists(self.normalize_path(os.path.join(prefix, lpath))):
if self.update_metadata_item(bl[idx]):
# print 'update_metadata_item returned true'
changed = True
else:
debug_print(" Strange: The file: ", prefix, lpath, " does not exist!")
debug_print("KoboTouch:update_booklist - book size=", bl[idx].size)
if show_debug:
debug_print("KoboTouch:update_booklist - ContentID='%s'"%ContentID)
bl[idx].contentID = ContentID
bl[idx].kobo_series = series
bl[idx].kobo_series_number = seriesnumber
bl[idx].can_put_on_shelves = allow_shelves
if lpath in playlist_map:
bl[idx].device_collections = playlist_map.get(lpath,[])
bl[idx].current_shelves = bookshelves
bl[idx].kobo_collections = kobo_collections
if show_debug:
debug_print('KoboTouch:update_booklist - updated bl[idx].device_collections=', bl[idx].device_collections)
debug_print('KoboTouch:update_booklist - playlist_map=', playlist_map, 'changed=', changed)
# debug_print('KoboTouch:update_booklist - book=', bl[idx])
debug_print("KoboTouch:update_booklist - book class=%s"%bl[idx].__class__)
debug_print("KoboTouch:update_booklist - book title=%s"%bl[idx].title)
else:
if show_debug:
debug_print('KoboTouch:update_booklist - idx is none')
try:
if os.path.exists(self.normalize_path(os.path.join(prefix, lpath))):
book = self.book_from_path(prefix, lpath, title, authors, mime, date, ContentType, ImageID)
else:
if isdownloaded == 'true': # A recommendation or preview is OK to not have a file
debug_print(" Strange: The file: ", prefix, lpath, " does not exist!")
title = "FILE MISSING: " + title
book = self.book_class(prefix, lpath, title, authors, mime, date, ContentType, ImageID, size=0)
if show_debug:
debug_print('KoboTouch:update_booklist - book file does not exist. ContentID="%s"'%ContentID)
except Exception as e:
debug_print("KoboTouch:update_booklist - exception creating book: '%s'"%str(e))
debug_print(" prefix: ", prefix, "lpath: ", lpath, "title: ", title, "authors: ", authors,
"mime: ", mime, "date: ", date, "ContentType: ", ContentType, "ImageID: ", ImageID)
raise
if show_debug:
debug_print('KoboTouch:update_booklist - class:', book.__class__)
# debug_print(' resolution:', book.__class__.__mro__)
debug_print(" contentid: '%s'"%book.contentID)
debug_print(" title:'%s'"%book.title)
debug_print(" the book:", book)
debug_print(" author_sort:'%s'"%book.author_sort)
debug_print(" bookshelves:", bookshelves)
debug_print(" kobo_collections:", kobo_collections)
# print 'Update booklist'
book.device_collections = playlist_map.get(lpath,[]) # if lpath in playlist_map else []
book.current_shelves = bookshelves
book.kobo_collections = kobo_collections
book.contentID = ContentID
book.kobo_series = series
book.kobo_series_number = seriesnumber
book.can_put_on_shelves = allow_shelves
# debug_print('KoboTouch:update_booklist - title=', title, 'book.device_collections', book.device_collections)
if bl.add_book(book, replace_metadata=False):
changed = True
if show_debug:
debug_print(' book.device_collections', book.device_collections)
debug_print(' book.title', book.title)
except: # Probably a path encoding error
import traceback
traceback.print_exc()
return changed
def get_bookshelvesforbook(connection, ContentID):
# debug_print("KoboTouch:get_bookshelvesforbook - " + ContentID)
bookshelves = []
if not self.supports_bookshelves():
return bookshelves
cursor = connection.cursor()
query = "select ShelfName " \
"from ShelfContent " \
"where ContentId = ? " \
"and _IsDeleted = 'false' " \
"and ShelfName is not null" # This should never be nulll, but it is protection against an error cause by a sync to the Kobo server
values = (ContentID, )
cursor.execute(query, values)
for i, row in enumerate(cursor):
bookshelves.append(row[0])
cursor.close()
# debug_print("KoboTouch:get_bookshelvesforbook - count bookshelves=" + unicode(count_bookshelves))
return bookshelves
self.debug_index = 0
import sqlite3 as sqlite
with closing(sqlite.connect(self.device_database_path())) as connection:
debug_print("KoboTouch:books - reading device database")
# return bytestrings if the content cannot be decoded as unicode
connection.text_factory = lambda x: unicode(x, "utf-8", "ignore")
cursor = connection.cursor()
cursor.execute('select version from dbversion')
result = cursor.fetchone()
self.dbversion = result[0]
debug_print("Database Version=%d"%self.dbversion)
self.bookshelvelist = self.get_bookshelflist(connection)
debug_print("KoboTouch:books - shelf list:", self.bookshelvelist)
opts = self.settings()
columns = 'Title, Attribution, DateCreated, ContentID, MimeType, ContentType, ImageID, ReadStatus'
if self.dbversion >= 16:
columns += ', ___ExpirationStatus, FavouritesIndex, Accessibility'
else:
columns += ', "-1" as ___ExpirationStatus, "-1" as FavouritesIndex, "-1" as Accessibility'
if self.dbversion >= 33:
columns += ', IsDownloaded'
else:
columns += ', "1" as IsDownloaded'
if self.supports_series():
columns += ", Series, SeriesNumber, ___UserID, ExternalId"
else:
columns += ', null as Series, null as SeriesNumber, ___UserID, null as ExternalId'
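# Build a WHERE clause appropriate to the database version. 'BookID is Null'
# restricts the query to book records (chapter rows carry a BookID); the
# remaining conditions hide expired, preview and recommendation rows unless
# the corresponding driver options ask for them.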
where_clause = ''
if self.supports_kobo_archive():
where_clause = (" where BookID is Null "
" and ((Accessibility = -1 and IsDownloaded in ('true', 1 )) or (Accessibility in (1,2) %(expiry)s) "
" %(previews)s %(recomendations)s )"
" and not ((___ExpirationStatus=3 or ___ExpirationStatus is Null) and ContentType = 6)") % \
dict(
expiry="" if opts.extra_customization[self.OPT_SHOW_ARCHIVED_BOOK_RECORDS] else "and IsDownloaded in ('true', 1)",
previews=" or (Accessibility in (6) and ___UserID <> '')" if opts.extra_customization[self.OPT_SHOW_PREVIEWS] else "",
recomendations=" or (Accessibility in (-1, 4, 6) and ___UserId = '')" if opts.extra_customization[
self.OPT_SHOW_RECOMMENDATIONS] else ""
)
elif self.supports_series():
where_clause = (" where BookID is Null "
" and ((Accessibility = -1 and IsDownloaded in ('true', 1)) or (Accessibility in (1,2)) %(previews)s %(recomendations)s )"
" and not ((___ExpirationStatus=3 or ___ExpirationStatus is Null) %(expiry)s)") % \
dict(
expiry=" and ContentType = 6" if opts.extra_customization[self.OPT_SHOW_ARCHIVED_BOOK_RECORDS] else "",
previews=" or (Accessibility in (6) and ___UserID <> '')" if opts.extra_customization[self.OPT_SHOW_PREVIEWS] else "",
recomendations=" or (Accessibility in (-1, 4, 6) and ___UserId = '')" if opts.extra_customization[
self.OPT_SHOW_RECOMMENDATIONS] else ""
)
elif self.dbversion >= 33:
where_clause = (' where BookID is Null %(previews)s %(recomendations)s and not ((___ExpirationStatus=3 or ___ExpirationStatus is Null) %(expiry)s)') % \
dict(
expiry=' and ContentType = 6' if opts.extra_customization[self.OPT_SHOW_ARCHIVED_BOOK_RECORDS] else '',
previews=' and Accessibility <> 6' if opts.extra_customization[self.OPT_SHOW_PREVIEWS] == False else '',
recomendations=' and IsDownloaded in (\'true\', 1)' if opts.extra_customization[self.OPT_SHOW_RECOMMENDATIONS] == False else ''
)
elif self.dbversion >= 16:
where_clause = (' where BookID is Null '
'and not ((___ExpirationStatus=3 or ___ExpirationStatus is Null) %(expiry)s)') % \
dict(expiry=' and ContentType = 6' if opts.extra_customization[self.OPT_SHOW_ARCHIVED_BOOK_RECORDS] else '')
else:
where_clause = ' where BookID is Null'
# Note: The card condition should not need the contentId test for the SD
# card. But the ExternalId does not get set for sideloaded kepubs on the
# SD card.
card_condition = ''
if self.has_externalid():
card_condition = " AND (externalId IS NOT NULL AND externalId <> '' OR contentId LIKE 'file:///mnt/sd/%')" if oncard == 'carda' else " AND (externalId IS NULL OR externalId = '') AND contentId NOT LIKE 'file:///mnt/sd/%'"
else:
card_condition = " AND contentId LIKE 'file:///mnt/sd/%'" if oncard == 'carda' else " AND contentId NOT LIKE'file:///mnt/sd/%'"
query = 'SELECT ' + columns + ' FROM content ' + where_clause + card_condition
debug_print("KoboTouch:books - query=", query)
try:
cursor.execute(query)
except Exception as e:
err = str(e)
if not ('___ExpirationStatus' in err
or 'FavouritesIndex' in err
or 'Accessibility' in err
or 'IsDownloaded' in err
or 'Series' in err
or 'ExternalId' in err
):
raise
query= ('select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, '
'ImageID, ReadStatus, "-1" as ___ExpirationStatus, "-1" as '
'FavouritesIndex, "-1" as Accessibility, "1" as IsDownloaded, null as Series, null as SeriesNumber'
' from content where BookID is Null')
cursor.execute(query)
changed = False
for i, row in enumerate(cursor):
# self.report_progress((i+1) / float(numrows), _('Getting list of books on device...'))
show_debug = self.is_debugging_title(row[0])
if show_debug:
debug_print("KoboTouch:books - looping on database - row=%d" % i)
debug_print("KoboTouch:books - title='%s'"%row[0], "authors=", row[1])
debug_print("KoboTouch:books - row=", row)
if not hasattr(row[3], 'startswith') or row[3].lower().startswith("file:///usr/local/kobo/help/") or row[3].lower().startswith("/usr/local/kobo/help/"):
# These are internal to the Kobo device and do not exist
continue
externalId = None if row[15] and len(row[15]) == 0 else row[15]
path = self.path_from_contentid(row[3], row[5], row[4], oncard, externalId)
mime = mime_type_ext(path_to_ext(path)) if path.find('kepub') == -1 else 'application/x-kobo-epub+zip'
# debug_print("mime:", mime)
if show_debug:
debug_print("KoboTouch:books - path='%s'"%path, " ContentID='%s'"%row[3], " externalId=%s" % externalId)
bookshelves = get_bookshelvesforbook(connection, row[3])
prefix = self._card_a_prefix if oncard == 'carda' else self._main_prefix
changed = update_booklist(prefix, path, row[0], row[1], mime, row[2], row[3], row[5],
row[6], row[7], row[4], row[8], row[9], row[10], row[11],
row[12], row[13], row[14], bookshelves)
if changed:
need_sync = True
cursor.close()
if not prefs['manage_device_metadata'] == 'on_connect':
self.dump_bookshelves(connection)
else:
debug_print("KoboTouch:books - automatically managing metadata")
# Remove books that are no longer in the filesystem. Cache contains
# indices into the booklist if book not in filesystem, None otherwise
# Do the operation in reverse order so indices remain valid
for idx in sorted(bl_cache.itervalues(), reverse=True):
if idx is not None:
if not os.path.exists(self.normalize_path(os.path.join(prefix, bl[idx].lpath))):
need_sync = True
del bl[idx]
# else:
# debug_print("KoboTouch:books - Book in mtadata.calibre, on file system but not database - bl[idx].title:'%s'"%bl[idx].title)
# print "count found in cache: %d, count of files in metadata: %d, need_sync: %s" % \
# (len(bl_cache), len(bl), need_sync)
# Bypassing the KOBO sync_booklists as that does things we don't need to do
# Also forcing sync to see if this solves issues with updating shelves and matching books.
if need_sync or True: # self.count_found_in_bl != len(bl) or need_sync:
debug_print("KoboTouch:books - about to sync_booklists")
if oncard == 'cardb':
USBMS.sync_booklists(self, (None, None, bl))
elif oncard == 'carda':
USBMS.sync_booklists(self, (None, bl, None))
else:
USBMS.sync_booklists(self, (bl, None, None))
debug_print("KoboTouch:books - have done sync_booklists")
self.report_progress(1.0, _('Getting list of books on device...'))
debug_print("KoboTouch:books - end - oncard='%s'"%oncard)
return bl
def path_from_contentid(self, ContentID, ContentType, MimeType, oncard, externalId):
path = ContentID
if not externalId:
return super(KOBOTOUCH, self).path_from_contentid(ContentID, ContentType, MimeType, oncard)
if oncard == 'cardb':
print 'path from_contentid cardb'
else:
if (ContentType == "6" or ContentType == "10"): # and MimeType == 'application/x-kobo-epub+zip':
if path.startswith("file:///mnt/onboard/"):
path = self._main_prefix + path.replace("file:///mnt/onboard/", '')
elif path.startswith("file:///mnt/sd/"):
path = self._card_a_prefix + path.replace("file:///mnt/sd/", '')
elif externalId:
path = self._card_a_prefix + 'koboExtStorage/kepub/' + path
else:
path = self._main_prefix + '.kobo/kepub/' + path
else: # Should never get here, but, just in case...
# if path.startswith("file:///mnt/onboard/"):
path = path.replace("file:///mnt/onboard/", self._main_prefix)
path = path.replace("file:///mnt/sd/", self._card_a_prefix)
path = path.replace("/mnt/onboard/", self._main_prefix)
# print "Internal: " + path
return path
def imagefilename_from_imageID(self, prefix, ImageID):
show_debug = self.is_debugging_title(ImageID)
path = self.images_path(prefix, ImageID)
# path = self.normalize_path(path.replace('/', os.sep))
for ending, cover_options in self.cover_file_endings().items():
fpath = path + ending
if os.path.exists(fpath):
if show_debug:
debug_print("KoboTouch:imagefilename_from_imageID - have cover image fpath=%s" % (fpath))
return fpath
if show_debug:
debug_print("KoboTouch:imagefilename_from_imageID - no cover image found - ImageID=%s" % (ImageID))
return None
def get_extra_css(self):
extra_sheet = None
if self.modifying_css():
extra_css_path = os.path.join(self._main_prefix, self.KOBO_EXTRA_CSSFILE)
if os.path.exists(extra_css_path):
from cssutils import parseFile as cssparseFile
try:
extra_sheet = cssparseFile(extra_css_path)
debug_print("KoboTouch:get_extra_css: Using extra CSS in {0} ({1} rules)".format(extra_css_path, len(extra_sheet.cssRules)))
if len(extra_sheet.cssRules) == 0:
debug_print("KoboTouch:get_extra_css: Extra CSS file has no valid rules. CSS will not be modified.")
extra_sheet = None
except Exception as e:
debug_print("KoboTouch:get_extra_css: Problem parsing extra CSS file {0}".format(extra_css_path))
debug_print("KoboTouch:get_extra_css: Exception {0}".format(e))
return extra_sheet
def upload_books(self, files, names, on_card=None, end_session=True,
metadata=None):
debug_print('KoboTouch:upload_books - %d books'%(len(files)))
debug_print('KoboTouch:upload_books - files=', files)
if self.modifying_epub():
self.extra_sheet = self.get_extra_css()
i = 0
for file, n, mi in zip(files, names, metadata):
debug_print("KoboTouch:upload_books: Processing book: {0} by {1}".format(mi.title, " and ".join(mi.authors)))
debug_print("KoboTouch:upload_books: file=%s, name=%s" % (file, n))
self.report_progress(i / float(len(files)), "Processing book: {0} by {1}".format(mi.title, " and ".join(mi.authors)))
mi.kte_calibre_name = n
self._modify_epub(file, mi)
i += 1
self.report_progress(0, 'Working...')
result = super(KOBOTOUCH, self).upload_books(files, names, on_card, end_session, metadata)
# debug_print('KoboTouch:upload_books - result=', result)
if self.dbversion >= 53:
import sqlite3 as sqlite
try:
with closing(sqlite.connect(self.normalize_path(self._main_prefix +
'.kobo/KoboReader.sqlite'))) as connection:
connection.text_factory = lambda x: unicode(x, "utf-8", "ignore")
cursor = connection.cursor()
cleanup_query = "DELETE FROM content WHERE ContentID = ? AND Accessibility = 1 AND IsDownloaded = 'false'"
for fname, cycle in result:
show_debug = self.is_debugging_title(fname)
contentID = self.contentid_from_path(fname, 6)
if show_debug:
debug_print('KoboTouch:upload_books: fname=', fname)
debug_print('KoboTouch:upload_books: contentID=', contentID)
cleanup_values = (contentID,)
# debug_print('KoboTouch:upload_books: Delete record left if deleted on Touch')
cursor.execute(cleanup_query, cleanup_values)
self.set_filesize_in_device_database(connection, contentID, fname)
if not self.copying_covers():
imageID = self.imageid_from_contentid(contentID)
self.delete_images(imageID, fname)
connection.commit()
cursor.close()
except Exception as e:
debug_print('KoboTouch:upload_books - Exception: %s'%str(e))
return result
def _modify_epub(self, file, metadata, container=None):
debug_print("KoboTouch:_modify_epub:Processing {0} - {1}".format(metadata.author_sort, metadata.title))
# Currently only modifying CSS, so if no stylesheet, don't do anything
if not self.extra_sheet:
debug_print("KoboTouch:_modify_epub: no CSS file")
return True
commit_container = False
if not container:
commit_container = True
try:
from calibre.ebooks.oeb.polish.container import get_container
debug_print("KoboTouch:_modify_epub: creating container")
container = get_container(file)
container.css_preprocessor = DummyCSSPreProcessor()
except Exception as e:
debug_print("KoboTouch:_modify_epub: exception from get_container {0} - {1}".format(metadata.author_sort, metadata.title))
debug_print("KoboTouch:_modify_epub: exception is: {0}".format(e))
return False
else:
debug_print("KoboTouch:_modify_epub: received container")
from calibre.ebooks.oeb.base import OEB_STYLES
for cssname, mt in container.mime_map.iteritems():
if mt in OEB_STYLES:
newsheet = container.parsed(cssname)
oldrules = len(newsheet.cssRules)
# remove any existing @page rules in epub css
# if css to be appended contains an @page rule
if self.extra_sheet and len([r for r in self.extra_sheet if r.type == r.PAGE_RULE]):
page_rules = [r for r in newsheet if r.type == r.PAGE_RULE]
if len(page_rules) > 0:
debug_print("KoboTouch:_modify_epub:Removing existing @page rules")
for rule in page_rules:
rule.style = ''
# remove any existing widow/orphan settings in epub css
                # if the CSS to be appended contains a widow/orphan rule, or there is no extra CSS file
if (len([r for r in self.extra_sheet if r.type == r.STYLE_RULE
and (r.style['widows'] or r.style['orphans'])]) > 0):
widow_orphan_rules = [r for r in newsheet if r.type == r.STYLE_RULE
and (r.style['widows'] or r.style['orphans'])]
if len(widow_orphan_rules) > 0:
debug_print("KoboTouch:_modify_epub:Removing existing widows/orphans attribs")
for rule in widow_orphan_rules:
rule.style.removeProperty('widows')
rule.style.removeProperty('orphans')
# append all rules from kobo extra css stylesheet
for addrule in [r for r in self.extra_sheet.cssRules]:
newsheet.insertRule(addrule, len(newsheet.cssRules))
debug_print("KoboTouch:_modify_epub:CSS rules {0} -> {1} ({2})".format(oldrules, len(newsheet.cssRules), cssname))
container.dirty(cssname)
if commit_container:
debug_print("KoboTouch:_modify_epub: committing container.")
os.unlink(file)
container.commit(file)
return True
def delete_via_sql(self, ContentID, ContentType):
imageId = super(KOBOTOUCH, self).delete_via_sql(ContentID, ContentType)
if self.dbversion >= 53:
import sqlite3 as sqlite
debug_print('KoboTouch:delete_via_sql: ContentID="%s"'%ContentID, 'ContentType="%s"'%ContentType)
try:
with closing(sqlite.connect(self.device_database_path())) as connection:
debug_print('KoboTouch:delete_via_sql: have database connection')
                    # return bytestrings if the content cannot be decoded as unicode
connection.text_factory = lambda x: unicode(x, "utf-8", "ignore")
cursor = connection.cursor()
debug_print('KoboTouch:delete_via_sql: have cursor')
t = (ContentID,)
# Delete the Bookmarks
debug_print('KoboTouch:delete_via_sql: Delete from Bookmark')
cursor.execute('DELETE FROM Bookmark WHERE VolumeID = ?', t)
# Delete from the Bookshelf
debug_print('KoboTouch:delete_via_sql: Delete from the Bookshelf')
cursor.execute('delete from ShelfContent where ContentID = ?', t)
# ContentType 6 is now for all books.
debug_print('KoboTouch:delete_via_sql: BookID is Null')
cursor.execute('delete from content where BookID is Null and ContentID =?',t)
# Remove the content_settings entry
debug_print('KoboTouch:delete_via_sql: delete from content_settings')
cursor.execute('delete from content_settings where ContentID =?',t)
# Remove the ratings entry
debug_print('KoboTouch:delete_via_sql: delete from ratings')
cursor.execute('delete from ratings where ContentID =?',t)
# Remove any entries for the Activity table - removes tile from new home page
if self.has_activity_table():
debug_print('KoboTouch:delete_via_sql: delete from Activity')
cursor.execute('delete from Activity where Id =?', t)
connection.commit()
cursor.close()
debug_print('KoboTouch:delete_via_sql: finished SQL')
debug_print('KoboTouch:delete_via_sql: After SQL, no exception')
except Exception as e:
debug_print('KoboTouch:delete_via_sql - Database Exception: %s'%str(e))
debug_print('KoboTouch:delete_via_sql: imageId="%s"'%imageId)
if imageId is None:
imageId = self.imageid_from_contentid(ContentID)
return imageId
def delete_images(self, ImageID, book_path):
debug_print("KoboTouch:delete_images - ImageID=", ImageID)
if ImageID != None:
path = self.images_path(book_path, ImageID)
debug_print("KoboTouch:delete_images - path=%s" % path)
for ending in self.cover_file_endings().keys():
fpath = path + ending
fpath = self.normalize_path(fpath)
debug_print("KoboTouch:delete_images - fpath=%s" % fpath)
if os.path.exists(fpath):
debug_print("KoboTouch:delete_images - Image File Exists")
os.unlink(fpath)
try:
os.removedirs(os.path.dirname(path))
except:
pass
def contentid_from_path(self, path, ContentType):
show_debug = self.is_debugging_title(path) and True
if show_debug:
debug_print("KoboTouch:contentid_from_path - path='%s'"%path, "ContentType='%s'"%ContentType)
debug_print("KoboTouch:contentid_from_path - self._main_prefix='%s'"%self._main_prefix, "self._card_a_prefix='%s'"%self._card_a_prefix)
if ContentType == 6:
extension = os.path.splitext(path)[1]
if extension == '.kobo':
ContentID = os.path.splitext(path)[0]
# Remove the prefix on the file. it could be either
ContentID = ContentID.replace(self._main_prefix, '')
elif extension == '':
ContentID = path
ContentID = ContentID.replace(self._main_prefix + self.normalize_path('.kobo/kepub/'), '')
else:
ContentID = path
ContentID = ContentID.replace(self._main_prefix, "file:///mnt/onboard/")
if show_debug:
debug_print("KoboTouch:contentid_from_path - 1 ContentID='%s'"%ContentID)
if self._card_a_prefix is not None:
ContentID = ContentID.replace(self._card_a_prefix, "file:///mnt/sd/")
else: # ContentType = 16
debug_print("KoboTouch:contentid_from_path ContentType other than 6 - ContentType='%d'"%ContentType, "path='%s'"%path)
ContentID = path
ContentID = ContentID.replace(self._main_prefix, "file:///mnt/onboard/")
if self._card_a_prefix is not None:
ContentID = ContentID.replace(self._card_a_prefix, "file:///mnt/sd/")
ContentID = ContentID.replace("\\", '/')
if show_debug:
debug_print("KoboTouch:contentid_from_path - end - ContentID='%s'"%ContentID)
return ContentID
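    # Illustrative examples (not part of the driver) of the ContentType 6 mappings
    # above, assuming a hypothetical mount point self._main_prefix = '/media/KOBOeReader/':
    #   '/media/KOBOeReader/books/novel.kobo'    -> 'books/novel'    (prefix and extension stripped)
    #   '/media/KOBOeReader/.kobo/kepub/abc123'  -> 'abc123'         (no extension: kepub prefix stripped)
    #   '/media/KOBOeReader/books/novel.epub'    -> 'file:///mnt/onboard/books/novel.epub'
    # The actual prefix depends on the operating system and mount location.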
def get_content_type_from_extension(self, extension):
debug_print("KoboTouch:get_content_type_from_extension - start")
# With new firmware, ContentType appears to be 6 for all types of sideloaded books.
if self.fwversion >= (1,9,17) or extension == '.kobo' or extension == '.mobi':
debug_print("KoboTouch:get_content_type_from_extension - V2 firmware")
ContentType = 6
# For older firmware, it depends on the type of file.
elif extension == '.kobo' or extension == '.mobi':
ContentType = 6
else:
ContentType = 901
return ContentType
def update_device_database_collections(self, booklists, collections_attributes, oncard):
debug_print("KoboTouch:update_device_database_collections - oncard='%s'"%oncard)
if self.modify_database_check("update_device_database_collections") == False:
return
# Only process categories in this list
supportedcategories = {
"Im_Reading": 1,
"Read": 2,
"Closed": 3,
"Shortlist": 4,
"Archived": 5,
# "Preview":99, # Unsupported as we don't want to change it
}
# Define lists for the ReadStatus
readstatuslist = {
"Im_Reading":1,
"Read":2,
"Closed":3,
}
accessibilitylist = {
"Preview":6,
"Recommendation":4,
"Deleted":1,
}
# specialshelveslist = {
# "Shortlist":1,
# "Wishlist":2,
# }
# debug_print('KoboTouch:update_device_database_collections - collections_attributes=', collections_attributes)
opts = self.settings()
if opts.extra_customization:
create_bookshelves = opts.extra_customization[self.OPT_CREATE_BOOKSHELVES] and self.supports_bookshelves()
delete_empty_shelves = opts.extra_customization[self.OPT_DELETE_BOOKSHELVES] and self.supports_bookshelves()
update_series_details = opts.extra_customization[self.OPT_UPDATE_SERIES_DETAILS] and self.supports_series()
debugging_title = opts.extra_customization[self.OPT_DEBUGGING_TITLE]
debug_print("KoboTouch:update_device_database_collections - set_debugging_title to '%s'" % debugging_title)
booklists.set_debugging_title(debugging_title)
else:
delete_empty_shelves = False
create_bookshelves = False
update_series_details = False
opts = self.settings()
if opts.extra_customization:
create_bookshelves = opts.extra_customization[self.OPT_CREATE_BOOKSHELVES] and self.supports_bookshelves()
delete_empty_shelves = opts.extra_customization[self.OPT_DELETE_BOOKSHELVES] and self.supports_bookshelves()
else:
delete_empty_shelves = False
bookshelf_attribute = len(collections_attributes)
collections = booklists.get_collections(collections_attributes) if bookshelf_attribute else None
# debug_print('KoboTouch:update_device_database_collections - Collections:', collections)
# Create a connection to the sqlite database
# Needs to be outside books collection as in the case of removing
# the last book from the collection the list of books is empty
# and the removal of the last book would not occur
import sqlite3 as sqlite
with closing(sqlite.connect(self.normalize_path(self._main_prefix +
'.kobo/KoboReader.sqlite'))) as connection:
            # return bytestrings if the content cannot be decoded as unicode
connection.text_factory = lambda x: unicode(x, "utf-8", "ignore")
if collections:
# debug_print("KoboTouch:update_device_database_collections - length collections=" + unicode(len(collections)))
# Need to reset the collections outside the particular loops
# otherwise the last item will not be removed
if self.dbversion < 53:
debug_print("KoboTouch:update_device_database_collections - calling reset_readstatus")
self.reset_readstatus(connection, oncard)
if self.dbversion >= 14 and self.fwversion < self.min_fwversion_shelves:
debug_print("KoboTouch:update_device_database_collections - calling reset_favouritesindex")
self.reset_favouritesindex(connection, oncard)
# debug_print("KoboTouch:update_device_database_collections - length collections=", len(collections))
# debug_print("KoboTouch:update_device_database_collections - self.bookshelvelist=", self.bookshelvelist)
# Process any collections that exist
for category, books in collections.items():
debug_print("KoboTouch:update_device_database_collections - category='%s' books=%d"%(category, len(books)))
if create_bookshelves and not (category in supportedcategories or category in readstatuslist or category in accessibilitylist):
self.check_for_bookshelf(connection, category)
# if category in self.bookshelvelist:
# debug_print("Category: ", category, " id = ", readstatuslist.get(category))
for book in books:
# debug_print(' Title:', book.title, 'category: ', category)
show_debug = self.is_debugging_title(book.title)
if show_debug:
debug_print(' Title="%s"'%book.title, 'category="%s"'%category)
# debug_print(book)
debug_print(' class=%s'%book.__class__)
debug_print(' book.contentID="%s"'%book.contentID)
debug_print(' book.application_id="%s"'%book.application_id)
if book.application_id is None:
continue
category_added = False
if book.contentID is None:
debug_print(' Do not know ContentID - Title="%s"'%book.title)
extension = os.path.splitext(book.path)[1]
ContentType = self.get_content_type_from_extension(extension) if extension != '' else self.get_content_type_from_path(book.path)
book.contentID = self.contentid_from_path(book.path, ContentType)
if category in self.bookshelvelist and self.supports_bookshelves():
if show_debug:
debug_print(' length book.device_collections=%d'%len(book.device_collections))
if category not in book.device_collections:
if show_debug:
debug_print(' Setting bookshelf on device')
self.set_bookshelf(connection, book, category)
category_added = True
elif category in readstatuslist.keys():
# Manage ReadStatus
self.set_readstatus(connection, book.contentID, readstatuslist.get(category))
category_added = True
elif category == 'Shortlist' and self.dbversion >= 14:
if show_debug:
debug_print(' Have an older version shortlist - %s'%book.title)
# Manage FavouritesIndex/Shortlist
if not self.supports_bookshelves():
if show_debug:
debug_print(' and about to set it - %s'%book.title)
self.set_favouritesindex(connection, book.contentID)
category_added = True
elif category in accessibilitylist.keys():
# Do not manage the Accessibility List
pass
if category_added and category not in book.device_collections:
if show_debug:
debug_print(' adding category to book.device_collections', book.device_collections)
book.device_collections.append(category)
else:
if show_debug:
debug_print(' category not added to book.device_collections', book.device_collections)
debug_print("KoboTouch:update_device_database_collections - end for category='%s'"%category)
elif bookshelf_attribute: # No collections but have set the shelf option
# Since no collections exist the ReadStatus needs to be reset to 0 (Unread)
                debug_print("No Collections - resetting ReadStatus")
if self.dbversion < 53:
self.reset_readstatus(connection, oncard)
if self.dbversion >= 14 and self.fwversion < self.min_fwversion_shelves:
debug_print("No Collections - resetting FavouritesIndex")
self.reset_favouritesindex(connection, oncard)
# Set the series info and cleanup the bookshelves only if the firmware supports them and the user has set the options.
if (self.supports_bookshelves() or self.supports_series()) and (bookshelf_attribute or update_series_details):
debug_print("KoboTouch:update_device_database_collections - managing bookshelves and series.")
self.series_set = 0
books_in_library = 0
for book in booklists:
if book.application_id is not None:
books_in_library += 1
show_debug = self.is_debugging_title(book.title)
if show_debug:
debug_print("KoboTouch:update_device_database_collections - book.title=%s" % book.title)
if update_series_details:
self.set_series(connection, book)
if bookshelf_attribute:
if show_debug:
debug_print("KoboTouch:update_device_database_collections - about to remove a book from shelves book.title=%s" % book.title)
self.remove_book_from_device_bookshelves(connection, book)
book.device_collections.extend(book.kobo_collections)
if not prefs['manage_device_metadata'] == 'manual' and delete_empty_shelves:
debug_print("KoboTouch:update_device_database_collections - about to clear empty bookshelves")
self.delete_empty_bookshelves(connection)
debug_print("KoboTouch:update_device_database_collections - Number of series set=%d Number of books=%d" % (self.series_set, books_in_library))
self.dump_bookshelves(connection)
debug_print('KoboTouch:update_device_database_collections - Finished ')
def rebuild_collections(self, booklist, oncard):
debug_print("KoboTouch:rebuild_collections")
collections_attributes = self.get_collections_attributes()
debug_print('KoboTouch:rebuild_collections: collection fields:', collections_attributes)
self.update_device_database_collections(booklist, collections_attributes, oncard)
def upload_cover(self, path, filename, metadata, filepath):
'''
Upload book cover to the device. Default implementation does nothing.
:param path: The full path to the directory where the associated book is located.
:param filename: The name of the book file without the extension.
:param metadata: metadata belonging to the book. Use metadata.thumbnail
for cover
:param filepath: The full path to the ebook file
'''
debug_print("KoboTouch:upload_cover - path='%s' filename='%s' "%(path, filename))
debug_print(" filepath='%s' "%(filepath))
opts = self.settings()
if not self.copying_covers():
# Building thumbnails disabled
# debug_print('KoboTouch: not uploading cover')
return
# Only upload covers to SD card if that is supported
if self._card_a_prefix and os.path.abspath(path).startswith(os.path.abspath(self._card_a_prefix)) and not self.supports_covers_on_sdcard():
return
if not opts.extra_customization[self.OPT_UPLOAD_GRAYSCALE_COVERS]:
uploadgrayscale = False
else:
uploadgrayscale = True
# debug_print('KoboTouch: uploading cover')
try:
self._upload_cover(path, filename, metadata, filepath, uploadgrayscale, self.keep_cover_aspect())
except Exception as e:
debug_print('KoboTouch: FAILED to upload cover=%s Exception=%s'%(filepath, str(e)))
def imageid_from_contentid(self, ContentID):
ImageID = ContentID.replace('/', '_')
ImageID = ImageID.replace(' ', '_')
ImageID = ImageID.replace(':', '_')
ImageID = ImageID.replace('.', '_')
return ImageID
def images_path(self, path, imageId=None):
if self._card_a_prefix and os.path.abspath(path).startswith(os.path.abspath(self._card_a_prefix)) and self.supports_covers_on_sdcard():
path_prefix = 'koboExtStorage/images-cache/' if self.supports_images_tree() else 'koboExtStorage/images/'
path = os.path.join(self._card_a_prefix, path_prefix)
else:
path_prefix = '.kobo-images/' if self.supports_images_tree() else '.kobo/images/'
path = os.path.join(self._main_prefix, path_prefix)
if self.supports_images_tree() and imageId:
hash1 = qhash(imageId)
dir1 = hash1 & (0xff * 1)
dir2 = (hash1 & (0xff00 * 1)) >> 8
path = os.path.join(path, "%s" % dir1, "%s" % dir2)
if imageId:
path = os.path.join(path, imageId)
return path
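    # Worked example (illustrative only, with a made-up hash value) of the image
    # tree layout computed above when supports_images_tree() is true:
    #   hash1 = 0x12AB34                  # hypothetical qhash(imageId)
    #   dir1  = hash1 & 0xff              # = 0x34 = 52
    #   dir2  = (hash1 & 0xff00) >> 8     # = 0xAB = 171
    # giving a path such as <prefix>/.kobo-images/52/171/<imageId>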
def _upload_cover(self, path, filename, metadata, filepath, uploadgrayscale, keep_cover_aspect=False):
from calibre.utils.magick.draw import save_cover_data_to, identify_data
debug_print("KoboTouch:_upload_cover - filename='%s' uploadgrayscale='%s' "%(filename, uploadgrayscale))
if metadata.cover:
show_debug = self.is_debugging_title(filename)
if show_debug:
debug_print("KoboTouch:_upload_cover - path='%s'"%path, "filename='%s'"%filename)
debug_print(" filepath='%s'"%filepath)
cover = self.normalize_path(metadata.cover.replace('/', os.sep))
if os.path.exists(cover):
# Get ContentID for Selected Book
extension = os.path.splitext(filepath)[1]
ContentType = self.get_content_type_from_extension(extension) if extension != '' else self.get_content_type_from_path(filepath)
ContentID = self.contentid_from_path(filepath, ContentType)
try:
import sqlite3 as sqlite
with closing(sqlite.connect(self.device_database_path())) as connection:
                        # return bytestrings if the content cannot be decoded as unicode
connection.text_factory = lambda x: unicode(x, "utf-8", "ignore")
cursor = connection.cursor()
t = (ContentID,)
cursor.execute('select ImageId from Content where BookID is Null and ContentID = ?', t)
result = cursor.fetchone()
if result is None:
ImageID = self.imageid_from_contentid(ContentID)
debug_print("KoboTouch:_upload_cover - No rows exist in the database - generated ImageID='%s'" % ImageID)
else:
ImageID = result[0]
# debug_print("ImageId: ", result[0])
cursor.close()
if ImageID != None:
path = self.images_path(path, ImageID)
if show_debug:
debug_print("KoboTouch:_upload_cover - About to loop over cover endings")
image_dir = os.path.dirname(os.path.abspath(path))
if not os.path.exists(image_dir):
                                debug_print("KoboTouch:_upload_cover - Image directory does not exist. Creating path='%s'" % (image_dir))
os.makedirs(image_dir)
for ending, cover_options in self.cover_file_endings().items():
resize, min_dbversion, max_dbversion, isFullsize = cover_options
if show_debug:
debug_print("KoboTouch:_upload_cover - resize=%s min_dbversion=%d max_dbversion=%d" % (resize, min_dbversion, max_dbversion))
if self.dbversion >= min_dbversion and self.dbversion <= max_dbversion:
if show_debug:
debug_print("KoboTouch:_upload_cover - creating cover for ending='%s'"%ending) # , "resize'%s'"%resize)
fpath = path + ending
fpath = self.normalize_path(fpath.replace('/', os.sep))
with open(cover, 'rb') as f:
data = f.read()
if keep_cover_aspect:
if isFullsize:
resize = None
else:
width, height, fmt = identify_data(data)
                                            cover_aspect = width / float(height)  # float division: width and height are ints under Python 2
if cover_aspect > 1:
resize = (resize[0], int(resize[0] / cover_aspect))
elif cover_aspect < 1:
resize = (int(cover_aspect * resize[1]), resize[1])
# Return the data resized and in Grayscale if
# required
data = save_cover_data_to(data, 'dummy.jpg',
grayscale=uploadgrayscale,
resize_to=resize, return_data=True)
with open(fpath, 'wb') as f:
f.write(data)
fsync(f)
except Exception as e:
err = str(e)
debug_print("KoboTouch:_upload_cover - Exception string: %s"%err)
raise
else:
debug_print("KoboTouch:_upload_cover - ImageID could not be retrieved from the database")
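    # Worked example (illustrative, made-up numbers) of the keep_cover_aspect
    # arithmetic in _upload_cover above: a 1000x1500 cover has aspect ~0.667; for a
    # non-fullsize target of (355, 530) the cover_aspect < 1 branch keeps the height
    # and scales the width, giving (int(0.667 * 530), 530) = (353, 530).
    # Full-size endings (isFullsize) skip the resize entirely.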
def remove_book_from_device_bookshelves(self, connection, book):
show_debug = self.is_debugging_title(book.title) # or True
remove_shelf_list = set(book.current_shelves) - set(book.device_collections)
if show_debug:
debug_print('KoboTouch:remove_book_from_device_bookshelves - book.application_id="%s"'%book.application_id)
debug_print('KoboTouch:remove_book_from_device_bookshelves - book.contentID="%s"'%book.contentID)
debug_print('KoboTouch:remove_book_from_device_bookshelves - book.device_collections=', book.device_collections)
debug_print('KoboTouch:remove_book_from_device_bookshelves - book.current_shelves=', book.current_shelves)
debug_print('KoboTouch:remove_book_from_device_bookshelves - remove_shelf_list=', remove_shelf_list)
if len(remove_shelf_list) == 0:
return
query = 'DELETE FROM ShelfContent WHERE ContentId = ?'
values = [book.contentID,]
if book.device_collections:
placeholder = '?'
placeholders = ','.join(placeholder for unused in book.device_collections)
query += ' and ShelfName not in (%s)' % placeholders
values.extend(book.device_collections)
if show_debug:
debug_print('KoboTouch:remove_book_from_device_bookshelves query="%s"'%query)
debug_print('KoboTouch:remove_book_from_device_bookshelves values="%s"'%values)
cursor = connection.cursor()
cursor.execute(query, values)
connection.commit()
cursor.close()
def set_filesize_in_device_database(self, connection, contentID, fpath):
show_debug = self.is_debugging_title(fpath)
if show_debug:
debug_print('KoboTouch:set_filesize_in_device_database contentID="%s"'%contentID)
test_query = 'SELECT ___FileSize ' \
'FROM content ' \
'WHERE ContentID = ? ' \
' AND ContentType = 6'
test_values = (contentID, )
updatequery = 'UPDATE content ' \
'SET ___FileSize = ? ' \
'WHERE ContentId = ? ' \
'AND ContentType = 6'
cursor = connection.cursor()
cursor.execute(test_query, test_values)
result = cursor.fetchone()
if result is None:
if show_debug:
debug_print(' Did not find a record - new book on device')
elif os.path.exists(fpath):
file_size = os.stat(self.normalize_path(fpath)).st_size
if show_debug:
debug_print(' Found a record - will update - ___FileSize=', result[0], ' file_size=', file_size)
if file_size != int(result[0]):
update_values = (file_size, contentID, )
cursor.execute(updatequery, update_values)
if show_debug:
debug_print(' Size updated.')
connection.commit()
cursor.close()
# debug_print("KoboTouch:set_filesize_in_device_database - end")
def delete_empty_bookshelves(self, connection):
debug_print("KoboTouch:delete_empty_bookshelves - start")
delete_query = ("DELETE FROM Shelf "
"WHERE Shelf._IsSynced = 'false' "
"AND Shelf.InternalName not in ('Shortlist', 'Wishlist') "
"AND NOT EXISTS "
"(SELECT 1 FROM ShelfContent c "
"WHERE Shelf.Name = C.ShelfName "
"AND c._IsDeleted <> 'true')")
update_query = ("UPDATE Shelf "
"SET _IsDeleted = 'true' "
"WHERE Shelf._IsSynced = 'true' "
"AND Shelf.InternalName not in ('Shortlist', 'Wishlist') "
"AND NOT EXISTS "
"(SELECT 1 FROM ShelfContent C "
"WHERE Shelf.Name = C.ShelfName "
"AND c._IsDeleted <> 'true')")
delete_activity_query = ("DELETE FROM Activity "
"WHERE Type = 'Shelf' "
"AND NOT EXISTS "
"(SELECT 1 FROM Shelf "
"WHERE Shelf.Name = Activity.Id "
"AND Shelf._IsDeleted = 'false')"
)
cursor = connection.cursor()
cursor.execute(delete_query)
cursor.execute(update_query)
if self.has_activity_table():
cursor.execute(delete_activity_query)
connection.commit()
cursor.close()
debug_print("KoboTouch:delete_empty_bookshelves - end")
def get_bookshelflist(self, connection):
        # Retrieve the list of bookshelves
# debug_print('KoboTouch:get_bookshelflist')
bookshelves = []
if not self.supports_bookshelves():
return bookshelves
query = 'SELECT Name FROM Shelf WHERE _IsDeleted = "false"'
cursor = connection.cursor()
cursor.execute(query)
# count_bookshelves = 0
for i, row in enumerate(cursor):
bookshelves.append(row[0])
# count_bookshelves = i + 1
cursor.close()
# debug_print("KoboTouch:get_bookshelflist - count bookshelves=" + unicode(count_bookshelves))
return bookshelves
def set_bookshelf(self, connection, book, shelfName):
show_debug = self.is_debugging_title(book.title)
if show_debug:
debug_print('KoboTouch:set_bookshelf book.ContentID="%s"'%book.contentID)
debug_print('KoboTouch:set_bookshelf book.current_shelves="%s"'%book.current_shelves)
if shelfName in book.current_shelves:
if show_debug:
debug_print(' book already on shelf.')
return
test_query = 'SELECT _IsDeleted FROM ShelfContent WHERE ShelfName = ? and ContentId = ?'
test_values = (shelfName, book.contentID, )
addquery = 'INSERT INTO ShelfContent ("ShelfName","ContentId","DateModified","_IsDeleted","_IsSynced") VALUES (?, ?, ?, "false", "false")'
add_values = (shelfName, book.contentID, time.strftime(self.TIMESTAMP_STRING, time.gmtime()), )
updatequery = 'UPDATE ShelfContent SET _IsDeleted = "false" WHERE ShelfName = ? and ContentId = ?'
update_values = (shelfName, book.contentID, )
cursor = connection.cursor()
cursor.execute(test_query, test_values)
result = cursor.fetchone()
if result is None:
if show_debug:
debug_print(' Did not find a record - adding')
cursor.execute(addquery, add_values)
elif result[0] == 'true':
if show_debug:
debug_print(' Found a record - updating - result=', result)
cursor.execute(updatequery, update_values)
connection.commit()
cursor.close()
# debug_print("KoboTouch:set_bookshelf - end")
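    # Summary of the upsert performed by set_bookshelf above (illustrative):
    #   - no ShelfContent row for (shelf, book)   -> INSERT a new, undeleted row
    #   - row exists with _IsDeleted = 'true'     -> UPDATE it back to 'false'
    #   - row exists and is not deleted           -> nothing to do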
def check_for_bookshelf(self, connection, bookshelf_name):
show_debug = self.is_debugging_title(bookshelf_name)
if show_debug:
debug_print('KoboTouch:check_for_bookshelf bookshelf_name="%s"'%bookshelf_name)
test_query = 'SELECT InternalName, Name, _IsDeleted FROM Shelf WHERE Name = ?'
test_values = (bookshelf_name, )
addquery = 'INSERT INTO "main"."Shelf"'
add_values = (time.strftime(self.TIMESTAMP_STRING, time.gmtime()),
bookshelf_name,
time.strftime(self.TIMESTAMP_STRING, time.gmtime()),
bookshelf_name,
"false",
"true",
"false",
)
if self.dbversion < 64:
addquery += ' ("CreationDate","InternalName","LastModified","Name","_IsDeleted","_IsVisible","_IsSynced")'\
' VALUES (?, ?, ?, ?, ?, ?, ?)'
else:
addquery += ' ("CreationDate", "InternalName","LastModified","Name","_IsDeleted","_IsVisible","_IsSynced", "Id")'\
' VALUES (?, ?, ?, ?, ?, ?, ?, ?)'
add_values = add_values +(bookshelf_name,)
if show_debug:
debug_print('KoboTouch:check_for_bookshelf addquery=', addquery)
debug_print('KoboTouch:check_for_bookshelf add_values=', add_values)
updatequery = 'UPDATE Shelf SET _IsDeleted = "false" WHERE Name = ?'
cursor = connection.cursor()
cursor.execute(test_query, test_values)
result = cursor.fetchone()
if result is None:
if show_debug:
debug_print(' Did not find a record - adding shelf "%s"' % bookshelf_name)
cursor.execute(addquery, add_values)
elif result[2] == 'true':
debug_print('KoboTouch:check_for_bookshelf - Shelf "%s" is deleted - undeleting. result[2]="%s"' % (bookshelf_name, unicode(result[2])))
cursor.execute(updatequery, test_values)
connection.commit()
cursor.close()
# Update the bookshelf list.
self.bookshelvelist = self.get_bookshelflist(connection)
# debug_print("KoboTouch:set_bookshelf - end")
def remove_from_bookshelves(self, connection, oncard, ContentID=None, bookshelves=None):
debug_print('KoboTouch:remove_from_bookshelf ContentID=', ContentID)
if not self.supports_bookshelves():
return
query = 'DELETE FROM ShelfContent'
values = []
if ContentID is not None:
query += ' WHERE ContentId = ?'
values.append(ContentID)
else:
if oncard == 'carda':
query += ' WHERE ContentID like \'file:///mnt/sd/%\''
elif oncard != 'carda' and oncard != 'cardb':
query += ' WHERE ContentID not like \'file:///mnt/sd/%\''
if bookshelves:
placeholder = '?'
placeholders = ','.join(placeholder for unused in bookshelves)
query += ' and ShelfName in (%s)' % placeholders
            values.extend(bookshelves)
debug_print('KoboTouch:remove_from_bookshelf query=', query)
debug_print('KoboTouch:remove_from_bookshelf values=', values)
cursor = connection.cursor()
cursor.execute(query, values)
connection.commit()
cursor.close()
debug_print("KoboTouch:remove_from_bookshelf - end")
def set_series(self, connection, book):
show_debug = self.is_debugging_title(book.title)
if show_debug:
debug_print('KoboTouch:set_series book.kobo_series="%s"'%book.kobo_series)
debug_print('KoboTouch:set_series book.series="%s"'%book.series)
debug_print('KoboTouch:set_series book.series_index=', book.series_index)
if book.series == book.kobo_series:
kobo_series_number = None
if book.kobo_series_number is not None:
try:
kobo_series_number = float(book.kobo_series_number)
except:
kobo_series_number = None
if kobo_series_number == book.series_index:
if show_debug:
debug_print('KoboTouch:set_series - series info the same - not changing')
return
update_query = 'UPDATE content SET Series=?, SeriesNumber==? where BookID is Null and ContentID = ?'
if book.series is None:
update_values = (None, None, book.contentID, )
elif book.series_index is None: # This should never happen, but...
update_values = (book.series, None, book.contentID, )
else:
update_values = (book.series, "%g"%book.series_index, book.contentID, )
cursor = connection.cursor()
try:
if show_debug:
debug_print('KoboTouch:set_series - about to set - parameters:', update_values)
cursor.execute(update_query, update_values)
self.series_set += 1
except:
debug_print(' Database Exception: Unable to set series info')
raise
else:
connection.commit()
cursor.close()
if show_debug:
debug_print("KoboTouch:set_series - end")
@classmethod
def settings(cls):
opts = cls._config().parse()
if isinstance(cls.EXTRA_CUSTOMIZATION_DEFAULT, list):
if opts.extra_customization is None:
opts.extra_customization = []
if not isinstance(opts.extra_customization, list):
opts.extra_customization = [opts.extra_customization]
if len(cls.EXTRA_CUSTOMIZATION_DEFAULT) > len(opts.extra_customization):
extra_options_offset = 0
extra_customization = []
for i,d in enumerate(cls.EXTRA_CUSTOMIZATION_DEFAULT):
if i >= len(opts.extra_customization) + extra_options_offset:
extra_customization.append(d)
elif d.__class__ != opts.extra_customization[i - extra_options_offset].__class__:
extra_options_offset += 1
extra_customization.append(d)
else:
extra_customization.append(opts.extra_customization[i - extra_options_offset])
opts.extra_customization = extra_customization
return opts
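    # Illustrative example (made-up option values) of the padding logic in
    # settings() above. If a newer plugin version inserted a boolean option into
    # EXTRA_CUSTOMIZATION_DEFAULT, e.g.
    #   defaults      = [u'', True, u'tags']
    #   saved options = [u'', u'tags']        # saved before the boolean existed
    # then walking the defaults keeps saved values whose class matches and falls
    # back to the default (True) where the classes differ, yielding
    #   [u'', True, u'tags']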
def isAura(self):
return self.detected_device.idProduct in self.AURA_PRODUCT_ID
def isAuraHD(self):
return self.detected_device.idProduct in self.AURA_HD_PRODUCT_ID
def isAuraH2O(self):
return self.detected_device.idProduct in self.AURA_H2O_PRODUCT_ID
def isGlo(self):
return self.detected_device.idProduct in self.GLO_PRODUCT_ID
def isGloHD(self):
return self.detected_device.idProduct in self.GLO_HD_PRODUCT_ID
def isMini(self):
return self.detected_device.idProduct in self.MINI_PRODUCT_ID
def isTouch(self):
return self.detected_device.idProduct in self.TOUCH_PRODUCT_ID
def cover_file_endings(self):
return self.GLO_COVER_FILE_ENDINGS if self.isGlo() or self.isAura() \
else self.AURA_HD_COVER_FILE_ENDINGS if self.isAuraHD() or self.isAuraH2O() or self.isGloHD() \
else self.COVER_FILE_ENDINGS
def set_device_name(self):
device_name = self.gui_name
if self.isAura():
device_name = 'Kobo Aura'
elif self.isAuraHD():
device_name = 'Kobo Aura HD'
elif self.isAuraH2O():
device_name = 'Kobo Aura H2O'
elif self.isGlo():
device_name = 'Kobo Glo'
elif self.isGloHD():
device_name = 'Kobo Glo HD'
elif self.isMini():
device_name = 'Kobo Mini'
elif self.isTouch():
device_name = 'Kobo Touch'
self.__class__.gui_name = device_name
return device_name
def copying_covers(self):
opts = self.settings()
return opts.extra_customization[self.OPT_UPLOAD_COVERS] or opts.extra_customization[self.OPT_KEEP_COVER_ASPECT_RATIO]
def keep_cover_aspect(self):
opts = self.settings()
return opts.extra_customization[self.OPT_KEEP_COVER_ASPECT_RATIO]
def modifying_epub(self):
return self.modifying_css()
def modifying_css(self):
opts = self.settings()
return opts.extra_customization[self.OPT_MODIFY_CSS]
def supports_bookshelves(self):
return self.dbversion >= self.min_supported_dbversion
def supports_series(self):
return self.dbversion >= self.min_dbversion_series
def supports_kobo_archive(self):
return self.dbversion >= self.min_dbversion_archive
def supports_covers_on_sdcard(self):
return self.dbversion >= self.min_dbversion_images_on_sdcard and self.fwversion >= self.min_fwversion_images_on_sdcard
def supports_images_tree(self):
return self.fwversion >= self.min_fwversion_images_tree
def has_externalid(self):
return self.dbversion >= self.min_dbversion_externalid
def has_activity_table(self):
return self.dbversion >= self.min_dbversion_activity
def modify_database_check(self, function):
# Checks to see whether the database version is supported
# and whether the user has chosen to support the firmware version
# debug_print("KoboTouch:modify_database_check - self.fwversion > self.max_supported_fwversion=", self.fwversion > self.max_supported_fwversion)
if self.dbversion > self.supported_dbversion or self.fwversion > self.max_supported_fwversion:
# Unsupported database
opts = self.settings()
if not opts.extra_customization[self.OPT_SUPPORT_NEWER_FIRMWARE]:
debug_print('The database has been upgraded past supported version')
self.report_progress(1.0, _('Removing books from device...'))
from calibre.devices.errors import UserFeedback
raise UserFeedback(_("Kobo database version unsupported - See details"),
_('Your Kobo is running an updated firmware/database version.'
' As calibre does not know about this updated firmware,'
' database editing is disabled, to prevent corruption.'
' You can still send books to your Kobo with calibre, '
' but deleting books and managing collections is disabled.'
' If you are willing to experiment and know how to reset'
' your Kobo to Factory defaults, you can override this'
' check by right clicking the device icon in calibre and'
' selecting "Configure this device" and then the '
' "Attempt to support newer firmware" option.'
' Doing so may require you to perform a factory reset of'
' your Kobo.') + (
'\nDevice database version: %s.'
'\nDevice firmware version: %s'
) % (self.dbversion, self.fwversion),
UserFeedback.WARN)
return False
else:
# The user chose to edit the database anyway
return True
else:
# Supported database version
return True
@classmethod
def is_debugging_title(cls, title):
if not DEBUG:
return False
# debug_print("KoboTouch:is_debugging - title=", title)
is_debugging = False
opts = cls.settings()
if opts.extra_customization:
debugging_title = opts.extra_customization[cls.OPT_DEBUGGING_TITLE]
is_debugging = len(debugging_title) > 0 and title.lower().find(debugging_title.lower()) >= 0 or len(title) == 0
return is_debugging
def dump_bookshelves(self, connection):
if not (DEBUG and self.supports_bookshelves() and False):
return
debug_print('KoboTouch:dump_bookshelves - start')
shelf_query = 'SELECT * FROM Shelf'
shelfcontent_query = 'SELECT * FROM ShelfContent'
placeholder = '%s'
cursor = connection.cursor()
prints('\nBookshelves on device:')
cursor.execute(shelf_query)
i = 0
for row in cursor:
placeholders = ', '.join(placeholder for unused in row)
prints(placeholders%row)
i += 1
if i == 0:
prints("No shelves found!!")
else:
prints("Number of shelves=%d"%i)
prints('\nBooks on shelves on device:')
cursor.execute(shelfcontent_query)
i = 0
for row in cursor:
placeholders = ', '.join(placeholder for unused in row)
prints(placeholders%row)
i += 1
if i == 0:
prints("No books are on any shelves!!")
else:
prints("Number of shelved books=%d"%i)
cursor.close()
debug_print('KoboTouch:dump_bookshelves - end')
| gpl-3.0 | 6,730,758,367,339,175,000 | 49.074966 | 237 | 0.549732 | false | 4.174212 | false | false | false |
gfyoung/pandas | pandas/tests/io/pytables/test_complex.py | 1 | 6374 | from warnings import catch_warnings
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Series
import pandas._testing as tm
from pandas.tests.io.pytables.common import ensure_clean_path, ensure_clean_store
from pandas.io.pytables import read_hdf
# TODO(ArrayManager) HDFStore relies on accessing the blocks
pytestmark = td.skip_array_manager_not_yet_implemented
def test_complex_fixed(setup_path):
df = DataFrame(
np.random.rand(4, 5).astype(np.complex64),
index=list("abcd"),
columns=list("ABCDE"),
)
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df")
reread = read_hdf(path, "df")
tm.assert_frame_equal(df, reread)
df = DataFrame(
np.random.rand(4, 5).astype(np.complex128),
index=list("abcd"),
columns=list("ABCDE"),
)
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df")
reread = read_hdf(path, "df")
tm.assert_frame_equal(df, reread)
def test_complex_table(setup_path):
df = DataFrame(
np.random.rand(4, 5).astype(np.complex64),
index=list("abcd"),
columns=list("ABCDE"),
)
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format="table")
reread = read_hdf(path, "df")
tm.assert_frame_equal(df, reread)
df = DataFrame(
np.random.rand(4, 5).astype(np.complex128),
index=list("abcd"),
columns=list("ABCDE"),
)
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format="table", mode="w")
reread = read_hdf(path, "df")
tm.assert_frame_equal(df, reread)
def test_complex_mixed_fixed(setup_path):
complex64 = np.array(
[1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64
)
complex128 = np.array(
[1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex128
)
df = DataFrame(
{
"A": [1, 2, 3, 4],
"B": ["a", "b", "c", "d"],
"C": complex64,
"D": complex128,
"E": [1.0, 2.0, 3.0, 4.0],
},
index=list("abcd"),
)
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df")
reread = read_hdf(path, "df")
tm.assert_frame_equal(df, reread)
def test_complex_mixed_table(setup_path):
complex64 = np.array(
[1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64
)
complex128 = np.array(
[1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex128
)
df = DataFrame(
{
"A": [1, 2, 3, 4],
"B": ["a", "b", "c", "d"],
"C": complex64,
"D": complex128,
"E": [1.0, 2.0, 3.0, 4.0],
},
index=list("abcd"),
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["A", "B"])
result = store.select("df", where="A>2")
tm.assert_frame_equal(df.loc[df.A > 2], result)
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format="table")
reread = read_hdf(path, "df")
tm.assert_frame_equal(df, reread)
def test_complex_across_dimensions_fixed(setup_path):
with catch_warnings(record=True):
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
s = Series(complex128, index=list("abcd"))
df = DataFrame({"A": s, "B": s})
objs = [s, df]
comps = [tm.assert_series_equal, tm.assert_frame_equal]
for obj, comp in zip(objs, comps):
with ensure_clean_path(setup_path) as path:
obj.to_hdf(path, "obj", format="fixed")
reread = read_hdf(path, "obj")
comp(obj, reread)
def test_complex_across_dimensions(setup_path):
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
s = Series(complex128, index=list("abcd"))
df = DataFrame({"A": s, "B": s})
with catch_warnings(record=True):
objs = [df]
comps = [tm.assert_frame_equal]
for obj, comp in zip(objs, comps):
with ensure_clean_path(setup_path) as path:
obj.to_hdf(path, "obj", format="table")
reread = read_hdf(path, "obj")
comp(obj, reread)
def test_complex_indexing_error(setup_path):
complex128 = np.array(
[1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex128
)
df = DataFrame(
{"A": [1, 2, 3, 4], "B": ["a", "b", "c", "d"], "C": complex128},
index=list("abcd"),
)
msg = (
"Columns containing complex values can be stored "
"but cannot be indexed when using table format. "
"Either use fixed format, set index=False, "
"or do not include the columns containing complex "
"values to data_columns when initializing the table."
)
with ensure_clean_store(setup_path) as store:
with pytest.raises(TypeError, match=msg):
store.append("df", df, data_columns=["C"])
def test_complex_series_error(setup_path):
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
s = Series(complex128, index=list("abcd"))
msg = (
"Columns containing complex values can be stored "
"but cannot be indexed when using table format. "
"Either use fixed format, set index=False, "
"or do not include the columns containing complex "
"values to data_columns when initializing the table."
)
with ensure_clean_path(setup_path) as path:
with pytest.raises(TypeError, match=msg):
s.to_hdf(path, "obj", format="t")
with ensure_clean_path(setup_path) as path:
s.to_hdf(path, "obj", format="t", index=False)
reread = read_hdf(path, "obj")
tm.assert_series_equal(s, reread)
def test_complex_append(setup_path):
df = DataFrame(
{"a": np.random.randn(100).astype(np.complex128), "b": np.random.randn(100)}
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["b"])
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(pd.concat([df, df], 0), result)
| bsd-3-clause | -5,007,679,795,284,820,000 | 30.554455 | 84 | 0.562441 | false | 3.067372 | true | false | false |
SelfDrivUTT/selfdrivutt | robot/raspberry/controls.py | 1 | 3292 | import socket
import sys
import os
import curses
from threading import Thread
class RemoteControlServer(object):
    """Socket server that receives remote control commands over TCP."""
def __init__(self):
        super(RemoteControlServer, self).__init__()
self.data = ''
self.stopped = False
self.HOST = os.environ.get('COMMAND_HOST', 'localhost')
        self.PORT = int(os.environ.get('COMMAND_PORT', 9089))
def start(self):
self.socket_server = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
print('Socket created')
self.socket_server.bind((self.HOST, self.PORT))
print('Socket bind complete')
self.socket_server.listen(10)
print('Socket now listening')
self.conn, self.addr = self.socket_server.accept() # Accept the connection once (for starter)
print('Connected with ' + self.addr[0] + ':' + str(self.addr[1]))
Thread(target=self.update, args=()).start()
return self
def update(self):
while True:
try:
self.data = self.conn.recv(1024)
self.conn.send(self.data)
print(self.data)
                if self.data == chr(27):  # ESC character (recv returns a string, not an int)
self.stop()
return
except socket.error as e:
print(e)
self.stop()
return
else:
if len(self.data) == 0:
                    print('orderly shutdown on server end')
self.stop()
else:
print(self.data)
def read(self):
return self.data
def stop(self):
self.stopped = True
self.conn.close()
self.socket_server.close()
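# Illustrative client sketch (not used by the robot code): shows how a remote
# process could drive RemoteControlServer above. The host, port and command
# strings mirror the defaults in this module; everything else is an assumption.
def _example_remote_client(host='localhost', port=9089):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((host, port))
    for command in ('up', 'left', 'stop'):
        client.send(command)        # the server echoes each command back
        print(client.recv(1024))
    client.close()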
class CursesControl(object):
    """Curses-based keyboard controller that maps key presses to robot commands."""
def __init__(self):
super(CursesControl, self).__init__()
# self.screen.nodelay()
self.event = 'unload'
self.stopped = False
def start(self):
self.screen = curses.initscr()
Thread(target=self.update, args=()).start()
return self
def update(self):
while True:
try:
curses.noecho()
curses.curs_set(0)
self.screen.keypad(1)
self.screen.addstr("Press a key, " + str(self.event))
self.event = self.screen.getch()
finally:
curses.endwin()
if self.stopped or self.event == 27:
return
def read(self):
if self.event == curses.KEY_LEFT:
command = 'left'
elif self.event == curses.KEY_RIGHT:
command = 'right'
elif self.event == curses.KEY_UP:
command = 'up'
elif self.event == curses.KEY_DOWN:
command = 'down'
elif self.event == 32: # SPACE
command = 'stop'
elif self.event == 27: # ESC key
command = 'quit'
elif self.event == ord('p'): # P key
command = 'auto_logic_based'
elif self.event == ord('o'): # O key
command = 'stream'
elif self.event == ord('m'): # O key
command = 'auto_neural_network'
else:
command = '?'
return command
def stop(self):
self.stopped = True
| mit | 2,659,452,182,422,635,000 | 28.132743 | 101 | 0.512151 | false | 4.182973 | false | false | false |
all-of-us/raw-data-repository | rdr_service/alembic/versions/434fb0f05794_add_ignore_and_dev_note_to_genomics_.py | 1 | 1673 | """add ignore and dev note to genomics models.
Revision ID: 434fb0f05794
Revises: 994dfe6e53ee
Create Date: 2020-09-30 14:39:16.244636
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '434fb0f05794'
down_revision = '994dfe6e53ee'
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
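# Illustrative note: the dispatch above means that when this revision is run
# against the 'rdr' engine (presumably via the project's multi-database env.py),
# upgrade('rdr') resolves to upgrade_rdr() below, while the 'metrics' engine
# gets the empty upgrade_metrics()/downgrade_metrics() pair.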
def upgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('genomic_gc_validation_metrics', sa.Column('dev_note', sa.String(length=255), nullable=True))
op.add_column('genomic_gc_validation_metrics', sa.Column('ignore_flag', sa.SmallInteger(), nullable=True))
op.add_column('genomic_set_member', sa.Column('dev_note', sa.String(length=255), nullable=True))
op.add_column('genomic_set_member_history', sa.Column('dev_note', sa.String(length=255), nullable=True))
# ### end Alembic commands ###
def downgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('genomic_set_member', 'dev_note')
op.drop_column('genomic_set_member_history', 'dev_note')
op.drop_column('genomic_gc_validation_metrics', 'ignore_flag')
op.drop_column('genomic_gc_validation_metrics', 'dev_note')
# ### end Alembic commands ###
def upgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
| bsd-3-clause | -7,922,045,653,497,436,000 | 28.875 | 111 | 0.679617 | false | 3.267578 | false | false | false |
dannyroberts/eulxml | eulxml/xmlmap/premis.py | 1 | 5516 | # file eulxml/xmlmap/premis.py
#
# Copyright 2010,2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
:mod:`eulxml.xmlmap` classes for dealing with the `PREMIS
<http://www.loc.gov/standards/premis/>`_ metadata format for
preservation metadata.
-----
'''
from eulxml import xmlmap
PREMIS_NAMESPACE = 'info:lc/xmlns/premis-v2'
'authoritative namespace for PREMIS'
PREMIS_SCHEMA = 'http://www.loc.gov/standards/premis/v2/premis-v2-1.xsd'
'authoritative schema location for PREMIS'
class BasePremis(xmlmap.XmlObject):
'''Base PREMIS class with namespace declaration common to all PREMIS
XmlObjects.
.. Note::
This class is intended mostly for internal use, but could be
useful when extending or adding additional PREMIS
:class:`~eulxml.xmlmap.XmlObject` classes. The
:attr:`PREMIS_NAMESPACE` is mapped to the prefix **p**.
'''
ROOT_NS = PREMIS_NAMESPACE
ROOT_NAMESPACES = {
'p': PREMIS_NAMESPACE,
'xsi': 'http://www.w3.org/2001/XMLSchema-instance'
}
class PremisRoot(BasePremis):
'''Base class with a schema declaration for any of the
root/stand-alone PREMIS elements:
* ``<premis>`` - :class:`Premis`
* ``<object>`` - :class:`Object`
* ``<event>`` - :class:`Event`
* ``<agent>``
* ``<rights>``
'''
XSD_SCHEMA = PREMIS_SCHEMA
class Object(PremisRoot):
'''Preliminary :class:`~eulxml.xmlmap.XmlObject` for a PREMIS
object.
    Currently only includes the minimal required fields.
'''
ROOT_NAME = 'object'
type = xmlmap.StringField('@xsi:type') # file, representation, bitstream
'''type of object (e.g., file, representation, bitstream).
.. Note::
To be schema valid, object types must be in the PREMIS namespace, e.g.::
from eulxml.xmlmap import premis
obj = premis.Object()
obj.type = "p:file"
'''
id_type = xmlmap.StringField('p:objectIdentifier/p:objectIdentifierType')
'identifier type (`objectIdentifier/objectIdentifierType`)'
id = xmlmap.StringField('p:objectIdentifier/p:objectIdentifierValue')
'identifier value (`objectIdentifier/objectIdentifierValue`)'
class Event(PremisRoot):
'''Preliminary :class:`~eulxml.xmlmap.XmlObject` for a PREMIS
event.
.. Note::
The PREMIS schema requires that elements occur in a specified
order, which :mod:`eulxml` does not currently handle or manage.
As a work-around, when creating a new :class:`Event` from
scratch, you should set the following required fields in this
      order: identifier (:attr:`id` and :attr:`id_type`), then :attr:`type` and :attr:`date`.
'''
ROOT_NAME = 'event'
type = xmlmap.StringField('p:eventType')
'event type (``eventType``)'
id_type = xmlmap.StringField('p:eventIdentifier/p:eventIdentifierType')
'identifier type (`eventIdentifier/eventIdentifierType`)'
id = xmlmap.StringField('p:eventIdentifier/p:eventIdentifierValue')
'identifier value (`eventIdentifier/eventIdentifierValue`)'
date = xmlmap.StringField('p:eventDateTime')
'date/time for the event (`eventDateTime`)'
detail = xmlmap.StringField('p:eventDetail', required=False)
'event detail (`eventDetail`)'
outcome = xmlmap.StringField('p:eventOutcomeInformation/p:eventOutcome', required=False)
'''outcome of the event (`eventOutcomeInformation/eventOutcome`).
.. Note::
In this preliminary implementation, the outcome detail fields
are not mapped.
'''
# leaving out outcome detail for now...
# agent (optional, could be repeated)
agent_type = xmlmap.StringField('p:linkingAgentIdentifier/p:linkingAgentIdentifierType')
agent_id = xmlmap.StringField('p:linkingAgentIdentifier/p:linkingAgentIdentifierValue')
# object (optional, could be repeated)
object_type = xmlmap.StringField('p:linkingObjectIdentifier/p:linkingObjectIdentifierType')
object_id = xmlmap.StringField('p:linkingObjectIdentifier/p:linkingObjectIdentifierValue')
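# Illustrative sketch (not part of eulxml): constructing an Event with its
# required fields set in the order recommended by the docstring above. The
# identifier scheme, values and date are made up for demonstration.
def _example_event():
    event = Event()
    event.id_type = 'local'
    event.id = 'event:0001'
    event.type = 'ingestion'
    event.date = '2010-01-01T00:00:00'
    event.detail = 'object ingested into the repository'
    return event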
class Premis(PremisRoot):
'''Preliminary :class:`~eulxml.xmlmap.XmlObject` for a PREMIS
container element that can contain any of the other top-level
PREMIS elements.
    Currently only includes mappings for a single object and a list of
events.
'''
ROOT_NAME = 'premis'
version = xmlmap.StringField('@version')
'''Version of PREMIS in use; by default, new instances of
:class:`Premis` will be initialized with a version of 2.1'''
object = xmlmap.NodeField('p:object', Object)
'a single PREMIS :class:`object`'
events = xmlmap.NodeListField('p:event', Event)
'list of PREMIS events, as instances of :class:`Event`'
def __init__(self, *args, **kwargs):
# version is required for schema-validity; don't override a
# user-supplied version, but otherwise default to 2.1
if 'version' not in kwargs:
kwargs['version'] = '2.1'
super(Premis, self).__init__(*args, **kwargs)
| apache-2.0 | 2,107,150,789,629,381,400 | 36.020134 | 95 | 0.688724 | false | 3.762619 | false | false | false |
mightbejosh/dj-braintree | djbraintree/admin.py | 1 | 6860 | # -*- coding: utf-8 -*-
"""
Note: Django 1.4 support was dropped in #107
https://github.com/pydanny/dj-braintree/pull/107
"""
from django.contrib import admin
from .models import Transaction
from .models import Customer
class CustomerHasCardListFilter(admin.SimpleListFilter):
title = "card presence"
parameter_name = "has_card"
def lookups(self, request, model_admin):
return [
["yes", "Has Card"],
["no", "Does Not Have a Card"]
]
def queryset(self, request, queryset):
if self.value() == "yes":
return queryset.exclude(card_fingerprint="")
if self.value() == "no":
return queryset.filter(card_fingerprint="")
class InvoiceCustomerHasCardListFilter(admin.SimpleListFilter):
title = "card presence"
parameter_name = "has_card"
def lookups(self, request, model_admin):
return [
["yes", "Has Card"],
["no", "Does Not Have a Card"]
]
def queryset(self, request, queryset):
if self.value() == "yes":
return queryset.exclude(customer__card_fingerprint="")
if self.value() == "no":
return queryset.filter(customer__card_fingerprint="")
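# Example (illustrative, intentionally left commented out like the registrations
# below) of wiring the filter above into an admin registration:
#
# admin.site.register(
#     Customer,
#     list_filter=[CustomerHasCardListFilter],
# )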
#
# class CustomerSubscriptionStatusListFilter(admin.SimpleListFilter):
# title = "subscription status"
# parameter_name = "sub_status"
#
# def lookups(self, request, model_admin):
# statuses = [
# [x, x.replace("_", " ").title()]
# for x in CurrentSubscription.objects.all().values_list(
# "status",
# flat=True
# ).distinct()
# ]
# statuses.append(["none", "No Subscription"])
# return statuses
#
# def queryset(self, request, queryset):
# if self.value() is None:
# return queryset.all()
# else:
# return queryset.filter(current_subscription__status=self.value())
#
#
# def send_charge_receipt(modeladmin, request, queryset):
# """
# Function for sending receipts from the admin if a receipt is not sent for
# a specific charge.
# """
# for charge in queryset:
# charge.send_receipt()
#
#
# admin.site.register(
# Charge,
# readonly_fields=('created',),
# list_display=[
# "braintree_id",
# "customer",
# "amount",
# "description",
# "paid",
# "disputed",
# "refunded",
# "fee",
# "receipt_sent",
# "created"
# ],
# search_fields=[
# "braintree_id",
# "customer__braintree_id",
# "card_last_4",
# "invoice__braintree_id"
# ],
# list_filter=[
# "paid",
# "disputed",
# "refunded",
# "card_kind",
# "created"
# ],
# raw_id_fields=[
# "customer",
# "invoice"
# ],
# actions=(send_charge_receipt,),
# )
#
# admin.site.register(
# EventProcessingException,
# readonly_fields=('created',),
# list_display=[
# "message",
# "event",
# "created"
# ],
# search_fields=[
# "message",
# "traceback",
# "data"
# ],
# )
#
# admin.site.register(
# Event,
# raw_id_fields=["customer"],
# readonly_fields=('created',),
# list_display=[
# "braintree_id",
# "kind",
# "livemode",
# "valid",
# "processed",
# "created"
# ],
# list_filter=[
# "kind",
# "created",
# "valid",
# "processed"
# ],
# search_fields=[
# "braintree_id",
# "customer__braintree_id",
# "validated_message"
# ],
# )
#
#
# class CurrentSubscriptionInline(admin.TabularInline):
# model = CurrentSubscription
#
#
# def subscription_status(obj):
# return obj.current_subscription.status
# subscription_status.short_description = "Subscription Status"
#
#
# admin.site.register(
# Customer,
# raw_id_fields=["subscriber"],
# readonly_fields=('created',),
# list_display=[
# "braintree_id",
# "subscriber",
# "card_kind",
# "card_last_4",
# subscription_status,
# "created"
# ],
# list_filter=[
# "card_kind",
# CustomerHasCardListFilter,
# CustomerSubscriptionStatusListFilter
# ],
# search_fields=[
# "braintree_id"
# ],
# inlines=[CurrentSubscriptionInline]
# )
#
#
# class InvoiceItemInline(admin.TabularInline):
# model = InvoiceItem
#
#
# def customer_has_card(obj):
# """ Returns True if the customer has a card attached to its account."""
# return obj.customer.card_fingerprint != ""
# customer_has_card.short_description = "Customer Has Card"
#
#
# def customer_email(obj):
# """ Returns a string representation of the customer's email."""
# return str(obj.customer.subscriber.email)
# customer_email.short_description = "Customer"
#
#
# admin.site.register(
# Invoice,
# raw_id_fields=["customer"],
# readonly_fields=('created',),
# list_display=[
# "braintree_id",
# "paid",
# "closed",
# customer_email,
# customer_has_card,
# "period_start",
# "period_end",
# "subtotal",
# "total",
# "created"
# ],
# search_fields=[
# "braintree_id",
# "customer__braintree_id"
# ],
# list_filter=[
# InvoiceCustomerHasCardListFilter,
# "paid",
# "closed",
# "attempted",
# "attempts",
# "created",
# "date",
# "period_end",
# "total"
# ],
# inlines=[InvoiceItemInline]
# )
#
#
# admin.site.register(
# Transfer,
# raw_id_fields=["event"],
# readonly_fields=('created',),
# list_display=[
# "braintree_id",
# "amount",
# "status",
# "date",
# "description",
# "created"
# ],
# search_fields=[
# "braintree_id",
# "event__braintree_id"
# ]
# )
#
#
# class PlanAdmin(admin.ModelAdmin):
#
# def save_model(self, request, obj, form, change):
# """Update or create objects using our custom methods that
# sync with Braintree."""
#
# if change:
# obj.update_name()
#
# else:
# Plan.get_or_create(**form.cleaned_data)
#
# def get_readonly_fields(self, request, obj=None):
# readonly_fields = list(self.readonly_fields)
# if obj:
# readonly_fields.extend([
# 'braintree_id',
# 'amount',
# 'currency',
# 'interval',
# 'interval_count',
# 'trial_period_days'])
#
# return readonly_fields
#
# admin.site.register(Plan, PlanAdmin)
| bsd-3-clause | 6,652,920,508,881,095,000 | 23.326241 | 79 | 0.525364 | false | 3.418037 | false | false | false |
ericchill/gnofract4d | fract4d/fc.py | 1 | 16906 | #!/usr/bin/env python
# A compiler from UltraFractal or Fractint formula files to C code
# The UltraFractal manual is the best current description of the file
# format. You can download it from http://www.ultrafractal.com/uf3-manual.zip
# The implementation is based on the outline in "Modern Compiler
# Implementation in ML: basic techniques" (Appel 1997, Cambridge)
# Overall structure:
# fractlexer.py and fractparser.py are the lexer and parser, respectively.
# They use the PLY package to do lexing and SLR parsing, and produce as
# output an abstract syntax tree (defined in the Absyn module).
# The Translate module type-checks the code, maintains the symbol
# table (symbol.py) and converts it into an intermediate form (ir.py)
# Canon performs several simplifying passes on the IR to make it easier
# to deal with, then codegen converts it into a linear sequence of
# simple C instructions
# Finally we invoke the C compiler to convert to a native code shared library
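# Illustrative usage sketch (not part of the original pipeline description):
# assuming a formula file "example.frm" containing a formula named "Mandelbrot"
# somewhere on the formula search path, the classes below are driven roughly as
#
#   fc = Compiler()
#   form = fc.get_formula("example.frm", "Mandelbrot")
#   cg = fc.compile(form)
#   sofile = fc.generate_code(form, cg)   # path of the compiled shared library
#
# main() at the bottom of this file is the actual command-line driver.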
import getopt
import sys
import commands
import os.path
import stat
import random
import hashlib
import re
import copy
import fractconfig
import fractparser
import fractlexer
import translate
import codegen
import fracttypes
import absyn
import preprocessor
import cache
import gradient
class FormulaTypes:
FRACTAL = 0
COLORFUNC = 1
TRANSFORM = 2
GRADIENT = 3
NTYPES = 4
GRAD_UGR=0
GRAD_MAP=1
GRAD_GGR=2
GRAD_CS=3
matches = [
re.compile(r'(\.frm\Z)|(\.ufm\Z)', re.IGNORECASE),
re.compile(r'(\.cfrm\Z)|(\.ucl\Z)', re.IGNORECASE),
re.compile(r'\.uxf\Z', re.IGNORECASE),
re.compile(r'(\.ugr\Z)|(\.map\Z)|(\.ggr\Z)|(\.cs\Z)|(\.pal\Z)', re.IGNORECASE)
]
# indexed by FormulaTypes above
extensions = [ "frm", "cfrm", "uxf", "ggr", "pal"]
@staticmethod
def extension_from_type(t):
return FormulaTypes.extensions[t]
@staticmethod
def guess_type_from_filename(filename):
if FormulaTypes.matches[FormulaTypes.FRACTAL].search(filename):
return translate.T
elif FormulaTypes.matches[FormulaTypes.COLORFUNC].search(filename):
return translate.ColorFunc
elif FormulaTypes.matches[FormulaTypes.TRANSFORM].search(filename):
return translate.Transform
elif FormulaTypes.matches[FormulaTypes.GRADIENT].search(filename):
return translate.GradientFunc
@staticmethod
def guess_formula_type_from_filename(filename):
for i in xrange(FormulaTypes.NTYPES):
if FormulaTypes.matches[i].search(filename):
return i
raise ValueError("Unknown file type for '%s'" % filename)
@staticmethod
def guess_gradient_subtype_from_filename(filename):
filename = filename.lower()
if filename.endswith(".ugr"):
return FormulaTypes.GRAD_UGR
if filename.endswith(".map") or filename.endswith(".pal"):
return FormulaTypes.GRAD_MAP
if filename.endswith(".ggr"):
return FormulaTypes.GRAD_GGR
if filename.endswith(".cs"):
return FormulaTypes.GRAD_CS
raise ValueError("Unknown gradient type for '%s'" % filename)
@staticmethod
def isFormula(filename):
for matcher in FormulaTypes.matches:
if matcher.search(filename):
return True
return False
class FormulaFile:
def __init__(self, formulas, contents,mtime,filename):
self.formulas = formulas
self.contents = contents
self.mtime = mtime
self.filename = filename
self.file_backed = True
def out_of_date(self):
return self.file_backed and \
os.stat(self.filename)[stat.ST_MTIME] > self.mtime
def get_formula(self,formula):
return self.formulas.get(formula)
def get_formula_names(self, skip_type=None):
'''return all the coloring funcs except those marked as only suitable
for the OTHER kind (inside vs outside)'''
names = []
for name in self.formulas.keys():
sym = self.formulas[name].symmetry
if sym == None or sym == "BOTH" or sym != skip_type:
names.append(name)
return names
class Compiler:
def __init__(self):
self.parser = fractparser.parser
self.lexer = fractlexer.lexer
self.c_code = ""
self.path_lists = [ [], [], [], [] ]
self.cache = cache.T()
self.cache_dir = os.path.expanduser("~/.gnofract4d-cache/")
self.init_cache()
if 'win' != sys.platform[:3]:
self.compiler_name = "gcc"
self.flags = "-fPIC -DPIC -g -O3 -shared"
self.output_flag = "-o "
self.libs = "-lm"
else:
self.compiler_name = "cl"
self.flags = "/EHsc /Gd /nologo /W3 /LD /MT /TP /DWIN32 /DWINDOWS /D_USE_MATH_DEFINES"
self.output_flag = "/Fe"
self.libs = "/link /LIBPATH:\"%s/fract4d\" fract4d_stdlib.lib" % sys.path[0] # /DELAYLOAD:fract4d_stdlib.pyd DelayImp.lib
self.tree_cache = {}
self.leave_dirty = False
self.next_inline_number = 0
def _get_files(self):
return self.cache.files
files = property(_get_files)
def update_from_prefs(self,prefs):
self.compiler_name = prefs.get("compiler","name")
self.flags = prefs.get("compiler","options")
self.set_func_path_list(prefs.get_list("formula_path"))
self.path_lists[FormulaTypes.GRADIENT] = copy.copy(
prefs.get_list("map_path"))
def set_flags(self,flags):
self.flags = flags
def add_path(self,path,type):
self.path_lists[type].append(path)
def add_func_path(self,path):
self.path_lists[FormulaTypes.FRACTAL].append(path)
self.path_lists[FormulaTypes.COLORFUNC].append(path)
self.path_lists[FormulaTypes.TRANSFORM].append(path)
def set_func_path_list(self,list):
self.path_lists[FormulaTypes.FRACTAL] = copy.copy(list)
self.path_lists[FormulaTypes.COLORFUNC] = copy.copy(list)
self.path_lists[FormulaTypes.TRANSFORM] = copy.copy(list)
def init_cache(self):
self.cache.init()
def find_files(self,type):
files = {}
for dir in self.path_lists[type]:
if not os.path.isdir(dir):
continue
for file in os.listdir(dir):
if os.path.isfile(os.path.join(dir,file)):
files[file] = 1
return files.keys()
def find_files_of_type(self,type):
matcher = FormulaTypes.matches[type]
return [file for file in self.find_files(type)
if matcher.search(file)]
def find_formula_files(self):
return self.find_files_of_type(FormulaTypes.FRACTAL)
def find_colorfunc_files(self):
return self.find_files_of_type(FormulaTypes.COLORFUNC)
def find_transform_files(self):
return self.find_files_of_type(FormulaTypes.TRANSFORM)
def get_text(self,fname):
file = self.files.get(fname)
if not file:
self.load_formula_file(fname)
return self.files[fname].contents
def nextInlineFile(self,type):
self.next_inline_number += 1
ext = FormulaTypes.extension_from_type(type)
return "__inline__%d.%s" % (self.next_inline_number, ext)
def add_inline_formula(self,formbody, formtype):
# formbody contains a string containing the contents of a formula
formulas = self.parse_file(formbody)
fname = self.nextInlineFile(formtype)
ff = FormulaFile(formulas,formbody,0,fname)
ff.file_backed = False
self.files[fname] = ff
names = ff.get_formula_names()
if len(names) == 0:
formName = "error"
else:
formName = names[0]
return (fname, formName)
def last_chance(self,filename):
'''does nothing here, but can be overridden by GUI to prompt user.'''
raise IOError("Can't find formula file %s in formula search path" % \
filename)
def compile_one(self,formula):
self.compile(formula)
t = translate.T(absyn.Formula("",[],-1))
cg = self.compile(t)
t.merge(formula,"")
outputfile = os.path.abspath(self.generate_code(t, cg))
return outputfile
def compile_all(self,formula,cf0,cf1,transforms,options={}):
self.compile(formula,options)
self.compile(cf0,options)
self.compile(cf1,options)
for transform in transforms:
self.compile(transform,options)
# create temp empty formula and merge everything into that
t = translate.T(absyn.Formula("",[],-1))
cg = self.compile(t,options)
t.merge(formula,"")
t.merge(cf0,"cf0_")
t.merge(cf1,"cf1_")
for transform in transforms:
t.merge(transform,"t_")
outputfile = os.path.abspath(self.generate_code(t, cg))
return outputfile
def find_file(self,filename,type):
if os.path.exists(filename):
dir = os.path.dirname(filename)
if self.path_lists[type].count(dir) == 0:
# add directory to search path
self.path_lists[type].append(dir)
return filename
filename = os.path.basename(filename)
for path in self.path_lists[type]:
f = os.path.join(path,filename)
if os.path.exists(f):
return f
return self.last_chance(filename)
def add_endlines(self,result,final_line):
"Add info on which is the final source line of each formula"
if None == result:
return
l = len(result.children)
for i in xrange(l):
if i == l - 1:
result.children[i].last_line = final_line
else:
result.children[i].last_line = result.children[i+1].pos-1
def parse_file(self,s):
self.lexer.lineno = 1
result = None
try:
pp = preprocessor.T(s)
result = self.parser.parse(pp.out())
except preprocessor.Error, err:
# create an Error formula listing the problem
result = self.parser.parse('error {\n}\n')
result.children[0].children[0] = \
absyn.PreprocessorError(str(err), -1)
#print result.pretty()
self.add_endlines(result,self.lexer.lineno)
formulas = {}
for formula in result.children:
formulas[formula.leaf] = formula
return formulas
def load_formula_file(self, filename):
try:
type = FormulaTypes.guess_formula_type_from_filename(filename)
filename = self.find_file(filename,type)
s = open(filename,"r").read() # read in a whole file
basefile = os.path.basename(filename)
mtime = os.stat(filename)[stat.ST_MTIME]
if type == FormulaTypes.GRADIENT:
# don't try and parse gradient files apart from UGRs
subtype = FormulaTypes.guess_gradient_subtype_from_filename(filename)
if subtype == FormulaTypes.GRAD_UGR:
formulas = self.parse_file(s)
else:
formulas = {}
else:
formulas = self.parse_file(s)
ff = FormulaFile(formulas,s,mtime,filename)
self.files[basefile] = ff
return ff
except Exception, err:
#print "Error parsing '%s' : %s" % (filename, err)
raise
def out_of_date(self,filename):
basefile = os.path.basename(filename)
ff = self.files.get(basefile)
if not ff:
self.load_formula_file(filename)
ff = self.files.get(basefile)
return ff.out_of_date()
def get_file(self,filename):
basefile = os.path.basename(filename)
ff = self.files.get(basefile)
if not ff:
self.load_formula_file(filename)
ff = self.files.get(basefile)
elif ff.out_of_date():
self.load_formula_file(filename)
ff = self.files.get(basefile)
return ff
def get_formula_text(self,filename,formname):
ff = self.get_file(filename)
form = ff.get_formula(formname)
start_line = form.pos-1
last_line = form.last_line
lines = ff.contents.splitlines()
return "\n".join(lines[start_line:last_line])
def is_inline(self,filename, formname):
return not self.files[filename].file_backed
def compile(self,ir,options={}):
cg = codegen.T(ir.symbols,options)
cg.output_all(ir)
return cg
def hashcode(self,c_code):
hash = hashlib.md5()
hash.update(c_code)
hash.update(self.compiler_name)
hash.update(self.flags)
hash.update(self.libs)
return hash.hexdigest()
def generate_code(self,ir, cg, outputfile=None,cfile=None):
cg.output_decls(ir)
self.c_code = cg.output_c(ir)
hash = self.hashcode(self.c_code)
if outputfile == None:
outputfile = self.cache.makefilename(hash,".so")
if os.path.exists(outputfile):
# skip compilation - we already have this code
return outputfile
if cfile == None:
cfile = self.cache.makefilename(hash,".c")
if 'win' in sys.platform:
objfile = self.cache.makefilename(hash, ".obj")
open(cfile,"w").write(self.c_code)
# -march=i686 for 10% speed gain
cmd = "%s \"%s\" %s %s\"%s\"" % \
(self.compiler_name, cfile, self.flags, self.output_flag, outputfile)
if 'win' == sys.platform[:3]:
cmd += " /Fo\"%s\"" % objfile
cmd += " %s" % self.libs
#print "cmd: %s" % cmd
(status,output) = commands.getstatusoutput(cmd)
if status != 0:
raise fracttypes.TranslationError(
"Error reported by C compiler:%s" % output)
return outputfile
def get_parsetree(self,filename,formname):
ff = self.get_file(filename)
if ff == None : return None
return ff.get_formula(formname)
def guess_type_from_filename(self,filename):
return FormulaTypes.guess_type_from_filename(filename)
def get_formula(self, filename, formname,prefix=""):
type = self.guess_type_from_filename(filename)
f = self.get_parsetree(filename,formname)
if f != None:
f = type(f,prefix)
return f
def get_gradient(self, filename, formname):
g = gradient.Gradient()
if formname == None:
g.load(open(self.find_file(filename, 3))) # FIXME
else:
compiled_gradient = self.get_formula(filename,formname)
g.load_ugr(compiled_gradient)
return g
def get_random_gradient(self):
return self.get_random_formula(3) # FIXME
def get_random_formula(self,type):
files = self.find_files_of_type(type)
file = random.choice(files)
if gradient.FileType.guess(file) == gradient.FileType.UGR:
ff = self.get_file(file)
formulas = ff.formulas.keys()
formula = random.choice(formulas)
else:
formula = None
return (file,formula)
def clear_cache(self):
self.cache.clear()
def __del__(self):
if not self.leave_dirty:
self.clear_cache()
instance = Compiler()
instance.update_from_prefs(fractconfig.instance)
def usage():
print "FC : a compiler from Fractint .frm files to C code"
print "fc.py -o [outfile] -f [formula] infile"
sys.exit(1)
def generate(fc,formulafile, formula, outputfile, cfile):
# find the function we want
ir = fc.get_formula(formulafile,formula)
if ir == None:
raise Exception("Can't find formula %s in %s" % \
(formula, formulafile))
if ir.errors != []:
print "Errors during translation"
for e in ir.errors:
print e
raise Exception("Errors during translation")
cg = fc.compile(ir)
fc.generate_code(ir, cg, outputfile,cfile)
def main(args):
fc = Compiler()
fc.leave_dirty = True
for arg in args:
ff = fc.load_formula_file(arg)
for name in ff.get_formula_names():
print name
form = fc.get_formula(arg,name)
cg = fc.compile(form)
if __name__ == '__main__':
main(sys.argv[1:])
| bsd-3-clause | -7,829,546,766,683,979,000 | 31.827184 | 133 | 0.588726 | false | 3.784643 | false | false | false |
sileht/pifpaf | pifpaf/drivers/zookeeper.py | 1 | 2019 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pifpaf import drivers
class ZooKeeperDriver(drivers.Driver):
DEFAULT_PORT = 2181
PATH = ["/usr/share/zookeeper/bin",
"/usr/local/opt/zookeeper/libexec/bin"]
def __init__(self, port=DEFAULT_PORT, **kwargs):
"""Create a new ZooKeeper server."""
super(ZooKeeperDriver, self).__init__(**kwargs)
self.port = port
@classmethod
def get_options(cls):
return [
{"param_decls": ["--port"],
"type": int,
"default": cls.DEFAULT_PORT,
"help": "port to use for ZooKeeper"},
]
def _setUp(self):
super(ZooKeeperDriver, self)._setUp()
cfgfile = os.path.join(self.tempdir, "zoo.cfg")
with open(cfgfile, "w") as f:
f.write("""dataDir=%s
clientPort=%s""" % (self.tempdir, self.port))
logdir = os.path.join(self.tempdir, "log")
os.mkdir(logdir)
self.putenv("ZOOCFGDIR", self.tempdir, True)
self.putenv("ZOOCFG", cfgfile, True)
self.putenv("ZOO_LOG_DIR", logdir, True)
c, _ = self._exec(
["zkServer.sh", "start", cfgfile],
wait_for_line="STARTED",
path=self.PATH)
self.addCleanup(self._exec,
["zkServer.sh", "stop", cfgfile],
path=self.PATH)
self.putenv("ZOOKEEPER_PORT", str(self.port))
self.putenv("URL", "zookeeper://localhost:%d" % self.port)
| apache-2.0 | 3,448,701,531,717,726,000 | 30.061538 | 69 | 0.600792 | false | 3.605357 | false | false | false |
patrickhoefler/lwd | lwd.py | 1 | 7546 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Functions for turning the Wikidata dump into Linked Data
import codecs
import glob
import gzip
import json
import math
import os
import sys
import time
import xml.etree.cElementTree as ET
import settings
def process_dump():
# Print some status info
print 'Processing ' + settings.dump_filename
# Make sure the output folders exist
if not os.path.exists('output'):
os.mkdir('output')
if not os.path.exists('output/' + settings.output_folder):
os.mkdir('output/' + settings.output_folder)
if not os.path.exists('output/' + settings.output_folder + '/ttl'):
os.mkdir('output/' + settings.output_folder + '/ttl')
# Delete all old files
for f in glob.glob('output/' + settings.output_folder + '/ttl/*.ttl'):
os.remove(f)
# Initiate variables
entity_counter = 0
element_id = ''
# Start the clock
start_time = time.time()
# Load the dump file and create the iterator
context = ET.iterparse(settings.dump_filename, events=('start', 'end'))
context = iter(context)
event, root = context.next()
# Iterate over the dump file
for event, element in context:
# Check if we have reached the max number of processed entities
if settings.max_processed_entities > 0 and entity_counter == settings.max_processed_entities:
break
# Get the ID of the current entity
if event == 'end' and element.tag == '{http://www.mediawiki.org/xml/export-0.8/}title':
if element.text.find('Q') == 0:
element_id = element.text
elif element.text.find('Property:P') == 0:
element_id = element.text.split(':')[1]
# Get the data of the current entity
if element_id and event == 'end' and element.tag == '{http://www.mediawiki.org/xml/export-0.8/}text':
if element.text:
triples = get_nt_for_entity(element_id, element.text)
batch_id = str(int(math.floor(int(element_id[1:]) / settings.batchsize)) * settings.batchsize).zfill(8)
batchfile_ttl_name = 'output/' + settings.output_folder + '/ttl/' + element_id[0] + '_Batch_' + batch_id + '.ttl'
# If ttl file doesn't exist, create it and add the prefixes
if not os.path.isfile(batchfile_ttl_name):
prefixes = '# Extracted from ' + settings.dump_filename + ' with LWD (http://github.com/patrickhoefler/lwd)'
prefixes += """
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix wd: <http://www.wikidata.org/entity/> .
""".replace(' ', '')
with codecs.open(batchfile_ttl_name, 'a', 'utf-8') as batchfile_ttl:
batchfile_ttl.write(prefixes)
# Write the triples to the batchfile
with codecs.open(batchfile_ttl_name, 'a', 'utf-8') as batchfile_ttl:
batchfile_ttl.write(triples)
# One more entity
entity_counter += 1
# Print some progress
if entity_counter % 1000 == 0:
sys.stdout.write('.')
sys.stdout.flush()
# Print some statistics
if entity_counter % 10000 == 0:
lap_time = time.time()
print '\nProcessed ' + str(entity_counter) + ' entities in ' + str(lap_time - start_time) + ' seconds, on average ' + str(entity_counter / (lap_time - start_time)) + ' per second'
# Reset the element ID in preparation for the next iteration
element_id = ''
# Save the memory, save the world
root.clear()
# Stop the clock and print some final statistics
end_time = time.time()
print('\nProcessed ' + str(entity_counter) + ' entities in ' + str(end_time - start_time) + ' seconds, on average ' + str(entity_counter / (end_time - start_time)) + ' per second')
number_of_files = len(os.listdir('output/' + settings.output_folder + '/ttl'))
if number_of_files != 1:
plural = 's'
else:
plural = ''
print('Created ' + str(number_of_files) + ' .ttl file' + plural + ' in ./' + 'output/' + settings.output_folder + '/ttl')
def get_nt_for_entity(element_id, element_data):
# Turn the data JSON string into an object
data = json.loads(element_data)
entity_uri = 'wd:' + element_id
triples = ''
# Get the label in English
try:
triples = triples + entity_uri + ' rdfs:label ' + '"' + data['label']['en'].replace('\\', '\\\\').replace('"', '\\"') + '"@en .\n'
except:
# print 'No label for ' + element_id
pass
# Get the description in English
try:
triples = triples + entity_uri + ' rdfs:comment ' + '"' + data['description']['en'].replace('\\', '\\\\').replace('"', '\\"') + '"@en .\n'
except:
# print 'No description for ' + element_id
pass
# Are there any claims in the current element?
if data.get('claims'):
# Iterate over all claims
for claim in data['claims']:
predicate_id = 'P' + str(claim['m'][1])
predicate_uri = 'wd:' + predicate_id
if len(claim['m']) > 2:
# Is it an object property?
if claim['m'][2] == 'wikibase-entityid':
object_id = 'Q' + str(claim['m'][3]['numeric-id'])
object_uri = 'wd:' + object_id
triples = triples + entity_uri + ' ' + predicate_uri + ' ' + object_uri + ' .\n'
# Add RDF type
if predicate_id == 'P31':
triples = triples + entity_uri + ' rdf:type ' + object_uri + ' .\n'
# Is it a string value property?
if claim['m'][2] == 'string':
triples = triples + entity_uri + ' ' + predicate_uri + ' "' + claim['m'][3].replace('\\', '\\\\').replace('"', '\\"') + '" .\n'
return triples
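# For a hypothetical entity Q42 carrying an English label/description and a
# P31 -> Q5 claim, the string returned above looks roughly like this (Turtle,
# using the prefixes written at the top of every batch file):
#
#   wd:Q42 rdfs:label "Douglas Adams"@en .
#   wd:Q42 rdfs:comment "English writer and humorist"@en .
#   wd:Q42 wd:P31 wd:Q5 .
#   wd:Q42 rdf:type wd:Q5 .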
def compress_ttl_files():
# Print some status info
print 'Compressing'
# Make sure the output folders exist
if not os.path.exists('output'):
os.mkdir('output')
if not os.path.exists('output/' + settings.output_folder):
os.mkdir('output/' + settings.output_folder)
if not os.path.exists('output/' + settings.output_folder + '/gz'):
os.mkdir('output/' + settings.output_folder + '/gz')
# Delete all old files
for f in glob.glob('output/' + settings.output_folder + '/gz/*.gz'):
os.remove(f)
# Compress all files
for input_file_name in glob.glob('output/' + settings.output_folder + '/ttl/*.ttl'):
with open(input_file_name, 'rb') as input_file:
with gzip.open('output/' + settings.output_folder + '/gz/' + input_file_name.split('/')[-1] + '.gz', 'wb') as output_file:
output_file.writelines(input_file)
# Print some progress
sys.stdout.write('.')
sys.stdout.flush()
# Print some final statistics
number_of_files = len(os.listdir('output/' + settings.output_folder + '/gz'))
if number_of_files != 1:
plural = 's'
else:
plural = ''
print('\nCreated ' + str(number_of_files) + ' .gz file' + plural + ' in ./' + 'output/' + settings.output_folder + '/gz')
| mit | -7,322,916,570,662,439,000 | 37.111111 | 199 | 0.551816 | false | 3.811111 | false | false | false |
calebmadrigal/algorithms-in-python | heap.py | 1 | 3668 | """heap.py - implementation of a heap priority queue. """
__author__ = "Caleb Madrigal"
__date__ = "2015-02-17"
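# Usage sketch (illustrative):
#
#   h = Heap([4, 1, 7], heap_type=HeapType.maxheap)
#   h.push(10)
#   h.peek()   # -> 10
#   h.pop()    # -> 10, then 7, 4, 1 on later pops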
import math
from enum import Enum
from autoresizelist import AutoResizeList
class HeapType(Enum):
maxheap = 1
minheap = 2
class Heap:
def __init__(self, initial_data=None, heap_type=HeapType.maxheap):
self.heap_type = heap_type
if heap_type == HeapType.maxheap:
self.comparator = lambda x, y: x > y
else:
self.comparator = lambda x, y: x < y
self.data = AutoResizeList()
if initial_data is not None:
self.build_heap(initial_data)
self._size = len(self.data)
def _left_child(self, index):
return 2*index + 1
def _right_child(self, index):
return 2*index + 2
def _parent(self, index):
return math.floor((index - 1) / 2.0)
def _is_root(self, index):
return index == 0
def _swap(self, i1, i2):
self.data[i1], self.data[i2] = self.data[i2], self.data[i1]
    def build_heap(self, initial_data):
        for item in initial_data:
            self.data.prepend(item)
        # Heapify bottom-up: sift every non-leaf node down (sifting only the
        # root after each prepend is not enough once indices have shifted).
        for index in range(len(self.data) // 2 - 1, -1, -1):
            self.heap_down(index)
def heap_up(self, index):
# If we are at the root, return - we are done
if self._is_root(index):
return
# Else, compare the current node with the parent node, and if this node should be higher
# then the parent node, then swap and recursively call on the parent index
parent_index = self._parent(index)
if self.comparator(self.data[index], self.data[parent_index]):
self._swap(index, parent_index)
self.heap_up(parent_index)
def heap_down(self, index):
left_index = self._left_child(index)
right_index = self._right_child(index)
try:
left = self.data[left_index]
except IndexError:
left = None
try:
right = self.data[right_index]
except IndexError:
right = None
# Find the largest child
largest_child = left
largest_child_index = left_index
if left is not None and right is not None:
if self.comparator(right, left):
largest_child = right
largest_child_index = right_index
elif right is not None:
largest_child = right
largest_child_index = right_index
# If the largest child is not None and is higher priority than the current, then swap
# and recursively call on on the child index
if largest_child is not None and self.comparator(largest_child, self.data[index]):
self._swap(index, largest_child_index)
self.heap_down(largest_child_index)
def push(self, item):
insert_index = self._size # Insert at the end
self._size += 1
self.data[insert_index] = item
self.heap_up(insert_index)
return self
def peek(self):
return self.data[0]
def pop(self):
if len(self.data) < 1 or self.data[0] is None:
return None
# Take item from the root
item = self.data[0]
# Move the bottom-most, right-most item to the root
self.data[0] = self.data[self._size-1]
self.data[self._size-1] = None
self._size -= 1
self.heap_down(0)
return item
def size(self):
return self._size
def __repr__(self):
return str(self.data)
if __name__ == "__main__":
import unittest
testsuite = unittest.TestLoader().discover('test', pattern="*heap*")
unittest.TextTestRunner(verbosity=1).run(testsuite)
| mit | -855,685,561,109,325,200 | 27.65625 | 96 | 0.581516 | false | 3.785346 | true | false | false |
unkyulee/elastic-cms | src/web/modules/post/services/config.py | 1 | 2826 | import web.util.tools as tools
import os
from web import app
import lib.es as es
def get(p):
# get host and index from the global config
h = tools.get_conf(p['host'], p['navigation']['id'], 'host', 'http://localhost:9200')
n = tools.get_conf(p['host'], p['navigation']['id'], 'index', '')
return {
'name': get_conf(h, n, 'name', ''),
'description': get_conf(h, n, 'description', ''),
'host': h,
'index': n,
'upload_dir':
get_conf(h, n, 'upload_dir',
os.path.join( app.config.get('BASE_DIR'), 'uploads' )
),
'allowed_exts': get_conf(h, n, 'allowed_exts',''),
'page_size': get_conf(h, n, 'page_size', '10'),
'query': get_conf(h, n, 'query', '*'),
'sort_field': get_conf(h, n, 'sort_field', '_score'),
'sort_dir': get_conf(h, n, 'sort_dir', 'desc'),
'top': get_conf(h, n, 'top', ''),
'footer': get_conf(h, n, 'footer', ''),
'side': get_conf(h, n, 'side', ''),
'content_header': get_conf(h, n, 'content_header', ''),
'content_footer': get_conf(h, n, 'content_footer', ''),
'intro': get_conf(h, n, 'intro', ''),
'search_query': get_conf(h, n, 'search_query', ''),
'search_item_template': get_conf(h, n, 'search_item_template', ''),
'keep_history': get_conf(h, n, 'keep_history', 'Yes'),
}
def set(p):
# get host, index
host = p['c']['host']
if not host:
host = tools.get('host')
index = p['c']['index']
if not index:
index = tools.get('index')
# get host and index from the global config
tools.set_conf(p['host'], p['navigation']['id'], 'host', host)
tools.set_conf(p['host'], p['navigation']['id'], 'index', index)
# set local config
if p['c']['index']: # save local config only when index is already created
set_conf(host, index, 'name', tools.get('name'))
set_conf(host, index, 'description', tools.get('description'))
set_conf(host, index, 'upload_dir', tools.get('upload_dir'))
set_conf(host, index, 'allowed_exts', tools.get('allowed_exts'))
set_conf(host, index, 'page_size', tools.get('page_size'))
set_conf(host, index, 'query', tools.get('query'))
set_conf(host, index, 'sort_field', tools.get('sort_field'))
set_conf(host, index, 'sort_dir', tools.get('sort_dir'))
set_conf(host, index, 'keep_history', tools.get('keep_history'))
def get_conf(host, index, name, default):
ret = es.get(host, index, "config", name)
return ret.get('value') if ret and ret.get('value') else default
def set_conf(host, index, name, value):
config = {
'name': name,
'value': value
}
es.update(host, index, "config", name, config)
es.flush(host, index)
| mit | -7,525,611,961,569,722,000 | 37.189189 | 89 | 0.54954 | false | 3.18602 | true | false | false |
GutenkunstLab/SloppyCell | test/test_FixedPoints.py | 1 | 3610 | import unittest
import scipy
from SloppyCell.ReactionNetworks import *
lorenz = Network('lorenz')
lorenz.add_compartment('basic')
lorenz.add_species('x', 'basic', 0.5)
lorenz.add_species('y', 'basic', 0.5)
lorenz.add_species('z', 'basic', 0.5)
lorenz.add_parameter('sigma', 1.0)
lorenz.add_parameter('r', 2.0)
lorenz.add_parameter('b', 2.0)
lorenz.add_rate_rule('x', 'sigma*(y-x)')
lorenz.add_rate_rule('y', 'r*x - y - x*z')
lorenz.add_rate_rule('z', 'x*y - b*z')
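# Fixed points of this system satisfy sigma*(y - x) = 0, r*x - y - x*z = 0 and
# x*y - b*z = 0, i.e. x = y = +/-sqrt(b*(r - 1)), z = r - 1 (plus the origin).
# With the parameters above (r = 2, b = 2) that is (sqrt(2), sqrt(2), 1), which
# is the value the tests below check against.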
class test_fixedpoints(unittest.TestCase):
def test_basic(self):
""" Test basic fixed-point finding """
net = lorenz.copy('test')
fp = Dynamics.dyn_var_fixed_point(net, dv0=[1,1,1], with_logs=False)
# This should find the fixed-point [sqrt(2), sqrt(2), 1]
self.assertAlmostEqual(fp[0], scipy.sqrt(2), 6, 'Failed on basic 1,0.')
self.assertAlmostEqual(fp[1], scipy.sqrt(2), 6, 'Failed on basic 1,1.')
self.assertAlmostEqual(fp[2], 1, 6, 'Failed on basic 1,2.')
fp = Dynamics.dyn_var_fixed_point(net, dv0=[-0.1,-0.1,-0.1],
with_logs=False)
# This should find the fixed-point [0, 0, 0]
self.assertAlmostEqual(fp[0], 0, 6, 'Failed on basic 2,0.')
self.assertAlmostEqual(fp[1], 0, 6, 'Failed on basic 2,1.')
self.assertAlmostEqual(fp[2], 0, 6, 'Failed on basic 2,2.')
def test_withlogs(self):
""" Test fixed-point finding with logs """
net = lorenz.copy('test')
fp = Dynamics.dyn_var_fixed_point(net, dv0=[1,1,1], with_logs=True)
# This should find the fixed-point [sqrt(2), sqrt(2), 1]
self.assertAlmostEqual(fp[0], scipy.sqrt(2), 6, 'Failed on logs 1,0.')
self.assertAlmostEqual(fp[1], scipy.sqrt(2), 6, 'Failed on logs 1,1.')
self.assertAlmostEqual(fp[2], 1, 6, 'Failed on logs 1,2.')
fp = Dynamics.dyn_var_fixed_point(net, dv0=[0.1,0.1,0.1],
with_logs=True)
# This should find the fixed-point [0, 0, 0]
self.assertAlmostEqual(fp[0], 0, 6, 'Failed on logs 2,0.')
self.assertAlmostEqual(fp[1], 0, 6, 'Failed on logs 2,1.')
self.assertAlmostEqual(fp[2], 0, 6, 'Failed on logs 2,2.')
def test_stability(self):
net = lorenz.copy('test')
# The sqrt(b*(r-1)), sqrt(b*(r-1)), r-1 fixed point is stable for r < rH
# Strogatz, Nonlinear Dynamics and Chaos (p. 316)
fp, stable = Dynamics.dyn_var_fixed_point(net, dv0=[1,1,1],
stability=True)
self.assertEqual(stable, -1, 'Failed to classify stable fixed point')
# (0,0,0) is a saddle here
fp, stable = Dynamics.dyn_var_fixed_point(net, dv0=[0.01,0.01,0.01],
stability=True)
self.assertEqual(stable, 0, 'Failed to classify saddle')
# (0,0,0) is a stable node here
net.set_var_ic('r', 0.5)
fp, stable = Dynamics.dyn_var_fixed_point(net, dv0=[0.1,0.1,0.1],
stability=True)
self.assertEqual(stable, -1, 'Failed to classify stable fixed point')
# Now make the far fixed point a saddle...
net.set_var_ic('sigma', 6.0)
net.set_var_ic('r', 25)
fp, stable = Dynamics.dyn_var_fixed_point(net, dv0=[10,10,10],
stability=True)
self.assertEqual(stable, 0, 'Failed to classify saddle')
suite = unittest.makeSuite(test_fixedpoints)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -4,611,129,762,351,684,600 | 43.567901 | 80 | 0.565097 | false | 3.088109 | true | false | false |
tcarmelveilleux/IcarusAltimeter | Analysis/altitude_analysis.py | 1 | 1202 | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 14 19:34:31 2015
@author: Tennessee
"""
import numpy as np
import matplotlib.pyplot as plt
def altitude(atm_hpa, sea_level_hpa):
return 44330 * (1.0 - np.power(atm_hpa / sea_level_hpa, 0.1903))
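# Worked example (approximate): with sea_level_hpa = 1013.0 and a measured
# pressure of 900 hPa, 44330 * (1 - (900/1013)**0.1903) is roughly 987 m.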
def plot_alt():
default_msl = 101300.0
pressure = np.linspace(97772.58 / 100.0, 79495.0 / 100.0, 2000)
alt_nominal = altitude(pressure, default_msl) - altitude(97772.58 / 100.0, default_msl)
alt_too_high = altitude(pressure, default_msl + (1000 / 100.0)) - altitude(97772.58 / 100.0, default_msl + (1000 / 100.0))
alt_too_low = altitude(pressure, default_msl - (1000 / 100.0)) - altitude(97772.58 / 100.0, default_msl - (1000 / 100.0))
f1 = plt.figure()
ax = f1.gca()
ax.plot(pressure, alt_nominal, "b-", label="nom")
ax.plot(pressure, alt_too_high, "r-", label="high")
ax.plot(pressure, alt_too_low, "g-", label="low")
ax.legend()
f1.show()
f2 = plt.figure()
ax = f2.gca()
ax.plot(pressure, alt_too_high - alt_nominal, "r-", label="high")
ax.plot(pressure, alt_too_low - alt_nominal, "g-", label="low")
ax.legend()
f2.show()
| mit | 4,210,792,572,743,000,600 | 26.953488 | 126 | 0.58985 | false | 2.731818 | false | false | false |
aESeguridad/GERE | venv/lib/python2.7/site-packages/flask_weasyprint/__init__.py | 1 | 7726 | # coding: utf8
"""
flask_weasyprint
~~~~~~~~~~~~~~~~
Flask-WeasyPrint: Make PDF in your Flask app with WeasyPrint.
:copyright: (c) 2012 by Simon Sapin.
:license: BSD, see LICENSE for more details.
"""
import weasyprint
from flask import request, current_app
from werkzeug.test import Client, ClientRedirectError
from werkzeug.wrappers import Response
try:
import urlparse
except ImportError: # Python 3
from urllib import parse as urlparse
try:
unicode
except NameError: # Python 3
unicode = str
VERSION = '0.5'
__all__ = ['VERSION', 'make_flask_url_dispatcher', 'make_url_fetcher',
'HTML', 'CSS', 'render_pdf']
DEFAULT_PORTS = frozenset([('http', 80), ('https', 443)])
def make_flask_url_dispatcher():
"""Return an URL dispatcher based on the current :ref:`request context
<flask:request-context>`.
You generally don’t need to call this directly.
The context is used when the dispatcher is first created but not
afterwards. It is not required after this function has returned.
Dispatch to the context’s app URLs below the context’s root URL.
If the app has a ``SERVER_NAME`` :ref:`config <flask:config>`, also
accept URLs that have that domain name or a subdomain thereof.
"""
def parse_netloc(netloc):
"""Return (hostname, port)."""
parsed = urlparse.urlsplit('http://' + netloc)
return parsed.hostname, parsed.port
app = current_app._get_current_object()
root_path = request.script_root
server_name = app.config.get('SERVER_NAME')
if server_name:
hostname, port = parse_netloc(server_name)
def accept(url):
"""Accept any URL scheme; also accept subdomains."""
return url.hostname is not None and (
url.hostname == hostname or
url.hostname.endswith('.' + hostname))
else:
scheme = request.scheme
hostname, port = parse_netloc(request.host)
if (scheme, port) in DEFAULT_PORTS:
port = None
def accept(url):
"""Do not accept subdomains."""
return (url.scheme, url.hostname) == (scheme, hostname)
def dispatch(url_string):
if isinstance(url_string, bytes):
url_string = url_string.decode('utf8')
url = urlparse.urlsplit(url_string)
url_port = url.port
if (url.scheme, url_port) in DEFAULT_PORTS:
url_port = None
if accept(url) and url_port == port and url.path.startswith(root_path):
netloc = url.netloc
if url.port and not url_port:
netloc = netloc.rsplit(':', 1)[0] # remove default port
base_url = '%s://%s%s' % (url.scheme, netloc, root_path)
path = url.path[len(root_path):]
if url.query:
path += '?' + url.query
# Ignore url.fragment
return app, base_url, path
return dispatch
def make_url_fetcher(dispatcher=None,
next_fetcher=weasyprint.default_url_fetcher):
"""Return an function suitable as a ``url_fetcher`` in WeasyPrint.
You generally don’t need to call this directly.
If ``dispatcher`` is not provided, :func:`make_flask_url_dispatcher`
is called to get one. This requires a request context.
    Otherwise, it must be a callable that takes a URL and returns either
``None`` or a ``(wsgi_callable, base_url, path)`` tuple. For None
``next_fetcher`` is used. (By default, fetch normally over the network.)
For a tuple the request is made at the WSGI level.
``wsgi_callable`` must be a Flask application or another WSGI callable.
``base_url`` is the root URL for the application while ``path``
is the path within the application.
Typically ``base_url + path`` is equal or equivalent to the passed URL.
"""
if dispatcher is None:
dispatcher = make_flask_url_dispatcher()
def flask_url_fetcher(url):
redirect_chain = set()
while 1:
result = dispatcher(url)
if result is None:
return next_fetcher(url)
app, base_url, path = result
client = Client(app, response_wrapper=Response)
if isinstance(path, unicode):
# TODO: double-check this. Apparently Werzeug %-unquotes bytes
# but not Unicode URLs. (IRI vs. URI or something.)
path = path.encode('utf8')
response = client.get(path, base_url=base_url)
if response.status_code == 200:
return dict(
string=response.data,
mime_type=response.mimetype,
encoding=response.charset,
redirected_url=url)
# The test client can follow redirects, but do it ourselves
# to get access to the redirected URL.
elif response.status_code in (301, 302, 303, 305, 307):
redirect_chain.add(url)
url = response.location
if url in redirect_chain:
raise ClientRedirectError('loop detected')
else:
raise ValueError('Flask-WeasyPrint got HTTP status %s for %s%s'
% (response.status, base_url, path))
return flask_url_fetcher
def _wrapper(class_, *args, **kwargs):
if args:
guess = args[0]
args = args[1:]
else:
guess = kwargs.pop('guess', None)
if guess is not None and not hasattr(guess, 'read'):
# Assume a (possibly relative) URL
guess = urlparse.urljoin(request.url, guess)
if 'string' in kwargs and 'base_url' not in kwargs:
# Strings do not have an "intrinsic" base URL, use the request context.
kwargs['base_url'] = request.url
kwargs['url_fetcher'] = make_url_fetcher()
return class_(guess, *args, **kwargs)
def HTML(*args, **kwargs):
"""Like `weasyprint.HTML()
<http://weasyprint.org/using/#the-weasyprint-html-class>`_ but:
* :func:`make_url_fetcher` is used to create an ``url_fetcher``
* If ``guess`` is not a file object, it is an URL relative to the current
request context.
This means that you can just pass a result from :func:`flask.url_for`.
* If ``string`` is passed, ``base_url`` defaults to the current
request’s URL.
This requires a Flask request context.
"""
return _wrapper(weasyprint.HTML, *args, **kwargs)
def CSS(*args, **kwargs):
return _wrapper(weasyprint.CSS, *args, **kwargs)
CSS.__doc__ = HTML.__doc__.replace('HTML', 'CSS').replace('html', 'css')
def render_pdf(html, stylesheets=None, download_filename=None):
"""Render a PDF to a response with the correct ``Content-Type`` header.
:param html:
Either a :class:`weasyprint.HTML` object or an URL to be passed
to :func:`flask_weasyprint.HTML`. The latter case requires
a request context.
:param stylesheets:
A list of user stylesheets, passed to
:meth:`~weasyprint.HTML.write_pdf`
:param download_filename:
If provided, the ``Content-Disposition`` header is set so that most
web browser will show the "Save as…" dialog with the value as the
default filename.
:returns: a :class:`flask.Response` object.
"""
if not hasattr(html, 'write_pdf'):
html = HTML(html)
pdf = html.write_pdf(stylesheets=stylesheets)
response = current_app.response_class(pdf, mimetype='application/pdf')
if download_filename:
response.headers.add('Content-Disposition', 'attachment',
filename=download_filename)
return response
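# Typical usage sketch (illustrative route and template names; render_template
# and url_for are the usual Flask helpers):
#
#   @app.route('/hello_<name>.html')
#   def hello_html(name):
#       return render_template('hello.html', name=name)
#
#   @app.route('/hello_<name>.pdf')
#   def hello_pdf(name):
#       # render another view's HTML output as a PDF download
#       return render_pdf(url_for('hello_html', name=name),
#                         download_filename='hello_%s.pdf' % name)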
| gpl-3.0 | 6,181,784,262,939,823,000 | 35.046729 | 79 | 0.612004 | false | 4.066421 | false | false | false |
ThomasHabets/python-pyhsm | examples/yhsm-monitor-exit.py | 1 | 1480 | #!/usr/bin/env python
#
# Copyright (c) 2011, Yubico AB
# All rights reserved.
#
# Utility to send a MONITOR_EXIT command to a YubiHSM.
#
# MONITOR_EXIT only works if the YubiHSM is in debug mode. It would
# be a security problem to allow remote reconfiguration of a production
# YubiHSM.
#
# If your YubiHSM is not in debug mode, enter configuration mode by
# pressing the small button while inserting the YubiHSM in the USB port.
#
import sys
sys.path.append('Lib');
import pyhsm
device = "/dev/ttyACM0"
# simplified arguments parsing
d_argv = dict.fromkeys(sys.argv)
debug = d_argv.has_key('-v')
raw = d_argv.has_key('-R')
if d_argv.has_key('-h'):
sys.stderr.write("Syntax: %s [-v] [-R]\n" % (sys.argv[0]))
sys.stderr.write("\nOptions :\n")
sys.stderr.write(" -v verbose\n")
sys.stderr.write(" -R raw MONITOR_EXIT command\n")
sys.exit(0)
res = 0
try:
s = pyhsm.base.YHSM(device=device, debug = debug)
if raw:
# No initialization
s.write('\x7f\xef\xbe\xad\xba\x10\x41\x52\x45')
else:
print "Version: %s" % s.info()
s.monitor_exit()
print "Exited monitor-mode (maybe)"
if raw:
print "s.stick == %s" % s.stick
print "s.stick.ser == %s" % s.stick.ser
for _ in xrange(3):
s.stick.ser.write("\n")
line = s.stick.ser.readline()
print "%s" % (line)
except pyhsm.exception.YHSM_Error, e:
print "ERROR: %s" % e
res = 1
sys.exit(res)
| bsd-2-clause | 5,347,888,154,988,822,000 | 24.084746 | 72 | 0.618243 | false | 2.808349 | false | false | false |
darvin/qtdjango | src/qtdjango/settings.py | 1 | 5503 | # -*- coding: utf-8 -*-
from qtdjango.helpers import test_connection
__author__ = 'darvin'
from qtdjango.connection import *
__author__ = 'darvin'
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class BooleanEdit(QCheckBox):
def text(self):
return QVariant(self.checkState()).toString()
def setText(self, text):
self.setChecked(QVariant(text).toBool())
class SettingsDialog(QDialog):
widgets_table = [
# (name, caption, widget object, default value),
("address", u"Адрес сервера", QLineEdit, "http://127.0.0.1:8000"),
("api_path", u"Путь к api сервера", QLineEdit, "/api/"),
("server_package", u"Название пакета сервера", QLineEdit, "none"),
("login", u"Ваш логин", QLineEdit, ""),
("password", u"Ваш пароль", QLineEdit, ""),
("open_links_in_external_browser", \
u"Открывать ссылки из окна информации во внешнем браузере", BooleanEdit, True),
]
def __init__(self, parent=None, error_message=None, models_manager=None):
super(SettingsDialog, self).__init__(parent)
self.setWindowTitle(u"Настройки")
self.setModal(True)
self.formlayout = QFormLayout()
self.models_manager = models_manager
self.settings = QSettings()
self.message_widget = QLabel()
self.__widgets = []
for name, caption, widget_class, default in self.widgets_table:
self.__widgets.append((name, caption, widget_class(), default))
for name, caption, widget, default in self.__widgets:
self.formlayout.addRow(caption, widget)
widget.setText(self.settings.value(name, default).toString())
self.formlayout.addRow(self.message_widget)
if error_message is not None:
self.message(**error_message)
buttonBox = QDialogButtonBox(QDialogButtonBox.Save\
| QDialogButtonBox.Cancel |QDialogButtonBox.RestoreDefaults)
testButton = QPushButton(u"Тестировать соединение")
buttonBox.addButton(testButton, QDialogButtonBox.ActionRole)
testButton.clicked.connect(self.test)
buttonBox.accepted.connect(self.accept)
buttonBox.rejected.connect(self.reject)
buttonBox.button(QDialogButtonBox.RestoreDefaults).clicked.connect(self.restore)
self.formlayout.addRow(buttonBox)
self.setLayout(self.formlayout)
def accept(self):
if self.test():
for name, caption, widget, default in self.__widgets:
self.settings.setValue(name, widget.text())
self.models_manager.set_connection_params(\
self.get_value("address"), \
self.get_value("api_path"), \
self.get_value("login"),\
self.get_value("password"))
QDialog.accept(self)
def restore(self):
for name, caption, widget, default in self.__widgets:
widget.setText(default)
def message(self, text, error=False, works=False, fields=[]):
self.message_widget.setText(text)
if error:
color = "red"
elif works:
color = "green"
else:
color = "black"
css = "QLabel { color : %s; }" % color
self.message_widget.setStyleSheet(css)
for name, caption, widget, default in self.__widgets:
self.formlayout.labelForField(widget).setStyleSheet("")
if name in fields:
self.formlayout.labelForField(widget).setStyleSheet(css)
def get_value(self, name):
return unicode(self.settings.value(name).toString())
def test(self):
s = {}
for name, caption, widget, default in self.__widgets:
s[name] = unicode(widget.text())
try:
remote_version = test_connection(s["address"],s["api_path"],s["login"],s["password"])
import qtdjango
if qtdjango.__version__==remote_version:
self.message(text=u"Удаленный сервер настроен правильно!", works=True)
return True
elif remote_version is not None:
self.message(u"Версия системы на удаленном сервере отличается от\
версии системы на клиенте")
return True
except SocketError:
self.message(text=u"Ошибка при подключении к удаленному серверу", error=True, fields=\
("address",))
except ServerNotFoundError:
self.message(text=u"Удаленный сервер недоступен", error=True, fields=\
("address",))
except NotQtDjangoResponceError:
self.message(text=u"Не правильно настроен путь на удаленном сервере или \
удаленный сервер не является сервером системы", error=True, fields=\
("address","api_path"))
except AuthError:
self.message(text=u"Неверное имя пользователя или пароль", error=True, fields=\
("login","password"))
return False
| gpl-2.0 | 3,427,938,179,775,995,000 | 34.326389 | 98 | 0.599568 | false | 3.418683 | true | false | false |
barbarahui/nuxeo-calisphere | s3stash/nxstash_mediajson.py | 1 | 4444 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
import os
from s3stash.nxstashref import NuxeoStashRef
from deepharvest.deepharvest_nuxeo import DeepHarvestNuxeo
from deepharvest.mediajson import MediaJson
from dplaingestion.mappers.ucldc_nuxeo_mapper import UCLDCNuxeoMapper
import json
import s3stash.s3tools
FILENAME_FORMAT = "{}-media.json"
class NuxeoStashMediaJson(NuxeoStashRef):
''' create and stash media.json file for a nuxeo object '''
def __init__(self,
path,
bucket,
region,
pynuxrc='~/.pynuxrc',
replace=True,
**kwargs):
super(NuxeoStashMediaJson, self).__init__(path, bucket, region,
pynuxrc, replace, **kwargs)
self.dh = DeepHarvestNuxeo(
self.path, self.bucket, pynuxrc=self.pynuxrc)
self.mj = MediaJson()
self.filename = FILENAME_FORMAT.format(self.uid)
self.filepath = os.path.join(self.tmp_dir, self.filename)
self._update_report('filename', self.filename)
self._update_report('filepath', self.filepath)
def nxstashref(self):
return self.nxstash_mediajson()
def nxstash_mediajson(self):
''' create media.json file for object and stash on s3 '''
self._update_report('stashed', False)
# extract and transform metadata for parent obj and any components
parent_md = self._get_parent_metadata(self.metadata)
component_md = [
self._get_component_metadata(c)
for c in self.dh.fetch_components(self.metadata)
]
# create media.json file
media_json = self.mj.create_media_json(parent_md, component_md)
self._write_file(media_json, self.filepath)
# stash media.json file on s3
stashed, s3_report = s3stash.s3tools.s3stash(
self.filepath, self.bucket, self.filename, self.region,
'application/json', self.replace)
self._update_report('s3_stash', s3_report)
self._update_report('stashed', stashed)
self._remove_tmp()
return self.report
def _get_parent_metadata(self, obj):
''' assemble top-level (parent) object metadata '''
metadata = {}
metadata['label'] = obj['title']
# only provide id, href, format if Nuxeo Document has file attached
full_metadata = self.nx.get_metadata(uid=obj['uid'])
if self.dh.has_file(full_metadata):
metadata['id'] = obj['uid']
metadata['href'] = self.dh.get_object_download_url(full_metadata)
metadata['format'] = self.dh.get_calisphere_object_type(obj[
'type'])
if metadata['format'] == 'video':
metadata['dimensions'] = self.dh.get_video_dimensions(
full_metadata)
return metadata
def _get_component_metadata(self, obj):
''' assemble component object metadata '''
metadata = {}
full_metadata = self.nx.get_metadata(uid=obj['uid'])
metadata['label'] = obj['title']
metadata['id'] = obj['uid']
metadata['href'] = self.dh.get_object_download_url(full_metadata)
# extract additional ucldc metadata from 'properties' element
ucldc_md = self._get_ucldc_schema_properties(full_metadata)
for key, value in ucldc_md.iteritems():
metadata[key] = value
# map 'type'
metadata['format'] = self.dh.get_calisphere_object_type(obj['type'])
return metadata
def _get_ucldc_schema_properties(self, metadata):
''' get additional metadata as mapped by harvester '''
properties = {}
mapper = UCLDCNuxeoMapper(metadata)
mapper.map_original_record()
mapper.map_source_resource()
properties = mapper.mapped_data['sourceResource']
properties.update(mapper.mapped_data['originalRecord'])
return properties
def _write_file(self, content_dict, filepath):
""" convert dict to json and write to file """
content_json = json.dumps(
content_dict, indent=4, separators=(',', ': '), sort_keys=False)
with open(filepath, 'wb') as f:
f.write(content_json)
f.flush()
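# Illustrative usage sketch (path, bucket and region values are hypothetical):
#
#   stasher = NuxeoStashMediaJson('/asset-library/UCX/some-collection',
#                                 'my-s3-bucket', 'us-west-2',
#                                 pynuxrc='~/.pynuxrc', replace=True)
#   report = stasher.nxstash_mediajson()   # also reachable via nxstashref()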
def main(argv=None):
pass
if __name__ == "__main__":
sys.exit(main())
| bsd-3-clause | -6,770,037,951,671,929,000 | 32.413534 | 77 | 0.602835 | false | 3.788576 | false | false | false |
anjel-ershova/python_training | fixture/fixture_group.py | 1 | 4896 | from model.model_group import Group
import random
class GroupHelper:
def __init__(self, app):
self.app = app
def select_group_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
def select_group_by_id(self, id):
wd = self.app.wd
wd.find_element_by_css_selector("input[value='%s']" % id).click()
def select_first_group(self):
wd = self.app.wd
self.select_group_by_index(0)
def edit_if_not_none(self, field, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field).click()
wd.find_element_by_name(field).clear()
wd.find_element_by_name(field).send_keys(text)
else:
pass
def fill_group_form(self, group):
wd = self.app.wd
self.edit_if_not_none("group_name", group.name)
self.edit_if_not_none("group_header", group.header)
self.edit_if_not_none("group_footer", group.footer)
def create(self, group):
wd = self.app.wd
# open_groups_page
wd.find_element_by_link_text("groups").click()
# init group creation
wd.find_element_by_name("new").click()
self.fill_group_form(group)
# submit group creation
wd.find_element_by_name("submit").click()
self.return_to_groups_page()
self.group_cache = None
def edit_first_group(self):
wd = self.app.wd
self.edit_group_by_index(0)
def edit_group_by_index(self, index, new_group_data):
wd = self.app.wd
self.app.navigation.open_groups_page()
self.select_group_by_index(index)
# click edit button
wd.find_element_by_name("edit").click()
self.fill_group_form(new_group_data)
# submit edition
wd.find_element_by_name("update").click()
self.return_to_groups_page()
self.group_cache = None
def edit_group_by_id(self, id, new_group_data):
wd = self.app.wd
self.app.navigation.open_groups_page()
self.select_group_by_id(id)
# click edit button
wd.find_element_by_name("edit").click()
self.fill_group_form(new_group_data)
# submit edition
wd.find_element_by_name("update").click()
self.return_to_groups_page()
self.group_cache = None
def add_selected_contact_to_selected_group_by_id(self, target_group):
wd = self.app.wd
        # open the drop-down list
to_group = wd.find_element_by_name("to_group")
to_group.click()
        # select an arbitrary group by its value
to_group.find_element_by_css_selector("[value='%s']" % target_group.id).click()
wd.find_element_by_name("add").click()
def select_some_group_to_view(self, target_group):
wd = self.app.wd
        # open the drop-down list
view_group = wd.find_element_by_name("group")
view_group.click()
        # select an arbitrary group by its value
view_group.find_element_by_css_selector("[value='%s']" % target_group.id).click()
# def click_add_contact_to_group_button(self):
# wd = self.app.wd
# wd.find_element_by_name("add").click()
# self.app.navigation.open_home_page()
def delete_first_group(self):
wd = self.app.wd
self.delete_group_by_index(0)
def delete_group_by_index(self, index):
wd = self.app.wd
self.app.navigation.open_groups_page()
self.select_group_by_index(index)
# submit deletion
wd.find_element_by_name("delete").click()
self.return_to_groups_page()
self.group_cache = None
def delete_group_by_id(self, id):
wd = self.app.wd
self.app.navigation.open_groups_page()
self.select_group_by_id(id)
wd.find_element_by_name("delete").click()
self.return_to_groups_page()
self.group_cache = None
def return_to_groups_page(self):
wd = self.app.wd
wd.find_element_by_link_text("group page").click()
def count(self):
wd = self.app.wd
self.app.navigation.open_groups_page()
return len(wd.find_elements_by_name("selected[]"))
group_cache = None
def get_group_list(self):
if self.group_cache is None:
wd = self.app.wd
self.app.navigation.open_groups_page()
self.group_cache = []
wd.find_elements_by_css_selector("span.group")
for element in wd.find_elements_by_css_selector("span.group"):
text = element.text
id = int(element.find_element_by_name("selected[]").get_attribute("value"))
self.group_cache.append(Group(name=text, id=id))
return list(self.group_cache)
| apache-2.0 | 4,500,193,398,506,461,700 | 33.285714 | 91 | 0.592708 | false | 3.219316 | false | false | false |
bb111189/Arky2 | boilerplate/external/pycountry/__init__.py | 1 | 3459 | # vim:fileencoding=utf-8
# Copyright (c) 2008-2011 gocept gmbh & co. kg
# See also LICENSE.txt
# $Id$
"""pycountry"""
import os.path
import pycountry.db
LOCALES_DIR = os.path.join(os.path.dirname(__file__), 'locales')
DATABASE_DIR = os.path.join(os.path.dirname(__file__), 'databases')
class Countries(pycountry.db.Database):
"""Provides access to an ISO 3166 database (Countries)."""
field_map = dict(alpha_2_code='alpha2',
alpha_3_code='alpha3',
numeric_code='numeric',
name='name',
official_name='official_name',
common_name='common_name')
data_class_name = 'Country'
xml_tag = 'iso_3166_entry'
class Scripts(pycountry.db.Database):
"""Providess access to an ISO 15924 database (Scripts)."""
field_map = dict(alpha_4_code='alpha4',
numeric_code='numeric',
name='name')
data_class_name = 'Script'
xml_tag = 'iso_15924_entry'
class Currencies(pycountry.db.Database):
"""Providess access to an ISO 4217 database (Currencies)."""
field_map = dict(letter_code='letter',
numeric_code='numeric',
currency_name='name')
data_class_name = 'Currency'
xml_tag = 'iso_4217_entry'
class Languages(pycountry.db.Database):
"""Providess access to an ISO 639-1/2 database (Languages)."""
field_map = dict(iso_639_2B_code='bibliographic',
iso_639_2T_code='terminology',
iso_639_1_code='alpha2',
common_name='common_name',
name='name')
data_class_name = 'Language'
xml_tag = 'iso_639_entry'
class Subdivision(pycountry.db.Data):
parent_code = None
def __init__(self, element, **kw):
super(Subdivision, self).__init__(element, **kw)
self.type = element.parentNode.attributes.get('type').value
self.country_code = self.code.split('-')[0]
if self.parent_code is not None:
self.parent_code = '%s-%s' % (self.country_code, self.parent_code)
@property
def country(self):
return countries.get(alpha2=self.country_code)
@property
def parent(self):
return subdivisions.get(code=self.parent_code)
class Subdivisions(pycountry.db.Database):
# Note: subdivisions can be hierarchical to other subdivisions. The
# parent_code attribute is related to other subdivisons, *not*
# the country!
xml_tag = 'iso_3166_2_entry'
data_class_base = Subdivision
data_class_name = 'Subdivision'
field_map = dict(code='code',
name='name',
parent='parent_code')
no_index = ['name', 'parent_code']
def __init__(self, *args, **kw):
super(Subdivisions, self).__init__(*args, **kw)
# Add index for the country code.
self.indices['country_code'] = {}
for subdivision in self:
divs = self.indices['country_code'].setdefault(
subdivision.country_code, set())
divs.add(subdivision)
countries = Countries(os.path.join(DATABASE_DIR, 'iso3166.xml'))
scripts = Scripts(os.path.join(DATABASE_DIR, 'iso15924.xml'))
currencies = Currencies(os.path.join(DATABASE_DIR, 'iso4217.xml'))
languages = Languages(os.path.join(DATABASE_DIR, 'iso639.xml'))
subdivisions = Subdivisions(os.path.join(DATABASE_DIR, 'iso3166_2.xml'))
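# Lookup sketch (illustrative; keyword names follow the field_map definitions
# above):
#
#   countries.get(alpha2='DE').name          # 'Germany'
#   scripts.get(alpha4='Latn').name          # 'Latin'
#   currencies.get(letter='EUR').name        # 'Euro'
#   subdivisions.get(country_code='DE')      # the set of German subdivisions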
| lgpl-3.0 | 8,592,734,702,324,915,000 | 30.445455 | 78 | 0.601619 | false | 3.51882 | false | false | false |
ecell/libmoleculizer | python-src/bngparser/src/moleculizer/moleculeinterpreter.py | 1 | 6341 | ###############################################################################
# BNGMZRConverter - A utility program for converting bngl input files to mzr
# input files.
# Copyright (C) 2007, 2008, 2009 The Molecular Sciences Institute
#
# Moleculizer is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Moleculizer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Moleculizer; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Original Author:
# Nathan Addy, Scientific Programmer Email: [email protected]
# The Molecular Sciences Institute Email: [email protected]
#
#
###############################################################################
from moleculizermol import MoleculizerMol, MoleculizerSmallMol, MoleculizerModMol, isSmallMol, isModMol
from util import DataUnifier
from xmlobject import XmlObject
import pdb
# This class parses a Mols Block into a list of small-mols and big mols.
# It also manages the list of modifications.
class MoleculeDictionary:
class DuplicateMolDefinitionException(Exception): pass
class BadMolDefinitionException(Exception): pass
listOfNullModifications = ["none"]
def __init__(self, moleculeBlock, paramDict):
self.rawMoleculeDefinitions = moleculeBlock[:]
self.paramDict = paramDict
self.registeredMoleculesDictionary = {}
self.smallMolsDictionary = {}
self.modMolsDictionary = {}
self.initialize()
def initialize(self):
self.rawMoleculeDefinitions = DataUnifier( self.rawMoleculeDefinitions )
for line in self.rawMoleculeDefinitions:
if isSmallMol(line):
print "SM: %s" % line
MoleculizerSmallMol(line)
elif isModMol(line):
print "MM: %s" % line
MoleculizerModMol(line)
else:
print "'%s' is neither a ModMol nor a SmallMol, according to the isSmallMol and isModMol functions." % line
raise "Hello"
def parseMoleculeTypesLine(self, moleculeTypesLine):
parsedMol = MoleculizerMol(moleculeTypesLine)
parsedMolName = parsedMol.getName()
if parsedMolName in self.registeredMoleculesDictionary.keys():
            raise self.DuplicateMolDefinitionException("Error, molecule %s already defined in the MoleculeInterpreter" % parsedMolName)
self.registeredMoleculesDictionary[parsedMolName] = parsedMol
def getUniversalModificationList(self):
return MoleculizerMol.modificationStates[:]
def addModifications(self, parentElmt):
for modification in self.getUniversalModificationList():
modificationTypeElmt = XmlObject("modification")
modificationTypeElmt.addAttribute("name", modification)
modificationTypeElmt.attachToParent(parentElmt)
weightElmt = XmlObject("weight-delta")
weightElmt.attachToParent(modificationTypeElmt)
if self.representsNullModification(modification):
weightDelta = 0.0
else:
weightDelta = 1.0
weightElmt.addAttribute("daltons", weightDelta)
def addMols(self, parentElmt):
for molName in self.registeredMoleculesDictionary.keys():
self.addModMolElmtToMolsElmt(parentElmt, self.registeredMoleculesDictionary[molName])
def addModMolElmt(self, parentElmt):
pass
def addModMolElmtToMolsElmt(self, xmlObject, moleculizerMolObject):
assert(isinstance(xmlObject, XmlObject))
assert(isinstance(moleculizerMolObject, MoleculizerMol))
newModMol = XmlObject("mod-mol")
newModMol.addAttribute("name", moleculizerMolObject.getName())
weightElement = XmlObject("weight")
# Obviously this is one of the big deficiencies of this thing. What shall
# we set the (mandatory) weights to? For now, let's just put in something
# arbitratry. But this is a big issue that ought to be fixed as soon as all
# the basic facilities of the code have been built in.
if moleculizerMolObject.getName() == "Pheromone":
weightElement.addAttribute("daltons", 10.0)
else:
weightElement.addAttribute("daltons", 100.0)
newModMol.addSubElement(weightElement)
for binding in moleculizerMolObject.bindingSites:
self.addBindingSiteElmtToModMolElmt(binding, moleculizerMolObject, newModMol)
for modification in moleculizerMolObject.modificationSites:
modSite, defaultModState = modification
modSiteElmt = XmlObject("mod-site")
modSiteElmt.addAttribute("name", modSite)
defModRefElmt = XmlObject("default-mod-ref")
defModRefElmt.addAttribute("name", defaultModState)
defModRefElmt.attachToParent(modSiteElmt).attachToParent(newModMol)
xmlObject.addSubElement(newModMol)
return
def addBindingSiteElmtToModMolElmt(self, bindingName, moleculizerMol, xmlObject):
newBindingElmt = XmlObject("binding-site")
newBindingElmt.addAttribute("name", bindingName)
defaultShape = XmlObject("default-shape-ref")
defaultShape.addAttribute("name", "default")
defaultShape.attachToParent(newBindingElmt)
for shapeName in moleculizerMol.bindingSites[bindingName]:
siteShapeElmt = XmlObject("site-shape")
siteShapeElmt.addAttribute("name", shapeName)
siteShapeElmt.attachToParent(newBindingElmt)
xmlObject.addSubElement(newBindingElmt)
return
def representsNullModification(self, modificationType):
return modificationType.lower() in MoleculeDictionary.listOfNullModifications
| gpl-2.0 | 4,373,819,553,800,396,000 | 37.664634 | 130 | 0.674972 | false | 4.04917 | false | false | false |
mpi-sws-rse/antevents-python | examples/event_library_comparison/event.py | 1 | 8071 | """This version uses a traditional event-driven approach written in
continuation-passing style. Each method call is passed
a completion callback and an error callback.
"""
from statistics import median
import json
import asyncio
import random
import time
import hbmqtt.client
from collections import deque
from antevents.base import SensorEvent
URL = "mqtt://localhost:1883"
class RandomSensor:
def __init__(self, sensor_id, mean=100.0, stddev=20.0, stop_after_events=None):
self.sensor_id = sensor_id
self.mean = mean
self.stddev = stddev
self.stop_after_events = stop_after_events
if stop_after_events is not None:
def generator():
for i in range(stop_after_events):
yield round(random.gauss(mean, stddev), 1)
else: # go on forever
def generator():
while True:
yield round(random.gauss(mean, stddev), 1)
self.generator = generator()
def sample(self):
return self.generator.__next__()
def __repr__(self):
if self.stop_after_events is None:
return 'RandomSensor(%s, mean=%s, stddev=%s)' % \
(self.sensor_id, self.mean, self.stddev)
else:
return 'RandomSensor(%s, mean=%s, stddev=%s, stop_after_events=%s)' % \
(self.sensor_id, self.mean, self.stddev, self.stop_after_events)
class PeriodicMedianTransducer:
"""Emit an event once every ``period`` input events.
The value is the median of the inputs received since the last
emission.
"""
def __init__(self, period=5):
self.period = period
self.samples = [None for i in range(period)]
self.events_since_last = 0
self.last_event = None # this is used in emitting the last event
def step(self, v):
self.samples[self.events_since_last] = v.val
self.events_since_last += 1
if self.events_since_last==self.period:
val = median(self.samples)
event = SensorEvent(sensor_id=v.sensor_id, ts=v.ts, val=val)
self.events_since_last = 0
return event
else:
self.last_event = v # save in case we complete before completing a period
return None
def complete(self):
if self.events_since_last>0:
# if we have some partial state, we emit one final event that
# averages whatever we saw since the last emission.
return SensorEvent(sensor_id=self.last_event.sensor_id,
ts=self.last_event.ts,
val=median(self.samples[0:self.events_since_last]))
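# Illustrative sketch (not part of the original module): a minimal run of
# PeriodicMedianTransducer showing that one median event is emitted per
# ``period`` inputs and that complete() flushes the partial window.  The
# sensor id and values below are made up for illustration.
def _demo_periodic_median():
    xducer = PeriodicMedianTransducer(period=3)
    emitted = []
    for val in (10.0, 30.0, 20.0, 5.0):
        evt = SensorEvent(sensor_id='demo-sensor', ts=time.time(), val=val)
        result = xducer.step(evt)
        if result is not None:
            emitted.append(result)      # after the 3rd input: median 20.0
    final = xducer.complete()           # flushes the remaining sample (5.0)
    return emitted, final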
def csv_writer(evt):
print("csv_writer(%s)" % repr(evt))
class MqttWriter:
"""All the processing is asynchronous. We ensure that a given send has
completed and the callbacks called before we process the next one.
"""
def __init__(self, url, topic, event_loop):
self.url = url
self.topic = topic
self.client = hbmqtt.client.MQTTClient(loop=event_loop)
self.event_loop = event_loop
self.connected = False
self.pending_task = None
self.request_queue = deque()
def _to_message(self, msg):
return bytes(json.dumps((msg.sensor_id, msg.ts, msg.val),), encoding='utf-8')
def _request_done(self, f, completion_cb, error_cb):
assert f==self.pending_task
self.pending_task = None
exc = f.exception()
if exc:
self.event_loop.call_soon(error_cb, exc)
else:
self.event_loop.call_soon(completion_cb)
if len(self.request_queue)>0:
self.event_loop.call_soon(self._process_queue)
def _process_queue(self):
assert self.pending_task == None
assert len(self.request_queue)>0
(msg, completion_cb, error_cb) = self.request_queue.popleft()
if msg is not None:
print("send from queue: %s" % msg)
self.pending_task = self.event_loop.create_task(
self.client.publish(self.topic, msg)
)
else: # None means that we wanted a disconnect
print("disconnect")
self.pending_task = self.event_loop.create_task(
self.client.disconnect()
)
self.pending_task.add_done_callback(lambda f:
self._request_done(f, completion_cb,
error_cb))
def send(self, msg, completion_cb, error_cb):
if not self.connected:
print("attempting connection")
self.request_queue.append((self._to_message(msg),
completion_cb, error_cb),)
self.connected = True
self.pending_task = self.event_loop.create_task(self.client.connect(self.url))
def connect_done(f):
assert f==self.pending_task
print("connected")
self.pending_task = None
self.event_loop.call_soon(self._process_queue)
self.pending_task.add_done_callback(connect_done)
elif self.pending_task:
self.request_queue.append((self._to_message(msg), completion_cb,
error_cb),)
else:
print("sending %s" % self._to_message(msg))
self.pending_task = self.event_loop.create_task(
self.client.publish(self.topic, self._to_message(msg))
)
self.pending_task.add_done_callback(lambda f:
self._request_done(f, completion_cb,
error_cb))
def disconnect(self, completion_cb, error_cb, drop_queue=False):
if not self.connected:
return
if len(self.request_queue)>0 and drop_queue: # for error situations
self.request_queue = deque()
if self.pending_task:
self.request_queue.append((None, completion_cb, error_cb),)
else:
print("disconnecting")
self.pending_task = self.event_loop.create_task(
self.client.disconnect()
)
self.pending_task.add_done_callback(lambda f:
self._request_done(f, completion_cb,
error_cb))
def sample_and_process(sensor, mqtt_writer, xducer, completion_cb, error_cb):
try:
sample = sensor.sample()
except StopIteration:
final_event = xducer.complete()
if final_event:
mqtt_writer.send(final_event,
lambda: mqtt_writer.disconnect(lambda: completion_cb(False), error_cb),
error_cb)
else:
mqtt_writer.disconnect(lambda: completion_cb(False), error_cb)
return
event = SensorEvent(sensor_id=sensor.sensor_id, ts=time.time(), val=sample)
csv_writer(event)
median_event = xducer.step(event)
if median_event:
mqtt_writer.send(median_event,
lambda: completion_cb(True), error_cb)
else:
completion_cb(True)
sensor = RandomSensor('sensor-2', stop_after_events=12)
transducer = PeriodicMedianTransducer(5)
event_loop = asyncio.get_event_loop()
writer = MqttWriter(URL, sensor.sensor_id, event_loop)
def loop():
def completion_cb(more):
if more:
event_loop.call_later(0.5, loop)
else:
print("all done, no more callbacks to schedule")
event_loop.stop()
def error_cb(e):
print("Got error: %s" % e)
event_loop.stop()
event_loop.call_soon(
lambda: sample_and_process(sensor, writer, transducer,
completion_cb, error_cb)
)
event_loop.call_soon(loop)
event_loop.run_forever()
print("that's all folks")
| apache-2.0 | 4,810,702,886,399,499,000 | 37.251185 | 100 | 0.563127 | false | 4.031469 | false | false | false |
saikrishnar/vsm_preparer | scripts/quincontext.py | 1 | 1778 | #! /usr/bin/python
import os
def main(folder):
#vectors = []
#f = open('../dictionary/vectors.txt')
#for line in f:
# representation = line.strip('\n')
# vectors.append(representation)
#f.close()
for d, ds, fs in os.walk(folder):
for fname in fs:
if fname[-4:] != '.dur':
continue
fullfname = d + '/' + fname
phone_array = []
dur_array = []
fr = open(fullfname)
for line in fr:
if line.split('\n')[0] == '#':
continue
[phone, dur] = line.rstrip('\n').split()
phone_array.append(phone)
dur_array.append(dur)
fw = open(fullfname[:-4] + '.quindur', 'w')
for i in range(2, len(dur_array) - 2 ):
phoneme_2p = phone_array[i-2]
#phoneme_2p_index = uniquephone_list.index(phoneme_2p)
phoneme_1p = phone_array[i-1]
#phoneme_1p_index = uniquephone_list.index(phoneme_1p)
phoneme = phone_array[i]
#phoneme_index = uniquephone_list.index(phoneme)
phoneme_1n = phone_array[i+1]
#phoneme_1n_index = uniquephone_list.index(phoneme_1n)
phoneme_2n = phone_array[i+2]
#phoneme_2n_index = uniquephone_list.index(phoneme_2n)
duration = dur_array[i]
fw.write( str(float(duration)) + ' ' + phoneme_2p + ' ' + phoneme_1p + ' ' + phoneme + ' ' + phoneme_1n + ' ' + phoneme_2n + '\n')
fw.close()
fr.close()
if __name__ == '__main__':
folder = '../lab'
main(folder)
| gpl-2.0 | 4,492,283,820,192,578,000 | 34.56 | 151 | 0.45838 | false | 3.493124 | false | false | false |
Eksmo/calibre | src/calibre/gui2/viewer/position.py | 1 | 1815 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import json
class PagePosition(object):
def __init__(self, document):
self.document = document
@property
def viewport_cfi(self):
ans = None
res = self.document.mainFrame().evaluateJavaScript('''
ans = 'undefined';
if (window.paged_display) {
ans = window.paged_display.current_cfi();
if (!ans) ans = 'undefined';
}
ans;
''')
if res.isValid() and not res.isNull() and res.type() == res.String:
c = unicode(res.toString())
if c != 'undefined':
ans = c
return ans
def scroll_to_cfi(self, cfi):
if cfi:
cfi = json.dumps(cfi)
self.document.mainFrame().evaluateJavaScript(
'paged_display.jump_to_cfi(%s)'%cfi)
@property
def current_pos(self):
ans = self.viewport_cfi
if not ans:
ans = self.document.scroll_fraction
return ans
def __enter__(self):
self.save()
def __exit__(self, *args):
self.restore()
def save(self):
self._cpos = self.current_pos
def restore(self):
if self._cpos is None: return
self.to_pos(self._cpos)
self._cpos = None
def to_pos(self, pos):
if isinstance(pos, (int, float)):
self.document.scroll_fraction = pos
else:
self.scroll_to_cfi(pos)
def set_pos(self, pos):
self._cpos = pos
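# Illustrative usage sketch (not part of the original module): PagePosition is
# a context manager that saves the reading position on entry and restores it
# on exit.  ``document`` stands for the viewer's Document instance and is an
# assumption of this example.
def _demo_preserve_position(document):
    pos = PagePosition(document)
    with pos:
        pass  # e.g. change fonts or reflow the book here
    # leaving the block restores the previous CFI / scroll fraction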
| gpl-3.0 | -6,051,221,796,672,716,000 | 24.928571 | 75 | 0.534986 | false | 3.659274 | false | false | false |
openego/oeplatform | modelview/migrations/0022_auto_20160303_2233.py | 1 | 1468 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-03-03 21:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("modelview", "0021_auto_20160303_2233")]
operations = [
migrations.AlterField(
model_name="energymodel",
name="model_file_format",
field=models.CharField(
choices=[
(".exe", ".exe"),
(".gms", ".gms"),
(".py", ".py"),
(".xls", ".xls"),
("other", "other"),
],
default="other",
help_text="In which format is the model saved?",
max_length=5,
verbose_name="Model file format",
),
),
migrations.AlterField(
model_name="energymodel",
name="model_input",
field=models.CharField(
choices=[
(".csv", ".csv"),
(".py", ".py"),
("text", "text"),
(".xls", ".xls"),
("other", "other"),
],
default="other",
help_text="Of which file format are the input and output data?",
max_length=5,
verbose_name="Input/output data file format",
),
),
]
| agpl-3.0 | -647,503,568,038,892,000 | 30.234043 | 80 | 0.419619 | false | 4.781759 | false | false | false |
AjabWorld/ajabsacco | ajabsacco/core/facades/loans/transactions.py | 1 | 8484 | from ajabsacco.core.models import (
LoanTransactionEntry,
LoanProduct,
LoanAccount,
Message as SMSMessage,
)
from decimal import Decimal as D
from django.db.models import Q, Sum, F
from django.utils import timezone
from django.db import transaction as db_transaction
from ajabsacco.core import codes
from ajabsacco.core.sms import templates
from ajabsacco.core.utils import record_log, month_delta
from ajabsacco.core.exceptions import *
import ledger as ledger_facades
from ajabsacco.core.facades import transactions as transaction_facades
from ajabsacco.core.facades.loans import validations as validation_facades
import logging
logger = logging.getLogger('core.ajabsacco.loans')
import uuid
def allocate_repayment(loan_account, amount, *args, **kwargs):
with db_transaction.atomic():
fee_accrued = ledger_facades.loan_account_fees_due(loan_account)
penalties_accrued = ledger_facades.loan_account_penalties_due(loan_account)
interest_accrued = ledger_facades.loan_account_interest_due(loan_account)
principal_accrued = ledger_facades.loan_account_principal_due(loan_account)
#1. Align the order we will deduct the accruals
accruals = {
LoanProduct.ALLOCATION_CHOICE_FEE: (fee_accrued or D('0.0'), post_loan_fee),
LoanProduct.ALLOCATION_CHOICE_PENALTY: (penalties_accrued or D('0.0'), post_penalty_fee),
LoanProduct.ALLOCATION_CHOICE_INTEREST: (interest_accrued or D('0.0'), post_loan_interest),
}
amount = (amount or 0)
#get a sum of all accruals
total_accruals = sum(i[0] for i in accruals.values())
#Ensure we have sane values
if (amount > 0) and (principal_accrued > 0):
transaction_id = uuid.uuid4()
#setup allocation balance, to help us check to total allocation
allocation_balance = amount
if total_accruals > 0:
items_allocated = 0
#Loop through the allocation order
for allocation_item in LoanProduct.ALLOCATION_ORDER:
#Loop through all the accruals we are expecting to collect
for accrued_item, allocation_tuple in accruals.iteritems():
                    # put aside the variables from the tuple
accrued_amount, transaction_func = allocation_tuple
#if allocation item is equal to accrued item code, and accrued amount is more than 1
#Check to ensure we do not get to negative numbers
if (allocation_item == accrued_item) and (accrued_amount > 0) and (allocation_balance > 0):
#if amount accrued is sizable, deduct
transaction_func(loan_account, accrued_amount, transaction_id=transaction_id)
#stamp new allocation
items_allocated += 1
#deduct amount posted from balance
allocation_balance -= accrued_amount
post_loan_principal(
loan_account,
allocation_balance,
transaction_id=transaction_id
)
loan_account.last_repayment_date = timezone.now()
loan_account.save()
else:
logger.debug("amount: %s and total_accruals %s" % (amount, total_accruals))
raise AmountNotSufficientException()
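# Illustrative sketch (not part of the original module): a simplified,
# ORM-free picture of how allocate_repayment splits a payment, assuming
# hypothetical dues of fee=100, penalty=50, interest=200 and a 1500 payment.
def _demo_allocation_split(amount=1500, fee=100, penalty=50, interest=200):
    balance = amount
    posted = {}
    for name, due in (('fee', fee), ('penalty', penalty), ('interest', interest)):
        if due > 0 and balance > 0:
            posted[name] = due          # accrued charges are cleared first
            balance -= due
    posted['principal'] = balance       # the remainder reduces the principal
    return posted  # {'fee': 100, 'penalty': 50, 'interest': 200, 'principal': 1150}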
def apply_accruals(loan_account, approved_by=None):
with db_transaction.atomic():
date_disbursed = loan_account.date_disbursed
if date_disbursed is None:
raise ActionOnUndisbursedLoanException(
"You cannot apply accruals on Un-disbursed Loan %s" % loan_account.account_number)
date_today = timezone.now()
month_diff = month_delta(date_disbursed, date_today, days_of_the_month=30)
grace_period = loan_account.grace_period
grace_period_type = loan_account.product.grace_period_type
within_grace_period = ((month_diff - grace_period) < 1)
within_repayment_period = ((month_diff - loan_account.repayment_period) < 1)
        interest_due = 0  # default when no accrual branch below applies
        if (not grace_period == 0):
if (within_grace_period):
if (grace_period_type == LoanProduct.FULL_GRACE_PERIOD):
#No need to proceed, we don't want to accrue anything
interest_due = 0
elif (grace_period_type == LoanProduct.PRINCIPAL_GRACE_PERIOD):
principal_due = loan_account.amount
interest_due = (
(principal_due * loan_account.interest_rate) /
loan_account.repayment_period
)
else:
if within_repayment_period:
principal_due = ledger_facades.loan_account_principal_due(loan_account)
interest_due = (
(principal_due * (loan_account.interest_rate / D('100.0'))) /
loan_account.repayment_period
)
if interest_due > 0:
apply_interest_to_account(loan_account, interest_due)
def disburse_loan(loan_account, *args, **kwargs):
with db_transaction.atomic():
validation_facades.validate_disbursement(loan_account)
debit_entry, credit_entry = transaction_facades.create_transaction(
LoanTransactionEntry, loan_account, loan_account.amount,
transaction_type=codes.TRANSACTION_TYPE_LOAN_DISBURSAL, *args, **kwargs
)
loan_account.status = LoanAccount.ACTIVE
loan_account.date_disbursed = timezone.now()
loan_account.save()
return (debit_entry, credit_entry)
def apply_interest_to_account(loan_account, amount, *args, **kwargs):
return transaction_facades.create_transaction(LoanTransactionEntry, loan_account, amount,
transaction_type=codes.TRANSACTION_TYPE_INTEREST_APPLY, *args, **kwargs)
def apply_fee_to_account(loan_account, amount, *args, **kwargs):
return transaction_facades.create_transaction(LoanTransactionEntry, loan_account, amount,
transaction_type=codes.TRANSACTION_TYPE_FEE_APPLY, *args, **kwargs)
def apply_penalty_to_account(loan_account, amount, *args, **kwargs):
return transaction_facades.create_transaction(LoanTransactionEntry, loan_account, amount,
transaction_type=codes.TRANSACTION_TYPE_PENALTY_APPLY, *args, **kwargs)
def write_off_loan_principal(loan_account, amount, *args, **kwargs):
return transaction_facades.create_transaction(LoanTransactionEntry, loan_account, amount,
transaction_type=codes.TRANSACTION_TYPE_PRINCIPAL_WRITE_OFF, *args, **kwargs)
def write_off_loan_interest(loan_account, amount, *args, **kwargs):
return transaction_facades.create_transaction(LoanTransactionEntry, loan_account, amount,
transaction_type=codes.TRANSACTION_TYPE_INTEREST_WRITE_OFF, *args, **kwargs)
def write_off_loan_penalty(loan_account, amount, *args, **kwargs):
return transaction_facades.create_transaction(LoanTransactionEntry, loan_account, amount,
transaction_type=codes.TRANSACTION_TYPE_PENALTY_WRITE_OFF, *args, **kwargs)
def write_off_loan_fee(loan_account, amount, *args, **kwargs):
return transaction_facades.create_transaction(LoanTransactionEntry, loan_account, amount,
transaction_type=codes.TRANSACTION_TYPE_FEE_WRITE_OFF, *args, **kwargs)
def post_loan_principal(loan_account, amount, *args, **kwargs):
return transaction_facades.create_transaction(LoanTransactionEntry, loan_account, amount,
transaction_type=codes.TRANSACTION_TYPE_PRINCIPAL_POSTING, *args, **kwargs)
def post_loan_interest(loan_account, amount, *args, **kwargs):
return transaction_facades.create_transaction(LoanTransactionEntry, loan_account, amount,
transaction_type=codes.TRANSACTION_TYPE_INTEREST_POSTING, *args, **kwargs)
def post_loan_fee(loan_account, amount, *args, **kwargs):
return transaction_facades.create_transaction(LoanTransactionEntry, loan_account, amount,
transaction_type=codes.TRANSACTION_TYPE_FEE_POSTING, *args, **kwargs)
def post_penalty_fee(loan_account, amount, *args, **kwargs):
return transaction_facades.create_transaction(LoanTransactionEntry, loan_account, amount,
transaction_type=codes.TRANSACTION_TYPE_PENALTY_POSTING, *args, **kwargs)
| apache-2.0 | 3,779,830,831,397,354,500 | 45.360656 | 115 | 0.658298 | false | 3.649032 | false | false | false |
ArcherSys/ArcherSys | Lib/test/reperf.py | 1 | 1754 | import re
import time
def main():
    s = "\13hello\14 \13world\14 " * 1000
    p = re.compile(r"([\13\14])")
    timefunc(10, p.sub, "", s)
    timefunc(10, p.split, s)
    timefunc(10, p.findall, s)
def timefunc(n, func, *args, **kw):
    t0 = time.perf_counter()
    try:
        for i in range(n):
            result = func(*args, **kw)
        return result
    finally:
        t1 = time.perf_counter()
        if n > 1:
            print(n, "times", end=' ')
        print(func.__name__, "%.3f" % (t1-t0), "CPU seconds")
main()
| mit | -53,206,256,789,101,470 | 22.386667 | 61 | 0.513683 | false | 2.819936 | false | false | false |
rr-/ida-images | rgb-ida.py | 1 | 3099 | import idaapi
import librgb
from librgb.qt_shims import QtGui # important for PySide legacy IDA
from librgb.qt_shims import QtWidgets
try:
MAJOR, MINOR = map(int, idaapi.get_kernel_version().split("."))
except AttributeError:
MAJOR, MINOR = 6, 6
USING_IDA7API = MAJOR > 6
USING_PYQT5 = USING_IDA7API or (MAJOR == 6 and MINOR >= 9)
class DockableShim(object):
def __init__(self, title):
self._title = title
# IDA 7+ Widgets
if USING_IDA7API:
import sip
self._form = idaapi.create_empty_widget(self._title)
self.widget = sip.wrapinstance(long(self._form), QtWidgets.QWidget)
# legacy IDA PluginForm's
else:
self._form = idaapi.create_tform(self._title, None)
if USING_PYQT5:
self.widget = idaapi.PluginForm.FormToPyQtWidget(self._form)
else:
self.widget = idaapi.PluginForm.FormToPySideWidget(self._form)
def show(self):
if USING_IDA7API:
flags = (
idaapi.PluginForm.WOPN_TAB
| idaapi.PluginForm.WOPN_MENU
| idaapi.PluginForm.WOPN_RESTORE
| idaapi.PluginForm.WOPN_PERSIST
)
idaapi.display_widget(self._form, flags)
# legacy IDA PluginForm's
else:
flags = (
idaapi.PluginForm.FORM_TAB
| idaapi.PluginForm.FORM_MENU
| idaapi.PluginForm.FORM_RESTORE
| idaapi.PluginForm.FORM_PERSIST
| 0x80
) # idaapi.PluginForm.FORM_QWIDGET
idaapi.open_tform(self._form, flags)
class ImagePreviewPlugin(idaapi.plugin_t):
flags = 0
wanted_name = "Image previewer"
wanted_hotkey = "Alt + I"
comment = "Preview memory as image"
help = "https://github.com/rr-/ida-images"
def init(self):
return idaapi.PLUGIN_OK
def term(self):
pass
def run(self, arg):
class IdaWindowAdapter(librgb.GenericWindowAdapter):
def ask_address(self, address):
return AskAddr(address, "Please enter an address")
def ask_file(self):
return AskFile(1, "*.png", "Save the image as...")
image_preview_form = DockableShim("Image preview")
params = librgb.RendererParams()
params.readers = [librgb.MemoryReader()]
params.format = librgb.PixelFormats.GRAY8
params.width = 800
params.height = 600
params.flip = False
params.brightness = 50
adapter = IdaWindowAdapter(params)
shortcut_manager = librgb.ShortcutManager(adapter, params)
for shortcut, func in shortcut_manager.shortcuts.items():
adapter.define_shortcut(shortcut, image_preview_form.widget, func)
layout = adapter.create_layout()
image_preview_form.widget.setLayout(layout)
adapter.draw()
image_preview_form.show()
def PLUGIN_ENTRY():
return ImagePreviewPlugin()
if __name__ == "__main__":
ImagePreviewPlugin().run(0)
| mit | -2,430,317,480,103,620,600 | 29.683168 | 79 | 0.595999 | false | 3.663121 | false | false | false |
zencore-dobetter/zencore-utils | src/zencore/utils/redis.py | 1 | 3995 | import uuid
import math
import time
import logging
import redis as engine
from zencore.errors import WrongParameterTypeError
from .types import smart_force_to_string
logger = logging.getLogger(__name__)
class RedisLock(object):
def __init__(self, url, name=None, app_name=None, expire=None, prefix="zencore:lock:", tick=5, **kwargs):
self.url = url
self.connection = engine.Redis.from_url(url, **kwargs)
self.app_name = app_name or str(uuid.uuid4())
self.prefix = prefix
self.expire = expire
self.tick = tick
if name:
self.setup(name)
def setup(self, name):
self.lock_name = ":".join([self.prefix, name])
self.signal_name = ":".join([self.prefix, name, "signal"])
def acquire(self, blocking=True, timeout=-1):
stime = time.clock()
while True:
result = self.acquire_nowait()
if result:
return True
if not blocking:
return False
if timeout == 0:
return False
if timeout > 0:
delta = math.ceil(timeout - (time.clock() - stime))
if delta < 0:
return False
if delta > self.tick:
delta = self.tick
else:
delta = self.tick
event = self.connection.blpop(self.signal_name, timeout=delta)
if event is None:
return False
def acquire_nowait(self):
result = self.connection.setnx(self.lock_name, self.app_name)
if result:
if self.expire:
self.connection.expire(self.lock_name, self.expire)
self.connection.delete(self.signal_name)
return True
return False
def release(self):
if self.is_lock_owner():
self.connection.delete(self.lock_name)
self.connection.rpush(self.signal_name, 1)
def force_clean(self):
self.connection.delete(self.lock_name)
self.connection.rpush(self.signal_name, 1)
    def get_current_lock_owner(self):
return smart_force_to_string(self.connection.get(self.lock_name))
def is_lock_owner(self):
return self.get_current_lock_owner() == self.app_name
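# Illustrative usage sketch (not part of the original module): a critical
# section guarded by RedisLock.  The Redis URL and lock name are assumptions
# of this example.
def _demo_redis_lock():
    lock = RedisLock("redis://localhost:6379/0", name="nightly-report", expire=60)
    if lock.acquire(blocking=True, timeout=10):
        try:
            pass  # work that must not run concurrently goes here
        finally:
            lock.release()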
class Counter(object):
def __init__(self, connection, namespace):
self.connection = connection
self.namespace = namespace
def incr(self, name):
key = self.make_counter_key(name)
self.connection.incr(key)
def get(self, name):
key = self.make_counter_key(name)
return int(self.connection.get(key))
def getall(self):
keys = self.connection.keys(self.make_counter_pattern())
if not keys:
return {}
keys = [key.decode("utf-8") for key in keys]
values = [int(value) for value in self.connection.mget(*keys)]
return dict(zip(keys, values))
def make_counter_key(self, name):
return "{}:{}".format(self.namespace, name)
def make_counter_pattern(self):
return "{}:*".format(self.namespace)
def get_redis(config):
"""
    Get a Redis connection object from a configuration value (an existing client, a URL string, or a dict).
"""
if isinstance(config, engine.StrictRedis):
return config
if isinstance(config, str):
return engine.Redis.from_url(config)
if isinstance(config, dict):
url = config.get("url")
host = config.get("host")
if url:
db = config.get("db", None)
options = config.get("options", {})
return engine.Redis.from_url(url, db, **options)
if host:
return engine.Redis(**config)
logger.error("get_redis got wrong parameter type error.")
raise WrongParameterTypeError()
# ###########################################################################
# Duplicated or deprecated; not recommended for use
# ###########################################################################
make_redis_instance = get_redis
| mit | 3,164,183,319,776,613,000 | 29.929688 | 109 | 0.558474 | false | 3.9749 | true | false | false |
p/webracer | tests/request_via_form_test.py | 1 | 1451 | import webracer
import nose.plugins.attrib
from . import utils
from .apps import form_app
utils.app_runner_setup(__name__, form_app.app, 8059)
@nose.plugins.attrib.attr('client')
@webracer.config(host='localhost', port=8059)
class RequestViaFormTest(webracer.WebTestCase):
def test_get_form_as_url(self):
self.get('/method_check_form')
self.assert_status(200)
form = self.response.form()
self.get(form)
self.assertEqual('GET', self.response.body)
def test_post_form_as_url(self):
self.get('/textarea')
self.assert_status(200)
form = self.response.form()
self.post(form)
self.assertEqual('{}', self.response.body)
def test_post_form_with_elements(self):
self.get('/textarea')
self.assert_status(200)
form = self.response.form()
elements = form.elements
self.post(form, elements)
json = self.response.json
self.assertEqual(dict(field='hello world'), json)
def test_post_form_with_mutated_elements(self):
self.get('/textarea')
self.assert_status(200)
form = self.response.form()
elements = form.elements.mutable
elements.set_value('field', 'changed')
self.post(form, elements)
json = self.response.json
self.assertEqual(dict(field='changed'), json)
| bsd-2-clause | 4,729,082,334,174,765,000 | 28.02 | 57 | 0.598897 | false | 3.818421 | true | false | false |
Ichag/django-timelinejs3 | timeline/migrations/0009_auto_20150819_0648.py | 1 | 3020 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('timeline', '0008_auto_20150818_2241'),
]
operations = [
migrations.AlterField(
model_name='options',
name='duration',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='options',
name='height',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='options',
name='marker_height_min',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='options',
name='marker_padding',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='options',
name='marker_width_min',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='options',
name='menubar_height',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='options',
name='optimal_tick_width',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='options',
name='scale_factor',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='options',
name='skinny_size',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='options',
name='slide_default_fade',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='options',
name='slide_padding_lr',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='options',
name='start_at_slide',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='options',
name='timenav_height',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='options',
name='timenav_height_min',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='options',
name='timenav_height_percentage',
field=models.IntegerField(null=True, blank=True),
),
migrations.AlterField(
model_name='options',
name='width',
field=models.IntegerField(null=True, blank=True),
),
]
| bsd-3-clause | 4,725,792,827,878,784,000 | 31.12766 | 61 | 0.54404 | false | 4.596651 | false | false | false |
whiteshield/EHScripter | EHScripter/netsparker.py | 1 | 6143 | ##!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import re
import string
from io import StringIO
from lxml import etree
try:
from .util import *
except Exception as e:
from util import *
class NetsparkerToMarkdown:
def __init__(self, options):
self.options=options
self.template=string.Template(self.options['template'])
if self.options['merge']:
self.template=string.Template(self.options['merge_template'])
self.merge_findinglist_template=string.Template(self.options['merge_findinglist_template'])
self.process()
def process(self):
if not os.path.exists(self.options['output_dir']):
os.makedirs(self.options['output_dir'])
filelist=[]
if os.path.isfile(self.options['load_file']):
filelist.append(self.options['load_file'])
elif os.path.isdir(self.options['load_file']):
for name in os.listdir(self.options["load_file"]):
if os.path.isfile(self.options['load_file']+'/'+name) and len(name)>11 and name[-11:]==".netsparker":
filelist.append(self.options["load_file"]+'/'+name)
counter=1
findings={}
for processfile in filelist:
content=open(processfile).read()
fileparts=content.split('<!-- Vulnerability Details -->')
vulns=fileparts[1].split('<h1')
fullparser=etree.HTMLParser()
fullhtml=etree.parse(StringIO(content), fullparser)
Target=self.attrib(fullhtml.xpath("//span[@class='dashboard-url']/a"),'href','N/A')
for vuln in vulns[1:]:
vuln='<h1'+vuln
parser=etree.HTMLParser()
vulnobj=etree.parse(StringIO(vuln), parser)
h1=self.value(vulnobj.xpath('//h1//text()'),'N/A')
Vulnerability=re.sub(r'\d+\\\. ','',h1)
Risk=self.value(vulnobj.xpath("//div[@class='vuln-block']/div[2]//text()"),'N/A').title()
VulnDesc=self.value(vulnobj.xpath("//div[@class='vulndesc']//text()"),'N/A')
if Risk=='Information':
Risk='Info'
if Risk=='Important':
Risk='High'
VulnDetails=vulnobj.xpath("//div[@class='vulnerability-detail']")
for VulnDetail in VulnDetails:
h2=self.value(VulnDetail.xpath('./div/h2//text()'),'N/A')
SubVulnerability=re.sub(r'\d+\.\d+\. ','',h2)
Link=self.attrib(VulnDetail.xpath('./div/div[2]/a'),'href','N/A')
ParamTableRows=VulnDetail.xpath('./div/table//tr')
lines=0;
ParamTable=''
for ParamTableRow in ParamTableRows:
ParamTableCells=ParamTableRow.xpath('./td')
cells=0
for ParamTableCell in ParamTableCells:
cell=self.value(ParamTableCell.xpath('.//text()'),'N/A').strip()
ParamTable+='| %s '%cell
cells+=1
ParamTable='%s|\n'%ParamTable
if lines==0:
sepstr=''
for i in range(0,cells):
sepstr+='| ------- '
sepstr='%s|\n'%sepstr
ParamTable+=sepstr
lines+=1
d={'Target':Target, 'Vulnerability':Vulnerability, 'Risk':Risk, 'VulnDesc':VulnDesc, 'SubVulnerability':SubVulnerability, 'Link':Link, 'ParamTable':ParamTable,'findinglist':''}
if not self.options['merge']:
dirname=slugify('%s-%s-%s-%04d-netsparker'%(Risk, Target, Vulnerability, counter))
if not os.path.exists(self.options['output_dir']+'/'+dirname):
os.makedirs(self.options['output_dir']+'/'+dirname)
counter+=1
temp=self.template
text=temp.substitute(d)
if self.options['result_overwrite'] or (not os.path.exists(self.options['output_dir']+'/'+dirname+'/document.md')):
tmpfile = open(self.options['output_dir']+'/'+dirname+'/document.md', 'w');
tmpfile.write(text)
tmpfile.close()
else :
slug=slugify('%s-%s-netsparker'%(Risk, Vulnerability))
if not findings.get(slug):
findings[slug]=[]
findings[slug].append(d)
for key, values in findings.items():
findinglist = ''
for d in values:
d['VulnDesc']=d['VulnDesc'].replace('$','$$')
d['ParamTable']=d['ParamTable'].replace('$','$$')
d['Link']=d['Link'].replace('$','$$')
temp=self.merge_findinglist_template
text=temp.substitute(d)
findinglist+=text+"\n\n"
d['findinglist']=findinglist
filename=key+".md";
temp=self.template
text=temp.substitute(d)
if self.options['result_overwrite'] or (not os.path.exists(self.options['output_dir']+'/'+filename)):
tmpfile = open(self.options['output_dir']+'/'+filename, 'w');
tmpfile.write(text)
tmpfile.close()
def value(self, x, default):
try:
#ret=x[0].strip()
ret="\n".join([html2markdown(html2markdown(y.strip(), True)) for y in x])
except Exception as e:
try:
ret=x.strip()
except Exception as ee:
ret=default
return ret
def attrib(self, x, attr, default):
try:
ret=x[0].attrib[attr]
except Exception as e:
try:
ret=x.attrib[attr]
except Exception as ee:
ret=default
return ret
| gpl-2.0 | 4,605,033,301,372,981,000 | 44.503704 | 196 | 0.496988 | false | 4.122819 | false | false | false |
swcurran/tfrs | backend/api/models/UserViewModel.py | 1 | 1507 | """
REST API Documentation for the NRS TFRS Credit Trading Application
The Transportation Fuels Reporting System is being designed to streamline compliance reporting for transportation fuel suppliers in accordance with the Renewable & Low Carbon Fuel Requirements Regulation.
OpenAPI spec version: v1
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.db import models
class UserViewModel(models.Model):
given_name = models.CharField(max_length=255, blank=True, null=True)
surname = models.CharField(max_length=255, blank=True, null=True)
email = models.CharField(max_length=255, blank=True, null=True)
active = models.BooleanField()
sm_authorization_id = models.CharField(max_length=255, blank=True, null=True)
user_roles = models.ManyToManyField('UserRole',
related_name='UserViewModeluser_roles',
blank=True)
class Meta:
abstract = True
| apache-2.0 | 9,088,370,143,756,785,000 | 40.861111 | 208 | 0.704048 | false | 4.406433 | false | false | false |
vanesa/kid-o | kido/admin/utils.py | 1 | 3921 | # -*- coding: utf-8 -*-
""" Flask-Admin utilities."""
from flask import abort, redirect, request, url_for
from flask_admin import AdminIndexView, expose
from flask_admin.base import MenuLink
from flask_admin.contrib.sqla import ModelView
from flask_login import current_user
from functools import wraps
from kido import app
from kido.constants import PERMISSION_ADMIN
def admin_required(f):
@wraps(f)
def decorated(*args, **kwargs):
if not current_user.is_authenticated:
return redirect(url_for("views.general.login", next=request.url))
users_permissions = current_user.permissions
if PERMISSION_ADMIN not in users_permissions:
app.logger.debug("Not an admin")
abort(404)
return f(*args, **kwargs)
return decorated
def permission_required(permissions):
if not isinstance(permissions, (list, set, tuple)):
permissions = [permissions]
permissions = [x.upper() for x in permissions]
def decorator(method):
@wraps(method)
def f(*args, **kwargs):
if not current_user.is_authenticated:
return redirect(url_for("views.general.login", next=request.url))
users_permissions = current_user.permissions
if PERMISSION_ADMIN not in users_permissions:
for permission in permissions:
if permission not in users_permissions:
app.logger.debug("Missing permission: {0}".format(permission))
abort(404)
return method(*args, **kwargs)
return f
return decorator
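# Illustrative usage sketch (not part of the original module): a view wrapped
# by permission_required.  The permission name "REPORTS" is hypothetical; any
# user holding PERMISSION_ADMIN passes the check as well.
@permission_required("REPORTS")
def _demo_reports_view():
    return "visible only to admins or holders of the REPORTS permission"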
class AuthenticatedMenuLink(MenuLink):
def is_accessible(self):
return current_user.is_authenticated
class CustomAdminIndexView(AdminIndexView):
extra_css = None
extra_js = None
@expose("/")
@admin_required
def index(self):
if not current_user.is_authenticated:
return redirect(url_for("views.general.login", next=request.url))
return super(CustomAdminIndexView, self).index()
@expose("/login/")
def login_view(self):
return redirect(url_for("views.general.login", next=request.url))
@expose("/logout/")
def logout_view(self):
return redirect("/logout")
class CustomModelView(ModelView):
page_size = 50
extra_css = None
extra_js = None
action_template = "admin/action.html"
edit_template = "admin/model/edit.html"
create_template = "admin/model/create.html"
list_template = "admin/model/custom_list.html"
_include = None
class_attributes = [
"page_size",
"can_create",
"can_edit",
"can_delete",
"column_searchable_list",
"column_filters",
"column_exclude_list",
"column_default_sort",
]
def __init__(self, *args, **kwargs):
if "exclude" in kwargs:
self.form_excluded_columns = kwargs["exclude"]
del kwargs["exclude"]
if "include" in kwargs:
self._include = kwargs["include"]
del kwargs["include"]
for item in self.class_attributes:
if item in kwargs:
setattr(self, item, kwargs[item])
del kwargs[item]
super(CustomModelView, self).__init__(*args, **kwargs)
def get_list_columns(self):
if self._include:
return self.get_column_names(
only_columns=self.scaffold_list_columns() + self._include,
excluded_columns=self.column_exclude_list,
)
return super(CustomModelView, self).get_list_columns()
def is_accessible(self):
if not current_user.is_authenticated:
return False
users_permissions = current_user.permissions
return PERMISSION_ADMIN in users_permissions
def inaccessible_callback(self, name, **kwargs):
return abort(404)
| bsd-3-clause | -352,002,768,738,183,740 | 28.931298 | 86 | 0.61872 | false | 4.193583 | false | false | false |
eepgwde/pyeg0 | gmus/GMus0.py | 1 | 1699 | ## @file GMus0.py
# @brief Application support class for the Unofficial Google Music API.
# @author weaves
#
# @details
# This class uses @c gmusicapi.
#
# @note
# An application support class is one that uses a set of driver classes
# to provide a set of higher-level application specific methods.
#
# @see
# https://github.com/simon-weber/Unofficial-Google-Music-API
# http://unofficial-google-music-api.readthedocs.org/en/latest/
from __future__ import print_function
from GMus00 import GMus00
import logging
import ConfigParser, os, logging
import pandas as pd
import json
from gmusicapi import Mobileclient
## Set of file paths for the configuration file.
paths = ['site.cfg', os.path.expanduser('~/share/site/.safe/gmusic.cfg')]
## Google Music API login, search and result cache.
#
# The class needs to a configuration file with these contents. (The
# values of the keys must be a valid Google Play account.)
#
# <pre>
# [credentials]
# username=username\@gmail.com
# password=SomePassword9
# </pre>
class GMus0(GMus00):
## Ad-hoc method to find the indices of duplicated entries.
def duplicated(self):
# self._df = self._df.sort(['album', 'title', 'creationTimestamp'],
# ascending=[1, 1, 0])
df = self.df[list(['title', 'album', 'creationTimestamp'])]
df['n0'] = df['title'] + '|' + df['album']
df = df.sort(['n0','creationTimestamp'], ascending=[1, 0])
# Only rely on counts of 2.
s0 = pd.Series(df.n0)
s1 = s0.value_counts()
s2 = set( (s1[s1.values >= 2]).index )
df1 = df[df.n0.isin(s2)]
df1['d'] = df1.duplicated('n0')
s3 = list(df1[df1.d].index)
return s3
| gpl-3.0 | 6,076,076,360,128,342,000 | 30.462963 | 74 | 0.656857 | false | 3.23619 | false | false | false |
mdmueller/ascii-profiling | parallel.py | 1 | 4245 | import timeit
import time
from astropy.io import ascii
import pandas
import numpy as np
from astropy.table import Table, Column
from tempfile import NamedTemporaryFile
import random
import string
import matplotlib.pyplot as plt
import webbrowser
def make_table(table, size=10000, n_floats=10, n_ints=0, n_strs=0, float_format=None, str_val=None):
if str_val is None:
str_val = "abcde12345"
cols = []
for i in xrange(n_floats):
dat = np.random.uniform(low=1, high=10, size=size)
cols.append(Column(dat, name='f{}'.format(i)))
for i in xrange(n_ints):
dat = np.random.randint(low=-9999999, high=9999999, size=size)
cols.append(Column(dat, name='i{}'.format(i)))
for i in xrange(n_strs):
if str_val == 'random':
dat = np.array([''.join([random.choice(string.letters) for j in range(10)]) for k in range(size)])
else:
dat = np.repeat(str_val, size)
cols.append(Column(dat, name='s{}'.format(i)))
t = Table(cols)
if float_format is not None:
for col in t.columns.values():
if col.name.startswith('f'):
col.format = float_format
t.write(table.name, format='ascii')
output_text = []
def plot_case(n_floats=10, n_ints=0, n_strs=0, float_format=None, str_val=None):
global table1, output_text
n_rows = (10000, 20000, 50000, 100000, 200000) # include 200000 for publish run
numbers = (1, 1, 1, 1, 1)
repeats = (3, 2, 1, 1, 1)
times_fast = []
times_fast_parallel = []
times_pandas = []
for n_row, number, repeat in zip(n_rows, numbers, repeats):
table1 = NamedTemporaryFile()
make_table(table1, n_row, n_floats, n_ints, n_strs, float_format, str_val)
t = timeit.repeat("ascii.read(table1.name, format='basic', guess=False, use_fast_converter=True)",
setup='from __main__ import ascii, table1', number=number, repeat=repeat)
times_fast.append(min(t) / number)
t = timeit.repeat("ascii.read(table1.name, format='basic', guess=False, parallel=True, use_fast_converter=True)",
setup='from __main__ import ascii, table1', number=number, repeat=repeat)
times_fast_parallel.append(min(t) / number)
t = timeit.repeat("pandas.read_csv(table1.name, sep=' ', header=0)",
setup='from __main__ import table1, pandas', number=number, repeat=repeat)
times_pandas.append(min(t) / number)
plt.loglog(n_rows, times_fast, '-or', label='io.ascii Fast-c')
plt.loglog(n_rows, times_fast_parallel, '-og', label='io.ascii Parallel Fast-c')
plt.loglog(n_rows, times_pandas, '-oc', label='Pandas')
plt.grid()
plt.legend(loc='best')
plt.title('n_floats={} n_ints={} n_strs={} float_format={} str_val={}'.format(
n_floats, n_ints, n_strs, float_format, str_val))
plt.xlabel('Number of rows')
plt.ylabel('Time (sec)')
img_file = 'graph{}.png'.format(len(output_text) + 1)
plt.savefig(img_file)
plt.clf()
text = 'Pandas to io.ascii Fast-C speed ratio: {:.2f} : 1<br/>'.format(times_fast[-1] / times_pandas[-1])
text += 'io.ascii parallel to Pandas speed ratio: {:.2f} : 1'.format(times_pandas[-1] / times_fast_parallel[-1])
output_text.append((img_file, text))
plot_case(n_floats=10, n_ints=0, n_strs=0)
plot_case(n_floats=10, n_ints=10, n_strs=10)
plot_case(n_floats=10, n_ints=10, n_strs=10, float_format='%.4f')
plot_case(n_floats=10, n_ints=0, n_strs=0, float_format='%.4f')
plot_case(n_floats=0, n_ints=0, n_strs=10)
plot_case(n_floats=0, n_ints=0, n_strs=10, str_val="'asdf asdfa'")
plot_case(n_floats=0, n_ints=0, n_strs=10, str_val="random")
plot_case(n_floats=0, n_ints=10, n_strs=0)
html_file = open('out.html', 'w')
html_file.write('<html><head><meta charset="utf-8"/><meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>')
html_file.write('</html><body><h1 style="text-align:center;">Profile of io.ascii</h1>')
for img, descr in output_text:
html_file.write('<img src="{}"><p style="font-weight:bold;">{}</p><hr>'.format(img, descr))
html_file.write('</body></html>')
html_file.close()
webbrowser.open('out.html')
| mit | -9,088,434,750,835,889,000 | 45.141304 | 122 | 0.623793 | false | 2.983134 | false | false | false |
patriciohc/carga-de-xls-a-MySQL | Choose_file.py | 1 | 3639 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Choose_file.ui'
#
# Created: Sat Oct 17 15:55:19 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(524, 146)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.verticalLayoutWidget = QtGui.QWidget(self.centralwidget)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 10, 501, 81))
self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.label = QtGui.QLabel(self.verticalLayoutWidget)
self.label.setObjectName(_fromUtf8("label"))
self.horizontalLayout_2.addWidget(self.label)
self.txtFile = QtGui.QLineEdit(self.verticalLayoutWidget)
self.txtFile.setObjectName(_fromUtf8("txtFile"))
self.horizontalLayout_2.addWidget(self.txtFile)
self.btChooseFile = QtGui.QPushButton(self.verticalLayoutWidget)
self.btChooseFile.setObjectName(_fromUtf8("btChooseFile"))
self.horizontalLayout_2.addWidget(self.btChooseFile)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.btClose = QtGui.QPushButton(self.verticalLayoutWidget)
self.btClose.setObjectName(_fromUtf8("btClose"))
self.horizontalLayout.addWidget(self.btClose)
self.btLoadFile = QtGui.QPushButton(self.verticalLayoutWidget)
self.btLoadFile.setObjectName(_fromUtf8("btLoadFile"))
self.horizontalLayout.addWidget(self.btLoadFile)
self.verticalLayout.addLayout(self.horizontalLayout)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 524, 25))
self.menubar.setObjectName(_fromUtf8("menubar"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
self.label.setText(_translate("MainWindow", "File", None))
self.btChooseFile.setText(_translate("MainWindow", "Choose", None))
self.btClose.setText(_translate("MainWindow", "Cerrar", None))
self.btLoadFile.setText(_translate("MainWindow", "Cargar Archivo", None))
| apache-2.0 | -8,237,645,389,629,048,000 | 46.25974 | 82 | 0.718054 | false | 4.206936 | false | false | false |
c0cky/mediathread | mediathread/djangosherd/api.py | 1 | 4549 | # pylint: disable-msg=R0904
from tastypie import fields
from tastypie.resources import ModelResource
from mediathread.api import UserResource, TagResource
from mediathread.assetmgr.models import Asset
from mediathread.djangosherd.models import SherdNote, DiscussionIndex
from mediathread.projects.models import ProjectNote
from mediathread.taxonomy.api import TermResource
from mediathread.taxonomy.models import TermRelationship
class SherdNoteResource(ModelResource):
author = fields.ForeignKey(UserResource, 'author',
full=True, null=True, blank=True)
class Meta:
queryset = SherdNote.objects.select_related('asset').order_by("id")
excludes = ['tags', 'body', 'added', 'modified']
list_allowed_methods = []
detail_allowed_methods = []
def dehydrate(self, bundle):
try:
bundle.data['is_global_annotation'] = \
bundle.obj.is_global_annotation()
bundle.data['asset_id'] = str(bundle.obj.asset.id)
bundle.data['is_null'] = bundle.obj.is_null()
bundle.data['annotation'] = bundle.obj.annotation()
bundle.data['url'] = bundle.obj.get_absolute_url()
modified = bundle.obj.modified.strftime("%m/%d/%y %I:%M %p") \
if bundle.obj.modified else ''
bundle.data['metadata'] = {
'tags': TagResource().render_list(bundle.request,
bundle.obj.tags_split()),
'body': bundle.obj.body.strip() if bundle.obj.body else '',
'primary_type': bundle.obj.asset.primary.label,
'modified': modified,
'timecode': bundle.obj.range_as_timecode(),
'title': bundle.obj.title
}
editable = (bundle.request.user.id ==
getattr(bundle.obj, 'author_id', -1))
citable = bundle.request.GET.get('citable', '') == 'true'
# assumed: there is only one ProjectNote per annotation
reference = ProjectNote.objects.filter(
annotation__id=bundle.obj.id).first()
if reference:
# notes in a submitted response are not editable
editable = editable and not reference.project.is_submitted()
if citable:
# this is a heavy operation. don't call it unless needed
citable = reference.project.can_cite(bundle.request.course,
bundle.request.user)
bundle.data['editable'] = editable
bundle.data['citable'] = citable
termResource = TermResource()
vocabulary = {}
related = TermRelationship.objects.get_for_object(
bundle.obj).prefetch_related('term__vocabulary')
for rel in related:
if rel.term.vocabulary.id not in vocabulary:
vocabulary[rel.term.vocabulary.id] = {
'id': rel.term.vocabulary.id,
'display_name': rel.term.vocabulary.display_name,
'terms': []
}
vocabulary[rel.term.vocabulary.id]['terms'].append(
termResource.render_one(bundle.request, rel.term))
bundle.data['vocabulary'] = vocabulary.values()
except Asset.DoesNotExist:
bundle.data['asset_id'] = ''
bundle.data['metadata'] = {'title': 'Item Deleted'}
return bundle
def render_one(self, request, selection, asset_key):
# assumes user is allowed to see this note
bundle = self.build_bundle(obj=selection, request=request)
dehydrated = self.full_dehydrate(bundle)
bundle.data['asset_key'] = '%s_%s' % (asset_key,
bundle.data['asset_id'])
return self._meta.serializer.to_simple(dehydrated, None)
class DiscussionIndexResource(object):
def render_list(self, request, indicies):
collaborations = DiscussionIndex.with_permission(request, indicies)
ctx = {
'references': [{
'id': obj.collaboration.object_pk,
'title': obj.collaboration.title,
'type': obj.get_type_label(),
'url': obj.get_absolute_url(),
'modified': obj.modified.strftime("%m/%d/%y %I:%M %p")}
for obj in collaborations]}
return ctx
| gpl-2.0 | -7,704,276,080,529,471,000 | 41.915094 | 79 | 0.56386 | false | 4.275376 | false | false | false |
fdslight/fdslight | freenet/handlers/tundev.py | 1 | 5566 | #!/usr/bin/env python3
import os, sys
import pywind.evtframework.handlers.handler as handler
import freenet.lib.fn_utils as fn_utils
import freenet.lib.simple_qos as simple_qos
try:
import fcntl
except ImportError:
pass
class tun_base(handler.handler):
__creator_fd = None
    # IP packets queued to be written to the tun device
    ___ip_packets_for_write = []
    # maximum number of IP packets allowed in the tun write queue
    __MAX_WRITE_QUEUE_SIZE = 1024
    # current number of IP packets waiting to be written to the tun device
    __current_write_queue_n = 0
__BLOCK_SIZE = 16 * 1024
__qos = None
def __create_tun_dev(self, name):
"""创建tun 设备
:param name:
:return fd:
"""
tun_fd = fn_utils.tuntap_create(name, fn_utils.IFF_TUN | fn_utils.IFF_NO_PI)
fn_utils.interface_up(name)
if tun_fd < 0:
raise SystemError("can not create tun device,please check your root")
return tun_fd
@property
def creator(self):
return self.__creator_fd
def init_func(self, creator_fd, tun_dev_name, *args, **kwargs):
"""
:param creator_fd:
        :param tun_dev_name: name of the tun device
        :param subnet: required only when running as the server side
"""
tun_fd = self.__create_tun_dev(tun_dev_name)
if tun_fd < 3:
print("error:create tun device failed:%s" % tun_dev_name)
sys.exit(-1)
self.__creator_fd = creator_fd
self.__qos = simple_qos.qos(simple_qos.QTYPE_DST)
self.set_fileno(tun_fd)
fcntl.fcntl(tun_fd, fcntl.F_SETFL, os.O_NONBLOCK)
self.dev_init(tun_dev_name, *args, **kwargs)
return tun_fd
def dev_init(self, dev_name, *args, **kwargs):
pass
def evt_read(self):
for i in range(32):
try:
ip_packet = os.read(self.fileno, self.__BLOCK_SIZE)
except BlockingIOError:
break
self.__qos.add_to_queue(ip_packet)
self.__qos_from_tundev()
def task_loop(self):
self.__qos_from_tundev()
def __qos_from_tundev(self):
results = self.__qos.get_queue()
for ip_packet in results:
self.handle_ip_packet_from_read(ip_packet)
if not results:
self.del_loop_task(self.fileno)
else:
self.add_to_loop_task(self.fileno)
def evt_write(self):
try:
ip_packet = self.___ip_packets_for_write.pop(0)
except IndexError:
self.remove_evt_write(self.fileno)
return
self.__current_write_queue_n -= 1
try:
os.write(self.fileno, ip_packet)
except BlockingIOError:
self.__current_write_queue_n += 1
self.___ip_packets_for_write.insert(0, ip_packet)
return
''''''
def handle_ip_packet_from_read(self, ip_packet):
"""处理读取过来的IP包,重写这个方法
:param ip_packet:
:return None:
"""
pass
def handle_ip_packet_for_write(self, ip_packet):
"""处理要写入的IP包,重写这个方法
:param ip_packet:
:return new_ip_packet:
"""
pass
def error(self):
self.dev_error()
def dev_error(self):
"""重写这个方法
:return:
"""
pass
def timeout(self):
self.dev_timeout()
def dev_timeout(self):
"""重写这个方法
:return:
"""
pass
def delete(self):
self.dev_delete()
def dev_delete(self):
"""重写这个方法
:return:
"""
pass
def add_to_sent_queue(self, ip_packet):
        # drop packets that exceed the limit to avoid excessive memory consumption
n_ip_message = self.handle_ip_packet_for_write(ip_packet)
if not n_ip_message: return
if self.__current_write_queue_n == self.__MAX_WRITE_QUEUE_SIZE:
            # drop the oldest packet to keep the queue from growing too large
self.__current_write_queue_n -= 1
self.___ip_packets_for_write.pop(0)
return
self.__current_write_queue_n += 1
self.___ip_packets_for_write.append(n_ip_message)
class tundevs(tun_base):
"""服务端的tun数据处理
"""
def dev_init(self, dev_name):
self.register(self.fileno)
self.add_evt_read(self.fileno)
def handle_ip_packet_from_read(self, ip_packet):
self.dispatcher.send_msg_to_tunnel_from_tun(ip_packet)
def handle_ip_packet_for_write(self, ip_packet):
return ip_packet
def dev_delete(self):
self.unregister(self.fileno)
os.close(self.fileno)
def dev_error(self):
self.delete_handler(self.fileno)
def dev_timeout(self):
pass
def handle_msg_from_tunnel(self, message):
self.add_to_sent_queue(message)
self.add_evt_write(self.fileno)
class tundevc(tun_base):
def dev_init(self, dev_name):
self.register(self.fileno)
self.add_evt_read(self.fileno)
def handle_ip_packet_from_read(self, ip_packet):
self.dispatcher.handle_msg_from_tundev(ip_packet)
def handle_ip_packet_for_write(self, ip_packet):
return ip_packet
def dev_delete(self):
self.unregister(self.fileno)
os.close(self.fileno)
def dev_error(self):
self.delete_handler(self.fileno)
def dev_timeout(self):
pass
def msg_from_tunnel(self, message):
self.add_to_sent_queue(message)
self.add_evt_write(self.fileno)
| bsd-2-clause | 794,198,108,318,362,000 | 23.490741 | 84 | 0.567864 | false | 3.099004 | false | false | false |
vesellov/bitdust.devel | customer/data_sender.py | 1 | 14665 | #!/usr/bin/python
# data_sender.py
#
# Copyright (C) 2008-2018 Veselin Penev, https://bitdust.io
#
# This file (data_sender.py) is part of BitDust Software.
#
# BitDust is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BitDust Software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with BitDust Software. If not, see <http://www.gnu.org/licenses/>.
#
# Please contact us if you have any questions at [email protected]
#
#
#
#
"""
.. module:: data_sender.
.. raw:: html
<a href="https://bitdust.io/automats/data_sender/data_sender.png" target="_blank">
<img src="https://bitdust.io/automats/data_sender/data_sender.png" style="max-width:100%;">
</a>
A state machine to manage the data sending process; it acts very simply:
1) when new local data is created it tries to send it to the correct supplier
2) wait while ``p2p.io_throttle`` is doing some data transmission to remote suppliers
3) calls ``p2p.backup_matrix.ScanBlocksToSend()`` to get a list of pieces that need to be sent
4) this machine is restarted every minute to check if some more data needs to be sent
5) it can also be restarted at any time when needed
EVENTS:
* :red:`block-acked`
* :red:`block-failed`
* :red:`init`
* :red:`new-data`
* :red:`restart`
* :red:`scan-done`
* :red:`timer-1min`
* :red:`timer-1sec`
"""
#------------------------------------------------------------------------------
from __future__ import absolute_import
from io import open
#------------------------------------------------------------------------------
_Debug = True
_DebugLevel = 12
#------------------------------------------------------------------------------
import os
import time
#------------------------------------------------------------------------------
from logs import lg
from automats import automat
from automats import global_state
from lib import misc
from lib import packetid
from contacts import contactsdb
from userid import my_id
from main import settings
from p2p import contact_status
from . import io_throttle
#------------------------------------------------------------------------------
_DataSender = None
_ShutdownFlag = False
#------------------------------------------------------------------------------
def A(event=None, arg=None):
"""
Access method to interact with the state machine.
"""
global _DataSender
if _DataSender is None:
_DataSender = DataSender(
name='data_sender',
state='READY',
debug_level=_DebugLevel,
log_events=_Debug,
log_transitions=_Debug,
)
if event is not None:
_DataSender.automat(event, arg)
return _DataSender
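
# Typical usage elsewhere in the package (a sketch, based on the transition
# table in DataSender.A() below): fire events through the module-level accessor,
#   A('init')        # create the instance, it starts in the READY state
#   A('new-data')    # trigger a scan/send cycle when new local data appears
#   A('restart')     # force a rescan at any time
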
def Destroy():
"""
Destroy the state machine and remove the instance from memory.
"""
global _DataSender
if _DataSender is None:
return
_DataSender.destroy()
del _DataSender
_DataSender = None
class DataSender(automat.Automat):
"""
A class to manage process of sending data packets to remote suppliers.
"""
timers = {
        'timer-1min': (60, ['READY']),
'timer-1sec': (1.0, ['SENDING']),
}
statistic = {}
def state_changed(self, oldstate, newstate, event, arg):
global_state.set_global_state('DATASEND ' + newstate)
def A(self, event, arg):
#---READY---
if self.state == 'READY':
if event == 'new-data' or event == 'timer-1min' or event == 'restart':
self.state = 'SCAN_BLOCKS'
self.doScanAndQueue(arg)
elif event == 'init':
pass
#---SCAN_BLOCKS---
elif self.state == 'SCAN_BLOCKS':
if event == 'scan-done' and self.isQueueEmpty(arg):
self.state = 'READY'
self.doRemoveUnusedFiles(arg)
elif event == 'scan-done' and not self.isQueueEmpty(arg):
self.state = 'SENDING'
#---SENDING---
elif self.state == 'SENDING':
if event == 'restart' or ( ( event == 'timer-1sec' or event == 'block-acked' or event == 'block-failed' or event == 'new-data' ) and self.isQueueEmpty(arg) ):
self.state = 'SCAN_BLOCKS'
self.doScanAndQueue(arg)
return None
def isQueueEmpty(self, arg):
if not arg:
return io_throttle.IsSendingQueueEmpty()
remoteID, _ = arg
return io_throttle.OkToSend(remoteID)
def doScanAndQueue(self, arg):
global _ShutdownFlag
if _Debug:
lg.out(_DebugLevel, 'data_sender.doScanAndQueue _ShutdownFlag=%r' % _ShutdownFlag)
if _Debug:
log = open(os.path.join(settings.LogsDir(), 'data_sender.log'), 'w')
log.write(u'doScanAndQueue %s\n' % time.asctime()) # .decode('utf-8')
if _ShutdownFlag:
if _Debug:
log.write(u'doScanAndQueue _ShutdownFlag is True\n')
self.automat('scan-done')
if _Debug:
log.flush()
log.close()
return
for customer_idurl in contactsdb.known_customers():
if '' not in contactsdb.suppliers(customer_idurl):
from storage import backup_matrix
for backupID in misc.sorted_backup_ids(
list(backup_matrix.local_files().keys()), True):
this_customer_idurl = packetid.CustomerIDURL(backupID)
if this_customer_idurl != customer_idurl:
continue
packetsBySupplier = backup_matrix.ScanBlocksToSend(backupID)
if _Debug:
log.write(u'%s\n' % packetsBySupplier)
for supplierNum in packetsBySupplier.keys():
supplier_idurl = contactsdb.supplier(supplierNum, customer_idurl=customer_idurl)
if not supplier_idurl:
lg.warn('unknown supplier_idurl supplierNum=%s for %s, customer_idurl=%s' % (
supplierNum, backupID, customer_idurl))
continue
for packetID in packetsBySupplier[supplierNum]:
backupID_, _, supplierNum_, _ = packetid.BidBnSnDp(packetID)
if backupID_ != backupID:
lg.warn('unexpected backupID supplierNum=%s for %s, customer_idurl=%s' % (
packetID, backupID, customer_idurl))
continue
if supplierNum_ != supplierNum:
lg.warn('unexpected supplierNum %s for %s, customer_idurl=%s' % (
packetID, backupID, customer_idurl))
continue
if io_throttle.HasPacketInSendQueue(
supplier_idurl, packetID):
if _Debug:
log.write(u'%s already in sending queue for %s\n' % (packetID, supplier_idurl))
continue
if not io_throttle.OkToSend(supplier_idurl):
if _Debug:
log.write(u'skip, not ok to send %s\n' % supplier_idurl)
continue
customerGlobalID, pathID = packetid.SplitPacketID(packetID)
# tranByID = gate.transfers_out_by_idurl().get(supplier_idurl, [])
# if len(tranByID) > 3:
# log.write(u'transfers by %s: %d\n' % (supplier_idurl, len(tranByID)))
# continue
customerGlobalID, pathID = packetid.SplitPacketID(packetID)
filename = os.path.join(
settings.getLocalBackupsDir(),
customerGlobalID,
pathID,
)
if not os.path.isfile(filename):
if _Debug:
log.write(u'%s is not a file\n' % filename)
continue
if io_throttle.QueueSendFile(
filename,
packetID,
supplier_idurl,
my_id.getLocalID(),
self._packetAcked,
self._packetFailed,
):
if _Debug:
log.write(u'io_throttle.QueueSendFile %s\n' % packetID)
else:
if _Debug:
log.write(u'io_throttle.QueueSendFile FAILED %s\n' % packetID)
# lg.out(6, ' %s for %s' % (packetID, backupID))
# DEBUG
# break
self.automat('scan-done')
if _Debug:
log.flush()
log.close()
# def doPrintStats(self, arg):
# """
# """
# if lg.is_debug(18):
# transfers = transport_control.current_transfers()
# bytes_stats = transport_control.current_bytes_transferred()
# s = ''
# for info in transfers:
# s += '%s ' % (diskspace.MakeStringFromBytes(bytes_stats[info.transfer_id]).replace(' ', '').replace('bytes', 'b'))
# lg.out(0, 'transfers: ' + s[:120])
def doRemoveUnusedFiles(self, arg):
# we want to remove files for this block
# because we only need them during rebuilding
if settings.getBackupsKeepLocalCopies() is True:
# if user set this in settings - he want to keep the local files
return
# ... user do not want to keep local backups
if settings.getGeneralWaitSuppliers() is True:
from customer import fire_hire
# but he want to be sure - all suppliers are green for a long time
if len(contact_status.listOfflineSuppliers()) > 0 or time.time(
) - fire_hire.GetLastFireTime() < 24 * 60 * 60:
# some people are not there or we do not have stable team yet
# do not remove the files because we need it to rebuild
return
count = 0
from storage import backup_matrix
from storage import restore_monitor
from storage import backup_rebuilder
if _Debug:
lg.out(_DebugLevel, 'data_sender.doRemoveUnusedFiles')
for backupID in misc.sorted_backup_ids(
list(backup_matrix.local_files().keys())):
if restore_monitor.IsWorking(backupID):
if _Debug:
lg.out(
_DebugLevel,
' %s : SKIP, because restoring' %
backupID)
continue
if backup_rebuilder.IsBackupNeedsWork(backupID):
if _Debug:
lg.out(
_DebugLevel,
' %s : SKIP, because needs rebuilding' %
backupID)
continue
if not backup_rebuilder.ReadStoppedFlag():
if backup_rebuilder.A().currentBackupID is not None:
if backup_rebuilder.A().currentBackupID == backupID:
if _Debug:
lg.out(
_DebugLevel,
' %s : SKIP, because rebuilding is in process' %
backupID)
continue
packets = backup_matrix.ScanBlocksToRemove(
backupID, settings.getGeneralWaitSuppliers())
for packetID in packets:
customer, pathID = packetid.SplitPacketID(packetID)
filename = os.path.join(settings.getLocalBackupsDir(), customer, pathID)
if os.path.isfile(filename):
try:
os.remove(filename)
# lg.out(6, ' ' + os.path.basename(filename))
except:
lg.exc()
continue
count += 1
if _Debug:
lg.out(_DebugLevel, ' %d files were removed' % count)
backup_matrix.ReadLocalFiles()
def _packetAcked(self, packet, ownerID, packetID):
from storage import backup_matrix
backupID, blockNum, supplierNum, dataORparity = packetid.BidBnSnDp(packetID)
backup_matrix.RemoteFileReport(
backupID, blockNum, supplierNum, dataORparity, True)
if ownerID not in self.statistic:
self.statistic[ownerID] = [0, 0]
self.statistic[ownerID][0] += 1
self.automat('block-acked', (ownerID, packetID))
def _packetFailed(self, remoteID, packetID, why):
from storage import backup_matrix
backupID, blockNum, supplierNum, dataORparity = packetid.BidBnSnDp(
packetID)
backup_matrix.RemoteFileReport(
backupID, blockNum, supplierNum, dataORparity, False)
if remoteID not in self.statistic:
self.statistic[remoteID] = [0, 0]
self.statistic[remoteID][1] += 1
self.automat('block-failed', (remoteID, packetID))
def statistic():
"""
The ``data_sender()`` keeps track of sending results with every supplier.
    This is used by ``fire_hire()`` to decide how reliable a given
    supplier is.
"""
global _DataSender
if _DataSender is None:
return {}
return _DataSender.statistic
def SetShutdownFlag():
"""
Set flag to indicate that no need to send anything anymore.
"""
global _ShutdownFlag
_ShutdownFlag = True
| agpl-3.0 | 4,224,043,803,797,804,000 | 38.422043 | 170 | 0.512104 | false | 4.385467 | false | false | false |
agacek/camkes-tool | camkes/internal/version.py | 1 | 1813 | #
# Copyright 2014, NICTA
#
# This software may be distributed and modified according to the terms of
# the BSD 2-Clause license. Note that NO WARRANTY is provided.
# See "LICENSE_BSD2.txt" for details.
#
# @TAG(NICTA_BSD)
#
'''Versioning functionality. This computes a version identifier based on the
current source code state. It was decided this was more reliable while the tool
is under active development. Note that any extraneous files in your source
directory that match the version filters will be accumulated in the version
computation.'''
from memoization import memoized
import hashlib, os, re
@memoized
def version():
# Files to consider relevant. Each entry should be a pair of (path, filter)
# where 'path' is relative to the directory of this file and 'filter' is a
# regex describing which filenames to match under the given path.
SOURCES = [
('../', r'^.*\.py$'), # Python sources
('../templates', r'.*'), # Templates
]
my_path = os.path.dirname(os.path.abspath(__file__))
sources = set()
# Accumulate all relevant source files.
for s in SOURCES:
path = os.path.join(my_path, s[0])
regex = re.compile(s[1])
for root, _, files in os.walk(path):
for f in files:
if regex.match(f):
sources.add(os.path.abspath(os.path.join(root, f)))
# Hash each file and hash a concatenation of these hashes. Note, hashing a
# hash is not good practice for cryptography, but it's fine for this
# purpose.
hfinal = hashlib.sha1() #pylint: disable=E1101
for s in sources:
with open(s, 'r') as f:
h = hashlib.sha1(f.read()).hexdigest() #pylint: disable=E1101
hfinal.update('%s|' % h) #pylint: disable=E1101
return hfinal.hexdigest()
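
# Note: the result is a 40-character SHA-1 hex digest; it changes whenever any
# matching source or template file changes, and is memoized so it is computed
# at most once per run.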
| bsd-2-clause | -41,992,440,657,990,110 | 35.26 | 79 | 0.656922 | false | 3.832981 | false | false | false |
ibrica/universe-server | play.py | 1 | 1073 | from multiprocessing import Process
import time
import gym
import universe
from universe.spaces.vnc_event import keycode
from envs import create_env
def start_game(model, env_name):
"""regular Python process, not using torch"""
p = Process(target=play_game, args=(model,env_name))
p.start()
# Don't wait with join, respond to user request
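
# Example (a sketch; the environment id below is only illustrative):
#   start_game(model=None, env_name='flashgames.DuskDrive-v0')
# launches the game loop in a background process and returns immediately.
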
def play_game(model, env_name):
"""Play game with saved model if ther's no model play random"""
env = create_env(env_name, client_id="play1",remotes=1) # Local docker container
max_game_length = 10000
state = env.reset()
reward_sum = 0
start_time = time.time()
for step in range(max_game_length ):
        state, reward, done, _ = env.step(['up' for i in range(60)])  # no saved model for now: keep pressing up, 60 times per minute
reward_sum += reward
print("Time {}, game reward {}, game length {}".format(
time.strftime("%Hh %Mm %Ss"),
reward_sum,
time.gmtime(time.time() - start_time)))
if done:
break | mit | -6,093,060,618,181,687,000 | 33.645161 | 130 | 0.630941 | false | 3.764912 | false | false | false |
droundy/deft | talks/colloquium/figs/plot-walls.py | 1 | 3242 | #!/usr/bin/python
# We need the following two lines in order for matplotlib to work
# without access to an X server.
from __future__ import division
import matplotlib
matplotlib.use('Agg')
import pylab, numpy, sys
xmax = 2.5
xmin = -0.4
def plotit(dftdata, mcdata):
dft_len = len(dftdata[:,0])
dft_dr = dftdata[2,0] - dftdata[1,0]
mcdata = numpy.insert(mcdata,0,0,0)
mcdata[0,0]=-10
mcoffset = 10/2
offset = -3/2
n0 = dftdata[:,6]
nA = dftdata[:,8]
nAmc = mcdata[:,11]
n0mc = mcdata[:,10]
pylab.figure(figsize=(6, 6))
pylab.subplots_adjust(hspace=0.001)
n_plt = pylab.subplot(3,1,3)
n_plt.plot(mcdata[:,0]/2+mcoffset,mcdata[:,1]*4*numpy.pi/3,"b-",label='$n$ Monte Carlo')
n_plt.plot(dftdata[:,0]/2+offset,dftdata[:,1]*4*numpy.pi/3,"b--",label='$n$ DFT')
n_plt.legend(loc='best', ncol=1).draw_frame(False) #.get_frame().set_alpha(0.5)
n_plt.yaxis.set_major_locator(pylab.MaxNLocator(6,steps=[1,5,10],prune='upper'))
pylab.ylim(ymin=0)
pylab.xlim(xmin, xmax)
pylab.xlabel("$z/\sigma$")
pylab.ylabel("$n(\mathbf{r})$")
n_plt.axvline(x=0, color='k', linestyle=':')
n = len(mcdata[:,0])
#pylab.twinx()
dftr = dftdata[:,0]/2+offset
thiswork = dftdata[:,5]
gross = dftdata[:,7]
stop_here = int(dft_len - 1/dft_dr)
print stop_here
start_here = int(2.5/dft_dr)
off = 1
me = 40
A_plt = pylab.subplot(3,1,1)
A_plt.axvline(x=0, color='k', linestyle=':')
A_plt.plot(mcdata[:,0]/2+mcoffset,mcdata[:,2+2*off]/nAmc,"r-",label="$g_\sigma^A$ Monte Carlo")
A_plt.plot(dftr[dftr>=0],thiswork[dftr>=0],"ro",markevery=me*.8,label="$g_\sigma^A$ this work")
A_plt.plot(dftr[dftr>=0],gross[dftr>=0],"rx",markevery=me,label="Gross",
markerfacecolor='none',markeredgecolor='red', markeredgewidth=1)
A_plt.legend(loc='best', ncol=1).draw_frame(False) #.get_frame().set_alpha(0.5)
A_plt.yaxis.set_major_locator(pylab.MaxNLocator(integer=True,prune='upper'))
pylab.ylim(ymin=0)
pylab.ylabel("$g_\sigma^A$")
pylab.xlim(xmin, xmax)
n0mc[0]=1
mcdata[0,10]=1
S_plt = pylab.subplot(3,1,2)
S_plt.axvline(x=0, color='k', linestyle=':')
S_plt.plot(mcdata[:,0]/2+mcoffset,mcdata[:,3+2*off]/n0mc,"g-",label="$g_\sigma^S$ Monte Carlo")
S_plt.plot(dftdata[:,0]/2+offset,dftdata[:,4],"gx",markevery=me/2,label="Yu and Wu")
S_plt.legend(loc='best', ncol=1).draw_frame(False) #.get_frame().set_alpha(0.5)
#pylab.ylim(ymax=12)
S_plt.yaxis.set_major_locator(pylab.MaxNLocator(5,integer=True,prune='upper'))
pylab.xlim(xmin, xmax)
pylab.ylim(ymin=0)
pylab.ylabel("$g_\sigma^S$")
xticklabels = A_plt.get_xticklabels() + S_plt.get_xticklabels()
pylab.setp(xticklabels, visible=False)
mcdata10 = numpy.loadtxt('../../papers/contact/figs/mc-walls-20-196.dat')
dftdata10 = numpy.loadtxt('../../papers/contact/figs/wallsWB-0.10.dat')
mcdata40 = numpy.loadtxt('../../papers/contact/figs/mc-walls-20-817.dat')
dftdata40 = numpy.loadtxt('../../papers/contact/figs/wallsWB-0.40.dat')
plotit(dftdata10, mcdata10)
pylab.savefig('figs/walls-10.pdf', transparent=True)
plotit(dftdata40, mcdata40)
pylab.savefig('figs/walls-40.pdf', transparent=True)
| gpl-2.0 | -4,687,526,299,663,627,000 | 33.489362 | 99 | 0.637569 | false | 2.515128 | false | false | false |
AprilBrother/esptool | esptool.py | 1 | 28432 | #!/usr/bin/env python
#
# ESP8266 ROM Bootloader Utility
# https://github.com/themadinventor/esptool
#
# Copyright (C) 2014 Fredrik Ahlberg
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
# Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import struct
import serial
import time
import argparse
import os
import subprocess
import tempfile
class ESPROM:
# These are the currently known commands supported by the ROM
ESP_FLASH_BEGIN = 0x02
ESP_FLASH_DATA = 0x03
ESP_FLASH_END = 0x04
ESP_MEM_BEGIN = 0x05
ESP_MEM_END = 0x06
ESP_MEM_DATA = 0x07
ESP_SYNC = 0x08
ESP_WRITE_REG = 0x09
ESP_READ_REG = 0x0a
# Maximum block sized for RAM and Flash writes, respectively.
ESP_RAM_BLOCK = 0x1800
ESP_FLASH_BLOCK = 0x100
# Default baudrate. The ROM auto-bauds, so we can use more or less whatever we want.
ESP_ROM_BAUD = 115200
# First byte of the application image
ESP_IMAGE_MAGIC = 0xe9
# Initial state for the checksum routine
ESP_CHECKSUM_MAGIC = 0xef
# OTP ROM addresses
ESP_OTP_MAC0 = 0x3ff00050
ESP_OTP_MAC1 = 0x3ff00054
# Sflash stub: an assembly routine to read from spi flash and send to host
SFLASH_STUB = "\x80\x3c\x00\x40\x1c\x4b\x00\x40\x21\x11\x00\x40\x00\x80" \
"\xfe\x3f\xc1\xfb\xff\xd1\xf8\xff\x2d\x0d\x31\xfd\xff\x41\xf7\xff\x4a" \
"\xdd\x51\xf9\xff\xc0\x05\x00\x21\xf9\xff\x31\xf3\xff\x41\xf5\xff\xc0" \
"\x04\x00\x0b\xcc\x56\xec\xfd\x06\xff\xff\x00\x00"
def __init__(self, port=0, baud=ESP_ROM_BAUD):
self._port = serial.Serial(port)
# setting baud rate in a separate step is a workaround for
# CH341 driver on some Linux versions (this opens at 9600 then
# sets), shouldn't matter for other platforms/drivers. See
# https://github.com/themadinventor/esptool/issues/44#issuecomment-107094446
self._port.baudrate = baud
""" Read bytes from the serial port while performing SLIP unescaping """
def read(self, length=1):
b = ''
while len(b) < length:
c = self._port.read(1)
if c == '\xdb':
c = self._port.read(1)
if c == '\xdc':
b = b + '\xc0'
elif c == '\xdd':
b = b + '\xdb'
else:
raise FatalError('Invalid SLIP escape')
else:
b = b + c
return b
""" Write bytes to the serial port while performing SLIP escaping """
def write(self, packet):
buf = '\xc0' \
+ (packet.replace('\xdb','\xdb\xdd').replace('\xc0','\xdb\xdc')) \
+ '\xc0'
self._port.write(buf)
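        # (SLIP framing, mirroring read() above: 0xc0 delimits a packet; inside
        #  the payload 0xdb is escaped as 0xdb 0xdd and 0xc0 as 0xdb 0xdc)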
""" Calculate checksum of a blob, as it is defined by the ROM """
@staticmethod
def checksum(data, state=ESP_CHECKSUM_MAGIC):
for b in data:
state ^= ord(b)
return state
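    # checksum example: ESPROM.checksum('\x01\x02') == 0xef ^ 0x01 ^ 0x02 == 0xec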
""" Send a request and read the response """
def command(self, op=None, data=None, chk=0):
if op:
pkt = struct.pack('<BBHI', 0x00, op, len(data), chk) + data
self.write(pkt)
        # try to get a response until the response matches the requested
        # operation or a retry limit is exceeded. This is needed for some
        # ESP8266s that reply with more sync responses than expected.
retries = 100
while retries > 0:
(op_ret, val, body) = self.receive_response()
if op is None or op_ret == op:
return val, body # valid response received
retries = retries - 1
raise FatalError("Response doesn't match request")
""" Receive a response to a command """
def receive_response(self):
# Read header of response and parse
if self._port.read(1) != '\xc0':
raise FatalError('Invalid head of packet')
hdr = self.read(8)
(resp, op_ret, len_ret, val) = struct.unpack('<BBHI', hdr)
if resp != 0x01:
            raise FatalError('Invalid response 0x%02x to command' % resp)
# The variable-length body
body = self.read(len_ret)
# Terminating byte
if self._port.read(1) != chr(0xc0):
raise FatalError('Invalid end of packet')
return op_ret, val, body
""" Perform a connection test """
def sync(self):
self.command(ESPROM.ESP_SYNC, '\x07\x07\x12\x20' + 32 * '\x55')
for i in xrange(7):
self.command()
""" Try connecting repeatedly until successful, or giving up """
def connect(self):
print 'Connecting...'
for _ in xrange(4):
# worst-case latency timer should be 255ms (probably <20ms)
self._port.timeout = 0.3
for _ in xrange(4):
try:
self._port.flushInput()
self._port.flushOutput()
self.sync()
self._port.timeout = 5
return
except:
time.sleep(0.05)
raise FatalError('Failed to connect to ESP8266')
""" Read memory address in target """
def read_reg(self, addr):
res = self.command(ESPROM.ESP_READ_REG, struct.pack('<I', addr))
if res[1] != "\0\0":
raise FatalError('Failed to read target memory')
return res[0]
""" Write to memory address in target """
def write_reg(self, addr, value, mask, delay_us=0):
if self.command(ESPROM.ESP_WRITE_REG,
struct.pack('<IIII', addr, value, mask, delay_us))[1] != "\0\0":
raise FatalError('Failed to write target memory')
""" Start downloading an application image to RAM """
def mem_begin(self, size, blocks, blocksize, offset):
if self.command(ESPROM.ESP_MEM_BEGIN,
struct.pack('<IIII', size, blocks, blocksize, offset))[1] != "\0\0":
raise FatalError('Failed to enter RAM download mode')
""" Send a block of an image to RAM """
def mem_block(self, data, seq):
if self.command(ESPROM.ESP_MEM_DATA,
struct.pack('<IIII', len(data), seq, 0, 0) + data,
ESPROM.checksum(data))[1] != "\0\0":
raise FatalError('Failed to write to target RAM')
""" Leave download mode and run the application """
def mem_finish(self, entrypoint=0):
if self.command(ESPROM.ESP_MEM_END,
struct.pack('<II', int(entrypoint == 0), entrypoint))[1] != "\0\0":
raise FatalError('Failed to leave RAM download mode')
""" Start downloading to Flash (performs an erase) """
def flash_begin(self, size, offset):
old_tmo = self._port.timeout
num_blocks = (size + ESPROM.ESP_FLASH_BLOCK - 1) / ESPROM.ESP_FLASH_BLOCK
sectors_per_block = 16
sector_size = 4096
num_sectors = (size + sector_size - 1) / sector_size
start_sector = offset / sector_size
head_sectors = sectors_per_block - (start_sector % sectors_per_block)
if num_sectors < head_sectors:
head_sectors = num_sectors
if num_sectors < 2 * head_sectors:
erase_size = (num_sectors + 1) / 2 * sector_size
else:
erase_size = (num_sectors - head_sectors) * sector_size
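        # e.g. a 0x20000-byte image at offset 0: num_sectors=32, head_sectors=16,
        # so erase_size == (32 - 16) * 4096 == 0x10000 is what gets sent to the ROM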
self._port.timeout = 10
result = self.command(ESPROM.ESP_FLASH_BEGIN,
struct.pack('<IIII', erase_size, num_blocks, ESPROM.ESP_FLASH_BLOCK, offset))[1]
if result != "\0\0":
raise FatalError.WithResult('Failed to enter Flash download mode (result "%s")', result)
self._port.timeout = old_tmo
""" Write block to flash """
def flash_block(self, data, seq):
result = self.command(ESPROM.ESP_FLASH_DATA, struct.pack('<IIII', len(data), seq, 0, 0) + data, ESPROM.checksum(data))[1]
if result != "\0\0":
raise FatalError.WithResult('Failed to write to target Flash after seq %d (got result %%s)' % seq, result)
""" Leave flash mode and run/reboot """
def flash_finish(self, reboot=False):
pkt = struct.pack('<I', int(not reboot))
if self.command(ESPROM.ESP_FLASH_END, pkt)[1] != "\0\0":
raise FatalError('Failed to leave Flash mode')
""" Run application code in flash """
def run(self, reboot=False):
# Fake flash begin immediately followed by flash end
self.flash_begin(0, 0)
self.flash_finish(reboot)
""" Read MAC from OTP ROM """
def read_mac(self):
mac0 = self.read_reg(self.ESP_OTP_MAC0)
mac1 = self.read_reg(self.ESP_OTP_MAC1)
if ((mac1 >> 16) & 0xff) == 0:
oui = (0x18, 0xfe, 0x34)
elif ((mac1 >> 16) & 0xff) == 1:
oui = (0xac, 0xd0, 0x74)
else:
raise FatalError("Unknown OUI")
return oui + ((mac1 >> 8) & 0xff, mac1 & 0xff, (mac0 >> 24) & 0xff)
""" Read SPI flash manufacturer and device id """
def flash_id(self):
self.flash_begin(0, 0)
self.write_reg(0x60000240, 0x0, 0xffffffff)
self.write_reg(0x60000200, 0x10000000, 0xffffffff)
flash_id = self.read_reg(0x60000240)
self.flash_finish(False)
return flash_id
""" Read SPI flash """
def flash_read(self, offset, size, count=1):
# Create a custom stub
stub = struct.pack('<III', offset, size, count) + self.SFLASH_STUB
# Trick ROM to initialize SFlash
self.flash_begin(0, 0)
# Download stub
self.mem_begin(len(stub), 1, len(stub), 0x40100000)
self.mem_block(stub, 0)
self.mem_finish(0x4010001c)
# Fetch the data
data = ''
for _ in xrange(count):
if self._port.read(1) != '\xc0':
raise FatalError('Invalid head of packet (sflash read)')
data += self.read(size)
if self._port.read(1) != chr(0xc0):
raise FatalError('Invalid end of packet (sflash read)')
return data
""" Abuse the loader protocol to force flash to be left in write mode """
def flash_unlock_dio(self):
# Enable flash write mode
self.flash_begin(0, 0)
# Reset the chip rather than call flash_finish(), which would have
# write protected the chip again (why oh why does it do that?!)
self.mem_begin(0,0,0,0x40100000)
self.mem_finish(0x40000080)
""" Perform a chip erase of SPI flash """
def flash_erase(self):
# Trick ROM to initialize SFlash
self.flash_begin(0, 0)
# This is hacky: we don't have a custom stub, instead we trick
# the bootloader to jump to the SPIEraseChip() routine and then halt/crash
# when it tries to boot an unconfigured system.
self.mem_begin(0,0,0,0x40100000)
self.mem_finish(0x40004984)
# Yup - there's no good way to detect if we succeeded.
        # It is, on the other hand, unlikely to fail.
class ESPFirmwareImage:
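    """In-memory model of an ESP8266 firmware image.

    Layout, as read and written by this class: an 8-byte header packed as
    '<BBBBI' (magic 0xe9, segment count, flash mode, flash size/freq, entry
    point); each segment follows as '<II' (offset, length) plus its raw data;
    the file is then padded to a multiple of 16 bytes, with the XOR checksum
    (seeded with 0xef) stored in the final byte.
    """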
def __init__(self, filename=None):
self.segments = []
self.entrypoint = 0
self.flash_mode = 0
self.flash_size_freq = 0
if filename is not None:
f = file(filename, 'rb')
(magic, segments, self.flash_mode, self.flash_size_freq, self.entrypoint) = struct.unpack('<BBBBI', f.read(8))
# some sanity check
if magic != ESPROM.ESP_IMAGE_MAGIC or segments > 16:
raise FatalError('Invalid firmware image')
for i in xrange(segments):
(offset, size) = struct.unpack('<II', f.read(8))
if offset > 0x40200000 or offset < 0x3ffe0000 or size > 65536:
raise FatalError('Suspicious segment 0x%x, length %d' % (offset, size))
segment_data = f.read(size)
if len(segment_data) < size:
raise FatalError('End of file reading segment 0x%x, length %d (actual length %d)' % (offset, size, len(segment_data)))
self.segments.append((offset, size, segment_data))
# Skip the padding. The checksum is stored in the last byte so that the
# file is a multiple of 16 bytes.
align = 15 - (f.tell() % 16)
f.seek(align, 1)
self.checksum = ord(f.read(1))
def add_segment(self, addr, data):
# Data should be aligned on word boundary
l = len(data)
if l % 4:
data += b"\x00" * (4 - l % 4)
if l > 0:
self.segments.append((addr, len(data), data))
def save(self, filename):
f = file(filename, 'wb')
f.write(struct.pack('<BBBBI', ESPROM.ESP_IMAGE_MAGIC, len(self.segments),
self.flash_mode, self.flash_size_freq, self.entrypoint))
checksum = ESPROM.ESP_CHECKSUM_MAGIC
for (offset, size, data) in self.segments:
f.write(struct.pack('<II', offset, size))
f.write(data)
checksum = ESPROM.checksum(data, checksum)
align = 15 - (f.tell() % 16)
f.seek(align, 1)
f.write(struct.pack('B', checksum))
class ELFFile:
def __init__(self, name):
self.name = name
self.symbols = None
def _fetch_symbols(self):
if self.symbols is not None:
return
self.symbols = {}
try:
tool_nm = "xtensa-lx106-elf-nm"
if os.getenv('XTENSA_CORE') == 'lx106':
tool_nm = "xt-nm"
proc = subprocess.Popen([tool_nm, self.name], stdout=subprocess.PIPE)
except OSError:
print "Error calling %s, do you have Xtensa toolchain in PATH?" % tool_nm
sys.exit(1)
for l in proc.stdout:
fields = l.strip().split()
try:
if fields[0] == "U":
print "Warning: ELF binary has undefined symbol %s" % fields[1]
continue
self.symbols[fields[2]] = int(fields[0], 16)
except ValueError:
raise FatalError("Failed to strip symbol output from nm: %s" % fields)
def get_symbol_addr(self, sym):
self._fetch_symbols()
return self.symbols[sym]
def get_entry_point(self):
tool_readelf = "xtensa-lx106-elf-readelf"
if os.getenv('XTENSA_CORE') == 'lx106':
tool_readelf = "xt-readelf"
try:
proc = subprocess.Popen([tool_readelf, "-h", self.name], stdout=subprocess.PIPE)
except OSError:
print "Error calling %s, do you have Xtensa toolchain in PATH?" % tool_readelf
sys.exit(1)
for l in proc.stdout:
fields = l.strip().split()
if fields[0] == "Entry":
return int(fields[3], 0)
def load_section(self, section):
tool_objcopy = "xtensa-lx106-elf-objcopy"
if os.getenv('XTENSA_CORE') == 'lx106':
tool_objcopy = "xt-objcopy"
tmpsection = tempfile.mktemp(suffix=".section")
try:
subprocess.check_call([tool_objcopy, "--only-section", section, "-Obinary", self.name, tmpsection])
with open(tmpsection, "rb") as f:
data = f.read()
finally:
os.remove(tmpsection)
return data
def arg_auto_int(x):
return int(x, 0)
def div_roundup(a, b):
""" Return a/b rounded up to nearest integer,
    equivalent result to int(math.ceil(float(int(a)) / float(int(b)))), only
without possible floating point accuracy errors.
"""
return (int(a) + int(b) - 1) / int(b)
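
# e.g. div_roundup(0, 256) == 0, div_roundup(1, 256) == 1, div_roundup(257, 256) == 2
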
class FatalError(RuntimeError):
"""
Wrapper class for runtime errors that aren't caused by internal bugs, but by
ESP8266 responses or input content.
"""
def __init__(self, message):
RuntimeError.__init__(self, message)
@staticmethod
def WithResult(message, result):
"""
Return a fatal error object that includes the hex values of
'result' as a string formatted argument.
"""
return FatalError(message % ", ".join(hex(ord(x)) for x in result))
def main():
parser = argparse.ArgumentParser(description='ESP8266 ROM Bootloader Utility', prog='esptool')
parser.add_argument(
'--port', '-p',
help='Serial port device',
default='/dev/ttyUSB0')
parser.add_argument(
'--baud', '-b',
help='Serial port baud rate',
type=arg_auto_int,
default=ESPROM.ESP_ROM_BAUD)
subparsers = parser.add_subparsers(
dest='operation',
help='Run esptool {command} -h for additional help')
parser_load_ram = subparsers.add_parser(
'load_ram',
help='Download an image to RAM and execute')
parser_load_ram.add_argument('filename', help='Firmware image')
parser_dump_mem = subparsers.add_parser(
'dump_mem',
help='Dump arbitrary memory to disk')
parser_dump_mem.add_argument('address', help='Base address', type=arg_auto_int)
parser_dump_mem.add_argument('size', help='Size of region to dump', type=arg_auto_int)
parser_dump_mem.add_argument('filename', help='Name of binary dump')
parser_read_mem = subparsers.add_parser(
'read_mem',
help='Read arbitrary memory location')
parser_read_mem.add_argument('address', help='Address to read', type=arg_auto_int)
parser_write_mem = subparsers.add_parser(
'write_mem',
help='Read-modify-write to arbitrary memory location')
parser_write_mem.add_argument('address', help='Address to write', type=arg_auto_int)
parser_write_mem.add_argument('value', help='Value', type=arg_auto_int)
parser_write_mem.add_argument('mask', help='Mask of bits to write', type=arg_auto_int)
parser_write_flash = subparsers.add_parser(
'write_flash',
help='Write a binary blob to flash')
parser_write_flash.add_argument('addr_filename', nargs='+', help='Address and binary file to write there, separated by space')
parser_write_flash.add_argument('--flash_freq', '-ff', help='SPI Flash frequency',
choices=['40m', '26m', '20m', '80m'], default='40m')
parser_write_flash.add_argument('--flash_mode', '-fm', help='SPI Flash mode',
choices=['qio', 'qout', 'dio', 'dout'], default='qio')
parser_write_flash.add_argument('--flash_size', '-fs', help='SPI Flash size in Mbit',
choices=['4m', '2m', '8m', '16m', '32m', '16m-c1', '32m-c1', '32m-c2'], default='4m')
subparsers.add_parser(
'run',
help='Run application code in flash')
parser_image_info = subparsers.add_parser(
'image_info',
help='Dump headers from an application image')
parser_image_info.add_argument('filename', help='Image file to parse')
parser_make_image = subparsers.add_parser(
'make_image',
help='Create an application image from binary files')
parser_make_image.add_argument('output', help='Output image file')
parser_make_image.add_argument('--segfile', '-f', action='append', help='Segment input file')
parser_make_image.add_argument('--segaddr', '-a', action='append', help='Segment base address', type=arg_auto_int)
parser_make_image.add_argument('--entrypoint', '-e', help='Address of entry point', type=arg_auto_int, default=0)
parser_elf2image = subparsers.add_parser(
'elf2image',
help='Create an application image from ELF file')
parser_elf2image.add_argument('input', help='Input ELF file')
parser_elf2image.add_argument('--output', '-o', help='Output filename prefix', type=str)
parser_elf2image.add_argument('--flash_freq', '-ff', help='SPI Flash frequency',
choices=['40m', '26m', '20m', '80m'], default='40m')
parser_elf2image.add_argument('--flash_mode', '-fm', help='SPI Flash mode',
choices=['qio', 'qout', 'dio', 'dout'], default='qio')
parser_elf2image.add_argument('--flash_size', '-fs', help='SPI Flash size in Mbit',
choices=['4m', '2m', '8m', '16m', '32m', '16m-c1', '32m-c1', '32m-c2'], default='4m')
subparsers.add_parser(
'read_mac',
help='Read MAC address from OTP ROM')
subparsers.add_parser(
'flash_id',
help='Read SPI flash manufacturer and device ID')
parser_read_flash = subparsers.add_parser(
'read_flash',
help='Read SPI flash content')
parser_read_flash.add_argument('address', help='Start address', type=arg_auto_int)
parser_read_flash.add_argument('size', help='Size of region to dump', type=arg_auto_int)
parser_read_flash.add_argument('filename', help='Name of binary dump')
subparsers.add_parser(
'erase_flash',
help='Perform Chip Erase on SPI flash')
args = parser.parse_args()
# Create the ESPROM connection object, if needed
esp = None
if args.operation not in ('image_info','make_image','elf2image'):
esp = ESPROM(args.port, args.baud)
esp.connect()
# Do the actual work. Should probably be split into separate functions.
if args.operation == 'load_ram':
image = ESPFirmwareImage(args.filename)
print 'RAM boot...'
for (offset, size, data) in image.segments:
print 'Downloading %d bytes at %08x...' % (size, offset),
sys.stdout.flush()
esp.mem_begin(size, div_roundup(size, esp.ESP_RAM_BLOCK), esp.ESP_RAM_BLOCK, offset)
seq = 0
while len(data) > 0:
esp.mem_block(data[0:esp.ESP_RAM_BLOCK], seq)
data = data[esp.ESP_RAM_BLOCK:]
seq += 1
print 'done!'
print 'All segments done, executing at %08x' % image.entrypoint
esp.mem_finish(image.entrypoint)
elif args.operation == 'read_mem':
print '0x%08x = 0x%08x' % (args.address, esp.read_reg(args.address))
elif args.operation == 'write_mem':
esp.write_reg(args.address, args.value, args.mask, 0)
print 'Wrote %08x, mask %08x to %08x' % (args.value, args.mask, args.address)
elif args.operation == 'dump_mem':
f = file(args.filename, 'wb')
for i in xrange(args.size / 4):
d = esp.read_reg(args.address + (i * 4))
f.write(struct.pack('<I', d))
if f.tell() % 1024 == 0:
print '\r%d bytes read... (%d %%)' % (f.tell(),
f.tell() * 100 / args.size),
sys.stdout.flush()
print 'Done!'
elif args.operation == 'write_flash':
assert len(args.addr_filename) % 2 == 0
flash_mode = {'qio':0, 'qout':1, 'dio':2, 'dout': 3}[args.flash_mode]
flash_size_freq = {'4m':0x00, '2m':0x10, '8m':0x20, '16m':0x30, '32m':0x40, '16m-c1': 0x50, '32m-c1':0x60, '32m-c2':0x70}[args.flash_size]
flash_size_freq += {'40m':0, '26m':1, '20m':2, '80m': 0xf}[args.flash_freq]
flash_info = struct.pack('BB', flash_mode, flash_size_freq)
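        # e.g. the defaults qio/4m/40m pack to '\x00\x00'; dio/32m/80m would pack
        # to '\x02\x4f' (mode byte first, then size in the high nibble, freq in the low)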
while args.addr_filename:
address = int(args.addr_filename[0], 0)
filename = args.addr_filename[1]
args.addr_filename = args.addr_filename[2:]
image = file(filename, 'rb').read()
print 'Erasing flash...'
blocks = div_roundup(len(image), esp.ESP_FLASH_BLOCK)
esp.flash_begin(blocks * esp.ESP_FLASH_BLOCK, address)
seq = 0
written = 0
t = time.time()
while len(image) > 0:
print '\rWriting at 0x%08x... (%d %%)' % (address + seq * esp.ESP_FLASH_BLOCK, 100 * (seq + 1) / blocks),
sys.stdout.flush()
block = image[0:esp.ESP_FLASH_BLOCK]
# Fix sflash config data
if address == 0 and seq == 0 and block[0] == '\xe9':
block = block[0:2] + flash_info + block[4:]
# Pad the last block
block = block + '\xff' * (esp.ESP_FLASH_BLOCK - len(block))
esp.flash_block(block, seq)
image = image[esp.ESP_FLASH_BLOCK:]
seq += 1
written += len(block)
t = time.time() - t
print '\rWrote %d bytes at 0x%08x in %.1f seconds (%.1f kbit/s)...' % (written, address, t, written / t * 8 / 1000)
print '\nLeaving...'
if args.flash_mode == 'dio':
esp.flash_unlock_dio()
else:
esp.flash_begin(0, 0)
esp.flash_finish(False)
elif args.operation == 'run':
esp.run()
elif args.operation == 'image_info':
image = ESPFirmwareImage(args.filename)
print ('Entry point: %08x' % image.entrypoint) if image.entrypoint != 0 else 'Entry point not set'
print '%d segments' % len(image.segments)
print
checksum = ESPROM.ESP_CHECKSUM_MAGIC
for (idx, (offset, size, data)) in enumerate(image.segments):
print 'Segment %d: %5d bytes at %08x' % (idx + 1, size, offset)
checksum = ESPROM.checksum(data, checksum)
print
print 'Checksum: %02x (%s)' % (image.checksum, 'valid' if image.checksum == checksum else 'invalid!')
elif args.operation == 'make_image':
image = ESPFirmwareImage()
if len(args.segfile) == 0:
raise FatalError('No segments specified')
if len(args.segfile) != len(args.segaddr):
raise FatalError('Number of specified files does not match number of specified addresses')
for (seg, addr) in zip(args.segfile, args.segaddr):
data = file(seg, 'rb').read()
image.add_segment(addr, data)
image.entrypoint = args.entrypoint
image.save(args.output)
elif args.operation == 'elf2image':
if args.output is None:
args.output = args.input + '-'
e = ELFFile(args.input)
image = ESPFirmwareImage()
image.entrypoint = e.get_entry_point()
for section, start in ((".text", "_text_start"), (".data", "_data_start"), (".rodata", "_rodata_start")):
data = e.load_section(section)
image.add_segment(e.get_symbol_addr(start), data)
image.flash_mode = {'qio':0, 'qout':1, 'dio':2, 'dout': 3}[args.flash_mode]
image.flash_size_freq = {'4m':0x00, '2m':0x10, '8m':0x20, '16m':0x30, '32m':0x40, '16m-c1': 0x50, '32m-c1':0x60, '32m-c2':0x70}[args.flash_size]
image.flash_size_freq += {'40m':0, '26m':1, '20m':2, '80m': 0xf}[args.flash_freq]
image.save(args.output + "0x00000.bin")
data = e.load_section(".irom0.text")
off = e.get_symbol_addr("_irom0_text_start") - 0x40200000
assert off >= 0
f = open(args.output + "0x%05x.bin" % off, "wb")
f.write(data)
f.close()
elif args.operation == 'read_mac':
mac = esp.read_mac()
print 'MAC: %s' % ':'.join(map(lambda x: '%02x' % x, mac))
elif args.operation == 'flash_id':
flash_id = esp.flash_id()
print 'Manufacturer: %02x' % (flash_id & 0xff)
print 'Device: %02x%02x' % ((flash_id >> 8) & 0xff, (flash_id >> 16) & 0xff)
elif args.operation == 'read_flash':
print 'Please wait...'
file(args.filename, 'wb').write(esp.flash_read(args.address, 1024, div_roundup(args.size, 1024))[:args.size])
elif args.operation == 'erase_flash':
esp.flash_erase()
if __name__ == '__main__':
try:
main()
except FatalError as e:
print '\nA fatal error occurred: %s' % e
sys.exit(2)
| gpl-2.0 | 1,992,661,783,218,690,600 | 38.709497 | 152 | 0.573579 | false | 3.507958 | false | false | false |
eharney/cinder | cinder/api/v3/attachments.py | 1 | 11362 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volumes attachments API."""
from oslo_log import log as logging
import webob
from cinder.api import common
from cinder.api import microversions as mv
from cinder.api.openstack import wsgi
from cinder.api.v3.views import attachments as attachment_views
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder import utils
from cinder.volume import api as volume_api
LOG = logging.getLogger(__name__)
class AttachmentsController(wsgi.Controller):
"""The Attachments API controller for the OpenStack API."""
_view_builder_class = attachment_views.ViewBuilder
allowed_filters = {'volume_id', 'status', 'instance_id', 'attach_status'}
def __init__(self, ext_mgr=None):
"""Initialize controller class."""
self.volume_api = volume_api.API()
self.ext_mgr = ext_mgr
super(AttachmentsController, self).__init__()
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def show(self, req, id):
"""Return data about the given attachment."""
context = req.environ['cinder.context']
attachment = objects.VolumeAttachment.get_by_id(context, id)
return attachment_views.ViewBuilder.detail(attachment)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def index(self, req):
"""Return a summary list of attachments."""
attachments = self._items(req)
return attachment_views.ViewBuilder.list(attachments)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def detail(self, req):
"""Return a detailed list of attachments."""
attachments = self._items(req)
return attachment_views.ViewBuilder.list(attachments, detail=True)
@common.process_general_filtering('attachment')
def _process_attachment_filtering(self, context=None, filters=None,
req_version=None):
utils.remove_invalid_filter_options(context, filters,
self.allowed_filters)
def _items(self, req):
"""Return a list of attachments, transformed through view builder."""
context = req.environ['cinder.context']
req_version = req.api_version_request
# Pop out non search_opts and create local variables
search_opts = req.GET.copy()
sort_keys, sort_dirs = common.get_sort_params(search_opts)
marker, limit, offset = common.get_pagination_params(search_opts)
self._process_attachment_filtering(context=context,
filters=search_opts,
req_version=req_version)
if search_opts.get('instance_id', None):
search_opts['instance_uuid'] = search_opts.pop('instance_id', None)
if context.is_admin and 'all_tenants' in search_opts:
del search_opts['all_tenants']
return objects.VolumeAttachmentList.get_all(
context, search_opts=search_opts, marker=marker, limit=limit,
offset=offset, sort_keys=sort_keys, sort_direction=sort_dirs)
else:
return objects.VolumeAttachmentList.get_all_by_project(
context, context.project_id, search_opts=search_opts,
marker=marker, limit=limit, offset=offset, sort_keys=sort_keys,
sort_direction=sort_dirs)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
@wsgi.response(202)
def create(self, req, body):
"""Create an attachment.
This method can be used to create an empty attachment (reserve) or to
create and initialize a volume attachment based on the provided input
parameters.
If the caller does not yet have the connector information but needs to
reserve an attachment for the volume (ie Nova BootFromVolume) the
create can be called with just the volume-uuid and the server
identifier. This will reserve an attachment, mark the volume as
reserved and prevent any new attachment_create calls from being made
until the attachment is updated (completed).
The alternative is that the connection can be reserved and initialized
all at once with a single call if the caller has all of the required
information (connector data) at the time of the call.
NOTE: In Nova terms server == instance, the server_id parameter
referenced below is the UUID of the Instance, for non-nova consumers
this can be a server UUID or some other arbitrary unique identifier.
Expected format of the input parameter 'body':
.. code-block:: json
{
"attachment":
{
"volume_uuid": "volume-uuid",
"instance_uuid": "nova-server-uuid",
"connector": "null|<connector-object>"
}
}
Example connector:
.. code-block:: json
{
"connector":
{
"initiator": "iqn.1993-08.org.debian:01:cad181614cec",
"ip":"192.168.1.20",
"platform": "x86_64",
"host": "tempest-1",
"os_type": "linux2",
"multipath": false,
"mountpoint": "/dev/vdb",
"mode": "null|rw|ro"
}
}
NOTE all that's required for a reserve is volume_uuid
and an instance_uuid.
returns: A summary view of the attachment object
"""
context = req.environ['cinder.context']
instance_uuid = body['attachment'].get('instance_uuid', None)
if not instance_uuid:
raise webob.exc.HTTPBadRequest(
explanation=_("Must specify 'instance_uuid' "
"to create attachment."))
volume_uuid = body['attachment'].get('volume_uuid', None)
if not volume_uuid:
raise webob.exc.HTTPBadRequest(
explanation=_("Must specify 'volume_uuid' "
"to create attachment."))
volume_ref = objects.Volume.get_by_id(
context,
volume_uuid)
connector = body['attachment'].get('connector', None)
err_msg = None
try:
attachment_ref = (
self.volume_api.attachment_create(context,
volume_ref,
instance_uuid,
connector=connector))
except exception.NotAuthorized:
raise
except exception.CinderException as ex:
err_msg = _(
"Unable to create attachment for volume (%s).") % ex.msg
LOG.exception(err_msg)
except Exception as ex:
err_msg = _("Unable to create attachment for volume.")
LOG.exception(err_msg)
finally:
if err_msg:
raise webob.exc.HTTPInternalServerError(explanation=err_msg)
return attachment_views.ViewBuilder.detail(attachment_ref)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def update(self, req, id, body):
"""Update an attachment record.
Update a reserved attachment record with connector information and set
up the appropriate connection_info from the driver.
Expected format of the input parameter 'body':
.. code:: json
{
"attachment":
{
"connector":
{
"initiator": "iqn.1993-08.org.debian:01:cad181614cec",
"ip":"192.168.1.20",
"platform": "x86_64",
"host": "tempest-1",
"os_type": "linux2",
"multipath": False,
"mountpoint": "/dev/vdb",
"mode": None|"rw"|"ro",
}
}
}
"""
context = req.environ['cinder.context']
attachment_ref = (
objects.VolumeAttachment.get_by_id(context, id))
connector = body['attachment'].get('connector', None)
if not connector:
raise webob.exc.HTTPBadRequest(
explanation=_("Must specify 'connector' "
"to update attachment."))
err_msg = None
try:
attachment_ref = (
self.volume_api.attachment_update(context,
attachment_ref,
connector))
except exception.NotAuthorized:
raise
except exception.CinderException as ex:
err_msg = (
_("Unable to update attachment.(%s).") % ex.msg)
LOG.exception(err_msg)
except Exception:
err_msg = _("Unable to update the attachment.")
LOG.exception(err_msg)
finally:
if err_msg:
raise webob.exc.HTTPInternalServerError(explanation=err_msg)
# TODO(jdg): Test this out some more, do we want to return and object
# or a dict?
return attachment_views.ViewBuilder.detail(attachment_ref)
@wsgi.Controller.api_version(mv.NEW_ATTACH)
def delete(self, req, id):
"""Delete an attachment.
Disconnects/Deletes the specified attachment, returns a list of any
known shared attachment-id's for the effected backend device.
returns: A summary list of any attachments sharing this connection
"""
context = req.environ['cinder.context']
attachment = objects.VolumeAttachment.get_by_id(context, id)
attachments = self.volume_api.attachment_delete(context, attachment)
return attachment_views.ViewBuilder.list(attachments)
@wsgi.response(202)
@wsgi.Controller.api_version(mv.NEW_ATTACH_COMPLETION)
@wsgi.action('os-complete')
def complete(self, req, id, body):
"""Mark a volume attachment process as completed (in-use)."""
context = req.environ['cinder.context']
attachment_ref = (
objects.VolumeAttachment.get_by_id(context, id))
volume_ref = objects.Volume.get_by_id(
context,
attachment_ref.volume_id)
attachment_ref.update({'attach_status': 'attached'})
attachment_ref.save()
volume_ref.update({'status': 'in-use', 'attach_status': 'attached'})
volume_ref.save()
def create_resource(ext_mgr):
"""Create the wsgi resource for this controller."""
return wsgi.Resource(AttachmentsController(ext_mgr))
| apache-2.0 | -5,867,306,877,933,996,000 | 38.451389 | 79 | 0.58106 | false | 4.6 | false | false | false |
turbokongen/home-assistant | homeassistant/components/plex/config_flow.py | 1 | 15991 | """Config flow for Plex."""
import copy
import logging
from aiohttp import web_response
import plexapi.exceptions
from plexapi.gdm import GDM
from plexauth import PlexAuth
import requests.exceptions
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.http.view import HomeAssistantView
from homeassistant.components.media_player import DOMAIN as MP_DOMAIN
from homeassistant.const import (
CONF_CLIENT_ID,
CONF_HOST,
CONF_PORT,
CONF_SOURCE,
CONF_SSL,
CONF_TOKEN,
CONF_URL,
CONF_VERIFY_SSL,
)
from homeassistant.core import callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.network import get_url
from .const import ( # pylint: disable=unused-import
AUTH_CALLBACK_NAME,
AUTH_CALLBACK_PATH,
AUTOMATIC_SETUP_STRING,
CONF_IGNORE_NEW_SHARED_USERS,
CONF_IGNORE_PLEX_WEB_CLIENTS,
CONF_MONITORED_USERS,
CONF_SERVER,
CONF_SERVER_IDENTIFIER,
CONF_USE_EPISODE_ART,
DEFAULT_PORT,
DEFAULT_SSL,
DEFAULT_VERIFY_SSL,
DOMAIN,
MANUAL_SETUP_STRING,
PLEX_SERVER_CONFIG,
SERVERS,
X_PLEX_DEVICE_NAME,
X_PLEX_PLATFORM,
X_PLEX_PRODUCT,
X_PLEX_VERSION,
)
from .errors import NoServersFound, ServerNotSpecified
from .server import PlexServer
_LOGGER = logging.getLogger(__package__)
@callback
def configured_servers(hass):
"""Return a set of the configured Plex servers."""
return {
entry.data[CONF_SERVER_IDENTIFIER]
for entry in hass.config_entries.async_entries(DOMAIN)
}
async def async_discover(hass):
"""Scan for available Plex servers."""
gdm = GDM()
await hass.async_add_executor_job(gdm.scan)
for server_data in gdm.entries:
await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: config_entries.SOURCE_INTEGRATION_DISCOVERY},
data=server_data,
)
class PlexFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a Plex config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return PlexOptionsFlowHandler(config_entry)
def __init__(self):
"""Initialize the Plex flow."""
self.current_login = {}
self.available_servers = None
self.plexauth = None
self.token = None
self.client_id = None
self._manual = False
async def async_step_user(
self, user_input=None, errors=None
): # pylint: disable=arguments-differ
"""Handle a flow initialized by the user."""
if user_input is not None:
return await self.async_step_plex_website_auth()
if self.show_advanced_options:
return await self.async_step_user_advanced(errors=errors)
return self.async_show_form(step_id="user", errors=errors)
async def async_step_user_advanced(self, user_input=None, errors=None):
"""Handle an advanced mode flow initialized by the user."""
if user_input is not None:
if user_input.get("setup_method") == MANUAL_SETUP_STRING:
self._manual = True
return await self.async_step_manual_setup()
return await self.async_step_plex_website_auth()
data_schema = vol.Schema(
{
vol.Required("setup_method", default=AUTOMATIC_SETUP_STRING): vol.In(
[AUTOMATIC_SETUP_STRING, MANUAL_SETUP_STRING]
)
}
)
return self.async_show_form(
step_id="user_advanced", data_schema=data_schema, errors=errors
)
async def async_step_manual_setup(self, user_input=None, errors=None):
"""Begin manual configuration."""
if user_input is not None and errors is None:
user_input.pop(CONF_URL, None)
host = user_input.get(CONF_HOST)
if host:
port = user_input[CONF_PORT]
prefix = "https" if user_input.get(CONF_SSL) else "http"
user_input[CONF_URL] = f"{prefix}://{host}:{port}"
elif CONF_TOKEN not in user_input:
return await self.async_step_manual_setup(
user_input=user_input, errors={"base": "host_or_token"}
)
return await self.async_step_server_validate(user_input)
previous_input = user_input or {}
data_schema = vol.Schema(
{
vol.Optional(
CONF_HOST,
description={"suggested_value": previous_input.get(CONF_HOST)},
): str,
vol.Required(
CONF_PORT, default=previous_input.get(CONF_PORT, DEFAULT_PORT)
): int,
vol.Required(
CONF_SSL, default=previous_input.get(CONF_SSL, DEFAULT_SSL)
): bool,
vol.Required(
CONF_VERIFY_SSL,
default=previous_input.get(CONF_VERIFY_SSL, DEFAULT_VERIFY_SSL),
): bool,
vol.Optional(
CONF_TOKEN,
description={"suggested_value": previous_input.get(CONF_TOKEN)},
): str,
}
)
return self.async_show_form(
step_id="manual_setup", data_schema=data_schema, errors=errors
)
async def async_step_server_validate(self, server_config):
"""Validate a provided configuration."""
errors = {}
self.current_login = server_config
plex_server = PlexServer(self.hass, server_config)
try:
await self.hass.async_add_executor_job(plex_server.connect)
except NoServersFound:
_LOGGER.error("No servers linked to Plex account")
errors["base"] = "no_servers"
except (plexapi.exceptions.BadRequest, plexapi.exceptions.Unauthorized):
_LOGGER.error("Invalid credentials provided, config not created")
errors[CONF_TOKEN] = "faulty_credentials"
except requests.exceptions.SSLError as error:
_LOGGER.error("SSL certificate error: [%s]", error)
errors["base"] = "ssl_error"
except (plexapi.exceptions.NotFound, requests.exceptions.ConnectionError):
server_identifier = (
server_config.get(CONF_URL) or plex_server.server_choice or "Unknown"
)
_LOGGER.error("Plex server could not be reached: %s", server_identifier)
errors[CONF_HOST] = "not_found"
except ServerNotSpecified as available_servers:
self.available_servers = available_servers.args[0]
return await self.async_step_select_server()
except Exception as error: # pylint: disable=broad-except
_LOGGER.exception("Unknown error connecting to Plex server: %s", error)
return self.async_abort(reason="unknown")
if errors:
if self._manual:
return await self.async_step_manual_setup(
user_input=server_config, errors=errors
)
return await self.async_step_user(errors=errors)
server_id = plex_server.machine_identifier
url = plex_server.url_in_use
token = server_config.get(CONF_TOKEN)
entry_config = {CONF_URL: url}
if self.client_id:
entry_config[CONF_CLIENT_ID] = self.client_id
if token:
entry_config[CONF_TOKEN] = token
if url.startswith("https"):
entry_config[CONF_VERIFY_SSL] = server_config.get(
CONF_VERIFY_SSL, DEFAULT_VERIFY_SSL
)
data = {
CONF_SERVER: plex_server.friendly_name,
CONF_SERVER_IDENTIFIER: server_id,
PLEX_SERVER_CONFIG: entry_config,
}
entry = await self.async_set_unique_id(server_id)
if self.context[CONF_SOURCE] == config_entries.SOURCE_REAUTH:
self.hass.config_entries.async_update_entry(entry, data=data)
_LOGGER.debug("Updated config entry for %s", plex_server.friendly_name)
await self.hass.config_entries.async_reload(entry.entry_id)
return self.async_abort(reason="reauth_successful")
self._abort_if_unique_id_configured()
_LOGGER.debug("Valid config created for %s", plex_server.friendly_name)
return self.async_create_entry(title=plex_server.friendly_name, data=data)
async def async_step_select_server(self, user_input=None):
"""Use selected Plex server."""
config = dict(self.current_login)
if user_input is not None:
config[CONF_SERVER] = user_input[CONF_SERVER]
return await self.async_step_server_validate(config)
configured = configured_servers(self.hass)
available_servers = [
name
for (name, server_id) in self.available_servers
if server_id not in configured
]
if not available_servers:
return self.async_abort(reason="all_configured")
if len(available_servers) == 1:
config[CONF_SERVER] = available_servers[0]
return await self.async_step_server_validate(config)
return self.async_show_form(
step_id="select_server",
data_schema=vol.Schema(
{vol.Required(CONF_SERVER): vol.In(available_servers)}
),
errors={},
)
async def async_step_integration_discovery(self, discovery_info):
"""Handle GDM discovery."""
machine_identifier = discovery_info["data"]["Resource-Identifier"]
await self.async_set_unique_id(machine_identifier)
self._abort_if_unique_id_configured()
host = f"{discovery_info['from'][0]}:{discovery_info['data']['Port']}"
name = discovery_info["data"]["Name"]
self.context["title_placeholders"] = {
"host": host,
"name": name,
}
return await self.async_step_user()
async def async_step_plex_website_auth(self):
"""Begin external auth flow on Plex website."""
self.hass.http.register_view(PlexAuthorizationCallbackView)
hass_url = get_url(self.hass)
headers = {"Origin": hass_url}
payload = {
"X-Plex-Device-Name": X_PLEX_DEVICE_NAME,
"X-Plex-Version": X_PLEX_VERSION,
"X-Plex-Product": X_PLEX_PRODUCT,
"X-Plex-Device": self.hass.config.location_name,
"X-Plex-Platform": X_PLEX_PLATFORM,
"X-Plex-Model": "Plex OAuth",
}
session = async_get_clientsession(self.hass)
self.plexauth = PlexAuth(payload, session, headers)
await self.plexauth.initiate_auth()
forward_url = f"{hass_url}{AUTH_CALLBACK_PATH}?flow_id={self.flow_id}"
auth_url = self.plexauth.auth_url(forward_url)
return self.async_external_step(step_id="obtain_token", url=auth_url)
async def async_step_obtain_token(self, user_input=None):
"""Obtain token after external auth completed."""
token = await self.plexauth.token(10)
if not token:
return self.async_external_step_done(next_step_id="timed_out")
self.token = token
self.client_id = self.plexauth.client_identifier
return self.async_external_step_done(next_step_id="use_external_token")
async def async_step_timed_out(self, user_input=None):
"""Abort flow when time expires."""
return self.async_abort(reason="token_request_timeout")
async def async_step_use_external_token(self, user_input=None):
"""Continue server validation with external token."""
server_config = {CONF_TOKEN: self.token}
return await self.async_step_server_validate(server_config)
async def async_step_reauth(self, data):
"""Handle a reauthorization flow request."""
self.current_login = dict(data)
return await self.async_step_user()
class PlexOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle Plex options."""
def __init__(self, config_entry):
"""Initialize Plex options flow."""
self.options = copy.deepcopy(dict(config_entry.options))
self.server_id = config_entry.data[CONF_SERVER_IDENTIFIER]
async def async_step_init(self, user_input=None):
"""Manage the Plex options."""
return await self.async_step_plex_mp_settings()
async def async_step_plex_mp_settings(self, user_input=None):
"""Manage the Plex media_player options."""
plex_server = self.hass.data[DOMAIN][SERVERS][self.server_id]
if user_input is not None:
self.options[MP_DOMAIN][CONF_USE_EPISODE_ART] = user_input[
CONF_USE_EPISODE_ART
]
self.options[MP_DOMAIN][CONF_IGNORE_NEW_SHARED_USERS] = user_input[
CONF_IGNORE_NEW_SHARED_USERS
]
self.options[MP_DOMAIN][CONF_IGNORE_PLEX_WEB_CLIENTS] = user_input[
CONF_IGNORE_PLEX_WEB_CLIENTS
]
account_data = {
user: {"enabled": bool(user in user_input[CONF_MONITORED_USERS])}
for user in plex_server.accounts
}
self.options[MP_DOMAIN][CONF_MONITORED_USERS] = account_data
return self.async_create_entry(title="", data=self.options)
available_accounts = {name: name for name in plex_server.accounts}
available_accounts[plex_server.owner] += " [Owner]"
default_accounts = plex_server.accounts
known_accounts = set(plex_server.option_monitored_users)
if known_accounts:
default_accounts = {
user
for user in plex_server.option_monitored_users
if plex_server.option_monitored_users[user]["enabled"]
}
for user in plex_server.accounts:
if user not in known_accounts:
available_accounts[user] += " [New]"
if not plex_server.option_ignore_new_shared_users:
for new_user in plex_server.accounts - known_accounts:
default_accounts.add(new_user)
return self.async_show_form(
step_id="plex_mp_settings",
data_schema=vol.Schema(
{
vol.Required(
CONF_USE_EPISODE_ART,
default=plex_server.option_use_episode_art,
): bool,
vol.Optional(
CONF_MONITORED_USERS, default=default_accounts
): cv.multi_select(available_accounts),
vol.Required(
CONF_IGNORE_NEW_SHARED_USERS,
default=plex_server.option_ignore_new_shared_users,
): bool,
vol.Required(
CONF_IGNORE_PLEX_WEB_CLIENTS,
default=plex_server.option_ignore_plexweb_clients,
): bool,
}
),
)
class PlexAuthorizationCallbackView(HomeAssistantView):
"""Handle callback from external auth."""
url = AUTH_CALLBACK_PATH
name = AUTH_CALLBACK_NAME
requires_auth = False
async def get(self, request):
"""Receive authorization confirmation."""
hass = request.app["hass"]
await hass.config_entries.flow.async_configure(
flow_id=request.query["flow_id"], user_input=None
)
return web_response.Response(
headers={"content-type": "text/html"},
text="<script>window.close()</script>Success! This window can be closed",
)
| apache-2.0 | 1,489,924,755,053,045,500 | 36.36215 | 85 | 0.598837 | false | 4.022893 | true | false | false |
italomaia/turtle-linux | games/Dynamite/pgu/test.py | 1 | 1624 | import pygame
from pygame.locals import *
import gui
screen = pygame.display.set_mode(
(640, 480), FULLSCREEN ) # try adding DOUBLEBUF | HWSURFACE
# pygame.mouse.set_visible(0)
app = gui.App()
c = gui.Container(width=640,height=480)
##
## dialog 1
##
t1 = gui.Table()
t1.tr()
t1.add(gui.Label("Gal Test"))
t2 = gui.Table()
t2.tr()
t2.add(gui.Label("Gui Widgets"))
t2.add(gui.Input())
t2.tr()
t2.add(gui.Label("Button"))
t2.add(gui.Button("Click Me!"))
d1 = gui.Dialog(t1, t2)
c.add(d1, 50, 150)
##
## dialog 2
##
t3 = gui.Table()
t3.tr()
t3.add(gui.Label("Another one"))
t4 = gui.Table()
t4.tr()
t4.add(gui.Label("Name"))
t4.add(gui.Input())
t4.tr()
t4.add(gui.Label("Ohh"))
b1 = gui.Button("OK")
t4.add(b1)
d2 = gui.Dialog(t3, t4)
c.add(d2, 50, 300)
##
## some labels
##
l1 = gui.Label("Suppose this is a menu", color=(255, 255, 255) )
c.add(l1, 50, 50)
l2 = gui.Label("Click <SPACE> to hide top dialog", color=(255, 255,
255) )
c.add(l2, 50, 75)
l3 = gui.Label("Opps... Did it happen?", color=(255, 255, 255) )
##
## app begins
##
app.init(widget=c,screen=screen)
FRAME_EVT = USEREVENT + 1
pygame.event.Event(FRAME_EVT)
pygame.time.set_timer(FRAME_EVT, 30)
_quit = 0
while _quit == 0:
    event = pygame.event.wait()
    if event.type == FRAME_EVT:
        pygame.display.flip()
        continue
    if event.type == KEYDOWN:
        if event.key == K_ESCAPE:
            _quit = 1
            continue
        elif event.key == K_SPACE:
            d1.close()
            c.add(l3, 100, 100)
    app._event(event)
    screen.fill((0,0,0))
    app.paint(screen)
| gpl-3.0 | 4,685,452,212,621,695,000 | 14.037037 | 68 | 0.589286 | false | 2.47561 | false | false | false |
wfx/epack | epack/libarchive/ffi.py | 1 | 7623 | # This file is part of a program licensed under the terms of the GNU Lesser
# General Public License version 2 (or at your option any later version)
# as published by the Free Software Foundation: http://www.gnu.org/licenses/
from __future__ import division, print_function, unicode_literals
from ctypes import (
c_char_p, c_int, c_uint, c_longlong, c_size_t, c_void_p,
c_wchar_p, CFUNCTYPE, POINTER,
)
try:
from ctypes import c_ssize_t
except ImportError:
from ctypes import c_longlong as c_ssize_t
import ctypes
from ctypes.util import find_library
import logging
import mmap
import os
from .exception import ArchiveError
logger = logging.getLogger('libarchive')
page_size = mmap.PAGESIZE
libarchive_path = os.environ.get('LIBARCHIVE') or \
find_library('archive') or \
find_library('libarchive') or \
'libarchive.so'
libarchive = ctypes.cdll.LoadLibrary(libarchive_path)
# Constants
ARCHIVE_EOF = 1 # Found end of archive.
ARCHIVE_OK = 0 # Operation was successful.
ARCHIVE_RETRY = -10 # Retry might succeed.
ARCHIVE_WARN = -20 # Partial success.
ARCHIVE_FAILED = -25 # Current operation cannot complete.
ARCHIVE_FATAL = -30 # No more operations are possible.
AE_IFMT = 0o170000
AE_IFREG = 0o100000
AE_IFLNK = 0o120000
AE_IFSOCK = 0o140000
AE_IFCHR = 0o020000
AE_IFBLK = 0o060000
AE_IFDIR = 0o040000
AE_IFIFO = 0o010000
# Callback types
WRITE_CALLBACK = CFUNCTYPE(
c_ssize_t, c_void_p, c_void_p, POINTER(c_void_p), c_size_t
)
OPEN_CALLBACK = CFUNCTYPE(c_int, c_void_p, c_void_p)
CLOSE_CALLBACK = CFUNCTYPE(c_int, c_void_p, c_void_p)
VOID_CB = lambda *_: ARCHIVE_OK
# Type aliases, for readability
c_archive_p = c_void_p
c_archive_entry_p = c_void_p
# Helper functions
def _error_string(archive_p):
msg = error_string(archive_p)
if msg is None:
return
try:
return msg.decode('ascii')
except UnicodeDecodeError:
return msg
def archive_error(archive_p, retcode):
msg = _error_string(archive_p)
raise ArchiveError(msg, errno(archive_p), retcode, archive_p)
def check_null(ret, func, args):
if ret is None:
raise ArchiveError(func.__name__+' returned NULL')
return ret
def check_int(retcode, func, args):
if retcode >= 0:
return retcode
elif retcode == ARCHIVE_WARN:
logger.warning(_error_string(args[0]))
return retcode
else:
raise archive_error(args[0], retcode)
def ffi(name, argtypes, restype, errcheck=None):
f = getattr(libarchive, 'archive_'+name)
f.argtypes = argtypes
f.restype = restype
if errcheck:
f.errcheck = errcheck
globals()[name] = f
return f
# FFI declarations
# archive_util
errno = ffi('errno', [c_archive_p], c_int)
error_string = ffi('error_string', [c_archive_p], c_char_p)
# archive_entry
ffi('entry_new', [], c_archive_entry_p, check_null)
ffi('entry_filetype', [c_archive_entry_p], c_int)
ffi('entry_mtime', [c_archive_entry_p], c_int)
ffi('entry_perm', [c_archive_entry_p], c_int)
ffi('entry_pathname_w', [c_archive_entry_p], c_wchar_p)
ffi('entry_sourcepath', [c_archive_entry_p], c_char_p)
ffi('entry_size', [c_archive_entry_p], c_longlong)
ffi('entry_size_is_set', [c_archive_entry_p], c_int)
ffi('entry_update_pathname_utf8', [c_archive_entry_p, c_char_p], None)
ffi('entry_clear', [c_archive_entry_p], c_archive_entry_p)
ffi('entry_free', [c_archive_entry_p], None)
# archive_read
ffi('read_new', [], c_archive_p, check_null)
READ_FORMATS = set((
'7zip', 'all', 'ar', 'cab', 'cpio', 'empty', 'iso9660', 'lha', 'mtree',
'rar', 'raw', 'tar', 'xar', 'zip'
))
for f_name in list(READ_FORMATS):
try:
ffi('read_support_format_'+f_name, [c_archive_p], c_int, check_int)
except AttributeError: # pragma: no cover
logger.warning('read format "%s" is not supported' % f_name)
READ_FORMATS.remove(f_name)
READ_FILTERS = set((
'all', 'bzip2', 'compress', 'grzip', 'gzip', 'lrzip', 'lzip', 'lzma',
'lzop', 'none', 'rpm', 'uu', 'xz'
))
for f_name in list(READ_FILTERS):
try:
ffi('read_support_filter_'+f_name, [c_archive_p], c_int, check_int)
except AttributeError: # pragma: no cover
logger.warning('read filter "%s" is not supported' % f_name)
READ_FILTERS.remove(f_name)
ffi('read_open_fd', [c_archive_p, c_int, c_size_t], c_int, check_int)
ffi('read_open_filename_w', [c_archive_p, c_wchar_p, c_size_t],
c_int, check_int)
ffi('read_open_memory', [c_archive_p, c_void_p, c_size_t], c_int, check_int)
ffi('read_next_header', [c_archive_p, POINTER(c_void_p)], c_int, check_int)
ffi('read_next_header2', [c_archive_p, c_void_p], c_int, check_int)
ffi('read_close', [c_archive_p], c_int, check_int)
ffi('read_free', [c_archive_p], c_int, check_int)
# archive_read_disk
ffi('read_disk_new', [], c_archive_p, check_null)
ffi('read_disk_set_standard_lookup', [c_archive_p], c_int, check_int)
ffi('read_disk_open', [c_archive_p, c_char_p], c_int, check_int)
ffi('read_disk_open_w', [c_archive_p, c_wchar_p], c_int, check_int)
ffi('read_disk_descend', [c_archive_p], c_int, check_int)
# archive_read_data
ffi('read_data_block',
[c_archive_p, POINTER(c_void_p), POINTER(c_size_t), POINTER(c_longlong)],
c_int, check_int)
ffi('read_data', [c_archive_p, c_void_p, c_size_t], c_ssize_t, check_int)
ffi('read_data_skip', [c_archive_p], c_int, check_int)
# archive_write
ffi('write_new', [], c_archive_p, check_null)
ffi('write_disk_new', [], c_archive_p, check_null)
ffi('write_disk_set_options', [c_archive_p, c_int], c_int, check_int)
WRITE_FORMATS = set((
'7zip', 'ar_bsd', 'ar_svr4', 'cpio', 'cpio_newc', 'gnutar', 'iso9660',
'mtree', 'mtree_classic', 'pax', 'pax_restricted', 'shar', 'shar_dump',
'ustar', 'v7tar', 'xar', 'zip'
))
for f_name in list(WRITE_FORMATS):
try:
ffi('write_set_format_'+f_name, [c_archive_p], c_int, check_int)
except AttributeError: # pragma: no cover
logger.warning('write format "%s" is not supported' % f_name)
WRITE_FORMATS.remove(f_name)
WRITE_FILTERS = set((
'b64encode', 'bzip2', 'compress', 'grzip', 'gzip', 'lrzip', 'lzip', 'lzma',
'lzop', 'uuencode', 'xz'
))
for f_name in list(WRITE_FILTERS):
try:
ffi('write_add_filter_'+f_name, [c_archive_p], c_int, check_int)
except AttributeError: # pragma: no cover
logger.warning('write filter "%s" is not supported' % f_name)
WRITE_FILTERS.remove(f_name)
ffi('write_open',
[c_archive_p, c_void_p, OPEN_CALLBACK, WRITE_CALLBACK, CLOSE_CALLBACK],
c_int, check_int)
ffi('write_open_fd', [c_archive_p, c_int], c_int, check_int)
ffi('write_open_filename', [c_archive_p, c_char_p], c_int, check_int)
ffi('write_open_filename_w', [c_archive_p, c_wchar_p], c_int, check_int)
ffi('write_open_memory',
[c_archive_p, c_void_p, c_size_t, POINTER(c_size_t)],
c_int, check_int)
ffi('write_get_bytes_in_last_block', [c_archive_p], c_int, check_int)
ffi('write_get_bytes_per_block', [c_archive_p], c_int, check_int)
ffi('write_set_bytes_in_last_block', [c_archive_p, c_int], c_int, check_int)
ffi('write_set_bytes_per_block', [c_archive_p, c_int], c_int, check_int)
ffi('write_header', [c_archive_p, c_void_p], c_int, check_int)
ffi('write_data', [c_archive_p, c_void_p, c_size_t], c_ssize_t, check_int)
ffi('write_data_block', [c_archive_p, c_void_p, c_size_t, c_longlong],
c_int, check_int)
ffi('write_finish_entry', [c_archive_p], c_int, check_int)
ffi('write_close', [c_archive_p], c_int, check_int)
ffi('write_free', [c_archive_p], c_int, check_int)
| gpl-3.0 | -946,142,099,526,545,800 | 30.114286 | 79 | 0.647908 | false | 2.710882 | false | false | false |
chintak/scikit-image | skimage/feature/util.py | 1 | 4726 | import numpy as np
from skimage.util import img_as_float
class FeatureDetector(object):
def __init__(self):
self.keypoints_ = np.array([])
def detect(self, image):
"""Detect keypoints in image.
Parameters
----------
image : 2D array
Input image.
"""
raise NotImplementedError()
class DescriptorExtractor(object):
def __init__(self):
self.descriptors_ = np.array([])
def extract(self, image, keypoints):
"""Extract feature descriptors in image for given keypoints.
Parameters
----------
image : 2D array
Input image.
keypoints : (N, 2) array
Keypoint locations as ``(row, col)``.
"""
raise NotImplementedError()
def plot_matches(ax, image1, image2, keypoints1, keypoints2, matches,
keypoints_color='k', matches_color=None, only_matches=False):
"""Plot matched features.
Parameters
----------
ax : matplotlib.axes.Axes
Matches and image are drawn in this ax.
image1 : (N, M [, 3]) array
First grayscale or color image.
image2 : (N, M [, 3]) array
Second grayscale or color image.
keypoints1 : (K1, 2) array
First keypoint coordinates as ``(row, col)``.
keypoints2 : (K2, 2) array
Second keypoint coordinates as ``(row, col)``.
matches : (Q, 2) array
Indices of corresponding matches in first and second set of
descriptors, where ``matches[:, 0]`` denote the indices in the first
and ``matches[:, 1]`` the indices in the second set of descriptors.
keypoints_color : matplotlib color, optional
Color for keypoint locations.
matches_color : matplotlib color, optional
Color for lines which connect keypoint matches. By default the
color is chosen randomly.
only_matches : bool, optional
Whether to only plot matches and not plot the keypoint locations.
"""
image1 = img_as_float(image1)
image2 = img_as_float(image2)
new_shape1 = list(image1.shape)
new_shape2 = list(image2.shape)
if image1.shape[0] < image2.shape[0]:
new_shape1[0] = image2.shape[0]
elif image1.shape[0] > image2.shape[0]:
new_shape2[0] = image1.shape[0]
if image1.shape[1] < image2.shape[1]:
new_shape1[1] = image2.shape[1]
elif image1.shape[1] > image2.shape[1]:
new_shape2[1] = image1.shape[1]
if new_shape1 != image1.shape:
new_image1 = np.zeros(new_shape1, dtype=image1.dtype)
new_image1[:image1.shape[0], :image1.shape[1]] = image1
image1 = new_image1
if new_shape2 != image2.shape:
new_image2 = np.zeros(new_shape2, dtype=image2.dtype)
new_image2[:image2.shape[0], :image2.shape[1]] = image2
image2 = new_image2
image = np.concatenate([image1, image2], axis=1)
offset = image1.shape
if not only_matches:
ax.scatter(keypoints1[:, 1], keypoints1[:, 0],
facecolors='none', edgecolors=keypoints_color)
ax.scatter(keypoints2[:, 1] + offset[1], keypoints2[:, 0],
facecolors='none', edgecolors=keypoints_color)
ax.imshow(image)
ax.axis((0, 2 * offset[1], offset[0], 0))
for i in range(matches.shape[0]):
idx1 = matches[i, 0]
idx2 = matches[i, 1]
if matches_color is None:
color = np.random.rand(3, 1)
else:
color = matches_color
ax.plot((keypoints1[idx1, 1], keypoints2[idx2, 1] + offset[1]),
(keypoints1[idx1, 0], keypoints2[idx2, 0]),
'-', color=color)
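# Illustrative usage sketch (an assumption, not part of this module): keypoint
# detection/matching would normally come from a detector + matcher; random points
# stand in here purely to exercise the plotting call documented above.
def _plot_matches_demo():  # pragma: no cover
    import matplotlib.pyplot as plt
    rng = np.random.RandomState(0)
    image1 = rng.rand(64, 64)
    image2 = rng.rand(64, 64)
    keypoints1 = rng.randint(0, 64, (10, 2))
    keypoints2 = rng.randint(0, 64, (10, 2))
    matches = np.column_stack([np.arange(10), rng.permutation(10)])
    fig, ax = plt.subplots()
    plot_matches(ax, image1, image2, keypoints1, keypoints2, matches,
                 matches_color='y')
    plt.show()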
def _prepare_grayscale_input_2D(image):
image = np.squeeze(image)
if image.ndim != 2:
raise ValueError("Only 2-D gray-scale images supported.")
return img_as_float(image)
def _mask_border_keypoints(image_shape, keypoints, distance):
"""Mask coordinates that are within certain distance from the image border.
Parameters
----------
image_shape : (2, ) array_like
Shape of the image as ``(rows, cols)``.
keypoints : (N, 2) array
Keypoint coordinates as ``(rows, cols)``.
distance : int
Image border distance.
Returns
-------
mask : (N, ) bool array
Mask indicating if pixels are within the image (``True``) or in the
border region of the image (``False``).
"""
rows = image_shape[0]
cols = image_shape[1]
mask = (((distance - 1) < keypoints[:, 0])
& (keypoints[:, 0] < (rows - distance + 1))
& ((distance - 1) < keypoints[:, 1])
& (keypoints[:, 1] < (cols - distance + 1)))
return mask
| bsd-3-clause | 973,310,001,294,730,000 | 28.354037 | 79 | 0.585485 | false | 3.71249 | false | false | false |
pavlov99/jsonapi | jsonapi/utils.py | 1 | 2220 | """ JSON:API utils."""
class _classproperty(property):
""" Implement property behaviour for classes.
class A():
@_classproperty
@classmethod
def name(cls):
return cls.__name__
"""
def __get__(self, obj, type_):
return self.fget.__get__(None, type_)()
def _cached(f):
""" Decorator that makes a method cached."""
attr_name = '_cached_' + f.__name__
def wrapper(obj, *args, **kwargs):
if not hasattr(obj, attr_name):
setattr(obj, attr_name, f(obj, *args, **kwargs))
return getattr(obj, attr_name)
return wrapper
classproperty = lambda f: _classproperty(classmethod(f))
cached_property = lambda f: property(_cached(f))
cached_classproperty = lambda f: classproperty(_cached(f))
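# Illustrative usage sketch (an assumption, not part of the original module): how the
# three shortcuts above compose. The cached value is stored on the instance as
# ``_cached_<name>`` by the ``_cached`` wrapper.
def _caching_demo():  # pragma: no cover
    class Model(object):
        @classproperty
        def table(cls):
            return cls.__name__.lower()

        @cached_property
        def answer(self):
            return 6 * 7  # evaluated once per instance, then memoized

    assert Model.table == 'model'
    m = Model()
    assert m.answer == 42 and m._cached_answer == 42
    return Model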
class Choices(object):
""" Choices."""
def __init__(self, *choices):
self._choices = []
self._choice_dict = {}
for choice in choices:
if isinstance(choice, (list, tuple)):
if len(choice) == 2:
choice = (choice[0], choice[1], choice[1])
elif len(choice) != 3:
raise ValueError(
"Choices can't handle a list/tuple of length {0}, only\
2 or 3".format(choice))
else:
choice = (choice, choice, choice)
self._choices.append((choice[0], choice[2]))
self._choice_dict[choice[1]] = choice[0]
def __getattr__(self, attname):
try:
return self._choice_dict[attname]
except KeyError:
raise AttributeError(attname)
def __iter__(self):
return iter(self._choices)
def __getitem__(self, index):
return self._choices[index]
def __delitem__(self, index):
del self._choices[index]
def __setitem__(self, index, value):
self._choices[index] = value
def __repr__(self):
return "{0}({1})".format(
self.__class__.__name__,
self._choices
)
def __len__(self):
return len(self._choices)
def __contains__(self, element):
return element in self._choice_dict.values()
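# Illustrative usage sketch (an assumption, not part of the original module): 3-tuples are
# (value, attribute name, display label), 2-tuples duplicate the label as the attribute,
# and bare items collapse to a single value used for all three roles.
def _choices_demo():  # pragma: no cover
    STATUS = Choices(
        (0, 'draft', 'Draft'),
        (1, 'published', 'Published'),
        'archived',
    )
    assert STATUS.draft == 0 and STATUS.published == 1
    assert 'archived' in STATUS  # membership is checked against the stored values
    return list(STATUS)  # [(0, 'Draft'), (1, 'Published'), ('archived', 'archived')]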
| mit | -3,830,283,769,636,155,400 | 23.94382 | 79 | 0.530631 | false | 4.157303 | false | false | false |
NicWayand/xray | xarray/plot/utils.py | 1 | 6442 | import pkg_resources
import numpy as np
import pandas as pd
from ..core.pycompat import basestring
def _load_default_cmap(fname='default_colormap.csv'):
"""
Returns viridis color map
"""
from matplotlib.colors import LinearSegmentedColormap
# Not sure what the first arg here should be
f = pkg_resources.resource_stream(__name__, fname)
cm_data = pd.read_csv(f, header=None).values
return LinearSegmentedColormap.from_list('viridis', cm_data)
def _determine_extend(calc_data, vmin, vmax):
extend_min = calc_data.min() < vmin
extend_max = calc_data.max() > vmax
if extend_min and extend_max:
extend = 'both'
elif extend_min:
extend = 'min'
elif extend_max:
extend = 'max'
else:
extend = 'neither'
return extend
def _build_discrete_cmap(cmap, levels, extend, filled):
"""
Build a discrete colormap and normalization of the data.
"""
import matplotlib as mpl
if not filled:
# non-filled contour plots
extend = 'max'
if extend == 'both':
ext_n = 2
elif extend in ['min', 'max']:
ext_n = 1
else:
ext_n = 0
n_colors = len(levels) + ext_n - 1
pal = _color_palette(cmap, n_colors)
new_cmap, cnorm = mpl.colors.from_levels_and_colors(
levels, pal, extend=extend)
# copy the old cmap name, for easier testing
new_cmap.name = getattr(cmap, 'name', cmap)
return new_cmap, cnorm
def _color_palette(cmap, n_colors):
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
colors_i = np.linspace(0, 1., n_colors)
if isinstance(cmap, (list, tuple)):
# we have a list of colors
try:
# first try to turn it into a palette with seaborn
from seaborn.apionly import color_palette
pal = color_palette(cmap, n_colors=n_colors)
except ImportError:
# if that fails, use matplotlib
# in this case, is there any difference between mpl and seaborn?
cmap = ListedColormap(cmap, N=n_colors)
pal = cmap(colors_i)
elif isinstance(cmap, basestring):
# we have some sort of named palette
try:
# first try to turn it into a palette with seaborn
from seaborn.apionly import color_palette
pal = color_palette(cmap, n_colors=n_colors)
except (ImportError, ValueError):
# ValueError is raised when seaborn doesn't like a colormap
# (e.g. jet). If that fails, use matplotlib
try:
# is this a matplotlib cmap?
cmap = plt.get_cmap(cmap)
except ValueError:
# or maybe we just got a single color as a string
cmap = ListedColormap([cmap], N=n_colors)
pal = cmap(colors_i)
else:
# cmap better be a LinearSegmentedColormap (e.g. viridis)
pal = cmap(colors_i)
return pal
def _determine_cmap_params(plot_data, vmin=None, vmax=None, cmap=None,
center=None, robust=False, extend=None,
levels=None, filled=True, cnorm=None):
"""
Use some heuristics to set good defaults for colorbar and range.
Adapted from Seaborn:
https://github.com/mwaskom/seaborn/blob/v0.6/seaborn/matrix.py#L158
Parameters
==========
plot_data: Numpy array
Doesn't handle xarray objects
Returns
=======
cmap_params : dict
Use depends on the type of the plotting function
"""
ROBUST_PERCENTILE = 2.0
import matplotlib as mpl
calc_data = np.ravel(plot_data[~pd.isnull(plot_data)])
# Setting center=False prevents a divergent cmap
possibly_divergent = center is not False
# Set center to 0 so math below makes sense but remember its state
center_is_none = False
if center is None:
center = 0
center_is_none = True
# Setting both vmin and vmax prevents a divergent cmap
if (vmin is not None) and (vmax is not None):
possibly_divergent = False
# vlim might be computed below
vlim = None
if vmin is None:
if robust:
vmin = np.percentile(calc_data, ROBUST_PERCENTILE)
else:
vmin = calc_data.min()
elif possibly_divergent:
vlim = abs(vmin - center)
if vmax is None:
if robust:
vmax = np.percentile(calc_data, 100 - ROBUST_PERCENTILE)
else:
vmax = calc_data.max()
elif possibly_divergent:
vlim = abs(vmax - center)
if possibly_divergent:
# kwargs not specific about divergent or not: infer defaults from data
divergent = ((vmin < 0) and (vmax > 0)) or not center_is_none
else:
divergent = False
# A divergent map should be symmetric around the center value
if divergent:
if vlim is None:
vlim = max(abs(vmin - center), abs(vmax - center))
vmin, vmax = -vlim, vlim
# Now add in the centering value and set the limits
vmin += center
vmax += center
# Choose default colormaps if not provided
if cmap is None:
if divergent:
cmap = "RdBu_r"
else:
cmap = "viridis"
# Allow viridis before matplotlib 1.5
if cmap == "viridis":
cmap = _load_default_cmap()
# Handle discrete levels
if levels is not None:
if isinstance(levels, int):
ticker = mpl.ticker.MaxNLocator(levels)
levels = ticker.tick_values(vmin, vmax)
vmin, vmax = levels[0], levels[-1]
if extend is None:
extend = _determine_extend(calc_data, vmin, vmax)
if levels is not None:
cmap, cnorm = _build_discrete_cmap(cmap, levels, extend, filled)
return dict(vmin=vmin, vmax=vmax, cmap=cmap, extend=extend,
levels=levels, norm=cnorm)
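# Illustrative call (an assumption, not part of the original module): for data spanning
# zero with an explicit center, the heuristics pick a symmetric range, a diverging
# "RdBu_r"-based palette, and MaxNLocator-derived level boundaries.
def _determine_cmap_params_demo():  # pragma: no cover
    data = np.linspace(-3, 5, 100)
    params = _determine_cmap_params(data, center=0, robust=True, levels=7)
    # the returned vmin/vmax are symmetric about the center, params['cmap'] is a discrete
    # 'RdBu_r'-derived palette, and params['levels'] / params['norm'] drive the binning.
    return params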
def _infer_xy_labels(darray, x, y):
"""
Determine x and y labels. For use in _plot2d
darray must be a 2 dimensional data array.
"""
if x is None and y is None:
if darray.ndim != 2:
raise ValueError('DataArray must be 2d')
y, x = darray.dims
elif x is None or y is None:
raise ValueError('cannot supply only one of x and y')
elif any(k not in darray.coords for k in (x, y)):
raise ValueError('x and y must be coordinate variables')
return x, y
| apache-2.0 | 5,193,884,461,763,979,000 | 28.686636 | 78 | 0.603695 | false | 3.789412 | false | false | false |
xozzo/pyfootball | setup.py | 1 | 1257 | from setuptools import setup, find_packages
import os
if os.path.exists('README.rst'):
readme_path = 'README.rst'
else:
readme_path = 'README.md'
setup(
name='pyfootball',
version='1.0.1',
description='A client library for the football-data.org REST API',
long_description=open(readme_path).read(),
url='https://github.com/xozzo/pyfootball',
author='Timothy Ng',
author_email='[email protected]',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5'
],
keywords='api wrapper client library football data',
packages=find_packages(exclude=['contrib', 'docs', 'tests', 'venv']),
install_requires=['requests'],
test_suite='tests',
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev]
extras_require={
'dev': ['sphinx', 'sphinx-autobuild']
}
)
| mit | -1,856,567,441,525,745,200 | 27.568182 | 73 | 0.6428 | false | 3.797583 | false | false | false |
foursquare/pants | contrib/go/src/python/pants/contrib/go/tasks/go_test.py | 1 | 2117 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import filter
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel
from pants.contrib.go.tasks.go_workspace_task import GoWorkspaceTask
class GoTest(GoWorkspaceTask):
"""Runs `go test` on Go packages.
To run a library's tests, GoTest only requires a Go workspace to be initialized
(see GoWorkspaceTask) with links to necessary source files. It does not require
GoCompile to first compile the library to be tested -- in fact, GoTest will ignore
any binaries in "$GOPATH/pkg/", because Go test files (which live in the package
they are testing) are ignored in normal compilation, so Go test must compile everything
from scratch.
"""
@classmethod
def register_options(cls, register):
super(GoTest, cls).register_options(register)
register('--build-and-test-flags', default='',
fingerprint=True,
help='Flags to pass in to `go test` tool.')
@classmethod
def supports_passthru_args(cls):
return True
def execute(self):
# Only executes the tests from the package specified by the target roots, so
# we don't run the tests for _all_ dependencies of said package.
targets = filter(self.is_local_src, self.context.target_roots)
for target in targets:
self.ensure_workspace(target)
self._go_test(target)
def _go_test(self, target):
args = (self.get_options().build_and_test_flags.split()
+ [target.import_path]
+ self.get_passthru_args())
result, go_cmd = self.go_dist.execute_go_cmd('test', gopath=self.get_gopath(target), args=args,
workunit_factory=self.context.new_workunit,
workunit_labels=[WorkUnitLabel.TEST])
if result != 0:
raise TaskError('{} failed with exit code {}'.format(go_cmd, result))
| apache-2.0 | -9,153,807,366,505,908,000 | 38.943396 | 99 | 0.683042 | false | 4.00189 | true | false | false |
bodylabs/blmath | blmath/geometry/transform/correspondence.py | 1 | 2095 | # FIXME -- move back to core
def apply_correspondence(correspondence_src, correspondence_dst, vertices):
"""
Apply a correspondence defined between two vertex sets to a new set.
Identifies a correspondence between `correspondence_src` and
`correspondence_dst` then applies that correspondence to `vertices`.
That is, `correspondence_src` is to `correspondence_dst` as `vertices` is
to [ return value ].
`correspondence_src` and `vertices` must have the same topology. The return
value will have the same topology as `correspondence_dst`. Arguments can
be passed as `chumpy` or `numpy` arrays.
The most common usecase here is establishing a relationship between an
alignment and a pointcloud or set of landmarks. The pointcloud or landmarks
can then be moved automatically as the alignment is adjusted (e.g. fit to a
different mesh, reposed, etc).
Args:
correspondence_src: The source vertices for the correspondence
correspondence_dst: The destination vertices for the correspondence
vertices: The vertices to map using the defined correspondence
Returns:
the mapped version of `vertices`
Example usage
-------------
>>> transformed_scan_vertices = apply_correspondence(
... correspondence_src=alignment.v,
... correspondence_dst=scan.v,
... vertices=reposed_alignment.v
... )
>>> transformed_scan = Mesh(v=transformed_scan_vertices, vc=scan.vc)
"""
import chumpy as ch
from bodylabs.mesh.landmarking.transformed_lm import TransformedCoeffs
from bodylabs.mesh.landmarking.transformed_lm import TransformedLms
ch_desired = any([
isinstance(correspondence_src, ch.Ch),
isinstance(correspondence_dst, ch.Ch),
isinstance(vertices, ch.Ch),
])
coeffs = TransformedCoeffs(
src_v=correspondence_src, dst_v=correspondence_dst)
transformed_vertices = TransformedLms(
transformed_coeffs=coeffs, src_v=vertices)
return transformed_vertices if ch_desired else transformed_vertices.r
| bsd-2-clause | -4,415,321,806,514,047,000 | 36.410714 | 79 | 0.705967 | false | 4.052224 | false | false | false |
lepinkainen/pyfibot | pyfibot/modules/module_geoip.py | 1 | 1389 | from __future__ import unicode_literals, print_function, division
import pygeoip
import os.path
import sys
import socket
try:
from modules.module_usertrack import get_table
user_track_available = True
except ImportError:
user_track_available = False
# http://dev.maxmind.com/geoip/legacy/geolite/
DATAFILE = os.path.join(sys.path[0], "GeoIP.dat")
# STANDARD = reload from disk
# MEMORY_CACHE = load to memory
# MMAP_CACHE = memory using mmap
gi4 = pygeoip.GeoIP(DATAFILE, pygeoip.MEMORY_CACHE)
def command_geoip(bot, user, channel, args):
"""Determine the user's country based on host or nick, if module_usertrack is used."""
if not args:
return bot.say(channel, "usage: .geoip HOST/NICK")
host = args
nick = None
if user_track_available:
table = get_table(bot, channel)
user = table.find_one(nick=args)
if user:
nick = user["nick"]
host = user["host"]
try:
country = gi4.country_name_by_name(host)
except socket.gaierror:
country = None
if country:
if nick:
return bot.say(channel, "%s (%s) is in %s" % (nick, host, country))
return bot.say(channel, "%s is in %s" % (host, country))
if nick:
return bot.say(channel, "Host not found for %s (%s)" % (nick, host))
return bot.say(channel, "Host not found for %s" % host)
| bsd-3-clause | -4,394,593,471,870,656,500 | 26.78 | 90 | 0.636429 | false | 3.412776 | false | false | false |
llou/panopticon | panopticon/core/database.py | 1 | 7145 | # database.py is part of Panopticon.
# Panopticon is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Panopticon is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Panopticon. If not, see <http://www.gnu.org/licenses/>.
from contextlib import contextmanager
from paramiko import RSAKey as pRSAKey, DSSKey
from sqlalchemy import create_engine, Column, DateTime, String, Integer, Text, Boolean
from sqlalchemy.orm import sessionmaker, relationship, backref
from sqlalchemy.sql import not_
from sqlalchemy.schema import ForeignKey
from sqlalchemy.pool import NullPool
from sqlalchemy.ext.declarative import declarative_base
from panopticon.core.util.database import key_value_property
Base = declarative_base()
class Value(Base):
__tablename__ = "values"
id = Column(Integer(), primary_key=True)
name = Column(String(1000))
value = Column(String(1000), nullable=True)
parent_id = Column(Integer, ForeignKey("values.id"), nullable=True)
values = relationship("Value", backref=backref('parent', remote_side=[id],
cascade="all"))
type = Column(String(20))
def __init__(self, name, _type, value="", parent_id=None):
self.name = name
self.type = _type
self.value = value
self.parent_id = parent_id
@property
def root(self):
        return self.id == self.parent_id  # compare against the raw FK; self.parent is the related object
class Service(Base):
__tablename__ = "services"
name = Column(String(50), primary_key=True)
class Computer(Base):
__tablename__ = "computers"
__table_args__ = {'sqlite_autoincrement':True}
name = Column(String(255), primary_key=True)
key_name = Column(String(100), ForeignKey('keys.name', onupdate="CASCADE"))
active = Column(Boolean(), default=True)
key = relationship("Key", backref=backref('computers'))
logs = relationship("Log", backref="computer", order_by="Log.time")
def __init__(self, name, key_name="", active=True):
self.name = name
self.active = active
self.key_name = key_name
class Log(Base):
__tablename__ = "logs"
id = Column('id', Integer, primary_key=True)
time = Column(DateTime())
level = Column(String(10))
message = Column(Text())
computer_name = Column(String(255), ForeignKey('computers.name',
ondelete="CASCADE", onupdate="CASCADE"), index=True)
service_name = Column(String(255), ForeignKey('services.name',
ondelete="CASCADE", onupdate="CASCADE"), index=True)
role_name = Column(String(255), index=True)
action_name = Column(String(255), index=True)
def __init__(self, time, level, message, computer_name="",
service_name="", role_name="", action_name=""):
self.time = time
self.level = level
self.message = message
self.computer_name = computer_name
class FileTrack(Base):
__tablename__ = "filetracks"
uid = Column("uid", String(32), primary_key=True)
_computer_name = Column("computer_name", String(255),ForeignKey('computers.name'))
_path = Column("path", Text())
modification_time = Column("modification_time", DateTime())
md5 = Column("md5", String(32))
def __init__(self, computer_name, path, modification_time, md5=""):
self.computer_name = computer_name
self.path = path
self.modification_time = modification_time
self.md5 = md5
self.update_uid()
@property
def computer_name(self):
return self._computer_name
@computer_name.setter
def computer_name(self, value):
self._computer_name = value
self.update_uid()
@property
def path(self):
return self._path
@path.setter
def path(self, value):
self._path = value
self.update_uid()
def update_uid(self):
if self.computer_name and self.path:
self.uid = "%s:%s" % (self.computer_name, self.path)
else:
self.uid = ""
class Key(Base):
__tablename__ = "keys"
name = Column(String(100), primary_key=True)
algorithm = Column(String(20))
v1 = Column(String(2048))
v2 = Column(String(2048))
v3 = Column(String(2048))
v4 = Column(String(2048))
key_class = None
key_vals = []
__mapper_args__ = {'polymorphic_on' : algorithm}
@classmethod
def build_from_paramiko_key(cls, name, p_key):
if isinstance(p_key, pRSAKey):
return RSAKey(name, p_key.e, p_key.n)
elif isinstance(p_key, DSSKey):
return DSAKey(name, p_key.p, p_key.q, p_key.g, p_key.y)
else:
raise Exception("Not valid key")
def __init__(self, name, algorithm, v1, v2, v3, v4):
self.name = name
self.algorithm = algorithm
self.v1 = v1
self.v2 = v2
self.v3 = v3
self.v4 = v4
def get_paramiko_key(self):
vals = [ getattr(self, x) for x in self.key_vals ]
return self.key_class(vals=vals)
class RSAKey(Key):
__mapper_args__ = {'polymorphic_identity':'rsa'}
key_class = pRSAKey
key_vals = [ 'e', 'n' ]
def __init__(self, name, e, n):
self.name = name
self.algorithm = "rsa"
self.e = e
self.n = n
e = key_value_property("v1")
n = key_value_property("v2")
class DSAKey(Key):
__mapper_args__ = {'polymorphic_identity':'dsa'}
key_class = DSSKey
key_vals = [ 'p', 'q', 'g', 'y' ]
def __init__(self, name, p, q, g, y):
self.name = name
self.algorithm = "dsa"
self.p = p
self.q = q
self.g = g
self.y = y
p = key_value_property("v1")
q = key_value_property("v2")
g = key_value_property("v3")
y = key_value_property("v4")
class PanopticonDB(object):
def __init__(self, panopticon, engine=None):
self.panopticon = panopticon
self.engine = engine if engine is not None else create_engine(panopticon.db_url, poolclass=NullPool)
Base.metadata.create_all(self.engine)
self.Session = sessionmaker(bind=self.engine)
self.sync()
@contextmanager
def get_session(self):
session = self.Session()
yield session
session.commit()
session.close()
def purge(self,sure=False):
if sure:
Base.metadata.drop_all(self.engine)
Base.metadata.create_all(self.engine)
def sync(self):
computer_names = [ x[0] for x in self.panopticon.computers ]
with self.get_session() as session:
session.execute(Computer.__table__.update().where(Computer.name.in_(computer_names)).values(active=True))
            session.execute(Computer.__table__.update().where(not_(Computer.name.in_(computer_names))).values(active=False))  # deactivate computers that are no longer listed
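# Illustrative usage sketch (an assumption, not part of the original code base): the
# `panopticon` argument only needs the `db_url` and `computers` attributes used above.
def _example_session_usage(panopticon):  # pragma: no cover
    db = PanopticonDB(panopticon)
    with db.get_session() as session:  # commits and closes automatically on exit
        session.add(Computer("web01"))
        active = session.query(Computer).filter_by(active=True).all()
    return active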
| gpl-3.0 | 3,336,946,915,647,172,000 | 31.775229 | 123 | 0.626312 | false | 3.581454 | false | false | false |
i-namekawa/TopSideMonitor | plotting.py | 1 | 37323 | import os, sys, time
from glob import glob
import cv2
from pylab import *
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.backends.backend_pdf import PdfPages
matplotlib.rcParams['figure.facecolor'] = 'w'
from scipy.signal import argrelextrema
import scipy.stats as stats
import scipy.io as sio
from scipy import signal
from xlwt import Workbook
# specify these in mm to match your behavior chamber.
CHMAMBER_LENGTH=235
WATER_HIGHT=40
# quick plot should also show xy_within and location_one_third etc
# summary PDF: handle the exception raised when a pickle file is missing a fish that is present in another pickle file
## these three taken from http://stackoverflow.com/a/18420730/566035
def strided_sliding_std_dev(data, radius=5):
windowed = rolling_window(data, (2*radius, 2*radius))
shape = windowed.shape
windowed = windowed.reshape(shape[0], shape[1], -1)
return windowed.std(axis=-1)
def rolling_window(a, window):
"""Takes a numpy array *a* and a sequence of (or single) *window* lengths
and returns a view of *a* that represents a moving window."""
if not hasattr(window, '__iter__'):
return rolling_window_lastaxis(a, window)
for i, win in enumerate(window):
if win > 1:
a = a.swapaxes(i, -1)
a = rolling_window_lastaxis(a, win)
a = a.swapaxes(-2, i)
return a
def rolling_window_lastaxis(a, window):
"""Directly taken from Erik Rigtorp's post to numpy-discussion.
<http://www.mail-archive.com/[email protected]/msg29450.html>"""
if window < 1:
raise ValueError, "`window` must be at least 1."
if window > a.shape[-1]:
raise ValueError, "`window` is too long."
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
## stealing ends here... //
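# Illustrative check (an added note, not in the original script) of the borrowed helpers:
# rolling_window returns a strided view of sliding windows and strided_sliding_std_dev
# reduces every (2*radius x 2*radius) patch of a 2-D array to its standard deviation.
def _rolling_window_demo():  # pragma: no cover
    a = np.arange(6)
    w = rolling_window(a, 3)                    # shape (4, 3): [0 1 2], [1 2 3], [2 3 4], [3 4 5]
    img = np.arange(100.0).reshape(10, 10)
    s = strided_sliding_std_dev(img, radius=3)  # shape (5, 5): std of every 6x6 patch
    return w, s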
def filterheadxy(headx,heady,thrs_denom=10):
b, a = signal.butter(8, 0.125)
dhy = np.abs(np.hstack((0, np.diff(heady,1))))
thrs = np.nanstd(dhy)/thrs_denom
ind2remove = dhy>thrs
headx[ind2remove] = np.nan
heady[ind2remove] = np.nan
headx = interp_nan(headx)
heady = interp_nan(heady)
headx = signal.filtfilt(b, a, headx, padlen=150)
heady = signal.filtfilt(b, a, heady, padlen=150)
return headx,heady
def smoothRad(theta, thrs=np.pi/4*3):
jumps = (np.diff(theta) > thrs).nonzero()[0]
print 'jumps.size', jumps.size
while jumps.size:
# print '%d/%d' % (jumps[0], theta.size)
theta[jumps+1] -= np.pi
jumps = (np.diff(theta) > thrs).nonzero()[0]
return theta
def datadct2array(data, key1, key2):
# put these in a MATLAB CELL
trialN = len(data[key1][key2])
matchedUSnameP = np.zeros((trialN,), dtype=np.object)
fnameP = np.zeros((trialN,), dtype=np.object)
# others to append to a list
eventsP = []
speed3DP = []
movingSTDP = []
d2inflowP = []
xP, yP, zP = [], [], []
XP, YP, ZP = [], [], []
ringpixelsP = []
peaks_withinP = []
swimdir_withinP = []
xy_withinP = []
location_one_thirdP = []
dtheta_shapeP = []
dtheta_velP = []
turns_shapeP = []
turns_velP = []
for n, dct in enumerate(data[key1][key2]):
# MATLAB CELL
matchedUSnameP[n] = dct['matchedUSname']
fnameP[n] = dct['fname']
# 2D array
eventsP.append([ele if type(ele) is not list else ele[0] for ele in dct['events']])
speed3DP.append(dct['speed3D'])
movingSTDP.append(dct['movingSTD'])
d2inflowP.append(dct['d2inflow'])
xP.append(dct['x'])
yP.append(dct['y'])
zP.append(dct['z'])
XP.append(dct['X'])
YP.append(dct['Y'])
ZP.append(dct['Z'])
ringpixelsP.append(dct['ringpixels'])
peaks_withinP.append(dct['peaks_within'])
swimdir_withinP.append(dct['swimdir_within'])
xy_withinP.append(dct['xy_within'])
location_one_thirdP.append(dct['location_one_third'])
dtheta_shapeP.append(dct['dtheta_shape'])
dtheta_velP.append(dct['dtheta_vel'])
turns_shapeP.append(dct['turns_shape'])
turns_velP.append(dct['turns_vel'])
TVroi = np.array(dct['TVroi'])
SVroi = np.array(dct['SVroi'])
return matchedUSnameP, fnameP, np.array(eventsP), np.array(speed3DP), np.array(d2inflowP), \
np.array(xP), np.array(yP), np.array(zP), np.array(XP), np.array(YP), np.array(ZP), \
np.array(ringpixelsP), np.array(peaks_withinP), np.array(swimdir_withinP), \
np.array(xy_withinP), np.array(dtheta_shapeP), np.array(dtheta_velP), \
np.array(turns_shapeP), np.array(turns_velP), TVroi, SVroi
def pickle2mat(fp, data=None):
# fp : full path to pickle file
# data : option to provide data to skip np.load(fp)
if not data:
data = np.load(fp)
for key1 in data.keys():
for key2 in data[key1].keys():
matchedUSname, fname, events, speed3D, d2inflow, x, y, z, X, Y, Z, \
ringpixels, peaks_within, swimdir_within, xy_within, dtheta_shape, dtheta_vel, \
turns_shape, turns_vel, TVroi, SVroi = datadct2array(data, key1, key2)
datadict = {
'matchedUSname' : matchedUSname,
'fname' : fname,
'events' : events,
'speed3D' : speed3D,
'd2inflow' : d2inflow,
'x' : x,
'y' : y,
'z' : z,
'X' : X,
'Y' : Y,
'Z' : Z,
'ringpixels' : ringpixels,
'peaks_within' : peaks_within,
'swimdir_within' : swimdir_within,
'xy_within' : xy_within,
'dtheta_shape' : dtheta_shape,
'dtheta_vel' : dtheta_vel,
'turns_shape' : turns_shape,
'turns_vel' : turns_vel,
'TVroi' : TVroi,
'SVroi' : SVroi,
}
outfp = '%s_%s_%s.mat' % (fp[:-7],key1,key2)
sio.savemat(outfp, datadict, oned_as='row', do_compression=True)
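# e.g. pickle2mat('session01.pickle') writes one MATLAB file per (key1, key2) pair found in
# the pickle, named 'session01_<key1>_<key2>.mat' (the pickle file name here is illustrative).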
def interp_nan(x):
'''
Replace nan by interporation
http://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array
'''
ok = -np.isnan(x)
if (ok == False).all():
return x
else:
xp = ok.ravel().nonzero()[0]
fp = x[ok]
_x = np.isnan(x).ravel().nonzero()[0]
x[-ok] = np.interp(_x, xp, fp)
return x
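# e.g. interp_nan(np.array([1., np.nan, np.nan, 4.])) -> array([ 1., 2., 3., 4.])
# (illustrative: interior NaNs are filled in place by linear interpolation between valid samples)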
def polytest(x,y,rx,ry,rw,rh,rang):
points=cv2.ellipse2Poly(
(rx,ry),
axes=(rw/2,rh/2),
angle=rang,
arcStart=0,
arcEnd=360,
delta=3
)
return cv2.pointPolygonTest(np.array(points), (x,y), measureDist=1)
def depthCorrection(z,x,TVx1,TVx2,SVy1,SVy2,SVy3):
z0 = z - SVy1
x0 = x - TVx1
mid = (SVy2-SVy1)/2
adj = (z0 - mid) / (SVy2-SVy1) * (SVy2-SVy3) * (1-(x0)/float(TVx2-TVx1))
return z0 + adj + SVy1 # back to abs coord
def putNp2xls(array, ws):
for r, row in enumerate(array):
for c, val in enumerate(row):
ws.write(r, c, val)
def drawLines(mi, ma, events, fps=30.0):
CS, USs, preRange = events
plot([CS-preRange, CS-preRange], [mi,ma], '--c') # 2 min prior odor
plot([CS , CS ], [mi,ma], '--g', linewidth=2) # CS onset
if USs:
if len(USs) > 3:
colors = 'r' * len(USs)
else:
colors = [_ for _ in ['r','b','c'][:len(USs)]]
for c,us in zip(colors, USs):
plot([us, us],[mi,ma], linestyle='--', color=c, linewidth=2) # US onset
plot([USs[0]+preRange/2,USs[0]+preRange/2], [mi,ma], linestyle='--', color=c, linewidth=2) # end of US window
xtck = np.arange(0, max(CS+preRange, max(USs)), 0.5*60*fps) # every 0.5 min tick
else:
xtck = np.arange(0, CS+preRange, 0.5*60*fps) # every 0.5 min tick
xticks(xtck, xtck/fps/60)
gca().xaxis.set_minor_locator(MultipleLocator(5*fps)) # 5 s minor ticks
def approachevents(x,y,z, ringpolyTVArray, ringpolySVArray, fishlength=134, thrs=None):
'''
    fishlength: some old scripts may call this with fishlength
    thrs: multitrack GUI provides this via the ringAppearochLevel spin control.
           can be a numpy array (to track water level change etc)
'''
smoothedz = np.convolve(np.hanning(10)/np.hanning(10).sum(), z, 'same')
peaks = argrelextrema(smoothedz, np.less)[0] # less because 0 is top in image.
# now filter peaks by height.
ringLevel = ringpolySVArray[:,1]
if thrs is None:
thrs = ringLevel+fishlength/2
if type(thrs) == int: # can be numpy array or int
thrs = ringLevel.mean() + thrs
peaks = peaks[ z[peaks] < thrs ]
else: # numpy array should be ready to use
peaks = peaks[ z[peaks] < thrs[peaks] ]
# now filter out by TVringCenter
peaks_within = get_withinring(ringpolyTVArray, peaks, x, y)
return smoothedz, peaks_within
def get_withinring(ringpolyTVArray, timepoints, x, y):
rx = ringpolyTVArray[:,0].astype(np.int)
ry = ringpolyTVArray[:,1].astype(np.int)
rw = ringpolyTVArray[:,2].astype(np.int)
rh = ringpolyTVArray[:,3].astype(np.int)
rang = ringpolyTVArray[:,4].astype(np.int)
# poly test
peaks_within = []
for p in timepoints:
points=cv2.ellipse2Poly(
(rx[p],ry[p]),
axes=(rw[p]/2,rh[p]/2),
angle=rang[p],
arcStart=0,
arcEnd=360,
delta=3
)
inout = cv2.pointPolygonTest(np.array(points), (x[p],y[p]), measureDist=1)
if inout > 0:
peaks_within.append(p)
return peaks_within
def location_ring(x,y,ringpolyTVArray):
rx = ringpolyTVArray[:,0].astype(np.int)
ry = ringpolyTVArray[:,1].astype(np.int)
rw = ringpolyTVArray[:,2].astype(np.int)
rh = ringpolyTVArray[:,3].astype(np.int)
d2ringcenter = np.sqrt((x-rx)**2 + (y-ry)**2)
# filter by radius 20% buffer in case the ring moves around
indices = (d2ringcenter < 1.2*max(rw.max(), rh.max())).nonzero()[0]
xy_within = get_withinring(ringpolyTVArray, indices, x, y)
return xy_within
def swimdir_analysis(x,y,z,ringpolyTVArray,ringpolySVArray,TVx1,TVy1,TVx2,TVy2,fps=30.0):
# smoothing
# z = np.convolve(np.hanning(16)/np.hanning(16).sum(), z, 'same')
# two cameras have different zoom settings. So, distance per pixel is different. But, for
# swim direction, it does not matter how much x,y are compressed relative to z.
# ring z level from SV
rz = ringpolySVArray[:,1].astype(np.int)
# ring all other params from TV
rx = ringpolyTVArray[:,0].astype(np.int)
ry = ringpolyTVArray[:,1].astype(np.int)
rw = ringpolyTVArray[:,2].astype(np.int)
rh = ringpolyTVArray[:,3].astype(np.int)
rang = ringpolyTVArray[:,4].astype(np.int)
speed3D = np.sqrt( np.diff(x)**2 + np.diff(y)**2 + np.diff(z)**2 )
speed3D = np.hstack(([0], speed3D))
# line in 3D http://tutorial.math.lamar.edu/Classes/CalcIII/EqnsOfLines.aspx
# x-x0 y-y0 z-z0
# ---- = ---- = ----
# a b c
# solve them for z = rz. x0,y0,z0 are tvx, tvy, svy
# x = (a * (rz-z)) / c + x0
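    # (illustrative numeric check, not in the original: a fish at (x,y,z)=(100,150,300) heading
    #  along (a,b,c)=(3,6,-20) crosses the ring plane rz=200 at
    #  water_x = 3*(200-300)/(-20) + 100 = 115 and water_y = 6*(200-300)/(-20) + 150 = 180,
    #  which is what the vectorized water_x / water_y lines below compute for every frame)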
dt = 3 # define slope as diff between current and dt frame before
a = np.hstack( (np.ones(dt), x[dt:]-x[:-dt]) )
b = np.hstack( (np.ones(dt), y[dt:]-y[:-dt]) )
c = np.hstack( (np.ones(dt), z[dt:]-z[:-dt]) )
c[c==0] = np.nan # avoid zero division
water_x = (a * (rz-z) / c) + x
water_y = (b * (rz-z) / c) + y
upwards = c<-2/30.0*fps # not accurate when c is small or negative
xok = (TVx1 < water_x) & (water_x < TVx2)
yok = (TVy1 < water_y) & (water_y < TVy2)
filtered = upwards & xok & yok# & -np.isinf(water_x) & -np.isinf(water_y)
water_x[-filtered] = np.nan
water_y[-filtered] = np.nan
# figure()
# ax = subplot(111)
# ax.imshow(npData['TVbg'], cmap=cm.gray) # clip out from TVx1,TVy1
# ax.plot(x-TVx1, y-TVy1, 'c')
# ax.plot(water_x-TVx1, water_y-TVy1, 'r.')
# xlim([0, TVx2-TVx1]); ylim([TVy2-TVy1, 0])
# draw(); show()
SwimDir = []
for n in filtered.nonzero()[0]:
inout = polytest(water_x[n],water_y[n],rx[n],ry[n],rw[n],rh[n],rang[n])
SwimDir.append((n, inout, speed3D[n])) # inout>0 are inside
return SwimDir, water_x, water_y
def plot_eachTr(events, x, y, z, inflowpos, ringpixels, peaks_within, swimdir_within=None,
pp=None, _title=None, fps=30.0, inmm=False):
CS, USs, preRange = events
# preRange = 3600 2 min prior and 1 min after CS. +900 for 0.5 min
if USs:
xmin, xmax = CS-preRange-10*fps, USs[0]+preRange/2+10*fps
else:
xmin, xmax = CS-preRange-10*fps, CS+preRange/2+(23+10)*fps
fig = figure(figsize=(12,8), facecolor='w')
subplot(511) # Swimming speed
speed3D = np.sqrt( np.diff(x)**2 + np.diff(y)**2 + np.diff(z)**2 )
drawLines(np.nanmin(speed3D), np.nanmax(speed3D), events, fps) # go behind
plot(speed3D)
movingSTD = np.append( np.zeros(fps*10), strided_sliding_std_dev(speed3D, fps*10) )
plot(movingSTD, linewidth=2)
plot(np.ones_like(speed3D) * speed3D.std()*6, '-.', color='gray')
ylim([-5, speed3D[xmin:xmax].max()])
xlim([xmin,xmax]); title(_title)
if inmm:
ylabel('Speed 3D (mm),\n6SD thr');
else:
ylabel('Speed 3D, 6SD thr');
ax = subplot(512) # z level
drawLines(z.min(), z.max(), events)
plot(z, 'b')
pkx = peaks_within.nonzero()[0]
if inmm:
plot(pkx, peaks_within[pkx]*z[xmin:xmax].max()*0.97, 'mo')
if swimdir_within is not None:
___x = swimdir_within.nonzero()[0]
plot(___x, swimdir_within[___x]*z[xmin:xmax].max()*0.96, 'g+')
ylim([z[xmin:xmax].min()*0.95, z[xmin:xmax].max()])
xlim([xmin,xmax]); ylabel('Z (mm)')
else:
plot(pkx, peaks_within[pkx]*z[xmin:xmax].min()*0.97, 'mo')
if swimdir_within is not None:
___x = swimdir_within.nonzero()[0]
plot(___x, swimdir_within[___x]*z[xmin:xmax].min()*0.96, 'g+')
ylim([z[xmin:xmax].min()*0.95, z[xmin:xmax].max()])
ax.invert_yaxis(); xlim([xmin,xmax]); ylabel('z')
subplot(513) # x
drawLines(x.min(), x.max(), events)
plot(x, 'b')
plot(y, 'g')
xlim([xmin,xmax]); ylabel('x,y')
subplot(514) # Distance to the inflow tube
xin, yin, zin = inflowpos
d2inflow = np.sqrt((x-xin) ** 2 + (y-yin) ** 2 + (z-zin) ** 2 )
drawLines(d2inflow.min(), d2inflow.max(), events)
plot(d2inflow)
ylim([d2inflow[xmin:xmax].min(), d2inflow[xmin:xmax].max()])
xlim([xmin,xmax]); ylabel('distance to\ninflow tube')
subplot(515) # ringpixels: it seems i never considered TV x,y for this
rpmax, rpmin = np.nanmax(ringpixels[xmin:xmax]), np.nanmin(ringpixels[xmin:xmax])
drawLines(rpmin, rpmax, events)
plot(ringpixels)
plot(pkx, peaks_within[pkx]*rpmax*1.06, 'mo')
if swimdir_within is not None:
plot(___x, swimdir_within[___x]*rpmax*1.15, 'g+')
ylim([-100, rpmax*1.2])
xlim([xmin,xmax]); ylabel('ringpixels')
tight_layout()
if pp:
fig.savefig(pp, format='pdf')
rng = np.arange(CS-preRange, CS+preRange, dtype=np.int)
return speed3D[rng], movingSTD[rng], d2inflow[rng], ringpixels[rng]
def plot_turnrates(events, dthetasum_shape,dthetasum_vel,turns_shape,turns_vel,
pp=None, _title=None, thrs=np.pi/4*(133.33333333333334/120), fps=30.0):
CS, USs, preRange = events
# preRange = 3600 2 min prior and 1 min after CS. +900 for 0.5 min
if USs:
xmin, xmax = CS-preRange-10*fps, USs[0]+preRange/2+10*fps
else:
xmin, xmax = CS-preRange-10*fps, CS+preRange/2+(23+10)*fps
fig = figure(figsize=(12,8), facecolor='w')
subplot(211)
drawLines(dthetasum_shape.min(), dthetasum_shape.max(), events)
plot(np.ones_like(dthetasum_shape)*thrs,'gray',linestyle='--')
plot(-np.ones_like(dthetasum_shape)*thrs,'gray',linestyle='--')
plot(dthetasum_shape)
dmax = dthetasum_shape[xmin:xmax].max()
plot(turns_shape, (0.5+dmax)*np.ones_like(turns_shape), 'o')
temp = np.zeros_like(dthetasum_shape)
temp[turns_shape] = 1
shape_cumsum = np.cumsum(temp)
shape_cumsum -= shape_cumsum[xmin]
plot( shape_cumsum / shape_cumsum[xmax] * (dmax-dthetasum_shape.min()) + dthetasum_shape.min())
xlim([xmin,xmax]); ylabel('Shape based'); title('Orientation change per 4 frames: ' + _title)
ylim([dthetasum_shape[xmin:xmax].min()-1, dmax+1])
subplot(212)
drawLines(dthetasum_vel.min(), dthetasum_vel.max(), events)
plot(np.ones_like(dthetasum_vel)*thrs,'gray',linestyle='--')
plot(-np.ones_like(dthetasum_vel)*thrs,'gray',linestyle='--')
plot(dthetasum_vel)
dmax = dthetasum_vel[xmin:xmax].max()
plot(turns_vel, (0.5+dmax)*np.ones_like(turns_vel), 'o')
temp = np.zeros_like(dthetasum_vel)
temp[turns_vel] = 1
vel_cumsum = np.cumsum(temp)
vel_cumsum -= vel_cumsum[xmin]
plot( vel_cumsum / vel_cumsum[xmax] * (dmax-dthetasum_shape.min()) + dthetasum_shape.min())
ylim([dthetasum_vel[xmin:xmax].min()-1, dmax+1])
xlim([xmin,xmax]); ylabel('Velocity based')
tight_layout()
if pp:
fig.savefig(pp, format='pdf')
def trajectory(x, y, z, rng, ax, _xlim=[0,640], _ylim=[480,480+300], _zlim=[150,340],
color='b', fps=30.0, ringpolygon=None):
ax.plot(x[rng],y[rng],z[rng], color=color)
ax.view_init(azim=-75, elev=-180+15)
if ringpolygon:
rx, ry, rz = ringpolygon
ax.plot(rx, ry, rz, color='gray')
ax.set_xlim(_xlim[0],_xlim[1])
ax.set_ylim(_ylim[0],_ylim[1])
ax.set_zlim(_zlim[0],_zlim[1])
title(("(%2.1f min to %2.1f min)" % (rng[0]/fps/60.0,(rng[-1]+1)/60.0/fps)))
draw()
def plotTrajectory(x, y, z, events, _xlim=None, _ylim=None, _zlim=None, fps=30.0, pp=None, ringpolygon=None):
CS, USs, preRange = events
rng1 = np.arange(CS-preRange, CS-preRange/2, dtype=int)
rng2 = np.arange(CS-preRange/2, CS, dtype=int)
if USs:
rng3 = np.arange(CS, min(USs), dtype=int)
rng4 = np.arange(min(USs), min(USs)+preRange/2, dtype=int)
combined = np.hstack((rng1,rng2,rng3,rng4))
else:
combined = np.hstack((rng1,rng2))
if _xlim is None:
_xlim = map( int, ( x[combined].min(), x[combined].max() ) )
if _ylim is None:
_ylim = map( int, ( y[combined].min(), y[combined].max() ) )
if _zlim is None:
_zlim = map( int, ( z[combined].min(), z[combined].max() ) )
if ringpolygon:
_zlim[0] = min( _zlim[0], int(ringpolygon[2][0]) )
fig3D = plt.figure(figsize=(12,8), facecolor='w')
ax = fig3D.add_subplot(221, projection='3d'); trajectory(x,y,z,rng1,ax,_xlim,_ylim,_zlim,'c',fps,ringpolygon)
ax = fig3D.add_subplot(222, projection='3d'); trajectory(x,y,z,rng2,ax,_xlim,_ylim,_zlim,'c',fps,ringpolygon)
if USs:
ax = fig3D.add_subplot(223, projection='3d'); trajectory(x,y,z,rng3,ax,_xlim,_ylim,_zlim,'g',fps,ringpolygon)
ax = fig3D.add_subplot(224, projection='3d'); trajectory(x,y,z,rng4,ax,_xlim,_ylim,_zlim,'r',fps,ringpolygon)
tight_layout()
if pp:
fig3D.savefig(pp, format='pdf')
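# Usage sketch (array names are illustrative): x, y, z are full-length
# tracking traces in pixels and events is the usual (CS, USs, preRange) tuple.
#
#   events = (CS_frame, [US_frame], 3600)
#   plotTrajectory(x, y, z, events, fps=30.0, pp=None, ringpolygon=None)
#
# With pp=None the four 3D panels are only drawn; pass a PdfPages instance
# to append the figure to a report instead.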
def add2DataAndPlot(fp, fish, data, createPDF):
if createPDF:
pp = PdfPages(fp[:-7]+'_'+fish+'.pdf')
else:
pp = None
params = np.load(fp)
fname = os.path.basename(fp).split('.')[0] + '.avi'
dirname = os.path.dirname(fp)
preRange = params[(fname, 'mog')]['preRange']
fps = params[(fname, 'mog')]['fps']
TVx1 = params[(fname, fish)]['TVx1']
TVy1 = params[(fname, fish)]['TVy1']
TVx2 = params[(fname, fish)]['TVx2']
TVy2 = params[(fname, fish)]['TVy2']
SVx1 = params[(fname, fish)]['SVx1']
SVx2 = params[(fname, fish)]['SVx2']
SVx3 = params[(fname, fish)]['SVx3']
SVy1 = params[(fname, fish)]['SVy1']
SVy2 = params[(fname, fish)]['SVy2']
SVy3 = params[(fname, fish)]['SVy3']
ringAppearochLevel = params[(fname, fish)]['ringAppearochLevel']
_npz = os.path.join(dirname, os.path.join('%s_%s.npz' % (fname[:-4], fish)))
# if os.path.exists(_npz):
npData = np.load(_npz)
tvx = npData['TVtracking'][:,0] # x with nan
tvy = npData['TVtracking'][:,1] # y
headx = npData['TVtracking'][:,3] # headx
heady = npData['TVtracking'][:,4] # heady
svy = npData['SVtracking'][:,1] # z
InflowTubeTVArray = npData['InflowTubeTVArray']
InflowTubeSVArray = npData['InflowTubeSVArray']
inflowpos = InflowTubeTVArray[:,0], InflowTubeTVArray[:,1], InflowTubeSVArray[:,1]
ringpixels = npData['ringpixel']
ringpolyTVArray = npData['ringpolyTVArray']
ringpolySVArray = npData['ringpolySVArray']
TVbg = npData['TVbg']
print os.path.basename(_npz), 'loaded.'
x,y,z = map(interp_nan, [tvx,tvy,svy])
# z level correction by depth (x)
z = depthCorrection(z,x,TVx1,TVx2,SVy1,SVy2,SVy3)
smoothedz, peaks_within = approachevents(x, y, z,
ringpolyTVArray, ringpolySVArray, thrs=ringAppearochLevel)
# convert to numpy array from list
temp = np.zeros_like(x)
temp[peaks_within] = 1
peaks_within = temp
# normalize to mm
longaxis = float(max((TVx2-TVx1), (TVy2-TVy1))) # before rotation H is applied they are orthogonal
waterlevel = float(SVy2-SVy1)
X = (x-TVx1) / longaxis * CHMAMBER_LENGTH
Y = (TVy2-y) / longaxis * CHMAMBER_LENGTH
Z = (SVy2-z) / waterlevel * WATER_HIGHT # bottom of chamber = 0, higher more positive
inflowpos_mm = ((inflowpos[0]-TVx1) / longaxis * CHMAMBER_LENGTH,
(TVy2-inflowpos[1]) / longaxis * CHMAMBER_LENGTH,
(SVy2-inflowpos[2]) / waterlevel * WATER_HIGHT )
# do the swim direction analysis here
swimdir, water_x, water_y = swimdir_analysis(x,y,z,
ringpolyTVArray,ringpolySVArray,TVx1,TVy1,TVx2,TVy2,fps)
# all of swimdir are within ROI (frame#, inout, speed) but not necessarily within ring
sdir = np.array(swimdir)
withinRing = sdir[:,1]>0 # inout>0 are inside ring
temp = np.zeros_like(x)
temp[ sdir[withinRing,0].astype(int) ] = 1
swimdir_within = temp
# location_ring
xy_within = location_ring(x,y, ringpolyTVArray)
temp = np.zeros_like(x)
temp[xy_within] = 1
xy_within = temp
# location_one_third
if (TVx2-TVx1) > (TVy2-TVy1):
if np.abs(np.arange(TVx1, longaxis+TVx1, longaxis/3) + longaxis/6 - inflowpos[0].mean()).argmin() == 2:
location_one_third = x-TVx1 > longaxis/3*2
else:
location_one_third = x < longaxis/3
else:
if np.abs(np.arange(TVy1, longaxis+TVy1, longaxis/3) + longaxis/6 - inflowpos[1].mean()).argmin() == 2:
location_one_third = y-TVy1 > longaxis/3*2
else:
location_one_third = y < longaxis/3
# turn rate analysis (shape based)
heady, headx = map(interp_nan, [heady, headx])
headx, heady = filterheadxy(headx, heady)
dy = heady - y
dx = headx - x
theta_shape = np.arctan2(dy, dx)
# velocity based
cx, cy = filterheadxy(x.copy(), y.copy()) # centroid x,y
vx = np.append(0, np.diff(cx))
vy = np.append(0, np.diff(cy))
theta_vel = np.arctan2(vy, vx)
# prepare ringpolygon for trajectory plot
rx, ry, rw, rh, rang = ringpolyTVArray.mean(axis=0).astype(int) # use mm ver above
rz = ringpolySVArray.mean(axis=0)[1].astype(int)
RX = (rx-TVx1) / longaxis * CHMAMBER_LENGTH
RY = (TVy2-ry) / longaxis * CHMAMBER_LENGTH
RW = rw / longaxis * CHMAMBER_LENGTH / 2
RH = rh / longaxis * CHMAMBER_LENGTH / 2
RZ = (SVy2-rz) / waterlevel * WATER_HIGHT
points = cv2.ellipse2Poly(
(RX.astype(int),RY.astype(int)),
axes=(RW.astype(int),RH.astype(int)),
angle=rang,
arcStart=0,
arcEnd=360,
delta=3
)
ringpolygon = [points[:,0], points[:,1], np.ones(points.shape[0]) * RZ]
eventTypeKeys = params[(fname, fish)]['EventData'].keys()
CSs = [_ for _ in eventTypeKeys if _.startswith('CS')]
USs = [_ for _ in eventTypeKeys if _.startswith('US')]
# print CSs, USs
# events
for CS in CSs:
CS_Timings = params[(fname, fish)]['EventData'][CS]
CS_Timings.sort()
# initialize when needed
if CS not in data[fish].keys():
data[fish][CS] = []
# now look around for US after it within preRange
for t in CS_Timings:
tr = len(data[fish][CS])+1
rng = np.arange(t-preRange, t+preRange, dtype=np.int)
matchedUSname = None
for us in USs:
us_Timings = params[(fname, fish)]['EventData'][us]
matched = [_ for _ in us_Timings if t-preRange < _ < t+preRange]
if matched:
events = [t, matched, preRange] # ex. CS+
matchedUSname = us
break
else:
continue
_title = '(%s, %s) trial#%02d %s (%s)' % (CS, matchedUSname[0], tr, fname, fish)
print _title, events
_speed3D, _movingSTD, _d2inflow, _ringpixels = plot_eachTr(events, X, Y, Z, inflowpos_mm,
ringpixels, peaks_within, swimdir_within, pp, _title, fps, inmm=True)
# 3d trajectory
_xlim = (0, CHMAMBER_LENGTH)
_zlim = (RZ.max(),0)
plotTrajectory(X, Y, Z, events, _xlim=_xlim, _zlim=_zlim, fps=fps, pp=pp, ringpolygon=ringpolygon)
# turn rate analysis
# shape based
theta_shape[rng] = smoothRad(theta_shape[rng].copy(), thrs=np.pi/2)
dtheta_shape = np.append(0, np.diff(theta_shape)) # full length
kernel = np.ones(4)
dthetasum_shape = np.convolve(dtheta_shape, kernel, 'same')
# 4 frames = 1000/30.0*4 = 133.3 ms
thrs = (np.pi / 2) * (133.33333333333334/120) # Braubach et al 2009 90 degree in 120 ms
peaks_shape = argrelextrema(abs(dthetasum_shape), np.greater)[0]
turns_shape = peaks_shape[ (abs(dthetasum_shape[peaks_shape]) > thrs).nonzero()[0] ]
# velocity based
theta_vel[rng] = smoothRad(theta_vel[rng].copy(), thrs=np.pi/2)
dtheta_vel = np.append(0, np.diff(theta_vel))
dthetasum_vel = np.convolve(dtheta_vel, kernel, 'same')
peaks_vel = argrelextrema(abs(dthetasum_vel), np.greater)[0]
turns_vel = peaks_vel[ (abs(dthetasum_vel[peaks_vel]) > thrs).nonzero()[0] ]
plot_turnrates(events, dthetasum_shape, dthetasum_vel, turns_shape, turns_vel, pp, _title, fps=fps)
_temp = np.zeros_like(dtheta_shape)
_temp[turns_shape] = 1
turns_shape_array = _temp
_temp = np.zeros_like(dtheta_vel)
_temp[turns_vel] = 1
turns_vel_array = _temp
# plot swim direction analysis
fig = figure(figsize=(12,8), facecolor='w')
ax1 = subplot(211)
ax1.imshow(TVbg, cmap=cm.gray) # TVbg is clip out of ROI
ax1.plot(x[rng]-TVx1, y[rng]-TVy1, 'gray')
ax1.plot(water_x[t-preRange:t]-TVx1, water_y[t-preRange:t]-TVy1, 'c.')
if matched:
ax1.plot( water_x[t:matched[0]]-TVx1,
water_y[t:matched[0]]-TVy1, 'g.')
ax1.plot( water_x[matched[0]:matched[0]+preRange/4]-TVx1,
water_y[matched[0]:matched[0]+preRange/4]-TVy1, 'r.')
xlim([0, TVx2-TVx1]); ylim([TVy2-TVy1, 0])
title(_title)
ax2 = subplot(212)
ax2.plot( swimdir_within )
ax2.plot( peaks_within*1.15-0.1, 'mo' )
if matched:
xmin, xmax = t-preRange-10*fps, matched[0]+preRange/4
else:
xmin, xmax = t-preRange-10*fps, t+preRange/2+10*fps
gzcs = np.cumsum(swimdir_within)
gzcs -= gzcs[xmin]
ax2.plot( gzcs/gzcs[xmax] )
drawLines(0,1.2, events)
ylim([0,1.2])
xlim([xmin, xmax])
ylabel('|: SwimDirection\no: approach events')
data[fish][CS].append( {
'fname' : fname,
'x': x[rng], 'y': y[rng], 'z': z[rng],
'X': X[rng], 'Y': Y[rng], 'Z': Z[rng], # calibrate space (mm)
'speed3D': _speed3D, # calibrate space (mm)
'movingSTD' : _movingSTD, # calibrate space (mm)
'd2inflow': _d2inflow, # calibrate space (mm)
'ringpixels': _ringpixels,
'peaks_within': peaks_within[rng],
'xy_within': xy_within[rng],
'location_one_third' : location_one_third[rng],
'swimdir_within' : swimdir_within[rng],
'dtheta_shape': dtheta_shape[rng],
'dtheta_vel': dtheta_vel[rng],
'turns_shape': turns_shape_array[rng], # already +/- preRange
'turns_vel': turns_vel_array[rng],
'events' : events,
'matchedUSname' : matchedUSname,
'TVroi' : (TVx1,TVy1,TVx2,TVy2),
'SVroi' : (SVx1,SVy1,SVx2,SVy2),
} )
if pp:
fig.savefig(pp, format='pdf')
close('all') # release memory ASAP!
if pp:
pp.close()
def getPDFs(pickle_files, fishnames=None, createPDF=True):
# type checking args
if type(pickle_files) is str:
pickle_files = [pickle_files]
# convert to a list or set of fish names
if type(fishnames) is str:
fishnames = [fishnames]
elif not fishnames:
fishnames = set()
# re-organize trials into a dict "data"
data = {}
# figure out trial number (sometime many trials in one files) for each fish
# go through all pickle_files and use timestamps of file to sort events.
timestamps = []
for fp in pickle_files:
# collect ctime of pickled files
fname = os.path.basename(fp).split('.')[0] + '.avi'
timestamps.append( time.strptime(fname, "%b-%d-%Y_%H_%M_%S.avi") )
# look into the pickle and collect fish analyzed
params = np.load(fp) # loading pickled file!
if type(fishnames) is set:
for fish in [fs for fl,fs in params.keys() if fl == fname and fs != 'mog']:
fishnames.add(fish)
timestamps = sorted(range(len(timestamps)), key=timestamps.__getitem__)
# For each fish, go thru all pickled files
for fish in fishnames:
data[fish] = {}
# now go thru the sorted
for ind in timestamps:
fp = pickle_files[ind]
print 'processing #%d\n%s' % (ind, fp)
add2DataAndPlot(fp, fish, data, createPDF)
return data
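# Usage sketch (the folder is a placeholder): collect the per-movie pickles,
# then build the per-fish, per-CS trial dictionary, optionally writing PDFs.
#
#   pickle_files = glob(os.path.join('/path/to/experiment', '*.pickle'))
#   data = getPDFs(pickle_files, fishnames=None, createPDF=False)
#   # data[fish][CSname] is a list of per-trial dicts (see add2DataAndPlot)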
def plotTrials(data, fish, CSname, key, step, offset=0, pp=None):
fig = figure(figsize=(12,8), facecolor='w')
ax1 = fig.add_subplot(121) # raw trace
ax2 = fig.add_subplot(222) # learning curve
ax3 = fig.add_subplot(224) # bar plot
preP, postP, postP2 = [], [], []
longestUS = 0
for n, measurement in enumerate(data[fish][CSname]):
tr = n+1
CS, USs, preRange = measurement['events']
subplot(ax1)
mi = -step*(tr-1)
ma = mi + step
drawLines(mi, ma, (preRange, [preRange+(USs[0]-CS)], preRange))
longestUS = max([us-CS+preRange*3/2 for us in USs]+[longestUS])
# 'measurement[key]': vector around the CS timing (+/-) preRange. i.e., preRange is the center
ax1.plot(measurement[key]-step*(tr-1)+offset)
title(CSname+': '+key) # cf. preRange = 3600 frames
pre = measurement[key][:preRange].mean()+offset # 2 min window
post = measurement[key][preRange:preRange+(USs[0]-CS)].mean()+offset # 23 s window
post2 = measurement[key][preRange+(USs[0]-CS):preRange*3/2+(USs[0]-CS)].mean()+offset # 1 min window after US
preP.append(pre)
postP.append(post)
postP2.append(post2)
ax3.plot([1, 2, 3], [pre, post, post2],'o-')
ax1.set_xlim([0,longestUS])
ax1.axis('off')
subplot(ax2)
x = range(1, tr+1)
y = np.diff((preP,postP), axis=0).ravel()
ax2.plot( x, y, 'ko-', linewidth=2 )
ax2.plot( x, np.zeros_like(x), '-.', linewidth=1, color='gray' )
# grid()
slope, intercept, rvalue, pval, stderr = stats.stats.linregress(x,y)
title('slope = zero? p-value = %f' % pval)
ax2.set_xlabel("Trial#")
ax2.set_xlim([0.5,tr+0.5])
ax2.set_ylabel('CS - pre')
subplot(ax3)
ax3.bar([0.6, 1.6, 2.6], [np.nanmean(preP), np.nanmean(postP), np.nanmean(postP2)], facecolor='none')
t, pval = stats.ttest_rel(postP, preP)
title('paired t p-value = %f' % pval)
ax3.set_xticks([1,2,3])
ax3.set_xticklabels(['pre', CSname, measurement['matchedUSname']])
ax3.set_xlim([0.5,3.5])
ax3.set_ylabel('Raw mean values')
tight_layout(2, h_pad=1, w_pad=1)
if pp:
fig.savefig(pp, format='pdf')
close('all')
return np.vstack((preP, postP, postP2))
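# Reading the windows above (sketch): with fps = 30 and preRange = 3600 frames,
# 'pre' averages the 2 min before the CS, 'post' the CS-to-US interval
# (~23 s here) and 'post2' the ~1 min following the US. For example:
#
#   curves = plotTrials(data, 'fish01', 'CS+', 'speed3D', step=30)
#   # curves.shape == (3, n_trials): rows are pre, post, post2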
def getSummary(data, dirname=None):
for fish in data.keys():
for CSname in data[fish].keys():
if dirname:
pp = PdfPages(os.path.join(dirname, '%s_for_%s.pdf' % (CSname,fish)))
print 'generating %s_for_%s.pdf' % (CSname,fish)
book = Workbook()
sheet1 = book.add_sheet('speed3D')
avgs = plotTrials(data, fish, CSname, 'speed3D', 30, pp=pp)
putNp2xls(avgs, sheet1)
sheet2 = book.add_sheet('d2inflow')
avgs = plotTrials(data, fish, CSname, 'd2inflow', 200, pp=pp)
putNp2xls(avgs, sheet2)
# sheet3 = book.add_sheet('smoothedz')
sheet3 = book.add_sheet('Z')
# avgs = plotTrials(data, fish, CSname, 'smoothedz', 100, pp=pp)
avgs = plotTrials(data, fish, CSname, 'Z', 30, pp=pp)
putNp2xls(avgs, sheet3)
sheet4 = book.add_sheet('ringpixels')
avgs = plotTrials(data, fish, CSname, 'ringpixels', 1200, pp=pp)
putNp2xls(avgs, sheet4)
sheet5 = book.add_sheet('peaks_within')
avgs = plotTrials(data, fish, CSname, 'peaks_within', 1.5, pp=pp)
putNp2xls(avgs, sheet5)
sheet6 = book.add_sheet('swimdir_within')
avgs = plotTrials(data, fish, CSname, 'swimdir_within', 1.5, pp=pp)
putNp2xls(avgs, sheet6)
sheet7 = book.add_sheet('xy_within')
avgs = plotTrials(data, fish, CSname, 'xy_within', 1.5, pp=pp)
putNp2xls(avgs, sheet7)
sheet8 = book.add_sheet('turns_shape')
avgs = plotTrials(data, fish, CSname, 'turns_shape', 1.5, pp=pp)
putNp2xls(avgs, sheet8)
sheet9 = book.add_sheet('turns_vel')
avgs = plotTrials(data, fish, CSname, 'turns_vel', 1.5, pp=pp)
putNp2xls(avgs, sheet9)
if dirname:
pp.close()
book.save(os.path.join(dirname, '%s_for_%s.xls' % (CSname,fish)))
close('all')
else:
show()
def add2Pickles(dirname, pickle_files):
# dirname : folder to look for pickle files
# pickle_files : output, a list to be concatenated.
pattern = os.path.join(dirname, '*.pickle')
temp = [_ for _ in glob(pattern) if not _.endswith('- Copy.pickle') and
not os.path.basename(_).startswith('Summary')]
pickle_files += temp
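# Putting the pieces together (sketch; directory names are placeholders):
#
#   pickle_files = []
#   add2Pickles('/data/conditioning/day1', pickle_files)
#   data = getPDFs(pickle_files, createPDF=True)
#   getSummary(data, '/data/conditioning/day1')  # one PDF and one XLS per fish/CS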
if __name__ == '__main__':
pickle_files = []
# small test data
# add2Pickles('R:/Data/itoiori/behav/adult whitlock/conditioning/NeuroD/Aug4/test', pickle_files)
# outputdir = 'R:/Data/itoiori/behav/adult whitlock/conditioning/NeuroD/Aug4/test'
# show me what you got
for pf in pickle_files:
print pf
fp = os.path.join(outputdir, 'Summary.pickle')
createPDF = True # useful when plotting etc code updated
if 1: # refresh analysis
data = getPDFs(pickle_files, createPDF=createPDF)
import cPickle as pickle
with open(os.path.join(outputdir, 'Summary.pickle'), 'wb') as f:
pickle.dump(data, f)
else: # or reuse previous
data = np.load(fp)
getSummary(data, outputdir)
pickle2mat(fp, data)
| bsd-3-clause | 4,991,329,245,887,673,000 | 36.435306 | 124 | 0.567291 | false | 2.972286 | false | false | false |
jkunimune15/Map-Projections | src/zupplemental/compose_maps.py | 1 | 5115 | #compose_maps.py
#make ALL the maps
import math
from generate_borders import generate_borders
from generate_graticule import generate_graticule, generate_backdrop
from generate_indicatrices import generate_indicatrices
from generate_orthodromes import generate_orthodromes
from generate_shape import plot_shapes
from generate_labels import generate_topographical_labels, label_shapes, label_points
def compose_landmasses():
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="land">')
plot_shapes('ne_50m_land', trim_antarctica=True)
print('\t\t</g>')
print('\t\t<g class="water">')
plot_shapes('ne_50m_lakes', max_rank=4)
print('\t\t</g>')
print('\t</g>')
def compose_graticule():
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="graticule">')
generate_graticule(5, 1, include_tropics=True, adjust_poles=True)
print('\t\t</g>')
print('\t</g>')
def compose_graticule2():
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="graticule">')
generate_graticule(15, .25, include_tropics=True, adjust_poles=True, double_dateline=True)
print('\t\t</g>')
print('\t</g>')
def compose_compound():
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="land">')
plot_shapes('ne_50m_land', trim_antarctica=True)
print('\t\t</g>')
print('\t\t<g class="river">')
plot_shapes('ne_50m_rivers_lake_centerlines', max_rank=4)
print('\t\t</g>')
print('\t\t<g class="lakes">')
plot_shapes('ne_50m_lakes', max_rank=4)
print('\t\t</g>')
print('\t\t<g class="graticule">')
generate_graticule(15, 1, include_tropics=True, adjust_poles=True)
print('\t\t</g>')
print('\t</g>')
def compose_indicatrices():
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="land">')
plot_shapes('ne_50m_land', trim_antarctica=True)
print('\t\t</g>')
print('\t\t<g class="lakes">')
plot_shapes('ne_50m_lakes', max_rank=4)
print('\t\t</g>')
print('\t\t<g class="tissot">')
generate_indicatrices(15, math.radians(3.75), resolution=180, adjust_poles=True)
print('\t\t</g>')
print('\t</g>')
def compose_indicatrices2(ctr_meridian):
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="water">')
generate_backdrop(.5, ctr_meridian=ctr_meridian)
print('\t\t</g>')
print('\t\t<g class="land">')
plot_shapes('ne_110m_land', flesh_out_antarctica=True)
print('\t\t</g>')
print('\t\t<g class="lakes">')
plot_shapes('ne_110m_lakes')
print('\t\t</g>')
print('\t\t<g class="graticule">')
generate_graticule(10, .5, double_dateline=(ctr_meridian==0))
print('\t\t</g>')
print('\t\t<g class="tissot">')
generate_indicatrices(30, 500/6371, ctr_meridian=ctr_meridian, adjust_poles=True, resolution=120, side_res=5, pole_res=120)
print('\t\t</g>')
print('\t</g>')
def compose_political():
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="country">')
generate_borders('ne_50m', trim_antarctica=True)
print('\t\t</g>')
print('\t\t<g class="lakes">')
plot_shapes('ne_50m_lakes', max_rank=4)
print('\t\t</g>')
print('\t</g>')
label_shapes('ne_50m_admin_0_countries', "pol")
def compose_orthodromes():
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="lines">')
generate_orthodromes()
print('\t\t</g>')
print('\t</g>')
def compose_everything():
print('\t<g transform="matrix(1,0,0,-1,180,90)">')
print('\t\t<g class="country">')
generate_borders('ne_10m', trim_antarctica=True, borders_only=False)
print('\t\t<g class="border">')
generate_borders('ne_10m', trim_antarctica=True, borders_only=True)
print('\t\t</g>')
print('\t\t</g>')
print('\t\t<g class="sovereign">')
plot_shapes('ne_10m_admin_0_map_units')
print('\t\t</g>')
print('\t\t<g class="admin">')
plot_shapes('ne_10m_admin_1_states_provinces_lines', filter_field='adm0_a3',
filter_vals=['RUS','CAN','CHN','USA','BRA','AUS','IND','ARG','KAZ'])
print('\t\t</g>')
print('\t\t<g class="dispute">')
plot_shapes('ne_10m_admin_0_boundary_lines_disputed_areas')
print('\t\t</g>')
print('\t\t<g class="coastline">')
plot_shapes('ne_10m_coastline', trim_antarctica=True)
print('\t\t</g>')
print('\t\t<g class="river">')
plot_shapes('ne_10m_rivers_lake_centerlines', max_rank=5)
print('\t\t</g>')
print('\t\t<g class="lake">')
plot_shapes('ne_10m_lakes', max_rank=4)
print('\t\t</g>')
print('\t\t<g class="graticule">')
generate_graticule(5, 1, include_tropics=True, adjust_poles=True)
plot_shapes('ne_10m_geographic_lines', clazz="dateline", filter_field='name', filter_vals=["International Date Line"])
print('\t\t</g>')
print('\t</g>')
generate_topographical_labels('ne_50m', max_rank=2, text_size=4)
label_shapes('ne_10m_lakes', "sea", max_rank=1, text_size=1)
label_shapes('ne_10m_admin_0_countries', "pol", text_size=4)
label_points('cities_capital', "cap", text_size=1)
label_points('cities_other', "cit", text_size=0)
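# The compose_* functions above write SVG fragments to stdout; a sketch for
# capturing a single layer into a file (the output name is illustrative):
#
# import sys
# with open('political_layer.svg.part', 'w') as f:
# 	sys.stdout = f
# 	compose_political()
# 	sys.stdout = sys.__stdout__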
if __name__ == '__main__':
# compose_landmasses()
# compose_graticule()
# compose_compound()
# compose_indicatrices()
# compose_indicatrices2(-0)
# compose_political()
# compose_orthodromes()
compose_everything()
| mit | -1,186,010,880,348,605,000 | 32.874172 | 124 | 0.657869 | false | 2.37355 | false | false | false |
francisco-dlp/hyperspy | hyperspy/drawing/utils.py | 1 | 57321 | # -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import copy
import itertools
import textwrap
from traits import trait_base
import matplotlib.pyplot as plt
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.backend_bases import key_press_handler
import warnings
import numpy as np
from distutils.version import LooseVersion
import logging
import hyperspy as hs
_logger = logging.getLogger(__name__)
def contrast_stretching(data, saturated_pixels):
"""Calculate bounds that leaves out a given percentage of the data.
Parameters
----------
data: numpy array
saturated_pixels: scalar, None
The percentage of pixels that are left out of the bounds. For example,
the low and high bounds of a value of 1 are the 0.5% and 99.5%
percentiles. It must be in the [0, 100] range. If None, set the value
to 0.
Returns
-------
vmin, vmax: scalar
The low and high bounds
Raises
------
ValueError if the value of `saturated_pixels` is out of the valid range.
"""
# Sanity check
if saturated_pixels is None:
saturated_pixels = 0
if not 0 <= saturated_pixels <= 100:
raise ValueError(
"saturated_pixels must be a scalar in the range[0, 100]")
vmin = np.nanpercentile(data, saturated_pixels / 2.)
vmax = np.nanpercentile(data, 100 - saturated_pixels / 2.)
return vmin, vmax
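# Example (sketch): with saturated_pixels=1 the returned bounds are the 0.5%
# and 99.5% percentiles, so a single hot pixel no longer stretches the scale:
#
#   >>> data = np.random.normal(size=10000)
#   >>> data[0] = 1e6
#   >>> vmin, vmax = contrast_stretching(data, 1)
#   >>> vmax < 1e6
#   True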
MPL_DIVERGING_COLORMAPS = [
"BrBG",
"bwr",
"coolwarm",
"PiYG",
"PRGn",
"PuOr",
"RdBu",
"RdGy",
"RdYIBu",
"RdYIGn",
"seismic",
"Spectral", ]
# Add reversed colormaps
MPL_DIVERGING_COLORMAPS += [cmap + "_r" for cmap in MPL_DIVERGING_COLORMAPS]
def centre_colormap_values(vmin, vmax):
"""Calculate vmin and vmax to set the colormap midpoint to zero.
Parameters
----------
vmin, vmax : scalar
The range of data to display.
Returns
-------
cvmin, cvmax : scalar
The values to obtain a centre colormap.
"""
absmax = max(abs(vmin), abs(vmax))
return -absmax, absmax
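# Example (sketch): the returned range is symmetric about zero, so a diverging
# colormap maps zero to its central colour:
#
#   >>> centre_colormap_values(-1, 3)
#   (-3, 3)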
def create_figure(window_title=None,
_on_figure_window_close=None,
disable_xyscale_keys=False,
**kwargs):
"""Create a matplotlib figure.
This function adds the possibility to execute another function
when the figure is closed and to easily set the window title. Any
keyword argument is passed to the plt.figure function
Parameters
----------
window_title : string
_on_figure_window_close : function
disable_xyscale_keys : bool, disable the `k`, `l` and `L` shortcuts which
toggle the x or y axis between linear and log scale.
Returns
-------
fig : plt.figure
"""
fig = plt.figure(**kwargs)
if window_title is not None:
# remove non-alphanumeric characters to prevent file saving problems
# This is a workaround for:
# https://github.com/matplotlib/matplotlib/issues/9056
reserved_characters = r'<>"/\|?*'
for c in reserved_characters:
window_title = window_title.replace(c, '')
window_title = window_title.replace('\n', ' ')
window_title = window_title.replace(':', ' -')
fig.canvas.set_window_title(window_title)
if disable_xyscale_keys and hasattr(fig.canvas, 'toolbar'):
# hack the `key_press_handler` to disable the `k`, `l`, `L` shortcuts
manager = fig.canvas.manager
fig.canvas.mpl_disconnect(manager.key_press_handler_id)
manager.key_press_handler_id = manager.canvas.mpl_connect(
'key_press_event',
lambda event: key_press_handler_custom(event, manager.canvas))
if _on_figure_window_close is not None:
on_figure_window_close(fig, _on_figure_window_close)
return fig
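# Usage sketch (the callback name is illustrative):
#
#   >>> def _on_close():
#   ...     print("figure closed")
#   >>> fig = create_figure(window_title='EELS map: Fe/Ni',
#   ...                     _on_figure_window_close=_on_close,
#   ...                     disable_xyscale_keys=True)
#
# Reserved filename characters are stripped from the title and ':' becomes
# ' -' before it is applied to the window.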
def key_press_handler_custom(event, canvas):
if event.key not in ['k', 'l', 'L']:
key_press_handler(event, canvas, canvas.manager.toolbar)
def on_figure_window_close(figure, function):
"""Connects a close figure signal to a given function.
Parameters
----------
figure : mpl figure instance
function : function
"""
def function_wrapper(evt):
function()
figure.canvas.mpl_connect('close_event', function_wrapper)
def plot_RGB_map(im_list, normalization='single', dont_plot=False):
"""Plot 2 or 3 maps in RGB.
Parameters
----------
im_list : list of Signal2D instances
normalization : {'single', 'global'}
dont_plot : bool
Returns
-------
array: RGB matrix
"""
# from widgets import cursors
height, width = im_list[0].data.shape[:2]
rgb = np.zeros((height, width, 3))
rgb[:, :, 0] = im_list[0].data.squeeze()
rgb[:, :, 1] = im_list[1].data.squeeze()
if len(im_list) == 3:
rgb[:, :, 2] = im_list[2].data.squeeze()
if normalization == 'single':
for i in range(len(im_list)):
rgb[:, :, i] /= rgb[:, :, i].max()
elif normalization == 'global':
rgb /= rgb.max()
rgb = rgb.clip(0, rgb.max())
if not dont_plot:
figure = plt.figure()
ax = figure.add_subplot(111)
ax.frameon = False
ax.set_axis_off()
ax.imshow(rgb, interpolation='nearest')
# cursors.set_mpl_ax(ax)
figure.canvas.draw_idle()
else:
return rgb
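# Usage sketch (the signals are placeholders for two or three Signal2D maps of
# the same shape, e.g. elemental maps):
#
#   >>> rgb = plot_RGB_map([im_fe, im_ni], normalization='single',
#   ...                    dont_plot=True)
#   >>> rgb.shape   # (height, width, 3), each channel scaled to [0, 1]
#
# With dont_plot=False (the default) the composite is displayed instead of
# being returned.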
def subplot_parameters(fig):
"""Returns a list of the subplot parameters of a mpl figure.
Parameters
----------
fig : mpl figure
Returns
-------
tuple : (left, bottom, right, top, wspace, hspace)
"""
wspace = fig.subplotpars.wspace
hspace = fig.subplotpars.hspace
left = fig.subplotpars.left
right = fig.subplotpars.right
top = fig.subplotpars.top
bottom = fig.subplotpars.bottom
return left, bottom, right, top, wspace, hspace
class ColorCycle:
_color_cycle = [mpl.colors.colorConverter.to_rgba(color) for color
in ('b', 'g', 'r', 'c', 'm', 'y', 'k')]
def __init__(self):
self.color_cycle = copy.copy(self._color_cycle)
def __call__(self):
if not self.color_cycle:
self.color_cycle = copy.copy(self._color_cycle)
return self.color_cycle.pop(0)
def plot_signals(signal_list, sync=True, navigator="auto",
navigator_list=None, **kwargs):
"""Plot several signals at the same time.
Parameters
----------
signal_list : list of BaseSignal instances
If sync is set to True, the signals must have the
same navigation shape, but not necessarily the same signal shape.
sync : True or False, default True
If True: the signals will share navigation, all the signals
must have the same navigation shape for this to work, but not
necessarily the same signal shape.
navigator : {"auto", None, "spectrum", "slider", BaseSignal}, default "auto"
See signal.plot docstring for full description
navigator_list : {List of navigator arguments, None}, default None
Set different navigator options for the signals. Must use valid
navigator arguments: "auto", None, "spectrum", "slider", or a
hyperspy Signal. The list must have the same size as signal_list.
If None, the argument specified in navigator will be used.
**kwargs
Any extra keyword arguments are passed to each signal `plot` method.
Example
-------
>>> s_cl = hs.load("coreloss.dm3")
>>> s_ll = hs.load("lowloss.dm3")
>>> hs.plot.plot_signals([s_cl, s_ll])
Specifying the navigator:
>>> s_cl = hs.load("coreloss.dm3")
>>> s_ll = hs.load("lowloss.dm3")
>>> hs.plot.plot_signals([s_cl, s_ll], navigator="slider")
Specifying the navigator for each signal:
>>> s_cl = hs.load("coreloss.dm3")
>>> s_ll = hs.load("lowloss.dm3")
>>> s_edx = hs.load("edx.dm3")
>>> s_adf = hs.load("adf.dm3")
>>> hs.plot.plot_signals(
[s_cl, s_ll, s_edx], navigator_list=["slider",None,s_adf])
"""
import hyperspy.signal
if navigator_list:
if not (len(signal_list) == len(navigator_list)):
raise ValueError(
"signal_list and navigator_list must"
" have the same size")
if sync:
axes_manager_list = []
for signal in signal_list:
axes_manager_list.append(signal.axes_manager)
if not navigator_list:
navigator_list = []
if navigator is None:
navigator_list.extend([None] * len(signal_list))
elif isinstance(navigator, hyperspy.signal.BaseSignal):
navigator_list.append(navigator)
navigator_list.extend([None] * (len(signal_list) - 1))
elif navigator == "slider":
navigator_list.append("slider")
navigator_list.extend([None] * (len(signal_list) - 1))
elif navigator == "spectrum":
navigator_list.extend(["spectrum"] * len(signal_list))
elif navigator == "auto":
navigator_list.extend(["auto"] * len(signal_list))
else:
raise ValueError(
"navigator must be one of \"spectrum\",\"auto\","
" \"slider\", None, a Signal instance")
# Check to see if the spectra have the same navigational shapes
temp_shape_first = axes_manager_list[0].navigation_shape
for i, axes_manager in enumerate(axes_manager_list):
temp_shape = axes_manager.navigation_shape
if not (temp_shape_first == temp_shape):
raise ValueError(
"The spectra does not have the same navigation shape")
axes_manager_list[i] = axes_manager.deepcopy()
if i > 0:
for axis0, axisn in zip(axes_manager_list[0].navigation_axes,
axes_manager_list[i].navigation_axes):
axes_manager_list[i]._axes[axisn.index_in_array] = axis0
del axes_manager
for signal, navigator, axes_manager in zip(signal_list,
navigator_list,
axes_manager_list):
signal.plot(axes_manager=axes_manager,
navigator=navigator,
**kwargs)
# If sync is False
else:
if not navigator_list:
navigator_list = []
navigator_list.extend([navigator] * len(signal_list))
for signal, navigator in zip(signal_list, navigator_list):
signal.plot(navigator=navigator,
**kwargs)
def _make_heatmap_subplot(spectra):
from hyperspy._signals.signal2d import Signal2D
im = Signal2D(spectra.data, axes=spectra.axes_manager._get_axes_dicts())
im.metadata.General.title = spectra.metadata.General.title
im.plot()
return im._plot.signal_plot.ax
def set_xaxis_lims(mpl_ax, hs_axis):
"""
Set the matplotlib axis limits to match that of a HyperSpy axis
Parameters
----------
mpl_ax : :class:`matplotlib.axis.Axis`
The ``matplotlib`` axis to change
hs_axis : :class:`~hyperspy.axes.DataAxis`
The data axis that contains the values that control the scaling
"""
x_axis_lower_lim = hs_axis.axis[0]
x_axis_upper_lim = hs_axis.axis[-1]
mpl_ax.set_xlim(x_axis_lower_lim, x_axis_upper_lim)
def _make_overlap_plot(spectra, ax, color="blue", line_style='-'):
if isinstance(color, str):
color = [color] * len(spectra)
if isinstance(line_style, str):
line_style = [line_style] * len(spectra)
for spectrum_index, (spectrum, color, line_style) in enumerate(
zip(spectra, color, line_style)):
x_axis = spectrum.axes_manager.signal_axes[0]
spectrum = _transpose_if_required(spectrum, 1)
ax.plot(x_axis.axis, spectrum.data, color=color, ls=line_style)
set_xaxis_lims(ax, x_axis)
_set_spectrum_xlabel(spectra if isinstance(spectra, hs.signals.BaseSignal)
else spectra[-1], ax)
ax.set_ylabel('Intensity')
ax.autoscale(tight=True)
def _make_cascade_subplot(
spectra, ax, color="blue", line_style='-', padding=1):
max_value = 0
for spectrum in spectra:
spectrum_yrange = (np.nanmax(spectrum.data) -
np.nanmin(spectrum.data))
if spectrum_yrange > max_value:
max_value = spectrum_yrange
if isinstance(color, str):
color = [color] * len(spectra)
if isinstance(line_style, str):
line_style = [line_style] * len(spectra)
for spectrum_index, (spectrum, color, line_style) in enumerate(
zip(spectra, color, line_style)):
x_axis = spectrum.axes_manager.signal_axes[0]
spectrum = _transpose_if_required(spectrum, 1)
data_to_plot = ((spectrum.data - spectrum.data.min()) /
float(max_value) + spectrum_index * padding)
ax.plot(x_axis.axis, data_to_plot, color=color, ls=line_style)
set_xaxis_lims(ax, x_axis)
_set_spectrum_xlabel(spectra if isinstance(spectra, hs.signals.BaseSignal)
else spectra[-1], ax)
ax.set_yticks([])
ax.autoscale(tight=True)
def _plot_spectrum(spectrum, ax, color="blue", line_style='-'):
x_axis = spectrum.axes_manager.signal_axes[0]
ax.plot(x_axis.axis, spectrum.data, color=color, ls=line_style)
set_xaxis_lims(ax, x_axis)
def _set_spectrum_xlabel(spectrum, ax):
x_axis = spectrum.axes_manager.signal_axes[0]
ax.set_xlabel("%s (%s)" % (x_axis.name, x_axis.units))
def _transpose_if_required(signal, expected_dimension):
# EDS profiles or maps have signal dimension = 0 and navigation dimension
# 1 or 2. For convenience transpose the signal if possible
if (signal.axes_manager.signal_dimension == 0 and
signal.axes_manager.navigation_dimension == expected_dimension):
return signal.T
else:
return signal
def plot_images(images,
cmap=None,
no_nans=False,
per_row=3,
label='auto',
labelwrap=30,
suptitle=None,
suptitle_fontsize=18,
colorbar='multi',
centre_colormap="auto",
saturated_pixels=0,
scalebar=None,
scalebar_color='white',
axes_decor='all',
padding=None,
tight_layout=False,
aspect='auto',
min_asp=0.1,
namefrac_thresh=0.4,
fig=None,
vmin=None,
vmax=None,
*args,
**kwargs):
"""Plot multiple images as sub-images in one figure.
Extra keyword arguments are passed to `matplotlib.figure`.
Parameters
----------
images : list of Signal2D or BaseSignal
`images` should be a list of Signals to plot. For `BaseSignal` with
navigation dimensions 2 and signal dimension 0, the signal will be
transposed to form a `Signal2D`.
Multi-dimensional images will have each plane plotted as a separate
image.
If any signal shape is not suitable, a ValueError will be raised.
cmap : matplotlib colormap, list, or ``'mpl_colors'``, *optional*
The colormap used for the images, by default read from ``pyplot``.
A list of colormaps can also be provided, and the images will
cycle through them. Optionally, the value ``'mpl_colors'`` will
cause the cmap to loop through the default ``matplotlib``
colors (to match with the default output of the
:py:func:`~.drawing.utils.plot_spectra` method).
Note: if using more than one colormap, using the ``'single'``
option for ``colorbar`` is disallowed.
no_nans : bool, optional
If True, set nans to zero for plotting.
per_row : int, optional
The number of plots in each row
label : None, str, or list of str, optional
Control the title labeling of the plotted images.
If None, no titles will be shown.
If 'auto' (default), function will try to determine suitable titles
using Signal2D titles, falling back to the 'titles' option if no good
short titles are detected.
Works best if all images to be plotted have the same beginning
to their titles.
If 'titles', the title from each image's metadata.General.title
will be used.
If any other single str, images will be labeled in sequence using
that str as a prefix.
If a list of str, the list elements will be used to determine the
labels (repeated, if necessary).
labelwrap : int, optional
integer specifying the number of characters that will be used on
one line
If the function returns an unexpected blank figure, lower this
value to reduce overlap of the labels between each figure
suptitle : str, optional
Title to use at the top of the figure. If called with label='auto',
this parameter will override the automatically determined title.
suptitle_fontsize : int, optional
Font size to use for super title at top of figure
colorbar : {'multi', None, 'single'}
Controls the type of colorbars that are plotted.
If None, no colorbar is plotted.
If 'multi' (default), individual colorbars are plotted for each
(non-RGB) image
If 'single', all (non-RGB) images are plotted on the same scale,
and one colorbar is shown for all
centre_colormap : {"auto", True, False}
If True the centre of the color scheme is set to zero. This is
especially useful when using diverging color schemes. If "auto"
(default), diverging color schemes are automatically centred.
saturated_pixels: None, scalar or list of scalar, optional, default: 0
If a list of scalars, its length should match the number of images to
show. If None is given (alone or in the list), it is treated as 0.
The percentage of pixels that are left out of the bounds. For
example, the low and high bounds of a value of 1 are the 0.5% and
99.5% percentiles. It must be in the [0, 100] range.
scalebar : {None, 'all', list of ints}, optional
If None (or False), no scalebars will be added to the images.
If 'all', scalebars will be added to all images.
If list of ints, scalebars will be added to each image specified.
scalebar_color : str, optional
A valid MPL color string; will be used as the scalebar color
axes_decor : {'all', 'ticks', 'off', None}, optional
Controls how the axes are displayed on each image; default is 'all'
If 'all', both ticks and axis labels will be shown
If 'ticks', no axis labels will be shown, but ticks/labels will
If 'off', all decorations and frame will be disabled
If None, no axis decorations will be shown, but ticks/frame will
padding : None or dict, optional
This parameter controls the spacing between images.
If None, default options will be used
Otherwise, supply a dictionary with the spacing options as
keywords and desired values as values
Values should be supplied as used in pyplot.subplots_adjust(),
and can be:
'left', 'bottom', 'right', 'top', 'wspace' (width),
and 'hspace' (height)
tight_layout : bool, optional
If true, hyperspy will attempt to improve image placement in
figure using matplotlib's tight_layout
If false, repositioning images inside the figure will be left as
an exercise for the user.
aspect : str or numeric, optional
If 'auto', aspect ratio is auto determined, subject to min_asp.
If 'square', image will be forced onto square display.
If 'equal', aspect ratio of 1 will be enforced.
If float (or int/long), given value will be used.
min_asp : float, optional
Minimum aspect ratio to be used when plotting images
namefrac_thresh : float, optional
Threshold to use for auto-labeling. This parameter controls how
much of the titles must be the same for the auto-shortening of
labels to activate. Can vary from 0 to 1. Smaller values
encourage shortening of titles by auto-labeling, while larger
values will require more overlap in titles before activing the
auto-label code.
fig : mpl figure, optional
If set, the images will be plotted to an existing MPL figure
vmin, vmax : scalar or list of scalar, optional, default: None
If list of scalar, the length should match the number of images to
show.
A list of scalar is not compatible with a single colorbar.
See vmin, vmax of matplotlib.imshow() for more details.
*args, **kwargs, optional
Additional arguments passed to matplotlib.imshow()
Returns
-------
axes_list : list
a list of subplot axes that hold the images
See Also
--------
plot_spectra : Plotting of multiple spectra
plot_signals : Plotting of multiple signals
plot_histograms : Compare signal histograms
Notes
-----
`interpolation` is a useful parameter to provide as a keyword
argument to control how the space between pixels is interpolated. A
value of ``'nearest'`` will cause no interpolation between pixels.
`tight_layout` is known to be quite brittle, so an option is provided
to disable it. Turn this option off if output is not as expected,
or try adjusting `label`, `labelwrap`, or `per_row`
"""
def __check_single_colorbar(cbar):
if cbar == 'single':
raise ValueError('Cannot use a single colorbar with multiple '
'colormaps. Please check for compatible '
'arguments.')
from hyperspy.drawing.widgets import ScaleBar
from hyperspy.misc import rgb_tools
from hyperspy.signal import BaseSignal
# Check that we have a hyperspy signal
im = [images] if not isinstance(images, (list, tuple)) else images
for image in im:
if not isinstance(image, BaseSignal):
raise ValueError("`images` must be a list of image signals or a "
"multi-dimensional signal."
" " + repr(type(images)) + " was given.")
# For list of EDS maps, transpose the BaseSignal
if isinstance(images, (list, tuple)):
images = [_transpose_if_required(image, 2) for image in images]
# If input is >= 1D signal (e.g. for multi-dimensional plotting),
# copy it and put it in a list so labeling works out as (x,y) when plotting
if isinstance(images,
BaseSignal) and images.axes_manager.navigation_dimension > 0:
images = [images._deepcopy_with_new_data(images.data)]
n = 0
for i, sig in enumerate(images):
if sig.axes_manager.signal_dimension != 2:
raise ValueError("This method only plots signals that are images. "
"The signal dimension must be equal to 2. "
"The signal at position " + repr(i) +
" was " + repr(sig) + ".")
# increment n by the navigation size, or by 1 if the navigation size is
# <= 0
n += (sig.axes_manager.navigation_size
if sig.axes_manager.navigation_size > 0
else 1)
# If no cmap given, get default colormap from pyplot:
if cmap is None:
cmap = [plt.get_cmap().name]
elif cmap == 'mpl_colors':
for n_color, c in enumerate(mpl.rcParams['axes.prop_cycle']):
make_cmap(colors=['#000000', c['color']],
name='mpl{}'.format(n_color))
cmap = ['mpl{}'.format(i) for i in
range(len(mpl.rcParams['axes.prop_cycle']))]
__check_single_colorbar(colorbar)
# cmap is list, tuple, or something else iterable (but not string):
elif hasattr(cmap, '__iter__') and not isinstance(cmap, str):
try:
cmap = [c.name for c in cmap] # convert colormap to string
except AttributeError:
cmap = [c for c in cmap] # c should be string if not colormap
__check_single_colorbar(colorbar)
elif isinstance(cmap, mpl.colors.Colormap):
cmap = [cmap.name] # convert single colormap to list with string
elif isinstance(cmap, str):
cmap = [cmap] # cmap is single string, so make it a list
else:
# Didn't understand cmap input, so raise error
raise ValueError('The provided cmap value was not understood. Please '
'check input values.')
# If any of the cmaps given are diverging, and auto-centering, set the
# appropriate flag:
if centre_colormap == "auto":
centre_colormaps = []
for c in cmap:
if c in MPL_DIVERGING_COLORMAPS:
centre_colormaps.append(True)
else:
centre_colormaps.append(False)
# if it was True, just convert to list
elif centre_colormap:
centre_colormaps = [True]
# likewise for false
elif not centre_colormap:
centre_colormaps = [False]
# finally, convert lists to cycle generators for adaptive length:
centre_colormaps = itertools.cycle(centre_colormaps)
cmap = itertools.cycle(cmap)
def _check_arg(arg, default_value, arg_name):
if isinstance(arg, list):
if len(arg) != n:
_logger.warning('The provided {} values are ignored because the '
'length of the list does not match the number of '
'images'.format(arg_name))
arg = [default_value] * n
else:
arg = [arg] * n
return arg
vmin = _check_arg(vmin, None, 'vmin')
vmax = _check_arg(vmax, None, 'vmax')
saturated_pixels = _check_arg(saturated_pixels, 0, 'saturated_pixels')
# Sort out the labeling:
div_num = 0
all_match = False
shared_titles = False
user_labels = False
if label is None:
pass
elif label == 'auto':
# Use some heuristics to try to get base string of similar titles
label_list = [x.metadata.General.title for x in images]
# Find the shortest common string between the image titles
# and pull that out as the base title for the sequence of images
# array in which to store arrays
res = np.zeros((len(label_list), len(label_list[0]) + 1))
res[:, 0] = 1
# j iterates the strings
for j in range(len(label_list)):
# i iterates length of substring test
for i in range(1, len(label_list[0]) + 1):
# stores whether or not characters in title match
res[j, i] = label_list[0][:i] in label_list[j]
# sum up the results (1 is True, 0 is False) and create
# a substring based on the minimum value (this will be
# the "smallest common string" between all the titles
if res.all():
basename = label_list[0]
div_num = len(label_list[0])
all_match = True
else:
div_num = int(min(np.sum(res, 1)))
basename = label_list[0][:div_num - 1]
all_match = False
# trim off any '(' or ' ' characters at end of basename
if div_num > 1:
while True:
if basename[len(basename) - 1] == '(':
basename = basename[:-1]
elif basename[len(basename) - 1] == ' ':
basename = basename[:-1]
else:
break
# namefrac is ratio of length of basename to the image name
# if it is high (e.g. over 0.5), we can assume that all images
# share the same base
if len(label_list[0]) > 0:
namefrac = float(len(basename)) / len(label_list[0])
else:
# If label_list[0] is empty, it means there was probably no
# title set originally, so nothing to share
namefrac = 0
if namefrac > namefrac_thresh:
# there was a significant overlap of label beginnings
shared_titles = True
# only use new suptitle if one isn't specified already
if suptitle is None:
suptitle = basename
else:
# there was not much overlap, so default back to 'titles' mode
shared_titles = False
label = 'titles'
div_num = 0
elif label == 'titles':
# Set label_list to each image's pre-defined title
label_list = [x.metadata.General.title for x in images]
elif isinstance(label, str):
# Set label_list to an indexed list, based off of label
label_list = [label + " " + repr(num) for num in range(n)]
elif isinstance(label, list) and all(
isinstance(x, str) for x in label):
label_list = label
user_labels = True
# If list of labels is longer than the number of images, just use the
# first n elements
if len(label_list) > n:
del label_list[n:]
if len(label_list) < n:
label_list *= (n // len(label_list)) + 1
del label_list[n:]
else:
raise ValueError("Did not understand input of labels.")
# Determine appropriate number of images per row
rows = int(np.ceil(n / float(per_row)))
if n < per_row:
per_row = n
# Set overall figure size and define figure (if not pre-existing)
if fig is None:
k = max(plt.rcParams['figure.figsize']) / max(per_row, rows)
f = plt.figure(figsize=(tuple(k * i for i in (per_row, rows))))
else:
f = fig
# Initialize list to hold subplot axes
axes_list = []
# Initialize list of rgb tags
isrgb = [False] * len(images)
# Check to see if there are any rgb images in list
# and tag them using the isrgb list
for i, img in enumerate(images):
if rgb_tools.is_rgbx(img.data):
isrgb[i] = True
# Determine how many non-rgb Images there are
non_rgb = list(itertools.compress(images, [not j for j in isrgb]))
if len(non_rgb) == 0 and colorbar is not None:
colorbar = None
warnings.warn("Sorry, colorbar is not implemented for RGB images.")
# Find global min and max values of all the non-rgb images for use with
# 'single' scalebar
if colorbar == 'single':
# get a g_saturated_pixels from saturated_pixels
if isinstance(saturated_pixels, list):
g_saturated_pixels = min(np.array([v for v in saturated_pixels]))
else:
g_saturated_pixels = saturated_pixels
# estimate a g_vmin and g_max from saturated_pixels
g_vmin, g_vmax = contrast_stretching(np.concatenate(
[i.data.flatten() for i in non_rgb]), g_saturated_pixels)
# if vmin and vmax are provided, override g_min and g_max
if isinstance(vmin, list):
_logger.warning('vmin have to be a scalar to be compatible with a '
'single colorbar')
else:
g_vmin = vmin if vmin is not None else g_vmin
if isinstance(vmax, list):
_logger.warning('vmax have to be a scalar to be compatible with a '
'single colorbar')
else:
g_vmax = vmax if vmax is not None else g_vmax
if next(centre_colormaps):
g_vmin, g_vmax = centre_colormap_values(g_vmin, g_vmax)
# Check if we need to add a scalebar for some of the images
if isinstance(scalebar, list) and all(isinstance(x, int)
for x in scalebar):
scalelist = True
else:
scalelist = False
idx = 0
ax_im_list = [0] * len(isrgb)
# Replot: create a list to store references to the images
replot_ims = []
# Loop through each image, adding subplot for each one
for i, ims in enumerate(images):
# Get handles for the signal axes and axes_manager
axes_manager = ims.axes_manager
if axes_manager.navigation_dimension > 0:
ims = ims._deepcopy_with_new_data(ims.data)
for j, im in enumerate(ims):
ax = f.add_subplot(rows, per_row, idx + 1)
axes_list.append(ax)
data = im.data
centre = next(centre_colormaps) # get next value for centreing
# Enable RGB plotting
if rgb_tools.is_rgbx(data):
data = rgb_tools.rgbx2regular_array(data, plot_friendly=True)
l_vmin, l_vmax = None, None
else:
data = im.data
# Find min and max for contrast
l_vmin, l_vmax = contrast_stretching(
data, saturated_pixels[idx])
l_vmin = vmin[idx] if vmin[idx] is not None else l_vmin
l_vmax = vmax[idx] if vmax[idx] is not None else l_vmax
if centre:
l_vmin, l_vmax = centre_colormap_values(l_vmin, l_vmax)
# Remove NaNs (if requested)
if no_nans:
data = np.nan_to_num(data)
# Get handles for the signal axes and axes_manager
axes_manager = im.axes_manager
axes = axes_manager.signal_axes
# Set dimensions of images
xaxis = axes[0]
yaxis = axes[1]
extent = (
xaxis.low_value,
xaxis.high_value,
yaxis.high_value,
yaxis.low_value,
)
if not isinstance(aspect, (int, float)) and aspect not in [
'auto', 'square', 'equal']:
_logger.warning("Did not understand aspect ratio input. "
"Using 'auto' as default.")
aspect = 'auto'
if aspect == 'auto':
if float(yaxis.size) / xaxis.size < min_asp:
factor = min_asp * float(xaxis.size) / yaxis.size
elif float(yaxis.size) / xaxis.size > min_asp ** -1:
factor = min_asp ** -1 * float(xaxis.size) / yaxis.size
else:
factor = 1
asp = np.abs(factor * float(xaxis.scale) / yaxis.scale)
elif aspect == 'square':
asp = abs(extent[1] - extent[0]) / abs(extent[3] - extent[2])
elif aspect == 'equal':
asp = 1
elif isinstance(aspect, (int, float)):
asp = aspect
if 'interpolation' not in kwargs.keys():
kwargs['interpolation'] = 'nearest'
# Get colormap for this image:
cm = next(cmap)
# Plot image data, using vmin and vmax to set bounds,
# or allowing them to be set automatically if using individual
# colorbars
if colorbar == 'single' and not isrgb[i]:
axes_im = ax.imshow(data,
cmap=cm,
extent=extent,
vmin=g_vmin, vmax=g_vmax,
aspect=asp,
*args, **kwargs)
ax_im_list[i] = axes_im
else:
axes_im = ax.imshow(data,
cmap=cm,
extent=extent,
vmin=l_vmin,
vmax=l_vmax,
aspect=asp,
*args, **kwargs)
ax_im_list[i] = axes_im
# If an axis trait is undefined, shut off :
if isinstance(xaxis.units, trait_base._Undefined) or \
isinstance(yaxis.units, trait_base._Undefined) or \
isinstance(xaxis.name, trait_base._Undefined) or \
isinstance(yaxis.name, trait_base._Undefined):
if axes_decor == 'all':
_logger.warning(
'Axes labels were requested, but one '
'or both of the '
'axes units and/or name are undefined. '
'Axes decorations have been set to '
'\'ticks\' instead.')
axes_decor = 'ticks'
# If all traits are defined, set labels as appropriate:
else:
ax.set_xlabel(axes[0].name + " axis (" + axes[0].units + ")")
ax.set_ylabel(axes[1].name + " axis (" + axes[1].units + ")")
if label:
if all_match:
title = ''
elif shared_titles:
title = label_list[i][div_num - 1:]
else:
if len(ims) == n:
# This is true if we are plotting just 1
# multi-dimensional Signal2D
title = label_list[idx]
elif user_labels:
title = label_list[idx]
else:
title = label_list[i]
if ims.axes_manager.navigation_size > 1 and not user_labels:
title += " %s" % str(ims.axes_manager.indices)
ax.set_title(textwrap.fill(title, labelwrap))
# Set axes decorations based on user input
set_axes_decor(ax, axes_decor)
# If using independent colorbars, add them
if colorbar == 'multi' and not isrgb[i]:
div = make_axes_locatable(ax)
cax = div.append_axes("right", size="5%", pad=0.05)
plt.colorbar(axes_im, cax=cax)
# Add scalebars as necessary
if (scalelist and idx in scalebar) or scalebar == 'all':
ax.scalebar = ScaleBar(
ax=ax,
units=axes[0].units,
color=scalebar_color,
)
# Replot: store references to the images
replot_ims.append(im)
idx += 1
# If using a single colorbar, add it, and do tight_layout, ensuring that
# a colorbar is only added based off of non-rgb Images:
if colorbar == 'single':
foundim = None
for i in range(len(isrgb)):
if (not isrgb[i]) and foundim is None:
foundim = i
if foundim is not None:
f.subplots_adjust(right=0.8)
cbar_ax = f.add_axes([0.9, 0.1, 0.03, 0.8])
f.colorbar(ax_im_list[foundim], cax=cbar_ax)
if tight_layout:
# tight_layout, leaving room for the colorbar
plt.tight_layout(rect=[0, 0, 0.9, 1])
elif tight_layout:
plt.tight_layout()
elif tight_layout:
plt.tight_layout()
# Set top bounds for shared titles and add suptitle
if suptitle:
f.subplots_adjust(top=0.85)
f.suptitle(suptitle, fontsize=suptitle_fontsize)
# If we want to plot scalebars, loop through the list of axes and add them
if scalebar is None or scalebar is False:
# Do nothing if no scalebars are called for
pass
elif scalebar == 'all':
# scalebars were taken care of in the plotting loop
pass
elif scalelist:
# scalebars were taken care of in the plotting loop
pass
else:
raise ValueError("Did not understand scalebar input. Must be None, "
"\'all\', or list of ints.")
# Adjust subplot spacing according to user's specification
if padding is not None:
plt.subplots_adjust(**padding)
# Replot: connect function
def on_dblclick(event):
# On the event of a double click, replot the selected subplot
if not event.inaxes:
return
if not event.dblclick:
return
subplots = [axi for axi in f.axes if isinstance(axi, mpl.axes.Subplot)]
inx = list(subplots).index(event.inaxes)
im = replot_ims[inx]
# Use some of the info in the subplot
cm = subplots[inx].images[0].get_cmap()
clim = subplots[inx].images[0].get_clim()
sbar = False
if (scalelist and inx in scalebar) or scalebar == 'all':
sbar = True
im.plot(colorbar=bool(colorbar),
vmin=clim[0],
vmax=clim[1],
no_nans=no_nans,
aspect=asp,
scalebar=sbar,
scalebar_color=scalebar_color,
cmap=cm)
f.canvas.mpl_connect('button_press_event', on_dblclick)
return axes_list
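# Usage sketch (the signals are placeholders):
#
#   >>> maps = [s_fe, s_ni, s_cr]          # three Signal2D intensity maps
#   >>> axs = hs.plot.plot_images(maps, per_row=3, colorbar='single',
#   ...                           scalebar='all', axes_decor='off',
#   ...                           cmap='viridis', tight_layout=True)
#
# Double-clicking a subplot replots that image on its own with the same
# colormap and contrast limits (see on_dblclick above).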
def set_axes_decor(ax, axes_decor):
if axes_decor == 'off':
ax.axis('off')
elif axes_decor == 'ticks':
ax.set_xlabel('')
ax.set_ylabel('')
elif axes_decor == 'all':
pass
elif axes_decor is None:
ax.set_xlabel('')
ax.set_ylabel('')
ax.set_xticklabels([])
ax.set_yticklabels([])
def make_cmap(colors, name='my_colormap', position=None,
bit=False, register=True):
"""
Create a matplotlib colormap with customized colors, optionally registering
it with matplotlib for simplified use.
Adapted from Chris Slocum's code at:
https://github.com/CSlocumWX/custom_colormap/blob/master/custom_colormaps.py
and used under the terms of that code's BSD-3 license
Parameters
----------
colors : iterable
list of either tuples containing rgb values, or html strings
Colors should be arranged so that the first color is the lowest
value for the colorbar and the last is the highest.
name : str
name of colormap to use when registering with matplotlib
position : None or iterable
list containing the values (from [0,1]) that dictate the position
of each color within the colormap. If None (default), the colors
will be equally-spaced within the colorbar.
bit : boolean
True if RGB colors are given in 8-bit [0 to 255] or False if given
in arithmetic basis [0 to 1] (default)
register : boolean
switch to control whether or not to register the custom colormap
with matplotlib in order to enable use by just the name string
"""
def _html_color_to_rgb(color_string):
""" convert #RRGGBB to an (R, G, B) tuple """
color_string = color_string.strip()
if color_string[0] == '#':
color_string = color_string[1:]
if len(color_string) != 6:
raise ValueError(
"input #{} is not in #RRGGBB format".format(color_string))
r, g, b = color_string[:2], color_string[2:4], color_string[4:]
r, g, b = [int(n, 16) / 255 for n in (r, g, b)]
return r, g, b
bit_rgb = np.linspace(0, 1, 256)
if position is None:
position = np.linspace(0, 1, len(colors))
else:
if len(position) != len(colors):
raise ValueError("position length must be the same as colors")
elif position[0] != 0 or position[-1] != 1:
raise ValueError("position must start with 0 and end with 1")
cdict = {'red': [], 'green': [], 'blue': []}
for pos, color in zip(position, colors):
if isinstance(color, str):
color = _html_color_to_rgb(color)
elif bit:
color = (bit_rgb[color[0]],
bit_rgb[color[1]],
bit_rgb[color[2]])
cdict['red'].append((pos, color[0], color[0]))
cdict['green'].append((pos, color[1], color[1]))
cdict['blue'].append((pos, color[2], color[2]))
cmap = mpl.colors.LinearSegmentedColormap(name, cdict, 256)
if register:
mpl.cm.register_cmap(name, cmap)
return cmap
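# Usage sketch: build and register a two-colour map from HTML strings and use
# it by name like any other matplotlib colormap:
#
#   >>> make_cmap(['#000000', '#ff6600'], name='black_orange')
#   >>> plt.imshow(np.random.random((32, 32)), cmap='black_orange')
#
# With three colours and position=[0, 0.25, 1], the middle colour is placed a
# quarter of the way up the scale.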
def plot_spectra(
spectra,
style='overlap',
color=None,
line_style=None,
padding=1.,
legend=None,
legend_picking=True,
legend_loc='upper right',
fig=None,
ax=None,
**kwargs):
"""Plot several spectra in the same figure.
Extra keyword arguments are passed to `matplotlib.figure`.
Parameters
----------
spectra : list of Signal1D or BaseSignal
Ordered spectra list of signal to plot. If `style` is "cascade" or
"mosaic" the spectra can have different size and axes. For `BaseSignal`
with navigation dimensions 1 and signal dimension 0, the signal will be
transposed to form a `Signal1D`.
style : {'overlap', 'cascade', 'mosaic', 'heatmap'}
The style of the plot.
color : matplotlib color or a list of them or `None`
Sets the color of the lines of the plots (no action on 'heatmap').
If a list, if its length is less than the number of spectra to plot,
the colors will be cycled. If `None`, use default matplotlib color
cycle.
line_style: matplotlib line style or a list of them or `None`
Sets the line style of the plots (no action on 'heatmap').
        The main line styles are '-','--','steps','-.',':'.
        If a list whose length is less than the number of spectra to plot,
        line_style will be cycled.
        If `None`, use continuous lines, e.g.: ('-','--','steps','-.',':')
    padding : float, optional, default 1.0
        Option for "cascade". A value of 1 guarantees that there is no
        overlapping.
However, in many cases a value between 0 and 1 can produce a tighter
plot without overlapping. Negative values have the same effect but
reverse the order of the spectra without reversing the order of the
colors.
legend: None or list of str or 'auto'
If list of string, legend for "cascade" or title for "mosaic" is
        displayed. If 'auto', the title of each spectrum (metadata.General.title)
is used.
legend_picking: bool
        If True, a spectrum can be toggled on and off by clicking on
the legended line.
legend_loc : str or int
This parameter controls where the legend is placed on the figure;
see the pyplot.legend docstring for valid values
fig : matplotlib figure or None
If None, a default figure will be created. Specifying fig will
not work for the 'heatmap' style.
ax : matplotlib ax (subplot) or None
If None, a default ax will be created. Will not work for 'mosaic'
or 'heatmap' style.
**kwargs
remaining keyword arguments are passed to matplotlib.figure() or
matplotlib.subplots(). Has no effect on 'heatmap' style.
Example
-------
>>> s = hs.load("some_spectra")
>>> hs.plot.plot_spectra(s, style='cascade', color='red', padding=0.5)
To save the plot as a png-file
>>> hs.plot.plot_spectra(s).figure.savefig("test.png")
Returns
-------
ax: matplotlib axes or list of matplotlib axes
An array is returned when `style` is "mosaic".
"""
import hyperspy.signal
def _reverse_legend(ax_, legend_loc_):
"""
Reverse the ordering of a matplotlib legend (to be more consistent
with the default ordering of plots in the 'cascade' and 'overlap'
        styles).
Parameters
----------
ax_: matplotlib axes
legend_loc_: str or int
This parameter controls where the legend is placed on the
figure; see the pyplot.legend docstring for valid values
"""
l = ax_.get_legend()
labels = [lb.get_text() for lb in list(l.get_texts())]
handles = l.legendHandles
ax_.legend(handles[::-1], labels[::-1], loc=legend_loc_)
    # Before v1.3 default would read the value from preferences.
if style == "default":
style = "overlap"
if color is not None:
if isinstance(color, str):
color = itertools.cycle([color])
elif hasattr(color, "__iter__"):
color = itertools.cycle(color)
else:
raise ValueError("Color must be None, a valid matplotlib color "
"string or a list of valid matplotlib colors.")
else:
if LooseVersion(mpl.__version__) >= "1.5.3":
color = itertools.cycle(
plt.rcParams['axes.prop_cycle'].by_key()["color"])
else:
color = itertools.cycle(plt.rcParams['axes.color_cycle'])
if line_style is not None:
if isinstance(line_style, str):
line_style = itertools.cycle([line_style])
elif hasattr(line_style, "__iter__"):
line_style = itertools.cycle(line_style)
else:
raise ValueError("line_style must be None, a valid matplotlib"
" line_style string or a list of valid matplotlib"
" line_style.")
else:
line_style = ['-'] * len(spectra)
if legend is not None:
if isinstance(legend, str):
if legend == 'auto':
legend = [spec.metadata.General.title for spec in spectra]
else:
raise ValueError("legend must be None, 'auto' or a list of"
" string")
elif hasattr(legend, "__iter__"):
legend = itertools.cycle(legend)
if style == 'overlap':
if fig is None:
fig = plt.figure(**kwargs)
if ax is None:
ax = fig.add_subplot(111)
_make_overlap_plot(spectra,
ax,
color=color,
line_style=line_style,)
if legend is not None:
ax.legend(legend, loc=legend_loc)
_reverse_legend(ax, legend_loc)
if legend_picking is True:
animate_legend(fig=fig, ax=ax)
elif style == 'cascade':
if fig is None:
fig = plt.figure(**kwargs)
if ax is None:
ax = fig.add_subplot(111)
_make_cascade_subplot(spectra,
ax,
color=color,
line_style=line_style,
padding=padding)
if legend is not None:
plt.legend(legend, loc=legend_loc)
_reverse_legend(ax, legend_loc)
elif style == 'mosaic':
default_fsize = plt.rcParams["figure.figsize"]
figsize = (default_fsize[0], default_fsize[1] * len(spectra))
fig, subplots = plt.subplots(
len(spectra), 1, figsize=figsize, **kwargs)
if legend is None:
legend = [legend] * len(spectra)
for spectrum, ax, color, line_style, legend in zip(
spectra, subplots, color, line_style, legend):
spectrum = _transpose_if_required(spectrum, 1)
_plot_spectrum(spectrum, ax, color=color, line_style=line_style)
ax.set_ylabel('Intensity')
if legend is not None:
ax.set_title(legend)
if not isinstance(spectra, hyperspy.signal.BaseSignal):
_set_spectrum_xlabel(spectrum, ax)
if isinstance(spectra, hyperspy.signal.BaseSignal):
_set_spectrum_xlabel(spectrum, ax)
fig.tight_layout()
elif style == 'heatmap':
if not isinstance(spectra, hyperspy.signal.BaseSignal):
import hyperspy.utils
spectra = [_transpose_if_required(spectrum, 1) for spectrum in
spectra]
spectra = hyperspy.utils.stack(spectra)
with spectra.unfolded():
ax = _make_heatmap_subplot(spectra)
ax.set_ylabel('Spectra')
ax = ax if style != "mosaic" else subplots
return ax
def animate_legend(fig=None, ax=None):
"""Animate the legend of a figure.
    A spectrum can be toggled on and off by clicking on the legended line.
Parameters
----------
fig: None | matplotlib.figure
If None pick the current figure using "plt.gcf"
ax: None | matplotlib.axes
If None pick the current axes using "plt.gca".
Note
----
Code inspired from legend_picking.py in the matplotlib gallery
"""
if fig is None:
fig = plt.gcf()
if ax is None:
ax = plt.gca()
lines = ax.lines[::-1]
lined = dict()
leg = ax.get_legend()
for legline, origline in zip(leg.get_lines(), lines):
legline.set_picker(5) # 5 pts tolerance
lined[legline] = origline
def onpick(event):
# on the pick event, find the orig line corresponding to the
# legend proxy line, and toggle the visibility
legline = event.artist
if legline.axes == ax:
origline = lined[legline]
vis = not origline.get_visible()
origline.set_visible(vis)
# Change the alpha on the line in the legend so we can see what lines
# have been toggled
if vis:
legline.set_alpha(1.0)
else:
legline.set_alpha(0.2)
fig.canvas.draw_idle()
fig.canvas.mpl_connect('pick_event', onpick)
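# Illustrative wiring of animate_legend (added sketch, not original code). It
# assumes matplotlib.pyplot is imported as plt, as elsewhere in this module.
# The legend is built in reverse order, mirroring what plot_spectra does, so
# that each legend entry maps onto the correct line when toggling.
def _example_animate_legend():
    fig, ax = plt.subplots()
    ax.plot([0, 1, 2], [0, 1, 4], label='quadratic')
    ax.plot([0, 1, 2], [0, 1, 2], label='linear')
    handles, labels = ax.get_legend_handles_labels()
    ax.legend(handles[::-1], labels[::-1], loc='upper right')
    animate_legend(fig=fig, ax=ax)  # clicking a legend line now toggles it
    return fig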
def plot_histograms(signal_list,
bins='freedman',
range_bins=None,
color=None,
line_style=None,
legend='auto',
fig=None,
**kwargs):
"""Plot the histogram of every signal in the list in the same figure.
This function creates a histogram for each signal and plot the list with
the `utils.plot.plot_spectra` function.
Parameters
----------
    signal_list : iterable
        Ordered list of signals whose histograms will be plotted. The
        signals can have different sizes and axes.
bins : int or list or str, optional
If bins is a string, then it must be one of:
'knuth' : use Knuth's rule to determine bins
'scotts' : use Scott's rule to determine bins
        'freedman' : use the Freedman-Diaconis rule to determine bins
        'blocks' : use Bayesian blocks for dynamic bin widths
range_bins : tuple or None, optional.
the minimum and maximum range for the histogram. If not specified,
it will be (x.min(), x.max())
color : valid matplotlib color or a list of them or `None`, optional.
        Sets the color of the lines of the plots. If a list whose length is
        less than the number of histograms to plot, the colors will be cycled.
        If `None`, use the default matplotlib color cycle.
line_style: valid matplotlib line style or a list of them or `None`,
optional.
        The main line styles are '-','--','steps','-.',':'.
        If a list whose length is less than the number of histograms to plot,
        line_style will be cycled.
        If `None`, use continuous lines, e.g.: ('-','--','steps','-.',':')
legend: None or list of str or 'auto', optional.
        Display a legend. If 'auto', the title of each signal
(metadata.General.title) is used.
legend_picking: bool, optional.
        If True, a spectrum can be toggled on and off by clicking on
the legended line.
fig : matplotlib figure or None, optional.
If None, a default figure will be created.
**kwargs
        other keyword arguments (weights and density) are described in
np.histogram().
Example
-------
Histograms of two random chi-square distributions
>>> img = hs.signals.Signal2D(np.random.chisquare(1,[10,10,100]))
>>> img2 = hs.signals.Signal2D(np.random.chisquare(2,[10,10,100]))
>>> hs.plot.plot_histograms([img,img2],legend=['hist1','hist2'])
Returns
-------
ax: matplotlib axes or list of matplotlib axes
An array is returned when `style` is "mosaic".
"""
hists = []
for obj in signal_list:
hists.append(obj.get_histogram(bins=bins,
range_bins=range_bins, **kwargs))
if line_style is None:
line_style = 'steps'
return plot_spectra(hists, style='overlap', color=color,
line_style=line_style, legend=legend, fig=fig)
| gpl-3.0 | 685,807,880,072,156,300 | 36.986083 | 82 | 0.581131 | false | 4.057262 | false | false | false |
rg3915/django-experience | djexperience/settings.py | 1 | 3763 | import os
from decouple import config, Csv
from dj_database_url import parse as dburl
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', default=False, cast=bool)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', default=[], cast=Csv())
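# Example .env consumed by the config() calls in this module (illustrative
# values only; the real keys, hosts and credentials are project-specific):
#
#   SECRET_KEY=change-me
#   DEBUG=True
#   ALLOWED_HOSTS=127.0.0.1,localhost
#   DATABASE_URL=postgres://user:password@localhost:5432/djexperience
#   EMAIL_HOST_USER=someone@example.com
#   EMAIL_HOST_PASSWORD=app-specific-password
#   DEFAULT_FROM_EMAIL=someone@example.com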
# Application definition
INSTALLED_APPS = [
# my apps
'djexperience.core',
# default django apps
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
    # third party apps
'django_extensions',
'bootstrapform',
'widget_tweaks',
'daterange_filter',
'django_activeurl',
'import_export',
'django_tables2',
# my apps
'djexperience.bookstore',
'djexperience.company',
'djexperience.crm',
'djexperience.myemail',
'djexperience.product',
'djexperience.selling',
'djexperience.service',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djexperience.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djexperience.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
default_dburl = 'sqlite:///' + os.path.join(BASE_DIR, 'db.sqlite3')
DATABASES = {
'default': config('DATABASE_URL', default=default_dburl, cast=dburl),
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = config('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD')
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = config('DEFAULT_FROM_EMAIL')
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
USE_THOUSAND_SEPARATOR = True
DECIMAL_SEPARATOR = ','
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
LOGIN_URL = '/admin/login/'
| mit | -8,715,243,961,084,812,000 | 25.687943 | 91 | 0.687749 | false | 3.520112 | true | false | false |
shoyer/xray | xarray/backends/locks.py | 1 | 5397 | import multiprocessing
import threading
import weakref
from typing import Any, MutableMapping
try:
from dask.utils import SerializableLock
except ImportError:
# no need to worry about serializing the lock
SerializableLock = threading.Lock
try:
from dask.distributed import Lock as DistributedLock
except ImportError:
DistributedLock = None
# Locks used by multiple backends.
# Neither HDF5 nor the netCDF-C library is thread-safe.
HDF5_LOCK = SerializableLock()
NETCDFC_LOCK = SerializableLock()
_FILE_LOCKS = weakref.WeakValueDictionary() # type: MutableMapping[Any, threading.Lock] # noqa
def _get_threaded_lock(key):
try:
lock = _FILE_LOCKS[key]
except KeyError:
lock = _FILE_LOCKS[key] = threading.Lock()
return lock
def _get_multiprocessing_lock(key):
# TODO: make use of the key -- maybe use locket.py?
# https://github.com/mwilliamson/locket.py
del key # unused
return multiprocessing.Lock()
_LOCK_MAKERS = {
None: _get_threaded_lock,
'threaded': _get_threaded_lock,
'multiprocessing': _get_multiprocessing_lock,
'distributed': DistributedLock,
}
def _get_lock_maker(scheduler=None):
"""Returns an appropriate function for creating resource locks.
Parameters
----------
scheduler : str or None
Dask scheduler being used.
See Also
--------
dask.utils.get_scheduler_lock
"""
return _LOCK_MAKERS[scheduler]
def _get_scheduler(get=None, collection=None):
"""Determine the dask scheduler that is being used.
None is returned if no dask scheduler is active.
See also
--------
dask.base.get_scheduler
"""
try:
# dask 0.18.1 and later
from dask.base import get_scheduler
actual_get = get_scheduler(get, collection)
except ImportError:
try:
from dask.utils import effective_get
actual_get = effective_get(get, collection)
except ImportError:
return None
try:
from dask.distributed import Client
if isinstance(actual_get.__self__, Client):
return 'distributed'
except (ImportError, AttributeError):
try:
import dask.multiprocessing
if actual_get == dask.multiprocessing.get:
return 'multiprocessing'
else:
return 'threaded'
except ImportError:
return 'threaded'
def get_write_lock(key):
"""Get a scheduler appropriate lock for writing to the given resource.
Parameters
----------
key : str
Name of the resource for which to acquire a lock. Typically a filename.
Returns
-------
Lock object that can be used like a threading.Lock object.
"""
scheduler = _get_scheduler()
lock_maker = _get_lock_maker(scheduler)
return lock_maker(key)
def acquire(lock, blocking=True):
"""Acquire a lock, possibly in a non-blocking fashion.
Includes backwards compatibility hacks for old versions of Python, dask
and dask-distributed.
"""
if blocking:
# no arguments needed
return lock.acquire()
elif DistributedLock is not None and isinstance(lock, DistributedLock):
# distributed.Lock doesn't support the blocking argument yet:
# https://github.com/dask/distributed/pull/2412
return lock.acquire(timeout=0)
else:
# "blocking" keyword argument not supported for:
# - threading.Lock on Python 2.
# - dask.SerializableLock with dask v1.0.0 or earlier.
# - multiprocessing.Lock calls the argument "block" instead.
return lock.acquire(blocking)
class CombinedLock:
"""A combination of multiple locks.
Like a locked door, a CombinedLock is locked if any of its constituent
locks are locked.
"""
def __init__(self, locks):
self.locks = tuple(set(locks)) # remove duplicates
def acquire(self, blocking=True):
return all(acquire(lock, blocking=blocking) for lock in self.locks)
def release(self):
for lock in self.locks:
lock.release()
def __enter__(self):
for lock in self.locks:
lock.__enter__()
def __exit__(self, *args):
for lock in self.locks:
lock.__exit__(*args)
def locked(self):
return any(lock.locked for lock in self.locks)
def __repr__(self):
return "CombinedLock(%r)" % list(self.locks)
class DummyLock:
"""DummyLock provides the lock API without any actual locking."""
def acquire(self, blocking=True):
pass
def release(self):
pass
def __enter__(self):
pass
def __exit__(self, *args):
pass
def locked(self):
return False
def combine_locks(locks):
"""Combine a sequence of locks into a single lock."""
all_locks = []
for lock in locks:
if isinstance(lock, CombinedLock):
all_locks.extend(lock.locks)
elif lock is not None:
all_locks.append(lock)
num_locks = len(all_locks)
if num_locks > 1:
return CombinedLock(all_locks)
elif num_locks == 1:
return all_locks[0]
else:
return DummyLock()
def ensure_lock(lock):
"""Ensure that the given object is a lock."""
if lock is None or lock is False:
return DummyLock()
return lock
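# Usage sketch (added for illustration; not part of xarray). It exercises the
# helpers above with plain threading locks, so it runs without dask installed.
def _example_combined_locks():
    a = threading.Lock()
    b = threading.Lock()
    combined = combine_locks([a, b, None])  # None entries are dropped
    with combined:  # acquires a and b together
        assert a.locked() and b.locked()
    noop = ensure_lock(None)  # falls back to a no-op DummyLock
    with noop:
        pass
    return combined, noop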
| apache-2.0 | -7,480,206,169,277,232,000 | 24.578199 | 96 | 0.631277 | false | 4.119847 | false | false | false |
brahle/eval2 | scripts/haski/actions/reviewaction.py | 1 | 1578 | #!/usr/bin/env python3.2
# Copyright 2011 Bruno Rahle
#
# This file is part of Evaluator.
#
# Evaluator is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Evaluator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with Evaluator. If not, see
# <http://www.gnu.org/licenses/>.
from actions.baseaction import BaseHaskiAction
import argparse
class ReviewAction(BaseHaskiAction):
"""This class is the class that does linting work.
"""
RB_ID_STR = 'reviewboard id'
def __call__(self, params):
"""Fetches the desired revision and then sends it to reviewboard.
"""
commit = self.get_commit(params)
if not params.skip_lint:
commit.lint(params)
rb_id = commit.review()
if params.revision != 'HEAD':
if self.RB_ID_STR not in commit.message.fields:
print('[WARNING] Please edit the message to incorporate',
'`ReviewBoardID` field.')
else:
commit.message.set_field(self.RB_ID_STR, rb_id)
commit.amend()
def main():
pass
if __name__ == '__main__':
main()
| agpl-3.0 | -8,740,087,895,962,332,000 | 31.204082 | 73 | 0.665399 | false | 3.905941 | false | false | false |
pelodelfuego/word2vec-toolbox | toolbox/mlLib/conceptPairFeature.py | 1 | 4358 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import __init__
import numpy as np
from scipy.weave import inline
from sklearn.ensemble import RandomForestClassifier
import cpLib.concept as cp
import utils.skUtils as sku
# PROJECTION
def projCosSim(c1, c2):
v1 = c1.vect
v2 = c2.vect
dimCount = len(v1)
arr = np.zeros(dimCount, 'f')
code = """
for(int i = 0; i < dimCount; i++) {
float norm_v1 = 0.0;
float norm_v2 = 0.0;
float dot_pdt = 0.0;
for(int j = 0; j < dimCount; j++) {
if(i != j) {
dot_pdt += v1[j] * v2[j];
norm_v1 += v1[j] * v1[j];
norm_v2 += v2[j] * v2[j];
}
}
norm_v1 = sqrtf(norm_v1);
norm_v2 = sqrtf(norm_v2);
arr[i] = dot_pdt / norm_v1 / norm_v2;
}
return_val = 1;
"""
inline(code, ['v1', 'v2', 'dimCount', 'arr'], headers = ['<math.h>'], compiler = 'gcc')
return arr
def projEuclDist(c1, c2):
v1 = c1.vect
v2 = c2.vect
dimCount = len(v1)
arr = np.zeros(dimCount, 'f')
code = """
for(int i = 0; i < dimCount; i++) {
float dist = 0.0;
for(int j = 0; j < dimCount; j++) {
if(i != j) {
dist += pow(v1[j] - v2[j], 2);
}
}
arr[i] = sqrt(dist);
}
return_val = 1;
"""
inline(code, ['v1', 'v2', 'dimCount', 'arr'], headers = ['<math.h>'], compiler = 'gcc')
return arr
def projManaDist(c1, c2):
v1 = c1.vect
v2 = c2.vect
dimCount = len(v1)
arr = np.zeros(dimCount, 'f')
code = """
for(int i = 0; i < dimCount; i++) {
float dist = 0.0;
for(int j = 0; j < dimCount; j++) {
if(i != j) {
                    dist += fabs(v1[j] - v2[j]);
}
}
arr[i] = dist;
}
return_val = 1;
"""
inline(code, ['v1', 'v2', 'dimCount', 'arr'], headers = ['<math.h>'], compiler = 'gcc')
return arr
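# Reference sketch added for clarity (not part of the original module): a pure
# NumPy equivalent of projEuclDist above, computing for every dimension i the
# Euclidean distance with that dimension left out. Slower than the weave code,
# but convenient for checking the inline C implementation.
def projEuclDistNumpy(c1, c2):
    v1, v2 = c1.vect, c2.vect
    diff_sq = (v1 - v2) ** 2
    total = diff_sq.sum()
    # dropping dimension i removes its squared difference from the total
    return np.sqrt(total - diff_sq)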
# COMMUTATIVE FEATURE
def subCarth(conceptPair):
return conceptPair[2].vect - conceptPair[0].vect
def subPolar(conceptPair):
return conceptPair[2].polarVect() - conceptPair[0].polarVect()
def subAngular(conceptPair):
return conceptPair[2].angularVect() - conceptPair[0].angularVect()
def concatCarth(conceptPair):
return np.concatenate((conceptPair[0].vect, conceptPair[2].vect))
def concatPolar(conceptPair):
return np.concatenate((conceptPair[0].polarVect(), conceptPair[2].polarVect()))
def concatAngular(conceptPair):
return np.concatenate((conceptPair[0].angularVect(), conceptPair[2].angularVect()))
# NON COMMUTATIVE FEATURE
# PROJECTION SIMILARITY
def pCosSim(conceptPair):
return projCosSim(conceptPair[0], conceptPair[2])
def pEuclDist(conceptPair):
return projEuclDist(conceptPair[0], conceptPair[2])
def pManaDist(conceptPair):
return projManaDist(conceptPair[0], conceptPair[2])
# PROJECTION DISSIMILARITY
def _projectionDissimarilty(projectionMetric, globalMetric, conceptPair):
projectedFeature = projectionMetric(conceptPair[0], conceptPair[2])
globalFeature = globalMetric(conceptPair[0], conceptPair[2])
return np.array([(globalFeature - v) for v in projectedFeature])
def pdCosSim(conceptPair):
return _projectionDissimarilty(projCosSim, cp.cosSim, conceptPair)
def pdEuclDist(conceptPair):
return _projectionDissimarilty(projEuclDist, cp.euclDist, conceptPair)
def pdManaDist(conceptPair):
return _projectionDissimarilty(projManaDist, cp.manaDist, conceptPair)
# CLF
class ConceptPairClf(object):
def __init__(self, clf, featureExtractionFct):
self.clf = clf
self.featureExtractionFct = featureExtractionFct
def fit(self, X, y):
self.clf.fit([self.featureExtractionFct(x) for x in X], y)
self.classes_ = self.clf.classes_
def predict(self, X):
return self.clf.predict([self.featureExtractionFct(x) for x in X])
def predict_proba(self, X):
return self.clf.predict_proba([self.featureExtractionFct(x) for x in X])
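# Usage sketch (illustrative only; the concept pairs and labels are assumed to
# come from the caller and are not data shipped with this module).
def _exampleConceptPairClf(trainPairs, trainLabels, testPairs):
    # pair the concatenated Cartesian feature with a random forest
    clf = ConceptPairClf(RandomForestClassifier(n_estimators=100), concatCarth)
    clf.fit(trainPairs, trainLabels)
    return clf.predict(testPairs), clf.predict_proba(testPairs)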
| gpl-3.0 | -1,004,693,826,859,406,600 | 26.2375 | 91 | 0.562184 | false | 3.228148 | false | false | false |
skodapetr/lbvs-environment | scripts/libs/core.py | 1 | 1664 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import csv
import os
import logging
import gzip
__license__ = "X11"
def init_logging():
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s [%(levelname)s] - %(message)s',
datefmt='%H:%M:%S')
def create_directory(path):
if not os.path.exists(path) and not path == "":
os.makedirs(path)
def create_parent_directory(path):
parent_directory = os.path.dirname(path)
if not os.path.exists(parent_directory) and not parent_directory == "":
os.makedirs(parent_directory)
def read_json(path):
if path.endswith(".gz"):
with gzip.open(path, "rt") as stream:
return json.load(stream)
else:
with open(path, "r") as stream:
return json.load(stream)
def write_json(path, object_to_write):
create_parent_directory(path)
if path.endswith(".gz"):
with gzip.open(path, "wt") as stream:
json.dump(object_to_write, stream, indent=2)
else:
with open(path, "w") as stream:
json.dump(object_to_write, stream, indent=2)
def read_csv_as_object(path):
"""
Read CSV lines as objects.
"""
results = []
with open(path) as stream:
reader = csv.reader(stream, delimiter=",", quotechar='"')
header = next(reader)
for row in reader:
new_object = {}
for index in range(0, len(row)):
new_object[header[index]] = row[index]
results.append(new_object)
return results
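# Round-trip sketch (added for illustration; the file name is arbitrary).
def _example_json_roundtrip(path="/tmp/example.json.gz"):
    write_json(path, {"actives": 10, "decoys": 990})
    # write_json created any missing parent directory; read the data back
    return read_json(path)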
if __name__ == "__main__":
raise Exception("This module can be used only as a library!")
| mit | 453,216,932,470,209,800 | 23.470588 | 75 | 0.590144 | false | 3.570815 | false | false | false |
kaniblu/hangul-utils | hangul_utils/unicode.py | 1 | 8775 | __all__ = ["split_syllable_char", "split_syllables",
"join_jamos", "join_jamos_char",
"CHAR_INITIALS", "CHAR_MEDIALS", "CHAR_FINALS"]
import itertools
INITIAL = 0x001
MEDIAL = 0x010
FINAL = 0x100
CHAR_LISTS = {
INITIAL: list(map(chr, [
0x3131, 0x3132, 0x3134, 0x3137, 0x3138, 0x3139,
0x3141, 0x3142, 0x3143, 0x3145, 0x3146, 0x3147,
0x3148, 0x3149, 0x314a, 0x314b, 0x314c, 0x314d,
0x314e
])),
MEDIAL: list(map(chr, [
0x314f, 0x3150, 0x3151, 0x3152, 0x3153, 0x3154,
0x3155, 0x3156, 0x3157, 0x3158, 0x3159, 0x315a,
0x315b, 0x315c, 0x315d, 0x315e, 0x315f, 0x3160,
0x3161, 0x3162, 0x3163
])),
FINAL: list(map(chr, [
0x3131, 0x3132, 0x3133, 0x3134, 0x3135, 0x3136,
0x3137, 0x3139, 0x313a, 0x313b, 0x313c, 0x313d,
0x313e, 0x313f, 0x3140, 0x3141, 0x3142, 0x3144,
0x3145, 0x3146, 0x3147, 0x3148, 0x314a, 0x314b,
0x314c, 0x314d, 0x314e
]))
}
CHAR_INITIALS = CHAR_LISTS[INITIAL]
CHAR_MEDIALS = CHAR_LISTS[MEDIAL]
CHAR_FINALS = CHAR_LISTS[FINAL]
CHAR_SETS = {k: set(v) for k, v in CHAR_LISTS.items()}
CHARSET = set(itertools.chain(*CHAR_SETS.values()))
CHAR_INDICES = {k: {c: i for i, c in enumerate(v)}
for k, v in CHAR_LISTS.items()}
def is_hangul_syllable(c):
return 0xac00 <= ord(c) <= 0xd7a3 # Hangul Syllables
def is_hangul_jamo(c):
return 0x1100 <= ord(c) <= 0x11ff # Hangul Jamo
def is_hangul_compat_jamo(c):
return 0x3130 <= ord(c) <= 0x318f # Hangul Compatibility Jamo
def is_hangul_jamo_exta(c):
return 0xa960 <= ord(c) <= 0xa97f # Hangul Jamo Extended-A
def is_hangul_jamo_extb(c):
return 0xd7b0 <= ord(c) <= 0xd7ff # Hangul Jamo Extended-B
def is_hangul(c):
return (is_hangul_syllable(c) or
is_hangul_jamo(c) or
is_hangul_compat_jamo(c) or
is_hangul_jamo_exta(c) or
is_hangul_jamo_extb(c))
def is_supported_hangul(c):
return is_hangul_syllable(c) or is_hangul_compat_jamo(c)
def check_hangul(c, jamo_only=False):
if not ((jamo_only or is_hangul_compat_jamo(c)) or is_supported_hangul(c)):
raise ValueError(f"'{c}' is not a supported hangul character. "
f"'Hangul Syllables' (0xac00 ~ 0xd7a3) and "
f"'Hangul Compatibility Jamos' (0x3130 ~ 0x318f) are "
f"supported at the moment.")
def get_jamo_type(c):
check_hangul(c)
assert is_hangul_compat_jamo(c), f"not a jamo: {ord(c):x}"
return sum(t for t, s in CHAR_SETS.items() if c in s)
def split_syllable_char(c):
"""
Splits a given korean syllable into its components. Each component is
represented by Unicode in 'Hangul Compatibility Jamo' range.
Arguments:
c: A Korean character.
Returns:
A triple (initial, medial, final) of Hangul Compatibility Jamos.
If no jamo corresponds to a position, `None` is returned there.
Example:
>>> split_syllable_char("안")
("ㅇ", "ㅏ", "ㄴ")
>>> split_syllable_char("고")
("ㄱ", "ㅗ", None)
>>> split_syllable_char("ㅗ")
(None, "ㅗ", None)
>>> split_syllable_char("ㅇ")
("ㅇ", None, None)
"""
check_hangul(c)
if len(c) != 1:
raise ValueError("Input string must have exactly one character.")
init, med, final = None, None, None
if is_hangul_syllable(c):
offset = ord(c) - 0xac00
x = (offset - offset % 28) // 28
init, med, final = x // 21, x % 21, offset % 28
if not final:
final = None
else:
final -= 1
else:
pos = get_jamo_type(c)
if pos & INITIAL == INITIAL:
pos = INITIAL
elif pos & MEDIAL == MEDIAL:
pos = MEDIAL
elif pos & FINAL == FINAL:
pos = FINAL
idx = CHAR_INDICES[pos][c]
if pos == INITIAL:
init = idx
elif pos == MEDIAL:
med = idx
else:
final = idx
return tuple(CHAR_LISTS[pos][idx] if idx is not None else None
for pos, idx in
zip([INITIAL, MEDIAL, FINAL], [init, med, final]))
def split_syllables(s, ignore_err=True, pad=None):
"""
Performs syllable-split on a string.
Arguments:
s (str): A string (possibly mixed with non-Hangul characters).
ignore_err (bool): If set False, it ensures that all characters in
the string are Hangul-splittable and throws a ValueError otherwise.
(default: True)
pad (str): Pad empty jamo positions (initial, medial, or final) with
`pad` character. This is useful for cases where fixed-length
strings are needed. (default: None)
Returns:
Hangul-split string
Example:
>>> split_syllables("안녕하세요")
"ㅇㅏㄴㄴㅕㅇㅎㅏㅅㅔㅇㅛ"
>>> split_syllables("안녕하세요~~", ignore_err=False)
ValueError: encountered an unsupported character: ~ (0x7e)
>>> split_syllables("안녕하세요ㅛ", pad="x")
'ㅇㅏㄴㄴㅕㅇㅎㅏxㅅㅔxㅇㅛxxㅛx'
"""
def try_split(c):
try:
return split_syllable_char(c)
except ValueError:
if ignore_err:
return (c,)
raise ValueError(f"encountered an unsupported character: "
f"{c} (0x{ord(c):x})")
s = map(try_split, s)
if pad is not None:
tuples = map(lambda x: tuple(pad if y is None else y for y in x), s)
else:
tuples = map(lambda x: filter(None, x), s)
return "".join(itertools.chain(*tuples))
def join_jamos_char(init, med, final=None):
"""
Combines jamos into a single syllable.
Arguments:
        init (str): Initial jamo.
med (str): Medial jamo.
        final (str): Final jamo. If not supplied, the syllable is made
            without a final. (default: None)
Returns:
A Korean syllable.
"""
chars = (init, med, final)
for c in filter(None, chars):
check_hangul(c, jamo_only=True)
idx = tuple(CHAR_INDICES[pos][c] if c is not None else c
for pos, c in zip((INITIAL, MEDIAL, FINAL), chars))
init_idx, med_idx, final_idx = idx
# final index must be shifted once as
# final index with 0 points to syllables without final
final_idx = 0 if final_idx is None else final_idx + 1
return chr(0xac00 + 28 * 21 * init_idx + 28 * med_idx + final_idx)
def join_jamos(s, ignore_err=True):
"""
Combines a sequence of jamos to produce a sequence of syllables.
Arguments:
s (str): A string (possible mixed with non-jamo characters).
ignore_err (bool): If set False, it will ensure that all characters
will be consumed for the making of syllables. It will throw a
ValueError when it fails to do so. (default: True)
Returns:
A string
Example:
>>> join_jamos("ㅇㅏㄴㄴㅕㅇㅎㅏㅅㅔㅇㅛ")
"안녕하세요"
>>> join_jamos("ㅇㅏㄴㄴㄴㅕㅇㅎㅏㅅㅔㅇㅛ")
"안ㄴ녕하세요"
>>> join_jamos()
"""
last_t = 0
queue = []
new_string = ""
def flush(n=0):
new_queue = []
while len(queue) > n:
new_queue.append(queue.pop())
if len(new_queue) == 1:
if not ignore_err:
raise ValueError(f"invalid jamo character: {new_queue[0]}")
result = new_queue[0]
elif len(new_queue) >= 2:
try:
result = join_jamos_char(*new_queue)
except (ValueError, KeyError):
# Invalid jamo combination
if not ignore_err:
raise ValueError(f"invalid jamo characters: {new_queue}")
result = "".join(new_queue)
else:
result = None
return result
for c in s:
if c not in CHARSET:
if queue:
new_c = flush() + c
else:
new_c = c
last_t = 0
else:
t = get_jamo_type(c)
new_c = None
if t & FINAL == FINAL:
if not (last_t == MEDIAL):
new_c = flush()
elif t == INITIAL:
new_c = flush()
elif t == MEDIAL:
if last_t & INITIAL == INITIAL:
new_c = flush(1)
else:
new_c = flush()
last_t = t
queue.insert(0, c)
if new_c:
new_string += new_c
if queue:
new_string += flush()
return new_string
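# Round-trip sketch (added for illustration): for text made of supported Hangul
# syllables, splitting into jamos and joining them again reproduces the input.
def _example_roundtrip(text="안녕하세요"):
    jamos = split_syllables(text)
    rebuilt = join_jamos(jamos)
    assert rebuilt == text
    return jamos, rebuilt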
| gpl-3.0 | 3,778,202,042,101,361,700 | 29.820789 | 79 | 0.551227 | false | 2.954983 | false | false | false |
drnextgis/QGIS | python/plugins/processing/core/parameters.py | 1 | 55397 | # -*- coding: utf-8 -*-
"""
***************************************************************************
Parameters.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
from builtins import range
from builtins import object
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import sys
import os
import math
from inspect import isclass
from copy import deepcopy
import numbers
from qgis.utils import iface
from qgis.PyQt.QtCore import QCoreApplication
from qgis.core import (QgsRasterLayer, QgsVectorLayer, QgsMapLayer, QgsCoordinateReferenceSystem,
QgsExpressionContext, QgsExpressionContextUtils, QgsExpression, QgsExpressionContextScope)
from processing.tools.vector import resolveFieldIndex, features
from processing.tools import dataobjects
from processing.core.outputs import OutputNumber, OutputRaster, OutputVector
from processing.tools.dataobjects import getObject
def parseBool(s):
if s is None or s == str(None).lower():
return None
return str(s).lower() == str(True).lower()
def _splitParameterOptions(line):
tokens = line.split('=', 1)
if tokens[1].lower().strip().startswith('optional'):
isOptional = True
definition = tokens[1].strip()[len('optional') + 1:]
else:
isOptional = False
definition = tokens[1]
return isOptional, tokens[0], definition
def _createDescriptiveName(s):
return s.replace('_', ' ')
def _expressionContext():
context = QgsExpressionContext()
context.appendScope(QgsExpressionContextUtils.globalScope())
context.appendScope(QgsExpressionContextUtils.projectScope())
if iface.mapCanvas():
context.appendScope(QgsExpressionContextUtils.mapSettingsScope(iface.mapCanvas().mapSettings()))
processingScope = QgsExpressionContextScope()
extent = iface.mapCanvas().fullExtent()
processingScope.setVariable('fullextent_minx', extent.xMinimum())
processingScope.setVariable('fullextent_miny', extent.yMinimum())
processingScope.setVariable('fullextent_maxx', extent.xMaximum())
processingScope.setVariable('fullextent_maxy', extent.yMaximum())
context.appendScope(processingScope)
return context
def _resolveLayers(value):
layers = dataobjects.getAllLayers()
if value:
inputlayers = value.split(';')
for i, inputlayer in enumerate(inputlayers):
for layer in layers:
if layer.name() == inputlayer:
inputlayers[i] = layer.source()
break
return ";".join(inputlayers)
class Parameter(object):
"""
Base class for all parameters that a geoalgorithm might
take as input.
"""
default_metadata = {}
def __init__(self, name='', description='', default=None, optional=False,
metadata={}):
self.name = name
self.description = description
self.default = default
self.value = default
self.isAdvanced = False
# A hidden parameter can be used to set a hard-coded value.
# It can be used as any other parameter, but it will not be
# shown to the user
self.hidden = False
self.optional = parseBool(optional)
# TODO: make deep copy and deep update
self.metadata = deepcopy(self.default_metadata)
self.metadata.update(deepcopy(metadata))
def setValue(self, obj):
"""
Sets the value of the parameter.
Returns true if the value passed is correct for the type
of parameter.
"""
if obj is None:
if not self.optional:
return False
self.value = None
return True
self.value = str(obj)
return True
def setDefaultValue(self):
"""
Sets the value of the parameter to the default one
Returns true if the default value is correct for the type
of parameter.
"""
return self.setValue(self.default)
def __str__(self):
return u'{} <{}>'.format(self.name, self.__class__.__name__)
def getValueAsCommandLineParameter(self):
"""
Returns the value of this parameter as it should have been
entered in the console if calling an algorithm using the
Processing.runalg() method.
"""
return str(self.value)
def typeName(self):
return self.__class__.__name__.replace('Parameter', '').lower()
def todict(self):
o = deepcopy(self.__dict__)
del o['metadata']
return o
def tr(self, string, context=''):
if context == '':
context = 'Parameter'
return QCoreApplication.translate(context, string)
def wrapper(self, dialog, row=0, col=0):
wrapper = self.metadata.get('widget_wrapper', None)
# wrapper metadata should be a class path
if isinstance(wrapper, str):
tokens = wrapper.split('.')
mod = __import__('.'.join(tokens[:-1]), fromlist=[tokens[-1]])
wrapper = getattr(mod, tokens[-1])
# or directly a class object
if isclass(wrapper):
wrapper = wrapper(self, dialog, row, col)
# or a wrapper instance
return wrapper
def evaluate(self, alg):
pass
def evaluateForModeler(self, value, model):
return value
class ParameterBoolean(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.BooleanWidgetWrapper'
}
def __init__(self, name='', description='', default=None, optional=False, metadata={}):
Parameter.__init__(self, name, description, parseBool(default), optional, metadata)
def setValue(self, value):
if value is None:
if not self.optional:
return False
self.value = None
return True
if isinstance(value, str):
self.value = str(value).lower() == str(True).lower()
else:
self.value = bool(value)
return True
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'boolean '
return '##' + self.name + '=' + param_type + str(self.default)
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
if definition.startswith("boolean"):
descName = _createDescriptiveName(name)
default = definition.strip()[len('boolean') + 1:]
if default:
param = ParameterBoolean(name, descName, default)
else:
param = ParameterBoolean(name, descName)
param.optional = isOptional
return param
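# Script-code sketch (added for illustration; the parameter name is made up).
# fromScriptCode() expects the declaration without the leading '##', which is
# assumed to be stripped by the caller, and getAsScriptCode() adds it back.
def _exampleBooleanScriptCode():
    param = ParameterBoolean.fromScriptCode('use_selection=optional boolean True')
    assert param.optional and param.default is True
    return param.getAsScriptCode()  # '##use_selection=optional boolean True'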
class ParameterCrs(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.CrsWidgetWrapper'
}
def __init__(self, name='', description='', default=None, optional=False, metadata={}):
'''The value is a string that uniquely identifies the
coordinate reference system. Typically it is the auth id of the CRS
(if the authority is EPSG) or proj4 string of the CRS (in case
of other authorities or user defined projections).'''
Parameter.__init__(self, name, description, default, optional, metadata)
def setValue(self, value):
if not bool(value):
if not self.optional:
return False
self.value = None
return True
if isinstance(value, QgsCoordinateReferenceSystem):
self.value = value.authid()
return True
if isinstance(value, QgsMapLayer):
self.value = value.crs().authid()
return True
try:
layer = dataobjects.getObjectFromUri(value)
if layer is not None:
self.value = layer.crs().authid()
return True
except:
pass
# TODO: check it is a valid authid
self.value = value
return True
def getValueAsCommandLineParameter(self):
return '"' + str(self.value) + '"'
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'crs '
return '##' + self.name + '=' + param_type + str(self.default)
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
if definition.startswith("crs"):
descName = _createDescriptiveName(name)
default = definition.strip()[len('crs') + 1:]
if default:
return ParameterCrs(name, descName, default, isOptional)
else:
return ParameterCrs(name, descName, None, isOptional)
class ParameterDataObject(Parameter):
def getValueAsCommandLineParameter(self):
if self.value is None:
return str(None)
else:
s = dataobjects.normalizeLayerSource(str(self.value))
s = '"%s"' % s
return s
def evaluate(self, alg):
self.value = _resolveLayers(self.value)
class ParameterExtent(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.ExtentWidgetWrapper'
}
USE_MIN_COVERING_EXTENT = 'USE_MIN_COVERING_EXTENT'
def __init__(self, name='', description='', default=None, optional=True):
Parameter.__init__(self, name, description, default, optional)
# The value is a string in the form "xmin, xmax, ymin, ymax"
def setValue(self, value):
if not value:
if not self.optional:
return False
self.value = None
return True
if isinstance(value, QgsMapLayer):
rect = value.extent()
self.value = '{},{},{},{}'.format(
rect.xMinimum(), rect.xMaximum(), rect.yMinimum(), rect.yMaximum())
return True
try:
layer = dataobjects.getObjectFromUri(value)
if layer is not None:
rect = layer.extent()
self.value = '{},{},{},{}'.format(
rect.xMinimum(), rect.xMaximum(), rect.yMinimum(), rect.yMaximum())
return True
except:
pass
tokens = str(value).split(',')
if len(tokens) != 4:
return False
try:
float(tokens[0])
float(tokens[1])
float(tokens[2])
float(tokens[3])
self.value = value
return True
except:
return False
def getValueAsCommandLineParameter(self):
return '"' + str(self.value) + '"'
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'extent'
return '##' + self.name + '=' + param_type
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
if definition.startswith("extent"):
descName = _createDescriptiveName(name)
default = definition.strip()[len('extent') + 1:] or None
return ParameterExtent(name, descName, default, isOptional)
def evaluate(self, alg):
if self.optional and not bool(self.value):
self.value = self.getMinCoveringExtent(alg)
def getMinCoveringExtent(self, alg):
first = True
found = False
for param in alg.parameters:
if param.value:
if isinstance(param, (ParameterRaster, ParameterVector)):
if isinstance(param.value, (QgsRasterLayer,
QgsVectorLayer)):
layer = param.value
else:
layer = dataobjects.getObject(param.value)
if layer:
found = True
self.addToRegion(layer, first)
first = False
elif isinstance(param, ParameterMultipleInput):
layers = param.value.split(';')
for layername in layers:
layer = dataobjects.getObject(layername)
if layer:
found = True
self.addToRegion(layer, first)
first = False
if found:
return '{},{},{},{}'.format(
self.xmin, self.xmax, self.ymin, self.ymax)
else:
return None
def addToRegion(self, layer, first):
if first:
self.xmin = layer.extent().xMinimum()
self.xmax = layer.extent().xMaximum()
self.ymin = layer.extent().yMinimum()
self.ymax = layer.extent().yMaximum()
else:
self.xmin = min(self.xmin, layer.extent().xMinimum())
self.xmax = max(self.xmax, layer.extent().xMaximum())
self.ymin = min(self.ymin, layer.extent().yMinimum())
self.ymax = max(self.ymax, layer.extent().yMaximum())
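# Value-format sketch (added for illustration): ParameterExtent stores its
# value as the string "xmin,xmax,ymin,ymax"; note the order differs from
# QgsRectangle's (xmin, ymin, xmax, ymax) constructor arguments.
def _exampleExtentValue():
    extent = ParameterExtent('EXTENT', 'Clip extent')
    assert extent.setValue('0,10,0,5')    # xmin=0, xmax=10, ymin=0, ymax=5
    assert not extent.setValue('0,10,0')  # wrong number of tokens is rejected
    return extent.value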
class ParameterPoint(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.PointWidgetWrapper'
}
def __init__(self, name='', description='', default=None, optional=False):
Parameter.__init__(self, name, description, default, optional)
# The value is a string in the form "x, y"
def setValue(self, text):
if text is None:
if not self.optional:
return False
self.value = None
return True
tokens = str(text).split(',')
if len(tokens) != 2:
return False
try:
float(tokens[0])
float(tokens[1])
self.value = text
return True
except:
return False
def getValueAsCommandLineParameter(self):
return '"' + str(self.value) + '"'
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'point'
return '##' + self.name + '=' + param_type
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
if definition.startswith("point"):
descName = _createDescriptiveName(name)
default = definition.strip()[len('point') + 1:] or None
return ParameterPoint(name, descName, default, isOptional)
class ParameterFile(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.FileWidgetWrapper'
}
def __init__(self, name='', description='', isFolder=False, optional=True, ext=None):
Parameter.__init__(self, name, description, None, parseBool(optional))
self.ext = ext
self.isFolder = parseBool(isFolder)
def getValueAsCommandLineParameter(self):
return '"' + str(self.value) + '"'
def setValue(self, obj):
if obj is None or obj.strip() == '':
if not self.optional:
return False
self.value = None if obj is None else obj.strip()
return True
if self.ext is not None and obj != '' and not obj.endswith(self.ext):
return False
self.value = str(obj)
return True
def typeName(self):
if self.isFolder:
return 'directory'
else:
return 'file'
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
if self.isFolder:
param_type += 'folder'
else:
param_type += 'file'
return '##' + self.name + '=' + param_type
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
if definition.startswith("file") or definition.startswith("folder"):
descName = _createDescriptiveName(name)
return ParameterFile(name, descName, definition.startswith("folder"), isOptional)
class ParameterFixedTable(Parameter):
def __init__(self, name='', description='', numRows=3,
cols=['value'], fixedNumOfRows=False, optional=False):
Parameter.__init__(self, name, description, None, optional)
self.cols = cols
if isinstance(cols, str):
self.cols = self.cols.split(";")
self.numRows = int(numRows)
self.fixedNumOfRows = parseBool(fixedNumOfRows)
def setValue(self, obj):
if obj is None:
if not self.optional:
return False
self.value = None
return True
# TODO: check that it contains a correct number of elements
if isinstance(obj, str):
self.value = obj
else:
self.value = ParameterFixedTable.tableToString(obj)
return True
def getValueAsCommandLineParameter(self):
return '"' + str(self.value) + '"'
@staticmethod
def tableToString(table):
tablestring = ''
for i in range(len(table)):
for j in range(len(table[0])):
tablestring = tablestring + table[i][j] + ','
tablestring = tablestring[:-1]
return tablestring
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
if definition.startswith("point"):
descName = _createDescriptiveName(name)
default = definition.strip()[len('point') + 1:] or None
return ParameterPoint(name, descName, default, isOptional)
class ParameterMultipleInput(ParameterDataObject):
"""A parameter representing several data objects.
Its value is a string with substrings separated by semicolons,
each of which represents the data source location of each element.
"""
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.MultipleInputWidgetWrapper'
}
exported = None
def __init__(self, name='', description='', datatype=-1, optional=False):
ParameterDataObject.__init__(self, name, description, None, optional)
self.datatype = int(float(datatype))
self.exported = None
self.minNumInputs = 0
""" Set minimum required number of inputs for parameter
By default minimal number of inputs is set to 1
@type _minNumInputs: numeric type or None
@param _minNumInputs: required minimum number of inputs for parameter. \
If user will pass None as parameter, we will use default minimal number of inputs (1)
@return: result, if the minimum number of inputs were set.
"""
def setMinNumInputs(self, _minNumInputs):
if _minNumInputs is None:
self.minNumInputs = 0
return True
if _minNumInputs < 1 and not self.optional:
# don't allow to set negative or null number of inputs if parameter isn't optional
return False
self.minNumInputs = int(_minNumInputs)
return True
""" Get minimum required number of inputs for parameter
@return: minimum number of inputs required for this parameter
@see: setMinNumInputs()
"""
def getMinNumInputs(self):
return self.minNumInputs
def setValue(self, obj):
self.exported = None
if obj is None:
if not self.optional:
return False
self.value = None
return True
if isinstance(obj, list):
if len(obj) == 0:
if self.optional:
self.value = None
return True
else:
return False
# prevent setting value if we didn't provide required minimal number of inputs
elif len(obj) < self.minNumInputs:
return False
self.value = ";".join([self.getAsString(lay) for lay in obj])
return True
else:
self.value = str(obj)
return True
def getSafeExportedLayers(self):
"""
Returns not the value entered by the user, but a string with
semicolon-separated filenames which contains the data of the
selected layers, but saved in a standard format (currently
shapefiles for vector layers and GeoTiff for raster) so that
they can be opened by most external applications.
If there is a selection and QGIS is configured to use just the
selection, it exports the layer even if it is already in a
suitable format.
Works only if the layer represented by the parameter value is
currently loaded in QGIS. Otherwise, it will not perform any
export and return the current value string.
If the current value represents a layer in a suitable format,
it does no export at all and returns that value.
Currently, it works just for vector layer. In the case of
raster layers, it returns the parameter value.
The layers are exported just the first time the method is
called. The method can be called several times and it will
always return the same string, performing the export only the
first time.
"""
if self.exported:
return self.exported
self.exported = self.value
layers = self.value.split(';')
if layers is None or len(layers) == 0:
return self.value
if self.datatype == dataobjects.TYPE_RASTER:
for layerfile in layers:
layer = dataobjects.getObjectFromUri(layerfile, False)
if layer:
filename = dataobjects.exportRasterLayer(layer)
self.exported = self.exported.replace(layerfile, filename)
return self.exported
elif self.datatype == dataobjects.TYPE_FILE:
return self.value
else:
for layerfile in layers:
layer = dataobjects.getObjectFromUri(layerfile, False)
if layer:
filename = dataobjects.exportVectorLayer(layer)
self.exported = self.exported.replace(layerfile, filename)
return self.exported
def getAsString(self, value):
if self.datatype == dataobjects.TYPE_RASTER:
if isinstance(value, QgsRasterLayer):
return str(value.dataProvider().dataSourceUri())
else:
s = str(value)
layers = dataobjects.getRasterLayers()
for layer in layers:
if layer.name() == s:
return str(layer.dataProvider().dataSourceUri())
return s
if self.datatype == dataobjects.TYPE_FILE:
return str(value)
else:
if isinstance(value, QgsVectorLayer):
return str(value.source())
else:
s = str(value)
layers = dataobjects.getVectorLayers([self.datatype])
for layer in layers:
if layer.name() == s:
return str(layer.source())
return s
def getFileFilter(self):
if self.datatype == dataobjects.TYPE_RASTER:
exts = dataobjects.getSupportedOutputRasterLayerExtensions()
elif self.datatype == dataobjects.TYPE_FILE:
return self.tr('All files (*.*)', 'ParameterMultipleInput')
else:
exts = dataobjects.getSupportedOutputVectorLayerExtensions()
for i in range(len(exts)):
exts[i] = self.tr('%s files(*.%s)', 'ParameterMultipleInput') % (exts[i].upper(), exts[i].lower())
return ';;'.join(exts)
def dataType(self):
if self.datatype == dataobjects.TYPE_VECTOR_POINT:
return 'points'
elif self.datatype == dataobjects.TYPE_VECTOR_LINE:
return 'lines'
elif self.datatype == dataobjects.TYPE_VECTOR_POLYGON:
return 'polygons'
elif self.datatype == dataobjects.TYPE_RASTER:
return 'rasters'
elif self.datatype == dataobjects.TYPE_FILE:
return 'files'
else:
return 'any vectors'
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
if self.datatype == dataobjects.TYPE_RASTER:
param_type += 'multiple raster'
if self.datatype == dataobjects.TYPE_FILE:
param_type += 'multiple file'
else:
param_type += 'multiple vector'
return '##' + self.name + '=' + param_type
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
descName = _createDescriptiveName(name)
if definition.lower().strip() == 'multiple raster':
return ParameterMultipleInput(name, descName,
dataobjects.TYPE_RASTER, isOptional)
elif definition.lower().strip() == 'multiple vector':
            return ParameterMultipleInput(name, descName,
dataobjects.TYPE_VECTOR_ANY, isOptional)
def evaluate(self, alg):
self.value = _resolveLayers(self.value)
class ParameterNumber(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.NumberWidgetWrapper'
}
def __init__(self, name='', description='', minValue=None, maxValue=None,
default=None, optional=False):
Parameter.__init__(self, name, description, default, optional)
if default is not None:
try:
self.default = int(str(default))
self.isInteger = True
except ValueError:
self.default = float(default)
self.isInteger = False
else:
self.isInteger = False
if minValue is not None:
self.min = int(float(minValue)) if self.isInteger else float(minValue)
else:
self.min = None
if maxValue is not None:
self.max = int(float(maxValue)) if self.isInteger else float(maxValue)
else:
self.max = None
self.value = self.default
def setValue(self, n):
if n is None:
if not self.optional:
return False
self.value = None
return True
if isinstance(n, str):
try:
v = self._evaluate(n)
self.value = float(v)
if self.isInteger:
self.value = int(math.floor(self.value))
return True
except:
return False
else:
try:
if float(n) - int(float(n)) == 0:
value = int(float(n))
else:
value = float(n)
if self.min is not None:
if value < self.min:
return False
if self.max is not None:
if value > self.max:
return False
self.value = value
return True
except:
raise
return False
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'number'
code = '##' + self.name + '=' + param_type
if self.default:
code += str(self.default)
return code
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
descName = _createDescriptiveName(name)
if definition.lower().strip().startswith('number'):
default = definition.strip()[len('number'):] or None
if default == 'None':
default = None
return ParameterNumber(name, descName, default=default, optional=isOptional)
def _evaluate(self, value):
exp = QgsExpression(value)
if exp.hasParserError():
raise ValueError(self.tr("Error in parameter expression: ") + exp.parserErrorString())
result = exp.evaluate(_expressionContext())
if exp.hasEvalError():
raise ValueError("Error evaluating parameter expression: " + exp.evalErrorString())
if self.isInteger:
return math.floor(result)
else:
return result
def evaluate(self, alg):
if isinstance(self.value, str) and bool(self.value):
self.value = self._evaluate(self.value)
def _layerVariables(self, element, alg=None):
variables = {}
layer = getObject(element.value)
if layer is not None:
name = element.name if alg is None else "%s_%s" % (alg.name, element.name)
variables['@%s_minx' % name] = layer.extent().xMinimum()
variables['@%s_miny' % name] = layer.extent().yMinimum()
            variables['@%s_maxx' % name] = layer.extent().xMaximum()
variables['@%s_maxy' % name] = layer.extent().yMaximum()
if isinstance(element, (ParameterRaster, OutputRaster)):
stats = layer.dataProvider().bandStatistics(1)
variables['@%s_avg' % name] = stats.mean
variables['@%s_stddev' % name] = stats.stdDev
variables['@%s_min' % name] = stats.minimumValue
variables['@%s_max' % name] = stats.maximumValue
return variables
def evaluateForModeler(self, value, model):
if isinstance(value, numbers.Number):
return value
variables = {}
for param in model.parameters:
if isinstance(param, ParameterNumber):
variables["@" + param.name] = param.value
if isinstance(param, (ParameterRaster, ParameterVector)):
variables.update(self._layerVariables(param))
for alg in list(model.algs.values()):
for out in alg.algorithm.outputs:
if isinstance(out, OutputNumber):
variables["@%s_%s" % (alg.name, out.name)] = out.value
if isinstance(out, (OutputRaster, OutputVector)):
variables.update(self._layerVariables(out, alg))
for k, v in list(variables.items()):
value = value.replace(k, str(v))
return value
def expressionContext(self):
return _expressionContext()
def getValueAsCommandLineParameter(self):
if self.value is None:
return str(None)
if isinstance(self.value, str):
return '"%s"' + self.value
return str(self.value)
class ParameterRange(Parameter):
def __init__(self, name='', description='', default=None, optional=False):
Parameter.__init__(self, name, description, default, optional)
if default is not None:
values = default.split(',')
try:
int(values[0])
int(values[1])
self.isInteger = True
except:
self.isInteger = False
else:
self.isInteger = False
def setValue(self, text):
if text is None:
if not self.optional:
return False
self.value = None
return True
tokens = text.split(',')
if len(tokens) != 2:
return False
try:
float(tokens[0])
float(tokens[1])
self.value = text
return True
except:
return False
def getValueAsCommandLineParameter(self):
return '"' + str(self.value) + '"' if self.value is not None else str(None)
class ParameterRaster(ParameterDataObject):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.RasterWidgetWrapper'
}
def __init__(self, name='', description='', optional=False, showSublayersDialog=True):
ParameterDataObject.__init__(self, name, description, None, optional)
self.showSublayersDialog = parseBool(showSublayersDialog)
self.exported = None
def getSafeExportedLayer(self):
"""Returns not the value entered by the user, but a string with
a filename which contains the data of this layer, but saved in
a standard format (currently always a geotiff file) so that it
can be opened by most external applications.
Works only if the layer represented by the parameter value is
currently loaded in QGIS. Otherwise, it will not perform any
export and return the current value string.
If the current value represents a layer in a suitable format,
it does not export at all and returns that value.
The layer is exported just the first time the method is called.
The method can be called several times and it will always
return the same file, performing the export only the first
time.
"""
if self.exported:
return self.exported
layer = dataobjects.getObjectFromUri(self.value, False)
if layer:
self.exported = dataobjects.exportRasterLayer(layer)
else:
self.exported = self.value
return self.exported
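    # Illustrative sketch (hypothetical values, not part of the original code):
    #
    #   param = ParameterRaster('INPUT', 'Input raster')
    #   param.setValue('/data/dem.tif')
    #   path = param.getSafeExportedLayer()   # exports on the first call if needed
    #   path == param.getSafeExportedLayer()  # later calls reuse the cached file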
def setValue(self, obj):
self.exported = None
if obj is None:
if not self.optional:
return False
self.value = None
return True
if isinstance(obj, QgsRasterLayer):
self.value = str(obj.dataProvider().dataSourceUri())
return True
else:
self.value = str(obj)
return True
def getFileFilter(self):
exts = dataobjects.getSupportedOutputRasterLayerExtensions()
for i in range(len(exts)):
exts[i] = self.tr('%s files(*.%s)', 'ParameterRaster') % (exts[i].upper(), exts[i].lower())
return ';;'.join(exts)
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'raster'
return '##' + self.name + '=' + param_type
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
descName = _createDescriptiveName(name)
if definition.lower().strip().startswith('raster'):
return ParameterRaster(name, descName, optional=isOptional)
class ParameterSelection(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.SelectionWidgetWrapper'
}
def __init__(self, name='', description='', options=[], default=None, isSource=False,
multiple=False, optional=False):
Parameter.__init__(self, name, description, default, optional)
self.multiple = multiple
isSource = parseBool(isSource)
self.options = options
if isSource:
self.options = []
layer = QgsVectorLayer(options[0], "layer", "ogr")
if layer.isValid():
try:
index = resolveFieldIndex(layer, options[1])
feats = features(layer)
for feature in feats:
self.options.append(str(feature.attributes()[index]))
except ValueError:
pass
elif isinstance(self.options, str):
self.options = self.options.split(";")
if default is not None:
try:
self.default = int(default)
except:
self.default = 0
self.value = self.default
def setValue(self, value):
if value is None:
if not self.optional:
return False
self.value = 0
return True
if isinstance(value, list):
if not self.multiple:
return False
values = []
for v in value:
try:
n = int(v)
values.append(n)
except:
return False
if not self.optional and len(values) == 0:
return False
self.value = values
return True
else:
try:
n = int(value)
self.value = n
return True
except:
return False
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
descName = _createDescriptiveName(name)
if definition.lower().strip().startswith('selectionfromfile'):
options = definition.strip()[len('selectionfromfile '):].split(';')
return ParameterSelection(name, descName, options, isSource=True, optional=isOptional)
elif definition.lower().strip().startswith('selection'):
options = definition.strip()[len('selection '):].split(';')
return ParameterSelection(name, descName, options, optional=isOptional)
elif definition.lower().strip().startswith('multipleselectionfromfile'):
options = definition.strip()[len('multipleselectionfromfile '):].split(';')
return ParameterSelection(name, descName, options, isSource=True,
multiple=True, optional=isOptional)
elif definition.lower().strip().startswith('multipleselection'):
options = definition.strip()[len('multipleselection '):].split(';')
return ParameterSelection(name, descName, options, multiple=True, optional=isOptional)
class ParameterEvaluationException(Exception):
def __init__(self, param, msg):
        Exception.__init__(self, msg)
self.param = param
class ParameterString(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.StringWidgetWrapper'
}
NEWLINE = '\n'
ESCAPED_NEWLINE = '\\n'
def __init__(self, name='', description='', default=None, multiline=False,
optional=False, evaluateExpressions=False):
Parameter.__init__(self, name, description, default, optional)
self.multiline = parseBool(multiline)
self.evaluateExpressions = parseBool(evaluateExpressions)
def setValue(self, obj):
if not bool(obj):
if not self.optional:
return False
self.value = None
return True
self.value = str(obj).replace(
ParameterString.ESCAPED_NEWLINE,
ParameterString.NEWLINE
)
return True
def getValueAsCommandLineParameter(self):
return ('"' + str(self.value.replace(ParameterString.NEWLINE,
ParameterString.ESCAPED_NEWLINE)) + '"'
if self.value is not None else str(None))
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'string'
return '##' + self.name + '=' + param_type + self.default
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
descName = _createDescriptiveName(name)
if definition.lower().strip().startswith('string'):
default = definition.strip()[len('string') + 1:]
if default:
return ParameterString(name, descName, default, optional=isOptional)
else:
return ParameterString(name, descName, optional=isOptional)
elif definition.lower().strip().startswith('longstring'):
default = definition.strip()[len('longstring') + 1:]
if default:
return ParameterString(name, descName, default, multiline=True, optional=isOptional)
else:
return ParameterString(name, descName, multiline=True, optional=isOptional)
def evaluate(self, alg):
if isinstance(self.value, str) and bool(self.value) and self.evaluateExpressions:
exp = QgsExpression(self.value)
if exp.hasParserError():
raise ValueError(self.tr("Error in parameter expression: ") + exp.parserErrorString())
result = exp.evaluate(_expressionContext())
if exp.hasEvalError():
raise ValueError("Error evaluating parameter expression: " + exp.evalErrorString())
self.value = result
def expressionContext(self):
return _expressionContext()
class ParameterExpression(Parameter):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.ExpressionWidgetWrapper'
}
NEWLINE = '\n'
ESCAPED_NEWLINE = '\\n'
def __init__(self, name='', description='', default=None, optional=False, parent_layer=None):
Parameter.__init__(self, name, description, default, optional)
self.parent_layer = parent_layer
def setValue(self, obj):
if not bool(obj):
if not self.optional:
return False
self.value = None
return True
self.value = str(obj).replace(
ParameterString.ESCAPED_NEWLINE,
ParameterString.NEWLINE
)
return True
def getValueAsCommandLineParameter(self):
return ('"' + str(self.value.replace(ParameterExpression.NEWLINE,
ParameterExpression.ESCAPED_NEWLINE)) + '"'
if self.value is not None else str(None))
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'expression'
return '##' + self.name + '=' + param_type + self.default
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
if definition.lower().strip().startswith('expression'):
descName = _createDescriptiveName(name)
default = definition.strip()[len('expression') + 1:]
if default:
return ParameterExpression(name, descName, default, optional=isOptional)
else:
return ParameterExpression(name, descName, optional=isOptional)
class ParameterTable(ParameterDataObject):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.TableWidgetWrapper'
}
def __init__(self, name='', description='', optional=False):
ParameterDataObject.__init__(self, name, description, None, optional)
self.exported = None
def setValue(self, obj):
self.exported = None
if obj is None:
if not self.optional:
return False
self.value = None
return True
if isinstance(obj, QgsVectorLayer):
source = str(obj.source())
self.value = source
return True
else:
self.value = str(obj)
layers = dataobjects.getTables()
for layer in layers:
if layer.name() == self.value or layer.source() == self.value:
source = str(layer.source())
self.value = source
return True
val = str(obj)
self.value = val
return os.path.exists(self.value)
def getSafeExportedTable(self):
"""Returns not the value entered by the user, but a string with
        a filename for a copy of this table saved in a standard format
        (currently always a DBF file) so that it can be opened by most
        external applications.
Works only if the table represented by the parameter value is
currently loaded in QGIS. Otherwise, it will not perform any
export and return the current value string.
If the current value represents a table in a suitable format,
it does not export at all and returns that value.
The table is exported just the first time the method is called.
The method can be called several times and it will always
return the same file, performing the export only the first
time.
"""
if self.exported:
return self.exported
table = dataobjects.getObjectFromUri(self.value, False)
if table:
self.exported = dataobjects.exportTable(table)
else:
self.exported = self.value
return self.exported
def getFileFilter(self):
exts = ['csv', 'dbf']
for i in range(len(exts)):
exts[i] = self.tr('%s files(*.%s)', 'ParameterTable') % (exts[i].upper(), exts[i].lower())
return ';;'.join(exts)
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'table'
return '##' + self.name + '=' + param_type
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
descName = _createDescriptiveName(name)
if definition.lower().strip().startswith('table'):
return ParameterTable(name, descName, isOptional)
class ParameterTableField(Parameter):
"""A parameter representing a table field.
Its value is a string that represents the name of the field.
"""
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.TableFieldWidgetWrapper'
}
DATA_TYPE_NUMBER = 0
DATA_TYPE_STRING = 1
DATA_TYPE_DATETIME = 2
DATA_TYPE_ANY = -1
def __init__(self, name='', description='', parent=None, datatype=-1,
optional=False, multiple=False):
Parameter.__init__(self, name, description, None, optional)
self.parent = parent
self.multiple = multiple
self.datatype = int(datatype)
def getValueAsCommandLineParameter(self):
return '"' + str(self.value) + '"' if self.value is not None else str(None)
def setValue(self, value):
if not bool(value):
if not self.optional:
return False
self.value = None
return True
if isinstance(value, list):
if not self.multiple and len(value) > 1:
return False
self.value = ";".join(value)
return True
else:
self.value = str(value)
return True
def __str__(self):
return self.name + ' <' + self.__module__.split('.')[-1] + ' from ' \
+ self.parent + '>'
def dataType(self):
if self.datatype == self.DATA_TYPE_NUMBER:
return 'numeric'
elif self.datatype == self.DATA_TYPE_STRING:
return 'string'
elif self.datatype == self.DATA_TYPE_DATETIME:
return 'datetime'
else:
return 'any'
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'field'
return '##' + self.name + '=' + param_type + self.parent
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
descName = _createDescriptiveName(name)
if definition.lower().strip().startswith('field'):
if definition.lower().strip().startswith('field number'):
parent = definition.strip()[len('field number') + 1:]
datatype = ParameterTableField.DATA_TYPE_NUMBER
elif definition.lower().strip().startswith('field string'):
parent = definition.strip()[len('field string') + 1:]
datatype = ParameterTableField.DATA_TYPE_STRING
elif definition.lower().strip().startswith('field datetime'):
parent = definition.strip()[len('field datetime') + 1:]
datatype = ParameterTableField.DATA_TYPE_DATETIME
else:
parent = definition.strip()[len('field') + 1:]
datatype = ParameterTableField.DATA_TYPE_ANY
return ParameterTableField(name, descName, parent, datatype, isOptional)
class ParameterVector(ParameterDataObject):
default_metadata = {
'widget_wrapper': 'processing.gui.wrappers.VectorWidgetWrapper'
}
def __init__(self, name='', description='', datatype=[-1],
optional=False):
ParameterDataObject.__init__(self, name, description, None, optional)
if isinstance(datatype, int):
datatype = [datatype]
elif isinstance(datatype, str):
datatype = [int(t) for t in datatype.split(',')]
self.datatype = datatype
self.exported = None
self.allowOnlyOpenedLayers = False
def setValue(self, obj):
self.exported = None
if obj is None:
if not self.optional:
return False
self.value = None
return True
if isinstance(obj, QgsVectorLayer):
self.value = str(obj.source())
return True
else:
self.value = str(obj)
return True
def getSafeExportedLayer(self):
"""Returns not the value entered by the user, but a string with
        a filename for a copy of this layer saved in a standard format
        (currently always a shapefile) so that it can be opened by most
        external applications.
        If there is a selection and QGIS is configured to use just the
        selection, it exports the layer even if it is already in a
suitable format.
Works only if the layer represented by the parameter value is
currently loaded in QGIS. Otherwise, it will not perform any
export and return the current value string.
If the current value represents a layer in a suitable format,
it does not export at all and returns that value.
The layer is exported just the first time the method is called.
The method can be called several times and it will always
return the same file, performing the export only the first
time.
"""
if self.exported:
return self.exported
layer = dataobjects.getObjectFromUri(self.value, False)
if layer:
self.exported = dataobjects.exportVectorLayer(layer)
else:
self.exported = self.value
return self.exported
def getFileFilter(self):
exts = dataobjects.getSupportedOutputVectorLayerExtensions()
for i in range(len(exts)):
exts[i] = self.tr('%s files(*.%s)', 'ParameterVector') % (exts[i].upper(), exts[i].lower())
return ';;'.join(exts)
def dataType(self):
return dataobjects.vectorDataType(self)
def getAsScriptCode(self):
param_type = ''
if self.optional:
param_type += 'optional '
param_type += 'vector'
return '##' + self.name + '=' + param_type
@classmethod
def fromScriptCode(self, line):
isOptional, name, definition = _splitParameterOptions(line)
descName = _createDescriptiveName(name)
if definition.lower().strip() == 'vector':
return ParameterVector(name, descName,
[dataobjects.TYPE_VECTOR_ANY], isOptional)
elif definition.lower().strip() == 'vector point':
return ParameterVector(name, descName,
[dataobjects.TYPE_VECTOR_POINT], isOptional)
elif definition.lower().strip() == 'vector line':
return ParameterVector(name, descName,
[dataobjects.TYPE_VECTOR_LINE], isOptional)
elif definition.lower().strip() == 'vector polygon':
return ParameterVector(name, descName,
[dataobjects.TYPE_VECTOR_POLYGON], isOptional)
class ParameterGeometryPredicate(Parameter):
predicates = ('intersects',
'contains',
'disjoint',
'equals',
'touches',
'overlaps',
'within',
'crosses')
def __init__(self, name='', description='', left=None, right=None,
optional=False, enabledPredicates=None):
Parameter.__init__(self, name, description, None, optional)
self.left = left
self.right = right
self.value = None
self.enabledPredicates = enabledPredicates
if self.enabledPredicates is None:
self.enabledPredicates = self.predicates
def getValueAsCommandLineParameter(self):
return str(self.value)
def setValue(self, value):
if value is None:
if not self.optional:
return False
self.value = None
return True
elif len(value) == 0 and not self.optional:
return False
if isinstance(value, str):
self.value = value.split(';') # relates to ModelerAlgorithm.resolveValue
else:
self.value = value
return True
paramClasses = [c for c in list(sys.modules[__name__].__dict__.values()) if isclass(c) and issubclass(c, Parameter)]
def getParameterFromString(s):
# Try the parameter definitions used in description files
if '|' in s and (s.startswith("Parameter") or s.startswith("*Parameter")):
isAdvanced = False
if s.startswith("*"):
s = s[1:]
isAdvanced = True
tokens = s.split("|")
params = [t if str(t) != str(None) else None for t in tokens[1:]]
try:
clazz = getattr(sys.modules[__name__], tokens[0])
param = clazz(*params)
param.isAdvanced = isAdvanced
return param
except:
return None
else: # try script syntax
for paramClass in paramClasses:
try:
param = paramClass.fromScriptCode(s)
if param is not None:
return param
except AttributeError:
pass
except:
return None
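# Illustrative examples (hypothetical inputs): both syntaxes are accepted, e.g.
#   getParameterFromString('ParameterString|TITLE|Map title|My map|False')
#   getParameterFromString('##fields=optional field string parent_layer')
# each returns a configured Parameter instance, or None if the string cannot be parsed.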
| gpl-2.0 | 6,668,968,050,453,547,000 | 34.083597 | 116 | 0.573605 | false | 4.626054 | false | false | false |
andrewk1/Climb-Bot | climb-bot.py | 1 | 3083 | import praw
import requests
import json
import time
import re
# Function iterates over each submission title and checks if the title contains route syntax that indicates the post is about a route
def parse_titles(bot, subreddit):
start_time = time.time()
for submission in subreddit.stream.submissions():
if (submission.created_utc < start_time):
continue
title = submission.title
# regex matches sequence of capitalized words followed by climb grade notation (V or 5.)
route_regex = '([A-Z][a-z]+(?=\s[A-Z])(?:\s[A-Z][a-z]+)+) [( ]?(5.[0-9][0-9]?[A-Za-z]|[Vv][0-9][0-9]?)'
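        # e.g. it matches "Midnight Lightning V8" or "Moonlight Buttress 5.12d",
        # capturing the route name and the grade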
route_name = re.search(route_regex, title)
        if route_name is None:
            continue
        print route_name.group(0)
        comment = make_get_request(route_name.group(0))
if comment != 'NA':
submission.reply(comment)
# Call custom google search engine API to parse the formulated title and gather theCrag's metadata for the route
def make_get_request(route):
key = 'key=***'
cx = 'cx=***'
    query = 'q=' + route
    google_url = 'https://www.googleapis.com/customsearch/v1?' + key + '&' + cx + '&' + query
response = requests.get(google_url)
parsed_response= json.loads(response.text)
return form_post(parsed_response)
# Extract data from google's JSON response and form a post
def form_post(parsed_response):
# Check if Google search received a hit
    if parsed_response['searchInformation']['totalResults'] in (0, '0') or 'items' not in parsed_response:
return 'NA'
title = parsed_response['items'][0]['title']
print title
breadcrumb = parsed_response['items'][0]['pagemap']['breadcrumb']
count = 0
# Build up region string
region_string = ''
for key in breadcrumb:
region = breadcrumb[count]['title']
if (count > 0) :
region_string = region + ', ' + region_string
else :
region_string = region;
count+=1
metatags = parsed_response['items'][0]['pagemap']['metatags']
country = breadcrumb[0]['title']
latitude = metatags[0]['place:location:latitude']
longitude = metatags[0]['place:location:longitude']
    google_pin = 'https://www.google.com/maps/@?api=1&map_action=map&basemap=satellite&zoom=19&center=' + latitude + ',' + longitude
link = metatags[0]['og:url']
if (' in ' in title):
title = title[:title.index(' in ')]
# Truncate values to 3rd decimal place
lat_decimal = latitude.index('.')
latitude = latitude[:lat_decimal+4]
long_decimal = longitude.index('.')
longitude = longitude[:long_decimal+4]
# Format comment response
return 'I found a route! [' + title + '](' + link + ') in ' + region_string + '\n\nGPS Location: [' + latitude + ', ' + longitude + ']('+google_pin+')' + '\n\n ' + '\n\n^^^I ^^^am ^^^a ^^^bot ^^^| ^^^Data ^^^from ^^^[theCrag.com](https://www.thecrag.com/) ^^^| ^^^Feedback ^^^welcome ^^^at ^^^[r/climbBot](https://www.reddit.com/r/climbBot/)'
if __name__ == "__main__":
bot = praw.Reddit(
user_agent='climb-bot posts additional information on climbing routes it finds, created by /u/Akondrich, email: [email protected]',
client_id='***',
client_secret='***',
username='climb-bot',
password='***')
subreddit = bot.subreddit('climbBot')
parse_titles(bot, subreddit)
| mit | -518,196,464,358,046,460 | 38.525641 | 343 | 0.67337 | false | 3.019589 | false | false | false |
yuxng/Deep_ISM | ISM/lib/setup.py | 1 | 6351 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import os
from os.path import join as pjoin
import numpy as np
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
def find_in_path(name, path):
"Find a file in a search path"
    # adapted from http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/
for dir in path.split(os.pathsep):
binpath = pjoin(dir, name)
if os.path.exists(binpath):
return os.path.abspath(binpath)
return None
def locate_cuda():
"""Locate the CUDA environment on the system
Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
and values giving the absolute path to each directory.
Starts by looking for the CUDAHOME env variable. If not found, everything
is based on finding 'nvcc' in the PATH.
"""
# first check if the CUDAHOME env variable is in use
if 'CUDAHOME' in os.environ:
home = os.environ['CUDAHOME']
nvcc = pjoin(home, 'bin', 'nvcc')
else:
# otherwise, search the PATH for NVCC
default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path)
if nvcc is None:
raise EnvironmentError('The nvcc binary could not be '
'located in your $PATH. Either add it to your path, or set $CUDAHOME')
home = os.path.dirname(os.path.dirname(nvcc))
cudaconfig = {'home':home, 'nvcc':nvcc,
'include': pjoin(home, 'include'),
'lib64': pjoin(home, 'lib64')}
for k, v in cudaconfig.iteritems():
if not os.path.exists(v):
raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))
return cudaconfig
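# On a typical Linux install the resulting dict looks like (illustrative):
#   {'home': '/usr/local/cuda', 'nvcc': '/usr/local/cuda/bin/nvcc',
#    'include': '/usr/local/cuda/include', 'lib64': '/usr/local/cuda/lib64'}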
CUDA = locate_cuda()
# Obtain the numpy include directory. This logic works across numpy versions.
try:
numpy_include = np.get_include()
except AttributeError:
numpy_include = np.get_numpy_include()
def customize_compiler_for_nvcc(self):
"""inject deep into distutils to customize how the dispatch
to gcc/nvcc works.
If you subclass UnixCCompiler, it's not trivial to get your subclass
injected in, and still have the right customizations (i.e.
distutils.sysconfig.customize_compiler) run on it. So instead of going
    the OO route, I have this. Note, it's kind of like a weird functional
subclassing going on."""
    # tell the compiler it can process .cu source files
self.src_extensions.append('.cu')
    # save references to the default compiler_so and _compile methods
default_compiler_so = self.compiler_so
super = self._compile
# now redefine the _compile method. This gets executed for each
# object but distutils doesn't have the ability to change compilers
# based on source extension: we add it.
def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
if os.path.splitext(src)[1] == '.cu':
# use the cuda for .cu files
self.set_executable('compiler_so', CUDA['nvcc'])
# use only a subset of the extra_postargs, which are 1-1 translated
# from the extra_compile_args in the Extension class
postargs = extra_postargs['nvcc']
else:
postargs = extra_postargs['gcc']
super(obj, src, ext, cc_args, postargs, pp_opts)
# reset the default compiler_so, which we might have changed for cuda
self.compiler_so = default_compiler_so
# inject our redefined _compile method into the class
self._compile = _compile
# build_ext subclass that runs customize_compiler_for_nvcc before building
class custom_build_ext(build_ext):
def build_extensions(self):
customize_compiler_for_nvcc(self.compiler)
build_ext.build_extensions(self)
ext_modules = [
Extension(
"utils.cython_bbox",
["utils/bbox.pyx"],
extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
),
Extension(
"utils.cython_nms",
["utils/nms.pyx"],
extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
),
Extension(
"nms.cpu_nms",
["nms/cpu_nms.pyx"],
extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
include_dirs = [numpy_include]
),
Extension('nms.gpu_nms',
['nms/nms_kernel.cu', 'nms/gpu_nms.pyx'],
library_dirs=[CUDA['lib64']],
libraries=['cudart'],
language='c++',
runtime_library_dirs=[CUDA['lib64']],
# this syntax is specific to this build system
# we're only going to use certain compiler args with nvcc and not with gcc
# the implementation of this trick is in customize_compiler() below
extra_compile_args={'gcc': ["-Wno-unused-function"],
'nvcc': ['-arch=sm_35',
'--ptxas-options=-v',
'-c',
'--compiler-options',
"'-fPIC'"]},
include_dirs = [numpy_include, CUDA['include']]
),
Extension('normals.gpu_normals',
['normals/compute_normals.cu', 'normals/gpu_normals.pyx'],
library_dirs=[CUDA['lib64']],
libraries=['cudart'],
language='c++',
runtime_library_dirs=[CUDA['lib64']],
# this syntax is specific to this build system
# we're only going to use certain compiler args with nvcc and not with gcc
# the implementation of this trick is in customize_compiler() below
extra_compile_args={'gcc': ["-Wno-unused-function"],
'nvcc': ['-arch=sm_35',
'--ptxas-options=-v',
'-c',
'--compiler-options',
"'-fPIC'"]},
include_dirs = [numpy_include, CUDA['include'], '/usr/local/include/eigen3']
)
]
setup(
name='fast_rcnn',
ext_modules=ext_modules,
# inject our custom trigger
cmdclass={'build_ext': custom_build_ext},
)
| mit | -73,697,840,488,066,960 | 37.490909 | 91 | 0.587781 | false | 3.905904 | false | false | false |
gamechanger/kafka-python | kafka/protocol/admin.py | 1 | 1182 | from .struct import Struct
from .types import Array, Bytes, Int16, Schema, String
class ListGroupsResponse(Struct):
SCHEMA = Schema(
('error_code', Int16),
('groups', Array(
('group', String('utf-8')),
('protocol_type', String('utf-8'))))
)
class ListGroupsRequest(Struct):
API_KEY = 16
API_VERSION = 0
RESPONSE_TYPE = ListGroupsResponse
SCHEMA = Schema()
class DescribeGroupsResponse(Struct):
SCHEMA = Schema(
('groups', Array(
('error_code', Int16),
('group', String('utf-8')),
('state', String('utf-8')),
('protocol_type', String('utf-8')),
('protocol', String('utf-8')),
('members', Array(
('member_id', String('utf-8')),
('client_id', String('utf-8')),
('client_host', String('utf-8')),
('member_metadata', Bytes),
('member_assignment', Bytes)))))
)
class DescribeGroupsRequest(Struct):
API_KEY = 15
API_VERSION = 0
RESPONSE_TYPE = DescribeGroupsResponse
SCHEMA = Schema(
('groups', Array(String('utf-8')))
)
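# Illustrative sketch (assumes the encode()/decode() helpers that Struct provides
# to the other request/response types in kafka.protocol):
#   request = ListGroupsRequest()   # empty schema, so no arguments are needed
#   payload = request.encode()      # bytes to send to the broker
#   response = ListGroupsResponse.decode(raw_reply_bytes)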
| apache-2.0 | -8,683,488,429,018,159,000 | 25.863636 | 54 | 0.526227 | false | 3.953177 | false | false | false |
UTSA-ICS/keystone-SID | keystone/tests/test_auth.py | 1 | 44678 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import uuid
import mock
from keystone import assignment
from keystone import auth
from keystone.common import authorization
from keystone.common import environment
from keystone import config
from keystone import exception
from keystone.openstack.common import timeutils
from keystone import tests
from keystone.tests import default_fixtures
from keystone import token
from keystone import trust
CONF = config.CONF
TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
HOST_URL = 'http://keystone:5001'
def _build_user_auth(token=None, user_id=None, username=None,
password=None, tenant_id=None, tenant_name=None,
trust_id=None):
"""Build auth dictionary.
    It creates an auth dictionary containing only the credential and scope
    fields that were actually supplied.
"""
auth_json = {}
if token is not None:
auth_json['token'] = token
if username or password:
auth_json['passwordCredentials'] = {}
if username is not None:
auth_json['passwordCredentials']['username'] = username
if user_id is not None:
auth_json['passwordCredentials']['userId'] = user_id
if password is not None:
auth_json['passwordCredentials']['password'] = password
if tenant_name is not None:
auth_json['tenantName'] = tenant_name
if tenant_id is not None:
auth_json['tenantId'] = tenant_id
if trust_id is not None:
auth_json['trust_id'] = trust_id
return auth_json
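# For example, _build_user_auth(username='FOO', password='foo2', tenant_name='BAR')
# returns:
#     {'passwordCredentials': {'username': 'FOO', 'password': 'foo2'},
#      'tenantName': 'BAR'}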
class AuthTest(tests.TestCase):
def setUp(self):
super(AuthTest, self).setUp()
self.load_backends()
self.load_fixtures(default_fixtures)
# need to register the token provider first because auth controller
# depends on it
token.provider.Manager()
self.context_with_remote_user = {'environment':
{'REMOTE_USER': 'FOO',
'AUTH_TYPE': 'Negotiate'}}
self.empty_context = {'environment': {}}
self.controller = token.controllers.Auth()
        # This call sets up, among other things, the call to popen that will
        # be used to run the CMS command. These tests were passing only due
        # to the global nature of the call. If the tests in this file are run
        # alone, API calls return unauthorized.
environment.use_eventlet(monkeypatch_thread=False)
def assertEqualTokens(self, a, b):
"""Assert that two tokens are equal.
Compare two tokens except for their ids. This also truncates
the time in the comparison.
"""
def normalize(token):
token['access']['token']['id'] = 'dummy'
del token['access']['token']['expires']
del token['access']['token']['issued_at']
return token
self.assertCloseEnoughForGovernmentWork(
timeutils.parse_isotime(a['access']['token']['expires']),
timeutils.parse_isotime(b['access']['token']['expires']))
self.assertCloseEnoughForGovernmentWork(
timeutils.parse_isotime(a['access']['token']['issued_at']),
timeutils.parse_isotime(b['access']['token']['issued_at']))
return self.assertDictEqual(normalize(a), normalize(b))
class AuthBadRequests(AuthTest):
def setUp(self):
super(AuthBadRequests, self).setUp()
def test_no_external_auth(self):
"""Verify that _authenticate_external() raises exception if N/A."""
self.assertRaises(
token.controllers.ExternalAuthNotApplicable,
self.controller._authenticate_external,
{}, {})
def test_no_token_in_auth(self):
"""Verify that _authenticate_token() raises exception if no token."""
self.assertRaises(
exception.ValidationError,
self.controller._authenticate_token,
None, {})
def test_no_credentials_in_auth(self):
"""Verify that _authenticate_local() raises exception if no creds."""
self.assertRaises(
exception.ValidationError,
self.controller._authenticate_local,
None, {})
def test_authenticate_blank_request_body(self):
"""Verify sending empty json dict raises the right exception."""
self.assertRaises(exception.ValidationError,
self.controller.authenticate,
{}, {})
def test_authenticate_blank_auth(self):
"""Verify sending blank 'auth' raises the right exception."""
body_dict = _build_user_auth()
self.assertRaises(exception.ValidationError,
self.controller.authenticate,
{}, body_dict)
def test_authenticate_invalid_auth_content(self):
"""Verify sending invalid 'auth' raises the right exception."""
self.assertRaises(exception.ValidationError,
self.controller.authenticate,
{}, {'auth': 'abcd'})
def test_authenticate_user_id_too_large(self):
"""Verify sending large 'userId' raises the right exception."""
body_dict = _build_user_auth(user_id='0' * 65, username='FOO',
password='foo2')
self.assertRaises(exception.ValidationSizeError,
self.controller.authenticate,
{}, body_dict)
def test_authenticate_username_too_large(self):
"""Verify sending large 'username' raises the right exception."""
body_dict = _build_user_auth(username='0' * 65, password='foo2')
self.assertRaises(exception.ValidationSizeError,
self.controller.authenticate,
{}, body_dict)
def test_authenticate_tenant_id_too_large(self):
"""Verify sending large 'tenantId' raises the right exception."""
body_dict = _build_user_auth(username='FOO', password='foo2',
tenant_id='0' * 65)
self.assertRaises(exception.ValidationSizeError,
self.controller.authenticate,
{}, body_dict)
def test_authenticate_tenant_name_too_large(self):
"""Verify sending large 'tenantName' raises the right exception."""
body_dict = _build_user_auth(username='FOO', password='foo2',
tenant_name='0' * 65)
self.assertRaises(exception.ValidationSizeError,
self.controller.authenticate,
{}, body_dict)
def test_authenticate_token_too_large(self):
"""Verify sending large 'token' raises the right exception."""
body_dict = _build_user_auth(token={'id': '0' * 8193})
self.assertRaises(exception.ValidationSizeError,
self.controller.authenticate,
{}, body_dict)
def test_authenticate_password_too_large(self):
"""Verify sending large 'password' raises the right exception."""
length = CONF.identity.max_password_length + 1
body_dict = _build_user_auth(username='FOO', password='0' * length)
self.assertRaises(exception.ValidationSizeError,
self.controller.authenticate,
{}, body_dict)
class AuthWithToken(AuthTest):
def setUp(self):
super(AuthWithToken, self).setUp()
def test_unscoped_token(self):
"""Verify getting an unscoped token with password creds."""
body_dict = _build_user_auth(username='FOO',
password='foo2')
unscoped_token = self.controller.authenticate({}, body_dict)
self.assertNotIn('tenant', unscoped_token['access']['token'])
def test_auth_invalid_token(self):
"""Verify exception is raised if invalid token."""
body_dict = _build_user_auth(token={"id": uuid.uuid4().hex})
self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
{}, body_dict)
def test_auth_bad_formatted_token(self):
"""Verify exception is raised if invalid token."""
body_dict = _build_user_auth(token={})
self.assertRaises(
exception.ValidationError,
self.controller.authenticate,
{}, body_dict)
def test_auth_unscoped_token_no_project(self):
"""Verify getting an unscoped token with an unscoped token."""
body_dict = _build_user_auth(
username='FOO',
password='foo2')
unscoped_token = self.controller.authenticate({}, body_dict)
body_dict = _build_user_auth(
token=unscoped_token["access"]["token"])
unscoped_token_2 = self.controller.authenticate({}, body_dict)
self.assertEqualTokens(unscoped_token, unscoped_token_2)
def test_auth_unscoped_token_project(self):
"""Verify getting a token in a tenant with an unscoped token."""
# Add a role in so we can check we get this back
self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'],
self.tenant_bar['id'],
self.role_member['id'])
        # Get an unscoped token
body_dict = _build_user_auth(
username='FOO',
password='foo2')
unscoped_token = self.controller.authenticate({}, body_dict)
        # Get a token on the BAR tenant using the unscoped token
body_dict = _build_user_auth(
token=unscoped_token["access"]["token"],
tenant_name="BAR")
scoped_token = self.controller.authenticate({}, body_dict)
tenant = scoped_token["access"]["token"]["tenant"]
roles = scoped_token["access"]["metadata"]["roles"]
self.assertEqual(self.tenant_bar['id'], tenant["id"])
self.assertEqual(self.role_member['id'], roles[0])
def test_auth_token_project_group_role(self):
"""Verify getting a token in a tenant with group roles."""
# Add a v2 style role in so we can check we get this back
self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'],
self.tenant_bar['id'],
self.role_member['id'])
# Now create a group role for this user as well
domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.assignment_api.create_domain(domain1['id'], domain1)
new_group = {'id': uuid.uuid4().hex, 'domain_id': domain1['id'],
'name': uuid.uuid4().hex}
self.identity_api.create_group(new_group['id'], new_group)
self.identity_api.add_user_to_group(self.user_foo['id'],
new_group['id'])
self.assignment_api.create_grant(
group_id=new_group['id'],
project_id=self.tenant_bar['id'],
role_id=self.role_admin['id'])
# Get a scoped token for the tenant
body_dict = _build_user_auth(
username='FOO',
password='foo2',
tenant_name="BAR")
scoped_token = self.controller.authenticate({}, body_dict)
tenant = scoped_token["access"]["token"]["tenant"]
roles = scoped_token["access"]["metadata"]["roles"]
self.assertEqual(self.tenant_bar['id'], tenant["id"])
self.assertIn(self.role_member['id'], roles)
self.assertIn(self.role_admin['id'], roles)
def test_auth_token_cross_domain_group_and_project(self):
"""Verify getting a token in cross domain group/project roles."""
# create domain, project and group and grant roles to user
domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.assignment_api.create_domain(domain1['id'], domain1)
project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
'domain_id': domain1['id']}
self.assignment_api.create_project(project1['id'], project1)
role_foo_domain1 = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex}
self.assignment_api.create_role(role_foo_domain1['id'],
role_foo_domain1)
role_group_domain1 = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex}
self.assignment_api.create_role(role_group_domain1['id'],
role_group_domain1)
self.assignment_api.add_user_to_project(project1['id'],
self.user_foo['id'])
new_group = {'id': uuid.uuid4().hex, 'domain_id': domain1['id'],
'name': uuid.uuid4().hex}
self.identity_api.create_group(new_group['id'], new_group)
self.identity_api.add_user_to_group(self.user_foo['id'],
new_group['id'])
self.assignment_api.create_grant(
user_id=self.user_foo['id'],
project_id=project1['id'],
role_id=self.role_member['id'])
self.assignment_api.create_grant(
group_id=new_group['id'],
project_id=project1['id'],
role_id=self.role_admin['id'])
self.assignment_api.create_grant(
user_id=self.user_foo['id'],
domain_id=domain1['id'],
role_id=role_foo_domain1['id'])
self.assignment_api.create_grant(
group_id=new_group['id'],
domain_id=domain1['id'],
role_id=role_group_domain1['id'])
# Get a scoped token for the tenant
body_dict = _build_user_auth(
username=self.user_foo['name'],
password=self.user_foo['password'],
tenant_name=project1['name'])
scoped_token = self.controller.authenticate({}, body_dict)
tenant = scoped_token["access"]["token"]["tenant"]
roles = scoped_token["access"]["metadata"]["roles"]
self.assertEqual(project1['id'], tenant["id"])
self.assertIn(self.role_member['id'], roles)
self.assertIn(self.role_admin['id'], roles)
self.assertNotIn(role_foo_domain1['id'], roles)
self.assertNotIn(role_group_domain1['id'], roles)
def test_belongs_to_no_tenant(self):
r = self.controller.authenticate(
{},
auth={
'passwordCredentials': {
'username': self.user_foo['name'],
'password': self.user_foo['password']
}
})
unscoped_token_id = r['access']['token']['id']
self.assertRaises(
exception.Unauthorized,
self.controller.validate_token,
dict(is_admin=True, query_string={'belongsTo': 'BAR'}),
token_id=unscoped_token_id)
def test_belongs_to(self):
body_dict = _build_user_auth(
username='FOO',
password='foo2',
tenant_name="BAR")
scoped_token = self.controller.authenticate({}, body_dict)
scoped_token_id = scoped_token['access']['token']['id']
self.assertRaises(
exception.Unauthorized,
self.controller.validate_token,
dict(is_admin=True, query_string={'belongsTo': 'me'}),
token_id=scoped_token_id)
self.assertRaises(
exception.Unauthorized,
self.controller.validate_token,
dict(is_admin=True, query_string={'belongsTo': 'BAR'}),
token_id=scoped_token_id)
def test_token_auth_with_binding(self):
self.config_fixture.config(group='token', bind=['kerberos'])
body_dict = _build_user_auth()
unscoped_token = self.controller.authenticate(
self.context_with_remote_user, body_dict)
# the token should have bind information in it
bind = unscoped_token['access']['token']['bind']
self.assertEqual('FOO', bind['kerberos'])
body_dict = _build_user_auth(
token=unscoped_token['access']['token'],
tenant_name='BAR')
# using unscoped token without remote user context fails
self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
self.empty_context, body_dict)
# using token with remote user context succeeds
scoped_token = self.controller.authenticate(
self.context_with_remote_user, body_dict)
# the bind information should be carried over from the original token
bind = scoped_token['access']['token']['bind']
self.assertEqual('FOO', bind['kerberos'])
def test_deleting_role_revokes_token(self):
role_controller = assignment.controllers.Role()
project1 = {'id': 'Project1', 'name': uuid.uuid4().hex,
'domain_id': DEFAULT_DOMAIN_ID}
self.assignment_api.create_project(project1['id'], project1)
role_one = {'id': 'role_one', 'name': uuid.uuid4().hex}
self.assignment_api.create_role(role_one['id'], role_one)
self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'], project1['id'], role_one['id'])
no_context = {}
# Get a scoped token for the tenant
body_dict = _build_user_auth(
username=self.user_foo['name'],
password=self.user_foo['password'],
tenant_name=project1['name'])
token = self.controller.authenticate(no_context, body_dict)
# Ensure it is valid
token_id = token['access']['token']['id']
self.controller.validate_token(
dict(is_admin=True, query_string={}),
token_id=token_id)
# Delete the role, which should invalidate the token
role_controller.delete_role(
dict(is_admin=True, query_string={}), role_one['id'])
# Check the token is now invalid
self.assertRaises(
exception.TokenNotFound,
self.controller.validate_token,
dict(is_admin=True, query_string={}),
token_id=token_id)
class AuthWithPasswordCredentials(AuthTest):
def setUp(self):
super(AuthWithPasswordCredentials, self).setUp()
def test_auth_invalid_user(self):
"""Verify exception is raised if invalid user."""
body_dict = _build_user_auth(
username=uuid.uuid4().hex,
password=uuid.uuid4().hex)
self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
{}, body_dict)
def test_auth_valid_user_invalid_password(self):
"""Verify exception is raised if invalid password."""
body_dict = _build_user_auth(
username="FOO",
password=uuid.uuid4().hex)
self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
{}, body_dict)
def test_auth_empty_password(self):
"""Verify exception is raised if empty password."""
body_dict = _build_user_auth(
username="FOO",
password="")
self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
{}, body_dict)
def test_auth_no_password(self):
"""Verify exception is raised if empty password."""
body_dict = _build_user_auth(username="FOO")
self.assertRaises(
exception.ValidationError,
self.controller.authenticate,
{}, body_dict)
def test_authenticate_blank_password_credentials(self):
"""Sending empty dict as passwordCredentials raises a 400 error."""
body_dict = {'passwordCredentials': {}, 'tenantName': 'demo'}
self.assertRaises(exception.ValidationError,
self.controller.authenticate,
{}, body_dict)
def test_authenticate_no_username(self):
"""Verify skipping username raises the right exception."""
body_dict = _build_user_auth(password="pass",
tenant_name="demo")
self.assertRaises(exception.ValidationError,
self.controller.authenticate,
{}, body_dict)
def test_bind_without_remote_user(self):
self.config_fixture.config(group='token', bind=['kerberos'])
body_dict = _build_user_auth(username='FOO', password='foo2',
tenant_name='BAR')
token = self.controller.authenticate({}, body_dict)
self.assertNotIn('bind', token['access']['token'])
def test_change_default_domain_id(self):
# If the default_domain_id config option is not the default then the
# user in auth data is from the new default domain.
# 1) Create a new domain.
new_domain_id = uuid.uuid4().hex
new_domain = {
'description': uuid.uuid4().hex,
'enabled': True,
'id': new_domain_id,
'name': uuid.uuid4().hex,
}
self.assignment_api.create_domain(new_domain_id, new_domain)
# 2) Create user "foo" in new domain with different password than
# default-domain foo.
new_user_id = uuid.uuid4().hex
new_user_password = uuid.uuid4().hex
new_user = {
'id': new_user_id,
'name': self.user_foo['name'],
'domain_id': new_domain_id,
'password': new_user_password,
'email': '[email protected]',
}
self.identity_api.create_user(new_user_id, new_user)
# 3) Update the default_domain_id config option to the new domain
self.config_fixture.config(group='identity',
default_domain_id=new_domain_id)
# 4) Authenticate as "foo" using the password in the new domain.
body_dict = _build_user_auth(
username=self.user_foo['name'],
password=new_user_password)
# The test is successful if this doesn't raise, so no need to assert.
self.controller.authenticate({}, body_dict)
class AuthWithRemoteUser(AuthTest):
def setUp(self):
super(AuthWithRemoteUser, self).setUp()
def test_unscoped_remote_authn(self):
"""Verify getting an unscoped token with external authn."""
body_dict = _build_user_auth(
username='FOO',
password='foo2')
local_token = self.controller.authenticate(
{}, body_dict)
body_dict = _build_user_auth()
remote_token = self.controller.authenticate(
self.context_with_remote_user, body_dict)
self.assertEqualTokens(local_token, remote_token)
def test_unscoped_remote_authn_jsonless(self):
"""Verify that external auth with invalid request fails."""
self.assertRaises(
exception.ValidationError,
self.controller.authenticate,
{'REMOTE_USER': 'FOO'},
None)
def test_scoped_remote_authn(self):
"""Verify getting a token with external authn."""
body_dict = _build_user_auth(
username='FOO',
password='foo2',
tenant_name='BAR')
local_token = self.controller.authenticate(
{}, body_dict)
body_dict = _build_user_auth(
tenant_name='BAR')
remote_token = self.controller.authenticate(
self.context_with_remote_user, body_dict)
self.assertEqualTokens(local_token, remote_token)
def test_scoped_nometa_remote_authn(self):
"""Verify getting a token with external authn and no metadata."""
body_dict = _build_user_auth(
username='TWO',
password='two2',
tenant_name='BAZ')
local_token = self.controller.authenticate(
{}, body_dict)
body_dict = _build_user_auth(tenant_name='BAZ')
remote_token = self.controller.authenticate(
{'environment': {'REMOTE_USER': 'TWO'}}, body_dict)
self.assertEqualTokens(local_token, remote_token)
def test_scoped_remote_authn_invalid_user(self):
"""Verify that external auth with invalid user fails."""
body_dict = _build_user_auth(tenant_name="BAR")
self.assertRaises(
exception.Unauthorized,
self.controller.authenticate,
{'environment': {'REMOTE_USER': uuid.uuid4().hex}},
body_dict)
def test_bind_with_kerberos(self):
self.config_fixture.config(group='token', bind=['kerberos'])
body_dict = _build_user_auth(tenant_name="BAR")
token = self.controller.authenticate(self.context_with_remote_user,
body_dict)
self.assertEqual('FOO', token['access']['token']['bind']['kerberos'])
def test_bind_without_config_opt(self):
self.config_fixture.config(group='token', bind=['x509'])
body_dict = _build_user_auth(tenant_name='BAR')
token = self.controller.authenticate(self.context_with_remote_user,
body_dict)
self.assertNotIn('bind', token['access']['token'])
class AuthWithTrust(AuthTest):
def setUp(self):
super(AuthWithTrust, self).setUp()
trust.Manager()
self.trust_controller = trust.controllers.TrustV3()
self.auth_v3_controller = auth.controllers.Auth()
self.trustor = self.user_foo
self.trustee = self.user_two
self.assigned_roles = [self.role_member['id'],
self.role_browser['id']]
for assigned_role in self.assigned_roles:
self.assignment_api.add_role_to_user_and_project(
self.trustor['id'], self.tenant_bar['id'], assigned_role)
self.sample_data = {'trustor_user_id': self.trustor['id'],
'trustee_user_id': self.trustee['id'],
'project_id': self.tenant_bar['id'],
'impersonation': True,
'roles': [{'id': self.role_browser['id']},
{'name': self.role_member['name']}]}
expires_at = timeutils.strtime(timeutils.utcnow() +
datetime.timedelta(minutes=10),
fmt=TIME_FORMAT)
self.create_trust(expires_at=expires_at)
def config_overrides(self):
super(AuthWithTrust, self).config_overrides()
self.config_fixture.config(group='trust', enabled=True)
def _create_auth_context(self, token_id):
token_ref = self.token_api.get_token(token_id)
auth_context = authorization.token_to_auth_context(
token_ref['token_data'])
return {'environment': {authorization.AUTH_CONTEXT_ENV: auth_context},
'token_id': token_id,
'host_url': HOST_URL}
def create_trust(self, expires_at=None, impersonation=True):
username = self.trustor['name']
password = 'foo2'
body_dict = _build_user_auth(username=username, password=password)
self.unscoped_token = self.controller.authenticate({}, body_dict)
context = self._create_auth_context(
self.unscoped_token['access']['token']['id'])
trust_data = copy.deepcopy(self.sample_data)
trust_data['expires_at'] = expires_at
trust_data['impersonation'] = impersonation
self.new_trust = self.trust_controller.create_trust(
context, trust=trust_data)['trust']
def build_v2_token_request(self, username, password):
body_dict = _build_user_auth(username=username, password=password)
self.unscoped_token = self.controller.authenticate({}, body_dict)
unscoped_token_id = self.unscoped_token['access']['token']['id']
request_body = _build_user_auth(token={'id': unscoped_token_id},
trust_id=self.new_trust['id'],
tenant_id=self.tenant_bar['id'])
return request_body
def test_create_trust_bad_data_fails(self):
context = self._create_auth_context(
self.unscoped_token['access']['token']['id'])
bad_sample_data = {'trustor_user_id': self.trustor['id'],
'project_id': self.tenant_bar['id'],
'roles': [{'id': self.role_browser['id']}]}
self.assertRaises(exception.ValidationError,
self.trust_controller.create_trust,
context, trust=bad_sample_data)
def test_create_trust_no_roles(self):
context = {'token_id': self.unscoped_token['access']['token']['id']}
self.sample_data['roles'] = []
self.assertRaises(exception.Forbidden,
self.trust_controller.create_trust,
context, trust=self.sample_data)
def test_create_trust(self):
self.assertEqual(self.trustor['id'], self.new_trust['trustor_user_id'])
self.assertEqual(self.trustee['id'], self.new_trust['trustee_user_id'])
role_ids = [self.role_browser['id'], self.role_member['id']]
self.assertTrue(timeutils.parse_strtime(self.new_trust['expires_at'],
fmt=TIME_FORMAT))
self.assertIn('%s/v3/OS-TRUST/' % HOST_URL,
self.new_trust['links']['self'])
self.assertIn('%s/v3/OS-TRUST/' % HOST_URL,
self.new_trust['roles_links']['self'])
for role in self.new_trust['roles']:
self.assertIn(role['id'], role_ids)
def test_create_trust_expires_bad(self):
self.assertRaises(exception.ValidationTimeStampError,
self.create_trust,
expires_at="bad")
self.assertRaises(exception.ValidationTimeStampError,
self.create_trust,
expires_at="")
self.assertRaises(exception.ValidationTimeStampError,
self.create_trust,
expires_at="Z")
def test_get_trust(self):
context = {'token_id': self.unscoped_token['access']['token']['id'],
'host_url': HOST_URL}
trust = self.trust_controller.get_trust(context,
self.new_trust['id'])['trust']
self.assertEqual(self.trustor['id'], trust['trustor_user_id'])
self.assertEqual(self.trustee['id'], trust['trustee_user_id'])
role_ids = [self.role_browser['id'], self.role_member['id']]
for role in self.new_trust['roles']:
self.assertIn(role['id'], role_ids)
def test_create_trust_no_impersonation(self):
self.create_trust(expires_at=None, impersonation=False)
self.assertEqual(self.trustor['id'], self.new_trust['trustor_user_id'])
self.assertEqual(self.trustee['id'], self.new_trust['trustee_user_id'])
self.assertIs(self.new_trust['impersonation'], False)
auth_response = self.fetch_v2_token_from_trust()
token_user = auth_response['access']['user']
self.assertEqual(token_user['id'], self.new_trust['trustee_user_id'])
# TODO(ayoung): Endpoints
def test_create_trust_impersonation(self):
self.create_trust(expires_at=None)
self.assertEqual(self.trustor['id'], self.new_trust['trustor_user_id'])
self.assertEqual(self.trustee['id'], self.new_trust['trustee_user_id'])
self.assertIs(self.new_trust['impersonation'], True)
auth_response = self.fetch_v2_token_from_trust()
token_user = auth_response['access']['user']
self.assertEqual(token_user['id'], self.new_trust['trustor_user_id'])
def test_token_from_trust_wrong_user_fails(self):
request_body = self.build_v2_token_request('FOO', 'foo2')
self.assertRaises(
exception.Forbidden,
self.controller.authenticate, {}, request_body)
def fetch_v2_token_from_trust(self):
request_body = self.build_v2_token_request('TWO', 'two2')
auth_response = self.controller.authenticate({}, request_body)
return auth_response
def fetch_v3_token_from_trust(self):
v3_password_data = {
'identity': {
"methods": ["password"],
"password": {
"user": {
"id": self.trustee["id"],
"password": self.trustee["password"]}}
},
'scope': {
'project': {
'id': self.tenant_baz['id']}}}
auth_response = (self.auth_v3_controller.authenticate_for_token
({'environment': {},
'query_string': {}},
v3_password_data))
token = auth_response.headers['X-Subject-Token']
v3_req_with_trust = {
"identity": {
"methods": ["token"],
"token": {"id": token}},
"scope": {
"OS-TRUST:trust": {"id": self.new_trust['id']}}}
token_auth_response = (self.auth_v3_controller.authenticate_for_token
({'environment': {},
'query_string': {}},
v3_req_with_trust))
return token_auth_response
def test_create_v3_token_from_trust(self):
auth_response = self.fetch_v3_token_from_trust()
trust_token_user = auth_response.json['token']['user']
self.assertEqual(self.trustor['id'], trust_token_user['id'])
trust_token_trust = auth_response.json['token']['OS-TRUST:trust']
self.assertEqual(trust_token_trust['id'], self.new_trust['id'])
self.assertEqual(self.trustor['id'],
trust_token_trust['trustor_user']['id'])
self.assertEqual(self.trustee['id'],
trust_token_trust['trustee_user']['id'])
trust_token_roles = auth_response.json['token']['roles']
self.assertEqual(2, len(trust_token_roles))
def test_v3_trust_token_get_token_fails(self):
auth_response = self.fetch_v3_token_from_trust()
trust_token = auth_response.headers['X-Subject-Token']
v3_token_data = {'identity': {
'methods': ['token'],
'token': {'id': trust_token}
}}
self.assertRaises(
exception.Forbidden,
self.auth_v3_controller.authenticate_for_token,
{'environment': {},
'query_string': {}}, v3_token_data)
def test_token_from_trust(self):
auth_response = self.fetch_v2_token_from_trust()
self.assertIsNotNone(auth_response)
self.assertEqual(2,
len(auth_response['access']['metadata']['roles']),
"user_foo has three roles, but the token should"
" only get the two roles specified in the trust.")
def assert_token_count_for_trust(self, expected_value):
tokens = self.trust_controller.token_api._list_tokens(
self.trustee['id'], trust_id=self.new_trust['id'])
token_count = len(tokens)
self.assertEqual(expected_value, token_count)
def test_delete_tokens_for_user_invalidates_tokens_from_trust(self):
self.assert_token_count_for_trust(0)
self.fetch_v2_token_from_trust()
self.assert_token_count_for_trust(1)
self.token_api.delete_tokens_for_user(self.trustee['id'])
self.assert_token_count_for_trust(0)
def test_token_from_trust_cant_get_another_token(self):
auth_response = self.fetch_v2_token_from_trust()
trust_token_id = auth_response['access']['token']['id']
request_body = _build_user_auth(token={'id': trust_token_id},
tenant_id=self.tenant_bar['id'])
self.assertRaises(
exception.Forbidden,
self.controller.authenticate, {}, request_body)
def test_delete_trust_revokes_token(self):
context = self._create_auth_context(
self.unscoped_token['access']['token']['id'])
self.fetch_v2_token_from_trust()
trust_id = self.new_trust['id']
tokens = self.token_api._list_tokens(self.trustor['id'],
trust_id=trust_id)
self.assertEqual(1, len(tokens))
self.trust_controller.delete_trust(context, trust_id=trust_id)
tokens = self.token_api._list_tokens(self.trustor['id'],
trust_id=trust_id)
self.assertEqual(0, len(tokens))
def test_token_from_trust_with_no_role_fails(self):
for assigned_role in self.assigned_roles:
self.assignment_api.remove_role_from_user_and_project(
self.trustor['id'], self.tenant_bar['id'], assigned_role)
request_body = self.build_v2_token_request('TWO', 'two2')
self.assertRaises(
exception.Forbidden,
self.controller.authenticate, {}, request_body)
def test_expired_trust_get_token_fails(self):
expiry = "1999-02-18T10:10:00Z"
self.create_trust(expiry)
request_body = self.build_v2_token_request('TWO', 'two2')
self.assertRaises(
exception.Forbidden,
self.controller.authenticate, {}, request_body)
def test_token_from_trust_with_wrong_role_fails(self):
self.assignment_api.add_role_to_user_and_project(
self.trustor['id'],
self.tenant_bar['id'],
self.role_other['id'])
for assigned_role in self.assigned_roles:
self.assignment_api.remove_role_from_user_and_project(
self.trustor['id'], self.tenant_bar['id'], assigned_role)
request_body = self.build_v2_token_request('TWO', 'two2')
self.assertRaises(
exception.Forbidden,
self.controller.authenticate, {}, request_body)
class TokenExpirationTest(AuthTest):
@mock.patch.object(timeutils, 'utcnow')
def _maintain_token_expiration(self, mock_utcnow):
"""Token expiration should be maintained after re-auth & validation."""
now = datetime.datetime.utcnow()
mock_utcnow.return_value = now
r = self.controller.authenticate(
{},
auth={
'passwordCredentials': {
'username': self.user_foo['name'],
'password': self.user_foo['password']
}
})
unscoped_token_id = r['access']['token']['id']
original_expiration = r['access']['token']['expires']
mock_utcnow.return_value = now + datetime.timedelta(seconds=1)
r = self.controller.validate_token(
dict(is_admin=True, query_string={}),
token_id=unscoped_token_id)
self.assertEqual(original_expiration, r['access']['token']['expires'])
mock_utcnow.return_value = now + datetime.timedelta(seconds=2)
r = self.controller.authenticate(
{},
auth={
'token': {
'id': unscoped_token_id,
},
'tenantId': self.tenant_bar['id'],
})
scoped_token_id = r['access']['token']['id']
self.assertEqual(original_expiration, r['access']['token']['expires'])
mock_utcnow.return_value = now + datetime.timedelta(seconds=3)
r = self.controller.validate_token(
dict(is_admin=True, query_string={}),
token_id=scoped_token_id)
self.assertEqual(original_expiration, r['access']['token']['expires'])
def test_maintain_uuid_token_expiration(self):
self.config_fixture.config(group='signing', token_format='UUID')
self._maintain_token_expiration()
class AuthCatalog(tests.SQLDriverOverrides, AuthTest):
"""Tests for the catalog provided in the auth response."""
def config_files(self):
config_files = super(AuthCatalog, self).config_files()
# We need to use a backend that supports disabled endpoints, like the
# SQL backend.
config_files.append(tests.dirs.tests_conf('backend_sql.conf'))
return config_files
def _create_endpoints(self):
def create_endpoint(service_id, region, **kwargs):
id_ = uuid.uuid4().hex
ref = {
'id': id_,
'interface': 'public',
'region': region,
'service_id': service_id,
'url': 'http://localhost/%s' % uuid.uuid4().hex,
}
ref.update(kwargs)
self.catalog_api.create_endpoint(id_, ref)
return ref
# Create a service for use with the endpoints.
def create_service(**kwargs):
id_ = uuid.uuid4().hex
ref = {
'id': id_,
'name': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
}
ref.update(kwargs)
self.catalog_api.create_service(id_, ref)
return ref
enabled_service_ref = create_service(enabled=True)
disabled_service_ref = create_service(enabled=False)
region = uuid.uuid4().hex
# Create endpoints
enabled_endpoint_ref = create_endpoint(
enabled_service_ref['id'], region)
create_endpoint(
enabled_service_ref['id'], region, enabled=False,
interface='internal')
create_endpoint(
disabled_service_ref['id'], region)
return enabled_endpoint_ref
def test_auth_catalog_disabled_endpoint(self):
"""On authenticate, get a catalog that excludes disabled endpoints."""
endpoint_ref = self._create_endpoints()
# Authenticate
body_dict = _build_user_auth(
username='FOO',
password='foo2',
tenant_name="BAR")
token = self.controller.authenticate({}, body_dict)
# Check the catalog
self.assertEqual(1, len(token['access']['serviceCatalog']))
endpoint = token['access']['serviceCatalog'][0]['endpoints'][0]
self.assertEqual(
1, len(token['access']['serviceCatalog'][0]['endpoints']))
exp_endpoint = {
'id': endpoint_ref['id'],
'publicURL': endpoint_ref['url'],
'region': endpoint_ref['region'],
}
self.assertEqual(exp_endpoint, endpoint)
def test_validate_catalog_disabled_endpoint(self):
"""On validate, get back a catalog that excludes disabled endpoints."""
endpoint_ref = self._create_endpoints()
# Authenticate
body_dict = _build_user_auth(
username='FOO',
password='foo2',
tenant_name="BAR")
token = self.controller.authenticate({}, body_dict)
# Validate
token_id = token['access']['token']['id']
validate_ref = self.controller.validate_token(
dict(is_admin=True, query_string={}),
token_id=token_id)
# Check the catalog
        self.assertEqual(1, len(validate_ref['access']['serviceCatalog']))
        endpoint = validate_ref['access']['serviceCatalog'][0]['endpoints'][0]
        self.assertEqual(
            1, len(validate_ref['access']['serviceCatalog'][0]['endpoints']))
exp_endpoint = {
'id': endpoint_ref['id'],
'publicURL': endpoint_ref['url'],
'region': endpoint_ref['region'],
}
self.assertEqual(exp_endpoint, endpoint)
class NonDefaultAuthTest(tests.TestCase):
def test_add_non_default_auth_method(self):
self.config_fixture.config(group='auth',
methods=['password', 'token', 'custom'])
config.setup_authentication()
self.assertTrue(hasattr(CONF.auth, 'custom'))
| apache-2.0 | 159,340,296,657,682,560 | 39.839122 | 79 | 0.575406 | false | 4.143758 | true | false | false |
kritak/textdungeon | Internal/pricerandomtester.py | 1 | 1114 | """testing random frequency of items based on item price.
a cheap item is more common, an expensive item is very rare"""
import random
d = {"healing":50,
"berserk":60,
"clever":100,
"swiftness":100,
"might":100,
"awesomeness":500,
}
# reverse d
dr = [[1/b,a] for [a,b] in d.items()] # list of [weight, drinkname], weight = 1/price
dr.sort() # sort by weight, ascending (most expensive drink first)
pricelist1 = [a for [a,b] in dr] # list of weights only
drinklist = [b for [a,b] in dr] # list of drink names only, in the same order
pricelist2 = [] # cumulative (added up) weights
kprice = 0
for p in pricelist1:
kprice += p
pricelist2.append(kprice)
print(pricelist1, pricelist2)
result = {}
print("calculating please wait...")
for x in range(10000):
    y = random.random()*(pricelist2[-1]) # 0 up to the total cumulative weight
for p in pricelist2:
if y < p:
drinkname = drinklist[pricelist2.index(p)]
if drinkname in result:
result[drinkname] += 1
else:
result[drinkname] = 1
break
print(result)
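# A minimal cross-check of the same idea (an addition for illustration, not part
# of the original tester): random.choices (Python 3.6+) accepts the weights
# directly, so the inverse prices from d can be used without building a
# cumulative list by hand.
from collections import Counter
names = list(d.keys())
weights = [1 / price for price in d.values()]
picks = Counter(random.choices(names, weights=weights, k=10000))
print("cross-check with random.choices:", dict(picks))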
| gpl-2.0 | -7,916,032,930,120,072,000 | 24.906977 | 66 | 0.561939 | false | 3.375758 | false | false | false |
unt-libraries/django-name | name/api/serializers.py | 1 | 6208 | """Serializers for the Name App Models.
This module leverages the Django Rest Framework's Serializer
components to build JSON representations of the models defined
in this app.
These JSON representations are designed to be backwards compatible
with the API documented in previous versions.
For documentation regarding the Django Rest Framework Serializers go
to http://www.django-rest-framework.org/api-guide/serializers/
"""
from rest_framework import serializers
from .. import models
class IdentifierSerializer(serializers.ModelSerializer):
"""Serializer for the Identifier Model.
The following fields have been renamed for backwards compatibility
with previous versions of the API.
label -> identifier.type
href -> identifier.value
"""
label = serializers.StringRelatedField(source='type')
href = serializers.CharField(source='value')
class Meta:
model = models.Identifier
fields = ('label', 'href')
class NoteSerializer(serializers.ModelSerializer):
"""Serializer for the Note Model."""
type = serializers.SerializerMethodField()
class Meta:
model = models.Note
fields = ('note', 'type')
def get_type(self, obj):
"""Sets the type field.
Returns the Note Type label, instead of the Note Type ID, which
is the default behavior.
"""
return obj.get_note_type_label().lower()
class VariantSerializer(serializers.ModelSerializer):
"""Serializer for the Variant Model."""
type = serializers.SerializerMethodField()
class Meta:
model = models.Variant
fields = ('variant', 'type')
def get_type(self, obj):
"""Sets the type field.
Returns the Variant Type label, instead of the Variant Type ID,
which is the default behavior.
"""
return obj.get_variant_type_label().lower()
class NameSerializer(serializers.ModelSerializer):
"""Serializer for the Name Model.
This serializes the the Name model to include detailed information
about the object, including the related Variants, Notes, and
Identifiers.
The following fields have been renamed for backwards compatibility
with previous versions of the API.
authoritative_name -> name.name
begin_date -> name.begin
end_date -> name.end
The identifier field is the absolute url to the name detail
page for the model instance.
"""
authoritative_name = serializers.CharField(source='name')
begin_date = serializers.CharField(source='begin')
name_type = serializers.SerializerMethodField()
end_date = serializers.CharField(source='end')
links = IdentifierSerializer(many=True, source='identifier_set')
notes = NoteSerializer(many=True, source='note_set')
variants = VariantSerializer(many=True, source='variant_set')
identifier = serializers.HyperlinkedIdentityField(
view_name='name:detail', lookup_field='name_id')
class Meta:
model = models.Name
fields = ('authoritative_name', 'name_type', 'begin_date', 'end_date',
'identifier', 'links', 'notes', 'variants',)
def get_name_type(self, obj):
"""Sets the name_type field.
Returns the Name Type label, instead of the Name Type ID, which
is the default behavior.
"""
return obj.get_name_type_label().lower()
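# Illustrative output only (the values below are assumptions, not project
# fixtures): serializing a Name instance with NameSerializer yields roughly
#   {
#     "authoritative_name": "Smith, Jane",
#     "name_type": "personal",
#     "begin_date": "1900",
#     "end_date": "1950",
#     "identifier": "http://example.com/name/nm0000001/",
#     "links": [{"label": "...", "href": "..."}],
#     "notes": [{"note": "...", "type": "..."}],
#     "variants": [{"variant": "...", "type": "..."}]
#   }
# The hyperlinked identifier field needs a request in the serializer context,
# e.g. NameSerializer(name, context={'request': request}).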
class NameSearchSerializer(serializers.ModelSerializer):
"""Name Model Serializer for the Name search/autocompletion
endpoint.
The following fields have been renamed for backwards compatibility
with previous versions of the API.
begin_date -> name.begin
type -> name.get_name_type_label()
label -> Formats name.name and name.disambiguation.
The URL field is the absolute url to the name detail page for
the model instance.
"""
begin_date = serializers.CharField(source='begin')
type = serializers.SerializerMethodField()
label = serializers.SerializerMethodField()
URL = serializers.HyperlinkedIdentityField(
view_name='name:detail', lookup_field='name_id')
class Meta:
model = models.Name
fields = ('id', 'name', 'label', 'type', 'begin_date',
'disambiguation', 'URL')
def get_type(self, obj):
"""Sets the type field.
Returns the Name Type label, instead of the Name Type ID, which
is the default behavior.
"""
return obj.get_name_type_label().lower()
def get_label(self, obj):
"""Sets the label field.
Returns a string in the form of
"<name.name> (<name.disambiguation>)"
"""
if obj.disambiguation:
return '{0} ({1})'.format(obj.name, obj.disambiguation)
return obj.name
class LocationSerializer(serializers.ModelSerializer):
"""Serailizer for the Locations Model.
This includes the related Name via the belong_to_name field. The
belong_to_name field uses the NameSerializer to nest the related
Name model.
"""
belong_to_name = NameSerializer()
class Meta:
model = models.Location
fields = '__all__'
class NameStatisticsMonthSerializer(serializers.Serializer):
"""Serializer for the NameStatisticsMonth object."""
total = serializers.IntegerField()
total_to_date = serializers.IntegerField()
month = serializers.DateTimeField()
class NameStatisticsTypeSerializer(serializers.Serializer):
"""Serializer for the NameStatisticsType object.
This serializer utilizes the NameStatisticsTypeMonth to serialize
the NameStatisticsMonth instances that the object instance contains.
"""
running_total = serializers.IntegerField()
stats = NameStatisticsMonthSerializer(many=True)
class NameStatisticsSerializer(serializers.Serializer):
"""Serializer for the NameStatistics object.
This serializer utilizes the NameStatisticsTypeSerializer to
serialize the NameStatisticsType instances that the object instance
contains.
"""
created = NameStatisticsTypeSerializer()
modified = NameStatisticsTypeSerializer()
name_type_totals = serializers.DictField()
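# Sketch of typical usage (added for illustration and never called by the app;
# the helper name is made up, the serializer and field names come from above).
def _example_render_names(request):
    """Serialize all Name objects to JSON using NameSearchSerializer."""
    from rest_framework.renderers import JSONRenderer
    names = models.Name.objects.all()
    serializer = NameSearchSerializer(
        names, many=True, context={'request': request})
    return JSONRenderer().render(serializer.data)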
| bsd-3-clause | -1,052,281,697,192,771,800 | 31.502618 | 78 | 0.6875 | false | 4.568065 | false | false | false |
mithron/opendatahack | web/main.py | 1 | 1805 | from datetime import datetime
import json
import os
from urlparse import urlparse
from pymongo.connection import Connection
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from tornado.options import define, options
MONGO_URL = "" # found with $>heroku config
we_live = True
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/list/", MainHandler),
(r"/([0-9]+)/", SchoolHandler)
]
settings = dict(
autoescape=None,
)
tornado.web.Application.__init__(self, handlers, **settings)
if we_live:
self.con = Connection(MONGO_URL)
self.database = self.con[urlparse(MONGO_URL).path[1:]]
else:
self.con = Connection('localhost', 27017)
self.database = self.con["moscow"]
class BaseHandler(tornado.web.RequestHandler):
@property
def db(self):
return self.application.database
class SchoolHandler(BaseHandler):
def get(self, inn=None):
if inn:
suppliers = list(self.db["suppliers"].find({'inn': int(inn)}, fields={"_id": False}))
self.write(json.dumps(suppliers, ensure_ascii=False, encoding='utf8'))
else:
self.write("[]")
class MainHandler(BaseHandler):
def get(self):
schools = list(self.db["suppliers"].find(fields={"full_name": True, "inn": True, "_id": False}))
self.write(json.dumps(schools, ensure_ascii=False, encoding='utf8'))
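# Rough shape of the two endpoints (illustrative; the actual field values depend
# on the "suppliers" collection, which is not shown here):
#   GET /list/    -> [{"full_name": "...", "inn": 1234567890}, ...]
#   GET /<inn>/   -> full supplier documents matching that INN, or [] if none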
def main():
tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(int(os.environ.get("PORT", 8888)))
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
    main()
| mit | 941,218,873,183,261,400 | 26.363636 | 104 | 0.628255 | false | 3.752599 | false | false | false |
CloudBoltSoftware/cloudbolt-forge | ui_extensions/veeam_admin_extension/restore_backup.py | 1 | 1717 | import requests
import time
from xml.dom import minidom
from common.methods import set_progress
from xui.veeam.veeam_admin import VeeamManager
def run(server, *args, **kwargs):
set_progress(f"Starting Veeam Backup restoration... ")
veeam = VeeamManager()
server_ci = veeam.get_connection_info()
url = f'http://{server_ci.ip}:9399/api/vmRestorePoints/' + \
kwargs.get('restore_point_href') + '?action=restore'
session_id = veeam.get_veeam_server_session_id()
header = {"X-RestSvcSessionId": session_id}
response = requests.post(url=url, headers=header)
task = minidom.parseString(response.content.decode('utf-8'))
items = task.getElementsByTagName('Task')[0].attributes.items()
restoration_url = [item for item in items if item[0] == 'Href'][0][-1]
def check_state():
response = requests.get(restoration_url, headers=header)
dom = minidom.parseString(response.content.decode('utf-8'))
state = dom.getElementsByTagName('State')[0]
child = state.firstChild
return child
    # Wait for the restoration to complete.
while check_state().data == 'Running':
# wait
set_progress("Waiting for restoration to complete...")
time.sleep(10)
if check_state().data == 'Finished':
set_progress("Server restoration completed successfully")
return "SUCCESS", "Server restoration completed successfully", ""
else:
set_progress("Server restoration didn't complete successfully")
return "FAILURE", "", "Server restoration didn't complete successfully"
| apache-2.0 | 4,563,522,579,595,640,300 | 38.022727 | 83 | 0.630169 | false | 4.127404 | false | false | false |