code | repo_name | path | language | license | size
---|---|---|---|---|---|
# originally from
# http://code.google.com/p/django-syncr/source/browse/trunk/app/delicious.py
# Ideally django-syncr would be installed as an external app and this code
# would inherit from it, but django-syncr has no setup.py file (so it cannot be
# added to src/pinax/requirements/external_apps.txt) and has not been migrated
# to git (so a setup.py cannot be added in a fork of the svn trunk)
import time, datetime, calendar
import httplib
import urllib, urllib2
import base64
#from syncr.delicious.models import Bookmark
from bookmarks.models import Bookmark, BookmarkInstance
from bookmarks.forms import BookmarkInstanceForm
try:
import xml.etree.ElementTree as ET
except ImportError:
import elementtree.ElementTree as ET
class DeliciousAPI:
"""
DeliciousAPI is a bare-bones interface to the del.icio.us API. It's
used by DeliciousSyncr objects and it's not recommended to use it
directly.
"""
_deliciousApiHost = 'https://api.del.icio.us/'
_deliciousApiURL = 'https://api.del.icio.us/v1/'
def __init__(self, user, passwd):
"""
Initialize a DeliciousAPI object.
Required arguments
user: The del.icio.us username as a string.
passwd: The username's password as a string.
"""
self.user = user
self.passwd = passwd
# pm = urllib2.HTTPPasswordMgrWithDefaultRealm()
# pm.add_password(None, 'https://' + self._deliciousApiHost, self.user, self.passwd)
# handler = urllib2.HTTPBasicAuthHandler(pm)
# self.opener = urllib2.build_opener(handler)
def _request(self, path, params=None):
# time.sleep(1.5)
# if params:
# post_data = urllib.urlencode(params)
# url = self._deliciousApiURL + path + post_data
# else:
# url = self._deliciousApiURL + path
# request = urllib2.Request(url)
# request.add_header('User-Agent', 'django/syncr.app.delicious')
# credentials = base64.encodestring("%s:%s" % (self.user, self.passwd))
# request.add_header('Authorization', ('Basic %s' % credentials))
# f = self.opener.open(request)
        # XXX: temporary local fixture used instead of a live API request
        # (see the commented-out request code above)
        f = open('/home/julia/hacktivism/testing-parsing-bookmarks/all.xml')
return ET.parse(f)
class DeliciousSyncr:
"""
DeliciousSyncr objects sync del.icio.us bookmarks to the Django
backend. The constructor requires a username and password for
authenticated access to the API.
There are three ways to sync:
- All bookmarks for the user
- Only recent bookmarks for the user
- Bookmarks based on a limited search/query functionality. Currently
based on date, tag, and URL.
This app requires the excellent ElementTree, which is included in
Python 2.5. Otherwise available at:
http://effbot.org/zone/element-index.htm
"""
def __init__(self, username, password):
"""
Construct a new DeliciousSyncr.
Required arguments
username: a del.icio.us username
password: the user's password
"""
self.delicious = DeliciousAPI(username, password)
def clean_tags(self, tags):
"""
Utility method to clean up del.icio.us tags, removing double
quotes, duplicate tags and return a unicode string.
Required arguments
tags: a tag string
"""
tags = tags.lower().replace('\"', '').split(' ')
tags = set(tags)
tags = " ".join(tags)
return u'%s' % tags
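    # Example: clean_tags('Python "Django" python') returns u'django python'
    # (duplicates collapse via set(), so the resulting order is arbitrary).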
def _syncPost(self, post_elem, user):
        time_lst = time.strptime(post_elem.attrib['time'], "%Y-%m-%dT%H:%M:%SZ")
        time_obj = datetime.datetime(*time_lst[0:6])
tags = self.clean_tags(post_elem.attrib['tag'])
try:
extended = post_elem.attrib['extended']
except KeyError:
extended = ''
default_dict = {
'description': post_elem.attrib['description'],
'tags': tags,
'url': post_elem.attrib['href'],
            # Is the hash attrib unique to the post/URL or post/username ?!
            'post_hash': post_elem.attrib['hash'],
'saved_date': time_obj,
'extended_info': extended,
}
# Save only shared bookmarks
# try:
# is_shared = post_elem.attrib['shared'] # Only set, when it isn't shared
# except KeyError:
# obj, created = Bookmark.objects.get_or_create(
# post_hash=post_hash, defaults=default_dict)
# return obj
# return None
# to save pinax Bookmark
        # keep the description only if it survives a latin-1 round-trip
        try:
            unicode(default_dict['description'].decode('latin-1'))
        except UnicodeError:
            default_dict['description'] = ''
print default_dict['description']
        bookmark_instance_form = BookmarkInstanceForm(user, default_dict)
if bookmark_instance_form.is_valid():
bookmark_instance = bookmark_instance_form.save(commit=False)
bookmark_instance.user = user
bookmark_instance.save()
print bookmark_instance
bookmark = bookmark_instance.bookmark
try:
headers = {
"Accept" : "text/xml,application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5",
"Accept-Language" : "en-us,en;q=0.5",
"Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.7",
"Connection" : "close",
##"User-Agent": settings.URL_VALIDATOR_USER_AGENT
}
req = urllib2.Request(bookmark.get_favicon_url(force=True), None, headers)
u = urllib2.urlopen(req)
has_favicon = True
            except Exception:
has_favicon = False
bookmark.has_favicon = has_favicon
bookmark.favicon_checked = datetime.datetime.now()
# bookmark.added = bookmark['add_date']
bookmark.save()
# print bookmark
else:
print "bookmark_instance_form no es valido"
return
    def syncRecent(self, user, count=15, tag=None):
        """
        Synchronize the user's recent bookmarks.
        Required arguments
        user: the django User to save the bookmarks for.
        Optional arguments:
        count: The number of bookmarks to return, default 15, max 100.
        tag: A string. Limit to recent bookmarks that match this tag.
        """
        params = {'count': count}
        if tag: params['tag'] = tag
        result = self.delicious._request('posts/recent?', params)
        root = result.getroot()
        for post in list(root):
            self._syncPost(post, user)
def syncAll(self, user, tag=None):
"""
Synchronize all of the user's bookmarks. WARNING this may take
a while! Excessive use may get you throttled.
        Required arguments
        user: the django User to save the bookmarks for.
        Optional arguments
        tag: A string. Limit to all bookmarks that match this tag.
"""
params = dict()
if tag: params = {'tag': tag}
result = self.delicious._request('posts/all?', params)
root = result.getroot()
for post in list(root):
self._syncPost(post, user)
def datetime2delicious(self, dt):
"""
Utility method to convert a Python datetime to a string format
suitable for the del.icio.us API.
Required arguments
dt: a datetime object
"""
return dt.strftime("%Y-%m-%dT%H:%M:%SZ")
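    # Example: datetime2delicious(datetime.datetime(2009, 5, 17, 12, 30))
    # returns '2009-05-17T12:30:00Z', the same format _syncPost parses back.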
    def syncBookmarks(self, user, **kwargs):
        """
        Synchronize bookmarks. If no arguments are used, today's
        bookmarks will be sync'd.
        Required arguments
        user: the django User to save the bookmarks for.
        Optional keyword arguments
        date: A datetime object. Sync only bookmarks from this date.
        tag: A string. Limit to bookmarks matching this tag.
        url: A string. Limit to bookmarks matching this URL.
        """
        params = kwargs
        if 'date' in params:
            params['date'] = self.datetime2delicious(params['date'])
        result = self.delicious._request('posts/get?', params)
        root = result.getroot()
        for post in list(root):
            self._syncPost(post, user)
| duy/pinax-syncr-delicious | delicious/delicious.py | Python | gpl-3.0 | 8,100 |
"""
Module to handle distortions in diffraction patterns.
"""
import numpy as np
import scipy.optimize
def filter_ring(points, center, rminmax):
"""Filter points to be in a certain radial distance range from center.
Parameters
----------
points : np.ndarray
Candidate points.
center : np.ndarray or tuple
Center position.
rminmax : tuple
Tuple of min and max radial distance.
Returns
-------
: np.ndarray
List of filtered points, two column array.
"""
try:
# points have to be 2D array with 2 columns
assert(isinstance(points, np.ndarray))
assert(points.shape[1] == 2)
assert(len(points.shape) == 2)
# center can either be tuple or np.array
center = np.array(center)
center = np.reshape(center, 2)
rminmax = np.array(rminmax)
rminmax = np.reshape(rminmax, 2)
    except (AssertionError, ValueError):
raise TypeError('Something wrong with the input!')
# calculate radii
rs = np.sqrt( np.square(points[:,0]-center[0]) + np.square(points[:,1]-center[1]) )
# filter by given limits
sel = (rs>=rminmax[0])*(rs<=rminmax[1])
if sel.any():
return points[sel]
else:
return None
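# A small usage sketch (synthetic points): keep only the points whose
# distance from (0, 0) lies between 1 and 2.
#
#   pts = np.array([[0.5, 0.0], [1.5, 0.0], [3.0, 0.0]])
#   filter_ring(pts, (0.0, 0.0), (1.0, 2.0))   # -> array([[1.5, 0.0]])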
def points_topolar(points, center):
"""Convert points to polar coordinate system.
Can be either in pixel or real dim, but should be the same for points and center.
Parameters
----------
points : np.ndarray
Positions as two column array.
center : np.ndarray or tuple
Origin of the polar coordinate system.
Returns
-------
: np.ndarray
Positions in polar coordinate system as two column array (r, theta).
"""
try:
# points have to be 2D array with 2 columns
assert(isinstance(points, np.ndarray))
assert(points.shape[1] == 2)
assert(len(points.shape) == 2)
# center can either be tuple or np.array
center = np.array(center)
center = np.reshape(center, 2)
    except (AssertionError, ValueError):
raise TypeError('Something wrong with the input!')
# calculate radii
rs = np.sqrt( np.square(points[:,0]-center[0]) + np.square(points[:,1]-center[1]) )
# calculate angle
thes = np.arctan2(points[:,1]-center[1], points[:,0]-center[0])
return np.array( [rs, thes] ).transpose()
def residuals_center( param, data):
"""Residual function for minimizing the deviations from the mean radial distance.
Parameters
----------
param : np.ndarray
The center to optimize.
data : np.ndarray
The points in x,y coordinates of the original image.
Returns
-------
: np.ndarray
Residuals.
"""
# manually calculating the radii, as we do not need the thetas
rs = np.sqrt( np.square(data[:,0]-param[0]) + np.square(data[:,1]-param[1]) )
return rs-np.mean(rs)
def optimize_center(points, center, maxfev=1000, verbose=None):
"""Optimize the center by minimizing the sum of square deviations from the mean radial distance.
Parameters
----------
points : np.ndarray
The points to which the optimization is done (x,y coords in org image).
center : np.ndarray or tuple
Initial center guess.
maxfev : int
Max number of iterations forwarded to scipy.optimize.leastsq().
verbose : bool
Set to get verbose output.
Returns
-------
: np.ndarray
The optimized center.
"""
try:
# points have to be 2D array with 2 columns
assert(isinstance(points, np.ndarray))
assert(points.shape[1] == 2)
assert(len(points.shape) == 2)
# center can either be tuple or np.array
center = np.array(center)
center = np.reshape(center, 2)
    except (AssertionError, ValueError):
raise TypeError('Something wrong with the input!')
# run the optimization
    # note: args must be a tuple, otherwise leastsq unpacks the array itself
    popt, flag = scipy.optimize.leastsq(residuals_center, center, args=(points,), maxfev=maxfev)
if flag not in [1, 2, 3, 4]:
print('WARNING: center optimization failed.')
    if verbose:
        print('optimized center: ({}, {})'.format(popt[0], popt[1]))
return popt
def rad_dis(theta, alpha, beta, order=2):
"""Radial distortion due to ellipticity or higher order distortion.
Relative distortion, to be multiplied with radial distance.
Parameters
----------
theta : np.ndarray
Angles at which to evaluate. Must be float.
alpha : float
Orientation of major axis.
beta : float
        Strength of distortion (beta = (1-r_min/r_max)/(1+r_min/r_max)).
order : int
Order of distortion.
Returns
-------
: np.ndarray
Distortion factor.
"""
return (1.-np.square(beta))/np.sqrt(1.+np.square(beta)-2.*beta*np.cos(order*(theta+alpha)))
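# Worked example: for order=2, beta=0.1 and alpha=0 the factor is
# (1-0.01)/sqrt(1.01-0.2*cos(2*theta)), i.e. 1.1 at theta=0 and 0.9 at
# theta=pi/2; so r_max/r_min = 11/9 and indeed beta = (1-9/11)/(1+9/11) = 0.1.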
def residuals_dis(param, points, ns):
"""Residual function for distortions.
Parameters
----------
param : np.ndarray
Parameters for distortion.
points : np.ndarray
Points to fit to.
ns : tuple
List of orders to account for.
Returns
-------
: np.ndarray
Residuals.
"""
est = param[0]*np.ones(points[:, 1].shape)
for i in range(len(ns)):
        est *= rad_dis(points[:, 1], param[i*2+1], param[i*2+2], ns[i])
return points[:, 0] - est
def optimize_distortion(points, ns, maxfev=1000, verbose=False):
"""Optimize distortions.
The orders in the list ns are first fitted subsequently and the result is refined in a final fit simultaneously fitting all orders.
Parameters
----------
points : np.ndarray
Points to optimize to (in polar coords).
ns : tuple
List of orders to correct for.
maxfev : int
Max number of iterations forwarded to scipy.optimize.leastsq().
verbose : bool
Set for verbose output.
Returns
-------
: np.ndarray
Optimized parameters according to ns.
"""
try:
assert(isinstance(points, np.ndarray))
assert(points.shape[1] == 2)
# check points to be sufficient for fitting
assert(points.shape[0] >= 3)
# check orders
assert(len(ns)>=1)
    except (AssertionError, ValueError):
raise TypeError('Something wrong with the input!')
# init guess for full fit
init_guess = np.ones(len(ns)*2+1)
init_guess[0] = np.mean(points[:,0])
# make a temporary copy
points_tmp = np.copy(points)
if verbose:
print('correction for {} order distortions.'.format(ns))
print('starting with subsequent fitting:')
# subsequently fit the orders
for i in range(len(ns)):
# optimize order to points_tmp
popt, flag = scipy.optimize.leastsq(residuals_dis, np.array((init_guess[0], 0.1, 0.1)),
args=(points_tmp, (ns[i],)), maxfev=maxfev)
if flag not in [1, 2, 3, 4]:
print('WARNING: optimization of distortions failed.')
# information
if verbose:
print('fitted order {}: R={} alpha={} beta={}'.format(ns[i], popt[0], popt[1], popt[2]))
# save for full fit
init_guess[i*2+1] = popt[1]
init_guess[i*2+2] = popt[2]
# do correction
points_tmp[:, 0] /= rad_dis(points_tmp[:, 1], popt[1], popt[2], ns[i])
# full fit
if verbose:
print('starting the full fit:')
popt, flag = scipy.optimize.leastsq(residuals_dis, init_guess, args=(points, ns), maxfev=maxfev)
if flag not in [1, 2, 3, 4]:
print('WARNING: optimization of distortions failed.')
if verbose:
print('fitted to: R={}'.format(popt[0]))
for i in range(len(ns)):
print('.. order={}, alpha={}, beta={}'.format(ns[i], popt[i*2+1], popt[i*2+2]))
return popt
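# A minimal end-to-end sketch on synthetic data (the ring radius, center
# guess and distortion parameters below are made up for illustration):
if __name__ == '__main__':
    theta = np.linspace(0, 2.0 * np.pi, 36, endpoint=False)
    center_true = np.array([256.0, 256.0])
    # synthetic ring with a 2nd order (elliptical) distortion
    r = 100.0 * rad_dis(theta, 0.3, 0.05, order=2)
    pts = np.stack([center_true[0] + r * np.cos(theta),
                    center_true[1] + r * np.sin(theta)], axis=1)
    center = optimize_center(pts, (250.0, 250.0), verbose=True)
    polar = points_topolar(pts, center)
    # popt comes back as [R, alpha, beta] for the single order 2
    popt = optimize_distortion(polar, (2,), verbose=True)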
| ercius/openNCEM | ncempy/algo/distortion.py | Python | gpl-3.0 | 8,278 |
#!/usr/bin/env python
# coding=utf-8
import flask
from flask import flash
from flask_login import login_required
from rpress.models import SiteSetting
from rpress.database import db
from rpress.runtimes.rpadmin.template import render_template, navbar
from rpress.runtimes.current_session import get_current_site, get_current_site_info
from rpress.forms import SettingsForm
app = flask.Blueprint('rpadmin_setting', __name__)
@app.route('/', methods=['GET', ])
@login_required
@navbar(level1='settings')
def list():
content = {
'site': get_current_site_info(),
}
return render_template('rpadmin/settings/list.html', content=content)
@app.route('/<string:key>/edit', methods=['GET', 'POST'])
@login_required
@navbar(level1='settings')
def edit(key):
site = get_current_site()
site_setting = SiteSetting.query.filter_by(site=site, key=key).order_by('created_time').first()
if site_setting is None:
site_setting = SiteSetting(
site_id=site.id,
key=key,
value=None,
)
form = SettingsForm(obj=site_setting)
    if form.validate_on_submit():
        form.populate_obj(site_setting)
        db.session.add(site_setting)
        db.session.commit()
        flash("settings updated", "success")
    elif form.is_submitted():
        # only flash an error for a failed submission, not on the initial GET
        flash('settings edit error')
return render_template("rpadmin/settings/edit.html", form=form, site_setting=site_setting)
| rexzhang/rpress | rpress/views/rpadmin/settings.py | Python | gpl-3.0 | 1,428 |
from __future__ import unicode_literals
from __future__ import absolute_import
from django.views.generic.base import TemplateResponseMixin
from wiki.core.plugins import registry
from wiki.conf import settings
class ArticleMixin(TemplateResponseMixin):
"""A mixin that receives an article object as a parameter (usually from a wiki
decorator) and puts this information as an instance attribute and in the
template context."""
def dispatch(self, request, article, *args, **kwargs):
self.urlpath = kwargs.pop('urlpath', None)
self.article = article
self.children_slice = []
if settings.SHOW_MAX_CHILDREN > 0:
try:
for child in self.article.get_children(
max_num=settings.SHOW_MAX_CHILDREN +
1,
articles__article__current_revision__deleted=False,
user_can_read=request.user):
self.children_slice.append(child)
except AttributeError as e:
raise Exception(
"Attribute error most likely caused by wrong MPTT version. Use 0.5.3+.\n\n" +
str(e))
return super(ArticleMixin, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
kwargs['urlpath'] = self.urlpath
kwargs['article'] = self.article
kwargs['article_tabs'] = registry.get_article_tabs()
kwargs['children_slice'] = self.children_slice[:20]
kwargs['children_slice_more'] = len(self.children_slice) > 20
kwargs['plugins'] = registry.get_plugins()
return kwargs
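# A minimal sketch of how this mixin is typically combined with a Django
# class-based view (hypothetical view class; the decorator that supplies the
# ``article`` argument is assumed):
#
#   from django.views.generic import TemplateView
#
#   class Example(ArticleMixin, TemplateView):
#       template_name = "wiki/example.html"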
| Infernion/django-wiki | wiki/views/mixins.py | Python | gpl-3.0 | 1,668 |
import sys
sys.path.insert(0, '..')
import game
import pytest
def trim_board(ascii_board):
return '\n'.join([i.strip() for i in ascii_board.splitlines()])
t = trim_board
def test_new_board():
    assert game.Board(3,3).ascii() == t("""
...
...
...
""")
    assert game.Board(4,3).ascii() == t("""
....
....
....
""")
    assert game.Board(3,4).ascii() == t("""
...
...
...
...
""")
def test_game():
board = game.Board(3,3,win=3)
assert board.count_tokens == 0
assert board.game_status == 'active'
assert board.turn_color == None
# drop first token
token = board.drop('x',0)
assert board.game_status == 'active'
assert token.position == (0,0)
assert token.color == 'x'
assert board.ascii() == t("""
...
...
x..
""")
assert board.count_tokens == 1
assert board.turn_color == 'o'
# drop second token
token = board.drop('o',0)
assert board.game_status == 'active'
assert token.position == (0,1)
assert token.color == 'o'
assert board.ascii() == t("""
...
o..
x..
""")
assert board.count_tokens == 2
assert board.turn_color == 'x'
# dropping the wrong color should raise an error
with pytest.raises(Exception):
token = board.drop('o',1)
# drop third token
token = board.drop('x',1)
assert board.game_status == 'active'
assert token.position == (1,0)
assert token.color == 'x'
    assert board.ascii() == t("""
...
o..
xx.
""")
assert board.count_tokens == 3
assert board.turn_color == 'o'
# drop fourth token
token = board.drop('o',0)
assert board.game_status == 'active'
assert token.position == (0,2)
assert token.color == 'o'
    assert board.ascii() == t("""
o..
o..
xx.
""")
assert board.count_tokens == 4
# drop fifth token
token = board.drop('x',2)
assert board.game_status == 'over'
assert board.won_by == 'x'
assert token.position == (2,0)
assert token.color == 'x'
    assert board.ascii() == t("""
o..
o..
xxx
""")
assert board.count_tokens == 5
def test_load_board():
"""
The Board class should provide a load method to load a predefined board.
the load method should be implemented as a static method like this:
>>> class Test:
>>> @staticmethod
>>> def a_static_factory():
>>> t = Test()
>>> # do something with t and return it
>>> return t
the load function accepts a board layout. It retrieves the dimensions of the board
and loads the provided data into the board.
"""
    board = game.Board.load(t("""
    o..
    o..
    xxx
    """))
    # loading should round-trip through the ascii rendering
    assert board.ascii() == t("""
    o..
    o..
    xxx
    """)
def test_axis_strings():
board = game.Board.load(t("""
o..
o..
xxx
"""))
# get the axis strings in this order: | \ / -
axis_strings = board.axis_strings(0,0)
assert axis_strings[0] == 'xoo'
assert axis_strings[1] == 'x'
assert axis_strings[2] == 'x..'
assert axis_strings[3] == 'xxx' # the winner :-)
assert board.won_by == 'x'
| fweidemann14/x-gewinnt | game/test_game.py | Python | gpl-3.0 | 3,095 |
"""
A mode for working with Circuit Python boards.
Copyright (c) 2015-2017 Nicholas H.Tollervey and others (see the AUTHORS file).
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import ctypes
import logging
from subprocess import check_output
from mu.modes.base import MicroPythonMode
from mu.modes.api import ADAFRUIT_APIS, SHARED_APIS
from mu.interface.panes import CHARTS
from mu.logic import Device
from adafruit_board_toolkit import circuitpython_serial
logger = logging.getLogger(__name__)
class CircuitPythonMode(MicroPythonMode):
"""
Represents the functionality required by the CircuitPython mode.
"""
name = _("CircuitPython")
short_name = "circuitpython"
description = _("Write code for boards running CircuitPython.")
icon = "circuitpython"
save_timeout = 0 #: No auto-save on CP boards. Will restart.
connected = True #: is the board connected.
force_interrupt = False #: NO keyboard interrupt on serial connection.
# Modules built into CircuitPython which mustn't be used as file names
# for source code.
module_names = {
"_bleio",
"_eve",
"_pew",
"_pixelbuf",
"_stage",
"_typing",
"adafruit_bus_device",
"aesio",
"alarm",
"array",
"analogio",
"audiobusio",
"audiocore",
"audioio",
"audiomixer",
"audiomp3",
"audiopwmio",
"binascii",
"bitbangio",
"bitmaptools",
"bitops",
"board",
"builtins",
"busio",
"camera",
"canio",
"collections",
"countio",
"digitalio",
"displayio",
"dualbank",
"errno",
"fontio",
"framebufferio",
"frequencyio",
"gamepad",
"gamepadshift",
"gc",
"gnss",
"hashlib",
"i2cperipheral",
"io",
"ipaddress",
"json",
"math",
"memorymonitor",
"microcontroller",
"msgpack",
"multiterminal",
"neopixel_write",
"network",
"nvm",
"os",
"ps2io",
"pulseio",
"pwmio",
"random",
"re",
"rgbmatrix",
"rotaryio",
"rtc",
"sdcardio",
"sdioio",
"sharpdisplay",
"socket",
"socketpool",
"ssl",
"storage",
"struct",
"supervisor",
"sys",
"terminalio",
"time",
"touchio",
"uheap",
"usb_cdc",
"usb_hid",
"usb_midi",
"ustack",
"vectorio",
"watchdog",
"wifi",
"wiznet",
"zlib",
}
def actions(self):
"""
Return an ordered list of actions provided by this module. An action
        is a name (also used to identify the icon), a description, and a handler.
"""
buttons = [
{
"name": "serial",
"display_name": _("Serial"),
"description": _("Open a serial connection to your device."),
"handler": self.toggle_repl,
"shortcut": "CTRL+Shift+U",
}
]
if CHARTS:
buttons.append(
{
"name": "plotter",
"display_name": _("Plotter"),
"description": _("Plot incoming REPL data."),
"handler": self.toggle_plotter,
"shortcut": "CTRL+Shift+P",
}
)
return buttons
def workspace_dir(self):
"""
Return the default location on the filesystem for opening and closing
files.
"""
device_dir = None
# Attempts to find the path on the filesystem that represents the
# plugged in CIRCUITPY board.
if os.name == "posix":
# We're on Linux or OSX
for mount_command in ["mount", "/sbin/mount"]:
try:
mount_output = check_output(mount_command).splitlines()
mounted_volumes = [x.split()[2] for x in mount_output]
for volume in mounted_volumes:
tail = os.path.split(volume)[-1]
if tail.startswith(b"CIRCUITPY") or tail.startswith(
b"PYBFLASH"
):
device_dir = volume.decode("utf-8")
break
except FileNotFoundError:
pass
except PermissionError as e:
logger.error(
"Received '{}' running command: {}".format(
repr(e), mount_command
)
)
m = _("Permission error running mount command")
info = _(
'The mount command ("{}") returned an error: '
"{}. Mu will continue as if a device isn't "
"plugged in."
).format(mount_command, repr(e))
self.view.show_message(m, info)
# Avoid crashing Mu, the workspace dir will be set to default
except Exception as e:
logger.error(
"Received '{}' running command: {}".format(
repr(e), mount_command
)
)
elif os.name == "nt":
# We're on Windows.
def get_volume_name(disk_name):
"""
Each disk or external device connected to windows has an
attribute called "volume name". This function returns the
volume name for the given disk/device.
Code from http://stackoverflow.com/a/12056414
"""
vol_name_buf = ctypes.create_unicode_buffer(1024)
ctypes.windll.kernel32.GetVolumeInformationW(
ctypes.c_wchar_p(disk_name),
vol_name_buf,
ctypes.sizeof(vol_name_buf),
None,
None,
None,
None,
0,
)
return vol_name_buf.value
#
# In certain circumstances, volumes are allocated to USB
# storage devices which cause a Windows popup to raise if their
# volume contains no media. Wrapping the check in SetErrorMode
# with SEM_FAILCRITICALERRORS (1) prevents this popup.
#
old_mode = ctypes.windll.kernel32.SetErrorMode(1)
try:
for disk in "ABCDEFGHIJKLMNOPQRSTUVWXYZ":
path = "{}:\\".format(disk)
if (
os.path.exists(path)
and get_volume_name(path) == "CIRCUITPY"
):
return path
finally:
ctypes.windll.kernel32.SetErrorMode(old_mode)
else:
# No support for unknown operating systems.
raise NotImplementedError('OS "{}" not supported.'.format(os.name))
if device_dir:
# Found it!
self.connected = True
return device_dir
else:
# Not plugged in? Just return Mu's regular workspace directory
# after warning the user.
wd = super().workspace_dir()
if self.connected:
m = _("Could not find an attached CircuitPython device.")
info = _(
"Python files for CircuitPython devices"
" are stored on the device. Therefore, to edit"
" these files you need to have the device plugged in."
" Until you plug in a device, Mu will use the"
" directory found here:\n\n"
" {}\n\n...to store your code."
)
self.view.show_message(m, info.format(wd))
self.connected = False
return wd
def compatible_board(self, port):
"""Use adafruit_board_toolkit to find out whether a board is running
        CircuitPython. The toolkit checks whether the CDC interface name is appropriate.
"""
pid = port.productIdentifier()
vid = port.vendorIdentifier()
manufacturer = port.manufacturer()
serial_number = port.serialNumber()
port_name = self.port_path(port.portName())
# Find all the CircuitPython REPL comports,
# and see if any of their device names match the one passed in.
for comport in circuitpython_serial.repl_comports():
if comport.device == port_name:
return Device(
vid,
pid,
port_name,
serial_number,
manufacturer,
self.name,
self.short_name,
"CircuitPython board",
)
# No match.
return None
def api(self):
"""
Return a list of API specifications to be used by auto-suggest and call
tips.
"""
return SHARED_APIS + ADAFRUIT_APIS
| carlosperate/mu | mu/modes/circuitpython.py | Python | gpl-3.0 | 10,028 |
import os
from collections import defaultdict
from django.conf import settings
from django.contrib import messages
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.core.urlresolvers import reverse
from django.db.models import Count, Q
from django.http import HttpResponseRedirect, Http404, HttpResponse
from django.shortcuts import render, get_object_or_404
from django.utils.translation import ugettext_lazy as _
from haystack.query import SearchQuerySet
from orb.forms import (ResourceStep1Form, ResourceStep2Form, SearchForm,
ResourceRejectForm, AdvancedSearchForm)
from orb.models import Collection
from orb.models import home_resources
from orb.models import ResourceFile, ResourceTag, ResourceCriteria, ResourceRating
from orb.models import ReviewerRole
from orb.models import Tag, Resource, ResourceURL, Category, TagOwner, SearchTracker
from orb.signals import (resource_viewed, resource_url_viewed, resource_file_viewed,
search, resource_workflow, resource_submitted, tag_viewed)
from orb.tags.forms import TagPageForm
def home_view(request):
topics = []
organized_topics = defaultdict(list)
for tag in Tag.tags.public().top_level():
child_tags = tag.children.values_list('id')
resource_count = Resource.objects.filter(status=Resource.APPROVED).filter(
Q(resourcetag__tag__pk__in=child_tags) | Q(resourcetag__tag=tag)).distinct().count()
for category_slug in ["health-domain"]:
if tag.category.slug == category_slug:
organized_topics[category_slug.replace("-", "_")].append({
'resource_count': resource_count,
'tag': tag,
})
topics.append({
'resource_count': resource_count,
'tag': tag,
})
return render(request, 'orb/home.html', {
'topics': topics,
'organized_topics': home_resources(),
'page_title': _(u'ORB by mPowering'),
})
def partner_view(request):
PARTNERS = ['jhu-ccp', 'digital-campus', 'digital-green',
'global-health-media-project', 'medical-aid-films', 'zinc-ors']
partners = Tag.objects.filter(
category__slug='organisation', slug__in=PARTNERS).order_by('name')
return render(request, 'orb/partners.html', {'partners': partners})
def tag_view(request, tag_slug):
"""
Renders a tag detail page.
Allows the user to paginate resultes and sort by preselected options.
Args:
request: HttpRequest
tag_slug: the identifier for the tag
Returns:
Rendered response with a tag's resource list
"""
tag = get_object_or_404(Tag, slug=tag_slug)
filter_params = {
'page': 1,
'order': TagPageForm.CREATED,
}
params_form = TagPageForm(data=request.GET)
if params_form.is_valid():
filter_params.update(params_form.cleaned_data)
order_by = filter_params['order']
if order_by == TagPageForm.RATING:
data = Resource.resources.approved().with_ratings(tag).order_by(order_by)
else:
data = Resource.resources.approved().for_tag(tag).order_by(order_by)
paginator = Paginator(data, settings.ORB_PAGINATOR_DEFAULT)
try:
resources = paginator.page(filter_params['page'])
except (EmptyPage, InvalidPage):
resources = paginator.page(paginator.num_pages)
show_filter_link = tag.category.slug in [slug for name, slug in settings.ADVANCED_SEARCH_CATEGORIES]
tag_viewed.send(sender=tag, tag=tag, request=request)
is_geo_tag = tag.category.name == "Geography"
return render(request, 'orb/tag.html', {
'tag': tag,
'page': resources,
'params_form': params_form,
'show_filter_link': show_filter_link,
'is_geo_tag': is_geo_tag,
})
def taxonomy_view(request):
return render(request, 'orb/taxonomy.html')
def resource_permalink_view(request, id):
resource = get_object_or_404(Resource, pk=id)
return resource_view(request, resource.slug)
def resource_view(request, resource_slug):
resource = get_object_or_404(
Resource.objects.approved(user=request.user), slug=resource_slug)
if resource.status == Resource.ARCHIVED:
messages.error(request, _(
u"This resource has been archived by the ORB Content"
u" Review Team, so is not available for users to view"))
elif resource.status != Resource.APPROVED:
messages.error(request, _(
u"This resource is not yet approved by the ORB Content"
u" Review Team, so is not yet available for all users to view"))
options_menu = []
if resource_can_edit(resource, request.user):
om = {}
om['title'] = _(u'Edit')
om['url'] = reverse('orb_resource_edit', args=[resource.id])
options_menu.append(om)
if request.user.is_staff and resource.status == Resource.PENDING:
om = {}
om['title'] = _(u'Reject')
om['url'] = reverse('orb_resource_reject', args=[resource.id])
options_menu.append(om)
om = {}
om['title'] = _(u'Approve')
om['url'] = reverse('orb_resource_approve', args=[resource.id])
options_menu.append(om)
resource_viewed.send(sender=resource, resource=resource, request=request)
user_rating = 0
if request.user.is_authenticated():
try:
user_rating = ResourceRating.objects.get(
resource=resource, user=request.user).rating
except ResourceRating.DoesNotExist:
pass
# get the collections for this resource
collections = Collection.objects.filter(
collectionresource__resource=resource, visibility=Collection.PUBLIC)
# See if bookmarked
bookmarks = Collection.objects.filter(collectionresource__resource=resource,
visibility=Collection.PRIVATE, collectionuser__user__id=request.user.id).count()
    bookmarked = bookmarks > 0
return render(request, 'orb/resource/view.html', {
'resource': resource,
'options_menu': options_menu,
'user_rating': user_rating,
'collections': collections,
'bookmarked': bookmarked,
})
def resource_create_step1_view(request):
if request.user.is_anonymous():
return render(request, 'orb/login_required.html', {
'message': _(u'You need to be logged in to add a resource.'),
})
if request.method == 'POST':
form = ResourceStep1Form(request.POST, request.FILES, request=request)
resource_form_set_choices(form)
if form.is_valid():
# save resource
resource = Resource(status=Resource.PENDING,
create_user=request.user, update_user=request.user)
resource.title = form.cleaned_data.get("title")
resource.description = form.cleaned_data.get("description")
if form.cleaned_data.get("study_time_number") and form.cleaned_data.get("study_time_unit"):
resource.study_time_number = form.cleaned_data.get(
"study_time_number")
resource.study_time_unit = form.cleaned_data.get(
"study_time_unit")
            if 'image' in request.FILES:
resource.image = request.FILES["image"]
resource.attribution = form.cleaned_data.get("attribution")
resource.save()
# add organisation(s)/geography and other tags
resource_add_free_text_tags(
resource, form.cleaned_data.get('organisations'), request.user, 'organisation')
resource_add_free_text_tags(
resource, form.cleaned_data.get('geography'), request.user, 'geography')
resource_add_free_text_tags(
resource, form.cleaned_data.get('languages'), request.user, 'language')
resource_add_free_text_tags(
resource, form.cleaned_data.get('other_tags'), request.user, 'other')
# add tags
resource_add_tags(request, form, resource)
# see if email needs to be sent
resource_workflow.send(sender=resource, resource=resource, request=request,
status=Resource.PENDING, notes="")
resource_submitted.send(sender=resource, resource=resource, request=request)
# redirect to step 2
# Redirect after POST
return HttpResponseRedirect(reverse('orb_resource_create2', args=[resource.id]))
else:
if request.user.userprofile.organisation:
user_org = request.user.userprofile.organisation.name
initial = {'organisations': user_org, }
else:
initial = {}
form = ResourceStep1Form(initial=initial, request=request)
resource_form_set_choices(form)
return render(request, 'orb/resource/create_step1.html', {'form': form})
def resource_create_step2_view(request, id):
if request.user.is_anonymous():
# TODO use contrib.messages
return render(request, 'orb/login_required.html', {
'message': _(u'You need to be logged in to add a resource.'),
})
resource = get_object_or_404(Resource, pk=id)
# check if owner of this resource
if not resource_can_edit(resource, request.user):
raise Http404()
if request.method == 'POST':
form = ResourceStep2Form(request.POST, request.FILES, request=request)
if form.is_valid():
title = form.cleaned_data.get("title")
# add file and url
            if 'file' in request.FILES:
rf = ResourceFile(
resource=resource, create_user=request.user, update_user=request.user)
rf.file = request.FILES["file"]
if title:
rf.title = title
rf.save()
url = form.cleaned_data.get("url")
if url:
ru = ResourceURL(
resource=resource, create_user=request.user, update_user=request.user)
ru.url = url
if title:
ru.title = title
ru.save()
initial = {}
form = ResourceStep2Form(initial=initial, request=request)
resource_files = ResourceFile.objects.filter(resource=resource)
resource_urls = ResourceURL.objects.filter(resource=resource)
return render(request, 'orb/resource/create_step2.html', {
'form': form,
'resource': resource,
'resource_files': resource_files,
'resource_urls': resource_urls,
})
def resource_create_file_delete_view(request, id, file_id):
# check ownership
resource = get_object_or_404(Resource, pk=id)
if not resource_can_edit(resource, request.user):
raise Http404()
try:
ResourceFile.objects.get(resource=resource, pk=file_id).delete()
except ResourceFile.DoesNotExist:
pass
return HttpResponseRedirect(reverse('orb_resource_create2', args=[id]))
def resource_create_url_delete_view(request, id, url_id):
# check ownership
resource = get_object_or_404(Resource, pk=id)
if not resource_can_edit(resource, request.user):
raise Http404()
try:
ResourceURL.objects.get(resource=resource, pk=url_id).delete()
except ResourceURL.DoesNotExist:
pass
return HttpResponseRedirect(reverse('orb_resource_create2', args=[id]))
def resource_edit_file_delete_view(request, id, file_id):
# check ownership
resource = get_object_or_404(Resource, pk=id)
if not resource_can_edit(resource, request.user):
raise Http404()
try:
ResourceFile.objects.get(resource=resource, pk=file_id).delete()
except ResourceFile.DoesNotExist:
pass
return HttpResponseRedirect(reverse('orb_resource_edit2', args=[id]))
def resource_edit_url_delete_view(request, id, url_id):
# check ownership
resource = get_object_or_404(Resource, pk=id)
if not resource_can_edit(resource, request.user):
raise Http404()
try:
ResourceURL.objects.get(resource=resource, pk=url_id).delete()
except ResourceURL.DoesNotExist:
pass
return HttpResponseRedirect(reverse('orb_resource_edit2', args=[id]))
def resource_create_thanks_view(request, id):
resource = get_object_or_404(Resource, pk=id)
if not resource_can_edit(resource, request.user):
raise Http404()
return render(request, 'orb/resource/create_thanks.html', {'resource': resource})
def resource_guidelines_view(request):
criteria = []
# get the general criteria
criteria_general = ResourceCriteria.objects.filter(role=None).order_by('order_by')
obj = {}
obj['category'] = _("General")
obj['criteria'] = criteria_general
criteria.append(obj)
for k in ReviewerRole.objects.all():
obj = {}
cat = ResourceCriteria.objects.filter(role=k).order_by('order_by')
obj['category'] = k
obj['criteria'] = cat
criteria.append(obj)
return render(request, 'orb/resource/guidelines.html', {'criteria_categories': criteria})
def resource_approve_view(request, id):
if not request.user.is_staff:
return HttpResponse(status=401, content="Not Authorized")
resource = Resource.objects.get(pk=id)
resource.status = Resource.APPROVED
resource.save()
resource_workflow.send(sender=resource, resource=resource,
request=request, status=Resource.APPROVED, notes="")
return render(request, 'orb/resource/status_updated.html', {'resource': resource})
def resource_reject_view(request, id):
if not request.user.is_staff:
return HttpResponse(status=401, content="Not Authorized")
resource = Resource.objects.get(pk=id)
if request.method == 'POST':
form = ResourceRejectForm(data=request.POST)
form.fields['criteria'].choices = [(t.id, t.description) for t in ResourceCriteria.objects.all(
).order_by('category_order_by', 'order_by')]
if form.is_valid():
resource.status = Resource.REJECTED
resource.save()
notes = form.cleaned_data.get("notes")
criteria = form.cleaned_data.get("criteria")
resource_workflow.send(sender=resource, resource=resource, request=request,
status=Resource.REJECTED, notes=notes, criteria=criteria)
return HttpResponseRedirect(reverse('orb_resource_reject_sent', args=[resource.id]))
else:
form = ResourceRejectForm()
form.fields['criteria'].choices = [(t.id, t.description) for t in ResourceCriteria.objects.all(
).order_by('category_order_by', 'order_by')]
return render(request, 'orb/resource/reject_form.html', {
'resource': resource,
'form': form,
})
def resource_reject_sent_view(request, id):
if not request.user.is_staff:
return HttpResponse(status=401, content="Not Authorized")
resource = Resource.objects.get(pk=id)
return render(request, 'orb/resource/status_updated.html', {'resource': resource, })
def resource_pending_mep_view(request, id):
if not request.user.is_staff:
return HttpResponse(status=401, content="Not Authorized")
resource = Resource.objects.get(pk=id)
resource.status = Resource.PENDING
resource.save()
resource_workflow.send(sender=resource, resource=resource, request=request,
status=Resource.PENDING, notes="")
return render(request, 'orb/resource/status_updated.html', {'resource': resource})
def resource_edit_view(request, resource_id):
resource = get_object_or_404(Resource, pk=resource_id)
if not resource_can_edit(resource, request.user):
raise Http404()
if request.method == 'POST':
form = ResourceStep1Form(data=request.POST, files=request.FILES)
resource_form_set_choices(form)
if form.is_valid():
resource.update_user = request.user
resource.title = form.cleaned_data.get("title")
resource.description = form.cleaned_data.get("description")
if form.cleaned_data.get("study_time_number") and form.cleaned_data.get("study_time_unit"):
resource.study_time_number = form.cleaned_data.get(
"study_time_number")
resource.study_time_unit = form.cleaned_data.get(
"study_time_unit")
resource.attribution = form.cleaned_data.get("attribution")
resource.save()
# update image
image = form.cleaned_data.get("image")
            if image is False:
                # the clearable file field yields False when "clear" is ticked
                resource.image = None
                resource.save()
            if 'image' in request.FILES:
resource.image = request.FILES["image"]
resource.save()
# update tags - remove all current tags first
ResourceTag.objects.filter(resource=resource).delete()
resource_add_tags(request, form, resource)
resource_add_free_text_tags(
resource, form.cleaned_data.get('organisations'), request.user, 'organisation')
resource_add_free_text_tags(
resource, form.cleaned_data.get('geography'), request.user, 'geography')
resource_add_free_text_tags(
resource, form.cleaned_data.get('languages'), request.user, 'language')
resource_add_free_text_tags(
resource, form.cleaned_data.get('other_tags'), request.user, 'other')
# All successful - now redirect
# Redirect after POST
return HttpResponseRedirect(reverse('orb_resource_edit2', args=[resource.id]))
else:
initial = request.POST.copy()
initial['image'] = resource.image
files = ResourceFile.objects.filter(resource=resource)[:1]
if files:
initial['file'] = files[0].file
form = ResourceStep1Form(
initial=initial, data=request.POST, files=request.FILES)
resource_form_set_choices(form)
else:
data = {}
data['title'] = resource.title
organisations = Tag.objects.filter(
category__slug='organisation', resourcetag__resource=resource).values_list('name', flat=True)
data['organisations'] = ', '.join(organisations)
data['description'] = resource.description
data['image'] = resource.image
data['study_time_number'] = resource.study_time_number
data['study_time_unit'] = resource.study_time_unit
data['attribution'] = resource.attribution
files = ResourceFile.objects.filter(resource=resource)[:1]
if files:
data['file'] = files[0].file
urls = ResourceURL.objects.filter(resource=resource)[:1]
if urls:
data['url'] = urls[0].url
health_topic = Tag.objects.filter(
category__top_level=True, resourcetag__resource=resource).values_list('id', flat=True)
data['health_topic'] = health_topic
resource_type = Tag.objects.filter(
category__slug='type', resourcetag__resource=resource).values_list('id', flat=True)
data['resource_type'] = resource_type
audience = Tag.objects.filter(
category__slug='audience', resourcetag__resource=resource).values_list('id', flat=True)
data['audience'] = audience
geography = Tag.objects.filter(
category__slug='geography', resourcetag__resource=resource).values_list('name', flat=True)
data['geography'] = ', '.join(geography)
languages = Tag.objects.filter(
category__slug='language', resourcetag__resource=resource).values_list('name', flat=True)
data['languages'] = ', '.join(languages)
device = Tag.objects.filter(
category__slug='device', resourcetag__resource=resource).values_list('id', flat=True)
data['device'] = device
license = Tag.objects.filter(
category__slug='license', resourcetag__resource=resource).values_list('id', flat=True)
if license:
data['license'] = license[0]
other_tags = Tag.objects.filter(
resourcetag__resource=resource, category__slug='other').values_list('name', flat=True)
data['other_tags'] = ', '.join(other_tags)
form = ResourceStep1Form(initial=data)
resource_form_set_choices(form)
return render(request, 'orb/resource/edit.html', {'form': form})
def resource_edit_step2_view(request, resource_id):
if request.user.is_anonymous():
# TODO use contrib.messages
return render(request, 'orb/login_required.html', {
'message': _(u'You need to be logged in to add a resource.'),
})
resource = get_object_or_404(Resource, pk=resource_id)
# check if owner of this resource
if not resource_can_edit(resource, request.user):
raise Http404()
if request.method == 'POST':
form = ResourceStep2Form(request.POST, request.FILES, request=request)
if form.is_valid():
title = form.cleaned_data.get("title")
# add file and url
            if 'file' in request.FILES:
rf = ResourceFile(
resource=resource, create_user=request.user, update_user=request.user)
rf.file = request.FILES["file"]
if title:
rf.title = title
rf.save()
url = form.cleaned_data.get("url")
if url:
ru = ResourceURL(
resource=resource, create_user=request.user, update_user=request.user)
ru.url = url
if title:
ru.title = title
ru.save()
initial = {}
form = ResourceStep2Form(initial=initial, request=request)
resource_files = ResourceFile.objects.filter(resource=resource)
resource_urls = ResourceURL.objects.filter(resource=resource)
return render(request, 'orb/resource/edit_step2.html', {
'form': form,
'resource': resource,
'resource_files': resource_files,
'resource_urls': resource_urls,
})
def resource_edit_thanks_view(request, id):
resource = get_object_or_404(Resource, pk=id)
if not resource_can_edit(resource, request.user):
raise Http404()
return render(request, 'orb/resource/edit_thanks.html', {'resource': resource})
def search_view(request):
search_query = request.GET.get('q', '')
if search_query:
search_results = SearchQuerySet().filter(content=search_query)
else:
search_results = []
data = {}
data['q'] = search_query
form = SearchForm(initial=data)
paginator = Paginator(search_results, settings.ORB_PAGINATOR_DEFAULT)
# Make sure page request is an int. If not, deliver first page.
try:
page = int(request.GET.get('page', '1'))
except ValueError:
page = 1
try:
results = paginator.page(page)
except (EmptyPage, InvalidPage):
results = paginator.page(paginator.num_pages)
if search_query:
search.send(sender=search_results, query=search_query,
no_results=search_results.count(), request=request, page=page)
return render(request, 'orb/search.html', {
'form': form,
'query': search_query,
'page': results,
'total_results': paginator.count,
})
def search_advanced_view(request, tag_id=None):
if request.method == 'POST':
form = AdvancedSearchForm(request.POST)
if form.is_valid():
urlparams = request.POST.copy()
# delete these from params as not required
del urlparams['csrfmiddlewaretoken']
del urlparams['submit']
return HttpResponseRedirect(reverse('orb_search_advanced_results') + "?" + urlparams.urlencode())
else:
form = AdvancedSearchForm()
return render(request, 'orb/search_advanced.html', {'form': form})
def search_advanced_results_view(request):
form = AdvancedSearchForm(request.GET)
if form.is_valid():
q = form.cleaned_data.get('q')
results, filter_tags = form.search()
if q:
search_results = SearchQuerySet().filter(content=q).models(Resource).values_list('pk', flat=True)
results = results.filter(pk__in=search_results)
paginator = Paginator(results, settings.ORB_PAGINATOR_DEFAULT)
try:
page = int(request.GET.get('page', 1))
except ValueError:
page = 1
try:
resources = paginator.page(page)
except (EmptyPage, InvalidPage):
resources = paginator.page(paginator.num_pages)
search.send(sender=results, query=q, no_results=results.count(),
request=request, type=SearchTracker.SEARCH_ADV, page=page)
license_tags = form.cleaned_data['license']
else:
        filter_tags = Tag.objects.none()
        license_tags = []
        resources = Resource.objects.none()
paginator = Paginator(resources, settings.ORB_PAGINATOR_DEFAULT)
return render(request, 'orb/search_advanced_results.html', {
'filter_tags': filter_tags,
'license_tags': license_tags,
'q': form.cleaned_data.get('q'),
'page': resources,
'total_results': paginator.count,
})
def collection_view(request, collection_slug):
collection = get_object_or_404(Collection,
slug=collection_slug, visibility=Collection.PUBLIC)
data = Resource.objects.filter(collectionresource__collection=collection,
status=Resource.APPROVED).order_by('collectionresource__order_by')
paginator = Paginator(data, settings.ORB_PAGINATOR_DEFAULT)
try:
page = int(request.GET.get('page', '1'))
except ValueError:
page = 1
try:
resources = paginator.page(page)
except (EmptyPage, InvalidPage):
resources = paginator.page(paginator.num_pages)
return render(request, 'orb/collection/view.html', {
'collection': collection,
'page': resources,
'total_results': paginator.count,
})
# Helper functions
def resource_form_set_choices(form):
form.fields['health_topic'].choices = [(t.id, t.name) for t in Tag.objects.filter(
category__top_level=True).order_by('order_by', 'name')]
form.fields['resource_type'].choices = [(t.id, t.name) for t in Tag.objects.filter(
category__slug='type').order_by('order_by', 'name')]
form.fields['audience'].choices = [(t.id, t.name) for t in Tag.objects.filter(
category__slug='audience').order_by('order_by', 'name')]
form.fields['device'].choices = [(t.id, t.name) for t in Tag.objects.filter(
category__slug='device').order_by('order_by', 'name')]
form.fields['license'].choices = [(t.id, t.name) for t in Tag.objects.filter(
category__slug='license').order_by('order_by', 'name')]
return form
def advanced_search_form_set_choices(form):
for name, slug in settings.ADVANCED_SEARCH_CATEGORIES:
form.fields[name].choices = [(t.id, t.name) for t in Tag.objects.filter(
category__slug=slug, resourcetag__resource__status=Resource.APPROVED).distinct().order_by('order_by', 'name')]
form.fields['license'].choices = [
('ND', _(u'Derivatives allowed')), ('NC', _(u'Commercial use allowed'))]
return form
def resource_can_edit(resource, user):
if user.is_staff or user == resource.create_user or user == resource.update_user:
return True
else:
return TagOwner.objects.filter(user__pk=user.id, tag__resourcetag__resource=resource).exists()
def resource_add_free_text_tags(resource, tag_text, user, category_slug):
"""
Adds tags to a resource based on free text and category slugs
Args:
resource: a Resource object
tag_text: string of text including multiple comma separated tags
user: the User object to use for the tags
category_slug: the slug of the related Category
Returns:
None
"""
free_text_tags = [x.strip() for x in tag_text.split(',') if x.strip()]
category = Category.objects.get(slug=category_slug)
for tag_name in free_text_tags:
try:
tag = Tag.tags.rewrite(False).get(name=tag_name)
except Tag.DoesNotExist:
try:
tag = Tag.tags.get(name=tag_name)
except Tag.DoesNotExist:
tag = Tag.tags.create(
name=tag_name,
category=category,
create_user=user,
update_user=user,
)
ResourceTag.objects.get_or_create(
tag=tag, resource=resource, defaults={'create_user': user})
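# Example (hypothetical call):
#   resource_add_free_text_tags(resource, 'WHO, Kenya,,unicef', request.user, 'organisation')
# links the resource to the tags "WHO", "Kenya" and "unicef", creating any
# that do not yet exist in the "organisation" category.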
def resource_add_tags(request, form, resource):
"""
Adds structured tags to the resource
Args:
request: the HttpRequest
form: Resource add/edit form that has the tag data
resource: the resource to add the tags
Returns:
None
"""
tag_categories = ["health_topic", "resource_type", "audience", "device"]
for tc in tag_categories:
tag_category = form.cleaned_data.get(tc)
for ht in tag_category:
tag = Tag.objects.get(pk=ht)
ResourceTag.objects.get_or_create(
tag=tag, resource=resource, defaults={'create_user': request.user})
# add license
license = form.cleaned_data.get("license")
tag = Tag.objects.get(pk=license)
ResourceTag(tag=tag, resource=resource, create_user=request.user).save()
| mPowering/django-orb | orb/views.py | Python | gpl-3.0 | 30,008 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2012 Tuukka Turto
#
# This file is part of satin-python.
#
# pyherc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyherc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with satin-python. If not, see <http://www.gnu.org/licenses/>.
"""
Module for testing labels
"""
from hamcrest.core.base_matcher import BaseMatcher
from hamcrest.core.helpers.wrap_matcher import wrap_matcher
from .enumerators import all_widgets
class LabelMatcher(BaseMatcher):
"""
Check if Widget has label with given text
"""
def __init__(self, text):
"""
Default constructor
"""
super(LabelMatcher, self).__init__()
if hasattr(text, 'matches'):
self.text = text
else:
self.text = wrap_matcher(text)
def _matches(self, item):
"""
Check if matcher matches item
:param item: object to match against
:returns: True if matching, otherwise False
:rtype: Boolean
"""
widgets = all_widgets(item)
for widget in widgets:
if hasattr(widget, 'text') and self.text.matches(widget.text()):
return True
return False
def describe_to(self, description):
"""
Describe this matcher
"""
description.append('Control with label {0}'.format(self.text))
def describe_mismatch(self, item, mismatch_description):
"""
Describe this mismatch
"""
mismatch_description.append(
'QLabel with text {0} was not found'.format(self.text))
def has_label(text):
"""
Check if Widget has label with given text
"""
return LabelMatcher(text)
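# A minimal usage sketch (assumes a Qt widget under test and PyHamcrest's
# assert_that):
#
#   from hamcrest import assert_that
#   assert_that(dialog, has_label('OK'))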
| tuturto/satin-python | satin/label.py | Python | gpl-3.0 | 2,211 |
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.db import models
from base.models import ModelloSemplice
from base.tratti import ConMarcaTemporale
class Giudizio(ModelloSemplice, ConMarcaTemporale):
"""
    Represents a social rating attached to a generic object.
    Use it through the ConGiudizio trait and its methods.
"""
class Meta:
verbose_name_plural = "Giudizi"
permissions = (
("view_giudizio", "Can view giudizio"),
)
autore = models.ForeignKey("anagrafica.Persona", db_index=True, related_name="giudizi", on_delete=models.CASCADE)
positivo = models.BooleanField("Positivo", db_index=True, default=True)
oggetto_tipo = models.ForeignKey(ContentType, db_index=True, on_delete=models.SET_NULL, null=True)
oggetto_id = models.PositiveIntegerField(db_index=True)
oggetto = GenericForeignKey('oggetto_tipo', 'oggetto_id')
class Commento(ModelloSemplice, ConMarcaTemporale):
"""
    Represents a social comment attached to a generic object.
    Use it through the ConCommenti trait and its methods.
"""
class Meta:
verbose_name_plural = "Commenti"
app_label = "social"
abstract = False
permissions = (
("view_commento", "Can view commento"),
)
autore = models.ForeignKey("anagrafica.Persona", db_index=True, related_name="commenti", on_delete=models.CASCADE)
commento = models.TextField("Testo del commento")
oggetto_tipo = models.ForeignKey(ContentType, db_index=True, on_delete=models.SET_NULL, null=True)
oggetto_id = models.PositiveIntegerField(db_index=True)
oggetto = GenericForeignKey('oggetto_tipo', 'oggetto_id')
LUNGHEZZA_MASSIMA = 1024
class ConGiudizio():
"""
    Adds social-style rating functionality,
    positive or negative.
"""
class Meta:
abstract = True
giudizi = GenericRelation(
Giudizio,
related_query_name='giudizi',
content_type_field='oggetto_tipo',
object_id_field='oggetto_id'
)
def giudizio_positivo(self, autore):
"""
        Records a positive rating
        :param autore: author of the rating
"""
self._giudizio(autore, True)
def giudizio_negativo(self, autore):
"""
        Records a negative rating
        :param autore: author of the rating
"""
self._giudizio(autore, False)
def _giudizio(self, autore, positivo):
"""
        Records a rating
        :param autore: author of the rating
        :param positivo: True if positive, False if negative
"""
g = self.giudizio_cerca(autore)
        if g: # if a rating already exists, update its direction
g.positivo = positivo
        else: # otherwise, record a new one
g = Giudizio(
oggetto=self,
positivo=positivo,
autore=autore
)
g.save()
@property
def giudizi_positivi(self):
"""
        Returns the number of positive ratings attached to the object.
        """
        return self._giudizi(True)
@property
def giudizi_negativi(self):
"""
        Returns the number of negative ratings attached to the object.
        """
        return self._giudizi(False)
def _giudizi(self, positivo):
"""
        Returns the number of positive or negative ratings attached to the object.
"""
return self.giudizi.filter(positivo=positivo).count()
def giudizio_cerca(self, autore):
"""
        Looks up the rating left by an author on this object. Returns
        None if not present.
        """
        g = self.giudizi.filter(autore=autore)[:1]
        if g:
            return g[0]
        return None
class ConCommenti(models.Model):
"""
    Adds the ability to attach comments to
    an object.
"""
class Meta:
abstract = True
commenti = GenericRelation(
Commento,
related_query_name='%(class)s',
content_type_field='oggetto_tipo',
object_id_field='oggetto_id'
)
def commento_notifica_destinatari(self, mittente):
"""
        OVERRIDE ME!
        Returns the queryset of people who must receive
        a notification every time a comment is added
        by a given sender.
"""
from anagrafica.models import Persona
return Persona.objects.none()
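# A minimal sketch (hypothetical model) of how the traits are meant to be
# used together:
#
#   class Post(ModelloSemplice, ConMarcaTemporale, ConGiudizio, ConCommenti):
#       testo = models.TextField()
#
#   post.giudizio_positivo(persona)    # record a positive rating
#   post.giudizi_positivi              # -> number of positive ratings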
| CroceRossaItaliana/jorvik | social/models.py | Python | gpl-3.0 | 4,591 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Taifxx
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
########## LI2 (JSP):
### Import modules ...
from base import *
import json  # used by getJsp to parse JSON-RPC replies safely
DETVIDEXT = False
### Items ...
currentItemPos = lambda : inte(xbmc.getInfoLabel('Container.CurrentItem'))
itemsCount = lambda : inte(xbmc.getInfoLabel('Container.NumItems'))
### Container info ...
getCpath = lambda : xbmc.getInfoLabel('Container.FolderPath')
getCname = lambda : xbmc.getInfoLabel('Container.FolderName')
getCplug = lambda : xbmc.getInfoLabel('Container.PluginName')
### Listitem info ...
# NOTE: defaults are resolved at call time via idx=None; the original used
# `idx=currentItemPos()`, which Python evaluates only once, at import.
getLi = lambda infolabel, idx=None : xbmc.getInfoLabel('ListitemNoWrap(%s).%s' % (str((idx if idx is not None else currentItemPos()) - currentItemPos()), infolabel))
getIcn = lambda idx=None : getLi('Icon', idx)
getTbn = lambda idx=None : getLi('Thumb', idx)
getLink = lambda idx=None : getLi('FileNameAndPath', idx)
getPath = lambda idx=None : getLi('Path', idx)
getFname = lambda idx=None : getLi('FileName', idx)
getFolpath = lambda idx=None : getLi('FolderPath', idx)
getTitle = lambda idx=None : getLi('Label', idx)
getTitleF = lambda idx=None : getLi('Label', idx)
# def getTitleF (idx=currentItemPos()):
# tmpTitle = getTitle(idx)
# tmpFname = getFname(idx)
# return tmpFname if tmpFname else tmpTitle
def isFolder (idx=None):
if idx is None : idx = currentItemPos()
if DETVIDEXT and isVidExt(getTitle(idx)) : return False
return True if getLi('Property(IsPlayable)',idx) in ('false', Empty) and not getFname(idx) else False
def isVidExt(name):
for itm in TAG_PAR_VIDEOSEXT:
if name.endswith(itm) : return True
return False
### JSP functions ...
_listdircmd = '{"jsonrpc": "2.0", "method": "Files.GetDirectory", "params": {"properties": ["file", "title"], "directory":"%s", "media":"files"}, "id": "1"}'
def getJsp(dirPath, srcName):
_itmList = json.loads(xbmc.executeJSONRPC(_listdircmd % (dirPath)))['result']['files']  # json.loads, not eval: JSON literals like true/false/null would crash eval
_jsp = struct()
_jsp.link = dirPath
_jsp.name = srcName
_jsp.itmList = _itmList
_jsp.count = len(_itmList)
return _jsp
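# For reference, Files.GetDirectory replies with a structure roughly like
# (values are illustrative):
#
#     {"id": "1", "jsonrpc": "2.0",
#      "result": {"files": [
#          {"file": "plugin://...", "filetype": "directory",
#           "label": "Season 1", "title": ""}
#      ]}}
#
# which is why the accessors below read ['label'], ['file'] and ['filetype'].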
jsp_getLabel = lambda jsp, idx=0 : jsp.itmList[idx]['label']
jsp_getLink = lambda jsp, idx=0 : jsp.itmList[idx]['file']
def jsp_isFolder(jsp, idx):
if jsp.itmList[idx]['filetype'] != 'folder' : return False
return True
### Listitems Object ...
class vidItems:
def __init__(self, dirPath=Empty, srcName=Empty):
if not dirPath : self.norm_init()
else : self.jsp_init (dirPath, srcName)
def jsp_init(self, dirPath, srcName):
jsp = getJsp(dirPath, srcName)
#import resources.lib.gui as GUI
#GUI.dlgOk(str( jsp.count ))
## Current jsp data ...
self.vidFolderNameDef = Empty
self.vidCPath = jsp.link
self.vidCName = jsp.name
self.vidIsEmpty = True
self.vidFolCount = jsp.count
## Local items list...
self.vidListItems = []
self.vidListItemsRaw = []
## Create items list ...
for idx in range(0, self.vidFolCount):
self.vidListItemsRaw.append([jsp_getLabel(jsp, idx), jsp_getLink(jsp, idx)])
if jsp_isFolder(jsp, idx) : continue
self.vidListItems.append([jsp_getLabel(jsp, idx), jsp_getLink(jsp, idx)])
if self.vidListItems :
self.vidIsEmpty = False
## Set the first non-folder item as the default ...
self.vidFolderNameDef = self.vidListItems[0][0]
def norm_init(self):
## Current listitem data ...
self.vidFolderNameDef = Empty
self.vidCurr = getTitleF()
self.vidPath = getPath()
self.vidIsFolder = isFolder()
self.vidFPath = getFolpath()
self.vidLink = getLink()
self.vidCPath = getCpath()
self.vidCName = getCname()
self.vidCPlug = getCplug()
self.vidIsEmpty = True
self.vidFolCount = itemsCount()
## Local items list...
self.vidListItems = []
self.vidListItemsRaw = []
## If the current item is not a folder, set it as the default ...
if not self.vidIsFolder : self.vidFolderNameDef = self.vidCurr
## Create items list ...
for idx in range(1, self.vidFolCount+1):
self.vidListItemsRaw.append([getTitleF(idx), getLink(idx)])
if isFolder(idx) : continue
self.vidListItems.append([getTitleF(idx), getLink(idx)])
if self.vidListItems :
self.vidIsEmpty = False
## Set the first non-folder item as the default, if the current item is a folder ...
if self.vidFolderNameDef == Empty : self.vidFolderNameDef = self.vidListItems[0][0]
def setmanually(self, manlist):
self.vidListItems = [itm for idx, itm in enumerate(self.vidListItemsRaw) if idx in manlist]
def reverse(self):
self.vidListItems.reverse()
self.vidListItemsRaw.reverse()
def getOnlyNexts(self):
nexts = False
retList = []
for itm in self.vidListItems:
if itm[0] == self.vidCurr : nexts = True; continue
if not nexts : continue
retList.append(itm[1])
return retList
| Taifxx/xxtrep | context.addtolib/resources/lib/ext/li2_.py | Python | gpl-3.0 | 6,356 |
#!/usr/bin/env python
import argparse
from . import core
from . import gui
def main():
parser = argparse.ArgumentParser(
description="Track opponent's mulligan in the game of Hearthstone.")
parser.add_argument('--gui', action='store_true')
args = parser.parse_args()
if args.gui:
gui.main()
else:
core.main()
if __name__ == '__main__':
main()
| xor7486/mully | mully/__main__.py | Python | gpl-3.0 | 396 |
from api.callers.api_caller import ApiCaller
from exceptions import ResponseTextContentTypeError
from colors import Color
import os
from cli.arguments_builders.default_cli_arguments import DefaultCliArguments
import datetime
from cli.cli_file_writer import CliFileWriter
from cli.formatter.cli_json_formatter import CliJsonFormatter
from constants import CALLED_SCRIPT
class CliCaller:
api_object = None
action_name = None
help_description = ''
given_args = {}
result_msg_for_files = 'Response contains files. They were saved in the output folder ({}).'
result_msg_for_json = '{}'
cli_output_folder = ''
args_to_prevent_from_being_send = ['chosen_action', 'verbose', 'quiet']
def __init__(self, api_object: ApiCaller, action_name: str):
self.api_object = api_object
self.action_name = action_name
self.help_description = self.help_description.format(self.api_object.endpoint_url)
def init_verbose_mode(self):
self.result_msg_for_json = 'JSON:\n\n{}'
def build_argument_builder(self, child_parser):
return DefaultCliArguments(child_parser)
def add_parser_args(self, child_parser):
parser_argument_builder = self.build_argument_builder(child_parser)
parser_argument_builder.add_verbose_arg()
parser_argument_builder.add_help_opt()
parser_argument_builder.add_quiet_opt()
return parser_argument_builder
def attach_args(self, args):
self.given_args = args.copy()
args_to_send = args.copy()
for arg_to_remove in self.args_to_prevent_from_being_send:
if arg_to_remove in args_to_send:
del args_to_send[arg_to_remove]
if 'output' in args:
self.cli_output_folder = args['output']
del args_to_send['output']
args_to_send = {k: v for k, v in args_to_send.items() if v not in [None, '']} # Removing some 'empty' elements from dictionary
if 'file' in args:
del args_to_send['file']  # attaching a file is handled by a separate method
if self.api_object.request_method_name == ApiCaller.CONST_REQUEST_METHOD_GET:
self.api_object.attach_params(args_to_send)
else: # POST
self.api_object.attach_data(args_to_send)
def attach_file(self, file):
if isinstance(file, str):
file = open(file, 'rb')
self.api_object.attach_files({'file': file})  # it's already stored as a file handler
def get_colored_response_status_code(self):
response_code = self.api_object.get_response_status_code()
return Color.success(response_code) if self.api_object.if_request_success() is True else Color.error(response_code)
def get_colored_prepared_response_msg(self):
response_msg = self.api_object.get_prepared_response_msg()
return Color.success(response_msg) if self.api_object.if_request_success() is True else Color.error(response_msg)
def get_result_msg(self):
if self.api_object.api_response.headers['Content-Type'] == 'text/html':
raise ResponseTextContentTypeError('Can\'t print result, since it\'s \'text/html\' instead of expected content type with \'{}\' on board.'.format(self.api_object.api_expected_data_type))
if self.api_object.api_expected_data_type == ApiCaller.CONST_EXPECTED_DATA_TYPE_JSON:
return self.result_msg_for_json.format(CliJsonFormatter.format_to_pretty_string(self.api_object.get_response_json()))
elif self.api_object.api_expected_data_type == ApiCaller.CONST_EXPECTED_DATA_TYPE_FILE:
if self.api_object.if_request_success() is True:
return self.get_result_msg_for_files()
else:
error_msg = 'Error has occurred and your files were not saved.'
if self.given_args['verbose'] is False:
error_msg += ' To get more information, please run command in verbose mode. (add \'-v\')'
return error_msg
def get_processed_output_path(self):
output_path = self.cli_output_folder
if output_path.startswith('/') is True: # Given path is absolute
final_output_path = output_path
else:
path_parts = os.path.dirname(os.path.realpath(__file__)).split('/')[:-2]
called_script_dir = os.path.dirname(CALLED_SCRIPT)
# Handle the case where the user runs the script from outside the root directory.
if called_script_dir != 'vxapi.py':
new_path_parts = []
bad_parts = called_script_dir.split('/')
for part in reversed(path_parts):
if part in bad_parts:
bad_parts.remove(part)
continue
new_path_parts.append(part)
new_path_parts.reverse()
path_parts = new_path_parts
prepared_file_path = path_parts + [self.cli_output_folder]
final_output_path = '/'.join(prepared_file_path)
if not final_output_path.startswith('/'):
final_output_path = '/' + final_output_path
return final_output_path
def get_result_msg_for_files(self):
return self.result_msg_for_files.format(self.get_processed_output_path())
def do_post_processing(self):
if self.api_object.api_expected_data_type == ApiCaller.CONST_EXPECTED_DATA_TYPE_FILE and self.api_object.if_request_success() is True:
self.save_files()
def get_date_string(self):
now = datetime.datetime.now()
return '{}_{}_{}_{}_{}_{}'.format(now.year, now.month, now.day, now.hour, now.minute, now.second)
def convert_file_hashes_to_array(self, args, file_arg='hash_list', key_of_array_arg='hashes'):
with args[file_arg] as file:
hashes = file.read().splitlines()
if not hashes:
raise Exception('Given file does not contain any data.')
for key, value in enumerate(hashes):
args['{}[{}]'.format(key_of_array_arg, key)] = value
del args[file_arg]
return args
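# For example (hypothetical hash values), a hash-list file with two lines
# turns {'hash_list': <file>} into
# {'hashes[0]': 'aaaa...', 'hashes[1]': 'bbbb...'} ready to be sent as form data.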
def save_files(self):
api_response = self.api_object.api_response
identifier = None
if 'id' in self.given_args:
identifier = self.given_args['id']
elif 'sha256' in self.given_args:
identifier = self.given_args['sha256']
filename = '{}-{}-{}'.format(self.action_name, identifier, api_response.headers['Vx-Filename']) if identifier is not None else '{}-{}'.format(self.action_name, api_response.headers['Vx-Filename'])
return CliFileWriter.write(self.get_processed_output_path(), filename, api_response.content)
| PayloadSecurity/VxAPI | cli/wrappers/cli_caller.py | Python | gpl-3.0 | 6,741 |
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
try:
import json
except ImportError:
from django.utils import simplejson as json
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.views.decorators.http import require_POST
from django.shortcuts import get_object_or_404
from django.conf import settings
from django.contrib.auth import get_user_model
from geonode.utils import resolve_object
from geonode.base.models import ResourceBase
from geonode.layers.models import Layer
from geonode.people.models import Profile
if "notification" in settings.INSTALLED_APPS:
from notification import models as notification
def _perms_info(obj):
info = obj.get_all_level_info()
return info
def _perms_info_json(obj):
info = _perms_info(obj)
info['users'] = dict([(u.username, perms)
for u, perms in info['users'].items()])
info['groups'] = dict([(g.name, perms)
for g, perms in info['groups'].items()])
return json.dumps(info)
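# Illustrative output (usernames, group names and permissions are hypothetical):
#     {"users": {"alice": ["view_resourcebase"]},
#      "groups": {"editors": ["change_resourcebase"]}}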
def resource_permissions(request, resource_id):
try:
resource = resolve_object(
request, ResourceBase, {
'id': resource_id}, 'base.change_resourcebase_permissions')
except PermissionDenied:
# we are handling this in a non-standard way
return HttpResponse(
'You are not allowed to change permissions for this resource',
status=401,
content_type='text/plain')
if request.method == 'POST':
success = True
message = "Permissions successfully updated!"
try:
permission_spec = json.loads(request.body)
resource.set_permissions(permission_spec)
# Check Users Permissions Consistency
view_any = False
info = _perms_info(resource)
info_users = dict([(u.username, perms) for u, perms in info['users'].items()])
for user, perms in info_users.items():
if user == 'AnonymousUser':
view_any = ('view_resourcebase' in perms)
break
for user, perms in info_users.items():
if 'download_resourcebase' in perms and 'view_resourcebase' not in perms and not view_any:
success = False
message = 'User ' + str(user) + ' has Download permissions but ' \
'cannot access the resource. ' \
'Please update permissions consistently!'
return HttpResponse(
json.dumps({'success': success, 'message': message}),
status=200,
content_type='text/plain'
)
except BaseException:
success = False
message = "Error updating permissions :("
return HttpResponse(
json.dumps({'success': success, 'message': message}),
status=500,
content_type='text/plain'
)
elif request.method == 'GET':
permission_spec = _perms_info_json(resource)
return HttpResponse(
json.dumps({'success': True, 'permissions': permission_spec}),
status=200,
content_type='text/plain'
)
else:
return HttpResponse(
'No methods other than get and post are allowed',
status=401,
content_type='text/plain')
@require_POST
def invalidate_permissions_cache(request):
from .utils import sync_resources_with_guardian
uuid = request.POST['uuid']
resource = get_object_or_404(ResourceBase, uuid=uuid)
can_change_permissions = request.user.has_perm(
'change_resourcebase_permissions',
resource)
if can_change_permissions:
# Push Security Rules
sync_resources_with_guardian(resource)
return HttpResponse(
json.dumps({'success': 'ok', 'message': 'Security Rules Cache Refreshed!'}),
status=200,
content_type='text/plain'
)
else:
return HttpResponse(
json.dumps({'success': 'false', 'message': 'You cannot modify this resource!'}),
status=200,
content_type='text/plain'
)
@require_POST
def attributes_sats_refresh(request):
from geonode.geoserver.helpers import set_attributes_from_geoserver
uuid = request.POST['uuid']
resource = get_object_or_404(ResourceBase, uuid=uuid)
can_change_data = request.user.has_perm(
'change_resourcebase',
resource)
layer = Layer.objects.get(id=resource.id)
if layer and can_change_data:
# recalculate the layer statistics
set_attributes_from_geoserver(layer, overwrite=True)
return HttpResponse(
json.dumps({'success': 'ok', 'message': 'Attributes/Stats Refreshed Successfully!'}),
status=200,
content_type='text/plain'
)
else:
return HttpResponse(
json.dumps({'success': 'false', 'message': 'You cannot modify this resource!'}),
status=200,
content_type='text/plain'
)
@require_POST
def invalidate_tiledlayer_cache(request):
from .utils import set_geowebcache_invalidate_cache
uuid = request.POST['uuid']
resource = get_object_or_404(ResourceBase, uuid=uuid)
can_change_data = request.user.has_perm(
'change_resourcebase',
resource)
layer = Layer.objects.get(id=resource.id)
if layer and can_change_data:
set_geowebcache_invalidate_cache(layer.alternate)
return HttpResponse(
json.dumps({'success': 'ok', 'message': 'GeoWebCache Tiled Layer Emptied!'}),
status=200,
content_type='text/plain'
)
else:
return HttpResponse(
json.dumps({'success': 'false', 'message': 'You cannot modify this resource!'}),
status=200,
content_type='text/plain'
)
@require_POST
def set_bulk_permissions(request):
permission_spec = json.loads(request.POST.get('permissions', None))
resource_ids = request.POST.getlist('resources', [])
if permission_spec is not None:
not_permitted = []
for resource_id in resource_ids:
try:
resource = resolve_object(
request, ResourceBase, {
'id': resource_id
},
'base.change_resourcebase_permissions')
resource.set_permissions(permission_spec)
except PermissionDenied:
not_permitted.append(ResourceBase.objects.get(id=resource_id).title)
return HttpResponse(
json.dumps({'success': 'ok', 'not_changed': not_permitted}),
status=200,
content_type='text/plain'
)
else:
return HttpResponse(
json.dumps({'error': 'Wrong permissions specification'}),
status=400,
content_type='text/plain')
@require_POST
def request_permissions(request):
""" Request permission to download a resource.
"""
uuid = request.POST['uuid']
resource = get_object_or_404(ResourceBase, uuid=uuid)
try:
notification.send(
[resource.owner],
'request_download_resourcebase',
{'from_user': request.user, 'resource': resource}
)
return HttpResponse(
json.dumps({'success': 'ok', }),
status=200,
content_type='text/plain')
except BaseException:
return HttpResponse(
json.dumps({'error': 'error delivering notification'}),
status=400,
content_type='text/plain')
def send_email_consumer(layer_uuid, user_id):
resource = get_object_or_404(ResourceBase, uuid=layer_uuid)
user = Profile.objects.get(id=user_id)
notification.send(
[resource.owner],
'request_download_resourcebase',
{'from_user': user, 'resource': resource}
)
def send_email_owner_on_view(owner, viewer, layer_id, geonode_email="[email protected]"):
# get owner and viewer emails
owner_email = get_user_model().objects.get(username=owner).email
layer = Layer.objects.get(id=layer_id)
# check if those values are empty
if owner_email and geonode_email:
from django.core.mail import EmailMessage
# TODO: Copy edit message.
subject_email = "Your Layer has been seen."
msg = ("Your layer called {0} with uuid={1}"
" was seen by {2}").format(layer.name, layer.uuid, viewer)
try:
email = EmailMessage(
subject=subject_email,
body=msg,
from_email=geonode_email,
to=[owner_email, ],
reply_to=[geonode_email, ])
email.content_subtype = "html"
email.send()
except BaseException:
pass
| mcldev/geonode | geonode/security/views.py | Python | gpl-3.0 | 9,784 |
from elasticsearch import Elasticsearch
from django.conf import settings
def get_es_client(silent=False):
"""
Returns the elasticsearch client which uses the configuration file
"""
es_client = Elasticsearch([settings.ELASTIC_SEARCH_HOST],
scheme='http',
port=9200,
http_compress=True)
# test if it works
if not silent and not es_client.cat.health(request_timeout=30):
raise ValueError('Credentials do not work for Elasticsearch')
return es_client
def get_index_config(lang):
"""
Returns the elasticsearch index configuration.
Configures the analysers based on the language passed in.
"""
return {
"settings": {
"index": {
"number_of_shards": 1,
"number_of_replicas": 0
}
},
'mappings': {
'_doc': {
'properties': {
'title': {
'type': 'text',
'analyzer': settings.ELASTIC_SEARCH_ANALYSERS[lang]
},
'content': {
'type': 'text',
'analyzer': settings.ELASTIC_SEARCH_ANALYSERS[lang]
},
'url': {'type': 'text'},
'title_plain': {'type': 'text'},
'content_plain': {'type': 'text'},
'author': {
'type': 'keyword'
},
'source': {
'type': 'keyword'
},
'argument_score': {
'type': 'float'
}
}
}
}
}
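# Illustrative usage sketch (the index name and language code are hypothetical;
# assumes an elasticsearch-py client compatible with the 6.x mapping above):
#     es = get_es_client(silent=True)
#     es.indices.create(index='discourse-en', body=get_index_config('en'))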
| fako/datascope | src/online_discourse/elastic.py | Python | gpl-3.0 | 1,813 |
#-------------------------------------------------------------------------------
# Name: MissingPersomForm.py
#
# Purpose: Create Missing Person Flyer from data stored in the Subject
# Information data layer within MapSAR
#
# Author: Don Ferguson
#
# Created: 12/12/2011
# Copyright: (c) Don Ferguson 2011
# Licence:
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# The GNU General Public License can be found at
# <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------------------
#!/usr/bin/env python
import arcpy
from datetime import datetime
#workspc = arcpy.GetParameterAsText(0)
output = arcpy.GetParameterAsText(0)
#arcpy.env.workspace = workspc
arcpy.env.overwriteOutput = "True"
fc3="Incident_Information"
fc2="Lead Agency"
rows = arcpy.SearchCursor(fc3)
row = rows.next()
arcpy.AddMessage("Get Incident Info")
while row:
# you need to insert correct field names in your getvalue function
LeadAgency = row.getValue("Lead_Agency")
where2 = '"Lead_Agency" = ' + "'" + LeadAgency + "'"
arcpy.AddMessage(where2)
rows2 = arcpy.SearchCursor(fc2, where2)
row2 = rows2.next()
Phone = 'none'
email = 'none'
while row2:
# you need to insert correct field names in your getvalue function
Phone = row2.getValue("Lead_Phone")
if Phone == 'none':
Phone = " "
arcpy.AddWarning("No Phone number provided for Lead Agency")
email = row2.getValue("E_Mail")
if email == 'none':
email = " "
arcpy.AddWarning("No e-mail address provided for Lead Agency")
row2 = rows2.next()
del rows2
del row2
row = rows.next()
del rows
del row
Callback = "If you have information please call: " + str(LeadAgency) + " at phone: " + str(Phone) + " or e-mail:" + str(email)
fc1="Subject_Information"
rows = arcpy.SearchCursor(fc1)
row = rows.next()
while row:
# you need to insert correct field names in your getvalue function
try:
Subject_Name = row.getValue("Name")
if len(Subject_Name) == 0:
arcpy.AddWarning('Need to provide a Subject Name')
except:
Subject_Name = " "
arcpy.AddWarning('Need to provide a Subject Name')
try:
fDate = row.getValue("Date_Seen")
Date_Seen = fDate.strftime("%m/%d/%Y")
except:
Date_Seen = " "
try:
fTime = row.getValue("Time_Seen")
except:
fTime = " "
Where_Last = row.getValue("WhereLastSeen")
Age = row.getValue("Age")
Gender = row.getValue("Gender")
Race = row.getValue("Race")
try:
Height1 = (row.getValue("Height"))/12.0
feet = int(Height1)
inches = int((Height1 - feet)*12.0)
fInches = "%1.0f" %inches
Height = str(feet) + " ft " + fInches +" in"
except:
Height = "NA"
Weight = row.getValue("Weight")
Build = row.getValue("Build")
Complex = row.getValue("Complexion")
Hair = row.getValue("Hair")
Eyes = row.getValue("Eyes")
Other = row.getValue("Other")
Shirt = row.getValue("Shirt")
Pants = row.getValue("Pants")
Jacket = row.getValue("Jacket")
Hat = row.getValue("Hat")
Footwear = row.getValue("Footwear")
Info = row.getValue("Info")
try:
QRCode = row.getValue("QRCode")
except:
QRCode = " "
filename = output + "/" + str(Subject_Name) + ".fdf"
txt= open (filename, "w")
txt.write("%FDF-1.2\n")
txt.write("%????\n")
txt.write("1 0 obj<</FDF<</F(MissingPersonForm.pdf)/Fields 2 0 R>>>>\n")
txt.write("endobj\n")
txt.write("2 0 obj[\n")
txt.write ("\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_Name[0])/V(" + str(Subject_Name) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPFAge[0])/V(" + str(Age) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPFSex[0])/V(" + str(Gender) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_Location[0])/V(" + str(Where_Last) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_TimeMissing[0])/V(" + fTime + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_DateMissing[0])/V(" + str(Date_Seen) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_Race[0])/V(" + str(Race) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_Height[0])/V(" + Height + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_Weight[0])/V(" + str(Weight) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_Build[0])/V(" + str(Build) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_Complex[0])/V(" + str(Complex) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_HairColor[0])/V(" + str(Hair) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_EyeColor[0])/V(" + str(Eyes) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_OtherPhy[0])/V(" + str(Other) + ")>>\n")
#txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_OtherPhy[1])/V(" + str(Incident_Name) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_ShirtClothing[0])/V(" + str(Shirt) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_PantsClothing[0])/V(" + str(Pants) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_JacketClothing[0])/V(" + str(Jacket) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_HatClothing[0])/V(" + str(Hat) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_FootClothing[0])/V(" + str(Footwear) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_OtherInfo[0])/V(" + str(Info) + ")>>\n")
txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].MPF_CallNumber[0])/V(" + str(Callback) + ")>>\n")
#txt.write("<</T(topmostSubform[0].Page1[0].Layer[0].Layer[0].ImageField1[0])/V(" + str(Incident_Name) + ")>>\n")
txt.write("]\n")
txt.write("endobj\n")
txt.write("trailer\n")
txt.write("<</Root 1 0 R>>\n")
txt.write("%%EO\n")
txt.close ()
row = rows.next()
del rows
del row
#arcpy.DeleteFeatures_management(fc3) | dferguso/IGT4SAR | MissingPersonForm.py | Python | gpl-3.0 | 6,940 |
from kivy.lib import osc
from time import sleep
import pocketclient
from kivy.utils import platform as kivy_platform
SERVICE_PORT = 4000
def platform():
p = kivy_platform()
if p.lower() in ('linux', 'windows', 'osx'):
return 'desktop'
else:
return p
class Service(object):
def __init__(self):
osc.init()
self.last_update = 0
self.token = None  # set by pocket_connect()
self._run = True  # read-loop flag checked by run()
self.oscid = osc.listen(ipAddr='localhost', port=SERVICE_PORT)
osc.bind(self.oscid, self.pocket_connect, '/pocket/connect')
osc.bind(self.oscid, self.pocket_list, '/pocket/list')
osc.bind(self.oscid, self.pocket_mark_read, '/pocket/mark_read')
def send(self, **kwargs):
# Incomplete in the original; a minimal completion -- address and payload are assumptions.
osc.sendMsg('/pocket/reply', dataArray=list(kwargs.values()), port=SERVICE_PORT)
def run(self):
while self._run:
osc.readQueue(self.oscid)
sleep(.1)
def pocket_connect(self, **kwargs):
if 'token' in kwargs:
self.token = kwargs['token']
else:
pocketclient.authorize(platform(), self.save_pocket_token)
def save_pocket_token(self, api_key, token, username):
self.token = {
'key': api_key,
'token': token,
'username': username
}
def pocket_list(self, *args):
# Minimal completion of a truncated body; the guard and the
# get_items arguments are assumptions.
if not self.token:
return
if self.last_update:
pocketclient.get_items(self.token, since=self.last_update)
def pocket_mark_read(self, *args):
pass
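# Illustrative client-side call from the app process (the address follows the
# bindings above; the payload is hypothetical):
#     osc.sendMsg('/pocket/connect', dataArray=['<token>'], port=SERVICE_PORT)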
if __name__ == '__main__':
Service().run()
| tshirtman/kpritz | service/main.py | Python | gpl-3.0 | 1,497 |
import cv2
import numpy as np
import datetime as dt
# constant
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
OPENCV_METHODS = {
"Correlation": 0,
"Chi-Squared": 1,
"Intersection": 2,
"Hellinger": 3}
hist_limit = 0.6
ttl = 1 * 60
q_limit = 3
# init variables
total_count = 0
prev_count = 0
total_delta = 0
stm = {}
q = []
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
video_capture = cv2.VideoCapture(0)
while True:
for t in list(stm): # short term memory
if (dt.datetime.now() - t).seconds > ttl:
stm.pop(t, None)
# Capture frame-by-frame
ret, frame = video_capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.2,
minNeighbors=5,
minSize=(30, 30),
flags=cv2.CASCADE_SCALE_IMAGE
)
count = len(faces)
if len(q) >= q_limit: del q[0]
q.append(count)
isSame = True
for c in q: # Protect from fluctuation
if c != count: isSame = False
if isSame is False: continue
max_hist = 0
total_delta = 0
for (x, y, w, h) in faces:
# Draw a rectangle around the faces
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
if count == prev_count: continue
# set up the ROI
face = frame[y: y + h, x: x + w]
hsv_roi = cv2.cvtColor(face, cv2.COLOR_BGR2HSV)
# build the hue histogram from the HSV ROI (the bounds below are HSV ranges)
mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
face_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(face_hist, face_hist, 0, 255, cv2.NORM_MINMAX)
isFound = False
for t in stm:
hist_compare = cv2.compareHist(stm[t], face_hist, OPENCV_METHODS["Correlation"])
if hist_compare > max_hist: max_hist = hist_compare
if hist_compare >= hist_limit: isFound = True
if (len(stm) == 0) or (isFound is False and max_hist > 0):
total_delta += 1
stm[dt.datetime.now()] = face_hist
if prev_count != count:
total_count += total_delta
print("", count, " > ", total_count)
prev_count = count
# Display the resulting frame
cv2.imshow('Video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
| kvasnyj/face_counter | counter.py | Python | gpl-3.0 | 2,444 |
#!python
# log/urls.py
from django.conf.urls import url
from . import views
# We are adding a URL called /home
urlpatterns = [
url(r'^$', views.home, name='home'),
url(r'^clients/$', views.clients, name='clients'),
url(r'^clients/(?P<id>\d+)/$', views.client_detail, name='client_detail'),
url(r'^clients/new/$', views.client_new, name='client_new'),
url(r'^clients/(?P<id>\d+)/edit/$', views.client_edit, name='client_edit'),
url(r'^clients/services/$', views.clients_services_count, name='clients_services_count'),
url(r'^clients/bills/(?P<id>\d+)/$', views.all_clients_bills, name='all_clients_bills'),
url(r'^clients/bills/$', views.fresh_clients, name='fresh_clients'),
url(r'^clients/del/(?P<id>\d+)/$', views.delete_client, name='delete_client'),
url(r'^contracts/$', views.contracts, name='contracts'),
url(r'^contracts/(?P<id>\d+)/$', views.contract_detail, name='contract_detail'),
url(r'^contracts/new/$', views.contract_new, name='contract_new'),
url(r'^contracts/(?P<id>\d+)/edit/$', views.contract_edit, name='contract_edit'),
url(r'^contracts/list/(?P<id>\d+)/$', views.all_clients_contracts, name='all_clients_contracts'),
url(r'^contracts/list/$', views.contracts_services, name='contracts_services'),
url(r'^contracts/del/(?P<id>\d+)/$', views.delete_contract, name='delete_contract'),
url(r'^manager/$', views.managers, name='managers'),
url(r'^manager/(?P<id>\d+)/$', views.manager_detail, name='manager_detail'),
url(r'^manager/new/$', views.manager_new, name='manager_new'),
url(r'^manager/(?P<id>\d+)/edit/$', views.manager_edit, name='manager_edit'),
url(r'^manager/clients/$', views.managers_clients_count, name='managers_clients_count'),
url(r'^managers/del/(?P<id>\d+)/$', views.delete_manager, name='delete_manager'),
url(r'^briefs/$', views.brief, name='briefs'),
url(r'^briefs/(?P<id>\d+)/$', views.brief_detail, name='brief_detail'),
url(r'^briefs/new/$', views.brief_new, name='brief_new'),
url(r'^briefs/(?P<id>\d+)/edit/$', views.brief_edit, name='brief_edit'),
url(r'^briefs/del/(?P<id>\d+)/$', views.delete_brief, name='delete_brief'),
url(r'^briefs/list/(?P<id>\d+)/$', views.all_clients_briefs, name='all_clients_briefs'),
url(r'^services/$', views.services, name='services'),
url(r'^services/(?P<id>\d+)/$', views.service_detail, name='service_detail'),
url(r'^services/new/$', views.services_new, name='services_new'),
url(r'^services/(?P<id>\d+)/edit/$', views.service_edit, name='service_edit'),
url(r'^services/table/(?P<id>\d+)/$', views.service_all_clients, name='service_all_clients'),
url(r'^services/del/(?P<id>\d+)/$', views.delete_service, name='delete_service'),
url(r'^contractors/$', views.contractors, name='contractors'),
url(r'^contractors/(?P<id>\d+)/$', views.contractor_detail, name='contractor_detail'),
url(r'^contractors/new/$', views.contractors_new, name='contractors_new'),
url(r'^contractors/(?P<id>\d+)/edit/$', views.contractor_edit, name='contractor_edit'),
url(r'^contractors/newest/$', views.newest_contractors, name='newest_contractors'),
url(r'^contractors/del/(?P<id>\d+)/$', views.delete_contractor, name='delete_contractor'),
url(r'^acts/$', views.acts, name='acts'),
url(r'^acts/(?P<id>\d+)/$', views.act_detail, name='act_detail'),
url(r'^acts/new/$', views.act_new, name='act_new'),
url(r'^acts/(?P<id>\d+)/edit/$', views.act_edit, name='act_edit'),
url(r'^acts/del/(?P<id>\d+)/$', views.delete_act, name='delete_act'),
url(r'^bills/$', views.bills, name='bills'),
url(r'^bills/(?P<id>\d+)/$', views.bills_detail, name='bills_detail'),
url(r'^bills/new/$', views.bills_new, name='bills_new'),
url(r'^bills/(?P<id>\d+)/edit/$', views.bills_edit, name='bills_edit'),
url(r'^bill/del/(?P<id>\d+)/$', views.delete_bill, name='delete_bill'),
]
| alexeyshulzhenko/OBDZ_Project | OnlineAgecy/urls.py | Python | gpl-3.0 | 3,927 |
from __future__ import division, print_function, absolute_import
import unittest
from .. import common
import tempfile
import os
import platform
import numpy as num
from pyrocko import util, model
from pyrocko.pile import make_pile
from pyrocko import config, trace
if common.have_gui(): # noqa
from pyrocko.gui.qt_compat import qc, qw, use_pyqt5
if use_pyqt5:
from PyQt5.QtTest import QTest
Qt = qc.Qt
else:
from PyQt4.QtTest import QTest
Qt = qc.Qt
from pyrocko.gui.snuffler_app import SnufflerWindow
from pyrocko.gui import pile_viewer as pyrocko_pile_viewer
from pyrocko.gui import util as gui_util
from pyrocko.gui import snuffling
class DummySnuffling(snuffling.Snuffling):
def setup(self):
self.set_name('DummySnuffling')
def call(self):
figframe = self.figure_frame()
ax = figframe.gca()
ax.plot([0, 1], [0, 1])
figframe.draw()
self.enable_pile_changed_notifications()
self.pixmap_frame()
try:
self.web_frame()
except ImportError as e:
raise unittest.SkipTest(str(e))
self.get_pile()
no_gui = False
else:
no_gui = True
@common.require_gui
class GUITest(unittest.TestCase):
@classmethod
def setUpClass(cls):
'''
Create a reusable snuffler instance for all test cases.
'''
super(GUITest, cls).setUpClass()
if no_gui:  # nosetests runs this even when the class has @skip
return
from pyrocko.gui import snuffler as sm
cls.snuffler = sm.get_snuffler_instance()
fpath = common.test_data_file('test2.mseed')
p = make_pile(fpath, show_progress=False)
cls.win = SnufflerWindow(pile=p, instant_close=True)
cls.pile_viewer = cls.win.pile_viewer
cls.viewer = cls.win.pile_viewer.viewer
pv = cls.pile_viewer
cls.main_control_defaults = dict(
highpass_control=pv.highpass_control.get_value(),
lowpass_control=pv.lowpass_control.get_value(),
gain_control=pv.gain_control.get_value(),
rot_control=pv.rot_control.get_value())
@classmethod
def tearDownClass(cls):
'''
Quit snuffler.
'''
if no_gui:  # nosetests runs this even when the class has @skip
return
QTest.keyPress(cls.pile_viewer, 'q')
def setUp(self):
'''
reset GUI
'''
for k, v in self.main_control_defaults.items():
getattr(self.pile_viewer, k).set_value(v)
self.initial_trange = self.viewer.get_time_range()
self.viewer.set_tracks_range(
[0, self.viewer.ntracks_shown_max])
self.tempfiles = []
def tearDown(self):
self.clear_all_markers()
for tempfn in self.tempfiles:
os.remove(tempfn)
self.viewer.set_time_range(*self.initial_trange)
def get_tempfile(self):
fh, tempfn = tempfile.mkstemp()
os.close(fh)
self.tempfiles.append(tempfn)
return tempfn
def write_to_input_line(self, text):
'''Emulate typing into the input line and pressing return.'''
pv = self.pile_viewer
il = pv.inputline
QTest.keyPress(pv, ':')
QTest.keyClicks(il, text)
QTest.keyPress(il, Qt.Key_Return)
def clear_all_markers(self):
pv = self.pile_viewer
QTest.keyPress(pv, 'A', Qt.ShiftModifier, 10)
QTest.keyPress(pv, Qt.Key_Backspace)
self.assertEqual(len(pv.viewer.get_markers()), 0)
def trigger_menu_item(self, qmenu, action_text, dialog=False):
''' trigger a QMenu QAction with action_text. '''
for iaction, action in enumerate(qmenu.actions()):
if action.text() == action_text:
if dialog:
def closeDialog():
dlg = self.snuffler.activeModalWidget()
QTest.keyClick(dlg, Qt.Key_Escape)
qc.QTimer.singleShot(150, closeDialog)
action.trigger()
break
def get_slider_position(self, slider):
style = slider.style()
opt = qw.QStyleOptionSlider()
return style.subControlRect(
qw.QStyle.CC_Slider, opt, qw.QStyle.SC_SliderHandle)
def drag_slider(self, slider):
''' Click *slider*, drag from one side to the other, release the mouse
button; repeat to restore the initial state. '''
position = self.get_slider_position(slider)
QTest.mouseMove(slider, pos=position.topLeft())
QTest.mousePress(slider, Qt.LeftButton)
QTest.mouseMove(slider, pos=position.bottomRight())
QTest.mouseRelease(slider, Qt.LeftButton)
QTest.mousePress(slider, Qt.LeftButton)
QTest.mouseMove(slider, pos=position.topLeft())
QTest.mouseRelease(slider, Qt.LeftButton)
def add_one_pick(self):
'''Add a single pick to pile_viewer'''
pv = self.pile_viewer
QTest.mouseDClick(pv.viewer, Qt.LeftButton)
position_tl = pv.pos()
geom = pv.frameGeometry()
QTest.mouseMove(pv.viewer, pos=position_tl)
QTest.mouseMove(pv.viewer, pos=(qc.QPoint(
position_tl.x()+geom.x() // 2, position_tl.y()+geom.y() // 2)))
# This should also be done by mouseDClick().
QTest.mouseRelease(pv.viewer, Qt.LeftButton)
QTest.mouseClick(pv.viewer, Qt.LeftButton)
def test_main_control_sliders(self):
self.drag_slider(self.pile_viewer.highpass_control.slider)
self.drag_slider(self.pile_viewer.lowpass_control.slider)
self.drag_slider(self.pile_viewer.gain_control.slider)
self.drag_slider(self.pile_viewer.rot_control.slider)
def test_inputline(self):
initrange = self.viewer.shown_tracks_range
self.write_to_input_line('hide W.X.Y.Z')
self.write_to_input_line('unhide W.X.Y.Z')
self.pile_viewer.update()
self.write_to_input_line('hide *')
self.pile_viewer.update()
assert(self.viewer.shown_tracks_range == (0, 1))
self.write_to_input_line('unhide')
assert(self.viewer.shown_tracks_range == initrange)
self.write_to_input_line('markers')
self.write_to_input_line('markers 4')
self.write_to_input_line('markers all')
# should error
self.write_to_input_line('scaling 1000.')
self.write_to_input_line('scaling -1000. 1000.')
gotos = ['2015-01-01 00:00:00',
'2015-01-01 00:00',
'2015-01-01 00',
'2015-01-01',
'2015-01',
'2015']
for gt in gotos:
self.write_to_input_line('goto %s' % gt)
# test some invalid input
self.write_to_input_line('asdf')
QTest.keyPress(self.pile_viewer.inputline, Qt.Key_Escape)
def test_drawing_optimization(self):
n = 505
lats = num.random.uniform(-90., 90., n)
lons = num.random.uniform(-180., 180., n)
events = []
for i, (lat, lon) in enumerate(zip(lats, lons)):
events.append(
model.Event(time=i, lat=lat, lon=lon, name='XXXX%s' % i))
self.viewer.add_event(events[-1])
assert len(self.viewer.markers) == 1
self.viewer.add_events(events)
assert len(self.viewer.markers) == n + 1
self.viewer.set_time_range(-500., 5000)
self.viewer.set_time_range(0., None)
self.viewer.set_time_range(None, 0.)
def test_follow(self):
self.viewer.follow(10.)
self.viewer.unfollow()
def test_save_image(self):
tempfn_svg = self.get_tempfile() + '.svg'
self.viewer.savesvg(fn=tempfn_svg)
tempfn_png = self.get_tempfile() + '.png'
self.viewer.savesvg(fn=tempfn_png)
def test_read_events(self):
event = model.Event()
tempfn = self.get_tempfile()
model.event.dump_events([event], tempfn)
self.viewer.read_events(tempfn)
def test_add_remove_stations(self):
n = 10
lats = num.random.uniform(-90., 90., n)
lons = num.random.uniform(-180., 180., n)
stations = [
model.station.Station(network=str(i), station=str(i),
lat=lat, lon=lon) for i, (lat, lon) in
enumerate(zip(lats, lons))
]
tempfn = self.get_tempfile()
model.station.dump_stations(stations, tempfn)
self.viewer.open_stations(fns=[tempfn])
last = stations[-1]
self.assertTrue(self.viewer.has_station(last))
self.viewer.get_station((last.network, last.station))
def test_markers(self):
self.add_one_pick()
pv = self.pile_viewer
self.assertEqual(pv.viewer.get_active_event(), None)
conf = config.config('snuffler')
# test kinds and phases
kinds = range(5)
fkey_map = pyrocko_pile_viewer.fkey_map
for k in kinds:
for fkey, fkey_int in fkey_map.items():
fkey_int += 1
QTest.keyPress(pv, fkey)
QTest.keyPress(pv, str(k))
if fkey_int != 10:
want = conf.phase_key_mapping.get(
"F%s" % fkey_int, 'Undefined')
else:
want = None
m = pv.viewer.get_markers()[0]
self.assertEqual(m.kind, k)
if want:
self.assertEqual(m.get_phasename(), want)
def test_load_waveforms(self):
self.viewer.load('data', regex=r'\w+.mseed')
self.assertFalse(self.viewer.get_pile().is_empty())
def test_add_traces(self):
trs = []
for i in range(3):
trs.append(
trace.Trace(network=str(i), tmin=num.random.uniform(1),
ydata=num.random.random(100),
deltat=num.random.random())
)
self.viewer.add_traces(trs)
def test_event_marker(self):
pv = self.pile_viewer
self.add_one_pick()
# select all markers
QTest.keyPress(pv, 'a', Qt.ShiftModifier, 100)
# convert to EventMarker
QTest.keyPress(pv, 'e')
QTest.keyPress(pv, 'd')
for m in pv.viewer.get_markers():
self.assertTrue(isinstance(m, gui_util.EventMarker))
def test_load_save_markers(self):
nmarkers = 505
times = list(map(util.to_time_float, num.arange(nmarkers)))
markers = [gui_util.Marker(tmin=t, tmax=t,
nslc_ids=[('*', '*', '*', '*'), ])
for t in times]
tempfn = self.get_tempfile()
tempfn_selected = self.get_tempfile()
self.viewer.add_markers(markers)
self.viewer.write_selected_markers(
fn=tempfn_selected)
self.viewer.write_markers(fn=tempfn)
self.viewer.read_markers(fn=tempfn_selected)
self.viewer.read_markers(fn=tempfn)
for k in 'pnPN':
QTest.keyPress(self.pile_viewer, k)
self.viewer.go_to_time(-20., 20)
self.pile_viewer.update()
self.viewer.update()
assert(len(self.viewer.markers) != 0)
assert(len(self.viewer.markers) == nmarkers * 2)
len_before = len(self.viewer.markers)
self.viewer.remove_marker(
self.viewer.markers[0])
assert(len(self.viewer.markers) == len_before-1)
self.viewer.remove_markers(self.viewer.markers)
assert(len(self.viewer.markers) == 0)
def test_actions(self):
# Click through many menu option combinations that do not require
# further interaction. Activate options in pairs of two.
pv = self.pile_viewer
tinit = pv.viewer.tmin
tinitlen = pv.viewer.tmax - pv.viewer.tmin
non_dialog_actions = [
'Indivdual Scale',
'Common Scale',
'Common Scale per Station',
'Common Scale per Component',
'Scaling based on Minimum and Maximum',
'Scaling based on Mean +- 2 x Std. Deviation',
'Scaling based on Mean +- 4 x Std. Deviation',
'Sort by Names',
'Sort by Distance',
'Sort by Azimuth',
'Sort by Distance in 12 Azimuthal Blocks',
'Sort by Backazimuth',
'3D distances',
'Subsort by Network, Station, Location, Channel',
'Subsort by Network, Station, Channel, Location',
'Subsort by Station, Network, Channel, Location',
'Subsort by Location, Network, Station, Channel',
'Subsort by Channel, Network, Station, Location',
'Subsort by Network, Station, Channel (Grouped by Location)',
'Subsort by Station, Network, Channel (Grouped by Location)',
]
dialog_actions = [
'Open waveform files...',
'Open waveform directory...',
'Open station files...',
'Save markers...',
'Save selected markers...',
'Open marker file...',
'Open event file...',
'Save as SVG|PNG',
]
options = [
'Antialiasing',
'Liberal Fetch Optimization',
'Clip Traces',
'Show Boxes',
'Color Traces',
'Show Scale Ranges',
'Show Scale Axes',
'Show Zero Lines',
'Fix Scale Ranges',
'Allow Downsampling',
'Allow Degapping',
'FFT Filtering',
'Bandpass is Lowpass + Highpass',
'Watch Files',
]
# create an event marker and activate it
self.add_one_pick()
keys = list('mAhefrRh+-fgc?')
keys.extend([Qt.Key_PageUp, Qt.Key_PageDown])
def fire_key(key):
QTest.keyPress(self.pile_viewer, key)
for key in keys:
QTest.qWait(100)
fire_key(key)
event = model.Event()
markers = pv.viewer.get_markers()
self.assertEqual(len(markers), 1)
markers[0]._event = event
pv.viewer.set_active_event(event)
pv.viewer.set_event_marker_as_origin()
right_click_menu = self.viewer.menu
for action_text in dialog_actions:
self.trigger_menu_item(right_click_menu, action_text, dialog=True)
for action_text in non_dialog_actions:
for oa in options:
for ob in options:
self.trigger_menu_item(right_click_menu, action_text)
self.trigger_menu_item(right_click_menu, oa)
self.trigger_menu_item(right_click_menu, ob)
options.remove(oa)
self.viewer.go_to_event_by_name(event.name)
self.viewer.go_to_time(tinit, tinitlen)
@unittest.skipIf(
platform.system() != 'Windows' and os.getuid() == 0,
'does not like to run as root')
def test_frames(self):
frame_snuffling = DummySnuffling()
self.viewer.add_snuffling(frame_snuffling)
frame_snuffling.call()
# close three opened frames
QTest.keyPress(self.pile_viewer, 'd')
QTest.keyPress(self.pile_viewer, 'd')
QTest.keyPress(self.pile_viewer, 'd')
if __name__ == '__main__':
util.setup_logging('test_gui', 'warning')
unittest.main()
| pyrocko/pyrocko | test/gui/test_gui.py | Python | gpl-3.0 | 15,550 |
#!/usr/bin/python
# bigcinemas
class InvalidAge(Exception):
def __init__(self,age):
self.age = age
def validate_age(age):
if age < 18:
raise InvalidAge(age)
else:
return "Welcome to the movies!!"
age = int(raw_input("please enter your age:"))
#print validate_age(age)
try:
validate_age(age)
# except Exception as e:
except InvalidAge as e:
print "Buddy!! you are very young at {}!! Grow up a bit.".format(e.age)
else:
print validate_age(age)
| tuxfux-hlp-notes/python-batches | archieves/batch-64/14-oop/sixth.py | Python | gpl-3.0 | 462 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to train CIFAR-10 using a single GPU.
Accuracy:
cifar10_train.py achieves ~86% accuracy after 100K steps (256 epochs of
data) as judged by cifar10_eval.py.
Speed: With batch_size 128.
System | Step Time (sec/batch) | Accuracy
------------------------------------------------------------------
1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)
1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import time
import tensorflow as tf
import cifar10
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 100000, #reduced significantly -daniel
"""Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
def train():
"""Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default():
global_step = tf.contrib.framework.get_or_create_global_step()
# Get images and labels for CIFAR-10.
images, labels = cifar10.distorted_inputs()
# Build a Graph that computes the logits predictions from the
# inference model.
logits = cifar10.inference(images)
# Calculate loss.
loss = cifar10.loss(logits, labels)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op = cifar10.train(loss, global_step)
class _LoggerHook(tf.train.SessionRunHook):
"""Logs loss and runtime."""
def begin(self):
self._step = -1
def before_run(self, run_context):
self._step += 1
self._start_time = time.time()
return tf.train.SessionRunArgs(loss) # Asks for loss value.
def after_run(self, run_context, run_values):
duration = time.time() - self._start_time
loss_value = run_values.results
if self._step % 10 == 0:
num_examples_per_step = FLAGS.batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), self._step, loss_value,
examples_per_sec, sec_per_batch))
with tf.train.MonitoredTrainingSession(
checkpoint_dir=FLAGS.train_dir,
hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
tf.train.NanTensorHook(loss),
_LoggerHook()],
config=tf.ConfigProto(
log_device_placement=FLAGS.log_device_placement)) as mon_sess:
while not mon_sess.should_stop():
mon_sess.run(train_op)
def main(argv=None): # pylint: disable=unused-argument
cifar10.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run() | dpaschall/test_TensorFlow | bin/cifar10test/cifar10_train.py | Python | gpl-3.0 | 4,167 |
from django.apps import AppConfig
class CirculoConfig(AppConfig):
name = 'circulo'
| jstitch/gift_circle | GiftCircle/circulo/apps.py | Python | gpl-3.0 | 89 |
# Generated by Django 2.2 on 2019-06-20 09:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('scoping', '0294_titlevecmodel'),
]
operations = [
migrations.AddField(
model_name='doc',
name='tslug',
field=models.TextField(null=True),
),
]
| mcallaghan/tmv | BasicBrowser/scoping/migrations/0295_doc_tslug.py | Python | gpl-3.0 | 369 |
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from tf2_msgs/FrameGraphRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class FrameGraphRequest(genpy.Message):
_md5sum = "d41d8cd98f00b204e9800998ecf8427e"
_type = "tf2_msgs/FrameGraphRequest"
_has_header = False #flag to mark the presence of a Header object
_full_text = """"""
__slots__ = []
_slot_types = []
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(FrameGraphRequest, self).__init__(*args, **kwds)
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
pass
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
pass
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from tf2_msgs/FrameGraphResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class FrameGraphResponse(genpy.Message):
_md5sum = "437ea58e9463815a0d511c7326b686b0"
_type = "tf2_msgs/FrameGraphResponse"
_has_header = False #flag to mark the presence of a Header object
_full_text = """string frame_yaml
"""
__slots__ = ['frame_yaml']
_slot_types = ['string']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
frame_yaml
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(FrameGraphResponse, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.frame_yaml is None:
self.frame_yaml = ''
else:
self.frame_yaml = ''
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.frame_yaml
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.frame_yaml = str[start:end].decode('utf-8')
else:
self.frame_yaml = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.frame_yaml
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.frame_yaml = str[start:end].decode('utf-8')
else:
self.frame_yaml = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
class FrameGraph(object):
_type = 'tf2_msgs/FrameGraph'
_md5sum = '437ea58e9463815a0d511c7326b686b0'
_request_class = FrameGraphRequest
_response_class = FrameGraphResponse
| UnbDroid/robomagellan | Codigos/Raspberry/desenvolvimentoRos/devel/lib/python2.7/dist-packages/tf2_msgs/srv/_FrameGraph.py | Python | gpl-3.0 | 7,187 |
import re
CJDNS_IP_REGEX = re.compile(r'^fc[0-9a-f]{2}(:[0-9a-f]{4}){7}$', re.IGNORECASE)
class Node(object):
def __init__(self, ip, version=None, label=None):
if not valid_cjdns_ip(ip):
raise ValueError('Invalid IP address')
if not valid_version(version):
raise ValueError('Invalid version')
self.ip = ip
self.version = int(version)
        self.label = label or ip[-4:]
def __lt__(self, b):
return self.ip < b.ip
def __repr__(self):
return 'Node(ip="%s", version=%s, label="%s")' % (
self.ip,
self.version,
self.label)
class Edge(object):
def __init__(self, a, b):
self.a, self.b = sorted([a, b])
def __eq__(self, that):
return self.a.ip == that.a.ip and self.b.ip == that.b.ip
def __repr__(self):
return 'Edge(a.ip="{}", b.ip="{}")'.format(self.a.ip, self.b.ip)
def valid_cjdns_ip(ip):
return CJDNS_IP_REGEX.match(ip)
def valid_version(version):
    try:
        return int(version) < 30
    except (ValueError, TypeError):
        return False
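# Hedged usage sketch (not in the original module): the two addresses below
# are made-up cjdns-style fc00::/8 values that merely satisfy the regex above.
if __name__ == '__main__':
    a = Node('fc00:1111:2222:3333:4444:5555:6666:7777', version=20)
    b = Node('fc11:aaaa:bbbb:cccc:dddd:eeee:ffff:0001', version=19)
    print(sorted([b, a]))  # Node.__lt__ orders by ip, so a comes first
    print(Edge(a, b))      # Edge stores its endpoints sorted by ip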
| vdloo/raptiformica-map | raptiformica_map/graph.py | Python | gpl-3.0 | 1,111 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014-2016 Alberto Gacías <[email protected]>
# Copyright (c) 2015-2016 Jose Antonio Chavarría <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gettext
_ = gettext.gettext
from gi.repository import Gtk
class Console(Gtk.Window):
def __init__(self):
super(Console, self).__init__()
sw = Gtk.ScrolledWindow()
sw.set_policy(
Gtk.PolicyType.AUTOMATIC,
Gtk.PolicyType.AUTOMATIC
)
self.textview = Gtk.TextView()
self.textbuffer = self.textview.get_buffer()
self.textview.set_editable(False)
self.textview.set_wrap_mode(Gtk.WrapMode.WORD)
sw.add(self.textview)
self.set_title(_('Migasfree Console'))
self.set_icon_name('migasfree')
self.resize(640, 420)
self.set_decorated(True)
self.set_border_width(10)
self.connect('delete-event', self.on_click_hide)
box = Gtk.Box(spacing=6, orientation='vertical')
box.pack_start(sw, expand=True, fill=True, padding=0)
self.progress = Gtk.ProgressBar()
self.progress.set_pulse_step(0.02)
progress_box = Gtk.Box(False, 0, orientation='vertical')
progress_box.pack_start(self.progress, False, True, 0)
box.pack_start(progress_box, expand=False, fill=True, padding=0)
self.add(box)
def on_timeout(self, user_data):
self.progress.pulse()
return True
def on_click_hide(self, widget, data=None):
self.hide()
return True
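# Hedged usage sketch (not in the original module): show the console and keep
# its progress bar pulsing; assumes GLib is importable next to Gtk.
if __name__ == '__main__':
    from gi.repository import GLib
    console = Console()
    console.show_all()
    GLib.timeout_add(100, console.on_timeout, None)  # pulse every 100 ms
    Gtk.main()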
| migasfree/migasfree-launcher | migasfree_indicator/console.py | Python | gpl-3.0 | 2,190 |
import numpy as np
import matplotlib.pyplot as plt
import spm1d
#(0) Load dataset:
dataset = spm1d.data.mv1d.cca.Dorn2012()
y,x = dataset.get_data() #A:slow, B:fast
#(1) Conduct non-parametric test:
np.random.seed(0)
alpha = 0.05
two_tailed = False
snpm = spm1d.stats.nonparam.cca(y, x)
snpmi = snpm.inference(alpha, iterations=100)
print( snpmi )
#(2) Compare with parametric result:
spm = spm1d.stats.cca(y, x)
spmi = spm.inference(alpha)
print( spmi )
#(3) Plot
plt.close('all')
plt.figure(figsize=(10,4))
ax0 = plt.subplot(121)
ax1 = plt.subplot(122)
labels = 'Parametric', 'Non-parametric'
for ax,zi,label in zip([ax0,ax1], [spmi,snpmi], labels):
zi.plot(ax=ax)
zi.plot_threshold_label(ax=ax, fontsize=8)
zi.plot_p_values(ax=ax, size=10)
ax.set_title( label )
plt.tight_layout()
plt.show()
| 0todd0000/spm1d | spm1d/examples/nonparam/1d/ex_cca.py | Python | gpl-3.0 | 860 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Question.order'
db.add_column(u'survey_question', 'order',
self.gf('django.db.models.fields.IntegerField')(default=0),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Question.order'
db.delete_column(u'survey_question', 'order')
models = {
u'survey.option': {
'Meta': {'object_name': 'Option'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.SlugField', [], {'max_length': '64'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '254'})
},
u'survey.page': {
'Meta': {'ordering': "['order']", 'unique_together': "(('survey', 'order'),)", 'object_name': 'Page'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']"})
},
u'survey.question': {
'Meta': {'object_name': 'Question'},
'allow_other': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'modalQuestion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']", 'null': 'True', 'blank': 'True'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Option']", 'null': 'True', 'blank': 'True'}),
'options_from_previous_answer': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'options_json': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'randomize_groups': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64'}),
'title': ('django.db.models.fields.TextField', [], {}),
'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'})
},
u'survey.respondant': {
'Meta': {'object_name': 'Respondant'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '254'}),
'responses': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'responses'", 'symmetrical': 'False', 'to': u"orm['survey.Response']"}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']"}),
'ts': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 30, 0, 0)'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "'bc967489-023c-46ce-b396-d209c8323fac'", 'max_length': '36', 'primary_key': 'True'})
},
u'survey.response': {
'Meta': {'object_name': 'Response'},
'answer': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}),
'respondant': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Respondant']"}),
'ts': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 30, 0, 0)'})
},
u'survey.survey': {
'Meta': {'object_name': 'Survey'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'questions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Question']", 'null': 'True', 'through': u"orm['survey.Page']", 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '254'})
}
}
complete_apps = ['survey'] | point97/hapifis | server/apps/survey/migrations/0020_auto__add_field_question_order.py | Python | gpl-3.0 | 4,857 |
#!/usr/bin/env python
##################################################
# Gnuradio Python Flow Graph
# Title: Clock offset corrector
# Author: Piotr Krysik
# Generated: Wed Nov 19 08:38:40 2014
##################################################
from gnuradio import blocks
from gnuradio import filter
from gnuradio import gr
from gnuradio.filter import firdes
import grgsm
import math
class clock_offset_corrector(gr.hier_block2):
def __init__(self, fc=936.6e6, ppm=0, samp_rate_in=1625000.0/6.0*4.0):
gr.hier_block2.__init__(
self, "Clock offset corrector",
gr.io_signature(1, 1, gr.sizeof_gr_complex*1),
gr.io_signature(1, 1, gr.sizeof_gr_complex*1),
)
##################################################
# Parameters
##################################################
self.fc = fc
self.ppm = ppm
self.samp_rate_in = samp_rate_in
##################################################
# Variables
##################################################
self.samp_rate_out = samp_rate_out = samp_rate_in
##################################################
# Blocks
##################################################
self.ppm_in = None;self.message_port_register_hier_out("ppm_in")
self.gsm_controlled_rotator_cc_0 = grgsm.controlled_rotator_cc(0,samp_rate_out)
self.gsm_controlled_const_source_f_0 = grgsm.controlled_const_source_f(ppm)
self.fractional_resampler_xx_0 = filter.fractional_resampler_cc(0, samp_rate_in/samp_rate_out)
self.blocks_multiply_const_vxx_0_0 = blocks.multiply_const_vff((1.0e-6*samp_rate_in/samp_rate_out, ))
self.blocks_multiply_const_vxx_0 = blocks.multiply_const_vff((fc/samp_rate_out*(2*math.pi)/1e6, ))
self.blocks_add_const_vxx_0 = blocks.add_const_vff((samp_rate_in/samp_rate_out, ))
##################################################
# Connections
##################################################
self.connect((self, 0), (self.fractional_resampler_xx_0, 0))
self.connect((self.fractional_resampler_xx_0, 0), (self.gsm_controlled_rotator_cc_0, 0))
self.connect((self.blocks_add_const_vxx_0, 0), (self.fractional_resampler_xx_0, 1))
self.connect((self.blocks_multiply_const_vxx_0_0, 0), (self.blocks_add_const_vxx_0, 0))
self.connect((self.blocks_multiply_const_vxx_0, 0), (self.gsm_controlled_rotator_cc_0, 1))
self.connect((self.gsm_controlled_rotator_cc_0, 0), (self, 0))
self.connect((self.gsm_controlled_const_source_f_0, 0), (self.blocks_multiply_const_vxx_0_0, 0))
self.connect((self.gsm_controlled_const_source_f_0, 0), (self.blocks_multiply_const_vxx_0, 0))
##################################################
# Asynch Message Connections
##################################################
self.msg_connect(self, "ppm_in", self.gsm_controlled_const_source_f_0, "constant_msg")
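        # Hedged note (not in the generated flowgraph): the ppm message drives
        # both corrections at once -- 1e-6*ppm is added to the resampler ratio
        # (time correction) and fc/samp_rate_out*2*pi*1e-6*ppm is fed to the
        # rotator (frequency correction).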
def get_fc(self):
return self.fc
def set_fc(self, fc):
self.fc = fc
self.blocks_multiply_const_vxx_0.set_k((self.fc/self.samp_rate_out*(2*math.pi)/1e6, ))
def get_ppm(self):
return self.ppm
def set_ppm(self, ppm):
self.ppm = ppm
self.gsm_controlled_const_source_f_0.set_constant(self.ppm)
def get_samp_rate_in(self):
return self.samp_rate_in
def set_samp_rate_in(self, samp_rate_in):
self.samp_rate_in = samp_rate_in
self.set_samp_rate_out(self.samp_rate_in)
self.fractional_resampler_xx_0.set_resamp_ratio(self.samp_rate_in/self.samp_rate_out)
self.blocks_multiply_const_vxx_0_0.set_k((1.0e-6*self.samp_rate_in/self.samp_rate_out, ))
self.blocks_add_const_vxx_0.set_k((self.samp_rate_in/self.samp_rate_out, ))
def get_samp_rate_out(self):
return self.samp_rate_out
def set_samp_rate_out(self, samp_rate_out):
self.samp_rate_out = samp_rate_out
self.blocks_multiply_const_vxx_0.set_k((self.fc/self.samp_rate_out*(2*math.pi)/1e6, ))
self.fractional_resampler_xx_0.set_resamp_ratio(self.samp_rate_in/self.samp_rate_out)
self.blocks_multiply_const_vxx_0_0.set_k((1.0e-6*self.samp_rate_in/self.samp_rate_out, ))
self.gsm_controlled_rotator_cc_0.set_samp_rate(self.samp_rate_out)
self.blocks_add_const_vxx_0.set_k((self.samp_rate_in/self.samp_rate_out, ))
| martinjlowm/gr-gsm | python/misc_utils/clock_offset_corrector.py | Python | gpl-3.0 | 4,498 |
#!/usr/bin/python
import numpy as np
mdir = "mesh3d/"
fname = "out_p6-p4-p8"
####################
print "input mesh data file"
f1 = open(mdir+fname+".mesh", 'r')
for line in f1:
if line.startswith("Vertices"): break
pcount = int(f1.next())
xyz = np.empty((pcount, 3), dtype=np.float)
for t in range(pcount):
xyz[t] = map(float,f1.next().split()[0:3])
for line in f1:
if line.startswith("Triangles"): break
trisc = int(f1.next())
tris = np.empty((trisc,4), dtype=int)
for t in range(trisc):
tris[t] = map(int,f1.next().split())
for line in f1:
if line.startswith("Tetrahedra"): break
tetsc = int(f1.next())
tets = np.empty((tetsc,5), dtype=int)
for t in range(tetsc):
tets[t] = map(int,f1.next().split())
f1.close()
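# Hedged note (not in the original script): the parsing above assumes a MEDIT
# .mesh layout, i.e. keyword blocks such as
#   Vertices
#   <count>
#   x y z ref
# followed by Triangles (3 vertex ids + region label) and Tetrahedra
# (4 vertex ids + region label).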
####################
print "identify geometry"
ftype = [('v0', np.int),('v1', np.int),('v2', np.int),('label', 'S2')]
faces = np.empty(trisc/2, dtype=ftype)
for i in range(len(faces)):
faces[i] = (tris[2*i][0],tris[2*i][1],tris[2*i][2],str(tris[2*i][3])+str(tris[2*i+1][3]))
face_list,face_count = np.unique(faces['label'], return_counts=True)
vtype = [('v0', np.int),('v1', np.int),('v2', np.int),('v3', np.int),('label', 'S1')]
vols = np.empty(tetsc, dtype=vtype)
for i in range(tetsc):
vols[i] = (tets[i][0],tets[i][1],tets[i][2],tets[i][3],str(tets[i][4]))
vol_list,vol_count = np.unique(vols['label'], return_counts=True)
####################
print "output vtk data files for faces"
for i, f in enumerate(face_list):
f2 = open(mdir+fname+"_"+face_list[i]+".vtk", 'w')
f2.write("# vtk DataFile Version 2.0\n")
f2.write("mesh data\n")
f2.write("ASCII\n")
f2.write("DATASET UNSTRUCTURED_GRID\n")
f2.write("POINTS "+str(pcount)+" float\n") # overkill, all points!
for v in xyz:
f2.write(str(v[0]-35.33)+' '+str(35.33-v[1])+' '+str(12.36-v[2])+'\n')
f2.write("CELLS "+str(face_count[i])+" "+str(face_count[i]*4)+"\n")
for v in faces:
if v[3] == f:
f2.write("3 "+str(v[0]-1)+' '+str(v[1]-1)+' '+str(v[2]-1)+'\n')
f2.write("CELL_TYPES "+str(face_count[i])+"\n")
for t in range(face_count[i]): f2.write("5 ")
f2.write("\n")
f2.close()
####################
print "output vtk data files for volumes"
for i, f in enumerate(vol_list):
f2 = open(mdir+fname+"_"+vol_list[i]+".vtk", 'w')
f2.write("# vtk DataFile Version 2.0\n")
f2.write("mesh data\n")
f2.write("ASCII\n")
f2.write("DATASET UNSTRUCTURED_GRID\n")
f2.write("POINTS "+str(pcount)+" float\n") # overkill, all points!
for v in xyz:
f2.write(str(v[0]-35.33)+' '+str(35.33-v[1])+' '+str(12.36-v[2])+'\n')
f2.write("CELLS "+str(vol_count[i])+" "+str(vol_count[i]*5)+"\n")
for v in vols:
if v[4] == f:
f2.write("4 "+str(v[0]-1)+' '+str(v[1]-1)+' '+str(v[2]-1)+' '+str(v[3]-1)+'\n')
f2.write("CELL_TYPES "+str(vol_count[i])+"\n")
for t in range(vol_count[i]): f2.write("10 ")
f2.write("\n")
f2.close()
####################
| jrugis/cell_mesh | mesh2vtk.py | Python | gpl-3.0 | 2,909 |
#
# GeoCoon - GIS data analysis library based on Pandas and Shapely
#
# Copyright (C) 2014 by Artur Wroblewski <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from shapely.geometry import Point
from geocoon.sql import read_sql
from geocoon.core import GeoDataFrame, PointSeries
import unittest
from unittest import mock
class SQLTestCase(unittest.TestCase):
"""
    Test GeoCoon SQL routines.
"""
@mock.patch('pandas.io.sql.read_sql')
def test_read_sql(self, f_sql):
"""
Test SQL data frame read
"""
points = Point(1, 1), Point(2, 2), Point(3, 3)
data = {
'a': PointSeries([p.wkb for p in points]),
'b': list(range(3)),
}
data = GeoDataFrame(data)
data = data[['a', 'b']]
f_sql.return_value = data
result = read_sql('query', 'con', geom_col='a')
self.assertEqual(PointSeries, type(result.a))
self.assertEqual(Point, type(result.a[0]))
self.assertEqual(3, len(result.index))
self.assertTrue(all([1, 2, 3] == result.a.x))
self.assertTrue(all([1, 2, 3] == result.a.y))
# vim: sw=4:et:ai
| wrobell/geocoon | geocoon/tests/test_sql.py | Python | gpl-3.0 | 1,771 |
import csv
import decimal
import os
import datetime
from stocker.common.events import EventStreamNew, EventStockOpen, EventStockClose
from stocker.common.orders import OrderBuy, OrderSell
from stocker.common.utils import Stream
class CompanyProcessor(object):
def __init__(self, dirname, company_id):
self.dirname = os.path.join(dirname, company_id)
self.company_id = company_id
def get_dates(self):
files = [os.path.splitext(fi)[0] for fi in os.walk(self.dirname).next()[2]]
return files
def get_row(self, date):
filename = os.path.join(self.dirname, date) + ".csv"
try:
with open(filename, 'r') as f:
for row in reversed(list(csv.reader(f, delimiter=';'))):
try:
desc = row[5]
if desc.startswith('TRANSAKCJA'):
yield (row, self.company_id)
except IndexError:
pass
except IOError as e:
return
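# Hedged note (not in the original module): given the indices used here and in
# __process_row below, each CSV line is assumed to look roughly like
#   time;price;?;amount;?;description
# e.g. "10:15:00;12,34;...;100;...;TRANSAKCJA ...", with ';' separators, a
# comma decimal price, and newest rows first (hence the reversed() above).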
class Processor(object):
def build_stream(self, dirname_in, filename_out):
self.stream = Stream()
self.stream.begin(filename_out)
self.__process_companies(dirname_in)
self.stream.end()
def __process_companies(self, dirname):
companies = []
for company in os.walk(dirname).next()[1]:
companies.append(CompanyProcessor(dirname, company))
dates_set = set()
for company in companies:
dates_set.update(company.get_dates())
dates_ordered = sorted(dates_set, key=lambda date: datetime.datetime.strptime(date, "%Y-%m-%d"))
for date in dates_ordered:
self.__process_date(date, companies)
def __process_date(self, date, companies):
rows = []
correct_generators = []
correct_day = False
generators = [company.get_row(date) for company in companies]
for generator in generators:
try:
row, company_id = generator.next()
row = (company_id, row, generator)
rows.append(row)
correct_generators.append(generator)
except StopIteration as e:
pass
if correct_generators:
# correct day (have transactions)
correct_day = True
if correct_day:
self.stream.add_event(EventStockOpen(
datetime.datetime.combine(datetime.datetime.strptime(date, "%Y-%m-%d"), datetime.time(9, 0))))
# main loop, multiplexing rows
while correct_generators:
row_data = min(rows, key=lambda row: datetime.datetime.strptime(row[1][0], "%H:%M:%S"))
rows.remove(row_data)
company_id, row, generator = row_data
self.__process_row(row, date, company_id)
try:
row, company_id = generator.next()
row = (company_id, row, generator)
rows.append(row)
except StopIteration as e:
correct_generators.remove(generator)
if correct_day:
self.stream.add_event(EventStockClose(
datetime.datetime.combine(datetime.datetime.strptime(date, "%Y-%m-%d"), datetime.time(18, 0))))
def __process_row(self, row, date, company_id):
amount = int(row[3])
limit_price = decimal.Decimal(row[1].replace(',', '.'))
timestamp = datetime.datetime.strptime("%s %s" % (date, row[0]), "%Y-%m-%d %H:%M:%S")
expiration_date = timestamp + datetime.timedelta(days=1)
self.stream.add_event(
EventStreamNew(timestamp, OrderBuy(company_id, amount, limit_price, expiration_date)))
self.stream.add_event(
EventStreamNew(timestamp, OrderSell(company_id, amount, limit_price, expiration_date)))
| donpiekarz/Stocker | stocker/SEP/processor.py | Python | gpl-3.0 | 3,868 |
#! /usr/bin/python
import sys
sys.path.append('../')
from toolbox.hreaders import token_readers as reader
from toolbox.hreducers import list_reducer as reducer
SOLO_FACTURA = False
def reduction(x,y):
v1 = x.split(',')
v2 = y.split(',')
r = x if int(v1[1])>=int(v2[1]) else y
return r
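# Hedged illustration (not in the original): each value handled below is
# assumed to be "invoice,amount" (e.g. "F001,250"), read from stdin lines of
# the form "key<TAB>invoice,amount"; reduction() keeps, per key, the invoice
# with the largest integer amount.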
_reader = reader.Token_reader("\t",1)
_reducer = reducer.List_reducer(reduction) #x: previous reduction result, y: next element
if SOLO_FACTURA:
for line in sys.stdin:
key, value = _reader.read_all(line)
K,V = _reducer.reduce(key,value)
if K:
print '{}\t{}'.format(V.split(',')[0],V.split(',')[1])
V = _reducer.out.split(',')
print '{}\t{}'.format(V[0],V[1])
else:
for line in sys.stdin:
key, value = _reader.read_all(line)
K,V = _reducer.reduce(key,value)
if K:
print '{}\t{}'.format(K,V)
print '{}\t{}'.format(key,V) | xavi783/u-tad | Modulo4/ejercicio3/reducer.py | Python | gpl-3.0 | 915 |
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Table, TableStyle, PageBreak
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.rl_config import defaultPageSize
from reportlab.lib.units import cm
import operator
import os
import ConfigParser
import string
config = ConfigParser.ConfigParser()
config.read(os.environ["HOME"] + "/.abook/addressbook")
config.remove_section('format')
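# Hedged illustration (not in the original script): abook keeps one INI
# section per contact, so the file read above is assumed to look like
#   [0]
#   name=Ada Lovelace
#   address=12 Example Street
#   zip=12345
#   city=London
#   phone=+44 20 0000 0000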
PAGE_HEIGHT=defaultPageSize[1]; PAGE_WIDTH=defaultPageSize[0]
styles = getSampleStyleSheet()
buchstabe = "A"
Title = "Hello world"
pageinfo = "platypus example"
def Pages(canvas, doc):
canvas.saveState()
canvas.restoreState()
def go(buchstabe):
doc = SimpleDocTemplate("phello.pdf")
Story = []
style = styles["Normal"]
addresses=[]
for s in config.sections():
nb=""
ub=""
mb=""
if config.has_option(s,'name'):
nb = "<b>" + config.get(s,'name') + "</b><br/>"
worte=config.get(s,'name').split()
print len(worte)
if len(worte)<2:
nachname=worte[0]
else:
nachname=worte[1]
anfangsbuchstabe=nachname[0:1]
if anfangsbuchstabe!=buchstabe:
buchstabe=anfangsbuchstabe
print buchstabe
p = Table(addresses)
p.setStyle(TableStyle([('VALIGN',(0,0),(-1,-1),"TOP"),
('ALIGN',(0,-1),(0,-1),'RIGHT')]))
Story.append(p)
Story.append(PageBreak())
addresses=[]
if config.has_option(s,'address'):
nb = nb + config.get(s,'address') + "<br/>"
if config.has_option(s,'zip'):
nb = nb + config.get(s,'zip') + " "
if config.has_option(s,'city'):
nb = nb + config.get(s,'city') + "<br/>"
if config.has_option(s,'state'):
nb = nb + config.get(s,'state') + " - "
if config.has_option(s,'country'):
nb = nb + config.get(s,'country') + "<br/>"
nb = nb +"<br/>"
if config.has_option(s,'phone'):
ub= "Fon: " + config.get(s,'phone') + "<br/>"
if config.has_option(s,'mobile'):
ub= ub + "Mobi: " + config.get(s,'mobile') + "<br/>"
if config.has_option(s,'email'):
ub= ub + config.get(s,'email').replace(',','<br/>') + "<br/>"
ub=ub+"<br/>"
if config.has_option(s,'custom3'):
mb= config.get(s,'custom3') + "<br/>"
mb=mb+"<br/>"
nameblock = Paragraph(nb,style)
numberblock = Paragraph(ub,style)
middleblock = Paragraph(mb,style)
addresses.append([nameblock,middleblock,numberblock])
p = Table(addresses)
p.setStyle(TableStyle([('VALIGN',(0,0),(-1,-1),"TOP"),
('ALIGN',(0,-1),(0,-1),'RIGHT')]))
Story.append(p)
doc.build(Story, onFirstPage=Pages, onLaterPages=Pages)
go(buchstabe)
| andydrop/x17papertrail | abook2pdf.py | Python | gpl-3.0 | 3,106 |
# coding=utf8
r"""
csection.py -- Create a tree of contents organized by sections, with each
exercise's unique_name stored inside its section.
AUTHOR:
- Pedro Cruz (2012-01): initial version
- Pedro Cruz (2016-03): improvement for smc
An exercise can contain, in its %summary tag line, a description of its section
in the form::
%summary section descriptive text; subsection descriptive text; etc
The class transforms the contents of a MegUA database into a tree of sections with exercises as leaves.
Then, this tree can be flushed out to some file or output system.
STRUCTURE SAMPLE::
contents -> { 'Section1': Section('Section1',0), 'Section2': Section('Section2',0) }
For each Section object see below in this file.
A brief description is:
* a SectionClassifier is the "book": a dictionary whose keys are chapter names and whose values are Section objects.
* a Section object is defined by
* a name (the key of the SectionClassifier appears again in sec_name)
* a level (0 for top-level sections, i.e. chapters)
* a list of exercises belonging to the section and
* a dictionary of subsections (again Section objects)
* Section = (sec_name, level, [list of exercises names], dict( subsections ) )
EXAMPLES:
Test with:
::
sage -t csection.py
Create or edit a database:
::
sage: from megua.megbook import MegBook
sage: meg = MegBook(r'_input/csection.sqlite')
Save a new or changed exercise
::
sage: txt=r'''
....: %Summary Primitives; Imediate primitives; Trigonometric
....:
....: Here, is a summary.
....:
....: %Problem Some Name
....: What is the primitive of $a x + b@()$ ?
....:
....: %Answer
....: The answer is $prim+C$, for $C in \mathbb{R}$.
....:
....: class E28E28_pimtrig_001(ExerciseBase):
....: pass
....: '''
sage: meg.save(txt)
-------------------------------
Instance of: E28E28_pimtrig_001
-------------------------------
==> Summary:
Here, is a summary.
==> Problem instance
What is the primitive of $a x + b$ ?
==> Answer instance
The answer is $prim+C$, for $C in \mathbb{R}$.
sage: txt=r'''
....: %Summary Primitives; Imediate primitives; Trigonometric
....:
....: Here, is a summary.
....:
....: %Problem Some Name2
....: What is the primitive of $a x + b@()$ ?
....:
....: %Answer
....: The answer is $prim+C$, for $C in \mathbb{R}$.
....:
....: class E28E28_pimtrig_002(ExerciseBase):
....: pass
....: '''
sage: meg.save(txt)
-------------------------------
Instance of: E28E28_pimtrig_002
-------------------------------
==> Summary:
Here, is a summary.
==> Problem instance
What is the primitive of $a x + b$ ?
==> Answer instance
The answer is $prim+C$, for $C in \mathbb{R}$.
sage: txt=r'''
....: %Summary Primitives; Imediate primitives; Polynomial
....:
....: Here, is a summary.
....:
....: %Problem Some Problem 1
....: What is the primitive of $a x + b@()$ ?
....:
....: %Answer
....: The answer is $prim+C$, for $C in \mathbb{R}$.
....:
....: class E28E28_pdirect_001(ExerciseBase):
....: pass
....: '''
sage: meg.save(txt)
-------------------------------
Instance of: E28E28_pdirect_001
-------------------------------
==> Summary:
Here, is a summary.
==> Problem instance
What is the primitive of $a x + b$ ?
==> Answer instance
The answer is $prim+C$, for $C in \mathbb{R}$.
sage: txt=r'''
....: %Summary
....:
....: Here, is a summary.
....:
....: %Problem
....: What is the primitive of $a x + b@()$ ?
....:
....: %Answer
....: The answer is $prim+C$, for $C in \mathbb{R}$.
....:
....: class E28E28_pdirect_003(ExerciseBase):
....: pass
....: '''
sage: meg.save(txt)
Each exercise can belong to a section/subsection/subsubsection.
Write sections using ';' in the '%summary' line. For ex., '%summary Section; Subsection; Subsubsection'.
<BLANKLINE>
Each problem can have a suggestive name.
Write in the '%problem' line a name, for ex., '%problem The Fish Problem'.
<BLANKLINE>
Check exercise E28E28_pdirect_003 for the above warnings.
-------------------------------
Instance of: E28E28_pdirect_003
-------------------------------
==> Summary:
Here, is a summary.
==> Problem instance
What is the primitive of $a x + b$ ?
==> Answer instance
The answer is $prim+C$, for $C in \mathbb{R}$.
Travel down the tree sections:
::
sage: s = SectionClassifier(meg.megbook_store)
sage: s.textprint()
Primitives
Imediate primitives
Polynomial
> E28E28_pdirect_001
Trigonometric
> E28E28_pimtrig_001
> E28E28_pimtrig_002
E28E28_pdirect
> E28E28_pdirect_003
Testing a recursive iterator:
::
sage: meg = MegBook("_input/paula.sqlite")
sage: s = SectionClassifier(meg.megbook_store)
sage: for section in s.section_iterator():
....: print section
"""
#*****************************************************************************
# Copyright (C) 2011,2016 Pedro Cruz <[email protected]>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#*****************************************************************************
#PYHTON modules
import collections
#MEGUA modules
from megua.localstore import ExIter
class SectionClassifier:
"""
"""
def __init__(self,megbook_store,max_level=4,debug=False,exerset=None):
#save megstore reference
self.megbook_store = megbook_store
self.max_level = max_level
#Exercise set or none for all
self.exercise_set = exerset
#dictionary of sections
self.contents = dict()
self.classify()
def classify(self):
"""
Classify by sections.
"""
for row in ExIter(self.megbook_store):
if self.exercise_set and not row['unique_name'] in self.exercise_set:
continue
#get a list in form ["section", "subsection", "subsubsection", ...]
sec_list = str_to_list(row['sections_text'])
if sec_list == [] or sec_list == [u'']:
sec_list = [ first_part(row['unique_name']) ]
#sec_list contain at least one element.
if not sec_list[0] in self.contents:
self.contents[sec_list[0]] = Section(sec_list[0])
#sec_list contains less than `max_level` levels
subsec_list = sec_list[1:self.max_level]
self.contents[sec_list[0]].add(row['unique_name'],subsec_list)
def textprint(self):
"""
Textual print of all the contents.
"""
for c in self.contents:
self.contents[c].textprint()
def section_iterator(self):
r"""
OUTPUT:
- an iterator yielding (secname, sorted exercises)
"""
        # A stack-based (non-recursive) traversal of the section tree.
od_top = collections.OrderedDict(sorted(self.contents.items()))
stack = []
for secname,section in od_top.iteritems():
stack.append(section)
while stack:
section_top = stack.pop(0) #remove left element
yield section_top
od_sub = collections.OrderedDict(sorted(section_top.subsections.items()))
desc = []
for secname,section in od_sub.iteritems():
desc.append(section)
            stack[:0] = desc  # add elements from desc at the left (":0")
class Section:
r"""
Section = (sec_name, level, [list of exercises names], dict( subsections ) )
"""
def __init__(self,sec_name,level=0):
self.sec_name = sec_name
self.level = level
#Exercises of this section (self).
self.exercises = []
#This section (self) can have subsections.
self.subsections = dict()
def __str__(self):
return self.level*" " + self.sec_name.encode("utf8") + " has " + str(len(self.exercises))
def __repr__(self):
return self.level*" " + self.sec_name.encode("utf8") + " has " + str(len(self.exercises))
def add(self,exname,sections):
r"""
        Recursive function to add an exercise to the section tree, descending into subsections. """
if sections == []:
self.exercises.append(exname)
self.exercises.sort()
return
if not sections[0] in self.subsections:
self.subsections[sections[0]] = Section(sections[0],self.level+1)
self.subsections[sections[0]].add(exname,sections[1:])
def textprint(self):
"""
Textual print of the contents of this section and, recursivly, of the subsections.
"""
sp = " "*self.level
print sp + self.sec_name
for e in self.exercises:
print sp+r"> "+e
for sub in self.subsections:
self.subsections[sub].textprint()
def str_to_list(s):
"""
Convert::
'section description; subsection description; subsubsection description'
into::
[ 'section description', 'subsection description', 'subsubsection description']
"""
sl = s.split(';')
for i in range(len(sl)):
sl[i] = sl[i].strip()
return sl
def first_part(s):
"""
    Usually exercises are named like `E12X34_name_001` and this routine extracts `E12X34`, or `top` if nothing can be extracted.
"""
p = s.find("_")
p = s.find("_",p+1)
if p!=-1:
s = s[:p]
if s=='':
s = 'top'
return s
| jpedroan/megua | megua/csection.py | Python | gpl-3.0 | 10,442 |
# Copyright 2008 Dan Smith <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import struct
import re
import time
import logging
from chirp import chirp_common, errors, util, memmap
from chirp.settings import RadioSetting, RadioSettingGroup, \
RadioSettingValueBoolean, RadioSettings
LOG = logging.getLogger(__name__)
CMD_CLONE_OUT = 0xE2
CMD_CLONE_IN = 0xE3
CMD_CLONE_DAT = 0xE4
CMD_CLONE_END = 0xE5
SAVE_PIPE = None
class IcfFrame:
"""A single ICF communication frame"""
src = 0
dst = 0
cmd = 0
payload = ""
def __str__(self):
addrs = {0xEE: "PC",
0xEF: "Radio"}
cmds = {0xE0: "ID",
0xE1: "Model",
0xE2: "Clone out",
0xE3: "Clone in",
0xE4: "Clone data",
0xE5: "Clone end",
0xE6: "Clone result"}
return "%s -> %s [%s]:\n%s" % (addrs[self.src], addrs[self.dst],
cmds[self.cmd],
util.hexprint(self.payload))
def __init__(self):
pass
def parse_frame_generic(data):
"""Parse an ICF frame of unknown type from the beginning of @data"""
frame = IcfFrame()
frame.src = ord(data[2])
frame.dst = ord(data[3])
frame.cmd = ord(data[4])
try:
end = data.index("\xFD")
except ValueError:
return None, data
frame.payload = data[5:end]
return frame, data[end+1:]
class RadioStream:
"""A class to make reading a stream of IcfFrames easier"""
def __init__(self, pipe):
self.pipe = pipe
self.data = ""
def _process_frames(self):
if not self.data.startswith("\xFE\xFE"):
LOG.error("Out of sync with radio:\n%s" % util.hexprint(self.data))
raise errors.InvalidDataError("Out of sync with radio")
elif len(self.data) < 5:
return [] # Not enough data for a full frame
frames = []
while self.data:
try:
cmd = ord(self.data[4])
except IndexError:
break # Out of data
try:
frame, rest = parse_frame_generic(self.data)
if not frame:
break
elif frame.src == 0xEE and frame.dst == 0xEF:
# PC echo, ignore
pass
else:
frames.append(frame)
self.data = rest
except errors.InvalidDataError, e:
LOG.error("Failed to parse frame (cmd=%i): %s" % (cmd, e))
return []
return frames
def get_frames(self, nolimit=False):
"""Read any pending frames from the stream"""
while True:
_data = self.pipe.read(64)
if not _data:
break
else:
self.data += _data
if not nolimit and len(self.data) > 128 and "\xFD" in self.data:
break # Give us a chance to do some status
if len(self.data) > 1024:
break # Avoid an endless loop of chewing garbage
if not self.data:
return []
return self._process_frames()
def get_model_data(radio, mdata="\x00\x00\x00\x00"):
"""Query the @radio for its model data"""
send_clone_frame(radio, 0xe0, mdata, raw=True)
stream = RadioStream(radio.pipe)
frames = stream.get_frames()
if len(frames) != 1:
raise errors.RadioError("Unexpected response from radio")
return frames[0].payload
def get_clone_resp(pipe, length=None, max_count=None):
"""Read the response to a clone frame"""
def exit_criteria(buf, length, cnt, max_count):
"""Stop reading a clone response if we have enough data or encounter
the end of a frame"""
if max_count is not None:
if cnt >= max_count:
return True
if length is None:
return buf.endswith("\xfd")
else:
return len(buf) == length
resp = ""
cnt = 0
while not exit_criteria(resp, length, cnt, max_count):
resp += pipe.read(1)
cnt += 1
return resp
def send_clone_frame(radio, cmd, data, raw=False, checksum=False):
"""Send a clone frame with @cmd and @data to the @radio"""
payload = radio.get_payload(data, raw, checksum)
frame = "\xfe\xfe\xee\xef%s%s\xfd" % (chr(cmd), payload)
if SAVE_PIPE:
LOG.debug("Saving data...")
SAVE_PIPE.write(frame)
# LOG.debug("Sending:\n%s" % util.hexprint(frame))
# LOG.debug("Sending:\n%s" % util.hexprint(hed[6:]))
if cmd == 0xe4:
# Uncomment to avoid cloning to the radio
# return frame
pass
radio.pipe.write(frame)
if radio.MUNCH_CLONE_RESP:
# Do max 2*len(frame) read(1) calls
get_clone_resp(radio.pipe, max_count=2*len(frame))
return frame
def process_data_frame(radio, frame, _mmap):
"""Process a data frame, adding the payload to @_mmap"""
_data = radio.process_frame_payload(frame.payload)
# Checksum logic added by Rick DeWitt, 9/2019, issue # 7075
if len(_mmap) >= 0x10000: # This map size not tested for checksum
saddr, = struct.unpack(">I", _data[0:4])
length, = struct.unpack("B", _data[4])
data = _data[5:5+length]
sumc, = struct.unpack("B", _data[5+length])
addr1, = struct.unpack("B", _data[0])
addr2, = struct.unpack("B", _data[1])
addr3, = struct.unpack("B", _data[2])
addr4, = struct.unpack("B", _data[3])
else: # But this one has been tested for raw mode radio (IC-2730)
saddr, = struct.unpack(">H", _data[0:2])
length, = struct.unpack("B", _data[2])
data = _data[3:3+length]
sumc, = struct.unpack("B", _data[3+length])
addr1, = struct.unpack("B", _data[0])
addr2, = struct.unpack("B", _data[1])
addr3 = 0
addr4 = 0
cs = addr1 + addr2 + addr3 + addr4 + length
for byte in data:
cs += ord(byte)
vx = ((cs ^ 0xFFFF) + 1) & 0xFF
if sumc != vx:
LOG.error("Bad checksum in address %04X frame: %02x "
"calculated, %02x sent!" % (saddr, vx, sumc))
raise errors.InvalidDataError(
"Checksum error in download! "
"Try disabling High Speed Clone option in Settings.")
try:
_mmap[saddr] = data
except IndexError:
        LOG.error("Error trying to set %i bytes at %05x (max %05x)" %
                  (length, saddr, len(_mmap)))
return saddr, saddr + length
def start_hispeed_clone(radio, cmd):
"""Send the magic incantation to the radio to go fast"""
buf = ("\xFE" * 20) + \
"\xEE\xEF\xE8" + \
radio.get_model() + \
"\x00\x00\x02\x01\xFD"
LOG.debug("Starting HiSpeed:\n%s" % util.hexprint(buf))
radio.pipe.write(buf)
radio.pipe.flush()
resp = radio.pipe.read(128)
LOG.debug("Response:\n%s" % util.hexprint(resp))
LOG.info("Switching to 38400 baud")
radio.pipe.baudrate = 38400
buf = ("\xFE" * 14) + \
"\xEE\xEF" + \
chr(cmd) + \
radio.get_model()[:3] + \
"\x00\xFD"
LOG.debug("Starting HiSpeed Clone:\n%s" % util.hexprint(buf))
radio.pipe.write(buf)
radio.pipe.flush()
def _clone_from_radio(radio):
md = get_model_data(radio)
if md[0:4] != radio.get_model():
LOG.info("This model: %s" % util.hexprint(md[0:4]))
LOG.info("Supp model: %s" % util.hexprint(radio.get_model()))
raise errors.RadioError("I can't talk to this model")
if radio.is_hispeed():
start_hispeed_clone(radio, CMD_CLONE_OUT)
else:
send_clone_frame(radio, CMD_CLONE_OUT, radio.get_model(), raw=True)
LOG.debug("Sent clone frame")
stream = RadioStream(radio.pipe)
addr = 0
_mmap = memmap.MemoryMap(chr(0x00) * radio.get_memsize())
last_size = 0
while True:
frames = stream.get_frames()
if not frames:
break
for frame in frames:
if frame.cmd == CMD_CLONE_DAT:
src, dst = process_data_frame(radio, frame, _mmap)
if last_size != (dst - src):
LOG.debug("ICF Size change from %i to %i at %04x" %
(last_size, dst - src, src))
last_size = dst - src
if addr != src:
LOG.debug("ICF GAP %04x - %04x" % (addr, src))
addr = dst
elif frame.cmd == CMD_CLONE_END:
LOG.debug("End frame (%i):\n%s" %
(len(frame.payload), util.hexprint(frame.payload)))
LOG.debug("Last addr: %04x" % addr)
if radio.status_fn:
status = chirp_common.Status()
status.msg = "Cloning from radio"
status.max = radio.get_memsize()
status.cur = addr
radio.status_fn(status)
return _mmap
def clone_from_radio(radio):
"""Do a full clone out of the radio's memory"""
try:
return _clone_from_radio(radio)
except Exception, e:
raise errors.RadioError("Failed to communicate with the radio: %s" % e)
def send_mem_chunk(radio, start, stop, bs=32):
"""Send a single chunk of the radio's memory from @start-@stop"""
_mmap = radio.get_mmap()
status = chirp_common.Status()
status.msg = "Cloning to radio"
status.max = radio.get_memsize()
for i in range(start, stop, bs):
if i + bs < stop:
size = bs
else:
size = stop - i
if radio.get_memsize() >= 0x10000:
chunk = struct.pack(">IB", i, size)
else:
chunk = struct.pack(">HB", i, size)
chunk += _mmap[i:i+size]
send_clone_frame(radio,
CMD_CLONE_DAT,
chunk,
raw=False,
checksum=True)
if radio.status_fn:
status.cur = i+bs
radio.status_fn(status)
return True
def _clone_to_radio(radio):
global SAVE_PIPE
# Uncomment to save out a capture of what we actually write to the radio
# SAVE_PIPE = file("pipe_capture.log", "w", 0)
md = get_model_data(radio)
if md[0:4] != radio.get_model():
raise errors.RadioError("I can't talk to this model")
# This mimics what the Icom software does, but isn't required and just
# takes longer
# md = get_model_data(radio, mdata=md[0:2]+"\x00\x00")
# md = get_model_data(radio, mdata=md[0:2]+"\x00\x00")
stream = RadioStream(radio.pipe)
if radio.is_hispeed():
start_hispeed_clone(radio, CMD_CLONE_IN)
else:
send_clone_frame(radio, CMD_CLONE_IN, radio.get_model(), raw=True)
frames = []
for start, stop, bs in radio.get_ranges():
if not send_mem_chunk(radio, start, stop, bs):
break
frames += stream.get_frames()
send_clone_frame(radio, CMD_CLONE_END, radio.get_endframe(), raw=True)
if SAVE_PIPE:
SAVE_PIPE.close()
SAVE_PIPE = None
for i in range(0, 10):
try:
frames += stream.get_frames(True)
result = frames[-1]
except IndexError:
LOG.debug("Waiting for clone result...")
time.sleep(0.5)
if len(frames) == 0:
raise errors.RadioError("Did not get clone result from radio")
return result.payload[0] == '\x00'
def clone_to_radio(radio):
"""Initiate a full memory clone out to @radio"""
try:
return _clone_to_radio(radio)
except Exception, e:
logging.exception("Failed to communicate with the radio")
raise errors.RadioError("Failed to communicate with the radio: %s" % e)
def convert_model(mod_str):
"""Convert an ICF-style model string into what we get from the radio"""
data = ""
for i in range(0, len(mod_str), 2):
hexval = mod_str[i:i+2]
intval = int(hexval, 16)
data += chr(intval)
return data
def convert_data_line(line):
"""Convert an ICF data line to raw memory format"""
if line.startswith("#"):
return ""
line = line.strip()
if len(line) == 38:
# Small memory (< 0x10000)
size = int(line[4:6], 16)
data = line[6:]
else:
# Large memory (>= 0x10000)
size = int(line[8:10], 16)
data = line[10:]
_mmap = ""
i = 0
while i < (size * 2):
try:
val = int("%s%s" % (data[i], data[i+1]), 16)
i += 2
_mmap += struct.pack("B", val)
except ValueError, e:
LOG.debug("Failed to parse byte: %s" % e)
break
return _mmap
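# Hedged example (not in the original driver): a small-memory ICF line is
# addr(4 hex) + size(2 hex) + payload(2*size hex), so a 38-character line
# carries 16 bytes, e.g.:
#   convert_data_line("000010" + "AB" * 16)  # -> '\xab' * 16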
def read_file(filename):
"""Read an ICF file and return the model string and memory data"""
f = file(filename)
mod_str = f.readline()
dat = f.readlines()
model = convert_model(mod_str.strip())
_mmap = ""
for line in dat:
if not line.startswith("#"):
_mmap += convert_data_line(line)
return model, memmap.MemoryMap(_mmap)
def is_9x_icf(filename):
"""Returns True if @filename is an IC9x ICF file"""
f = file(filename)
mdata = f.read(8)
f.close()
return mdata in ["30660000", "28880000"]
def is_icf_file(filename):
"""Returns True if @filename is an ICF file"""
f = file(filename)
data = f.readline()
data += f.readline()
f.close()
data = data.replace("\n", "").replace("\r", "")
return bool(re.match("^[0-9]{8}#", data))
class IcomBank(chirp_common.Bank):
"""A bank that works for all Icom radios"""
# Integral index of the bank (not to be confused with per-memory
# bank indexes
index = 0
class IcomNamedBank(IcomBank):
"""A bank with an adjustable name"""
def set_name(self, name):
"""Set the name of the bank"""
pass
class IcomBankModel(chirp_common.BankModel):
"""Icom radios all have pretty much the same simple bank model. This
central implementation can, with a few icom-specific radio interfaces
serve most/all of them"""
def get_num_mappings(self):
return self._radio._num_banks
def get_mappings(self):
banks = []
for i in range(0, self._radio._num_banks):
index = chr(ord("A") + i)
bank = self._radio._bank_class(self, index, "BANK-%s" % index)
bank.index = i
banks.append(bank)
return banks
def add_memory_to_mapping(self, memory, bank):
self._radio._set_bank(memory.number, bank.index)
def remove_memory_from_mapping(self, memory, bank):
if self._radio._get_bank(memory.number) != bank.index:
raise Exception("Memory %i not in bank %s. Cannot remove." %
(memory.number, bank))
self._radio._set_bank(memory.number, None)
def get_mapping_memories(self, bank):
memories = []
for i in range(*self._radio.get_features().memory_bounds):
if self._radio._get_bank(i) == bank.index:
memories.append(self._radio.get_memory(i))
return memories
def get_memory_mappings(self, memory):
index = self._radio._get_bank(memory.number)
if index is None:
return []
else:
return [self.get_mappings()[index]]
class IcomIndexedBankModel(IcomBankModel,
chirp_common.MappingModelIndexInterface):
"""Generic bank model for Icom radios with indexed banks"""
def get_index_bounds(self):
return self._radio._bank_index_bounds
def get_memory_index(self, memory, bank):
return self._radio._get_bank_index(memory.number)
def set_memory_index(self, memory, bank, index):
if bank not in self.get_memory_mappings(memory):
raise Exception("Memory %i is not in bank %s" % (memory.number,
bank))
if index not in range(*self._radio._bank_index_bounds):
raise Exception("Invalid index")
self._radio._set_bank_index(memory.number, index)
def get_next_mapping_index(self, bank):
indexes = []
for i in range(*self._radio.get_features().memory_bounds):
if self._radio._get_bank(i) == bank.index:
indexes.append(self._radio._get_bank_index(i))
for i in range(0, 256):
if i not in indexes:
return i
raise errors.RadioError("Out of slots in this bank")
def compute_checksum(data):
cs = 0
for byte in data:
cs += ord(byte)
return ((cs ^ 0xFFFF) + 1) & 0xFF
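# Hedged example (not in the original driver): the checksum is the two's
# complement of the byte sum, so payload plus checksum sums to 0 mod 256:
#   compute_checksum("\x01\x02") == 0xfd  # (0x01 + 0x02 + 0xfd) & 0xff == 0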
class IcomCloneModeRadio(chirp_common.CloneModeRadio):
"""Base class for Icom clone-mode radios"""
VENDOR = "Icom"
BAUDRATE = 9600
# Ideally, the driver should read clone response after each clone frame
# is sent, but for some reason it hasn't behaved this way for years.
# So not to break the existing tested drivers the MUNCH_CLONE_RESP flag
# was added. It's False by default which brings the old behavior,
# i.e. clone response is not read. The expectation is that new Icom
# drivers will use MUNCH_CLONE_RESP = True and old drivers will be
# gradually migrated to this. Once all Icom drivers will use
# MUNCH_CLONE_RESP = True, this flag will be removed.
MUNCH_CLONE_RESP = False
_model = "\x00\x00\x00\x00" # 4-byte model string
_endframe = "" # Model-unique ending frame
_ranges = [] # Ranges of the mmap to send to the radio
_num_banks = 10 # Most simple Icoms have 10 banks, A-J
_bank_index_bounds = (0, 99)
_bank_class = IcomBank
_can_hispeed = False
@classmethod
def is_hispeed(cls):
"""Returns True if the radio supports hispeed cloning"""
return cls._can_hispeed
@classmethod
def get_model(cls):
"""Returns the Icom model data for this radio"""
return cls._model
@classmethod
def get_endframe(cls):
"""Returns the magic clone end frame for this radio"""
return cls._endframe
@classmethod
def get_ranges(cls):
"""Returns the ranges this radio likes to have in a clone"""
return cls._ranges
def process_frame_payload(self, payload):
"""Convert BCD-encoded data to raw"""
bcddata = payload
data = ""
i = 0
while i+1 < len(bcddata):
try:
val = int("%s%s" % (bcddata[i], bcddata[i+1]), 16)
i += 2
data += struct.pack("B", val)
except ValueError, e:
LOG.error("Failed to parse byte: %s" % e)
break
return data
def get_payload(self, data, raw, checksum):
"""Returns the data with optional checksum BCD-encoded for the radio"""
if raw:
return data
payload = ""
for byte in data:
payload += "%02X" % ord(byte)
if checksum:
payload += "%02X" % compute_checksum(data)
return payload
def sync_in(self):
self._mmap = clone_from_radio(self)
self.process_mmap()
def sync_out(self):
clone_to_radio(self)
def get_bank_model(self):
rf = self.get_features()
if rf.has_bank:
if rf.has_bank_index:
return IcomIndexedBankModel(self)
else:
return IcomBankModel(self)
else:
return None
# Icom-specific bank routines
def _get_bank(self, loc):
"""Get the integral bank index of memory @loc, or None"""
raise Exception("Not implemented")
def _set_bank(self, loc, index):
"""Set the integral bank index of memory @loc to @index, or
no bank if None"""
raise Exception("Not implemented")
def get_settings(self):
return make_speed_switch_setting(self)
def set_settings(self, settings):
return honor_speed_switch_setting(self, settings)
def flip_high_order_bit(data):
return [chr(ord(d) ^ 0x80) for d in list(data)]
def escape_raw_byte(byte):
"""Escapes a raw byte for sending to the radio"""
# Certain bytes are used as control characters to the radio, so if one of
# these bytes is present in the stream to the radio, it gets escaped as
# 0xff followed by (byte & 0x0f)
if ord(byte) > 0xf9:
return "\xff%s" % (chr(ord(byte) & 0xf))
return byte
def unescape_raw_bytes(escaped_data):
"""Unescapes raw bytes from the radio."""
data = ""
i = 0
while i < len(escaped_data):
byte = escaped_data[i]
if byte == '\xff':
if i + 1 >= len(escaped_data):
raise errors.InvalidDataError(
"Unexpected escape character at end of data")
i += 1
byte = chr(0xf0 | ord(escaped_data[i]))
data += byte
i += 1
return data
class IcomRawCloneModeRadio(IcomCloneModeRadio):
"""Subclass for Icom clone-mode radios using the raw data protocol."""
def process_frame_payload(self, payload):
"""Payloads from a raw-clone-mode radio are already in raw format."""
return unescape_raw_bytes(payload)
def get_payload(self, data, raw, checksum):
"""Returns the data with optional checksum in raw format."""
if checksum:
cs = chr(compute_checksum(data))
else:
cs = ""
payload = "%s%s" % (data, cs)
# Escape control characters.
escaped_payload = [escape_raw_byte(b) for b in payload]
return "".join(escaped_payload)
def sync_in(self):
# The radio returns all the bytes with the high-order bit flipped.
_mmap = clone_from_radio(self)
_mmap = flip_high_order_bit(_mmap.get_packed())
self._mmap = memmap.MemoryMap(_mmap)
self.process_mmap()
def get_mmap(self):
_data = flip_high_order_bit(self._mmap.get_packed())
return memmap.MemoryMap(_data)
class IcomLiveRadio(chirp_common.LiveRadio):
"""Base class for an Icom Live-mode radio"""
VENDOR = "Icom"
BAUD_RATE = 38400
_num_banks = 26 # Most live Icoms have 26 banks, A-Z
_bank_index_bounds = (0, 99)
_bank_class = IcomBank
def get_bank_model(self):
rf = self.get_features()
if rf.has_bank:
if rf.has_bank_index:
return IcomIndexedBankModel(self)
else:
return IcomBankModel(self)
else:
return None
def make_speed_switch_setting(radio):
if not radio.__class__._can_hispeed:
return {}
drvopts = RadioSettingGroup("drvopts", "Driver Options")
top = RadioSettings(drvopts)
rs = RadioSetting("drv_clone_speed", "Use Hi-Speed Clone",
RadioSettingValueBoolean(radio._can_hispeed))
drvopts.append(rs)
return top
def honor_speed_switch_setting(radio, settings):
for element in settings:
if element.get_name() == "drvopts":
return honor_speed_switch_setting(radio, element)
if element.get_name() == "drv_clone_speed":
radio.__class__._can_hispeed = element.value.get_value()
return
| tylert/chirp.hg | chirp/drivers/icf.py | Python | gpl-3.0 | 23,992 |
#! /usr/bin/env python
# I found this file inside Super Mario Bros python
# written by HJ https://sourceforge.net/projects/supermariobrosp/
# the complete work is licensed under GPL3, although I cannot determine the license of this file
# maybe this is the original author; we can contact him/her at http://www.pygame.org/project-EzMeNu-855-.html
import pygame
class EzMenu:
def __init__(self, *options):
self.options = options
self.x = 0
self.y = 0
self.font = pygame.font.Font(None, 32)
self.option = 0
self.width = 1
self.color = [0, 0, 0]
self.hcolor = [255, 0, 0]
self.height = len(self.options)*self.font.get_height()
for o in self.options:
text = o[0]
ren = self.font.render(text, 2, (0, 0, 0))
if ren.get_width() > self.width:
self.width = ren.get_width()
def draw(self, surface):
i=0
for o in self.options:
if i==self.option:
clr = self.hcolor
else:
clr = self.color
text = o[0]
ren = self.font.render(text, 2, clr)
if ren.get_width() > self.width:
self.width = ren.get_width()
surface.blit(ren, ((self.x+self.width/2) - ren.get_width()/2, self.y + i*(self.font.get_height()+4)))
i+=1
def update(self, events):
for e in events:
if e.type == pygame.KEYDOWN:
if e.key == pygame.K_DOWN:
self.option += 1
if e.key == pygame.K_UP:
self.option -= 1
if e.key == pygame.K_RETURN:
self.options[self.option][1]()
if self.option > len(self.options)-1:
self.option = 0
if self.option < 0:
self.option = len(self.options)-1
def set_pos(self, x, y):
self.x = x
self.y = y
def set_font(self, font):
self.font = font
def set_highlight_color(self, color):
self.hcolor = color
def set_normal_color(self, color):
self.color = color
def center_at(self, x, y):
self.x = x-(self.width/2)
self.y = y-(self.height/2)
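# Hedged usage sketch (not part of the original file): a minimal event loop;
# both menu callbacks are made up for illustration.
if __name__ == '__main__':
    import sys
    pygame.init()
    screen = pygame.display.set_mode((320, 240))
    menu = EzMenu(["Start", lambda: sys.stdout.write("start\n")],
                  ["Quit", sys.exit])
    menu.center_at(160, 120)
    while True:
        events = pygame.event.get()
        for e in events:
            if e.type == pygame.QUIT:
                sys.exit()
        menu.update(events)
        screen.fill((255, 255, 255))
        menu.draw(screen)
        pygame.display.flip()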
| juanjosegzl/learningpygame | ezmenu.py | Python | gpl-3.0 | 2,298 |
"""
WSGI config for GoodDog project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "GoodDog.settings")
application = get_wsgi_application()
| sate-z/GoodDog | GoodDog/wsgi.py | Python | gpl-3.0 | 392 |
#! /usr/bin/env python
"""Keywords (from "graminit.c")
This file is automatically generated; please don't muck it up!
To update the symbols in this file, 'cd' to the top directory of
the python source tree after building the interpreter and run:
python Lib/keyword.py
"""
__all__ = ["iskeyword", "kwlist"]
kwlist = [
#--start keywords--
'and',
'assert',
'break',
'class',
'continue',
'def',
'del',
'elif',
'else',
'except',
'exec',
'finally',
'for',
'from',
'global',
'if',
'import',
'in',
'is',
'lambda',
'not',
'or',
'pass',
'print',
'raise',
'return',
'try',
'while',
'yield',
#--end keywords--
]
kwdict = {}
for keyword in kwlist:
kwdict[keyword] = 1
iskeyword = kwdict.has_key
def main():
import sys, re
args = sys.argv[1:]
iptfile = args and args[0] or "Python/graminit.c"
if len(args) > 1: optfile = args[1]
else: optfile = "Lib/keyword.py"
# scan the source file for keywords
fp = open(iptfile)
strprog = re.compile('"([^"]+)"')
lines = []
while 1:
line = fp.readline()
if not line: break
if line.find('{1, "') > -1:
match = strprog.search(line)
if match:
lines.append(" '" + match.group(1) + "',\n")
fp.close()
lines.sort()
# load the output skeleton from the target
fp = open(optfile)
format = fp.readlines()
fp.close()
# insert the lines of keywords
try:
start = format.index("#--start keywords--\n") + 1
end = format.index("#--end keywords--\n")
format[start:end] = lines
except ValueError:
sys.stderr.write("target does not contain format markers\n")
sys.exit(1)
# write the output file
fp = open(optfile, 'w')
fp.write(''.join(format))
fp.close()
if __name__ == "__main__":
main()
| DarioGT/OMS-PluginXML | org.modelsphere.sms/lib/jython-2.2.1/Lib/keyword.py | Python | gpl-3.0 | 2,162 |
default_app_config = 'users.apps.UserConfig' | emoitzi/django-excel-viewer | users/__init__.py | Python | gpl-3.0 | 44 |
"""Calculate exact solutions for the zero dimensional LLG as given by
[Mallinson2000]
"""
from __future__ import division
from __future__ import absolute_import
from math import sin, cos, tan, log, atan2, acos, pi, sqrt
import scipy as sp
import matplotlib.pyplot as plt
import functools as ft
import simpleode.core.utils as utils
def calculate_switching_time(magnetic_parameters, p_start, p_now):
"""Calculate the time taken to switch from polar angle p_start to p_now
with the magnetic parameters given.
"""
# Should never quite get to pi/2
# if p_now >= pi/2:
# return sp.inf
# Cache some things to simplify the expressions later
H = magnetic_parameters.H(None)
Hk = magnetic_parameters.Hk()
alpha = magnetic_parameters.alpha
gamma = magnetic_parameters.gamma
# Calculate the various parts of the expression
prefactor = ((alpha**2 + 1)/(gamma * alpha)) \
* (1.0 / (H**2 - Hk**2))
a = H * log(tan(p_now/2) / tan(p_start/2))
b = Hk * log((H - Hk*cos(p_start)) /
(H - Hk*cos(p_now)))
c = Hk * log(sin(p_now) / sin(p_start))
# Put everything together
return prefactor * (a + b + c)
def calculate_azimuthal(magnetic_parameters, p_start, p_now):
"""Calculate the azimuthal angle corresponding to switching from
p_start to p_now with the magnetic parameters given.
"""
def azi_into_range(azi):
a = azi % (2*pi)
if a < 0:
a += 2*pi
return a
alpha = magnetic_parameters.alpha
no_range_azi = (-1/alpha) * log(tan(p_now/2) / tan(p_start/2))
return azi_into_range(no_range_azi)
def generate_dynamics(magnetic_parameters,
start_angle=pi/18,
end_angle=17*pi/18,
steps=1000):
"""Generate a list of polar angles then return a list of corresponding
m directions (in spherical polar coordinates) and switching times.
"""
mag_params = magnetic_parameters
# Construct a set of solution positions
pols = sp.linspace(start_angle, end_angle, steps)
azis = [calculate_azimuthal(mag_params, start_angle, p) for p in pols]
sphs = [utils.SphPoint(1.0, azi, pol) for azi, pol in zip(azis, pols)]
# Calculate switching times for these positions
times = [calculate_switching_time(mag_params, start_angle, p)
for p in pols]
return (sphs, times)
def plot_dynamics(magnetic_parameters,
start_angle=pi/18,
end_angle=17*pi/18,
steps=1000):
"""Plot exact positions given start/finish angles and magnetic
parameters.
"""
sphs, times = generate_dynamics(magnetic_parameters, start_angle,
end_angle, steps)
sphstitle = "Path of m for " + str(magnetic_parameters) \
+ "\n (starting point is marked)."
utils.plot_sph_points(sphs, title=sphstitle)
timestitle = "Polar angle vs time for " + str(magnetic_parameters)
utils.plot_polar_vs_time(sphs, times, title=timestitle)
plt.show()
def calculate_equivalent_dynamics(magnetic_parameters, polars):
"""Given a list of polar angles (and some magnetic parameters)
calculate what the corresponding azimuthal angles and switching times
(from the first angle) should be.
"""
start_angle = polars[0]
f_times = ft.partial(calculate_switching_time, magnetic_parameters,
start_angle)
exact_times = [f_times(p) for p in polars]
f_azi = ft.partial(calculate_azimuthal, magnetic_parameters, start_angle)
exact_azis = [f_azi(p) for p in polars]
return exact_times, exact_azis
def plot_vs_exact(magnetic_parameters, ts, ms):
# Extract lists of the polar coordinates
    m_as_sph_points = list(map(utils.array2sph, ms))  # list, not iterator: traversed twice below
pols = [m.pol for m in m_as_sph_points]
azis = [m.azi for m in m_as_sph_points]
# Calculate the corresponding exact dynamics
exact_times, exact_azis = \
calculate_equivalent_dynamics(magnetic_parameters, pols)
# Plot
plt.figure()
plt.plot(ts, pols, '--',
exact_times, pols)
plt.figure()
plt.plot(pols, azis, '--',
pols, exact_azis)
plt.show()
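# Hedged usage sketch, not part of the original module: a minimal stand-in
# for the magnetic-parameters object, exposing only what the functions above
# actually touch (H(t), Hk(), alpha, gamma). All values are illustrative.
if __name__ == "__main__":
    class _DemoMagParams(object):
        alpha = 0.5    # Gilbert damping
        gamma = 0.221  # gyromagnetic ratio, units consistent with H
        def H(self, t):
            return 2.0  # applied field magnitude
        def Hk(self):
            return 0.0  # no anisotropy field, so H**2 - Hk**2 is non-zero
    demo = _DemoMagParams()
    print(calculate_switching_time(demo, pi/18, pi/2))
    print(calculate_azimuthal(demo, pi/18, pi/2))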
| davidshepherd7/Landau-Lifshitz-Gilbert-ODE-model | llg/mallinson.py | Python | gpl-3.0 | 4,251 |
class Sbs:
def __init__(self, sbsFilename, sbc_filename, newSbsFilename):
import xml.etree.ElementTree as ET
import Sbc
self.mySbc = Sbc.Sbc(sbc_filename)
self.sbsTree = ET.parse(sbsFilename)
self.sbsRoot = self.sbsTree.getroot()
self.XSI_TYPE = "{http://www.w3.org/2001/XMLSchema-instance}type"
self.newSbsFilename = newSbsFilename
def findPlayerBySteamID(self, steam_id):
if (steam_id == 0):
return False
print("looking for player with steamID of %s" % steam_id)
ourPlayerDict = self.mySbc.getPlayerDict()
for player in ourPlayerDict:
# print playerDict[player]['steamID']
if ourPlayerDict[player]['steamID'] == steam_id:
return ourPlayerDict[player]
# if we don't find the user
return False
def giveReward(self, rewardOwner, rewardType, rewardAmount):
"""
This method will hunt down the first cargo container owned by
<Owner> matching their ingame ID, and with with "CustomName"
of "LOOT" and place the rewards in it
"""
import xml.etree.ElementTree as ET
print("trying to give %s %s units of %s" % (rewardOwner, rewardAmount, rewardType))
for sectorObjects in self.sbsRoot.iter('SectorObjects'):
for entityBase in sectorObjects.iter('MyObjectBuilder_EntityBase'):
# EntityId = entityBase.find('EntityId')
# print ("checking entityID %s" % EntityId.text)
gridSize = entityBase.find('GridSizeEnum')
# TODO+: some kind of warning if we have a reward to give, but can't find this user's LOOT container
if hasattr(gridSize, 'text'):
cubeBlocks = entityBase.find('CubeBlocks')
for myCubeBlock in cubeBlocks.iter('MyObjectBuilder_CubeBlock'):
owner = myCubeBlock.find("Owner")
EntityId = myCubeBlock.find('EntityId')
customName = myCubeBlock.find('CustomName')
if hasattr(owner, 'text') and owner.text == rewardOwner and myCubeBlock.get(self.XSI_TYPE) == "MyObjectBuilder_CargoContainer" and hasattr(customName, 'text'):
if "LOOT" in customName.text:
print("I found a cargo container owned by %s with entityID of %s and name of %s" % (owner.text, EntityId.text, customName.text))
componentContainer = myCubeBlock.find('ComponentContainer')
components = componentContainer.find('Components')
componentData = components.find('ComponentData')
component = componentData.find('Component')
items = component.find('Items')
itemCount = 0
for myInventoryItems in items.iter('MyObjectBuilder_InventoryItem'):
itemCount += 1
print("planning to add %s of %s into it as item %s" % (rewardAmount, rewardType, itemCount))
# <MyObjectBuilder_InventoryItem>
# <Amount>200</Amount>
# <PhysicalContent xsi:type="MyObjectBuilder_Ore">
# <SubtypeName>Uranium</SubtypeName> ## from rewardType
# </PhysicalContent>
# <ItemId>4</ItemId> ## from itemCount
# <AmountDecimal>200</AmountDecimal> ## from rewardAmount
# </MyObjectBuilder_InventoryItem>
# myCubeBlock.append((ET.fromstring('<MyObjectBuilder_InventoryItem><Amount>123456789</Amount></MyObjectBuilder_InventoryItem>')))
inventoryItem = ET.SubElement(items, 'MyObjectBuilder_InventoryItem')
amount = ET.SubElement(inventoryItem, 'Amount')
amount.text = str(rewardAmount)
physicalContent = ET.SubElement(inventoryItem, 'PhysicalContent')
physicalContent.set(self.XSI_TYPE, 'MyObjectBuilder_Ore')
subtypeName = ET.SubElement(physicalContent, 'SubtypeName')
subtypeName.text = rewardType
itemId = ET.SubElement(inventoryItem, 'ItemId')
itemId.text = str(itemCount)
amountDecimal = ET.SubElement(inventoryItem, 'AmountDecimal')
amountDecimal.text = str(rewardAmount)
nextItemId = component.find('nextItemId')
nextItemId.text = str(itemCount + 1)
# FIXME: this makes a mess of the html, figure out a way to clean it up?
def removeFloaters(self):
import xml.etree.ElementTree as ET
removedCount = 0
warnCount = 0
for sectorObjects in self.sbsRoot.iter('SectorObjects'):
for entityBase in sectorObjects.iter('MyObjectBuilder_EntityBase'):
cubeGridID = entityBase.find('EntityId')
gridSizeEnum = entityBase.find('GridSizeEnum')
objectType = entityBase.get(self.XSI_TYPE)
isStatic = entityBase.find('IsStatic') # FIXME: this does not do what I thought it did. Tested with simple station, and it isn't set as static when I build it from scratch.
# TODO: only way I can see to easily fix is check for <Forward x="-0" y="-0" z="-1" /> for static things
# print cubeGridID.text if hasattr(cubeGridID, 'text') else 'not defined'
if hasattr(cubeGridID, 'text'):
print("Grid EntityID: %s " % cubeGridID.text)
else:
print("FIXME: no gridID")
# print ("\t is objectType %s" % objectType )
if hasattr(isStatic, 'text'):
# this is a base, all of our checks are null and void. Bases don't float or cost me CPU
print("\t skipping trash checks because this IsStatic")
continue
if hasattr(gridSizeEnum, 'text'):
# is a grid, small or large
gridName = entityBase.find('DisplayName').text
print("\t is a grid size %s %s" % (gridSizeEnum.text, gridName))
# if the name contains DEL.WRN
if "[DEL.WRN]" in gridName:
print("\t ALREADY HAD DEL.WRN in the NAME, GOODBYE")
sectorObjects.remove(entityBase)
removedCount += 1
else:
# it doesn't have a DEL WRN yet, lets check for our rules
# TODO: look through the whole entityBase for 6 thrusters, a power supply, and at least one block not owned by pirates
thrusterCount = 0
powerSource = 0
controlSurface = 0
gyroCount = 0
turretCount = 0
ownerCount = 0
ownedThings = 0
ownerList = []
cubeBlocks = entityBase.find('CubeBlocks')
for myCubeBlock in cubeBlocks.iter('MyObjectBuilder_CubeBlock'):
owner = myCubeBlock.find("Owner")
# subtype = myCubeBlock.find('SubtypeName')
cubeType = myCubeBlock.get(self.XSI_TYPE)
entityID = myCubeBlock.find("EntityId")
# print ("\t\tTODO: cubeType of: %s" % cubeType)
if "Thrust" in cubeType:
thrusterCount += 1
elif "Cockpit" in cubeType:
controlSurface += 1
elif "Reactor" in cubeType:
powerSource += 1
elif "SolarPanel" in cubeType:
powerSource += 1
elif "RemoteControl" in cubeType:
controlSurface += 1
elif "Gyro" in cubeType:
gyroCount += 1
elif "Turret" in cubeType:
turretCount += 1
if hasattr(owner, 'text'):
# print ("\tOwner: %s" % owner.text)
if owner.text not in ownerList:
ownerList.append(owner.text)
ownerCount += 1
ownedThings += 1 # TODO: this is how many blocks have an owner, above is distinct owners of this grid
print("\t totals: %s %s %s %s %s %s %s" % (thrusterCount, powerSource, controlSurface, gyroCount, turretCount, ownerCount, len(ownerList)))
# TODO: if it fails all my tests,
# [CHECK] set name to [DEL.WRN]
# set ShowOnHUD to True ## can't, this is per cube. Ignore this.
if (thrusterCount < 6 or controlSurface < 1 or powerSource < 1 or gyroCount < 1 or ownerCount < 1):
print("\tWARNING: THIS GRID IS DUE TO DELETE")
gridNameToUpdate = entityBase.find('DisplayName')
gridNameToUpdate.text = "[DEL.WRN]" + gridNameToUpdate.text
print("\tname is now: %s" % gridNameToUpdate.text)
warnCount += 1
for myCubeBlock in cubeBlocks.iter('MyObjectBuilder_CubeBlock'):
# set all DeformationRatio to 1 (right up under owner) <DeformationRatio>0.5</DeformationRatio>
deformationElement = ET.SubElement(myCubeBlock, "DeformationRatio")
deformationElement.text = ".77"
# myCubeBlock.append('DeformationRatio', '.77')
else:
if (objectType == "MyObjectBuilder_FloatingObject"):
print("\t GOODBYE")
sectorObjects.remove(entityBase)
removedCount += 1
elif (objectType == "MyObjectBuilder_ReplicableEntity"):
# print ("\t Backpack!")
backPackName = entityBase.find('Name')
if hasattr(backPackName, 'text'):
print("\t Backpackname: %s" % backPackName.text)
print("\t GOODBYE")
sectorObjects.remove(entityBase)
removedCount += 1
elif (objectType == "MyObjectBuilder_VoxelMap"):
voxelStorageName = entityBase.find('StorageName')
if hasattr(voxelStorageName, 'text'):
print("\t voxelStorageName: %s" % voxelStorageName.text)
elif (objectType == "MyObjectBuilder_Character"):
# oops, someone was online
# entityID matches CharacterEntityId in the sbc
entityID = entityBase.find('EntityId').text # steamID
print("\t looking for %s entityID in playerDict" % entityID)
thisPlayersDict = self.findPlayerBySteamID(entityID) # returns False if we didn't have this players steamID in the sbc, meaning they weren't online
if (thisPlayersDict is not False and entityID is not False):
print("\t Sorry player: %s %s" % (entityID, thisPlayersDict["username"]))
else:
print("\tFIXME: this player was online, but I don't have their steamID of %s in the sbc" % entityID)
else:
print("\t ##### has no grid size")
# print ("writing tree out to %s" % newSbsFileName)
# tree = ET.ElementTree(sbsRoot)
# sbsRoot.attrib["xmlns:xsd"]="http://www.w3.org/2001/XMLSchema"
# tree.write(newSbsFileName, encoding='utf-8', xml_declaration=True)
return (removedCount, warnCount)
def writeFile(self):
import xml.etree.ElementTree as ET
print("writing tree out to %s" % self.newSbsFilename)
tree = ET.ElementTree(self.sbsRoot)
self.sbsRoot.attrib["xmlns:xsd"] = "http://www.w3.org/2001/XMLSchema"
tree.write(self.newSbsFilename, encoding='utf-8', xml_declaration=True)
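# Hedged usage sketch (illustrative only: the save-file names are
# placeholders and Sbc.Sbc must be importable):
if __name__ == "__main__":
    world = Sbs("SANDBOX_0_0_0_.sbs", "Sandbox.sbc", "SANDBOX_0_0_0_.new.sbs")
    removed, warned = world.removeFloaters()
    print("removed %d entities, tagged %d grids with [DEL.WRN]" % (removed, warned))
    world.writeFile()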
| mccorkle/seds-utils | Sbs.py | Python | gpl-3.0 | 13,196 |
# A template for APSync process based modules
from multiprocessing import Process, Event
import threading
import time
import signal, select
import traceback
import setproctitle
from APSyncFramework.utils.common_utils import PeriodicEvent
from APSyncFramework.utils.json_utils import ping, json_wrap_with_target
from APSyncFramework.utils.file_utils import read_config, write_config
class APModule(Process):
'''The base class for all modules'''
def __init__(self, in_queue, out_queue, name, description = None):
super(APModule, self).__init__()
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
self.daemon = True
self.config_list= [] # overwrite this list
self.config_changed = False
self.config = read_config()
self.start_time = time.time()
self.last_ping = None
self.needs_unloading = Event()
self.lock = threading.Lock()
self.in_queue = in_queue
self.out_queue = out_queue
self.name = name
self.ping = PeriodicEvent(frequency = 1.0/3.0, event = self.send_ping)
self.in_queue_thread = threading.Thread(target=self.in_queue_handling,
args = (self.lock,))
self.in_queue_thread.daemon = True
setproctitle.setproctitle(self.name)
if description is None:
self.description = "APSync {0} process".format(self.name)
else:
self.description = description
def update_config(self, config_list = []):
if len(config_list):
self.config_list = config_list
for (var_name, var_default) in self.config_list:
self.set_config(var_name, var_default)
if self.config_changed:
# TODO: send a msg to the webserver to update / reload the current page
self.log('At least one of your cloudsync settings was missing or has been updated, please reload the webpage if open.', 'INFO')
self.config_changed = False
config_on_disk = read_config()
for k in config_on_disk.keys():
if not k in self.config:
self.config[k] = config_on_disk[k]
write_config(self.config)
def send_ping(self):
self.out_queue.put_nowait(ping(self.name, self.pid))
def exit_gracefully(self, signum, frame):
self.unload()
def unload(self):
print self.name, 'called unload'
self.unload_callback()
self.needs_unloading.set()
def unload_callback(self):
''' overload to perform any module specific cleanup'''
pass
def run(self):
if self.in_queue_thread is not None:
self.in_queue_thread.start()
while not self.needs_unloading.is_set():
try:
self.main()
except:
print ("FATAL: module ({0}) exited while multiprocessing".format(self.name))
traceback.print_exc()
# TODO: logging here
print self.name, 'main finished'
def main(self):
pass
def in_queue_handling(self, lock=None):
while not self.needs_unloading.is_set():
(inputready,outputready,exceptready) = select.select([self.in_queue._reader],[],[],0.1)
for s in inputready:
while not self.in_queue.empty():
# drain the queue
data = self.in_queue.get_nowait()
if isinstance(data, Unload):
self.unload()
else:
# do something useful with the data...
self.process_in_queue_data(data)
self.ping.trigger()
print self.name, 'in queue finished'
def process_in_queue_data(self, data):
pass
def log(self, message, level = 'INFO'):
# CRITICAL
# ERROR
# WARNING
# INFO
# DEBUG
# NOTSET
self.out_queue.put_nowait(json_wrap_with_target({'msg':message, 'level':level}, target = 'logging'))
def set_config(self, var_name, var_default):
new_val = self.config.get(var_name, var_default)
try:
cur_val = self.config[var_name]
if new_val != cur_val:
self.config_changed = True
except:
self.config_changed = True
finally:
self.config[var_name] = new_val
return new_val
class Unload():
    def __init__(self, name):
        self.name = name  # remember which module the unload was meant for
        self.ack = False
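# Hedged sketch, not part of the framework: a minimal module built on
# APModule. The queues would normally be supplied by the framework's loader.
class _HeartbeatModule(APModule):
    def __init__(self, in_queue, out_queue):
        super(_HeartbeatModule, self).__init__(in_queue, out_queue, 'heartbeat')
    def main(self):
        time.sleep(1)                     # pace the main loop
        self.log('still alive', 'DEBUG')  # queue a log message for the logger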
| SamuelDudley/APSyncWeb | APSyncFramework/modules/lib/APSync_module.py | Python | gpl-3.0 | 4,702 |
# -*- coding: utf-8 -*-
# Dioptas - GUI program for fast processing of 2D X-ray diffraction data
# Principal author: Clemens Prescher ([email protected])
# Copyright (C) 2014-2019 GSECARS, University of Chicago, USA
# Copyright (C) 2015-2018 Institute for Geology and Mineralogy, University of Cologne, Germany
# Copyright (C) 2019-2020 DESY, Hamburg, Germany
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from ....model.util.HelperModule import get_partial_index
# imports for type hinting in PyCharm -- DO NOT DELETE
from ....model.DioptasModel import DioptasModel
from ....widgets.integration import IntegrationWidget
from ....widgets.plot_widgets.ImgWidget import IntegrationImgWidget
class PhaseInCakeController(object):
"""
PhaseInCakeController handles all the interaction between the phase controls and the plotted lines in the cake view.
"""
def __init__(self, integration_widget, dioptas_model):
"""
:param integration_widget: Reference to an IntegrationWidget
:param dioptas_model: reference to DioptasModel object
:type integration_widget: IntegrationWidget
:type dioptas_model: DioptasModel
"""
self.model = dioptas_model
self.phase_model = self.model.phase_model
self.integration_widget = integration_widget
self.cake_view_widget = integration_widget.integration_image_widget.cake_view # type: IntegrationImgWidget
self.connect()
def connect(self):
self.phase_model.phase_added.connect(self.add_phase_plot)
self.model.phase_model.phase_removed.connect(self.cake_view_widget.del_cake_phase)
self.phase_model.phase_changed.connect(self.update_phase_lines)
self.phase_model.phase_changed.connect(self.update_phase_color)
self.phase_model.phase_changed.connect(self.update_phase_visible)
self.phase_model.reflection_added.connect(self.reflection_added)
self.phase_model.reflection_deleted.connect(self.reflection_deleted)
def get_phase_position_and_intensities(self, ind, clip=True):
"""
Obtains the positions and intensities for lines of a phase with an index ind within the cake view.
No clipping is used for the first call to add the CakePhasePlot to the ImgWidget. Subsequent calls are used with
clipping. Thus, only lines within the cake_tth are returned. The visibility of each line is then estimated in
the ImgWidget based on the length of the clipped and not clipped lists.
:param ind: the index of the phase
:param clip: whether or not the lists should be clipped. Clipped means that lines which have positions larger
than the
:return: line_positions, line_intensities
"""
if self.model.cake_tth is None:
cake_tth = self.model.calibration_model.tth
else:
cake_tth = self.model.cake_tth
reflections_tth = self.phase_model.get_phase_line_positions(ind, 'tth',
self.model.calibration_model.wavelength * 1e10)
reflections_intensities = [reflex[1] for reflex in self.phase_model.reflections[ind]]
cake_line_positions = []
cake_line_intensities = []
        for ref_ind, tth in enumerate(reflections_tth):
            pos_ind = get_partial_index(cake_tth, tth)
            if pos_ind is not None:
                cake_line_positions.append(pos_ind + 0.5)
                cake_line_intensities.append(reflections_intensities[ref_ind])
            elif clip is False:
                cake_line_positions.append(0)
                cake_line_intensities.append(reflections_intensities[ref_ind])
return cake_line_positions, cake_line_intensities
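    # Example of the clip behaviour (values illustrative): with cake_tth
    # spanning 5-60 deg two-theta, a reflection at 70 deg is dropped when
    # clip=True but kept with position 0 when clip=False, so the widget can
    # compare the two list lengths to decide line visibility.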
def add_phase_plot(self):
cake_line_positions, cake_line_intensities = self.get_phase_position_and_intensities(-1, False)
self.cake_view_widget.add_cake_phase(cake_line_positions, cake_line_intensities,
self.phase_model.phase_colors[-1])
def update_phase_lines(self, ind):
cake_line_positions, cake_line_intensities = self.get_phase_position_and_intensities(ind)
self.cake_view_widget.update_phase_intensities(ind, cake_line_positions, cake_line_intensities)
def update_phase_color(self, ind):
self.cake_view_widget.set_cake_phase_color(ind, self.model.phase_model.phase_colors[ind])
def update_phase_visible(self, ind):
if self.phase_model.phase_visible[ind] and self.integration_widget.img_mode == 'Cake' and \
self.integration_widget.img_phases_btn.isChecked():
self.cake_view_widget.show_cake_phase(ind)
else:
self.cake_view_widget.hide_cake_phase(ind)
def reflection_added(self, ind):
self.cake_view_widget.phases[ind].add_line()
def reflection_deleted(self, phase_ind, reflection_ind):
self.cake_view_widget.phases[phase_ind].delete_line(reflection_ind)
| Dioptas/Dioptas | dioptas/controller/integration/phase/PhaseInCakeController.py | Python | gpl-3.0 | 5,615 |
# -*- coding: utf-8 -*-
#
# MAVProxy documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 19 05:17:36 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.pngmath',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'last_letter'
copyright = u'2014, George Zogopoulos - Papaliakos'
author = u'George Zogopoulos - Papaliakos'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.5'
# The full version, including alpha/beta/rc tags.
release = '0.5'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_static/themes", ]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'last_letter_doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'last_letter.tex', u'last_letter Documentation',
u'George Zogopoulos - Papaliakos', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'last_letter', u'last_letter Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'last_letter', u'last_letter Documentation',
author, 'last_letter', 'A collection of ROS packages for UAV simulation and autopilot development.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
| Georacer/last_letter | documentation/source/conf.py | Python | gpl-3.0 | 11,461 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
An implementation of the time frequency phase misfit and adjoint source after
Fichtner et al. (2008).
:copyright:
Lion Krischer ([email protected]), 2013
:license:
GNU General Public License, Version 3
(http://www.gnu.org/copyleft/gpl.html)
"""
import warnings
import numexpr as ne
import numpy as np
import obspy
from obspy.signal.interpolation import lanczos_interpolation
from lasif import LASIFAdjointSourceCalculationError
from lasif.adjoint_sources import time_frequency, utils
eps = np.spacing(1)
def adsrc_tf_phase_misfit(t, data, synthetic, min_period, max_period,
plot=False, max_criterion=7.0):
"""
:rtype: dictionary
:returns: Return a dictionary with three keys:
* adjoint_source: The calculated adjoint source as a numpy array
* misfit: The misfit value
* messages: A list of strings giving additional hints to what happened
in the calculation.
"""
# Assumes that t starts at 0. Pad your data if that is not the case -
# Parts with zeros are essentially skipped making it fairly efficient.
assert t[0] == 0
messages = []
# Internal sampling interval. Some explanations for this "magic" number.
# LASIF's preprocessing allows no frequency content with smaller periods
    # than min_period / 2.2 (see function_templates/preprocessing_function.py
# for details). Assuming most users don't change this, this is equal to
# the Nyquist frequency and the largest possible sampling interval to
# catch everything is min_period / 4.4.
#
    # The current choice is historic: changing it would (very slightly) change
    # the calculated misfit and we don't want to disturb inversions in
    # progress. The difference is likely minimal in any case. We might get
    # some aliasing into the lower frequencies but the filters coupled with
    # the TF-domain weighting will get rid of them in essentially all
    # realistically occurring cases.
    dt_new = max(float(int(min_period / 3.0)), t[1] - t[0])
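    # Worked example: with min_period = 30 s and an original sampling
    # interval of 1 s, dt_new = max(float(int(30 / 3.0)), 1.0) = 10.0 s.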
# New time axis
ti = utils.matlab_range(t[0], t[-1], dt_new)
# Make sure its odd - that avoid having to deal with some issues
# regarding frequency bin interpolation. Now positive and negative
# frequencies will always be all symmetric. Data is assumed to be
# tapered in any case so no problem are to be expected.
if not len(ti) % 2:
ti = ti[:-1]
# Interpolate both signals to the new time axis - this massively speeds
# up the whole procedure as most signals are highly oversampled. The
# adjoint source at the end is re-interpolated to the original sampling
# points.
original_data = data
original_synthetic = synthetic
data = lanczos_interpolation(
data=data, old_start=t[0], old_dt=t[1] - t[0], new_start=t[0],
new_dt=dt_new, new_npts=len(ti), a=8, window="blackmann")
synthetic = lanczos_interpolation(
data=synthetic, old_start=t[0], old_dt=t[1] - t[0], new_start=t[0],
new_dt=dt_new, new_npts=len(ti), a=8, window="blackmann")
original_time = t
t = ti
# -------------------------------------------------------------------------
# Compute time-frequency representations
# Window width is twice the minimal period.
width = 2.0 * min_period
# Compute time-frequency representation of the cross-correlation
_, _, tf_cc = time_frequency.time_frequency_cc_difference(
t, data, synthetic, width)
# Compute the time-frequency representation of the synthetic
tau, nu, tf_synth = time_frequency.time_frequency_transform(t, synthetic,
width)
# -------------------------------------------------------------------------
# compute tf window and weighting function
# noise taper: down-weight tf amplitudes that are very low
tf_cc_abs = np.abs(tf_cc)
m = tf_cc_abs.max() / 10.0 # NOQA
weight = ne.evaluate("1.0 - exp(-(tf_cc_abs ** 2) / (m ** 2))")
nu_t = nu.T
# highpass filter (periods longer than max_period are suppressed
# exponentially)
weight *= (1.0 - np.exp(-(nu_t * max_period) ** 2))
# lowpass filter (periods shorter than min_period are suppressed
# exponentially)
nu_t_large = np.zeros(nu_t.shape)
nu_t_small = np.zeros(nu_t.shape)
thres = (nu_t <= 1.0 / min_period)
nu_t_large[np.invert(thres)] = 1.0
nu_t_small[thres] = 1.0
weight *= (np.exp(-10.0 * np.abs(nu_t * min_period - 1.0)) * nu_t_large +
nu_t_small)
# normalisation
weight /= weight.max()
# computation of phase difference, make quality checks and misfit ---------
# Compute the phase difference.
# DP = np.imag(np.log(m + tf_cc / (2 * m + np.abs(tf_cc))))
DP = np.angle(tf_cc)
    # Attempt to detect phase jumps by taking the derivatives in time and
    # frequency direction. 0.7 is an empirical value.
abs_weighted_DP = np.abs(weight * DP)
_x = abs_weighted_DP.max() # NOQA
test_field = ne.evaluate("weight * DP / _x")
criterion_1 = np.sum([np.abs(np.diff(test_field, axis=0)) > 0.7])
criterion_2 = np.sum([np.abs(np.diff(test_field, axis=1)) > 0.7])
criterion = np.sum([criterion_1, criterion_2])
# Compute the phase misfit
dnu = nu[1] - nu[0]
i = ne.evaluate("sum(weight ** 2 * DP ** 2)")
# inserted by Nienke Blom, 22-11-2016
weighted_DP = ne.evaluate("weight * DP")
phasediff_integral = float(ne.evaluate("sum(weighted_DP * dnu * dt_new)"))
mean_delay = np.mean(weighted_DP)
wDP = weighted_DP.flatten()
wDP_thresh = wDP[abs(wDP) > 0.1 * max(wDP, key=lambda x: abs(x))]
median_delay = np.median(wDP_thresh)
max_delay = max(wDP, key=lambda x: abs(x))
phase_misfit = np.sqrt(i * dt_new * dnu)
# Sanity check. Should not occur.
if np.isnan(phase_misfit):
msg = "The phase misfit is NaN."
raise LASIFAdjointSourceCalculationError(msg)
# The misfit can still be computed, even if not adjoint source is
# available.
if criterion > max_criterion:
warning = ("Possible phase jump detected. Misfit included. No "
"adjoint source computed. Criterion: %.1f - Max allowed "
"criterion: %.1f" % (criterion, max_criterion))
warnings.warn(warning)
messages.append(warning)
ret_dict = {
"adjoint_source": None,
"misfit_value": phase_misfit,
"details": {"messages": messages,
#"weighted_DP": weighted_DP,
#"weight": weight,
#"DP": DP,
"mean_delay": mean_delay, # added NAB 30-8-2017
"phasediff_integral": phasediff_integral, # added NAB 22-11-2016, edited 30-8-2017
"median_delay": median_delay, # added NAB 22-11-2016, edited 30-8-2017
"max_delay": max_delay} # added NAB 31-8-2017
}
return ret_dict
# Make kernel for the inverse tf transform
idp = ne.evaluate(
"weight ** 2 * DP * tf_synth / (m + abs(tf_synth) ** 2)")
# Invert tf transform and make adjoint source
ad_src, it, I = time_frequency.itfa(tau, idp, width)
# Interpolate both signals to the new time axis
ad_src = lanczos_interpolation(
# Pad with a couple of zeros in case some where lost in all
# these resampling operations. The first sample should not
# change the time.
data=np.concatenate([ad_src.imag, np.zeros(100)]),
old_start=tau[0],
old_dt=tau[1] - tau[0],
new_start=original_time[0],
new_dt=original_time[1] - original_time[0],
new_npts=len(original_time), a=8, window="blackmann")
# Divide by the misfit and change sign.
ad_src /= (phase_misfit + eps)
ad_src = -1.0 * np.diff(ad_src) / (t[1] - t[0])
# Taper at both ends. Exploit ObsPy to not have to deal with all the
# nasty things.
ad_src = \
obspy.Trace(ad_src).taper(max_percentage=0.05, type="hann").data
# Reverse time and add a leading zero so the adjoint source has the
# same length as the input time series.
ad_src = ad_src[::-1]
ad_src = np.concatenate([[0.0], ad_src])
# Plot if requested. ------------------------------------------------------
if plot:
import matplotlib as mpl
import matplotlib.pyplot as plt
plt.style.use("seaborn-whitegrid")
from lasif.colors import get_colormap
if isinstance(plot, mpl.figure.Figure):
fig = plot
else:
fig = plt.gcf()
# Manually set-up the axes for full control.
l, b, w, h = 0.1, 0.05, 0.80, 0.22
rect = l, b + 3 * h, w, h
waveforms_axis = fig.add_axes(rect)
rect = l, b + h, w, 2 * h
tf_axis = fig.add_axes(rect)
rect = l, b, w, h
adj_src_axis = fig.add_axes(rect)
rect = l + w + 0.02, b, 1.0 - (l + w + 0.02) - 0.05, 4 * h
cm_axis = fig.add_axes(rect)
# Plot the weighted phase difference.
weighted_phase_difference = (DP * weight).transpose()
mappable = tf_axis.pcolormesh(
tau, nu, weighted_phase_difference, vmin=-1.0, vmax=1.0,
cmap=get_colormap("tomo_full_scale_linear_lightness_r"),
shading="gouraud", zorder=-10)
tf_axis.grid(True)
tf_axis.grid(True, which='minor', axis='both', linestyle='-',
color='k')
cm = fig.colorbar(mappable, cax=cm_axis)
cm.set_label("Phase difference in radian", fontsize="large")
# Various texts on the time frequency domain plot.
text = "Misfit: %.4f" % phase_misfit
tf_axis.text(x=0.99, y=0.02, s=text, transform=tf_axis.transAxes,
fontsize="large", color="#C25734", fontweight=900,
verticalalignment="bottom",
horizontalalignment="right")
txt = "Weighted Phase Difference - red is a phase advance of the " \
"synthetics"
tf_axis.text(x=0.99, y=0.95, s=txt,
fontsize="large", color="0.1",
transform=tf_axis.transAxes,
verticalalignment="top",
horizontalalignment="right")
if messages:
message = "\n".join(messages)
tf_axis.text(x=0.99, y=0.98, s=message,
transform=tf_axis.transAxes,
bbox=dict(facecolor='red', alpha=0.8),
verticalalignment="top",
horizontalalignment="right")
# Adjoint source.
adj_src_axis.plot(original_time, ad_src[::-1], color="0.1", lw=2,
label="Adjoint source (non-time-reversed)")
adj_src_axis.legend()
# Waveforms.
waveforms_axis.plot(original_time, original_data, color="0.1", lw=2,
label="Observed")
waveforms_axis.plot(original_time, original_synthetic,
color="#C11E11", lw=2, label="Synthetic")
waveforms_axis.legend()
# Set limits for all axes.
tf_axis.set_ylim(0, 2.0 / min_period)
tf_axis.set_xlim(0, tau[-1])
adj_src_axis.set_xlim(0, tau[-1])
waveforms_axis.set_xlim(0, tau[-1])
waveforms_axis.set_ylabel("Velocity [m/s]", fontsize="large")
tf_axis.set_ylabel("Period [s]", fontsize="large")
adj_src_axis.set_xlabel("Seconds since event", fontsize="large")
# Hack to keep ticklines but remove the ticks - there is probably a
# better way to do this.
waveforms_axis.set_xticklabels([
"" for _i in waveforms_axis.get_xticks()])
tf_axis.set_xticklabels(["" for _i in tf_axis.get_xticks()])
_l = tf_axis.get_ylim()
_r = _l[1] - _l[0]
_t = tf_axis.get_yticks()
_t = _t[(_l[0] + 0.1 * _r < _t) & (_t < _l[1] - 0.1 * _r)]
tf_axis.set_yticks(_t)
tf_axis.set_yticklabels(["%.1fs" % (1.0 / _i) for _i in _t])
waveforms_axis.get_yaxis().set_label_coords(-0.08, 0.5)
tf_axis.get_yaxis().set_label_coords(-0.08, 0.5)
fig.suptitle("Time Frequency Phase Misfit and Adjoint Source",
fontsize="xx-large")
ret_dict = {
"adjoint_source": ad_src,
"misfit_value": phase_misfit,
"details": {"messages": messages,
#"weighted_DP": weighted_DP,
#"weight": weight,
#"DP": DP,
"mean_delay": mean_delay, # added NAB 30-8-2017
"phasediff_integral": phasediff_integral, # added NAB 22-11-2016, edited 30-8-2017
"median_delay": median_delay, # added NAB 22-11-2016, edited 30-8-2017
"max_delay": max_delay} # added NAB 31-8-2017
}
# print "the phasedifference integral is "+str(ret_dict['details']['phasediff_integral'])
return ret_dict
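# Hedged smoke test, not part of the original module: toy sinusoids stand in
# for the preprocessed observed/synthetic traces LASIF would normally supply,
# and the periods are illustrative.
if __name__ == "__main__":
    t_demo = np.linspace(0.0, 600.0, 601)  # dt = 1 s; starts at 0 as asserted above
    obs = np.sin(2.0 * np.pi * t_demo / 50.0)
    syn = np.sin(2.0 * np.pi * (t_demo - 2.0) / 50.0)  # roughly a 2 s phase shift
    res = adsrc_tf_phase_misfit(t_demo, obs, syn,
                                min_period=40.0, max_period=100.0)
    print(res["misfit_value"])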
| Phlos/LASIF_scripts | lasif_code/ad_src_tf_phase_misfit.py | Python | gpl-3.0 | 13,183 |
import json
import random
import requests
from plugin import create_plugin
from message import SteelyMessage
HELP_STR = """
Request your favourite bible quotes, right to the chat.
Usage:
/bible - Random quote
/bible Genesis 1:3 - Specific verse
/bible help - This help text
Verses are specified in the format {book} {chapter}:{verse}
TODO: Book acronyms, e.g. Gen -> Genesis
TODO: Verse ranges, e.g. Genesis 1:1-3
"""
BIBLE_FILE = "plugins/bible/en_kjv.json"
BIBLE_URL = 'https://raw.githubusercontent.com/thiagobodruk/bible/master/json/en_kjv.json'
plugin = create_plugin(name='bible', author='CianLR', help=HELP_STR)
bible = None
book_to_index = {}
def make_book_to_index(bible):
btoi = {}
for i, book in enumerate(bible):
btoi[book['name'].lower()] = i
return btoi
@plugin.setup()
def plugin_setup():
global bible, book_to_index
try:
bible = json.loads(open(BIBLE_FILE, encoding='utf-8-sig').read())
book_to_index = make_book_to_index(bible)
return
except BaseException as e:
pass
# We've tried nothing and we're all out of ideas, download a new bible.
try:
bible = json.loads(
requests.get(BIBLE_URL).content.decode('utf-8-sig'))
except BaseException as e:
return "Error loading bible: " + str(e)
book_to_index = make_book_to_index(bible)
with open(BIBLE_FILE, 'w') as f:
json.dump(bible, f)
@plugin.listen(command='bible help')
def help_command(bot, message: SteelyMessage, **kwargs):
bot.sendMessage(
HELP_STR,
thread_id=message.thread_id, thread_type=message.thread_type)
def is_valid_quote(book, chapter, verse):
return (0 <= book < len(bible) and
0 <= chapter < len(bible[book]['chapters']) and
0 <= verse < len(bible[book]['chapters'][chapter]))
def get_quote(book, chapter, verse):
return "{}\n - {} {}:{}".format(
bible[book]["chapters"][chapter][verse],
bible[book]["name"], chapter + 1, verse + 1)
def get_quote_from_ref(book_name, ref):
if book_name.lower() not in book_to_index:
return "Could not find book name: " + book_name
book_i = book_to_index[book_name.lower()]
if len(ref.split(':')) != 2:
return 'Reference not in form "Book Chapter:Passage"'
chapter, verse = ref.split(':')
if not chapter.isnumeric():
return "Chapter must be an int"
chapter_i = int(chapter) - 1
if not verse.isnumeric():
return "Passage must be an int"
verse_i = int(verse) - 1
if not is_valid_quote(book_i, chapter_i, verse_i):
return "Verse or chapter out of range"
return get_quote(book_i, chapter_i, verse_i)
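# Hedged sketch, illustrative only: toy data in the same shape as the en_kjv
# JSON (a list of books, each {"name": ..., "chapters": [[verses...], ...]});
# the real plugin populates these globals in plugin_setup().
def _demo_lookup():
    global bible, book_to_index
    bible = [{"name": "Genesis",
              "chapters": [["In the beginning God created the heaven and the earth.",
                            "And the earth was without form, and void..."]]}]
    book_to_index = make_book_to_index(bible)
    return get_quote_from_ref("Genesis", "1:2")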
@plugin.listen(command='bible [book] [passage]')
def passage_command(bot, message: SteelyMessage, **kwargs):
if 'passage' not in kwargs:
book = random.randrange(len(bible))
chapter = random.randrange(len(bible[book]["chapters"]))
verse = random.randrange(len(bible[book]["chapters"][chapter]))
bot.sendMessage(
get_quote(book, chapter, verse),
thread_id=message.thread_id, thread_type=message.thread_type)
else:
bot.sendMessage(
get_quote_from_ref(kwargs['book'], kwargs['passage']),
thread_id=message.thread_id, thread_type=message.thread_type)
| sentriz/steely | steely/plugins/bible/main.py | Python | gpl-3.0 | 3,384 |
#!/usr/bin/env python
"""
This program decodes the Motorola SmartNet II trunking protocol from the control channel
Tune it to the control channel center freq, and it'll spit out the decoded packets.
In what format? Who knows.
Based on your AIS decoding software, which is in turn based on the gr-pager code and the gr-air code.
"""
from gnuradio import gr, gru, blks2, optfir, digital
from gnuradio import audio
from gnuradio import eng_notation
from gnuradio import uhd
from fsk_demod import fsk_demod
from optparse import OptionParser
from gnuradio.eng_option import eng_option
from gnuradio import smartnet
import time
import gnuradio.gr.gr_threading as _threading
import csv
import sys  # used in main() for sys.stderr and sys.exit
class top_block_runner(_threading.Thread):
def __init__(self, tb):
_threading.Thread.__init__(self)
self.setDaemon(1)
self.tb = tb
self.done = False
self.start()
def run(self):
self.tb.run()
self.done = True
class my_top_block(gr.top_block):
def __init__(self, options, queue):
gr.top_block.__init__(self)
if options.filename is not None:
self.fs = gr.file_source(gr.sizeof_gr_complex, options.filename)
self.rate = options.rate
else:
self.u = uhd.usrp_source(options.addr,
io_type=uhd.io_type.COMPLEX_FLOAT32,
num_channels=1)
if options.subdev is not None:
self.u.set_subdev_spec(options.subdev, 0)
self.u.set_samp_rate(options.rate)
self.rate = self.u.get_samp_rate()
# Set the antenna
if(options.antenna):
self.u.set_antenna(options.antenna, 0)
self.centerfreq = options.centerfreq
print "Tuning to: %fMHz" % (self.centerfreq - options.error)
if not(self.tune(options.centerfreq - options.error)):
print "Failed to set initial frequency"
if options.gain is None: #set to halfway
g = self.u.get_gain_range()
options.gain = (g.start()+g.stop()) / 2.0
print "Setting gain to %i" % options.gain
self.u.set_gain(options.gain)
self.u.set_bandwidth(options.bandwidth)
print "Samples per second is %i" % self.rate
        self._syms_per_sec = 3600
options.samples_per_second = self.rate
options.syms_per_sec = self._syms_per_sec
options.gain_mu = 0.01
options.mu=0.5
options.omega_relative_limit = 0.3
options.syms_per_sec = self._syms_per_sec
options.offset = options.centerfreq - options.freq
print "Control channel offset: %f" % options.offset
self.demod = fsk_demod(options)
self.start_correlator = gr.correlate_access_code_tag_bb("10101100",
0,
"smartnet_preamble") #should mark start of packet
self.smartnet_deinterleave = smartnet.deinterleave()
self.smartnet_crc = smartnet.crc(queue)
if options.filename is None:
self.connect(self.u, self.demod)
else:
self.connect(self.fs, self.demod)
self.connect(self.demod, self.start_correlator, self.smartnet_deinterleave, self.smartnet_crc)
#hook up the audio patch
if options.audio:
self.audiorate = 48000
self.audiotaps = gr.firdes.low_pass(1, self.rate, 8000, 2000, gr.firdes.WIN_HANN)
self.prefilter_decim = int(self.rate / self.audiorate) #might have to use a rational resampler for audio
print "Prefilter decimation: %i" % self.prefilter_decim
self.audio_prefilter = gr.freq_xlating_fir_filter_ccf(self.prefilter_decim, #decimation
self.audiotaps, #taps
0, #freq offset
self.rate) #sampling rate
        #on a trunked network where you know you will have good signal, a carrier power squelch works well. real FM receivers use a noise squelch, where
        #the received audio is high-passed above the cutoff and then fed to a reverse squelch. If the power is then BELOW a threshold, open the squelch.
self.squelch = gr.pwr_squelch_cc(options.squelch, #squelch point
alpha = 0.1, #wat
ramp = 10, #wat
gate = False)
self.audiodemod = blks2.fm_demod_cf(self.rate/self.prefilter_decim, #rate
1, #audio decimation
4000, #deviation
3000, #audio passband
4000, #audio stopband
1, #gain
75e-6) #deemphasis constant
#the filtering removes FSK data woobling from the subaudible channel (might be able to combine w/lpf above)
self.audiofilttaps = gr.firdes.high_pass(1, self.audiorate, 300, 50, gr.firdes.WIN_HANN)
self.audiofilt = gr.fir_filter_fff(1, self.audiofilttaps)
self.audiogain = gr.multiply_const_ff(options.volume)
self.audiosink = audio.sink (self.audiorate, "")
# self.audiosink = gr.wavfile_sink("test.wav", 1, self.audiorate, 8)
self.mute()
if options.filename is None:
self.connect(self.u, self.audio_prefilter)
else:
self.connect(self.fs, self.audio_prefilter)
# self.connect(self.audio_prefilter, self.squelch, self.audiodemod, self.audiofilt, self.audiogain, self.audioresamp, self.audiosink)
self.connect(self.audio_prefilter, self.squelch, self.audiodemod, self.audiofilt, self.audiogain, self.audiosink)
###########SUBCHANNEL DECODING EXPERIMENT###########
#here we set up the low-pass filter for audio subchannel data decoding. gain of 10, decimation of 10.
# self.subchannel_decimation = 50
# self.subchannel_gain = 10
# self.subchannelfilttaps = gr.firdes.low_pass(self.subchannel_gain, self.audiorate, 200, 40, firdes.WIN_HANN)
# self.subchannelfilt = gr.fir_filter_fff(self.subchannel_decimation, self.subchannelfilttaps)
# self.subchannel_syms_per_sec = 150
# self.subchannel_samples_per_symbol = (self.audiorate / self.subchannel_decimation) / self.subchannel_syms_per_sec
# print "Subchannel samples per symbol: %f" % self.subchannel_samples_per_symbol
# self.subchannel_clockrec = gr.clock_recovery_mm_ff(self.subchannel_samples_per_symbol,
# 0.25*0.01*0.01,
# 0.5,
# 0.01,
# 0.3)
# self.subchannel_slicer = gr.binary_slicer_fb()
# self.subchannel_correlator = gr.correlate_access_code_bb("01000",0)
# self.subchannel_framer = smartnet.subchannel_framer()
# self.subchannel_sink = gr.null_sink(1); #just so it doesn't bitch until we do something with it
# self.connect(self.audiodemod, self.subchannelfilt, self.subchannel_clockrec, self.subchannel_slicer, self.subchannel_correlator, self.subchannel_framer, self.subchannel_sink)
def tune(self, freq):
result = self.u.set_center_freq(freq)
return True
def tuneoffset(self, target_freq, rffreq):
#print "Setting offset; target freq is %f, Center freq is %f" % (target_freq, rffreq)
self.audio_prefilter.set_center_freq(rffreq-target_freq*1e6)
def setvolume(self, vol):
self.audiogain.set_k(vol)
def mute(self):
self.setvolume(0)
def unmute(self, volume):
self.setvolume(volume)
def getfreq(chanlist, cmd):
if chanlist is None:
if cmd < 0x2d0:
freq = float(cmd * 0.025 + 851.0125)
else:
freq = None
else:
if chanlist.get(str(cmd), None) is not None:
freq = float(chanlist[str(cmd)])
else:
freq = None
return freq
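# Worked example for the formula above: command 0x1b8 = 440 decimal maps to
# 440 * 0.025 + 851.0125 = 862.0125 MHz.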
def parsefreq(s, chanlist):
retfreq = None
[address, groupflag, command] = s.split(",")
command = int(command)
address = int(address) & 0xFFF0
    groupflag = bool(int(groupflag))  # assumes a numeric flag string; bool() of any non-empty string would always be True
if chanlist is None:
if command < 0x2d0:
retfreq = getfreq(chanlist, command)
else:
if chanlist.get(str(command), None) is not None: #if it falls into the channel somewhere
retfreq = getfreq(chanlist, command)
return [retfreq, address] # mask so the squelch opens up on the entire group
def parse(s, shorttglist, longtglist, chanlist, elimdupes):
#this is the main parser. it takes in commands in the form "address,command" (no quotes of course) and outputs text via print
#it is also responsible for using the talkgroup list, if any
    global lastmsg, lastlastmsg
    [address, groupflag, command] = s.split(",")
command = int(command)
address = int(address)
lookupaddr = address & 0xFFF0
    groupflag = bool(int(groupflag))  # as in parsefreq: assumes a numeric flag string
# print "Command is",command
if longtglist is not None and longtglist.get(str(lookupaddr), None) is not None:
longname = longtglist[str(lookupaddr)] #the mask is to screen out extra status bits, which we can add in later (see the RadioReference.com wiki on SmartNet Type II)
else:
longname = None
if shorttglist is not None and shorttglist.get(str(lookupaddr), None) is not None:
shortname = shorttglist[str(lookupaddr)]
else:
shortname = None
retval = None
if command == 0x30B and groupflag is True and lastmsg.get("command", None) == 0x308 and address & 0x2000 and address & 0x0800:
retval = "SysID: Sys #" + hex(lastmsg["address"]) + " on " + str(getfreq(chanlist, address & 0x3FF))
else:
if getfreq(chanlist, command) is not None and dupes.get(command, None) != address:
retval = "Freq assignment: " + str(shortname) + " (" + str(address) + ")" + " @ " + str(getfreq(chanlist, command)) + " (" + str(longname) + ")"
if elimdupes is True:
dupes[command] = address
    lastlastmsg = dict(lastmsg)  # snapshot the previous message; plain assignment would alias the dict mutated below
lastmsg["command"]=command
lastmsg["address"]=address
return retval
def main():
# Create Options Parser:
parser = OptionParser (option_class=eng_option, conflict_handler="resolve")
expert_grp = parser.add_option_group("Expert")
parser.add_option("-f", "--freq", type="eng_float", default=866.9625e6,
help="set control channel frequency to MHz [default=%default]", metavar="FREQ")
parser.add_option("-c", "--centerfreq", type="eng_float", default=867.5e6,
help="set center receive frequency to MHz [default=%default]. Set to center of 800MHz band for best results")
parser.add_option("-g", "--gain", type="int", default=None,
help="set RF gain", metavar="dB")
parser.add_option("-b", "--bandwidth", type="eng_float", default=3e6,
help="set bandwidth of DBS RX frond end [default=%default]")
parser.add_option("-F", "--filename", type="string", default=None,
help="read data from filename rather than USRP")
parser.add_option("-t", "--tgfile", type="string", default="sf_talkgroups.csv",
help="read in CSV-formatted talkgroup list for pretty printing of talkgroup names")
parser.add_option("-C", "--chanlistfile", type="string", default="motochan14.csv",
help="read in list of Motorola channel frequencies (improves accuracy of frequency decoding) [default=%default]")
parser.add_option("-e", "--allowdupes", action="store_false", default=True,
help="do not eliminate duplicate records (produces lots of noise)")
parser.add_option("-E", "--error", type="eng_float", default=0,
help="enter an offset error to compensate for USRP clock inaccuracy")
parser.add_option("-u", "--audio", action="store_true", default=False,
help="output audio on speaker")
parser.add_option("-m", "--monitor", type="int", default=None,
help="monitor a specific talkgroup")
parser.add_option("-v", "--volume", type="eng_float", default=0.2,
help="set volume gain for audio output [default=%default]")
parser.add_option("-s", "--squelch", type="eng_float", default=28,
help="set audio squelch level (default=%default, play with it)")
parser.add_option("-s", "--subdev", type="string",
help="UHD subdev spec", default=None)
parser.add_option("-A", "--antenna", type="string", default=None,
help="select Rx Antenna where appropriate")
parser.add_option("-r", "--rate", type="eng_float", default=64e6/18,
help="set sample rate [default=%default]")
parser.add_option("-a", "--addr", type="string", default="",
help="address options to pass to UHD")
#receive_path.add_options(parser, expert_grp)
(options, args) = parser.parse_args ()
if len(args) != 0:
parser.print_help(sys.stderr)
sys.exit(1)
if options.tgfile is not None:
tgreader=csv.DictReader(open(options.tgfile), quotechar='"')
shorttglist = {"0": 0}
longtglist = {"0": 0}
for record in tgreader:
# print record['tgnum']
shorttglist[record['tgnum']] = record['shortname']
longtglist[record['tgnum']] = record['longname']
else:
shorttglist = None
longtglist = None
if options.chanlistfile is not None:
clreader=csv.DictReader(open(options.chanlistfile), quotechar='"')
chanlist={"0": 0}
for record in clreader:
chanlist[record['channel']] = record['frequency']
else:
chanlist = None
# build the graph
queue = gr.msg_queue(10)
tb = my_top_block(options, queue)
runner = top_block_runner(tb)
global dupes
dupes = {0: 0}
global lastmsg
lastmsg = {"command": 0x0000, "address": 0x0000}
global lastlastmsg
lastlastmsg = lastmsg
currentoffset = 0
updaterate = 10
#tb.setvolume(options.volume)
#tb.mute()
try:
while 1:
if not queue.empty_p():
msg = queue.delete_head() # Blocking read
sentence = msg.to_string()
s = parse(sentence, shorttglist, longtglist, chanlist, options.allowdupes)
if s is not None:
print s
if options.audio:
[newfreq, newaddr] = parsefreq(sentence, chanlist)
if newfreq == currentoffset and newaddr != (options.monitor & 0xFFF0):
tb.mute()
if newaddr == (options.monitor & 0xFFF0): #the mask is to allow listening to all "flags" within a talkgroup: emergency, broadcast, etc.
tb.unmute(options.volume)
if newfreq is not None and newfreq != currentoffset:
print "Changing freq to %f" % newfreq
currentoffset = newfreq
tb.tuneoffset(newfreq, options.centerfreq)
elif runner.done:
break
else:
time.sleep(1.0/updaterate)
# tb.run()
except KeyboardInterrupt:
tb.stop()
runner = None
if __name__ == '__main__':
main()
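# Illustrative invocation (all values are placeholders; see --help for the
# full option list):
#   ./smartnet2decode.py -f 866.9625e6 -c 867.5e6 -g 30 -t sf_talkgroups.csv -u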
| bistromath/gr-smartnet | src/python/smartnet2decode.py | Python | gpl-3.0 | 13,591 |
# coding: utf-8
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.mixins import UserPassesTestMixin
from django.views.generic import ListView
from django.views import View
from django.db.models import Q
import posgradmin.models as models
from posgradmin import authorization as auth
from django.conf import settings
from django.shortcuts import render, HttpResponseRedirect
import posgradmin.forms as forms
from dal import autocomplete
from django.urls import reverse
from django.forms.models import model_to_dict
from pdfrw import PdfReader, PdfWriter, PageMerge
from django.template.loader import render_to_string
from sh import pandoc, mkdir
from tempfile import NamedTemporaryFile
import datetime
from django.utils.text import slugify
from .settings import BASE_DIR, MEDIA_ROOT, MEDIA_URL
class AcademicoAutocomplete(LoginRequiredMixin, UserPassesTestMixin, autocomplete.Select2QuerySetView):
login_url = settings.APP_PREFIX + 'accounts/login/'
def test_func(self):
if auth.is_academico(self.request.user):
if self.request.user.academico.acreditacion in ['D', 'M', 'P', 'E',
'candidato profesor']:
return True
return False
def get_queryset(self):
qs = models.Academico.objects.filter(Q(acreditacion='candidato profesor')
| Q(acreditacion='P')
| Q(acreditacion='M')
| Q(acreditacion='D')
| Q(acreditacion='E'))
if self.q:
qs = qs.filter(Q(user__first_name__istartswith=self.q)
| Q(user__last_name__icontains=self.q))
return qs
class ProponerAsignatura(LoginRequiredMixin, UserPassesTestMixin, View):
login_url = settings.APP_PREFIX + 'accounts/login/'
template = 'posgradmin/proponer_asignatura.html'
form_class = forms.AsignaturaModelForm
def test_func(self):
if auth.is_academico(self.request.user):
if self.request.user.academico.acreditacion in ['D', 'M', 'P',
'candidato profesor']:
return True
return False
def get(self, request, *args, **kwargs):
form = self.form_class(initial={'academicos': [request.user.academico, ]})
breadcrumbs = ((settings.APP_PREFIX + 'inicio/', 'Inicio'),
('', 'Proponer Asignatura')
)
return render(request,
self.template,
{
'title': 'Proponer Asignatura',
'breadcrumbs': breadcrumbs,
'form': form
})
def post(self, request, *args, **kwargs):
form = self.form_class(request.POST, request.FILES)
if form.is_valid():
a = models.Asignatura(
asignatura=request.POST['asignatura'],
tipo='Optativa',
estado='propuesta',
programa=request.FILES['programa'])
a.save()
return HttpResponseRedirect(reverse('inicio'))
        else:
            print(form.errors)
            return render(request, self.template,
                          {'title': 'Proponer Asignatura', 'form': form})
class SolicitaCurso(LoginRequiredMixin, UserPassesTestMixin, View):
login_url = settings.APP_PREFIX + 'accounts/login/'
template = 'posgradmin/solicita_curso.html'
form_class = forms.CursoModelForm
def test_func(self):
if auth.is_academico(self.request.user):
if self.request.user.academico.acreditacion in ['D', 'M', 'P', 'E',
'candidato profesor']:
return True
return False
def get(self, request, *args, **kwargs):
convocatoria = models.ConvocatoriaCurso.objects.get(pk=int(kwargs['pk']))
if convocatoria.status == 'cerrada':
return HttpResponseRedirect(reverse('mis_cursos'))
asignatura = models.Asignatura.objects.get(pk=int(kwargs['as_id']))
form = self.form_class(initial={'academicos': [request.user.academico, ]})
breadcrumbs = ((settings.APP_PREFIX + 'inicio/', 'Inicio'),
(reverse('elige_asignatura', args=[convocatoria.id,]),
"Convocatoria para cursos %s-%s" % (convocatoria.year, convocatoria.semestre))
)
return render(request,
self.template,
{
'title': 'Solicitar curso',
'breadcrumbs': breadcrumbs,
'convocatoria': convocatoria,
'asignatura': asignatura,
'form': form
})
def post(self, request, *args, **kwargs):
convocatoria = models.ConvocatoriaCurso.objects.get(pk=int(kwargs['pk']))
if convocatoria.status == 'cerrada':
return HttpResponseRedirect(reverse('mis_cursos'))
asignatura = models.Asignatura.objects.get(pk=int(kwargs['as_id']))
form = self.form_class(request.POST)
if form.is_valid():
curso = models.Curso(
convocatoria=convocatoria,
asignatura=asignatura,
year=convocatoria.year,
semestre=convocatoria.semestre,
sede=request.POST['sede'],
aula=request.POST['aula'],
horario=request.POST['horario'])
curso.save()
for ac_id in request.POST.getlist('academicos'):
ac = models.Academico.objects.get(pk=int(ac_id))
curso.academicos.add(ac)
curso.academicos.add(request.user.academico)
curso.save()
            return HttpResponseRedirect(reverse('mis_cursos'))
        else:
            return render(request, self.template,
                          {'title': 'Solicitar curso', 'convocatoria': convocatoria,
                           'asignatura': asignatura, 'form': form})
class CursoView(LoginRequiredMixin, UserPassesTestMixin, View):
login_url = settings.APP_PREFIX + 'accounts/login/'
template = 'posgradmin/solicita_curso.html'
form_class = forms.CursoModelForm
def test_func(self):
curso = models.Curso.objects.get(pk=int(self.kwargs['pk']))
if auth.is_academico(self.request.user):
if self.request.user.academico.acreditacion in ['D', 'M', 'P', 'E',
'candidato profesor']:
if self.request.user.academico in curso.academicos.all():
return True
return False
def get(self, request, *args, **kwargs):
curso = models.Curso.objects.get(pk=int(kwargs['pk']))
form = self.form_class(initial=model_to_dict(curso))
breadcrumbs = ((reverse('inicio'), 'Inicio'),
(reverse('mis_cursos'), "Mis cursos"))
return render(request,
self.template,
{
'title': 'Editar curso',
'breadcrumbs': breadcrumbs,
'convocatoria': curso.convocatoria,
'asignatura': curso.asignatura,
'form': form
})
def post(self, request, *args, **kwargs):
curso = models.Curso.objects.get(pk=int(kwargs['pk']))
convocatoria = curso.convocatoria
if convocatoria.status == 'cerrada':
return HttpResponseRedirect(reverse('mis_cursos'))
asignatura = curso.asignatura
form = self.form_class(request.POST)
if form.is_valid():
curso.sede = request.POST['sede']
curso.aula = request.POST['aula']
curso.horario = request.POST['horario']
curso.save()
curso.academicos.clear()
for ac_id in request.POST.getlist('academicos'):
ac = models.Academico.objects.get(pk=int(ac_id))
curso.academicos.add(ac)
curso.save()
            return HttpResponseRedirect(reverse('mis_cursos'))
        else:
            return render(request, self.template,
                          {'title': 'Editar curso', 'convocatoria': convocatoria,
                           'asignatura': asignatura, 'form': form})
class CursoConstancia(LoginRequiredMixin, UserPassesTestMixin, View):
login_url = settings.APP_PREFIX + 'accounts/login/'
template = 'posgradmin/curso_constancia.html'
form_class = forms.CursoConstancia
def test_func(self):
curso = models.Curso.objects.get(pk=int(self.kwargs['pk']))
if auth.is_academico(self.request.user):
if self.request.user.academico.acreditacion in ['D', 'M', 'P', 'E',
'candidato profesor']:
if self.request.user.academico in curso.academicos.all():
return True
return False
def get(self, request, *args, **kwargs):
curso = models.Curso.objects.get(pk=int(kwargs['pk']))
form = self.form_class(initial=model_to_dict(curso))
breadcrumbs = ((reverse('inicio'), 'Inicio'),
(reverse('mis_cursos'), "Mis cursos"))
return render(request,
self.template,
{
'title': 'Emitir constancia de participación',
'breadcrumbs': breadcrumbs,
'convocatoria': curso.convocatoria,
'asignatura': curso.asignatura,
'form': form
})
def post(self, request, *args, **kwargs):
curso = models.Curso.objects.get(pk=int(kwargs['pk']))
profesor_invitado = request.POST['profesor_invitado']
fecha_participacion = datetime.date(int(request.POST['fecha_de_participación_year']),
int(request.POST['fecha_de_participación_month']),
int(request.POST['fecha_de_participación_day']))
with NamedTemporaryFile(mode='r+', encoding='utf-8') as carta_md:
carta_md.write(
render_to_string('posgradmin/constancia_curso.md',
{'fecha': datetime.date.today(),
'profesor_invitado': profesor_invitado,
'tema': request.POST['tema'],
'curso': curso,
'fecha_participacion': fecha_participacion,
'profesor': request.user.get_full_name() }))
carta_md.seek(0)
outdir = '%s/perfil-academico/%s/' % (MEDIA_ROOT,
request.user.academico.id)
tmpname = 'cursoplain_%s_%s.pdf' % (curso.id,
slugify(profesor_invitado)
)
final_name = tmpname.replace('cursoplain', 'constancia_curso')
mkdir("-p", outdir)
pandoc(carta_md.name, output=outdir + tmpname)
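            # Overlay the pandoc-rendered letter (C) onto the letterhead
            # template (M) with pdfrw; the merged first page is then written
            # out under final_name.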
C = PdfReader(outdir + tmpname)
M = PdfReader(BASE_DIR + '/docs/membrete_pcs.pdf')
w = PdfWriter()
merger = PageMerge(M.pages[0])
merger.add(C.pages[0]).render()
w.write(outdir + final_name, M)
return HttpResponseRedirect(MEDIA_URL+"perfil-academico/%s/%s" % (request.user.academico.id, final_name))
class CursoConstanciaEstudiante(LoginRequiredMixin, UserPassesTestMixin, View):
login_url = settings.APP_PREFIX + 'accounts/login/'
template = 'posgradmin/curso_constancia.html'
form_class = forms.CursoConstanciaEstudiante
def test_func(self):
curso = models.Curso.objects.get(pk=int(self.kwargs['pk']))
if auth.is_academico(self.request.user):
if self.request.user.academico.acreditacion in ['D', 'M', 'P', 'E',
'candidato profesor']:
if self.request.user.academico in curso.academicos.all():
return True
return False
def get(self, request, *args, **kwargs):
curso = models.Curso.objects.get(pk=int(kwargs['pk']))
form = self.form_class(initial=model_to_dict(curso))
breadcrumbs = ((reverse('inicio'), 'Inicio'),
(reverse('mis_cursos'), "Mis cursos"))
return render(request,
self.template,
{
'title': 'Emitir constancia para estudiante',
'breadcrumbs': breadcrumbs,
'convocatoria': curso.convocatoria,
'asignatura': curso.asignatura,
'form': form
})
def post(self, request, *args, **kwargs):
curso = models.Curso.objects.get(pk=int(kwargs['pk']))
form = self.form_class(request.POST, request.FILES)
if form.is_valid():
estudiante_invitado = request.POST['estudiante_invitado']
calificacion = request.POST['calificacion']
with NamedTemporaryFile(mode='r+', encoding='utf-8') as carta_md:
carta_md.write(
render_to_string('posgradmin/constancia_curso_estudiante.md',
{'fecha': datetime.date.today(),
'estudiante_invitado': estudiante_invitado,
'calificacion': calificacion,
'curso': curso,
'profesor': request.user.get_full_name() }))
carta_md.seek(0)
outdir = '%s/perfil-academico/%s/' % (MEDIA_ROOT,
request.user.academico.id)
tmpname = 'cursoplain_%s_%s.pdf' % (curso.id,
slugify(estudiante_invitado)
)
final_name = tmpname.replace('cursoplain', 'constancia_curso')
mkdir("-p", outdir)
pandoc(carta_md.name, output=outdir + tmpname)
C = PdfReader(outdir + tmpname)
M = PdfReader(BASE_DIR + '/docs/membrete_pcs.pdf')
w = PdfWriter()
merger = PageMerge(M.pages[0])
merger.add(C.pages[0]).render()
w.write(outdir + final_name, M)
return HttpResponseRedirect(
MEDIA_URL + "perfil-academico/%s/%s" % (request.user.academico.id,
final_name))
        else:
            breadcrumbs = ((reverse('inicio'), 'Inicio'),
                           (reverse('mis_cursos'), "Mis cursos"))
            return render(request,
                          self.template,
                          {
                              'title': 'Emitir constancia para estudiante',
                              'breadcrumbs': breadcrumbs,
                              'convocatoria': curso.convocatoria,
                              'asignatura': curso.asignatura,
                              'form': form
                          })
class EligeAsignatura(LoginRequiredMixin, UserPassesTestMixin, View):
login_url = settings.APP_PREFIX + 'accounts/login/'
template = 'posgradmin/elige_asignatura.html'
def test_func(self):
return auth.is_academico(self.request.user)
def get(self, request, *args, **kwargs):
pk = int(kwargs['pk'])
convocatoria = models.ConvocatoriaCurso.objects.get(pk=pk)
asignaturas = models.Asignatura.objects.filter(
Q(tipo='Optativa') &
(Q(estado='aceptada') | Q(estado='propuesta')))
breadcrumbs = ((settings.APP_PREFIX + 'inicio/', 'Inicio'),
('', "Convocatoria para cursos %s-%s" % (convocatoria.year, convocatoria.semestre))
)
return render(request,
self.template,
{
'title': 'Asignaturas',
'breadcrumbs': breadcrumbs,
'asignaturas': asignaturas,
'convocatoria': convocatoria,
})
class MisEstudiantesView(LoginRequiredMixin, UserPassesTestMixin, ListView):
login_url = settings.APP_PREFIX + 'accounts/login/'
def test_func(self):
return auth.is_academico(self.request.user)
model = models.Estudiante
template_name = 'posgradmin/mis_estudiantes_list.html'
def get_queryset(self):
new_context = self.request.user.academico.estudiantes()
return new_context
class MisCursos(LoginRequiredMixin, UserPassesTestMixin, ListView):
login_url = settings.APP_PREFIX + 'accounts/login/'
def test_func(self):
return auth.is_academico(self.request.user)
model = models.Curso
template_name = 'posgradmin/mis_cursos_list.html'
def get_queryset(self):
return self.request.user.academico.curso_set.all()
def get_context_data(self, **kwargs):
ctxt = super(MisCursos, self).get_context_data(**kwargs)
ctxt['MEDIA_URL'] = MEDIA_URL
return ctxt
| sostenibilidad-unam/posgrado | posgradmin/posgradmin/views_academico.py | Python | gpl-3.0 | 17,170 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'src/ui/mainwindow.ui'
#
# Created: Fri Feb 15 16:08:54 2013
# by: PyQt4 UI code generator 4.9.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(1024, 768)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setEnabled(True)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth())
self.centralwidget.setSizePolicy(sizePolicy)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.label_3 = QtGui.QLabel(self.centralwidget)
self.label_3.setMaximumSize(QtCore.QSize(200, 200))
self.label_3.setText(_fromUtf8(""))
self.label_3.setPixmap(QtGui.QPixmap(_fromUtf8(":/logo/pixmaps/logo.jpg")))
self.label_3.setScaledContents(True)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.horizontalLayout_2.addWidget(self.label_3)
self.verticalLayout_2 = QtGui.QVBoxLayout()
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.label_2 = QtGui.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(20)
self.label_2.setFont(font)
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.verticalLayout_2.addWidget(self.label_2)
self.labelServerId = QtGui.QLabel(self.centralwidget)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(118, 116, 113))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(118, 116, 113))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
self.labelServerId.setPalette(palette)
font = QtGui.QFont()
font.setPointSize(16)
font.setBold(True)
font.setWeight(75)
self.labelServerId.setFont(font)
self.labelServerId.setAlignment(QtCore.Qt.AlignCenter)
self.labelServerId.setObjectName(_fromUtf8("labelServerId"))
self.verticalLayout_2.addWidget(self.labelServerId)
self.labelYear = QtGui.QLabel(self.centralwidget)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(118, 116, 113))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
self.labelYear.setPalette(palette)
font = QtGui.QFont()
font.setPointSize(37)
font.setBold(True)
font.setWeight(75)
self.labelYear.setFont(font)
self.labelYear.setTextFormat(QtCore.Qt.PlainText)
self.labelYear.setAlignment(QtCore.Qt.AlignCenter)
self.labelYear.setObjectName(_fromUtf8("labelYear"))
self.verticalLayout_2.addWidget(self.labelYear)
self.horizontalLayout_2.addLayout(self.verticalLayout_2)
self.label = QtGui.QLabel(self.centralwidget)
self.label.setMaximumSize(QtCore.QSize(200, 200))
self.label.setText(_fromUtf8(""))
self.label.setPixmap(QtGui.QPixmap(_fromUtf8(":/logo/pixmaps/Stampa-silicone-tondo-fi55.png")))
self.label.setScaledContents(True)
self.label.setObjectName(_fromUtf8("label"))
self.horizontalLayout_2.addWidget(self.label)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.line = QtGui.QFrame(self.centralwidget)
self.line.setFrameShadow(QtGui.QFrame.Raised)
self.line.setLineWidth(4)
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.verticalLayout.addWidget(self.line)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.btnNewYear = QtGui.QToolButton(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(11)
sizePolicy.setHeightForWidth(self.btnNewYear.sizePolicy().hasHeightForWidth())
self.btnNewYear.setSizePolicy(sizePolicy)
self.btnNewYear.setMinimumSize(QtCore.QSize(0, 200))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.btnNewYear.setFont(font)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/img/pixmaps/planner.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.btnNewYear.setIcon(icon)
self.btnNewYear.setIconSize(QtCore.QSize(128, 128))
self.btnNewYear.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
self.btnNewYear.setAutoRaise(False)
self.btnNewYear.setArrowType(QtCore.Qt.NoArrow)
self.btnNewYear.setObjectName(_fromUtf8("btnNewYear"))
self.horizontalLayout.addWidget(self.btnNewYear)
self.btnCloseYear = QtGui.QToolButton(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(11)
sizePolicy.setHeightForWidth(self.btnCloseYear.sizePolicy().hasHeightForWidth())
self.btnCloseYear.setSizePolicy(sizePolicy)
self.btnCloseYear.setMinimumSize(QtCore.QSize(0, 200))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.btnCloseYear.setFont(font)
self.btnCloseYear.setAutoFillBackground(False)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(_fromUtf8(":/img/pixmaps/save.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.btnCloseYear.setIcon(icon1)
self.btnCloseYear.setIconSize(QtCore.QSize(128, 128))
self.btnCloseYear.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
self.btnCloseYear.setObjectName(_fromUtf8("btnCloseYear"))
self.horizontalLayout.addWidget(self.btnCloseYear)
self.btnTeachers = QtGui.QToolButton(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(11)
sizePolicy.setHeightForWidth(self.btnTeachers.sizePolicy().hasHeightForWidth())
self.btnTeachers.setSizePolicy(sizePolicy)
self.btnTeachers.setMinimumSize(QtCore.QSize(0, 200))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.btnTeachers.setFont(font)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(_fromUtf8(":/img/pixmaps/education.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.btnTeachers.setIcon(icon2)
self.btnTeachers.setIconSize(QtCore.QSize(128, 128))
self.btnTeachers.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
self.btnTeachers.setObjectName(_fromUtf8("btnTeachers"))
self.horizontalLayout.addWidget(self.btnTeachers)
self.btnStudents = QtGui.QToolButton(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(11)
sizePolicy.setHeightForWidth(self.btnStudents.sizePolicy().hasHeightForWidth())
self.btnStudents.setSizePolicy(sizePolicy)
self.btnStudents.setMinimumSize(QtCore.QSize(0, 200))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.btnStudents.setFont(font)
self.btnStudents.setStyleSheet(_fromUtf8(""))
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(_fromUtf8(":/img/pixmaps/System-users.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.btnStudents.setIcon(icon3)
self.btnStudents.setIconSize(QtCore.QSize(128, 128))
self.btnStudents.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
self.btnStudents.setObjectName(_fromUtf8("btnStudents"))
self.horizontalLayout.addWidget(self.btnStudents)
self.btnAdvanced = QtGui.QToolButton(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(11)
sizePolicy.setHeightForWidth(self.btnAdvanced.sizePolicy().hasHeightForWidth())
self.btnAdvanced.setSizePolicy(sizePolicy)
self.btnAdvanced.setMinimumSize(QtCore.QSize(0, 200))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.btnAdvanced.setFont(font)
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap(_fromUtf8(":/img/pixmaps/advanced_options.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.btnAdvanced.setIcon(icon4)
self.btnAdvanced.setIconSize(QtCore.QSize(128, 128))
self.btnAdvanced.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
self.btnAdvanced.setObjectName(_fromUtf8("btnAdvanced"))
self.horizontalLayout.addWidget(self.btnAdvanced)
self.verticalLayout.addLayout(self.horizontalLayout)
spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem1)
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1024, 29))
self.menubar.setObjectName(_fromUtf8("menubar"))
self.menuImpostazioni = QtGui.QMenu(self.menubar)
self.menuImpostazioni.setEnabled(False)
self.menuImpostazioni.setObjectName(_fromUtf8("menuImpostazioni"))
self.menuHelp = QtGui.QMenu(self.menubar)
self.menuHelp.setEnabled(False)
self.menuHelp.setObjectName(_fromUtf8("menuHelp"))
self.menuArchivi = QtGui.QMenu(self.menubar)
self.menuArchivi.setObjectName(_fromUtf8("menuArchivi"))
MainWindow.setMenuBar(self.menubar)
self.actionAbout = QtGui.QAction(MainWindow)
self.actionAbout.setObjectName(_fromUtf8("actionAbout"))
self.actionPreferenze = QtGui.QAction(MainWindow)
self.actionPreferenze.setObjectName(_fromUtf8("actionPreferenze"))
self.actionArchivioAnniPrec = QtGui.QAction(MainWindow)
self.actionArchivioAnniPrec.setObjectName(_fromUtf8("actionArchivioAnniPrec"))
self.menuImpostazioni.addAction(self.actionPreferenze)
self.menuHelp.addAction(self.actionAbout)
self.menuArchivi.addAction(self.actionArchivioAnniPrec)
self.menubar.addAction(self.menuArchivi.menuAction())
self.menubar.addAction(self.menuImpostazioni.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.retranslateUi(MainWindow)
QtCore.QObject.connect(self.btnAdvanced, QtCore.SIGNAL(_fromUtf8("clicked()")), MainWindow.execAdvancedUserManager)
QtCore.QObject.connect(self.btnCloseYear, QtCore.SIGNAL(_fromUtf8("clicked()")), MainWindow.execYearEnd)
QtCore.QObject.connect(self.btnNewYear, QtCore.SIGNAL(_fromUtf8("clicked()")), MainWindow.execYearNew)
QtCore.QObject.connect(self.actionArchivioAnniPrec, QtCore.SIGNAL(_fromUtf8("triggered()")), MainWindow.showArchBackup)
QtCore.QObject.connect(self.btnStudents, QtCore.SIGNAL(_fromUtf8("clicked()")), MainWindow.showStudentsManager)
QtCore.QObject.connect(self.btnTeachers, QtCore.SIGNAL(_fromUtf8("clicked()")), MainWindow.showTeachersManager)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "MainWindow", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("MainWindow", "Pannello di Amministrazione del Server", None, QtGui.QApplication.UnicodeUTF8))
self.labelServerId.setText(QtGui.QApplication.translate("MainWindow", "TextLabel", None, QtGui.QApplication.UnicodeUTF8))
self.labelYear.setText(QtGui.QApplication.translate("MainWindow", "Anno -", None, QtGui.QApplication.UnicodeUTF8))
self.btnNewYear.setText(QtGui.QApplication.translate("MainWindow", "Nuovo Anno", None, QtGui.QApplication.UnicodeUTF8))
self.btnCloseYear.setText(QtGui.QApplication.translate("MainWindow", "Chiusura Anno", None, QtGui.QApplication.UnicodeUTF8))
self.btnTeachers.setText(QtGui.QApplication.translate("MainWindow", "Gestione Insegnanti", None, QtGui.QApplication.UnicodeUTF8))
self.btnStudents.setText(QtGui.QApplication.translate("MainWindow", "Gestione Alunni", None, QtGui.QApplication.UnicodeUTF8))
self.btnAdvanced.setText(QtGui.QApplication.translate("MainWindow", "Gestione Avanzata", None, QtGui.QApplication.UnicodeUTF8))
self.menuImpostazioni.setTitle(QtGui.QApplication.translate("MainWindow", "Impostazioni", None, QtGui.QApplication.UnicodeUTF8))
self.menuHelp.setTitle(QtGui.QApplication.translate("MainWindow", "Help", None, QtGui.QApplication.UnicodeUTF8))
self.menuArchivi.setTitle(QtGui.QApplication.translate("MainWindow", "Archivi", None, QtGui.QApplication.UnicodeUTF8))
self.actionAbout.setText(QtGui.QApplication.translate("MainWindow", "About", None, QtGui.QApplication.UnicodeUTF8))
self.actionPreferenze.setText(QtGui.QApplication.translate("MainWindow", "Preferenze", None, QtGui.QApplication.UnicodeUTF8))
self.actionArchivioAnniPrec.setText(QtGui.QApplication.translate("MainWindow", "Archivio anni precedenti", None, QtGui.QApplication.UnicodeUTF8))
import classerman_rc
| itarozzi/classerman | src/ui/mainwindow_ui.py | Python | gpl-3.0 | 16,000 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
import frappe
from frappe.model.document import Document
from frappe.website.utils import delete_page_cache
class Homepage(Document):
def validate(self):
if not self.description:
self.description = frappe._("This is an example website auto-generated from ERPNext")
delete_page_cache('home')
def setup_items(self):
for d in frappe.get_all('Item', fields=['name', 'item_name', 'description', 'image'],
filters={'show_in_website': 1}, limit=3):
doc = frappe.get_doc('Item', d.name)
if not doc.route:
# set missing route
doc.save()
self.append('products', dict(item_code=d.name,
item_name=d.item_name, description=d.description, image=d.image))
| mhbu50/erpnext | erpnext/portal/doctype/homepage/homepage.py | Python | gpl-3.0 | 801 |
# (void)walker hardware platform support
# Copyright (C) 2012-2013 David Holm <[email protected]>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import abc
from ..utils import OrderedDict
from ..utils import enum
Architecture = enum('Test', 'X86', 'X8664', 'Mips', 'Arm', 'Generic',
enum_type='Architecture')
class Register(object):
_register_fmt = {16: '0x%032lX',
10: '0x%020lX',
8: '0x%016lX',
4: '0x%08lX',
2: '0x%04lX',
1: '0x%02lX'}
def __init__(self, name):
self._name = name
def name(self):
return self._name
def size(self):
raise NotImplementedError
def value(self):
raise NotImplementedError
def str(self):
if self.value() is not None:
return self._register_fmt[self.size()] % self.value()
chars_per_byte = 2
return ''.join(['-' * (self.size() * chars_per_byte)])
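# Freeze a live register: the returned StaticRegister caches the size() and
# value() observed at call time, so later changes to the live register (for
# example after the debuggee resumes) are not reflected in the snapshot.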
def create_static_register(register):
class StaticRegister(type(register), object):
def __init__(self, name):
super(StaticRegister, self).__init__(name)
self._size = register.size()
self._value = register.value()
def size(self):
return self._size
def value(self):
return self._value
return StaticRegister(register.name())
class Cpu(object):
__metaclass__ = abc.ABCMeta
def __init__(self, cpu_factory, registers):
self._registers = OrderedDict()
for group, register_list in registers.iteritems():
registers = OrderedDict([(x.name(),
cpu_factory.create_register(self, x))
for x in register_list])
self._registers[group] = registers
@classmethod
@abc.abstractmethod
def architecture(cls):
raise NotImplementedError
def register(self, name):
for register_dict in self._registers.itervalues():
if name in register_dict:
return register_dict[name]
return None
def registers(self):
return self._registers.iteritems()
@abc.abstractmethod
def stack_pointer(self):
raise NotImplementedError
@abc.abstractmethod
def program_counter(self):
raise NotImplementedError
class CpuFactory(object):
__metaclass__ = abc.ABCMeta
def create_cpu(self, architecture):
assert architecture in _cpu_map
return _cpu_map.get(architecture,
None)(self)
@abc.abstractmethod
def create_register(self, cpu, register):
raise NotImplementedError
class CpuRepository(object):
def __init__(self, cpu_factory):
self._cpu_factory = cpu_factory
self._cpus = {}
def get_cpu(self, architecture):
if architecture in self._cpus:
return self._cpus[architecture]
cpu = self._cpu_factory.create_cpu(architecture)
self._cpus[architecture] = cpu
return cpu
def register_cpu(cls):
_cpu_map[cls.architecture()] = cls
return cls
_cpu_map = {}
| dholm/voidwalker | voidwalker/framework/platform/cpu.py | Python | gpl-3.0 | 3,778 |
# Author: seedboy
# URL: https://github.com/seedboy
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import re
import traceback
import datetime
import urlparse
import sickbeard
import generic
from sickbeard.common import Quality
from sickbeard import logger
from sickbeard import tvcache
from sickbeard import db
from sickbeard import classes
from sickbeard import helpers
from sickbeard import show_name_helpers
from sickbeard.exceptions import ex, AuthException
from sickbeard import clients
from lib import requests
from lib.requests import exceptions
from sickbeard.bs4_parser import BS4Parser
from lib.unidecode import unidecode
from sickbeard.helpers import sanitizeSceneName
from sickbeard.show_name_helpers import allPossibleShowNames
class IPTorrentsProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "IPTorrents")
self.supportsBacklog = True
self.enabled = False
self.username = None
self.password = None
self.ratio = None
self.freeleech = False
self.cache = IPTorrentsCache(self)
self.urls = {'base_url': 'https://www.iptorrents.com',
'login': 'https://www.iptorrents.com/torrents/',
'search': 'https://www.iptorrents.com/torrents/?%s%s&q=%s&qf=ti',
}
self.url = self.urls['base_url']
self.categorie = 'l73=1&l78=1&l66=1&l65=1&l79=1&l5=1&l4=1'
def isEnabled(self):
return self.enabled
def imageName(self):
return 'iptorrents.png'
def getQuality(self, item, anime=False):
quality = Quality.sceneQuality(item[0], anime)
return quality
def _checkAuth(self):
if not self.username or not self.password:
raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")
return True
def _doLogin(self):
login_params = {'username': self.username,
'password': self.password,
'login': 'submit',
}
try:
response = self.session.post(self.urls['login'], data=login_params, timeout=30, verify=False)
except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError), e:
logger.log(u'Unable to connect to ' + self.name + ' provider: ' + ex(e), logger.ERROR)
return False
if re.search('tries left', response.text) \
or re.search('<title>IPT</title>', response.text) \
or response.status_code == 401:
logger.log(u'Invalid username or password for ' + self.name + ', Check your settings!', logger.ERROR)
return False
return True
def _get_season_search_strings(self, ep_obj):
search_string = {'Season': []}
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
if ep_obj.show.air_by_date or ep_obj.show.sports:
ep_string = show_name + ' ' + str(ep_obj.airdate).split('-')[0]
elif ep_obj.show.anime:
ep_string = show_name + ' ' + "%d" % ep_obj.scene_absolute_number
else:
ep_string = show_name + ' S%02d' % int(ep_obj.scene_season) #1) showName SXX
search_string['Season'].append(ep_string)
return [search_string]
def _get_episode_search_strings(self, ep_obj, add_string=''):
search_string = {'Episode': []}
if not ep_obj:
return []
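        # Dates are pipe-delimited below, e.g. "2014-03-14" -> "2014|03|14"
        # (presumably treated as alternative tokens further downstream).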
if self.show.air_by_date:
for show_name in set(allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
str(ep_obj.airdate).replace('-', '|')
search_string['Episode'].append(ep_string)
elif self.show.sports:
for show_name in set(allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
str(ep_obj.airdate).replace('-', '|') + '|' + \
ep_obj.airdate.strftime('%b')
search_string['Episode'].append(ep_string)
elif self.show.anime:
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
ep_string = sanitizeSceneName(show_name) + ' ' + \
"%i" % int(ep_obj.scene_absolute_number)
search_string['Episode'].append(ep_string)
else:
for show_name in set(show_name_helpers.allPossibleShowNames(self.show)):
ep_string = show_name_helpers.sanitizeSceneName(show_name) + ' ' + \
sickbeard.config.naming_ep_type[2] % {'seasonnumber': ep_obj.scene_season,
'episodenumber': ep_obj.scene_episode} + ' %s' % add_string
search_string['Episode'].append(re.sub('\s+', ' ', ep_string))
return [search_string]
def _doSearch(self, search_params, search_mode='eponly', epcount=0, age=0):
results = []
items = {'Season': [], 'Episode': [], 'RSS': []}
freeleech = '&free=on' if self.freeleech else ''
if not self._doLogin():
return results
for mode in search_params.keys():
for search_string in search_params[mode]:
if isinstance(search_string, unicode):
search_string = unidecode(search_string)
# URL with 50 tv-show results, or max 150 if adjusted in IPTorrents profile
searchURL = self.urls['search'] % (self.categorie, freeleech, search_string)
searchURL += ';o=seeders' if mode != 'RSS' else ''
logger.log(u"" + self.name + " search page URL: " + searchURL, logger.DEBUG)
data = self.getURL(searchURL)
if not data:
continue
try:
data = re.sub(r'(?im)<button.+?<[\/]button>', '', data, 0)
with BS4Parser(data, features=["html5lib", "permissive"]) as html:
if not html:
logger.log(u"Invalid HTML data: " + str(data), logger.DEBUG)
continue
if html.find(text='No Torrents Found!'):
logger.log(u"No results found for: " + search_string + " (" + searchURL + ")", logger.DEBUG)
continue
torrent_table = html.find('table', attrs={'class': 'torrents'})
torrents = torrent_table.find_all('tr') if torrent_table else []
#Continue only if one Release is found
if len(torrents) < 2:
logger.log(u"The Data returned from " + self.name + " do not contains any torrent",
logger.WARNING)
continue
for result in torrents[1:]:
try:
torrent = result.find_all('td')[1].find('a')
torrent_name = torrent.string
torrent_download_url = self.urls['base_url'] + (result.find_all('td')[3].find('a'))['href']
torrent_details_url = self.urls['base_url'] + torrent['href']
torrent_seeders = int(result.find('td', attrs={'class': 'ac t_seeders'}).string)
## Not used, perhaps in the future ##
#torrent_id = int(torrent['href'].replace('/details.php?id=', ''))
#torrent_leechers = int(result.find('td', attrs = {'class' : 'ac t_leechers'}).string)
except (AttributeError, TypeError):
continue
# Filter unseeded torrent and torrents with no name/url
if mode != 'RSS' and torrent_seeders == 0:
continue
if not torrent_name or not torrent_download_url:
continue
item = torrent_name, torrent_download_url
logger.log(u"Found result: " + torrent_name + " (" + torrent_details_url + ")", logger.DEBUG)
items[mode].append(item)
except Exception, e:
logger.log(u"Failed parsing " + self.name + " Traceback: " + traceback.format_exc(), logger.ERROR)
results += items[mode]
return results
def _get_title_and_url(self, item):
title, url = item
if title:
title = u'' + title
title = title.replace(' ', '.')
if url:
url = str(url).replace('&', '&')
return (title, url)
def findPropers(self, search_date=datetime.datetime.today()):
results = []
myDB = db.DBConnection()
sqlResults = myDB.select(
'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate FROM tv_episodes AS e' +
' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)' +
' WHERE e.airdate >= ' + str(search_date.toordinal()) +
' AND (e.status IN (' + ','.join([str(x) for x in Quality.DOWNLOADED]) + ')' +
' OR (e.status IN (' + ','.join([str(x) for x in Quality.SNATCHED]) + ')))'
)
if not sqlResults:
return []
for sqlshow in sqlResults:
self.show = helpers.findCertainShow(sickbeard.showList, int(sqlshow["showid"]))
if self.show:
curEp = self.show.getEpisode(int(sqlshow["season"]), int(sqlshow["episode"]))
searchString = self._get_episode_search_strings(curEp, add_string='PROPER|REPACK')
for item in self._doSearch(searchString[0]):
title, url = self._get_title_and_url(item)
results.append(classes.Proper(title, url, datetime.datetime.today(), self.show))
return results
def seedRatio(self):
return self.ratio
class IPTorrentsCache(tvcache.TVCache):
def __init__(self, provider):
tvcache.TVCache.__init__(self, provider)
# Only poll IPTorrents every 10 minutes max
self.minTime = 10
def _getRSSData(self):
search_params = {'RSS': ['']}
return {'entries': self.provider._doSearch(search_params)}
provider = IPTorrentsProvider()
| bcorbet/SickRage | sickbeard/providers/iptorrents.py | Python | gpl-3.0 | 11,284 |
from collections import defaultdict
class Solution(object):
def minWindow(self, S, T):
"""
:type S: str
:type T: str
:rtype: str
"""
pre = defaultdict(list)
for i, c in enumerate(T, -1):
pre[c].append(i)
for val in pre.values():
val.reverse()
start_index = [None] * (len(T) + 1)
lo, hi = float('-inf'), 0
for i, c in enumerate(S):
start_index[-1] = i
for p in pre[c]:
if start_index[p] is not None:
start_index[p + 1] = start_index[p]
if (c == T[-1] and start_index[-2] is not None
and i - start_index[-2] < hi - lo):
lo, hi = start_index[-2], i
if lo < 0:
return ''
else:
return S[lo:hi+1]
# print(Solution().minWindow("abcdebdde", "bde"))
# print(Solution().minWindow("nkzcnhczmccqouqadqtmjjzltgdzthm", "bt"))
print(Solution().minWindow("cnhczmccqouqadqtmjjzl", "mm"))
| wufangjie/leetcode | 727. Minimum Window Subsequence.py | Python | gpl-3.0 | 1,035 |
from django.conf.urls import patterns, include, url
urlpatterns = patterns('',
url(r'^$', 'webinterface.view.dashboard.main'),
url(r'^dashboard/$', 'webinterface.view.dashboard.main'),
url(r'^login/$', 'webinterface.view.login.main'),
url(r'^login/ajax/$', 'webinterface.view.login.ajax'),
url(r'^settings/$', 'webinterface.view.settings.main'),
url(r'^settings/ajax/$', 'webinterface.view.settings.ajax'),
url(r'^orders/$', 'webinterface.view.orders.main'),
url(r'^orders/ajax/$', 'webinterface.view.orders.ajax'),
) | cynja/coffeenator | webinterface/urls.py | Python | gpl-3.0 | 551 |
import numpy
class DifferentialEvolutionAbstract:
amount_of_individuals = None
f = None
p = None
end_method = None
def __init__(self, min_element=-1, max_element=1):
self.min_element = min_element
self.max_element = max_element
self.f = 0.5
self.p = 0.9
self.func = None
self.population = None
self.func_population = None
self.dim = 0
self.child_funcs = None
self.cost_list = []
self.end_method = 'max_iter'
def set_amount_of_individuals(self, amount_of_individuals):
self.amount_of_individuals = amount_of_individuals
def set_params(self, f, p):
self.f = f
self.p = p
def set_end_method(self, end_method):
self.end_method = end_method
def create_population(self):
        # Create the initial population: one uniform random vector per individual
population = []
for _ in range(self.amount_of_individuals):
population.append(numpy.random.uniform(self.min_element, self.max_element, self.dim))
return numpy.array(population)
def choose_best_individual(self):
        # Find the best individual (minimum objective value) in the population
func_list = list(self.func_population)
best_index = func_list.index(min(func_list))
return self.population[best_index]
def iteration(self):
return []
def optimize(self, func, dim, end_cond, debug_pop_print=-1):
return []
def return_cost_list(self):
return self.cost_list
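# Minimal usage sketch, assuming a hypothetical concrete subclass SomeDE that
# implements iteration() and optimize() (the subclass name is an assumption):
#   de = SomeDE(min_element=-5, max_element=5)
#   de.set_amount_of_individuals(50)
#   de.set_params(f=0.8, p=0.9)
#   best = de.optimize(lambda v: float(numpy.sum(v ** 2)), dim=10, end_cond=200)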
| QuantumTechDevStudio/RUDNEVGAUSS | archive/solver/DifferentialEvolutionAbstract.py | Python | gpl-3.0 | 1,558 |
import pickle
from matplotlib import pyplot as plt
plt.style.use('classic')
import matplotlib as mpl
fs = 12.
fw = 'bold'
mpl.rc('lines', linewidth=2., color='k')
mpl.rc('font', size=fs, weight=fw, family='Arial')
mpl.rc('legend', fontsize='small')
import numpy
def grad( x, u ) :
return numpy.gradient(u) / numpy.gradient(x)
date = '20160519'
base = '/home/mk-sim-linux/Battery_TempGrad/Python/batt_simulation/battsimpy/'
base_dir = '/home/mk-sim-linux/Battery_TempGrad/JournalPaper2/Paper2/ocv_unif35/'
fig_dir = '/home/mk-sim-linux/Battery_TempGrad/JournalPaper3/modeling_paper_p3/figs/'
#base_dir = '/home/m_klein/tgs_data/ocv_unif35/'
#base_dir = '/Volumes/Data/Paper2/ocv_dat/'
#bsp_path = '/Users/mk/Desktop/battsim/battsimpy/'
nmc_rest_523 = numpy.loadtxt( base+'data/Model_nmc/Model_Pars/solid/thermodynamics/2012Yang_523NMC_dchg_restOCV.csv', delimiter=',' )
nmc_cby25_111 = numpy.loadtxt( base+'data/Model_nmc/Model_Pars/solid/thermodynamics/2012Wu_NMC111_Cby25_dchg.csv' , delimiter=',' )
nmc_YangWu_mix = numpy.loadtxt( base+'data/Model_nmc/Model_Pars/solid/thermodynamics/YangWuMix_NMC_20170607.csv' , delimiter=',' )
lfp_prada_dchg = numpy.loadtxt( base+'data/Model_v1/Model_Pars/solid/thermodynamics/2012Prada_LFP_U_dchg.csv' , delimiter=',' )
graph_hess_dchg = numpy.loadtxt( base+'data/Model_nmc/Model_Pars/solid/thermodynamics/Ua_cell4Fit_NMC_2012Yang_refx.csv' , delimiter=',' ) #graphite_Hess_discharge_x.csv
#xin, Uin = 1.-lfp_prada_dchg[:,0], lfp_prada_dchg[:,1]
#xin, Uin = 1.-nmc_rest_523[:,0], nmc_rest_523[:,1]
xin, Uin = 1.-nmc_YangWu_mix[:,0], nmc_YangWu_mix[:,1]
#xin, Uin = 1.-nmc_cby25_111[:,0], nmc_cby25_111[:,1]
xin2, Uin2 = graph_hess_dchg[:,0], graph_hess_dchg[:,1]#-0.025
pfiles2 = [ base_dir+'slowOCVdat_cell4_slow_ocv_'+date+'.p', ]
# Load the cell ocv c/60 data
d = pickle.load( open( pfiles2[0], 'rb' ) )
max_cap = numpy.amax( d['interp']['cap'] )
x_cell, U_cell = 1-numpy.array(d['interp']['cap'])/max_cap*1., d['interp']['dchg']['volt']
# NMC 532 scale - NMC cyl cells (cell 4)
#scale_x = 1.8#1.5 # 1.55
#shift_x = -.01#-.06 #-.12
scale_x = 1.42 # 1.55
shift_x = -.03 #-.12
#scale_x1 = 1.9
#shift_x1 = -.03
## LFP Prada - (cell 2)
#scale_x = 1.25
#shift_x = 1.05-scale_x
# Graphite - scale NMC cyl cells (cell 4)
scale_x2 = 1/.8 #1./0.83 #
shift_x2 = -.06 #-.035
#scale_x2 = 1/.74
#shift_x2 = -.04
figres = 300
figname = base_dir+'ocv-plots_'+date+'.pdf'
sty = [ '-', '--' ]
fsz = (190./25.4,120./25.4)
f1, axes = plt.subplots(1,2,figsize=fsz)
a1,a2 = axes
# Plot the full cell ocv
a1.plot( x_cell, U_cell, '-b', label='Cell C/60 Data' )
# Plot the cathode curve for the shifted soc operating window
a1.plot( xin*scale_x+shift_x, Uin, '-g', label='Cathode' )
# Plot the anode curve for the shifted soc operating window
#a1t = a1.twinx()
a1.plot( xin2*scale_x2+shift_x2, Uin2, '-k', label='Anode' )
# Compute the cathode ocv for the full cell soc operating window
if xin[1] < xin[0] :
Uc = numpy.interp( x_cell, numpy.flipud(xin*scale_x+shift_x), numpy.flipud(Uin) )
else :
Uc = numpy.interp( x_cell, xin*scale_x+shift_x, Uin )
Ua = numpy.interp( x_cell, xin2*scale_x2+shift_x2, Uin2 )
# Plot the estimated full cell ocv curve for the aligned anode and cathode equilibrium curves
#a1.plot( x_cell, Uc-U_cell, ':k', label='U$_{anode}$ fit' )
#a1t.set_ylim([0.,2.])
a1.plot( x_cell, Uc-Ua, ':k', label='U$_{cell}$ fit' )
# Calculate the alignment stoichs for anode and cathode
Ua_out = Uc - U_cell
xa_out = (x_cell-shift_x2)/scale_x2
#numpy.savetxt( base+'data/Model_v1/Model_Pars/solid/thermodynamics/Ua_lfp_2012Prada.csv', numpy.array([xa_out, Ua_out]).T, delimiter=',' )
#numpy.savetxt( base+'data/Model_v1/Model_Pars/solid/thermodynamics/Ua_nmc_2012Yang.csv', numpy.array([xa_out, Ua_out]).T, delimiter=',' )
yin = 1.-xin
xc_lo = 1. - (-shift_x/scale_x)
xc_hi = 1. - (1.-shift_x)/scale_x
xa_lo = (-shift_x2/scale_x2)
xa_hi = (1.-shift_x2)/scale_x2
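# The stoichiometry limits above invert the affine soc maps used for the
# alignment: soc = x*scale + shift  =>  x = (soc - shift)/scale, evaluated
# at soc = 0 and soc = 1 (the cathode x is additionally flipped via 1 - x).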
# Print out the stoich limits for the anode and cathode
print 'xc_lo, xc_hi:',xc_lo, xc_hi
print 'xa_lo, xa_hi:',xa_lo, xa_hi
a1.set_xlabel( 'State of Charge', fontsize=fs, fontweight=fw )
a1.set_ylabel( 'Voltage vs. Li [V]', fontsize=fs, fontweight=fw )
a1.set_title( 'Full and Half Cell OCV', fontsize=fs, fontweight=fw )
a1.legend(loc='best')
a1.set_axisbelow(True)
a1.grid(color='gray')
a2.plot( x_cell, grad(x_cell, U_cell), label=r'$\frac{\partial U_{cell}}{\partial SOC}$' )
a2.plot( x_cell, -grad(x_cell, Ua), label=r'$\frac{\partial U_{anode}}{\partial SOC}$' )
a2.set_xlabel( 'State of Charge', fontsize=fs, fontweight=fw )
a2.set_ylabel( '$\partial U / \partial SOC$', fontsize=fs, fontweight=fw )
a2.set_title( 'OCV Gradients for Anode Alignment', fontsize=fs, fontweight=fw )
a2.legend(loc='best')
a2.set_axisbelow(True)
a2.grid(color='gray')
a2.set_ylim([-0.1,1.5])
#plt.suptitle('LFP/C$_6$ Half Cell OCV Alignment', fontsize=fs, fontweight=fw)
plt.suptitle('NMC/C$_6$ Half Cell OCV Alignment', fontsize=fs, fontweight=fw)
plt.tight_layout(rect=[0,0.03,1,0.97])
plt.show()
#f1.savefig( fig_dir+'ocv_alignment_cell2_lfp.pdf', dpi=figres)
#f1.savefig( fig_dir+'ocv_alignment_cell4_nmc.pdf', dpi=figres)
| matthewpklein/battsimpy | docs/extra_files/electrode_ocv_gen.py | Python | gpl-3.0 | 5,199 |
../../../../share/pyshared/jockey/xorg_driver.py | Alberto-Beralix/Beralix | i386-squashfs-root/usr/lib/python2.7/dist-packages/jockey/xorg_driver.py | Python | gpl-3.0 | 48 |
"""Holds all pytee logic."""
| KonishchevDmitry/pytee | pytee/__init__.py | Python | gpl-3.0 | 29 |
#!/usr/bin/env python
"""
unit test for filters module
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import math
import unittest
import numpy as np
from evo.core import filters
from evo.core import lie_algebra as lie
# TODO: clean these up and use proper fixtures.
POSES_1 = [
lie.se3(np.eye(3), np.array([0, 0, 0])),
lie.se3(np.eye(3), np.array([0, 0, 0.5])),
lie.se3(np.eye(3), np.array([0, 0, 0])),
lie.se3(np.eye(3), np.array([0, 0, 1]))
]
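# POSES_1 moves 0 -> 0.5 -> 0 -> 1 along z, so the accumulated path length
# between indices (0, 2) and between (2, 3) is exactly 1.0; these are the
# two pairs expected by test_poses1_all_pairs below.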
POSES_2 = [
lie.se3(np.eye(3), np.array([0, 0, 0])),
lie.se3(np.eye(3), np.array([0, 0, 0.5])),
lie.se3(np.eye(3), np.array([0, 0, 0.99])),
lie.se3(np.eye(3), np.array([0, 0, 1.0]))
]
POSES_3 = [
lie.se3(np.eye(3), np.array([0, 0, 0.0])),
lie.se3(np.eye(3), np.array([0, 0, 0.9])),
lie.se3(np.eye(3), np.array([0, 0, 0.99])),
lie.se3(np.eye(3), np.array([0, 0, 0.999])),
lie.se3(np.eye(3), np.array([0, 0, 0.9999])),
lie.se3(np.eye(3), np.array([0, 0, 0.99999])),
lie.se3(np.eye(3), np.array([0, 0, 0.999999])),
lie.se3(np.eye(3), np.array([0, 0, 0.9999999]))
]
POSES_4 = [
lie.se3(np.eye(3), np.array([0, 0, 0])),
lie.se3(np.eye(3), np.array([0, 0, 1])),
lie.se3(np.eye(3), np.array([0, 0, 1])),
lie.se3(np.eye(3), np.array([0, 0, 1]))
]
class TestFilterPairsByPath(unittest.TestCase):
def test_poses1_all_pairs(self):
target_path = 1.0
tol = 0.0
id_pairs = filters.filter_pairs_by_path(POSES_1, target_path, tol,
all_pairs=True)
self.assertEqual(id_pairs, [(0, 2), (2, 3)])
def test_poses1_wrong_target(self):
target_path = 2.5
tol = 0.0
id_pairs = filters.filter_pairs_by_path(POSES_1, target_path, tol,
all_pairs=True)
self.assertEqual(id_pairs, [])
def test_poses2_all_pairs_low_tolerance(self):
target_path = 1.0
tol = 0.001
id_pairs = filters.filter_pairs_by_path(POSES_2, target_path, tol,
all_pairs=True)
self.assertEqual(id_pairs, [(0, 3)])
def test_convergence_all_pairs(self):
target_path = 1.0
tol = 0.2
id_pairs = filters.filter_pairs_by_path(POSES_3, target_path, tol,
all_pairs=True)
self.assertEqual(id_pairs, [(0, 7)])
axis = np.array([1, 0, 0])
POSES_5 = [
lie.se3(lie.so3_exp(axis * 0.0), np.array([0, 0, 0])),
lie.se3(lie.so3_exp(axis * math.pi), np.array([0, 0, 0])),
lie.se3(lie.so3_exp(axis * 0.0), np.array([0, 0, 0])),
lie.se3(lie.so3_exp(axis * math.pi / 3), np.array([0, 0, 0])),
lie.se3(lie.so3_exp(axis * math.pi), np.array([0, 0, 0]))
]
TRANSFORM = lie.random_se3()
POSES_5_TRANSFORMED = [TRANSFORM.dot(p) for p in POSES_5]
axis = np.array([1, 0, 0])
p0 = lie.se3(lie.so3_exp(axis * 0.0), np.array([0, 0, 0]))
pd = lie.se3(lie.so3_exp(axis * (math.pi / 3.)), np.array([1, 2, 3]))
p1 = np.dot(p0, pd)
p2 = np.dot(p1, pd)
p3 = np.dot(p2, pd)
POSES_6 = [p0, p1, p2, p3, p3]
POSES_6_TRANSFORMED = [TRANSFORM.dot(p) for p in POSES_6]
class TestFilterPairsByAngle(unittest.TestCase):
def test_poses5(self):
tol = 0.001
expected_result = [(0, 1), (1, 2), (2, 4)]
# Result should be unaffected by global transformation.
for poses in (POSES_5, POSES_5_TRANSFORMED):
target_angle = math.pi - tol
id_pairs = filters.filter_pairs_by_angle(poses, target_angle, tol,
all_pairs=False)
self.assertEqual(id_pairs, expected_result)
# Check for same result when using degrees:
target_angle = np.rad2deg(target_angle)
id_pairs = filters.filter_pairs_by_angle(poses, target_angle, tol,
all_pairs=False,
degrees=True)
self.assertEqual(id_pairs, expected_result)
def test_poses5_all_pairs(self):
tol = 0.01
expected_result = [(0, 1), (0, 4), (1, 2), (2, 4)]
# Result should be unaffected by global transformation.
for poses in (POSES_5, POSES_5_TRANSFORMED):
target_angle = math.pi
id_pairs = filters.filter_pairs_by_angle(poses, target_angle, tol,
all_pairs=True)
self.assertEqual(id_pairs, expected_result)
# Check for same result when using degrees:
target_angle = np.rad2deg(target_angle)
id_pairs = filters.filter_pairs_by_angle(poses, target_angle, tol,
all_pairs=True,
degrees=True)
self.assertEqual(id_pairs, expected_result)
def test_poses6(self):
tol = 0.001
target_angle = math.pi - tol
expected_result = [(0, 3)]
# Result should be unaffected by global transformation.
for poses in (POSES_6, POSES_6_TRANSFORMED):
id_pairs = filters.filter_pairs_by_angle(poses, target_angle, tol,
all_pairs=False)
self.assertEqual(id_pairs, expected_result)
def test_poses6_all_pairs(self):
target_angle = math.pi
tol = 0.001
expected_result = [(0, 3), (0, 4)]
# Result should be unaffected by global transformation.
for poses in (POSES_6, POSES_6_TRANSFORMED):
id_pairs = filters.filter_pairs_by_angle(poses, target_angle, tol,
all_pairs=True)
self.assertEqual(id_pairs, expected_result)
if __name__ == '__main__':
unittest.main(verbosity=2)
| MichaelGrupp/evo | test/test_filters.py | Python | gpl-3.0 | 6,476 |
# PiTimer - Python Hardware Programming Education Project For Raspberry Pi
# Copyright (C) 2015 Jason Birch
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#/****************************************************************************/
#/* PiTimer - Step 8 - Controlling physical relays. */
#/* ------------------------------------------------------------------------ */
#/* V1.00 - 2015-07-04 - Jason Birch */
#/* ------------------------------------------------------------------------ */
#/* Class to handle user input, output display and interface state machine. */
#/****************************************************************************/
import string
import operator
import datetime
import SystemTime
import Schedule
import ScheduleItem
# Constants to define current user interface display.
STATE_MAIN_MENU = 0
STATE_ADD_SCHEDULE = 1
STATE_DEL_SCHEDULE = 2
STATE_RELAY_STATES = 3
STATE_SCHEDULE = 4
STATE_SET_SYSTEM_TIME = 5
STATE_SHUTDOWN = 6
# Constants to define display modes.
MODE_STANDARD = 0
MODE_CONFIRM = 1
class UserInterface:
   def __init__(self, NewWindow, NewThisSchedule, NewThisRelays):
      # Store a reference to the system window class to display onto.
      self.ThisWindow = NewWindow
# Store a reference to the schedule class to display schedule inforamtion.
self.ThisSchedule = NewThisSchedule
# Store a reference to the relays class to display relay inforamtion.
self.ThisRelays = NewThisRelays
# Create an instance of the system time class, to display the system time.
self.ThisSystemTime = SystemTime.SystemTime()
# Display application splash screen on initialisation.
self.DisplaySplash()
# Buffer for input strings.
self.InputBuffer = ""
# List position, moved by user.
self.SelectPos = 0
self.SelectID = 0
# Display the initial user interface, the main menu.
self.InterfaceState = STATE_MAIN_MENU
#/***************************************************/
#/* Display a splash screen for application startup */
#/* to show information about this application. */
#/***************************************************/
def DisplaySplash(self):
self.ThisWindow.clear()
self.ThisWindow.refresh()
print("{:^20}".format("PiTimer") + "\r")
print("{:^20}".format("2015-06-23") + "\r")
print("{:^20}".format("Version 1.00") + "\r")
print("{:^20}".format("(C) Jason Birch") + "\r")
self.ThisWindow.refresh()
#/***********************************************************************/
#/* Distribute key press events to the current user interface function. */
#/***********************************************************************/
def KeyPress(self, KeyCode):
Result = KeyCode
if self.InterfaceState == STATE_MAIN_MENU:
Result = self.KeysMainMenu(KeyCode)
elif self.InterfaceState == STATE_ADD_SCHEDULE:
Result = self.KeysAddSchedule(KeyCode)
elif self.InterfaceState == STATE_DEL_SCHEDULE:
Result = self.KeysDelSchedule(KeyCode)
elif self.InterfaceState == STATE_SCHEDULE:
Result = self.KeysSchedule(KeyCode)
elif self.InterfaceState == STATE_RELAY_STATES:
Result = self.KeysRelayStates(KeyCode)
elif self.InterfaceState == STATE_SET_SYSTEM_TIME:
Result = self.KeysSetSystemTime(KeyCode)
return Result
#/****************************************************************/
#/* Certain user interface displays need to update every second. */
#/****************************************************************/
def DisplayRefresh(self):
if self.InterfaceState == STATE_MAIN_MENU:
self.DisplayMainMenu()
elif self.InterfaceState == STATE_ADD_SCHEDULE:
self.DisplayAddSchedule()
elif self.InterfaceState == STATE_DEL_SCHEDULE:
self.DisplayDelSchedule()
elif self.InterfaceState == STATE_SCHEDULE:
self.DisplaySchedule()
elif self.InterfaceState == STATE_RELAY_STATES:
self.DisplayRelayStates()
elif self.InterfaceState == STATE_SET_SYSTEM_TIME:
self.DisplaySetSystemTime()
#/*******************************************************/
#/* Change the current user interface to a new display. */
#/*******************************************************/
def SetInterfaceState(self, NewInterfaceState):
# Start on standard display mode.
self.Mode = MODE_STANDARD
# Clear the input buffer.
self.InputBuffer = ""
# Reset list selection position.
      self.SelectPos = 0
self.SelectID = 0
self.InterfaceState = NewInterfaceState
if self.InterfaceState == STATE_MAIN_MENU:
self.DisplayMainMenu()
elif self.InterfaceState == STATE_ADD_SCHEDULE:
self.DisplayAddSchedule()
elif self.InterfaceState == STATE_DEL_SCHEDULE:
self.DisplayDelSchedule()
elif self.InterfaceState == STATE_SCHEDULE:
self.DisplaySchedule()
elif self.InterfaceState == STATE_RELAY_STATES:
self.DisplayRelayStates()
elif self.InterfaceState == STATE_SET_SYSTEM_TIME:
self.DisplaySetSystemTime()
#/*********************************************************/
#/* Provided the input from the user and a mask to define */
#/* how to display the input, format a string to display. */
#/*********************************************************/
def GetMaskedInput(self, Mask, Input):
InputCount = 0
Result = ""
for Char in Mask:
if Char == "#" and len(Input) > InputCount:
Result += Input[InputCount:InputCount + 1]
InputCount += 1
else:
Result += Char
return Result
#/************************************************/
#/* Gather the input required for an input mask. */
#/************************************************/
def KeyMaskedInput(self, Mask, Input, KeyCode):
# If a valid key is pressed, add to the input buffer.
if len(Input) < Mask.count("#") and KeyCode >= ord("0") and KeyCode <= ord("9"):
Input += chr(KeyCode)
# If delete key is pressed, delete the last entered key.
elif KeyCode == 127 and len(Input) > 0:
Input = Input[:-1]
return Input
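   # Illustrative example (added comment, not in the original source): with
   # mask "##:##", GetMaskedInput("##:##", "12") renders "12:##"; once the
   # buffer holds "1234" the same call renders "12:34". KeyMaskedInput only
   # accepts digits while the buffer is shorter than Mask.count("#"), and
   # key code 127 (delete) removes the last entered digit.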
#/*****************************/
#/* MAIN MENU user interface. */
#/*****************************/
def DisplayMainMenu(self):
self.ThisWindow.clear()
self.ThisWindow.refresh()
print("{:>20}".format(self.ThisSystemTime.SystemTimeString()) + "\r")
print("{:^20}".format("1 Add 4 Schedule") + "\r")
print("{:^20}".format("2 Delete 5 Set Time") + "\r")
print("{:^20}".format("3 Relays 6 Shutdown") + "\r")
self.ThisWindow.refresh()
def KeysMainMenu(self, KeyCode):
Result = KeyCode
# If menu item 1 is selected, change to display add schedule.
if KeyCode == ord("1"):
self.SetInterfaceState(STATE_ADD_SCHEDULE)
# If menu item 2 is selected, change to display del schedule.
if KeyCode == ord("2"):
self.SetInterfaceState(STATE_DEL_SCHEDULE)
# If menu item 3 is selected, change to display relay states.
if KeyCode == ord("3"):
self.SetInterfaceState(STATE_RELAY_STATES)
# If menu item 4 is selected, change to display schedule.
if KeyCode == ord("4"):
self.SetInterfaceState(STATE_SCHEDULE)
# If menu item 5 is selected, change to display set system time.
if KeyCode == ord("5"):
self.SetInterfaceState(STATE_SET_SYSTEM_TIME)
# If menu item 6 is selected, return ESC key to the application main loop.
if KeyCode == ord("6"):
Result = 27
return Result
#/********************************/
#/* RELAY STATES user interface. */
#/********************************/
def DisplayRelayStates(self):
self.ThisWindow.clear()
self.ThisWindow.refresh()
self.ThisRelays.DisplayRelayStates()
self.ThisWindow.refresh()
def KeysRelayStates(self, KeyCode):
Result = KeyCode
# If enter key is pressed, change to display main menu.
if KeyCode == 10:
self.SetInterfaceState(STATE_MAIN_MENU)
return Result
#/********************************/
#/* ADD SCHEDULE user interface. */
#/********************************/
def DisplayAddSchedule(self):
self.ThisWindow.clear()
self.ThisWindow.refresh()
print("{:^20}".format("ADD SCHEDULE") + "\r")
print(self.GetMaskedInput("####-##-## ##:##:##\r\nPeriod ### ##:##:##\r\nRelay ## State #\r", self.InputBuffer))
self.ThisWindow.refresh()
def KeysAddSchedule(self, KeyCode):
Result = KeyCode
self.InputBuffer = self.KeyMaskedInput("####-##-## ##:##:## ### ##:##:## ## #", self.InputBuffer, KeyCode)
# If enter key is pressed, change to display main menu.
if KeyCode == 10:
# If full user input has been gathered, add a schedule item.
if len(self.InputBuffer) == 26:
# Parse user input.
UserInput = self.GetMaskedInput("####-##-## ##:##:## ### ##:##:## ## #", self.InputBuffer)
RelayState = {
"0":ScheduleItem.RELAY_OFF,
"1":ScheduleItem.RELAY_ON,
"2":ScheduleItem.RELAY_TOGGLE,
}.get(UserInput[36:37], ScheduleItem.RELAY_TOGGLE)
PeriodSeconds = string.atoi(UserInput[30:32]) + 60 * string.atoi(UserInput[27:29]) + 60 * 60 * string.atoi(UserInput[24:26]) + 24 * 60 * 60 * string.atoi(UserInput[20:23])
PeriodDays = operator.div(PeriodSeconds, 24 * 60 * 60)
PeriodSeconds = operator.mod(PeriodSeconds, 24 * 60 * 60)
# Add schedule item, ignore errors from invalid data entered.
try:
self.ThisSchedule.AddSchedule(string.atoi(UserInput[33:35]), datetime.datetime(string.atoi(UserInput[0:4]), string.atoi(UserInput[5:7]), string.atoi(UserInput[8:10]), string.atoi(UserInput[11:13]), string.atoi(UserInput[14:16]), string.atoi(UserInput[17:19])), RelayState, datetime.timedelta(PeriodDays, PeriodSeconds))
except:
print("")
self.ThisWindow.refresh()
self.SetInterfaceState(STATE_MAIN_MENU)
return Result
#/********************************/
#/* DEL SCHEDULE user interface. */
#/********************************/
def DisplayDelSchedule(self):
self.ThisWindow.clear()
self.ThisWindow.refresh()
if self.Mode == MODE_STANDARD:
print("{:^20}".format("DELETE SCHEDULE") + "\r")
print("\r")
if self.ThisSchedule.GetItemCount():
self.SelectID = self.ThisSchedule.DisplaySchedule(self.SelectPos, 1)
else:
print("{:^20}".format("Empty") + "\r")
elif self.Mode == MODE_CONFIRM:
print("{:^20}".format("DELETE SCHEDULE") + "\r")
print("\r")
print("{:^20}".format("ARE YOU SURE?") + "\r")
print("{:^20}".format("(4=N, 6=Y)") + "\r")
self.ThisWindow.refresh()
def KeysDelSchedule(self, KeyCode):
Result = KeyCode
if self.Mode == MODE_STANDARD:
# If a key at the top of the keypad is pressed, move up the list.
if (KeyCode == ord("1") or KeyCode == ord("2") or KeyCode == ord("3")) and self.SelectPos > 0:
self.SelectPos -= 1
# If a key at the bottom of the keypad is pressed, move down the list.
elif (KeyCode == ord("0") or KeyCode == ord("7") or KeyCode == ord("8") or KeyCode == ord("9")) and self.SelectPos < self.ThisSchedule.GetItemCount() - 1:
self.SelectPos += 1
# If enter key is pressed, enter confirm mode.
if KeyCode == 10:
if self.ThisSchedule.GetItemCount():
self.Mode = MODE_CONFIRM
else:
self.SetInterfaceState(STATE_MAIN_MENU)
# If delete key is pressed, change to display main menu.
if KeyCode == 127:
self.SetInterfaceState(STATE_MAIN_MENU)
elif self.Mode == MODE_CONFIRM:
if KeyCode == ord("4"):
self.SetInterfaceState(STATE_MAIN_MENU)
elif KeyCode == ord("6"):
self.ThisSchedule.DelSchedule(self.SelectID)
self.SetInterfaceState(STATE_MAIN_MENU)
return Result
#/************************************/
#/* CURRENT SCHEDULE user interface. */
#/************************************/
def DisplaySchedule(self):
self.ThisWindow.clear()
self.ThisWindow.refresh()
if self.ThisSchedule.GetItemCount():
self.ThisSchedule.DisplaySchedule(self.SelectPos, 2)
else:
print("\r")
print("{:^20}".format("Empty") + "\r")
self.ThisWindow.refresh()
def KeysSchedule(self, KeyCode):
Result = KeyCode
# If a key at the top of the keypad is pressed, move up the list.
if (KeyCode == ord("1") or KeyCode == ord("2") or KeyCode == ord("3")) and self.SelectPos > 0:
self.SelectPos -= 1
# If a key at the bottom of the keypad is pressed, move down the list.
elif (KeyCode == ord("0") or KeyCode == ord("7") or KeyCode == ord("8") or KeyCode == ord("9")) and self.SelectPos < self.ThisSchedule.GetItemCount() - 1:
self.SelectPos += 1
# If enter key is pressed, change to display main menu.
elif KeyCode == 10:
self.SetInterfaceState(STATE_MAIN_MENU)
return Result
#/***********************************/
#/* SET SYSTEM TIME user interface. */
#/***********************************/
def DisplaySetSystemTime(self):
self.ThisWindow.clear()
self.ThisWindow.refresh()
print("{:^20}".format("SET SYSTEM TIME") + "\r")
print(self.GetMaskedInput("####-##-## ##:##:##\r", self.InputBuffer))
self.ThisWindow.refresh()
def KeysSetSystemTime(self, KeyCode):
Result = KeyCode
self.InputBuffer = self.KeyMaskedInput("####-##-## ##:##:##", self.InputBuffer, KeyCode)
# If enter key is pressed, change to display main menu.
if KeyCode == 10:
# If full user input has been gathered, set the system time.
if len(self.InputBuffer) == 14:
            # BOOKMARK: THIS IS A PLACEHOLDER FOR WHEN THE CLOCK MODULE IS IMPLEMENTED.
self.ThisSystemTime.SetSystemTime(self.GetMaskedInput("####-##-## ##:##:##", self.InputBuffer))
self.SetInterfaceState(STATE_MAIN_MENU)
return Result
| BirchJD/RPiTimer | PiTimer_Step-8/UserInterface.py | Python | gpl-3.0 | 14,823 |
import sys,os
class Solution():
def reverse(self, x):
sign=1
if x<0:
sign=-1
x=x*-1
token=str(x)
str_rev=""
str_len=len(token)
for i in range(str_len):
str_rev+=token[str_len-i-1]
num_rev=int(str_rev)
if sign==1 and num_rev>2**31-1:
return 0
if sign==-1 and num_rev>2**31:
return 0
return num_rev*sign
my_sol=Solution()
print my_sol.reverse(123)
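# Illustrative overflow check (added, not in the original source):
# reverse(1534236469) reverses to 9646324351, which exceeds 2**31 - 1
# (2147483647), so the method returns 0 instead of the overflowed value.
print my_sol.reverse(1534236469)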
| urashima9616/Leetcode_Python | leet7.py | Python | gpl-3.0 | 491 |
# -*- coding: utf-8 -*-
__author__ = 'LIWEI240'
"""
Constants definition
"""
class Const(object):
class RetCode(object):
OK = 0
InvalidParam = -1
NotExist = -2
ParseError = -3 | lwldcr/keyboardman | common/const.py | Python | gpl-3.0 | 214 |
# -*- coding: utf-8 -*-
#----------------------------------------------------------------------------
# Menu for quickly adding waypoints when on move
#----------------------------------------------------------------------------
# Copyright 2007-2008, Oliver White
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#---------------------------------------------------------------------------
from modules.base_module import RanaModule
import cairo
from time import time
from math import pi
def getModule(*args, **kwargs):
return ClickMenu(*args, **kwargs)
class ClickMenu(RanaModule):
"""Overlay info on the map"""
def __init__(self, *args, **kwargs):
RanaModule.__init__(self, *args, **kwargs)
self.lastWaypoint = "(none)"
self.lastWaypointAddTime = 0
self.messageLingerTime = 2
def handleMessage(self, message, messageType, args):
if message == "addWaypoint":
m = self.m.get("waypoints", None)
if m is not None:
self.lastWaypoint = m.newWaypoint()
self.lastWaypointAddTime = time()
def drawMapOverlay(self, cr):
"""Draw an overlay on top of the map, showing various information
about position etc."""
        # waypoints will be done in another way, so this is disabled for the time being
# (x,y,w,h) = self.get('viewport')
#
# dt = time() - self.lastWaypointAddTime
# if(dt > 0 and dt < self.messageLingerTime):
# self.drawNewWaypoint(cr, x+0.5*w, y+0.5*h, w*0.3)
# else:
# m = self.m.get('clickHandler', None)
# if(m != None):
# m.registerXYWH(x+0.25*w,y+0.25*h,w*0.5,h*0.5, "clickMenu:addWaypoint")
def drawNewWaypoint(self, cr, x, y, size):
text = self.lastWaypoint
cr.set_font_size(200)
extents = cr.text_extents(text)
(w, h) = (extents[2], extents[3])
cr.set_source_rgb(0, 0, 0.5)
cr.arc(x, y, size, 0, 2 * pi)
cr.fill()
x1 = x - 0.5 * w
y1 = y + 0.5 * h
border = 20
cr.set_source_rgb(1, 1, 1)
cr.move_to(x1, y1)
cr.show_text(text)
cr.fill()
| ryfx/modrana | modules/_mod_clickMenu.py | Python | gpl-3.0 | 2,761 |
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 17 22:06:52 2017
Based on: print_MODFLOW_inputs_res_NWT.m
@author: gcng
"""
# print_MODFLOW_inputs
import numpy as np
import MODFLOW_NWT_lib as mf # functions to write individual MODFLOW files
import os # os functions
from ConfigParser import SafeConfigParser
parser = SafeConfigParser()
parser.read('settings.ini')
LOCAL_DIR = parser.get('settings', 'local_dir')
GSFLOW_DIR = LOCAL_DIR + "/GSFLOW"
# - directories
sw_2005_NWT = 2 # 1 for MODFLOW-2005; 2 for MODFLOW-NWT algorithm (both can be
# carried out with MODFLOW-NWT code)
fl_BoundConstH = 0 # 1 for const head at high elev boundary, needed for numerical
# convergence for AGU2016 poster. Maybe resolved with MODFLOW-NWT?
if sw_2005_NWT == 1:
# MODFLOW input files
GSFLOW_indir = GSFLOW_DIR + '/inputs/MODFLOW_2005/'
# MODFLOW output files
GSFLOW_outdir = GSFLOW_DIR + '/outputs/MODFLOW_2005/'
elif sw_2005_NWT == 2:
# MODFLOW input files
GSFLOW_indir = GSFLOW_DIR + '/inputs/MODFLOW_NWT/'
# MODFLOW output files
GSFLOW_outdir = GSFLOW_DIR + '/outputs/MODFLOW_NWT/'
infile_pre = 'test2lay_py'
NLAY = 2
DZ = [100, 50] # [NLAYx1] [m] ***testing
# DZ = [350, 100] # [NLAYx1] [m] ***testing
# length of transient stress period (follows 1-day steady-state period) [d]
# perlen_tr = 365; # [d], ok if too long
# perlen_tr = 365*5 + ceil(365*5/4); # [d], includes leap years; ok if too long (I think, but maybe run time is longer?)
perlen_tr = 365*30 + np.ceil(365*30/4) # [d], includes leap years; ok if too long (I think, but maybe run time is longer?)
GIS_indir = GSFLOW_DIR + '/DataToReadIn/GIS/'
# use restart file as initial cond (empty string to not use restart file)
fil_res_in = '' # empty string to not use restart file
#fil_res_in = '/home/gcng/workspace/ProjectFiles/AndesWaterResources/GSFLOW/outputs/MODFLOW/test2lay_melt_30yr.out' # empty string to not use restart file
# for various files: ba6, dis, uzf, lpf
surfz_fil = GIS_indir + 'topo.asc'
# surfz_fil = GIS_indir + 'SRTM_new_20161208.asc'
# for various files: ba6, uzf
mask_fil = GIS_indir + 'basinmask_dischargept.asc'
# for sfr
reach_fil = GIS_indir + 'reach_data.txt'
segment_fil_all = [GIS_indir + 'segment_data_4A_INFORMATION_Man.csv',
GIS_indir + 'segment_data_4B_UPSTREAM_Man.csv',
GIS_indir + 'segment_data_4C_DOWNSTREAM_Man.csv']
# create MODFLOW input directory if it does not exist:
if not os.path.isdir(GSFLOW_indir):
os.makedirs(GSFLOW_indir)
# while we're at it, create MODFLOW output file if it does not exist:
if not os.path.isdir(GSFLOW_outdir):
os.makedirs(GSFLOW_outdir)
##
mf.write_dis_MOD2_f(GSFLOW_indir, infile_pre, surfz_fil, NLAY, DZ, perlen_tr)
mf.write_ba6_MOD3_2(GSFLOW_indir, infile_pre, mask_fil, fl_BoundConstH) # list this below write_dis_MOD2_f
# flow algorithm
if sw_2005_NWT == 1:
    mf.write_lpf_MOD2_f2_2(GSFLOW_indir, infile_pre, surfz_fil, NLAY)
elif sw_2005_NWT == 2:
    # MODFLOW-NWT files
    mf.write_upw_MOD2_f2_2(GSFLOW_indir, infile_pre, surfz_fil, NLAY)
    mf.NWT_write_file(GSFLOW_indir, infile_pre)
# unsat zone and streamflow input files
mf.make_uzf3_f_2(GSFLOW_indir, infile_pre, surfz_fil, mask_fil)
mf.make_sfr2_f_Mannings(GSFLOW_indir, infile_pre, reach_fil, segment_fil_all) # list this below write_dis_MOD2_f
# Write PCG file (only used for MODFLOW-2005, but this function also creates OC file)
mf.write_OC_PCG_MOD_f(GSFLOW_indir, infile_pre, perlen_tr)
# Write namefile
mf.write_nam_MOD_f2_NWT(GSFLOW_indir, GSFLOW_outdir, infile_pre, fil_res_in, sw_2005_NWT)
| UMN-Hydro/GSFLOW_pre-processor | python_scripts/MODFLOW_scripts/print_MODFLOW_inputs_res_NWT.py | Python | gpl-3.0 | 3,667 |
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""
"""
__version__ = "$Id$"
#end_pymotw_header
import EasyDialogs
valid_responses = { 1:'yes',
0:'no',
-1:'cancel',
}
response = EasyDialogs.AskYesNoCancel('Select an option')
print 'You selected:', valid_responses[response]
| qilicun/python | python2/PyMOTW-1.132/PyMOTW/EasyDialogs/EasyDialogs_AskYesNoCancel.py | Python | gpl-3.0 | 390 |
# -----------------------------------------------------------------------------
# Getting Things GNOME! - a personal organizer for the GNOME desktop
# Copyright (c) 2008-2013 - Lionel Dricot & Bertrand Rousseau
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
from gi.repository import Gtk
from gi.repository import GdkPixbuf
from GTG.core.tag import ALLTASKS_TAG
from GTG.gtk.colors import get_colored_tags_markup, rgba_to_hex
from GTG.backends.backend_signals import BackendSignals
class BackendsTree(Gtk.TreeView):
"""
Gtk.TreeView that shows the currently loaded backends.
"""
COLUMN_BACKEND_ID = 0 # never shown, used for internal lookup.
COLUMN_ICON = 1
COLUMN_TEXT = 2 # holds the backend "human-readable" name
COLUMN_TAGS = 3
def __init__(self, backendsdialog):
"""
Constructor, just initializes the gtk widgets
@param backends: a reference to the dialog in which this is
loaded
"""
super().__init__()
self.dialog = backendsdialog
self.req = backendsdialog.get_requester()
self._init_liststore()
self._init_renderers()
self._init_signals()
self.refresh()
def refresh(self):
"""refreshes the Gtk.Liststore"""
self.backendid_to_iter = {}
self.liststore.clear()
# Sort backends
# 1, put default backend on top
# 2, sort backends by human name
backends = list(self.req.get_all_backends(disabled=True))
backends = sorted(backends,
key=lambda backend: (not backend.is_default(),
backend.get_human_name()))
for backend in backends:
self.add_backend(backend)
self.on_backend_state_changed(None, backend.get_id())
def on_backend_added(self, sender, backend_id):
"""
Signal callback executed when a new backend is loaded
@param sender: not used, only here to let this function be used as a
callback
@param backend_id: the id of the backend to add
"""
# Add
backend = self.req.get_backend(backend_id)
if not backend:
return
self.add_backend(backend)
self.refresh()
# Select
self.select_backend(backend_id)
        # Update its enabled state
self.on_backend_state_changed(None, backend.get_id())
def add_backend(self, backend):
"""
Adds a new backend to the list
@param backend_id: the id of the backend to add
"""
if backend:
backend_iter = self.liststore.append([
backend.get_id(),
self.dialog.get_pixbuf_from_icon_name(backend.get_icon(),
16),
backend.get_human_name(),
self._get_markup_for_tags(backend.get_attached_tags()),
])
self.backendid_to_iter[backend.get_id()] = backend_iter
def on_backend_state_changed(self, sender, backend_id):
"""
Signal callback executed when a backend is enabled/disabled.
@param sender: not used, only here to let this function be used as a
callback
@param backend_id: the id of the backend to add
"""
if backend_id in self.backendid_to_iter:
b_iter = self.backendid_to_iter[backend_id]
b_path = self.liststore.get_path(b_iter)
backend = self.req.get_backend(backend_id)
backend_name = backend.get_human_name()
if backend.is_enabled():
text = backend_name
else:
# FIXME This snippet is on more than 2 places!!!
# FIXME create a function which takes a widget and
# flag and returns color as #RRGGBB
style_context = self.get_style_context()
color = style_context.get_color(Gtk.StateFlags.INSENSITIVE)
color = rgba_to_hex(color)
text = f"<span color='{color}'>{backend_name}</span>"
self.liststore[b_path][self.COLUMN_TEXT] = text
# Also refresh the tags
new_tags = self._get_markup_for_tags(backend.get_attached_tags())
self.liststore[b_path][self.COLUMN_TAGS] = new_tags
def _get_markup_for_tags(self, tag_names):
"""Given a list of tags names, generates the pango markup to render
that list with the tag colors used in GTG
@param tag_names: the list of the tags (strings)
@return str: the pango markup string
"""
if ALLTASKS_TAG in tag_names:
tags_txt = ""
else:
tags_txt = get_colored_tags_markup(self.req, tag_names)
return "<small>" + tags_txt + "</small>"
def remove_backend(self, backend_id):
""" Removes a backend from the treeview, and selects the first (to show
something in the configuration panel
@param backend_id: the id of the backend to remove
"""
if backend_id in self.backendid_to_iter:
self.liststore.remove(self.backendid_to_iter[backend_id])
del self.backendid_to_iter[backend_id]
self.select_backend()
def _init_liststore(self):
"""Creates the liststore"""
self.liststore = Gtk.ListStore(object, GdkPixbuf.Pixbuf, str, str)
self.set_model(self.liststore)
def _init_renderers(self):
"""Initializes the cell renderers"""
# We hide the columns headers
self.set_headers_visible(False)
# For the backend icon
pixbuf_cell = Gtk.CellRendererPixbuf()
tvcolumn_pixbuf = Gtk.TreeViewColumn('Icon', pixbuf_cell)
tvcolumn_pixbuf.add_attribute(pixbuf_cell, 'pixbuf', self.COLUMN_ICON)
self.append_column(tvcolumn_pixbuf)
# For the backend name
text_cell = Gtk.CellRendererText()
tvcolumn_text = Gtk.TreeViewColumn('Name', text_cell)
tvcolumn_text.add_attribute(text_cell, 'markup', self.COLUMN_TEXT)
self.append_column(tvcolumn_text)
text_cell.connect('edited', self.cell_edited_callback)
text_cell.set_property('editable', True)
# For the backend tags
tags_cell = Gtk.CellRendererText()
tvcolumn_tags = Gtk.TreeViewColumn('Tags', tags_cell)
tvcolumn_tags.add_attribute(tags_cell, 'markup', self.COLUMN_TAGS)
self.append_column(tvcolumn_tags)
def cell_edited_callback(self, text_cell, path, new_text):
"""If a backend name is changed, it saves the changes in the Backend
@param text_cell: not used. The Gtk.CellRendererText that emitted the
signal. Only here because it's passed by the signal
@param path: the Gtk.TreePath of the edited cell
@param new_text: the new name of the backend
"""
# we strip everything not permitted in backend names
new_text = ''.join(c for c in new_text if (c.isalnum() or c in [" ", "-", "_"]))
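        # e.g. "My Back-end!" becomes "My Back-end": alphanumerics, spaces,
        # "-" and "_" are kept, everything else is dropped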
selected_iter = self.liststore.get_iter(path)
# update the backend name
backend_id = self.liststore.get_value(selected_iter,
self.COLUMN_BACKEND_ID)
backend = self.dialog.get_requester().get_backend(backend_id)
if backend:
backend.set_human_name(new_text)
# update the text in the liststore
self.liststore.set(selected_iter, self.COLUMN_TEXT, new_text)
def _init_signals(self):
"""Initializes the backends and gtk signals """
self.connect("cursor-changed", self.on_select_row)
_signals = BackendSignals()
_signals.connect(_signals.BACKEND_ADDED, self.on_backend_added)
_signals.connect(_signals.BACKEND_STATE_TOGGLED,
self.on_backend_state_changed)
def on_select_row(self, treeview=None):
"""When a row is selected, displays the corresponding editing panel
@var treeview: not used
"""
self.dialog.on_backend_selected(self.get_selected_backend_id())
def _get_selected_path(self):
"""
Helper function to get the selected path
@return Gtk.TreePath : returns exactly one path for the selected object
or None
"""
selection = self.get_selection()
if selection:
model, selected_paths = self.get_selection().get_selected_rows()
if selected_paths:
return selected_paths[0]
return None
def select_backend(self, backend_id=None):
"""
Selects the backend corresponding to backend_id.
If backend_id is none, refreshes the current configuration panel.
@param backend_id: the id of the backend to select
"""
selection = self.get_selection()
if backend_id in self.backendid_to_iter:
backend_iter = self.backendid_to_iter[backend_id]
if selection:
selection.select_iter(backend_iter)
else:
if self._get_selected_path():
# We just reselect the currently selected entry
self.on_select_row()
else:
# If nothing is selected, we select the first entry
if selection:
selection.select_path("0")
self.dialog.on_backend_selected(self.get_selected_backend_id())
def get_selected_backend_id(self):
"""
returns the selected backend id, or none
@return string: the selected backend id (or None)
"""
selected_path = self._get_selected_path()
if not selected_path:
return None
selected_iter = self.liststore.get_iter(selected_path)
return self.liststore.get_value(selected_iter, self.COLUMN_BACKEND_ID)
| getting-things-gnome/gtg | GTG/gtk/backends/backendstree.py | Python | gpl-3.0 | 10,617 |
# Note the syntax differences when porting JavaScript to Python:
# document.getElementById -> doc[]
# module Math -> math
# Math.PI -> math.pi
# abs -> fabs
# an array can be replaced by a list
import math
import time
from browser import doc
import browser.timer
# Point class
class Point(object):
    # constructor
def __init__(self, x, y):
self.x = x
self.y = y
    # draw method
def drawMe(self, g, r):
self.g = g
self.r = r
self.g.save()
self.g.moveTo(self.x,self.y)
self.g.beginPath()
        # draw a circle of radius r to mark the point's position
        self.g.arc(self.x, self.y, self.r, 0, 2*math.pi, True)
self.g.moveTo(self.x,self.y)
self.g.lineTo(self.x+self.r, self.y)
self.g.moveTo(self.x, self.y)
self.g.lineTo(self.x-self.r, self.y)
self.g.moveTo(self.x, self.y)
self.g.lineTo(self.x, self.y+self.r)
self.g.moveTo(self.x, self.y)
self.g.lineTo(self.x, self.y-self.r)
self.g.restore()
self.g.stroke()
    # Eq method: copy the coordinates of pt into this point
def Eq(self, pt):
self.x = pt.x
self.y = pt.y
    # setPoint method: set this point's coordinates
def setPoint(self, px, py):
self.x = px
self.y = py
    # distance(pt) method: compute the distance from this point to pt
def distance(self, pt):
self.pt = pt
x = self.x - self.pt.x
y = self.y - self.pt.y
return math.sqrt(x * x + y * y)
    # label the point's coordinate position with text
def tag(self, g):
self.g = g
self.g.beginPath()
self.g.fillText("%d, %d"%(self.x, self.y),self.x, self.y)
self.g.stroke()
# Line class
class Line(object):
    # constructor
def __init__(self, p1, p2):
self.p1 = p1
self.p2 = p2
        # the first point of the line is its tail
self.Tail = self.p1
        # the second point of the line is its head
self.Head = self.p2
        # length attribute of the line
self.length = math.sqrt(math.pow(self.p2.x-self.p1.x, 2)+math.pow(self.p2.y-self.p1.y,2))
    # setPP: define the line by specifying its tail and head points
def setPP(self, p1, p2):
self.p1 = p1
self.p2 = p2
self.Tail = self.p1
self.Head = self.p2
self.length = math.sqrt(math.pow(self.p2.x-self.p1.x, 2)+math.pow(self.p2.y-self.p1.y,2))
    # setRT method for Line: Tail should already be fixed; r (length) and t (angle) then set Head
def setRT(self, r, t):
self.r = r
self.t = t
x = self.r * math.cos(self.t)
y = self.r * math.sin(self.t)
self.Tail.Eq(self.p1)
self.Head.setPoint(self.Tail.x + x,self.Tail.y + y)
    # getR method for Line
def getR(self):
        # x and y components
x = self.p1.x - self.p2.x
y = self.p1.y - self.p2.y
return math.sqrt(x * x + y * y)
    # by definition atan2(y, x) is the angle between (x, y) and the positive x axis, between -pi and pi
def getT(self):
x = self.p2.x - self.p1.x
y = self.p2.y - self.p1.y
if (math.fabs(x) < math.pow(10,-100)):
if(y < 0.0):
return (-math.pi/2)
else:
return (math.pi/2)
else:
return math.atan2(y, x)
    # setTail method for Line
def setTail(self, pt):
self.pt = pt
self.Tail.Eq(pt)
self.Head.setPoint(self.pt.x + self.x, self.pt.y + self.y)
    # getHead method for Line
def getHead(self):
return self.Head
def getTail(self):
return self.Tail
def drawMe(self, g):
self.g = g
self.g.beginPath()
self.g.moveTo(self.p1.x,self.p1.y)
self.g.lineTo(self.p2.x,self.p2.y)
self.g.stroke()
def test(self):
return ("this is pure test to Inherit")
class Link(Line):
def __init__(self, p1, p2):
self.p1 = p1
self.p2 = p2
self.length = math.sqrt(math.pow((self.p2.x - self.p1.x), 2) + math.pow((self.p2.y - self.p1.y), 2))
    # g is the canvas 2D drawing context
def drawMe(self, g):
self.g = g
hole = 5
radius = 10
length = self.getR()
# alert(length)
        # save the previous drawing state
self.g.save()
self.g.translate(self.p1.x,self.p1.y)
#alert(str(self.p1.x)+","+str(self.p1.y))
#self.g.rotate(-((math.pi/2)-self.getT()))
self.g.rotate(-math.pi*0.5 + self.getT())
#alert(str(self.getT()))
#self.g.rotate(10*math.pi/180)
#this.g.rotate(-(Math.PI/2-this.getT()));
        # a coordinate transform is needed because the Link is drawn along the y axis; it could be drawn along the x axis instead...
self.g.beginPath()
self.g.moveTo(0,0)
        self.g.arc(0, 0, hole, 0, 2*math.pi, True)
self.g.stroke()
self.g.moveTo(0,length)
self.g.beginPath()
        self.g.arc(0,length, hole, 0, 2*math.pi, True)
self.g.stroke()
self.g.moveTo(0,0)
self.g.beginPath()
        self.g.arc(0,0, radius, 0, math.pi, True)
self.g.moveTo(0+radius,0)
self.g.lineTo(0+radius,0+length)
self.g.stroke()
self.g.moveTo(0,0+length)
self.g.beginPath()
        self.g.arc(0, 0+length, radius, math.pi, 0, True)
self.g.moveTo(0-radius,0+length)
self.g.lineTo(0-radius,0)
self.g.stroke()
self.g.restore()
self.g.beginPath()
self.g.fillStyle = "red"
self.g.font = "bold 18px sans-serif"
self.g.fillText("%d, %d"%(self.p2.x, self.p2.y),self.p2.x, self.p2.y)
self.g.stroke()
self.g.restore()
class Triangle(object):
def __init__(self, p1, p2, p3):
self.p1 = p1
self.p2 = p2
self.p3 = p3
def getLenp3(self):
p1 = self.p1
ret = p1.distance(self.p2)
return ret
def getLenp1(self):
p2 = self.p2
ret = p2.distance(self.p3)
return ret
def getLenp2(self):
p1 = self.p1
ret = p1.distance(self.p3)
return ret
    # interior angles at the vertices
def getAp1(self):
ret = math.acos(((self.getLenp2() * self.getLenp2() + self.getLenp3() * self.getLenp3()) - self.getLenp1() * self.getLenp1()) / (2* self.getLenp2() * self.getLenp3()))
return ret
#
def getAp2(self):
ret =math.acos(((self.getLenp1() * self.getLenp1() + self.getLenp3() * self.getLenp3()) - self.getLenp2() * self.getLenp2()) / (2* self.getLenp1() * self.getLenp3()))
return ret
def getAp3(self):
ret = math.acos(((self.getLenp1() * self.getLenp1() + self.getLenp2() * self.getLenp2()) - self.getLenp3() * self.getLenp3()) / (2* self.getLenp1() * self.getLenp2()))
return ret
def drawMe(self, g):
self.g = g
r = 5
        # draw the three vertices
self.p1.drawMe(self.g,r)
self.p2.drawMe(self.g,r)
self.p3.drawMe(self.g,r)
line1 = Line(self.p1,self.p2)
line2 = Line(self.p1,self.p3)
line3 = Line(self.p2,self.p3)
        # draw the three edges
line1.drawMe(self.g)
line2.drawMe(self.g)
line3.drawMe(self.g)
# ends Triangle def
    # define the triangle by its three side lengths
def setSSS(self, lenp3, lenp1, lenp2):
self.lenp3 = lenp3
self.lenp1 = lenp1
self.lenp2 = lenp2
self.ap1 = math.acos(((self.lenp2 * self.lenp2 + self.lenp3 * self.lenp3) - self.lenp1 * self.lenp1) / (2* self.lenp2 * self.lenp3))
self.ap2 = math.acos(((self.lenp1 * self.lenp1 + self.lenp3 * self.lenp3) - self.lenp2 * self.lenp2) / (2* self.lenp1 * self.lenp3))
self.ap3 = math.acos(((self.lenp1 * self.lenp1 + self.lenp2 * self.lenp2) - self.lenp3 * self.lenp3) / (2* self.lenp1 * self.lenp2))
    # define the triangle by two side lengths and the included angle
def setSAS(self, lenp3, ap2, lenp1):
self.lenp3 = lenp3
self.ap2 = ap2
self.lenp1 = lenp1
self.lenp2 = math.sqrt((self.lenp3 * self.lenp3 + self.lenp1 * self.lenp1) - 2* self.lenp3 * self.lenp1 * math.cos(self.ap2))
    # equivalent to SSS(AB, BC, CA)
def setSaSS(self, lenp2, lenp3, lenp1):
self.lenp2 = lenp2
self.lenp3 = lenp3
self.lenp1 = lenp1
if(self.lenp1 > (self.lenp2 + self.lenp3)):
            # the angle <CAB is 180 degrees: the three points are collinear and A lies between B and C
ret = math.pi
else :
            # the angle <CAB is 0: the three points are collinear and A does not lie between B and C
if((self.lenp1 < (self.lenp2 - self.lenp3)) or (self.lenp1 < (self.lenp3 - self.lenp2))):
ret = 0.0
else :
                # use the law of cosines to find the angle <CAB
ret = math.acos(((self.lenp2 * self.lenp2 + self.lenp3 * self.lenp3) - self.lenp1 * self.lenp1) / (2 * self.lenp2 * self.lenp3))
return ret
    # get the triangle's three side lengths
def getSSS(self):
temp = []
temp.append( self.getLenp1() )
temp.append( self.getLenp2() )
temp.append( self.getLenp3() )
return temp
    # get the triangle's three angle values
def getAAA(self):
temp = []
temp.append( self.getAp1() )
temp.append( self.getAp2() )
temp.append( self.getAp3() )
return temp
    # get the triangle's three angles and three side lengths
def getASASAS(self):
temp = []
temp.append(self.getAp1())
temp.append(self.getLenp1())
temp.append(self.getAp2())
temp.append(self.getLenp2())
temp.append(self.getAp3())
temp.append(self.getLenp3())
return temp
#2P 2L return mid P
def setPPSS(self, p1, p3, lenp1, lenp3):
temp = []
self.p1 = p1
self.p3 = p3
self.lenp1 = lenp1
self.lenp3 = lenp3
#bp3 is the angle beside p3 point, cp3 is the angle for line23, p2 is the output
line31 = Line(p3, p1)
self.lenp2 = line31.getR()
#self.lenp2 = self.p3.distance(self.p1)
        # solve for the angle at p3 here
ap3 = math.acos(((self.lenp1 * self.lenp1 + self.lenp2 * self.lenp2) - self.lenp3 * self.lenp3) / (2 * self.lenp1 * self.lenp2))
#ap3 = math.acos(((self.lenp1 * self.lenp1 + self.lenp3 * self.lenp3) - self.lenp2 * self.lenp2) / (2 * self.lenp1 * self.lenp3))
bp3 = line31.getT()
cp3 = bp3 - ap3
temp.append(p3.x + self.lenp1*math.cos(cp3))#p2.x
temp.append(p3.y + self.lenp1*math.sin(cp3))#p2.y
return temp
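    # Illustrative note (added; not in the original source): given the crank
    # pin p2, the ground pin p4 and the two link lengths, setPPSS returns the
    # coupler point coordinates, e.g.
    #   p3.x, p3.y = triangle2.setPPSS(p2, p4, link2_len, link3_len)
    # which is exactly how draw() below updates p3 on every animation frame.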
def tag(g, p):
    pass
# Run the drawing routine; note that x and y are global variables
def draw():
global theta
context.clearRect(0, 0, canvas.width, canvas.height)
line1.drawMe(context)
line2.drawMe(context)
line3.drawMe(context)
#triangle1.drawMe(context)
#triangle2.drawMe(context)
theta += dx
p2.x = p1.x + line1.length*math.cos(theta*degree)
p2.y = p1.y - line1.length*math.sin(theta*degree)
p3.x, p3.y = triangle2.setPPSS(p2,p4,link2_len,link3_len)
p1.tag(context)
# The function and class definitions end above.
# Global variables
# Geometry input variables
x=10
y=10
r=10
# Canvas and drawing contents
# Other input variables
theta = 0
degree = math.pi/180.0
dx = 2
dy = 4
# set the positions of p1, p2, p3, p4
p1 = Point(150,100)
p2 = Point(150,200)
p3 = Point(300,300)
p4 = Point(350,100)
# create the links from the point positions
line1 = Link(p1,p2)
line2 = Link(p2,p3)
line3 = Link(p3,p4)
line4 = Link(p1,p4)
line5 = Link(p2,p4)
link2_len = p2.distance(p3)
link3_len = p3.distance(p4)
#link2_len = line1.getR()
#link3_len = line3.getR()
#alert(str(link2_len)+','+str(link3_len))
triangle1 = Triangle(p1,p2,p4)
triangle2 = Triangle(p2,p3,p4)
# Executed when the window loads
# Drawing canvas setup
canvas = doc["plotarea"]
context = canvas.getContext("2d")
# Coordinate transform: translate by canvas.height and negate y, i.e. move the origin to the bottom-left corner of the canvas
context.translate(0,canvas.height)
context.scale(1,-1)
# Call draw() repeatedly every 10 milliseconds
#time.set_interval(draw,20)
browser.timer.set_interval(draw,10)
| 2014c2g5/2014cadp | wsgi/local_data/brython_programs/brython_fourbar1.py | Python | gpl-3.0 | 11,960 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
timeformat='%H:%M:%S'
def begin_banner():
print ''
print '[*] swarm starting at '+time.strftime(timeformat,time.localtime())
print ''
def end_banner():
print ''
print '[*] swarm shutting down at '+time.strftime(timeformat,time.localtime())
print '' | Arvin-X/swarm | lib/utils/banner.py | Python | gpl-3.0 | 334 |
#!/usr/bin/python -Wall
# -*- coding: utf-8 -*-
"""
<div id="content">
<div style="text-align:center;" class="print"><img src="images/print_page_logo.png" alt="projecteuler.net" style="border:none;" /></div>
<h2>Number letter counts</h2><div id="problem_info" class="info"><h3>Problem 17</h3><span>Published on Friday, 17th May 2002, 06:00 pm; Solved by 88413; Difficulty rating: 5%</span></div>
<div class="problem_content" role="problem">
<p>If the numbers 1 to 5 are written out in words: one, two, three, four, five, then there are 3 + 3 + 5 + 4 + 4 = 19 letters used in total.</p>
<p>If all the numbers from 1 to 1000 (one thousand) inclusive were written out in words, how many letters would be used? </p>
<br />
<p class="note"><b>NOTE:</b> Do not count spaces or hyphens. For example, 342 (three hundred and forty-two) contains 23 letters and 115 (one hundred and fifteen) contains 20 letters. The use of "and" when writing out numbers is in compliance with British usage.</p>
</div><br />
<br /></div>
"""
s={0:"",1:"one",2:"two",3:"three",4:"four",5:"five",6:"six",7:"seven",8:"eight",9:"nine",10:"ten",11:"eleven",12:"twelve",13:"thirteen",14:"fourteen",15:"fifteen",16:"sixteen",17:"seventeen",18:"eighteen",19:"nineteen",20:"twenty",30:"thirty",40:"forty",50:"fifty",60:"sixty",70:"seventy",80:"eighty",90:"ninety"}
for i in range(1,1000):
if(not i in s.keys()):
if(i<100):
s[i]=s[i/10*10]+s[i%10]
else:
s[i]=s[i/100]+"hundred"
if(i%100):
s[i]+="and"+s[i%100]
s[1000]="onethousand"
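# Sanity check (added comment): s[342] == "threehundredandfortytwo", which has
# 23 letters, matching the example in the problem statement above.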
total=0;
for i in s.values():
total+=len(i)
print total
| beyoungwoo/C_glibc_Sample | _Algorithm/ProjectEuler_python/euler_17.py | Python | gpl-3.0 | 1,631 |
#! /usr/bin/env python
# ==========================================================================
# This scripts performs unit tests for the csiactobs script
#
# Copyright (C) 2016-2018 Juergen Knoedlseder
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ==========================================================================
import gammalib
import cscripts
from testing import test
# =============================== #
# Test class for csiactobs script #
# =============================== #
class Test(test):
"""
Test class for csiactobs script
This test class makes unit tests for the csiactobs script by using it
from the command line and from Python.
"""
# Constructor
def __init__(self):
"""
Constructor
"""
# Call base class constructor
test.__init__(self)
# Set data members
self._datapath = self._datadir + '/iactdata'
self._runlist = self._datadir + '/iact_runlist.dat'
# Return
return
# Set test functions
def set(self):
"""
Set all test functions
"""
# Set test name
self.name('csiactobs')
# Append tests
self.append(self._test_cmd, 'Test csiactobs on command line')
self.append(self._test_python, 'Test csiactobs from Python')
# Return
return
# Test csiactobs on command line
def _test_cmd(self):
"""
Test csiactobs on the command line
"""
# Set script name
csiactobs = self._script('csiactobs')
# Setup csiactobs command
cmd = csiactobs+' datapath="'+self._datapath+'"'+ \
' prodname="unit-test"'+ \
' infile="'+self._runlist+'"'+ \
' bkgpars=1'+\
' outobs="csiactobs_obs_cmd1.xml"'+ \
' outmodel="csiactobs_bgd_cmd1.xml"'+ \
' logfile="csiactobs_cmd1.log" chatter=1'
# Check if execution was successful
self.test_assert(self._execute(cmd) == 0,
'Check successful execution from command line')
# Check observation definition XML file
self._check_obsdef('csiactobs_obs_cmd1.xml', 6)
# Check model definition XML file
self._check_moddef('csiactobs_bgd_cmd1.xml', 6)
# Setup csiactobs command
cmd = csiactobs+' datapath="data_path_that_does_not_exist"'+ \
' prodname="unit-test"'+ \
' infile="'+self._runlist+'"'+ \
' bkgpars=1'+\
' outobs="csiactobs_obs_cmd2.xml"'+ \
' outmodel="csiactobs_bgd_cmd2.xml"'+ \
' logfile="csiactobs_cmd2.log" debug=yes debug=yes'+ \
' chatter=1'
# Check if execution failed
self.test_assert(self._execute(cmd, success=False) != 0,
'Check invalid input datapath when executed from command line')
# Setup csiactobs command
cmd = csiactobs+' datapath="'+self._datapath+'"'+ \
' prodname="unit-test-doesnt-exist"'+ \
' infile="'+self._runlist+'"'+ \
' bkgpars=1'+\
' outobs="csiactobs_obs_cmd3.xml"'+ \
' outmodel="csiactobs_bgd_cmd3.xml"'+ \
' logfile="csiactobs_cmd3.log" debug=yes debug=yes'+ \
' chatter=1'
# Check if execution failed
self.test_assert(self._execute(cmd, success=False) != 0,
'Check invalid input prodname when executed from command line')
# Check csiactobs --help
self._check_help(csiactobs)
# Return
return
# Test csiactobs from Python
def _test_python(self):
"""
Test csiactobs from Python
"""
# Allocate empty csiactobs script
iactobs = cscripts.csiactobs()
        # Check that empty csiactobs script has an empty observation container
# and energy boundaries
self.test_value(iactobs.obs().size(), 0,
'Check that empty csiactobs has an empty observation container')
self.test_value(iactobs.ebounds().size(), 0,
'Check that empty csiactobs has empty energy bins')
# Check that saving saves an empty model definition file
iactobs['outobs'] = 'csiactobs_obs_py0.xml'
iactobs['outmodel'] = 'csiactobs_bgd_py0.xml'
iactobs['logfile'] = 'csiactobs_py0.log'
iactobs.logFileOpen()
iactobs.save()
# Check empty observation definition XML file
self._check_obsdef('csiactobs_obs_py0.xml', 0)
# Check empty model definition XML file
self._check_moddef('csiactobs_bgd_py0.xml', 0)
# Check that clearing does not lead to an exception or segfault
#iactobs.clear()
# Set-up csiactobs
iactobs = cscripts.csiactobs()
iactobs['datapath'] = self._datapath
iactobs['prodname'] = 'unit-test'
iactobs['infile'] = self._runlist
iactobs['bkgpars'] = 1
iactobs['outobs'] = 'csiactobs_obs_py1.xml'
iactobs['outmodel'] = 'csiactobs_bgd_py1.xml'
iactobs['logfile'] = 'csiactobs_py1.log'
iactobs['chatter'] = 2
# Run csiactobs script and save run list
iactobs.logFileOpen() # Make sure we get a log file
iactobs.run()
iactobs.save()
# Check observation definition XML file
self._check_obsdef('csiactobs_obs_py1.xml', 6)
# Check model definition XML file
self._check_moddef('csiactobs_bgd_py1.xml', 6)
# Create test runlist
runlist = ['15000','15001']
# Set-up csiactobs using a runlist with 2 background parameters
iactobs = cscripts.csiactobs()
iactobs['datapath'] = self._datapath
iactobs['prodname'] = 'unit-test'
iactobs['bkgpars'] = 2
iactobs['outobs'] = 'csiactobs_obs_py2.xml'
iactobs['outmodel'] = 'csiactobs_bgd_py2.xml'
iactobs['logfile'] = 'csiactobs_py2.log'
iactobs['chatter'] = 3
iactobs.runlist(runlist)
# Run csiactobs script and save run list
iactobs.logFileOpen() # Make sure we get a log file
iactobs.run()
iactobs.save()
# Test return functions
self.test_value(iactobs.obs().size(), 2,
'Check number of observations in container')
self.test_value(iactobs.ebounds().size(), 0,
'Check number of energy boundaries')
# Check observation definition XML file
self._check_obsdef('csiactobs_obs_py2.xml',2)
# Check model definition XML file
self._check_moddef('csiactobs_bgd_py2.xml',2)
# Set-up csiactobs with a large number of free parameters and "aeff"
# background
iactobs = cscripts.csiactobs()
iactobs['datapath'] = self._datapath
iactobs['prodname'] = 'unit-test'
iactobs['infile'] = self._runlist
iactobs['bkgpars'] = 8
iactobs['bkg_mod_hiera'] = 'aeff'
iactobs['outobs'] = 'csiactobs_obs_py3.xml'
iactobs['outmodel'] = 'csiactobs_bgd_py3.xml'
iactobs['logfile'] = 'csiactobs_py3.log'
iactobs['chatter'] = 4
# Execute csiactobs script
iactobs.execute()
# Check observation definition XML file
self._check_obsdef('csiactobs_obs_py3.xml',6)
# Check model definition XML file
self._check_moddef('csiactobs_bgd_py3.xml',6)
# Set-up csiactobs with a "gauss" background and "inmodel" parameter
iactobs = cscripts.csiactobs()
iactobs['datapath'] = self._datapath
iactobs['inmodel'] = self._model
iactobs['prodname'] = 'unit-test'
iactobs['infile'] = self._runlist
iactobs['bkgpars'] = 1
iactobs['bkg_mod_hiera'] = 'gauss'
iactobs['outobs'] = 'NONE'
iactobs['outmodel'] = 'NONE'
iactobs['logfile'] = 'csiactobs_py4.log'
iactobs['chatter'] = 4
# Run csiactobs script
iactobs.logFileOpen() # Make sure we get a log file
iactobs.run()
# Check number of observations
self.test_value(iactobs.obs().size(), 6,
'Check number of observations in container')
# Check number of models
self.test_value(iactobs.obs().models().size(), 8,
'Check number of models in container')
# Set-up csiactobs with a "gauss" background and "inmodel" parameter
iactobs = cscripts.csiactobs()
iactobs['datapath'] = self._datapath
iactobs['inmodel'] = self._model
iactobs['prodname'] = 'unit-test'
iactobs['infile'] = self._runlist
iactobs['bkgpars'] = 1
iactobs['bkg_mod_hiera'] = 'irf'
iactobs['outobs'] = 'NONE'
iactobs['outmodel'] = 'NONE'
iactobs['logfile'] = 'csiactobs_py4.log'
iactobs['chatter'] = 4
# Run csiactobs script
iactobs.logFileOpen() # Make sure we get a log file
iactobs.run()
# Check number of observations
self.test_value(iactobs.obs().size(), 5,
'Check number of observations in container')
# Check number of models
self.test_value(iactobs.obs().models().size(), 7,
'Check number of models in container')
# Return
return
# Check observation definition XML file
def _check_obsdef(self, filename, obs_expected):
"""
Check observation definition XML file
"""
# Load observation definition XML file
obs = gammalib.GObservations(filename)
# Check number of observations
self.test_value(obs.size(), obs_expected,
'Check for '+str(obs_expected)+' observations in XML file')
# If there are observations in the XML file then check their content
if obs_expected > 0:
# Get response
rsp = obs[0].response()
# Test response
self.test_value(obs[0].eventfile().file(), 'events_0.fits.gz',
'Check event file name')
self.test_value(obs[0].eventfile().extname(), 'EVENTS',
'Check event extension name')
self.test_value(rsp.aeff().filename().file(), 'irf_file.fits.gz',
'Check effective area file name')
self.test_value(rsp.aeff().filename().extname(), 'EFFECTIVE AREA',
'Check effective area extension name')
self.test_value(rsp.psf().filename().file(), 'irf_file.fits.gz',
'Check point spread function file name')
self.test_value(rsp.psf().filename().extname(), 'POINT SPREAD FUNCTION',
'Check point spread function extension name')
self.test_value(rsp.edisp().filename().file(), 'irf_file.fits.gz',
'Check energy dispersion file name')
self.test_value(rsp.edisp().filename().extname(), 'ENERGY DISPERSION',
'Check energy dispersion extension name')
self.test_value(rsp.background().filename().file(), 'irf_file.fits.gz',
'Check background file name')
self.test_value(rsp.background().filename().extname(), 'BACKGROUND',
'Check background extension name')
# Return
return
# Check model XML file
def _check_moddef(self, filename, models_expected):
"""
Check model definition XML file
"""
# Load model definition XML file
models = gammalib.GModels(filename)
# Check number of models
self.test_value(models.size(), models_expected,
'Check for '+str(models_expected)+' models in XML file')
# Return
return
| ctools/ctools | test/test_csiactobs.py | Python | gpl-3.0 | 13,064 |
# Copyright 2015 Matthew J. Aburn
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. See <http://www.gnu.org/licenses/>.
r"""
Simulation of standard multiple stochastic integrals, both Ito and Stratonovich
I_{ij}(t) = \int_{0}^{t}\int_{0}^{s} dW_i(u) dW_j(s) (Ito)
J_{ij}(t) = \int_{0}^{t}\int_{0}^{s} \circ dW_i(u) \circ dW_j(s) (Stratonovich)
These multiple integrals I and J are important building blocks that will be
used by most of the higher-order algorithms that integrate multi-dimensional
SODEs.
We first implement the method of Kloeden, Platen and Wright (1992) to
approximate the integrals by the first n terms from the series expansion of a
Brownian bridge process. By default using n=5.
Finally we implement the method of Wiktorsson (2001) which improves on the
previous method by also approximating the tail-sum distribution by a
multivariate normal distribution.
References:
P. Kloeden, E. Platen and I. Wright (1992) The approximation of multiple
stochastic integrals
M. Wiktorsson (2001) Joint Characteristic Function and Simultaneous
Simulation of Iterated Ito Integrals for Multiple Independent Brownian
Motions
"""
import numpy as np
numpy_version = list(map(int, np.version.short_version.split('.')))
if numpy_version >= [1,10,0]:
broadcast_to = np.broadcast_to
else:
from ._broadcast import broadcast_to
def deltaW(N, m, h):
"""Generate sequence of Wiener increments for m independent Wiener
processes W_j(t) j=0..m-1 for each of N time intervals of length h.
Returns:
dW (array of shape (N, m)): The [n, j] element has the value
W_j((n+1)*h) - W_j(n*h)
"""
return np.random.normal(0.0, np.sqrt(h), (N, m))
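# Illustrative aside (added, not part of the original module): sample paths of
# the Wiener processes themselves can be recovered from the increments by
# cumulative summation, e.g.
#   W = np.cumsum(deltaW(N, m, h), axis=0)  # shape (N, m)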
def _t(a):
"""transpose the last two axes of a three axis array"""
return a.transpose((0, 2, 1))
def _dot(a, b):
r""" for rank 3 arrays a and b, return \sum_k a_ij^k . b_ik^l (no sum on i)
i.e. This is just normal matrix multiplication at each point on first axis
"""
return np.einsum('ijk,ikl->ijl', a, b)
def _Aterm(N, h, m, k, dW):
"""kth term in the sum of Wiktorsson2001 equation (2.2)"""
sqrt2h = np.sqrt(2.0/h)
Xk = np.random.normal(0.0, 1.0, (N, m, 1))
Yk = np.random.normal(0.0, 1.0, (N, m, 1))
term1 = _dot(Xk, _t(Yk + sqrt2h*dW))
term2 = _dot(Yk + sqrt2h*dW, _t(Xk))
return (term1 - term2)/k
def Ikpw(dW, h, n=5):
"""matrix I approximating repeated Ito integrals for each of N time
intervals, based on the method of Kloeden, Platen and Wright (1992).
Args:
        dW (array of shape (N, m)): giving m independent Wiener increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(A, I) where
A: array of shape (N, m, m) giving the Levy areas that were used.
I: array of shape (N, m, m) giving an m x m matrix of repeated Ito
integral values for each of the N time intervals.
"""
N = dW.shape[0]
m = dW.shape[1]
if dW.ndim < 3:
dW = dW.reshape((N, -1, 1)) # change to array of shape (N, m, 1)
if dW.shape[2] != 1 or dW.ndim > 3:
raise(ValueError)
A = _Aterm(N, h, m, 1, dW)
for k in range(2, n+1):
A += _Aterm(N, h, m, k, dW)
A = (h/(2.0*np.pi))*A
I = 0.5*(_dot(dW, _t(dW)) - np.diag(h*np.ones(m))) + A
dW = dW.reshape((N, -1)) # change back to shape (N, m)
return (A, I)
def Jkpw(dW, h, n=5):
"""matrix J approximating repeated Stratonovich integrals for each of N
time intervals, based on the method of Kloeden, Platen and Wright (1992).
Args:
        dW (array of shape (N, m)): giving m independent Wiener increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(A, J) where
A: array of shape (N, m, m) giving the Levy areas that were used.
J: array of shape (N, m, m) giving an m x m matrix of repeated
Stratonovich integral values for each of the N time intervals.
"""
m = dW.shape[1]
A, I = Ikpw(dW, h, n)
J = I + 0.5*h*np.eye(m).reshape((1, m, m))
return (A, J)
# The code below this point implements the method of Wiktorsson2001.
def _vec(A):
"""
Linear operator _vec() from Wiktorsson2001 p478
Args:
A: a rank 3 array of shape N x m x n, giving a matrix A[j] for each
interval of time j in 0..N-1
Returns:
array of shape N x mn x 1, made by stacking the columns of matrix A[j] on
top of each other, for each j in 0..N-1
"""
N, m, n = A.shape
return A.reshape((N, m*n, 1), order='F')
def _unvec(vecA, m=None):
"""inverse of _vec() operator"""
N = vecA.shape[0]
if m is None:
m = np.sqrt(vecA.shape[1] + 0.25).astype(np.int64)
return vecA.reshape((N, m, -1), order='F')
def _kp(a, b):
"""Special case Kronecker tensor product of a[i] and b[i] at each
time interval i for i = 0 .. N-1
It is specialized for the case where both a and b are shape N x m x 1
"""
if a.shape != b.shape or a.shape[-1] != 1:
raise(ValueError)
N = a.shape[0]
# take the outer product over the last two axes, then reshape:
return np.einsum('ijk,ilk->ijkl', a, b).reshape(N, -1, 1)
def _kp2(A, B):
"""Special case Kronecker tensor product of A[i] and B[i] at each
time interval i for i = 0 .. N-1
Specialized for the case A and B rank 3 with A.shape[0]==B.shape[0]
"""
N = A.shape[0]
if B.shape[0] != N:
raise(ValueError)
newshape1 = A.shape[1]*B.shape[1]
return np.einsum('ijk,ilm->ijlkm', A, B).reshape(N, newshape1, -1)
def _P(m):
"""Returns m^2 x m^2 permutation matrix that swaps rows i and j where
j = 1 + m((i - 1) mod m) + (i - 1) div m, for i = 1 .. m^2
"""
P = np.zeros((m**2,m**2), dtype=np.int64)
for i in range(1, m**2 + 1):
j = 1 + m*((i - 1) % m) + (i - 1)//m
P[i-1, j-1] = 1
return P
def _K(m):
""" matrix K_m from Wiktorsson2001 """
M = m*(m - 1)//2
K = np.zeros((M, m**2), dtype=np.int64)
row = 0
for j in range(1, m):
col = (j - 1)*m + j
s = m - j
K[row:(row+s), col:(col+s)] = np.eye(s)
row += s
return K
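# Illustrative aside (added): _K(3) is the 3 x 9 selector matrix that picks the
# strictly lower triangular entries A21, A31, A32 out of _vec(A) for a 3 x 3
# matrix A, i.e. each row has a single 1, at columns 1, 2 and 5 (0-based).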
def _AtildeTerm(N, h, m, k, dW, Km0, Pm0):
"""kth term in the sum for Atilde (Wiktorsson2001 p481, 1st eqn)"""
M = m*(m-1)//2
Xk = np.random.normal(0.0, 1.0, (N, m, 1))
Yk = np.random.normal(0.0, 1.0, (N, m, 1))
factor1 = np.dot(Km0, Pm0 - np.eye(m**2))
factor1 = broadcast_to(factor1, (N, M, m**2))
factor2 = _kp(Yk + np.sqrt(2.0/h)*dW, Xk)
return _dot(factor1, factor2)/k
def _sigmainf(N, h, m, dW, Km0, Pm0):
r"""Asymptotic covariance matrix \Sigma_\infty Wiktorsson2001 eqn (4.5)"""
M = m*(m-1)//2
Im = broadcast_to(np.eye(m), (N, m, m))
IM = broadcast_to(np.eye(M), (N, M, M))
Ims0 = np.eye(m**2)
factor1 = broadcast_to((2.0/h)*np.dot(Km0, Ims0 - Pm0), (N, M, m**2))
factor2 = _kp2(Im, _dot(dW, _t(dW)))
factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))
return 2*IM + _dot(_dot(factor1, factor2), factor3)
def _a(n):
r""" \sum_{n+1}^\infty 1/k^2 """
return np.pi**2/6.0 - sum(1.0/k**2 for k in range(1, n+1))
def Iwik(dW, h, n=5):
"""matrix I approximating repeated Ito integrals for each of N time
intervals, using the method of Wiktorsson (2001).
Args:
        dW (array of shape (N, m)): giving m independent Wiener increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(Atilde, I) where
Atilde: array of shape (N,m(m-1)//2,1) giving the area integrals used.
I: array of shape (N, m, m) giving an m x m matrix of repeated Ito
integral values for each of the N time intervals.
"""
N = dW.shape[0]
m = dW.shape[1]
if dW.ndim < 3:
dW = dW.reshape((N, -1, 1)) # change to array of shape (N, m, 1)
if dW.shape[2] != 1 or dW.ndim > 3:
raise(ValueError)
if m == 1:
return (np.zeros((N, 1, 1)), (dW*dW - h)/2.0)
Pm0 = _P(m)
Km0 = _K(m)
M = m*(m-1)//2
Atilde_n = _AtildeTerm(N, h, m, 1, dW, Km0, Pm0)
for k in range(2, n+1):
Atilde_n += _AtildeTerm(N, h, m, k, dW, Km0, Pm0)
Atilde_n = (h/(2.0*np.pi))*Atilde_n # approximation after n terms
S = _sigmainf(N, h, m, dW, Km0, Pm0)
normdW2 = np.sum(np.abs(dW)**2, axis=1)
radical = np.sqrt(1.0 + normdW2/h).reshape((N, 1, 1))
IM = broadcast_to(np.eye(M), (N, M, M))
Im = broadcast_to(np.eye(m), (N, m, m))
Ims0 = np.eye(m**2)
sqrtS = (S + 2.0*radical*IM)/(np.sqrt(2.0)*(1.0 + radical))
G = np.random.normal(0.0, 1.0, (N, M, 1))
tailsum = h/(2.0*np.pi)*_a(n)**0.5*_dot(sqrtS, G)
Atilde = Atilde_n + tailsum # our final approximation of the areas
factor3 = broadcast_to(np.dot(Ims0 - Pm0, Km0.T), (N, m**2, M))
vecI = 0.5*(_kp(dW, dW) - _vec(h*Im)) + _dot(factor3, Atilde)
I = _unvec(vecI)
dW = dW.reshape((N, -1)) # change back to shape (N, m)
return (Atilde, I)
def Jwik(dW, h, n=5):
"""matrix J approximating repeated Stratonovich integrals for each of N
time intervals, using the method of Wiktorsson (2001).
Args:
        dW (array of shape (N, m)): giving m independent Wiener increments for
each time step N. (You can make this array using sdeint.deltaW())
h (float): the time step size
n (int, optional): how many terms to take in the series expansion
Returns:
(Atilde, J) where
Atilde: array of shape (N,m(m-1)//2,1) giving the area integrals used.
J: array of shape (N, m, m) giving an m x m matrix of repeated
Stratonovich integral values for each of the N time intervals.
"""
m = dW.shape[1]
Atilde, I = Iwik(dW, h, n)
J = I + 0.5*h*np.eye(m).reshape((1, m, m))
return (Atilde, J)
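if __name__ == '__main__':
    # Minimal usage sketch (added for illustration; not part of the original
    # module): draw Wiener increments for m = 3 processes over N = 1000 steps
    # of size h, then compare the two repeated-integral approximations.
    N, m, h = 1000, 3, 0.001
    dW = deltaW(N, m, h)
    A, I = Ikpw(dW, h)        # Kloeden-Platen-Wright approximation
    Atilde, I2 = Iwik(dW, h)  # Wiktorsson approximation
    print(I.shape, I2.shape)  # both (1000, 3, 3)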
| mattja/sdeint | sdeint/wiener.py | Python | gpl-3.0 | 10,384 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#--------------------------------------------------------------------
# Copyright (c) 2014 Eren Inan Canpolat
# Author: Eren Inan Canpolat <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#--------------------------------------------------------------------
content_template = """<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN"
"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title></title>
</head>
<body>
</body>
</html>"""
toc_ncx = u"""<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE ncx PUBLIC "-//NISO//DTD ncx 2005-1//EN"
"http://www.daisy.org/z3986/2005/ncx-2005-1.dtd">
<ncx xmlns="http://www.daisy.org/z3986/2005/ncx/" version="2005-1">
<head>
<meta name="dtb:uid" content="{book.uuid}" />
<meta name="dtb:depth" content="{book.toc_root.maxlevel}" />
<meta name="dtb:totalPageCount" content="0" />
<meta name="dtb:maxPageNumber" content="0" />
</head>
<docTitle>
<text>{book.title}</text>
</docTitle>
{navmap}
</ncx>"""
container_xml = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<container xmlns="urn:oasis:names:tc:opendocument:xmlns:container" version="1.0">
<rootfiles>
<rootfile full-path="OEBPS/content.opf" media-type="application/oebps-package+xml"/>
</rootfiles>
</container>
"""
| canpolat/bookbinder | bookbinder/templates.py | Python | gpl-3.0 | 2,011 |
# Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.model.oval_5.defs.windows.TestType import TestType
logger = logging.getLogger(__name__)
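# Note: MODEL_MAP below binds this element class to the <process58_test> tag
# of the OVAL 5 Windows definitions schema; behavior is inherited from TestType.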
class Process58TestElement(TestType):
MODEL_MAP = {
'tag_name': 'process58_test',
}
| cjaymes/pyscap | src/scap/model/oval_5/defs/windows/Process58TestElement.py | Python | gpl-3.0 | 896 |
def main():
"""Instantiate a DockerStats object and collect stats."""
print('Docker Service Module')
if __name__ == '__main__':
main()
| gomex/docker-zabbix | docker_service/__init__.py | Python | gpl-3.0 | 148 |
# ScratchABit - interactive disassembler
#
# Copyright (c) 2018 Paul Sokolovsky
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import capstone
import _any_capstone
dis = capstone.Cs(capstone.CS_ARCH_ARM, capstone.CS_MODE_ARM)
def PROCESSOR_ENTRY():
return _any_capstone.Processor("arm_32", dis)
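# Hedged standalone check (illustrative; requires the capstone package):
#
#     code = b"\x00\x00\xa0\xe1"  # ARM little-endian: mov r0, r0
#     for insn in dis.disasm(code, 0x1000):
#         print("0x%x: %s %s" % (insn.address, insn.mnemonic, insn.op_str))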
| pfalcon/ScratchABit | plugins/cpu/arm_32_arm_capstone.py | Python | gpl-3.0 | 891 |
#!/usr/bin/python
"""Perform preprocessing and generate raytrace exec scripts for one focal plane.
For documentation using the python_control for ImSim/PhoSim version <= v.3.0.x,
see README.v3.0.x.txt.
For documentation using the python_control for ImSim/PhoSim version == v.3.2.x,
see README.txt.
The behavior of this script differs depending on the version of ImSim/PhoSim.
For versions <= v3.0.x, it functions like the original fullFocalplane.py and
calls AllChipsScriptGenerator.makeScripts() to generate a script and some tarballs
that can in turn be executed to run the preprocessing step (which in turn calls
AllChipsScriptGenerator) to generate shells scripts and tarballs for performing
the raytrace stage. See README.v3.0.x.txt for more info.
The behavior for ImSim/PhoSim version == 3.2.x is to run the preprocessing step
directly through the class PhosimManager.PhosimPrepreprocessor (which in turn
calls phosim.py in the phosim.git repository). After the preprocessing is
complete, PhosimPreprocessor generates shell scripts for the raytrace phase.
A few notes on options:
--skip_atmoscreens: Use this to optionally skip the step to generate atmosphere
screens during preprocessing and instead perform this
operation at the start of the raytrace phase. This is
useful in distributed environments where the cost of
transferring the atmosphere screens to the compute node
is higher than recalculating them.
--logtostderr: (only v3.2.x and higher) By default, log output from python_controls
is done via the python logging module, and directed to either
log_dir in the imsim_config_file or /tmp/fullFocalplane.log
if log_dir is not specified. This option overrides this behavior
and prints logging information to stdout. Note: output from
phosim.py and the phosim binaries are still printed to stdout.
TODO(gardnerj): Add stdout log redirect
TODO(gardnerj): Support sensor_ids argument for phosim.py.
TODO(gardnerj): Support not running e2adc step.
"""
from __future__ import with_statement
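# Example invocation (hypothetical paths, matching the options parsed below):
#   python fullFocalplane.py obs_trimfile.pars imsim_config.cfg \
#       -c extra_commands.txt -s "R01_S00|R01_S01" --skip_atmoscreens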
import ConfigParser
from distutils import version
import logging
from optparse import OptionParser # Can't use argparse yet, since we must work in 2.5
import os
import sys
from AllChipsScriptGenerator import AllChipsScriptGenerator
import PhosimManager
import PhosimUtil
import PhosimVerifier
import ScriptWriter
__author__ = 'Jeff Gardner ([email protected])'
logger = logging.getLogger(__name__)
def DoPreprocOldVersion(trimfile, policy, extra_commands, scheduler, sensor_id):
"""Do preprocessing for v3.1.0 and earlier.
Args:
trimfile: Full path to trim metadata file.
policy: ConfigParser object from python_controls config file.
extra_commands: Full path to extra commands or 'extraid' file.
scheduler: Name of scheduler (currently, just 'csh' is supported).
sensor_id: If not '', run just this single sensor ID.
Returns:
0 (success)
"""
with PhosimUtil.WithTimer() as t:
# Determine the pre-processing scheduler so that we know which class to use
if scheduler == 'csh':
scriptGenerator = AllChipsScriptGenerator(trimfile, policy, extra_commands)
scriptGenerator.makeScripts(sensor_id)
elif scheduler == 'pbs':
scriptGenerator = AllChipsScriptGenerator_Pbs(trimfile, policy, extra_commands)
scriptGenerator.makeScripts(sensor_id)
elif scheduler == 'exacycle':
      print 'Exacycle functionality not added yet.'
return 1
else:
print 'Scheduler "%s" unknown. Use -h or --help for help.' % scheduler
t.LogWall('makeScripts')
return 0
def DoPreproc(trimfile, imsim_config_file, extra_commands, scheduler,
skip_atmoscreens=False, keep_scratch_dirs=False):
"""Do preprocessing for v3.2.0 and later.
Args:
trimfile: Full path to trim metadata file.
imsim_config_file: Full path to the python_controls config file.
extra_commands: Full path to extra commands or 'extraid' file.
scheduler: Name of scheduler (currently, just 'csh' is supported).
skip_atmoscreens: Generate atmosphere screens in raytrace stage instead
of preprocessing stage.
keep_scratch_dirs: Do not delete the working directories at the end of
execution.
Returns:
0 upon success, 1 upon failure.
"""
if scheduler == 'csh':
preprocessor = PhosimManager.Preprocessor(imsim_config_file,
trimfile, extra_commands)
elif scheduler == 'pbs':
# Construct PhosimPreprocessor with PBS-specific ScriptWriter
preprocessor = PhosimManager.Preprocessor(
imsim_config_file, trimfile, extra_commands,
script_writer_class=ScriptWriter.PbsRaytraceScriptWriter)
# Read in PBS-specific config
policy = ConfigParser.RawConfigParser()
policy.read(imsim_config_file)
preprocessor.script_writer.ParsePbsConfig(policy)
else:
logger.critical('Unknown scheduler: %s. Use -h or --help for help',
scheduler)
return 1
preprocessor.InitExecEnvironment()
with PhosimUtil.WithTimer() as t:
if not preprocessor.DoPreprocessing(skip_atmoscreens=skip_atmoscreens):
logger.critical('DoPreprocessing() failed.')
return 1
t.LogWall('DoPreprocessing')
exec_manifest_fn = 'execmanifest_raytrace_%s.txt' % preprocessor.focalplane.observationID
files_to_stage = preprocessor.ArchiveRaytraceInputByExt(exec_archive_name=exec_manifest_fn)
if not files_to_stage:
logger.critical('Output archive step failed.')
return 1
with PhosimUtil.WithTimer() as t:
preprocessor.StageOutput(files_to_stage)
t.LogWall('StageOutput')
if not keep_scratch_dirs:
preprocessor.Cleanup()
verifier = PhosimVerifier.PreprocVerifier(imsim_config_file, trimfile,
extra_commands)
missing_files = verifier.VerifySharedOutput()
if missing_files:
logger.critical('Verification failed with the following files missing:')
for fn in missing_files:
logger.critical(' %s', fn)
sys.stderr.write('Verification failed with the following files missing:\n')
for fn in missing_files:
      sys.stderr.write(' %s\n' % fn)
else:
logger.info('Verification completed successfully.')
return 0
def ConfigureLogging(trimfile, policy, log_to_stdout, imsim_config_file,
extra_commands=None):
"""Configures logger.
If log_to_stdout, the logger will write to stdout. Otherwise, it will
write to:
'log_dir' in the config file, if present
/tmp/fullFocalplane.log if 'log_dir' is not present.
Stdout from phosim.py and PhoSim binaries always goes to stdout.
"""
if log_to_stdout:
log_fn = None
else:
if policy.has_option('general', 'log_dir'):
# Log to file in log_dir
obsid, filter_num = PhosimManager.ObservationIdFromTrimfile(
          trimfile, extra_commands=extra_commands)
log_dir = os.path.join(policy.get('general', 'log_dir'), obsid)
log_fn = os.path.join(log_dir, 'fullFocalplane_%s.log' % obsid)
else:
log_fn = '/tmp/fullFocalplane.log'
PhosimUtil.ConfigureLogging(policy.getint('general', 'debug_level'),
logfile_fullpath=log_fn)
params_str = 'trimfile=%s\nconfig_file=%s\n' % (trimfile, imsim_config_file)
if extra_commands:
params_str += 'extra_commands=%s\n' % extra_commands
PhosimUtil.WriteLogHeader(__file__, params_str=params_str)
def main(trimfile, imsim_config_file, extra_commands, skip_atmoscreens,
keep_scratch_dirs, sensor_ids, log_to_stdout=False):
"""
Run the fullFocalplanePbs.py script, populating it with the
correct user and cluster job submission information from an LSST
policy file.
"""
policy = ConfigParser.RawConfigParser()
policy.read(imsim_config_file)
if policy.has_option('general', 'phosim_version'):
phosim_version = policy.get('general', 'phosim_version')
else:
phosim_version = '3.0.1'
ConfigureLogging(trimfile, policy, log_to_stdout,
imsim_config_file, extra_commands)
# print 'Running fullFocalPlane on: ', trimfile
logger.info('Running fullFocalPlane on: %s ', trimfile)
# print 'Using Imsim/Phosim version', phosim_version
logger.info('Using Imsim/Phosim version %s', phosim_version)
# Must pass absolute paths to imsim/phosim workers
if not os.path.isabs(trimfile):
trimfile = os.path.abspath(trimfile)
if not os.path.isabs(imsim_config_file):
imsim_config_file = os.path.abspath(imsim_config_file)
if not os.path.isabs(extra_commands):
extra_commands = os.path.abspath(extra_commands)
scheduler = policy.get('general','scheduler2')
if version.LooseVersion(phosim_version) < version.LooseVersion('3.1.0'):
if len(sensor_ids.split('|')) > 1:
logger.critical('Multiple sensors not supported in version < 3.1.0.')
return 1
sensor_id = '' if sensor_ids == 'all' else sensor_ids
    return DoPreprocOldVersion(trimfile, policy, extra_commands, scheduler,
sensor_id)
elif version.LooseVersion(phosim_version) > version.LooseVersion('3.2.0'):
if sensor_ids != 'all':
logger.critical('Single exposure mode is currently not supported for'
' phosim > 3.2.0')
return 1
return DoPreproc(trimfile, imsim_config_file, extra_commands, scheduler,
skip_atmoscreens=skip_atmoscreens,
keep_scratch_dirs=keep_scratch_dirs)
logger.critical('Unsupported phosim version %s', phosim_version)
return 1
if __name__ == '__main__':
usage = 'usage: %prog trimfile imsim_config_file [options]'
parser = OptionParser(usage=usage)
parser.add_option('-a', '--skip_atmoscreens', dest='skip_atmoscreens',
action='store_true', default=False,
help='Generate atmospheric screens in raytrace stage instead'
' of preprocessing stage.')
parser.add_option('-c', '--command', dest='extra_commands',
help='Extra commands filename.')
parser.add_option('-k', '--keep_scratch', dest='keep_scratch_dirs',
action='store_true', default=False,
help='Do not cleanup working directories.'
' (version 3.2.x and higher only).')
parser.add_option('-l', '--logtostdout', dest='log_to_stdout',
action='store_true', default=False,
help='Write logging output to stdout instead of log file'
' (version 3.2.x and higher only).')
parser.add_option('-s', '--sensor', dest='sensor_ids', default='all',
help='Specify a list of sensor ids to use delimited by "|",'
' or use "all" for all.')
(options, args) = parser.parse_args()
if len(args) != 2:
print 'Incorrect number of arguments. Use -h or --help for help.'
print usage
quit()
trimfile = args[0]
imsim_config_file = args[1]
sys.exit(main(trimfile, imsim_config_file, options.extra_commands,
options.skip_atmoscreens, options.keep_scratch_dirs,
options.sensor_ids, options.log_to_stdout))
| lsst-sims/sims_phosim_pythoncontrol | fullFocalplane.py | Python | gpl-3.0 | 11,426 |
# Example made by OssiLehtinen
#
from svgpathtools import svg2paths, wsvg
import numpy as np
import uArmRobot
import time
#Configure Serial Port
#serialport = "com3" # for windows
serialport = "/dev/ttyACM0" # for linux like system
# Connect to uArm
myRobot = uArmRobot.robot(serialport,0) # use 0 for firmware < v4 and use 1 for firmware v4
myRobot.debug = True # Enable / Disable debug output on screen, by default disabled
myRobot.connect()
myRobot.mode(1) # Set mode to Normal
# Read in the svg
paths, attributes = svg2paths('drawing.svg')
scale = .25
steps_per_seg = 3
coords = []
x_offset = 200
height = 90
draw_speed = 1000
# Convert the paths to a list of coordinates
for i in range(len(paths)):
path = paths[i]
attribute = attributes[i]
# A crude check for whether a path should be drawn. Does it have a style defined?
if 'style' in attribute:
for seg in path:
segcoords = []
for p in range(steps_per_seg+1):
cp = seg.point(float(p)/float(steps_per_seg))
segcoords.append([-np.real(cp)*scale+x_offset, np.imag(cp)*scale])
coords.append(segcoords)
# The starting point
myRobot.goto(coords[0][0][0], coords[0][0][1], height, 6000)
for seg in coords:
myRobot.goto(seg[0][0], seg[0][1], height, 6000)
time.sleep(0.15)
for p in seg:
myRobot.goto_laser(p[0], p[1], height, draw_speed)
# Back to the starting point (and turn the laser off)
myRobot.goto(coords[0][0][0], coords[0][0][1], height, 6000)
| AnykeyNL/uArmProPython | svg_example.py | Python | gpl-3.0 | 1,467 |
# -*- coding: utf-8 -*-
# Author: Leo Vidarte <http://nerdlabs.com.ar>
#
# This file is part of lai-client.
#
# lai-client is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3
# as published by the Free Software Foundation.
#
# lai-client is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with lai-client. If not, see <http://www.gnu.org/licenses/>.
import pymongo
from pymongo.errors import AutoReconnect
from lai.db.base import DBBase
from lai.database import UPDATE_PROCESS, COMMIT_PROCESS
from lai.database import DatabaseException, NotFoundError
from lai import Document
class DBMongo(DBBase):
def __init__(self, name, host='127.0.0.1', port=27017):
self.name = name
self.host = host
self.port = port
def connect(self):
try:
self.connection = pymongo.Connection(self.host, self.port)
self.db = self.connection[self.name]
except AutoReconnect:
            raise DatabaseException("It's not possible to connect to the database")
def get_next_id(self):
try:
query = {'_id': 'last_id'}
update = {'$inc': {'id': 1}}
fn = self.db.internal.find_and_modify
row = fn(query, update, upsert=True, new=True)
except Exception as e:
raise DatabaseException(e)
return row['id']
def search(self, regex):
try:
spec = {'$or': [{'data.content' : {'$regex': regex, '$options': 'im'}},
{'data.description': {'$regex': regex, '$options': 'im'}}]}
fields = {'_id': 0}
cur = self.db.docs.find(spec, fields)
except Exception as e:
raise DatabaseException(e)
return [Document(**row) for row in cur]
def get(self, id, pk='id', deleted=False):
try:
if pk == 'id':
id = int(id)
if deleted:
spec = {pk: id}
else:
spec = {pk: id, 'data': {'$exists': 1}}
fields = {'_id': 0}
row = self.db.docs.find_one(spec, fields)
except Exception as e:
raise DatabaseException(e)
if row:
return Document(**row)
raise NotFoundError('%s %s not found' % (pk, id))
def getall(self):
try:
spec = {'data': {'$exists': 1}}
fields = {'_id': 0}
sort = [('tid', 1)]
cur = self.db.docs.find(spec, fields, sort=sort)
except Exception as e:
raise DatabaseException(e)
return [Document(**row) for row in cur]
def save(self, doc):
if doc.id:
return self.update(doc)
else:
return self.insert(doc)
def insert(self, doc, synced=False):
doc.id = self.get_next_id()
doc.synced = synced
try:
self.db.docs.insert(doc)
except Exception as e:
raise DatabaseException(e)
return doc
def update(self, doc, process=None):
if process is None:
pk = 'id'
id = doc.id
doc.synced = False
set = doc
elif process == UPDATE_PROCESS:
if self.db.docs.find({'sid': doc.sid}).count() == 0:
return self.insert(doc, synced=True)
pk = 'sid'
id = doc.sid
            doc.synced = not doc.merged() # must be committed if it was merged
doc.merged(False)
set = {'tid': doc.tid, 'data': doc.data, 'user': doc.user,
'public': doc.public, 'synced': doc.synced}
elif process == COMMIT_PROCESS:
pk = 'id'
id = doc.id
doc.synced = True
set = {'sid': doc.sid, 'tid': doc.tid, 'synced': doc.synced}
else:
raise DatabaseException('Incorrect update process')
try:
rs = self.db.docs.update({pk: id}, {'$set': set}, safe=True)
assert rs['n'] == 1
except Exception as e:
raise DatabaseException(e)
return doc
def delete(self, doc):
if doc.id is None:
raise DatabaseException('Document does not have id')
if doc.sid is None:
try:
rs = self.db.docs.remove({'id': doc.id}, safe=True)
assert rs['n'] == 1
except Exception as e:
raise DatabaseException(e)
return None
doc.data = None
return self.update(doc)
def save_last_sync(self, ids, process):
try:
spec = {'_id': 'last_sync'}
document = {'$set': {process: ids}}
self.db.internal.update(spec, document, upsert=True)
except Exception as e:
raise DatabaseException(e)
def get_docs_to_commit(self):
try:
spec = {'synced': False}
fields = {'_id': 0}
cur = self.db.docs.find(spec, fields)
except Exception as e:
raise DatabaseException(e)
return list(cur)
def get_last_tid(self):
try:
spec = {'tid': {'$gt': 0}}
sort = [('tid', -1)]
row = self.db.docs.find_one(spec, sort=sort)
except Exception as e:
raise DatabaseException(e)
if row:
return row['tid']
return 0
def status(self):
docs = {'updated' : [],
'committed': [],
'to_commit': []}
row = self.db.internal.find_one({'_id': 'last_sync'})
if row and 'update' in row:
for id in row['update']:
docs['updated'].append(self.get(id, deleted=True))
if row and 'commit' in row:
for id in row['commit']:
docs['committed'].append(self.get(id, deleted=True))
to_commit = self.get_docs_to_commit()
for row in to_commit:
doc = Document(**row)
docs['to_commit'].append(doc)
return docs
def __str__(self):
return "%s://%s:%s/%s" % ('mongo', self.host, self.port, self.name)
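# Hedged usage sketch (illustrative; assumes a local mongod on port 27017):
#
#     db = DBMongo('lai')
#     db.connect()
#     for doc in db.search('some regex'):
#         print doc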
| lvidarte/lai-client | lai/db/mongo.py | Python | gpl-3.0 | 6,391 |
from matplotlib import pyplot as plt
path = "C:/Temp/mnisterrors/chunk" + str(input("chunk: ")) + ".txt"
with open(path, "r") as f:
errorhistory = [float(line.rstrip('\n')) for line in f]
plt.plot(errorhistory)
plt.show()
| jabumaho/MNIST-neural-network | plot_error.py | Python | gpl-3.0 | 235 |
# We calculate the flatness with the Roche model
# calculate omk knowing omc and vice-versa
from numpy import *
from scipy.optimize import root
# we have to solve a cubic equation a-J2*a**3=1+J2+0.5*omk**2
def eps(omk):
return omk**2/(2+omk**2)
def om_k(omc):
khi=arcsin(omc)
return sqrt(6*sin(khi/3)/omc-2)
omc=0.88
print 'omc=',omc,' omk=',om_k(omc)
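# The corresponding flatness follows from eps() above, e.g. (hedged sketch):
# print 'eps=', eps(om_k(omc))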
| ester-project/ester | postprocessing/Roche.py | Python | gpl-3.0 | 361 |
#!/usr/bin/env python
import sys
import os
output_dir = "erc2-chromatin15state-all-files"
if not os.path.exists(output_dir):
sys.stderr.write("Creating dir [%s]...\n" % (output_dir))
os.makedirs(output_dir)
prefix = "/home/cbreeze/for_Alex"
suffix = "_15_coreMarks_mnemonics.bed"
marks = [ '1_TssA',
'2_TssAFlnk',
'3_TxFlnk',
'4_Tx',
'5_TxWk',
'6_EnhG',
'7_Enh',
'8_ZNF/Rpts',
'9_Het',
'10_TssBiv',
'11_BivFlnk',
'12_EnhBiv',
'13_ReprPC',
'14_ReprPCWk',
'15_Quies' ]
all = [ 'E001',
'E002',
'E003',
'E004',
'E005',
'E006',
'E007',
'E008',
'E009',
'E010',
'E011',
'E012',
'E013',
'E014',
'E015',
'E016',
'E017',
'E018',
'E019',
'E020',
'E021',
'E022',
'E023',
'E024',
'E025',
'E026',
'E027',
'E028',
'E029',
'E030',
'E031',
'E032',
'E033',
'E034',
'E035',
'E036',
'E037',
'E038',
'E039',
'E040',
'E041',
'E042',
'E043',
'E044',
'E045',
'E046',
'E047',
'E048',
'E049',
'E050',
'E051',
'E052',
'E053',
'E054',
'E055',
'E056',
'E057',
'E058',
'E059',
'E061',
'E062',
'E063',
'E065',
'E066',
'E067',
'E068',
'E069',
'E070',
'E071',
'E072',
'E073',
'E074',
'E075',
'E076',
'E077',
'E078',
'E079',
'E080',
'E081',
'E082',
'E083',
'E084',
'E085',
'E086',
'E087',
'E088',
'E089',
'E090',
'E091',
'E092',
'E093',
'E094',
'E095',
'E096',
'E097',
'E098',
'E099',
'E100',
'E101',
'E102',
'E103',
'E104',
'E105',
'E106',
'E107',
'E108',
'E109',
'E110',
'E111',
'E112',
'E113',
'E114',
'E115',
'E116',
'E117',
'E118',
'E119',
'E120',
'E121',
'E122',
'E123',
'E124',
'E125',
'E126',
'E127',
'E128',
'E129' ]
# prefix, suffix, marks, all
for sample in all:
fns = {}
fhs = {}
# set up output file handles for all combinations of per-sample and marks
for mark in marks:
fns[mark] = os.path.join(output_dir, "%s_%s.bed" % (sample, mark.replace('/', '-')))
sys.stderr.write("Setting up output handle to [%s]...\n" % (fns[mark]))
fhs[mark] = open(fns[mark], "w")
# split per-sample mnemonics to per-sample, per-mark file
psm_fn = "%s/%s%s" % (prefix, sample, suffix)
sys.stderr.write("Reading PSM [%s]...\n" % (psm_fn))
with open(psm_fn, "r") as psm_fh:
for line in psm_fh:
(chr, start, stop, state_call) = line.strip().split('\t')
fhs[state_call].write('\t'.join([chr, start, stop]) + '\n')
# close handles
for mark in marks:
sys.stderr.write("Closing output handle to [%s]...\n" % (fns[mark]))
fhs[mark].close()
fns[mark] = None
fhs[mark] = None | charlesbreeze/eFORGE | docs/eforge-db-construction/construct_erc2-chromatin15state-all_files.py | Python | gpl-3.0 | 3,559 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2017 Stephen Bunn ([email protected])
# GNU GPLv3 <https://www.gnu.org/licenses/gpl-3.0.en.html>
from ._common import *
from .rethinkdb import RethinkDBPipe
from .mongodb import MongoDBPipe
| ritashugisha/neat | neat/pipe/__init__.py | Python | gpl-3.0 | 255 |
import pandas as pd
from larray.core.array import Array
from larray.inout.pandas import from_frame
__all__ = ['read_stata']
def read_stata(filepath_or_buffer, index_col=None, sort_rows=False, sort_columns=False, **kwargs) -> Array:
r"""
Reads Stata .dta file and returns an Array with the contents
Parameters
----------
filepath_or_buffer : str or file-like object
Path to .dta file or a file handle.
index_col : str or None, optional
Name of column to set as index. Defaults to None.
sort_rows : bool, optional
Whether or not to sort the rows alphabetically (sorting is more efficient than not sorting).
This only makes sense in combination with index_col. Defaults to False.
sort_columns : bool, optional
Whether or not to sort the columns alphabetically (sorting is more efficient than not sorting).
Defaults to False.
Returns
-------
Array
See Also
--------
Array.to_stata
Notes
-----
    The round trip to Stata (Array.to_stata followed by read_stata) loses the name of the "column" axis.
Examples
--------
>>> read_stata('test.dta') # doctest: +SKIP
{0}\{1} row country sex
0 0 BE F
1 1 FR M
2 2 FR F
>>> read_stata('test.dta', index_col='row') # doctest: +SKIP
row\{1} country sex
0 BE F
1 FR M
2 FR F
"""
df = pd.read_stata(filepath_or_buffer, index_col=index_col, **kwargs)
return from_frame(df, sort_rows=sort_rows, sort_columns=sort_columns)
| gdementen/larray | larray/inout/stata.py | Python | gpl-3.0 | 1,657 |
# ../gungame/core/messages/hooks.py
"""Provides a way to hook GunGame messages."""
# =============================================================================
# >> IMPORTS
# =============================================================================
# Source.Python
from core import AutoUnload
# GunGame
from .manager import message_manager
# =============================================================================
# >> CLASSES
# =============================================================================
class MessageHook(AutoUnload):
"""Decorator used to register message hooks."""
def __init__(self, message_name):
"""Store the message name."""
self.message_name = message_name
self.callback = None
def __call__(self, callback):
"""Store the callback and register the hook."""
self.callback = callback
message_manager.hook_message(self.message_name, self.callback)
def _unload_instance(self):
"""Unregister the message hook."""
message_manager.unhook_message(self.message_name, self.callback)
class MessagePrefixHook(AutoUnload):
"""Decorator used to register message prefix hooks."""
def __init__(self, message_prefix):
"""Store the message prefix."""
self.message_prefix = message_prefix
self.callback = None
def __call__(self, callback):
"""Store the callback and register the hook."""
self.callback = callback
message_manager.hook_prefix(self.message_prefix, self.callback)
def _unload_instance(self):
"""Unregister the message prefix hook."""
message_manager.unhook_prefix(self.message_prefix, self.callback)
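# Hedged usage sketch (illustrative; the message name and callback signature
# are assumptions, since message_manager defines the hook protocol elsewhere):
#
#     @MessageHook('LevelUp')
#     def level_up_hook(*args, **kwargs):
#         ...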
| GunGame-Dev-Team/GunGame-SP | addons/source-python/plugins/gungame/core/messages/hooks.py | Python | gpl-3.0 | 1,705 |
# coding=utf-8
""" NodeChains are sequential orders of :mod:`~pySPACE.missions.nodes`
.. image:: ../../graphics/node_chain.png
:width: 500
There are two main use cases:
* the application for :mod:`~pySPACE.run.launch_live` and the
:mod:`~pySPACE.environments.live` using the default
:class:`NodeChain` and
* the benchmarking with :mod:`~pySPACE.run.launch` using
the :class:`BenchmarkNodeChain` with the
:mod:`~pySPACE.missions.operations.node_chain` operation.
.. seealso::
- :mod:`~pySPACE.missions.nodes`
- :ref:`node_list`
- :mod:`~pySPACE.missions.operations.node_chain` operation
.. image:: ../../graphics/launch_live.png
:width: 500
.. todo:: Documentation
This module extends/reimplements the original MDP flow class and
has some additional methods like reset(), save() etc.
Furthermore it supports the construction of NodeChains and
also running them inside nodes in parallel.
MDP is distributed under the following BSD license::
This file is part of Modular toolkit for Data Processing (MDP).
All the code in this package is distributed under the following conditions:
Copyright (c) 2003-2012, MDP Developers <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Modular toolkit for Data Processing (MDP)
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys
import os
if __name__ == '__main__':
# add root of the code to system path
file_path = os.path.dirname(os.path.abspath(__file__))
pyspace_path = file_path[:file_path.rfind('pySPACE')-1]
if not pyspace_path in sys.path:
sys.path.append(pyspace_path)
import cPickle
import gc
import logging
import multiprocessing
import shutil
import socket
import time
import uuid
import yaml
import pySPACE
from pySPACE.tools.filesystem import create_directory
from pySPACE.tools.socket_utils import talk, inform
from pySPACE.tools.conversion import python2yaml, replace_parameters_and_convert, replace_parameters
import copy
import warnings
import traceback
import numpy
class CrashRecoveryException(Exception):
"""Class to handle crash recovery """
def __init__(self, *args):
"""Allow crash recovery.
Arguments: (error_string, crashing_obj, parent_exception)
The crashing object is kept in self.crashing_obj
The triggering parent exception is kept in ``self.parent_exception``.
"""
errstr = args[0]
self.crashing_obj = args[1]
self.parent_exception = args[2]
# ?? python 2.5: super(CrashRecoveryException, self).__init__(errstr)
super(CrashRecoveryException,self).__init__(self, errstr)
def dump(self, filename = None):
"""
Save a pickle dump of the crashing object on filename.
If filename is None, the crash dump is saved on a file created by
the tempfile module.
Return the filename.
"""
import cPickle
import tempfile
if filename is None:
(fd, filename)=tempfile.mkstemp(suffix=".pic", prefix="NodeChainCrash_")
fl = os.fdopen(fd, 'w+b', -1)
else:
fl = open(filename, 'w+b', -1)
cPickle.dump(self.crashing_obj, fl)
fl.close()
return filename
class NodeChainException(Exception):
"""Base class for exceptions in node chains."""
pass
class NodeChainExceptionCR(CrashRecoveryException, NodeChainException):
"""Class to handle crash recovery """
def __init__(self, *args):
"""Allow crash recovery.
Arguments: (error_string, flow_instance, parent_exception)
The triggering parent exception is kept in self.parent_exception.
If ``flow_instance._crash_recovery`` is set, save a crash dump of
flow_instance on the file self.filename
"""
CrashRecoveryException.__init__(self, *args)
rec = self.crashing_obj._crash_recovery
errstr = args[0]
if rec:
if isinstance(rec, str):
name = rec
else:
name = None
name = CrashRecoveryException.dump(self, name)
dumpinfo = '\nA crash dump is available on: "%s"' % name
self.filename = name
errstr = errstr+dumpinfo
Exception.__init__(self, errstr)
class NodeChain(object):
""" Reimplement/overwrite mdp.Flow methods e.g., for supervised learning """
def __init__(self, node_sequence, crash_recovery=False, verbose=False):
""" Creates the NodeChain based on the node_sequence
.. note:: The NodeChain cannot be executed before not all trainable
nodes have been trained, i.e. self.trained() == True.
"""
self._check_nodes_consistency(node_sequence)
self.flow = node_sequence
self.verbose = verbose
self.set_crash_recovery(crash_recovery)
# Register the direct predecessor of a node as its input
# (i.e. we assume linear flows)
for i in range(len(node_sequence) - 1):
node_sequence[i+1].register_input_node(node_sequence[i])
self.use_test_data = False
# set a default run number
self[-1].set_run_number(0)
# give this flow a unique identifier
self.id = str(uuid.uuid4())
self.handler = None
self.store_intermediate_results = True
def train(self, data_iterators=None):
""" Train NodeChain with data from iterator or source node
The method can proceed in two different ways:
* If no data is provided, it is checked that the first node of
the flow is a source node. If that is the case, the data provided
by this node is passed forward through the flow. During this
forward propagation, the flow is trained.
The request of the data is done in the last node.
* If a list of data iterators is provided,
            then it is checked that no source
            or split nodes are contained in the NodeChain.
            These nodes already include their own data handling
            and should not be used when training is driven by external iterators.
Furthermore, split nodes are relevant for benchmarking.
One iterator for each node has to be given.
If only one is given, or no list, it is mapped to a list
with the same iterator for each node.
.. note:: The iterator approach is normally not used in pySPACE,
because pySPACE supplies the data with special
source nodes and is doing the training automatically
without explicit calls on data samples.
The approach came with MDP.
.. todo:: The iterator approach needs some use cases and testings,
especially, because it is not used in the normal setting.
"""
if data_iterators is not None:
# Check if no source and split nodes are contained in the node chain
assert(not self[0].is_source_node()), \
"Node chains with source nodes cannot be trained "\
"with external data_iterators!"
for node in self:
assert(not node.is_split_node()), \
"Node chains with split nodes cannot be trained "\
"with external data_iterators!"
# prepare iterables
if not type(data_iterators) == list:
data_iterators = [data_iterators] * len(self.flow)
elif not len(data_iterators)==len(self.flow):
data_iterators = [data_iterators] * len(self.flow)
# Delegate to iterative training
self.iter_train(data_iterators)
else: # Use the pySPACE train semantic and not MDP type
# Check if the first node of the node chain is a source node
assert(self[0].is_source_node()), \
"Training of a node chain without source node requires a "\
"data_iterator argument!"
# Training is accomplished by requesting the iterator
# of the last node of the chain. This node will recursively call
# the train method of all its predecessor nodes.
# As soon as the first element is yielded the node has been trained.
for _ in self[-1].request_data_for_training(
use_test_data=self.use_test_data):
return
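    # Hedged example (illustrative, hypothetical nodes): iterator-based
    # supervised training as described above; one iterable per node, and
    # (x, label) tuples pass the label to the node currently being trained.
    #
    #     chain = NodeChain([preproc_node, classifier_node])
    #     chain.train([None, [(x, label) for x, label in samples]])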
def iter_train(self, data_iterables):
""" Train all trainable nodes in the NodeChain with data from iterator
*data_iterables* is a list of iterables, one for each node in the chain.
The iterators returned by the iterables must return data arrays that
are then used for the node training (so the data arrays are the data for
the nodes).
Note that the data arrays are processed by the nodes
which are in front of the node that gets trained, so the data dimension
must match the input dimension of the first node.
If a node has only a single training phase then instead of an iterable
you can alternatively provide an iterator (including generator-type
iterators). For nodes with multiple training phases this is not
possible, since the iterator cannot be restarted after the first
iteration. For more information on iterators and iterables see
http://docs.python.org/library/stdtypes.html#iterator-types .
In the special case that *data_iterables* is one single array,
it is used as the data array *x* for all nodes and training phases.
Instead of a data array *x* the iterators can also return a list or
tuple, where the first entry is *x* and the following are args for the
training of the node (e.g., for supervised training).
"""
data_iterables = self._train_check_iterables(data_iterables)
# train each Node successively
for i in range(len(self.flow)):
if self.verbose:
print "Training node #%d (%s)" % (i, str(self.flow[i]))
self._train_node(data_iterables[i], i)
if self.verbose:
print "Training finished"
self._close_last_node()
def trained(self):
"""
Returns whether the complete training is finished, i.e. if all nodes have been trained.
"""
return self[-1].get_remaining_train_phase() == 0
def execute(self, data_iterators=None):
""" Process the data through all nodes """
if data_iterators is not None:
# Delegate to super class
return self.iter_execute(data_iterators)
else: # Use the evaluate semantic
# Check if the first node of the flow is a source node
assert (self[0].is_source_node()), \
"Evaluation of a node chain without source node requires a " \
"data_iterator argument!"
# This is accomplished by calling the request_data_for_testing
# method of the last node of the chain. This node will recursively
# call the request_data_for_testing method of all its predecessor
# nodes
return self[-1].process()
def iter_execute(self, iterable, nodenr = None):
""" Process the data through all nodes in the chain till *nodenr*
'iterable' is an iterable or iterator (note that a list is also an
iterable), which returns data arrays that are used as input.
Alternatively, one can specify one data array as input.
If 'nodenr' is specified, the flow is executed only up to
node nr. 'nodenr'. This is equivalent to 'flow[:nodenr+1](iterable)'.
.. note:: In contrary to MDP, results are not concatenated
to one big object. Each data object remains separate.
"""
if isinstance(iterable, numpy.ndarray):
return self._execute_seq(iterable, nodenr)
res = []
empty_iterator = True
for x in iterable:
empty_iterator = False
res.append(self._execute_seq(x, nodenr))
if empty_iterator:
errstr = ("The execute data iterator is empty.")
raise NodeChainException(errstr)
return res
def _inc_train(self, data, class_label=None):
""" Iterate through the nodes to train them """
for node in self:
if node.is_retrainable() and not node.buffering and hasattr(node, "_inc_train"):
if not node.retraining_phase:
node.retraining_phase=True
node.start_retraining()
node._inc_train(data,class_label)
if not (hasattr(self, "buffering") and self.buffering):
data = node.execute(data)
else: # workaround to inherit meta data
self.buffering = False
data = node.execute(data)
self.buffering = True
def save(self, filename, protocol = -1):
""" Save a pickled representation to *filename*
If *filename* is None, return a string.
.. note:: the pickled NodeChain is not guaranteed to be upward or
backward compatible.
.. note:: Having C-Code in the node might cause problems with saving.
Therefore, the code has special handling for the
LibSVMClassifierNode.
.. todo:: Intrinsic node methods for storing should be used.
.. seealso:: :func:`store_node_chain`
"""
if self[-1].__class__.__name__ in ["LibSVMClassifierNode"] \
and self[-1].multinomial:
indx = filename.find(".pickle")
if indx != -1:
self[-1].save_model(filename[0:indx]+'.model')
else:
self[-1].save_model(filename+'.model')
import cPickle
odict = self.__dict__.copy() # copy the dict since we change it
# Remove other non-pickable stuff
remove_keys=[]
k = 0
for key, value in odict.iteritems():
if key == "input_node" or key == "flow":
continue
try:
cPickle.dumps(value)
except (ValueError, TypeError, cPickle.PicklingError):
remove_keys.append(key)
for key in remove_keys:
odict.pop(key)
self.__dict__ = odict
if filename is None:
return cPickle.dumps(self, protocol)
else:
# if protocol != 0 open the file in binary mode
if protocol != 0:
mode = 'wb'
else:
mode = 'w'
flh = open(filename , mode)
cPickle.dump(self, flh, protocol)
flh.close()
def get_output_type(self, input_type, as_string=True):
"""
Returns the output type of the entire node chain
Recursively iterate over nodes in flow
"""
output = input_type
for i in range(len(self.flow)):
if i == 0:
output = self.flow[i].get_output_type(
input_type, as_string=True)
else:
output = self.flow[i].get_output_type(output, as_string=True)
if as_string:
return output
else:
return self.string_to_class(output)
@staticmethod
def string_to_class(string_encoding):
""" given a string variable, outputs a class instance
e.g. obtaining a TimeSeries
"""
from pySPACE.resources.data_types.time_series import TimeSeries
from pySPACE.resources.data_types.feature_vector import FeatureVector
from pySPACE.resources.data_types.prediction_vector import PredictionVector
if "TimeSeries" in string_encoding:
return TimeSeries
elif "PredictionVector" in string_encoding:
return PredictionVector
elif "FeatureVector" in string_encoding:
return FeatureVector
else:
raise NotImplementedError
#################
# MDP Code copy #
def _propagate_exception(self, exception, nodenr):
# capture exception. the traceback of the error is printed and a
# new exception, containing the identity of the node in the NodeChain
# is raised. Allow crash recovery.
(etype, val, tb) = sys.exc_info()
prev = ''.join(traceback.format_exception(exception.__class__,
exception,tb))
act = "\n! Exception in node #%d (%s):\n" % (nodenr,
str(self.flow[nodenr]))
errstr = ''.join(('\n', 40*'-', act, 'Node Traceback:\n', prev, 40*'-'))
raise NodeChainExceptionCR(errstr, self, exception)
def _train_node(self, data_iterable, nodenr):
""" Train a single node in the flow.
nodenr -- index of the node in the flow
"""
node = self.flow[nodenr]
if (data_iterable is not None) and (not node.is_trainable()):
# attempted to train a node although it is not trainable.
# raise a warning and continue with the next node.
# wrnstr = "\n! Node %d is not trainable" % nodenr + \
# "\nYou probably need a 'None' iterable for"+\
# " this node. Continuing anyway."
#warnings.warn(wrnstr, UserWarning)
return
elif (data_iterable is None) and node.is_training():
# None instead of iterable is passed to a training node
err_str = ("\n! Node %d is training"
" but instead of iterable received 'None'." % nodenr)
raise NodeChainException(err_str)
elif (data_iterable is None) and (not node.is_trainable()):
# skip training if node is not trainable
return
try:
train_arg_keys = self._get_required_train_args(node)
train_args_needed = bool(len(train_arg_keys))
## We leave the last training phase open for the
## CheckpointFlow class.
## Checkpoint functions must close it explicitly if needed!
## Note that the last training_phase is closed
## automatically when the node is executed.
while True:
empty_iterator = True
for x in data_iterable:
empty_iterator = False
# the arguments following the first are passed only to the
# currently trained node, allowing the implementation of
# supervised nodes
if (type(x) is tuple) or (type(x) is list):
arg = x[1:]
x = x[0]
else:
arg = ()
# check if the required number of arguments was given
if train_args_needed:
if len(train_arg_keys) != len(arg):
err = ("Wrong number of arguments provided by " +
"the iterable for node #%d " % nodenr +
"(%d needed, %d given).\n" %
(len(train_arg_keys), len(arg)) +
"List of required argument keys: " +
str(train_arg_keys))
raise NodeChainException(err)
# filter x through the previous nodes
if nodenr > 0:
x = self._execute_seq(x, nodenr-1)
# train current node
node.train(x, *arg)
if empty_iterator:
if node.get_current_train_phase() == 1:
err_str = ("The training data iteration for node "
"no. %d could not be repeated for the "
"second training phase, you probably "
"provided an iterator instead of an "
"iterable." % (nodenr+1))
raise NodeChainException(err_str)
else:
err_str = ("The training data iterator for node "
"no. %d is empty." % (nodenr+1))
raise NodeChainException(err_str)
self._stop_training_hook()
# close the previous training phase
node.stop_training()
if node.get_remaining_train_phase() > 0:
continue
else:
break
except self.flow[-1].TrainingFinishedException, e:
# attempted to train a node although its training phase is already
# finished. raise a warning and continue with the next node.
            wrnstr = ("\n! Node %d training phase already finished."
                      " Continuing anyway." % nodenr)
warnings.warn(wrnstr, UserWarning)
except NodeChainExceptionCR, e:
# this exception was already propagated,
# probably during the execution of a node upstream in the flow
(exc_type, val) = sys.exc_info()[:2]
prev = ''.join(traceback.format_exception_only(e.__class__, e))
prev = prev[prev.find('\n')+1:]
act = "\nWhile training node #%d (%s):\n" % (nodenr,
str(self.flow[nodenr]))
err_str = ''.join(('\n', 40*'=', act, prev, 40*'='))
raise NodeChainException(err_str)
except Exception, e:
# capture any other exception occurred during training.
self._propagate_exception(e, nodenr)
def _stop_training_hook(self):
"""Hook method that is called before stop_training is called."""
pass
@staticmethod
def _get_required_train_args(node):
"""Return arguments in addition to self and x for node.train.
Arguments that have a default value are ignored.
"""
import inspect
train_arg_spec = inspect.getargspec(node.train)
train_arg_keys = train_arg_spec[0][2:] # ignore self, x
if train_arg_spec[3]:
# subtract arguments with a default value
train_arg_keys = train_arg_keys[:-len(train_arg_spec[3])]
return train_arg_keys
def _train_check_iterables(self, data_iterables):
"""Return the data iterables after some checks and sanitizing.
Note that this method does not distinguish between iterables and
iterators, so this must be taken care of later.
"""
# verifies that the number of iterables matches that of
# the signal nodes and multiplies them if needed.
flow = self.flow
# # if a single array is given wrap it in a list of lists,
# # note that a list of 2d arrays is not valid
# if isinstance(data_iterables, numpy.ndarray):
# data_iterables = [[data_iterables]] * len(flow)
if not isinstance(data_iterables, list):
err_str = ("'data_iterables' must be either a list of "
"iterables or an array, but got %s" %
str(type(data_iterables)))
raise NodeChainException(err_str)
# check that all elements are iterable
for i, iterable in enumerate(data_iterables):
if (iterable is not None) and (not hasattr(iterable, '__iter__')):
err = ("Element number %d in the data_iterables"
" list is not an iterable." % i)
raise NodeChainException(err)
# check that the number of data_iterables is correct
if len(data_iterables) != len(flow):
err_str = ("%d data iterables specified,"
" %d needed" % (len(data_iterables), len(flow)))
raise NodeChainException(err_str)
return data_iterables
def _close_last_node(self):
if self.verbose:
print "Close the training phase of the last node"
try:
self.flow[-1].stop_training()
except self.flow[-1].TrainingFinishedException:
pass
except Exception, e:
self._propagate_exception(e, len(self.flow)-1)
def set_crash_recovery(self, state = True):
"""Set crash recovery capabilities.
When a node raises an Exception during training, execution, or
inverse execution that the flow is unable to handle, a NodeChainExceptionCR
is raised. If crash recovery is set, a crash dump of the flow
instance is saved for later inspection. The original exception
can be found as the 'parent_exception' attribute of the
NodeChainExceptionCR instance.
- If 'state' = False, disable crash recovery.
- If 'state' is a string, the crash dump is saved on a file
with that name.
- If 'state' = True, the crash dump is saved on a file created by
the tempfile module.
"""
self._crash_recovery = state
def _execute_seq(self, x, nodenr = None):
""" Executes input data 'x' through the nodes 0..'node_nr' included
If no *nodenr* is specified, the complete node chain is used for
processing.
"""
flow = self.flow
if nodenr is None:
nodenr = len(flow)-1
for node_index in range(nodenr+1):
try:
x = flow[node_index].execute(x)
except Exception, e:
self._propagate_exception(e, node_index)
return x
def copy(self, protocol=None):
"""Return a deep copy of the flow.
The protocol parameter should not be used.
"""
import copy
if protocol is not None:
warnings.warn("protocol parameter to copy() is ignored",
DeprecationWarning, stacklevel=2)
return copy.deepcopy(self)
def __call__(self, iterable, nodenr = None):
"""Calling an instance is equivalent to call its 'execute' method."""
return self.iter_execute(iterable, nodenr=nodenr)
###### string representation
def __str__(self):
nodes = ', '.join([str(x) for x in self.flow])
return '['+nodes+']'
def __repr__(self):
# this should look like a valid Python expression that
# could be used to recreate an object with the same value
# eval(repr(object)) == object
name = type(self).__name__
pad = len(name)+2
sep = ',\n'+' '*pad
nodes = sep.join([repr(x) for x in self.flow])
return '%s([%s])' % (name, nodes)
###### private container methods
def __len__(self):
return len(self.flow)
def _check_dimension_consistency(self, out, inp):
"""Raise ValueError when both dimensions are set and different."""
if ((out and inp) is not None) and out != inp:
errstr = "dimensions mismatch: %s != %s" % (str(out), str(inp))
raise ValueError(errstr)
def _check_nodes_consistency(self, flow = None):
"""Check the dimension consistency of a list of nodes."""
if flow is None:
flow = self.flow
len_flow = len(flow)
for i in range(1, len_flow):
out = flow[i-1].output_dim
inp = flow[i].input_dim
self._check_dimension_consistency(out, inp)
def _check_value_type_isnode(self, value):
if not isinstance(value, pySPACE.missions.nodes.base.BaseNode):
raise TypeError("flow item must be Node instance")
def __getitem__(self, key):
if isinstance(key, slice):
flow_slice = self.flow[key]
self._check_nodes_consistency(flow_slice)
return self.__class__(flow_slice)
else:
return self.flow[key]
def __setitem__(self, key, value):
if isinstance(key, slice):
[self._check_value_type_isnode(item) for item in value]
else:
self._check_value_type_isnode(value)
# make a copy of list
flow_copy = list(self.flow)
flow_copy[key] = value
# check dimension consistency
self._check_nodes_consistency(flow_copy)
# if no exception was raised, accept the new sequence
self.flow = flow_copy
def __delitem__(self, key):
# make a copy of list
flow_copy = list(self.flow)
del flow_copy[key]
# check dimension consistency
self._check_nodes_consistency(flow_copy)
# if no exception was raised, accept the new sequence
self.flow = flow_copy
def __contains__(self, item):
return self.flow.__contains__(item)
def __iter__(self):
return self.flow.__iter__()
def __add__(self, other):
# append other to self
if isinstance(other, NodeChain):
flow_copy = list(self.flow).__add__(other.flow)
# check dimension consistency
self._check_nodes_consistency(flow_copy)
# if no exception was raised, accept the new sequence
return self.__class__(flow_copy)
elif isinstance(other, pySPACE.missions.nodes.base.BaseNode):
flow_copy = list(self.flow)
flow_copy.append(other)
# check dimension consistency
self._check_nodes_consistency(flow_copy)
# if no exception was raised, accept the new sequence
return self.__class__(flow_copy)
else:
err_str = ('can only concatenate flow or node'
' (not \'%s\') to flow' % (type(other).__name__))
raise TypeError(err_str)
def __iadd__(self, other):
# append other to self
if isinstance(other, NodeChain):
self.flow += other.flow
elif isinstance(other, pySPACE.missions.nodes.base.BaseNode):
self.flow.append(other)
else:
err_str = ('can only concatenate flow or node'
' (not \'%s\') to flow' % (type(other).__name__))
raise TypeError(err_str)
self._check_nodes_consistency(self.flow)
return self
###### public container methods
def append(self, x):
"""flow.append(node) -- append node to flow end"""
self[len(self):len(self)] = [x]
def extend(self, x):
"""flow.extend(iterable) -- extend flow by appending
elements from the iterable"""
if not isinstance(x, NodeChain):
err_str = ('can only concatenate flow'
' (not \'%s\') to flow' % (type(x).__name__))
raise TypeError(err_str)
self[len(self):len(self)] = x
def insert(self, i, x):
"""flow.insert(index, node) -- insert node before index"""
self[i:i] = [x]
def pop(self, i = -1):
"""flow.pop([index]) -> node -- remove and return node at index
(default last)"""
x = self[i]
del self[i]
return x
def reset(self):
""" Reset the flow and obey permanent_attributes where available
Method was moved to the end of class code, due to program environment
problems which needed the __getitem__ method beforehand.
"""
for i in range(len(self)):
self[i].reset()
class BenchmarkNodeChain(NodeChain):
""" This subclass overwrites the train method in order
to provide a more convenient way of doing supervised learning.
Furthermore, it contains a benchmark method that can be used for
benchmarking.
This includes logging, setting of run numbers,
delivering the result collection, handling of source and sink nodes, ...
:Author: Jan Hendrik Metzen ([email protected])
:Created: 2008/08/18
"""
def __init__(self, node_sequence):
""" Creates the BenchmarkNodeChain based on the node_sequence """
super(BenchmarkNodeChain, self).__init__(node_sequence)
# Each BenchmarkNodeChain must start with an source node
# and end with a sink node
assert(self[0].is_source_node()), \
"A benchmark flow must start with a source node"
assert(self[-1].is_sink_node()), \
"A benchmark flow must end with a sink node"
def use_next_split(self):
"""
Use the next split of the data into training and test data
This method is useful for pySPACE-benchmarking
"""
# This is handled by calling use_next_split() of the last node of
# the flow which will recursively call predecessor nodes in the flow
# until a node is found that handles the splitting
return self[-1].use_next_split()
def benchmark(self, input_collection, run=0,
persistency_directory=None, store_node_chain=False):
""" Perform the benchmarking of this data flow with the given collection
Benchmarking is accomplished by iterating through all splits of the
data into training and test data.
**Parameters**:
:input_collection:
A sequence of data/label-tuples that serves as a generator or a
BaseDataset which contains the data to be processed.
:run:
The current run which defines all random seeds within the flow.
:persistency_directory:
Optional information of the nodes as well as the trained node chain
(if *store_node_chain* is not False) are stored to the given
*persistency_directory*.
:store_node_chain:
If True the trained flow is stored to *persistency_directory*.
            If *store_node_chain* is a tuple of length 2, say (i1, i2),
only the subflow starting at the i1-th node and ending at the
(i2-1)-th node is stored. This may be useful when the stored
flow should be used in an ensemble.
"""
# Inform the first node of this flow about the input collection
if hasattr(input_collection,'__iter__'):
# assume a generator is given
self[0].set_generator(input_collection)
else: # assume BaseDataset
self[0].set_input_dataset(input_collection)
# Inform all nodes recursively about the number of the current run
self[-1].set_run_number(int(run))
# set temp file folder
if persistency_directory != None:
self[-1].set_temp_dir(persistency_directory+os.sep+"temp_dir")
split_counter = 0
# For every split of the dataset
while True: # As long as more splits are available
# Compute the results for the current split
# by calling the method on its last node
self[-1].process_current_split()
if persistency_directory != None:
if store_node_chain:
self.store_node_chain(persistency_directory + os.sep + \
"node_chain_sp%s.pickle" % split_counter, store_node_chain)
# Store nodes that should be persistent
self.store_persistent_nodes(persistency_directory)
# If no more splits are available
if not self.use_next_split():
break
split_counter += 1
# print "Input benchmark"
# print gc.get_referrers(self[0].collection)
        # During execution numerous references to the flow are created but
        # never deleted, so the memory is not released (as the commented-out
        # debug output above would show). Therefore we free the input
        # collection here so that the garbage collector can reclaim the
        # memory. Otherwise, for reasons not yet understood, the references
        # to the input collection persist even between processes.
if hasattr(input_collection,'__iter__'):
self[0].set_generator(None)
else:
self[0].set_input_dataset(None)
gc.collect()
# Return the result collection of this flow
return self[-1].get_result_dataset()
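    # Hedged usage sketch (illustrative; construction of the flow from a YAML
    # specification is shown in NodeChainFactory.flow_from_yaml below):
    #
    #     flow = NodeChainFactory.flow_from_yaml(BenchmarkNodeChain, spec)
    #     results = flow.benchmark(dataset, run=0)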
def __call__(self, iterable=None, train_instances=None, runs=[]):
""" Call *execute* or *benchmark* and return (id, PerformanceResultSummary)
If *iterable* is given, calling an instance is equivalent to call its
'execute' method.
If *train_instances* and *runs* are given, 'benchmark' is called for
every run number specified and results are merged. This is useful for
e.g. parallel execution of subflows with the multiprocessing module,
since instance methods can not be serialized in Python but whole objects.
"""
if iterable != None:
return self.execute(iterable)
elif train_instances != None and runs != []: # parallelization case
            # we have to reinitialize logging because otherwise deadlocks occur
# when parallelization is done via multiprocessing.Pool
self.prepare_logging()
for ind, run in enumerate(runs):
result = self.benchmark(train_instances, run=run)
if ind == 0:
result_collection = result
else:
result_collection.data.update(result.data)
# reset node chain for new training if another call of
# :func:`benchmark` is expected.
if not ind == len(runs) - 1:
self.reset()
self.clean_logging()
return (self.id, result_collection)
else:
            warnings.warn("__call__ method needs at least one parameter (data)")
return None
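    # Hedged sketch of the parallelization case mentioned above: whole flow
    # objects (not instance methods) are handed to a worker pool.
    #
    #     pool = multiprocessing.Pool()
    #     async_result = pool.apply_async(flow, (),
    #                                     {'train_instances': data,
    #                                      'runs': [0, 1]})
    #     flow_id, result_collection = async_result.get()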
def store_node_chain(self, result_dir, store_node_chain):
""" Pickle this flow into *result_dir* for later usage"""
if isinstance(store_node_chain,basestring):
store_node_chain = eval(store_node_chain)
if isinstance(store_node_chain,tuple):
assert(len(store_node_chain) == 2)
# Keep only subflow starting at the i1-th node and ending at the
# (i2-1) node.
flow = NodeChain(self.flow[store_node_chain[0]:store_node_chain[1]])
elif isinstance(store_node_chain,list):
# Keep only nodes with indices contained in the list
# nodes have to be copied, otherwise input_node-refs of current flow
# are overwritten
from copy import copy
store_node_list = [copy(node) for ind, node in enumerate(self.flow) \
if ind in store_node_chain]
flow = NodeChain(store_node_list)
else:
# Per default, get rid of source and sink nodes
flow = NodeChain(self.flow[1:-1])
        # detach the input node reference so that it is not pickled along
        input_node = flow[0].input_node
        flow[0].input_node = None
flow.save(result_dir)
def prepare_logging(self):
""" Set up logging
        This method is only needed if subflows are forked, e.g. to execute
        them via a multiprocessing.Pool.
"""
# Prepare remote logging
root_logger = logging.getLogger("%s-%s" % (socket.gethostname(),
os.getpid()))
root_logger.setLevel(logging.DEBUG)
        root_logger.propagate = False
        if len(root_logger.handlers) == 0:
            self.handler = logging.handlers.SocketHandler(socket.gethostname(),
                            logging.handlers.DEFAULT_TCP_LOGGING_PORT)
            root_logger.addHandler(self.handler)
        else:
            # a handler is already registered; remember that none was added here
            self.handler = None
def clean_logging(self):
""" Remove logging handlers if existing
Call this method only if you have called *prepare_logging* before.
"""
# Remove potential logging handlers
if self.handler is not None:
self.handler.close()
root_logger = logging.getLogger("%s-%s" % (socket.gethostname(),
os.getpid()))
root_logger.removeHandler(self.handler)
def store_persistent_nodes(self, result_dir):
""" Store all nodes that should be persistent """
        # For all nodes
for index, node in enumerate(self):
# Store them in the result dir if they enabled storing
node.store_state(result_dir, index)
class NodeChainFactory(object):
""" Provide static methods to create and instantiate data flows
:Author: Jan Hendrik Metzen ([email protected])
:Created: 2009/01/26
"""
@staticmethod
def flow_from_yaml(Flow_Class, flow_spec):
""" Creates a Flow object
Reads from the given *flow_spec*, which should be a valid YAML
specification of a NodeChain object, and returns this dataflow
object.
**Parameters**
:Flow_Class:
The class name of node chain to create. Valid are 'NodeChain' and
'BenchmarkNodeChain'.
:flow_spec:
A valid YAML specification stream; this could be a file object,
a string representation of the YAML file or the Python
representation of the YAML file (list of dicts)
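        **Example** (list-of-dicts form; the node names are hypothetical
        and depend on the installed node set)
            >>> spec = [{'node': 'Noop'},
            ...         {'node': 'CV_Splitter', 'parameters': {'splits': 5}}]
            >>> flow = NodeChainFactory.flow_from_yaml(NodeChain, spec)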
"""
from pySPACE.missions.nodes.base_node import BaseNode
        # Read and parse the YAML file if necessary
        if not isinstance(flow_spec, list):
dataflow_spec = yaml.load(flow_spec)
else:
dataflow_spec = flow_spec
node_sequence = []
# For all nodes of the flow
for node_spec in dataflow_spec:
# Use factory method to create node
node_obj = BaseNode.node_from_yaml(node_spec)
            # Append this node to the sequence of nodes
node_sequence.append(node_obj)
# Check if the nodes have to cache their outputs
        for index, node in enumerate(node_sequence):
            # The first node has no predecessor whose output could be cached
            if index == 0:
                continue
            # If a node is trainable, it uses the outputs of its input node
            # at least twice, so we have to cache.
            if node.is_trainable():
                node_sequence[index - 1].set_permanent_attributes(caching = True)
            # Split nodes might also request the data from their input nodes
            # (once for each split), depending on their implementation. We
            # assume the worst case and activate caching.
            if node.is_split_node():
                node_sequence[index - 1].set_permanent_attributes(caching = True)
# Create the flow based on the node sequence and the given flow class
# and return it
return Flow_Class(node_sequence)
@staticmethod
def instantiate(template, parametrization):
""" Instantiate a template recursively for the given parameterization
Instantiate means to replace the parameter in the template by the
chosen value.
**Parameters**
:template:
A dictionary with key-value pairs, where values might contain
parameter keys which have to be replaced. A typical example of a
template would be a Python representation of a node read from YAML.
        :parametrization:
            A dictionary with parameter names as keys and exactly one value
            for this parameter as value.
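        **Example** (a minimal sketch; the keys and values are hypothetical)
            >>> template = {'node': 'FeatureNormalizationNode',
            ...             'parameters': {'norm': '__norm__'}}
            >>> NodeChainFactory.instantiate(template, {'__norm__': 2})
            {'node': 'FeatureNormalizationNode', 'parameters': {'norm': 2}}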
"""
instance = {}
for key, value in template.iteritems():
if value in parametrization.keys(): # Replacement
instance[key] = parametrization[value]
elif isinstance(value, dict): # Recursive call
instance[key] = NodeChainFactory.instantiate(value, parametrization)
            elif isinstance(value, basestring): # String replacement
                for param_key, param_value in parametrization.iteritems():
                    try:
                        value = value.replace(param_key, repr(param_value))
                    except Exception:
                        value = value.replace(param_key, python2yaml(param_value))
                instance[key] = value
            elif hasattr(value, "__iter__"):
                # Iterate over all items in the sequence
                instance[key] = []
                for iter_item in value:
                    if iter_item in parametrization.keys(): # Replacement
                        instance[key].append(parametrization[iter_item])
                    elif isinstance(iter_item, dict): # Recursive call
                        instance[key].append(NodeChainFactory.instantiate(
                            iter_item, parametrization))
                    elif isinstance(iter_item, basestring): # String replacement
                        for param_key, param_value in parametrization.iteritems():
                            try:
                                iter_item = iter_item.replace(param_key,
                                                              repr(param_value))
                            except Exception:
                                iter_item = iter_item.replace(
                                    param_key, python2yaml(param_value))
                        instance[key].append(iter_item)
                    else:
                        instance[key].append(iter_item)
            else: # Not parameterized
                instance[key] = value
        return instance
@staticmethod
    def replace_parameters_in_node_chain(node_chain_template, parametrization):
        """ Replace parameters in *node_chain_template* as specified by *parametrization* """
        node_chain_template = copy.copy(node_chain_template)
        if parametrization == {}:
            return node_chain_template
elif type(node_chain_template) == list:
return [NodeChainFactory.instantiate(
template=node,parametrization=parametrization)
for node in node_chain_template]
elif isinstance(node_chain_template, basestring):
node_chain_template = \
replace_parameters(node_chain_template, parametrization)
return node_chain_template
class SubflowHandler(object):
""" Interface for nodes to generate and execute subflows (subnode-chains)
A subflow means a node chain used inside a node for processing data.
This class provides functions that can be used by nodes to generate and
    execute subflows. It thereby serves as a communication layer to the
    backend (if one is used).
    Most important when inheriting from this class is that the subclass MUST
    be a node. The reason is that this class uses node functionality, e.g.
    logging, the *temp_dir* variable and so on.
**Parameters**
:processing_modality:
One of the valid strings: 'backend', 'serial', 'local'.
        :backend:
            The modality of the current backend is used. At the moment this
            is implemented only for the 'LoadLevelerBackend' and the
            'LocalBackend'.
        :serial:
            All subflows are executed sequentially, i.e. one after the
            other.
        :local:
            Subflows are executed in a Pool using *pool_size* CPUs. This
            may also be needed when no backend is used.
(*optional, default: 'serial'*)
:pool_size:
        If parallelization is based on several processes running in parallel
        on a local system, e.g. option 'backend' with the
        :class:`pySPACEMulticoreBackend` or option 'local', the number of
        worker processes for subflow evaluation has to be specified.
        .. note:: When using the LocalBackend, it is also possible to
                  specify the pool size of processes executed in parallel,
                  e.g. data sets. Your total number of CPUs should equal
                  pool size (pySPACE) + pool size (subflows).
(*optional, default: 2*)
:batch_size:
If parallelization of subflow execution is done together with the
:class:`~pySPACE.environments.backends.ll_backend.LoadLevelerBackend`,
*batch_size* determines how many subflows are executed in one
serial LoadLeveler job. This option is useful if execution of a
single subflow is really short (range of seconds) since there is
significant overhead in creating new jobs.
(*optional, default: 1*)
:Author: Anett Seeland ([email protected])
:Created: 2012/09/04
:LastChange: 2012/11/06 batch_size option added
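    **Example** (a sketch; in practice a node subclass mixes in this handler)
        >>> handler = SubflowHandler(processing_modality='local', pool_size=4)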
"""
def __init__(self, processing_modality='serial', pool_size=2, batch_size=1,
**kwargs):
self.modality = processing_modality
self.pool_size = int(pool_size)
self.batch_size = int(batch_size)
# a flag to send pool_size / batch_size only once to the backend
self.already_send = False
self.backend_com = None
self.backend_name = None
# to indicate the end of a message received over a socket
self.end_token = '!END!'
if processing_modality not in ["serial", "local", "backend"]:
import warnings
warnings.warn("Processing modality not found! Serial mode is used!")
self.modality = 'serial'
@staticmethod
def generate_subflow(flow_template, parametrization=None, flow_class=None):
""" Return a *flow_class* object of the given *flow_template*
        This method wraps two function calls
        (NodeChainFactory.replace_parameters_in_node_chain and
        NodeChainFactory.flow_from_yaml).
**Parameters**
:flow_template:
List of dicts - a valid representation of a node chain.
Alternatively, a YAML-String representation could be used,
which simplifies parameter replacement.
:parametrization:
            A dictionary with parameter names as keys and exactly one value
            for this parameter as value. Passed to NodeChainFactory.instantiate
(*optional, default: None*)
:flow_class:
The flow class name of which an object should be returned
(*optional, default: BenchmarkNodeChain*)
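        **Example** (a sketch; the node name and parameter are hypothetical)
            >>> template = [{'node': 'Noop', 'parameters': {'p': '__p__'}}]
            >>> flow = SubflowHandler.generate_subflow(template, {'__p__': 1})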
"""
if flow_class is None:
flow_class = BenchmarkNodeChain
flow_spec = NodeChainFactory.replace_parameters_in_node_chain(
flow_template,parametrization)
# create a new Benchmark flow
flow = NodeChainFactory.flow_from_yaml(flow_class, flow_spec)
return flow
def execute_subflows(self, train_instances, subflows, run_numbers=None):
""" Execute subflows and return result collection.
**Parameters**
            :train_instances:
                List of training instances which should be used to execute
                *subflows*.
            :subflows:
                List of BenchmarkNodeChain objects.
                .. note:: Every subflow object is stored in memory!
:run_numbers:
All subflows will be executed with every run_number specified in
this list. If None, the current self.run_number (from the node
class) is used.
(*optional, default: None*)
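        **Example** (a sketch; *self* is a node that inherits from
        SubflowHandler, *template* and *params* are hypothetical)
            >>> flows = [SubflowHandler.generate_subflow(template, p)
            ...          for p in params]
            >>> collections = self.execute_subflows(train_instances, flows)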
"""
        if run_numbers is None:
run_numbers = [self.run_number]
# in case of serial backend, modality is mapped to serial
# in the other case communication must be set up and
# jobs need to be submitted to backend
if self.modality == 'backend':
self.backend_com = pySPACE.configuration.backend_com
            if self.backend_com is not None:
# ask for backend_name
# create a socket and keep it alive as long as possible since
# handshaking costs really time
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect(self.backend_com)
client_socket, self.backend_name = talk('name' + self.end_token,
client_socket, self.backend_com)
else:
import warnings #necessary for serial backend!
warnings.warn("Seems that no backend is used! Modality of subflow execution "\
"has to be specified! Assuming serial backend.")
self.backend_name = 'serial'
self._log("Preparing subflows for backend execution.")
if self.backend_name in ['loadl','mcore'] :
# we have to pickle training instances and store it on disk
store_path = os.path.join(self.temp_dir,
"sp%d" % self.current_split)
create_directory(store_path)
filename = os.path.join(store_path, "subflow_data.pickle")
if not os.path.isfile(filename):
cPickle.dump(train_instances, open(filename,'wb'),
protocol=cPickle.HIGHEST_PROTOCOL)
subflows_to_compute = [subflows[ind].id for ind in \
range(len(subflows))]
if self.backend_name == 'loadl':
# send batch_size to backend if not already done
if not self.already_send:
client_socket = inform("subflow_batchsize;%d%s" % \
(self.batch_size, self.end_token),
client_socket, self.backend_com)
self.already_send = True
for subflow in subflows:
cPickle.dump(subflow, open(os.path.join(store_path,
subflow.id+".pickle"),"wb"),
protocol=cPickle.HIGHEST_PROTOCOL)
send_flows = subflows_to_compute
else: # backend_name == mcore
# send pool_size to backend if not already done
if not self.already_send:
client_socket = inform("subflow_poolsize;%d%s" % \
(self.pool_size, self.end_token),
client_socket, self.backend_com)
self.already_send = True
# send flow objects via socket
send_flows = [cPickle.dumps(subflow, cPickle.HIGHEST_PROTOCOL) \
for subflow in subflows]
# inform backend
client_socket,msg = talk('execute_subflows;%s;%d;%s;%s%s' % \
(store_path, len(subflows), str(send_flows),
str(run_numbers), self.end_token),
client_socket, self.backend_com)
time.sleep(10)
not_finished_subflows = set(subflows_to_compute)
while len(not_finished_subflows) != 0:
# ask backend for finished jobs
client_socket, msg = talk('is_ready;%d;%s%s' % \
(len(not_finished_subflows), str(not_finished_subflows),
self.end_token), client_socket, self.backend_com)
# parse message
finished_subflows = eval(msg) #should be a set
# set difference
not_finished_subflows -= finished_subflows
time.sleep(10)
if self.backend_name == 'loadl':
# read results and delete store_dir
result_pattern = os.path.join(store_path, '%s_result.pickle')
result_collections = [cPickle.load(open(result_pattern % \
subflows[ind].id,'rb')) for ind in range(len(subflows))]
                # .. todo:: check if errors have occurred and if so do not delete!
shutil.rmtree(store_path)
else: # backend_name == mcore
# ask backend to send results
client_socket, msg = talk("send_results;%s!END!" % \
subflows_to_compute, client_socket, self.backend_com)
# should be a list of collections
results = eval(msg)
result_collections = [cPickle.loads(result) for result in results]
self._log("Finished subflow execution.")
client_socket.shutdown(socket.SHUT_RDWR)
client_socket.close()
return result_collections
elif self.backend_name == 'serial':
# do the same as modality=='serial'
self.modality = 'serial'
else: # e.g. mpi backend :
import warnings
warnings.warn("Subflow Handling with %s backend not supported,"\
" serial-modality is used!" % self.backend_name)
self.modality = 'serial'
if self.modality == 'serial':
# serial execution
            # .. note:: the flows executed here cannot store anything;
            #           meta data of the result collection is NOT updated!
results = [subflow(train_instances=train_instances,
runs=run_numbers) for subflow in subflows]
result_collections = [result[1] for result in results]
return result_collections
else: # modality local, e.g. usage without backend in application case
self._log("Subflow Handler starts processes in pool.")
pool = multiprocessing.Pool(processes=self.pool_size)
results = [pool.apply_async(func=subflow,
kwds={"train_instances": train_instances,
"runs": run_numbers}) \
for subflow in subflows]
pool.close()
self._log("Waiting for parallel processes to finish.")
pool.join()
result_collections = [result.get()[1] for result in results]
del pool
return result_collections
| Crespo911/pyspace | pySPACE/environments/chains/node_chain.py | Python | gpl-3.0 | 60,058 |
import pandas as pd
# assumes Advertising.csv (with a 'TV' column) is in the working directory
adv = pd.read_csv('Advertising.csv')
tv_budget_x = adv.TV.tolist()
print(tv_budget_x)
| akanuragkumar/tensorflow-basics | ex1.py | Python | gpl-3.0 | 110 |
from django.contrib import admin
#from .models import Tag
#import sys
#import importlib
#importlib.reload(sys)
#admin.site.register(Tag)
# Register your models here.
| summerzhangft/summer | tag/admin.py | Python | gpl-3.0 | 167 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 22 17:01:36 2019
@author: raf
"""
# IMPORT STUFF
from pdb import set_trace as stop
import copy
import numpy as np
from collections import OrderedDict
import string as st
import os
import pandas as pd
from vison.datamodel import cdp
from vison.support import files
from vison.fpa import fpa as fpamod
from vison.metatests.metacal import MetaCal
from vison.plot import plots_fpa as plfpa
from vison.support import vcal, utils
from vison.datamodel import core as vcore
from vison.ogse import ogse
from vison.inject import lib as ilib
import matplotlib.cm as cm
from matplotlib import pyplot as plt
plt.switch_backend('TkAgg')
from matplotlib.colors import Normalize
# END IMPORT
cols2keep = [
'test',
'sn_ccd1',
'sn_ccd2',
'sn_ccd3',
'sn_roe',
'sn_rpsu',
'exptime',
'vstart',
'vend',
'rdmode',
'flushes',
'siflsh',
'siflsh_p',
'swellw',
'swelldly',
'inisweep',
'cdpu_clk',
'chinj',
'chinj_on',
'chinj_of',
'id_wid',
'id_dly',
'chin_dly',
'v_tpump',
's_tpump',
'v_tp_mod',
's_tp_mod',
'v_tp_cnt',
's_tp_cnt',
'dwell_v',
'dwell_s',
'toi_fl',
'toi_tp',
'toi_ro',
'toi_ch',
'motr',
'motr_cnt',
'motr_siz',
'source',
'wave',
'mirr_on',
'mirr_pos',
'R1C1_TT',
'R1C1_TB',
'R1C2_TT',
'R1C2_TB',
'R1C3_TT',
'R1C3_TB',
'IDL',
'IDH',
'IG1_1_T',
'IG1_2_T',
'IG1_3_T',
'IG1_1_B',
'IG1_2_B',
'IG1_3_B',
'IG2_T',
'IG2_B',
'OD_1_T',
'OD_2_T',
'OD_3_T',
'OD_1_B',
'OD_2_B',
'OD_3_B',
'RD_T',
'RD_B',
'time',
'HK_CCD1_TEMP_T',
'HK_CCD2_TEMP_T',
'HK_CCD3_TEMP_T',
'HK_CCD1_TEMP_B',
'HK_CCD2_TEMP_B',
'HK_CCD3_TEMP_B',
'HK_CCD1_OD_T',
'HK_CCD2_OD_T',
'HK_CCD3_OD_T',
'HK_CCD1_OD_B',
'HK_CCD2_OD_B',
'HK_CCD3_OD_B',
'HK_COMM_RD_T',
'HK_COMM_RD_B',
'HK_CCD1_IG1_T',
'HK_CCD2_IG1_T',
'HK_CCD3_IG1_T',
'HK_CCD1_IG1_B',
'HK_CCD2_IG1_B',
'HK_CCD3_IG1_B',
'HK_COMM_IG2_T',
'HK_COMM_IG2_B',
'HK_FPGA_BIAS_ID2',
'HK_VID_PCB_TEMP_T',
'HK_VID_PCB_TEMP_B',
'HK_RPSU_TEMP1',
'HK_FPGA_PCB_TEMP_T',
'HK_FPGA_PCB_TEMP_B',
'HK_RPSU_TEMP_2',
'HK_RPSU_28V_PRI_I',
'chk_NPIXOFF',
'chk_NPIXSAT',
'offset_pre',
'offset_ove',
'std_pre',
'std_ove']
class MetaChinj01(MetaCal):
""" """
def __init__(self, **kwargs):
""" """
super(MetaChinj01, self).__init__(**kwargs)
self.testnames = ['CHINJ01']
self.incols = cols2keep
self.ParsedTable = OrderedDict()
allgains = files.cPickleRead(kwargs['cdps']['gain'])
self.cdps['GAIN'] = OrderedDict()
for block in self.blocks:
self.cdps['GAIN'][block] = allgains[block]['PTC01'].copy()
self.products['METAFIT'] = OrderedDict()
self.products['VERPROFILES'] = OrderedDict()
self.products['HORPROFILES'] = OrderedDict()
self.init_fignames()
self.init_outcdpnames()
def parse_single_test(self, jrep, block, testname, inventoryitem):
""" """
NCCDs = len(self.CCDs)
NQuads = len(self.Quads)
session = inventoryitem['session']
CCDkeys = ['CCD%i' % CCD for CCD in self.CCDs]
IndexS = vcore.vMultiIndex([vcore.vIndex('ix', vals=[0])])
IndexCQ = vcore.vMultiIndex([vcore.vIndex('ix', vals=[0]),
vcore.vIndex('CCD', vals=self.CCDs),
vcore.vIndex('Quad', vals=self.Quads)])
#idd = copy.deepcopy(inventoryitem['dd'])
sidd = self.parse_single_test_gen(jrep, block, testname, inventoryitem)
        # TEST SPECIFIC
# TO BE ADDED:
# OFFSETS: pre, img, ove
# RON: pre, img, ove
# REFERENCES TO PROFILES
CHAMBER = sidd.meta['inputs']['CHAMBER']
CHAMBER_key = CHAMBER[0]
chamber_v = np.array([CHAMBER_key])
sidd.addColumn(chamber_v, 'CHAMBERKEY', IndexS, ix=0)
block_v = np.array([block])
sidd.addColumn(block_v, 'BLOCK', IndexS, ix=0)
test_v = np.array([jrep + 1])
sidd.addColumn(test_v, 'REP', IndexS, ix=0)
test_v = np.array([session])
sidd.addColumn(test_v, 'SESSION', IndexS, ix=0)
test_v = np.array([testname])
sidd.addColumn(test_v, 'TEST', IndexS, ix=0)
productspath = os.path.join(inventoryitem['resroot'], 'products')
metafitcdp_pick = os.path.join(productspath,
os.path.split(sidd.products['METAFIT_CDP'])[-1])
metafitcdp = files.cPickleRead(metafitcdp_pick)
metafit = copy.deepcopy(metafitcdp['data']['ANALYSIS'])
metafitkey = '%s_%s_%s_%i' % (testname, block, session, jrep + 1)
self.products['METAFIT'][metafitkey] = copy.deepcopy(metafit)
metafitkey_v = np.array([metafitkey])
sidd.addColumn(metafitkey_v, 'METAFIT', IndexS, ix=0)
metacdp_pick = os.path.join(productspath, os.path.split(
sidd.products['META_CDP'])[-1]) # change to META_CDP
metacdp = files.cPickleRead(metacdp_pick)
meta = metacdp['data']['ANALYSIS'] # this is a pandas DataFrame
tmp_v_CQ = np.zeros((1, NCCDs, NQuads))
bgd_adu_v = tmp_v_CQ.copy()
ig1_thresh_v = tmp_v_CQ.copy()
ig1_notch_v = tmp_v_CQ.copy()
slope_v = tmp_v_CQ.copy()
n_adu_v = tmp_v_CQ.copy()
for iCCD, CCDk in enumerate(CCDkeys):
for kQ, Q in enumerate(self.Quads):
ixloc = np.where((meta['CCD'] == iCCD + 1) & (meta['Q'] == kQ + 1))
bgd_adu_v[0, iCCD, kQ] = meta['BGD_ADU'][ixloc[0][0]]
ig1_thresh_v[0, iCCD, kQ] = meta['IG1_THRESH'][ixloc[0][0]]
ig1_notch_v[0, iCCD, kQ] = meta['IG1_NOTCH'][ixloc[0][0]]
slope_v[0, iCCD, kQ] = meta['S'][ixloc[0][0]]
n_adu_v[0, iCCD, kQ] = meta['N_ADU'][ixloc[0][0]]
sidd.addColumn(bgd_adu_v, 'FIT_BGD_ADU', IndexCQ)
sidd.addColumn(ig1_thresh_v, 'FIT_IG1_THRESH', IndexCQ)
sidd.addColumn(ig1_notch_v, 'FIT_IG1_NOTCH', IndexCQ)
sidd.addColumn(slope_v, 'FIT_SLOPE', IndexCQ)
sidd.addColumn(n_adu_v, 'FIT_N_ADU', IndexCQ)
# charge injection profiles
verprofspick = os.path.join(productspath,
os.path.split(sidd.products['PROFS_ALCOL'])[-1])
verprofs = files.cPickleRead(verprofspick)
vprofkey = '%s_%s_%s_%i' % (testname, block, session, jrep + 1)
self.products['VERPROFILES'][vprofkey] = verprofs.copy()
vprofskeys_v = np.zeros((1),dtype='U50')
vprofskeys_v[0] = vprofkey
sidd.addColumn(vprofskeys_v, 'VERPROFS_KEY', IndexS)
horprofspick = os.path.join(productspath,
os.path.split(sidd.products['PROFS_ALROW'])[-1])
horprofs = files.cPickleRead(horprofspick)
hprofkey = '%s_%s_%s_%i' % (testname, block, session, jrep + 1)
self.products['HORPROFILES'][hprofkey] = horprofs.copy()
hprofskeys_v = np.zeros((1),dtype='U50')
hprofskeys_v[0] = hprofkey
sidd.addColumn(hprofskeys_v, 'HORPROFS_KEY', IndexS)
# flatten sidd to table
sit = sidd.flattentoTable()
return sit
def _get_extractor_NOTCH_fromPT(self, units):
""" """
def _extract_NOTCH_fromPT(PT, block, CCDk, Q):
ixblock = self.get_ixblock(PT, block)
column = 'FIT_N_ADU_%s_Quad%s' % (CCDk, Q)
if units == 'ADU':
unitsConvFactor = 1
elif units == 'E':
unitsConvFactor = self.cdps['GAIN'][block][CCDk][Q][0]
Notch = np.nanmedian(PT[column][ixblock]) * unitsConvFactor
return Notch
return _extract_NOTCH_fromPT
def _get_injcurve(self, _chfitdf, ixCCD, ixQ, IG1raw, gain):
""" """
ixsel = np.where((_chfitdf['CCD'] == ixCCD) & (_chfitdf['Q'] == ixQ))
pars = ['BGD', 'K', 'XT', 'XN', 'A', 'N']
trans = dict(BGD='b', K='k', XT='xt', XN='xN', A='a', N='N')
parsdict = dict()
for par in pars:
parsdict[trans[par]] = _chfitdf[par].values[ixsel][0]
parsdict['IG1'] = IG1raw.copy()
inj = ilib.f_Inj_vs_IG1_ReLU(**parsdict) * 2.**16 # ADU
inj_kel = inj * gain / 1.E3
return inj_kel
def _get_CHIG1_MAP_from_PT(self, kind='CAL'):
""" """
CHIG1MAP = OrderedDict()
CHIG1MAP['labelkeys'] = self.Quads
PT = self.ParsedTable['CHINJ01']
column = 'METAFIT'
IG1s = [2.5, 6.75]
dIG1 = 0.05
NIG1 = (IG1s[1] - IG1s[0]) / dIG1 + 1
IG1raw = np.arange(NIG1) * dIG1 + IG1s[0]
for jY in range(self.NSLICES_FPA):
for iX in range(self.NCOLS_FPA):
Ckey = 'C_%i%i' % (jY + 1, iX + 1)
CHIG1MAP[Ckey] = OrderedDict()
locator = self.fpa.FPA_MAP[Ckey]
block = locator[0]
CCDk = locator[1]
jCCD = int(CCDk[-1])
ixblock = np.where(PT['BLOCK'] == block)
if len(ixblock[0]) == 0:
CHIG1MAP[Ckey] = OrderedDict(x=OrderedDict(),
y=OrderedDict())
for Q in self.Quads:
CHIG1MAP[Ckey]['x'][Q] = []
CHIG1MAP[Ckey]['y'][Q] = []
continue
_chkey = PT[column][ixblock][0]
_chfitdf = self.products['METAFIT'][_chkey]
_ccd_chfitdict = OrderedDict(x=OrderedDict(),
y=OrderedDict())
for kQ, Q in enumerate(self.Quads):
roeVCal = self.roeVCals[block]
IG1cal = roeVCal.fcal_HK(IG1raw, 'IG1', jCCD, Q)
gain = self.cdps['GAIN'][block][CCDk][Q][0]
inj_kel = self._get_injcurve(_chfitdf, jCCD, kQ + 1, IG1raw, gain)
if kind == 'CAL':
_IG1 = IG1cal.copy()
elif kind == 'RAW':
_IG1 = IG1raw.copy()
_ccd_chfitdict['x'][Q] = _IG1.copy()
_ccd_chfitdict['y'][Q] = inj_kel.copy()
CHIG1MAP[Ckey] = _ccd_chfitdict.copy()
return CHIG1MAP
def _get_XYdict_INJ(self, kind='CAL'):
x = dict()
y = dict()
PT = self.ParsedTable['CHINJ01']
column = 'METAFIT'
IG1s = [2.5, 6.75]
dIG1 = 0.05
NIG1 = (IG1s[1] - IG1s[0]) / dIG1 + 1
IG1raw = np.arange(NIG1) * dIG1 + IG1s[0]
labelkeys = []
for block in self.flight_blocks:
ixblock = np.where(PT['BLOCK'] == block)
ch_key = PT[column][ixblock][0]
chfitdf = self.products['METAFIT'][ch_key]
for iCCD, CCD in enumerate(self.CCDs):
CCDk = 'CCD%i' % CCD
for kQ, Q in enumerate(self.Quads):
roeVCal = self.roeVCals[block]
IG1cal = roeVCal.fcal_HK(IG1raw, 'IG1', iCCD + 1, Q)
gain = self.cdps['GAIN'][block][CCDk][Q][0]
if kind == 'CAL':
_IG1 = IG1cal.copy()
elif kind == 'RAW':
_IG1 = IG1raw.copy()
pkey = '%s_%s_%s' % (block, CCDk, Q)
inj_kel = self._get_injcurve(chfitdf, iCCD + 1, kQ + 1, IG1raw, gain)
x[pkey] = _IG1.copy()
y[pkey] = inj_kel.copy()
labelkeys.append(pkey)
CHdict = dict(x=x, y=y, labelkeys=labelkeys)
return CHdict
def _extract_INJCURVES_PAR_fromPT(self,PT,block,CCDk,Q):
""" """
ixblock = self.get_ixblock(PT,block)
column = 'METAFIT'
ch_key = PT[column][ixblock][0]
chfitdf = self.products['METAFIT'][ch_key]
ixCCD = ['CCD1','CCD2','CCD3'].index(CCDk)+1
ixQ = ['E','F','G','H'].index(Q)+1
ixsel = np.where((chfitdf['CCD'] == ixCCD) & (chfitdf['Q'] == ixQ))
pars = ['BGD', 'K', 'XT', 'XN', 'A', 'N']
trans = dict(BGD='b', K='k', XT='xt', XN='xN', A='a', N='N')
parsdict = dict()
for par in pars:
parsdict[trans[par]] = '%.3e' % chfitdf[par].values[ixsel][0]
return parsdict
def _get_XYdict_PROFS(self,proftype, IG1=4.5, Quads=None, doNorm=False, xrangeNorm=None):
""" """
if Quads is None:
Quads = self.Quads
x = dict()
y = dict()
labelkeys = []
PT = self.ParsedTable['CHINJ01']
profcol = '%sPROFS_KEY' % proftype.upper()
prodkey = '%sPROFILES' % proftype.upper()
for block in self.flight_blocks:
ixsel = np.where(PT['BLOCK'] == block)
prof_key = PT[profcol][ixsel][0]
i_Prof = self.products[prodkey][prof_key].copy()
IG1key = 'IG1_%.2fV' % IG1
for iCCD, CCD in enumerate(self.CCDs):
CCDk = 'CCD%i' % CCD
for kQ, Q in enumerate(Quads):
pkey = '%s_%s_%s' % (block, CCDk, Q)
_pcq = i_Prof['data'][CCDk][Q].copy()
_x = _pcq['x'][IG1key].copy()
_y = _pcq['y'][IG1key].copy()
x[pkey] = _x
if doNorm:
if xrangeNorm is not None:
norm = np.nanmedian(_y[xrangeNorm[0]:xrangeNorm[1]])
else:
norm = np.nanmedian(_y)
y[pkey] = _y / norm
labelkeys.append(pkey)
Pdict = dict(x=x,y=y,labelkeys=labelkeys)
return Pdict
def init_fignames(self):
""" """
if not os.path.exists(self.figspath):
os.system('mkdir %s' % self.figspath)
self.figs['NOTCH_ADU_MAP'] = os.path.join(self.figspath,
'NOTCH_ADU_MAP.png')
self.figs['NOTCH_ELE_MAP'] = os.path.join(self.figspath,
'NOTCH_ELE_MAP.png')
self.figs['CHINJ01_curves_IG1_RAW'] = os.path.join(self.figspath,
'CHINJ01_CURVES_IG1_RAW.png')
self.figs['CHINJ01_curves_IG1_CAL'] = os.path.join(self.figspath,
'CHINJ01_CURVES_IG1_CAL.png')
self.figs['CHINJ01_curves_MAP_IG1_CAL'] = os.path.join(self.figspath,
'CHINJ01_CURVES_MAP_IG1_CAL.png')
for proftype in ['ver','hor']:
for ccdhalf in ['top','bot']:
figkey = 'PROFS_%s_%s' % (proftype.upper(),ccdhalf.upper())
self.figs[figkey] = os.path.join(self.figspath,
'CHINJ01_%s_%s_PROFILES.png' % \
(proftype.upper(),ccdhalf.upper()))
for ccdhalf in ['top','bot']:
figkey = 'PROFS_ver_%s_ZOOM' % (ccdhalf.upper(),)
self.figs[figkey] = os.path.join(self.figspath,
'CHINJ01_ver_%s_ZOOM_PROFILES.png' % \
(ccdhalf.upper()),)
def init_outcdpnames(self):
if not os.path.exists(self.cdpspath):
os.system('mkdir %s' % self.cdpspath)
self.outcdps['INJCURVES'] = 'CHINJ01_INJCURVES_PAR.json'
self.outcdps['INJPROF_XLSX_HOR'] = 'CHINJ01_INJPROFILES_HOR.xlsx'
self.outcdps['INJPROF_XLSX_VER'] = 'CHINJ01_INJPROFILES_VER.xlsx'
self.outcdps['INJPROF_FITS_HOR'] = 'CHINJ01_INJPROFILES_HOR.fits'
self.outcdps['INJPROF_FITS_VER'] = 'CHINJ01_INJPROFILES_VER.fits'
def _extract_NUNHOR_fromPT(self, PT, block, CCDk, Q):
""" """
IG1 = 4.5
ixblock = self.get_ixblock(PT, block)
profcol = 'HORPROFS_KEY'
prodkey = 'HORPROFILES'
prof_key = PT[profcol][ixblock][0]
i_Prof = self.products[prodkey][prof_key].copy()
IG1key = 'IG1_%.2fV' % IG1
_pcq = i_Prof['data'][CCDk][Q].copy()
_y = _pcq['y'][IG1key].copy()
return np.nanstd(_y)/np.nanmean(_y)*100.
def _get_injprof_dfdict(self, direction, pandice=False):
""" """
injprofs = OrderedDict()
Quads = self.Quads
PT = self.ParsedTable['CHINJ01']
profcol = '{}PROFS_KEY'.format(direction.upper())
prodkey = '{}PROFILES'.format(direction.upper())
for ib, block in enumerate(self.flight_blocks):
injprofs[block] = OrderedDict()
ixsel = np.where(PT['BLOCK'] == block)
prof_key = PT[profcol][ixsel][0]
i_Prof = self.products[prodkey][prof_key].copy()
if ib==0:
rawIG1keys = list(i_Prof['data']['CCD1']['E']['x'].keys())
IG1values = [float(item.replace('IG1_','').replace('V','')) for item in rawIG1keys]
_order = np.argsort(IG1values)
IG1keys = np.array(rawIG1keys)[_order].tolist()
IG1values = np.array(IG1values)[_order].tolist()
for IG1key in IG1keys:
for iCCD, CCD in enumerate(self.CCDs):
CCDk = 'CCD%i' % CCD
Ckey = self.fpa.get_Ckey_from_BlockCCD(block, CCD)
for kQ, Q in enumerate(Quads):
_pcq = i_Prof['data'][CCDk][Q].copy()
_x = _pcq['x'][IG1key].copy()
_y = _pcq['y'][IG1key].copy()
#_y /= np.nanmedian(_y)
if iCCD==0 and kQ==0:
injprofs[block]['pixel'] = _x.copy()
injprofs[block]['%s_%s_%s' % (Ckey,Q,IG1key)] = _y.copy()
if pandice:
for block in self.flight_blocks:
injprofs[block] = pd.DataFrame.from_dict(injprofs[block])
return injprofs, IG1values
def get_injprof_xlsx_cdp(self, direction, inCDP_header=None):
""" """
CDP_header = OrderedDict()
        if inCDP_header is not None:
CDP_header.update(inCDP_header)
cdpname = self.outcdps['INJPROF_XLSX_%s' % direction.upper()]
path = self.cdpspath
injprof_cdp = cdp.Tables_CDP()
injprof_cdp.rootname = os.path.splitext(cdpname)[0]
injprof_cdp.path = path
injprofs_meta = OrderedDict()
injprofs, IG1values = self._get_injprof_dfdict(direction, pandice=True)
injprofs_meta['IG1'] = IG1values.__repr__()
#injprofs_meta['norm'] = 'median'
injprof_cdp.ingest_inputs(data=injprofs.copy(),
meta=injprofs_meta.copy(),
header=CDP_header.copy())
injprof_cdp.init_wb_and_fillAll(
header_title='CHINJ01: INJPROFS-%s' % direction.upper())
return injprof_cdp
def get_injprof_fits_cdp(self, direction, inCDP_header=None):
""" """
CDP_header = OrderedDict()
if inCDP_header is not None:
CDP_header.update(inCDP_header)
cdpname = self.outcdps['INJPROF_FITS_%s' % direction.upper()]
path = self.cdpspath
injprof_cdp = cdp.FitsTables_CDP()
injprof_cdp.rootname = os.path.splitext(cdpname)[0]
injprof_cdp.path = path
injprofs_meta = OrderedDict()
injprofs, IG1values = self._get_injprof_dfdict(direction, pandice=False)
injprofs_meta['IG1'] = IG1values.__repr__()
#injprofs_meta['norm'] = 'median'
CDP_header = self.FITSify_CDP_header(CDP_header)
injprof_cdp.ingest_inputs(data=injprofs.copy(),
meta=injprofs_meta.copy(),
header=CDP_header.copy())
injprof_cdp.init_HL_and_fillAll()
injprof_cdp.hdulist[0].header.insert(list(CDP_header.keys())[0],
('title', 'CHINJ01: INJPROFS-%s' % direction.upper()))
return injprof_cdp
def dump_aggregated_results(self):
""" """
if self.report is not None:
self.report.add_Section(keyword='dump',
Title='Aggregated Results', level=0)
self.add_DataAlbaran2Report()
function, module = utils.get_function_module()
CDP_header = self.CDP_header.copy()
CDP_header.update(dict(function=function, module=module))
CDP_header['DATE'] = self.get_time_tag()
# Histogram of Slopes [ADU/electrons]
# Histogram of Notch [ADU/electrons]
# Histogram of IG1_THRESH
# Injection level vs. Calibrated IG1, MAP
CURVES_IG1CAL_MAP = self._get_CHIG1_MAP_from_PT(kind='CAL')
figkey1 = 'CHINJ01_curves_MAP_IG1_CAL'
figname1 = self.figs[figkey1]
self.plot_XYMAP(CURVES_IG1CAL_MAP, **dict(
suptitle='Charge Injection Curves - Calibrated IG1',
doLegend=True,
ylabel='Inj [kel]',
xlabel='IG1 [V]',
corekwargs=dict(E=dict(linestyle='-', marker='', color='r'),
F=dict(linestyle='-', marker='', color='g'),
G=dict(linestyle='-', marker='', color='b'),
H=dict(linestyle='-', marker='', color='m')),
figname=figname1
))
if self.report is not None:
self.addFigure2Report(figname1,
figkey=figkey1,
caption='CHINJ01: Charge injection level [ke-] as a function of '+\
'calibrated IG1 voltage.',
texfraction=0.7)
# saving charge injection parameters to a json CDP
ICURVES_PAR_MAP = self.get_FPAMAP_from_PT(
self.ParsedTable['CHINJ01'],
extractor=self._extract_INJCURVES_PAR_fromPT)
ic_header = OrderedDict()
ic_header['title'] = 'Injection Curves Parameters'
ic_header['test'] = 'CHINJ01'
ic_header.update(CDP_header)
ic_meta = OrderedDict()
        ic_meta['units'] = '/2^16 ADU'
ic_meta['model'] = 'I=b+1/(1+exp(-K(IG1-XT))) * (-A*(IG1-XN)[IG1<XN] + N)'
ic_meta['structure'] = ''
ic_cdp = cdp.Json_CDP(rootname=self.outcdps['INJCURVES'],
path=self.cdpspath)
ic_cdp.ingest_inputs(data=ICURVES_PAR_MAP,
header = ic_header,
meta=ic_meta)
ic_cdp.savehardcopy()
# Injection level vs. Calibrated IG1, single plot
IG1CAL_Singledict = self._get_XYdict_INJ(kind='CAL')
figkey2 = 'CHINJ01_curves_IG1_CAL'
figname2 = self.figs[figkey2]
IG1CAL_kwargs = dict(
title='Charge Injection Curves - Calibrated IG1',
doLegend=False,
xlabel='IG1 (Calibrated) [V]',
ylabel='Injection [kel]',
figname=figname2)
corekwargs = dict()
for block in self.flight_blocks:
for iCCD in self.CCDs:
corekwargs['%s_CCD%i_E' % (block, iCCD)] = dict(linestyle='-',
marker='', color='#FF4600') # red
corekwargs['%s_CCD%i_F' % (block, iCCD)] = dict(linestyle='-',
marker='', color='#61FF00') # green
corekwargs['%s_CCD%i_G' % (block, iCCD)] = dict(linestyle='-',
marker='', color='#00FFE0') # cyan
corekwargs['%s_CCD%i_H' % (block, iCCD)] = dict(linestyle='-',
marker='', color='#1700FF') # blue
IG1CAL_kwargs['corekwargs'] = corekwargs.copy()
self.plot_XY(IG1CAL_Singledict, **IG1CAL_kwargs)
if self.report is not None:
self.addFigure2Report(figname2,
figkey=figkey2,
caption='CHINJ01: Charge injection level [ke-] as a function of '+\
'calibrated IG1 voltage.',
texfraction=0.7)
# Injection level vs. Non-Calibrated IG1, single plot
IG1RAW_Singledict = self._get_XYdict_INJ(kind='RAW')
figkey3 = 'CHINJ01_curves_IG1_RAW'
figname3 = self.figs[figkey3]
IG1RAW_kwargs = dict(
title='Charge Injection Curves - RAW IG1',
doLegend=False,
xlabel='IG1 (RAW) [V]',
ylabel='Injection [kel]',
figname=figname3)
corekwargs = dict()
for block in self.flight_blocks:
for iCCD in self.CCDs:
corekwargs['%s_CCD%i_E' % (block, iCCD)] = dict(linestyle='-',
marker='', color='#FF4600') # red
corekwargs['%s_CCD%i_F' % (block, iCCD)] = dict(linestyle='-',
marker='', color='#61FF00') # green
corekwargs['%s_CCD%i_G' % (block, iCCD)] = dict(linestyle='-',
marker='', color='#00FFE0') # cyan
corekwargs['%s_CCD%i_H' % (block, iCCD)] = dict(linestyle='-',
marker='', color='#1700FF') # blue
IG1RAW_kwargs['corekwargs'] = corekwargs.copy()
self.plot_XY(IG1RAW_Singledict, **IG1RAW_kwargs)
if self.report is not None:
self.addFigure2Report(figname3,
figkey=figkey3,
caption='CHINJ01: Charge injection level [ke-] as a function of '+\
'Non-calibrated IG1 voltage.',
texfraction=0.7)
# Notch level vs. calibrated IG2
# Notch level vs. calibrated IDL
# Notch level vs. calibrated OD
# Notch injection map, ADUs
NOTCHADUMAP = self.get_FPAMAP_from_PT(
self.ParsedTable['CHINJ01'],
extractor=self._get_extractor_NOTCH_fromPT(
units='ADU'))
figkey4 = 'NOTCH_ADU_MAP'
figname4 = self.figs[figkey4]
self.plot_SimpleMAP(NOTCHADUMAP, **dict(
suptitle='CHINJ01: NOTCH INJECTION [ADU]',
ColorbarText='ADU',
figname=figname4))
if self.report is not None:
self.addFigure2Report(figname4,
figkey=figkey4,
caption='CHINJ01: notch injection level, in ADU.',
texfraction=0.7)
# Notch injection map, ELECTRONs
NOTCHEMAP = self.get_FPAMAP_from_PT(self.ParsedTable['CHINJ01'],
extractor=self._get_extractor_NOTCH_fromPT(units='E'))
figkey5 = 'NOTCH_ELE_MAP'
figname5 = self.figs[figkey5]
self.plot_SimpleMAP(NOTCHEMAP, **dict(
suptitle='CHINJ01: NOTCH INJECTION [ELECTRONS]',
ColorbarText='electrons',
figname=figname5))
if self.report is not None:
self.addFigure2Report(figname5,
figkey=figkey5,
caption='CHINJ01: notch injection level, in electrons.',
texfraction=0.7)
# Average injection profiles
IG1profs = 4.5
xlabels_profs = dict(hor='column [pix]',
ver='row [pix]')
ylabels_profs = dict(hor='Injection level [Normalized]',
ver='Injection level [ADU]',)
proftypes = ['hor','ver']
ccdhalves = ['top','bot']
BLOCKcolors = cm.rainbow(np.linspace(0, 1, len(self.flight_blocks)))
pointcorekwargs = dict()
for jblock, block in enumerate(self.flight_blocks):
jcolor = BLOCKcolors[jblock]
for iCCD in self.CCDs:
for kQ in self.Quads:
pointcorekwargs['%s_CCD%i_%s' % (block, iCCD, kQ)] = dict(
linestyle='', marker='.', color=jcolor, ms=2.0)
for ccdhalf in ccdhalves:
if ccdhalf == 'top':
_Quads = ['G','H']
elif ccdhalf == 'bot':
_Quads = ['E','F']
for proftype in proftypes:
if proftype == 'hor':
xrangeNorm = None
elif proftype == 'ver':
xrangeNorm = [10,20]
XY_profs = self._get_XYdict_PROFS(proftype=proftype,
IG1=IG1profs,Quads=_Quads, doNorm=True,
xrangeNorm=xrangeNorm)
figkey6 = 'PROFS_%s_%s' % (proftype.upper(),ccdhalf.upper())
figname6 = self.figs[figkey6]
                title = 'CHINJ01: Direction: %s, CCDHalf: %s' % \
                    (proftype.upper(),ccdhalf.upper())
if proftype == 'ver':
xlim=[0,50]
ylim=None
elif proftype == 'hor':
xlim=None
ylim=[0.5,1.5]
profkwargs = dict(
title=title,
doLegend=False,
xlabel=xlabels_profs[proftype],
xlim=xlim,
ylim=ylim,
ylabel=ylabels_profs[proftype],
figname=figname6,
corekwargs=pointcorekwargs)
self.plot_XY(XY_profs, **profkwargs)
if proftype == 'ver':
captemp = 'CHINJ01: Average (normalized) injection profiles in vertical direction (along CCD columns) '+\
'for IG1=%.2fV. Only the 2 channels in the CCD %s-half are shown '+\
'(%s, %s). Each colour corresponds to a '+\
'different block (2x3 quadrant-channels in each colour).'
elif proftype == 'hor':
captemp = 'CHINJ01: Average injection profiles in horizontal direction (along CCD rows) '+\
'for IG1=%.2fV. The profiles have been normalized by the median injection level. '+\
'Only the 2 channels in the CCD %s-half are shown (%s, %s). Each colour corresponds to a '+\
'different block (2x3 quadrant-channels in each colour).'
if self.report is not None:
self.addFigure2Report(figname6,
figkey=figkey6,
caption= captemp % (IG1profs, ccdhalf, _Quads[0],_Quads[1]),
texfraction=0.7)
# Average injection vertical profiles, zoomed in to highlight
# non-perfect charge injection shut-down.
pointcorekwargs = dict()
for jblock, block in enumerate(self.flight_blocks):
jcolor = BLOCKcolors[jblock]
for iCCD in self.CCDs:
for kQ in self.Quads:
pointcorekwargs['%s_CCD%i_%s' % (block, iCCD, kQ)] = dict(
linestyle='', marker='.', color=jcolor, ms=2.0)
for ccdhalf in ccdhalves:
if ccdhalf == 'top':
_Quads = ['G','H']
elif ccdhalf == 'bot':
_Quads = ['E','F']
XY_profs = self._get_XYdict_PROFS(proftype='ver',
IG1=IG1profs,Quads=_Quads, doNorm=True,
xrangeNorm=[10,20])
figkey7 = 'PROFS_ver_%s_ZOOM' % (ccdhalf.upper(),)
figname7 = self.figs[figkey7]
        title = 'CHINJ01: Direction: ver, CCDHalf: %s, ZOOM-in' % \
            (ccdhalf.upper(),)
xlim=[25,50]
ylim=[0,4.e-3]
        profkwargs = dict(
            title=title,
            doLegend=False,
            xlabel=xlabels_profs['ver'],  # these zoomed plots are always vertical profiles
            xlim=xlim,
            ylim=ylim,
            ylabel=ylabels_profs['ver'],
            figname=figname7,
            corekwargs=pointcorekwargs)
self.plot_XY(XY_profs, **profkwargs)
captemp = 'CHINJ01: Average injection profiles in vertical direction (along CCD columns) '+\
'for IG1=%.2fV. Only the 2 channels in the CCD %s-half are shown '+\
'(%s, %s). Each colour corresponds to a '+\
'different block (2x3 quadrant-channels in each colour). Zoomed in '+\
'to highlight injection shutdown profile.'
if self.report is not None:
self.addFigure2Report(figname7,
figkey=figkey7,
caption= captemp % (IG1profs, ccdhalf, _Quads[0],_Quads[1]),
texfraction=0.7)
# creating and saving INJ PROFILES CDPs.
for direction in ['hor','ver']:
_injprof_xlsx_cdp = self.get_injprof_xlsx_cdp(direction=direction,
inCDP_header=CDP_header)
_injprof_xlsx_cdp.savehardcopy()
_injprof_fits_cdp = self.get_injprof_fits_cdp(direction=direction,
inCDP_header=CDP_header)
_injprof_fits_cdp.savehardcopy()
# reporting non-uniformity of injection lines to report
if self.report is not None:
NUN_HOR = self.get_FPAMAP_from_PT(self.ParsedTable['CHINJ01'],
extractor=self._extract_NUNHOR_fromPT)
nun_cdpdict = dict(
caption='CHINJ01: Non-Uniformity of the injection lines, rms, as percentage.',
valformat='%.2f')
ignore = self.add_StdQuadsTable2Report(
Matrix = NUN_HOR,
cdpdict = nun_cdpdict)
| ruymanengithub/vison | vison/metatests/chinj01.py | Python | gpl-3.0 | 34,380 |
"""
MagPy
IAGA02 input filter
Written by Roman Leonhardt June 2012
- contains test, read and write function
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from io import open
from magpy.stream import *
#global variables
MISSING_DATA = 99999
NOT_REPORTED = 88888
def isIAGA(filename):
"""
Checks whether a file is ASCII IAGA 2002 format.
"""
try:
temp = open(filename, 'rt').readline()
except:
return False
try:
if not temp.startswith(' Format'):
return False
if not 'IAGA-2002' in temp:
return False
except:
return False
return True
def readIAGA(filename, headonly=False, **kwargs):
"""
Reading IAGA2002 format data.
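    Example (a sketch; the file path is hypothetical):
        >>> stream = readIAGA('/data/iaga/wic20120101dmin.min')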
"""
starttime = kwargs.get('starttime')
endtime = kwargs.get('endtime')
debug = kwargs.get('debug')
getfile = True
array = [[] for key in KEYLIST]
fh = open(filename, 'rt')
# read file and split text into channels
stream = DataStream()
    # Check whether header information is already present
headers = {}
data = []
key = None
try:
# get day from filename (platform independent)
theday = extractDateFromString(filename)[0]
day = datetime.strftime(theday,"%Y-%m-%d")
# Select only files within eventually defined time range
if starttime:
if not datetime.strptime(day,'%Y-%m-%d') >= datetime.strptime(datetime.strftime(stream._testtime(starttime),'%Y-%m-%d'),'%Y-%m-%d'):
getfile = False
if endtime:
if not datetime.strptime(day,'%Y-%m-%d') <= datetime.strptime(datetime.strftime(stream._testtime(endtime),'%Y-%m-%d'),'%Y-%m-%d'):
getfile = False
except:
logging.warning("Could not identify typical IAGA date for %s. Reading all ...".format(filename))
getfile = True
if getfile:
loggerlib.info('Read: %s Format: %s ' % (filename, "IAGA2002"))
dfpos = KEYLIST.index('df')
for line in fh:
if line.isspace():
# blank line
continue
elif line.startswith(' '):
# data info
infoline = line[:-4]
key = infoline[:23].strip()
val = infoline[23:].strip()
if key.find('Source') > -1:
if not val == '':
stream.header['StationInstitution'] = val
if key.find('Station') > -1:
if not val == '':
stream.header['StationName'] = val
if key.find('IAGA') > -1:
if not val == '':
stream.header['StationIAGAcode'] = val
stream.header['StationID'] = val
if key.find('Latitude') > -1:
if not val == '':
stream.header['DataAcquisitionLatitude'] = val
if key.find('Longitude') > -1:
if not val == '':
stream.header['DataAcquisitionLongitude'] = val
if key.find('Elevation') > -1:
if not val == '':
stream.header['DataElevation'] = val
if key.find('Format') > -1:
if not val == '':
stream.header['DataFormat'] = val
if key.find('Reported') > -1:
if not val == '':
stream.header['DataComponents'] = val
if key.find('Orientation') > -1:
if not val == '':
stream.header['DataSensorOrientation'] = val
if key.find('Digital') > -1:
if not val == '':
stream.header['DataDigitalSampling'] = val
if key.find('Interval') > -1:
if not val == '':
stream.header['DataSamplingFilter'] = val
                if key.startswith('#'):  # the key was stripped above, so the leading blank is gone
if key.find('# V-Instrument') > -1:
if not val == '':
stream.header['SensorID'] = val
elif key.find('# PublicationDate') > -1:
if not val == '':
stream.header['DataPublicationDate'] = val
else:
print ("formatIAGA: did not import optional header info {a}".format(a=key))
if key.find('Data Type') > -1:
if not val == '':
if val[0] in ['d','D']:
stream.header['DataPublicationLevel'] = '4'
elif val[0] in ['q','Q']:
stream.header['DataPublicationLevel'] = '3'
elif val[0] in ['p','P']:
stream.header['DataPublicationLevel'] = '2'
else:
stream.header['DataPublicationLevel'] = '1'
if key.find('Publication Date') > -1:
if not val == '':
stream.header['DataPublicationDate'] = val
elif line.startswith('DATE'):
# data header
colsstr = line.lower().split()
varstr = ''
for it, elem in enumerate(colsstr):
if it > 2:
varstr += elem[-1]
varstr = varstr[:4]
stream.header["col-x"] = varstr[0].upper()
stream.header["col-y"] = varstr[1].upper()
stream.header["col-z"] = varstr[2].upper()
stream.header["unit-col-x"] = 'nT'
stream.header["unit-col-y"] = 'nT'
stream.header["unit-col-z"] = 'nT'
stream.header["unit-col-f"] = 'nT'
if varstr.endswith('g'):
stream.header["unit-col-df"] = 'nT'
stream.header["col-df"] = 'G'
stream.header["col-f"] = 'F'
else:
stream.header["col-f"] = 'F'
if varstr in ['dhzf','dhzg']:
#stream.header["col-x"] = 'H'
#stream.header["col-y"] = 'D'
#stream.header["col-z"] = 'Z'
stream.header["unit-col-y"] = 'deg'
stream.header['DataComponents'] = 'HDZF'
elif varstr in ['ehzf','ehzg']:
#stream.header["col-x"] = 'H'
#stream.header["col-y"] = 'E'
#stream.header["col-z"] = 'Z'
stream.header['DataComponents'] = 'HEZF'
elif varstr in ['dhif','dhig']:
stream.header["col-x"] = 'I'
stream.header["col-y"] = 'D'
stream.header["col-z"] = 'F'
stream.header["unit-col-x"] = 'deg'
stream.header["unit-col-y"] = 'deg'
stream.header['DataComponents'] = 'IDFF'
elif varstr in ['hdzf','hdzg']:
#stream.header["col-x"] = 'H'
#stream.header["col-y"] = 'D'
stream.header["unit-col-y"] = 'deg'
#stream.header["col-z"] = 'Z'
stream.header['DataComponents'] = 'HDZF'
else:
#stream.header["col-x"] = 'X'
#stream.header["col-y"] = 'Y'
#stream.header["col-z"] = 'Z'
stream.header['DataComponents'] = 'XYZF'
elif headonly:
# skip data for option headonly
continue
elif line.startswith('%'):
pass
else:
# data entry - may be written in multiple columns
# row beinhaltet die Werte eine Zeile
# transl. row values contains a line
row=[]
# Verwende das letzte Zeichen von "line" nicht, d.h. line[:-1],
# da darin der Zeilenumbruch "\n" steht
# transl. Do not use the last character of "line", d.h. line [:-1],
# since this is the line break "\n"
for val in line[:-1].split():
# nur nicht-leere Spalten hinzufuegen
# transl. Just add non-empty columns
if val.strip()!="":
row.append(val.strip())
# Baue zweidimensionales Array auf
# transl. Build two-dimensional array
array[0].append( date2num(datetime.strptime(row[0]+'-'+row[1],"%Y-%m-%d-%H:%M:%S.%f")) )
if float(row[3]) >= NOT_REPORTED:
row[3] = np.nan
if float(row[4]) >= NOT_REPORTED:
row[4] = np.nan
if float(row[5]) >= NOT_REPORTED:
row[5] = np.nan
if varstr in ['dhzf','dhzg']:
array[1].append( float(row[4]) )
array[2].append( float(row[3])/60.0 )
array[3].append( float(row[5]) )
elif varstr in ['ehzf','ehzg']:
array[1].append( float(row[4]) )
array[2].append( float(row[3]) )
array[3].append( float(row[5]) )
elif varstr in ['dhif','dhig']:
array[1].append( float(row[5])/60.0 )
array[2].append( float(row[3])/60.0 )
array[3].append( float(row[6]) )
elif varstr in ['hdzf','hdzg']:
array[1].append( float(row[3]) )
array[2].append( float(row[4])/60.0 )
array[3].append( float(row[5]) )
else:
array[1].append( float(row[3]) )
array[2].append( float(row[4]) )
array[3].append( float(row[5]) )
                try:
                    if float(row[6]) < NOT_REPORTED:
                        if varstr[-1]=='f':
                            array[4].append(float(row[6]))
                        elif varstr[-1]=='g' and varstr=='xyzg':
                            array[4].append(np.sqrt(float(row[3])**2+float(row[4])**2+float(row[5])**2) - float(row[6]))
                            array[dfpos].append(float(row[6]))
                        elif varstr[-1]=='g' and varstr in ['hdzg','dhzg','ehzg']:
                            array[4].append(np.sqrt(float(row[3])**2+float(row[5])**2) - float(row[6]))
                            array[dfpos].append(float(row[6]))
                        elif varstr[-1]=='g' and varstr in ['dhig']:
                            array[4].append(float(row[6]))
                            array[dfpos].append(float(row[6]))
                        else:
                            raise ValueError
                    else:
                        array[4].append(float('nan'))
                except (ValueError, IndexError):
                    if len(row) > 6 and not float(row[6]) >= NOT_REPORTED:
                        array[4].append(float(row[6]))
                    else:
                        array[4].append(float('nan'))
#data.append(row)
fh.close()
for idx, elem in enumerate(array):
array[idx] = np.asarray(array[idx])
stream = DataStream([LineStruct()],stream.header,np.asarray(array))
sr = stream.samplingrate()
return stream
def writeIAGA(datastream, filename, **kwargs):
"""
Writing IAGA2002 format data.
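    Example (a sketch; *stream* and the path are hypothetical):
        >>> writeIAGA(stream, '/tmp/wic20120101dmin.min', mode='replace')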
"""
mode = kwargs.get('mode')
useg = kwargs.get('useg')
def OpenFile(filename, mode='w'):
if sys.version_info >= (3,0,0):
f = open(filename, mode, newline='')
else:
f = open(filename, mode+'b')
return f
if os.path.isfile(filename):
if mode == 'skip': # skip existing inputs
exst = read(path_or_url=filename)
datastream = mergeStreams(exst,datastream,extend=True)
myFile= OpenFile(filename)
elif mode == 'replace': # replace existing inputs
exst = read(path_or_url=filename)
datastream = mergeStreams(datastream,exst,extend=True)
myFile= OpenFile(filename)
elif mode == 'append':
myFile= OpenFile(filename,mode='a')
else: # overwrite mode
#os.remove(filename) ?? necessary ??
myFile= OpenFile(filename)
else:
myFile= OpenFile(filename)
header = datastream.header
datacomp = header.get('DataComponents'," ")
if datacomp in ['hez','HEZ','hezf','HEZF','hezg','HEZG']:
order = [1,0,2]
datacomp = 'EHZ'
elif datacomp in ['hdz','HDZ','hdzf','HDZF','hdzg','HDZG']:
order = [1,0,2]
datacomp = 'DHZ'
elif datacomp in ['idf','IDF','idff','IDFF','idfg','IDFG']:
order = [1,3,0]
datacomp = 'DHI'
elif datacomp in ['xyz','XYZ','xyzf','XYZF','xyzg','XYZG']:
order = [0,1,2]
datacomp = 'XYZ'
elif datacomp in ['ehz','EHZ','ehzf','EHZF','ehzg','EHZG']:
order = [0,1,2]
datacomp = 'EHZ'
elif datacomp in ['dhz','DHZ','dhzf','DHZF','dhzg','DHZG']:
order = [0,1,2]
datacomp = 'DHZ'
elif datacomp in ['dhi','DHI','dhif','DHIF','dhig','DHIG']:
order = [0,1,2]
datacomp = 'DHI'
else:
order = [0,1,2]
datacomp = 'XYZ'
find = KEYLIST.index('f')
findg = KEYLIST.index('df')
if len(datastream.ndarray[findg]) > 0:
useg = True
if len(datastream.ndarray[find]) > 0:
if not useg:
datacomp = datacomp+'F'
else:
datacomp = datacomp+'G'
else:
datacomp = datacomp+'F'
publevel = str(header.get('DataPublicationLevel'," "))
if publevel == '2':
publ = 'Provisional'
elif publevel == '3':
publ = 'Quasi-definitive'
elif publevel == '4':
publ = 'Definitive'
else:
publ = 'Variation'
proj = header.get('DataLocationReference','')
longi = header.get('DataAcquisitionLongitude',' ')
lati = header.get('DataAcquisitionLatitude',' ')
    if not (longi == '' or lati == ''):  # convert only if both coordinates are present
if proj == '':
pass
else:
if proj.find('EPSG:') > 0:
epsg = int(proj.split('EPSG:')[1].strip())
if not epsg==4326:
longi,lati = convertGeoCoordinate(float(longi),float(lati),'epsg:'+str(epsg),'epsg:4326')
line = []
if not mode == 'append':
#if header.get('Elevation') > 0:
# print(header)
line.append(' Format %-15s IAGA-2002 %-34s |\n' % (' ',' '))
line.append(' Source of Data %-7s %-44s |\n' % (' ',header.get('StationInstitution'," ")[:44]))
line.append(' Station Name %-9s %-44s |\n' % (' ', header.get('StationName'," ")[:44]))
line.append(' IAGA Code %-12s %-44s |\n' % (' ',header.get('StationIAGAcode'," ")[:44]))
line.append(' Geodetic Latitude %-4s %-44s |\n' % (' ',str(lati)[:44]))
line.append(' Geodetic Longitude %-3s %-44s |\n' % (' ',str(longi)[:44]))
line.append(' Elevation %-12s %-44s |\n' % (' ',str(header.get('DataElevation'," "))[:44]))
line.append(' Reported %-13s %-44s |\n' % (' ',datacomp))
line.append(' Sensor Orientation %-3s %-44s |\n' % (' ',header.get('DataSensorOrientation'," ").upper()[:44]))
line.append(' Digital Sampling %-5s %-44s |\n' % (' ',str(header.get('DataDigitalSampling'," "))[:44]))
line.append(' Data Interval Type %-3s %-44s |\n' % (' ',(str(header.get('DataSamplingRate'," "))+' ('+header.get('DataSamplingFilter'," ")+')')[:44]))
line.append(' Data Type %-12s %-44s |\n' % (' ',publ[:44]))
if not header.get('DataPublicationDate','') == '':
line.append(' {a:<20} {b:<45s}|\n'.format(a='Publication date',b=str(header.get('DataPublicationDate'))[:10]))
# Optional header part:
skipopt = False
if not skipopt:
if not header.get('SensorID','') == '':
line.append(' #{a:<20} {b:<45s}|\n'.format(a='V-Instrument',b=header.get('SensorID')[:44]))
if not header.get('SecondarySensorID','') == '':
line.append(' #{a:<20} {b:<45s}|\n'.format(a='F-Instrument',b=header.get('SecondarySensorID')[:44]))
if not header.get('StationMeans','') == '':
try:
meanlist = header.get('StationMeans') # Assume something like H:xxxx,D:xxx,Z:xxxx
meanlist = meanlist.split(',')
for me in meanlist:
if me.startswith('H'):
hval = me.split(':')
line.append(' #{a:<20} {b:<45s}|\n'.format(a='Approx H',b=hval[1]))
except:
pass
line.append(' #{a:<20} {b:<45s}|\n'.format(a='File created by',b='MagPy '+magpyversion))
iagacode = header.get('StationIAGAcode',"")
line.append('DATE TIME DOY %8s %9s %9s %9s |\n' % (iagacode+datacomp[0],iagacode+datacomp[1],iagacode+datacomp[2],iagacode+datacomp[3]))
try:
myFile.writelines(line) # Write header sequence of strings to a file
except IOError:
pass
try:
line = []
ndtype = False
if len(datastream.ndarray[0]) > 0:
ndtype = True
fulllength = datastream.length()[0]
# Possible types: DHIF, DHZF, XYZF, or DHIG, DHZG, XYZG
#datacomp = 'EHZ'
#datacomp = 'DHZ'
#datacomp = 'DHI'
#datacomp = 'XYZ'
xmult = 1.0
ymult = 1.0
zmult = 1.0
xind = order[0]+1
yind = order[1]+1
zind = order[2]+1
if len(datastream.ndarray[xind]) == 0 or len(datastream.ndarray[yind]) == 0 or len(datastream.ndarray[zind]) == 0:
print("writeIAGA02: WARNING! Data missing in X, Y or Z component! Writing anyway...")
find = KEYLIST.index('f')
if datacomp.startswith('DHZ'):
xmult = 60.0
elif datacomp.startswith('DHI'):
xmult = 60.0
zmult = 60.0
for i in range(fulllength):
if not ndtype:
elem = datastream[i]
xval = elem.x
yval = elem.y
zval = elem.z
fval = elem.f
timeval = elem.time
else:
if len(datastream.ndarray[xind]) > 0:
xval = datastream.ndarray[xind][i]*xmult
else:
xval = NOT_REPORTED
                if len(datastream.ndarray[yind]) > 0:
                    yval = datastream.ndarray[yind][i]
                    # order holds ints, so compare against the int 3
                    # (selects the DHI output case)
                    if order[1] == 3:
                        yval = datastream.ndarray[yind][i]*np.cos(datastream.ndarray[zind][i]*np.pi/180.)
                else:
                    yval = NOT_REPORTED
if len(datastream.ndarray[zind]) > 0:
zval = datastream.ndarray[zind][i]*zmult
else:
zval = NOT_REPORTED
if len(datastream.ndarray[find]) > 0:
if not useg:
fval = datastream.ndarray[find][i]
else:
fval = np.sqrt(xval**2+yval**2+zval**2)-datastream.ndarray[find][i]
else:
fval = NOT_REPORTED
timeval = datastream.ndarray[0][i]
row = ''
try:
row = datetime.strftime(num2date(timeval).replace(tzinfo=None),"%Y-%m-%d %H:%M:%S.%f")
row = row[:-3]
doi = datetime.strftime(num2date(timeval).replace(tzinfo=None), "%j")
row += ' %s' % str(doi)
except:
row = ''
pass
if isnan(xval):
row += '%13.2f' % MISSING_DATA
else:
row += '%13.2f' % xval
if isnan(yval):
row += '%10.2f' % MISSING_DATA
else:
row += '%10.2f' % yval
if isnan(zval):
row += '%10.2f' % MISSING_DATA
else:
row += '%10.2f' % zval
if isnan(fval):
row += '%10.2f' % MISSING_DATA
else:
row += '%10.2f' % fval
line.append(row + '\n')
        try:
            myFile.writelines( line )
        finally:
            myFile.close()
    except IOError:
        return False
return True
| hschovanec-usgs/magpy | magpy/lib/format_iaga02.py | Python | gpl-3.0 | 20,820 |
class Solution:
def majorityElement(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
        # Generalized Boyer-Moore majority vote: at most two distinct
        # elements can appear more than len(nums) // 3 times, so we track
        # two candidates with two counters (initialized to distinct values).
        num1, cnt1 = 0, 0
        num2, cnt2 = 1, 0
        for num in nums:
            if num == num1:
                cnt1 += 1
            elif num == num2:
                cnt2 += 1
            else:
                if cnt1 == 0:
                    num1, cnt1 = num, 1
                elif cnt2 == 0:
                    num2, cnt2 = num, 1
                else:
                    # a third distinct value cancels one vote of each candidate
                    cnt1, cnt2 = cnt1 - 1, cnt2 - 1
        # verify the surviving candidates against the actual threshold
        return [num for num in (num1, num2) if nums.count(num) > len(nums) // 3]
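
# A quick sanity check of the generalized Boyer-Moore voting above;
# the inputs are illustrative only.
if __name__ == '__main__':
    assert Solution().majorityElement([3, 2, 3]) == [3]
    assert sorted(Solution().majorityElement([1, 1, 1, 3, 3, 2, 2, 2])) == [1, 2]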
| YiqunPeng/Leetcode-pyq | solutions/229MajorityElementII.py | Python | gpl-3.0 | 671 |
import pandas as pd
from sklearn.datasets import load_boston
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.cross_validation import train_test_split
from sklearn.metrics import mean_squared_error
from .auto_segment_FEMPO import BasicSegmenter_FEMPO
def demo(X = None, y = None, test_size = 0.1):
    if X is None:
boston = load_boston()
X = pd.DataFrame(boston.data)
y = pd.DataFrame(boston.target)
base_estimator = DecisionTreeRegressor(max_depth = 5)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
print X_train.shape
# If you want to compare with BaggingRegressor.
# bench = BaggingRegressor(base_estimator = base_estimator, n_estimators = 10, max_samples = 1, oob_score = True).fit(X_train, y_train)
# print bench.score(X_test, y_test)
# print mean_squared_error(bench.predict(X_test), y_test)
    clf = BasicSegmenter_FEMPO(ngen=30, init_sample_percentage=1, n_votes=10, n=10,
                               base_estimator=base_estimator,
                               unseen_x=X_test, unseen_y=y_test)
clf.fit(X_train, y_train)
print clf.score(X_test,y_test)
y = clf.predict(X_test)
print mean_squared_error(y, y_test)
print y.shape
return clf, X_test, y_test
| bhanu-mnit/EvoML | evoml/subsampling/test_auto_segmentEG_FEMPO.py | Python | gpl-3.0 | 1,357 |
#!/usr/bin/env python
# Version 0.1
# NDVI automated acquisition and calculation by Vladyslav Popov
# Using landsat-util, source: https://github.com/developmentseed/landsat-util
# Uses Amazon Web Services Public Dataset (Lansat 8)
# Script should be run every day
from os.path import join, abspath, dirname, exists
import os
import errno
import shutil
from tempfile import mkdtemp
import subprocess
import urllib2
import logging
import sys
import datetime
import re
from landsat.search import Search
from landsat.ndvi import NDVIWithManualColorMap
# Enable logging
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
# Get current date
current_date = datetime.datetime.now().date()
print 'Current date is:', current_date
# Let's subtract 1 day from the current date
sub_date = current_date - datetime.timedelta(days=1)
print 'Subtracted date is:', sub_date
# Scene search by date and WRS-2 row and path
search = Search()
try:
search_results = search.search(paths_rows='177,025', start_date=sub_date, end_date=current_date)
search_string = str(search_results.get('results'))
search_list = re.compile('\w+').findall(search_string)
scene_id = str(search_list.pop(5))
print scene_id
l = len(scene_id)
print l
#exit if we have no current image
except Exception:
raise SystemExit('Closing...')
# String concat for building Red Band URL for download
url_red = 'http://landsat-pds.s3.amazonaws.com/L8/177/025/' + scene_id + '/' + scene_id + '_B4.TIF'
# String concat for building NIR Band URL for download
url_nir = 'http://landsat-pds.s3.amazonaws.com/L8/177/025/' + scene_id + '/' + scene_id + '_B5.TIF'
# Build filenames for band rasters and output NDVI file
red_file = scene_id + '_B4.TIF'
nir_file = scene_id + '_B5.TIF'
ndvi_file = scene_id + '_NDVI.TIF'
print 'Filenames built successfully'
# Create directories for later processing
base_dir = os.getcwd()
temp_folder = join(base_dir, "temp_folder")
scene_folder = join(temp_folder, scene_id)
if not os.path.exists(temp_folder):
os.makedirs(temp_folder)
if not os.path.exists(scene_folder):
os.makedirs(scene_folder)
# Helper: download one band GeoTIFF into the scene folder using urllib2,
# printing progress as it goes (identical logic for the red and NIR bands).
def download_band(url):
    file_name = url.split('/')[-1]
    u = urllib2.urlopen(url)
    f = open("temp_folder/"+scene_id+"/"+file_name, 'wb')
    meta = u.info()
    file_size = int(meta.getheaders("Content-Length")[0])
    print "Downloading: %s Bytes: %s" % (file_name, file_size)
    file_size_dl = 0
    block_sz = 8192
    while True:
        buffer = u.read(block_sz)
        if not buffer:
            break
        file_size_dl += len(buffer)
        f.write(buffer)
        status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
        status = status + chr(8)*(len(status)+1)
        print status,
    f.close()

# Download Band 4 (red) and Band 5 (NIR)
download_band(url_red)
download_band(url_nir)
# NDVI processing
# Lets create new instance of class
nd = NDVIWithManualColorMap(path=temp_folder+"/"+scene_id, dst_path=temp_folder)
# Start process
print nd.run()
# Create virtual dataset for dividing the TIFF into tiles
subprocess.call(["gdalbuildvrt", "-a_srs", "EPSG:3857", "NDVImap.vrt", "temp_folder/"+scene_id+"/"+ndvi_file])
# Remove old tiles
shutil.rmtree("ndvi_tiles", ignore_errors=True)
# Start the tiling process using the virtual dataset
subprocess.call(["./gdal2tilesp.py", "-w", "none", "-s", "EPSG:3857", "-p", "mercator", "-z", "8-12", "--format=PNG", "--processes=4", "-o", "tms", "NDVImap.vrt", "ndvi_tiles"])
# Let`s clean temporary files (bands, ndvi, vrt)
shutil.rmtree("temp_folder", ignore_errors=True)
os.remove("NDVImap.vrt")
print 'All temporary data was successfully removed'
# Close script
raise SystemExit('Closing...')
| vladanti/ndvi_calc | ndvi.py | Python | gpl-3.0 | 4,318 |
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re
class DownloadEnteredDataTest(unittest.TestCase):
    # Polling budget (seconds) for the explicit wait loops in the test below.
    DEFAULT_WAIT_SECONDS = 10

    def setUp(self):
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(30)
self.base_url = "http://kc.kbtdev.org/"
self.verificationErrors = []
self.accept_next_alert = True
def test_download_entered_data(self):
# Open KoBoCAT.
driver = self.driver
driver.get(self.base_url + "")
# Assert that our form's title is in the list of projects and follow its link.
self.assertTrue(self.is_element_present(By.LINK_TEXT, "Selenium test form title."))
driver.find_element_by_link_text("Selenium test form title.").click()
# Wait for and click the "Download data" link.
for _ in xrange(self.DEFAULT_WAIT_SECONDS):
self.check_timeout('Waiting for "Download data" link.')
try:
if self.is_element_present(By.LINK_TEXT, "Download data"): break
except: pass
time.sleep(1)
else: self.fail("time out")
driver.find_element_by_link_text("Download data").click()
# Wait for and click the "XLS" link.
for _ in xrange(self.DEFAULT_WAIT_SECONDS):
self.check_timeout('Waiting for "XLS" link.')
try:
if self.is_element_present(By.LINK_TEXT, "XLS"): break
except: pass
time.sleep(1)
else: self.fail("time out")
driver.find_element_by_link_text("XLS").click()
# Wait for the download page's header and ensure it contains the word "excel" (case insensitive).
for _ in xrange(self.DEFAULT_WAIT_SECONDS):
self.check_timeout('Waiting for download page\'s header.')
try:
if self.is_element_present(By.CSS_SELECTOR, ".data-page__header"): break
except: pass
time.sleep(1)
else: self.fail("time out")
self.assertIsNotNone(re.compile('excel', re.IGNORECASE).search(driver.find_element_by_css_selector(".data-page__header").text))
# Wait for the export progress status.
for _ in xrange(self.DEFAULT_WAIT_SECONDS):
self.check_timeout('Waiting for the export progress status.')
try:
if self.is_element_present(By.CSS_SELECTOR, ".refresh-export-progress"): break
except: pass
time.sleep(1)
else: self.fail("time out")
# Wait (a little more than usual) for the export's download link and click it.
for _ in xrange(30):
self.check_timeout('Waiting for the export\'s download link.')
try:
if re.search(r"^Selenium_test_form_title_[\s\S]*$", driver.find_element_by_css_selector("#forms-table a").text): break
except: pass
time.sleep(1)
else: self.fail("time out")
driver.find_element_by_css_selector("#forms-table a").click()
    def check_timeout(self, message):
        # Per-iteration hook for the wait loops above; a no-op here, but kept
        # as the place to enforce a global deadline if one is needed.
        pass

    def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
except NoSuchElementException: return False
return True
def is_alert_present(self):
try: self.driver.switch_to_alert()
except NoAlertPresentException: return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
| kobotoolbox/kobo_selenium_tests | kobo_selenium_tests/selenium_ide_exported/download_entered_data_test.py | Python | gpl-3.0 | 3,987 |
########################################################################
# File : ARCComputingElement.py
# Author : A.T.
########################################################################
""" ARC Computing Element
"""
__RCSID__ = "58c42fc (2013-07-07 22:54:57 +0200) Andrei Tsaregorodtsev <[email protected]>"
import os
import stat
import tempfile
from types import StringTypes
from DIRAC import S_OK, S_ERROR
from DIRAC.Resources.Computing.ComputingElement import ComputingElement
from DIRAC.Core.Utilities.Grid import executeGridCommand
CE_NAME = 'ARC'
MANDATORY_PARAMETERS = [ 'Queue' ]
class ARCComputingElement( ComputingElement ):
#############################################################################
def __init__( self, ceUniqueID ):
""" Standard constructor.
"""
ComputingElement.__init__( self, ceUniqueID )
self.ceType = CE_NAME
self.submittedJobs = 0
self.mandatoryParameters = MANDATORY_PARAMETERS
self.pilotProxy = ''
self.queue = ''
self.outputURL = 'gsiftp://localhost'
self.gridEnv = ''
self.ceHost = self.ceName
if 'Host' in self.ceParameters:
self.ceHost = self.ceParameters['Host']
if 'GridEnv' in self.ceParameters:
self.gridEnv = self.ceParameters['GridEnv']
#############################################################################
def _addCEConfigDefaults( self ):
"""Method to make sure all necessary Configuration Parameters are defined
"""
# First assure that any global parameters are loaded
ComputingElement._addCEConfigDefaults( self )
def __writeXRSL( self, executableFile ):
""" Create the JDL for submission
"""
workingDirectory = self.ceParameters['WorkingDirectory']
fd, name = tempfile.mkstemp( suffix = '.xrsl', prefix = 'ARC_', dir = workingDirectory )
diracStamp = os.path.basename( name ).replace( '.xrsl', '' ).replace( 'ARC_', '' )
xrslFile = os.fdopen( fd, 'w' )
xrsl = """
&(executable="%(executable)s")
(inputFiles=(%(executable)s "%(executableFile)s"))
(stdout="%(diracStamp)s.out")
(stderr="%(diracStamp)s.err")
(outputFiles=("%(diracStamp)s.out" "") ("%(diracStamp)s.err" ""))
""" % {
'executableFile':executableFile,
'executable':os.path.basename( executableFile ),
'diracStamp':diracStamp
}
xrslFile.write( xrsl )
xrslFile.close()
return name, diracStamp
def _reset( self ):
self.queue = self.ceParameters['Queue']
self.gridEnv = self.ceParameters['GridEnv']
#############################################################################
def submitJob( self, executableFile, proxy, numberOfJobs = 1 ):
""" Method to submit job
"""
self.log.verbose( "Executable file path: %s" % executableFile )
    if not os.access( executableFile, os.R_OK | os.X_OK ):
      os.chmod( executableFile, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH )
batchIDList = []
stampDict = {}
i = 0
while i < numberOfJobs:
i += 1
xrslName, diracStamp = self.__writeXRSL( executableFile )
cmd = ['arcsub', '-j', self.ceParameters['JobListFile'],
'-c', '%s' % self.ceHost, '%s' % xrslName ]
result = executeGridCommand( self.proxy, cmd, self.gridEnv )
os.unlink( xrslName )
if not result['OK']:
break
if result['Value'][0] != 0:
break
pilotJobReference = result['Value'][1].strip()
if pilotJobReference and pilotJobReference.startswith('Job submitted with jobid:'):
pilotJobReference = pilotJobReference.replace('Job submitted with jobid:', '').strip()
batchIDList.append( pilotJobReference )
stampDict[pilotJobReference] = diracStamp
else:
break
#os.unlink( executableFile )
if batchIDList:
result = S_OK( batchIDList )
result['PilotStampDict'] = stampDict
else:
      result = S_ERROR('No pilot references obtained from the ARC job submission')
return result
def killJob( self, jobIDList ):
""" Kill the specified jobs
"""
workingDirectory = self.ceParameters['WorkingDirectory']
fd, name = tempfile.mkstemp( suffix = '.list', prefix = 'KillJobs_', dir = workingDirectory )
jobListFile = os.fdopen( fd, 'w' )
jobList = list( jobIDList )
if type( jobIDList ) in StringTypes:
jobList = [ jobIDList ]
for job in jobList:
jobListFile.write( job+'\n' )
cmd = ['arckill', '-c', self.ceHost, '-i', name]
result = executeGridCommand( self.proxy, cmd, self.gridEnv )
os.unlink( name )
if not result['OK']:
return result
if result['Value'][0] != 0:
      return S_ERROR( 'Failed kill job: %s' % result['Value'][2] )
return S_OK()
#############################################################################
def getCEStatus( self ):
""" Method to return information on running and pending jobs.
"""
cmd = ['arcstat', '-c', self.ceHost, '-j', self.ceParameters['JobListFile'] ]
result = executeGridCommand( self.proxy, cmd, self.gridEnv )
resultDict = {}
if not result['OK']:
return result
if result['Value'][0] == 1 and result['Value'][1] == "No jobs\n":
result = S_OK()
result['RunningJobs'] = 0
result['WaitingJobs'] = 0
result['SubmittedJobs'] = 0
return result
if result['Value'][0]:
if result['Value'][2]:
return S_ERROR(result['Value'][2])
else:
return S_ERROR('Error while interrogating CE status')
if result['Value'][1]:
resultDict = self.__parseJobStatus( result['Value'][1] )
running = 0
waiting = 0
for ref in resultDict:
status = resultDict[ref]
if status == 'Scheduled':
waiting += 1
if status == 'Running':
running += 1
result = S_OK()
result['RunningJobs'] = running
result['WaitingJobs'] = waiting
result['SubmittedJobs'] = 0
return result
def __parseJobStatus( self, commandOutput ):
"""
"""
resultDict = {}
lines = commandOutput.split('\n')
ln = 0
while ln < len( lines ):
if lines[ln].startswith( 'Job:' ):
jobRef = lines[ln].split()[1]
ln += 1
line = lines[ln].strip()
stateARC = ''
if line.startswith( 'State' ):
stateARC = line.replace( 'State:','' ).strip()
line = lines[ln+1].strip()
exitCode = None
if line.startswith( 'Exit Code' ):
line = line.replace( 'Exit Code:','' ).strip()
exitCode = int( line )
# Evaluate state now
if stateARC in ['Accepted','Preparing','Submitting','Queuing','Hold']:
resultDict[jobRef] = "Scheduled"
elif stateARC in ['Running','Finishing']:
resultDict[jobRef] = "Running"
elif stateARC in ['Killed','Deleted']:
resultDict[jobRef] = "Killed"
elif stateARC in ['Finished','Other']:
if exitCode is not None:
if exitCode == 0:
resultDict[jobRef] = "Done"
else:
resultDict[jobRef] = "Failed"
else:
resultDict[jobRef] = "Failed"
elif stateARC in ['Failed']:
resultDict[jobRef] = "Failed"
else:
self.log.warn( "Unknown state %s for job %s" % ( stateARC, jobRef ) )
elif lines[ln].startswith( "WARNING: Job information not found:" ):
jobRef = lines[ln].replace( 'WARNING: Job information not found:', '' ).strip()
resultDict[jobRef] = "Scheduled"
ln += 1
return resultDict
def getJobStatus( self, jobIDList ):
""" Get the status information for the given list of jobs
"""
workingDirectory = self.ceParameters['WorkingDirectory']
fd, name = tempfile.mkstemp( suffix = '.list', prefix = 'StatJobs_', dir = workingDirectory )
jobListFile = os.fdopen( fd, 'w' )
jobTmpList = list( jobIDList )
if type( jobIDList ) in StringTypes:
jobTmpList = [ jobIDList ]
jobList = []
for j in jobTmpList:
if ":::" in j:
job = j.split(":::")[0]
else:
job = j
jobList.append( job )
jobListFile.write( job+'\n' )
cmd = ['arcstat', '-c', self.ceHost, '-i', name, '-j', self.ceParameters['JobListFile']]
result = executeGridCommand( self.proxy, cmd, self.gridEnv )
os.unlink( name )
resultDict = {}
if not result['OK']:
self.log.error( 'Failed to get job status', result['Message'] )
return result
if result['Value'][0]:
if result['Value'][2]:
return S_ERROR(result['Value'][2])
else:
return S_ERROR('Error while interrogating job statuses')
if result['Value'][1]:
resultDict = self.__parseJobStatus( result['Value'][1] )
if not resultDict:
return S_ERROR('No job statuses returned')
# If CE does not know about a job, set the status to Unknown
for job in jobList:
      if job not in resultDict:
resultDict[job] = 'Unknown'
return S_OK( resultDict )
def getJobOutput( self, jobID, localDir = None ):
""" Get the specified job standard output and error files. If the localDir is provided,
the output is returned as file in this directory. Otherwise, the output is returned
as strings.
"""
if jobID.find( ':::' ) != -1:
pilotRef, stamp = jobID.split( ':::' )
else:
pilotRef = jobID
stamp = ''
if not stamp:
return S_ERROR( 'Pilot stamp not defined for %s' % pilotRef )
arcID = os.path.basename(pilotRef)
if "WorkingDirectory" in self.ceParameters:
workingDirectory = os.path.join( self.ceParameters['WorkingDirectory'], arcID )
else:
workingDirectory = arcID
outFileName = os.path.join( workingDirectory, '%s.out' % stamp )
errFileName = os.path.join( workingDirectory, '%s.err' % stamp )
cmd = ['arcget', '-j', self.ceParameters['JobListFile'], pilotRef ]
result = executeGridCommand( self.proxy, cmd, self.gridEnv )
output = ''
if result['OK']:
if not result['Value'][0]:
outFile = open( outFileName, 'r' )
output = outFile.read()
outFile.close()
os.unlink( outFileName )
errFile = open( errFileName, 'r' )
error = errFile.read()
errFile.close()
os.unlink( errFileName )
else:
error = '\n'.join( result['Value'][1:] )
return S_ERROR( error )
else:
return S_ERROR( 'Failed to retrieve output for %s' % jobID )
return S_OK( ( output, error ) )
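
# Minimal usage sketch (hypothetical CE host and parameters; a valid grid
# proxy and the ARC client tools are assumed, which this module does not
# provide):
#
#   ce = ARCComputingElement( 'arc.example.org' )
#   ce.ceParameters['Queue'] = 'grid'
#   ce.ceParameters['JobListFile'] = '/tmp/arc_jobs.list'
#   result = ce.getCEStatus()
#   if result['OK']:
#     print result['RunningJobs'], result['WaitingJobs']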
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
| miloszz/DIRAC | Resources/Computing/ARCComputingElement.py | Python | gpl-3.0 | 10,864 |
#!/usr/bin/env python3
import re
a = [[0 for x in range(25)] for y in range(13)]
f=open("../distrib/spiral.txt","r")
s=f.readline().strip()
# Walk the 13x25 grid in an inward spiral, laying the characters of the
# input string down one by one; direction cycles right, down, left, up.
dx, dy = [0, 1, 0, -1], [1, 0, -1, 0]
x, y = 0, -1
l = 0
for i in range(13+13-1):
    # Even legs traverse the long (25) dimension, odd legs the short (13) one.
    steps = (25+25-i)//2 if i % 2 == 0 else (13+13-i)//2
    for j in range(steps):
        x += dx[i % 4]
        y += dy[i % 4]
        a[x][y] = s[l]
        l = l+1
for i in a:
for k in i:
k=re.sub(r"¦","█",k)
k=re.sub(r"¯","▀",k)
k=re.sub(r"_","▄",k)
print(k,end="")
print()
| DISMGryphons/GryphonCTF2017-Challenges | challenges/misc/Spirals/solution/solution.py | Python | gpl-3.0 | 812 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantidqt package
#
from __future__ import (absolute_import, division, print_function, unicode_literals)
from os import path
from mantid import logger
class WorkspaceLoader(object):
@staticmethod
def load_workspaces(directory, workspaces_to_load):
"""
The method that is called to load in workspaces. From the given directory and the workspace names provided.
:param directory: String or string castable object; The project directory
:param workspaces_to_load: List of Strings; of the workspaces to load
"""
if workspaces_to_load is None:
return
from mantid.simpleapi import Load # noqa
for workspace in workspaces_to_load:
try:
Load(path.join(directory, (workspace + ".nxs")), OutputWorkspace=workspace)
except Exception:
logger.warning("Couldn't load file in project: " + workspace + ".nxs")
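
# Minimal usage sketch (hypothetical directory and names), assuming a
# configured Mantid environment:
#
#   WorkspaceLoader.load_workspaces('/tmp/my_project', ['ws1', 'ws2'])
#
# Each name is loaded from '<directory>/<name>.nxs' into a workspace of the
# same name; files that fail to load are logged and skipped.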
| mganeva/mantid | qt/python/mantidqt/project/workspaceloader.py | Python | gpl-3.0 | 1,243 |
#!python
"""Script for plotting distributions of epitopes per site for two sets of sites.
Uses matplotlib. Designed to analyze output of epitopefinder_getepitopes.py.
Written by Jesse Bloom."""
import os
import sys
import random
import epitopefinder.io
import epitopefinder.plot
def main():
"""Main body of script."""
random.seed(1) # seed random number generator in case P values are being computed
if not epitopefinder.plot.PylabAvailable():
raise ImportError("Cannot import matplotlib / pylab, which are required by this script.")
# output is written to out, currently set to standard out
out = sys.stdout
out.write("Beginning execution of epitopefinder_plotdistributioncomparison.py\n")
# read input file and parse arguments
args = sys.argv[1 : ]
if len(args) != 1:
raise IOError("Script must be called with exactly one argument specifying the input file")
infilename = sys.argv[1]
if not os.path.isfile(infilename):
raise IOError("Failed to find infile %s" % infilename)
d = epitopefinder.io.ParseInfile(open(infilename))
out.write("\nRead input arguments from %s\n" % infilename)
out.write('Read the following key / value pairs:\n')
for (key, value) in d.iteritems():
out.write("%s %s\n" % (key, value))
plotfile = epitopefinder.io.ParseStringValue(d, 'plotfile').strip()
epitopesbysite1_list = []
epitopesbysite2_list = []
for (xlist, xf) in [(epitopesbysite1_list, 'epitopesfile1'), (epitopesbysite2_list, 'epitopesfile2')]:
epitopesfile = epitopefinder.io.ParseFileList(d, xf)
if len(epitopesfile) != 1:
raise ValueError("%s specifies more than one file" % xf)
epitopesfile = epitopesfile[0]
for line in open(epitopesfile).readlines()[1 : ]:
if not (line.isspace() or line[0] == '#'):
(site, n) = line.split(',')
(site, n) = (int(site), int(n))
xlist.append(n)
if not xlist:
raise ValueError("%s failed to specify information for any sites" % xf)
set1name = epitopefinder.io.ParseStringValue(d, 'set1name')
set2name = epitopefinder.io.ParseStringValue(d, 'set2name')
title = epitopefinder.io.ParseStringValue(d, 'title').strip()
if title.upper() in ['NONE', 'FALSE']:
title = None
pvalue = epitopefinder.io.ParseStringValue(d, 'pvalue')
if pvalue.upper() in ['NONE', 'FALSE']:
pvalue = None
pvaluewithreplacement = None
else:
pvalue = int(pvalue)
pvaluewithreplacement = epitopefinder.io.ParseBoolValue(d, 'pvaluewithreplacement')
if pvalue < 1:
raise ValueError("pvalue must be >= 1")
if len(epitopesbysite2_list) >= len(epitopesbysite1_list):
raise ValueError("You cannot use pvalue since epitopesbysite2_list is not a subset of epitopesbysite1_list -- it does not contain fewer sites with specified epitope counts.")
ymax = None
if 'ymax' in d:
ymax = epitopefinder.io.ParseFloatValue(d, 'ymax')
out.write('\nNow creating the plot file %s\n' % plotfile)
epitopefinder.plot.PlotDistributionComparison(epitopesbysite1_list, epitopesbysite2_list, set1name, set2name, plotfile, 'number of epitopes', 'fraction of sites', title, pvalue, pvaluewithreplacement, ymax=ymax)
out.write("\nScript is complete.\n")
if __name__ == '__main__':
main() # run the script
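
# Example input file (hypothetical names; one whitespace-separated key/value
# pair per line, matching the keys parsed in main() above):
#
#   epitopesfile1 epitopesbysite1.csv
#   epitopesfile2 epitopesbysite2.csv
#   set1name set1
#   set2name set2
#   plotfile comparison.pdf
#   title None
#   pvalue None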
| jbloom/epitopefinder | scripts/epitopefinder_plotdistributioncomparison.py | Python | gpl-3.0 | 3,447 |
mcinif='mcini_gen2'
runname='gen_test2111b'
mcpick='gen_test2b.pickle'
pathdir='/beegfs/work/ka_oj4748/echoRD'
wdir='/beegfs/work/ka_oj4748/gen_tests'
update_prec=0.04
update_mf=False
update_part=500
import sys
sys.path.append(pathdir)
import run_echoRD as rE
rE.echoRD_job(mcinif=mcinif,mcpick=mcpick,runname=runname,wdir=wdir,pathdir=pathdir,update_prec=update_prec,update_mf=update_mf,update_part=update_part,hdf5pick=False)
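
# This driver hands the settings above to run_echoRD.echoRD_job: mcinif/mcpick
# select the model setup, runname labels the outputs under wdir, and
# update_prec/update_part adjust the precipitation scaling and particle count
# for this run.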
| cojacoo/testcases_echoRD | gen_test2111b.py | Python | gpl-3.0 | 430 |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 18 14:11:31 2015
@author: Martin Friedl
"""
from datetime import date
import numpy as np
from Patterns.GrowthTheoryCell import make_theory_cell
from Patterns.GrowthTheoryCell_100_3BranchDevices import make_theory_cell_3br
from Patterns.GrowthTheoryCell_100_4BranchDevices import make_theory_cell_4br
from gdsCAD_py3.core import Cell, Boundary, CellArray, Layout, Path
from gdsCAD_py3.shapes import Box, Rectangle, Label
from gdsCAD_py3.templates100 import Wafer_GridStyle, dashed_line
WAFER_ID = 'XXXX' # CHANGE THIS FOR EACH DIFFERENT WAFER
PATTERN = 'SQ1.2'
putOnWafer = True # Output full wafer or just a single pattern?
HighDensity = False # High density of triangles?
glbAlignmentMarks = False
tDicingMarks = 10. # Dicing mark line thickness (um)
rotAngle = 0. # Rotation angle of the membranes
wafer_r = 25e3
waferVer = '100 Membranes Multi-Use v1.2'
waferLabel = waferVer + '\n' + date.today().strftime("%d%m%Y")
# Layers
l_smBeam = 0
l_lgBeam = 1
l_drawing = 100
# %% Wafer template for MBE growth
class MBE100Wafer(Wafer_GridStyle):
"""
A 2" wafer divided into square cells
"""
def __init__(self, name, cells=None):
Wafer_GridStyle.__init__(self, name=name, cells=cells, block_gap=1200.)
# The placement of the wafer alignment markers
am_x = 1.5e4
am_y = 1.5e4
self.align_pts = np.array([am_x, am_y])
self.align_pts = np.vstack((self.align_pts, self.align_pts *
(-1, 1))) # Reflect about y-axis
self.align_pts = np.vstack((self.align_pts, self.align_pts *
(1, -1))) # Reflect about x-axis
self.wafer_r = 25e3
self.block_size = np.array([10e3, 10e3])
self._place_blocks(radius=self.wafer_r + 5e3)
# if glbAlignmentMarks:
# self.add_aligment_marks(l_lgBeam)
# self.add_orientation_text(l_lgBeam)
# self.add_dicing_marks() # l_lgBeam, mkWidth=mkWidth Width of dicing marks
self.add_blocks()
self.add_wafer_outline(layers=l_drawing)
self.add_dashed_dicing_marks(layers=[l_lgBeam])
self.add_block_labels(layers=[l_lgBeam])
self.add_prealignment_markers(layers=[l_lgBeam])
self.add_tem_membranes([0.08, 0.012, 0.028, 0.044], 2000, 1, l_smBeam)
self.add_theory_cells()
self.add_chip_labels()
# self.add_blockLabels(l_lgBeam)
# self.add_cellLabels(l_lgBeam)
bottom = np.array([0, -self.wafer_r * 0.9])
# top = np.array([0, -1]) * bottom
self.add_waferLabel(waferLabel, l_drawing, pos=bottom)
def add_block_labels(self, layers):
txtSize = 800
for (i, pt) in enumerate(self.block_pts):
origin = (pt + np.array([0.5, 0.5])) * self.block_size
blk_lbl = self.blockcols[pt[0]] + self.blockrows[pt[1]]
for l in layers:
txt = Label(blk_lbl, txtSize, layer=l)
bbox = txt.bounding_box
offset = np.array(pt)
txt.translate(-np.mean(bbox, 0)) # Center text around origin
lbl_cell = Cell("lbl_" + blk_lbl)
lbl_cell.add(txt)
origin += np.array([0, 0])
self.add(lbl_cell, origin=origin)
def add_dashed_dicing_marks(self, layers):
if type(layers) is not list:
layers = [layers]
width = 10. / 2
dashlength = 2000
r = self.wafer_r
rng = np.floor(self.wafer_r / self.block_size).astype(int)
dmarks = Cell('DIC_MRKS')
for l in layers:
for x in np.arange(-rng[0], rng[0] + 1) * self.block_size[0]:
y = np.sqrt(r ** 2 - x ** 2)
vm = dashed_line([x, y], [x, -y], dashlength, width, layer=l)
dmarks.add(vm)
for y in np.arange(-rng[1], rng[1] + 1) * self.block_size[1]:
x = np.sqrt(r ** 2 - y ** 2)
hm = dashed_line([x, y], [-x, y], dashlength, width, layer=l)
dmarks.add(hm)
self.add(dmarks)
def add_prealignment_markers(self, layers, mrkr_size=7):
if mrkr_size % 2 == 0: # Number is even, but we need odd numbers
mrkr_size += 1
if type(layers) is not list:
layers = [layers]
for l in layers:
rect_size = 10. # 10 um large PAMM rectangles
marker_rect = Rectangle([-rect_size / 2., -rect_size / 2.], [rect_size / 2., rect_size / 2.], layer=l)
marker = Cell('10umMarker')
marker.add(marker_rect)
# Make one arm of the PAMM array
marker_arm = Cell('PAMM_Arm')
# Define the positions of the markers, they increase in spacing by 1 um each time:
mrkr_positions = [75 * n + (n - 1) * n // 2 for n in range(1, (mrkr_size - 1) // 2 + 1)]
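            # e.g. n = 1, 2, 3 -> 75, 151, 228 um: each successive gap grows by 1 um.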
for pos in mrkr_positions:
marker_arm.add(marker, origin=[pos, 0])
# Build the final PAMM Marker
pamm_cell = Cell('PAMM_Marker')
pamm_cell.add(marker) # Center marker
pamm_cell.add(marker_arm) # Right arm
pamm_cell.add(marker_arm, rotation=180) # Left arm
pamm_cell.add(marker_arm, rotation=90) # Top arm
pamm_cell.add(marker_arm, rotation=-90) # Bottom arm
for pos in mrkr_positions:
pamm_cell.add(marker_arm, origin=[pos, 0], rotation=90) # Top arms
pamm_cell.add(marker_arm, origin=[-pos, 0], rotation=90)
pamm_cell.add(marker_arm, origin=[pos, 0], rotation=-90) # Bottom arms
pamm_cell.add(marker_arm, origin=[-pos, 0], rotation=-90)
# Make the 4 tick marks that mark the center of the array
h = 30.
w = 100.
tick_mrk = Rectangle([-w / 2., -h / 2.], [w / 2, h / 2.], layer=l)
tick_mrk_cell = Cell("TickMark")
tick_mrk_cell.add(tick_mrk)
pos = mrkr_positions[-1] + 75 + w / 2.
pamm_cell.add(tick_mrk_cell, origin=[pos, 0])
pamm_cell.add(tick_mrk_cell, origin=[-pos, 0])
pamm_cell.add(tick_mrk_cell, origin=[0, pos], rotation=90)
pamm_cell.add(tick_mrk_cell, origin=[0, -pos], rotation=90)
center_x, center_y = (5000, 5000)
for block in self.blocks:
block.add(pamm_cell, origin=(center_x + 2000, center_y))
block.add(pamm_cell, origin=(center_x - 2000, center_y))
def add_tem_membranes(self, widths, length, pitch, layer):
tem_membranes = Cell('TEM_Membranes')
n = 5
curr_y = 0
for width in widths:
membrane = Path([(-length / 2., 0), (length / 2., 0)], width=width, layer=layer)
membrane_cell = Cell('Membrane_w{:.0f}'.format(width * 1000))
membrane_cell.add(membrane)
membrane_array = CellArray(membrane_cell, 1, n, (0, pitch))
membrane_array_cell = Cell('MembraneArray_w{:.0f}'.format(width * 1000))
membrane_array_cell.add(membrane_array)
tem_membranes.add(membrane_array_cell, origin=(0, curr_y))
curr_y += n * pitch
n2 = 3
tem_membranes2 = Cell('Many_TEM_Membranes')
tem_membranes2.add(CellArray(tem_membranes, 1, n2, (0, n * len(widths) * pitch)))
center_x, center_y = (5000, 5000)
for block in self.blocks:
block.add(tem_membranes2, origin=(center_x, center_y + 2000))
def add_theory_cells(self):
theory_cells = Cell('TheoryCells')
theory_cells.add(make_theory_cell(wafer_orient='100'), origin=(-400, 0))
theory_cells.add(make_theory_cell_3br(), origin=(0, 0))
theory_cells.add(make_theory_cell_4br(), origin=(400, 0))
center_x, center_y = (5000, 5000)
for block in self.blocks:
block.add(theory_cells, origin=(center_x, center_y - 2000))
def add_chip_labels(self):
wafer_lbl = PATTERN + '\n' + WAFER_ID
text = Label(wafer_lbl, 20., layer=l_lgBeam)
text.translate(tuple(np.array(-text.bounding_box.mean(0)))) # Center justify label
chip_lbl_cell = Cell('chip_label')
chip_lbl_cell.add(text)
center_x, center_y = (5000, 5000)
for block in self.blocks:
block.add(chip_lbl_cell, origin=(center_x, center_y - 2850))
class Frame(Cell):
"""
    Make a frame for writing to with e-beam lithography

    Params:
    -name: the name of the frame, just like when naming a cell
    -size: the size of the frame as an array [xsize, ysize]
    -border_layers: the layer(s) on which the frame border is drawn
"""
def __init__(self, name, size, border_layers):
if not (type(border_layers) == list):
border_layers = [border_layers]
Cell.__init__(self, name)
self.size_x, self.size_y = size
# Create the border of the cell
for l in border_layers:
self.border = Box(
(-self.size_x / 2., -self.size_y / 2.),
(self.size_x / 2., self.size_y / 2.),
1,
layer=l)
self.add(self.border) # Add border to the frame
self.align_markers = None
def make_align_markers(self, t, w, position, layers, joy_markers=False, camps_markers=False):
if not (type(layers) == list):
layers = [layers]
top_mk_cell = Cell('AlignmentMark')
for l in layers:
if not joy_markers:
am0 = Rectangle((-w / 2., -w / 2.), (w / 2., w / 2.), layer=l)
rect_mk_cell = Cell("RectMarker")
rect_mk_cell.add(am0)
top_mk_cell.add(rect_mk_cell)
elif joy_markers:
crosspts = [(0, 0), (w / 2., 0), (w / 2., t), (t, t), (t, w / 2), (0, w / 2), (0, 0)]
crosspts.extend(tuple(map(tuple, (-np.array(crosspts)).tolist())))
am0 = Boundary(crosspts, layer=l) # Create gdsCAD shape
joy_mk_cell = Cell("JOYMarker")
joy_mk_cell.add(am0)
top_mk_cell.add(joy_mk_cell)
if camps_markers:
emw = 20. # 20 um e-beam marker width
camps_mk = Rectangle((-emw / 2., -emw / 2.), (emw / 2., emw / 2.), layer=l)
camps_mk_cell = Cell("CAMPSMarker")
camps_mk_cell.add(camps_mk)
top_mk_cell.add(camps_mk_cell, origin=[100., 100.])
top_mk_cell.add(camps_mk_cell, origin=[100., -100.])
top_mk_cell.add(camps_mk_cell, origin=[-100., 100.])
top_mk_cell.add(camps_mk_cell, origin=[-100., -100.])
self.align_markers = Cell("AlignMarkers")
self.align_markers.add(top_mk_cell, origin=np.array(position) * np.array([1, -1]))
self.align_markers.add(top_mk_cell, origin=np.array(position) * np.array([-1, -1]))
self.align_markers.add(top_mk_cell, origin=np.array(position) * np.array([1, 1]))
self.align_markers.add(top_mk_cell, origin=np.array(position) * np.array([-1, 1]))
self.add(self.align_markers)
    def make_slit_array(self, _pitches, spacing, _widths, _lengths, rot_angle,
                        array_height, array_width, array_spacing, layers):
        """Make labelled arrays of slits covering each width/length combination."""
if not (type(layers) == list):
layers = [layers]
if not (type(_pitches) == list):
_pitches = [_pitches]
if not (type(_lengths) == list):
_lengths = [_lengths]
if not (type(_widths) == list):
_widths = [_widths]
manyslits = i = j = None
for l in layers:
i = -1
j = -1
manyslits = Cell("SlitArray")
pitch = _pitches[0]
for length in _lengths:
j += 1
i = -1
for width in _widths:
# for pitch in pitches:
i += 1
if i % 3 == 0:
j += 1 # Move to array to next line
i = 0 # Restart at left
pitch_v = pitch / np.cos(np.deg2rad(rot_angle))
# widthV = width / np.cos(np.deg2rad(rotAngle))
nx = int(array_width / (length + spacing))
ny = int(array_height / pitch_v)
# Define the slits
slit = Cell("Slits")
rect = Rectangle((-length / 2., -width / 2.), (length / 2., width / 2.), layer=l)
rect = rect.copy().rotate(rot_angle)
slit.add(rect)
slits = CellArray(slit, nx, ny, (length + spacing, pitch_v))
slits.translate((-(nx - 1) * (length + spacing) / 2., -(ny - 1) * pitch_v / 2.))
slit_array = Cell("SlitArray")
slit_array.add(slits)
text = Label('w/p/l\n%i/%i/%i' % (width * 1000, pitch, length), 5, layer=l)
lbl_vertical_offset = 1.35
if j % 2 == 0:
text.translate(
tuple(np.array(-text.bounding_box.mean(0)) + np.array((
0, -array_height / lbl_vertical_offset)))) # Center justify label
else:
text.translate(
tuple(np.array(-text.bounding_box.mean(0)) + np.array((
0, array_height / lbl_vertical_offset)))) # Center justify label
slit_array.add(text)
manyslits.add(slit_array,
origin=((array_width + array_spacing) * i, (
array_height + 2. * array_spacing) * j - array_spacing / 2.))
self.add(manyslits,
origin=(-i * (array_width + array_spacing) / 2, -(j + 1.5) * (
array_height + array_spacing) / 2))
# %%Create the pattern that we want to write
lgField = Frame("LargeField", (2000., 2000.), []) # Create the large write field
lgField.make_align_markers(20., 200., (850., 850.), l_lgBeam, joy_markers=True, camps_markers=True)
# Define parameters that we will use for the slits
widths = [0.004, 0.008, 0.012, 0.016, 0.028, 0.044]
pitches = [1.0, 2.0]
lengths = [10., 20.]
smFrameSize = 400
slitColumnSpacing = 3.
# Create the smaller write field and corresponding markers
smField1 = Frame("SmallField1", (smFrameSize, smFrameSize), [])
smField1.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True)
smField1.make_slit_array(pitches[0], slitColumnSpacing, widths, lengths[0], rotAngle, 100, 100, 30, l_smBeam)
smField2 = Frame("SmallField2", (smFrameSize, smFrameSize), [])
smField2.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True)
smField2.make_slit_array(pitches[0], slitColumnSpacing, widths, lengths[1], rotAngle, 100, 100, 30, l_smBeam)
smField3 = Frame("SmallField3", (smFrameSize, smFrameSize), [])
smField3.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True)
smField3.make_slit_array(pitches[1], slitColumnSpacing, widths, lengths[0], rotAngle, 100, 100, 30, l_smBeam)
smField4 = Frame("SmallField4", (smFrameSize, smFrameSize), [])
smField4.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True)
smField4.make_slit_array(pitches[1], slitColumnSpacing, widths, lengths[1], rotAngle, 100, 100, 30, l_smBeam)
centerAlignField = Frame("CenterAlignField", (smFrameSize, smFrameSize), [])
centerAlignField.make_align_markers(2., 20., (180., 180.), l_lgBeam, joy_markers=True)
# Add everything together to a top cell
topCell = Cell("TopCell")
topCell.add(lgField)
smFrameSpacing = 400 # Spacing between the three small frames
dx = smFrameSpacing + smFrameSize
dy = smFrameSpacing + smFrameSize
topCell.add(smField1, origin=(-dx / 2., dy / 2.))
topCell.add(smField2, origin=(dx / 2., dy / 2.))
topCell.add(smField3, origin=(-dx / 2., -dy / 2.))
topCell.add(smField4, origin=(dx / 2., -dy / 2.))
topCell.add(centerAlignField, origin=(0., 0.))
topCell.spacing = np.array([4000., 4000.])
# %%Create the layout and output GDS file
layout = Layout('LIBRARY')
if putOnWafer: # Fit as many patterns on a 2inch wafer as possible
wafer = MBE100Wafer('MembranesWafer', cells=[topCell])
layout.add(wafer)
# layout.show()
else: # Only output a single copy of the pattern (not on a wafer)
layout.add(topCell)
layout.show()
filestring = str(waferVer) + '_' + WAFER_ID + '_' + date.today().strftime("%d%m%Y") + ' dMark' + str(tDicingMarks)
filename = filestring.replace(' ', '_') + '.gds'
layout.save(filename)
cell_layout = Layout('LIBRARY')
cell_layout.add(wafer.blocks[0])
cell_layout.save(filestring.replace(' ', '_') + '_block' + '.gds')
# Output up chip for doing aligned jobs
layout_field = Layout('LIBRARY')
layout_field.add(topCell)
layout_field.save(filestring.replace(' ', '_') + '_2mmField.gds') | Martin09/E-BeamPatterns | 100 Wafers - 1cm Squares/Multi-Use Pattern/v1.2/MembraneDesign_100Wafer_v1.1.py | Python | gpl-3.0 | 17,018 |