code | repo_name | path | language | license | size
---|---|---|---|---|---|
from rest_framework import serializers

from .models import CustomerWallet


class CustomerWalletSerializer(serializers.HyperlinkedModelSerializer):
    class Meta:
        model = CustomerWallet
        fields = ("wallet_id", "msisdn", "balance", "type", "status")
| kyrelos/vitelco-mobile-money-wallet | app_dir/customer_wallet_management/serializers.py | Python | gpl-3.0 | 268 |
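The serializer above only becomes useful once it is attached to a view; below is a minimal sketch of one way to expose it through a DRF `ModelViewSet` and router. The viewset name and the `wallets` URL prefix are assumptions for illustration, not part of the original repository.

```python
# Hypothetical wiring for CustomerWalletSerializer (names and URL prefix are assumptions).
from rest_framework import routers, viewsets

from .models import CustomerWallet
from .serializers import CustomerWalletSerializer


class CustomerWalletViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for wallets, serialized with CustomerWalletSerializer."""
    queryset = CustomerWallet.objects.all()
    serializer_class = CustomerWalletSerializer


router = routers.DefaultRouter()
router.register(r'wallets', CustomerWalletViewSet)
# A urls.py would then include router.urls.
```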
#!/usr/bin/env python2
import urllib2
import urllib
from BeautifulSoup import BeautifulSoup
import smtplib
import ConfigParser

# Retrieve user information
config = ConfigParser.ConfigParser()
config.read('config.cfg')

user = config.get('data', 'user')
password = config.get('data', 'password')
fromaddr = config.get('data', 'fromaddr')
toaddr = config.get('data', 'toaddr')
smtpserver = config.get('data', 'smtp_server')

login_page = 'https://bugs.archlinux.org/index.php?do=authenticate'

# Create message
msg = "To: %s \nFrom: %s \nSubject: Bug Mail\n\n" % (toaddr, fromaddr)
msg += 'Unassigned bugs \n\n'

# build opener with HTTPCookieProcessor and log in to the bugtracker
o = urllib2.build_opener(urllib2.HTTPCookieProcessor())
urllib2.install_opener(o)
p = urllib.urlencode({'user_name': user, 'password': password, 'remember_login': 'on'})
f = o.open(login_page, p)
data = f.read()

# Archlinux
url = "https://bugs.archlinux.org/index.php?string=&project=1&search_name=&type%5B%5D=&sev%5B%5D=&pri%5B%5D=&due%5B%5D=0&reported%5B%5D=&cat%5B%5D=&status%5B%5D=1&percent%5B%5D=&opened=&dev=&closed=&duedatefrom=&duedateto=&changedfrom=&changedto=&openedfrom=&openedto=&closedfrom=&closedto=&do=index"
# Community
url2 = "https://bugs.archlinux.org/index.php?string=&project=5&search_name=&type%5B%5D=&sev%5B%5D=&pri%5B%5D=&due%5B%5D=0&reported%5B%5D=&cat%5B%5D=&status%5B%5D=1&percent%5B%5D=&opened=&dev=&closed=&duedatefrom=&duedateto=&changedfrom=&changedto=&openedfrom=&openedto=&closedfrom=&closedto=&do=index"


def parse_bugtrackerpage(url, count=1):
    print url
    # open bugtracker / parse
    page = urllib2.urlopen(url)
    soup = BeautifulSoup(page)
    data = soup.findAll('td', {'class': 'task_id'})
    msg = ""
    pages = False
    # Is there another page with unassigned bugs?
    if soup.findAll('a', {'id': 'next'}) == []:
        pages = False
    else:
        print soup.findAll('a', {'id': 'next'})
        count += 1
        pages = True
        print count
    # print all found bugs
    for f in data:
        title = f.a['title'].replace('Assigned |', '').replace('| 0%', '')
        msg += '* [https://bugs.archlinux.org/task/%s FS#%s] %s \n' % (f.a.string, f.a.string, title)
    if pages:
        new = "%s&pagenum=%s" % (url, count)
        msg += parse_bugtrackerpage(new, count)
    return msg


msg += '\n\nArchlinux: \n\n'
msg += parse_bugtrackerpage(url)
msg += '\n\nCommunity: \n\n'
msg += parse_bugtrackerpage(url2)
msg = msg.encode("utf8")

# send mail
server = smtplib.SMTP(smtpserver)
server.sendmail(fromaddr, toaddr, msg)
server.quit()
| jelly/Utils | unassignedbugs.py | Python | gpl-3.0 | 2,592 |
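The script above expects a `config.cfg` with a single `data` section holding the five keys it reads at startup. A small sketch that writes such a template with the same ConfigParser module follows; the placeholder values are assumptions, only the section and key names come from the script.

```python
# Sketch: generate a config.cfg template for unassignedbugs.py (values are placeholders).
import ConfigParser

template = ConfigParser.ConfigParser()
template.add_section('data')
template.set('data', 'user', 'bugtracker-login')
template.set('data', 'password', 'bugtracker-password')
template.set('data', 'fromaddr', 'bugs@example.org')
template.set('data', 'toaddr', 'maintainer@example.org')
template.set('data', 'smtp_server', 'localhost')

with open('config.cfg', 'w') as handle:
    template.write(handle)
```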
import os
import json
import collections
import datetime

from flask import Flask, request, current_app, make_response, session, escape, Response, jsonify
from flask_jwt_extended import JWTManager, jwt_required, create_access_token, get_jwt_identity
from flask_socketio import SocketIO
from neo4j.v1 import GraphDatabase, basic_auth
from lib.crossDomain import crossdomain
import simplekv.memory
import eventlet

# eventlet.monkey_patch()

# if sys.version_info < (3, 0):
#     sys.stdout.write("Sorry, requires Python 3.x, not Python 2.x\n")
#     sys.exit(1)

config = json.load(open('./config.json'))

# Init
UPLOAD_FOLDER = os.path.dirname(os.path.realpath(__file__)) + "/uploads"

x_socketio = SocketIO()


def create_app():
    app = Flask(__name__)
    app.debug = True
    app.config['SECRET_KEY'] = config['auth_secret']
    app.config['JWT_BLACKLIST_ENABLED'] = False
    app.config['JWT_BLACKLIST_STORE'] = simplekv.memory.DictStore()
    app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = 'all'
    app.config['JWT_ACCESS_TOKEN_EXPIRES'] = datetime.timedelta(minutes=15)
    app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER

    driver = GraphDatabase.driver(config['database_url'],
                                  auth=basic_auth(config['database_user'], config['database_pass']))
    db_session = driver.session()

    # start jwt service
    jwt = JWTManager(app)

    # Import blueprints
    from auth import auth_blueprint
    from banner import banner_blueprint
    from people import people_blueprint
    from organizations import organizations_blueprint
    from repos import repositories_blueprint
    from schema import schema_blueprint
    from data import data_blueprint
    from search import search_blueprint
    from upload import upload_blueprint
    from export import export_blueprint
    from list import list_blueprint
    from .sockets import sockets as socket_blueprint

    # register API modules
    app.register_blueprint(banner_blueprint)
    app.register_blueprint(auth_blueprint)
    app.register_blueprint(people_blueprint)
    app.register_blueprint(organizations_blueprint)
    app.register_blueprint(repositories_blueprint)
    app.register_blueprint(schema_blueprint)
    app.register_blueprint(search_blueprint)
    app.register_blueprint(data_blueprint)
    app.register_blueprint(upload_blueprint)
    app.register_blueprint(socket_blueprint)
    app.register_blueprint(export_blueprint)
    app.register_blueprint(list_blueprint)

    x_socketio.init_app(app)

    return app, jwt
| inquisite/Inquisite-Core | api/__init__.py | Python | gpl-3.0 | 2,483 |
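`create_app()` above reads only four keys from `config.json` and returns the app together with the `JWTManager`. Below is a hedged sketch of a matching config file and a possible entry point that serves the app through the module-level `SocketIO` instance; the values, host, and port are assumptions.

```python
# Hypothetical entry point for the Inquisite API factory above.
# config.json (placeholder values) would look roughly like:
# {
#     "auth_secret": "change-me",
#     "database_url": "bolt://localhost:7687",
#     "database_user": "neo4j",
#     "database_pass": "neo4j-password"
# }
from api import create_app, x_socketio  # assumes the package is importable as "api"

app, jwt = create_app()
x_socketio.run(app, host='0.0.0.0', port=5000)
```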
#!/usr/bin/python

import sys
sys.path.append('/var/www/html/valumodel.com/scripts/dcf')

from calc_dcf import calc_dcf


def create_dcf(req, tax_rate, growth_rate_1_year_out, sga_of_sales, da_of_sales, capex_of_sales,
               nwc_of_sales, levered_beta, current_yield, exit_multiple, ticker):
    assumptions = {}
    try:
        assumptions['Tax Rate'] = float(tax_rate) / 100.0
        assumptions['Growth Rate 1 year out'] = float(growth_rate_1_year_out) / 100.0
        assumptions['SGA % of sales'] = float(sga_of_sales) / 100.0
        assumptions['D&A % of sales'] = float(da_of_sales) / 100.0
        assumptions['CAPEX % of sales'] = float(capex_of_sales) / 100.0
        assumptions['NWC % of sales'] = float(nwc_of_sales) / 100.0
        assumptions['Levered Beta'] = float(levered_beta)
        assumptions['Current Yield'] = float(current_yield) / 100.0
        assumptions['Exit Multiple'] = float(exit_multiple)
    except ValueError:
        return '<!doctype html><html><body><h1>Invalid DCF Input. Please try again.</h1></body></html>'

    ticker = ticker.split(' ')[0]
    if not ticker.isalnum():
        return '<!doctype html><html><body><h1>Invalid Ticker. Please try again.</h1></body></html>'

    return calc_dcf(assumptions, ticker.upper())
| willmarkley/valumodel.com | scripts/dcf.py | Python | gpl-3.0 | 1,260 |
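A hedged usage sketch for `create_dcf`: the figures are arbitrary placeholders, and `req` is passed as `None` because the function body never uses it (it looks like a mod_python request object).

```python
# Hypothetical call to create_dcf; all numbers are placeholder assumptions.
html = create_dcf(
    None,  # req is unused inside the function
    tax_rate='21', growth_rate_1_year_out='5',
    sga_of_sales='20', da_of_sales='4', capex_of_sales='5', nwc_of_sales='2',
    levered_beta='1.1', current_yield='4', exit_multiple='8',
    ticker='AAPL',
)
print(html)
```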
import os
import re
import gettext
import locale
import threading  # libsearchfilter_toggle starts thread libsearchfilter_loop
import operator

import gtk
import gobject
import pango

import ui
import misc
import formatting
import mpdhelper as mpdh
from consts import consts
import breadcrumbs


def library_set_data(album=None, artist=None, genre=None, year=None,
                     path=None):
    if album is not None:
        album = unicode(album)
    if artist is not None:
        artist = unicode(artist)
    if genre is not None:
        genre = unicode(genre)
    if year is not None:
        year = unicode(year)
    if path is not None:
        path = unicode(path)
    return (album, artist, genre, year, path)


def library_get_data(data, *args):
    name_to_index = {'album': 0, 'artist': 1, 'genre': 2, 'year': 3, 'path': 4}
    # Data retrieved from the gtktreeview model is not in
    # unicode anymore, so convert it.
    retlist = [unicode(data[name_to_index[arg]]) if data[name_to_index[arg]]
               else None for arg in args]
    if len(retlist) == 1:
        return retlist[0]
    else:
        return retlist
class Library(object):
def __init__(self, config, mpd, artwork, TAB_LIBRARY, album_filename,
settings_save, filtering_entry_make_red,
filtering_entry_revert_color, filter_key_pressed,
on_add_item, connected, on_library_button_press, new_tab,
get_multicd_album_root_dir):
self.artwork = artwork
self.config = config
self.mpd = mpd
self.librarymenu = None # cyclic dependency, set later
self.album_filename = album_filename
self.settings_save = settings_save
self.filtering_entry_make_red = filtering_entry_make_red
self.filtering_entry_revert_color = filtering_entry_revert_color
self.filter_key_pressed = filter_key_pressed
self.on_add_item = on_add_item
self.connected = connected
self.on_library_button_press = on_library_button_press
self.get_multicd_album_root_dir = get_multicd_album_root_dir
self.NOTAG = _("Untagged")
self.VAstr = _("Various Artists")
self.search_terms = [_('Artist'), _('Title'), _('Album'), _('Genre'),
_('Filename'), _('Everything')]
self.search_terms_mpd = ['artist', 'title', 'album', 'genre', 'file',
'any']
self.libfilterbox_cmd_buf = None
self.libfilterbox_cond = None
self.libfilterbox_source = None
self.prevlibtodo_base = None
self.prevlibtodo_base_results = None
self.prevlibtodo = None
self.save_timeout = None
self.libsearch_last_tooltip = None
self.lib_view_filesystem_cache = None
self.lib_view_artist_cache = None
self.lib_view_genre_cache = None
self.lib_view_album_cache = None
self.lib_list_genres = None
self.lib_list_artists = None
self.lib_list_albums = None
self.lib_list_years = None
self.view_caches_reset()
self.libraryvbox = gtk.VBox()
self.library = ui.treeview()
self.library_selection = self.library.get_selection()
self.breadcrumbs = breadcrumbs.CrumbBox()
self.breadcrumbs.props.spacing = 2
expanderwindow2 = ui.scrollwindow(add=self.library)
self.searchbox = gtk.HBox()
self.searchcombo = ui.combo(items=self.search_terms)
self.searchcombo.set_tooltip_text(_("Search terms"))
self.searchtext = ui.entry()
self.searchtext.set_tooltip_text(_("Search library"))
self.searchbutton = ui.button(img=ui.image(stock=gtk.STOCK_CANCEL),
h=self.searchcombo.size_request()[1])
self.searchbutton.set_no_show_all(True)
self.searchbutton.hide()
self.searchbutton.set_tooltip_text(_("End Search"))
self.libraryview = ui.button(relief=gtk.RELIEF_NONE)
self.libraryview.set_tooltip_text(_("Library browsing view"))
# disabled as breadcrumbs replace this:
# self.searchbox.pack_start(self.libraryview, False, False, 1)
# self.searchbox.pack_start(gtk.VSeparator(), False, False, 2)
self.searchbox.pack_start(ui.label(_("Search:")), False, False, 3)
self.searchbox.pack_start(self.searchtext, True, True, 2)
self.searchbox.pack_start(self.searchcombo, False, False, 2)
self.searchbox.pack_start(self.searchbutton, False, False, 2)
self.libraryvbox.pack_start(self.breadcrumbs, False, False, 2)
self.libraryvbox.pack_start(expanderwindow2, True, True)
self.libraryvbox.pack_start(self.searchbox, False, False, 2)
self.tab = new_tab(self.libraryvbox, gtk.STOCK_HARDDISK, TAB_LIBRARY,
self.library)
# Assign some pixbufs for use in self.library
self.openpb2 = self.library.render_icon(gtk.STOCK_OPEN,
gtk.ICON_SIZE_LARGE_TOOLBAR)
self.harddiskpb2 = self.library.render_icon(gtk.STOCK_HARDDISK,
gtk.ICON_SIZE_LARGE_TOOLBAR)
self.openpb = self.library.render_icon(gtk.STOCK_OPEN,
gtk.ICON_SIZE_MENU)
self.harddiskpb = self.library.render_icon(gtk.STOCK_HARDDISK,
gtk.ICON_SIZE_MENU)
self.albumpb = gtk.gdk.pixbuf_new_from_file_at_size(
album_filename, consts.LIB_COVER_SIZE, consts.LIB_COVER_SIZE)
self.genrepb = self.library.render_icon('gtk-orientation-portrait',
gtk.ICON_SIZE_LARGE_TOOLBAR)
self.artistpb = self.library.render_icon('artist',
gtk.ICON_SIZE_LARGE_TOOLBAR)
self.sonatapb = self.library.render_icon('sonata', gtk.ICON_SIZE_MENU)
# list of the library views: (id, name, icon name, label)
self.VIEWS = [
(consts.VIEW_FILESYSTEM, 'filesystem',
gtk.STOCK_HARDDISK, _("Filesystem")),
(consts.VIEW_ALBUM, 'album',
'album', _("Albums")),
(consts.VIEW_ARTIST, 'artist',
'artist', _("Artists")),
(consts.VIEW_GENRE, 'genre',
gtk.STOCK_ORIENTATION_PORTRAIT, _("Genres")),
]
self.library_view_assign_image()
self.library.connect('row_activated', self.on_library_row_activated)
self.library.connect('button_press_event',
self.on_library_button_press)
self.library.connect('key-press-event', self.on_library_key_press)
self.library.connect('query-tooltip', self.on_library_query_tooltip)
expanderwindow2.connect('scroll-event', self.on_library_scrolled)
self.libraryview.connect('clicked', self.library_view_popup)
self.searchtext.connect('key-press-event',
self.libsearchfilter_key_pressed)
self.searchtext.connect('activate', self.libsearchfilter_on_enter)
self.searchbutton.connect('clicked', self.on_search_end)
self.libfilter_changed_handler = self.searchtext.connect(
'changed', self.libsearchfilter_feed_loop)
searchcombo_changed_handler = self.searchcombo.connect(
'changed', self.on_library_search_combo_change)
# Initialize library data and widget
self.libraryposition = {}
self.libraryselectedpath = {}
self.searchcombo.handler_block(searchcombo_changed_handler)
self.searchcombo.set_active(self.config.last_search_num)
self.searchcombo.handler_unblock(searchcombo_changed_handler)
self.librarydata = gtk.ListStore(gtk.gdk.Pixbuf, gobject.TYPE_PYOBJECT,
str)
self.library.set_model(self.librarydata)
self.library.set_search_column(2)
self.librarycell = gtk.CellRendererText()
self.librarycell.set_property("ellipsize", pango.ELLIPSIZE_END)
self.libraryimg = gtk.CellRendererPixbuf()
self.librarycolumn = gtk.TreeViewColumn()
self.librarycolumn.pack_start(self.libraryimg, False)
self.librarycolumn.pack_start(self.librarycell, True)
self.librarycolumn.set_attributes(self.libraryimg, pixbuf=0)
self.librarycolumn.set_attributes(self.librarycell, markup=2)
self.librarycolumn.set_sizing(gtk.TREE_VIEW_COLUMN_AUTOSIZE)
self.library.append_column(self.librarycolumn)
self.library_selection.set_mode(gtk.SELECTION_MULTIPLE)
def get_libraryactions(self):
return [(name + 'view', icon, label,
None, None, self.on_libraryview_chosen)
for _view, name, icon, label in self.VIEWS]
def get_model(self):
return self.librarydata
def get_widgets(self):
return self.libraryvbox
def get_treeview(self):
return self.library
def get_selection(self):
return self.library_selection
def set_librarymenu(self, librarymenu):
self.librarymenu = librarymenu
self.librarymenu.attach_to_widget(self.libraryview, None)
def library_view_popup(self, button):
self.librarymenu.popup(None, None, self.library_view_position_menu, 1,
0, button)
def library_view_position_menu(self, _menu, button):
x, y, _width, height = button.get_allocation()
return (self.config.x + x, self.config.y + y + height, True)
def on_libraryview_chosen(self, action):
if self.search_visible():
self.on_search_end(None)
if action.get_name() == 'filesystemview':
self.config.lib_view = consts.VIEW_FILESYSTEM
elif action.get_name() == 'artistview':
self.config.lib_view = consts.VIEW_ARTIST
elif action.get_name() == 'genreview':
self.config.lib_view = consts.VIEW_GENRE
elif action.get_name() == 'albumview':
self.config.lib_view = consts.VIEW_ALBUM
self.library.grab_focus()
self.library_view_assign_image()
self.libraryposition = {}
self.libraryselectedpath = {}
self.library_browse(self.library_set_data(path="/"))
try:
if len(self.librarydata) > 0:
self.library_selection.unselect_range((0,),
(len(self.librarydata)-1,))
except:
pass
gobject.idle_add(self.library.scroll_to_point, 0, 0)
def library_view_assign_image(self):
_view, _name, icon, label = [v for v in self.VIEWS
if v[0] == self.config.lib_view][0]
self.libraryview.set_image(ui.image(stock=icon))
self.libraryview.set_label(" " + label)
def view_caches_reset(self):
# We should call this on first load and whenever mpd is
# updated.
self.lib_view_filesystem_cache = None
self.lib_view_artist_cache = None
self.lib_view_genre_cache = None
self.lib_view_album_cache = None
self.lib_list_genres = None
self.lib_list_artists = None
self.lib_list_albums = None
self.lib_list_years = None
def on_library_scrolled(self, _widget, _event):
try:
# Use gobject.idle_add so that we can get the visible
# state of the treeview
gobject.idle_add(self._on_library_scrolled)
except:
pass
def _on_library_scrolled(self):
if not self.config.show_covers:
return
# This avoids a warning about a NULL node in get_visible_range
if not self.library.props.visible:
return
vis_range = self.library.get_visible_range()
if vis_range is None:
return
try:
start_row = int(vis_range[0][0])
end_row = int(vis_range[1][0])
except IndexError:
# get_visible_range failed
return
self.artwork.library_artwork_update(self.librarydata, start_row,
end_row, self.albumpb)
def library_browse(self, _widget=None, root=None):
# Populates the library list with entries
if not self.connected():
return
if root is None or (self.config.lib_view == consts.VIEW_FILESYSTEM \
and self.library_get_data(root, 'path') is None):
root = self.library_set_data(path="/")
if self.config.wd is None or (self.config.lib_view == \
consts.VIEW_FILESYSTEM and \
self.library_get_data(self.config.wd,
'path') is None):
self.config.wd = self.library_set_data(path="/")
prev_selection = []
prev_selection_root = False
prev_selection_parent = False
if root == self.config.wd:
# This will happen when the database is updated. So, lets save
# the current selection in order to try to re-select it after
# the update is over.
model, selected = self.library_selection.get_selected_rows()
for path in selected:
if model.get_value(model.get_iter(path), 2) == "/":
prev_selection_root = True
elif model.get_value(model.get_iter(path), 2) == "..":
prev_selection_parent = True
else:
prev_selection.append(model.get_value(model.get_iter(path),
1))
self.libraryposition[self.config.wd] = \
self.library.get_visible_rect()[1]
path_updated = True
else:
path_updated = False
new_level = self.library_get_data_level(root)
curr_level = self.library_get_data_level(self.config.wd)
# The logic below is more consistent with, e.g., thunar.
if new_level > curr_level:
# Save position and row for where we just were if we've
# navigated into a sub-directory:
self.libraryposition[self.config.wd] = \
self.library.get_visible_rect()[1]
model, rows = self.library_selection.get_selected_rows()
if len(rows) > 0:
data = self.librarydata.get_value(
self.librarydata.get_iter(rows[0]), 2)
if not data in ("..", "/"):
self.libraryselectedpath[self.config.wd] = rows[0]
elif (self.config.lib_view == consts.VIEW_FILESYSTEM and \
root != self.config.wd) \
or (self.config.lib_view != consts.VIEW_FILESYSTEM and new_level != \
curr_level):
# If we've navigated to a parent directory, don't save
# anything so that the user will enter that subdirectory
# again at the top position with nothing selected
self.libraryposition[self.config.wd] = 0
self.libraryselectedpath[self.config.wd] = None
# In case sonata is killed or crashes, we'll save the library state
# in 5 seconds (first removing any current settings_save timeouts)
if self.config.wd != root:
try:
gobject.source_remove(self.save_timeout)
except:
pass
self.save_timeout = gobject.timeout_add(5000, self.settings_save)
self.config.wd = root
self.library.freeze_child_notify()
self.librarydata.clear()
# Populate treeview with data:
bd = []
while len(bd) == 0:
if self.config.lib_view == consts.VIEW_FILESYSTEM:
bd = self.library_populate_filesystem_data(
self.library_get_data(self.config.wd, 'path'))
elif self.config.lib_view == consts.VIEW_ALBUM:
album, artist, year = self.library_get_data(self.config.wd,
'album', 'artist',
'year')
if album is not None:
bd = self.library_populate_data(artist=artist, album=album,
year=year)
else:
bd = self.library_populate_toplevel_data(albumview=True)
elif self.config.lib_view == consts.VIEW_ARTIST:
artist, album, year = self.library_get_data(self.config.wd,
'artist', 'album',
'year')
if artist is not None and album is not None:
bd = self.library_populate_data(artist=artist, album=album,
year=year)
elif artist is not None:
bd = self.library_populate_data(artist=artist)
else:
bd = self.library_populate_toplevel_data(artistview=True)
elif self.config.lib_view == consts.VIEW_GENRE:
genre, artist, album, year = self.library_get_data(
self.config.wd, 'genre', 'artist', 'album', 'year')
if genre is not None and artist is not None and album is \
not None:
bd = self.library_populate_data(genre=genre, artist=artist,
album=album, year=year)
elif genre is not None and artist is not None:
bd = self.library_populate_data(genre=genre, artist=artist)
elif genre is not None:
bd = self.library_populate_data(genre=genre)
else:
bd = self.library_populate_toplevel_data(genreview=True)
if len(bd) == 0:
# Nothing found; go up a level until we reach the top level
# or results are found
last_wd = self.config.wd
self.config.wd = self.library_get_parent()
if self.config.wd == last_wd:
break
for _sort, path in bd:
self.librarydata.append(path)
self.library.thaw_child_notify()
# Scroll back to set view for current dir:
self.library.realize()
gobject.idle_add(self.library_set_view, not path_updated)
if len(prev_selection) > 0 or prev_selection_root or \
prev_selection_parent:
# Retain pre-update selection:
self.library_retain_selection(prev_selection, prev_selection_root,
prev_selection_parent)
# Update library artwork as necessary
self.on_library_scrolled(None, None)
self.update_breadcrumbs()
def update_breadcrumbs(self):
# remove previous buttons
for b in self.breadcrumbs:
self.breadcrumbs.remove(b)
# add the views button first
b = ui.button(text=_(" v "), can_focus=False, relief=gtk.RELIEF_NONE)
b.connect('clicked', self.library_view_popup)
self.breadcrumbs.pack_start(b, False, False)
b.show()
# add the ellipsis explicitly XXX make this unnecessary
b = ui.label("...")
self.breadcrumbs.pack_start(b, False, False)
b.show()
# find info for current view
view, _name, icon, label = [v for v in self.VIEWS
if v[0] == self.config.lib_view][0]
# the first crumb is the root of the current view
crumbs = [(label, icon, None, self.library_set_data(path='/'))]
# rest of the crumbs are specific to the view
if view == consts.VIEW_FILESYSTEM:
path = self.library_get_data(self.config.wd, 'path')
if path and path != '/':
parts = path.split('/')
else:
parts = [] # no crumbs for /
# append a crumb for each part
for i, part in enumerate(parts):
partpath = '/'.join(parts[:i + 1])
target = self.library_set_data(path=partpath)
crumbs.append((part, gtk.STOCK_OPEN, None, target))
else:
if view == consts.VIEW_ALBUM:
# We don't want to show an artist button in album view
keys = 'genre', 'album'
nkeys = 2
else:
keys = 'genre', 'artist', 'album'
nkeys = 3
parts = self.library_get_data(self.config.wd, *keys)
# append a crumb for each part
for i, key, part in zip(range(nkeys), keys, parts):
if part is None:
continue
partdata = dict(zip(keys, parts)[:i + 1])
target = self.library_set_data(**partdata)
pb, icon = None, None
if key == 'album':
# Album artwork, with self.alumbpb as a backup:
artist, album, path = self.library_get_data(self.config.wd,
'artist', 'album', 'path')
cache_data = self.library_set_data(artist=artist,
album=album, path=path)
pb = self.artwork.get_library_artwork_cached_pb(cache_data,
None)
if pb is None:
icon = 'album'
elif key == 'artist':
icon = 'artist'
else:
icon = gtk.STOCK_ORIENTATION_PORTRAIT
crumbs.append((part, icon, pb, target))
# add a button for each crumb
for crumb in crumbs:
text, icon, pb, target = crumb
text = misc.escape_html(text)
if crumb is crumbs[-1]:
text = "<b>%s</b>" % text
label = ui.label(markup=text)
if icon:
image = ui.image(stock=icon)
elif pb:
pb = pb.scale_simple(16, 16, gtk.gdk.INTERP_HYPER)
image = ui.image(pb=pb)
b = breadcrumbs.CrumbButton(image, label)
if crumb is crumbs[-1]:
# FIXME makes the button request minimal space:
# label.props.ellipsize = pango.ELLIPSIZE_END
b.props.active = True
# FIXME why doesn't the tooltip show?
b.set_tooltip_text(label.get_label())
b.connect('toggled', self.library_browse, target)
self.breadcrumbs.pack_start(b, False, False)
b.show_all()
def library_populate_add_parent_rows(self):
return [] # disabled as breadcrumbs replace these
if self.config.lib_view == consts.VIEW_FILESYSTEM:
bd = [('0', [self.harddiskpb, self.library_set_data(path='/'),
'/'])]
bd += [('1', [self.openpb, self.library_set_data(path='..'),
'..'])]
else:
bd = [('0', [self.harddiskpb2, self.library_set_data(path='/'),
'/'])]
bd += [('1', [self.openpb2, self.library_set_data(path='..'),
'..'])]
return bd
def library_populate_filesystem_data(self, path):
# List all dirs/files at path
bd = []
if path == '/' and self.lib_view_filesystem_cache is not None:
# Use cache if possible...
bd = self.lib_view_filesystem_cache
else:
for item in self.mpd.lsinfo(path):
if 'directory' in item:
name = mpdh.get(item, 'directory').split('/')[-1]
data = self.library_set_data(path=mpdh.get(item,
'directory'))
bd += [('d' + unicode(name).lower(), [self.openpb, data,
misc.escape_html(name)])]
elif 'file' in item:
data = self.library_set_data(path=mpdh.get(item, 'file'))
bd += [('f' + unicode(mpdh.get(item, 'file')).lower(),
[self.sonatapb, data,
formatting.parse(self.config.libraryformat, item,
True)])]
bd.sort(key=operator.itemgetter(0))
if path != '/' and len(bd) > 0:
bd = self.library_populate_add_parent_rows() + bd
if path == '/':
self.lib_view_filesystem_cache = bd
return bd
def library_get_toplevel_cache(self, genreview=False, artistview=False,
albumview=False):
if genreview and self.lib_view_genre_cache is not None:
bd = self.lib_view_genre_cache
elif artistview and self.lib_view_artist_cache is not None:
bd = self.lib_view_artist_cache
elif albumview and self.lib_view_album_cache is not None:
bd = self.lib_view_album_cache
else:
return None
# Check if we can update any artwork:
for _sort, info in bd:
pb = info[0]
if pb == self.albumpb:
artist, album, path = self.library_get_data(info[1], 'artist',
'album', 'path')
key = self.library_set_data(path=path, artist=artist,
album=album)
pb2 = self.artwork.get_library_artwork_cached_pb(key, None)
if pb2 is not None:
info[0] = pb2
return bd
def library_populate_toplevel_data(self, genreview=False, artistview=False,
albumview=False):
bd = self.library_get_toplevel_cache(genreview, artistview, albumview)
if bd is not None:
# We have our cached data, woot.
return bd
bd = []
if genreview or artistview:
# Only for artist/genre views, album view is handled differently
# since multiple artists can have the same album name
if genreview:
items = self.library_return_list_items('genre')
pb = self.genrepb
else:
items = self.library_return_list_items('artist')
pb = self.artistpb
if not (self.NOTAG in items):
items.append(self.NOTAG)
for item in items:
if genreview:
playtime, num_songs = self.library_return_count(genre=item)
data = self.library_set_data(genre=item)
else:
playtime, num_songs = self.library_return_count(
artist=item)
data = self.library_set_data(artist=item)
if num_songs > 0:
display = misc.escape_html(item)
display += self.add_display_info(num_songs,
int(playtime) / 60)
bd += [(misc.lower_no_the(item), [pb, data, display])]
elif albumview:
albums = []
untagged_found = False
for item in self.mpd.listallinfo('/'):
if 'file' in item and 'album' in item:
album = mpdh.get(item, 'album')
artist = mpdh.get(item, 'artist', self.NOTAG)
year = mpdh.get(item, 'date', self.NOTAG)
path = self.get_multicd_album_root_dir(
os.path.dirname(mpdh.get(item, 'file')))
data = self.library_set_data(album=album, artist=artist,
year=year, path=path)
albums.append(data)
if album == self.NOTAG:
untagged_found = True
if not untagged_found:
albums.append(self.library_set_data(album=self.NOTAG))
albums = misc.remove_list_duplicates(albums, case=False)
albums = self.list_identify_VA_albums(albums)
for item in albums:
album, artist, year, path = self.library_get_data(item,
'album',
'artist',
'year',
'path')
playtime, num_songs = self.library_return_count(artist=artist,
album=album,
year=year)
if num_songs > 0:
data = self.library_set_data(artist=artist, album=album,
year=year, path=path)
display = misc.escape_html(album)
if artist and year and len(artist) > 0 and len(year) > 0 \
and artist != self.NOTAG and year != self.NOTAG:
display += " <span weight='light'>(%s, %s)</span>" \
% (misc.escape_html(artist),
misc.escape_html(year))
elif artist and len(artist) > 0 and artist != self.NOTAG:
display += " <span weight='light'>(%s)</span>" \
% misc.escape_html(artist)
elif year and len(year) > 0 and year != self.NOTAG:
display += " <span weight='light'>(%s)</span>" \
% misc.escape_html(year)
display += self.add_display_info(num_songs,
int(playtime) / 60)
bd += [(misc.lower_no_the(album), [self.albumpb, data,
display])]
bd.sort(locale.strcoll, key=operator.itemgetter(0))
if genreview:
self.lib_view_genre_cache = bd
elif artistview:
self.lib_view_artist_cache = bd
elif albumview:
self.lib_view_album_cache = bd
return bd
def list_identify_VA_albums(self, albums):
for i in range(len(albums)):
if i + consts.NUM_ARTISTS_FOR_VA - 1 > len(albums)-1:
break
VA = False
for j in range(1, consts.NUM_ARTISTS_FOR_VA):
if unicode(self.library_get_data(albums[i], 'album')).lower() \
!= unicode(self.library_get_data(albums[i + j],
'album')).lower() or \
self.library_get_data(albums[i], 'year') != \
self.library_get_data(albums[i + j], 'year') or \
self.library_get_data(albums[i], 'path') != \
self.library_get_data(albums[i + j], 'path'):
break
if unicode(self.library_get_data(albums[i], 'artist')) == \
unicode(self.library_get_data(albums[i + j], 'artist')):
albums.pop(i + j)
break
if j == consts.NUM_ARTISTS_FOR_VA - 1:
VA = True
if VA:
album, year, path = self.library_get_data(albums[i], 'album',
'year', 'path')
artist = self.VAstr
albums[i] = self.library_set_data(album=album, artist=artist,
year=year, path=path)
j = 1
while i + j <= len(albums) - 1:
if unicode(self.library_get_data(albums[i],
'album')).lower() == \
unicode(self.library_get_data(albums[i + j],
'album')).lower() \
and self.library_get_data(albums[i], 'year') == \
self.library_get_data(albums[i + j], 'year'):
albums.pop(i + j)
else:
break
return albums
def get_VAstr(self):
return self.VAstr
def library_populate_data(self, genre=None, artist=None, album=None,
year=None):
# Create treeview model info
bd = []
if genre is not None and artist is None and album is None:
# Artists within a genre
artists = self.library_return_list_items('artist', genre=genre)
if len(artists) > 0:
if not self.NOTAG in artists:
artists.append(self.NOTAG)
for artist in artists:
playtime, num_songs = self.library_return_count(
genre=genre, artist=artist)
if num_songs > 0:
display = misc.escape_html(artist)
display += self.add_display_info(num_songs,
int(playtime) / 60)
data = self.library_set_data(genre=genre,
artist=artist)
bd += [(misc.lower_no_the(artist),
[self.artistpb, data, display])]
elif artist is not None and album is None:
# Albums/songs within an artist and possibly genre
# Albums first:
if genre is not None:
albums = self.library_return_list_items('album', genre=genre,
artist=artist)
else:
albums = self.library_return_list_items('album', artist=artist)
for album in albums:
if genre is not None:
years = self.library_return_list_items('date', genre=genre,
artist=artist,
album=album)
else:
years = self.library_return_list_items('date',
artist=artist,
album=album)
if not self.NOTAG in years:
years.append(self.NOTAG)
for year in years:
if genre is not None:
playtime, num_songs = self.library_return_count(
genre=genre, artist=artist, album=album, year=year)
if num_songs > 0:
files = self.library_return_list_items(
'file', genre=genre, artist=artist,
album=album, year=year)
path = os.path.dirname(files[0])
data = self.library_set_data(genre=genre,
artist=artist,
album=album,
year=year, path=path)
else:
playtime, num_songs = self.library_return_count(
artist=artist, album=album, year=year)
if num_songs > 0:
files = self.library_return_list_items(
'file', artist=artist, album=album, year=year)
path = os.path.dirname(files[0])
data = self.library_set_data(artist=artist,
album=album,
year=year, path=path)
if num_songs > 0:
cache_data = self.library_set_data(artist=artist,
album=album,
path=path)
display = misc.escape_html(album)
if year and len(year) > 0 and year != self.NOTAG:
display += " <span weight='light'>(%s)</span>" \
% misc.escape_html(year)
display += self.add_display_info(num_songs,
int(playtime) / 60)
ordered_year = year
if ordered_year == self.NOTAG:
ordered_year = '9999'
pb = self.artwork.get_library_artwork_cached_pb(
cache_data, self.albumpb)
bd += [(ordered_year + misc.lower_no_the(album),
[pb, data, display])]
# Now, songs not in albums:
bd += self.library_populate_data_songs(genre, artist, self.NOTAG,
None)
else:
# Songs within an album, artist, year, and possibly genre
bd += self.library_populate_data_songs(genre, artist, album, year)
if len(bd) > 0:
bd = self.library_populate_add_parent_rows() + bd
bd.sort(locale.strcoll, key=operator.itemgetter(0))
return bd
def library_populate_data_songs(self, genre, artist, album, year):
bd = []
if genre is not None:
songs, _playtime, _num_songs = \
self.library_return_search_items(genre=genre, artist=artist,
album=album, year=year)
else:
songs, _playtime, _num_songs = self.library_return_search_items(
artist=artist, album=album, year=year)
for song in songs:
data = self.library_set_data(path=mpdh.get(song, 'file'))
track = mpdh.get(song, 'track', '99', False, 2)
disc = mpdh.get(song, 'disc', '99', False, 2)
try:
bd += [('f' + disc + track + misc.lower_no_the(
mpdh.get(song, 'title')), [self.sonatapb, data,
formatting.parse(
self.config.libraryformat,
song, True)])]
except:
bd += [('f' + disc + track + \
unicode(mpdh.get(song, 'file')).lower(),
[self.sonatapb, data,
formatting.parse(self.config.libraryformat, song,
True)])]
return bd
def library_return_list_items(self, itemtype, genre=None, artist=None,
album=None, year=None, ignore_case=True):
# Returns all items of tag 'itemtype', in alphabetical order,
# using mpd's 'list'. If searchtype is passed, use
# a case insensitive search, via additional 'list'
# queries, since using a single 'list' call will be
# case sensitive.
results = []
searches = self.library_compose_list_count_searchlist(genre, artist,
album, year)
if len(searches) > 0:
for s in searches:
# If we have untagged tags (''), use search instead
# of list because list will not return anything.
if '' in s:
items = []
songs, playtime, num_songs = \
self.library_return_search_items(genre, artist,
album, year)
for song in songs:
items.append(mpdh.get(song, itemtype))
else:
items = self.mpd.list(itemtype, *s)
for item in items:
if len(item) > 0:
results.append(item)
else:
if genre is None and artist is None and album is None and year \
is None:
for item in self.mpd.list(itemtype):
if len(item) > 0:
results.append(item)
if ignore_case:
results = misc.remove_list_duplicates(results, case=False)
results.sort(locale.strcoll)
return results
def library_return_count(self, genre=None, artist=None, album=None,
year=None):
# Because mpd's 'count' is case sensitive, we have to
# determine all equivalent items (case insensitive) and
# call 'count' for each of them. Using 'list' + 'count'
# involves much less data to be transferred back and
# forth than to use 'search' and count manually.
searches = self.library_compose_list_count_searchlist(genre, artist,
album, year)
playtime = 0
num_songs = 0
for s in searches:
if '' in s and self.mpd.version <= (0, 13):
# Can't return count for empty tags, use search instead:
_results, playtime, num_songs = \
self.library_return_search_items(
genre=genre, artist=artist, album=album, year=year)
else:
count = self.mpd.count(*s)
playtime += mpdh.get(count, 'playtime', 0, True)
num_songs += mpdh.get(count, 'songs', 0, True)
return (playtime, num_songs)
def library_compose_list_count_searchlist_single(self, search, typename,
cached_list, searchlist):
s = []
skip_type = (typename == 'artist' and search == self.VAstr)
if search is not None and not skip_type:
if search == self.NOTAG:
itemlist = [search, '']
else:
itemlist = []
if cached_list is None:
cached_list = self.library_return_list_items(typename,
ignore_case=False)
# This allows us to match untagged items
cached_list.append('')
for item in cached_list:
if unicode(item).lower() == unicode(search).lower():
itemlist.append(item)
if len(itemlist) == 0:
# There should be no results!
return None, cached_list
for item in itemlist:
if len(searchlist) > 0:
for item2 in searchlist:
s.append(item2 + (typename, item))
else:
s.append((typename, item))
else:
s = searchlist
return s, cached_list
def library_compose_list_count_searchlist(self, genre=None, artist=None,
album=None, year=None):
s = []
s, self.lib_list_genres = \
self.library_compose_list_count_searchlist_single(
genre, 'genre', self.lib_list_genres, s)
if s is None:
return []
s, self.lib_list_artists = \
self.library_compose_list_count_searchlist_single(
artist, 'artist', self.lib_list_artists, s)
if s is None:
return []
s, self.lib_list_albums = \
self.library_compose_list_count_searchlist_single(
album, 'album', self.lib_list_albums, s)
if s is None:
return []
s, self.lib_list_years = \
self.library_compose_list_count_searchlist_single(
year, 'date', self.lib_list_years, s)
if s is None:
return []
return s
def library_compose_search_searchlist_single(self, search, typename,
searchlist):
s = []
skip_type = (typename == 'artist' and search == self.VAstr)
if search is not None and not skip_type:
if search == self.NOTAG:
itemlist = [search, '']
else:
itemlist = [search]
for item in itemlist:
if len(searchlist) > 0:
for item2 in searchlist:
s.append(item2 + (typename, item))
else:
s.append((typename, item))
else:
s = searchlist
return s
def library_compose_search_searchlist(self, genre=None, artist=None,
album=None, year=None):
s = []
s = self.library_compose_search_searchlist_single(genre, 'genre', s)
s = self.library_compose_search_searchlist_single(album, 'album', s)
s = self.library_compose_search_searchlist_single(artist, 'artist', s)
s = self.library_compose_search_searchlist_single(year, 'date', s)
return s
def library_return_search_items(self, genre=None, artist=None, album=None,
year=None):
# Returns all mpd items, using mpd's 'search', along with
# playtime and num_songs.
searches = self.library_compose_search_searchlist(genre, artist, album,
year)
for s in searches:
args_tuple = tuple(map(str, s))
playtime = 0
num_songs = 0
results = []
if '' in s and self.mpd.version <= (0, 13):
# Can't search for empty tags, search broader and
# filter instead:
# Strip empty tag args from tuple:
pos = list(args_tuple).index('')
strip_type = list(args_tuple)[pos-1]
new_lst = []
for i, item in enumerate(list(args_tuple)):
if i != pos and i != pos-1:
new_lst.append(item)
args_tuple = tuple(new_lst)
else:
strip_type = None
if len(args_tuple) == 0:
return None, 0, 0
items = self.mpd.search(*args_tuple)
if items is not None:
for item in items:
if strip_type is None or (strip_type is not None and not \
strip_type in item.keys()):
match = True
pos = 0
# Ensure that if, e.g., "foo" is searched,
# "foobar" isn't returned too
for arg in args_tuple[::2]:
if arg in item and \
unicode(mpdh.get(item, arg)).upper() != \
unicode(args_tuple[pos + 1]).upper():
match = False
break
pos += 2
if match:
results.append(item)
num_songs += 1
playtime += mpdh.get(item, 'time', 0, True)
return (results, int(playtime), num_songs)
def add_display_info(self, num_songs, playtime):
return "\n<small><span weight='light'>%s %s, %s %s</span></small>" \
% (num_songs, gettext.ngettext('song', 'songs', num_songs),
playtime, gettext.ngettext('minute', 'minutes', playtime))
def library_retain_selection(self, prev_selection, prev_selection_root,
prev_selection_parent):
# Unselect everything:
if len(self.librarydata) > 0:
self.library_selection.unselect_range((0,),
(len(self.librarydata) - 1,))
# Now attempt to retain the selection from before the update:
for value in prev_selection:
for row in self.librarydata:
if value == row[1]:
self.library_selection.select_path(row.path)
break
if prev_selection_root:
self.library_selection.select_path((0,))
if prev_selection_parent:
self.library_selection.select_path((1,))
def library_set_view(self, select_items=True):
# select_items should be false if the same directory has merely
# been refreshed (updated)
try:
if self.config.wd in self.libraryposition:
self.library.scroll_to_point(
-1, self.libraryposition[self.config.wd])
else:
self.library.scroll_to_point(0, 0)
except:
self.library.scroll_to_point(0, 0)
# Select and focus previously selected item
if select_items:
if self.config.wd in self.libraryselectedpath:
try:
if self.libraryselectedpath[self.config.wd]:
self.library_selection.select_path(
self.libraryselectedpath[self.config.wd])
self.library.grab_focus()
except:
pass
def library_set_data(self, *args, **kwargs):
return library_set_data(*args, **kwargs)
def library_get_data(self, data, *args):
return library_get_data(data, *args)
def library_get_data_level(self, data):
if self.config.lib_view == consts.VIEW_FILESYSTEM:
# Returns the number of directories down:
if library_get_data(data, 'path') == '/':
# Every other path doesn't start with "/", so
# start the level numbering at -1
return -1
else:
return library_get_data(data, 'path').count("/")
else:
# Returns the number of items stored in data, excluding
# the path:
level = 0
album, artist, genre, year = library_get_data(
data, 'album', 'artist', 'genre', 'year')
for item in [album, artist, genre, year]:
if item is not None:
level += 1
return level
def on_library_key_press(self, widget, event):
if event.keyval == gtk.gdk.keyval_from_name('Return'):
self.on_library_row_activated(widget, widget.get_cursor()[0])
return True
def on_library_query_tooltip(self, widget, x, y, keyboard_mode, tooltip):
if keyboard_mode or not self.search_visible():
widget.set_tooltip_text(None)
return False
bin_x, bin_y = widget.convert_widget_to_bin_window_coords(x, y)
pathinfo = widget.get_path_at_pos(bin_x, bin_y)
if not pathinfo:
widget.set_tooltip_text(None)
# If the user hovers over an empty row and then back to
# a row with a search result, this will ensure the tooltip
# shows up again:
gobject.idle_add(self.library_search_tooltips_enable, widget, x, y,
keyboard_mode, None)
return False
treepath, _col, _x2, _y2 = pathinfo
i = self.librarydata.get_iter(treepath[0])
path = misc.escape_html(self.library_get_data(
self.librarydata.get_value(i, 1), 'path'))
song = self.librarydata.get_value(i, 2)
new_tooltip = "<b>%s:</b> %s\n<b>%s:</b> %s" \
% (_("Song"), song, _("Path"), path)
if new_tooltip != self.libsearch_last_tooltip:
self.libsearch_last_tooltip = new_tooltip
self.library.set_property('has-tooltip', False)
gobject.idle_add(self.library_search_tooltips_enable, widget, x, y,
keyboard_mode, tooltip)
gobject.idle_add(widget.set_tooltip_markup, new_tooltip)
return
self.libsearch_last_tooltip = new_tooltip
return False #api says we should return True, but this doesn't work?
def library_search_tooltips_enable(self, widget, x, y, keyboard_mode,
tooltip):
self.library.set_property('has-tooltip', True)
if tooltip is not None:
self.on_library_query_tooltip(widget, x, y, keyboard_mode, tooltip)
def on_library_row_activated(self, _widget, path, _column=0):
if path is None:
# Default to last item in selection:
_model, selected = self.library_selection.get_selected_rows()
if len(selected) >= 1:
path = selected[0]
else:
return
value = self.librarydata.get_value(self.librarydata.get_iter(path), 1)
icon = self.librarydata.get_value(self.librarydata.get_iter(path), 0)
if icon == self.sonatapb:
# Song found, add item
self.on_add_item(self.library)
elif value == self.library_set_data(path=".."):
self.library_browse_parent(None)
else:
self.library_browse(None, value)
def library_get_parent(self):
if self.config.lib_view == consts.VIEW_ALBUM:
value = self.library_set_data(path="/")
elif self.config.lib_view == consts.VIEW_ARTIST:
album, artist = self.library_get_data(self.config.wd, 'album',
'artist')
if album is not None:
value = self.library_set_data(artist=artist)
else:
value = self.library_set_data(path="/")
elif self.config.lib_view == consts.VIEW_GENRE:
album, artist, genre = self.library_get_data(
self.config.wd, 'album', 'artist', 'genre')
if album is not None:
value = self.library_set_data(genre=genre, artist=artist)
elif artist is not None:
value = self.library_set_data(genre=genre)
else:
value = self.library_set_data(path="/")
else:
newvalue = '/'.join(
self.library_get_data(self.config.wd, 'path').split('/')[:-1])\
or '/'
value = self.library_set_data(path=newvalue)
return value
def library_browse_parent(self, _action):
if not self.search_visible():
if self.library.is_focus():
value = self.library_get_parent()
self.library_browse(None, value)
return True
def not_parent_is_selected(self):
# Returns True if something is selected and it's not
# ".." or "/":
model, rows = self.library_selection.get_selected_rows()
for path in rows:
i = model.get_iter(path)
value = model.get_value(i, 2)
if value != ".." and value != "/":
return True
return False
def get_path_child_filenames(self, return_root, selected_only=True):
# If return_root=True, return main directories whenever possible
# instead of individual songs in order to reduce the number of
# mpd calls we need to make. We won't want this behavior in some
# instances, like when we want all end files for editing tags
items = []
if selected_only:
model, rows = self.library_selection.get_selected_rows()
else:
model = self.librarydata
rows = [(i,) for i in range(len(model))]
for path in rows:
i = model.get_iter(path)
pb = model.get_value(i, 0)
data = model.get_value(i, 1)
value = model.get_value(i, 2)
if value != ".." and value != "/":
album, artist, year, genre, path = self.library_get_data(
data, 'album', 'artist', 'year', 'genre', 'path')
if path is not None and album is None and artist is None and \
year is None and genre is None:
if pb == self.sonatapb:
# File
items.append(path)
else:
# Directory
if not return_root:
items += self.library_get_path_files_recursive(
path)
else:
items.append(path)
else:
results, _playtime, _num_songs = \
self.library_return_search_items(
genre=genre, artist=artist, album=album,
year=year)
for item in results:
items.append(mpdh.get(item, 'file'))
# Make sure we don't have any EXACT duplicates:
items = misc.remove_list_duplicates(items, case=True)
return items
def library_get_path_files_recursive(self, path):
results = []
for item in self.mpd.lsinfo(path):
if 'directory' in item:
results = results + self.library_get_path_files_recursive(
mpdh.get(item, 'directory'))
elif 'file' in item:
results.append(mpdh.get(item, 'file'))
return results
def on_library_search_combo_change(self, _combo=None):
self.config.last_search_num = self.searchcombo.get_active()
if not self.search_visible():
return
self.prevlibtodo = ""
self.prevlibtodo_base = "__"
self.libsearchfilter_feed_loop(self.searchtext)
def on_search_end(self, _button, move_focus=True):
if self.search_visible():
self.libsearchfilter_toggle(move_focus)
def search_visible(self):
return self.searchbutton.get_property('visible')
def libsearchfilter_toggle(self, move_focus):
if not self.search_visible() and self.connected():
self.library.set_property('has-tooltip', True)
ui.show(self.searchbutton)
self.prevlibtodo = 'foo'
self.prevlibtodo_base = "__"
self.prevlibtodo_base_results = []
# extra thread for background search work,
# synchronized with a condition and its internal mutex
self.libfilterbox_cond = threading.Condition()
self.libfilterbox_cmd_buf = self.searchtext.get_text()
qsearch_thread = threading.Thread(target=self.libsearchfilter_loop)
qsearch_thread.setDaemon(True)
qsearch_thread.start()
elif self.search_visible():
ui.hide(self.searchbutton)
self.searchtext.handler_block(self.libfilter_changed_handler)
self.searchtext.set_text("")
self.searchtext.handler_unblock(self.libfilter_changed_handler)
self.libsearchfilter_stop_loop()
self.library_browse(root=self.config.wd)
if move_focus:
self.library.grab_focus()
def libsearchfilter_feed_loop(self, editable):
if not self.search_visible():
self.libsearchfilter_toggle(None)
# Lets only trigger the searchfilter_loop if 200ms pass
# without a change in gtk.Entry
try:
gobject.source_remove(self.libfilterbox_source)
except:
pass
self.libfilterbox_source = gobject.timeout_add(
300, self.libsearchfilter_start_loop, editable)
def libsearchfilter_start_loop(self, editable):
self.libfilterbox_cond.acquire()
self.libfilterbox_cmd_buf = editable.get_text()
self.libfilterbox_cond.notifyAll()
self.libfilterbox_cond.release()
def libsearchfilter_stop_loop(self):
self.libfilterbox_cond.acquire()
self.libfilterbox_cmd_buf = '$$$QUIT###'
self.libfilterbox_cond.notifyAll()
self.libfilterbox_cond.release()
def libsearchfilter_loop(self):
while True:
# copy the last command or pattern safely
self.libfilterbox_cond.acquire()
try:
while(self.libfilterbox_cmd_buf == '$$$DONE###'):
self.libfilterbox_cond.wait()
todo = self.libfilterbox_cmd_buf
self.libfilterbox_cond.release()
except:
todo = self.libfilterbox_cmd_buf
searchby = self.search_terms_mpd[self.config.last_search_num]
if self.prevlibtodo != todo:
if todo == '$$$QUIT###':
gobject.idle_add(self.filtering_entry_revert_color,
self.searchtext)
return
elif len(todo) > 1:
gobject.idle_add(self.libsearchfilter_do_search,
searchby, todo)
elif len(todo) == 0:
gobject.idle_add(self.filtering_entry_revert_color,
self.searchtext)
self.libsearchfilter_toggle(False)
else:
gobject.idle_add(self.filtering_entry_revert_color,
self.searchtext)
self.libfilterbox_cond.acquire()
self.libfilterbox_cmd_buf = '$$$DONE###'
try:
self.libfilterbox_cond.release()
except:
pass
self.prevlibtodo = todo
def libsearchfilter_do_search(self, searchby, todo):
if not self.prevlibtodo_base in todo:
# Do library search based on first two letters:
self.prevlibtodo_base = todo[:2]
self.prevlibtodo_base_results = self.mpd.search(searchby,
self.prevlibtodo_base)
subsearch = False
else:
subsearch = True
# Now, use filtering similar to playlist filtering:
# this make take some seconds... and we'll escape the search text
# because we'll be searching for a match in items that are also escaped
#
# Note that the searching is not order specific. That is, "foo bar"
# will match on "fools bar" and "barstool foo".
todos = todo.split(" ")
regexps = []
for i in range(len(todos)):
todos[i] = misc.escape_html(todos[i])
todos[i] = re.escape(todos[i])
todos[i] = '.*' + todos[i].lower()
regexps.append(re.compile(todos[i]))
matches = []
if searchby != 'any':
for row in self.prevlibtodo_base_results:
is_match = True
for regexp in regexps:
if not regexp.match(unicode(mpdh.get(row,
searchby)).lower()):
is_match = False
break
if is_match:
matches.append(row)
else:
for row in self.prevlibtodo_base_results:
allstr = " ".join(mpdh.get(row, meta) for meta in row)
is_match = True
for regexp in regexps:
if not regexp.match(unicode(allstr).lower()):
is_match = False
break
if is_match:
matches.append(row)
if subsearch and len(matches) == len(self.librarydata):
# nothing changed..
return
self.library.freeze_child_notify()
currlen = len(self.librarydata)
bd = [[self.sonatapb,
self.library_set_data(path=mpdh.get(item, 'file')),
formatting.parse(self.config.libraryformat, item, True)]
for item in matches if 'file' in item]
bd.sort(locale.strcoll, key=operator.itemgetter(2))
for i, item in enumerate(bd):
if i < currlen:
j = self.librarydata.get_iter((i, ))
for index in range(len(item)):
if item[index] != self.librarydata.get_value(j, index):
self.librarydata.set_value(j, index, item[index])
else:
self.librarydata.append(item)
# Remove excess items...
newlen = len(bd)
if newlen == 0:
self.librarydata.clear()
else:
for i in range(currlen - newlen):
j = self.librarydata.get_iter((currlen - 1 - i,))
self.librarydata.remove(j)
self.library.thaw_child_notify()
if len(matches) == 0:
gobject.idle_add(self.filtering_entry_make_red, self.searchtext)
else:
gobject.idle_add(self.library.set_cursor, '0')
gobject.idle_add(self.filtering_entry_revert_color,
self.searchtext)
def libsearchfilter_key_pressed(self, widget, event):
self.filter_key_pressed(widget, event, self.library)
def libsearchfilter_on_enter(self, _entry):
self.on_library_row_activated(None, None)
def libsearchfilter_set_focus(self):
gobject.idle_add(self.searchtext.grab_focus)
def libsearchfilter_get_style(self):
return self.searchtext.get_style()
| onto/sonata | sonata/library.py | Python | gpl-3.0 | 66,200 |
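Most of the sonata library module above is keyed on the `(album, artist, genre, year, path)` tuple built by `library_set_data`; a tiny illustration of that convention follows, assuming the two module-level helpers are in scope and using made-up values.

```python
# Illustration of the data-tuple convention used throughout library.py (Python 2).
data = library_set_data(artist=u'Some Artist', album=u'Some Album')
print library_get_data(data, 'artist')         # u'Some Artist'
print library_get_data(data, 'album', 'year')  # [u'Some Album', None]
```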
from django.shortcuts import render


def about(request):
    return render(request, "about.html", {})


def location(request):
    return render(request, "location.html", {})


def failure(request):
    return render(request, "failure.html", {})
| apul1421/table-client-side-app-retake | src/ecommerce2/views.py | Python | gpl-3.0 | 237 |
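A sketch of how the three views above might be routed in this (pre-2.0) Django project; the URL patterns and module path are assumptions, since the project's urls.py is not included here.

```python
# Hypothetical urls.py for the about/location/failure views (paths are assumptions).
from django.conf.urls import url

from ecommerce2 import views

urlpatterns = [
    url(r'^about/$', views.about, name='about'),
    url(r'^location/$', views.location, name='location'),
    url(r'^failure/$', views.failure, name='failure'),
]
```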
import json
import urllib
import urllib2


def shorten(url):
    gurl = 'http://goo.gl/api/url?url=%s' % urllib.quote(url)
    req = urllib2.Request(gurl, data='')
    req.add_header('User-Agent', 'toolbar')
    results = json.load(urllib2.urlopen(req))
    return results['short_url']
| arjunjain/nixurl | NixURL/exlib/google.py | Python | gpl-3.0 | 284 |
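For completeness, the original call pattern would have been the one sketched below; note that Google's goo.gl shortener has since been discontinued, so this endpoint no longer answers.

```python
# Example call pattern (Python 2); the goo.gl endpoint is no longer live.
short = shorten('https://www.archlinux.org/')
print short
```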
# -*- coding: utf-8 -*-
################################################
## An application supporting the creation of a database of scientific publications, working with Google Scholar
## Copyright (C) 2013 Damian Baran
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
################################################

import wx
import os
import wx.xrc
import modules.baz.cDatabase as cDatabase
import linecache

###########################################################################
## Class PubDialog
###########################################################################

## Documentation for the class
#
# The class contains the publication management view
class PubDialog ( wx.Dialog ):

    ## Constructor
    def __init__( self ):
        wx.Dialog.__init__ ( self, None, id = wx.ID_ANY, title = u"Zarządzanie Publikacjami", pos = wx.DefaultPosition, size = wx.Size( 450,430 ), style = wx.DEFAULT_DIALOG_STYLE )

        self.session = cDatabase.connectDatabase()
        self.listType = []
        self.getType()

        ico = wx.Icon('icon/pub.ico', wx.BITMAP_TYPE_ICO)
        self.SetIcon(ico)
self.SetSizeHintsSz( wx.DefaultSize, wx.DefaultSize )
bSizer1 = wx.BoxSizer( wx.VERTICAL )
bSizer28 = wx.BoxSizer( wx.VERTICAL )
bSizer21 = wx.BoxSizer( wx.VERTICAL )
self.m_staticText1 = wx.StaticText( self, wx.ID_ANY, u"Dodawanie Publikacji", wx.DefaultPosition, wx.DefaultSize, wx.ALIGN_CENTRE|wx.ST_NO_AUTORESIZE )
self.m_staticText1.Wrap( -1 )
bSizer21.Add( self.m_staticText1, 0, wx.EXPAND|wx.ALL, 5 )
bSizer28.Add( bSizer21, 0, wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL, 5 )
bSizer1.Add( bSizer28, 0, wx.EXPAND, 5 )
bSizer26 = wx.BoxSizer( wx.HORIZONTAL )
bSizer15 = wx.BoxSizer( wx.VERTICAL )
bSizer3 = wx.BoxSizer( wx.HORIZONTAL )
self.m_staticText2 = wx.StaticText( self, wx.ID_ANY, u"Tytuł:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText2.Wrap( -1 )
bSizer3.Add( self.m_staticText2, 1, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.m_textCtrl2 = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 145,-1 ), 0 )
bSizer3.Add( self.m_textCtrl2, 0, wx.BOTTOM|wx.RIGHT|wx.LEFT, 5 )
bSizer15.Add( bSizer3, 0, wx.EXPAND|wx.ALIGN_CENTER_HORIZONTAL, 5 )
bSizer5 = wx.BoxSizer( wx.HORIZONTAL )
self.m_staticText4 = wx.StaticText( self, wx.ID_ANY, u"Autorzy:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText4.Wrap( -1 )
bSizer5.Add( self.m_staticText4, 1, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.m_textCtrl4 = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 145,-1 ), 0 )
bSizer5.Add( self.m_textCtrl4, 0, wx.BOTTOM|wx.RIGHT|wx.LEFT, 5 )
bSizer15.Add( bSizer5, 0, wx.EXPAND, 5 )
bSizer4 = wx.BoxSizer( wx.HORIZONTAL )
self.m_staticText3 = wx.StaticText( self, wx.ID_ANY, u"Cytowania:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText3.Wrap( -1 )
bSizer4.Add( self.m_staticText3, 1, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.m_textCtrl3 = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 145,-1 ), 0 )
bSizer4.Add( self.m_textCtrl3, 0, wx.BOTTOM|wx.RIGHT|wx.LEFT, 5 )
bSizer15.Add( bSizer4, 0, wx.ALIGN_CENTER_HORIZONTAL|wx.EXPAND, 5 )
bSizer6 = wx.BoxSizer( wx.HORIZONTAL )
self.m_staticText5 = wx.StaticText( self, wx.ID_ANY, u"Typ:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText5.Wrap( -1 )
bSizer6.Add( self.m_staticText5, 1, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
m_choice1Choices = self.listType
self.m_choice1 = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.Size( 145,-1 ), m_choice1Choices, 0 )
self.m_choice1.SetSelection( 0 )
bSizer6.Add( self.m_choice1, 0, wx.BOTTOM|wx.RIGHT|wx.LEFT, 5 )
bSizer15.Add( bSizer6, 0, wx.EXPAND, 5 )
bSizer7 = wx.BoxSizer( wx.HORIZONTAL )
self.m_staticText6 = wx.StaticText( self, wx.ID_ANY, u"Rok:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText6.Wrap( -1 )
bSizer7.Add( self.m_staticText6, 1, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.m_textCtrl5 = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 145,-1 ), 0 )
bSizer7.Add( self.m_textCtrl5, 0, wx.BOTTOM|wx.RIGHT|wx.LEFT, 5 )
bSizer15.Add( bSizer7, 0, wx.EXPAND, 5 )
bSizer8 = wx.BoxSizer( wx.HORIZONTAL )
self.m_staticText7 = wx.StaticText( self, wx.ID_ANY, u"DOI:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText7.Wrap( -1 )
bSizer8.Add( self.m_staticText7, 1, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.m_textCtrl6 = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 145,-1 ), 0 )
bSizer8.Add( self.m_textCtrl6, 0, wx.BOTTOM|wx.RIGHT|wx.LEFT, 5 )
bSizer15.Add( bSizer8, 0, wx.EXPAND, 5 )
bSizer29 = wx.BoxSizer( wx.HORIZONTAL )
self.m_staticText9 = wx.StaticText( self, wx.ID_ANY, u"Inny klucz:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText9.Wrap( -1 )
bSizer29.Add( self.m_staticText9, 1, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.m_textCtrl7 = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 145,-1 ), 0 )
bSizer29.Add( self.m_textCtrl7, 0, wx.BOTTOM|wx.RIGHT|wx.LEFT, 5 )
bSizer15.Add( bSizer29, 0, wx.EXPAND, 5 )
bSizer9 = wx.BoxSizer( wx.HORIZONTAL )
self.m_staticText8 = wx.StaticText( self, wx.ID_ANY, u"Wydawca:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText8.Wrap( -1 )
bSizer9.Add( self.m_staticText8, 1, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
m_choice2Choices = cDatabase.getJournalName(self.session)
self.m_choice2 = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.Size( 145,-1 ), m_choice2Choices, 0 )
bSizer9.Add( self.m_choice2, 0, wx.BOTTOM|wx.RIGHT|wx.LEFT, 5 )
bSizer15.Add( bSizer9, 0, wx.EXPAND, 5 )
bSizer17 = wx.BoxSizer( wx.HORIZONTAL )
self.m_staticText10 = wx.StaticText( self, wx.ID_ANY, u"Źródło:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText10.Wrap( -1 )
bSizer17.Add( self.m_staticText10, 1, wx.ALL, 5 )
self.m_textCtrl71 = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 145,-1 ), 0 )
bSizer17.Add( self.m_textCtrl71, 0, wx.BOTTOM|wx.RIGHT|wx.LEFT, 5 )
bSizer15.Add( bSizer17, 1, wx.EXPAND, 5 )
bSizer18 = wx.BoxSizer( wx.HORIZONTAL )
self.m_staticText99 = wx.StaticText( self, wx.ID_ANY, u"LMCP:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText99.Wrap( -1 )
bSizer18.Add( self.m_staticText99, 1, wx.ALL, 5 )
self.m_textCtrl99 = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 145,-1 ), 0 )
self.m_textCtrl99.SetToolTipString( u"Ilość punktów na liście ministerialnej" )
bSizer18.Add( self.m_textCtrl99, 0, wx.BOTTOM|wx.RIGHT|wx.LEFT, 5 )
bSizer15.Add( bSizer18, 1, wx.EXPAND, 5 )
bSizer19 = wx.BoxSizer( wx.HORIZONTAL )
self.m_staticText98 = wx.StaticText( self, wx.ID_ANY, u"JCR:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText98.Wrap( -1 )
bSizer19.Add( self.m_staticText98, 1, wx.ALL, 5 )
m_choice3Choices = ['True', 'False']
self.m_choice3 = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.Size( 145,-1 ), m_choice3Choices, 0 )
bSizer19.Add( self.m_choice3, 0, wx.BOTTOM|wx.RIGHT|wx.LEFT, 5 )
bSizer15.Add( bSizer19, 1, wx.EXPAND, 5 )
bSizer26.Add( bSizer15, 1, wx.EXPAND, 5 )
bSizer23 = wx.BoxSizer( wx.VERTICAL )
bSizer10 = wx.BoxSizer( wx.VERTICAL )
m_checkList3Choices = cDatabase.getUserName(self.session)
self.m_checkList3 = wx.CheckListBox( self, wx.ID_ANY, wx.DefaultPosition, wx.Size( 200,281 ), m_checkList3Choices, 0 )
self.m_checkList3.SetToolTipString( u"Powiąż autorów z publikacją" )
bSizer10.Add( self.m_checkList3, 0, wx.EXPAND|wx.BOTTOM|wx.RIGHT|wx.LEFT, 5 )
bSizer23.Add( bSizer10, 0, wx.EXPAND, 5 )
bSizer26.Add( bSizer23, 1, wx.EXPAND, 5 )
bSizer1.Add( bSizer26, 0, wx.EXPAND, 5 )
bSizer55 = wx.BoxSizer( wx.HORIZONTAL )
self.m_textCtrl55 = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( -1,50 ), wx.TE_MULTILINE )
self.m_textCtrl55.SetToolTipString( u"Notatki do publikacji" )
bSizer55.Add( self.m_textCtrl55, 1, wx.ALL|wx.EXPAND, 5 )
bSizer1.Add( bSizer55, 0, wx.EXPAND, 5 )
bSizer11 = wx.BoxSizer( wx.HORIZONTAL )
self.m_button1 = wx.Button( self, wx.ID_ANY, u"Dodaj", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer11.Add( self.m_button1, 0, wx.ALL|wx.EXPAND, 5 )
self.m_button3 = wx.Button( self, wx.ID_ANY, u"Zatwierdź", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer11.Add( self.m_button3, 0, wx.ALL, 5 )
self.m_button4 = wx.Button( self, wx.ID_ANY, u"Zamknij", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer11.Add( self.m_button4, 0, wx.ALL, 5 )
self.m_staticText11 = wx.StaticText( self, wx.ID_ANY, u"", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText11.Wrap( -1 )
bSizer11.Add( self.m_staticText11, 1, wx.ALL, 5 )
self.m_staticText12 = wx.StaticText( self, wx.ID_ANY, u"", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText12.Wrap( -1 )
bSizer11.Add( self.m_staticText12, 1, wx.ALL, 5 )
bSizer1.Add( bSizer11, 0, wx.ALIGN_RIGHT, 5 )
self.SetSizer( bSizer1 )
self.Layout()
self.Centre( wx.BOTH )
self.m_button3.Hide()
self.m_staticText11.Hide()
self.m_staticText12.Hide()
##################################################
## Bind
###################################################
self.m_button1.Bind(wx.EVT_BUTTON, self.addPubValue)
self.m_button4.Bind(wx.EVT_BUTTON, self.close)
self.m_button3.Bind(wx.EVT_BUTTON, self.editPubValue)
###################################################
        ## Methods
###################################################
self.getType()
    ## getType documentation
    # @param self Object pointer
    #
    # @return void
    # Reads the publication types from a file
def getType(self):
count = len(open('type.txt', 'rU').readlines())
for i in range(count):
self.listType.append(linecache.getline('type.txt',i+1))
print self.listType
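        # Illustrative note: type.txt is assumed to hold one publication type per
        # line (e.g. "article", "book"); linecache.getline() keeps the trailing
        # newline, so entries in self.listType may end with "\n".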
    ## editPubValue documentation
    # @param self Object pointer
    # @param event Triggering event
    #
    # @return void
    # Sends a request to edit the selected publication
def editPubValue(self, event):
        # Read the values from the edit controls
tmp = self.m_staticText1.GetLabel()
tmp = tmp.split('. ', 1)
t0 = tmp[1]
t1 = self.m_textCtrl2.GetValue()
t2 = self.m_textCtrl4.GetValue()
t3 = self.m_textCtrl3.GetValue()
t4 = self.m_choice1.GetStringSelection()
t5 = self.m_textCtrl5.GetValue()
t6 = self.m_textCtrl6.GetValue()
t7 = self.m_textCtrl7.GetValue()
t8 = self.m_choice2.GetStringSelection()
t10 = self.m_textCtrl71.GetValue()
        t11 = self.m_textCtrl99.GetValue() # ministerial list points
        t12 = self.m_choice3.GetStringSelection() # whether it is in JCR
        t13 = self.m_textCtrl55.GetValue() # note
        # Unchecks the authors already linked to the publication
        ch = cDatabase.editItemAuthor(self.session, t0)
        t9 = self.getCheckUser()
        # Get the ID values of the checked authors
tmp = cDatabase.getJournalNameID(self.session)
print t8
if t8 != u'':
t8 = tmp[t8]
else:
t8 = None
t = (t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13)
        # Check that the mandatory values are not empty
if t1 != '' and t2 != '' and t3 != '' and t5 != '':
cDatabase.editPubData(self.session, t, t0)
wx.MessageBox(u'Zauktualizowano wartości!', u'Sukces', wx.OK | wx.ICON_INFORMATION)
else:
wx.MessageBox(u'Nie podana nazwy grupy \nlub nie wybrano autorów.', u'Bład', wx.OK | wx.ICON_INFORMATION)
self.Destroy()
    ## addPubValue documentation
    # @param self Object pointer
    # @param event Triggering event
    #
    # @return void
    # Sends a request to add a new publication
def addPubValue(self, event):
        # Read the values from the input controls
        tx1 = self.m_textCtrl2.GetValue() # title
        tx2 = self.m_textCtrl4.GetValue() # authors
        tx3 = self.m_textCtrl3.GetValue() # citations
        tx4 = self.m_choice1.GetStringSelection() # type
        tx5 = self.m_textCtrl5.GetValue() # year
        tx6 = self.m_textCtrl6.GetValue() # doi
        tx9 = self.m_textCtrl7.GetValue() # other key
        tx7 = self.m_choice2.GetStringSelection() # publisher ID
        tx8 = self.getCheckUser() # author ids
        tx10 = self.m_textCtrl71.GetValue() # source
        tx11 = self.m_staticText11.GetLabel() # urlpub
        tx12 = self.m_staticText12.GetLabel() # urlcit
        tx13 = self.m_textCtrl99.GetValue() # ministerial list points
        tx14 = self.m_choice3.GetStringSelection() # jcr
        tx15 = self.m_textCtrl55.GetValue() # note
        # Get the ID values of the checked authors
tmp = cDatabase.getJournalNameID(self.session)
if tx7 != u'':
tx7 = tmp[tx7]
else:
tx7 = None
t = (tx1, tx2, tx3, tx4, tx5, tx6, tx9, tx7, tx8, tx11, tx12, tx10, tx13, tx14, tx15)
        # Check that the mandatory values are not empty
if tx1 != '' and tx2 != '' and tx3 != '' and tx5 != '':
cDatabase.addPubData(self.session, t)
else:
wx.MessageBox(u'Pola "Tytuł, Autor, Cytowania, Rok" sa wymagane!', u'Bład', wx.OK | wx.ICON_INFORMATION)
self.Destroy()
    ## getCheckUser documentation
    # @param self Object pointer
    #
    # @return list List of IDs of the authors linked to the publication
    # Gets the ids of all authors linked to the publication
def getCheckUser(self):
result = []
guser = cDatabase.getUserName(self.session)
t = cDatabase.getUserNameID(self.session)
for i in range(len(guser)):
if self.m_checkList3.IsChecked(i):
id = t[guser[i]]
result.append(id)
return result
    ## close documentation
    # @param self Object pointer
    # @param event Triggering event
    #
    # @return void
    # Closes the publication management window
def close(self, event):
"""Zamyka okienko publikacji"""
self.Destroy()
if __name__ == "__main__":
app = wx.App(False)
controller = PubDialog()
controller.Show()
app.MainLoop()
| damianbaran/inz | popup/publikacja.py | Python | gpl-3.0 | 16,876 |
__author__ = 'xiaoxiaol'
import numpy as np
import pylab as pl
import scipy
import pandas as pd
import seaborn as sns
import os
import sys, getopt
from scipy.cluster import hierarchy
import platform
from scipy.stats.stats import pearsonr
import scipy.stats as stats
from PIL import Image
import glob
from sklearn.metrics import silhouette_samples, silhouette_score
import math
from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from itertools import cycle
####################################
ZSCORE_OUTLIER_THRESHOLD = 5
####################################
sns.set_context("poster")
def zscore(features, remove_outlier=0):
zscores = scipy.stats.zscore(features, 0)
# zscores = normalizeFeatures(features)
return zscores
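# Note: scipy.stats.zscore(features, 0) standardises each column, i.e.
# z = (x - column mean) / column std, so every feature has zero mean and
# unit variance before clustering.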
# def normalizeFeatures(features):
# meanFeatures = np.median(features, 0)
# stdFeatures = np.std(features, 0)
# if np.count_nonzero(stdFeatures) < len(stdFeatures):
# print "zero detected"
# print stdFeatures
# normalized = (features - meanFeatures) / stdFeatures
# return normalized
#### need to be updated
# def distance_matrix(df_all, feature_names, out_distanceMatrix_file, REMOVE_OUTLIER=0):
# feature_array = df_all[feature_names].astype(float)
# distanceMatrix = []
# normalized = zscore(feature_array)
# #normalized = normalizeFeatures(feature_array)
#
# if num_outliers > 0:
# if not REMOVE_OUTLIER: # only clp
# normalized[normalized < -ZSCORE_OUTLIER_THRESHOLD] = -ZSCORE_OUTLIER_THRESHOLD
# normalized[normalized > ZSCORE_OUTLIER_THRESHOLD] = ZSCORE_OUTLIER_THRESHOLD
#
# for i in range(len(normalized)):
# queryFeature = normalized[i] # each row is a feature vector
# scores = np.exp(-np.sum(abs(normalized - queryFeature) ** 2, 1) / 100) #similarity
# #scores = np.sum(np.abs(normalized - queryFeature) ** 2, 1) # distance
# distanceMatrix.append(scores)
#
# df_dist = pd.DataFrame(distanceMatrix)
# df_dist.to_csv(out_distanceMatrix_file, index=False)
# print("score sim matrix is saved to : " + out_distanceMatrix_file + "\n")
# return df_dist
def copySnapshots(df_in, snapshots_dir, output_dir):
if not os.path.exists(output_dir):
os.mkdir(output_dir)
swc_files = df_in['swc_file_name']
if len(swc_files) > 0:
for afile in swc_files:
filename = snapshots_dir + '/' + afile.split('/')[-1] + '.BMP'
if os.path.exists(filename):
os.system("cp " + filename + " " + output_dir + "/\n")
return
def assemble_screenshots(input_dir, output_image_file_name, size):
files = glob.glob(input_dir + "/*.BMP")
assemble_image = Image.new("RGB", (size * len(files),size))
y = 0
for infile in files:
im = Image.open(infile)
im.thumbnail((size, size), Image.ANTIALIAS)
assemble_image.paste(im, (y, 0))
y += size
assemble_image.save(output_image_file_name)
return
def generateLinkerFileFromDF(df_in, output_ano_file, strip_path=False, swc_path=None):
swc_files = df_in['swc_file_name']
if len(swc_files) > 0:
with open(output_ano_file, 'w') as outf:
for afile in swc_files:
if swc_path is not None:
filename = swc_path + '/'+afile
else:
filename = afile
if strip_path:
filename = afile.split('/')[-1]
line = 'SWCFILE=' + filename + '\n'
outf.write(line)
outf.close()
return
############## heatmap plot: hierachical clustering ########
#
# def heatmap_plot_distancematrix(df_distanceMatrix, merged, output_dir, title=None):
# pl.figure()
#
# # Create a custom palette for creline colors
# cre_lines = np.unique(merged['cre_line'])
# cre_line_pal = sns.color_palette("hls", len(cre_lines))
# cre_line_lut = dict(zip(cre_lines, cre_line_pal)) # map creline type to color
# creline_colors = merged['cre_line'].map(cre_line_lut)
#
# # Create a custom palette for dendrite_type colors thre colors
# dendrite_types = np.unique(merged['dendrite_type'])
# dendrite_type_pal = sns.color_palette(['white','gray','black'])
# dendrite_type_lut = dict(zip(dendrite_types, dendrite_type_pal))
# dendritetype_colors = merged['dendrite_type'].map(dendrite_type_lut)
#
# # Create a custom colormap for the heatmap values
# #cmap = sns.diverging_palette(240, 10, as_cmap=True)
#
# g = sns.clustermap(df_distanceMatrix, method='ward', metric='euclidean', linewidths=0.0,
# row_colors=dendritetype_colors, col_colors=creline_colors, cmap=cmap, xticklabels=False,
# yticklabels=False)
# if title:
# pl.title(title)
# # Legend for row and col colors
#
# for label in dendrite_types:
# pl.bar(0, 0, color=dendrite_type_lut[label], label=label, linewidth=0)
# pl.legend(loc="center", ncol=1)
#
# for label in cre_lines:
# g.ax_col_dendrogram.bar(0, 0, color=cre_line_lut[label], label=label, linewidth=0)
# g.ax_col_dendrogram.legend(loc="center", ncol=3)
#
# pl.title('Similarities')
#
# filename = output_dir + '/similarity_heatmap.png'
# pl.savefig(filename, dpi=300)
# print("save similarity matrix heatmap figure to :" + filename)
# pl.close()
#     return g
def plot_confusion_matrix(cm, xlabel, ylabel, xnames, ynames, title='Confusion matrix', cmap=pl.cm.Blues):
pl.grid(False)
pl.imshow(cm, interpolation = 'none',cmap=cmap)
pl.title(title)
pl.colorbar()
tick_marksx = np.arange(len(xnames))
tick_marksy = np.arange(len(ynames))
pl.xticks(tick_marksx, xnames)
pl.yticks(tick_marksy, ynames)
pl.tight_layout()
pl.ylabel(ylabel)
pl.xlabel(xlabel)
def heatmap_plot_zscore_ivscc(df_zscore_features, df_all, output_dir, title=None):
# Create a custom palette for dendrite_type colors
dendrite_types = [np.nan, 'aspiny', 'sparsely spiny', 'spiny']
# dendrite_type_pal = sns.color_palette("coolwarm", len(dendrite_types))
dendrite_type_pal = sns.color_palette(["gray","black","purple","red"])
dendrite_type_lut = dict(zip(dendrite_types, dendrite_type_pal))
dendrite_type_colors = df_all['dendrite_type'].map(dendrite_type_lut)
# Create a custom palette for creline colors
cre_lines = np.unique(df_all['cre_line'])
print cre_lines
cre_lines = ['Pvalb-IRES-Cre','Sst-IRES-Cre','Gad2-IRES-Cre', 'Htr3a-Cre_NO152',
'Nr5a1-Cre', 'Ntsr1-Cre','Rbp4-Cre_KL100' ,'Rorb-IRES2-Cre-D', 'Scnn1a-Tg2-Cre',
'Scnn1a-Tg3-Cre','Slc17a6-IRES-Cre','Cux2-CreERT2']
cre_line_pal = sns.color_palette("BrBG", len(cre_lines))
cre_line_lut = dict(zip(cre_lines, cre_line_pal)) # map creline type to color
cre_line_colors = df_all['cre_line'].map(cre_line_lut)
# layers = np.unique(df_all['layer'])
# layer_pal = sns.light_palette("green", len(layers))
# layer_lut = dict(zip(layers, layer_pal))
# layer_colors = df_all['layer'].map(layer_lut)
# # only if types are available
# types = np.unique(df_all['types'])
# #reorder
# types = ['NGC','multipolar','symm', 'bitufted','bipolar','tripod', 'Martinotti','cortico-cortical', 'cortico-thal','non-tufted', 'short-thick-tufted', 'tufted','thick-tufted']
# type_pal = sns.color_palette("coolwarm", len(types))# sns.diverging_palette(220, 20, n=len(types))# sns.color_palette("husl", len(types))
# type_lut = dict(zip(types, type_pal))
# type_colors = df_all['types'].map(type_lut)
# Create a custom colormap for the heatmap values
#cmap = sns.diverging_palette(240, 10, as_cmap=True)
linkage = hierarchy.linkage(df_zscore_features, method='ward', metric='euclidean')
data = df_zscore_features.transpose()
row_linkage = hierarchy.linkage(data, method='ward', metric='euclidean')
feature_order = hierarchy.leaves_list(row_linkage)
#print data.index
matchIndex = [data.index[x] for x in feature_order]
#print matchIndex
data = data.reindex(matchIndex)
g = sns.clustermap(data, row_cluster = False, col_linkage=linkage, method='ward', metric='euclidean',
linewidths = 0.0,col_colors = [cre_line_colors,dendrite_type_colors],
cmap = sns.cubehelix_palette(light=1, as_cmap=True),figsize=(40,20))
#g.ax_heatmap.xaxis.set_xticklabels()
pl.setp(g.ax_heatmap.xaxis.get_majorticklabels(), rotation=90 )
pl.setp(g.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
pl.subplots_adjust(left=0.1, bottom=0.5, right=0.9, top=0.95) # !!!!!
#pl.tight_layout( fig, h_pad=20.0, w_pad=20.0)
if title:
pl.title(title)
location ="best"
num_cols=1
# Legend for row and col colors
for label in cre_lines:
g.ax_row_dendrogram.bar(0, 0, color=cre_line_lut[label], label=label, linewidth=0.0)
g.ax_row_dendrogram.legend(loc=location, ncol=num_cols,borderpad=0)
for i in range(3):
g.ax_row_dendrogram.bar(0, 0, color = "white", label=" ", linewidth=0)
g.ax_row_dendrogram.legend(loc=location, ncol=num_cols, borderpad=0.0)
# for label in layers:
# pl.bar(0, 0, color=layer_lut[label], label=label, linewidth=1)
# pl.legend(loc="left", ncol=2,borderpad=0.5)
#
# for label in types:
# g.ax_row_dendrogram.bar(0, 0, color=type_lut[label], label=label,linewidth=0)
# g.ax_row_dendrogram.legend(loc=location, ncol=num_cols,borderpad=0.0)
#
#
# g.ax_row_dendrogram.bar(0, 0, color = "white", label=" ", linewidth=0)
# g.ax_row_dendrogram.legend(loc=location, ncol=num_cols, borderpad=0.0)
for label in dendrite_types:
g.ax_row_dendrogram.bar(0, 0, color = dendrite_type_lut[label], label=label, linewidth=0)
g.ax_row_dendrogram.legend(loc=location, ncol= num_cols, borderpad=0.0)
filename = output_dir + '/zscore_feature_heatmap.png'
pl.savefig(filename, dpi=300)
#pl.show()
print("save zscore matrix heatmap figure to :" + filename)
pl.close()
return linkage
def heatmap_plot_zscore_bbp(df_zscore_features, df_all, output_dir, title=None):
print "heatmap plot"
metric ='m-type'
mtypes = np.unique(df_all[metric])
print mtypes
mtypes_pal = sns.color_palette("hls", len(mtypes))
mtypes_lut = dict(zip(mtypes, mtypes_pal)) # map creline type to color
mtypes_colors = df_all[metric].map(mtypes_lut)
layers = np.unique(df_all['layer'])
layer_pal = sns.light_palette("green", len(layers))
layers_lut = dict(zip(layers, layer_pal))
layer_colors = df_all['layer'].map(layers_lut)
# Create a custom colormap for the heatmap values
#cmap = sns.diverging_palette(240, 10, as_cmap=True)
linkage = hierarchy.linkage(df_zscore_features, method='ward', metric='euclidean')
data = df_zscore_features.transpose()
row_linkage = hierarchy.linkage(data, method='ward', metric='euclidean')
feature_order = hierarchy.leaves_list(row_linkage)
#print data.index
matchIndex = [data.index[x] for x in feature_order]
#print matchIndex
data = data.reindex(matchIndex)
g = sns.clustermap(data, row_cluster = False, col_linkage=linkage, method='ward', metric='euclidean',
linewidths = 0.0,col_colors = [mtypes_colors,layer_colors],
cmap = sns.cubehelix_palette(light=1, as_cmap=True),figsize=(40,20))
#g.ax_heatmap.xaxis.set_xticklabels()
pl.setp(g.ax_heatmap.xaxis.get_majorticklabels(), rotation=90 )
pl.setp(g.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
pl.subplots_adjust(left=0.1, bottom=0.5, right=0.9, top=0.95) # !!!!!
#pl.tight_layout( fig, h_pad=20.0, w_pad=20.0)
if title:
pl.title(title)
location ="best"
num_cols=1
# Legend for row and col colors
for label in mtypes:
g.ax_row_dendrogram.bar(0, 0, color=mtypes_lut[label], label=label, linewidth=0.0)
g.ax_row_dendrogram.legend(loc=location, ncol=num_cols,borderpad=0)
for i in range(3):
g.ax_row_dendrogram.bar(0, 0, color = "white", label=" ", linewidth=0)
g.ax_row_dendrogram.legend(loc=location, ncol=num_cols, borderpad=0.0)
for label in layers:
g.ax_row_dendrogram.bar(0, 0, color=layers_lut[label], label=label, linewidth=0.0)
g.ax_row_dendrogram.legend(loc=location, ncol=num_cols,borderpad=0)
filename = output_dir + '/zscore_feature_heatmap.png'
pl.savefig(filename, dpi=300)
#pl.show()
print("save zscore matrix heatmap figure to :" + filename)
pl.close()
return linkage
########################## feature selection ########################
def remove_correlated_features(df_all, feature_names, coef_threshold=0.98):
num_features = len(feature_names)
removed_names = []
for i in range(num_features):
if not feature_names[i] in removed_names:
a = df_all[feature_names[i]].astype(float)
for j in range(i + 1, num_features):
if not feature_names[j] in removed_names:
b = df_all[feature_names[j]].astype(float)
corrcoef = pearsonr(a, b)
if (corrcoef[0] > coef_threshold):
removed_names.append(feature_names[j])
print("highly correlated:[" + feature_names[i] + ", " + feature_names[j] + " ]")
subset_features_names = feature_names.tolist()
for i in range(len(removed_names)):
if removed_names[i] in subset_features_names:
print ("remove " + removed_names[i])
subset_features_names.remove(removed_names[i])
return np.asarray(subset_features_names)
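# Illustrative example (hypothetical feature names): with coef_threshold=0.98,
# if "total_length" and "total_surface" have a Pearson r above 0.98, the one
# that appears later in feature_names is dropped and only the first is kept.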
####################################### cluster evaluations ##################
def delta(ck, cl):
values = np.ones([len(ck), len(cl)]) * 10000
for i in range(0, len(ck)):
for j in range(0, len(cl)):
values[i, j] = np.linalg.norm(ck[i] - cl[j])
return np.min(values)
def big_delta(ci):
values = np.zeros([len(ci), len(ci)])
for i in range(0, len(ci)):
for j in range(0, len(ci)):
values[i, j] = np.linalg.norm(ci[i] - ci[j])
return np.max(values)
def dunn(k_list):
""" Dunn index [CVI]
Parameters
----------
k_list : list of np.arrays
A list containing a numpy array for each cluster |c| = number of clusters
c[K] is np.array([N, p]) (N : number of samples in cluster K, p : sample dimension)
"""
deltas = np.ones([len(k_list), len(k_list)]) * 1000000
big_deltas = np.zeros([len(k_list), 1])
l_range = range(0, len(k_list))
for k in l_range:
for l in (l_range[0:k] + l_range[k + 1:]):
deltas[k, l] = delta(k_list[k], k_list[l])
big_deltas[k] = big_delta(k_list[k])
di = np.min(deltas) / np.max(big_deltas)
return di
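# Worked example (illustrative): for two 1-D clusters [[0], [1]] and [[5], [7]],
# the smallest inter-cluster distance is 4 and the largest intra-cluster
# diameter is 2, so dunn() returns 4 / 2 = 2.0; larger values indicate compact,
# well separated clusters.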
############################### cluster specific features #####
def cluster_specific_features(df_all, assign_ids, feature_names, output_csv_fn):
#student t to get cluster specific features
labels=[]
clusters = np.unique(assign_ids)
num_cluster = len(clusters)
df_pvalues = pd.DataFrame(index = feature_names, columns = clusters)
for cluster_id in clusters:
ids_a = np.nonzero(assign_ids == cluster_id)[0] # starting from 0
ids_b = np.nonzero(assign_ids != cluster_id)[0] # starting from 0
labels.append("C"+str(cluster_id) + "("+ str(len(ids_a))+")" )
for feature in feature_names:
a = df_all.iloc[ids_a][feature]
b = df_all.iloc[ids_b][feature]
t_stats,pval = stats.ttest_ind(a,b,equal_var=False)
df_pvalues.loc[feature,cluster_id] = -np.log10(pval)
df_pvalues.to_csv(output_csv_fn)
### visulaize
df_pvalues.index.name = "Features"
df_pvalues.columns.name ="Clusters"
d=df_pvalues[df_pvalues.columns].astype(float)
g = sns.heatmap(data=d,linewidths=0.1)
# cmap =sns.color_palette("coolwarm",7, as_cmap=True))
g.set_xticklabels(labels)
pl.yticks(rotation=0)
pl.xticks(rotation=90)
pl.subplots_adjust(left=0.5, right=0.9, top=0.9, bottom=0.1)
pl.title('-log10(P value)')
filename = output_csv_fn + '.png'
pl.savefig(filename, dpi=300)
#pl.show()
pl.close()
return df_pvalues
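# Reading the heatmap: cells hold -log10(p) from Welch's t-test, so a value of
# 2 means p = 0.01 and 3 means p = 0.001; the larger the value, the more
# specific the feature is to that cluster.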
#############################################################################################
def get_zscore_features(df_all, feature_names, out_file, REMOVE_OUTLIER=0,
zscore_threshold=ZSCORE_OUTLIER_THRESHOLD): # if remove_outlier ==0 , just clip at threshold
featureArray = df_all[feature_names].astype(float)
featureArray.fillna(0,inplace=True) ### might introduce some bias
normalized = zscore(featureArray)
# normalized = featureArray
# normalized[~np.isnan(featureArray)] = zscore(featureArray[~np.isnan(featureArray)])
num_outliers = np.count_nonzero(normalized < -zscore_threshold) + np.count_nonzero(
normalized > zscore_threshold)
print("Found %d |z score| > %f in zscore matrix :" % (num_outliers, zscore_threshold) )
df_all_modified = df_all
df_outliers = pd.DataFrame()
if num_outliers > 0:
if not REMOVE_OUTLIER: # just clip
normalized[normalized < -zscore_threshold] = -zscore_threshold
normalized[normalized > zscore_threshold] = zscore_threshold
# else:
# outliers_l = np.nonzero(normalized < -zscore_threshold)
# outliers_h = np.nonzero(normalized > zscore_threshold)
# outlier_index = np.unique((np.append(outliers_l[0], outliers_h[0])))
#
# # remove outlier rows
# df_all_modified = df_all_modified.drop(df_all_modified.index[outlier_index])
# normalized = np.delete(normalized, outlier_index, 0)
#
# # re-zscoring and clipping
# # m_featureArray = df_all_modified[feature_names].astype(float)
# # normalized = zscore(m_featureArray)
# # normalized[normalized < -zscore_threshold] = -zscore_threshold
# # normalized[normalized > zscore_threshold] = zscore_threshold
#
#
# print("Removed %d outlier neurons" % len(outlier_index))
#
# df_outliers = df_all.iloc[outlier_index]
df_z = pd.DataFrame(normalized)
df_z.columns = feature_names
df_z.index = df_all['swc_file_name']
if out_file:
df_z.to_csv(out_file, index=True)
print("save to " + out_file )
if (df_z.shape[0] != df_all_modified.shape[0]):
print ("error: the sample size of the zscore and the original table does not match!")
return df_z, df_all_modified, df_outliers
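# Example of the clipping behaviour: with the default threshold of 5 and
# REMOVE_OUTLIER=0, a feature z-score of 7.3 is clipped to 5.0 (and -7.3 to
# -5.0) instead of dropping that neuron from the table.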
#############################################################################################
def output_single_cluster_results(df_cluster, output_dir, output_prefix, snapshots_dir=None, swc_path = None):
csv_file = output_dir + '/' + output_prefix + '.csv'
df_cluster.to_csv(csv_file, index=False)
ano_file = output_dir + '/' + output_prefix + '.ano'
generateLinkerFileFromDF(df_cluster, ano_file, False, swc_path)
# copy bmp vaa3d snapshots images over
if (snapshots_dir):
copySnapshots(df_cluster, snapshots_dir, output_dir + '/' + output_prefix)
assemble_screenshots(output_dir + '/' + output_prefix, output_dir + '/' + output_prefix + '_assemble.png', 128)
else:
print "no bmp copying from:", snapshots_dir
return
def output_clusters(assign_ids, df_zscores, df_all, feature_names, output_dir, snapshots_dir=None):
if not os.path.exists(output_dir):
os.mkdir(output_dir)
df_assign_id = pd.DataFrame()
df_assign_id['specimen_name'] = df_all['specimen_name']
df_assign_id['cluster_id'] = assign_ids
df_assign_id.to_csv(output_dir + "/cluster_id.csv", index=False)
clusters = np.unique(assign_ids)
num_cluster = len(clusters)
cluster_list = [] # for dunn index calculation
print("There are %d clusters in total" % num_cluster)
df_cluster = pd.DataFrame()
df_zscore_cluster = pd.DataFrame()
for i in clusters:
ids = np.nonzero(assign_ids == i)[0] # starting from 0
df_cluster = df_all.iloc[ids]
print(" %d neurons in cluster %d" % (df_cluster.shape[0], i))
output_single_cluster_results(df_cluster, output_dir, "/cluster_" + str(i), snapshots_dir)
df_zscore_cluster = df_zscores.iloc[ids]
csv_file2 = output_dir + '/cluster_zscore_' + str(i) + '.csv'
df_zscore_cluster.to_csv(csv_file2, index=False)
cluster_list.append(df_zscore_cluster.values)
## pick the cluster specific feature and plot histogram
cluster_specific_features(df_all, assign_ids, feature_names, output_dir+'/pvalues.csv')
return cluster_list
####### ward hierachichal clustering ###########
def ward_cluster(df_all, feature_names, max_cluster_num, output_dir, snapshots_dir= None, RemoveOutliers = 0, datasetType='ivscc'):
print("\n\n\n *************** ward computation, max_cluster = %d *************:" % max_cluster_num)
if not os.path.exists(output_dir):
os.mkdir(output_dir)
else:
os.system("rm -r " + output_dir + '/*')
#### similarity plots
# df_simMatrix = distance_matrix(df_all, feature_names, output_dir + "/morph_features_similarity_matrix.csv", 1)
# # visualize heatmap using ward on similarity matrix
# out = heatmap_plot_distancematrix(df_simMatrix, df_all, output_dir, "Similarity")
# linkage = out.dendrogram_row.calculated_linkage
##### zscores featuer plots
df_zscores, df_all_outlier_removed, df_outliers = get_zscore_features(df_all, feature_names,
output_dir + '/zscore.csv', RemoveOutliers)
if (df_outliers.shape[0] > 0 ):
output_single_cluster_results(df_outliers, output_dir, "outliers", snapshots_dir)
if datasetType =='ivscc':
linkage = heatmap_plot_zscore_ivscc(df_zscores, df_all_outlier_removed, output_dir, "feature zscores")
if datasetType =='bbp':
linkage = heatmap_plot_zscore_bbp(df_zscores, df_all_outlier_removed, output_dir, "feature zscores")
assignments = hierarchy.fcluster(linkage, max_cluster_num, criterion="maxclust")
#hierarchy.dendrogram(linkage)
## put assignments into ano files and csv files
clusters_list = output_clusters(assignments, df_zscores, df_all_outlier_removed, feature_names, output_dir, snapshots_dir)
dunn_index = dunn(clusters_list)
print("dunn index is %f" % dunn_index)
return linkage,df_zscores
def silhouette_clusternumber(linkage,df_zscores,output_dir ="."):
#Silhouette analysis for determining the number of clusters
print("Silhouettee analysis:")
scores=[]
for n_clusters in range(2,30):
assignments = hierarchy.fcluster(linkage, n_clusters, criterion="maxclust")
silhouette_avg = silhouette_score(df_zscores, assignments)
print("For n_clusters =", n_clusters,"The average silhouette_score is :", silhouette_avg)
scores.append(silhouette_avg)
    # plot silhouette scores against cluster number
    pl.figure()
    pl.plot(range(2,30),scores,"*-")
    pl.xlabel("cluster number")
    pl.ylabel("average silhouette coefficient")
pl.savefig(output_dir+'/sihouettee_clusternumber.pdf')
#pl.show()
pl.close()
return
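# Interpretation note: silhouette_score ranges from -1 to 1; values close to 1
# indicate well separated clusters, so the cluster number at the curve's peak
# is the usual choice.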
def dunnindex_clusternumber(linkage,df_zscores, output_dir ="."):
index_list=[]
for n_clusters in range(2,30):
assignments = hierarchy.fcluster(linkage, n_clusters, criterion="maxclust")
df_assign_id = pd.DataFrame()
df_assign_id['cluster_id'] = assignments
clusters = np.unique(assignments)
num_cluster = len(clusters)
cluster_list = [] # for dunn index calculation
df_cluster = pd.DataFrame()
df_zscore_cluster = pd.DataFrame()
for i in clusters:
ids = np.nonzero(assignments == i)[0] # starting from 0
df_zscore_cluster = df_zscores.iloc[ids]
cluster_list.append(df_zscore_cluster.values)
dunn_index = dunn(cluster_list)
index_list.append(dunn_index)
pl.figure()
pl.plot(range(2,30),index_list,"*-")
pl.xlabel("cluster number")
pl.ylabel("dunn index")
pl.savefig(output_dir+'/dunnindex_clusternumber.pdf')
#pl.show()
return
def affinity_propagation(df_all, feature_names, output_dir, snapshots_dir=None, RemoveOutliers=0):
###### Affinity Propogation ##############
print("\n\n\n *************** affinity propogation computation ****************:")
redundancy_removed_features_names = remove_correlated_features(df_all, feature_names, 0.95)
print(" The %d features that are not closely correlated are %s" % (
len(redundancy_removed_features_names), redundancy_removed_features_names))
if not os.path.exists(output_dir):
os.mkdir(output_dir)
else:
os.system("rm -r " + output_dir + '/*')
# Compute Affinity Propagation
df_zscores, df_all_outlier_removed, df_outliers = get_zscore_features(df_all, redundancy_removed_features_names, None, RemoveOutliers)
if (df_outliers.shape[0] > 0 ):
output_single_cluster_results(df_outliers, output_dir, "outliers", snapshots_dir)
X = df_zscores.as_matrix()
af = AffinityPropagation().fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
labels = labels + 1 # the default labels start from 0, to be consistent with ward, add 1 so that it starts from 1
clusters_list = output_clusters(labels, df_zscores, df_all_outlier_removed, redundancy_removed_features_names, output_dir,
snapshots_dir)
dunn_index = dunn(clusters_list)
print("dunn index is %f" % dunn_index)
return len(np.unique(labels)), dunn_index
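# Note: unlike the ward/fcluster path, AffinityPropagation decides the number
# of clusters itself (driven here by its default preference and damping), which
# is why this function returns the resulting cluster count.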
def run_ward_cluster(df_features, feature_names, num_clusters,output_dir,output_postfix):
redundancy_removed_features_names = remove_correlated_features(df_features, feature_names, 0.95)
print(" The %d features that are not closely correlated are %s" % (
len(redundancy_removed_features_names), redundancy_removed_features_names))
#num_clusters, dunn_index1 = affinity_propagation(merged, redundancy_removed_features_names, output_dir + '/ap' + postfix, swc_screenshot_folder, REMOVE_OUTLIERS)
linkage, df_zscore = ward_cluster(df_features, redundancy_removed_features_names, num_clusters, output_dir + '/ward' + output_postfix)
silhouette_clusternumber(linkage, df_zscore, output_dir + '/ward' + output_postfix)
return redundancy_removed_features_names
def main():
######################################################################################################################
data_DIR = "/data/mat/xiaoxiaol/data/lims2/pw_aligned_1223"
#default_all_feature_merged_file = data_DIR + '/keith_features_23dec.csv'
#drop outliers, edit dendrite_type, creline
#df_features = pd.read_csv(data_DIR +'/0107_new_features.csv')
#df_features = df_features[df_features['QC status'] != "Outlier"]
# #parse creline info from specimen_name
#df_features.dropnas()
# crelines=[]
# swc_file_names=[]
# for i in range(df_features.shape[0]):
# sn=df_features['specimen_name'][i]
# fn = df_features['specimen_name'][i].split('/')[-1]
# cl=sn.split(';')[0]
# crelines.append(cl)
# swc_file_names.append(fn)
# df_features['cre_line'] = pd.Series(crelines)
# df_features['swc_file_name'] = pd.Series(swc_file_names)
# df_features.to_csv(data_DIR+'/filtered_w_cre.csv')
input_csv_file = data_DIR + '/0108/0108_features.csv'
out_dir = data_DIR + '/0108/clustering_results/no_GMI'
default_swc_screenshot_folder = data_DIR + "/figures/pw_aligned_bmps"
#######################################################################################################################
swc_screenshot_folder = default_swc_screenshot_folder
method = "all"
SEL_FEATURE = "all"
if not os.path.exists(out_dir):
os.mkdir(out_dir)
########################################################
all_feature_file = input_csv_file
#########################################################
meta_feature_names = np.array(['specimen_name','specimen_id','dendrite_type','cre_line','region_info','filename','swc_file_name'])
basal_feature_names = np.array(['basal_average_bifurcation_angle_local','basal_average_bifurcation_angle_remote','basal_average_contraction','basal_average_fragmentation',
'basal_max_branch_order','basal_max_euclidean_distance','basal_max_path_distance',
'basal_nodes_over_branches','basal_number_of_bifurcations',
'basal_number_of_branches','basal_number_of_stems','basal_number_of_tips','basal_overall_depth','basal_overall_height',
'basal_overall_width','basal_total_length','bb_first_moment_x_basal','bb_first_moment_y_basal','bb_first_moment_z_basal',
'kg_soma_depth',
'basal_moment1','basal_moment10','basal_moment11','basal_moment12','basal_moment13','basal_moment2',
'basal_moment3','basal_moment4',
'basal_moment5','basal_moment6','basal_moment7','basal_moment8','basal_moment9'])
#'basal_total_surface','basal_total_volume','basal_soma_surface','basal_number_of_nodes','basal_average_diameter',
# 'basal_moment1','basal_moment10','basal_moment11','basal_moment12','basal_moment13','basal_moment14','basal_moment2','basal_moment3','basal_moment4',
#'basal_moment5','basal_moment6','basal_moment7','basal_moment8','basal_moment9','basal_average_parent_daughter_ratio'
apical_feature_names = np.array(['apical_average_bifurcation_angle_local','apical_average_bifurcation_angle_remote','apical_average_contraction',
'apical_average_fragmentation','apical_max_branch_order','apical_max_euclidean_distance',
'apical_max_path_distance',
'apical_nodes_over_branches','apical_number_of_bifurcations','apical_number_of_branches',
'apical_number_of_tips','apical_overall_depth','apical_overall_height','apical_overall_width','apical_total_length',
'kg_branch_mean_from_centroid_z_apical',
'kg_branch_stdev_from_centroid_z_apical',
'kg_centroid_over_farthest_branch_apical',
'kg_centroid_over_farthest_neurite_apical',
'kg_centroid_over_radial_dist_apical',
'kg_mean_over_centroid',
'kg_mean_over_farthest_branch_apical',
'kg_mean_over_farthest_neurite_apical',
'kg_mean_over_radial_dist_apical',
'kg_mean_over_stdev',
'kg_num_branches_over_radial_dist_apical',
'kg_num_outer_apical_branches',
'kg_outer_mean_from_center_z_apical',
'kg_outer_mean_over_stdev',
'kg_outer_stdev_from_center_z_apical',
'kg_peak_over_moment_z_apical',
'kg_radial_dist_over_moment_z_apical',
'kg_soma_depth'])
#, 'apical_number_of_nodes'
# ])#'apical_soma_surface', 'apical_total_surface','apical_total_volume','apical_average_diameter','apical_moment1','apical_moment10','apical_moment11','apical_moment12','apical_moment13','apical_moment14',
# 'apical_moment2','apical_moment3','apical_moment4','apical_moment5','apical_moment6','apical_moment7','apical_moment8','apical_moment9','apical_average_parent_daughter_ratio','apical_number_of_stems?? always 1',
bbp_feature_names = np.array(['bb_first_moment_apical','bb_first_moment_basal','bb_first_moment_dendrite','bb_first_moment_x_apical','bb_first_moment_x_basal',
'bb_first_moment_x_dendrite','bb_first_moment_y_apical','bb_first_moment_y_basal','bb_first_moment_y_dendrite','bb_first_moment_z_apical',
'bb_first_moment_z_basal','bb_first_moment_z_dendrite','bb_max_branch_order_apical','bb_max_branch_order_basal','bb_max_branch_order_dendrite',
'bb_max_path_length_apical','bb_max_path_length_basal','bb_max_path_length_dendrite','bb_max_radial_distance_apical','bb_max_radial_distance_basal',
'bb_max_radial_distance_dendrite','bb_mean_trunk_diameter_apical','bb_mean_trunk_diameter_basal','bb_mean_trunk_diameter_dendrite',
'bb_number_branches_apical','bb_number_branches_basal','bb_number_branches_dendrite','bb_number_neurites_apical','bb_number_neurites_basal',
'bb_number_neurites_dendrite','bb_second_moment_apical','bb_second_moment_basal','bb_second_moment_dendrite','bb_second_moment_x_apical',
'bb_second_moment_x_basal','bb_second_moment_x_dendrite','bb_second_moment_y_apical','bb_second_moment_y_basal','bb_second_moment_y_dendrite',
'bb_second_moment_z_apical','bb_second_moment_z_basal','bb_second_moment_z_dendrite','bb_total_length_apical','bb_total_length_basal',
'bb_total_length_dendrite'])
#'bb_total_surface_area_apical','bb_total_volume_basal','bb_total_volume_apical','bb_total_volume_dendrite','bb_total_surface_area_basal','bb_total_surface_area_dendrite'
#selected_features = ['max_euclidean_distance', 'num_stems', 'num_bifurcations', 'average_contraction',
#'parent_daughter_ratio']
#tmp = np.append(meta_feature_names, basal_feature_names)
all_dendritic_feature_names = np.append(basal_feature_names, apical_feature_names) #bbp_feature_names
spiny_feature_names = apical_feature_names
aspiny_feature_names = basal_feature_names
df_features = pd.read_csv(all_feature_file)
print df_features.columns
df_features[all_dendritic_feature_names]= df_features[all_dendritic_feature_names].astype(float)
print "There are %d neurons in this dataset" % df_features.shape[0]
print "Dendrite types: ", np.unique(df_features['dendrite_type'])
# df_features_all = df_features[np.append(meta_feature_names,all_dendritic_feature_names)]
# df_features_all.to_csv(data_DIR+'/0108/all_dendrite_features.csv')
df_groups = df_features.groupby(['dendrite_type'])
df_spiny = df_groups.get_group('spiny')
# df_w_spiny = df_spiny[np.append(meta_feature_names,spiny_feature_names)]
# df_w_spiny.to_csv(data_DIR +'/0108/spiny_features.csv', index=False)
df_aspiny = pd.concat([df_groups.get_group('aspiny'),df_groups.get_group('sparsely spiny')],axis=0)
# df_w_aspiny = df_aspiny[np.append(meta_feature_names,aspiny_feature_names)]
# df_w_aspiny.to_csv(data_DIR +'/0108/aspiny_features.csv', index=False)
print "There are %d neurons are aspiny " % df_aspiny.shape[0]
print "There are %d neurons are spiny\n\n" % df_spiny.shape[0]
feature_names = all_dendritic_feature_names
method = "ward"
REMOVE_OUTLIERS = 0
postfix = "_" + SEL_FEATURE
postfix += "_ol_clipped"
#run_ward_cluster(df_features, feature_names, num_clusters,output_postfix):
# num_clusters, dunn_index1 = affinity_propagation(df_aspiny, aspiny_feature_names,
# out_dir + '/ap_aspiny' + postfix,
# None, REMOVE_OUTLIERS)
# print "spiny ap:"
# print num_clusters
#
# num_clusters, dunn_index1 = affinity_propagation(df_spiny, spiny_feature_names,
# out_dir + '/ap_spiny' + postfix,
# None, REMOVE_OUTLIERS)
# print "aspiny ap:"
# print num_clusters
# exit()
redundancy_removed_features = run_ward_cluster(df_aspiny, aspiny_feature_names, num_clusters = 6 ,output_dir = out_dir,output_postfix= '_aspiny'+postfix)
df_w_aspiny = df_aspiny[np.append(meta_feature_names,redundancy_removed_features)]
df_w_aspiny.to_csv(data_DIR +'/0108/aspiny_selected_features.csv', index=False)
# #
# df_spiny.fillna(0,inplace=True)
# redundancy_removed_features = run_ward_cluster(df_spiny, spiny_feature_names, num_clusters = 9 ,output_dir = out_dir, output_postfix='_spiny'+ postfix)
# df_w_spiny = df_spiny[np.append(meta_feature_names,redundancy_removed_features)]
# df_w_spiny.to_csv(data_DIR +'/0108/spiny_selected_features.csv', index=False)
#
if __name__ == "__main__":
main()
| XiaoxiaoLiu/morphology_analysis | IVSCC/morph_clustering_on_bbp_features_old_example.py | Python | gpl-3.0 | 37,767 |
from xboxdrv_parser import Controller
from time import sleep
import argparse
import os
import sys
sys.path.append(os.path.abspath("../../.."))
from util.communication.grapevine import Communicator
from robosub_settings import settings
def main (args):
com = Communicator (args.module_name)
controller = Controller (["X1", "Y1", "X2", "Y2", "R2", "L2"], ["right/left", "forward/backward", "yaw", "pitch", "up", "down"], (0, 255), (-1, 1))
while True:
control_packet = controller.get_values ()
try:
outgoing_packet = {"right/left": 0.0, "forward/backward": 0.0, "yaw": 0.0, "pitch": 0.0, "up/down": 0.0, "roll": 0.0}
# Further parse controller values here
# Controller's sticks Y axis are switched
control_packet["forward/backward"] = -control_packet["forward/backward"]
control_packet["pitch"] = -control_packet["pitch"]
# Up and Down are not -1 to 1. Just 0 - 1
control_packet["up"] = controller.map_range(control_packet["up"], -1, 1, 0, 1)
control_packet["down"] = controller.map_range(control_packet["down"], -1, 1, 0, -1)
# Transferring to outgoing packet
outgoing_packet["forward/backward"] = control_packet["forward/backward"]
outgoing_packet["right/left"] = control_packet["right/left"]
outgoing_packet["up/down"] = control_packet["up"] + control_packet["down"]
outgoing_packet["yaw"] = control_packet["yaw"]
outgoing_packet["pitch"] = control_packet["pitch"]
#outgoing_packet["roll"] = control_packet["roll"]
outgoing_packet["roll"] = 0.0
# Controller sticks are not centered very well.
# TODO: Find a better way to do this (short of getting a new controller)
for key in outgoing_packet.keys ():
if abs (outgoing_packet[key]) < .10: outgoing_packet[key] = 0.0
print outgoing_packet
Fuzzy_Sets = {"Fuzzy_Sets": outgoing_packet}
com.publish_message (Fuzzy_Sets)
except KeyError as i:
pass
sleep (args.epoch)
def commandline():
parser = argparse.ArgumentParser(description='Mock module.')
parser.add_argument('-e', '--epoch', type=float,
default=0.1,
help='Sleep time per cycle.')
parser.add_argument('-m', '--module_name', type=str,
default='movement/fuzzification',
help='Module name.')
return parser.parse_args()
if __name__ == '__main__':
args = commandline()
main(args)
| pi19404/robosub-1 | src/movement/fuzzification/test/xbox_controller.py | Python | gpl-3.0 | 2,612 |
""" Integration test: permit call
"""
import os
import sys
myPath = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, myPath + '/../../')
import logging
import nose
from nose.tools import *
import inte_testutils
from telewall.core.model import TelephoneNumber
from telewall.core.util import sleep_until
logging.basicConfig(filename='/tmp/telewall-inte.log', level=logging.DEBUG)
logging.getLogger('telewall').setLevel(logging.DEBUG)
LOG = logging.getLogger(__name__)
def test_Anruf_erlauben():
u = inte_testutils.TestUtil()
u.unblock_callerid(TelephoneNumber('0790000001'))
call = u.make_call_to_incoming(callerid='0790000001')
LOG.info('call: %s', call)
sleep_until(lambda: 'Ringing' in call.get_call_states() or 'Up' in call.get_call_states(), 5)
call.hangup()
states = call.get_call_states()
LOG.info('states: %s', states)
assert_true('Ringing' in states,
'Das analoge Telefon sollte angerufen worden sein, aber es gab keinen "Ringing" Status.')
call.stop()
if __name__ == '__main__':
nose.runmodule()
| synox/telewall | telewall/telewall/integrationtests/test_InT01.py | Python | gpl-3.0 | 1,091 |
# -*- coding: utf-8 -*-
# Copyright © 2013, 2014, 2017, 2020 Tom Most <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Additional permission under GNU GPL version 3 section 7
#
# If you modify this Program, or any covered work, by linking or
# combining it with OpenSSL (or a modified version of that library),
# containing parts covered by the terms of the OpenSSL License, the
# licensors of this Program grant you additional permission to convey
# the resulting work. Corresponding Source for a non-source form of
# such a combination shall include the source code for the parts of
# OpenSSL used as well as that of the covered work.
from __future__ import absolute_import
import argparse
import os
import sys
import yarrharr
def main(argv=sys.argv[1:]):
parser = argparse.ArgumentParser(description="Yarrharr feed reader")
parser.add_argument("--version", action="version", version=yarrharr.__version__)
parser.parse_args(argv)
os.environ["DJANGO_SETTINGS_MODULE"] = "yarrharr.settings"
from yarrharr.application import run
run()
| twm/yarrharr | yarrharr/scripts/yarrharr.py | Python | gpl-3.0 | 1,676 |
# Copyright 2017 Virgil Dupras
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
from datetime import date
from itertools import starmap
from hscommon.testutil import eq_
from ...model.amount import Amount
from ...model.currency import USD
from ...model.entry import Entry
from ...model.transaction import Transaction
from ...plugin.base_import_bind import ReferenceBind
def create_entry(entry_date, description, reference):
txn = Transaction(entry_date, description=description, amount=Amount(1, USD))
split = txn.splits[0]
split.reference = reference
return Entry(split, split.amount, 0, 0, 0)
def test_typical_situation():
    # Verify that ReferenceBind.match_entries() returns the expected entries in a typical situation
# We only match entries with the same reference
plugin = ReferenceBind()
DATE = date(2017, 10, 10)
existing_entries = list(starmap(create_entry, [
(DATE, 'e1', 'ref1'),
(DATE, 'e2', 'ref2'),
]))
imported_entries = list(starmap(create_entry, [
(DATE, 'i1', 'ref1'),
(DATE, 'i2', 'ref3'),
]))
matches = plugin.match_entries(None, None, None, existing_entries, imported_entries)
EXPECTED = [('e1', 'i1', True, 0.99)]
result = [(m.existing.description, m.imported.description, m.will_import, m.weight) for m in matches]
eq_(result, EXPECTED)
def test_reconciled_entry():
# Reconciled entries are matched, but with will_import = False
plugin = ReferenceBind()
DATE = date(2017, 10, 10)
existing = create_entry(DATE, 'e1', 'ref1')
existing.split.reconciliation_date = DATE
imported = create_entry(DATE, 'i1', 'ref1')
matches = plugin.match_entries(None, None, None, [existing], [imported])
EXPECTED = [('e1', 'i1', False, 0.99)]
result = [(m.existing.description, m.imported.description, m.will_import, m.weight) for m in matches]
eq_(result, EXPECTED)
def test_match_first_only():
# If two entries have the same reference, we only get one match (we don't care which, it's not
# really supposed to happen...).
plugin = ReferenceBind()
DATE = date(2017, 10, 10)
existing_entries = list(starmap(create_entry, [
(DATE, 'e1', 'ref1'),
]))
imported_entries = list(starmap(create_entry, [
(DATE, 'i1', 'ref1'),
(DATE, 'i2', 'ref1'),
]))
matches = plugin.match_entries(None, None, None, existing_entries, imported_entries)
eq_(len(matches), 1)
| tuxlifan/moneyguru | core/tests/plugin/test_reference_bind.py | Python | gpl-3.0 | 2,766 |
from django.core.management.base import BaseCommand, CommandError
from django.core import management
from django.db.models import Count
from scoping.models import *
class Command(BaseCommand):
help = 'check a query file - how many records'
def add_arguments(self, parser):
parser.add_argument('qid',type=int)
def handle(self, *args, **options):
qid = options['qid']
q = Query.objects.get(pk=qid)
p = 'TY - '
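        # Each record in the exported file starts with a "TY - " (reference type)
        # tag, so counting that tag counts the downloaded records.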
        if q.query_file.name != '':
fpath = q.query_file.path
else:
if q.database=="scopus":
fname = 's_results.txt'
else:
fname = 'results.txt'
fpath = f'{settings.QUERY_DIR}/{qid}/{fname}'
with open(fpath, 'r') as f:
c = f.read().count(p)
print('\n{} documents in downloaded file\n'.format(c))
if q.doc_set.count() > 0:
yts = q.doc_set.values('PY').annotate(
n = Count('pk')
)
for y in yts:
print('{} documents in {}'.format(y['n'],y['PY']))
| mcallaghan/tmv | BasicBrowser/scoping/management/commands/check_query_file.py | Python | gpl-3.0 | 1,107 |
"""
IfExp astroid node
An if statement written in an expression form.
Attributes:
- test (Node)
- Holds a single node such as Compare.
    - body (Node)
        - The expression evaluated when the condition is true.
    - orelse (Node)
        - The expression evaluated when the condition is false (the else clause).
Example:
- test -> True
    - body -> 1
    - orelse -> 0
"""
x = 1 if True else 0
| shweta97/pyta | nodes/IfExp.py | Python | gpl-3.0 | 406 |
# -*- coding: utf-8 -*-
import pilas
archi = open('datos.txt', 'r')
nivel = archi.readline()
pantalla = archi.readline()
idioma = archi.readline()
archi.close()
if idioma == "ES":
from modulos.ES import *
else:
from modulos.EN import *
class EscenaMenu(pilas.escena.Base):
"Es la escena de presentación donde se elijen las opciones del juego."
def __init__(self, musica=False):
pilas.escena.Base.__init__(self)
self.musica = musica
def iniciar(self):
pilas.fondos.Fondo("data/guarida.jpg")
pilas.avisar(menu_aviso)
self.crear_el_menu_principal()
pilas.mundo.agregar_tarea(0.1, self.act)
self.sonido = pilas.sonidos.cargar("data/menu.ogg")
self.sonido.reproducir(repetir=True)
def crear_el_menu_principal(self):
opciones = [
(menu1, self.comenzar_a_jugar),
(menu2, self.mostrar_ayuda_del_juego),
(menu3, self.mostrar_historia),
(menu4, self.mostrar_opciones),
(menu5, self.salir_del_juego)
]
self.trans = pilas.actores.Actor("data/trans.png")
self.trans.x = -155
self.trans.arriba = 85
self.menu = pilas.actores.Menu(opciones, x=-150, y=70, color_normal=
pilas.colores.negro, color_resaltado=pilas.colores.rojo)
self.menu.x = -150
def act(self):
if self.menu.x == -500:
if self.donde == "jugar":
self.sonido.detener()
import escena_niveles
pilas.cambiar_escena(escena_niveles.EscenaNiveles())
return False
elif self.donde == "historia":
self.sonido.detener()
import escena_historia
pilas.cambiar_escena(escena_historia.Historia())
elif self.donde == "ayuda":
self.sonido.detener()
import escena_ayuda
pilas.cambiar_escena(escena_ayuda.Ayuda())
elif self.donde == "opciones":
self.sonido.detener()
import escena_opciones
pilas.cambiar_escena(escena_opciones.Opciones())
return True
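        # Note: the task registered with pilas.mundo.agregar_tarea appears to be
        # repeated while this callback returns True; the "jugar" branch above
        # returns False so polling stops once the level scene takes over
        # (assumption based on pilas' task semantics).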
def mostrar_historia(self):
self.menu.x = [-500]
self.trans.x = [-500]
self.donde = "historia"
def mostrar_opciones(self):
self.menu.x = [-500]
self.trans.x = [-500]
self.donde = "opciones"
def comenzar_a_jugar(self):
self.menu.x = [-500]
self.trans.x = [-500]
self.donde = "jugar"
def mostrar_ayuda_del_juego(self):
self.menu.x = [-500]
self.trans.x = [-500]
self.donde = "ayuda"
def salir_del_juego(self):
pilas.terminar()
| MendeleievBros/Mendeleiev-Bros | mendeleiev_bros/escena_menu.py | Python | gpl-3.0 | 2,716 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# SSHplus
# A remote connect utlity, sshmenu compatible clone, and application starter.
#
# (C) 2011 Anil Gulecha
# Based on sshlist, incorporating changes by Benjamin Heil's simplestarter
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Instructions
#
# 1. Copy file sshplus.py (this file) to /usr/local/bin
# 2. Edit file .sshplus in your home directory to add menu entries, each
# line in the format NAME|COMMAND|ARGS
# 3. Launch sshplus.py
# 4. Or better yet, add it to gnome startup programs list so it's run on login.
import shlex
import sys
import notify2
import os
import gi
gi.require_version("AppIndicator3", "0.1")
from gi.repository import AppIndicator3
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
_VERSION = "1.0"
_SETTINGS_FILE = os.getenv("HOME") + "/.sshplus"
_ABOUT_TXT = """A simple application starter as appindicator.
To add items to the menu, edit the file <i>.sshplus</i> in your home directory. Each entry must be on a new line in this format:
<tt>NAME|COMMAND|ARGS</tt>
If the item is clicked in the menu, COMMAND with arguments ARGS will be executed. ARGS can be empty. To insert a separator, add a line which only contains "sep". Lines starting with "#" will be ignored. You can set an unclickable label with the prefix "label:". To insert a nested menu, use the prefix "folder:menu name". Subsequent items will be inserted in this menu, until a line containing an empty folder name is found: "folder:". After that, subsequent items get inserted in the parent menu. That means that more than one level of nested menus can be created.
Example file:
<tt><small>
Show top|gnome-terminal|-x top
sep
# this is a comment
label:SSH connections
# create a folder named "Home"
folder:Home
SSH Ex|gnome-terminal|-x ssh [email protected]
# to mark the end of items inside "Home", specify an empty folder:
folder:
# this item appears in the main menu
SSH Ex|gnome-terminal|-x ssh [email protected]
label:RDP connections
RDP Ex|rdesktop|-T "RDP-Server" -r sound:local 1.2.3.4
</small></tt>
Copyright 2011 Anil Gulecha
Incorporating changes from simplestarter, Benjamin Heil, http://www.bheil.net
Released under GPL3, http://www.gnu.org/licenses/gpl-3.0.html"""
_EDIT_CONFIG = """To add items to the menu, edit the file <i>.sshplus</i> in your home directory. Each entry must be on a new line in this format:
<tt>NAME|COMMAND|ARGS</tt>
If the item is clicked in the menu, COMMAND with arguments ARGS will be executed. ARGS can be empty. To insert a separator, add a line which only contains "sep". Lines starting with "#" will be ignored. You can set an unclickable label with the prefix "label:". To insert a nested menu, use the prefix "folder:menu name". Subsequent items will be inserted in this menu, until a line containing an empty folder name is found: "folder:". After that, subsequent items get inserted in the parent menu. That means that more than one level of nested menus can be created.
Example file:
<tt><small>
Show top|gnome-terminal|-x top
sep
# this is a comment
label:SSH connections
# create a folder named "Home"
folder:Home
SSH Ex|gnome-terminal|-x ssh [email protected]
# to mark the end of items inside "Home", specify an empty folder:
folder:
# this item appears in the main menu
SSH Ex|gnome-terminal|-x ssh [email protected]
label:RDP connections
RDP Ex|rdesktop|-T "RDP-Server" -r sound:local 1.2.3.4
</small></tt>"""
def menuitem_response(w, item):
if item == "_about":
show_help_dlg(_ABOUT_TXT)
elif item == "_edit":
edit_config_file()
elif item == "_refresh":
newmenu = build_menu()
ind.set_menu(newmenu)
notify2.init("sshplus")
notify2.Notification(
"SSHplus refreshed", '"%s" has been read! Menu list was refreshed!' % _SETTINGS_FILE
).show()
elif item == "_quit":
sys.exit(0)
elif item == "folder":
pass
else:
print(item)
os.spawnvp(os.P_NOWAIT, item["cmd"], [item["cmd"]] + item["args"])
os.wait3(os.WNOHANG)
def show_help_dlg(msg, error=False):
if error:
dlg_icon = Gtk.MessageType.ERROR
md = Gtk.MessageDialog(
None, 0, dlg_icon, Gtk.ButtonsType.OK, "This is an INFO MessageDialog"
)
edit_config_file()
else:
dlg_icon = Gtk.MessageType.INFO
md = Gtk.MessageDialog(
None, 0, dlg_icon, Gtk.ButtonsType.OK, "This is an INFO MessageDialog"
)
try:
md.set_markup("<b>SSHplus %s</b>" % _VERSION)
md.format_secondary_markup(msg)
md.run()
finally:
md.destroy()
def edit_config_file():
if os.path.isfile(_SETTINGS_FILE) is not True:
os.mknod(_SETTINGS_FILE)
show_help_dlg(
"<b>No <i>.sshplus</i> config file found, we created one for you!\n\nPlease edit the"
" file and reload the config.</b>\n\n%s"
% _EDIT_CONFIG,
error=True,
)
os.spawnvp(os.P_NOWAIT, "xdg-open", ["xdg-open", _SETTINGS_FILE])
os.wait3(os.WNOHANG)
def add_separator(menu):
separator = Gtk.SeparatorMenuItem()
separator.show()
menu.append(separator)
def add_menu_item(menu, caption, item=None):
menu_item = Gtk.MenuItem.new_with_label(caption)
if item:
menu_item.connect("activate", menuitem_response, item)
else:
menu_item.set_sensitive(False)
menu_item.show()
menu.append(menu_item)
return menu_item
def get_sshplusconfig():
if not os.path.exists(_SETTINGS_FILE):
return []
app_list = []
f = open(_SETTINGS_FILE, "r")
try:
for line in f.readlines():
line = line.rstrip()
if not line or line.startswith("#"):
continue
elif line == "sep":
app_list.append("sep")
elif line.startswith("label:"):
app_list.append({"name": "LABEL", "cmd": line[6:], "args": ""})
elif line.startswith("folder:"):
app_list.append({"name": "FOLDER", "cmd": line[7:], "args": ""})
else:
try:
name, cmd, args = line.split("|", 2)
app_list.append(
{
"name": name,
"cmd": cmd,
"args": [n.replace("\n", "") for n in shlex.split(args)],
}
)
except ValueError:
print("The following line has errors and will be ignored:\n%s" % line)
finally:
f.close()
return app_list
def build_menu():
if not os.path.exists(_SETTINGS_FILE):
show_help_dlg(
"<b>ERROR: No .sshmenu file found in home directory</b>\n\n%s" % _ABOUT_TXT, error=True
)
sys.exit(1)
app_list = get_sshplusconfig()
menu = Gtk.Menu()
menus = [menu]
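    # "menus" is used as a stack: the last element is the menu currently being
    # filled. A "folder:<name>" entry pushes a new submenu onto the stack and
    # an empty "folder:" entry pops back to the parent menu.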
for app in app_list:
if app == "sep":
add_separator(menus[-1])
elif app["name"] == "FOLDER" and not app["cmd"]:
if len(menus) > 1:
menus.pop()
elif app["name"] == "FOLDER":
menu_item = add_menu_item(menus[-1], app["cmd"], "folder")
menus.append(Gtk.Menu())
menu_item.set_submenu(menus[-1])
elif app["name"] == "LABEL":
add_menu_item(menus[-1], app["cmd"], None)
else:
add_menu_item(menus[-1], app["name"], app)
# Add SSHplus options folder to the end of the Menu
add_separator(menu)
menu_item = add_menu_item(menus[-1], "SSHplus Options", "folder")
menus.append(Gtk.Menu())
menu_item.set_submenu(menus[-1])
add_menu_item(menus[-1], "Options", None)
add_menu_item(menus[-1], "Edit", "_edit")
add_menu_item(menus[-1], "Refresh", "_refresh")
add_menu_item(menus[-1], "About", "_about")
add_separator(menus[-1])
add_menu_item(menus[-1], "Quit", "_quit")
menus.pop()
return menu
if __name__ == "__main__":
ind = AppIndicator3.Indicator.new(
"SSHplus", "utilities-terminal", AppIndicator3.IndicatorCategory.APPLICATION_STATUS
)
ind.set_label("Launch", "none")
ind.set_status(AppIndicator3.IndicatorStatus.ACTIVE)
if not os.path.exists(_SETTINGS_FILE):
edit_config_file()
appmenu = build_menu()
ind.set_menu(appmenu)
Gtk.main()
| NoXPhasma/sshplus | sshplus.py | Python | gpl-3.0 | 9,010 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-09 11:32
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Company',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('source', django.contrib.postgres.fields.jsonb.JSONField(default={})),
('financial_data', django.contrib.postgres.fields.jsonb.JSONField(default={})),
('stock_data', django.contrib.postgres.fields.jsonb.JSONField(default={})),
],
),
migrations.CreateModel(
name='Exchange',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('source', django.contrib.postgres.fields.jsonb.JSONField(default={})),
],
),
migrations.CreateModel(
name='News',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('source', models.URLField(max_length=255)),
('source_title', models.CharField(max_length=255)),
('companies', models.ManyToManyField(related_name='news', to='intellifin.Company')),
],
),
migrations.AddField(
model_name='company',
name='exchange',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='companies', to='intellifin.Exchange'),
),
]
| cw-intellineers/intellifin | intellifin/migrations/0001_initial.py | Python | gpl-3.0 | 2,024 |
# Copyright 2016 Mycroft AI, Inc.
#
# This file is part of Mycroft Core.
#
# Mycroft Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycroft Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
from threading import Thread
from time import sleep
import random
from adapt.intent import IntentBuilder
from mycroft.messagebus.message import Message
from mycroft.skills.LILACS_core.question_parser import LILACSQuestionParser
from mycroft.skills.LILACS_knowledge.knowledgeservice import KnowledgeService
from mycroft.skills.core import MycroftSkill
from mycroft.util.log import getLogger
__author__ = 'jarbas'
logger = getLogger(__name__)
class LILACSChatbotSkill(MycroftSkill):
# https://github.com/ElliotTheRobot/LILACS-mycroft-core/issues/19
def __init__(self):
super(LILACSChatbotSkill, self).__init__(name="ChatbotSkill")
# initialize your variables
self.reload_skill = False
self.active = True
self.parser = None
self.service = None
self.TIMEOUT = 2
def initialize(self):
# register intents
self.parser = LILACSQuestionParser()
self.service = KnowledgeService(self.emitter)
self.build_intents()
# make thread to keep active
self.make_bump_thread()
def ping(self):
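        # Runs in a background thread: while the skill is active, periodically
        # emit a dummy "bump" utterance so this skill stays at the top of the
        # active skill list and its converse() method keeps being called.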
while True:
i = 0
if self.active:
self.emitter.emit(Message("recognizer_loop:utterance", {"source": "LILACS_chatbot_skill",
"utterances": [
"bump chat to active skill list"]}))
while i < 60 * self.TIMEOUT:
i += 1
sleep(1)
i = 0
def make_bump_thread(self):
timer_thread = Thread(target=self.ping)
timer_thread.setDaemon(True)
timer_thread.start()
def build_intents(self):
# build intents
deactivate_intent = IntentBuilder("DeactivateChatbotIntent") \
.require("deactivateChatBotKeyword").build()
        activate_intent = IntentBuilder("ActivateChatbotIntent") \
.require("activateChatBotKeyword").build()
bump_intent = IntentBuilder("BumpChatBotSkillIntent"). \
require("bumpChatBotKeyword").build()
# register intents
self.register_intent(deactivate_intent, self.handle_deactivate_intent)
self.register_intent(activate_intent, self.handle_activate_intent)
        self.register_intent(bump_intent, self.handle_set_on_top_active_list)
def handle_set_on_top_active_list(self):
# dummy intent just to bump curiosity skill to top of active skill list
# called on a timer in order to always use converse method
pass
def handle_deactivate_intent(self, message):
self.active = False
self.speak_dialog("chatbot_off")
def handle_activate_intent(self, message):
self.active = True
self.speak_dialog("chatbot_on")
def stop(self):
self.handle_deactivate_intent("global stop")
def converse(self, transcript, lang="en-us"):
# parse 1st utterance for entitys
if self.active and "bump chat" not in transcript[0] and "bump curiosity" not in transcript[0]:
nodes, parents, synonims = self.parser.tag_from_dbpedia(transcript[0])
self.log.info("nodes: " + str(nodes))
self.log.info("parents: " + str(parents))
self.log.info("synonims: " + str(synonims))
# get concept net , talk
possible_responses = []
for node in nodes:
try:
dict = self.service.adquire(node, "concept net")
usages = dict["concept net"]["surfaceText"]
for usage in usages:
possible_responses.append(usage.replace("[", "").replace("]", ""))
except:
self.log.info("could not get reply for node " + node)
try:
# say something random
reply = random.choice(possible_responses)
self.speak(reply)
return True
except:
self.log.error("Could not get chatbot response for: " + transcript[0])
                # don't know what to say
                # TODO ask user a question and play dumb
return False
# tell intent skill you did not handle intent
return False
def create_skill():
return LILACSChatbotSkill() | ElliotTheRobot/LILACS-mycroft-core | mycroft/skills/LILACS_chatbot/__init__.py | Python | gpl-3.0 | 5,092 |
# -*- coding: utf-8 -*-
# This file is part of Gertrude.
#
# Gertrude is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Gertrude is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Gertrude; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from builtins import str as text
import traceback
import subprocess
import wx
import wx.lib.filebrowsebutton
from ooffice import *
class DocumentDialog(wx.Dialog):
def __init__(self, parent, modifications):
self.modifications = modifications
self.document_generated = False
# Instead of calling wx.Dialog.__init__ we precreate the dialog
# so we can set an extra style that must be set before
# creation, and then we create the GUI object using the Create
# method.
pre = wx.PreDialog()
pre.SetExtraStyle(wx.DIALOG_EX_CONTEXTHELP)
pre.Create(parent, -1, "Génération de document")
# This next step is the most important, it turns this Python
# object into the real wrapper of the dialog (instead of pre)
# as far as the wxPython extension is concerned.
self.PostCreate(pre)
self.sizer = wx.BoxSizer(wx.VERTICAL)
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(wx.StaticText(self, -1, "Format :"), 0, wx.ALIGN_CENTER_VERTICAL | wx.RIGHT, 5)
if not IsOODocument(modifications.template):
self.format = wx.Choice(self, -1, choices=["Texte"])
elif sys.platform == 'win32':
self.format = wx.Choice(self, -1, choices=["LibreOffice", "PDF"])
else:
self.format = wx.Choice(self, -1, choices=["LibreOffice"])
self.format.SetSelection(0)
self.Bind(wx.EVT_CHOICE, self.onFormat, self.format)
sizer.Add(self.format, wx.ALIGN_CENTER_VERTICAL | wx.RIGHT, 5)
default_output = normalize_filename(modifications.default_output)
self.extension = os.path.splitext(default_output)[-1]
wildcard = "OpenDocument (*%s)|*%s|PDF files (*.pdf)|*.pdf" % (self.extension, self.extension)
self.fbb = wx.lib.filebrowsebutton.FileBrowseButton(self, -1,
size=(600, -1),
labelText="Nom de fichier :",
startDirectory=config.documents_directory,
initialValue=os.path.join(config.documents_directory, default_output),
fileMask=wildcard,
fileMode=wx.SAVE)
sizer.Add(self.fbb, 0, wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.LEFT, 5)
self.sizer.Add(sizer, 0, wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
self.gauge = wx.Gauge(self, -1, size=(-1, 10))
self.gauge.SetRange(100)
self.sizer.Add(self.gauge, 0, wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.RIGHT | wx.LEFT | wx.TOP, 5)
line = wx.StaticLine(self, -1, size=(20, -1), style=wx.LI_HORIZONTAL)
self.sizer.Add(line, 0, wx.GROW | wx.ALIGN_CENTER_VERTICAL | wx.TOP | wx.BOTTOM, 5)
sizer = wx.BoxSizer(wx.HORIZONTAL)
self.sauver_ouvrir = wx.Button(self, -1, "Sauver et ouvrir")
self.sauver_ouvrir.SetDefault()
self.Bind(wx.EVT_BUTTON, self.OnSauverOuvrir, self.sauver_ouvrir)
sizer.Add(self.sauver_ouvrir, 0, wx.LEFT | wx.RIGHT, 5)
self.sauver = wx.Button(self, -1, "Sauver")
self.Bind(wx.EVT_BUTTON, self.OnSauver, self.sauver)
sizer.Add(self.sauver, 0, wx.RIGHT, 5)
if modifications.multi:
button = wx.Button(self, -1, "Sauver individuellement")
self.Bind(wx.EVT_BUTTON, self.OnSauverUnitaire, button)
sizer.Add(button, 0, wx.RIGHT, 5)
if modifications.email:
self.sauver_envoyer = wx.Button(self, -1, "Sauver et envoyer par email")
self.Bind(wx.EVT_BUTTON, self.OnSauverEnvoyer, self.sauver_envoyer)
sizer.Add(self.sauver_envoyer, 0, wx.RIGHT, 5)
if modifications.multi is False and not modifications.email_to:
self.sauver_envoyer.Disable()
if database.creche.caf_email:
self.sauver_envoyer = wx.Button(self, -1, "Sauver et envoyer par email à la CAF")
self.Bind(wx.EVT_BUTTON, self.OnSauverEnvoyerCAF, self.sauver_envoyer)
sizer.Add(self.sauver_envoyer, 0, wx.LEFT | wx.RIGHT, 5)
# btnsizer.Add(self.ok)
btn = wx.Button(self, wx.ID_CANCEL)
sizer.Add(btn, 0, wx.RIGHT, 5)
self.sizer.Add(sizer, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5)
self.SetSizer(self.sizer)
self.sizer.Fit(self)
self.CenterOnScreen()
def onFormat(self, _):
filename = os.path.splitext(self.fbb.GetValue())[0]
if self.format.GetSelection() == 0:
self.fbb.SetValue(filename + self.extension, None)
else:
self.fbb.SetValue(filename + ".pdf", None)
def Sauver(self):
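        # Generate the document(s) from the template: if a .pdf file name was
        # chosen, the OpenDocument file is generated first and then converted
        # to PDF; with "multi" modifications one file per entry is produced.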
self.fbb.Disable()
self.sauver.Disable()
if self.sauver_ouvrir:
self.sauver_ouvrir.Disable()
self.filename = self.fbb.GetValue()
f, e = os.path.splitext(self.filename)
if e == ".pdf":
self.pdf = True
self.oo_filename = f + self.extension
else:
self.pdf = False
self.oo_filename = self.filename
config.documents_directory = os.path.dirname(self.filename)
dlg = None
try:
if self.modifications.multi is not False:
errors = {}
simple_modifications = self.modifications.get_simple_modifications(self.oo_filename)
for i, (filename, modifs) in enumerate(simple_modifications):
self.gauge.SetValue((100 * i) / len(simple_modifications))
errors.update(GenerateDocument(modifs, filename=filename))
if self.pdf:
f, e = os.path.splitext(filename)
convert_to_pdf(filename, f + ".pdf")
os.remove(filename)
else:
self.filename = self.filename.replace(" <prenom> <nom>", "")
self.oo_filename = self.oo_filename.replace(" <prenom> <nom>", "")
errors = GenerateDocument(self.modifications, filename=self.oo_filename, gauge=self.gauge)
if self.pdf:
convert_to_pdf(self.oo_filename, self.filename)
os.remove(self.oo_filename)
self.document_generated = True
if errors:
message = "Document %s généré avec des erreurs :\n" % self.filename
for label in errors.keys():
message += '\n' + label + ' :\n '
message += '\n '.join(errors[label])
dlg = wx.MessageDialog(self, message, 'Message', wx.OK | wx.ICON_WARNING)
except IOError:
print(sys.exc_info())
dlg = wx.MessageDialog(self, "Impossible de sauver le document. Peut-être est-il déjà ouvert ?", 'Erreur',
wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()
return
except Exception as e:
info = sys.exc_info()
message = ' [type: %s value: %s traceback: %s]' % (info[0], info[1], traceback.extract_tb(info[2]))
dlg = wx.MessageDialog(self, message, 'Erreur', wx.OK | wx.ICON_WARNING)
if dlg:
dlg.ShowModal()
dlg.Destroy()
self.EndModal(wx.ID_OK)
def OnSauver(self, _):
self.modifications.multi = False
self.Sauver()
def OnSauverOuvrir(self, event):
self.OnSauver(event)
if self.document_generated:
if self.filename.endswith(".pdf"):
StartAcrobatReader(self.filename)
else:
StartLibreOffice(self.filename)
def OnSauverUnitaire(self, _):
self.Sauver()
def OnSauverEnvoyer(self, event):
self.OnSauverUnitaire(event)
if self.document_generated:
if self.modifications.multi is not False:
simple_modifications = self.modifications.get_simple_modifications(self.oo_filename)
emails = '\n'.join(
[" - %s (%s)" % (modifs.email_subject, ", ".join(modifs.email_to)) for filename, modifs in
simple_modifications])
if len(emails) > 1000:
emails = emails[:1000] + "\n..."
dlg = wx.MessageDialog(self, "Ces emails seront envoyés :\n" + emails, 'Confirmation',
wx.OK | wx.CANCEL | wx.ICON_WARNING)
response = dlg.ShowModal()
dlg.Destroy()
if response != wx.ID_OK:
return
for filename, modifs in simple_modifications:
if self.pdf:
oo_filename = filename
filename, e = os.path.splitext(oo_filename)
filename += ".pdf"
try:
SendDocument(filename, modifs)
except Exception as e:
dlg = wx.MessageDialog(self, "Impossible d'envoyer le document %s\n%r" % (filename, e),
'Erreur', wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()
else:
try:
SendDocument(self.filename, self.modifications)
except Exception as e:
dlg = wx.MessageDialog(self, "Impossible d'envoyer le document %s\n%r" % (self.filename, e),
'Erreur', wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()
def OnSauverEnvoyerCAF(self, event):
self.OnSauver(event)
if self.document_generated:
try:
root, ext = os.path.splitext(self.modifications.introduction_filename)
introduction_filename = root + " CAF" + ext
SendDocument(self.filename, self.modifications, to=[database.creche.caf_email], introduction_filename=GetTemplateFile(introduction_filename))
except Exception as e:
dlg = wx.MessageDialog(self, "Impossible d'envoyer le document %s\n%r" % (self.filename, e), "Erreur", wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()
def StartLibreOffice(filename):
if sys.platform == 'win32':
filename = "".join(["file:", urllib.pathname2url(os.path.abspath(filename.encode("utf-8")))])
# print filename
try:
StarDesktop, objServiceManager, core_reflection = getOOoContext()
StarDesktop.LoadComponentFromURL(filename, "_blank", 0, MakePropertyValues(objServiceManager, [
["ReadOnly", False],
["Hidden", False]]))
except Exception as e:
print("Exception ouverture LibreOffice", e)
dlg = wx.MessageDialog(None, "Impossible d'ouvrir le document\n%r" % e, "Erreur", wx.OK|wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()
else:
paths = []
if sys.platform == "darwin":
paths.append("/Applications/LibreOffice.app/Contents/MacOS/soffice")
paths.append("/Applications/OpenOffice.app/Contents/MacOS/soffice")
else:
paths.append("/usr/bin/libreoffice")
paths.append("ooffice")
for path in paths:
try:
print(path, filename)
subprocess.Popen([path, filename])
return
except Exception as e:
print(e)
pass
dlg = wx.MessageDialog(None, "Impossible de lancer OpenOffice / LibreOffice", 'Erreur', wx.OK|wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()
DDE_ACROBAT_STRINGS = ["AcroviewR15", "AcroviewA15", "AcroviewR12", "AcroviewA12", "AcroviewR11", "AcroviewA11",
"AcroviewR10", "AcroviewA10", "acroview"]
dde_server = None
def StartAcrobatReader(filename):
global dde_server
import win32api
import win32ui
import dde
filename = str(os.path.abspath(filename))
path, name = os.path.split(filename)
reader = win32api.FindExecutable(name, path)
os.spawnl(os.P_NOWAIT, reader[1], " ")
for t in range(10):
time.sleep(1)
for acrobat in DDE_ACROBAT_STRINGS:
try:
if not dde_server:
dde_server = dde.CreateServer()
dde_server.Create('Gertrude')
c = dde.CreateConversation(dde_server)
c.ConnectTo(acrobat, 'control')
c.Exec('[DocOpen("%s")]' % (filename,))
return
except Exception as e:
pass
print("Impossible de lancer acrobat reader ; prochain essai dans 1s ...", e)
dlg = wx.MessageDialog(None, "Impossible d'ouvrir le document", 'Erreur', wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
dlg.Destroy()
| studio1247/gertrude | document_dialog.py | Python | gpl-3.0 | 13,966 |
from ..rerequest import TemplateRequest
init_req = TemplateRequest(
re = r'(http://)?(www\.)?(?P<domain>ur(play)?)\.se/(?P<req_url>.+)',
encode_vars = lambda v: { 'req_url': 'http://%(domain)s.se/%(req_url)s' % v } )
hls = { 'title': 'UR-play', 'url': 'http://urplay.se/', 'feed_url': 'http://urplay.se/rss',
'items': [init_req,
TemplateRequest(
re = r'file_html5":\s?"(?P<final_url>[^"]+)".*?"subtitles":\s?"(?P<subtitles>[^",]*)',
encode_vars = lambda v: { 'final_url': ('http://130.242.59.75/%(final_url)s/playlist.m3u8' % v).replace('\\', ''),
'suffix-hint': 'mp4',
'subtitles': v.get('subtitles', '').replace('\\', '') % v } )] }
rtmp = { 'items': [init_req,
TemplateRequest(
re = r'file_flash":\s?"(?P<final_url>[^"]+\.(?P<ext>mp[34]))".*?"subtitles":\s?"(?P<subtitles>[^",]*)',
encode_vars = lambda v: { 'final_url': ('rtmp://130.242.59.75/ondemand playpath=%(ext)s:/%(final_url)s app=ondemand' % v).replace('\\', ''),
'suffix-hint': 'flv',
'rtmpdump-realtime': True,
'subtitles': v.get('subtitles', '').replace('\\', '') % v } )] }
services = [hls, rtmp] | jackuess/pirateplay.se | lib/pirateplay/lib/services/ur.py | Python | gpl-3.0 | 1,161 |
class Object:
def __init__(self, name):
self._name = name
@property
def name(self):
return self._name
| gnovis/swift | swift_fca/swift_core/object_fca.py | Python | gpl-3.0 | 131 |
from planarprocess import *
from gds_helpers import *
from itertools import cycle
xmin, xmax = -5, 5
layers = gds_cross_section('mypmos.gds', [(0,xmin), (0, xmax)], 'gdsmap.map')
# Layer names returned for this cross-section:
# ['P-Active-Well', 'Active-Cut', 'N-Well', 'Metal-2', 'Metal-1', 'P-Select',
#  'N-Select', 'Transistor-Poly', 'Via1']
wafer = Wafer(1., 5., 0, xmax - xmin)
# N-Well
nw = layers['N-Well']
wafer.implant(.7, nw, outdiffusion=5., label='N-Well')
# Field and gate oxides
de = layers['P-Active-Well']
# TODO: channel stop under field oxide
fox = wafer.grow(.5, wafer.blank_mask().difference(de),
y_offset=-.2, outdiffusion=.1)
gox = wafer.grow(.05, de, outdiffusion=.05, base=wafer.wells,
label='Gate oxide')
# Gate poly and N+/P+ implants
gp = layers['Transistor-Poly']
poly = wafer.grow(.25, gp, outdiffusion=.25, label='Gate poly')
np = layers['N-Select'].intersection(
layers['P-Active-Well']).difference(gp)
nplus = wafer.implant(.1, np, outdiffusion=.1, target=wafer.wells, source=gox,
label='N+')
pp = layers['P-Select'].intersection(
layers['P-Active-Well']).difference(gp)
pplus = wafer.implant(.1, pp, outdiffusion=.1, target=wafer.wells, source=gox,
label='P+')
# Multi-level dielectric and contacts
mld_thickness = .5
mld = wafer.grow(mld_thickness, wafer.blank_mask(), outdiffusion=.1)
ct = layers['Active-Cut']
contact = wafer.grow(-mld_thickness*1.1, ct, consuming=[mld, gox], base=wafer.air,
outdiffusion=.05, outdiffusion_vertices=3)
# Metals and vias
m1 = layers['Metal-1']
metal1 = wafer.grow(.6, m1, outdiffusion=.1, label='Metal-1')
ild_thickness = 1.2
ild1 = wafer.grow(ild_thickness, wafer.blank_mask(), outdiffusion=.1)
wafer.planarize()
v1 = layers['Via1']
via1 = wafer.grow(-ild_thickness*1.1, v1, consuming=[ild1], base=wafer.air,
outdiffusion=.05, outdiffusion_vertices=3)
m2 = layers['Metal-2']
metal2 = wafer.grow(1., m2, outdiffusion=.1, label='Metal-2')
# Presentation
custom_style = {s: {} for s in wafer.solids}
for solid, color in {
fox: '.4', gox: 'r', poly: 'g', mld: 'k',
ild1: '.3', contact: '.5', via1: '.5',
metal1: '.7', metal2: '.8'}.items():
custom_style[solid].update(dict(facecolor=color, edgecolor='k'))
for solid in wafer.solids:
if solid not in wafer.wells:
custom_style[solid].update(dict(hatch=None, fill=True))
base_hatches = r'\/' # r'/\|-+xoO.*'
hatches = cycle(list(base_hatches) + [h1+h2 for h1 in base_hatches
for h2 in base_hatches])
colors = cycle('krgbcmy')
plot_geometryref(wafer.air, hatch='.', fill=False, linewidth=0, color=(.9,.9,.9),
zorder=-100)
zorder = -99
for solid in wafer.solids:
style = dict(hatch=next(hatches), fill=False,
edgecolor=next(colors), zorder=zorder)
zorder += 1
style.update(custom_style.get(solid, {}))
plot_geometryref(solid, **style)
pyplot.legend()
pyplot.savefig('mypmos-x.png')
pyplot.show()
| ignamv/PlanarProcess | test.py | Python | gpl-3.0 | 2,927 |
from setuptools import setup, find_packages
setup(
name = "CyprjToMakefile",
version = "0.1",
author = "Simon Marchi",
author_email = "[email protected]",
description = "Generate Makefiles from Cypress cyprj files.",
license = "GPLv3",
url = "https://github.com/simark/cyprj-to-makefile",
packages = find_packages(),
install_requires = ['jinja2'],
package_data = {
'cyprj_to_makefile': ['Makefile.tpl'],
},
entry_points = {
'console_scripts': [
'cyprj-to-makefile = cyprj_to_makefile.cyprj_to_makefile:main',
],
},
)
| simark/cyprj-to-makefile | setup.py | Python | gpl-3.0 | 576 |
from .submaker import Submaker
from inception.tools.signapk import SignApk
import shutil
import os
from inception.constants import InceptionConstants
class UpdatezipSubmaker(Submaker):
def make(self, updatePkgDir):
keys_name = self.getValue("keys")
signingKeys = self.getMaker().getConfig().getKeyConfig(keys_name) if keys_name else None
updateBinaryKey, updateBinary = self.getTargetBinary("update-binary")
assert updateBinary, "%s is not set" % updateBinaryKey
if keys_name:
assert signingKeys, "update.keys is '%s' but __config__.host.keys.%s is not set" % (keys_name, keys_name)
signingKeys = signingKeys["private"], signingKeys["public"]
shutil.copy(updateBinary, os.path.join(updatePkgDir, "META-INF/com/google/android/update-binary"))
updateZipPath = updatePkgDir + "/../"
updateZipPath += "update_unsigned" if signingKeys else "update"
shutil.make_archive(updateZipPath, "zip", updatePkgDir)
updateZipPath += ".zip"
if signingKeys:
javaKey, javaPath = self.getHostBinary("java")
signApkKey, signApkPath = self.getHostBinary("signapk")
assert signApkPath, "%s is not set" % signApkKey
assert os.path.exists(signApkPath), "'%s' from %s does not exist" % (signApkPath, signApkKey)
assert os.path.exists(javaPath), "'%s' from %s does not exist" % (javaPath, javaKey)
signApk = SignApk(javaPath, signApkPath)
targetPath = updatePkgDir + "/../" + InceptionConstants.OUT_NAME_UPDATE
signApk.sign(updateZipPath, targetPath, signingKeys[0], signingKeys[1])
updateZipPath = targetPath
return updateZipPath
| tgalal/inception | inception/argparsers/makers/submakers/submaker_updatezip.py | Python | gpl-3.0 | 1,747 |
#!/bin/env python
# -*- coding: utf-8 -*-
PYS_SERVICE_MOD_PRE='pys_' # prefix for service module names
PYS_HEAD_LEN=12 # message header length (bytes)
PYS_MAX_BODY_LEN=10485760 # maximum message body length (10 MB)
| dungeonsnd/test-code | dev_examples/pyserver/src/util/pys_define.py | Python | gpl-3.0 | 184 |
# -*- coding: utf-8 -*-
import hashlib, time
from django import forms
from django.contrib.auth.models import User, Group
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from poetry.models import Poem, Theme
from user.models import Contributor
my_default_errors = {
'required': 'Еңгізуге міндетті параметр'
}
error_messages = {'required': 'Толтыруға маңызды параметр'}
class UserAuthenticateForm(forms.ModelForm):
email = forms.EmailField(required=True, error_messages=error_messages)
password = forms.CharField(
required=True,
label='Құпиясөз',
error_messages=error_messages,
widget=forms.PasswordInput)
class Meta:
model = User
fields = ('email', 'password')
labels = {
'email': 'Email',
'password': 'Құпиясөз',
}
class UserCreateForm(UserCreationForm):
email = forms.EmailField(required=True, error_messages=error_messages)
full_name = forms.CharField(required=True, label='Есіміңіз', error_messages=error_messages)
password1 = forms.CharField(required=True, label='Құпиясөз', widget=forms.PasswordInput,
error_messages=error_messages)
password2 = forms.CharField(required=True, label='Құпиясөзді қайталаңыз', widget=forms.PasswordInput,
error_messages=error_messages)
class Meta:
model = User
fields = ('full_name', 'email', 'password1', 'password2')
def save(self, commit=True):
user = super(UserCreateForm, self).save(commit=False)
user.email = self.cleaned_data["email"]
user.username = user.email
user.is_active = 0
hash = '%s%s' % (user.email, time.time())
if commit:
user.save()
user.contributor = Contributor(user_id=user, full_name=self.cleaned_data["full_name"],
activation_code=hashlib.md5(hash.encode('utf-8')).hexdigest())
user.contributor.save()
group = self.get_user_group()
user.groups.add(group)
else:
pass
return user
def get_user_group(self):
return Group.objects.get(name='site-users')
def clean_email(self):
email = self.cleaned_data.get('email')
user = User.objects.filter(email=email).first()
if user:
raise forms.ValidationError("Бұл email-мен колднушы тіркелген.")
return email
def clean_password2(self):
password1 = self.cleaned_data.get('password1')
password2 = self.cleaned_data.get('password2')
if not password2:
raise forms.ValidationError("Құпиясөзді растаңыз")
if password1 != password2:
raise forms.ValidationError("Құпиясөздер бір біріне сәйкес емес. Қайта теріңіз")
if len(password2) < 6:
raise forms.ValidationError('Кемінде 6 символ')
return super(UserCreateForm, self).clean_password2()
class UserEditForm(forms.ModelForm):
text_status = forms.CharField(
widget=forms.Textarea(attrs={'rows': 5, 'cols': 100}),
label='Сайттағы статусыңыз (250 символ)',
error_messages=error_messages)
class Meta:
model = Contributor
fields = ('full_name', 'text_status')
labels = {
'full_name': 'Есіміңіз',
'text_status': 'Сайттағы статусыңыз (250 символ)',
}
error_messages = {
'full_name': error_messages
}
class OfferPoemFrom(forms.ModelForm):
theme = forms.MultipleChoiceField(
label="Тақырып",
widget=forms.SelectMultiple,
error_messages=error_messages,
choices=Theme.objects.values_list('id', 'name').all()
)
class Meta:
model = Poem
fields = ('author', 'title', 'content', 'theme',)
labels = {
'author': 'Автор',
'title': 'Шығарма аты',
'content': 'Текст',
}
error_messages = {
'author': error_messages,
'title': error_messages,
'content': error_messages,
'theme': error_messages
}
| ra1ski/poetrydb | user/forms.py | Python | gpl-3.0 | 4,469 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-29 13:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0054_add_field_user_to_productionform'),
]
operations = [
migrations.AddField(
model_name='applicationform',
name='requires_development',
field=models.BooleanField(default=False, verbose_name='requires_development'),
),
]
| efornal/pulmo | app/migrations/0055_applicationform_requires_development.py | Python | gpl-3.0 | 523 |
__all__ = ["speedtest_exceptions", "speedtest"]
from . import sendtest
| Awesomecase/Speedtest | speedtest_sendtest/__init__.py | Python | gpl-3.0 | 71 |
import os
import re
import sys
"""
* Perform initial configuration to ensure that the server is set up to work with Burton's format
sudo chown -R ubuntu:ubuntu /var/www
mkdir -p /var/www/default/public_html
mv /var/www/html/index.html /var/www/default/public_html # Ubuntu >=14.04
mv /var/www/index.html /var/www/default/public_html # Ubuntu <14.04
rm -rf /var/www/html
sudo vim /etc/apache2/sites-available/000-default.conf # Ubuntu >=14.04
sudo vim /etc/apache2/sites-available/default # Ubuntu <14.04
sudo a2enmod ssl
sudo service apache2 restart
* Enable / disable .htaccess for a site
* PHP configuration
"""
environment = ''
def main(env):
global environment
environment = env
while True:
print("\nConfigure Websites\n")
print("Please select an operation:")
print(" 1. Restart Apache")
print(" 2. Add a new website")
print(" 3. Add SSL to website")
print(" 0. Go Back")
print(" -. Exit")
operation = input(environment.prompt)
if operation == '0':
return True
elif operation == '-':
sys.exit()
elif operation == '1':
restart_apache()
elif operation == '2':
add_website()
elif operation == '3':
add_ssl()
else:
print("Invalid input.")
def restart_apache():
print("\nAttempting to restart Apache:")
# TODO: Print an error when the user does not have permissions to perform the action.
result = os.system("sudo service apache2 restart")
print(result)
return True
def add_website():
global environment
print('\nAdd website.\n')
input_file = open('./example-files/apache-site', 'r')
input_file_text = input_file.read()
input_file.close()
site_name = input('Website name (without www or http)' + environment.prompt)
new_filename = '/etc/apache2/sites-available/%s.conf' % (site_name,)
tmp_filename = '/tmp/%s.conf' % (site_name,)
# TODO: Check that site_name is legal for both a domain name and a filename.
while os.path.isfile(new_filename):
print('Site exists! Please choose another.')
site_name = input('Website name (without www or http)' + environment.prompt)
new_filename = '/etc/apache2/sites-available/%s.conf' % (site_name,)
tmp_filename = '/tmp/%s.conf' % (site_name,)
new_config = re.sub('SITE', site_name, input_file_text)
try:
output_file = open(tmp_filename, 'w')
output_file.write(new_config)
output_file.close()
tmp_move = os.system("sudo mv %s %s" % (tmp_filename, new_filename))
    except PermissionError as e:
        print('\n\nError!')
        print('The current user does not have permission to perform this action.')
        #print('Please run Burton with elevated permissions to resolve this error.\n\n')
        return False
if tmp_move != 0:
print('\n\nError!')
print('The current user does not have permission to perform this action.')
#print('Please run Burton with elevated permissions to resolve this error.\n\n')
current_user = str(os.getuid())
result = os.system('sudo mkdir -p /var/www/%s/public_html/' % (site_name,))
result = os.system('sudo mkdir -p /var/www/%s/logs/' % (site_name,))
    result = os.system('sudo chown -R %s:%s /var/www/%s/' % (current_user, current_user, site_name))
result = os.system('sudo a2ensite %s.conf' % (site_name,))
restart_apache()
return True
def add_ssl():
global environment
print("\nAdd SSL to website.\n")
print("Please enter the URL of the website.\n")
site_name = input(environment.prompt)
print("Is this a wildcard certificate? (y/N)\n")
wildcard = input(environment.prompt)
if wildcard.lower()=='y':
print("Generating wildcard cert for *.%s" % (site_name,))
wildcard = '*.'
else:
print("Generating cert for %s" % (site_name,))
wildcard = ''
# http://serverfault.com/questions/649990/non-interactive-creation-of-ssl-certificate-requests
#command_template = 'openssl req -new -newkey rsa:2048 -nodes -sha256 -keyout foobar.com.key -out foobar.com.csr -subj "/C=US/ST=New foobar/L=foobar/O=foobar foobar, Inc./CN=foobar.com/[email protected]"'
command_template = "openssl req -new -newkey rsa:2048 -nodes -sha256 -keyout %s.key -out %s.csr -subj \"/CN=%s%s\""
print(command_template % (site_name, site_name, wildcard, site_name))
return True
| dotancohen/burton | configure_websites.py | Python | gpl-3.0 | 4,169 |
from django.conf.urls.defaults import *
urlpatterns = patterns('',
(r'^(\d+)/$', 'onpsx.gallery.views.index'),
(r'^$', 'onpsx.gallery.views.index'),
)
| chrizel/onpsx | src/onpsx/gallery/urls.py | Python | gpl-3.0 | 160 |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import io
import os
import six
import pytest
from pytest_pootle.factories import (
LanguageDBFactory, ProjectDBFactory, StoreDBFactory,
TranslationProjectFactory)
from pytest_pootle.utils import update_store
from translate.storage.factory import getclass
from django.db.models import Max
from django.core.exceptions import ValidationError
from django.core.files.uploadedfile import SimpleUploadedFile
from pootle.core.delegate import (
config, format_classes, format_diffs, formats)
from pootle.core.models import Revision
from pootle.core.delegate import deserializers, serializers
from pootle.core.url_helpers import to_tp_relative_path
from pootle.core.plugin import provider
from pootle.core.serializers import Serializer, Deserializer
from pootle_app.models import Directory
from pootle_config.exceptions import ConfigurationError
from pootle_format.exceptions import UnrecognizedFiletype
from pootle_format.formats.po import PoStoreSyncer
from pootle_format.models import Format
from pootle_language.models import Language
from pootle_project.models import Project
from pootle_statistics.models import (
SubmissionFields, SubmissionTypes)
from pootle_store.constants import (
NEW, OBSOLETE, PARSED, POOTLE_WINS, TRANSLATED)
from pootle_store.diff import DiffableStore, StoreDiff
from pootle_store.models import Store
from pootle_store.util import parse_pootle_revision
from pootle_translationproject.models import TranslationProject
def _update_from_upload_file(store, update_file,
content_type="text/x-gettext-translation",
user=None, submission_type=None):
with open(update_file, "r") as f:
upload = SimpleUploadedFile(os.path.basename(update_file),
f.read(),
content_type)
test_store = getclass(upload)(upload.read())
store_revision = parse_pootle_revision(test_store)
store.update(test_store, store_revision=store_revision,
user=user, submission_type=submission_type)
def _store_as_string(store):
ttk = store.syncer.convert(store.syncer.file_class)
if hasattr(ttk, "updateheader"):
# FIXME We need those headers on import
# However some formats just don't support setting metadata
ttk.updateheader(
add=True, X_Pootle_Path=store.pootle_path)
ttk.updateheader(
add=True, X_Pootle_Revision=store.get_max_unit_revision())
return str(ttk)
@pytest.mark.django_db
def test_delete_mark_obsolete(project0_nongnu, project0, store0):
"""Tests that the in-DB Store and Directory are marked as obsolete
after the on-disk file ceased to exist.
Refs. #269.
"""
tp = TranslationProjectFactory(
project=project0, language=LanguageDBFactory())
store = StoreDBFactory(
translation_project=tp,
parent=tp.directory)
store.update(store.deserialize(store0.serialize()))
store.sync()
pootle_path = store.pootle_path
# Remove on-disk file
os.remove(store.file.path)
# Update stores by rescanning TP
tp.scan_files()
# Now files that ceased to exist should be marked as obsolete
updated_store = Store.objects.get(pootle_path=pootle_path)
assert updated_store.obsolete
# The units they contained are obsolete too
assert not updated_store.units.exists()
assert updated_store.unit_set.filter(state=OBSOLETE).exists()
obs_unit = updated_store.unit_set.filter(state=OBSOLETE).first()
    assert obs_unit.submission_set.count() == 0
@pytest.mark.django_db
def test_sync(project0_nongnu, project0, store0):
"""Tests that the new on-disk file is created after sync for existing
in-DB Store if the corresponding on-disk file ceased to exist.
"""
tp = TranslationProjectFactory(
project=project0, language=LanguageDBFactory())
store = StoreDBFactory(
translation_project=tp,
parent=tp.directory)
store.update(store.deserialize(store0.serialize()))
assert not store.file.exists()
store.sync()
assert store.file.exists()
os.remove(store.file.path)
assert not store.file.exists()
store.sync()
assert store.file.exists()
@pytest.mark.django_db
def test_update_from_ts(store0, test_fs, member):
store0.parsed = True
orig_units = store0.units.count()
existing_created_at = store0.units.aggregate(
Max("creation_time"))["creation_time__max"]
existing_mtime = store0.units.aggregate(
Max("mtime"))["mtime__max"]
old_revision = store0.data.max_unit_revision
with test_fs.open(['data', 'ts', 'tutorial', 'en', 'tutorial.ts']) as f:
store = getclass(f)(f.read())
store0.update(
store,
submission_type=SubmissionTypes.UPLOAD,
user=member)
assert not store0.units[orig_units].hasplural()
unit = store0.units[orig_units + 1]
assert unit.submission_set.count() == 0
assert unit.hasplural()
assert unit.creation_time >= existing_created_at
assert unit.creation_time >= existing_mtime
unit_source = unit.unit_source
assert unit_source.created_with == SubmissionTypes.UPLOAD
assert unit_source.created_by == member
assert unit.change.changed_with == SubmissionTypes.UPLOAD
assert unit.change.submitted_by == member
assert unit.change.submitted_on >= unit.creation_time
assert unit.change.reviewed_by is None
assert unit.change.reviewed_on is None
assert unit.revision > old_revision
@pytest.mark.django_db
def test_update_ts_plurals(store_po, test_fs, ts):
project = store_po.translation_project.project
filetype_tool = project.filetype_tool
project.filetypes.add(ts)
filetype_tool.set_store_filetype(store_po, ts)
with test_fs.open(['data', 'ts', 'add_plurals.ts']) as f:
file_store = getclass(f)(f.read())
store_po.update(file_store)
unit = store_po.units[0]
assert unit.hasplural()
assert unit.submission_set.count() == 0
with test_fs.open(['data', 'ts', 'update_plurals.ts']) as f:
file_store = getclass(f)(f.read())
store_po.update(file_store)
unit = store_po.units[0]
assert unit.hasplural()
assert unit.submission_set.count() == 1
update_sub = unit.submission_set.first()
assert update_sub.revision == unit.revision
assert update_sub.creation_time == unit.change.submitted_on
assert update_sub.submitter == unit.change.submitted_by
assert update_sub.new_value == unit.target
assert update_sub.type == unit.change.changed_with
assert update_sub.field == SubmissionFields.TARGET
# this fails 8(
# from pootle.core.utils.multistring import unparse_multistring
# assert (
# unparse_multistring(update_sub.new_value)
# == unparse_multistring(unit.target))
@pytest.mark.django_db
def test_update_with_non_ascii(store0, test_fs):
store0.state = PARSED
orig_units = store0.units.count()
path = 'data', 'po', 'tutorial', 'en', 'tutorial_non_ascii.po'
with test_fs.open(path) as f:
store = getclass(f)(f.read())
store0.update(store)
last_unit = store0.units[orig_units]
updated_target = "Hèḽḽě, ŵôrḽḓ"
assert last_unit.target == updated_target
assert last_unit.submission_set.count() == 0
# last_unit.target = "foo"
# last_unit.save()
# this should now have a submission with the old target
# but it fails
# assert last_unit.submission_set.count() == 1
# update_sub = last_unit.submission_set.first()
# assert update_sub.old_value == updated_target
# assert update_sub.new_value == "foo"
@pytest.mark.django_db
def test_update_unit_order(project0_nongnu, ordered_po, ordered_update_ttk):
"""Tests unit order after a specific update.
"""
# Set last sync revision
ordered_po.sync()
assert ordered_po.file.exists()
expected_unit_list = ['1->2', '2->4', '3->3', '4->5']
updated_unit_list = [unit.unitid for unit in ordered_po.units]
assert expected_unit_list == updated_unit_list
original_revision = ordered_po.get_max_unit_revision()
ordered_po.update(
ordered_update_ttk,
store_revision=original_revision)
expected_unit_list = [
'X->1', '1->2', '3->3', '2->4',
'4->5', 'X->6', 'X->7', 'X->8']
updated_unit_list = [unit.unitid for unit in ordered_po.units]
assert expected_unit_list == updated_unit_list
unit = ordered_po.units.first()
assert unit.revision > original_revision
assert unit.submission_set.count() == 0
@pytest.mark.django_db
def test_update_save_changed_units(project0_nongnu, store0, member, system):
"""Tests that any update saves changed units only.
"""
# not sure if this is testing anything
store = store0
# Set last sync revision
store.sync()
store.update(store.file.store)
unit_list = list(store.units)
store.file = 'tutorial/ru/update_save_changed_units_updated.po'
store.update(store.file.store, user=member)
updated_unit_list = list(store.units)
# nothing changed
for index in range(0, len(unit_list)):
unit = unit_list[index]
updated_unit = updated_unit_list[index]
assert unit.revision == updated_unit.revision
assert unit.mtime == updated_unit.mtime
assert unit.target == updated_unit.target
@pytest.mark.django_db
def test_update_set_last_sync_revision(project0_nongnu, tp0, store0, test_fs):
"""Tests setting last_sync_revision after store creation.
"""
unit = store0.units.first()
unit.target = "UPDATED TARGET"
unit.save()
store0.sync()
# Store is already parsed and store.last_sync_revision should be equal to
# max unit revision
assert store0.last_sync_revision == store0.get_max_unit_revision()
# store.last_sync_revision is not changed after empty update
saved_last_sync_revision = store0.last_sync_revision
store0.updater.update_from_disk()
assert store0.last_sync_revision == saved_last_sync_revision
orig = str(store0)
update_file = test_fs.open(
"data/po/tutorial/ru/update_set_last_sync_revision_updated.po",
"r")
with update_file as sourcef:
with open(store0.file.path, "wb") as targetf:
targetf.write(sourcef.read())
store0 = Store.objects.get(pk=store0.pk)
# any non-empty update sets last_sync_revision to next global revision
next_revision = Revision.get() + 1
store0.updater.update_from_disk()
assert store0.last_sync_revision == next_revision
# store.last_sync_revision is not changed after empty update (even if it
# has unsynced units)
item_index = 0
next_unit_revision = Revision.get() + 1
dbunit = store0.units.first()
dbunit.target = "ANOTHER DB TARGET UPDATE"
dbunit.save()
assert dbunit.revision == next_unit_revision
store0.updater.update_from_disk()
assert store0.last_sync_revision == next_revision
# Non-empty update sets store.last_sync_revision to next global revision
# (even the store has unsynced units). There is only one unsynced unit in
# this case so its revision should be set next to store.last_sync_revision
next_revision = Revision.get() + 1
with open(store0.file.path, "wb") as targetf:
targetf.write(orig)
store0 = Store.objects.get(pk=store0.pk)
store0.updater.update_from_disk()
assert store0.last_sync_revision == next_revision
# Get unsynced unit in DB. Its revision should be greater
# than store.last_sync_revision to allow to keep this change during
# update from a file
dbunit = store0.units[item_index]
assert dbunit.revision == store0.last_sync_revision + 1
@pytest.mark.django_db
def test_update_upload_defaults(store0, system):
store0.state = PARSED
unit = store0.units.first()
original_revision = unit.revision
last_sub_pk = unit.submission_set.order_by(
"id").values_list("id", flat=True).last() or 0
update_store(
store0,
[(unit.source, "%s UPDATED" % unit.source, False)],
store_revision=Revision.get() + 1)
unit = store0.units[0]
assert unit.change.submitted_by == system
assert unit.change.submitted_on >= unit.creation_time
assert unit.change.submitted_by == system
assert (
unit.submission_set.last().type
== SubmissionTypes.SYSTEM)
assert unit.revision > original_revision
# there should be 2 new subs - state_change and target_change
new_subs = unit.submission_set.filter(id__gt=last_sub_pk).order_by("id")
assert new_subs.count() == 2
target_sub = new_subs[0]
assert target_sub.old_value == ""
assert target_sub.new_value == unit.target
assert target_sub.field == SubmissionFields.TARGET
assert target_sub.type == SubmissionTypes.SYSTEM
assert target_sub.submitter == system
assert target_sub.revision == unit.revision
assert target_sub.creation_time == unit.change.submitted_on
state_sub = new_subs[1]
assert state_sub.old_value == "0"
assert state_sub.new_value == "200"
assert state_sub.field == SubmissionFields.STATE
assert state_sub.type == SubmissionTypes.SYSTEM
assert state_sub.submitter == system
assert state_sub.revision == unit.revision
assert state_sub.creation_time == unit.change.submitted_on
@pytest.mark.django_db
def test_update_upload_member_user(store0, system, member):
store0.state = PARSED
original_unit = store0.units.first()
original_revision = original_unit.revision
last_sub_pk = original_unit.submission_set.order_by(
"id").values_list("id", flat=True).last() or 0
update_store(
store0,
[(original_unit.source, "%s UPDATED" % original_unit.source, False)],
user=member,
store_revision=Revision.get() + 1,
submission_type=SubmissionTypes.UPLOAD)
unit = store0.units[0]
assert unit.change.submitted_by == member
assert unit.change.changed_with == SubmissionTypes.UPLOAD
assert unit.change.submitted_on >= unit.creation_time
assert unit.change.reviewed_on is None
assert unit.revision > original_revision
unit_source = unit.unit_source
    assert unit_source.created_by == system
    assert unit_source.created_with == SubmissionTypes.SYSTEM
# there should be 2 new subs - state_change and target_change
new_subs = unit.submission_set.filter(id__gt=last_sub_pk).order_by("id")
assert new_subs.count() == 2
target_sub = new_subs[0]
assert target_sub.old_value == ""
assert target_sub.new_value == unit.target
assert target_sub.field == SubmissionFields.TARGET
assert target_sub.type == SubmissionTypes.UPLOAD
assert target_sub.submitter == member
assert target_sub.revision == unit.revision
assert target_sub.creation_time == unit.change.submitted_on
state_sub = new_subs[1]
assert state_sub.old_value == "0"
assert state_sub.new_value == "200"
assert state_sub.field == SubmissionFields.STATE
assert state_sub.type == SubmissionTypes.UPLOAD
assert state_sub.submitter == member
assert state_sub.revision == unit.revision
assert state_sub.creation_time == unit.change.submitted_on
@pytest.mark.django_db
def test_update_upload_submission_type(store0):
store0.state = PARSED
unit = store0.units.first()
last_sub_pk = unit.submission_set.order_by(
"id").values_list("id", flat=True).last() or 0
update_store(
store0,
[(unit.source, "%s UPDATED" % unit.source, False)],
submission_type=SubmissionTypes.UPLOAD,
store_revision=Revision.get() + 1)
unit_source = store0.units[0].unit_source
unit_change = store0.units[0].change
assert unit_source.created_with == SubmissionTypes.SYSTEM
assert unit_change.changed_with == SubmissionTypes.UPLOAD
# there should be 2 new subs - state_change and target_change
# and both should show as by UPLOAD
new_subs = unit.submission_set.filter(id__gt=last_sub_pk)
assert (
list(new_subs.values_list("type", flat=True))
== [SubmissionTypes.UPLOAD] * 2)
@pytest.mark.django_db
def test_update_upload_new_revision(store0, member):
original_revision = store0.data.max_unit_revision
old_unit = store0.units.first()
update_store(
store0,
[("Hello, world", "Hello, world UPDATED", False)],
submission_type=SubmissionTypes.UPLOAD,
store_revision=Revision.get() + 1,
user=member)
old_unit.refresh_from_db()
assert old_unit.state == OBSOLETE
assert len(store0.units) == 1
unit = store0.units[0]
unit_source = unit.unit_source
assert unit.revision > original_revision
assert unit_source.created_by == member
assert unit.change.submitted_by == member
assert unit.creation_time == unit.change.submitted_on
assert unit.change.reviewed_by is None
assert unit.change.reviewed_on is None
assert unit.target == "Hello, world UPDATED"
assert unit.submission_set.count() == 0
@pytest.mark.django_db
def test_update_upload_again_new_revision(store0, member, member2):
store = store0
assert store.state == NEW
original_unit = store0.units[0]
update_store(
store,
[("Hello, world", "Hello, world UPDATED", False)],
submission_type=SubmissionTypes.UPLOAD,
store_revision=Revision.get() + 1,
user=member)
original_unit.refresh_from_db()
assert original_unit.state == OBSOLETE
store = Store.objects.get(pk=store0.pk)
assert store.state == PARSED
created_unit = store.units[0]
assert created_unit.target == "Hello, world UPDATED"
assert created_unit.state == TRANSLATED
assert created_unit.submission_set.count() == 0
old_unit_revision = store.data.max_unit_revision
update_store(
store0,
[("Hello, world", "Hello, world UPDATED AGAIN", False)],
submission_type=SubmissionTypes.WEB,
user=member2,
store_revision=Revision.get() + 1)
assert created_unit.submission_set.count() == 1
update_sub = created_unit.submission_set.first()
store = Store.objects.get(pk=store0.pk)
assert store.state == PARSED
unit = store.units[0]
unit_source = unit.unit_source
assert unit.revision > old_unit_revision
assert unit.target == "Hello, world UPDATED AGAIN"
assert unit_source.created_by == member
assert unit_source.created_with == SubmissionTypes.UPLOAD
assert unit.change.submitted_by == member2
assert unit.change.submitted_on >= unit.creation_time
assert unit.change.reviewed_by is None
assert unit.change.reviewed_on is None
assert unit.change.changed_with == SubmissionTypes.WEB
assert update_sub.creation_time == unit.change.submitted_on
assert update_sub.type == unit.change.changed_with
assert update_sub.field == SubmissionFields.TARGET
assert update_sub.submitter == unit.change.submitted_by
assert update_sub.old_value == created_unit.target
assert update_sub.new_value == unit.target
assert update_sub.revision == unit.revision
@pytest.mark.django_db
def test_update_upload_old_revision_unit_conflict(store0, admin, member):
original_revision = Revision.get()
original_unit = store0.units[0]
update_store(
store0,
[("Hello, world", "Hello, world UPDATED", False)],
submission_type=SubmissionTypes.UPLOAD,
store_revision=original_revision + 1,
user=admin)
unit = store0.units[0]
unit_source = unit.unit_source
assert unit_source.created_by == admin
updated_revision = unit.revision
assert (
unit_source.created_with
== SubmissionTypes.UPLOAD)
assert unit.change.submitted_by == admin
assert (
unit.change.changed_with
== SubmissionTypes.UPLOAD)
last_submit_time = unit.change.submitted_on
assert last_submit_time >= unit.creation_time
# load update with expired revision and conflicting unit
update_store(
store0,
[("Hello, world", "Hello, world CONFLICT", False)],
submission_type=SubmissionTypes.WEB,
store_revision=original_revision,
user=member)
unit = store0.units[0]
assert unit.submission_set.count() == 0
unit_source = unit.unit_source
# unit target is not updated and revision remains the same
assert store0.units[0].target == "Hello, world UPDATED"
assert unit.revision == updated_revision
unit_source = original_unit.unit_source
unit_source.created_by == admin
assert unit_source.created_with == SubmissionTypes.SYSTEM
    assert unit.change.changed_with == SubmissionTypes.UPLOAD
    assert unit.change.submitted_by == admin
    assert unit.change.submitted_on == last_submit_time
    assert unit.change.reviewed_by is None
    assert unit.change.reviewed_on is None
# but suggestion is added
suggestion = store0.units[0].get_suggestions()[0]
assert suggestion.target == "Hello, world CONFLICT"
assert suggestion.user == member
@pytest.mark.django_db
def test_update_upload_new_revision_new_unit(store0, member):
file_name = "pytest_pootle/data/po/tutorial/en/tutorial_update_new_unit.po"
store0.state = PARSED
old_unit_revision = store0.data.max_unit_revision
_update_from_upload_file(
store0,
file_name,
user=member,
submission_type=SubmissionTypes.WEB)
unit = store0.units.last()
unit_source = unit.unit_source
# the new unit has been added
assert unit.submission_set.count() == 0
assert unit.revision > old_unit_revision
assert unit.target == 'Goodbye, world'
assert unit_source.created_by == member
assert unit_source.created_with == SubmissionTypes.WEB
assert unit.change.submitted_by == member
assert unit.change.changed_with == SubmissionTypes.WEB
@pytest.mark.django_db
def test_update_upload_old_revision_new_unit(store0, member2):
store0.units.delete()
store0.state = PARSED
old_unit_revision = store0.data.max_unit_revision
# load initial update
_update_from_upload_file(
store0,
"pytest_pootle/data/po/tutorial/en/tutorial_update.po")
# load old revision with new unit
file_name = "pytest_pootle/data/po/tutorial/en/tutorial_update_old_unit.po"
_update_from_upload_file(
store0,
file_name,
user=member2,
submission_type=SubmissionTypes.WEB)
# the unit has been added because its not already obsoleted
assert store0.units.count() == 2
unit = store0.units.last()
unit_source = unit.unit_source
# the new unit has been added
assert unit.submission_set.count() == 0
assert unit.revision > old_unit_revision
assert unit.target == 'Goodbye, world'
assert unit_source.created_by == member2
assert unit_source.created_with == SubmissionTypes.WEB
assert unit.change.submitted_by == member2
assert unit.change.changed_with == SubmissionTypes.WEB
def _test_store_update_indexes(store, *test_args):
    # make sure indexes are not messed up; indexes only have to be unique
indexes = [x.index for x in store.units]
assert len(indexes) == len(set(indexes))
def _test_store_update_units_before(*test_args):
# test what has happened to the units that were present before the update
(store, units_update, store_revision, resolve_conflict,
units_before, member_, member2) = test_args
updates = {unit[0]: unit[1] for unit in units_update}
for unit, change in units_before:
updated_unit = store.unit_set.get(unitid=unit.unitid)
if unit.source not in updates:
# unit is not in update, target should be left unchanged
assert updated_unit.target == unit.target
assert updated_unit.change.submitted_by == change.submitted_by
            # depending on unit/store_revision it should or should not be obsoleted
if unit.isobsolete() or store_revision >= unit.revision:
assert updated_unit.isobsolete()
else:
assert not updated_unit.isobsolete()
else:
# unit is in update
if store_revision >= unit.revision:
assert not updated_unit.isobsolete()
elif unit.isobsolete():
# the unit has been obsoleted since store_revision
assert updated_unit.isobsolete()
else:
assert not updated_unit.isobsolete()
if not updated_unit.isobsolete():
if store_revision >= unit.revision:
# file store wins outright
assert updated_unit.target == updates[unit.source]
if unit.target != updates[unit.source]:
# unit has changed, or was resurrected
assert updated_unit.change.submitted_by == member2
# damn mysql microsecond precision
if change.submitted_on.time().microsecond != 0:
assert (
updated_unit.change.submitted_on
!= change.submitted_on)
elif unit.isobsolete():
# unit has changed, or was resurrected
assert updated_unit.change.reviewed_by == member2
# damn mysql microsecond precision
if change.reviewed_on.time().microsecond != 0:
assert (
updated_unit.change.reviewed_on
!= change.reviewed_on)
else:
assert (
updated_unit.change.submitted_by
== change.submitted_by)
assert (
updated_unit.change.submitted_on
== change.submitted_on)
assert updated_unit.get_suggestions().count() == 0
else:
# conflict found
suggestion = updated_unit.get_suggestions()[0]
if resolve_conflict == POOTLE_WINS:
assert updated_unit.target == unit.target
assert (
updated_unit.change.submitted_by
== change.submitted_by)
assert suggestion.target == updates[unit.source]
assert suggestion.user == member2
else:
assert updated_unit.target == updates[unit.source]
assert updated_unit.change.submitted_by == member2
assert suggestion.target == unit.target
assert suggestion.user == change.submitted_by
def _test_store_update_ordering(*test_args):
(store, units_update, store_revision, resolve_conflict_,
units_before, member_, member2_) = test_args
updates = {unit[0]: unit[1] for unit in units_update}
old_units = {unit.source: unit for unit, change in units_before}
# test ordering
new_unit_list = []
for unit, change_ in units_before:
add_unit = (not unit.isobsolete()
and unit.source not in updates
and unit.revision > store_revision)
if add_unit:
new_unit_list.append(unit.source)
for source, target_, is_fuzzy_ in units_update:
if source in old_units:
old_unit = old_units[source]
should_add = (not old_unit.isobsolete()
or old_unit.revision <= store_revision)
if should_add:
new_unit_list.append(source)
else:
new_unit_list.append(source)
assert new_unit_list == [x.source for x in store.units]
def _test_store_update_units_now(*test_args):
(store, units_update, store_revision, resolve_conflict_,
units_before, member_, member2_) = test_args
    # test that all the current units are there
updates = {unit[0]: unit[1] for unit in units_update}
old_units = {unit.source: unit for unit, change in units_before}
for unit in store.units:
assert (
unit.source in updates
or (old_units[unit.source].revision > store_revision
and not old_units[unit.source].isobsolete()))
@pytest.mark.django_db
def test_store_update(param_update_store_test):
_test_store_update_indexes(*param_update_store_test)
_test_store_update_units_before(*param_update_store_test)
_test_store_update_units_now(*param_update_store_test)
_test_store_update_ordering(*param_update_store_test)
@pytest.mark.django_db
def test_store_file_diff(store_diff_tests):
diff, store, update_units, store_revision = store_diff_tests
assert diff.target_store == store
assert diff.source_revision == store_revision
assert (
update_units
== [(x.source, x.target, x.isfuzzy())
for x in diff.source_store.units[1:]]
== [(v['source'], v['target'], v['state'] == 50)
for v in diff.source_units.values()])
assert diff.active_target_units == [x.source for x in store.units]
assert diff.target_revision == store.get_max_unit_revision()
assert (
diff.target_units
== {unit["source_f"]: unit
for unit
in store.unit_set.values("source_f", "index", "target_f",
"state", "unitid", "id", "revision",
"developer_comment", "translator_comment",
"locations", "context")})
diff_diff = diff.diff()
if diff_diff is not None:
assert (
sorted(diff_diff.keys())
== ["add", "index", "obsolete", "update"])
    # obsoleted units have no index - so just check that they all match
obsoleted = (store.unit_set.filter(state=OBSOLETE)
.filter(revision__gt=store_revision)
.values_list("source_f", flat=True))
assert len(diff.obsoleted_target_units) == obsoleted.count()
assert all(x in diff.obsoleted_target_units for x in obsoleted)
assert (
diff.updated_target_units
== list(store.units.filter(revision__gt=store_revision)
.values_list("source_f", flat=True)))
@pytest.mark.django_db
def test_store_repr():
store = Store.objects.first()
assert str(store) == str(store.syncer.convert(store.syncer.file_class))
assert repr(store) == u"<Store: %s>" % store.pootle_path
@pytest.mark.django_db
def test_store_po_deserializer(test_fs, store_po):
with test_fs.open("data/po/complex.po") as test_file:
test_string = test_file.read()
ttk_po = getclass(test_file)(test_string)
store_po.update(store_po.deserialize(test_string))
assert len(ttk_po.units) - 1 == store_po.units.count()
@pytest.mark.django_db
def test_store_po_serializer(test_fs, store_po):
with test_fs.open("data/po/complex.po") as test_file:
test_string = test_file.read()
ttk_po = getclass(test_file)(test_string)
store_po.update(store_po.deserialize(test_string))
store_io = io.BytesIO(store_po.serialize())
store_ttk = getclass(store_io)(store_io.read())
assert len(store_ttk.units) == len(ttk_po.units)
@pytest.mark.django_db
def test_store_po_serializer_custom(test_fs, store_po):
class SerializerCheck(object):
original_data = None
context = None
checker = SerializerCheck()
class EGSerializer(Serializer):
@property
def output(self):
checker.original_data = self.original_data
checker.context = self.context
@provider(serializers, sender=Project)
def provide_serializers(**kwargs):
return dict(eg_serializer=EGSerializer)
with test_fs.open("data/po/complex.po") as test_file:
test_string = test_file.read()
# ttk_po = getclass(test_file)(test_string)
store_po.update(store_po.deserialize(test_string))
# add config to the project
project = store_po.translation_project.project
config.get(project.__class__, instance=project).set_config(
"pootle.core.serializers",
["eg_serializer"])
store_po.serialize()
assert checker.context == store_po
assert (
not isinstance(checker.original_data, six.text_type)
and isinstance(checker.original_data, str))
assert checker.original_data == _store_as_string(store_po)
@pytest.mark.django_db
def test_store_po_deserializer_custom(test_fs, store_po):
class DeserializerCheck(object):
original_data = None
context = None
checker = DeserializerCheck()
class EGDeserializer(Deserializer):
@property
def output(self):
checker.context = self.context
checker.original_data = self.original_data
return self.original_data
@provider(deserializers, sender=Project)
def provide_deserializers(**kwargs):
return dict(eg_deserializer=EGDeserializer)
with test_fs.open("data/po/complex.po") as test_file:
test_string = test_file.read()
# add config to the project
project = store_po.translation_project.project
config.get().set_config(
"pootle.core.deserializers",
["eg_deserializer"],
project)
store_po.deserialize(test_string)
assert checker.original_data == test_string
assert checker.context == store_po
@pytest.mark.django_db
def test_store_base_serializer(store_po):
original_data = "SOME DATA"
serializer = Serializer(store_po, original_data)
assert serializer.context == store_po
assert serializer.data == original_data
@pytest.mark.django_db
def test_store_base_deserializer(store_po):
original_data = "SOME DATA"
deserializer = Deserializer(store_po, original_data)
assert deserializer.context == store_po
assert deserializer.data == original_data
@pytest.mark.django_db
def test_store_set_bad_deserializers(store_po):
project = store_po.translation_project.project
with pytest.raises(ConfigurationError):
config.get(project.__class__, instance=project).set_config(
"pootle.core.deserializers",
["DESERIALIZER_DOES_NOT_EXIST"])
class EGDeserializer(object):
pass
@provider(deserializers)
def provide_deserializers(**kwargs):
return dict(eg_deserializer=EGDeserializer)
# must be list
with pytest.raises(ConfigurationError):
config.get(project.__class__, instance=project).set_config(
"pootle.core.deserializers",
"eg_deserializer")
with pytest.raises(ConfigurationError):
config.get(project.__class__, instance=project).set_config(
"pootle.core.deserializers",
dict(serializer="eg_deserializer"))
config.get(project.__class__, instance=project).set_config(
"pootle.core.deserializers",
["eg_deserializer"])
@pytest.mark.django_db
def test_store_set_bad_serializers(store_po):
project = store_po.translation_project.project
with pytest.raises(ConfigurationError):
config.get(project.__class__, instance=project).set_config(
"pootle.core.serializers",
["SERIALIZER_DOES_NOT_EXIST"])
class EGSerializer(Serializer):
pass
@provider(serializers)
def provide_serializers(**kwargs):
return dict(eg_serializer=EGSerializer)
# must be list
with pytest.raises(ConfigurationError):
config.get(project.__class__, instance=project).set_config(
"pootle.core.serializers",
"eg_serializer")
with pytest.raises(ConfigurationError):
config.get(project.__class__, instance=project).set_config(
"pootle.core.serializers",
dict(serializer="eg_serializer"))
config.get(project.__class__, instance=project).set_config(
"pootle.core.serializers",
["eg_serializer"])
@pytest.mark.django_db
def test_store_create_by_bad_path(project0):
# bad project name
with pytest.raises(Project.DoesNotExist):
Store.objects.create_by_path(
"/language0/does/not/exist.po")
# bad language code
with pytest.raises(Language.DoesNotExist):
Store.objects.create_by_path(
"/does/project0/not/exist.po")
    # project and project code don't match
with pytest.raises(ValueError):
Store.objects.create_by_path(
"/language0/project1/store.po",
project=project0)
# bad store.ext
with pytest.raises(ValueError):
Store.objects.create_by_path(
"/language0/project0/store_by_path.foo")
    # subdir doesn't exist
path = '/language0/project0/path/to/subdir.po'
with pytest.raises(Directory.DoesNotExist):
Store.objects.create_by_path(
path, create_directory=False)
path = '/%s/project0/notp.po' % LanguageDBFactory().code
with pytest.raises(TranslationProject.DoesNotExist):
Store.objects.create_by_path(
path, create_tp=False)
@pytest.mark.django_db
def test_store_create_by_path(po_directory):
# create in tp
path = '/language0/project0/path.po'
store = Store.objects.create_by_path(path)
assert store.pootle_path == path
# "create" in tp again - get existing store
store = Store.objects.create_by_path(path)
assert store.pootle_path == path
# create in existing subdir
path = '/language0/project0/subdir0/exists.po'
store = Store.objects.create_by_path(path)
assert store.pootle_path == path
# create in new subdir
path = '/language0/project0/path/to/subdir.po'
store = Store.objects.create_by_path(path)
assert store.pootle_path == path
@pytest.mark.django_db
def test_store_create_by_path_with_project(project0):
# create in tp with project
path = '/language0/project0/path2.po'
store = Store.objects.create_by_path(
path, project=project0)
assert store.pootle_path == path
# create in existing subdir with project
path = '/language0/project0/subdir0/exists2.po'
store = Store.objects.create_by_path(
path, project=project0)
assert store.pootle_path == path
# create in new subdir with project
path = '/language0/project0/path/to/subdir2.po'
store = Store.objects.create_by_path(
path, project=project0)
assert store.pootle_path == path
@pytest.mark.django_db
def test_store_create_by_new_tp_path(po_directory):
language = LanguageDBFactory()
path = '/%s/project0/tp.po' % language.code
store = Store.objects.create_by_path(path)
assert store.pootle_path == path
assert store.translation_project.language == language
language = LanguageDBFactory()
path = '/%s/project0/with/subdir/tp.po' % language.code
store = Store.objects.create_by_path(path)
assert store.pootle_path == path
assert store.translation_project.language == language
@pytest.mark.django_db
def test_store_create(tp0):
tp = tp0
project = tp.project
registry = formats.get()
po = Format.objects.get(name="po")
po2 = registry.register("special_po_2", "po")
po3 = registry.register("special_po_3", "po")
xliff = Format.objects.get(name="xliff")
project.filetypes.add(xliff)
project.filetypes.add(po2)
project.filetypes.add(po3)
store = Store.objects.create(
name="store.po",
parent=tp.directory,
translation_project=tp)
assert store.filetype == po
assert not store.is_template
store = Store.objects.create(
name="store.pot",
parent=tp.directory,
translation_project=tp)
# not in source_language folder
assert not store.is_template
assert store.filetype == po
store = Store.objects.create(
name="store.xliff",
parent=tp.directory,
translation_project=tp)
assert store.filetype == xliff
# push po to the back of the queue
project.filetypes.remove(po)
project.filetypes.add(po)
store = Store.objects.create(
name="another_store.po",
parent=tp.directory,
translation_project=tp)
assert store.filetype == po2
store = Store.objects.create(
name="another_store.pot",
parent=tp.directory,
translation_project=tp)
assert store.filetype == po
store = Store.objects.create(
name="another_store.xliff",
parent=tp.directory,
translation_project=tp)
with pytest.raises(UnrecognizedFiletype):
store = Store.objects.create(
name="another_store.foo",
parent=tp.directory,
translation_project=tp)
@pytest.mark.django_db
def test_store_create_name_with_slashes_or_backslashes(tp0):
"""Test Stores are not created with (back)slashes on their name."""
with pytest.raises(ValidationError):
Store.objects.create(name="slashed/name.po", parent=tp0.directory,
translation_project=tp0)
with pytest.raises(ValidationError):
Store.objects.create(name="backslashed\\name.po", parent=tp0.directory,
translation_project=tp0)
@pytest.mark.django_db
def test_store_get_file_class():
store = Store.objects.filter(
translation_project__project__code="project0",
translation_project__language__code="language0").first()
# this matches because po is recognised by ttk
assert store.syncer.file_class == getclass(store)
    # file_class is cached so let's delete it
del store.syncer.__dict__["file_class"]
class CustomFormatClass(object):
pass
@provider(format_classes)
def format_class_provider(**kwargs):
return dict(po=CustomFormatClass)
    # we get the CustomFormatClass as it was registered
    assert store.syncer.file_class is CustomFormatClass
    # the Store.filetype is used in this case, not the name
store.name = "new_store_name.foo"
del store.syncer.__dict__["file_class"]
assert store.syncer.file_class is CustomFormatClass
    # let's register a foo filetype
format_registry = formats.get()
foo_filetype = format_registry.register("foo", "foo")
store.filetype = foo_filetype
store.save()
# oh no! not recognised by ttk
del store.syncer.__dict__["file_class"]
with pytest.raises(ValueError):
store.syncer.file_class
@provider(format_classes)
def another_format_class_provider(**kwargs):
return dict(foo=CustomFormatClass)
# works now
assert store.syncer.file_class is CustomFormatClass
format_classes.disconnect(format_class_provider)
format_classes.disconnect(another_format_class_provider)
@pytest.mark.django_db
def test_store_get_template_file_class(po_directory, templates):
project = ProjectDBFactory(source_language=templates)
tp = TranslationProjectFactory(language=templates, project=project)
format_registry = formats.get()
foo_filetype = format_registry.register("foo", "foo", template_extension="bar")
tp.project.filetypes.add(foo_filetype)
store = Store.objects.create(
name="mystore.bar",
translation_project=tp,
parent=tp.directory)
# oh no! not recognised by ttk
with pytest.raises(ValueError):
store.syncer.file_class
class CustomFormatClass(object):
pass
@provider(format_classes)
def format_class_provider(**kwargs):
return dict(foo=CustomFormatClass)
assert store.syncer.file_class == CustomFormatClass
format_classes.disconnect(format_class_provider)
@pytest.mark.django_db
def test_store_create_templates(po_directory, templates):
project = ProjectDBFactory(source_language=templates)
tp = TranslationProjectFactory(language=templates, project=project)
po = Format.objects.get(name="po")
store = Store.objects.create(
name="mystore.pot",
translation_project=tp,
parent=tp.directory)
assert store.filetype == po
assert store.is_template
@pytest.mark.django_db
def test_store_get_or_create_templates(po_directory, templates):
project = ProjectDBFactory(source_language=templates)
tp = TranslationProjectFactory(language=templates, project=project)
po = Format.objects.get(name="po")
store = Store.objects.get_or_create(
name="mystore.pot",
translation_project=tp,
parent=tp.directory)[0]
assert store.filetype == po
assert store.is_template
@pytest.mark.django_db
def test_store_diff(diffable_stores):
target_store, source_store = diffable_stores
differ = StoreDiff(
target_store,
source_store,
target_store.get_max_unit_revision() + 1)
# no changes
assert not differ.diff()
assert differ.target_store == target_store
assert differ.source_store == source_store
@pytest.mark.django_db
def test_store_diff_delete_target_unit(diffable_stores):
target_store, source_store = diffable_stores
# delete a unit in the target store
remove_unit = target_store.units.first()
remove_unit.delete()
    # the unit will always be re-added (as it's not obsolete)
# with source_revision to the max
differ = StoreDiff(
target_store,
source_store,
target_store.get_max_unit_revision())
result = differ.diff()
assert result["add"][0][0].source_f == remove_unit.source_f
assert len(result["add"]) == 1
assert len(result["index"]) == 0
assert len(result["obsolete"]) == 0
assert result['update'] == (set(), {})
# and source_revision to 0
differ = StoreDiff(
target_store,
source_store,
0)
result = differ.diff()
assert result["add"][0][0].source_f == remove_unit.source_f
assert len(result["add"]) == 1
assert len(result["index"]) == 0
assert len(result["obsolete"]) == 0
assert result['update'] == (set(), {})
@pytest.mark.django_db
def test_store_diff_delete_source_unit(diffable_stores):
target_store, source_store = diffable_stores
# delete a unit in the source store
remove_unit = source_store.units.first()
remove_unit.delete()
# set the source_revision to max and the unit will be obsoleted
differ = StoreDiff(
target_store,
source_store,
target_store.get_max_unit_revision())
result = differ.diff()
to_remove = target_store.units.get(unitid=remove_unit.unitid)
assert result["obsolete"] == [to_remove.pk]
assert len(result["obsolete"]) == 1
assert len(result["add"]) == 0
assert len(result["index"]) == 0
    # set the source_revision to less than the target store's max_revision
    # and the unit will be ignored, as it's assumed to have been previously
    # deleted
differ = StoreDiff(
target_store,
source_store,
target_store.get_max_unit_revision() - 1)
assert not differ.diff()
@pytest.mark.django_db
def test_store_diff_delete_obsoleted_target_unit(diffable_stores):
target_store, source_store = diffable_stores
# delete a unit in the source store
remove_unit = source_store.units.first()
remove_unit.delete()
# and obsolete the same unit in the target
obsolete_unit = target_store.units.get(unitid=remove_unit.unitid)
obsolete_unit.makeobsolete()
obsolete_unit.save()
    # as the unit is already obsolete - nothing happens
differ = StoreDiff(
target_store,
source_store,
target_store.get_max_unit_revision() + 1)
assert not differ.diff()
@pytest.mark.django_db
def test_store_diff_obsoleted_target_unit(diffable_stores):
target_store, source_store = diffable_stores
# obsolete a unit in target
obsolete_unit = target_store.units.first()
obsolete_unit.makeobsolete()
obsolete_unit.save()
# as the revision is higher it gets unobsoleted
differ = StoreDiff(
target_store,
source_store,
target_store.get_max_unit_revision() + 1)
result = differ.diff()
assert result["update"][0] == set([obsolete_unit.pk])
assert len(result["update"][1]) == 1
assert result["update"][1][obsolete_unit.unitid]["dbid"] == obsolete_unit.pk
# if the revision is less - no change
differ = StoreDiff(
target_store,
source_store,
target_store.get_max_unit_revision() - 1)
assert not differ.diff()
@pytest.mark.django_db
def test_store_diff_update_target_unit(diffable_stores):
target_store, source_store = diffable_stores
# update a unit in target
update_unit = target_store.units.first()
update_unit.target_f = "Some other string"
update_unit.save()
# the unit is always marked for update
differ = StoreDiff(
target_store,
source_store,
target_store.get_max_unit_revision() + 1)
result = differ.diff()
assert result["update"][0] == set([update_unit.pk])
assert result["update"][1] == {}
assert len(result["add"]) == 0
assert len(result["index"]) == 0
differ = StoreDiff(
target_store,
source_store,
0)
result = differ.diff()
assert result["update"][0] == set([update_unit.pk])
assert result["update"][1] == {}
assert len(result["add"]) == 0
assert len(result["index"]) == 0
@pytest.mark.django_db
def test_store_diff_update_source_unit(diffable_stores):
target_store, source_store = diffable_stores
# update a unit in source
update_unit = source_store.units.first()
update_unit.target_f = "Some other string"
update_unit.save()
target_unit = target_store.units.get(
unitid=update_unit.unitid)
# the unit is always marked for update
differ = StoreDiff(
target_store,
source_store,
target_store.get_max_unit_revision() + 1)
result = differ.diff()
assert result["update"][0] == set([target_unit.pk])
assert result["update"][1] == {}
assert len(result["add"]) == 0
assert len(result["index"]) == 0
differ = StoreDiff(
target_store,
source_store,
0)
result = differ.diff()
assert result["update"][0] == set([target_unit.pk])
assert result["update"][1] == {}
assert len(result["add"]) == 0
assert len(result["index"]) == 0
@pytest.mark.django_db
def test_store_diff_custom(diffable_stores):
target_store, source_store = diffable_stores
class CustomDiffableStore(DiffableStore):
pass
@provider(format_diffs)
def format_diff_provider(**kwargs):
return {
target_store.filetype.name: CustomDiffableStore}
differ = StoreDiff(
target_store,
source_store,
target_store.get_max_unit_revision() + 1)
assert isinstance(
differ.diffable, CustomDiffableStore)
@pytest.mark.django_db
def test_store_diff_delete_obsoleted_source_unit(diffable_stores):
target_store, source_store = diffable_stores
# delete a unit in the target store
remove_unit = target_store.units.first()
remove_unit.delete()
    # and obsolete the same unit in the source
obsolete_unit = source_store.units.get(unitid=remove_unit.unitid)
obsolete_unit.makeobsolete()
obsolete_unit.save()
    # as the unit is already obsolete - nothing happens
differ = StoreDiff(
target_store,
source_store,
target_store.get_max_unit_revision() + 1)
assert not differ.diff()
@pytest.mark.django_db
def test_store_syncer(tp0):
store = tp0.stores.live().first()
assert isinstance(store.syncer, PoStoreSyncer)
assert store.syncer.file_class == getclass(store)
assert store.syncer.translation_project == store.translation_project
assert (
store.syncer.language
== store.translation_project.language)
assert (
store.syncer.project
== store.translation_project.project)
assert (
store.syncer.source_language
== store.translation_project.project.source_language)
@pytest.mark.django_db
def test_store_syncer_obsolete_unit(tp0):
store = tp0.stores.live().first()
unit = store.units.filter(state=TRANSLATED).first()
unit_syncer = store.syncer.unit_sync_class(unit)
newunit = unit_syncer.create_unit(store.syncer.file_class.UnitClass)
    # unit is untranslated, it's always just deleted
obsolete, deleted = store.syncer.obsolete_unit(newunit, True)
assert not obsolete
assert deleted
obsolete, deleted = store.syncer.obsolete_unit(newunit, False)
assert not obsolete
assert deleted
# set unit to translated
newunit.target = unit.target
    # if conservative, nothing is changed
obsolete, deleted = store.syncer.obsolete_unit(newunit, True)
assert not obsolete
assert not deleted
    # not conservative and the unit is obsoleted
obsolete, deleted = store.syncer.obsolete_unit(newunit, False)
assert obsolete
assert not deleted
@pytest.mark.django_db
def test_store_syncer_sync_store(tp0, dummy_store_syncer):
store = tp0.stores.live().first()
DummyStoreSyncer, __, expected = dummy_store_syncer
disk_store = store.syncer.convert()
dummy_syncer = DummyStoreSyncer(store, expected=expected)
result = dummy_syncer.sync(
disk_store,
expected["last_revision"],
update_structure=expected["update_structure"],
conservative=expected["conservative"])
assert result[0] is True
assert result[1]["updated"] == expected["changes"]
# conservative makes no diff here
expected["conservative"] = False
dummy_syncer = DummyStoreSyncer(store, expected=expected)
result = dummy_syncer.sync(
disk_store,
expected["last_revision"],
update_structure=expected["update_structure"],
conservative=expected["conservative"])
assert result[0] is True
assert result[1]["updated"] == expected["changes"]
@pytest.mark.django_db
def test_store_syncer_sync_store_no_changes(tp0, dummy_store_syncer):
store = tp0.stores.live().first()
DummyStoreSyncer, __, expected = dummy_store_syncer
disk_store = store.syncer.convert()
dummy_syncer = DummyStoreSyncer(store, expected=expected)
# no changes
expected["changes"] = []
expected["conservative"] = True
dummy_syncer = DummyStoreSyncer(store, expected=expected)
result = dummy_syncer.sync(
disk_store,
expected["last_revision"],
expected["update_structure"],
expected["conservative"])
assert result[0] is False
assert not result[1].get("updated")
# conservative makes no diff here
expected["conservative"] = False
dummy_syncer = DummyStoreSyncer(store, expected=expected)
result = dummy_syncer.sync(
disk_store,
expected["last_revision"],
expected["update_structure"],
expected["conservative"])
assert result[0] is False
assert not result[1].get("updated")
@pytest.mark.django_db
def test_store_syncer_sync_store_structure(tp0, dummy_store_syncer):
store = tp0.stores.live().first()
DummyStoreSyncer, DummyDiskStore, expected = dummy_store_syncer
disk_store = DummyDiskStore(expected)
expected["update_structure"] = True
expected["changes"] = []
dummy_syncer = DummyStoreSyncer(store, expected=expected)
result = dummy_syncer.sync(
disk_store,
expected["last_revision"],
expected["update_structure"],
expected["conservative"])
assert result[0] is True
assert result[1]["updated"] == []
assert result[1]["obsolete"] == 8
assert result[1]["deleted"] == 9
assert result[1]["added"] == 10
expected["obsolete_units"] = []
expected["new_units"] = []
expected["changes"] = []
dummy_syncer = DummyStoreSyncer(store, expected=expected)
result = dummy_syncer.sync(
disk_store,
expected["last_revision"],
expected["update_structure"],
expected["conservative"])
assert result[0] is False
@pytest.mark.django_db
def test_store_syncer_sync_update_structure(dummy_store_structure_syncer, tp0):
store = tp0.stores.live().first()
DummyStoreSyncer, DummyDiskStore, DummyUnit = dummy_store_structure_syncer
expected = dict(
unit_class="FOO",
conservative=True,
obsolete_delete=(True, True),
obsolete_units=["a", "b", "c"])
expected["new_units"] = [
DummyUnit(unit, expected=expected)
for unit in ["5", "6", "7"]]
syncer = DummyStoreSyncer(store, expected=expected)
disk_store = DummyDiskStore(expected)
result = syncer.update_structure(
disk_store,
expected["obsolete_units"],
expected["new_units"],
expected["conservative"])
obsolete_units = (
len(expected["obsolete_units"])
if expected["obsolete_delete"][0]
else 0)
deleted_units = (
len(expected["obsolete_units"])
if expected["obsolete_delete"][1]
else 0)
new_units = len(expected["new_units"])
assert result == (obsolete_units, deleted_units, new_units)
def _test_get_new(results, syncer, old_ids, new_ids):
assert list(results) == list(
syncer.store.findid_bulk(
[syncer.dbid_index.get(uid)
for uid
in new_ids - old_ids]))
def _test_get_obsolete(results, disk_store, syncer, old_ids, new_ids):
assert list(results) == list(
disk_store.findid(uid)
for uid
in old_ids - new_ids
if (disk_store.findid(uid)
and not disk_store.findid(uid).isobsolete()))
@pytest.mark.django_db
def test_store_syncer_obsolete_units(dummy_store_syncer_units, tp0):
store = tp0.stores.live().first()
disk_store = store.syncer.convert()
expected = dict(
old_ids=set(),
new_ids=set(),
disk_ids={})
syncer = dummy_store_syncer_units(store, expected=expected)
results = syncer.get_units_to_obsolete(
disk_store, expected["old_ids"], expected["new_ids"])
_test_get_obsolete(
results, disk_store, syncer,
expected["old_ids"], expected["new_ids"])
expected = dict(
old_ids=set(["2", "3", "4"]),
new_ids=set(["3", "4", "5"]),
disk_ids={"3": "foo", "4": "bar", "5": "baz"})
results = syncer.get_units_to_obsolete(
disk_store, expected["old_ids"], expected["new_ids"])
_test_get_obsolete(
results, disk_store, syncer, expected["old_ids"], expected["new_ids"])
@pytest.mark.django_db
def test_store_syncer_new_units(dummy_store_syncer_units, tp0):
store = tp0.stores.live().first()
expected = dict(
old_ids=set(),
new_ids=set(),
disk_ids={},
db_ids={})
syncer = dummy_store_syncer_units(store, expected=expected)
results = syncer.get_new_units(
expected["old_ids"], expected["new_ids"])
_test_get_new(
results, syncer, expected["old_ids"], expected["new_ids"])
expected = dict(
old_ids=set(["2", "3", "4"]),
new_ids=set(["3", "4", "5"]),
db_ids={"3": "foo", "4": "bar", "5": "baz"})
syncer = dummy_store_syncer_units(store, expected=expected)
results = syncer.get_new_units(
expected["old_ids"], expected["new_ids"])
_test_get_new(
results, syncer, expected["old_ids"], expected["new_ids"])
@pytest.mark.django_db
def test_store_path(store0):
assert store0.path == to_tp_relative_path(store0.pootle_path)
@pytest.mark.django_db
def test_store_sync_empty(project0_nongnu, tp0, caplog):
store = StoreDBFactory(
name="empty.po",
translation_project=tp0,
parent=tp0.directory)
store.sync()
assert os.path.exists(store.file.path)
modified = os.stat(store.file.path).st_mtime
store.sync()
assert modified == os.stat(store.file.path).st_mtime
# warning message - nothing changes
store.sync(conservative=True, only_newer=False)
assert "nothing changed" in caplog.records[-1].message
assert modified == os.stat(store.file.path).st_mtime
@pytest.mark.django_db
def test_store_sync_template(project0_nongnu, templates_project0, caplog):
template = templates_project0.stores.first()
template.sync()
modified = os.stat(template.file.path).st_mtime
unit = template.units.first()
unit.target = "NEW TARGET"
unit.save()
template.sync(conservative=True)
assert modified == os.stat(template.file.path).st_mtime
template.sync(conservative=False)
assert not modified == os.stat(template.file.path).st_mtime
@pytest.mark.django_db
def test_store_update_with_state_change(store0, admin):
units = dict([(x.id, (x.source, x.target, not x.isfuzzy()))
for x in store0.units])
update_store(
store0,
units=units.values(),
store_revision=store0.data.max_unit_revision,
user=admin)
for unit_id, unit in units.items():
assert unit[2] == store0.units.get(id=unit_id).isfuzzy()
@pytest.mark.django_db
def test_update_xliff(store_po, test_fs, xliff):
project = store_po.translation_project.project
filetype_tool = project.filetype_tool
project.filetypes.add(xliff)
filetype_tool.set_store_filetype(store_po, xliff)
with test_fs.open(['data', 'xliff', 'welcome.xliff']) as f:
file_store = getclass(f)(f.read())
store_po.update(file_store)
unit = store_po.units[0]
assert unit.istranslated()
with test_fs.open(['data', 'xliff', 'updated_welcome.xliff']) as f:
file_store = getclass(f)(f.read())
store_po.update(file_store)
updated_unit = store_po.units.get(id=unit.id)
assert unit.source != updated_unit.source
@pytest.mark.django_db
def test_update_resurrect(store_po, test_fs):
with test_fs.open(['data', 'po', 'obsolete.po']) as f:
file_store = getclass(f)(f.read())
store_po.update(file_store)
obsolete_units = store_po.unit_set.filter(state=OBSOLETE)
obsolete_ids = list(obsolete_units.values_list('id', flat=True))
assert len(obsolete_ids) > 0
with test_fs.open(['data', 'po', 'resurrected.po']) as f:
file_store = getclass(f)(f.read())
store_revision = store_po.data.max_unit_revision
# set store_revision as we do in update_stores cli command
store_po.update(file_store, store_revision=store_revision - 1)
obsolete_units = store_po.unit_set.filter(state=OBSOLETE)
assert obsolete_units.count() == len(obsolete_ids)
for unit in obsolete_units.filter(id__in=obsolete_ids):
assert unit.isobsolete()
# set store_revision as we do in update_stores cli command
store_po.update(file_store, store_revision=store_revision)
units = store_po.units.filter(id__in=obsolete_ids)
assert units.count() == len(obsolete_ids)
for unit in units:
assert not unit.isobsolete()
@pytest.mark.django_db
def test_store_comment_update(store0, member):
ttk = store0.deserialize(store0.serialize())
fileunit = ttk.units[-1]
fileunit.removenotes()
fileunit.addnote("A new comment")
unit = store0.findid(fileunit.getid())
last_sub_pk = unit.submission_set.order_by(
"id").values_list("id", flat=True).last() or 0
store0.update(
ttk, store_revision=store0.data.max_unit_revision + 1,
user=member
)
assert ttk.units[-1].getnotes("translator") == "A new comment"
unit = store0.units.get(id=unit.id)
assert unit.translator_comment == "A new comment"
assert unit.change.commented_by == member
new_subs = unit.submission_set.filter(id__gt=last_sub_pk).order_by("id")
assert new_subs.count() == 1
comment_sub = new_subs[0]
assert comment_sub.old_value == ""
assert comment_sub.new_value == "A new comment"
assert comment_sub.field == SubmissionFields.COMMENT
assert comment_sub.type == SubmissionTypes.SYSTEM
assert comment_sub.submitter == member
assert comment_sub.revision == unit.revision
assert comment_sub.creation_time == unit.change.commented_on
| ta2-1/pootle | tests/models/store.py | Python | gpl-3.0 | 64,439 |
import pilas
import json
from pilas.escena import Base
from general import General
from individual import Individual
class jugadores(Base):
def __init__(self):
Base.__init__(self)
def fondo(self):
pilas.fondos.Fondo("data/img/fondos/aplicacion.jpg")
def general(self):
self.sonido_boton.reproducir()
pilas.almacenar_escena(General())
def individual(self):
self.sonido_boton.reproducir()
pilas.almacenar_escena(Individual())
def volver(self):
self.sonido_boton.reproducir()
pilas.recuperar_escena()
def iniciar(self):
self.fondo()
self.sonido_boton = pilas.sonidos.cargar("data/audio/boton.ogg")
self.interfaz()
self.mostrar()
def interfaz(self):
opcion= [("General",self.general),("Individual",self.individual),("Volver",self.volver)]
menu = pilas.actores.Menu(opcion, y=50, fuente="data/fonts/American Captain.ttf")
menu.escala = 1.3
enunciado = pilas.actores.Actor("data/img/enunciados/estadisticas.png",y=250)
enunciado.escala = 0.3 | gercordero/va_de_vuelta | src/estadisticas.py | Python | gpl-3.0 | 1,023 |
import logging
logging.basicConfig()
from enum import Enum
logger = logging.getLogger('loopabull')
logger.setLevel(logging.INFO)
class Result(Enum):
runfinished = 1
runerrored = 2
unrouted = 3
error = 4
# vim: set expandtab sw=4 sts=4 ts=4
| maxamillion/loopabull | loopabull/__init__.py | Python | gpl-3.0 | 261 |
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import traceback
REQUESTS_IMP_ERR = None
try:
import requests
HAS_REQUESTS = True
except ImportError:
REQUESTS_IMP_ERR = traceback.format_exc()
HAS_REQUESTS = False
PYVMOMI_IMP_ERR = None
try:
from pyVim import connect
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
PYVMOMI_IMP_ERR = traceback.format_exc()
HAS_PYVMOMI = False
VSPHERE_IMP_ERR = None
try:
from com.vmware.vapi.std_client import DynamicID
from vmware.vapi.vsphere.client import create_vsphere_client
from com.vmware.vapi.std.errors_client import Unauthorized
from com.vmware.content.library_client import Item
from com.vmware.vcenter_client import (Folder,
Datacenter,
ResourcePool,
Datastore,
Cluster,
Host)
HAS_VSPHERE = True
except ImportError:
VSPHERE_IMP_ERR = traceback.format_exc()
HAS_VSPHERE = False
from ansible.module_utils.basic import env_fallback, missing_required_lib
class VmwareRestClient(object):
def __init__(self, module):
"""
Constructor
"""
self.module = module
self.params = module.params
self.check_required_library()
self.api_client = self.connect_to_vsphere_client()
# Helper function
def get_error_message(self, error):
"""
Helper function to show human readable error messages.
"""
err_msg = []
if not error.messages:
if isinstance(error, Unauthorized):
return "Authorization required."
return "Generic error occurred."
for err in error.messages:
err_msg.append(err.default_message % err.args)
return " ,".join(err_msg)
def check_required_library(self):
"""
Check required libraries
"""
if not HAS_REQUESTS:
self.module.fail_json(msg=missing_required_lib('requests'),
exception=REQUESTS_IMP_ERR)
if not HAS_PYVMOMI:
self.module.fail_json(msg=missing_required_lib('PyVmomi'),
exception=PYVMOMI_IMP_ERR)
if not HAS_VSPHERE:
self.module.fail_json(
msg=missing_required_lib('vSphere Automation SDK',
url='https://code.vmware.com/web/sdk/65/vsphere-automation-python'),
exception=VSPHERE_IMP_ERR)
@staticmethod
def vmware_client_argument_spec():
return dict(
hostname=dict(type='str',
fallback=(env_fallback, ['VMWARE_HOST'])),
username=dict(type='str',
fallback=(env_fallback, ['VMWARE_USER']),
aliases=['user', 'admin']),
password=dict(type='str',
fallback=(env_fallback, ['VMWARE_PASSWORD']),
aliases=['pass', 'pwd'],
no_log=True),
protocol=dict(type='str',
default='https',
choices=['https', 'http']),
validate_certs=dict(type='bool',
fallback=(env_fallback, ['VMWARE_VALIDATE_CERTS']),
default=True),
)
def connect_to_vsphere_client(self):
"""
Connect to vSphere API Client with Username and Password
"""
username = self.params.get('username')
password = self.params.get('password')
hostname = self.params.get('hostname')
session = requests.Session()
session.verify = self.params.get('validate_certs')
if not all([hostname, username, password]):
self.module.fail_json(msg="Missing one of the following : hostname, username, password."
" Please read the documentation for more information.")
client = create_vsphere_client(
server=hostname,
username=username,
password=password,
session=session)
if client is None:
self.module.fail_json(msg="Failed to login to %s" % hostname)
return client
def get_tags_for_object(self, tag_service=None, tag_assoc_svc=None, dobj=None):
"""
Return list of tag objects associated with an object
Args:
dobj: Dynamic object
tag_service: Tag service object
tag_assoc_svc: Tag Association object
Returns: List of tag objects associated with the given object
"""
# This method returns list of tag objects only,
# Please use get_tags_for_dynamic_obj for more object details
tags = []
if not dobj:
return tags
if not tag_service:
tag_service = self.api_client.tagging.Tag
if not tag_assoc_svc:
tag_assoc_svc = self.api_client.tagging.TagAssociation
tag_ids = tag_assoc_svc.list_attached_tags(dobj)
for tag_id in tag_ids:
tags.append(tag_service.get(tag_id))
return tags
def get_tags_for_dynamic_obj(self, mid=None, type=None):
"""
Return list of tag object details associated with object
Args:
mid: Dynamic object for specified object
type: Type of DynamicID to lookup
Returns: List of tag object details associated with the given object
"""
tags = []
if mid is None:
return tags
dynamic_managed_object = DynamicID(type=type, id=mid)
temp_tags_model = self.get_tags_for_object(dobj=dynamic_managed_object)
category_service = self.api_client.tagging.Category
for tag_obj in temp_tags_model:
tags.append({
'id': tag_obj.id,
'category_name': category_service.get(tag_obj.category_id).name,
'name': tag_obj.name,
'description': tag_obj.description,
'category_id': tag_obj.category_id,
})
return tags
def get_tags_for_cluster(self, cluster_mid=None):
"""
Return list of tag object associated with cluster
Args:
cluster_mid: Dynamic object for cluster
Returns: List of tag object associated with the given cluster
"""
return self.get_tags_for_dynamic_obj(mid=cluster_mid, type='ClusterComputeResource')
def get_tags_for_hostsystem(self, hostsystem_mid=None):
"""
Return list of tag object associated with host system
Args:
hostsystem_mid: Dynamic object for host system
Returns: List of tag object associated with the given host system
"""
return self.get_tags_for_dynamic_obj(mid=hostsystem_mid, type='HostSystem')
def get_tags_for_vm(self, vm_mid=None):
"""
Return list of tag object associated with virtual machine
Args:
vm_mid: Dynamic object for virtual machine
Returns: List of tag object associated with the given virtual machine
"""
return self.get_tags_for_dynamic_obj(mid=vm_mid, type='VirtualMachine')
def get_vm_tags(self, tag_service=None, tag_association_svc=None, vm_mid=None):
"""
Return list of tag name associated with virtual machine
Args:
tag_service: Tag service object
tag_association_svc: Tag association object
vm_mid: Dynamic object for virtual machine
Returns: List of tag names associated with the given virtual machine
"""
# This API returns just names of tags
# Please use get_tags_for_vm for more tag object details
tags = []
if vm_mid is None:
return tags
dynamic_managed_object = DynamicID(type='VirtualMachine', id=vm_mid)
temp_tags_model = self.get_tags_for_object(tag_service, tag_association_svc, dynamic_managed_object)
for tag_obj in temp_tags_model:
tags.append(tag_obj.name)
return tags
def get_library_item_by_name(self, name):
"""
Returns the identifier of the library item with the given name.
Args:
name (str): The name of item to look for
Returns:
str: The item ID or None if the item is not found
"""
find_spec = Item.FindSpec(name=name)
item_ids = self.api_client.content.library.Item.find(find_spec)
item_id = item_ids[0] if item_ids else None
return item_id
def get_datacenter_by_name(self, datacenter_name):
"""
Returns the identifier of a datacenter
Note: The method assumes only one datacenter with the mentioned name.
"""
filter_spec = Datacenter.FilterSpec(names=set([datacenter_name]))
datacenter_summaries = self.api_client.vcenter.Datacenter.list(filter_spec)
datacenter = datacenter_summaries[0].datacenter if len(datacenter_summaries) > 0 else None
return datacenter
def get_folder_by_name(self, datacenter_name, folder_name):
"""
Returns the identifier of a folder
with the mentioned names.
"""
datacenter = self.get_datacenter_by_name(datacenter_name)
if not datacenter:
return None
filter_spec = Folder.FilterSpec(type=Folder.Type.VIRTUAL_MACHINE,
names=set([folder_name]),
datacenters=set([datacenter]))
folder_summaries = self.api_client.vcenter.Folder.list(filter_spec)
folder = folder_summaries[0].folder if len(folder_summaries) > 0 else None
return folder
def get_resource_pool_by_name(self, datacenter_name, resourcepool_name):
"""
Returns the identifier of a resource pool
with the mentioned names.
"""
datacenter = self.get_datacenter_by_name(datacenter_name)
if not datacenter:
return None
names = set([resourcepool_name]) if resourcepool_name else None
filter_spec = ResourcePool.FilterSpec(datacenters=set([datacenter]),
names=names)
resource_pool_summaries = self.api_client.vcenter.ResourcePool.list(filter_spec)
resource_pool = resource_pool_summaries[0].resource_pool if len(resource_pool_summaries) > 0 else None
return resource_pool
def get_datastore_by_name(self, datacenter_name, datastore_name):
"""
Returns the identifier of a datastore
with the mentioned names.
"""
datacenter = self.get_datacenter_by_name(datacenter_name)
if not datacenter:
return None
names = set([datastore_name]) if datastore_name else None
filter_spec = Datastore.FilterSpec(datacenters=set([datacenter]),
names=names)
datastore_summaries = self.api_client.vcenter.Datastore.list(filter_spec)
datastore = datastore_summaries[0].datastore if len(datastore_summaries) > 0 else None
return datastore
def get_cluster_by_name(self, datacenter_name, cluster_name):
"""
Returns the identifier of a cluster
with the mentioned names.
"""
datacenter = self.get_datacenter_by_name(datacenter_name)
if not datacenter:
return None
names = set([cluster_name]) if cluster_name else None
filter_spec = Cluster.FilterSpec(datacenters=set([datacenter]),
names=names)
cluster_summaries = self.api_client.vcenter.Cluster.list(filter_spec)
cluster = cluster_summaries[0].cluster if len(cluster_summaries) > 0 else None
return cluster
def get_host_by_name(self, datacenter_name, host_name):
"""
Returns the identifier of a Host
with the mentioned names.
"""
datacenter = self.get_datacenter_by_name(datacenter_name)
if not datacenter:
return None
names = set([host_name]) if host_name else None
filter_spec = Host.FilterSpec(datacenters=set([datacenter]),
names=names)
host_summaries = self.api_client.vcenter.Host.list(filter_spec)
host = host_summaries[0].host if len(host_summaries) > 0 else None
return host
@staticmethod
def search_svc_object_by_name(service, svc_obj_name=None):
"""
Return service object by name
Args:
service: Service object
svc_obj_name: Name of service object to find
Returns: Service object if found else None
"""
if not svc_obj_name:
return None
for svc_object in service.list():
svc_obj = service.get(svc_object)
if svc_obj.name == svc_obj_name:
return svc_obj
return None
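
# Illustrative usage sketch (assumption, not part of the original module): a
# minimal Ansible module built on top of VmwareRestClient. The module layout
# and the 'vm_moid' option name below are hypothetical.
#
#     from ansible.module_utils.basic import AnsibleModule
#     from ansible.module_utils.vmware_rest_client import VmwareRestClient
#
#     def main():
#         argument_spec = VmwareRestClient.vmware_client_argument_spec()
#         argument_spec.update(vm_moid=dict(type='str', required=True))
#         module = AnsibleModule(argument_spec=argument_spec)
#         client = VmwareRestClient(module)
#         tags = client.get_tags_for_vm(vm_mid=module.params['vm_moid'])
#         module.exit_json(changed=False, tags=tags)
#
#     if __name__ == '__main__':
#         main()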
| 2ndQuadrant/ansible | lib/ansible/module_utils/vmware_rest_client.py | Python | gpl-3.0 | 13,483 |
# NOTE: this should inherit from (object) to function correctly with python 2.7
class CachedProperty(object):
""" A property that is only computed once per instance and
then stores the result in _cached_properties of the object.
Source: https://github.com/bottlepy/bottle/commit/fa7733e075da0d790d809aa3d2f53071897e6f76
"""
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None:
return self
propname = self.func.__name__
if not hasattr(obj, '_cached_properties'):
obj._cached_properties = {}
if propname not in obj._cached_properties:
obj._cached_properties[propname] = self.func(obj)
# value = obj.__dict__[propname] = self.func(obj)
return obj._cached_properties[propname]
@staticmethod
def clear(obj):
"""clears cache of obj"""
if hasattr(obj, '_cached_properties'):
obj._cached_properties = {}
@staticmethod
def is_cached(obj, propname):
if hasattr(obj, '_cached_properties') and propname in obj._cached_properties:
return True
else:
return False | psy0rz/zfs_autobackup | zfs_autobackup/CachedProperty.py | Python | gpl-3.0 | 1,252 |
from vsg.rules import token_prefix
from vsg import token
lTokens = []
lTokens.append(token.signal_declaration.identifier)
class rule_008(token_prefix):
'''
This rule checks for valid prefixes on signal identifiers.
Default signal prefix is *s\_*.
|configuring_prefix_and_suffix_rules_link|
**Violation**
.. code-block:: vhdl
signal wr_en : std_logic;
signal rd_en : std_logic;
**Fix**
.. code-block:: vhdl
signal s_wr_en : std_logic;
signal s_rd_en : std_logic;
'''
def __init__(self):
token_prefix.__init__(self, 'signal', '008', lTokens)
self.prefixes = ['s_']
self.solution = 'Signal identifiers'
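
# Illustrative configuration sketch (assumption, not part of the original
# file): the prefix list can be overridden through a VSG configuration file,
# e.g. to require "sig_" instead of the default "s_":
#
#     rule:
#       signal_008:
#         prefixes: ['sig_']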
| jeremiah-c-leary/vhdl-style-guide | vsg/rules/signal/rule_008.py | Python | gpl-3.0 | 705 |
#!/usr/bin/python
# Author: Thomas Goodwin <[email protected]>
import urllib2, json, os, sys, re
def download_asset(path, url):
asset_path = None
try:
file_name = os.path.basename(url)
asset_path = os.path.join(path, file_name)
if os.path.exists(asset_path):
# Skip downloading
asset_path = None
else:
if not os.path.exists(path):
os.makedirs(path)
f = urllib2.urlopen(url)
with open(asset_path, "wb") as local_file:
local_file.write(f.read())
except Exception as e:
sys.exit('Failed to fetch IDE. Error: {0}'.format(e))
finally:
return asset_path
def handle_release_assets(assets):
assets = [ asset for asset in assets if re.match(r'redhawk-ide.+?(?=x86_64)', asset['name'])]
if not assets:
sys.exit('Failed to find the IDE asset')
elif len(assets) > 1:
sys.exit('Found too many IDE assets matching that description...?')
return download_asset('downloads', assets[0]['browser_download_url'])
def run(pv):
RELEASES_URL = 'http://api.github.com/repos/RedhawkSDR/redhawk/releases'
ide_asset = ''
try:
releases = json.loads(urllib2.urlopen(RELEASES_URL).read())
releases = [r for r in releases if r['tag_name'] == pv]
if releases:
ide_asset = handle_release_assets(releases[0]['assets'])
else:
sys.exit('Failed to find the release: {0}'.format(pv))
finally:
return ide_asset
if __name__ == '__main__':
# First argument is the version
asset = run(sys.argv[1])
print asset | Geontech/docker-redhawk-ubuntu | Dockerfiles/files/build/ide-fetcher.py | Python | gpl-3.0 | 1,661 |
from setuptools import setup
def readme():
with open('README.rst') as f:
return f.read()
setup(name='MapperTools',
packages=['MapperTools'],
version='0.1',
description='A python 2.7 implementation of Mapper algorithm for Topological Data Analysis',
keywords='mapper TDA python',
long_description=readme(),
url='http://github.com/alpatania',
author='Alice Patania',
author_email='[email protected]',
license='MIT',
classifiers=['Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7'],
      install_requires=['hdbscan', 'sklearn', 'pandas'],  # 'collections' is stdlib, not a PyPI dependency
include_package_data=True,
zip_safe=False) | alpatania/MapperTools | setup.py | Python | gpl-3.0 | 791 |
# Copyright 2013-2015 Lenna X. Peterson. All rights reserved.
from .meta import classproperty
class AtomData(object):
# Maximum ASA for each residue
# from Miller et al. 1987, JMB 196: 641-656
total_asa = {
'A': 113.0,
'R': 241.0,
'N': 158.0,
'D': 151.0,
'C': 140.0,
'Q': 189.0,
'E': 183.0,
'G': 85.0,
'H': 194.0,
'I': 182.0,
'L': 180.0,
'K': 211.0,
'M': 204.0,
'F': 218.0,
'P': 143.0,
'S': 122.0,
'T': 146.0,
'W': 259.0,
'Y': 229.0,
'V': 160.0,
}
@classmethod
def is_surface(cls, resn, asa, total_asa=None, cutoff=0.1):
"""Return True if ratio of residue ASA to max ASA >= cutoff"""
if total_asa is None:
total_asa = cls.total_asa
resn = resn.upper()
if len(resn) == 3:
resn = cls.three_to_one[resn]
return float(asa) / total_asa[resn] >= cutoff
three_to_full = {
'Val': 'Valine', 'Ile': 'Isoleucine', 'Leu': 'Leucine',
'Glu': 'Glutamic acid', 'Gln': 'Glutamine',
'Asp': 'Aspartic acid', 'Asn': 'Asparagine', 'His': 'Histidine',
'Trp': 'Tryptophan', 'Phe': 'Phenylalanine', 'Tyr': 'Tyrosine',
'Arg': 'Arginine', 'Lys': 'Lysine',
'Ser': 'Serine', 'Thr': 'Threonine',
'Met': 'Methionine', 'Ala': 'Alanine',
'Gly': 'Glycine', 'Pro': 'Proline', 'Cys': 'Cysteine'}
three_to_one = {
'VAL': 'V', 'ILE': 'I', 'LEU': 'L', 'GLU': 'E', 'GLN': 'Q',
'ASP': 'D', 'ASN': 'N', 'HIS': 'H', 'TRP': 'W', 'PHE': 'F', 'TYR': 'Y',
'ARG': 'R', 'LYS': 'K', 'SER': 'S', 'THR': 'T', 'MET': 'M', 'ALA': 'A',
'GLY': 'G', 'PRO': 'P', 'CYS': 'C'}
one_to_three = {o: t for t, o in three_to_one.iteritems()}
@classproperty
def one_to_full(cls):
"""
This can't see three_to_full unless explicitly passed because
dict comprehensions create their own local scope
"""
return {o: cls.three_to_full[t.title()] for t, o in cls.three_to_one.iteritems()}
res_atom_list = dict(
ALA=['C', 'CA', 'CB', 'N', 'O'],
ARG=['C', 'CA', 'CB', 'CD', 'CG', 'CZ', 'N', 'NE', 'NH1', 'NH2', 'O'],
ASN=['C', 'CA', 'CB', 'CG', 'N', 'ND2', 'O', 'OD1'],
ASP=['C', 'CA', 'CB', 'CG', 'N', 'O', 'OD1', 'OD2'],
CYS=['C', 'CA', 'CB', 'N', 'O', 'SG'],
GLN=['C', 'CA', 'CB', 'CD', 'CG', 'N', 'NE2', 'O', 'OE1'],
GLU=['C', 'CA', 'CB', 'CD', 'CG', 'N', 'O', 'OE1', 'OE2'],
GLY=['C', 'CA', 'N', 'O'],
HIS=['C', 'CA', 'CB', 'CD2', 'CE1', 'CG', 'N', 'ND1', 'NE2', 'O'],
ILE=['C', 'CA', 'CB', 'CD1', 'CG1', 'CG2', 'N', 'O'],
LEU=['C', 'CA', 'CB', 'CD1', 'CD2', 'CG', 'N', 'O'],
LYS=['C', 'CA', 'CB', 'CD', 'CE', 'CG', 'N', 'NZ', 'O'],
MET=['C', 'CA', 'CB', 'CE', 'CG', 'N', 'O', 'SD'],
PHE=['C', 'CA', 'CB', 'CD1', 'CD2',
'CE1', 'CE2', 'CG', 'CZ', 'N', 'O'],
PRO=['C', 'CA', 'CB', 'CD', 'CG', 'N', 'O'],
SER=['C', 'CA', 'CB', 'N', 'O', 'OG'],
THR=['C', 'CA', 'CB', 'CG2', 'N', 'O', 'OG1'],
TRP=['C', 'CA', 'CB', 'CD1', 'CD2', 'CE2',
'CE3', 'CG', 'CH2', 'CZ2', 'CZ3', 'N', 'NE1', 'O'],
TYR=['C', 'CA', 'CB', 'CD1', 'CD2',
'CE1', 'CE2', 'CG', 'CZ', 'N', 'O', 'OH'],
VAL=['C', 'CA', 'CB', 'CG1', 'CG2', 'N', 'O'],
)
all_chi = dict(
chi1=dict(
ARG=['N', 'CA', 'CB', 'CG'],
ASN=['N', 'CA', 'CB', 'CG'],
ASP=['N', 'CA', 'CB', 'CG'],
CYS=['N', 'CA', 'CB', 'SG'],
GLN=['N', 'CA', 'CB', 'CG'],
GLU=['N', 'CA', 'CB', 'CG'],
HIS=['N', 'CA', 'CB', 'CG'],
ILE=['N', 'CA', 'CB', 'CG1'],
LEU=['N', 'CA', 'CB', 'CG'],
LYS=['N', 'CA', 'CB', 'CG'],
MET=['N', 'CA', 'CB', 'CG'],
PHE=['N', 'CA', 'CB', 'CG'],
PRO=['N', 'CA', 'CB', 'CG'],
SER=['N', 'CA', 'CB', 'OG'],
THR=['N', 'CA', 'CB', 'OG1'],
TRP=['N', 'CA', 'CB', 'CG'],
TYR=['N', 'CA', 'CB', 'CG'],
VAL=['N', 'CA', 'CB', 'CG1'],
),
chi2=dict(
ARG=['CA', 'CB', 'CG', 'CD'],
ASN=['CA', 'CB', 'CG', 'OD1'],
ASP=['CA', 'CB', 'CG', 'OD1'],
GLN=['CA', 'CB', 'CG', 'CD'],
GLU=['CA', 'CB', 'CG', 'CD'],
HIS=['CA', 'CB', 'CG', 'ND1'],
ILE=['CA', 'CB', 'CG1', 'CD1'],
LEU=['CA', 'CB', 'CG', 'CD1'],
LYS=['CA', 'CB', 'CG', 'CD'],
MET=['CA', 'CB', 'CG', 'SD'],
PHE=['CA', 'CB', 'CG', 'CD1'],
PRO=['CA', 'CB', 'CG', 'CD'],
TRP=['CA', 'CB', 'CG', 'CD1'],
TYR=['CA', 'CB', 'CG', 'CD1'],
),
chi3=dict(
ARG=['CB', 'CG', 'CD', 'NE'],
GLN=['CB', 'CG', 'CD', 'OE1'],
GLU=['CB', 'CG', 'CD', 'OE1'],
LYS=['CB', 'CG', 'CD', 'CE'],
MET=['CB', 'CG', 'SD', 'CE'],
),
chi4=dict(
ARG=['CG', 'CD', 'NE', 'CZ'],
LYS=['CG', 'CD', 'CE', 'NZ'],
),
chi5=dict(
ARG=['CD', 'NE', 'CZ', 'NH1'],
),
)
alt_chi = dict(
chi1=dict(
VAL=['N', 'CA', 'CB', 'CG2'],
),
chi2=dict(
ASP=['CA', 'CB', 'CG', 'OD2'],
LEU=['CA', 'CB', 'CG', 'CD2'],
PHE=['CA', 'CB', 'CG', 'CD2'],
TYR=['CA', 'CB', 'CG', 'CD2'],
),
)
chi_atoms = dict(
ARG=set(['CB', 'CA', 'CG', 'NE', 'N', 'CZ', 'NH1', 'CD']),
ASN=set(['CB', 'CA', 'N', 'CG', 'OD1']),
ASP=set(['CB', 'CA', 'N', 'CG', 'OD1', 'OD2']),
CYS=set(['CB', 'CA', 'SG', 'N']),
GLN=set(['CB', 'CA', 'CG', 'N', 'CD', 'OE1']),
GLU=set(['CB', 'CA', 'CG', 'N', 'CD', 'OE1']),
HIS=set(['ND1', 'CB', 'CA', 'CG', 'N']),
ILE=set(['CG1', 'CB', 'CA', 'CD1', 'N']),
LEU=set(['CB', 'CA', 'CG', 'CD1', 'CD2', 'N']),
LYS=set(['CB', 'CA', 'CG', 'CE', 'N', 'NZ', 'CD']),
MET=set(['CB', 'CA', 'CG', 'CE', 'N', 'SD']),
PHE=set(['CB', 'CA', 'CG', 'CD1', 'CD2', 'N']),
PRO=set(['CB', 'CA', 'N', 'CG', 'CD']),
SER=set(['OG', 'CB', 'CA', 'N']),
THR=set(['CB', 'CA', 'OG1', 'N']),
TRP=set(['CB', 'CA', 'CG', 'CD1', 'N']),
TYR=set(['CB', 'CA', 'CG', 'CD1', 'CD2', 'N']),
VAL=set(['CG1', 'CG2', 'CB', 'CA', 'N']),
)
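
# Illustrative usage sketch (assumption - this block is not part of the
# original module; the values follow from the total_asa table above):
if __name__ == "__main__":
    # ALA has a maximum ASA of 113.0, so 20.0 / 113.0 ~ 0.18 >= 0.1 (surface)
    print(AtomData.is_surface('ALA', asa=20.0))   # True
    # 5.0 / 113.0 ~ 0.04 falls below the default 0.1 cutoff (buried)
    print(AtomData.is_surface('A', asa=5.0))      # False
    # one-letter -> three-letter lookup built from three_to_one
    print(AtomData.one_to_three['W'])             # TRP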
| lennax/util | util/atom_data.py | Python | gpl-3.0 | 6,616 |
import sys
import subprocess
# run the shell test script and propagate its exit status
result = subprocess.Popen('sh test.sh', shell=True)
# stdout is not piped, so communicate() just waits for the script to finish
text = result.communicate()[0]
| marcindulak/accts | accts/asegpaw/3.6.0-0.9.0.8965/ase/test.py | Python | gpl-3.0 | 142 |
#!D:\PycharmProjects\UFT\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==0.6c11','console_scripts','easy_install'
__requires__ = 'setuptools==0.6c11'
import sys
from pkg_resources import load_entry_point
sys.exit(
load_entry_point('setuptools==0.6c11', 'console_scripts', 'easy_install')()
)
| hardanimal/UFT_UPGEM | Scripts/easy_install-script.py | Python | gpl-3.0 | 309 |
# Author: Jason Lu
import urllib.request
from bs4 import BeautifulSoup
import time
req_header = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3',
'Accept-Charset':'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding':'en-us',
'Connection':'keep-alive',
'Referer':'http://www.baidu.com/'
}
req_timeout = 5
testUrl = "http://www.baidu.com/"
testStr = "wahaha"
# output file for proxies that pass the connectivity check
file1 = open('proxy.txt', 'w')
# url = ""
# req = urllib2.Request(url,None,req_header)
# jsondatas = urllib2.urlopen(req,None,req_timeout).read()
# cookies = urllib2.HTTPCookieProcessor()
# keep the login/session state across requests by handling cookies
import http.cookiejar
# create a CookieJar object with http.cookiejar.CookieJar()
cjar = http.cookiejar.CookieJar()
cookies = urllib.request.HTTPCookieProcessor(cjar)
checked_num = 0
grasp_num = 0
for page in range(1, 3):
# req = urllib2.Request('http://www.xici.net.co/nn/' + str(page), None, req_header)
# html_doc = urllib2.urlopen(req, None, req_timeout).read()
req = urllib.request.Request('http://www.xici.net.co/nn/' + str(page))
req.add_header('User-Agent',
"Mozilla/5.0 (iPhone; CPU iPhone OS 10_3 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) CriOS/56.0.2924.75 Mobile/14E5239e Safari/602.1")
html_doc = urllib.request.urlopen(req).read().decode('utf-8')
# html_doc = urllib2.urlopen('http://www.xici.net.co/nn/' + str(page)).read()
soup = BeautifulSoup(html_doc)
trs = soup.find('table', id='ip_list').find_all('tr')
print(trs)
for tr in trs[1:]:
tds = tr.find_all('td')
ip = tds[1].text.strip()
port = tds[2].text.strip()
protocol = tds[5].text.strip()
if protocol == 'HTTP' or protocol == 'HTTPS':
#of.write('%s=%s:%s\n' % (protocol, ip, port))
print('%s=%s:%s' % (protocol, ip, port))
grasp_num +=1
proxyHandler = urllib.request.ProxyHandler({"http": r'http://%s:%s' % (ip, port)})
opener = urllib.request.build_opener(cookies, proxyHandler)
opener.addheaders = [('User-Agent',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.63 Safari/537.36')]
t1 = time.time()
try:
req = opener.open(testUrl, timeout=req_timeout)
result = req.read()
timeused = time.time() - t1
pos = result.find(testStr)
                if pos != -1:
file1.write(protocol+"\t"+ip+"\t"+port+"\n")
checked_num+=1
print(checked_num, grasp_num)
else:
continue
except Exception as e:
print(str(e))
continue
file1.close()
print(checked_num,grasp_num) | jinzekid/codehub | python/lyutil/ly_proxy_test.py | Python | gpl-3.0 | 3,046 |
import tensorflow as tf
import matplotlib.pyplot as plt
import math
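# Monte Carlo estimate of pi: draw (x, y) uniformly from the square
# [-1, 1] x [-1, 1]; the fraction of points falling inside the unit circle
# tends to (circle area) / (square area) = pi / 4, so pi ~= 4 * hits / trials.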
x_node = tf.random_uniform([1], minval=-1, maxval=1, dtype=tf.float32,
name='x_node')
y_node = tf.random_uniform([1], minval=-1, maxval=1, dtype=tf.float32,
name='y_node')
times = 5000
hits = 0
pis = []
with tf.Session() as session:
for i in range(1, times):
x = session.run(x_node)
y = session.run(y_node)
if x*x + y*y < 1:
hits += 1
pass
pi = 4 * float(hits) / i
print(pi)
pis.append(pi)
pass
pass
plt.plot(pis)
plt.plot([0, times], [math.pi, math.pi])
plt.show()
| thiswind/nn_practice | tensorflow/calculate_pi_old.py | Python | gpl-3.0 | 581 |
# .oooooooo .oooo. oooooooo .ooooo. .ooooo.
# 888' `88b `P )88b d'""7d8P d88' `88b d88' `88b
# 888 888 .oP"888 .d8P' 888ooo888 888ooo888
# `88bod8P' d8( 888 .d8P' .P 888 .o 888 .o
# `8oooooo. `Y888""8o d8888888P `Y8bod8P' `Y8bod8P'
# d" YD
# "Y88888P'
#
# config class - btx
#
import sys
import os
import configparser
import logging
log = logging.getLogger(__name__)
class gcfg(object):
datapath = None
cfgpath = None
defaults = {'bind_address': '127.0.0.1',
'port': '4242',
'data_dir': '~/.gazee',
'temp_dir': '',
'comic_path': '',
'comic_scan_interval': '60',
'comics_per_page': '15',
'thumb_maxwidth': '300',
'thumb_maxheight': '400',
'image_script': '0',
'mylar_db': '',
'ssl_key': '',
'ssl_cert': '',
'web_text_color': 'ffffff',
'main_color': '757575',
'accent_color': 'bdbdbd'}
def __init__(self, data_override=None):
self.cfg = configparser.ConfigParser()
self.datapath = data_override
self.logpath = None
self.dbpath = None
self.sessionspath = None
print("Created a new gcfg...")
if self.datapath is not None:
self.datapath = os.path.realpath(os.path.expanduser(self.datapath))
if self.datapath is None and data_override is not None:
log.error("Somehow the datapath is now None.")
self.configRead()
log.debug("Initialized configation... in %s", __name__)
def create_init_dirs(self, data_dir):
        ''' Sets up the data_dir plus the paths that aren't
        configurable and are relative to the data_dir - the
        log_dir, db_dir and sessions_dir
'''
if self.datapath is not None and data_dir is None:
log.error("data_dir is None while datapath is not.")
self.datapath = data_dir
self.logpath = os.path.join(self.datapath, "logs")
self.dbpath = os.path.join(self.datapath, "db")
self.sessionspath = os.path.join(self.datapath, "sessions")
if not os.path.exists(self.logpath):
os.makedirs(self.logpath, 0o700)
if not os.path.exists(self.dbpath):
os.makedirs(self.dbpath, 0o700)
if not os.path.exists(self.sessionspath):
os.makedirs(self.sessionspath, 0o700)
def find_config(self):
''' Looks for where the data dir is located.
            Once it finds the dir, it calls create_init_dirs().
'''
dirfound = None
firstdir = None
cfgfound = None
# print("Looking for config in find_config() - datapath: %s" % (self.datapath))
if self.datapath is not None:
if not os.path.exists(self.datapath):
msg = 'Path %s does not exist.\n\nDo you wish to create it? [y/n]: ' % self.datapath
if self.get_yn(msg):
try:
os.makedirs(self.datapath)
except PermissionError:
print("You don't have the permissions to create that path.\nExiting.")
sys.exit(1)
else:
print("Exiting.")
sys.exit(1)
firstdir = dirfound = self.datapath
cfile = os.path.join(dirfound, "app.ini")
if os.path.exists(cfile):
cfgfound = cfile
else:
cfgfound = None
else:
dirs = ['data', '~/.gazee', '../data']
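            # Candidate data directories are tried in order; the first one that
            # exists is used as the data dir, and the search stops as soon as an
            # app.ini is found.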
for d in dirs:
ddir = os.path.realpath(os.path.expanduser(d))
cfile = os.path.join(ddir, "app.ini")
if os.path.exists(ddir) and os.path.isdir(ddir):
if firstdir is None:
firstdir = ddir
dirfound = ddir
if os.path.exists(cfile):
cfgfound = cfile
break
if dirfound is None:
log.error("Data directory not found!")
return False
dirfound = firstdir
self.datapath = dirfound
self.create_init_dirs(dirfound)
if cfgfound is not None:
log.debug('cfgfound=%s', cfgfound)
self.cfgpath = cfgfound
else:
cfile = os.path.join(self.datapath, 'app.ini')
self.cfg['GLOBAL'] = {}
self.cfg['DEFAULT'] = self.defaults
self.cfg.set('DEFAULT', 'data_dir', self.datapath)
self.cfg.set('DEFAULT', 'image_script', self.defaults['image_script'])
cfgfound = cfile
self.cfgpath = cfgfound
self.configWrite()
self.cfg.set('GLOBAL', 'data_dir', self.datapath)
self.cfg.set('GLOBAL', 'log_dir', self.logpath)
self.cfg.set('GLOBAL', 'db_dir', self.dbpath)
self.cfg.set('GLOBAL', 'sessions_dir', self.sessionspath)
return True
def configWrite(self):
''' Write self.cfg to disk
'''
with open(self.cfgpath, 'w') as configfile:
self.cfg.write(configfile)
return True
def globalize(self):
''' Place the cfg variables into the self.config
scope
'''
mod = sys.modules[__name__]
for vn in self.cfg['GLOBAL']:
vn = vn.upper()
v = self.cfg.get('GLOBAL', vn)
if vn in ['PORT', 'COMIC_SCAN_INTERVAL', 'IMAGE_SCRIPT',
'COMICS_PER_PAGE', 'THUMB_MAXWIDTH', 'THUMB_MAXHEIGHT']:
if v == '':
v = self.cfg.get('DEFAULT', vn)
v = int(v, 10)
setattr(mod, vn, v)
def get_yn(self, msg):
while True:
v = input(msg)
if v.lower() in ['y', 'n']:
break
print("\nInvalid response. Enter 'y' or 'n'.")
return v.lower() == 'y'
def get_path(self, name):
p = None
while True:
prompt = 'Please enter %s: ' % name
p = input(prompt)
if not os.path.exists(p):
msg = 'Path %s does not exist.\n\nDo you wish to create it? [y/n]: ' % p
if self.get_yn(msg):
try:
os.makedirs(p)
except PermissionError:
print("You don't have the permissions to create that path.\n")
continue
else:
print("Not creating directory: %s" % p)
continue
break
return p
def configRead(self):
''' Read the app.ini config file.
'''
print("configRead() being called...")
dp = self.find_config()
if dp is None or self.datapath is None:
log.error("Failed to find_config()")
sys.exit(1)
self.cfgpath = os.path.join(self.datapath, 'app.ini')
self.cfg.read(self.cfgpath)
for k in self.defaults.keys():
if k not in self.cfg['DEFAULT']:
v = self.defaults[k]
log.info("Setting default[%s] = %s", k, v)
self.cfg['DEFAULT'][k] = v
if 'GLOBAL' not in self.cfg:
log.info("Resetting GLOBAL cfg...")
self.cfg['GLOBAL'] = {}
self.cfg.set('GLOBAL', 'data_dir', self.datapath)
if 'comic_path' not in self.cfg['GLOBAL'] or self.cfg.get('GLOBAL', 'comic_path') in [None, '']:
cpath = self.get_path("your comic share's path")
if cpath is not None:
self.cfg.set('GLOBAL', 'comic_path', cpath)
if 'temp_dir' not in self.cfg['GLOBAL'] or self.cfg.get('GLOBAL', 'temp_dir') in [None, '']:
tdir = self.get_path('a directory for temporary (large) file storage')
if tdir is not None:
self.cfg.set('GLOBAL', 'temp_dir', tdir)
self.configWrite()
self.cfg.set('GLOBAL', 'log_dir', self.logpath)
self.cfg.set('GLOBAL', 'db_dir', self.dbpath)
self.cfg.set('GLOBAL', 'sessions_dir', self.sessionspath)
self.globalize()
return True
def updateCfg(self, newvals):
''' Update the self.cfg with newvals, which should be
a dict in the form {'GLOBAL': {'varname': 'varval'}}
'''
log.debug(newvals)
for k in newvals['GLOBAL'].keys():
if not isinstance(newvals['GLOBAL'][k], str):
if newvals['GLOBAL'][k] is None:
newvals['GLOBAL'][k] = ''
else:
log.debug("newvals['GLOBAL'][%s] is type %s",
k, str(type(newvals['GLOBAL'][k])))
self.cfg.set('GLOBAL', k, newvals['GLOBAL'][k])
self.configWrite()
self.globalize()
return True
| btxgit/gazee | gazee/config.py | Python | gpl-3.0 | 9,088 |
# -*- coding: utf-8 -*-
# Mathmaker creates automatically maths exercises sheets
# with their answers
# Copyright 2006-2017 Nicolas Hainaux <[email protected]>
# This file is part of Mathmaker.
# Mathmaker is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
# Mathmaker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Mathmaker; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import pytest
from mathmaker.lib.core.root_calculus import Value
from mathmaker.lib.core.base_geometry import Point
from mathmaker.lib.core.geometry import Polygon
@pytest.fixture
def p1():
p1 = Polygon([Point('A', 0.5, 0.5),
Point('B', 3, 1),
Point('C', 3.2, 4),
Point('D', 0.8, 3)
])
p1.side[0].label = Value(4, unit='cm')
p1.side[1].label = Value(3, unit='cm')
p1.side[2].label = Value(2, unit='cm')
p1.side[3].label = Value(6.5, unit='cm')
p1.angle[0].label = Value(64, unit="\\textdegree")
p1.angle[1].label = Value(128, unit="\\textdegree")
p1.angle[2].label = Value(32, unit="\\textdegree")
p1.angle[3].label = Value(256, unit="\\textdegree")
p1.angle[0].mark = 'simple'
p1.angle[1].mark = 'simple'
p1.angle[2].mark = 'simple'
p1.angle[3].mark = 'simple'
return p1
def test_p1_into_euk(p1):
"""Check Polygon's generated euk file."""
assert p1.into_euk() == \
'box -0.1, -0.1, 3.8, 4.6\n\n'\
'A = point(0.5, 0.5)\n'\
'B = point(3, 1)\n'\
'C = point(3.2, 4)\n'\
'D = point(0.8, 3)\n'\
'\n'\
'draw\n'\
' (A.B.C.D)\n'\
' $\\rotatebox{11}{\sffamily 4~cm}$ A 11 - 12.7 deg 4.1\n'\
' $\\rotatebox{86}{\sffamily 3~cm}$ B 86 - 8.9 deg 4.9\n'\
' $\\rotatebox{23}{\sffamily 2~cm}$ C 203 - 12.2 deg 4.2\n'\
' $\\rotatebox{83}{\sffamily 6.5~cm}$ D 263 - 12.9 deg 4.1\n'\
' $\\rotatebox{47.3}{\sffamily 64\\textdegree}$ A 47.3 deg 2.7\n'\
' $\\rotatebox{-41.3}{\sffamily 128\\textdegree}$ B 138.7 deg 2.7\n'\
' $\\rotatebox{54.3}{\sffamily 32\\textdegree}$ C 234.3 deg 2.7\n'\
' $\\rotatebox{322.7}{\sffamily 256\\textdegree}$ D 322.7 deg 2.7\n'\
' "A" A 227.3 deg, font("sffamily")\n'\
' "B" B 318.7 deg, font("sffamily")\n'\
' "C" C 54.3 deg, font("sffamily")\n'\
' "D" D 142.7 deg, font("sffamily")\n'\
'end\n\n'\
'label\n'\
' B, A, D simple\n'\
' C, B, A simple\n'\
' D, C, B simple\n'\
' A, D, C simple\n'\
'end\n'
def test_p1_rename_errors(p1):
"""Check wrong arguments trigger exceptions when renaming."""
with pytest.raises(TypeError):
p1.rename(5678)
with pytest.raises(ValueError):
p1.rename('KJLIZ')
def test_p1_renamed(p1):
"""Check renaming Polygon is OK."""
p1.rename('YOGA')
assert p1.into_euk() == \
'box -0.1, -0.1, 3.8, 4.6\n\n'\
'A = point(0.5, 0.5)\n'\
'G = point(3, 1)\n'\
'O = point(3.2, 4)\n'\
'Y = point(0.8, 3)\n'\
'\n'\
'draw\n'\
' (A.G.O.Y)\n'\
' $\\rotatebox{11}{\sffamily 4~cm}$ A 11 - 12.7 deg 4.1\n'\
' $\\rotatebox{86}{\sffamily 3~cm}$ G 86 - 8.9 deg 4.9\n'\
' $\\rotatebox{23}{\sffamily 2~cm}$ O 203 - 12.2 deg 4.2\n'\
' $\\rotatebox{83}{\sffamily 6.5~cm}$ Y 263 - 12.9 deg 4.1\n'\
' $\\rotatebox{47.3}{\sffamily 64\\textdegree}$ A 47.3 deg 2.7\n'\
' $\\rotatebox{-41.3}{\sffamily 128\\textdegree}$ G 138.7 deg 2.7\n'\
' $\\rotatebox{54.3}{\sffamily 32\\textdegree}$ O 234.3 deg 2.7\n'\
' $\\rotatebox{322.7}{\sffamily 256\\textdegree}$ Y 322.7 deg 2.7\n'\
' "A" A 227.3 deg, font("sffamily")\n'\
' "G" G 318.7 deg, font("sffamily")\n'\
' "O" O 54.3 deg, font("sffamily")\n'\
' "Y" Y 142.7 deg, font("sffamily")\n'\
'end\n\n'\
'label\n'\
' G, A, Y simple\n'\
' O, G, A simple\n'\
' Y, O, G simple\n'\
' A, Y, O simple\n'\
'end\n'
| nicolashainaux/mathmaker | tests/01_core_objects/test_110_polygons.py | Python | gpl-3.0 | 4,579 |
# Copyright (C) 2011-2015 Patrick Totzke <[email protected]>
# This file is released under the GNU GPL, version 3 or a later revision.
# For further details see the COPYING file
from __future__ import absolute_import
import re
import abc
class AddressbookError(Exception):
pass
class AddressBook(object):
"""can look up email addresses and realnames for contacts.
.. note::
This is an abstract class that leaves :meth:`get_contacts`
unspecified. See :class:`AbookAddressBook` and
:class:`ExternalAddressbook` for implementations.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, ignorecase=True):
self.reflags = re.IGNORECASE if ignorecase else 0
@abc.abstractmethod
def get_contacts(self): # pragma no cover
"""list all contacts tuples in this abook as (name, email) tuples"""
return []
def lookup(self, query=''):
"""looks up all contacts where name or address match query"""
res = []
query = re.compile('.*%s.*' % query, self.reflags)
for name, email in self.get_contacts():
if query.match(name) or query.match(email):
res.append((name, email))
return res
| geier/alot | alot/addressbook/__init__.py | Python | gpl-3.0 | 1,232 |
import numpy as np
from scipy.signal import medfilt
import manager.operations.method as method
from manager.operations.methodsteps.confirmation import Confirmation
from manager.exceptions import VoltPyNotAllowed
class MedianFilter(method.ProcessingMethod):
can_be_applied = True
_steps = [
{
'class': Confirmation,
'title': 'Apply median filter',
'desc': 'Press Forward to apply Median Filter.',
},
]
description = """
    The median filter is a smoothing algorithm similar to Savitzky-Golay; however, instead of fitting a polynomial,
    the middle point of the window is moved to the median of the points in the window. The median filter is
    most useful for removing spikes from the signal (single-point, large-amplitude errors).
"""
@classmethod
def __str__(cls):
return "Median Filter"
def apply(self, user, dataset):
if self.model.completed is not True:
raise VoltPyNotAllowed('Incomplete procedure.')
self.__perform(dataset)
def __perform(self, dataset):
for cd in dataset.curves_data.all():
yvec = cd.yVector
newyvec = medfilt(yvec)
dataset.updateCurve(self.model, cd, newyvec)
dataset.save()
def finalize(self, user):
self.__perform(self.model.dataset)
self.model.step = None
self.model.completed = True
self.model.save()
return True
| efce/voltPy | manager/operations/methods/MedianFilter.py | Python | gpl-3.0 | 1,485 |
"""
Django settings for lwc project.
Generated by 'django-admin startproject' using Django 1.9.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '7fm_f66p8e!p%o=sr%d&cue(%+bh@@j_y6*b3d@t^c5%i8)1)2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
#Share url
SHARER_URL = "http://127.0.0.1:8000/?ref="
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'joins',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'lwc.middleware.ReferMiddleware',
]
ROOT_URLCONF = 'lwc.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'lwc.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static', 'static_root')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static', 'static_dirs'),
)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'static', 'media')
| Phexcom/product-launcher | lwc/settings/base.py | Python | gpl-3.0 | 3,530 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "MyGarden.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| pmandr/plant_carer | MyGarden/manage.py | Python | gpl-3.0 | 806 |
from osweb.projects.ManageProject import ManageProject
from osweb.projects.projects_data import ProjectsData | openshine/osweb | osweb/projects/__init__.py | Python | gpl-3.0 | 108 |
# Copyright FuseSoC contributors
# Licensed under the 2-Clause BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-2-Clause
def test_generators():
import os
import tempfile
from fusesoc.config import Config
from fusesoc.coremanager import CoreManager
from fusesoc.edalizer import Edalizer
from fusesoc.librarymanager import Library
from fusesoc.vlnv import Vlnv
tests_dir = os.path.dirname(__file__)
cores_dir = os.path.join(tests_dir, "capi2_cores", "misc", "generate")
lib = Library("edalizer", cores_dir)
cm = CoreManager(Config())
cm.add_library(lib)
core = cm.get_core(Vlnv("::generate"))
build_root = tempfile.mkdtemp(prefix="export_")
cache_root = tempfile.mkdtemp(prefix="export_cache_")
export_root = os.path.join(build_root, "exported_files")
edalizer = Edalizer(
toplevel=core.name,
flags={"tool": "icarus"},
core_manager=cm,
cache_root=cache_root,
work_root=os.path.join(build_root, "work"),
export_root=export_root,
system_name=None,
)
edalizer.run()
gendir = os.path.join(
cache_root, "generated", "generate-testgenerate_without_params_0"
)
assert os.path.isfile(os.path.join(gendir, "generated.core"))
assert os.path.isfile(os.path.join(gendir, "testgenerate_without_params_input.yml"))
gendir = os.path.join(
cache_root, "generated", "generate-testgenerate_with_params_0"
)
assert os.path.isfile(os.path.join(gendir, "generated.core"))
assert os.path.isfile(os.path.join(gendir, "testgenerate_with_params_input.yml"))
| lowRISC/fusesoc | tests/test_edalizer.py | Python | gpl-3.0 | 1,641 |
#! /usr/bin/env python3
from bollinger import bands, plot, strategies
import argparse
parser = argparse.ArgumentParser(description="plots bollinger bands or suggests investments", epilog="example: bolly.py plot AMZN FB")
parser.add_argument("action", metavar="ACTION", choices=["plot", "suggest"], help="either plot or suggest")
parser.add_argument("symbols", metavar="SYMBOL", nargs="+", help="stock symbols")
parser.add_argument("-s", "--strategy", choices=["uponce", "downonce", "moreup", "moredown"], default="moredown", help="selects invesment strategy")
args = parser.parse_args()
if args.action == "plot":
for symbol in args.symbols:
print("plot [ %s ]: " %(symbol), end="")
b = bands.Bands(symbol)
b.fetch()
try:
p = plot.Plot(b)
p.save()
print("OK")
except Exception as ex:
print("FAIL: (%s)"%(ex))
if args.action == "suggest":
for symbol in args.symbols:
print("suggest [ %s ]: " %(symbol), end="")
b = bands.Bands(symbol)
b.fetch()
try:
if args.strategy == "uponce": s = strategies.UpOnce(b)
elif args.strategy == "downonce": s = strategies.DownOnce(b)
elif args.strategy == "moreup": s = strategies.MoreUp(b)
elif args.strategy == "moredown": s = strategies.MoreDown(b)
print("YES" if s.invest() else "NO")
except Exception as ex:
print("FAIL: (%s)"%(ex))
| juantascon/bollinger-bands | python/bolly.py | Python | gpl-3.0 | 1,507 |
# -*- coding: utf-8 -*-
import sys
import csv
from itertools import izip
# https://pypi.python.org/pypi/unicodecsv
# http://semver.org/
VERSION = (0, 9, 4)
__version__ = ".".join(map(str, VERSION))
pass_throughs = [
'register_dialect',
'unregister_dialect',
'get_dialect',
'list_dialects',
'field_size_limit',
'Dialect',
'excel',
'excel_tab',
'Sniffer',
'QUOTE_ALL',
'QUOTE_MINIMAL',
'QUOTE_NONNUMERIC',
'QUOTE_NONE',
'Error'
]
__all__ = [
'reader',
'writer',
'DictReader',
'DictWriter',
] + pass_throughs
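# Re-export the unchanged stdlib csv attributes listed in pass_throughs so this
# module can be used as a drop-in replacement for the csv module.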
for prop in pass_throughs:
globals()[prop] = getattr(csv, prop)
def _stringify(s, encoding, errors):
if s is None:
return ''
if isinstance(s, unicode):
return s.encode(encoding, errors)
elif isinstance(s, (int, float)):
pass # let csv.QUOTE_NONNUMERIC do its thing.
elif not isinstance(s, str):
s = str(s)
return s
def _stringify_list(l, encoding, errors='strict'):
try:
return [_stringify(s, encoding, errors) for s in iter(l)]
except TypeError, e:
raise csv.Error(str(e))
def _unicodify(s, encoding):
if s is None:
return None
if isinstance(s, (unicode, int, float)):
return s
elif isinstance(s, str):
return s.decode(encoding)
return s
class UnicodeWriter(object):
"""
>>> import unicodecsv
>>> from cStringIO import StringIO
>>> f = StringIO()
>>> w = unicodecsv.writer(f, encoding='utf-8')
>>> w.writerow((u'é', u'ñ'))
>>> f.seek(0)
>>> r = unicodecsv.reader(f, encoding='utf-8')
>>> row = r.next()
>>> row[0] == u'é'
True
>>> row[1] == u'ñ'
True
"""
def __init__(self, f, dialect=csv.excel, encoding='utf-8', errors='strict',
*args, **kwds):
self.encoding = encoding
self.writer = csv.writer(f, dialect, *args, **kwds)
self.encoding_errors = errors
def writerow(self, row):
self.writer.writerow(
_stringify_list(row, self.encoding, self.encoding_errors))
def writerows(self, rows):
for row in rows:
self.writerow(row)
@property
def dialect(self):
return self.writer.dialect
writer = UnicodeWriter
class UnicodeReader(object):
def __init__(self, f, dialect=None, encoding='utf-8', errors='strict',
**kwds):
format_params = ['delimiter', 'doublequote', 'escapechar',
'lineterminator', 'quotechar', 'quoting', 'skipinitialspace']
if dialect is None:
if not any([kwd_name in format_params for kwd_name in kwds.keys()]):
dialect = csv.excel
self.reader = csv.reader(f, dialect, **kwds)
self.encoding = encoding
self.encoding_errors = errors
def next(self):
row = self.reader.next()
encoding = self.encoding
encoding_errors = self.encoding_errors
float_ = float
unicode_ = unicode
try:
val = [(value if isinstance(value, float_) else unicode_(value, encoding, encoding_errors))
for value in row]
except UnicodeDecodeError as e:
# attempt a different encoding...
encoding = 'ISO-8859-1'
val = [(value if isinstance(value, float_) else unicode_(value, encoding, encoding_errors))
for value in row]
return val
def __iter__(self):
return self
@property
def dialect(self):
return self.reader.dialect
@property
def line_num(self):
return self.reader.line_num
reader = UnicodeReader
class DictWriter(csv.DictWriter):
"""
>>> from cStringIO import StringIO
>>> f = StringIO()
>>> w = DictWriter(f, ['a', u'ñ', 'b'], restval=u'î')
>>> w.writerow({'a':'1', u'ñ':'2'})
>>> w.writerow({'a':'1', u'ñ':'2', 'b':u'ø'})
>>> w.writerow({'a':u'é', u'ñ':'2'})
>>> f.seek(0)
>>> r = DictReader(f, fieldnames=['a', u'ñ'], restkey='r')
>>> r.next() == {'a': u'1', u'ñ':'2', 'r': [u'î']}
True
>>> r.next() == {'a': u'1', u'ñ':'2', 'r': [u'\xc3\xb8']}
True
>>> r.next() == {'a': u'\xc3\xa9', u'ñ':'2', 'r': [u'\xc3\xae']}
True
"""
def __init__(self, csvfile, fieldnames, restval='', extrasaction='raise', dialect='excel', encoding='utf-8', errors='strict', *args, **kwds):
self.encoding = encoding
csv.DictWriter.__init__(
self, csvfile, fieldnames, restval, extrasaction, dialect, *args, **kwds)
self.writer = UnicodeWriter(
csvfile, dialect, encoding=encoding, errors=errors, *args, **kwds)
self.encoding_errors = errors
def writeheader(self):
fieldnames = _stringify_list(
self.fieldnames, self.encoding, self.encoding_errors)
header = dict(zip(self.fieldnames, self.fieldnames))
self.writerow(header)
class DictReader(csv.DictReader):
"""
>>> from cStringIO import StringIO
>>> f = StringIO()
>>> w = DictWriter(f, fieldnames=['name', 'place'])
>>> w.writerow({'name': 'Cary Grant', 'place': 'hollywood'})
>>> w.writerow({'name': 'Nathan Brillstone', 'place': u'øLand'})
>>> w.writerow({'name': u'Willam ø. Unicoder', 'place': u'éSpandland'})
>>> f.seek(0)
>>> r = DictReader(f, fieldnames=['name', 'place'])
>>> print r.next() == {'name': 'Cary Grant', 'place': 'hollywood'}
True
>>> print r.next() == {'name': 'Nathan Brillstone', 'place': u'øLand'}
True
>>> print r.next() == {'name': u'Willam ø. Unicoder', 'place': u'éSpandland'}
True
"""
def __init__(self, csvfile, fieldnames=None, restkey=None, restval=None,
dialect='excel', encoding='utf-8', errors='strict', *args,
**kwds):
if fieldnames is not None:
fieldnames = _stringify_list(fieldnames, encoding)
csv.DictReader.__init__(
self, csvfile, fieldnames, restkey, restval, dialect, *args, **kwds)
self.reader = UnicodeReader(csvfile, dialect, encoding=encoding,
errors=errors, *args, **kwds)
if fieldnames is None and not hasattr(csv.DictReader, 'fieldnames'):
# Python 2.5 fieldnames workaround.
# (http://bugs.python.org/issue3436)
reader = UnicodeReader(
csvfile, dialect, encoding=encoding, *args, **kwds)
self.fieldnames = _stringify_list(reader.next(), reader.encoding)
self.unicode_fieldnames = [_unicodify(f, encoding) for f in
self.fieldnames]
self.unicode_restkey = _unicodify(restkey, encoding)
def next(self):
row = csv.DictReader.next(self)
result = dict((uni_key, row[str_key]) for (str_key, uni_key) in
izip(self.fieldnames, self.unicode_fieldnames))
rest = row.get(self.restkey)
if rest:
result[self.unicode_restkey] = rest
return result
| archives-new-zealand/archwayimportgenerator | libs/unicodecsv.py | Python | gpl-3.0 | 7,077 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os,re,sys,pprint,shutil
from pathlib import Path
PACKAGES_DIR = "../packages"
def errorExit(msg):
print(msg)
sys.exit(1)
def isPathDisabled(path):
for part in path.parts:
if part.lower().startswith("_disabled"):
return True
return False
depsFolder = Path("_deps_split")
prodFolder = Path("_prods_split")
merged_deps = Path("merged_deps.py")
merged_prods = Path("merged_prods.py")
if not os.path.isfile(merged_deps):
errorExit("Merged depends file does not exist")
if not os.path.isfile(merged_prods):
errorExit("Merged products file does not exist")
if not os.path.isdir(depsFolder):
os.makedirs(depsFolder)
else:
print("Clearing old split folder:" + str(depsFolder))
shutil.rmtree(depsFolder)
os.makedirs(depsFolder)
if not os.path.isdir(prodFolder):
os.makedirs(prodFolder)
else:
print("Clearing old split folder:" + str(prodFolder))
shutil.rmtree(prodFolder)
os.makedirs(prodFolder)
things = { "merged_deps.py" : depsFolder, "merged_prods.py" : prodFolder, }
for mergefile_name in things:
mergedFile = None
enableWrite = False
curFile = None
print("Splitting " +mergefile_name+ " into seperate files in " + str(things[mergefile_name]))
with open(mergefile_name, "r", encoding="utf-8") as f:
mergedFile = f.read().split("\n")
fileBuffer = ""
for line in mergedFile:
startR = re.search("^########START:\[(.+)\]$",line)
endR = re.search("^########END:\[(.+)\]$",line)
if endR != None:
enableWrite = False
curFile.write(fileBuffer.rstrip("\n"))
curFile.close()
if enableWrite:
fileBuffer+=line+"\n"
if startR != None:
enableWrite = True
fileBuffer = ""
curFile = open(os.path.join(things[mergefile_name],startR.groups()[0]) ,"w",encoding="utf-8")
print("Done") | DeadSix27/python_cross_compile_script | tools/split.py | Python | mpl-2.0 | 1,806 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from unittest import mock
from configman.dotdict import DotDict
from socorro.lib.task_manager import TaskManager, default_task_func
class TestTaskManager:
def test_constuctor1(self):
config = DotDict()
config.quit_on_empty_queue = False
tm = TaskManager(config)
assert tm.config == config
assert tm.task_func == default_task_func
assert tm.quit is False
def test_get_iterator(self):
config = DotDict()
config.quit_on_empty_queue = False
tm = TaskManager(config, job_source_iterator=range(1))
assert list(tm._get_iterator()) == [0]
def an_iter(self):
yield from range(5)
tm = TaskManager(config, job_source_iterator=an_iter)
assert list(tm._get_iterator()) == [0, 1, 2, 3, 4]
class X:
def __init__(self, config):
self.config = config
def __iter__(self):
yield from self.config
tm = TaskManager(config, job_source_iterator=X(config))
assert list(tm._get_iterator()) == list(config.keys())
def test_blocking_start(self):
config = DotDict()
config.idle_delay = 1
config.quit_on_empty_queue = False
class MyTaskManager(TaskManager):
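            # Raise KeyboardInterrupt after a couple of idle sleeps so that
            # blocking_start() stops looping once all queued jobs have run.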
def _responsive_sleep(self, seconds, wait_log_interval=0, wait_reason=""):
try:
if self.count >= 2:
raise KeyboardInterrupt
self.count += 1
except AttributeError:
self.count = 0
tm = MyTaskManager(config, task_func=mock.Mock())
waiting_func = mock.Mock()
tm.blocking_start(waiting_func=waiting_func)
assert tm.task_func.call_count == 10
assert waiting_func.call_count == 0
def test_blocking_start_with_quit_on_empty(self):
config = DotDict()
config.idle_delay = 1
config.quit_on_empty_queue = True
tm = TaskManager(config, task_func=mock.Mock())
waiting_func = mock.Mock()
tm.blocking_start(waiting_func=waiting_func)
assert tm.task_func.call_count == 10
assert waiting_func.call_count == 0
| lonnen/socorro | socorro/unittest/lib/test_task_manager.py | Python | mpl-2.0 | 2,418 |
#!/usr/bin/env python3
#
# Copyright (C) 2017-2020 EOS di Manlio Morini.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/
#
# A python program that helps to set up a new version of Vita.
#
import argparse
import datetime
import os
import re
def version_str(args):
return str(args.major) + "." + str(args.minor) + "." + str(args.maintenance)
def file_process(name, rule, args):
print("--- Processing " + os.path.basename(name))
with open(name) as source:
data = rule(source.read(), args)
if not data:
return
print("Writing " + name)
    with open(name, "w") as dest:
        dest.write(data)
def changelog_rule(data, args):
new_version = version_str(args)
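    # Insert a "## [x.y.z] - YYYY-MM-DD" section right below the
    # "## [Unreleased]" heading (Keep a Changelog layout), then extend the
    # compare links at the bottom of the file for the new tag.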
regex = r"## \[Unreleased\]"
subst = r"## [Unreleased]\n\n## [" + new_version + r"] - " + datetime.date.today().isoformat()
result = re.subn(regex, subst, data)
if result[1] != 1:
return None
regex = r"(\[Unreleased)(\]: https://github.com/morinim/vita/compare/v)(.+)(\.\.\.HEAD)"
subst = r"\g<1>\g<2>" + new_version + r"\g<4>\n[" + new_version + r"\g<2>\g<3>...v" + new_version
result = re.subn(regex, subst, result[0])
return result[0] if result[1] == 1 else None
def doxygen_rule(data, args):
regex = r"([\s]+)(\*[\s]+\\mainpage VITA v)([\d]+)\.([\d]+)\.([\d]+)([\s]*)"
subst = r"\g<1>\g<2>" + version_str(args) + r"\g<6>"
result = re.subn(regex, subst, data)
return result[0] if result[1] > 0 else None
def get_cmd_line_options():
description = "Helps to set up a new version of Vita"
parser = argparse.ArgumentParser(description = description)
parser.add_argument("-v", "--verbose", action = "store_true",
help = "Turn on verbose mode")
# Now the positional arguments.
parser.add_argument("major", type=int)
parser.add_argument("minor", type=int)
parser.add_argument("maintenance", type=int)
return parser
def main():
args = get_cmd_line_options().parse_args()
print("Setting version to v" + str(args.major)
+ "." + str(args.minor)
+ "." + str(args.maintenance))
file_process("../NEWS.md", changelog_rule, args)
file_process("../doc/doxygen/doxygen.h", doxygen_rule, args)
print("\n\nRELEASE NOTE\n")
print("1. Build. cmake -DCMAKE_BUILD_TYPE=Release -B build/ src/ ; cmake --build build/")
print("2. Check. cd build/ ; ./tests")
print('3. Commit. git commit -am "[DOC] Changed revision number to v'
+ version_str(args) + '"')
print("4. Tag. git tag -a v" + version_str(args) + " -m \"tag message\"")
print("\nRemember to 'git push' both code and tag. For the tag:\n")
print(" git push origin [tagname]\n")
if __name__ == "__main__":
main()
| morinim/vita | src/setversion.py | Python | mpl-2.0 | 2,950 |
import json
import mock
from django.test import TestCase
from django.core.urlresolvers import reverse
class TestAPI(TestCase):
@mock.patch('ldap.initialize')
def test_exists(self, mocked_initialize):
connection = mock.MagicMock()
mocked_initialize.return_value = connection
url = reverse('api:exists')
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
# check that 400 Bad Request errors are proper JSON
self.assertEqual(response['Content-Type'], 'application/json')
self.assertEqual(
json.loads(response.content),
{'error': "missing key 'mail'"}
)
response = self.client.get(url, {'mail': ''})
self.assertEqual(response.status_code, 400)
result = {
'abc123': {'uid': 'abc123', 'mail': '[email protected]'},
}
def search_s(base, scope, filterstr, *args, **kwargs):
if '[email protected]' in filterstr:
# if 'hgaccountenabled=TRUE' in filterstr:
# return []
return result.items()
return []
connection.search_s.side_effect = search_s
response = self.client.get(url, {'mail': '[email protected]'})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertEqual(json.loads(response.content), True)
response = self.client.get(url, {'mail': '[email protected]'})
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content), False)
# response = self.client.get(url, {'mail': '[email protected]',
# 'hgaccountenabled': ''})
# self.assertEqual(response.status_code, 200)
# self.assertEqual(json.loads(response.content), False)
response = self.client.get(url, {'mail': '[email protected]',
'gender': 'male'})
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content), True)
@mock.patch('ldap.initialize')
def test_employee(self, mocked_initialize):
connection = mock.MagicMock()
mocked_initialize.return_value = connection
url = reverse('api:employee')
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
response = self.client.get(url, {'mail': ''})
self.assertEqual(response.status_code, 400)
result = {
'abc123': {'uid': 'abc123',
'mail': '[email protected]',
'sn': u'B\xe3ngtsson'},
}
def search_s(base, scope, filterstr, *args, **kwargs):
if '[email protected]' in filterstr:
return result.items()
return []
connection.search_s.side_effect = search_s
response = self.client.get(url, {'mail': '[email protected]'})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertEqual(json.loads(response.content), True)
response = self.client.get(url, {'mail': '[email protected]'})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertEqual(json.loads(response.content), False)
@mock.patch('ldap.initialize')
def test_ingroup(self, mocked_initialize):
connection = mock.MagicMock()
mocked_initialize.return_value = connection
url = reverse('api:in-group')
response = self.client.get(url)
self.assertEqual(response.status_code, 400)
response = self.client.get(url, {'mail': ''})
self.assertEqual(response.status_code, 400)
response = self.client.get(url, {'mail': '[email protected]'})
self.assertEqual(response.status_code, 400)
response = self.client.get(url, {'mail': '[email protected]',
'cn': ''})
self.assertEqual(response.status_code, 400)
result = {
'abc123': {'uid': 'abc123', 'mail': '[email protected]'},
}
def search_s(base, scope, filterstr, *args, **kwargs):
if 'ou=groups' in base:
if (
'[email protected]' in filterstr and
'cn=CrashStats' in filterstr
):
return result.items()
else:
# basic lookup
if '[email protected]' in filterstr:
return result.items()
return []
connection.search_s.side_effect = search_s
response = self.client.get(url, {'mail': '[email protected]',
'cn': 'CrashStats'})
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content), False)
response = self.client.get(url, {'mail': '[email protected]',
'cn': 'CrashStats'})
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content), True)
response = self.client.get(url, {'mail': '[email protected]',
'cn': 'NotInGroup'})
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content), False)
| mozilla/medlem | medlem/api/tests.py | Python | mpl-2.0 | 5,529 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2016 Digi International Inc. All Rights Reserved.
"""
Monitor the WR31 door enclosure
"""
import time
import sys
import sarcli
import idigidata
def millisecond_timestamp():
"""
Return a timestamp, in milliseconds
:return ms_timestamp: int, Timestamp in milliseconds
"""
ms_timestamp = int(time.time() * 1000)
return ms_timestamp
def cli_command(cmd):
"""
Send a command to the SarOS CLI and receive the response
:param cmd: str, Command to run
:return response: str, Response to cmd
"""
cli = sarcli.open()
cli.write(cmd)
response = cli.read()
cli.close()
return response
class SmsAlert(object):
"""
Send an SMS alert
"""
def __init__(self, destination, custom_text):
self.destination = destination
self.custom_text = custom_text
def send_alert(self, message):
"""
Send an SMS alert
:param message: str, Content of SMS message
:return response: str, Response to sendsms command
"""
message = "{0}: {1}".format(self.custom_text, message)
command = 'sendsms ' + self.destination + ' "' + message + '" '
response = cli_command(command)
return response
class DatapointAlert(object):
"""
Send a Datapoint alert
"""
def __init__(self, destination):
self.destination = destination
def send_alert(self, message):
"""
Send a Datapoint alert
:param message: str, Datapoint content
:return response: tuple, Result code of datapoint upload attempt
"""
timestamp = millisecond_timestamp()
dpoint = """\
<DataPoint>
<dataType>STRING</dataType>
<data>{0}</data>
<timestamp>{1}</timestamp>
<streamId>{2}</streamId>
</DataPoint>""".format(message, timestamp, self.destination)
response = idigidata.send_to_idigi(dpoint, "DataPoint/stream.xml")
return response
class DoorMonitor(object):
"""
Provides methods to monitor the enclosure door status
"""
def __init__(self, alert_list):
self.d1_status = ""
self.alert_list = alert_list
@classmethod
def switch_status(cls):
"""
Reads line status and sends an alert if the status is different
:return status: str, Door status, "OPEN" or "CLOSED"
"""
response = cli_command("gpio dio")
if "D1: DOUT=OFF, DIN=LOW" in response:
if not "D0: DOUT=ON" in response:
# Door is closed
status = "CLOSED"
else:
# Door is open
status = "OPEN"
return status
def send_alert(self, text):
"""
:param text: str, Alert content
:return:
"""
for alert in self.alert_list:
alert.send_alert(text)
def monitor_switch(self):
"""
Runs line monitoring and alerting in a loop
:return:
"""
while True:
status = self.switch_status()
if status != self.d1_status:
print "WR31 door is: {0}".format(status)
self.send_alert(status)
self.d1_status = status
time.sleep(.5)
if __name__ == '__main__':
ALERT_FUNCTIONS = [DatapointAlert("WR31_door")]
if len(sys.argv) >= 3:
CUSTOM_TEXT = sys.argv[2]
else:
CUSTOM_TEXT = "WR31 Door"
if len(sys.argv) >= 2:
ALERT_FUNCTIONS.append(SmsAlert(sys.argv[1], CUSTOM_TEXT))
MONITOR = DoorMonitor(ALERT_FUNCTIONS)
MONITOR.monitor_switch()
| digidotcom/transport_examples | WR31/doormon.py | Python | mpl-2.0 | 3,979 |
import copy
import logging
import os
import time
from datetime import datetime
from hashlib import sha1
import newrelic.agent
from django.core.exceptions import ObjectDoesNotExist
from django.db.utils import IntegrityError
from past.builtins import long
from treeherder.etl.artifact import (serialize_artifact_json_blobs,
store_job_artifacts)
from treeherder.etl.common import get_guid_root
from treeherder.model.models import (BuildPlatform,
FailureClassification,
Job,
JobGroup,
JobLog,
JobType,
Machine,
MachinePlatform,
Option,
OptionCollection,
Product,
Push,
ReferenceDataSignatures,
TaskclusterMetadata)
logger = logging.getLogger(__name__)
def _get_number(s):
try:
return long(s)
except (ValueError, TypeError):
return 0
def _remove_existing_jobs(data):
"""
Remove jobs from data where we already have them in the same state.
1. split the incoming jobs into pending, running and complete.
2. fetch the ``job_guids`` from the db that are in the same state as they
are in ``data``.
3. build a new list of jobs in ``new_data`` that are not already in
the db and pass that back. It could end up empty at that point.
"""
new_data = []
guids = [datum['job']['job_guid'] for datum in data]
state_map = {
guid: state for (guid, state) in Job.objects.filter(
guid__in=guids).values_list('guid', 'state')
}
for datum in data:
job = datum['job']
if not state_map.get(job['job_guid']):
new_data.append(datum)
else:
# should not transition from running to pending,
# or completed to any other state
current_state = state_map[job['job_guid']]
if current_state == 'completed' or (
job['state'] == 'pending' and
current_state == 'running'):
continue
new_data.append(datum)
return new_data
def _load_job(repository, job_datum, push_id):
"""
Load a job into the treeherder database
If the job is a ``retry`` the ``job_guid`` will have a special
suffix on it. But the matching ``pending``/``running`` job will not.
So we append the suffixed ``job_guid`` to ``retry_job_guids``
so that we can update the job_id_lookup later with the non-suffixed
``job_guid`` (root ``job_guid``). Then we can find the right
``pending``/``running`` job and update it with this ``retry`` job.
"""
build_platform, _ = BuildPlatform.objects.get_or_create(
os_name=job_datum.get('build_platform', {}).get('os_name', 'unknown'),
platform=job_datum.get('build_platform', {}).get('platform', 'unknown'),
architecture=job_datum.get('build_platform', {}).get('architecture',
'unknown'))
machine_platform, _ = MachinePlatform.objects.get_or_create(
os_name=job_datum.get('machine_platform', {}).get('os_name', 'unknown'),
platform=job_datum.get('machine_platform', {}).get('platform', 'unknown'),
architecture=job_datum.get('machine_platform', {}).get('architecture',
'unknown'))
option_names = job_datum.get('option_collection', [])
option_collection_hash = OptionCollection.calculate_hash(
option_names)
if not OptionCollection.objects.filter(
option_collection_hash=option_collection_hash).exists():
# in the unlikely event that we haven't seen this set of options
# before, add the appropriate database rows
options = []
for option_name in option_names:
option, _ = Option.objects.get_or_create(name=option_name)
options.append(option)
for option in options:
OptionCollection.objects.create(
option_collection_hash=option_collection_hash,
option=option)
machine, _ = Machine.objects.get_or_create(
name=job_datum.get('machine', 'unknown'))
job_type, _ = JobType.objects.get_or_create(
symbol=job_datum.get('job_symbol') or 'unknown',
name=job_datum.get('name') or 'unknown')
job_group, _ = JobGroup.objects.get_or_create(
name=job_datum.get('group_name') or 'unknown',
symbol=job_datum.get('group_symbol') or 'unknown')
product_name = job_datum.get('product_name', 'unknown')
if not product_name.strip():
product_name = 'unknown'
product, _ = Product.objects.get_or_create(name=product_name)
job_guid = job_datum['job_guid']
job_guid = job_guid[0:50]
who = job_datum.get('who') or 'unknown'
who = who[0:50]
reason = job_datum.get('reason') or 'unknown'
reason = reason[0:125]
state = job_datum.get('state') or 'unknown'
state = state[0:25]
build_system_type = job_datum.get('build_system_type', 'buildbot')
reference_data_name = job_datum.get('reference_data_name', None)
default_failure_classification = FailureClassification.objects.get(
name='not classified')
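    # Hash the reference data into a stable signature so identical job
    # configurations map onto the same ReferenceDataSignatures row.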
sh = sha1()
sh.update(''.join(
map(str,
[build_system_type, repository.name, build_platform.os_name,
build_platform.platform, build_platform.architecture,
machine_platform.os_name, machine_platform.platform,
machine_platform.architecture,
job_group.name, job_group.symbol, job_type.name,
job_type.symbol, option_collection_hash,
reference_data_name])).encode('utf-8'))
signature_hash = sh.hexdigest()
# Should be the buildername in the case of buildbot (if not provided
# default to using the signature hash)
if not reference_data_name:
reference_data_name = signature_hash
signature, _ = ReferenceDataSignatures.objects.get_or_create(
name=reference_data_name,
signature=signature_hash,
build_system_type=build_system_type,
repository=repository.name, defaults={
'first_submission_timestamp': time.time(),
'build_os_name': build_platform.os_name,
'build_platform': build_platform.platform,
'build_architecture': build_platform.architecture,
'machine_os_name': machine_platform.os_name,
'machine_platform': machine_platform.platform,
'machine_architecture': machine_platform.architecture,
'job_group_name': job_group.name,
'job_group_symbol': job_group.symbol,
'job_type_name': job_type.name,
'job_type_symbol': job_type.symbol,
'option_collection_hash': option_collection_hash
})
tier = job_datum.get('tier') or 1
result = job_datum.get('result', 'unknown')
submit_time = datetime.fromtimestamp(
_get_number(job_datum.get('submit_timestamp')))
start_time = datetime.fromtimestamp(
_get_number(job_datum.get('start_timestamp')))
end_time = datetime.fromtimestamp(
_get_number(job_datum.get('end_timestamp')))
# first, try to create the job with the given guid (if it doesn't
# exist yet)
job_guid_root = get_guid_root(job_guid)
if not Job.objects.filter(guid__in=[job_guid, job_guid_root]).exists():
# This could theoretically already have been created by another process
# that is running updates simultaneously. So just attempt to create
# it, but allow it to skip if it's the same guid. The odds are
# extremely high that this is a pending and running job that came in
# quick succession and are being processed by two different workers.
Job.objects.get_or_create(
guid=job_guid,
defaults={
"repository": repository,
"signature": signature,
"build_platform": build_platform,
"machine_platform": machine_platform,
"machine": machine,
"option_collection_hash": option_collection_hash,
"job_type": job_type,
"job_group": job_group,
"product": product,
"failure_classification": default_failure_classification,
"who": who,
"reason": reason,
"result": result,
"state": state,
"tier": tier,
"submit_time": submit_time,
"start_time": start_time,
"end_time": end_time,
"last_modified": datetime.now(),
"push_id": push_id
}
)
# Can't just use the ``job`` we would get from the ``get_or_create``
# because we need to try the job_guid_root instance first for update,
# rather than a possible retry job instance.
try:
job = Job.objects.get(guid=job_guid_root)
except ObjectDoesNotExist:
job = Job.objects.get(guid=job_guid)
# add taskcluster metadata if applicable
if all([k in job_datum for k in ['taskcluster_task_id', 'taskcluster_retry_id']]):
try:
TaskclusterMetadata.objects.create(
job=job,
task_id=job_datum['taskcluster_task_id'],
retry_id=job_datum['taskcluster_retry_id'])
except IntegrityError:
pass
# Update job with any data that would have changed
Job.objects.filter(id=job.id).update(
guid=job_guid,
signature=signature,
build_platform=build_platform,
machine_platform=machine_platform,
machine=machine,
option_collection_hash=option_collection_hash,
job_type=job_type,
job_group=job_group,
product=product,
result=result,
state=state,
tier=tier,
submit_time=submit_time,
start_time=start_time,
end_time=end_time,
last_modified=datetime.now(),
push_id=push_id)
artifacts = job_datum.get('artifacts', [])
has_text_log_summary = any(x for x in artifacts
if x['name'] == 'text_log_summary')
if artifacts:
artifacts = serialize_artifact_json_blobs(artifacts)
# need to add job guid to artifacts, since they likely weren't
# present in the beginning
for artifact in artifacts:
if not all(k in artifact for k in ("name", "type", "blob")):
raise ValueError(
"Artifact missing properties: {}".format(artifact))
# Ensure every artifact has a ``job_guid`` value.
# It is legal to submit an artifact that doesn't have a
# ``job_guid`` value. But, if missing, it should inherit that
# value from the job itself.
if "job_guid" not in artifact:
artifact["job_guid"] = job_guid
store_job_artifacts(artifacts)
log_refs = job_datum.get('log_references', [])
job_logs = []
if log_refs:
for log in log_refs:
name = log.get('name') or 'unknown'
name = name[0:50]
url = log.get('url') or 'unknown'
url = url[0:255]
# this indicates that a summary artifact was submitted with
# this job that corresponds to the buildbot_text log url.
# Therefore, the log does not need parsing. So we should
# ensure that it's marked as already parsed.
if has_text_log_summary and name == 'buildbot_text':
parse_status = JobLog.PARSED
else:
parse_status_map = dict([(k, v) for (v, k) in
JobLog.STATUSES])
mapped_status = parse_status_map.get(
log.get('parse_status'))
if mapped_status:
parse_status = mapped_status
else:
parse_status = JobLog.PENDING
jl, _ = JobLog.objects.get_or_create(
job=job, name=name, url=url, defaults={
'status': parse_status
})
job_logs.append(jl)
_schedule_log_parsing(job, job_logs, result)
return job_guid
def _schedule_log_parsing(job, job_logs, result):
"""Kick off the initial task that parses the log data.
log_data is a list of job log objects and the result for that job
"""
# importing here to avoid an import loop
from treeherder.log_parser.tasks import parse_logs
task_types = {
"errorsummary_json",
"buildbot_text",
"builds-4h"
}
job_log_ids = []
for job_log in job_logs:
# a log can be submitted already parsed. So only schedule
# a parsing task if it's ``pending``
# the submitter is then responsible for submitting the
# text_log_summary artifact
if job_log.status != JobLog.PENDING:
continue
# if this is not a known type of log, abort parse
if job_log.name not in task_types:
continue
job_log_ids.append(job_log.id)
# TODO: Replace the use of different queues for failures vs not with the
# RabbitMQ priority feature (since the idea behind separate queues was
# only to ensure failures are dealt with first if there is a backlog).
if result != 'success':
queue = 'log_parser_fail'
priority = 'failures'
else:
queue = 'log_parser'
priority = "normal"
parse_logs.apply_async(queue=queue,
args=[job.id, job_log_ids, priority])
def store_job_data(repository, originalData):
"""
Store job data instances into jobs db
Example:
[
{
"revision": "24fd64b8251fac5cf60b54a915bffa7e51f636b5",
"job": {
"job_guid": "d19375ce775f0dc166de01daa5d2e8a73a8e8ebf",
"name": "xpcshell",
"desc": "foo",
"job_symbol": "XP",
"group_name": "Shelliness",
"group_symbol": "XPC",
"product_name": "firefox",
"state": "TODO",
"result": 0,
"reason": "scheduler",
"who": "sendchange-unittest",
"submit_timestamp": 1365732271,
"start_timestamp": "20130411165317",
"end_timestamp": "1365733932"
"machine": "tst-linux64-ec2-314",
"build_platform": {
"platform": "Ubuntu VM 12.04",
"os_name": "linux",
"architecture": "x86_64"
},
"machine_platform": {
"platform": "Ubuntu VM 12.04",
"os_name": "linux",
"architecture": "x86_64"
},
"option_collection": {
"opt": true
},
"log_references": [
{
"url": "http://ftp.mozilla.org/pub/...",
"name": "unittest"
}
],
                "artifacts": [{
                    "type": "json | img | ...",
                    "name": "",
                    "log_urls": [],
                    "blob": ""
                }],
},
"superseded": []
},
...
]
"""
data = copy.deepcopy(originalData)
# Ensure that we have job data to process
if not data:
return
# remove any existing jobs that already have the same state
data = _remove_existing_jobs(data)
if not data:
return
superseded_job_guid_placeholders = []
# TODO: Refactor this now that store_job_data() is only over called with one job at a time.
for datum in data:
try:
# TODO: this might be a good place to check the datum against
# a JSON schema to ensure all the fields are valid. Then
# the exception we caught would be much more informative. That
# being said, if/when we transition to only using the pulse
# job consumer, then the data will always be vetted with a
# JSON schema before we get to this point.
job = datum['job']
revision = datum['revision']
superseded = datum.get('superseded', [])
revision_field = 'revision__startswith' if len(revision) < 40 else 'revision'
filter_kwargs = {'repository': repository, revision_field: revision}
push_id = Push.objects.values_list('id', flat=True).get(**filter_kwargs)
# load job
job_guid = _load_job(repository, job, push_id)
for superseded_guid in superseded:
superseded_job_guid_placeholders.append(
# superseded by guid, superseded guid
[job_guid, superseded_guid]
)
except Exception as e:
# Surface the error immediately unless running in production, where we'd
# rather report it on New Relic and not block storing the remaining jobs.
# TODO: Once buildbot support is removed, remove this as part of
# refactoring this method to process just one job at a time.
if 'DYNO' not in os.environ:
raise
logger.exception(e)
# make more fields visible in new relic for the job
# where we encountered the error
datum.update(datum.get("job", {}))
newrelic.agent.record_exception(params=datum)
# skip any jobs that hit errors in these stages.
continue
# Update the result/state of any jobs that were superseded by those ingested above.
if superseded_job_guid_placeholders:
for (job_guid, superseded_by_guid) in superseded_job_guid_placeholders:
Job.objects.filter(guid=superseded_by_guid).update(
result='superseded',
state='completed')
| KWierso/treeherder | treeherder/etl/jobs.py | Python | mpl-2.0 | 18,563 |
# -*- coding: utf-8 -*-
# Copyright: Damien Elmes <[email protected]>
# Copyright © 2014 Roland Sieker <[email protected]>
#
# License: GNU AGPL, version 3 or later;
# http://www.gnu.org/licenses/agpl.html
import os
import re
import shutil
import zipfile
from . import Collection
from .hooks import runHook
from .lang import _
from .utils import ids2str, json, splitFields
class Exporter(object):
def __init__(self, col, did=None):
self.col = col
self.did = did
def exportInto(self, path):
self._escapeCount = 0
file = open(path, "wb")
self.doExport(file)
file.close()
def escapeText(self, text):
"Escape newlines, tabs, CSS and quotechar."
text = text.replace("\n", "<br>")
text = text.replace("\t", " " * 8)
text = re.sub("(?i)<style>.*?</style>", "", text)
if "\"" in text:
text = "\"" + text.replace("\"", "\"\"") + "\""
return text
def cardIds(self):
if not self.did:
cids = self.col.db.list("select id from cards")
else:
cids = self.col.decks.cids(self.did, children=True)
self.count = len(cids)
return cids
# Cards as TSV
######################################################################
class TextCardExporter(Exporter):
key = _("Cards in Plain Text")
ext = ".txt"
hideTags = True
def __init__(self, col):
Exporter.__init__(self, col)
def doExport(self, file):
ids = sorted(self.cardIds())
# strids = ids2str(ids)
def esc(s):
# strip off the repeated question in answer if exists
s = re.sub("(?si)^.*<hr id=answer>\n*", "", s)
return self.escapeText(s)
out = ""
for cid in ids:
c = self.col.getCard(cid)
out += esc(c.q())
out += "\t" + esc(c.a()) + "\n"
file.write(out.encode("utf-8"))
# Notes as TSV
######################################################################
class TextNoteExporter(Exporter):
key = _("Notes in Plain Text")
ext = ".txt"
def __init__(self, col):
Exporter.__init__(self, col)
self.includeID = False
self.includeTags = True
def doExport(self, file):
cardIds = self.cardIds()
data = []
for id, flds, tags in self.col.db.execute("""
select guid, flds, tags from notes
where id in
(select nid from cards
where cards.id in %s)""" % ids2str(cardIds)):
row = []
# note id
if self.includeID:
row.append(str(id))
# fields
row.extend([self.escapeText(f) for f in splitFields(flds)])
# tags
if self.includeTags:
row.append(tags.strip())
data.append("\t".join(row))
self.count = len(data)
out = "\n".join(data)
file.write(out.encode("utf-8"))
# Anki decks
######################################################################
# media files are stored in self.mediaFiles, but not exported.
class AnkiExporter(Exporter):
key = _("Anki 2.0 Deck")
ext = ".anki2"
def __init__(self, col):
Exporter.__init__(self, col)
self.includeSched = False
self.includeMedia = True
def exportInto(self, path):
# create a new collection at the target
try:
os.unlink(path)
except (IOError, OSError):
pass
self.dst = Collection(path)
self.src = self.col
# find cards
if not self.did:
cids = self.src.db.list("select id from cards")
else:
cids = self.src.decks.cids(self.did, children=True)
# copy cards, noting used nids
nids = {}
data = []
for row in self.src.db.execute(
"select * from cards where id in " + ids2str(cids)):
nids[row[1]] = True
data.append(row)
self.dst.db.executemany(
"insert into cards values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)",
data)
# notes
strnids = ids2str(list(nids.keys()))
notedata = []
for row in self.src.db.all(
"select * from notes where id in "+strnids):
# remove system tags if not exporting scheduling info
if not self.includeSched:
row = list(row)
row[5] = self.removeSystemTags(row[5])
notedata.append(row)
self.dst.db.executemany(
"insert into notes values (?,?,?,?,?,?,?,?,?,?,?)",
notedata)
# models used by the notes
mids = self.dst.db.list(
"select distinct mid from notes where id in " + strnids)
# card history and revlog
if self.includeSched:
data = self.src.db.all(
"select * from revlog where cid in " + ids2str(cids))
self.dst.db.executemany(
"insert into revlog values (?,?,?,?,?,?,?,?,?)",
data)
else:
# need to reset card state
self.dst.sched.resetCards(cids)
# models - start with zero
self.dst.models.models = {}
for m in self.src.models.all():
if int(m['id']) in mids:
self.dst.models.update(m)
# decks
if not self.did:
dids = []
else:
dids = [self.did] + [
x[1] for x in self.src.decks.children(self.did)]
dconfs = {}
for d in self.src.decks.all():
if str(d['id']) == "1":
continue
if dids and d['id'] not in dids:
continue
if not d['dyn'] and d['conf'] != 1:
if self.includeSched:
dconfs[d['conf']] = True
if not self.includeSched:
# scheduling not included, so reset deck settings to default
d = dict(d)
d['conf'] = 1
self.dst.decks.update(d)
# copy used deck confs
for dc in self.src.decks.allConf():
if dc['id'] in dconfs:
self.dst.decks.updateConf(dc)
# find used media
media = {}
self.mediaDir = self.src.media.dir()
if self.includeMedia:
for row in notedata:
flds = row[6]
mid = row[2]
for file in self.src.media.filesInStr(mid, flds):
media[file] = True
if self.mediaDir:
for fname in os.listdir(self.mediaDir):
if fname.startswith("_"):
media[fname] = True
self.mediaFiles = list(media.keys())
self.dst.crt = self.src.crt
# todo: tags?
self.count = self.dst.cardCount()
self.dst.setMod()
self.postExport()
self.dst.close()
def postExport(self):
# overwrite to apply customizations to the deck before it's closed,
# such as update the deck description
pass
def removeSystemTags(self, tags):
return self.src.tags.remFromStr("marked leech", tags)
# Packaged Anki decks
######################################################################
class AnkiPackageExporter(AnkiExporter):
key = _("Anki Deck Package")
ext = ".apkg"
def __init__(self, col):
AnkiExporter.__init__(self, col)
def exportInto(self, path):
# open a zip file
z = zipfile.ZipFile(path, "w", zipfile.ZIP_DEFLATED)
# if all decks and scheduling included, full export
if self.includeSched and not self.did:
media = self.exportVerbatim(z)
else:
# otherwise, filter
media = self.exportFiltered(z, path)
# media map
z.writestr("media", json.dumps(media))
z.close()
def exportFiltered(self, z, path):
# export into the anki2 file
colfile = path.replace(".apkg", ".anki2")
AnkiExporter.exportInto(self, colfile)
z.write(colfile, "collection.anki2")
# and media
self.prepareMedia()
media = {}
for c, file in enumerate(self.mediaFiles):
c = str(c)
mpath = os.path.join(self.mediaDir, file)
if os.path.exists(mpath):
z.write(mpath, c)
media[c] = file
# tidy up intermediate files
os.unlink(colfile)
p = path.replace(".apkg", ".media.db2")
if os.path.exists(p):
os.unlink(p)
os.chdir(self.mediaDir)
shutil.rmtree(path.replace(".apkg", ".media"))
return media
def exportVerbatim(self, z):
# close our deck & write it into the zip file, and reopen
self.count = self.col.cardCount()
self.col.close()
z.write(self.col.path, "collection.anki2")
self.col.reopen()
# copy all media
if not self.includeMedia:
return {}
media = {}
mdir = self.col.media.dir()
for c, file in enumerate(os.listdir(mdir)):
c = str(c)
mpath = os.path.join(mdir, file)
if os.path.exists(mpath):
z.write(mpath, c)
media[c] = file
return media
def prepareMedia(self):
# chance to move each file in self.mediaFiles into place before media
# is zipped up
pass
# Export modules
##########################################################################
def exporters():
def id(obj):
return ("%s (*%s)" % (obj.key, obj.ext), obj)
exps = [
id(AnkiPackageExporter),
id(TextNoteExporter),
id(TextCardExporter),
]
runHook("exportersList", exps)
return exps
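# Illustrative, uncalled sketch of driving the exporters defined above; the
# paths are hypothetical and a real caller lives in the GUI layer.
def _example_export_apkg(collection_path, apkg_path):
    col = Collection(collection_path)
    exporter = AnkiPackageExporter(col)
    exporter.includeSched = False   # leave scheduling information out
    exporter.exportInto(apkg_path)  # expects a *.apkg target path
    col.close()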
| ospalh/libanki3 | libanki3/exporting.py | Python | agpl-3.0 | 9,791 |
# -*- coding: utf-8 -*-
# See README.rst file on addon root folder for license details
from . import res_partner
| incaser/odoo-templates | sample_addon_oca/models/__init__.py | Python | agpl-3.0 | 114 |
# -*- coding: utf-8 -*-
# Copyright© 2016 ICTSTUDIO <http://www.ictstudio.eu>
# Copyright 2016 ACSONE SA/NV (<http://acsone.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
'name': "MIS Builder Cost Center Filter",
'version': '8.0.1.0.0',
'category': 'Reporting',
'summary': """
Add Cost Center filters to MIS Reports
""",
'author':
'ICTSTUDIO,'
'ACSONE SA/NV,'
'Odoo Community Association (OCA)',
'website': "http://www.ictstudio.eu",
'license': 'AGPL-3',
'depends': [
'mis_builder',
'account_cost_center'
],
'data': [
'views/mis_report_view.xml',
'views/mis_builder_cost_center.xml',
],
'qweb': [
'static/src/xml/mis_widget.xml'
],
}
| ICTSTUDIO/accounting-addons | mis_builder_cost_center_filter/__openerp__.py | Python | agpl-3.0 | 793 |
# -*- coding: utf-8 -*-
__license__ = "GNU Affero General Public License, Ver.3"
__author__ = "Pablo Alvarez de Sotomayor Posadillo"
# This file is part of Kirinki.
#
# Kirinki is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Kirinki is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with kirinki. If not, see <http://www.gnu.org/licenses/>.
# Python general imports
import os
import os.path
import subprocess
import httplib
from datetime import datetime
# Django imports
from django import forms
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.template import RequestContext
from django.template.loader import render_to_string
# Application imports
from kirinki.config import Config
from kirinki.common import ErrorClear
from kirinki.mainviewer import MainViewer
from kirinki.models import streaming
from kirinki.models import video
from kirinki.message import Message
from kirinki.user import LoginForm
class StreamingController():
'''Class that implements the Streaming controller'''
def __init__(self, request):
if request.session.get('isConfig', False) is False:
Config.getSession(request.session)
# Left block
leftBlocks = []
if not request.session['user'].is_authenticated():
leftBlocks = [render_to_string('kirinki/section.html', {'title' : 'login', 'content': render_to_string('kirinki/form.html', {'form' : LoginForm(), 'action' : request.session['base_url'] + '/login'}, context_instance=RequestContext(request))})]
# Center block
centerBlocks = []
try:
videoStr = streaming.objects.all()
for video in videoStr:
centerBlocks = [render_to_string('kirinki/section.html', {'title' : 'login', 'content': str(video.idStreaming)})]
except streaming.DoesNotExist:
pass
self.render = MainViewer(request).render(leftBlocks, centerBlocks, [])
def getRender(self):
'''This method returns the rendered HTML.'''
return self.render
class StrForm(forms.Form):
isVideo = forms.BooleanField(label='Emitir Video',
required=False)
srcIP = forms.IPAddressField(label='Ip de origen',
required=False)
srcPort = forms.IntegerField(label='Puerto de origen',
required=False)
srcMux = forms.ChoiceField(label='Multiplexor de origen',
choices=[('ogg', 'ogg'), ('ffmpeg{mux=flv}', 'mp4'), ('webm', 'webm')],
required=False)
vStream = forms.ChoiceField(label='Video a emitir',
choices=[],
required=True)
class StreamController():
'''Class to implement the Stream controller'''
def __init__(self, request):
if request.session.get('isConfig', False) is False:
Config.getSession(request.session)
if request.method == 'GET':
# GET request
form = StrForm(error_class=ErrorClear)
form.fields['isVideo'].initial = False
form.fields['srcIP'].initial = request.META['REMOTE_ADDR']
form.fields['srcPort'].initial = 9000
form.fields['vStream'].choices = self.userVideos(request)
self.render = MainViewer(request).render([], [render_to_string('kirinki/form.html', {'form' : form, 'action' : request.session['base_url'] + '/stream', 'id' : 'stream'}, context_instance=RequestContext(request))], [])
elif request.method == 'POST':
# POST request
form = StrForm(request.POST, error_class=ErrorClear)
form.fields['isVideo'].initial = False
form.fields['srcIP'].initial = request.META['REMOTE_ADDR']
form.fields['srcPort'].initial = 9000
form.fields['vStream'].choices = self.userVideos(request)
# Check if the form data is valid and try to start the streaming
if form.is_valid():
try:
v = video.objects.filter(idVideo=form.cleaned_data['vStream'])[0]
except video.DoesNotExist:
v = None
if form.cleaned_data['isVideo'] is True and v is not None:
cvlc = None
if v.format == 'video/mp4':
cvlc = subprocess.Popen(["/usr/bin/cvlc " + v.path + " --sout '#http{mux=ffmpeg{mux=flv},dst=" + request.session['strIP'] + ":" + request.session['strPort'] + "/} -no-sout-rtp-sap -no-sout-standard-sap -sout-keep' --ttl 12"],
shell=True)
elif v.format == 'video/webm':
cvlc = subprocess.Popen(["/usr/bin/cvlc " + v.path + " --sout '#http{mux=webm,dst=" + request.session['strIP'] + ":" + request.session['strPort'] + "/} -no-sout-rtp-sap -no-sout-standard-sap -sout-keep' --ttl 12"],
shell=True)
elif v.format == 'video/ogg':
cvlc = subprocess.Popen(["/usr/bin/cvlc " + v.path + " --sout '#http{mux=ogg,dst=" + request.session['strIP'] + ":" + request.session['strPort'] + "/} -no-sout-rtp-sap -no-sout-standard-sap -sout-keep' --ttl 12"],
shell=True)
else:
Message.pushMessage(request, Message.ERROR,'Video type not supported')
if cvlc is not None:
vStream = streaming(src=form.cleaned_data['srcIP'], port=form.cleaned_data['srcPort'], mux=form.cleaned_data['srcMux'], vMode=form.cleaned_data['isVideo'], pid=cvlc.pid,video=v, owner=request.session['user'])
vStream.save()
Message.pushMessage(request, Message.INFO,'Video streaming')
elif form.cleaned_data['isVideo'] is False:
if form.cleaned_data['srcMux'] != "ffmpeg{mux=flv}" and form.cleaned_data['srcMux'] != "webm" and form.cleaned_data['srcMux'] != "ogg":
Message.pushMessage(request, Message.ERROR,'Video type not supported')
else:
cvlc = subprocess.Popen(["/usr/bin/cvlc http://" + str(form.cleaned_data['srcIP']) + ":" + str(form.cleaned_data['srcPort']) + " --sout '#http{mux=" + str(form.cleaned_data['srcMux']) + ",dst=" + request.session['strIP'] + ":" + request.session['strPort'] + "/} -no-sout-rtp-sap -no-sout-standard-sap -sout-keep' --ttl 12"],
shell=True)
vStream = streaming(src=form.cleaned_data['srcIP'], port=form.cleaned_data['srcPort'], mux=form.cleaned_data['srcMux'], vMode=form.cleaned_data['isVideo'], pid=cvlc.pid,video=v, owner=request.session['user'])
vStream.save()
Message.pushMessage(request, Message.ERROR, 'External video streaming.')
else:
Message.pushMessage(request, Message.ERROR, 'If you select the video mode you must select a video.')
# os.waitpid(p.pid, 0)[1]
self.render = HttpResponseRedirect('/streaming')
else:
for error in form.errors:
Message.pushMessage(request, Message.ERROR, 'Error en ' + error + ': ' + str(form._errors[error]))
if request.META.get('HTTP_REFERER', False) is not False:
self.render = HttpResponseRedirect(request.META['HTTP_REFERER'])
else:
self.render = HttpResponseRedirect('/index')
else:
raise Http404
def userVideos(self, request):
'''This method returns the videos owned by the current user.'''
init = []
try:
videos = video.objects.filter(owner=request.session['user'])
for v in videos:
init.append((v.idVideo, v.name))
except video.DoesNotExist:
pass
return init
def getRender(self):
'''This method returns the rendered HTML.'''
return self.render
class VideoController():
'''Class to implement the Video controller'''
# Definition of the video actions
LIST = 0
VIEW = 1
DELETE = 2
REFERENCE = 3
def __init__(self, request, action=0, key=None):
if request.session.get('isConfig', False) is False:
Config.getSession(request.session)
# Blocks assigned to the left area
leftBlocks = []
if not request.session['user'].is_authenticated():
leftBlocks = [render_to_string('kirinki/section.html', {'title' : 'login', 'content': render_to_string('kirinki/form.html', {'form' : LoginForm(), 'action' : request.session['base_url'] + '/login'}, context_instance=RequestContext(request))})]
else:
try:
myVideos = video.objects.filter(owner = request.session['user'])
leftBlocks = [render_to_string('kirinki/section.html', {'title' : 'Mis vídeos', 'content' : render_to_string('kirinki/myVideo.html', {'videos' : myVideos, 'session' : request.session}).encode('utf-8')})]
except video.DoesNotExist:
pass
# Blocks assigned to the center area
centerBlocks = []
if action == self.LIST:
try:
videoList = video.objects.all()
centerBlocks = [render_to_string('kirinki/section.html', {'title' : 'Lista de videos', 'content': render_to_string('kirinki/videoList.html', {'videos' : videoList, 'session' : request.session}).encode('utf-8')})]
except video.DoesNotExist:
pass
elif action == self.VIEW:
if key is not None:
try:
v = video.objects.get(idVideo=key)
bfile = '/media/'+v.path[v.path.rfind('/')+1:v.path.rfind('.')]
src = {'orig' : request.session['base_url'] + '/media/'+v.path[v.path.rfind('/')+1:]}
if os.path.exists(v.path[:v.path.rfind('.')]+'.ogv'):
src['ogv'] = request.session['base_url'] +bfile+'.ogv'
if os.path.exists(v.path[:v.path.rfind('.')]+'.webm'):
src['webm'] = request.session['base_url'] +bfile+'.webm'
if os.path.exists(v.path[:v.path.rfind('.')]+'.mp4'):
src['mp4'] = request.session['base_url'] +bfile+'.mp4'
if os.path.exists(v.path[:v.path.rfind('.')]+'.flv'):
src['flv'] = request.session['base_url'] +bfile+'.flv'
src['flash'] = request.session['base_url']+'/static/flowplayer/flowplayer-3.2.5.swf'
src['flash_str'] = request.session['base_url']+'/static/flowplayer.pseudostreaming/flowplayer.pseudostreaming-3.2.5.swf'
centerBlocks = [render_to_string('kirinki/section.html', {'title' : v.name, 'content': render_to_string('kirinki/video.html', {'controls' : True, 'src' : src})})]
except video.DoesNotExist:
pass
elif action == self.DELETE:
try:
v = video.objects.get(idVideo=key, owner=request.session['user'])
name = v.name
os.remove(v.path)
v.delete()
centerBlocks = ['<p>Video ' + name + ' deleted.</p>']
except video.DoesNotExist:
pass
elif action == self.REFERENCE:
pass
else:
# Error. Action not defined
raise Http404
# Blocks assigned to the right area
# Ultimos subidos, ultimos usuarios que han subido, usuarios que mas han subido, ...
rightBlocks = []
self.render = MainViewer(request).render(leftBlocks, centerBlocks, rightBlocks)
def getRender(self):
'''This method returns the generated HTML.'''
return self.render
class UploadForm(forms.Form):
title = forms.CharField(label='Título',
min_length=5,
max_length=80,
required=True)
description = forms.CharField(label='Descripción',
min_length=5,
max_length=250,
required=True)
fileUpload = forms.FileField(label='Fichero',
required=True)
convertMP4 = forms.BooleanField(label='Convertir a mp4',
required=False)
convertOGG = forms.BooleanField(label='Convertir a ogg',
required=False)
convertWEBM = forms.BooleanField(label='Convertir a webm',
required=False)
class UploadController():
'''Class to implement the Upload controller. This class will be merged with the VideoController'''
def __init__(self, request):
if request.session.get('isConfig', False) is False:
Config.getSession(request.session)
if request.method == 'GET':
# GET request
leftBlocks = [self.getMyVideos(request.session)]
centerBlocks = [self.getUploadVideo(request.session['base_url'], request)]
self.render = MainViewer(request).render(leftBlocks, centerBlocks, [])
elif request.method == 'POST':
# POST request.
form = UploadForm(request.POST, request.FILES, error_class=ErrorClear)
if form.is_valid():
upFile = request.FILES['fileUpload']
if upFile.size > 0:
path = ''
if request.session.get('upload_path', False):
path = request.session['upload_path']+'/'
path += upFile.name
destination = open(path, 'wb+')
for chunk in upFile.chunks():
destination.write(chunk)
destination.close()
v = video(name=form.cleaned_data['title'], description=form.cleaned_data['description'], path=path, format=upFile.content_type, pub_date=datetime.now(), owner=request.session['user'])
v.save()
if form.cleaned_data['convertMP4'] and path[v.path.rfind('.'):].lower() != 'mp4':
pass
if form.cleaned_data['convertOGG'] and path[v.path.rfind('.'):].lower() != 'ogg':
pass
if form.cleaned_data['convertWEBM'] and path[v.path.rfind('.'):].lower() != 'web':
pass
if path[v.path.rfind('.'):].lower() != 'flv':
pass
else:
for error in form.errors:
Message.pushMessage(request, Message.ERROR, 'Error en ' + error + ': ' + str(form._errors[error]))
if request.META.get('HTTP_REFERER', False) is not False:
self.render = HttpResponseRedirect(request.META['HTTP_REFERER'])
else:
self.render = HttpResponseRedirect('/index')
else:
raise Http404
def getMyVideos(self, session):
'''This method returns the videos owned by the current user.'''
content = ''
try:
myVideos = video.objects.filter(owner = session['user'])
content = render_to_string('kirinki/myVideo.html', {'videos' : myVideos, 'session' : session}).encode('utf-8')
except video.DoesNotExist:
pass
return render_to_string('kirinki/section.html', {'title' : 'Mis vídeos', 'content' : content})
def getUploadVideo(self, base_url, request):
content = render_to_string('kirinki/form.html', {'form' : UploadForm(request.POST, request.FILES, error_class=ErrorClear), 'action' : base_url + '/upload', 'upload' : True}, context_instance=RequestContext(request))
return render_to_string('kirinki/section.html', {'title' : 'Subir vídeo', 'content' : content})
def getRender(self):
'''This method returns the generated HTML.'''
return self.render
| i02sopop/Kirinki | kirinki/videos.py | Python | agpl-3.0 | 16,846 |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
# Import the relevant PTS classes and modules
from pts.modeling.core.environment import load_modeling_environment_cwd
from pts.modeling.config.component import definition
# -----------------------------------------------------------------
# Load environment and model suite
environment = load_modeling_environment_cwd()
runs = environment.fitting_runs
# -----------------------------------------------------------------
properties = ["representation", "filters", "ranges", "genetic", "grid", "units", "types"]
# -----------------------------------------------------------------
definition = definition.copy()
# -----------------------------------------------------------------
# The fitting run for which to adapt the configuration
if runs.empty: raise RuntimeError("No fitting runs are present")
elif runs.has_single: definition.add_fixed("name", "name of the fitting run", runs.single_name)
else: definition.add_required("name", "string", "name of the fitting run", choices=runs.names)
# -----------------------------------------------------------------
# Dust or stellar
definition.add_positional_optional("properties", "string_list", "properties to adapt", default=properties, choices=properties)
# -----------------------------------------------------------------
# Select certain properties
definition.add_optional("contains", "string", "only adapt properties containing this string in their name")
definition.add_optional("not_contains", "string", "don't adapt properties containing this string in their name")
definition.add_optional("exact_name", "string", "only adapt properties with this exact string as their name")
definition.add_optional("exact_not_name", "string", "don't adapt properties with this exact string as their name")
definition.add_optional("startswith", "string", "only adapt properties whose name starts with this string")
definition.add_optional("endswith", "string", "only adapt properties whose name starts with this string")
# -----------------------------------------------------------------
# Save
definition.add_flag("save", "save adapted properties", True)
# -----------------------------------------------------------------
| SKIRT/PTS | modeling/config/adapt_fit.py | Python | agpl-3.0 | 2,495 |
import os
from importlib import import_module
from django.core.management.base import BaseCommand
from django.utils import translation
from django.conf import settings
def get_modules():
path = os.path.join(settings.BASE_DIR, 'utils', 'upgrade')
root, dirs, files = next(os.walk(path))
return files
class Command(BaseCommand):
"""
Upgrades Janeway
"""
help = "Upgrades an install from one version to another."
def add_arguments(self, parser):
"""Adds arguments to Django's management command-line parser.
:param parser: the parser to which the required arguments will be added
:return: None
"""
parser.add_argument('--path', required=False)
def handle(self, *args, **options):
if not options.get('path'):
print('No upgrade selected. Available upgrade paths: ')
for file in get_modules():
module_name = file.split('.')[0]
print('- {module_name}'.format(module_name=module_name))
print('To run an upgrade use the following: `python3 manage.py run_upgrade --path 12_13`')
else:
translation.activate('en')
upgrade_module_name = options.get('path')
upgrade_module_path = 'utils.upgrade.{module_name}'.format(module_name=upgrade_module_name)
try:
upgrade_module = import_module(upgrade_module_path)
upgrade_module.execute()
except ImportError as e:
print('There was an error running the requested upgrade: ')
print(e)
| BirkbeckCTP/janeway | src/utils/management/commands/run_upgrade.py | Python | agpl-3.0 | 1,613 |
from .naive import StratNaive
import random
import numpy as np
class BetaDecreaseStrat(StratNaive):
def __init__(self, vu_cfg, time_scale=0.9, **strat_cfg2):
StratNaive.__init__(self,vu_cfg=vu_cfg, **strat_cfg2)
self.time_scale = time_scale
def update_speaker(self, ms, w, mh, voc, mem, bool_succ, context=[]):
self.voc_update.beta = max(0,self.voc_update.beta - 1./self.time_scale)
return self.voc_update.update_speaker(ms, w, mh, voc, mem, bool_succ, context)
def update_hearer(self, ms, w, mh, voc, mem, bool_succ, context=[]):
self.voc_update.beta = max(0,self.voc_update.beta - 1./self.time_scale)
return self.voc_update.update_hearer(ms, w, mh, voc, mem, bool_succ, context)
| flowersteam/naminggamesal | naminggamesal/ngstrat/beta_decrease.py | Python | agpl-3.0 | 700 |
# -*- coding: utf-8 -*-
# Copyright (C) 2014-present Taiga Agile LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import connection
from django.conf import settings
from django.utils import timezone
from taiga.projects.history import services as history_service
from taiga.projects.history.choices import HistoryType
from . import tasks
def _get_project_webhooks(project):
webhooks = []
for webhook in project.webhooks.all():
webhooks.append({
"id": webhook.pk,
"url": webhook.url,
"key": webhook.key,
})
return webhooks
def on_new_history_entry(sender, instance, created, **kwargs):
if not settings.WEBHOOKS_ENABLED:
return None
if instance.is_hidden:
return None
model = history_service.get_model_from_key(instance.key)
pk = history_service.get_pk_from_key(instance.key)
try:
obj = model.objects.get(pk=pk)
except model.DoesNotExist:
# Catch simultaneous DELETE request
return None
webhooks = _get_project_webhooks(obj.project)
if instance.type == HistoryType.create:
task = tasks.create_webhook
extra_args = []
elif instance.type == HistoryType.change:
task = tasks.change_webhook
extra_args = [instance]
elif instance.type == HistoryType.delete:
task = tasks.delete_webhook
extra_args = []
by = instance.owner
date = timezone.now()
webhooks_args = []
for webhook in webhooks:
args = [webhook["id"], webhook["url"], webhook["key"], by, date, obj] + extra_args
webhooks_args.append(args)
connection.on_commit(lambda: _execute_task(task, webhooks_args))
def _execute_task(task, webhooks_args):
for webhook_args in webhooks_args:
if settings.CELERY_ENABLED:
task.delay(*webhook_args)
else:
task(*webhook_args)
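def _example_connect():
    # Illustrative only: Taiga registers this receiver elsewhere (e.g. in the
    # app config); the HistoryEntry import is an assumption made for this sketch.
    from django.db.models.signals import post_save
    from taiga.projects.history.models import HistoryEntry
    post_save.connect(on_new_history_entry, sender=HistoryEntry)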
| taigaio/taiga-back | taiga/webhooks/signal_handlers.py | Python | agpl-3.0 | 2,526 |
#!/usr/bin/env python
'''
##BOILERPLATE_COPYRIGHT
##BOILERPLATE_COPYRIGHT_END
'''
import unittest, copy
from testRoot import RootClass
from noink.user_db import UserDB
from noink.entry_db import EntryDB
class AddEntry(RootClass):
def test_AddEntry(self):
userDB = UserDB()
entryDB = EntryDB()
u = userDB.add("jontest", "pass", "Jon Q. Testuser")
title = 'Little Buttercup'
entry = 'There once was a man from Nantucket,' + \
'who kept his wife in a Bucket.' + \
"Wait... how'd she fit in that bucket anyway?"
e = entryDB.add(copy.deepcopy(title), entry, u)
self.assertTrue(e.title == title)
if __name__ == '__main__':
unittest.main()
| criswell/noink | src/tests/test_DelEntry.py | Python | agpl-3.0 | 740 |
from unittest import TestCase
from micall.drivers.run_info import RunInfo
from micall.drivers.sample import Sample
from micall.drivers.sample_group import SampleGroup
class RunInfoTest(TestCase):
def test_get_all_samples(self):
expected_fastq_paths = ['1a_R1_001.fastq',
'1b_R1_001.fastq',
'2_R1_001.fastq']
run_info = RunInfo(
sample_groups=[SampleGroup(Sample(fastq1='1a_R1_001.fastq'),
Sample(fastq1='1b_R1_001.fastq')),
SampleGroup(Sample(fastq1='2_R1_001.fastq'))])
fastq_paths = [sample.fastq1 for sample in run_info.get_all_samples()]
self.assertEqual(expected_fastq_paths, fastq_paths)
| cfe-lab/MiCall | micall/tests/test_run_info.py | Python | agpl-3.0 | 777 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import os
import arrow
import magic
import hashlib
import logging
import requests
from io import BytesIO
from PIL import Image
from flask import json
from .image import get_meta
from .video import get_meta as video_meta
import base64
from superdesk.errors import SuperdeskApiError
logger = logging.getLogger(__name__)
def hash_file(afile, hasher, blocksize=65536):
buf = afile.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(blocksize)
return hasher.hexdigest()
def get_file_name(file):
return hash_file(file, hashlib.sha256())
def download_file_from_url(url):
rv = requests.get(url, timeout=15)
if rv.status_code not in (200, 201):
raise SuperdeskApiError.internalError('Failed to retrieve file from URL: %s' % url)
mime = magic.from_buffer(rv.content, mime=True).decode('UTF-8')
ext = mime.split('/')[1]
name = 'stub.' + ext
return BytesIO(rv.content), name, mime
def download_file_from_encoded_str(encoded_str):
content = encoded_str.split(';base64,')
mime = content[0].split(':')[1]
ext = content[0].split('/')[1]
name = 'web_capture.' + ext
content = base64.b64decode(content[1])
return BytesIO(content), name, mime
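# For illustration: an encoded_str like "data:image/png;base64,iVBORw0KG..."
# yields mime "image/png", name "web_capture.png" and a BytesIO holding the
# decoded bytes.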
def process_file_from_stream(content, content_type=None):
content_type = content_type or content.content_type
content = BytesIO(content.read())
if 'application/' in content_type:
content_type = magic.from_buffer(content.getvalue(), mime=True).decode('UTF-8')
content.seek(0)
file_type, ext = content_type.split('/')
try:
metadata = process_file(content, file_type)
    except OSError:  # raised by PIL when the file should be an image but is not
raise SuperdeskApiError.internalError('Failed to process file')
file_name = get_file_name(content)
content.seek(0)
metadata = encode_metadata(metadata)
metadata.update({'length': json.dumps(len(content.getvalue()))})
return file_name, content_type, metadata
def encode_metadata(metadata):
return dict((k.lower(), json.dumps(v)) for k, v in metadata.items())
def decode_metadata(metadata):
return dict((k.lower(), decode_val(v)) for k, v in metadata.items())
def decode_val(string_val):
"""Format dates that elastic will try to convert automatically."""
val = json.loads(string_val)
try:
arrow.get(val, 'YYYY-MM-DD') # test if it will get matched by elastic
return str(arrow.get(val))
except (Exception):
return val
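# For illustration: decode_val('5') returns 5, while decode_val('"2014-03-01"')
# returns '2014-03-01T00:00:00+00:00', so bare dates reach elastic in one
# unambiguous form.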
def process_file(content, type):
if type == 'image':
return process_image(content, type)
if type in ('audio', 'video'):
return process_video(content, type)
return {}
def process_video(content, type):
content.seek(0)
meta = video_meta(content)
content.seek(0)
return meta
def process_image(content, type):
content.seek(0)
meta = get_meta(content)
content.seek(0)
return meta
def crop_image(content, file_name, cropping_data):
if cropping_data:
file_ext = os.path.splitext(file_name)[1][1:]
if file_ext in ('JPG', 'jpg'):
file_ext = 'jpeg'
logger.debug('Opened image from stream, going to crop it')
content.seek(0)
img = Image.open(content)
cropped = img.crop(cropping_data)
logger.debug('Cropped image from stream, going to save it')
try:
out = BytesIO()
cropped.save(out, file_ext)
out.seek(0)
return (True, out)
except Exception as io:
logger.exception(io)
return (False, content)
| vied12/superdesk | server/superdesk/media/media_operations.py | Python | agpl-3.0 | 3,953 |
#!/usr/bin/python
"""
Copyright 2012 Paul Willworth <[email protected]>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import cgi
import Cookie
import dbSession
import dbShared
import MySQLdb
import ghShared
import ghLists
from jinja2 import Environment, FileSystemLoader
# Get current url
try:
url = os.environ['SCRIPT_NAME']
except KeyError:
url = ''
uiTheme = ''
form = cgi.FieldStorage()
# Get Cookies
useCookies = 1
cookies = Cookie.SimpleCookie()
try:
cookies.load(os.environ['HTTP_COOKIE'])
except KeyError:
useCookies = 0
if useCookies:
try:
currentUser = cookies['userID'].value
except KeyError:
currentUser = ''
try:
loginResult = cookies['loginAttempt'].value
except KeyError:
loginResult = 'success'
try:
sid = cookies['gh_sid'].value
except KeyError:
sid = form.getfirst('gh_sid', '')
try:
uiTheme = cookies['uiTheme'].value
except KeyError:
uiTheme = ''
else:
currentUser = ''
loginResult = form.getfirst('loginAttempt', '')
sid = form.getfirst('gh_sid', '')
# Get a session
logged_state = 0
linkappend = ''
disableStr = ''
# escape input to prevent sql injection
sid = dbShared.dbInsertSafe(sid)
if loginResult is None:
loginResult = 'success'
sess = dbSession.getSession(sid, 2592000)
if (sess != ''):
logged_state = 1
currentUser = sess
if (uiTheme == ''):
uiTheme = dbShared.getUserAttr(currentUser, 'themeName')
if (useCookies == 0):
linkappend = 'gh_sid=' + sid
else:
disableStr = ' disabled="disabled"'
if (uiTheme == ''):
uiTheme = 'crafter'
pictureName = dbShared.getUserAttr(currentUser, 'pictureName')
print 'Content-type: text/html\n'
env = Environment(loader=FileSystemLoader('templates'))
env.globals['BASE_SCRIPT_URL'] = ghShared.BASE_SCRIPT_URL
template = env.get_template('waypointmaps.html')
print template.render(uiTheme=uiTheme, loggedin=logged_state, currentUser=currentUser, loginResult=loginResult, linkappend=linkappend, url=url, pictureName=pictureName, imgNum=ghShared.imgNum, galaxyList=ghLists.getGalaxyList(), planetList=ghLists.getPlanetList())
| clreinki/GalaxyHarvester | waypointMaps.py | Python | agpl-3.0 | 2,719 |
import os
import sys
import nose
from subprocess import CalledProcessError, check_output as run
from functools import partial
GJSLINT_COMMAND = 'gjslint'
GJSLINT_OPTIONS = ['--strict']
JS_BASE_FOLDER = os.path.join('skylines', 'public', 'js')
JS_FILES = [
'baro.js',
'fix-table.js',
'flight.js',
'general.js',
'map.js',
'phase-table.js',
'topbar.js',
'tracking.js',
'units.js',
]
def test_js_files():
for filename in JS_FILES:
f = partial(run_gjslint, filename)
f.description = 'gjslint {}'.format(filename)
yield f
def run_gjslint(filename):
path = os.path.join(JS_BASE_FOLDER, filename)
args = [GJSLINT_COMMAND]
args.extend(GJSLINT_OPTIONS)
args.append(path)
try:
run(args)
except CalledProcessError, e:
print e.output
raise AssertionError('gjslint has found errors.')
except OSError:
raise OSError('Failed to run gjslint. Please check that you have '
'installed it properly.')
if __name__ == "__main__":
sys.argv.append(__name__)
nose.run()
| dkm/skylines | skylines/tests/test_gjslint.py | Python | agpl-3.0 | 1,110 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (C) 2014 Didotech srl (<http://www.didotech.com>).
#
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import sale_order
from . import purchase_order
| iw3hxn/LibrERP | sale_direct_buy/models/__init__.py | Python | agpl-3.0 | 1,069 |
"""
Learning Tools Interoperability (LTI) module.
Resources
---------
Theoretical background and detailed specifications of LTI can be found on:
http://www.imsglobal.org/LTI/v1p1p1/ltiIMGv1p1p1.html
This module is based on the version 1.1.1 of the LTI specifications by the
IMS Global authority. For authentication, it uses OAuth1.
When responding back to the LTI tool provider, we must issue a correct
response. Types of responses and their message payload is available at:
Table A1.2 Interpretation of the 'CodeMajor/severity' matrix.
http://www.imsglobal.org/gws/gwsv1p0/imsgws_wsdlBindv1p0.html
A resource to test the LTI protocol (PHP realization):
http://www.imsglobal.org/developers/LTI/test/v1p1/lms.php
We have also begun to add support for LTI 1.2/2.0. We will keep this
docstring in synch with what support is available. The first LTI 2.0
feature to be supported is the REST API results service, see specification
at
http://www.imsglobal.org/lti/ltiv2p0/uml/purl.imsglobal.org/vocab/lis/v2/outcomes/Result/service.html
What is supported:
------------------
1.) Display of simple LTI in iframe or a new window.
2.) Multiple LTI components on a single page.
3.) The use of multiple LTI providers per course.
4.) Use of advanced LTI component that provides back a grade.
A) LTI 1.1.1 XML endpoint
a.) The LTI provider sends back a grade to a specified URL.
b.) Currently only action "update" is supported. "Read", and "delete"
actions initially weren't required.
B) LTI 2.0 Result Service JSON REST endpoint
(http://www.imsglobal.org/lti/ltiv2p0/uml/purl.imsglobal.org/vocab/lis/v2/outcomes/Result/service.html)
a.) Discovery of all such LTI http endpoints for a course. External tools GET from this discovery
endpoint and receive URLs for interacting with individual grading units.
(see lms/djangoapps/courseware/views.py:get_course_lti_endpoints)
b.) GET, PUT and DELETE in LTI Result JSON binding
(http://www.imsglobal.org/lti/ltiv2p0/mediatype/application/vnd/ims/lis/v2/result+json/index.html)
for a provider to synchronize grades into edx-platform. Reading, Setting, and Deleteing
Numeric grades between 0 and 1 and text + basic HTML feedback comments are supported, via
GET / PUT / DELETE HTTP methods respectively
"""
import datetime
from django.utils.timezone import UTC
import logging
import oauthlib.oauth1
from oauthlib.oauth1.rfc5849 import signature
import hashlib
import base64
import urllib
import textwrap
import bleach
from lxml import etree
from webob import Response
import mock
from xml.sax.saxutils import escape
from xmodule.editing_module import MetadataOnlyEditingDescriptor
from xmodule.raw_module import EmptyDataRawDescriptor
from xmodule.x_module import XModule, module_attr
from xmodule.course_module import CourseDescriptor
from xmodule.lti_2_util import LTI20ModuleMixin, LTIError
from pkg_resources import resource_string
from xblock.core import String, Scope, List, XBlock
from xblock.fields import Boolean, Float
log = logging.getLogger(__name__)
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
DOCS_ANCHOR_TAG_OPEN = (
"<a target='_blank' "
"href='http://edx.readthedocs.org/projects/ca/en/latest/exercises_tools/lti_component.html'>"
)
class LTIFields(object):
"""
Fields to define and obtain LTI tool from provider are set here,
except credentials, which should be set in course settings::
`lti_id` is id to connect tool with credentials in course settings. It should not contain :: (double semicolon)
`launch_url` is launch URL of tool.
`custom_parameters` are additional parameters to navigate to proper book and book page.
For example, for Vitalsource provider, `launch_url` should be
*https://bc-staging.vitalsource.com/books/book*,
and to get to proper book and book page, you should set custom parameters as::
vbid=put_book_id_here
book_location=page/put_page_number_here
Default non-empty URL for `launch_url` is needed due to oauthlib demand (URL scheme should be presented)::
https://github.com/idan/oauthlib/blob/master/oauthlib/oauth1/rfc5849/signature.py#L136
"""
display_name = String(
display_name=_("Display Name"),
help=_(
"Enter the name that students see for this component. "
"Analytics reports may also use the display name to identify this component."
),
scope=Scope.settings,
default="LTI",
)
lti_id = String(
display_name=_("LTI ID"),
help=_(
"Enter the LTI ID for the external LTI provider. "
"This value must be the same LTI ID that you entered in the "
"LTI Passports setting on the Advanced Settings page."
"<br />See {docs_anchor_open}the edX LTI documentation{anchor_close} for more details on this setting."
).format(
docs_anchor_open=DOCS_ANCHOR_TAG_OPEN,
anchor_close="</a>"
),
default='',
scope=Scope.settings
)
launch_url = String(
display_name=_("LTI URL"),
help=_(
"Enter the URL of the external tool that this component launches. "
"This setting is only used when Hide External Tool is set to False."
"<br />See {docs_anchor_open}the edX LTI documentation{anchor_close} for more details on this setting."
).format(
docs_anchor_open=DOCS_ANCHOR_TAG_OPEN,
anchor_close="</a>"
),
default='http://www.example.com',
scope=Scope.settings)
custom_parameters = List(
display_name=_("Custom Parameters"),
help=_(
"Add the key/value pair for any custom parameters, such as the page your e-book should open to or "
"the background color for this component."
"<br />See {docs_anchor_open}the edX LTI documentation{anchor_close} for more details on this setting."
).format(
docs_anchor_open=DOCS_ANCHOR_TAG_OPEN,
anchor_close="</a>"
),
scope=Scope.settings)
open_in_a_new_page = Boolean(
display_name=_("Open in New Page"),
help=_(
"Select True if you want students to click a link that opens the LTI tool in a new window. "
"Select False if you want the LTI content to open in an IFrame in the current page. "
"This setting is only used when Hide External Tool is set to False. "
),
default=True,
scope=Scope.settings
)
has_score = Boolean(
display_name=_("Scored"),
help=_(
"Select True if this component will receive a numerical score from the external LTI system."
),
default=False,
scope=Scope.settings
)
weight = Float(
display_name=_("Weight"),
help=_(
"Enter the number of points possible for this component. "
"The default value is 1.0. "
"This setting is only used when Scored is set to True."
),
default=1.0,
scope=Scope.settings,
values={"min": 0},
)
module_score = Float(
help=_("The score kept in the xblock KVS -- duplicate of the published score in django DB"),
default=None,
scope=Scope.user_state
)
score_comment = String(
help=_("Comment as returned from grader, LTI2.0 spec"),
default="",
scope=Scope.user_state
)
hide_launch = Boolean(
display_name=_("Hide External Tool"),
help=_(
"Select True if you want to use this component as a placeholder for syncing with an external grading "
"system rather than launch an external tool. "
"This setting hides the Launch button and any IFrames for this component."
),
default=False,
scope=Scope.settings
)
# Users will be presented with a message indicating that their e-mail/username would be sent to a third
# party application. When "Open in New Page" is not selected, the tool automatically appears without any user action.
ask_to_send_username = Boolean(
display_name=_("Request user's username"),
# Translators: This is used to request the user's username for a third party service.
# Usernames can only be requested if "Open in New Page" is set to True.
help=_(
"Select True to request the user's username. You must also set Open in New Page to True to get the user's information."
),
default=False,
scope=Scope.settings
)
ask_to_send_email = Boolean(
display_name=_("Request user's email"),
# Translators: This is used to request the user's email for a third party service.
# Emails can only be requested if "Open in New Page" is set to True.
help=_(
"Select True to request the user's email address. You must also set Open in New Page to True to get the user's information."
),
default=False,
scope=Scope.settings
)
description = String(
display_name=_("LTI Application Information"),
help=_(
"Enter a description of the third party application. If requesting username and/or email, use this text box to inform users "
"why their username and/or email will be forwarded to a third party application."
),
default="",
scope=Scope.settings
)
button_text = String(
display_name=_("Button Text"),
help=_(
"Enter the text on the button used to launch the third party application."
),
default="",
scope=Scope.settings
)
accept_grades_past_due = Boolean(
display_name=_("Accept grades past deadline"),
help=_("Select True to allow third party systems to post grades past the deadline."),
default=True,
scope=Scope.settings
)
class LTIModule(LTIFields, LTI20ModuleMixin, XModule):
"""
Module provides LTI integration to course.
Except usual Xmodule structure it proceeds with OAuth signing.
How it works::
1. Get credentials from course settings.
2. There is minimal set of parameters need to be signed (presented for Vitalsource)::
user_id
oauth_callback
lis_outcome_service_url
lis_result_sourcedid
launch_presentation_return_url
lti_message_type
lti_version
roles
*+ all custom parameters*
These parameters should be encoded and signed by *OAuth1* together with
`launch_url` and *POST* request type.
3. Signing proceeds with client key/secret pair obtained from course settings.
That pair should be obtained from LTI provider and set into course settings by course author.
After that signature and other OAuth data are generated.
OAuth data which is generated after signing is usual::
oauth_callback
oauth_nonce
oauth_consumer_key
oauth_signature_method
oauth_timestamp
oauth_version
4. All that data is passed to form and sent to LTI provider server by browser via
autosubmit via JavaScript.
Form example::
<form
action="${launch_url}"
name="ltiLaunchForm-${element_id}"
class="ltiLaunchForm"
method="post"
target="ltiLaunchFrame-${element_id}"
encType="application/x-www-form-urlencoded"
>
<input name="launch_presentation_return_url" value="" />
<input name="lis_outcome_service_url" value="" />
<input name="lis_result_sourcedid" value="" />
<input name="lti_message_type" value="basic-lti-launch-request" />
<input name="lti_version" value="LTI-1p0" />
<input name="oauth_callback" value="about:blank" />
<input name="oauth_consumer_key" value="${oauth_consumer_key}" />
<input name="oauth_nonce" value="${oauth_nonce}" />
<input name="oauth_signature_method" value="HMAC-SHA1" />
<input name="oauth_timestamp" value="${oauth_timestamp}" />
<input name="oauth_version" value="1.0" />
<input name="user_id" value="${user_id}" />
<input name="role" value="student" />
<input name="oauth_signature" value="${oauth_signature}" />
<input name="custom_1" value="${custom_param_1_value}" />
<input name="custom_2" value="${custom_param_2_value}" />
<input name="custom_..." value="${custom_param_..._value}" />
<input type="submit" value="Press to Launch" />
</form>
5. LTI provider has same secret key and it signs data string via *OAuth1* and compares signatures.
If signatures are correct, LTI provider redirects iframe source to LTI tool web page,
and LTI tool is rendered to iframe inside course.
Otherwise error message from LTI provider is generated.
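
A condensed sketch of the signing step (the URL and credentials are made up;
the real parameter set is assembled in ``oauth_params`` below)::

    client = oauthlib.oauth1.Client(client_key=u'key', client_secret=u'secret')
    __, headers, __ = client.sign(
        u'https://tool.example.com/launch',
        http_method=u'POST',
        body={u'user_id': u'anon-id', u'roles': u'Student'},
        headers={'Content-Type': 'application/x-www-form-urlencoded'})

``headers['Authorization']`` then carries oauth_nonce, oauth_timestamp,
oauth_signature, etc., which are rendered as the hidden fields of the launch form.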
"""
js = {
'js': [
resource_string(__name__, 'js/src/lti/lti.js')
]
}
css = {'scss': [resource_string(__name__, 'css/lti/lti.scss')]}
js_module_name = "LTI"
def get_input_fields(self):
# LTI provides a list of default parameters that might be passed as
# part of the POST data. These parameters should not be prefixed.
# Likewise, The creator of an LTI link can add custom key/value parameters
# to a launch which are to be included with the launch of the LTI link.
# In this case, we will automatically add `custom_` prefix before this parameters.
# See http://www.imsglobal.org/LTI/v1p1p1/ltiIMGv1p1p1.html#_Toc316828520
PARAMETERS = [
"lti_message_type",
"lti_version",
"resource_link_title",
"resource_link_description",
"user_image",
"lis_person_name_given",
"lis_person_name_family",
"lis_person_name_full",
"lis_person_contact_email_primary",
"lis_person_sourcedid",
"role_scope_mentor",
"context_type",
"context_title",
"context_label",
"launch_presentation_locale",
"launch_presentation_document_target",
"launch_presentation_css_url",
"launch_presentation_width",
"launch_presentation_height",
"launch_presentation_return_url",
"tool_consumer_info_product_family_code",
"tool_consumer_info_version",
"tool_consumer_instance_guid",
"tool_consumer_instance_name",
"tool_consumer_instance_description",
"tool_consumer_instance_url",
"tool_consumer_instance_contact_email",
]
client_key, client_secret = self.get_client_key_secret()
# parsing custom parameters to dict
custom_parameters = {}
for custom_parameter in self.custom_parameters:
try:
param_name, param_value = [p.strip() for p in custom_parameter.split('=', 1)]
except ValueError:
_ = self.runtime.service(self, "i18n").ugettext
msg = _('Could not parse custom parameter: {custom_parameter}. Should be "x=y" string.').format(
custom_parameter="{0!r}".format(custom_parameter)
)
raise LTIError(msg)
# LTI specs: 'custom_' should be prepended before each custom parameter, as pointed in link above.
if param_name not in PARAMETERS:
param_name = 'custom_' + param_name
custom_parameters[unicode(param_name)] = unicode(param_value)
return self.oauth_params(
custom_parameters,
client_key,
client_secret,
)
def get_context(self):
"""
Returns a context.
"""
# use bleach defaults. see https://github.com/jsocol/bleach/blob/master/bleach/__init__.py
# ALLOWED_TAGS are
# ['a', 'abbr', 'acronym', 'b', 'blockquote', 'code', 'em', 'i', 'li', 'ol', 'strong', 'ul']
#
# ALLOWED_ATTRIBUTES are
# 'a': ['href', 'title'],
# 'abbr': ['title'],
# 'acronym': ['title'],
#
# This lets all plaintext through.
sanitized_comment = bleach.clean(self.score_comment)
return {
'input_fields': self.get_input_fields(),
# These parameters do not participate in OAuth signing.
'launch_url': self.launch_url.strip(),
'element_id': self.location.html_id(),
'element_class': self.category,
'open_in_a_new_page': self.open_in_a_new_page,
'display_name': self.display_name,
'form_url': self.runtime.handler_url(self, 'preview_handler').rstrip('/?'),
'hide_launch': self.hide_launch,
'has_score': self.has_score,
'weight': self.weight,
'module_score': self.module_score,
'comment': sanitized_comment,
'description': self.description,
'ask_to_send_username': self.ask_to_send_username,
'ask_to_send_email': self.ask_to_send_email,
'button_text': self.button_text,
'accept_grades_past_due': self.accept_grades_past_due,
}
def get_html(self):
"""
Renders parameters to template.
"""
return self.system.render_template('lti.html', self.get_context())
@XBlock.handler
def preview_handler(self, _, __):
"""
This is called to get context with new oauth params to iframe.
"""
template = self.system.render_template('lti_form.html', self.get_context())
return Response(template, content_type='text/html')
def get_user_id(self):
user_id = self.runtime.anonymous_student_id
assert user_id is not None
return unicode(urllib.quote(user_id))
def get_outcome_service_url(self, service_name="grade_handler"):
"""
Return URL for storing grades.
To test LTI on sandbox we must use http scheme.
While testing locally and on Jenkins, mock_lti_server use http.referer
to obtain scheme, so it is ok to have http(s) anyway.
The scheme logic is handled in lms/lib/xblock/runtime.py
"""
return self.runtime.handler_url(self, service_name, thirdparty=True).rstrip('/?')
def get_resource_link_id(self):
"""
This is an opaque unique identifier that the TC guarantees will be unique
within the TC for every placement of the link.
If the tool / activity is placed multiple times in the same context,
each of those placements will be distinct.
This value will also change if the item is exported from one system or
context and imported into another system or context.
This parameter is required.
Example: u'edx.org-i4x-2-3-lti-31de800015cf4afb973356dbe81496df'
Hostname, edx.org,
makes resource_link_id change on import to another system.
Last part of location, location.name - 31de800015cf4afb973356dbe81496df,
is random hash, updated by course_id,
this makes resource_link_id unique inside single course.
First part of location is tag-org-course-category, i4x-2-3-lti.
Location.name itself does not change on import to another course,
but org and course_id change.
So together with org and course_id in a form of
i4x-2-3-lti-31de800015cf4afb973356dbe81496df this part of resource_link_id:
makes resource_link_id to be unique among courses inside same system.
"""
return unicode(urllib.quote("{}-{}".format(self.system.hostname, self.location.html_id())))
def get_lis_result_sourcedid(self):
"""
This field contains an identifier that indicates the LIS Result Identifier (if any)
associated with this launch. This field identifies a unique row and column within the
TC gradebook. This field is unique for every combination of context_id / resource_link_id / user_id.
This value may change for a particular resource_link_id / user_id from one launch to the next.
The TP should only retain the most recent value for this field for a particular resource_link_id / user_id.
This field is generally optional, but is required for grading.
"""
return "{context}:{resource_link}:{user_id}".format(
context=urllib.quote(self.context_id),
resource_link=self.get_resource_link_id(),
user_id=self.get_user_id()
)
def get_course(self):
"""
Return course by course id.
"""
return self.descriptor.runtime.modulestore.get_course(self.course_id)
@property
def context_id(self):
"""
Return context_id.
context_id is an opaque identifier that uniquely identifies the context (e.g., a course)
that contains the link being launched.
"""
return self.course_id.to_deprecated_string()
@property
def role(self):
"""
Get system user role and convert it to LTI role.
"""
roles = {
'student': u'Student',
'staff': u'Administrator',
'instructor': u'Instructor',
}
return roles.get(self.system.get_user_role(), u'Student')
def oauth_params(self, custom_parameters, client_key, client_secret):
"""
Signs request and returns signature and OAuth parameters.
`custom_paramters` is dict of parsed `custom_parameter` field
`client_key` and `client_secret` are LTI tool credentials.
Also *anonymous student id* is passed to template and therefore to LTI provider.
"""
client = oauthlib.oauth1.Client(
client_key=unicode(client_key),
client_secret=unicode(client_secret)
)
# Must have parameters for correct signing from LTI:
body = {
u'user_id': self.get_user_id(),
u'oauth_callback': u'about:blank',
u'launch_presentation_return_url': '',
u'lti_message_type': u'basic-lti-launch-request',
u'lti_version': 'LTI-1p0',
u'roles': self.role,
# Parameters required for grading:
u'resource_link_id': self.get_resource_link_id(),
u'lis_result_sourcedid': self.get_lis_result_sourcedid(),
u'context_id': self.context_id,
}
if self.has_score:
body.update({
u'lis_outcome_service_url': self.get_outcome_service_url()
})
self.user_email = ""
self.user_username = ""
# Username and email can't be sent in studio mode, because the user object is not defined.
# To test functionality test in LMS
if callable(self.runtime.get_real_user):
real_user_object = self.runtime.get_real_user(self.runtime.anonymous_student_id)
try:
self.user_email = real_user_object.email
except AttributeError:
self.user_email = ""
try:
self.user_username = real_user_object.username
except AttributeError:
self.user_username = ""
if self.open_in_a_new_page:
if self.ask_to_send_username and self.user_username:
body["lis_person_sourcedid"] = self.user_username
if self.ask_to_send_email and self.user_email:
body["lis_person_contact_email_primary"] = self.user_email
# Appending custom parameter for signing.
body.update(custom_parameters)
headers = {
# This is needed for body encoding:
'Content-Type': 'application/x-www-form-urlencoded',
}
try:
__, headers, __ = client.sign(
unicode(self.launch_url.strip()),
http_method=u'POST',
body=body,
headers=headers)
except ValueError: # Scheme not in url.
# https://github.com/idan/oauthlib/blob/master/oauthlib/oauth1/rfc5849/signature.py#L136
# Stubbing headers for now:
headers = {
u'Content-Type': u'application/x-www-form-urlencoded',
u'Authorization': u'OAuth oauth_nonce="80966668944732164491378916897", \
oauth_timestamp="1378916897", oauth_version="1.0", oauth_signature_method="HMAC-SHA1", \
oauth_consumer_key="", oauth_signature="frVp4JuvT1mVXlxktiAUjQ7%2F1cw%3D"'}
params = headers['Authorization']
# Parse headers to pass to template as part of context:
params = dict([param.strip().replace('"', '').split('=') for param in params.split(',')])
params[u'oauth_nonce'] = params[u'OAuth oauth_nonce']
del params[u'OAuth oauth_nonce']
# oauthlib encodes signature with
# 'Content-Type': 'application/x-www-form-urlencoded'
# so '='' becomes '%3D'.
# We send form via browser, so browser will encode it again,
# So we need to decode signature back:
params[u'oauth_signature'] = urllib.unquote(params[u'oauth_signature']).decode('utf8')
# Add LTI parameters to OAuth parameters for sending in form.
params.update(body)
return params
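    # Editorial sketch (not part of the original module): the dict returned above is
    # meant to be rendered into a hidden HTML form that the browser auto-submits to
    # the launch URL; form and field names below are illustrative only:
    #   <form action="{{ launch_url }}" method="post">
    #     {% for key, value in params.items() %}
    #       <input type="hidden" name="{{ key }}" value="{{ value }}" />
    #     {% endfor %}
    #   </form>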
def max_score(self):
return self.weight if self.has_score else None
@XBlock.handler
def grade_handler(self, request, suffix): # pylint: disable=unused-argument
"""
This is called by courseware.module_render, to handle an AJAX call.
Used only for grading. Returns XML response.
Example of request body from LTI provider::
<?xml version = "1.0" encoding = "UTF-8"?>
<imsx_POXEnvelopeRequest xmlns = "some_link (may be not required)">
<imsx_POXHeader>
<imsx_POXRequestHeaderInfo>
<imsx_version>V1.0</imsx_version>
<imsx_messageIdentifier>528243ba5241b</imsx_messageIdentifier>
</imsx_POXRequestHeaderInfo>
</imsx_POXHeader>
<imsx_POXBody>
<replaceResultRequest>
<resultRecord>
<sourcedGUID>
<sourcedId>feb-123-456-2929::28883</sourcedId>
</sourcedGUID>
<result>
<resultScore>
<language>en-us</language>
<textString>0.4</textString>
</resultScore>
</result>
</resultRecord>
</replaceResultRequest>
</imsx_POXBody>
</imsx_POXEnvelopeRequest>
Example of correct/incorrect answer XML body:: see response_xml_template.
"""
response_xml_template = textwrap.dedent("""\
<?xml version="1.0" encoding="UTF-8"?>
<imsx_POXEnvelopeResponse xmlns = "http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0">
<imsx_POXHeader>
<imsx_POXResponseHeaderInfo>
<imsx_version>V1.0</imsx_version>
<imsx_messageIdentifier>{imsx_messageIdentifier}</imsx_messageIdentifier>
<imsx_statusInfo>
<imsx_codeMajor>{imsx_codeMajor}</imsx_codeMajor>
<imsx_severity>status</imsx_severity>
<imsx_description>{imsx_description}</imsx_description>
<imsx_messageRefIdentifier>
</imsx_messageRefIdentifier>
</imsx_statusInfo>
</imsx_POXResponseHeaderInfo>
</imsx_POXHeader>
<imsx_POXBody>{response}</imsx_POXBody>
</imsx_POXEnvelopeResponse>
""")
# Returns when `action` is unsupported.
# Supported actions:
# - replaceResultRequest.
unsupported_values = {
'imsx_codeMajor': 'unsupported',
'imsx_description': 'Target does not support the requested operation.',
'imsx_messageIdentifier': 'unknown',
'response': ''
}
# Returns if:
# - past due grades are not accepted and grade is past due
# - score is out of range
# - can't parse response from TP;
# - can't verify OAuth signing or OAuth signing is incorrect.
failure_values = {
'imsx_codeMajor': 'failure',
'imsx_description': 'The request has failed.',
'imsx_messageIdentifier': 'unknown',
'response': ''
}
if not self.accept_grades_past_due and self.is_past_due():
failure_values['imsx_description'] = "Grade is past due"
return Response(response_xml_template.format(**failure_values), content_type="application/xml")
try:
imsx_messageIdentifier, sourcedId, score, action = self.parse_grade_xml_body(request.body)
except Exception as e:
error_message = "Request body XML parsing error: " + escape(e.message)
log.debug("[LTI]: " + error_message)
failure_values['imsx_description'] = error_message
return Response(response_xml_template.format(**failure_values), content_type="application/xml")
# Verify OAuth signing.
try:
self.verify_oauth_body_sign(request)
except (ValueError, LTIError) as e:
failure_values['imsx_messageIdentifier'] = escape(imsx_messageIdentifier)
error_message = "OAuth verification error: " + escape(e.message)
failure_values['imsx_description'] = error_message
log.debug("[LTI]: " + error_message)
return Response(response_xml_template.format(**failure_values), content_type="application/xml")
real_user = self.system.get_real_user(urllib.unquote(sourcedId.split(':')[-1]))
if not real_user: # that means we can't save to database, as we do not have real user id.
failure_values['imsx_messageIdentifier'] = escape(imsx_messageIdentifier)
failure_values['imsx_description'] = "User not found."
return Response(response_xml_template.format(**failure_values), content_type="application/xml")
if action == 'replaceResultRequest':
self.set_user_module_score(real_user, score, self.max_score())
values = {
'imsx_codeMajor': 'success',
'imsx_description': 'Score for {sourced_id} is now {score}'.format(sourced_id=sourcedId, score=score),
'imsx_messageIdentifier': escape(imsx_messageIdentifier),
'response': '<replaceResultResponse/>'
}
log.debug("[LTI]: Grade is saved.")
return Response(response_xml_template.format(**values), content_type="application/xml")
unsupported_values['imsx_messageIdentifier'] = escape(imsx_messageIdentifier)
log.debug("[LTI]: Incorrect action.")
return Response(response_xml_template.format(**unsupported_values), content_type='application/xml')
@classmethod
def parse_grade_xml_body(cls, body):
"""
Parses XML from request.body and returns parsed data
XML body should contain nsmap with namespace, that is specified in LTI specs.
Returns tuple: imsx_messageIdentifier, sourcedId, score, action
Raises Exception if can't parse.
"""
lti_spec_namespace = "http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0"
namespaces = {'def': lti_spec_namespace}
data = body.strip().encode('utf-8')
parser = etree.XMLParser(ns_clean=True, recover=True, encoding='utf-8')
root = etree.fromstring(data, parser=parser)
imsx_messageIdentifier = root.xpath("//def:imsx_messageIdentifier", namespaces=namespaces)[0].text or ''
sourcedId = root.xpath("//def:sourcedId", namespaces=namespaces)[0].text
score = root.xpath("//def:textString", namespaces=namespaces)[0].text
action = root.xpath("//def:imsx_POXBody", namespaces=namespaces)[0].getchildren()[0].tag.replace('{' + lti_spec_namespace + '}', '')
        # Raise an exception if the score is not a float or lies outside the 0.0-1.0 range required by the spec.
score = float(score)
if not 0 <= score <= 1:
raise LTIError('score value outside the permitted range of 0-1.')
return imsx_messageIdentifier, sourcedId, score, action
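    # Editorial sketch (not part of the original module): for the example request body
    # shown in grade_handler's docstring, this method returns roughly
    #   ('528243ba5241b', 'feb-123-456-2929::28883', 0.4, 'replaceResultRequest')
    # i.e. (imsx_messageIdentifier, sourcedId, score as float, action).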
def verify_oauth_body_sign(self, request, content_type='application/x-www-form-urlencoded'):
"""
Verify grade request from LTI provider using OAuth body signing.
Uses http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html::
This specification extends the OAuth signature to include integrity checks on HTTP request bodies
with content types other than application/x-www-form-urlencoded.
Arguments:
request: DjangoWebobRequest.
Raises:
LTIError if request is incorrect.
"""
client_key, client_secret = self.get_client_key_secret()
headers = {
'Authorization': unicode(request.headers.get('Authorization')),
'Content-Type': content_type,
}
sha1 = hashlib.sha1()
sha1.update(request.body)
oauth_body_hash = base64.b64encode(sha1.digest())
oauth_params = signature.collect_parameters(headers=headers, exclude_oauth_signature=False)
oauth_headers = dict(oauth_params)
oauth_signature = oauth_headers.pop('oauth_signature')
mock_request_lti_1 = mock.Mock(
uri=unicode(urllib.unquote(self.get_outcome_service_url())),
http_method=unicode(request.method),
params=oauth_headers.items(),
signature=oauth_signature
)
mock_request_lti_2 = mock.Mock(
uri=unicode(urllib.unquote(request.url)),
http_method=unicode(request.method),
params=oauth_headers.items(),
signature=oauth_signature
)
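        # Editorial note: two candidate requests are built above because the configured
        # outcome service URL and the URL the request actually arrived on may differ
        # (for example behind a proxy); the signature check below accepts a match on either.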
if oauth_body_hash != oauth_headers.get('oauth_body_hash'):
log.error(
"OAuth body hash verification failed, provided: {}, "
"calculated: {}, for url: {}, body is: {}".format(
oauth_headers.get('oauth_body_hash'),
oauth_body_hash,
self.get_outcome_service_url(),
request.body
)
)
            raise LTIError("OAuth body hash verification failed.")
if (not signature.verify_hmac_sha1(mock_request_lti_1, client_secret) and not
signature.verify_hmac_sha1(mock_request_lti_2, client_secret)):
log.error("OAuth signature verification failed, for "
"headers:{} url:{} method:{}".format(
oauth_headers,
self.get_outcome_service_url(),
unicode(request.method)
))
raise LTIError("OAuth signature verification has failed.")
def get_client_key_secret(self):
"""
Obtains client_key and client_secret credentials from current course.
"""
course = self.get_course()
for lti_passport in course.lti_passports:
try:
lti_id, key, secret = [i.strip() for i in lti_passport.split(':')]
except ValueError:
_ = self.runtime.service(self, "i18n").ugettext
msg = _('Could not parse LTI passport: {lti_passport}. Should be "id:key:secret" string.').format(
lti_passport='{0!r}'.format(lti_passport)
)
raise LTIError(msg)
if lti_id == self.lti_id.strip():
return key, secret
return '', ''
def is_past_due(self):
"""
Is it now past this problem's due date, including grace period?
"""
due_date = self.due # pylint: disable=no-member
if self.graceperiod is not None and due_date: # pylint: disable=no-member
close_date = due_date + self.graceperiod # pylint: disable=no-member
else:
close_date = due_date
return close_date is not None and datetime.datetime.now(UTC()) > close_date
class LTIDescriptor(LTIFields, MetadataOnlyEditingDescriptor, EmptyDataRawDescriptor):
"""
Descriptor for LTI Xmodule.
"""
module_class = LTIModule
grade_handler = module_attr('grade_handler')
preview_handler = module_attr('preview_handler')
lti_2_0_result_rest_handler = module_attr('lti_2_0_result_rest_handler')
clear_user_module_score = module_attr('clear_user_module_score')
get_outcome_service_url = module_attr('get_outcome_service_url')
| mtlchun/edx | common/lib/xmodule/xmodule/lti_module.py | Python | agpl-3.0 | 37,531 |
from datetime import datetime, date
import pytest
from pytz import UTC
from uber.config import c
from uber.models import Attendee, Session
from uber.site_sections import summary
@pytest.fixture
def birthdays():
dates = [
date(1964, 12, 30),
date(1964, 12, 31),
date(1964, 1, 1),
date(1964, 1, 2),
date(1964, 1, 9),
date(1964, 1, 10),
date(1964, 1, 11),
date(1964, 1, 12),
date(1964, 1, 30),
date(1964, 1, 31),
date(1964, 2, 1),
date(1964, 2, 2),
date(1964, 2, 27),
date(1964, 2, 28),
date(1964, 2, 29),
date(1964, 3, 1),
date(1964, 3, 2)]
attendees = []
for d in dates:
attendees.append(Attendee(
placeholder=True,
first_name='Born on',
last_name=d.strftime('%B %-d, %Y'),
ribbon=c.VOLUNTEER_RIBBON,
staffing=True,
birthdate=d))
ids = []
with Session() as session:
session.bulk_insert(attendees)
ids = [a.id for a in attendees]
yield ids
with Session() as session:
session.query(Attendee).filter(Attendee.id.in_(ids)).delete(
synchronize_session=False)
class TestBirthdayCalendar(object):
@pytest.mark.parametrize('year', [None, 2027, 2028])
def test_attendee_birthday_calendar(
self,
admin_attendee,
year,
birthdays,
monkeypatch):
if year:
assert str(year)
response = summary.Root().attendee_birthday_calendar(year=year)
else:
assert str(datetime.now(UTC).year)
response = summary.Root().attendee_birthday_calendar()
if isinstance(response, bytes):
response = response.decode('utf-8')
lines = response.strip().split('\n')
assert len(lines) == (17 + 1) # Extra line for the header
@pytest.mark.parametrize('epoch,eschaton,expected', [
(datetime(2018, 1, 10), datetime(2018, 1, 11), 2), # Normal dates
(datetime(2017, 12, 31), datetime(2018, 1, 1), 2), # Crossing the year
(datetime(2018, 1, 31), datetime(2018, 2, 1), 2), # Crossing the month
(datetime(2018, 2, 28), datetime(2018, 3, 1), 3), # Leap day
(datetime(2018, 1, 1), datetime(2018, 3, 4), 15), # Multi-month
(datetime(2017, 12, 28), datetime(2018, 3, 4), 17), # Everybody
])
def test_event_birthday_calendar(
self,
admin_attendee,
epoch,
eschaton,
expected,
birthdays,
monkeypatch):
monkeypatch.setattr(c, 'EPOCH', epoch)
monkeypatch.setattr(c, 'ESCHATON', eschaton)
response = summary.Root().event_birthday_calendar()
if isinstance(response, bytes):
response = response.decode('utf-8')
lines = response.strip().split('\n')
assert len(lines) == (expected + 1) # Extra line for the header
def test_event_birthday_calendar_correct_birthday_years(
self,
admin_attendee,
birthdays,
monkeypatch):
monkeypatch.setattr(c, 'EPOCH', datetime(2017, 12, 31))
monkeypatch.setattr(c, 'ESCHATON', datetime(2018, 1, 1))
response = summary.Root().event_birthday_calendar()
if isinstance(response, bytes):
response = response.decode('utf-8')
assert '"Born on December 31, 1964\'s Birthday",2017-12-31' in response
assert '"Born on January 1, 1964\'s Birthday",2018-01-01' in response
lines = response.strip().split('\n')
assert len(lines) == (2 + 1) # Extra line for the header
| magfest/ubersystem | tests/uber/site_sections/test_summary.py | Python | agpl-3.0 | 3,732 |
from django.template import Library, Node, TemplateSyntaxError, Variable
from django.conf import settings
from django.core import urlresolvers
import hashlib
import re
register = Library()
class ViewNode(Node):
def __init__(self, parser, token):
self.args = []
self.kwargs = {}
tokens = token.split_contents()
if len(tokens) < 2:
raise TemplateSyntaxError("%r tag requires one or more arguments" % token.contents.split()[0])
tag_name = tokens.pop(0)
self.url_or_view = tokens.pop(0)
for token in tokens:
equals = token.find("=")
if equals == -1:
self.args.append(token)
else:
self.kwargs[str(token[:equals])] = token[equals + 1:]
def render(self, context):
print('render view tag...')
if 'request' not in context:
return ""
request = context['request']
# get the url for the view
url = Variable(self.url_or_view).resolve(context)
if not settings.USE_AJAX_REQUESTS:
# do not load the whole template, just the content, like an ajax request
#request.is_ajax = True # not needed since the jQuery.get() is implying this
urlconf = getattr(request, "urlconf", settings.ROOT_URLCONF)
resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
# get the view function
view, args, kwargs = resolver.resolve(url)
try:
if callable(view):
ret = view(context['request'], *args, **kwargs).render()
return ret.rendered_content
raise Exception("%r is not callable" % view)
except:
if settings.TEMPLATE_DEBUG:
raise
else:
print('return js code for jquery')
return """<div id="%(div_id)s">loading ...</div>
<script>
$.get( "%(url)s", function( data ) {
$( "#%(div_id)s" ).html( data );
});
</script>""" % {'div_id': url.replace("/", ""), 'url': url}
return ""
register.tag('view', ViewNode)
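# Editorial usage sketch (not part of the original file; the tag-library and URL names
# are hypothetical). After loading this library in a template you could write e.g.
#   {% load view_tag %}
#   {% view "/dashboard/stats/" %}
# With settings.USE_AJAX_REQUESTS disabled the given URL is resolved and the view is
# rendered inline; otherwise a placeholder <div> plus a jQuery $.get() snippet that
# fetches the URL asynchronously is returned.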
| meymarce/overlord | overlord/templatetags/view_tag.py | Python | agpl-3.0 | 2,130 |
###########################################################################
# Concurrent WSGI server - webserver3h.py #
# #
# Tested with Python 2.7.9 on Ubuntu 14.04 & Mac OS X #
###########################################################################
import errno
import os
import signal
import socket
import StringIO
import sys
def grim_reaper(signum, frame):
while True:
try:
pid, status = os.waitpid(
-1, # Wait for any child process
os.WNOHANG # Do not block and return EWOULDBLOCK error
)
print(
'Child {pid} terminated with status {status}'
'\n'.format(pid=pid, status=status)
)
except OSError:
return
if pid == 0: # no more zombies
return
class WSGIServer(object):
address_family = socket.AF_INET
socket_type = socket.SOCK_STREAM
request_queue_size = 1024
def __init__(self, server_address):
# Create a listening socket
self.listen_socket = listen_socket = socket.socket(
self.address_family,
self.socket_type
)
# Allow to reuse the same address
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind
listen_socket.bind(server_address)
# Activate
listen_socket.listen(self.request_queue_size)
# Get server host name and port
host, port = self.listen_socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
# Return headers set by Web framework/Web application
self.headers_set = []
def set_app(self, application):
self.application = application
def serve_forever(self):
listen_socket = self.listen_socket
while True:
try:
self.client_connection, client_address = listen_socket.accept()
except IOError as e:
code, msg = e.args
# restart 'accept' if it was interrupted
if code == errno.EINTR:
continue
else:
raise
pid = os.fork()
if pid == 0: # child
listen_socket.close() # close child copy
# Handle one request and close the client connection.
self.handle_one_request()
os._exit(0)
else: # parent
self.client_connection.close() # close parent copy
def handle_one_request(self):
self.request_data = request_data = self.client_connection.recv(1024)
# Print formatted request data a la 'curl -v'
print(''.join(
'< {line}\n'.format(line=line)
for line in request_data.splitlines()
))
self.parse_request(request_data)
# Construct environment dictionary using request data
env = self.get_environ()
# It's time to call our application callable and get
# back a result that will become HTTP response body
result = self.application(env, self.start_response)
# Construct a response and send it back to the client
self.finish_response(result)
def parse_request(self, text):
request_line = text.splitlines()[0]
request_line = request_line.rstrip('\r\n')
# Break down the request line into components
(self.request_method, # GET
self.path, # /hello
self.request_version # HTTP/1.1
) = request_line.split()
def get_environ(self):
env = {}
# The following code snippet does not follow PEP8 conventions
# but it's formatted the way it is for demonstration purposes
# to emphasize the required variables and their values
#
# Required WSGI variables
env['wsgi.version'] = (1, 0)
env['wsgi.url_scheme'] = 'http'
env['wsgi.input'] = StringIO.StringIO(self.request_data)
env['wsgi.errors'] = sys.stderr
env['wsgi.multithread'] = False
env['wsgi.multiprocess'] = False
env['wsgi.run_once'] = False
# Required CGI variables
env['REQUEST_METHOD'] = self.request_method # GET
env['PATH_INFO'] = self.path # /hello
env['SERVER_NAME'] = self.server_name # localhost
env['SERVER_PORT'] = str(self.server_port) # 8888
return env
def start_response(self, status, response_headers, exc_info=None):
# Add necessary server headers
server_headers = [
('Date', 'Tue, 31 Mar 2015 12:54:48 GMT'),
('Server', 'WSGIServer 0.2'),
]
self.headers_set = [status, response_headers + server_headers]
# To adhere to WSGI specification the start_response must return
        # a 'write' callable. For simplicity's sake we'll ignore that detail
# for now.
# return self.finish_response
def finish_response(self, result):
try:
status, response_headers = self.headers_set
response = 'HTTP/1.1 {status}\r\n'.format(status=status)
for header in response_headers:
response += '{0}: {1}\r\n'.format(*header)
response += '\r\n'
for data in result:
response += data
# Print formatted response data a la 'curl -v'
print(''.join(
'> {line}\n'.format(line=line)
for line in response.splitlines()
))
self.client_connection.sendall(response)
finally:
self.client_connection.close()
SERVER_ADDRESS = (HOST, PORT) = '', 8888
def make_server(server_address, application):
signal.signal(signal.SIGCHLD, grim_reaper)
server = WSGIServer(server_address)
server.set_app(application)
return server
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit('Provide a WSGI application object as module:callable')
app_path = sys.argv[1]
module, application = app_path.split(':')
module = __import__(module)
application = getattr(module, application)
httpd = make_server(SERVER_ADDRESS, application)
print('WSGIServer: Serving HTTP on port {port} ...\n'.format(port=PORT))
httpd.serve_forever()
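# Editorial sketch (not part of the original file; the module and file names are
# assumed for the example): a minimal WSGI application this server can serve.
# Saved as wsgiapp.py, it would be started with
#   $ python webserver3h.py wsgiapp:app
#
#   def app(environ, start_response):
#       status = '200 OK'
#       response_headers = [('Content-Type', 'text/plain')]
#       start_response(status, response_headers)
#       return ['Hello world from a minimal WSGI app!\n']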
| hamishcunningham/fishy-wifi | wegrow-cloudside/elf-data-collector/webserver4/server-again.py | Python | agpl-3.0 | 6,539 |
# Copyright (C) 2018 - TODAY, Pavlov Media
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from . import models
from . import wizards
| OCA/contract | agreement_legal/__init__.py | Python | agpl-3.0 | 151 |
# -*- coding: utf-8 -*-
{
'name': "Better validation for Attendance",
'summary': """
Short (1 phrase/line) summary of the module's purpose, used as
subtitle on modules listing or apps.openerp.com""",
'description': """
Long description of module's purpose
""",
'author': "Jörn Mankiewicz",
'website': "http://www.yourcompany.com",
# Categories can be used to filter modules in modules listing
# Check https://github.com/odoo/odoo/blob/master/openerp/addons/base/module/module_data.xml
# for the full list
'category': 'Uncategorized',
'version': '8.0.0.1',
# any module necessary for this one to work correctly
'depends': ['base','hr_attendance','hr_timesheet_improvement'],
# always loaded
'data': [
# 'security/ir.model.access.csv',
'views/hr_attendance.xml',
],
# only loaded in demonstration mode
'demo': [
'demo.xml',
],
}
| jmankiewicz/odooAddons | hr_attendance_new_check/__openerp__.py | Python | agpl-3.0 | 958 |
import os
import time
import sys
FOLDERPATH = sys.argv[1]
#os.chdir(FOLDERPATH)
walk = os.walk(FOLDERPATH)
FSEVENT = "delete"
for item in walk:
FILEPATHPREFIX = item[0] + "\\"
for song in item[2]:
if song.endswith(".mp3"):
FILEPATH = "%s%s" % (FILEPATHPREFIX, song)
os.system('python script.py "' + song + '" "' + FILEPATH + '" "' + FSEVENT + '"') | collaj/MusicServer | scripts/test_script_delete.py | Python | agpl-3.0 | 404 |
# coding=utf-8
# Copyright (c) 2001, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# [matrix] channel #navitia:matrix.org (https://app.element.io/#/room/#navitia:matrix.org)
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from tests.mock_navitia import navitia_response
response = navitia_response.NavitiaResponse()
response.queries = [
"vehicle_journeys/?filter=vehicle_journey.has_code(source, Code-orders)&since=20120615T120000Z&until=20120615T190000Z&data_freshness=base_schedule&depth=2"
# resquest time is UTC -> 12:00 is 8:00 local in Sherbrooke
]
response.response_code = 200
response.json_response = """
{
"disruptions": [],
"feed_publishers": [
{
"id": "builder",
"license": "ODBL",
"name": "departure board",
"url": "www.canaltp.fr"
}
],
"links": [
],
"pagination": {
"items_on_page": 1,
"items_per_page": 25,
"start_page": 0,
"total_result": 1
},
"vehicle_journeys": [
{
"calendars": [
{
"active_periods": [
{
"begin": "20120615",
"end": "20130615"
}
],
"week_pattern": {
"friday": true,
"monday": false,
"saturday": false,
"sunday": false,
"thursday": false,
"tuesday": false,
"wednesday": false
}
}
],
"disruptions": [],
"id": "R:vj1",
"name": "R:vj1",
"stop_times": [
{
"arrival_time": "100000",
"departure_time": "100000",
"utc_arrival_time": "140000",
"utc_departure_time": "140000",
"headsign": "R:vj1",
"journey_pattern_point": {
"id": "journey_pattern_point:14"
},
"stop_point": {
"codes": [
{
"type": "source",
"value": "Code-StopR1"
}
],
"coord": {
"lat": "0",
"lon": "0"
},
"equipments": [
"has_wheelchair_boarding",
"has_bike_accepted"
],
"id": "StopR1",
"label": "StopR1",
"links": [],
"name": "StopR1",
"stop_area": {
"coord": {
"lat": "0",
"lon": "0"
},
"id": "StopR1",
"label": "StopR1",
"links": [],
"name": "StopR1",
"timezone": "America/Montreal"
}
}
},
{
"arrival_time": "101000",
"departure_time": "101000",
"utc_arrival_time": "140100",
"utc_departure_time": "140100",
"headsign": "R:vj1",
"journey_pattern_point": {
"id": "journey_pattern_point:15"
},
"stop_point": {
"codes": [
{
"type": "source",
"value": "Code-StopR2"
}
],
"coord": {
"lat": "0",
"lon": "0"
},
"equipments": [
"has_wheelchair_boarding",
"has_bike_accepted"
],
"id": "StopR2",
"label": "StopR2",
"links": [],
"name": "StopR2",
"stop_area": {
"coord": {
"lat": "0",
"lon": "0"
},
"id": "StopR2",
"label": "StopR2",
"links": [],
"name": "StopR2",
"timezone": "America/Montreal"
}
}
},
{
"arrival_time": "102000",
"departure_time": "102000",
"utc_arrival_time": "140200",
"utc_departure_time": "140200",
"headsign": "R:vj1",
"journey_pattern_point": {
"id": "journey_pattern_point:16"
},
"stop_point": {
"codes": [
{
"type": "source",
"value": "Code-StopR3"
}
],
"coord": {
"lat": "0",
"lon": "0"
},
"equipments": [
"has_wheelchair_boarding",
"has_bike_accepted"
],
"id": "StopR3",
"label": "StopR3",
"links": [],
"name": "StopR3",
"stop_area": {
"coord": {
"lat": "0",
"lon": "0"
},
"id": "StopR3",
"label": "StopR3",
"links": [],
"name": "StopR3",
"timezone": "America/Montreal"
}
}
},
{
"arrival_time": "103000",
"departure_time": "103000",
"utc_arrival_time": "140300",
"utc_departure_time": "140300",
"headsign": "R:vj1",
"journey_pattern_point": {
"id": "journey_pattern_point:17"
},
"stop_point": {
"codes": [
{
"type": "source",
"value": "Code-StopR4"
}
],
"coord": {
"lat": "0",
"lon": "0"
},
"equipments": [
"has_wheelchair_boarding",
"has_bike_accepted"
],
"id": "StopR4",
"label": "StopR4",
"links": [],
"name": "StopR4",
"stop_area": {
"coord": {
"lat": "0",
"lon": "0"
},
"id": "StopR4",
"label": "StopR4",
"links": [],
"name": "StopR4",
"timezone": "America/Montreal"
}
}
},
{
"arrival_time": "104000",
"departure_time": "104000",
"utc_arrival_time": "140400",
"utc_departure_time": "140400",
"headsign": "R:vj1",
"journey_pattern_point": {
"id": "journey_pattern_point:17"
},
"stop_point": {
"codes": [
{
"type": "source",
"value": "Code-StopR5"
}
],
"coord": {
"lat": "0",
"lon": "0"
},
"equipments": [
"has_wheelchair_boarding",
"has_bike_accepted"
],
"id": "StopR5",
"label": "StopR5",
"links": [],
"name": "StopR5",
"stop_area": {
"coord": {
"lat": "0",
"lon": "0"
},
"id": "StopR5",
"label": "StopR5",
"links": [],
"name": "StopR5",
"timezone": "America/Montreal"
}
}
},
{
"arrival_time": "105000",
"departure_time": "105000",
"utc_arrival_time": "140500",
"utc_departure_time": "140500",
"headsign": "R:vj1",
"journey_pattern_point": {
"id": "journey_pattern_point:17"
},
"stop_point": {
"codes": [
{
"type": "source",
"value": "Code-StopR6"
}
],
"coord": {
"lat": "0",
"lon": "0"
},
"equipments": [
"has_wheelchair_boarding",
"has_bike_accepted"
],
"id": "StopR6",
"label": "StopR6",
"links": [],
"name": "StopR6",
"stop_area": {
"coord": {
"lat": "0",
"lon": "0"
},
"id": "StopR6",
"label": "StopR6",
"links": [],
"name": "StopR6",
"timezone": "America/Montreal"
}
}
}
],
"trip": {
"id": "R:vj1",
"name": "R:vj1"
},
"validity_pattern": {
"beginning_date": "20120614",
"days": "100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010000001000000100000010"
}
}
]
}
"""
| CanalTP/kirin | tests/mock_navitia/vj_bad_order.py | Python | agpl-3.0 | 13,166 |
####################################################################################################
# Copyright (C) 2016 by Ingo Keller, Katrin Lohan #
# <[email protected]> #
# #
# This file is part of pyJD (Python/Yarp Tools for the JD robot). #
# #
# pyJD is free software: you can redistribute it and/or modify it under the terms of the #
# GNU Affero General Public License as published by the Free Software Foundation, either #
# version 3 of the License, or (at your option) any later version. #
# #
# pyJD is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; #
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. #
# See the GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with pyJD. If not, see <http://www.gnu.org/licenses/>. #
####################################################################################################
import argparse
import socket
import time
import yarp
EMSG_YARP_NOT_FOUND = "Could not connect to the yarp server. Try running 'yarp detect'."
EMSG_ROBOT_NOT_FOUND = 'Could not connect to the robot at %s:%s'
class EZModule(yarp.RFModule):
""" The EZBModule class provides a base class for developing modules for the JD robot.
"""
# Default IP Address and Port for the JD Humanoid Robot.
TCP_IP = '192.168.1.1'
TCP_PORT = 23
# Existing motor ID's are D0-D9, D12-D14 and D16-D18 there are more limits
LIMITS = [ (30, 180),
(70, 170),
(0, 170),
(0, 170),
(0, 60),
(0, 180),
(0, 90),
(0, 60),
(0, 180),
(0, 180),
(0, 180),
(0, 160),
(0, 180),
(0, 130),
(0, 180),
(0, 160),
(0, 180),
(50, 130),
(0, 180),
(0, 180),
(0, 180) ]
def __init__(self, ip, port, prefix):
yarp.RFModule.__init__(self)
self.ip = ip
self.port = int(port)
self.prefix = prefix
# self.last_pos = [-1] * len(EZModule.LIMITS)
def configure(self, rf):
name = self.__class__.__name__
if self.prefix:
name = self.prefix + '/' + name
self.setName(name)
# RPC Port
self.rpc_port = yarp.RpcServer()
# name settings
port_name = '/%s/%s' % (name, 'rpc')
if not self.rpc_port.open(port_name):
raise RuntimeError, EMSG_YARP_NOT_FOUND
self.attach_rpc_server(self.rpc_port)
return True
def interruptModule(self):
self.rpc_port.interrupt()
for x in dir(self):
if x.endswith('Port') and 'interrupt' in dir(getattr(self, x)):
getattr(self, x).interrupt()
return True
def close(self):
self.rpc_port.close()
for x in dir(self):
if x.endswith('Port') and 'close' in dir(getattr(self, x)):
getattr(self, x).close()
return True
def getPeriod(self):
return 0.1
def updateModule(self):
# XXX: I do not know why we need that, but if method is empty the module gets stuck
time.sleep(0.000001)
return True
def createInputPort(self, name, mode = 'unbuffered'):
""" This method returns an input port.
@param obj - the object that the port is created for
@param name - if a name is provided it gets appended to the modules name
        @param mode - port mode: 'buffered', 'rpcclient', 'rpcserver' or 'unbuffered'; default is 'unbuffered'.
@result port
"""
return self.__createPort(name + ':i', None, mode)
def __createPort(self, name, target = None, mode = 'unbuffered'):
""" This method returns a port object.
@param name - yarp name for the port
@param obj - object for which the port is created
        @param mode - port mode: 'buffered', 'rpcclient', 'rpcserver' or 'unbuffered'; default is 'unbuffered'.
@result port
"""
# create port
if mode == 'buffered':
port = yarp.BufferedPortBottle()
elif mode == 'rpcclient':
port = yarp.RpcClient()
elif mode == 'rpcserver':
port = yarp.RpcServer()
else:
port = yarp.Port()
# build port name
port_name = ['']
# prefix handling
if hasattr(self, 'prefix') and self.prefix:
port_name.append(self.prefix)
port_name.append(self.__class__.__name__)
port_name.append(name)
# open port
if not port.open('/'.join(port_name)):
raise RuntimeError, EMSG_YARP_NOT_FOUND
# add output if given
if target:
port.addOutput(target)
if hasattr(self, '_ports'):
self._ports.append(port)
return port
def createOutputPort(self, name, target = None, mode = 'unbuffered'):
""" This method returns an output port.
@param obj - the object that the port is created for
@param name - if a name is provided it gets appended to the modules name
        @param mode - port mode: 'buffered', 'rpcclient', 'rpcserver' or 'unbuffered'; default is 'unbuffered'.
@result port
"""
return self.__createPort(name + ':o', target, mode)
####################################################################################################
#
# Default methods for running the modules standalone
#
####################################################################################################
def createArgParser():
""" This method creates a base argument parser.
@return Argument Parser object
"""
parser = argparse.ArgumentParser(description='Create a JDModule to control the JD robot.')
parser.add_argument( '-i', '--ip',
dest = 'ip',
default = str(EZModule.TCP_IP),
help = 'IP address for the JD robot.')
parser.add_argument( '-p', '--port',
dest = 'port',
default = str(EZModule.TCP_PORT),
help = 'Port for the JD robot')
parser.add_argument( '-n', '--name',
dest = 'name',
default = '',
help = 'Name prefix for Yarp port names')
return parser.parse_args()
def main(module_cls):
""" This is a main method to run a module from command line.
@param module_cls - an EZModule based class that can be started as a standalone module.
"""
args = createArgParser()
yarp.Network.init()
resource_finder = yarp.ResourceFinder()
resource_finder.setVerbose(True)
# resource_finder.configure(argc,argv);
module = module_cls(args.ip, args.port, args.name)
module.runModule(resource_finder)
yarp.Network.fini()
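# Editorial sketch (not part of the original module; the subclass and port names are
# hypothetical): a minimal module built on EZModule and started through main().
#
#   class EchoModule(EZModule):
#
#       def configure(self, rf):
#           EZModule.configure(self, rf)
#           self.inPort = self.createInputPort('data', mode='buffered')
#           self.outPort = self.createOutputPort('data')
#           return True
#
#   if __name__ == '__main__':
#       main(EchoModule)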
| BrutusTT/pyJD | pyJD/EZModule.py | Python | agpl-3.0 | 8,093 |
import os
from xbrowse_server import xbrowse_controls
from django.core.management.base import BaseCommand
from xbrowse_server.base.models import Project, Individual, VCFFile
from xbrowse_server import sample_management
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('args', nargs='*')
parser.add_argument('--indiv-id')
parser.add_argument('--cohort-id')
parser.add_argument('--clear', action="store_true", help="Whether to clear any previously-added VCF paths before adding this one")
parser.add_argument('--load', action="store_true", help="Whether to also load the VCF data, and not just add record its path in the meta-data tables")
def handle(self, *args, **options):
project_id = args[0]
project = Project.objects.get(project_id=project_id)
vcf_file_path = os.path.abspath(args[1])
vcf_file = VCFFile.objects.get_or_create(file_path=vcf_file_path)[0]
if options.get('clear'):
for individual in project.individual_set.all():
individual.vcf_files.clear()
if options.get('indiv_id'):
individual = Individual.objects.get(
project=project,
indiv_id=options.get('indiv_id')
)
sample_management.add_vcf_file_to_individual(individual, vcf_file)
else:
sample_management.add_vcf_file_to_project(project, vcf_file)
if options.get('load'):
print("Loading VCF into project store")
xbrowse_controls.load_project(project_id, vcf_files=[vcf_file_path])
print("Loading VCF datastore")
xbrowse_controls.load_project_datastore(project_id, vcf_files=[vcf_file_path])
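# Editorial usage sketch (not part of the original command; project id and paths are
# hypothetical):
#   python manage.py add_vcf_to_project my_project /data/my_project.vcf --load
#   python manage.py add_vcf_to_project my_project /data/sample1.vcf --indiv-id SAMPLE1
# Passing --clear first detaches any previously added VCF paths from the project's individuals.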
| macarthur-lab/xbrowse | xbrowse_server/base/management/commands/add_vcf_to_project.py | Python | agpl-3.0 | 1,768 |
from __future__ import print_function
import time
from flask import Flask, session, url_for
from flask_debugtoolbar import DebugToolbarExtension
from weblablib import WebLab, requires_active, weblab_user, poll
app = Flask(__name__)
# XXX: IMPORTANT SETTINGS TO CHANGE
app.config['SECRET_KEY'] = 'something random' # e.g., run: os.urandom(32) and put the output here
app.config['WEBLAB_USERNAME'] = 'weblabdeusto' # This is the http_username you put in WebLab-Deusto
app.config['WEBLAB_PASSWORD'] = 'password' # This is the http_password you put in WebLab-Deusto
# XXX You should change...
# Use different cookie names for different labs
app.config['SESSION_COOKIE_NAME'] = 'lab'
# app.config['WEBLAB_UNAUTHORIZED_LINK'] = 'https://weblab.deusto.es/weblab/' # Your own WebLab-Deusto URL
# The URL for this lab (e.g., you might have two labs, /lab1 and /lab2 in the same server)
app.config['SESSION_COOKIE_PATH'] = '/lab'
# The session_id is stored in the Flask session. You might also use a different name
app.config['WEBLAB_SESSION_ID_NAME'] = 'lab_session_id'
# These are optional parameters
# Flask-Debug: don't intercept redirects (go directly)
app.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False
# app.config['WEBLAB_BASE_URL'] = '' # If you want the weblab path to start by /foo/weblab, you can put '/foo'
# app.config['WEBLAB_REDIS_URL'] = 'redis://localhost:6379/0' # default value
# app.config['WEBLAB_REDIS_BASE'] = 'lab1' # If you have more than one lab in the same redis database
# app.config['WEBLAB_CALLBACK_URL'] = '/lab/public' # If you don't pass it in the creator
# app.config['WEBLAB_TIMEOUT'] = 15 # in seconds, default value
# app.config['WEBLAB_SCHEME'] = 'https'
weblab = WebLab(app, callback_url='/lab/public')
toolbar = DebugToolbarExtension(app)
@weblab.initial_url
def initial_url():
"""
This returns the landing URL (e.g., where the user will be forwarded).
"""
return url_for('.lab')
@weblab.on_start
def on_start(client_data, server_data):
"""
    In this code, you can do something to set up the experiment. It is
called for every user, before they start using it.
"""
print("New user!")
print(weblab_user)
@weblab.on_dispose
def on_stop():
"""
In this code, you can do something to clean up the experiment. It is
guaranteed to be run.
"""
print("User expired. Here you should clean resources")
print(weblab_user)
@app.route('/lab/')
@requires_active
def lab():
"""
This is your code. If you provide @requires_active to any other URL, it is secured.
"""
user = weblab_user
return "Hello %s. You didn't poll in %.2f seconds (timeout configured to %s). Total time left: %s" % (user.username, user.time_without_polling, weblab.timeout, user.time_left)
@app.route("/")
def index():
return "<html><head></head><body><a href='{}'>Access to the lab</a></body></html>".format(url_for('.lab'))
if __name__ == '__main__':
print("Run the following:")
print()
print(" (optionally) $ export FLASK_DEBUG=1")
print(" $ export FLASK_APP={}".format(__file__))
print(" $ flask run")
print()
| weblabdeusto/weblablib | examples/simple/example.py | Python | agpl-3.0 | 3,126 |
# -*- coding: utf-8 -*-
"""
Class_LabExperimBased provides functionalities for data handling of data obtained in lab experiments in the field of (waste)water treatment.
Copyright (C) 2016 Chaim De Mulder
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see http://www.gnu.org/licenses/.
"""
import sys
#import os
#from os import listdir
#import pandas as pd
#import scipy as sp
#import numpy as np
#import datetime as dt
import matplotlib.pyplot as plt #plotten in python
import warnings as wn
from wwdata.Class_HydroData import HydroData
class LabExperimBased(HydroData):
"""
Superclass for a HydroData object, expanding the functionalities with
    specific functions for data gathered in lab experiments.
Attributes
----------
timedata_column : str
name of the column containing the time data
data_type : str
type of the data provided
experiment_tag : str
A tag identifying the experiment; can be a date or a code used by
the producer/owner of the data.
time_unit : str
The time unit in which the time data is given
units : array
The units of the variables in the columns
"""
def __init__(self,data,timedata_column='index',data_type='NAT',
experiment_tag='No tag given',time_unit=None):
"""
initialisation of a LabExperimBased object, based on a previously defined
HydroData object.
"""
HydroData.__init__(self,data,timedata_column=timedata_column,data_type=data_type,
experiment_tag=experiment_tag,time_unit=time_unit)
def hours(self,time_column='index'):
"""
calculates the hours from the relative values
Parameters
----------
time_column : string
column containing the relative time values; default to index
"""
if time_column == 'index':
self.data['index']=self.time.values
            self.data['h']= (self.data['index'])*24 + self.data['index'].shift(1)
self.data['h'].fillna(0,inplace=True)
self.data.drop('index', axis=1, inplace=True)
else:
self.data['h']= (self.data[time_column])*24 + self.data[time_column].shift(1)
self.data['h'].fillna(0,inplace=True)
def add_conc(self,column_name,x,y,new_name='default'):
"""
calculates the concentration values of the given column and adds them as
a new column to the DataFrame.
Parameters
----------
column_name : str
column with values
x : int
...
y : int
...
new_name : str
name of the new column, default to 'column_name + mg/L'
"""
if new_name == 'default':
new_name = column_name + ' ' + 'mg/L'
self.data[new_name] = self.data[column_name].values*x*y
## Instead of this function: define a dataframe/dict with conversion or
## concentration factors, so that you can have a function that automatically
## converts all parameters in the frame to concentrations
def check_ph(self,ph_column='pH',thresh=0.4):
"""
gives the maximal change in pH
Parameters
----------
ph_column : str
column with pH-values, default to 'pH'
threshold : int
threshold value for warning, default to '0.4'
"""
dph = self.data[ph_column].max()-self.data[ph_column].min()
if dph > thresh:
wn.warn('Strong change in pH during experiment!')
else:
self.delta_ph = dph
def in_out(self,columns):
"""
(start_values-end_values)
Parameters
----------
columns : array of strings
"""
inv=0
outv=0
indexes= self.time.values
for column in columns:
inv += self.data[column][indexes[0]]
for column in columns:
outv += self.data[column][indexes[-1]]
in_out = inv-outv
return in_out
def removal(self,columns):
"""
total removal of nitrogen
(1-(end_values/start_values))
Parameters
----------
columns : array of strings
"""
inv=0
outv=0
indexes= self.time.values
for column in columns:
inv += self.data[column][indexes[0]]
for column in columns:
outv += self.data[column][indexes[-1]]
removal = 1-(outv/inv)
return removal
def calc_slope(self,columns,time_column='h'):
"""
calculates the slope of the selected columns
Parameters
----------
columns : array of strings
columns to calculate the slope for
time_column : str
time used for calculation; default to 'h'
"""
for column in columns:
self.data[column + " " +'slope'] = (self.data[column].shift(1)-self.data[column])\
/(self.data[time_column]-self.data[time_column].shift(1))
def plot(self,columns,time_column='index'):
"""
        plots the selected columns as a function of time
Parameters
----------
columns : array of strings
columns to plot
time_column : str
            time data used for the x-axis; default to 'index'
"""
fig = plt.figure(figsize=(10,6))
ax = fig.add_subplot(111)
if time_column=='index':
for column in columns:
ax.plot(self.time,self.data[column],marker='o')
else:
for column in columns:
ax.plot(self.data[time_column],self.data[column],marker='o')
ax.legend()
return fig,ax
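    # Editorial usage sketch (not part of the original class; file and column names are
    # hypothetical):
    #   import pandas as pd
    #   data = pd.read_excel('batch_experiment.xlsx')
    #   exp = LabExperimBased(data, timedata_column='Time', experiment_tag='Batch 1')
    #   exp.hours(time_column='Time')
    #   exp.check_ph(ph_column='pH')
    #   print(exp.removal(['NH4', 'NO3']))
    #   fig, ax = exp.plot(['NH4', 'NO3'], time_column='h')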
#######################################
def _print_removed_output(original,new,type_):
"""
function printing the output of functions that remove datapoints.
Parameters
----------
original : int
original length of the dataset
new : int
length of the new dataset
type_ : str
'removed' or 'dropped'
"""
print('Original dataset:',original,'datapoints')
print('New dataset:',new,'datapoints')
print(original-new,'datapoints ',type_)
def _log_removed_output(log_file,original,new,type_):
"""
function writing the output of functions that remove datapoints to a log file.
Parameters
----------
log_file : str
string containing the directory to the log file to be written out
original : int
original length of the dataset
new : int
length of the new dataset
type_ : str
'removed' or 'dropped'
"""
log_file = open(log_file,'a')
    log_file.write('\nOriginal dataset: '+str(original)+' datapoints; new dataset: '+
                   str(new)+' datapoints; '+str(original-new)+' datapoints '+type_)
log_file.close()
| cdemulde/wwdata | wwdata/Class_LabExperimBased.py | Python | agpl-3.0 | 7,474 |
import source_navigation_steps
import functional_test
class TestSourceInterfaceNotFound(
functional_test.FunctionalTest,
source_navigation_steps.SourceNavigationStepsMixin):
def test_not_found(self):
self._source_not_found()
| garrettr/securedrop | securedrop/tests/functional/test_source_notfound.py | Python | agpl-3.0 | 256 |
from common.log import logUtils as log
from constants import clientPackets
from constants import serverPackets
def handle(userToken, packetData):
# get token data
username = userToken.username
# Read packet data
packetData = clientPackets.setAwayMessage(packetData)
# Set token away message
userToken.awayMessage = packetData["awayMessage"]
# Send private message from fokabot
if packetData["awayMessage"] == "":
fokaMessage = "Your away message has been reset"
else:
fokaMessage = "Your away message is now: {}".format(packetData["awayMessage"])
userToken.enqueue(serverPackets.sendMessage("FokaBot", username, fokaMessage))
log.info("{} has changed their away message to: {}".format(username, packetData["awayMessage"]))
| osuripple/pep.py | events/setAwayMessageEvent.py | Python | agpl-3.0 | 743 |
"""
WSGI config for tumuli project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tumuli.settings")
application = get_wsgi_application()
| daonb/tumulus | tumuli/wsgi.py | Python | agpl-3.0 | 389 |
"""course_discovery URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
import os
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth.views import logout
from django.core.urlresolvers import reverse_lazy
from django.views.generic import RedirectView
from course_discovery.apps.core import views as core_views
admin.autodiscover()
# pylint: disable=invalid-name
# Always login via edX OpenID Connect
login = RedirectView.as_view(url=reverse_lazy('social:begin', args=['edx-oidc']), permanent=False, query_string=True)
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^api/', include('course_discovery.apps.api.urls', namespace='api')),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^auto_auth/$', core_views.AutoAuth.as_view(), name='auto_auth'),
url(r'^health/$', core_views.health, name='health'),
url(r'^login/$', login, name='login'),
url(r'^logout/$', logout, name='logout'),
url('', include('social.apps.django_app.urls', namespace='social')),
]
if settings.DEBUG and os.environ.get('ENABLE_DJANGO_TOOLBAR', False): # pragma: no cover
import debug_toolbar # pylint: disable=import-error
urlpatterns.append(url(r'^__debug__/', include(debug_toolbar.urls)))
| cpennington/course-discovery | course_discovery/urls.py | Python | agpl-3.0 | 1,899 |
# Copyright (C) 2021 ForgeFlow S.L.
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html)
from odoo.tests.common import TransactionCase
class TestProjectDuplicateSubtask(TransactionCase):
def setUp(self):
super().setUp()
self.project1 = self.env["project.project"].create({"name": "Project 1"})
self.task1 = self.env["project.task"].create(
{"name": "name1", "project_id": self.project1.id}
)
self.subtask1 = self.env["project.task"].create(
{"name": "2", "project_id": self.project1.id, "parent_id": self.task1.id}
)
self.subtask2 = self.env["project.task"].create(
{"name": "3", "project_id": self.project1.id, "parent_id": self.task1.id}
)
def test_check_subtasks(self):
self.task1.action_duplicate_subtasks()
new_task = self.env["project.task"].search(
[("name", "ilike", self.task1.name), ("name", "ilike", "copy")]
)
self.assertEqual(
len(new_task.child_ids), 2, "Two subtasks should have been created"
)
| OCA/project | project_duplicate_subtask/tests/test_project_duplicate_subtask.py | Python | agpl-3.0 | 1,104 |
# This file is part of authapi.
# Copyright (C) 2014-2020 Agora Voting SL <[email protected]>
# authapi is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License.
# authapi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with authapi. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls import url
from .decorators import captcha_required
from captcha import views
urlpatterns = [
url(r'^new/', views.new_captcha, name='new_captcha'),
]
| agoravoting/authapi | authapi/captcha/urls.py | Python | agpl-3.0 | 874 |
# © 2008-2020 Dorin Hongu <dhongu(@)gmail(.)com
# See README.rst file on addons root folder for license details
{
"name": "Romania - Invoice Report ",
"summary": "Localizare Terrabit",
"version": "14.0.3.0.3",
"author": "Dorin Hongu," "Odoo Community Association (OCA)",
"website": "https://github.com/OCA/l10n-romania",
"license": "AGPL-3",
"category": "Generic Modules",
"depends": [
"base",
"account",
"l10n_ro_config",
"purchase",
# "deltatech_watermark"
],
"data": [
"views/invoice_report.xml",
"views/voucher_report.xml",
"views/payment_report.xml",
# 'views/account_invoice_view.xml',
"views/account_voucher_report.xml",
"views/account_bank_statement_view.xml",
"views/statement_report.xml",
# 'views/res_partner_view.xml',
],
}
| dhongu/l10n-romania | l10n_ro_invoice_report/__manifest__.py | Python | agpl-3.0 | 887 |
##############################################################################
#
# Copyright (C) 2014 Comunitea Servicios Tecnológicos All Rights Reserved
# $Kiko Sánchez <[email protected]>$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from odoo import fields, models, api
from datetime import datetime
class OutletLoss(models.Model):
_name = 'outlet.loss'
@api.multi
@api.depends('qty', 'price_outlet', 'price_unit')
def _get_outlet_loss(self):
for loss in self:
loss.total_lost = loss.qty*(loss.price_outlet-loss.price_unit)
product_id = fields.Many2one('product.product', 'Product')
price_unit = fields.Float('Price')
price_outlet = fields.Float('Outlet Price')
total_lost = fields.Float("Outlet Loss", compute=_get_outlet_loss,
store=True, readonly=True)
date_move = fields.Date('Move to outlet on', default=fields.datetime.now())
outlet_ok = fields.Boolean('Outlet')
order_line_id = fields.Many2one('sale.order.line', 'Order Line')
qty = fields.Float('Quantity')
percent = fields.Float('Outlet Percent')
| Comunitea/CMNT_004_15 | project-addons/product_outlet_loss/models/product.py | Python | agpl-3.0 | 1,853 |
import tempfile
from datetime import datetime
import flask_testing
from flask import url_for
import iis
from iis.models import User
from iis.extensions import db
class BaseTestCase(flask_testing.TestCase):
DB_FILE = tempfile.mkstemp()
SQLALCHEMY_DATABASE_URI = "sqlite:///" + DB_FILE[1]
LOGGING = {"version": 1}
TESTING = True
WTF_CSRF_ENABLED = False
USER_ENABLE_LOGIN_WITHOUT_CONFIRM = True
def create_app(self):
ret = iis.create_app(self.__class__)
app = ret[0]
self.user_manager = ret[1]
return app
def setUp(self):
db.create_all()
self.create_user("admin", "passW1")
def tearDown(self):
db.session.remove()
db.drop_all()
def login(self, username=None, password=None):
username = username or "admin"
password = password or "passW1"
self.client.post(url_for('user.login'), data=dict(
username=username,
password=password
), follow_redirects=False)
return User.query.filter_by(username=username).one()
def logout(self):
self.client.get(url_for("user.logout"))
def create_user(self, username, password):
user = User(username=username,
password=self.user_manager.hash_password(password),
email=username + "@localhost",
confirmed_at=datetime.fromtimestamp(0.0),
active=True)
db.session.add(user)
db.session.commit()
return user
def assertLoginRequired(self, url):
self.logout()
res = self.client.get(url)
self.assertEqual(302, res.status_code)
self.assertIn(url_for('user.login'), res.headers['Location'])
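# Editorial sketch (not part of the original helpers; blueprint and view names are
# hypothetical): a test case built on BaseTestCase.
#
#   class DashboardTests(BaseTestCase):
#
#       def test_requires_login(self):
#           self.assertLoginRequired(url_for('dashboard.index'))
#
#       def test_login_helper_returns_user(self):
#           user = self.login()
#           self.assertEqual(user.username, "admin")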
| interactomix/iis | test_utils/base.py | Python | agpl-3.0 | 1,749 |
import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-isegory',
version='0.1',
packages=['isegory'],
include_package_data=True,
license='AGPL',
description='A simple Django app to declare the provenance of a dataset.',
long_description=README,
url='http://github.com/jdelacueva/django-isegory/',
author='Javier de la Cueva',
author_email='[email protected]',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: AGPL',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
| jdelacueva/django-isegory | setup.py | Python | agpl-3.0 | 1,049 |
# Recall is a program for storing bookmarks of different things
# Copyright (C) 2012 Cal Paterson
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import uuid
from bottle import Bottle, request, response, abort
import bcrypt
from recall.data import whitelist, blacklist
from recall import convenience as c
from recall import plugins, jobs, messages
app = Bottle()
app.install(plugins.exceptions)
app.install(plugins.ppjson)
app.install(plugins.auth)
app.install(plugins.cors)
app.error_handler = plugins.handler_dict
logger = c.logger("people")
@app.get("/")
def users():
abort(503, "Not yet implemented")
@app.get("/<who>/")
def user_(who):
try:
return whitelist(c.db().users.find_one({"email": who}), [
"email",
"firstName",
"pseudonym"
])
except TypeError:
logger.warn("Asked about {email}, but that is not a user".format(
email=who))
abort(404, "User not found")
@app.get("/<who>/self")
def _self(who, user):
if who != user["email"]:
response.status = 400
else:
return whitelist(user, ["pseudonym",
"firstName",
"surname",
"email",
"private_email"])
@app.post("/<who>/")
def request_invite(who):
# FIXME: Don't allow the pseudonym "public"
user = whitelist(request.json, [
"pseudonym",
"firstName",
"surname",
"private_email",
"token",
])
if "private_email" not in user:
abort(400, "You must provide a private_email field")
user["email_key"] = str(uuid.uuid4())
user["registered"] = c.unixtime()
user["email"] = who
c.db().users.ensure_index("email", unique=True)
c.db().users.insert(user, safe=True)
response.status = 202
logger.info("{email} subscribed".format(email=who))
jobs.enqueue(messages.SendInvite(user))
@app.post("/<who>/<email_key>")
def verify_email(who, email_key):
if "RECALL_TEST_MODE" in c.settings or "RECALL_DEBUG_MODE" in c.settings:
salt = bcrypt.gensalt(1)
else:
salt = bcrypt.gensalt()
password_hash = bcrypt.hashpw(request.json["password"], salt)
spec = {"email_key": email_key, "verified": {"$exists": False}}
update = {"$set": {"password_hash": password_hash,
"verified": c.unixtime()}}
success = c.db().users.update(spec, update, safe=True)["updatedExisting"]
if not success:
if c.db().users.find_one({"email_key": email_key}):
logger.warn("{email} tried to verify a second time".format(email=who))
abort(403, "Already verified")
else:
logger.warn("Someone tried to verify with a key, but it doesn't exist")
abort(404, "Don't know that key")
user = c.db().users.find_one({"email_key": email_key})
response.status = 201
return blacklist(user, ["_id", "email_key", "password_hash"])
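# Illustrative client-side sketch (an addition, not part of the original module):
# the signup flow exposed above could be exercised roughly as follows; the base
# URL, mount point and payload values are hypothetical.
#
#   import requests
#   base = "http://localhost:8080"
#   # Ask for an invite on behalf of a public email address:
#   requests.post(base + "/[email protected]/",
#                 json={"pseudonym": "alice", "private_email": "[email protected]"})
#   # Later, use the emailed key to set a password and verify the address:
#   requests.post(base + "/[email protected]/" + email_key,
#                 json={"password": "correct horse battery staple"})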
| calpaterson/recall | src/recall/people.py | Python | agpl-3.0 | 3,665 |
from collections import defaultdict
from fs.errors import ResourceNotFoundError
import logging
import inspect
import re
from path import path
from django.http import Http404
from django.conf import settings
from .module_render import get_module
from xmodule.course_module import CourseDescriptor
from xmodule.modulestore import Location, XML_MODULESTORE_TYPE
from xmodule.modulestore.django import modulestore
from xmodule.contentstore.content import StaticContent
from xmodule.modulestore.exceptions import ItemNotFoundError, InvalidLocationError
from courseware.model_data import FieldDataCache
from static_replace import replace_static_urls
from courseware.access import has_access
import branding
log = logging.getLogger(__name__)
def get_request_for_thread():
"""Walk up the stack, return the nearest first argument named "request"."""
frame = None
try:
for f in inspect.stack()[1:]:
frame = f[0]
code = frame.f_code
if code.co_varnames[:1] == ("request",):
return frame.f_locals["request"]
elif code.co_varnames[:2] == ("self", "request",):
return frame.f_locals["request"]
finally:
del frame
def get_course(course_id, depth=0):
"""
Given a course id, return the corresponding course descriptor.
If course_id is not valid, raises a ValueError. This is appropriate
for internal use.
depth: The number of levels of children for the modulestore to cache.
None means infinite depth. Default is to fetch no children.
"""
try:
course_loc = CourseDescriptor.id_to_location(course_id)
return modulestore().get_instance(course_id, course_loc, depth=depth)
except (KeyError, ItemNotFoundError):
raise ValueError("Course not found: {}".format(course_id))
except InvalidLocationError:
raise ValueError("Invalid location: {}".format(course_id))
def get_course_by_id(course_id, depth=0):
"""
Given a course id, return the corresponding course descriptor.
If course_id is not valid, raises a 404.
depth: The number of levels of children for the modulestore to cache. None means infinite depth
"""
try:
course_loc = CourseDescriptor.id_to_location(course_id)
return modulestore().get_instance(course_id, course_loc, depth=depth)
except (KeyError, ItemNotFoundError):
raise Http404("Course not found.")
except InvalidLocationError:
raise Http404("Invalid location")
def get_course_with_access(user, course_id, action, depth=0):
"""
Given a course_id, look up the corresponding course descriptor,
check that the user has the access to perform the specified action
on the course, and return the descriptor.
Raises a 404 if the course_id is invalid, or the user doesn't have access.
depth: The number of levels of children for the modulestore to cache. None means infinite depth
"""
course = get_course_by_id(course_id, depth=depth)
if not has_access(user, course, action):
# Deliberately return a non-specific error message to avoid
# leaking info about access control settings
raise Http404("Course not found.")
return course
def get_opt_course_with_access(user, course_id, action):
"""
Same as get_course_with_access, except that if course_id is None,
return None without performing any access checks.
"""
if course_id is None:
return None
return get_course_with_access(user, course_id, action)
def course_image_url(course):
"""Try to look up the image url for the course. If it's not found,
log an error and return the dead link"""
if course.static_asset_path or modulestore().get_modulestore_type(course.location.course_id) == XML_MODULESTORE_TYPE:
return '/static/' + (course.static_asset_path or getattr(course, 'data_dir', '')) + "/images/course_image.jpg"
else:
loc = course.location.replace(tag='c4x', category='asset', name=course.course_image)
_path = StaticContent.get_url_path_from_location(loc)
return _path
def find_file(filesystem, dirs, filename):
"""
Looks for a filename in a list of dirs on a filesystem, in the specified order.
filesystem: an OSFS filesystem
dirs: a list of path objects
filename: a string
Returns d / filename if found in dir d, else raises ResourceNotFoundError.
"""
for directory in dirs:
filepath = path(directory) / filename
if filesystem.exists(filepath):
return filepath
raise ResourceNotFoundError("Could not find {0}".format(filename))
def get_course_about_section(course, section_key):
"""
This returns the snippet of html to be rendered on the course about page,
given the key for the section.
Valid keys:
- overview
- title
- university
- number
- short_description
- description
- key_dates (includes start, end, exams, etc)
- video
- course_staff_short
- course_staff_extended
- requirements
- syllabus
- textbook
- faq
- more_info
- ocw_links
"""
# Many of these are stored as html files instead of some semantic
    # markup. This can change without affecting this interface when we find a
# good format for defining so many snippets of text/html.
# TODO: Remove number, instructors from this list
if section_key in ['short_description', 'description', 'key_dates', 'video',
'course_staff_short', 'course_staff_extended',
'requirements', 'syllabus', 'textbook', 'faq', 'more_info',
'number', 'instructors', 'overview',
'effort', 'end_date', 'prerequisites', 'ocw_links']:
try:
request = get_request_for_thread()
loc = course.location.replace(category='about', name=section_key)
# Use an empty cache
field_data_cache = FieldDataCache([], course.id, request.user)
about_module = get_module(
request.user,
request,
loc,
field_data_cache,
course.id,
not_found_ok=True,
wrap_xmodule_display=False,
static_asset_path=course.static_asset_path
)
html = ''
if about_module is not None:
html = about_module.render('student_view').content
return html
except ItemNotFoundError:
log.warning("Missing about section {key} in course {url}".format(
key=section_key, url=course.location.url()))
return None
elif section_key == "title":
return course.display_name_with_default
elif section_key == "university":
return course.display_org_with_default
elif section_key == "number":
return course.display_number_with_default
raise KeyError("Invalid about key " + str(section_key))
def get_course_info_section(request, course, section_key):
"""
This returns the snippet of html to be rendered on the course info page,
given the key for the section.
Valid keys:
- handouts
- guest_handouts
- updates
- guest_updates
"""
loc = Location(course.location.tag, course.location.org, course.location.course, 'course_info', section_key)
# Use an empty cache
field_data_cache = FieldDataCache([], course.id, request.user)
info_module = get_module(
request.user,
request,
loc,
field_data_cache,
course.id,
wrap_xmodule_display=False,
static_asset_path=course.static_asset_path
)
html = ''
if info_module is not None:
html = info_module.render('student_view').content
return html
# TODO: Fix this such that these are pulled in as extra course-specific tabs.
# arjun will address this by the end of October if no one does so prior to
# then.
def get_course_syllabus_section(course, section_key):
"""
This returns the snippet of html to be rendered on the syllabus page,
given the key for the section.
Valid keys:
- syllabus
- guest_syllabus
"""
# Many of these are stored as html files instead of some semantic
    # markup. This can change without affecting this interface when we find a
# good format for defining so many snippets of text/html.
if section_key in ['syllabus', 'guest_syllabus']:
try:
filesys = course.system.resources_fs
# first look for a run-specific version
dirs = [path("syllabus") / course.url_name, path("syllabus")]
filepath = find_file(filesys, dirs, section_key + ".html")
with filesys.open(filepath) as html_file:
return replace_static_urls(
html_file.read().decode('utf-8'),
getattr(course, 'data_dir', None),
course_id=course.location.course_id,
static_asset_path=course.static_asset_path,
)
except ResourceNotFoundError:
log.exception("Missing syllabus section {key} in course {url}".format(
key=section_key, url=course.location.url()))
return "! Syllabus missing !"
raise KeyError("Invalid about key " + str(section_key))
def get_courses_by_university(user, domain=None):
'''
    Returns dict of lists of courses available, keyed by course.org (i.e. university).
Courses are sorted by course.number.
'''
# TODO: Clean up how 'error' is done.
# filter out any courses that errored.
visible_courses = get_courses(user, domain)
universities = defaultdict(list)
for course in visible_courses:
universities[course.org].append(course)
return universities
def get_courses(user, domain=None):
'''
Returns a list of courses available, sorted by course.number
'''
courses = branding.get_visible_courses(domain)
courses = [c for c in courses if has_access(user, c, 'see_exists')]
courses = sorted(courses, key=lambda course: course.number)
return courses
def sort_by_announcement(courses):
"""
Sorts a list of courses by their announcement date. If the date is
not available, sort them by their start date.
"""
    # Sort courses by how far they are from their start date
key = lambda course: course.sorting_score
courses = sorted(courses, key=key)
return courses
def get_cms_course_link_by_id(course_id):
"""
    Returns a protocol-relative link to course_index for editing the course in cms, assuming that the course is
    actually cms-backed. If course_id is improperly formatted, just return the root of the cms.
"""
format_str = r'^(?P<org>[^/]+)/(?P<course>[^/]+)/(?P<name>[^/]+)$'
host = "//{}/".format(settings.CMS_BASE) # protocol-relative
m_obj = re.match(format_str, course_id)
if m_obj:
return "{host}{org}/{course}/course/{name}".format(host=host,
org=m_obj.group('org'),
course=m_obj.group('course'),
name=m_obj.group('name'))
return host
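# Illustrative example (an addition, not part of the original module): with a
# hypothetical settings.CMS_BASE of "studio.example.com", a well-formed legacy
# course id maps as
#
#   get_cms_course_link_by_id("MITx/6.002x/2012_Fall")
#   # -> "//studio.example.com/MITx/6.002x/course/2012_Fall"
#
# while an id that does not match the org/course/name pattern just returns
# "//studio.example.com/".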
| abo-abo/edx-platform | lms/djangoapps/courseware/courses.py | Python | agpl-3.0 | 11,362 |
import io
import pytest
import databot
import pandas as pd
from databot.db.utils import Row
from databot.exporters.utils import flatten_nested_lists, flatten_nested_dicts, get_level_keys, flatten, sort_fields
from databot.exporters import jsonl
from databot.exporters import pandas
@pytest.fixture
def data():
return {
'a': 1,
'b': 2,
'c': {
'x': 1,
'y': 2,
'z': ['foo', 'bar', 'baz'],
}
}
def test_flatten_rows_update(data):
rows = [
Row(key=1, value={'text': 'abc'}),
Row(key=1, value={'text': 'abcde'}),
]
update = {'size': databot.this.value.text.apply(len)}
assert list(flatten(rows, include=['key', 'size'], update=update)) == [
('key', 'size'),
(1, 3),
(1, 5),
]
def test_flattenjson():
rows = [
{'key': 1, 'value': {'foo': 'bar', 'events': [
{'name': 'Event 1', 'date': '2017-01-01', 'people': ['a', 'b']},
{'name': 'Event 2', 'date': '2017-01-02', 'people': ['a']},
]}},
{'key': 2, 'value': {'foo': 'baz', 'events': [
{'name': 'Event 3', 'date': '2017-01-03', 'people': ['x', 'y']},
{'name': 'Event 4', 'date': '2017-01-04', 'people': ['z']},
]}},
]
assert list(map(dict, flatten_nested_lists(rows, include={('key',), ('value', 'events', 'date')}))) == [
{('key',): 1, ('value', 'events', 'date'): '2017-01-01'},
{('key',): 1, ('value', 'events', 'date'): '2017-01-02'},
{('key',): 2, ('value', 'events', 'date'): '2017-01-03'},
{('key',): 2, ('value', 'events', 'date'): '2017-01-04'},
]
assert list(map(dict, flatten_nested_lists(rows, include={('key',), ('value', 'events', 'people')}))) == [
{('key',): 1, ('value', 'events', 'people'): 'a'},
{('key',): 1, ('value', 'events', 'people'): 'b'},
{('key',): 1, ('value', 'events', 'people'): 'a'},
{('key',): 2, ('value', 'events', 'people'): 'x'},
{('key',): 2, ('value', 'events', 'people'): 'y'},
{('key',): 2, ('value', 'events', 'people'): 'z'},
]
assert [{v for k, v in x} for x in flatten_nested_lists(rows, include=[('key',), ('value',)])] == [
{1, 'bar', '2017-01-01', 'Event 1', 'a'},
{1, 'bar', '2017-01-01', 'Event 1', 'b'},
{1, 'bar', '2017-01-02', 'Event 2', 'a'},
{2, 'baz', '2017-01-03', 'Event 3', 'x'},
{2, 'baz', '2017-01-03', 'Event 3', 'y'},
{2, 'baz', '2017-01-04', 'Event 4', 'z'},
]
assert [{v for k, v in x} for x in flatten_nested_lists(rows)] == [
{1, 'bar', '2017-01-01', 'Event 1', 'a'},
{1, 'bar', '2017-01-01', 'Event 1', 'b'},
{1, 'bar', '2017-01-02', 'Event 2', 'a'},
{2, 'baz', '2017-01-03', 'Event 3', 'x'},
{2, 'baz', '2017-01-03', 'Event 3', 'y'},
{2, 'baz', '2017-01-04', 'Event 4', 'z'},
]
def test_flatten_nested_dicts():
assert set(flatten_nested_dicts({'a': 1, 'b': 2, 'c': 3})) == {
(('a',), 1),
(('b',), 2),
(('c',), 3),
}
def test_flatten_nested_dicts_include():
assert set(flatten_nested_dicts({'a': 1, 'b': 2, 'c': 3}, include=[('b',), ('a',), ('c',)])) == {
(('b',), 2),
(('a',), 1),
(('c',), 3),
}
def test_get_level_keys():
assert list(get_level_keys(keys=['c', 'b', 'a'], field=(), include=())) == ['a', 'b', 'c']
assert list(get_level_keys(keys=['c', 'b', 'a'], field=(), include=[('b',), ('a',), ('c',)])) == ['b', 'a', 'c']
assert list(get_level_keys(keys=['c', 'b', 'a'], field=('x',), include=())) == ['a', 'b', 'c']
assert list(get_level_keys(keys=['c', 'b', 'a'], field=('x',), include=[('x', 'b',), ('x', 'c',)])) == ['b', 'c']
assert list(get_level_keys(keys=['c', 'b', 'a'], field=(), include=[('b',), ('x',)])) == ['b']
assert list(get_level_keys(keys=['c', 'b', 'a'], field=('x', 'y'), include=[('x',)])) == ['a', 'b', 'c']
def test_flatten():
rows = [
Row(key=1, value={'foo': 'bar', 'events': [
{'name': 'Event 1', 'date': '2017-01-01', 'people': ['a', 'b']},
{'name': 'Event 2', 'date': '2017-01-02', 'people': ['a']},
]}),
Row(key=2, value={'foo': 'baz', 'events': [
{'name': 'Event 3', 'date': '2017-01-03', 'people': ['x', 'y']},
{'name': 'Event 4', 'date': '2017-01-04', 'people': ['z']},
]}),
]
assert list(flatten(rows)) == [
('events.date', 'events.name', 'events.people', 'foo', 'key'),
('2017-01-01', 'Event 1', 'a', 'bar', 1),
('2017-01-01', 'Event 1', 'b', 'bar', 1),
('2017-01-02', 'Event 2', 'a', 'bar', 1),
('2017-01-03', 'Event 3', 'x', 'baz', 2),
('2017-01-03', 'Event 3', 'y', 'baz', 2),
('2017-01-04', 'Event 4', 'z', 'baz', 2),
]
assert list(flatten(rows, include=('key', 'foo', 'events.people'))) == [
('key', 'foo', 'events.people'),
(1, 'bar', 'a'),
(1, 'bar', 'b'),
(1, 'bar', 'a'),
(2, 'baz', 'x'),
(2, 'baz', 'y'),
(2, 'baz', 'z'),
]
assert list(flatten(rows, include=('key', 'foo'))) == [
('key', 'foo'),
(1, 'bar'),
(2, 'baz'),
]
def test_sort_fields():
def _(fields, include):
fields = [tuple(x.split('.')) for x in fields]
include = [tuple(x.split('.')) for x in include]
return ['.'.join(x) for x in sort_fields(fields, include)]
assert _(['c', 'b', 'a'], []) == ['a', 'b', 'c']
assert _(['c', 'b', 'a'], ['a', 'c']) == ['a', 'c']
assert _(['x.c', 'x.b', 'x.a'], ['x']) == ['x.a', 'x.b', 'x.c']
assert _(['z', 'x.b', 'x.a'], ['x', 'z']) == ['x.a', 'x.b', 'z']
def test_flatten_rows_update_without_include(data):
rows = [
Row(key=1, value={'text': 'abc'}),
Row(key=1, value={'text': 'abcde'}),
]
update = {'size': databot.this.value.text.apply(len)}
assert list(flatten(rows, update=update)) == [
('key', 'size', 'text'),
(1, 3, 'abc'),
(1, 5, 'abcde'),
]
def test_flatten_rows_callable_update(data):
rows = [
Row(key=1, value={'text': 'abc'}),
Row(key=1, value={'text': 'abcde'}),
]
def update(row):
return {'size': len(row.value['text'])}
assert list(flatten(rows, update=update)) == [
('size',),
(3,),
(5,),
]
def test_flatten_rows_include(data):
rows = [
Row(key=1, value={'a': 1}),
Row(key=2, value={'b': 2}),
]
assert list(flatten(rows, include=['a', 'b'])) == [
('a', 'b'),
(1, None),
(None, 2),
]
def test_flatten_rows_include_value(data):
rows = [
Row(key=1, value='a'),
Row(key=2, value='b'),
]
assert list(flatten(rows, include=['key', 'value'])) == [
('key', 'value'),
(1, 'a'),
(2, 'b'),
]
def test_flatten_rows_value(data):
rows = [
Row(key=1, value='a'),
Row(key=2, value='b'),
]
assert list(flatten(rows)) == [
('key', 'value'),
(1, 'a'),
(2, 'b'),
]
def test_flatten_int_key(data):
rows = [
Row(key=1, value={'year': {2000: 1, 2001: 2}}),
Row(key=2, value={'year': {2000: 3, 2001: 4}}),
]
assert list(flatten(rows)) == [
('key', 'year.2000', 'year.2001'),
(1, 1, 2),
(2, 3, 4),
]
def test_flatten_list(data):
rows = [
Row(key=1, value={'events': [
{'name': 'Event 1', 'date': '2017-01-01'},
{'name': 'Event 2', 'date': '2017-02-01'},
]}),
Row(key=2, value={'events': [
{'name': 'Event 3', 'date': '2017-03-01'},
{'name': 'Event 4', 'date': '2017-04-01'},
]}),
]
assert list(flatten(rows)) == [
('events.date', 'events.name', 'key'),
('2017-01-01', 'Event 1', 1),
('2017-02-01', 'Event 2', 1),
('2017-03-01', 'Event 3', 2),
('2017-04-01', 'Event 4', 2),
]
def test_jsonl(bot):
pipe = bot.define('p1').append([('1', 'a'), ('2', 'b')])
stream = io.StringIO()
jsonl.export(stream, pipe.rows())
assert stream.getvalue().splitlines() == [
'{"key": "1", "value": "a"}',
'{"key": "2", "value": "b"}',
]
def test_jsonl_dict(bot):
pipe = bot.define('p1').append([('1', {'a': 2}), ('2', {'b': 3})])
stream = io.StringIO()
jsonl.export(stream, pipe.rows())
assert stream.getvalue().splitlines() == [
'{"key": "1", "a": 2}',
'{"key": "2", "b": 3}',
]
def test_pandas_rows_to_dataframe_items():
rows = [
[1, 'a', 'x'],
[2, 'b', 'y'],
]
assert list(pandas.rows_to_dataframe_items(rows, 0)) == [
(1, ['a', 'x']),
(2, ['b', 'y'])
]
assert list(pandas.rows_to_dataframe_items(rows, 2)) == [
('x', [1, 'a']),
('y', [2, 'b'])
]
def test_pandas(bot):
pipe = bot.define('p1').append([
(1, {'a': 10}),
(2, {'a': 20}),
])
assert [dict(x._asdict()) for x in pipe.export(pd).itertuples()] == [
{'Index': 1, 'a': 10.0},
{'Index': 2, 'a': 20.0},
]
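# Note (an addition, not part of the original file): these are pytest-style
# tests, normally run with something like `pytest tests/test_export.py`; the
# `bot` fixture used by the exporter tests is assumed to be provided by a
# conftest.py elsewhere in the repository.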
| sirex/databot | tests/test_export.py | Python | agpl-3.0 | 9,232 |
############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
############################################################################
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404, render
from base.models.person import Person
from base.views.learning_units.common import get_learning_unit_identification_context
@login_required
def learning_unit_identification(request, learning_unit_year_id):
person = get_object_or_404(Person, user=request.user)
context = get_learning_unit_identification_context(learning_unit_year_id, person)
learning_unit_year = context['learning_unit_year']
if learning_unit_year.is_external():
template = "learning_unit/external/read.html"
permission = 'base.can_access_externallearningunityear'
else:
template = "learning_unit/identification.html"
permission = 'base.can_access_learningunit'
if not person.user.has_perm(permission):
raise PermissionDenied
return render(request, template, context)
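# Illustrative URL wiring sketch (an addition, not part of the original module):
# the view above would typically be exposed through a urlconf entry roughly like
#
#   url(r'^learning_units/(?P<learning_unit_year_id>[0-9]+)/$',
#       learning_unit_identification,
#       name='learning_unit'),
#
# where the exact pattern and name are assumptions, not taken from the project.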
| uclouvain/osis_louvain | base/views/learning_units/detail.py | Python | agpl-3.0 | 2,249 |