Dataset schema (per-column type and value statistics):

    repo_name        string   (lengths 5 to 92)
    path             string   (lengths 4 to 221)
    copies           string   (19 classes)
    size             string   (lengths 4 to 6)
    content          string   (lengths 766 to 896k)
    license          string   (15 classes)
    hash             int64    (-9,223,277,421,539,062,000 to 9,223,102,107B)
    line_mean        float64  (6.51 to 99.9)
    line_max         int64    (32 to 997)
    alpha_frac       float64  (0.25 to 0.96)
    autogenerated    bool     (1 class)
    ratio            float64  (1.5 to 13.6)
    config_test      bool     (2 classes)
    has_no_keywords  bool     (2 classes)
    few_assignments  bool     (1 class)
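The records below follow this schema, one record per source file. Assuming the dump was exported from a Hugging Face-style dataset (an assumption; the dataset identifier in the sketch is a placeholder, not a real path), a filtered subset could be loaded roughly as follows:

    # Sketch only: "your-org/code-dump" is a hypothetical dataset id.
    from datasets import load_dataset

    ds = load_dataset("your-org/code-dump", split="train")

    # Keep permissively licensed, non-autogenerated files.
    subset = ds.filter(
        lambda row: row["license"] in {"mit", "apache-2.0", "bsd-3-clause"}
        and not row["autogenerated"]
    )

    for row in subset.select(range(3)):
        print(row["repo_name"], row["path"], row["size"], row["license"])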
repo_name: dabrahams/0install | path: zeroinstall/0launch-gui/main.py | copies: 1 | size: 6132
# Copyright (C) 2009, Thomas Leonard # See the README file for details, or visit http://0install.net. from __future__ import print_function import os, sys import logging import warnings from optparse import OptionParser from zeroinstall import _, SafeException from zeroinstall.injector import requirements from zeroinstall.injector.driver import Driver from zeroinstall.injector.config import load_config from zeroinstall.support import tasks _recalculate = tasks.Blocker('recalculate') def recalculate(): """Ask the mainloop to recalculate. If we're already recalculating, wait for that to finish and then do it again.""" global _recalculate _recalculate.trigger() _recalculate = tasks.Blocker('recalculate') def run_gui(args): parser = OptionParser(usage=_("usage: %prog [options] interface")) parser.add_option("", "--before", help=_("choose a version before this"), metavar='VERSION') parser.add_option("", "--cpu", help=_("target CPU type"), metavar='CPU') parser.add_option("", "--command", help=_("command to select"), metavar='COMMAND') parser.add_option("-d", "--download-only", help=_("fetch but don't run"), action='store_true') parser.add_option("-g", "--force-gui", help=_("display an error if there's no GUI"), action='store_true') parser.add_option("", "--message", help=_("message to display when interacting with user")) parser.add_option("", "--not-before", help=_("minimum version to choose"), metavar='VERSION') parser.add_option("", "--os", help=_("target operation system type"), metavar='OS') parser.add_option("-r", "--refresh", help=_("check for updates of all interfaces"), action='store_true') parser.add_option("", "--select-only", help=_("only download the feeds"), action='store_true') parser.add_option("-s", "--source", help=_("select source code"), action='store_true') parser.add_option("", "--systray", help=_("download in the background"), action='store_true') parser.add_option("-v", "--verbose", help=_("more verbose output"), action='count') parser.add_option("-V", "--version", help=_("display version information"), action='store_true') parser.add_option("", "--with-store", help=_("add an implementation cache"), action='append', metavar='DIR') parser.disable_interspersed_args() (options, args) = parser.parse_args(args) if options.verbose: logger = logging.getLogger() if options.verbose == 1: logger.setLevel(logging.INFO) else: logger.setLevel(logging.DEBUG) if options.version: import gui print("0launch-gui (zero-install) " + gui.version) print("Copyright (C) 2010 Thomas Leonard") print(_("This program comes with ABSOLUTELY NO WARRANTY," "\nto the extent permitted by law." "\nYou may redistribute copies of this program" "\nunder the terms of the GNU Lesser General Public License." 
"\nFor more information about these matters, see the file named COPYING.")) sys.exit(0) def nogui(ex): if options.force_gui: fn = logging.warn else: fn = logging.info fn("No GUI available", exc_info = ex) sys.exit(100) with warnings.catch_warnings(): if not options.force_gui: warnings.filterwarnings("ignore") if sys.version_info[0] < 3: try: import pygtk; pygtk.require('2.0') except ImportError as ex: nogui(ex) import gui try: if sys.version_info[0] > 2: from zeroinstall.gtkui import pygtkcompat pygtkcompat.enable() pygtkcompat.enable_gtk(version = '3.0') import gtk except (ImportError, ValueError) as ex: nogui(ex) if gtk.gdk.get_display() is None: try: raise SafeException("Failed to connect to display.") except SafeException as ex: nogui(ex) # logging needs this as a raised exception handler = gui.GUIHandler() config = load_config(handler) if options.with_store: from zeroinstall import zerostore for x in options.with_store: config.stores.stores.append(zerostore.Store(os.path.abspath(x))) if len(args) < 1: @tasks.async def prefs_main(): import preferences box = preferences.show_preferences(config) done = tasks.Blocker('close preferences') box.connect('destroy', lambda w: done.trigger()) yield done tasks.wait_for_blocker(prefs_main()) sys.exit(0) interface_uri = args[0] if len(args) > 1: parser.print_help() sys.exit(1) import mainwindow, dialog r = requirements.Requirements(interface_uri) r.parse_options(options) widgets = dialog.Template('main') driver = Driver(config = config, requirements = r) root_iface = config.iface_cache.get_interface(interface_uri) driver.solver.record_details = True window = mainwindow.MainWindow(driver, widgets, download_only = bool(options.download_only), select_only = bool(options.select_only)) handler.mainwindow = window if options.message: window.set_message(options.message) root = config.iface_cache.get_interface(r.interface_uri) window.browser.set_root(root) window.window.connect('destroy', lambda w: handler.abort_all_downloads()) if options.systray: window.use_systray_icon() @tasks.async def main(): force_refresh = bool(options.refresh) while True: window.refresh_button.set_sensitive(False) window.browser.set_update_icons(force_refresh) solved = driver.solve_with_downloads(force = force_refresh, update_local = True) if not window.systray_icon: window.show() yield solved try: window.refresh_button.set_sensitive(True) window.browser.highlight_problems() tasks.check(solved) except Exception as ex: window.report_exception(ex) if window.systray_icon and window.systray_icon.get_visible() and \ window.systray_icon.is_embedded(): if driver.solver.ready: window.systray_icon.set_tooltip(_('Downloading updates for %s') % root_iface.get_name()) window.run_button.set_active(True) else: # Should already be reporting an error, but # blink it again just in case window.systray_icon.set_blinking(True) refresh_clicked = dialog.ButtonClickedBlocker(window.refresh_button) yield refresh_clicked, _recalculate if refresh_clicked.happened: force_refresh = True tasks.wait_for_blocker(main())
license: lgpl-2.1 | hash: 4,441,060,670,262,614,500
line_mean: 31.791444 | line_max: 134 | alpha_frac: 0.708415 | ratio: 3.347162
autogenerated: false | config_test: true | has_no_keywords: false | few_assignments: false
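The numeric fields are not documented in this dump; a plausible reading (an assumption, not confirmed by the source) is that line_mean, line_max and alpha_frac are simple text statistics over the content column, while ratio is left aside here because its definition is unclear. A minimal sketch of statistics of that kind:

    # Hypothetical reconstruction of the per-file metrics; the dataset's
    # actual formulas may differ.
    def text_metrics(content: str) -> dict:
        lines = content.splitlines() or [""]
        lengths = [len(line) for line in lines]
        alpha = sum(ch.isalpha() for ch in content)
        return {
            "line_mean": sum(lengths) / len(lengths),    # mean line length
            "line_max": max(lengths),                    # longest line
            "alpha_frac": alpha / max(len(content), 1),  # alphabetic share
        }

    print(text_metrics("import os\n\nprint(os.getcwd())\n"))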
repo_name: otov4its/django-walletone | path: walletone/views.py | copies: 1 | size: 1337
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import logging

from django.http.response import HttpResponse, HttpResponseBadRequest
from django.views.decorators.csrf import csrf_exempt

from .forms import WalletOneConfirmForm
from . import signals


logger = logging.getLogger(__name__)


@csrf_exempt
def payment_confirm(request):
    if request.method == 'POST':
        logger.info('Received a request from WalletOne')
        confirm_form = WalletOneConfirmForm(request.POST)
        if confirm_form.is_valid():
            payment = confirm_form.save()
            logger.info('Payment was created')
            # Send signal with payment object as arguments
            signals.payment_received.send(sender=type(payment), payment=payment)
            logger.info('payment_received signal was sent')
            return HttpResponse('WMI_RESULT=OK')
        else:
            errors_message = ''
            for key, messages in confirm_form.errors.items():
                errors_message += ' '.join(messages)
            errors_message = 'Received form not valid: ' + errors_message
            logger.warning(errors_message)
            return HttpResponse(
                'WMI_RESULT=OK&WMI_DESCRIPTION=%s' % errors_message
            )
    else:
        return HttpResponseBadRequest("Expected POST request")
license: mit | hash: 1,945,754,972,225,154,600
line_mean: 35.162162 | line_max: 80 | alpha_frac: 0.646223 | ratio: 4.471572
autogenerated: false | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: amrishparmar/mal_cl_interface | path: nl_interface/search.py | copies: 1 | size: 4686
import html from enum import Enum import click import requests from bs4 import BeautifulSoup import agent import network import ui class StatusCode(Enum): """An Enum represented the type of result of database searches""" NO_RESULTS = 0 USER_CANCELLED = 1 def display_entry_details(entry): """Display all the details of a given entry :param entry: an anime or manga entry as a Beautiful Soup Tag object """ for detail in entry.children: # ignore newlines in children if detail != "\n": # replace in tag name the underscores with spaces and convert to title case detail_name = detail.name.replace("_", " ").title() # set the string to be the detail.string by default detail_string = detail.string # check that the string contains something if detail_string is not None: # unescape html entities and remove break tags detail_string = html.unescape(detail_string).replace("<br />", "") detail_string = detail_string.replace("[i]", "").replace("[/i]", "") click.echo("{}: {}".format(detail_name, detail_string)) def search(credentials, search_type, search_string, display_details=True): """Search for an anime or manga entry :param credentials: A tuple containing valid MAL account details in the format (username, password) :param search_type: A string denoting the media type to search for, should be either "anime" or "manga" :param search_string: A string, the anime or manga to search for :param display_details: A boolean, whether to print the details of the found entry or whether to just return it :return: A beautiful soup tag, or a network status code if there was an error or the user quit """ if search_type not in ["anime", "manga"]: raise ValueError("Invalid argument for {}, must be either {} or {}.".format(search_type, "anime", "manga")) url = "https://myanimelist.net/api/{}/search.xml?q={}".format(search_type, search_string.replace(" ", "+")) # send the async search request to the server r = ui.threaded_action(network.make_request, "Searching for \"{}\"".format(search_string), request=requests.get, url=url, auth=credentials, stream=True) # check if there was an error with the user's internet connection if r == network.StatusCode.CONNECTION_ERROR: agent.print_connection_error_msg() return r if r.status_code == 204: agent.print_msg("I'm sorry I could not find any results for \"{}\".".format(search_string)) return StatusCode.NO_RESULTS elif r.status_code == 200: # decode the raw content so beautiful soup can read it as xml not a string r.raw.decode_content = True soup = BeautifulSoup(r.raw, "xml") # get all entries matches = soup.find_all("entry") # store the length of all_matched list since needed multiple times num_results = len(matches) if num_results == 1: if display_details: display_entry_details(matches[0]) else: return matches[0] else: agent.print_msg("I found {} results. 
Did you mean:".format(num_results)) # iterate over the matches and print them out for i in range(num_results): # use a different layout for entries that don't have any synonyms title_format = "{}> {} ({})" if matches[i].synonyms.get_text() != "" else "{}> {}" click.echo(title_format.format(i + 1, matches[i].title.get_text(), matches[i].synonyms.get_text())) click.echo("{}> [None of these]".format(num_results + 1)) # get a valid choice from the user while True: option = click.prompt("Please choose an option", type=int) if 1 <= option <= num_results + 1: break else: click.echo("You must enter a value between {} and {}".format(1, num_results + 1)) click.echo() # check that the user didn't choose the none of these option before trying to display entry if option != num_results + 1: if display_details: display_entry_details(matches[option - 1]) else: return matches[option - 1] else: return StatusCode.USER_CANCELLED else: agent.print_msg("There was an error getting the entry on your list. Please try again.") return network.StatusCode.OTHER_ERROR
license: mit | hash: 740,526,283,656,575,100
line_mean: 39.396552 | line_max: 116 | alpha_frac: 0.607981 | ratio: 4.26
autogenerated: false | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: vlegoff/tsunami | path: src/secondaires/peche/commandes/banc/editer.py | copies: 1 | size: 2649
# -*-coding:Utf-8 -* # Copyright (c) 2010-2017 LE GOFF Vincent # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT # OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. """Package contenant le paramètre 'éditer' de la commande 'banc'.""" from primaires.interpreteur.masque.parametre import Parametre class PrmEditer(Parametre): """Paramètre 'éditer de la commande 'banc'""" def __init__(self): """Constructeur du paramètre.""" Parametre.__init__(self, "éditer", "edit") self.schema = "<cle>" self.aide_courte = "ouvre l'éditeur de banc de poisson" self.aide_longue = \ "Cette commande permet d'accéder à l'éditeur " \ "du banc de poisson indiqué." def interpreter(self, personnage, dic_masques): """Méthode d'interprétation de commande""" cle = dic_masques["cle"].cle if cle not in importeur.peche.bancs: personnage << "|err|Ce banc n'existe pas.|ff|" return banc = importeur.peche.bancs[cle] editeur = importeur.interpreteur.construire_editeur( "schooledit", personnage, banc) personnage.contextes.ajouter(editeur) editeur.actualiser()
license: bsd-3-clause | hash: -2,259,236,170,578,934,000
line_mean: 43.677966 | line_max: 79 | alpha_frac: 0.709408 | ratio: 3.848175
autogenerated: false | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: reviewboard/rbtools | path: rbtools/clients/plastic.py | copies: 1 | size: 13440
"""A client for Plastic SCM.""" from __future__ import unicode_literals import logging import os import re from rbtools.clients import SCMClient, RepositoryInfo from rbtools.clients.errors import (InvalidRevisionSpecError, TooManyRevisionsError, SCMError) from rbtools.utils.checks import check_install from rbtools.utils.filesystem import make_tempfile from rbtools.utils.process import execute class PlasticClient(SCMClient): """A client for Plastic SCM. This is a wrapper around the cm executable that fetches repository information and generates compatible diffs. """ name = 'Plastic' server_tool_names = 'Plastic SCM' supports_changesets = True supports_patch_revert = True REVISION_CHANGESET_PREFIX = 'cs:' def __init__(self, **kwargs): """Initialize the client. Args: **kwargs (dict): Keyword arguments to pass through to the superclass. """ super(PlasticClient, self).__init__(**kwargs) def get_local_path(self): """Return the local path to the working tree. Returns: unicode: The filesystem path of the repository on the client system. """ if not check_install(['cm', 'version']): logging.debug('Unable to execute "cm version": skipping Plastic') return None # Get the workspace directory, so we can strip it from the diff output self.workspacedir = execute(['cm', 'gwp', '.', '--format={1}'], split_lines=False, ignore_errors=True).strip() logging.debug('Workspace is %s', self.workspacedir) # Get the repository that the current directory is from split = execute(['cm', 'ls', self.workspacedir, '--format={8}'], split_lines=True, ignore_errors=True) # remove blank lines split = [x for x in split if x] m = re.search(r'^rep:(.+)$', split[0], re.M) if not m: return None return m.group(1) def get_repository_info(self): """Return repository information for the current working tree. Returns: rbtools.clients.RepositoryInfo: The repository info structure. """ local_path = self.get_local_path() if local_path: return RepositoryInfo(path=local_path, local_path=local_path) return None def parse_revision_spec(self, revisions=[]): """Parse the given revision spec. Args: revisions (list of unicode, optional): A list of revisions as specified by the user. Items in the list do not necessarily represent a single revision, since the user can use SCM-native syntaxes such as ``r1..r2`` or ``r1:r2``. SCMTool-specific overrides of this method are expected to deal with such syntaxes. Raises: rbtools.clients.errors.InvalidRevisionSpecError: The given revisions could not be parsed. rbtools.clients.errors.TooManyRevisionsError: The specified revisions list contained too many revisions. Returns: dict: A dictionary with the following keys: ``base`` (:py:class:`NoneType`): Always None. ``tip`` (:py:class:`unicode`): A revision to use as the tip of the resulting diff. These will be used to generate the diffs to upload to Review Board (or print). The Plastic implementation requires that one and only one revision is passed in. The diff for review will include the changes in the given changeset or branch. """ n_revisions = len(revisions) if n_revisions == 0: raise InvalidRevisionSpecError( 'Either a changeset or a branch must be specified') elif n_revisions == 1: return { 'base': None, 'tip': revisions[0], } else: raise TooManyRevisionsError def diff(self, revisions, include_files=[], exclude_patterns=[], extra_args=[], **kwargs): """Perform a diff across all modified files in a Plastic workspace. Parent diffs are not supported (the second value in the tuple). 
Args: revisions (dict): A dictionary of revisions, as returned by :py:meth:`parse_revision_spec`. include_files (list of unicode, optional): A list of files to whitelist during the diff generation. exclude_patterns (list of unicode, optional): A list of shell-style glob patterns to blacklist during diff generation. extra_args (list, unused): Additional arguments to be passed to the diff generation. **kwargs (dict, unused): Unused keyword arguments. Returns: dict: A dictionary containing the following keys: ``diff`` (:py:class:`bytes`): The contents of the diff to upload. ``changenum`` (:py:class:`unicode`): The number of the changeset being posted (if ``revisions`` represents a single changeset). """ # TODO: use 'files' changenum = None tip = revisions['tip'] if tip.startswith(self.REVISION_CHANGESET_PREFIX): logging.debug('Doing a diff against changeset %s', tip) try: changenum = str(int( tip[len(self.REVISION_CHANGESET_PREFIX):])) except ValueError: pass else: logging.debug('Doing a diff against branch %s', tip) if not getattr(self.options, 'branch', None): self.options.branch = tip diff_entries = execute( ['cm', 'diff', tip, '--format={status} {path} rev:revid:{revid} ' 'rev:revid:{parentrevid} src:{srccmpath} ' 'dst:{dstcmpath}{newline}'], results_unicode=False, split_lines=True) diff = self._process_diffs(diff_entries) return { 'diff': diff, 'changenum': changenum, } def _process_diffs(self, diff_entries): """Process the given diff entries. Args: diff_entries (list): The list of diff entries. Returns: bytes: The processed diffs. """ diff_lines = [] empty_filename = make_tempfile() tmp_diff_from_filename = make_tempfile() tmp_diff_to_filename = make_tempfile() for f in diff_entries: f = f.strip() if not f: continue m = re.search(br'(?P<type>[ACMD]) (?P<file>.*) ' br'(?P<revspec>rev:revid:[-\d]+) ' br'(?P<parentrevspec>rev:revid:[-\d]+) ' br'src:(?P<srcpath>.*) ' br'dst:(?P<dstpath>.*)$', f) if not m: raise SCMError('Could not parse "cm log" response: %s' % f) changetype = m.group('type') filename = m.group('file') if changetype == b'M': # Handle moved files as a delete followed by an add. 
# Clunky, but at least it works oldfilename = m.group('srcpath') oldspec = m.group('revspec') newfilename = m.group('dstpath') newspec = m.group('revspec') self._write_file(oldfilename, oldspec, tmp_diff_from_filename) dl = self._diff_files(tmp_diff_from_filename, empty_filename, oldfilename, 'rev:revid:-1', oldspec, changetype) diff_lines += dl self._write_file(newfilename, newspec, tmp_diff_to_filename) dl = self._diff_files(empty_filename, tmp_diff_to_filename, newfilename, newspec, 'rev:revid:-1', changetype) diff_lines += dl else: newrevspec = m.group('revspec') parentrevspec = m.group('parentrevspec') logging.debug('Type %s File %s Old %s New %s', changetype, filename, parentrevspec, newrevspec) old_file = new_file = empty_filename if (changetype in [b'A'] or (changetype in [b'C'] and parentrevspec == b'rev:revid:-1')): # There's only one content to show self._write_file(filename, newrevspec, tmp_diff_to_filename) new_file = tmp_diff_to_filename elif changetype in [b'C']: self._write_file(filename, parentrevspec, tmp_diff_from_filename) old_file = tmp_diff_from_filename self._write_file(filename, newrevspec, tmp_diff_to_filename) new_file = tmp_diff_to_filename elif changetype in [b'D']: self._write_file(filename, parentrevspec, tmp_diff_from_filename) old_file = tmp_diff_from_filename else: raise SCMError('Unknown change type "%s" for %s' % (changetype, filename)) dl = self._diff_files(old_file, new_file, filename, newrevspec, parentrevspec, changetype) diff_lines += dl os.unlink(empty_filename) os.unlink(tmp_diff_from_filename) os.unlink(tmp_diff_to_filename) return b''.join(diff_lines) def _diff_files(self, old_file, new_file, filename, newrevspec, parentrevspec, changetype): """Do the work of producing a diff for Plastic. Args: old_file (bytes): The absolute path to the old file. new_file (bytes): The absolute path to the new file. filename (bytes): The file in the Plastic workspace. newrevspec (bytes): The revid spec of the new file. parentrevspec (bytes): The revid spec of the old file. changetype (bytes): The change type as a single character string. Returns: list of bytes: The computed diff. """ if filename.startswith(self.workspacedir): filename = filename[len(self.workspacedir):] # Diff returns "1" if differences were found. dl = execute(['diff', '-urN', old_file, new_file], extra_ignore_errors=(1, 2), results_unicode=False) # If the input file has ^M characters at end of line, lets ignore them. dl = dl.replace(b'\r\r\n', b'\r\n') dl = dl.splitlines(True) # Special handling for the output of the diff tool on binary files: # diff outputs "Files a and b differ" # and the code below expects the output to start with # "Binary files " if (len(dl) == 1 and dl[0].startswith(b'Files %s and %s differ' % (old_file.encode('utf-8'), new_file.encode('utf-8')))): dl = [b'Binary files %s and %s differ\n' % (old_file.encode('utf-8'), new_file.encode('utf-8'))] if dl == [] or dl[0].startswith(b'Binary files '): if dl == []: return [] dl.insert(0, b'==== %s (%s) ==%s==\n' % (filename, newrevspec, changetype)) dl.append('\n') else: dl[0] = '--- %s\t%s\n' % (filename, parentrevspec) dl[1] = '+++ %s\t%s\n' % (filename, newrevspec) # Not everybody has files that end in a newline. This ensures # that the resulting diff file isn't broken. if dl[-1][-1] != b'\n': dl.append(b'\n') return dl def _write_file(self, filename, filespec, tmpfile): """Retrieve a file from Plastic and write it to a temp file. Args: filename (bytes): The filename to fetch. filespec (bytes): The revision of the file to fetch. 
tmpfile (unicode): The name of the temporary file to write to. """ logging.debug('Writing "%s" (rev %s) to "%s"', filename.decode('utf-8'), filespec.decode('utf-8'), tmpfile) execute(['cm', 'cat', filespec, '--file=' + tmpfile])
license: mit | hash: 1,965,228,939,996,342,800
line_mean: 34.368421 | line_max: 79 | alpha_frac: 0.510714 | ratio: 4.543611
autogenerated: false | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: Statoil/SegyIO | path: python/examples/write.py | copies: 1 | size: 2756
import sys import segyio import numpy as np def main(): if len( sys.argv ) < 2: sys.exit("Usage: write.py [file]") filename = sys.argv[1] # the mode parameter is passed directly to C's fopen # opening the file for writing requires r+, not rw because rw would # truncate (effectively destroy) the file, and r+ would preserve the size with segyio.open( filename, "r+" ) as src: # read trace 0, then double all values trace = src.trace[0] trace *= 2 # write trace 0 back to disk src.trace[0] = trace # read trace 1, but re-use the memory for speed trace = src.trace[1] # square all values. the trace is a plain numpy array trace = np.square(trace, trace) # write the trace back to disk, but at trace 2 src.trace[2] = trace # read every other trace, from 10 through 20 # then write them to every third step from 40 through 52 # i.e. 40, 43, 46... # slices yield a generator, so only one numpy array is created for tr, i in zip(src.trace[10:20:2], range(2,13,3)): src.trace[i] = tr # iterate over all traces in a file. this is a generator with a shared # buffer, so it's quite efficient tracesum = 0 for tr in src.trace: # accumulate the traces' 30th value tracesum += tr[30] print("Trace sum: {}".format(tracesum)) # write the iline at 2 to the iline at 3 sum3 = np.sum(src.iline[3]) src.iline[2] = src.iline[3] # flush to make sure our changes to the file are visible src.flush() sum2 = np.sum(src.iline[2]) print("Accumulates of inlines 2 and 3: {} -- {}".format(sum2, sum3)) # ilines too are plain numpy ndarrays, with trace-major addressing # i.e. iline[2,40] would be yield trace#2's 40th value iline = src.iline[2] # since ilines are numpy arrays they also support numpy operations iline = np.add(iline, src.iline[4]) # lines too have generator support, so we accumulate the 2nd trace's # 22nd value. linesum = 0 for line in src.iline: linesum += line[2,22] print("Inline sum: {}".format(linesum)) # xline access is identical to iline access linesum = 0 for line in src.xline: linesum += line[2,22] print("Crossline sum: {}".format(linesum)) # accessing a non-existing inline will raise a KeyError try: _ = src.iline[5000] sys.exit("Was able to access non-existing inline") except KeyError as e: print(str(e)) if __name__ == '__main__': main()
license: lgpl-3.0 | hash: 8,186,660,944,869,912,000
line_mean: 31.809524 | line_max: 78 | alpha_frac: 0.58164 | ratio: 3.770178
autogenerated: false | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: oblalex/gnuplot.py-py3k | path: PlotItems.py | copies: 1 | size: 26123
# $Id: PlotItems.py 299 2007-03-30 12:52:17Z mhagger $ # Copyright (C) 1998-2003 Michael Haggerty <[email protected]> # # This file is licensed under the GNU Lesser General Public License # (LGPL). See LICENSE.txt for details. """PlotItems.py -- Objects that can be plotted by Gnuplot. This module contains several types of PlotItems. PlotItems can be plotted by passing them to a Gnuplot.Gnuplot object. You can derive your own classes from the PlotItem hierarchy to customize their behavior. """ import os, string, tempfile, types from io import StringIO import numpy import gp, utils, Errors class _unset: """Used to represent unset keyword arguments.""" pass class PlotItem: """Plotitem represents an item that can be plotted by gnuplot. For the finest control over the output, you can create 'PlotItems' yourself with additional keyword options, or derive new classes from 'PlotItem'. The handling of options is complicated by the attempt to allow options and their setting mechanism to be inherited conveniently. Note first that there are some options that can only be set in the constructor then never modified, and others that can be set in the constructor and/or modified using the 'set_option()' member function. The former are always processed within '__init__'. The latter are always processed within 'set_option', which is called by the constructor. 'set_option' is driven by a class-wide dictionary called '_option_list', which is a mapping '{ <option> : <setter> }' from option name to the function object used to set or change the option. <setter> is a function object that takes two parameters: 'self' (the 'PlotItem' instance) and the new value requested for the option. If <setter> is 'None', then the option is not allowed to be changed after construction and an exception is raised. Any 'PlotItem' that needs to add options can add to this dictionary within its class definition. Follow one of the examples in this file. Alternatively it could override the 'set_option' member function if it needs to do wilder things. Members: '_basecommand' -- a string holding the elementary argument that must be passed to gnuplot's `plot' command for this item; e.g., 'sin(x)' or '"filename.dat"'. '_options' -- a dictionary of (<option>,<string>) tuples corresponding to the plot options that have been set for this instance of the PlotItem. <option> is the option as specified by the user; <string> is the string that needs to be set in the command line to set that option (or None if no string is needed). Example:: {'title' : ('Data', 'title "Data"'), 'with' : ('linespoints', 'with linespoints')} """ # For _option_list explanation, see docstring for PlotItem. _option_list = { 'axes' : lambda self, axes: self.set_string_option( 'axes', axes, None, 'axes %s'), 'with' : lambda self, with_: self.set_string_option( 'with', with_, None, 'with %s'), 'title' : lambda self, title: self.set_string_option( 'title', title, 'notitle', 'title "%s"'), } _option_list['with_'] = _option_list['with'] # order in which options need to be passed to gnuplot: _option_sequence = [ 'binary', 'index', 'every', 'thru', 'using', 'smooth', 'axes', 'title', 'with' ] def __init__(self, **keyw): """Construct a 'PlotItem'. Keyword options: 'with_=<string>' -- choose how item will be plotted, e.g., with_='points 3 3'. 'title=<string>' -- set the title to be associated with the item in the plot legend. 'title=None' -- choose 'notitle' option (omit item from legend). 
Note that omitting the title option is different than setting 'title=None'; the former chooses gnuplot's default whereas the latter chooses 'notitle'. """ self._options = {} self.set_option(**keyw) def get_option(self, name): """Return the setting of an option. May be overridden.""" try: return self._options[name][0] except: raise KeyError('option %s is not set!' % name) def set_option(self, **keyw): """Set or change a plot option for this PlotItem. See documentation for '__init__' for information about allowed options. This function can be overridden by derived classes to allow additional options, in which case those options will also be allowed by '__init__' for the derived class. However, it is easier to define a new '_option_list' variable for the derived class. """ for (option, value) in keyw.items(): try: setter = self._option_list[option] except KeyError: raise Errors.OptionError('%s=%s' % (option,value)) if setter is None: raise Errors.OptionError( 'Cannot modify %s option after construction!', option) else: setter(self, value) def set_string_option(self, option, value, default, fmt): """Set an option that takes a string value.""" if value is None: self._options[option] = (value, default) elif type(value) is str: self._options[option] = (value, fmt % value) else: Errors.OptionError('%s=%s' % (option, value,)) def clear_option(self, name): """Clear (unset) a plot option. No error if option was not set.""" try: del self._options[name] except KeyError: pass def get_base_command_string(self): raise NotImplementedError() def get_command_option_string(self): cmd = [] for opt in self._option_sequence: (val,str) = self._options.get(opt, (None,None)) if str is not None: cmd.append(str) return " ".join(cmd) def command(self): """Build the plot command to be sent to gnuplot. Build and return the plot command, with options, necessary to display this item. If anything else needs to be done once per plot, it can be done here too. """ return " ".join([ self.get_base_command_string(), self.get_command_option_string(), ]) def pipein(self, f): """Pipe necessary inline data to gnuplot. If the plot command requires data to be put on stdin (i.e., 'plot "-"'), this method should put that data there. Can be overridden in derived classes. """ pass class Func(PlotItem): """Represents a mathematical expression to plot. Func represents a mathematical expression that is to be computed by gnuplot itself, as if you would type for example:: gnuplot> plot sin(x) into gnuplot itself. The argument to the contructor is a string that should be a mathematical expression. Example:: g.plot(Func('sin(x)', with_='line 3')) As shorthand, a string passed to the plot method of a Gnuplot object is also treated as a Func:: g.plot('sin(x)') """ def __init__(self, function, **keyw): PlotItem.__init__(self, **keyw) self.function = function def get_base_command_string(self): return self.function class _FileItem(PlotItem): """A PlotItem representing a file that contains gnuplot data. This class is not meant for users but rather as a base class for other types of FileItem. 
""" _option_list = PlotItem._option_list.copy() _option_list.update({ 'binary' : lambda self, binary: self.set_option_binary(binary), 'index' : lambda self, value: self.set_option_colonsep('index', value), 'every' : lambda self, value: self.set_option_colonsep('every', value), 'using' : lambda self, value: self.set_option_colonsep('using', value), 'smooth' : lambda self, smooth: self.set_string_option( 'smooth', smooth, None, 'smooth %s' ), }) def __init__(self, filename, **keyw): """Represent a PlotItem that gnuplot treates as a file. This class holds the information that is needed to construct the plot command line, including options that are specific to file-like gnuplot input. <filename> is a string representing the filename to be passed to gnuplot within quotes. It may be the name of an existing file, '-' for inline data, or the name of a named pipe. Keyword arguments: 'using=<int>' -- plot that column against line number 'using=<tuple>' -- plot using a:b:c:d etc. Elements in the tuple that are None are output as the empty string. 'using=<string>' -- plot `using <string>' (allows gnuplot's arbitrary column arithmetic) 'every=<value>' -- plot 'every <value>'. <value> is formatted as for 'using' option. 'index=<value>' -- plot 'index <value>'. <value> is formatted as for 'using' option. 'binary=<boolean>' -- data in the file is in binary format (this option is only allowed for grid data for splot). 'smooth=<string>' -- smooth the data. Option should be 'unique', 'csplines', 'acsplines', 'bezier', or 'sbezier'. The keyword arguments recognized by 'PlotItem' can also be used here. Note that the 'using' option is interpreted by gnuplot, so columns must be numbered starting with 1. By default, gnuplot uses the name of the file plus any 'using' option as the dataset title. If you want another title, set it explicitly using the 'title' option. 
""" self.filename = filename PlotItem.__init__(self, **keyw) def get_base_command_string(self): return gp.double_quote_string(self.filename) def set_option_colonsep(self, name, value): if value is None: self.clear_option(name) elif type(value) in [str, int]: self._options[name] = (value, '%s %s' % (name, value,)) elif type(value) is tuple: subopts = [] for subopt in value: if subopt is None: subopts.append('') else: subopts.append(str(subopt)) self._options[name] = ( value, '%s %s' % (name, ":".join(subopts),), ) else: raise Errors.OptionError('%s=%s' % (name, value,)) def set_option_binary(self, binary): if binary: if not gp.GnuplotOpts.recognizes_binary_splot: raise Errors.OptionError( 'Gnuplot.py is currently configured to reject binary data') self._options['binary'] = (1, 'binary') else: self._options['binary'] = (0, None) class _NewFileItem(_FileItem): def __init__(self, content, filename=None, **keyw): binary = keyw.get('binary', 0) if binary: mode = 'wb' else: mode = 'w' if filename: # This is a permanent file self.temp = False f = open(filename, mode) else: self.temp = True if hasattr(tempfile, 'mkstemp'): # Use the new secure method of creating temporary files: (fd, filename,) = tempfile.mkstemp( suffix='.gnuplot', text=(not binary) ) f = os.fdopen(fd, mode) else: # for backwards compatibility to pre-2.3: filename = tempfile.mktemp() f = open(filename, mode) f.write(content) f.close() # If the user hasn't specified a title, set it to None so # that the name of the temporary file is not used: if self.temp and 'title' not in keyw: keyw['title'] = None _FileItem.__init__(self, filename, **keyw) def __del__(self): if self.temp: os.unlink(self.filename) class _InlineFileItem(_FileItem): """A _FileItem that actually indicates inline data. """ def __init__(self, content, **keyw): # If the user hasn't specified a title, set it to None so that # '-' is not used: if 'title' not in keyw: keyw['title'] = None if keyw.get('binary', 0): raise Errors.OptionError('binary inline data is not supported') _FileItem.__init__(self, '-', **keyw) if content[-1] == '\n': self.content = content else: self.content = content + '\n' def pipein(self, f): f.write(self.content + 'e\n') if gp.GnuplotOpts.support_fifo: import threading class _FIFOWriter(threading.Thread): """Create a FIFO (named pipe), write to it, then delete it. The writing takes place in a separate thread so that the main thread is not blocked. The idea is that once the writing is finished we know that gnuplot is done with the data that were in the file so we can delete the file. This technique removes the ambiguity about when the temporary files should be deleted. Since the tempfile module does not provide an easy, secure way to create a FIFO without race conditions, we instead create a temporary directory using mkdtemp() then create the FIFO within that directory. When the writer thread has written the full information to the FIFO, it deletes both the FIFO and the temporary directory that contained it. 
""" def __init__(self, content, mode='w'): self.content = content self.mode = mode if hasattr(tempfile, 'mkdtemp'): # Make the file within a temporary directory that is # created securely: self.dirname = tempfile.mkdtemp(suffix='.gnuplot') self.filename = os.path.join(self.dirname, 'fifo') else: # For backwards compatibility pre-2.3, just use # mktemp() to create filename: self.dirname = None self.filename = tempfile.mktemp() threading.Thread.__init__( self, name=('FIFO Writer for %s' % (self.filename,)), ) os.mkfifo(self.filename) self.start() def run(self): f = open(self.filename, self.mode) f.write(self.content) f.close() os.unlink(self.filename) if self.dirname is not None: os.rmdir(self.dirname) class _FIFOFileItem(_FileItem): """A _FileItem based on a FIFO (named pipe). This class depends on the availablity of os.mkfifo(), which only exists under Unix. """ def __init__(self, content, **keyw): # If the user hasn't specified a title, set it to None so that # the name of the temporary FIFO is not used: if 'title' not in keyw: keyw['title'] = None _FileItem.__init__(self, '', **keyw) self.content = content if keyw.get('binary', 0): self.mode = 'wb' else: self.mode = 'w' def get_base_command_string(self): """Create the gnuplot command for plotting this item. The basecommand is different each time because each FIFOWriter creates a new FIFO. """ # Create a new FIFO and a thread to write to it. Retrieve the # filename of the FIFO to be used in the basecommand. fifo = _FIFOWriter(self.content, self.mode) return gp.double_quote_string(fifo.filename) def File(filename, **keyw): """Construct a _FileItem object referring to an existing file. This is a convenience function that just returns a _FileItem that wraps the filename. <filename> is a string holding the filename of an existing file. The keyword arguments are the same as those of the _FileItem constructor. """ if type(filename) is not str: raise Errors.OptionError( 'Argument (%s) must be a filename' % (filename,) ) return _FileItem(filename, **keyw) def Data(*data, **keyw): """Create and return a _FileItem representing the data from *data. Create a '_FileItem' object (which is a type of 'PlotItem') out of one or more Float Python numpy arrays (or objects that can be converted to a float numpy array). If the routine is passed a single with multiple dimensions, then the last index ranges over the values comprising a single data point (e.g., [<x>, <y>, <sigma>]) and the rest of the indices select the data point. If passed a single array with 1 dimension, then each point is considered to have only one value (i.e., by default the values will be plotted against their indices). If the routine is passed more than one array, they must have identical shapes, and then each data point is composed of one point from each array. E.g., 'Data(x,x**2)' is a 'PlotItem' that represents x squared as a function of x. For the output format, see the comments for 'write_array()'. How the data are written to gnuplot depends on the 'inline' argument and preference settings for the platform in use. Keyword arguments: 'cols=<tuple>' -- write only the specified columns from each data point to the file. Since cols is used by python, the columns should be numbered in the python style (starting from 0), not the gnuplot style (starting from 1). 'inline=<bool>' -- transmit the data to gnuplot 'inline' rather than through a temporary file. The default is the value of gp.GnuplotOpts.prefer_inline_data. 'filename=<string>' -- save data to a permanent file. 
The keyword arguments recognized by '_FileItem' can also be used here. """ if len(data) == 1: # data was passed as a single structure data = utils.float_array(data[0]) # As a special case, if passed a single 1-D array, then it is # treated as one value per point (by default, plotted against # its index): if len(data.shape) == 1: data = data[:,numpy.newaxis] else: # data was passed column by column (for example, # Data(x,y)); pack it into one big array (this will test # that sizes are all the same): data = utils.float_array(data) dims = len(data.shape) # transpose so that the last index selects x vs. y: data = numpy.transpose(data, (dims-1,) + tuple(range(dims-1))) if 'cols' in keyw: cols = keyw['cols'] del keyw['cols'] if isinstance(cols, int): cols = (cols,) data = numpy.take(data, cols, -1) if 'filename' in keyw: filename = keyw['filename'] or None del keyw['filename'] else: filename = None if 'inline' in keyw: inline = keyw['inline'] del keyw['inline'] if inline and filename: raise Errors.OptionError( 'cannot pass data both inline and via a file' ) else: inline = (not filename) and gp.GnuplotOpts.prefer_inline_data # Output the content into a string: f = StringIO() utils.write_array(f, data) content = f.getvalue() if inline: return _InlineFileItem(content, **keyw) elif filename: return _NewFileItem(content, filename=filename, **keyw) elif gp.GnuplotOpts.prefer_fifo_data: return _FIFOFileItem(content, **keyw) else: return _NewFileItem(content, **keyw) def GridData( data, xvals=None, yvals=None, inline=_unset, filename=None, **keyw ): """Return a _FileItem representing a function of two variables. 'GridData' represents a function that has been tabulated on a rectangular grid. The data are written to a file; no copy is kept in memory. Arguments: 'data' -- the data to plot: a 2-d array with dimensions (numx,numy). 'xvals' -- a 1-d array with dimension 'numx' 'yvals' -- a 1-d array with dimension 'numy' 'binary=<bool>' -- send data to gnuplot in binary format? 'inline=<bool>' -- send data to gnuplot "inline"? 'filename=<string>' -- save data to a permanent file. Note the unusual argument order! The data are specified *before* the x and y values. (This inconsistency was probably a mistake; after all, the default xvals and yvals are not very useful.) 'data' must be a data array holding the values of a function f(x,y) tabulated on a grid of points, such that 'data[i,j] == f(xvals[i], yvals[j])'. If 'xvals' and/or 'yvals' are omitted, integers (starting with 0) are used for that coordinate. The data are written to a temporary file; no copy of the data is kept in memory. If 'binary=0' then the data are written to a datafile as 'x y f(x,y)' triplets (y changes most rapidly) that can be used by gnuplot's 'splot' command. Blank lines are included each time the value of x changes so that gnuplot knows to plot a surface through the data. If 'binary=1' then the data are written to a file in a binary format that 'splot' can understand. Binary format is faster and usually saves disk space but is not human-readable. If your version of gnuplot doesn't support binary format (it is a recently-added feature), this behavior can be disabled by setting the configuration variable 'gp.GnuplotOpts.recognizes_binary_splot=0' in the appropriate gp*.py file. Thus if you have three arrays in the above format and a Gnuplot instance called g, you can plot your data by typing 'g.splot(Gnuplot.GridData(data,xvals,yvals))'. 
""" # Try to interpret data as an array: data = utils.float_array(data) try: (numx, numy) = data.shape except ValueError: raise Errors.DataError('data array must be two-dimensional') if xvals is None: xvals = numpy.arange(numx) else: xvals = utils.float_array(xvals) if xvals.shape != (numx,): raise Errors.DataError( 'The size of xvals must be the same as the size of ' 'the first dimension of the data array') if yvals is None: yvals = numpy.arange(numy) else: yvals = utils.float_array(yvals) if yvals.shape != (numy,): raise Errors.DataError( 'The size of yvals must be the same as the size of ' 'the second dimension of the data array') # Binary defaults to true if recognizes_binary_plot is set; # otherwise it is forced to false. binary = keyw.get('binary', 1) and gp.GnuplotOpts.recognizes_binary_splot keyw['binary'] = binary if inline is _unset: inline = ( (not binary) and (not filename) and gp.GnuplotOpts.prefer_inline_data ) elif inline and filename: raise Errors.OptionError( 'cannot pass data both inline and via a file' ) # xvals, yvals, and data are now all filled with arrays of data. if binary: if inline: raise Errors.OptionError('binary inline data not supported') # write file in binary format # It seems that the gnuplot documentation for binary mode # disagrees with its actual behavior (as of v. 3.7). The # documentation has the roles of x and y exchanged. We ignore # the documentation and go with the code. mout = numpy.zeros((numy + 1, numx + 1), numpy.float32) mout[0,0] = numx mout[0,1:] = xvals.astype(numpy.float32) mout[1:,0] = yvals.astype(numpy.float32) try: # try copying without the additional copy implied by astype(): mout[1:,1:] = numpy.transpose(data) except: # if that didn't work then downcasting from double # must be necessary: mout[1:,1:] = numpy.transpose(data.astype(numpy.float32)) content = mout.tostring() if (not filename) and gp.GnuplotOpts.prefer_fifo_data: return _FIFOFileItem(content, **keyw) else: return _NewFileItem(content, filename=filename, **keyw) else: # output data to file as "x y f(x)" triplets. This # requires numy copies of each x value and numx copies of # each y value. First reformat the data: set = numpy.transpose( numpy.array( (numpy.transpose(numpy.resize(xvals, (numy, numx))), numpy.resize(yvals, (numx, numy)), data)), (1,2,0)) # Now output the data with the usual routine. This will # produce data properly formatted in blocks separated by blank # lines so that gnuplot can connect the points into a grid. f = StringIO() utils.write_array(f, set) content = f.getvalue() if inline: return _InlineFileItem(content, **keyw) elif filename: return _NewFileItem(content, filename=filename, **keyw) elif gp.GnuplotOpts.prefer_fifo_data: return _FIFOFileItem(content, **keyw) else: return _NewFileItem(content, **keyw)
license: lgpl-2.1 | hash: 6,933,519,582,789,603,000
line_mean: 33.87717 | line_max: 79 | alpha_frac: 0.598515 | ratio: 4.180349
autogenerated: false | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: wtpayne/hiai | path: a3_src/h70_internal/da/lwc/env.py | copies: 1 | size: 8714
# -*- coding: utf-8 -*- """ Local working copy runtime environment control. --- type: python_module validation_level: v00_minimum protection: k00_public copyright: "Copyright 2016 High Integrity Artificial Intelligence Systems" license: "Licensed under the Apache License, Version 2.0 (the License); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License." ... """ import os import platform import da.lwc.discover import da.register # ----------------------------------------------------------------------------- def api_path(dependency_id, iface_name = 'lib_python3', register = None, dirpath_lwc_root = None): """ Return the path to the specified api. """ return _iface_path( dependency_id = dependency_id, iface_type = 'api', iface_name = iface_name, register = register, dirpath_lwc_root = dirpath_lwc_root) # ----------------------------------------------------------------------------- def cli_path(dependency_id, application_name, register = None, dirpath_lwc_root = None): """ Return the path to the specified cli binary. """ return _iface_path( dependency_id = dependency_id, iface_type = 'cli', iface_name = application_name, register = register, dirpath_lwc_root = dirpath_lwc_root) # ----------------------------------------------------------------------------- def gui_path(dependency_id, application_name, register = None, dirpath_lwc_root = None): """ Return the path to the specified gui binary. """ return _iface_path( dependency_id = dependency_id, iface_type = 'gui', iface_name = application_name, register = register, dirpath_lwc_root = dirpath_lwc_root) # ----------------------------------------------------------------------------- def _iface_path(dependency_id, iface_type, iface_name, register = None, dirpath_lwc_root = None): """ Return the path for the specified interface type and dependency id. """ if register is None: register = dependencies_register( dirpath_lwc_root = dirpath_lwc_root) try: dependency_data = register[dependency_id] except KeyError: raise RuntimeError( 'Could not identify dependency: "{dep}".'.format( dep = dependency_id)) dirpath_env = da.lwc.discover.path('current_env', dirpath_lwc_root = dirpath_lwc_root) try: relpath_cli = dependency_data[iface_type][iface_name] except KeyError: raise RuntimeError( 'Dependency "{dep}" has no {type} with "{name}".'.format( dep = dependency_id, type = iface_type, name = iface_name)) return os.path.normpath(os.path.join(dirpath_env, dependency_data['dirname'], dependency_data['policy'], relpath_cli)) # ----------------------------------------------------------------------------- @da.memo.var def dependencies_register(dirpath_lwc_root = None): """ Return information about the location of dependencies. """ # Add some calculated file-paths to the dependency map. 
dirpath_curr_env = da.lwc.discover.path('current_env', dirpath_lwc_root) rootpath_env = da.lwc.discover.path('env', dirpath_lwc_root) rootpath_env_src = os.path.join(rootpath_env, 'src') register = da.register.load('dependencies') for (key, dep) in register.items(): dirname_dep = dep['dirname'] dirname_pol = dep['policy'] dirpath_src = os.path.join(rootpath_env_src, dirname_dep, dirname_pol) dirpath_dep = os.path.join(dirpath_curr_env, dirname_dep, dirname_pol) register[key]['name'] = key register[key]['dirpath_src'] = dirpath_src register[key]['dirpath_dep'] = dirpath_dep return register # ----------------------------------------------------------------------------- # TODO: Refactor to reduce number of branches. # (Rule disabled to facilitate tightening of the threshold) @da.memo.var def python_import_path(iface_name = None, # pylint: disable=R0912 dirpath_lwc_root = None): """ Return a list of Python import paths configured for the local working copy. Dependency information for the current local working copy is stored in the dependency map file. Different directories are used to store python slibraries for python2 and python3. """ if iface_name is None: iface_name = _iface_for_current_python_rt() dirpath_env = da.lwc.discover.path( 'current_env', dirpath_lwc_root = dirpath_lwc_root) register = dependencies_register( dirpath_lwc_root = dirpath_lwc_root) # python_path for the specified iface. # Replicates some of the logic in function # addpackage in site.py # python_path = [] for (_, dependency_data) in register.items(): try: relpath_iface = dependency_data['api'][iface_name] except KeyError: continue dirpath_package = os.path.normpath( os.path.join( dirpath_env, dependency_data['dirname'], dependency_data['policy'], relpath_iface)) if not os.path.isdir(dirpath_package): continue eggs = [os.path.join(dirpath_package, name) for name in os.listdir(dirpath_package) if name.endswith('.egg')] if eggs: python_path.extend(eggs) else: python_path.append(dirpath_package) # All top level directories from src are added to the python_path dirpath_src = da.lwc.discover.path( key = 'src', dirpath_lwc_root = dirpath_lwc_root) for (_, dir_list, _) in os.walk(dirpath_src): for name in dir_list: if name.startswith('.'): continue python_path.append(os.path.join(dirpath_src, name)) break # Add system python as well. # # TODO: !WARNING! !DANGEROUS! !REMOVE AS SOON AS POSSIBLE! # if iface_name == 'lib_python3': python_path.append('/usr/lib/python3.4') python_path.append('/usr/lib/python3.4/plat-x86_64-linux-gnu') python_path.append('/usr/lib/python3.4/lib-dynload') python_path.append('/usr/local/lib/python3.4/dist-packages') python_path.append('/usr/lib/python3/dist-packages') return python_path # ----------------------------------------------------------------------------- def _iface_for_current_python_rt(): """ Return a library interface id compatible with the current Python runtime. The interface id is used to determine which library version to import, so we can switch between python 2.x and python 3.x if required. """ (major, minor, _) = platform.python_version_tuple() try: return { '2': 'lib_python2', '3': 'lib_python3' }[major] except KeyError: raise RuntimeError( 'Python {major}.{minor} not supported.'.format(major = major, minor = minor))
license: apache-2.0 | hash: -3,498,644,829,901,860,400
line_mean: 33.995984 | line_max: 79 | alpha_frac: 0.506197 | ratio: 4.672386
autogenerated: false | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: mahabs/nitro | path: nssrc/com/citrix/netscaler/nitro/resource/config/cr/crvserver_binding.py | copies: 1 | size: 4725
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util


class crvserver_binding(base_resource):
    """ Binding class showing the resources that can be bound to crvserver_binding.
    """
    def __init__(self):
        self._name = ""
        self.crvserver_filterpolicy_binding = []
        self.crvserver_cmppolicy_binding = []
        self.crvserver_lbvserver_binding = []
        self.crvserver_policymap_binding = []
        self.crvserver_cspolicy_binding = []
        self.crvserver_crpolicy_binding = []

    @property
    def name(self):
        """Name of a cache redirection virtual server about which to display detailed information.<br/>Minimum length = 1.
        """
        try:
            return self._name
        except Exception as e:
            raise e

    @name.setter
    def name(self, name):
        """Name of a cache redirection virtual server about which to display detailed information.<br/>Minimum length = 1
        """
        try:
            self._name = name
        except Exception as e:
            raise e

    @property
    def crvserver_policymap_bindings(self):
        """policymap that can be bound to crvserver.
        """
        try:
            return self._crvserver_policymap_binding
        except Exception as e:
            raise e

    @property
    def crvserver_lbvserver_bindings(self):
        """lbvserver that can be bound to crvserver.
        """
        try:
            return self._crvserver_lbvserver_binding
        except Exception as e:
            raise e

    @property
    def crvserver_filterpolicy_bindings(self):
        """filterpolicy that can be bound to crvserver.
        """
        try:
            return self._crvserver_filterpolicy_binding
        except Exception as e:
            raise e

    @property
    def crvserver_cmppolicy_bindings(self):
        """cmppolicy that can be bound to crvserver.
        """
        try:
            return self._crvserver_cmppolicy_binding
        except Exception as e:
            raise e

    @property
    def crvserver_cspolicy_bindings(self):
        """cspolicy that can be bound to crvserver.
        """
        try:
            return self._crvserver_cspolicy_binding
        except Exception as e:
            raise e

    @property
    def crvserver_crpolicy_bindings(self):
        """crpolicy that can be bound to crvserver.
        """
        try:
            return self._crvserver_crpolicy_binding
        except Exception as e:
            raise e

    def _get_nitro_response(self, service, response):
        """ converts nitro response into object and returns the object array in case of get request.
        """
        try:
            result = service.payload_formatter.string_to_resource(crvserver_binding_response, response, self.__class__.__name__)
            if (result.errorcode != 0):
                if (result.errorcode == 444):
                    service.clear_session(self)
                if result.severity:
                    if (result.severity == "ERROR"):
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else:
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.crvserver_binding
        except Exception as e:
            raise e

    def _get_object_name(self):
        """ Returns the value of object identifier argument
        """
        try:
            if (self.name):
                return str(self.name)
            return None
        except Exception as e:
            raise e

    @classmethod
    def get(self, service, name):
        """ Use this API to fetch crvserver_binding resource.
        """
        try:
            if type(name) is not list:
                obj = crvserver_binding()
                obj.name = name
                response = obj.get_resource(service)
            else:
                if name and len(name) > 0:
                    obj = [crvserver_binding() for _ in range(len(name))]
                    # initialise the response list before populating it
                    response = [None for _ in range(len(name))]
                    for i in range(len(name)):
                        obj[i].name = name[i]
                        response[i] = obj[i].get_resource(service)
            return response
        except Exception as e:
            raise e


class crvserver_binding_response(base_response):
    def __init__(self, length=1):
        self.crvserver_binding = []
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        self.crvserver_binding = [crvserver_binding() for _ in range(length)]
apache-2.0
-5,411,408,159,442,030,000
27.810976
119
0.702646
false
3.348689
false
false
false
jfouca/confucius
confucius/models/conference.py
1
7188
from django.db import models from django.db.models import Q from django.db.models.signals import pre_save, post_delete from django.dispatch import receiver from datetime import datetime from confucius.models import ConfuciusModel, User class Action(ConfuciusModel): name = models.CharField(max_length=155, verbose_name='Action') def __unicode__(self): return self.name class Alert(ConfuciusModel): title = models.CharField(max_length=100, default=None) content = models.TextField(default=None) conference = models.ForeignKey('Conference') trigger_date = models.DateField(verbose_name='trigger date', blank=True, null=True) reminder = models.ForeignKey('Reminder', blank=True, null=True) event = models.ForeignKey('Event', blank=True, null=True) action = models.ForeignKey('Action', blank=True, null=True) roles = models.ManyToManyField('Role', blank=True) class Meta(ConfuciusModel.Meta): unique_together = ('title', 'conference',) def __unicode__(self): return self.title def is_trigger(self): return self.action is None and self.reminder is None def is_reminder(self): return self.action is None and self.trigger_date is None def is_action(self): return self.reminder is None and self.trigger_date is None class Conference(ConfuciusModel): title = models.CharField(max_length=100, unique=True) is_open = models.BooleanField(default=False) has_finalize_paper_selections = models.BooleanField(default=False) start_date = models.DateField() submissions_start_date = models.DateField() submissions_end_date = models.DateField() reviews_start_date = models.DateField() reviews_end_date = models.DateField() url = models.URLField(blank=True) members = models.ManyToManyField(User, through='Membership') domains = models.ManyToManyField('Domain', related_name='conferences') access_key = models.CharField(max_length=8) maximum_score = models.IntegerField(default=10) minimum_reviews = models.IntegerField(default=2) enable_reviewer_confidence = models.BooleanField(default=True) def __unicode__(self): return self.title @models.permalink def get_absolute_url(self): return ('confucius.views.conference_access', (), {'conference_pk': self.pk, 'access_key': self.access_key}) def save(self, *args, **kwargs): from confucius.utils import random_string if self.pk is None: self.access_key = random_string(8) super(Conference, self).save(*args, **kwargs) def are_submissions_over(self): return datetime.now().date() > self.submissions_end_date def are_submissions_notstarted(self): return datetime.now().date() < self.submissions_start_date def are_reviews_notstarted(self): return datetime.now().date() < self.reviews_start_date def are_reviews_over(self): return self.has_finalize_paper_selections or datetime.now().date() > self.reviews_end_date def is_started(self): return datetime.now().date() > self.start_date class Domain(ConfuciusModel): name = models.CharField(max_length=50, unique=True) def __unicode__(self): return self.name class Event(ConfuciusModel): name = models.CharField(max_length=155, verbose_name='linked to') def __unicode__(self): return self.name class Membership(ConfuciusModel): user = models.ForeignKey(User, related_name='memberships') conference = models.ForeignKey(Conference) roles = models.ManyToManyField('Role') domains = models.ManyToManyField(Domain) last_accessed = models.BooleanField(default=False) class Meta(ConfuciusModel.Meta): unique_together = ('user', 'conference') def set_last_accessed(self): Membership.objects.filter(user=self.user).update(last_accessed=False) self.last_accessed = True self.save() def 
_has_role(self, code): try: self.roles.get(code=code) return True except: return False def has_chair_role(self): return self._has_role('C') def has_reviewer_role(self): return self._has_role('R') def has_submitter_role(self): return self._has_role('S') @receiver(pre_save, sender=Membership, dispatch_uid="Membership_identifier") def my_user_handler(sender, instance, **kwargs): conference = instance.conference user_pre_save = instance.user alerts = Alert.objects.filter((Q(action=1) | Q(action=2))) for alert in alerts: if alert.action.pk == 1 and instance.pk is None: try: Membership.objects.get(conference=conference, user=user_pre_save) except: my_send_mail(alert, conference) elif alert.action.pk == 2 and instance.pk is not None: try: Membership.objects.get(conference=alert.conference, user=user_pre_save) except: my_send_mail(alert, conference) def my_send_mail(alert, conference): from django.core.mail import send_mail for role in alert.roles.all(): memberships_list = Membership.objects.filter(roles=role, conference=conference).all() users_email = [unicode(membership.user.email) for membership in memberships_list] try: send_mail("[Confucius Alert] " + alert.title, alert.content, '[email protected]', users_email, fail_silently=False) except: print "Error occured during email sending process. Please check your SMTP settings" class MessageTemplate(ConfuciusModel): title = models.CharField(max_length=100, default=None) content = models.TextField(default=None) conference = models.ForeignKey(Conference, related_name="messages_templates") class Meta(ConfuciusModel.Meta): unique_together = ('title', 'conference') def __unicode__(self): return self.title class Reminder(ConfuciusModel): value = models.PositiveIntegerField() name = models.CharField(max_length=155, verbose_name='reminder') class Meta(ConfuciusModel.Meta): unique_together = ('value', 'name') def __unicode__(self): return self.name class Role(ConfuciusModel): code = models.CharField(max_length=1) name = models.CharField(max_length=9) def __unicode__(self): return self.name class Invitation(ConfuciusModel): user = models.ForeignKey(User) conference = models.ForeignKey(Conference) roles = models.ManyToManyField(Role) decision = models.CharField(max_length=1, choices=( ('A', 'Accepted'), ('R', 'Refused'), ('W', 'Waiting for response') ), default='W') key = models.CharField(max_length=64, unique=True) class Meta(ConfuciusModel.Meta): unique_together = ('user', 'conference') def _decision(self, code): self.decision = code self.save() def pending(self): return self.decision == 'W' def refuse(self): self._decision('R') def accept(self): self._decision('A')
bsd-3-clause
-5,912,912,759,637,472,000
31.378378
139
0.664719
false
3.821372
false
false
false
rajpushkar83/cloudmesh
cloudmesh/management/project.py
1
12236
from mongoengine import * from mongoengine.context_managers import switch_db from datetime import datetime import hashlib import uuid from user import User, Users # from comittee import Committee from pprint import pprint from cloudmeshobject import CloudmeshObject from cloudmesh_base.ConfigDict import ConfigDict from cloudmesh_base.locations import config_file from cloudmesh.config.cm_config import get_mongo_db, get_mongo_dbname_from_collection, DBConnFactory def IMPLEMENT(): print "IMPLEMENT ME" STATUS = ('pending', 'approved', 'completed', 'denied') CATEGORY = ('Database', 'FutureGrid', 'other') DISCIPLINE = ('other') # see https://ncsesdata.nsf.gov/nsf/srs/webcasp/data/gradstud.htm # put in discipline.txt and initialize from there through reading the file and codes # INSTITUTE_ROLE = ('gaduate student', 'undergraduate student', 'staff', 'faculty', 'visitor', 'other') CLUSTERS = ('india', 'bravo', 'echo', 'delta', 'other', 'None') SERVICES = ('eucalyptus', 'openstack', 'mpi', 'hadoop', 'mapreduce', 'docker', 'other', 'None') SOFTWARE = ('HPC', 'other') PROVISIONING = ('vm', 'baremetal', 'container', 'iaas', 'paas', 'other', 'None') GRANT_ORG = ('NSF', 'DOE', 'DoD', 'NIH', 'other', 'None') REQUIRED = False class Project(CloudmeshObject): # named connection (not 'default') dbname = get_mongo_dbname_from_collection("manage") if dbname: meta = {'db_alias': dbname} ''' The project object with its fields. The current fields include Attributes: title abstract intellectual_merit broader_impact use_of_fg scale_of_use categories keywords primary_discipline orientation contact url comment active projectid status lead managers members alumnis grant_orgnization grant_id grant_url results aggreement_use aggreement_slides aggreement_support aggreement_sotfware aggreement_documentation comments join_open join_notification resources_services resources_software resources_clusters resources_provision ''' # ------------------------------------------------------------------- # Project Information # ------------------------------------------------------------------- title = StringField(required=REQUIRED) # ------------------------------------------------------------------- # Project Vocabulary # ------------------------------------------------------------------- categories = ListField(StringField(choices=CATEGORY), required=REQUIRED) keywords = ListField(StringField(), required=REQUIRED) # ------------------------------------------------------------------- # Project Contact # ------------------------------------------------------------------- # lead_institutional_role = StringField(choices=INSTITUTE_ROLE, required=REQUIRED) lead = ReferenceField(User) managers = ListField(StringField()) members = ListField(ReferenceField(User)) alumnis = ListField(StringField()) contact = StringField(required=REQUIRED) # active_members = lead u managers u members - alumnis # if not active : active_members = None # ------------------------------------------------------------------- # Project Details # ------------------------------------------------------------------- orientation = StringField(required=REQUIRED) primary_discipline = StringField(choices=DISCIPLINE, required=REQUIRED) abstract = StringField(required=REQUIRED) intellectual_merit = StringField(required=REQUIRED) broader_impact = StringField(required=REQUIRED) url = URLField(required=REQUIRED) results = StringField() # ------------------------------------------------------------------- # Agreements # 
------------------------------------------------------------------- agreement_use = BooleanField() agreement_slides = BooleanField() agreement_support = BooleanField() agreement_software = BooleanField() agreement_documentation = BooleanField() # ------------------------------------------------------------------- # Grant Information # ------------------------------------------------------------------- grant_organization = StringField(choices=GRANT_ORG) grant_id = StringField() grant_url = URLField() # ------------------------------------------------------------------- # Resources # ------------------------------------------------------------------- resources_services = ListField( StringField(choices=SERVICES), required=REQUIRED) resources_software = ListField( StringField(choices=SOFTWARE), required=REQUIRED) resources_clusters = ListField( StringField(choices=CLUSTERS), required=REQUIRED) resources_provision = ListField( StringField(choices=PROVISIONING), required=REQUIRED) comment = StringField() use_of_fg = StringField(required=REQUIRED) scale_of_use = StringField(required=REQUIRED) # ------------------------------------------------------------------- # Other # ------------------------------------------------------------------- comments = StringField() # ------------------------------------------------------------------- # Project Membership Management # ------------------------------------------------------------------- join_open = BooleanField() join_notification = BooleanField() # ------------------------------------------------------------------- # Location # ------------------------------------------------------------------- loc_name = StringField() loc_street = StringField() loc_additional = StringField() loc_state = StringField() loc_country = StringField() # example search in a list field # Project.objects(categories__contains='education') active = BooleanField(required=REQUIRED) projectid = UUIDField() status = StringField(choices=STATUS, required=REQUIRED) # maybe we do not need active as this may be covered in status # ------------------------------------------------------------------- # Project Comittee: contains all the information about the projects committee # ------------------------------------------------------------------- # comittee = ReferenceField(Committee) # BUG how can we add also arbitray info in case of other, mabe ommit # choices def to_json(self): """prints the project as a json object""" d = { "title": self.title, "abstract": self.abstract, "intellectual_merit": self.intellectual_merit, "broader_impact": self.broader_impact, "use_of_fg": self.use_of_fg, "scale_of_use": self.scale_of_use, "categories": self.categories, "keywords": self.keywords, "primary_discipline": self.primary_discipline, "orientation": self.orientation, "contact": self.contact, "url": self.url, "active": self.active, "status": self.status, "lead": self.lead, "members": self.members, "resources_services": self.resources_services, "resources_software": self.resources_software, "resources_clusters": self.resources_clusters, "resources_provision": self.resources_provision } return d def __str__(self): ''' printing the object as a string ''' d = self.to_json() return str(d) class Projects(object): ''' convenience opbject to manage multiple prpojects ''' def __init__(self): get_mongo_db("manage", DBConnFactory.TYPE_MONGOENGINE) self.projects = Project.objects() self.users = User.objects() def __str__(self): ''' not implemented ''' IMPLEMENT() def find(self): return Project.objects() def 
objects(self): ''' returns the projects ''' return Project.objects() def save(self, project): '''adds a project to the database but only after it has been verifie :param project: the project id :type project: uuid ''' project.save() def add_user(self, user_name, project, role): ''' Adds a member to the project. :param role: the role of the user :type role: String :param user_name: the username :type user_name: String :param project: the project id :type project: uuid ''' """adds members to a particular project""" users = User.objects(user_name=user_name) if users.count() == 1: if role == "member": project.members.append(user) elif role == "lead": project.lead.append(user) elif role == "lead": project.alumni.append(user) else: print "ERROR: The user `{0}` has not registered with FutureGrid".format(user_name) def find_users(self, project, role): '''returns all the members of a particular project :param role: the role of the user :type role: String :param project: the project id :type project: uuid ''' if role == "member": return project.members elif role == "lead": return project.leads elif role == "lead": return project.alumni def find_by_id(self, id): ''' finds projects by if :param id: the project id :type id: uuid ''' """Finds a project by the given id""" found = Project.objects(projectid=id) if found.count() > 0: return found[0].to_json() else: return None # User ID or project ID def find_by_category(self, category): ''' find the project by category :param category: the category :type category: String ''' """Finds and returns all project in that category""" found = Project.objects(categories=category) if found.count() > 0: return found[0].to_json() else: return None def find_by_keyword(self, keyword): ''' finds a projects matching a keyword :param keyword: a keyword :type keyword: String ''' """Finds and returns all projects with the entered keyword""" found = Project.objects(keyword=keyword) if found.count() > 0: return found[0].to_json() else: return None def add(self, project): ''' adds a project :param project: the username :type project: String ''' print "PPPPPP", project if not project.status: project.status = 'pending' if (project.projectid is None) or (project.projectid == ""): found = False proposedid = None # while not found: # proposedid = uuid.uuid4() # result = Project.objects(projectid=proposedid) # print "PPPPP", result # found = result.count() > 0 # print result.count() project.projectid = proposedid else: print "UUUUUU -{0}-".format(project.projectid) print "UUID", project.projectid project.save() def clear(self): """removes all projects from the database""" for project in Project.objects: project.delete()
apache-2.0
-4,249,167,181,425,727,000
28.990196
100
0.505721
false
4.886581
false
false
false
bkold/CarbonCopy
ribbit_app/tests.py
1
6847
from django.test import TestCase from django.test.client import Client from django.contrib.auth.models import User from django.conf.urls import url from ribbit_app.models import Ribbit from . import views from django.core.urlresolvers import reverse from django.test import LiveServerTestCase from django.contrib.auth.models import User from selenium import webdriver from selenium.webdriver.common.by import By from selenium.common.exceptions import NoSuchElementException import unittest, time, re from time import time class TestAccess(TestCase): def setUp(self): self.c = Client() def test_entries_access(self): #no account response = self.c.get('/') self.assertEqual(response.status_code,200) response = self.c.get('/login') self.assertEqual(response.status_code,302) #redirect to '/' response = self.c.get('/logout') self.assertEqual(response.status_code,302) #redirect to '/' response = self.c.get('/signup') self.assertEqual(response.status_code,302) #redirect to '/' response = self.c.get('/public') self.assertEqual(response.status_code,302) #redirect to '/' response = self.c.get('/submit') self.assertEqual(response.status_code,302) #redirect to '/' response = self.c.get('/users') self.assertEqual(response.status_code,301) #redirect to '/' response = self.c.get('/follow') self.assertEqual(response.status_code,302) #redirect to '/' class TestLogedInAccess(TestCase): def setUp(self): self.c = Client() self.user = User.objects.create_user(username="testMan", email="[email protected]", password="123") def test_entry_created(self): #####not loged in response = self.c.get(reverse('b')) self.assertEqual(response.status_code, 200) response = self.c.get(reverse('logn')) self.assertEqual(response.status_code, 302) #redirect to '/' response = self.c.get(reverse('logot')) self.assertEqual(response.status_code, 302) #redirect to '/' response = self.c.get(reverse('sign')) self.assertEqual(response.status_code, 302) #redirect to '/' response = self.c.get(reverse('pub')) self.assertEqual(response.status_code, 302) #redirect to '/' response = self.c.get(reverse('us')) self.assertEqual(response.status_code, 302) #redirect to '/' response = self.c.get(reverse('fol')) self.assertEqual(response.status_code, 302) #redirect to '/' #####login self.c.login(username='testMan', password='123') response = self.c.get(reverse('b')) self.assertEqual(response.status_code, 200) self.c.login(username='testMan', password='123') response = self.c.get(reverse('logn')) self.assertEqual(response.status_code, 302) #redirect to '/' self.c.login(username='testMan', password='123') response = self.c.get(reverse('logot')) self.assertEqual(response.status_code, 302) #redirect to '/' self.c.login(username='testMan', password='123') response = self.c.get(reverse('sign')) self.assertEqual(response.status_code, 302) #redirect to '/' self.c.login(username='testMan', password='123') response = self.c.get(reverse('pub')) self.assertEqual(response.status_code, 200) self.c.login(username='testMan', password='123') response = self.c.get(reverse('us')) self.assertEqual(response.status_code, 200) self.c.login(username='testMan', password='123') response = self.c.get(reverse('fol')) self.assertEqual(response.status_code, 302) #redirect to '/users' def test_entries_template_context(self): #####upload test Ribbit.objects.create(content='test post 2', pic='{{MEDIA_URL}}uploaded_files/test.jpg', brightness='20', user=self.user) response = self.c.get(reverse('sub')) class TestWebdriver(LiveServerTestCase): def setUp(self): self.driver = webdriver.Firefox() 
User.objects.create_superuser( username='admin', password='admin', email='[email protected]' ) def tearDown(self): # Call tearDown to close the web browser self.driver.quit() def test_auth_user(self): self.driver.get('http://127.0.0.1:8000/') self.driver.implicitly_wait(10) username = self.driver.find_element_by_xpath('//input[@placeholder="Username"]') username.send_keys("test_new_1") # This needs to change evertime password1 = self.driver.find_element_by_id("id_email") password1.send_keys("[email protected]") password1 = self.driver.find_element_by_id("id_password1") password1.send_keys("123") password2 = self.driver.find_element_by_id("id_password2") password2.send_keys("123") self.driver.find_element_by_xpath('//input[@value="Create Account"]').click() self.driver.implicitly_wait(10) #picture input brightness = self.driver.find_element_by_id("id_brightness") brightness.send_keys("10") content = self.driver.find_element_by_id("id_content") content.send_keys("test") pic = self.driver.find_element_by_id("id_pic") pic.send_keys("/home/brian/Desktop/CarbonCopy/CC/ribbit/pictures/uploaded_files/test.jpg") #This should be the addresses of your picture self.driver.find_element_by_xpath('//input[@value="Post!"]').click() self.driver.implicitly_wait(10) self.driver.find_element_by_link_text("Public Profiles").click() self.driver.implicitly_wait(10) self.driver.find_element_by_link_text("My Profile").click() self.driver.implicitly_wait(10) self.driver.find_element_by_link_text("Public Posts").click() self.driver.find_element_by_xpath('//input[@value="Log Out"]').click() def test_login_user(self): self.driver.get('http://127.0.0.1:8000/') username = self.driver.find_element_by_id("id_username") username.send_keys("test_new_1") #this needs to be a vaild user password = self.driver.find_element_by_id("id_password") password.send_keys("123") self.driver.implicitly_wait(10) self.driver.find_element_by_xpath('//input[@value="Log In"]').click() self.driver.implicitly_wait(10) self.driver.find_element_by_link_text("Home").click() #picture input brightness = self.driver.find_element_by_id("id_brightness") brightness.send_keys("10") content = self.driver.find_element_by_id("id_content") content.send_keys("test") pic = self.driver.find_element_by_id("id_pic") pic.send_keys("/home/brian/Desktop/CarbonCopy/CC/ribbit/pictures/uploaded_files/test.jpg") #This should be the addresses of your picture self.driver.find_element_by_xpath('//input[@value="Post!"]').click() self.driver.implicitly_wait(10) self.driver.find_element_by_link_text("Public Profiles").click() self.driver.implicitly_wait(10) self.driver.find_element_by_link_text("My Profile").click() self.driver.implicitly_wait(10) self.driver.find_element_by_link_text("Public Posts").click() self.driver.find_element_by_xpath('//input[@value="Log Out"]').click()
mit
3,674,270,257,799,125,000
32.89604
138
0.701913
false
3.116523
true
false
false
keans/dstools
dstools/utils.py
1
1422
import itertools


def pairwise(iterable):
    """
    s -> (s0,s1), (s1,s2), (s2, s3), ...
    """
    a, b = itertools.tee(iterable)
    next(b, None)
    return list(zip(a, b))


def str2bool(v):
    """
    converts a string to a boolean
    """
    return v.lower() in ("yes", "true", "t", "1")


def chunks(li, size):
    """
    returns the given list in chunks of given size
    """
    for i in range(0, len(li), size):
        yield li[i:i+size]


def ngram(text, n=3):
    """
    return ngrams of the given text
    """
    for i in range(len(text) - n + 1):
        yield text[i:i+n]


def sizeof_fmt(no_bytes, unit=None, kibi=True):
    """
    returns a human friendly output of the given number of bytes
    in the given unit (or selecting an auto unit, if not provided)
    """
    units = list("kMGTPEZY")
    assert(not unit or (unit in units))

    if kibi:
        # kilo binary: 2**x
        base, suffix = (1024.0, "iB")
    else:
        # kilo: 10**x
        base, suffix = (1000.0, "B")

    if unit in units:
        # calculate size in the target unit
        no_bytes = no_bytes / (base ** (units.index(unit) + 1))
    else:
        # find a useful representation
        for no, unit in enumerate(units):
            if -base < no_bytes < base:
                unit = units[no - 1]
                break
            no_bytes /= base

    return "{:3.2f} {}{}".format(no_bytes, unit, suffix)
mit
-6,534,625,633,302,136,000
21.571429
66
0.530942
false
3.276498
false
false
false
OpenTrons/opentrons_sdk
api/src/opentrons/deck_calibration/endpoints.py
1
24629
from uuid import uuid1 from typing import Dict, Tuple, Optional, NamedTuple import logging from enum import Enum try: from opentrons import instruments except ImportError: pass from opentrons.config import pipette_config, robot_configs, feature_flags from opentrons.types import Mount, Point from opentrons.hardware_control.types import CriticalPoint from opentrons.deck_calibration import jog, position, dots_set, z_pos from opentrons.util.linal import add_z, solve, identity_deck_transform mount_by_name = {'left': Mount.LEFT, 'right': Mount.RIGHT} log = logging.getLogger(__name__) class SessionWrapper: """Wrapper for single instance of SessionManager""" def __init__(self): self._session = None @property def session(self) -> Optional['SessionManager']: """Get access to the session manager""" return self._session @session.setter def session(self, s: Optional['SessionManager']): """Update the session manager""" self._session = s session_wrapper = SessionWrapper() class DeckCalibrationPoint(str, Enum): """ The name of a point relative to deck calibration. The number points are calibration crosses ("1" in slot 1, "2" in slot 3, "3" in slot 7); "safeZ" is a safe height above the deck, "attachTip" is a good place to go for the user to attach a tip. """ one = "1" two = "2" three = "3" safeZ = "safeZ" attachTip = "attachTip" def expected_points(): slot_1_lower_left,\ slot_3_lower_right,\ slot_7_upper_left = dots_set() return { DeckCalibrationPoint.one: slot_1_lower_left, DeckCalibrationPoint.two: slot_3_lower_right, DeckCalibrationPoint.three: slot_7_upper_left} def safe_points() -> Dict[str, Tuple[float, float, float]]: # Safe points are defined as 5mm toward the center of the deck in x, y and # 10mm above the deck. User is expect to jog to the critical point from the # corresponding safe point, to avoid collision depending on direction of # misalignment between the deck and the gantry. slot_1_lower_left, \ slot_3_lower_right, \ slot_7_upper_left = expected_points().values() slot_1_safe_point = ( slot_1_lower_left[0] + 5, slot_1_lower_left[1] + 5, 10) slot_3_safe_point = ( slot_3_lower_right[0] - 5, slot_3_lower_right[1] + 5, 10) slot_7_safe_point = ( slot_7_upper_left[0] + 5, slot_7_upper_left[1] - 5, 10) attach_tip_point = (200, 90, 130) return { DeckCalibrationPoint.one: slot_1_safe_point, DeckCalibrationPoint.two: slot_3_safe_point, DeckCalibrationPoint.three: slot_7_safe_point, DeckCalibrationPoint.safeZ: z_pos, DeckCalibrationPoint.attachTip: attach_tip_point } def _get_uuid() -> str: return str(uuid1()) class SessionManager: """ Creates a session manager to handle all commands required for factory calibration. Before issuing a movement command, the following must be done: 1. Create a session manager 2. Initialize a pipette 3. 
Select the current pipette """ def __init__(self, hardware): self.id = _get_uuid() self.pipettes = {} self.current_mount = None self.current_model = None self.tip_length = None self.points = {k: None for k in expected_points().keys()} self.z_value = None self.cp = None self.pipette_id = None self.adapter = hardware.sync self.current_transform = identity_deck_transform() self.backup_gantry_cal = self.adapter.config.gantry_calibration robot_configs.backup_configuration(self.adapter.config) # Start from fresh identity matrix every calibration session self.adapter.update_config(gantry_calibration=list( map(lambda i: list(i), self.current_transform))) def init_pipette(session): """ Finds pipettes attached to the robot currently and chooses the correct one to add to the session. :return: The pipette type and mount chosen for deck calibration """ pipette_info = set_current_mount(session) pipette = pipette_info['pipette'] res = {} if pipette: session.current_model = pipette_info['model'] if not feature_flags.use_protocol_api_v2(): mount = pipette.mount session.current_mount = mount else: mount = pipette.get('mount') session.current_mount = mount_by_name[mount] session.pipettes[mount] = pipette res = {'mount': mount, 'model': pipette_info['model']} log.info("Pipette info {}".format(session.pipettes)) return res def get_pipettes(sess: SessionManager): if not feature_flags.use_protocol_api_v2(): attached_pipettes = sess.adapter.get_attached_pipettes() left_pipette = None right_pipette = None left = attached_pipettes.get('left') right = attached_pipettes.get('right') if left['model'] in pipette_config.config_models: left_pipette = instruments.pipette_by_name( 'left', left['name']) if right['model'] in pipette_config.config_models: right_pipette = instruments.pipette_by_name( 'right', right['name']) else: attached_pipettes = sess.adapter.attached_instruments left_pipette = attached_pipettes.get(Mount.LEFT) right_pipette = attached_pipettes.get(Mount.RIGHT) return right_pipette, left_pipette def set_current_mount(session: SessionManager): """ Choose the pipette in which to execute commands. If there is no pipette, or it is uncommissioned, the pipette is not mounted. :attached_pipettes attached_pipettes: Information obtained from the current pipettes attached to the robot. 
This looks like the following: :dict with keys 'left' and 'right' and a model string for each mount, or 'uncommissioned' if no model string available :return: The selected pipette """ pipette = None right_channel = None left_channel = None right_pipette, left_pipette = get_pipettes(session) if right_pipette: if not feature_flags.use_protocol_api_v2(): right_channel = right_pipette.channels else: right_channel = right_pipette.get('channels') right_pipette['mount'] = 'right' if left_pipette: if not feature_flags.use_protocol_api_v2(): left_channel = left_pipette.channels else: left_channel = left_pipette.get('channels') left_pipette['mount'] = 'left' if right_channel == 1: pipette = right_pipette session.cp = CriticalPoint.NOZZLE elif left_channel == 1: pipette = left_pipette session.cp = CriticalPoint.NOZZLE elif right_pipette: pipette = right_pipette session.cp = CriticalPoint.FRONT_NOZZLE elif left_pipette: pipette = left_pipette session.cp = CriticalPoint.FRONT_NOZZLE model, pip_id = _get_model_name(pipette, session.adapter) session.pipette_id = pip_id return {'pipette': pipette, 'model': model} def _get_model_name(pipette, adapter): model = None pip_id = None if pipette: if not feature_flags.use_protocol_api_v2(): model = pipette.model pip_info = adapter.get_attached_pipettes()[pipette.mount] pip_id = pip_info['id'] else: model = pipette.get('model') mount = Mount.LEFT if pipette['mount'] == 'left' else Mount.RIGHT pip_info = adapter.attached_instruments[mount] pip_id = pip_info['pipette_id'] return model, pip_id # -------------- Route Fns ----------------------------------------------- # Note: endpoints should not call these functions directly, to ensure that # session protections are applied--should be called through the dispatch # endpoint # ------------------------------------------------------------------------ class CommandResult(NamedTuple): success: bool message: str async def attach_tip(data) -> CommandResult: """ Attach a tip to the current pipette :param data: a dict that with schema: { 'tipLength': a float representing how much the length of a pipette increases when a tip is added } """ if not session_wrapper.session: raise NoSessionInProgress() tip_length = data.get('tipLength') if not tip_length: message = 'Error: "tipLength" must be specified in request' status = False else: if not feature_flags.use_protocol_api_v2(): pipette = session_wrapper.session.pipettes[ session_wrapper.session.current_mount] if pipette.tip_attached: log.warning('attach tip called while tip already attached') pipette._remove_tip(pipette._tip_length) pipette._add_tip(tip_length) else: session_wrapper.session.adapter.add_tip( session_wrapper.session.current_mount, tip_length) if session_wrapper.session.cp == CriticalPoint.NOZZLE: session_wrapper.session.cp = CriticalPoint.TIP session_wrapper.session.tip_length = tip_length message = "Tip length set: {}".format(tip_length) status = True return CommandResult(success=status, message=message) async def detach_tip(data) -> CommandResult: """ Detach the tip from the current pipette :param data: unused """ if not session_wrapper.session: raise NoSessionInProgress() if not feature_flags.use_protocol_api_v2(): pipette = session_wrapper.session.pipettes[ session_wrapper.session.current_mount] if not pipette.tip_attached: log.warning('detach tip called with no tip') pipette._remove_tip(session_wrapper.session.tip_length) else: session_wrapper.session.adapter.remove_tip( session_wrapper.session.current_mount) if session_wrapper.session.cp == CriticalPoint.TIP: 
session_wrapper.session.cp = CriticalPoint.NOZZLE session_wrapper.session.tip_length = None return CommandResult(success=True, message="Tip removed") async def run_jog(data: dict) -> CommandResult: """ Allow the user to jog the selected pipette around the deck map :param data: a dict with schema: { 'axis': The current axis you wish to move 'direction': The direction you wish to move (+ or -) 'step': The increment you wish to move } :return: The position moved to based on axis, direction, step given by the user. """ if not session_wrapper.session: raise NoSessionInProgress() axis = data.get('axis') direction = data.get('direction') step = data.get('step') if axis not in {'x', 'y', 'z'}: message = '"axis" must be "x", "y", or "z"' status = False elif direction not in {-1, 1}: message = '"direction" must be -1 or 1' status = False elif step is None: message = '"step" must be specified' status = False else: position = jog( axis, direction, step, session_wrapper.session.adapter, session_wrapper.session.current_mount, session_wrapper.session.cp) message = 'Jogged to {}'.format(position) status = True return CommandResult(success=status, message=message) async def move(data) -> CommandResult: """ Allow the user to move the selected pipette to a specific point :param data: a dict with schema: { 'point': The name of the point to move to. Must be one of ["1", "2", "3", "safeZ", "attachTip"] } :return: The position you are moving to """ if not session_wrapper.session: raise NoSessionInProgress() point_name = data.get('point') point = safe_points().get(point_name) if point and len(point) == 3: if not feature_flags.use_protocol_api_v2(): pipette = session_wrapper.session.pipettes[ session_wrapper.session.current_mount] channels = pipette.channels # For multichannel pipettes in the V1 session, we use the tip closest # to the front of the robot rather than the back (this is the tip that # would go into well H1 of a plate when pipetting from the first row of # a 96 well plate, for instance). Since moves are issued for the A1 tip # we have to adjust the target point by 2 * Y_OFFSET_MULTI (where the # offset value is the distance from the axial center of the pipette to # the A1 tip). By sending the A1 tip to to the adjusted target, the H1 # tip should go to the desired point. Y_OFFSET_MULT must then be backed # out of xy positions saved in the `save_xy` handler # (not 2 * Y_OFFSET_MULTI, because the axial center of the pipette # will only be off by 1* Y_OFFSET_MULTI). if not channels == 1: x = point[0] y = point[1] + pipette_config.Y_OFFSET_MULTI * 2 z = point[2] point = (x, y, z) # hack: z=150mm is not a safe point for a gen2 pipette with a tip # attached, since their home location is z=+172mm and both 300ul # and 1000ul tips are more than 22mm long. This isn't an issue for # apiv2 because it can select the NOZZLE critical point. 
if pipette.tip_attached and point_name == 'attachTip': point = (point[0], point[1], point[2]-pipette._tip_length) pipette.move_to((session_wrapper.session.adapter.deck, point), strategy='arc') else: if not point_name == 'attachTip': intermediate_pos = position( session_wrapper.session.current_mount, session_wrapper.session.adapter, session_wrapper.session.cp) session_wrapper.session.adapter.move_to( session_wrapper.session.current_mount, Point( x=intermediate_pos[0], y=intermediate_pos[1], z=session_wrapper.session.tip_length), critical_point=session_wrapper.session.cp) session_wrapper.session.adapter.move_to( session_wrapper.session.current_mount, Point(x=point[0], y=point[1], z=session_wrapper.session.tip_length), critical_point=session_wrapper.session.cp) session_wrapper.session.adapter.move_to( session_wrapper.session.current_mount, Point(x=point[0], y=point[1], z=point[2]), critical_point=session_wrapper.session.cp) else: if session_wrapper.session.cp == CriticalPoint.TIP: session_wrapper.session.cp = CriticalPoint.NOZZLE session_wrapper.session.adapter.move_to( session_wrapper.session.current_mount, Point(x=point[0], y=point[1], z=point[2]), critical_point=session_wrapper.session.cp) message = 'Moved to {}'.format(point) status = True else: message = '"point" must be one of "1", "2", "3", "safeZ", "attachTip"' status = False return CommandResult(success=status, message=message) async def save_xy(data) -> CommandResult: """ Save the current XY values for the calibration data :param data: a dict with schema: { 'point': a string ID ['1', '2', or '3'] of the calibration point to save } """ if not session_wrapper.session: raise NoSessionInProgress() valid_points = list(session_wrapper.session.points.keys()) point = data.get('point') if point not in valid_points: message = 'point must be one of {}'.format(valid_points) status = False elif not session_wrapper.session.current_mount: message = "Mount must be set before calibrating" status = False else: if not feature_flags.use_protocol_api_v2(): mount = 'Z' if session_wrapper.session.current_mount == 'left'\ else 'A' x, y, _ = position(mount, session_wrapper.session.adapter) if session_wrapper.session.pipettes[ session_wrapper.session.current_mount].channels != 1: # See note in `move` y = y - pipette_config.Y_OFFSET_MULTI if session_wrapper.session.current_mount == 'left': dx, dy, _ = session_wrapper.session.adapter.config.mount_offset x = x + dx y = y + dy else: x, y, _ = position( session_wrapper.session.current_mount, session_wrapper.session.adapter, session_wrapper.session.cp) session_wrapper.session.points[point] = (x, y) message = "Saved point {} value: {}".format( point, session_wrapper.session.points[point]) status = True return CommandResult(success=status, message=message) async def save_z(data) -> CommandResult: """ Save the current Z height value for the calibration data :param data: unused """ if not session_wrapper.session: raise NoSessionInProgress() if not session_wrapper.session.tip_length: message = "Tip length must be set before calibrating" status = False else: if not feature_flags.use_protocol_api_v2(): mount = 'Z' if session_wrapper.session.current_mount == 'left' \ else 'A' actual_z = position( mount, session_wrapper.session.adapter)[-1] length_offset = pipette_config.load( session_wrapper.session.current_model, session_wrapper.session.pipette_id).model_offset[-1] session_wrapper.session.z_value =\ actual_z - session_wrapper.session.tip_length + length_offset else: session_wrapper.session.z_value = position( 
session_wrapper.session.current_mount, session_wrapper.session.adapter, session_wrapper.session.cp)[-1] session_wrapper.session.current_transform[2][3] =\ session_wrapper.session.z_value session_wrapper.session.adapter.update_config( gantry_calibration=list( list(i) for i in session_wrapper.session.current_transform ) ) message = "Saved z: {}".format(session_wrapper.session.z_value) status = True return CommandResult(success=status, message=message) async def save_transform(data) -> CommandResult: """ Calculate the transformation matrix that calibrates the gantry to the deck :param data: unused """ if not session_wrapper.session: raise NoSessionInProgress() if any([v is None for v in session_wrapper.session.points.values()]): message = "Not all points have been saved" status = False else: # expected values based on mechanical drawings of the robot expected_pos = expected_points() expected = [ expected_pos[p] for p in expected_pos.keys()] # measured data actual = [session_wrapper.session.points[p] for p in sorted(session_wrapper.session.points.keys())] # Generate a 2 dimensional transform matrix from the two matricies flat_matrix = solve(expected, actual).round(4) # replace relevant X, Y and angular components # [[cos_x, sin_y, const_zero, delta_x___], # [-sin_x, cos_y, const_zero, delta_y___], # [const_zero, const_zero, const_one_, delta_z___], # [const_zero, const_zero, const_zero, const_one_]] session_wrapper.session.current_transform = \ add_z(flat_matrix, session_wrapper.session.z_value) session_wrapper.session.adapter.update_config( gantry_calibration=list( list(i) for i in session_wrapper.session.current_transform) ) new_gantry_cal =\ session_wrapper.session.adapter.config.gantry_calibration session_wrapper.session.backup_gantry_cal = new_gantry_cal robot_configs.save_deck_calibration( session_wrapper.session.adapter.config) message = "Config file saved and backed up" status = True return CommandResult(success=status, message=message) async def release(data) -> CommandResult: """ Release a session :param data: unused """ if not session_wrapper.session: raise NoSessionInProgress() if not feature_flags.use_protocol_api_v2(): session_wrapper.session.adapter.remove_instrument('left') session_wrapper.session.adapter.remove_instrument('right') else: session_wrapper.session.adapter.cache_instruments() full_gantry_cal = session_wrapper.session.backup_gantry_cal session_wrapper.session.adapter.update_config( gantry_calibration=full_gantry_cal) session_wrapper.session = None return CommandResult(success=True, message="calibration session released") # ---------------------- End Route Fns ------------------------- # The description of the routes class CalibrationCommand(str, Enum): run_jog = "jog" move = "move" save_xy = "save xy" attach_tip = "attach tip" detach_tip = "detach tip" save_z = "save z" save_transform = "save transform" release = "release" # Router must be defined after all route functions router = { CalibrationCommand.run_jog: run_jog, CalibrationCommand.move: move, CalibrationCommand.save_xy: save_xy, CalibrationCommand.attach_tip: attach_tip, CalibrationCommand.detach_tip: detach_tip, CalibrationCommand.save_z: save_z, CalibrationCommand.save_transform: save_transform, CalibrationCommand.release: release } class SessionInProgress(Exception): pass class NoSessionInProgress(Exception): pass class SessionForbidden(Exception): pass class CreateSessionResult(NamedTuple): token: str pipette: Dict async def create_session(force: bool, hardware) -> CreateSessionResult: """ Begins the 
session manager for factory calibration, if a session is not already in progress, or if the "force" key is specified in the request. To force, use the following body: :param force: force creation of a session :param hardware: hardware instance :return: The current session ID token or an error message """ if session_wrapper.session and not force: raise SessionInProgress( 'Error, session in progress. Use "force" key in request ' 'body to override') if force and session_wrapper.session: await release({}) session_wrapper.session = SessionManager(hardware) res = init_pipette(session_wrapper.session) if not res: session_wrapper.session = None raise SessionForbidden('Error, pipette not recognized') return CreateSessionResult(token=session_wrapper.session.id, pipette=res) async def dispatch(token: str, command: str, command_data) -> CommandResult: """ Routes commands to subhandlers based on the command field in the body. :param token: The session token. Must match the current session :param command: The calibration command :param command_data: The data to pass to command router """ if not session_wrapper.session: raise NoSessionInProgress("Session must be started before " "issuing commands") log.info("Dispatching token=%s, command=%s, command_data=%s", token, command, command_data) if token != session_wrapper.session.id: raise SessionForbidden(f"Invalid token: {token}") try: command = CalibrationCommand(command) res = await router[command](data=command_data) except (ValueError, KeyError): raise SessionForbidden( f"Command \"{command}\" is unknown and cannot be executed") return res
apache-2.0
6,045,012,831,655,699,000
34.335725
79
0.618336
false
3.977552
true
false
false
benitesf/Skin-Lesion-Analysis-Towards-Melanoma-Detection
main.py
1
9476
# Import methods of features extraction from features_extraction.feature_extraction import FeatureExtraction # Import methods of learning from learning.learning import neural_network # Import methods of classification from classification.classification import classify, confusion_matrix, total_error, local_error # from skimage import io from PIL import Image # Import util methods from sklearn.model_selection import train_test_split import util.dirhandler as dh import config as cfg import numpy as np import time import sys """ Get train and test set ---------------------- """ all_melanoma = sorted(dh.get_file_name_dir(cfg.melanoma_path, cfg.melanoma_extension)) all_ground = sorted(dh.get_file_name_dir(cfg.ground_path, cfg.ground_extension)) melanoma_train, melanoma_test, ground_train, ground_test = train_test_split(all_melanoma, all_ground, test_size=0.25, random_state=25) """ ---------------------- """ """ Feature Extraction ------------------ """ feature = FeatureExtraction() start_t = time.time() X, y = feature.second_method(melanoma_train, ground_train) feature_t = (time.time() - start_t)/60 # minutes """ ------------------ """ """ Training Neural Network ----------------------- """ # Training the neural network with 83.3 % of the array features X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.16666) classifier = neural_network() start_t = time.time() classifier.fit(X_train, y_train) classifier_t = (time.time() - start_t)/60 # minutes score_test = classifier.score(X_test, y_test) score_train = classifier.score(X_train, y_train) """ ----------------------- """ """ Classify test images --------------- """ melanoma_list = melanoma_test ground_list = ground_test seg, tim, dim = classify(melanoma_list, ground_list, feature, classifier, block=True) """ --------------- """ """ Accuracy --------- """ confmat = confusion_matrix(seg, ground_list) local_err = local_error(confmat) sensitivity, specificity, accuracy = total_error(local_err) """ --------- """ """ Measure of times of execution ----------------------------- """ tim = np.array(tim) # sec dim = np.array(dim) dim = dim[0:,0] * dim[0:,1] t_by_pix = (tim*(10**6)) / dim # microsec / pix tim /= 60 # min total_time = (tim/60).sum() # total hours mean_time = tim.mean() # mean minutes std_time = tim.std() # std minutes """ ----------------------------- """ """ Saving values ------------- """ files = [f.split('.')[0]+'_classified.jpg' for f in melanoma_list] path_save = 'resultados/red3/preprocesado/test/' for s, f in zip(seg, files): img = Image.fromarray(s) img.convert('L').save(path_save + f) with open(path_save + 'Measures.txt', 'w') as output: output.write('---------------\n') output.write('---- RED 3 ----\n') output.write('---------------\n\n') output.write('Data Base: ' + cfg.melanoma_path + '\n') output.write('Number of images: ' + str(cfg.nImage) + '\n') output.write('Number of fields: ' + str(cfg.nCells) + '\n') output.write('Number of images to train: ' + str(len(melanoma_train)) + '\n') output.write('Number of image to test: ' + str(len(melanoma_test)) + '\n') output.write('Size of Train from Train_Images: ' + str(X_train.shape) + '\n') output.write('Size of Test from Train_Images: ' + str(X_test.shape) + '\n') output.write('Type of segmentation: block\n\n') output.write(classifier.__str__()+'\n\n') output.write('Final function value: ' + str(classifier.loss_)+'\n\n') output.write('-------------------------------------------------------------------------\n') output.write('Time of execution: \n') 
output.write('-------------------------------------------------------------------------\n\n') output.write('Feature Extraction: \n') output.write('\tTime: ' + str(feature_t) + ' min\n') output.write('Neural Network Training:\n') output.write('\tTime: ' + str(classifier_t) + ' min\n') output.write('Segmentation by image:\n') output.write('\tTotal: ' + str(total_time) + ' hrs\n') output.write('\tMean: ' + str(mean_time) + '+-' + str(std_time) + ' min\n') output.write('Segmentation by pixel:\n') output.write('\tMean: ' + str(t_by_pix.mean()) + '+-' + str(t_by_pix.std()) + ' mircosec/pix\n') output.write('-------------------------------------------------------------------------\n\n') output.write('Score:\n') output.write('\tX_train: ' + str(score_train) + '\n') output.write('\tX_test: ' + str(score_test) + '\n') output.write('-------------------------------------------------------------------------\n\n') output.write('Total error\n') output.write('\tSensitivity: ' + str(sensitivity[0]) + '+-' + str(sensitivity[1]) + '\n') output.write('\tSpecificity: ' + str(specificity[0]) + '+-' + str(specificity[1]) + '\n') output.write('\tAccuracy: ' + str(accuracy[0]) + '+-' + str(accuracy[1]) + '\n') output.write('-------------------------------------------------------------------------\n\n') output.write('Numero total de pixeles: ' + str(dim.sum()) + '\n') output.write('-------------------------------------------------------------------------\n\n') output.write('Local error: \n') output.write('\t[TP\tFP\tFN\tTN]|[sensitivity, specificity, accuracy]\t\n') for a, g, l, t, d in zip(confmat, ground_list, local_err, tim, dim): output.write(str(a) + '\t' + g + '\t' + str(l) + '\t' + str(t) + ' min' + '\t' + str(d) + ' pix\n') """ ------------- """ """ Classify train images --------------------- """ melanoma_list = melanoma_train ground_list = ground_train seg, tim, dim = classify(melanoma_list, ground_list, feature, classifier, block=True) """ --------------------- """ """ Accuracy --------- """ confmat = confusion_matrix(seg, ground_list) local_err = local_error(confmat) sensitivity, specificity, accuracy = total_error(local_err) """ --------- """ """ Measure of times of execution ----------------------------- """ tim = np.array(tim) # sec dim = np.array(dim) dim = dim[0:,0] * dim[0:,1] t_by_pix = (tim*(10**6)) / dim # microsec / pix tim /= 60 # min total_time = (tim/60).sum() # total hours mean_time = tim.mean() # mean minutes std_time = tim.std() # std minutes """ ----------------------------- """ """ Saving values ------------- """ files = [f.split('.')[0]+'_classified.jpg' for f in melanoma_list] path_save = 'resultados/red3/preprocesado/train/' for s, f in zip(seg, files): img = Image.fromarray(s) img.convert('L').save(path_save + f) with open(path_save + 'Measures.txt', 'w') as output: output.write('---------------\n') output.write('---- RED 3 ----\n') output.write('---------------\n\n') output.write('Data Base: ' + cfg.melanoma_path + '\n') output.write('Number of images: ' + str(cfg.nImage) + '\n') output.write('Number of fields: ' + str(cfg.nCells) + '\n') output.write('Number of images to train: ' + str(len(melanoma_train)) + '\n') output.write('Number of image to test: ' + str(len(melanoma_test)) + '\n') output.write('Size of Train from Train_Images: ' + str(X_train.shape) + '\n') output.write('Size of Test from Train_Images: ' + str(X_test.shape) + '\n') output.write('Type of segmentation: block\n\n') output.write(classifier.__str__()+'\n\n') output.write('Final function value: ' + 
str(classifier.loss_)+'\n\n') output.write('-------------------------------------------------------------------------\n') output.write('Time of execution: \n') output.write('-------------------------------------------------------------------------\n\n') output.write('Feature Extraction: \n') output.write('\tTime: ' + str(feature_t) + ' min\n') output.write('Neural Network Training:\n') output.write('\tTime: ' + str(classifier_t) + ' min\n') output.write('Segmentation by image:\n') output.write('\tTotal: ' + str(total_time) + ' hrs\n') output.write('\tMean: ' + str(mean_time) + '+-' + str(std_time) + ' min\n') output.write('Segmentation by pixel:\n') output.write('\tMean: ' + str(t_by_pix.mean()) + '+-' + str(t_by_pix.std()) + ' mircosec/pix\n') output.write('-------------------------------------------------------------------------\n\n') output.write('Score:\n') output.write('\tX_train: ' + str(score_train) + '\n') output.write('\tX_test: ' + str(score_test) + '\n') output.write('-------------------------------------------------------------------------\n\n') output.write('Total error\n') output.write('\tSensitivity: ' + str(sensitivity[0]) + '+-' + str(sensitivity[1]) + '\n') output.write('\tSpecificity: ' + str(specificity[0]) + '+-' + str(specificity[1]) + '\n') output.write('\tAccuracy: ' + str(accuracy[0]) + '+-' + str(accuracy[1]) + '\n') output.write('-------------------------------------------------------------------------\n\n') output.write('Numero total de pixeles: ' + str(dim.sum()) + '\n') output.write('-------------------------------------------------------------------------\n\n') output.write('Local error: \n') output.write('\t[TP\tFP\tFN\tTN]|[sensitivity, specificity, accuracy]\t\n') for a, g, l, t, d in zip(confmat, ground_list, local_err, tim, dim): output.write(str(a) + '\t' + g + '\t' + str(l) + '\t' + str(t) + ' min' + '\t' + str(d) + ' pix\n') """ ------------- """
mit
-516,867,449,965,981,630
32.132867
117
0.529232
false
3.194875
true
false
false
snakeleon/YouCompleteMe-x64
third_party/ycmd/third_party/watchdog_deps/watchdog/src/watchdog/events.py
2
16163
# coding: utf-8 # # Copyright 2011 Yesudeep Mangalapilly <[email protected]> # Copyright 2012 Google, Inc & contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ :module: watchdog.events :synopsis: File system events and event handlers. :author: [email protected] (Yesudeep Mangalapilly) :author: [email protected] (Mickaël Schoentgen) Event Classes ------------- .. autoclass:: FileSystemEvent :members: :show-inheritance: :inherited-members: .. autoclass:: FileSystemMovedEvent :members: :show-inheritance: .. autoclass:: FileMovedEvent :members: :show-inheritance: .. autoclass:: DirMovedEvent :members: :show-inheritance: .. autoclass:: FileModifiedEvent :members: :show-inheritance: .. autoclass:: DirModifiedEvent :members: :show-inheritance: .. autoclass:: FileCreatedEvent :members: :show-inheritance: .. autoclass:: FileClosedEvent :members: :show-inheritance: .. autoclass:: DirCreatedEvent :members: :show-inheritance: .. autoclass:: FileDeletedEvent :members: :show-inheritance: .. autoclass:: DirDeletedEvent :members: :show-inheritance: Event Handler Classes --------------------- .. autoclass:: FileSystemEventHandler :members: :show-inheritance: .. autoclass:: PatternMatchingEventHandler :members: :show-inheritance: .. autoclass:: RegexMatchingEventHandler :members: :show-inheritance: .. autoclass:: LoggingEventHandler :members: :show-inheritance: """ import os.path import logging import re from watchdog.utils.patterns import match_any_paths EVENT_TYPE_MOVED = 'moved' EVENT_TYPE_DELETED = 'deleted' EVENT_TYPE_CREATED = 'created' EVENT_TYPE_MODIFIED = 'modified' EVENT_TYPE_CLOSED = 'closed' class FileSystemEvent: """ Immutable type that represents a file system event that is triggered when a change occurs on the monitored file system. All FileSystemEvent objects are required to be immutable and hence can be used as keys in dictionaries or be added to sets. """ event_type = None """The type of the event as a string.""" is_directory = False """True if event was emitted for a directory; False otherwise.""" is_synthetic = False """ True if event was synthesized; False otherwise. These are events that weren't actually broadcast by the OS, but are presumed to have happened based on other, actual events. """ def __init__(self, src_path): self._src_path = src_path @property def src_path(self): """Source path of the file system object that triggered this event.""" return self._src_path def __str__(self): return self.__repr__() def __repr__(self): return ("<%(class_name)s: event_type=%(event_type)s, " "src_path=%(src_path)r, " "is_directory=%(is_directory)s>" ) % (dict( class_name=self.__class__.__name__, event_type=self.event_type, src_path=self.src_path, is_directory=self.is_directory)) # Used for comparison of events. 
@property def key(self): return (self.event_type, self.src_path, self.is_directory) def __eq__(self, event): return self.key == event.key def __ne__(self, event): return self.key != event.key def __hash__(self): return hash(self.key) class FileSystemMovedEvent(FileSystemEvent): """ File system event representing any kind of file system movement. """ event_type = EVENT_TYPE_MOVED def __init__(self, src_path, dest_path): super().__init__(src_path) self._dest_path = dest_path @property def dest_path(self): """The destination path of the move event.""" return self._dest_path # Used for hashing this as an immutable object. @property def key(self): return (self.event_type, self.src_path, self.dest_path, self.is_directory) def __repr__(self): return ("<%(class_name)s: src_path=%(src_path)r, " "dest_path=%(dest_path)r, " "is_directory=%(is_directory)s>" ) % (dict(class_name=self.__class__.__name__, src_path=self.src_path, dest_path=self.dest_path, is_directory=self.is_directory)) # File events. class FileDeletedEvent(FileSystemEvent): """File system event representing file deletion on the file system.""" event_type = EVENT_TYPE_DELETED class FileModifiedEvent(FileSystemEvent): """File system event representing file modification on the file system.""" event_type = EVENT_TYPE_MODIFIED class FileCreatedEvent(FileSystemEvent): """File system event representing file creation on the file system.""" event_type = EVENT_TYPE_CREATED class FileMovedEvent(FileSystemMovedEvent): """File system event representing file movement on the file system.""" class FileClosedEvent(FileSystemEvent): """File system event representing file close on the file system.""" event_type = EVENT_TYPE_CLOSED # Directory events. class DirDeletedEvent(FileSystemEvent): """File system event representing directory deletion on the file system.""" event_type = EVENT_TYPE_DELETED is_directory = True class DirModifiedEvent(FileSystemEvent): """ File system event representing directory modification on the file system. """ event_type = EVENT_TYPE_MODIFIED is_directory = True class DirCreatedEvent(FileSystemEvent): """File system event representing directory creation on the file system.""" event_type = EVENT_TYPE_CREATED is_directory = True class DirMovedEvent(FileSystemMovedEvent): """File system event representing directory movement on the file system.""" is_directory = True class FileSystemEventHandler: """ Base file system event handler that you can override methods from. """ def dispatch(self, event): """Dispatches events to the appropriate methods. :param event: The event object representing the file system event. :type event: :class:`FileSystemEvent` """ self.on_any_event(event) { EVENT_TYPE_CREATED: self.on_created, EVENT_TYPE_DELETED: self.on_deleted, EVENT_TYPE_MODIFIED: self.on_modified, EVENT_TYPE_MOVED: self.on_moved, EVENT_TYPE_CLOSED: self.on_closed, }[event.event_type](event) def on_any_event(self, event): """Catch-all event handler. :param event: The event object representing the file system event. :type event: :class:`FileSystemEvent` """ def on_moved(self, event): """Called when a file or a directory is moved or renamed. :param event: Event representing file/directory movement. :type event: :class:`DirMovedEvent` or :class:`FileMovedEvent` """ def on_created(self, event): """Called when a file or directory is created. :param event: Event representing file/directory creation. :type event: :class:`DirCreatedEvent` or :class:`FileCreatedEvent` """ def on_deleted(self, event): """Called when a file or directory is deleted. 
:param event: Event representing file/directory deletion. :type event: :class:`DirDeletedEvent` or :class:`FileDeletedEvent` """ def on_modified(self, event): """Called when a file or directory is modified. :param event: Event representing file/directory modification. :type event: :class:`DirModifiedEvent` or :class:`FileModifiedEvent` """ def on_closed(self, event): """Called when a file opened for writing is closed. :param event: Event representing file closing. :type event: :class:`FileClosedEvent` """ class PatternMatchingEventHandler(FileSystemEventHandler): """ Matches given patterns with file paths associated with occurring events. """ def __init__(self, patterns=None, ignore_patterns=None, ignore_directories=False, case_sensitive=False): super().__init__() self._patterns = patterns self._ignore_patterns = ignore_patterns self._ignore_directories = ignore_directories self._case_sensitive = case_sensitive @property def patterns(self): """ (Read-only) Patterns to allow matching event paths. """ return self._patterns @property def ignore_patterns(self): """ (Read-only) Patterns to ignore matching event paths. """ return self._ignore_patterns @property def ignore_directories(self): """ (Read-only) ``True`` if directories should be ignored; ``False`` otherwise. """ return self._ignore_directories @property def case_sensitive(self): """ (Read-only) ``True`` if path names should be matched sensitive to case; ``False`` otherwise. """ return self._case_sensitive def dispatch(self, event): """Dispatches events to the appropriate methods. :param event: The event object representing the file system event. :type event: :class:`FileSystemEvent` """ if self.ignore_directories and event.is_directory: return paths = [] if hasattr(event, 'dest_path'): paths.append(os.fsdecode(event.dest_path)) if event.src_path: paths.append(os.fsdecode(event.src_path)) if match_any_paths(paths, included_patterns=self.patterns, excluded_patterns=self.ignore_patterns, case_sensitive=self.case_sensitive): super().dispatch(event) class RegexMatchingEventHandler(FileSystemEventHandler): """ Matches given regexes with file paths associated with occurring events. """ def __init__(self, regexes=None, ignore_regexes=None, ignore_directories=False, case_sensitive=False): super().__init__() if regexes is None: regexes = [r".*"] if ignore_regexes is None: ignore_regexes = [] if case_sensitive: self._regexes = [re.compile(r) for r in regexes] self._ignore_regexes = [re.compile(r) for r in ignore_regexes] else: self._regexes = [re.compile(r, re.I) for r in regexes] self._ignore_regexes = [re.compile(r, re.I) for r in ignore_regexes] self._ignore_directories = ignore_directories self._case_sensitive = case_sensitive @property def regexes(self): """ (Read-only) Regexes to allow matching event paths. """ return self._regexes @property def ignore_regexes(self): """ (Read-only) Regexes to ignore matching event paths. """ return self._ignore_regexes @property def ignore_directories(self): """ (Read-only) ``True`` if directories should be ignored; ``False`` otherwise. """ return self._ignore_directories @property def case_sensitive(self): """ (Read-only) ``True`` if path names should be matched sensitive to case; ``False`` otherwise. """ return self._case_sensitive def dispatch(self, event): """Dispatches events to the appropriate methods. :param event: The event object representing the file system event. 
:type event: :class:`FileSystemEvent` """ if self.ignore_directories and event.is_directory: return paths = [] if hasattr(event, 'dest_path'): paths.append(os.fsdecode(event.dest_path)) if event.src_path: paths.append(os.fsdecode(event.src_path)) if any(r.match(p) for r in self.ignore_regexes for p in paths): return if any(r.match(p) for r in self.regexes for p in paths): super().dispatch(event) class LoggingEventHandler(FileSystemEventHandler): """Logs all the events captured.""" def __init__(self, logger=None): super().__init__() self.logger = logger or logging.root def on_moved(self, event): super().on_moved(event) what = 'directory' if event.is_directory else 'file' self.logger.info("Moved %s: from %s to %s", what, event.src_path, event.dest_path) def on_created(self, event): super().on_created(event) what = 'directory' if event.is_directory else 'file' self.logger.info("Created %s: %s", what, event.src_path) def on_deleted(self, event): super().on_deleted(event) what = 'directory' if event.is_directory else 'file' self.logger.info("Deleted %s: %s", what, event.src_path) def on_modified(self, event): super().on_modified(event) what = 'directory' if event.is_directory else 'file' self.logger.info("Modified %s: %s", what, event.src_path) def generate_sub_moved_events(src_dir_path, dest_dir_path): """Generates an event list of :class:`DirMovedEvent` and :class:`FileMovedEvent` objects for all the files and directories within the given moved directory that were moved along with the directory. :param src_dir_path: The source path of the moved directory. :param dest_dir_path: The destination path of the moved directory. :returns: An iterable of file system events of type :class:`DirMovedEvent` and :class:`FileMovedEvent`. """ for root, directories, filenames in os.walk(dest_dir_path): for directory in directories: full_path = os.path.join(root, directory) renamed_path = full_path.replace(dest_dir_path, src_dir_path) if src_dir_path else None event = DirMovedEvent(renamed_path, full_path) event.is_synthetic = True yield event for filename in filenames: full_path = os.path.join(root, filename) renamed_path = full_path.replace(dest_dir_path, src_dir_path) if src_dir_path else None event = FileMovedEvent(renamed_path, full_path) event.is_synthetic = True yield event def generate_sub_created_events(src_dir_path): """Generates an event list of :class:`DirCreatedEvent` and :class:`FileCreatedEvent` objects for all the files and directories within the given moved directory that were moved along with the directory. :param src_dir_path: The source path of the created directory. :returns: An iterable of file system events of type :class:`DirCreatedEvent` and :class:`FileCreatedEvent`. """ for root, directories, filenames in os.walk(src_dir_path): for directory in directories: event = DirCreatedEvent(os.path.join(root, directory)) event.is_synthetic = True yield event for filename in filenames: event = FileCreatedEvent(os.path.join(root, filename)) event.is_synthetic = True yield event
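As a hedged illustration of how the handler classes above are normally consumed (the watched path, the pattern, and the print-based handler are assumptions, not part of this module), a minimal observer loop might look like:

import time

from watchdog.events import PatternMatchingEventHandler
from watchdog.observers import Observer


class PyFileHandler(PatternMatchingEventHandler):
    """React only to *.py files and skip directory events."""

    def __init__(self):
        super().__init__(patterns=["*.py"], ignore_directories=True)

    def on_modified(self, event):
        # event is a FileModifiedEvent; src_path is the changed file.
        print("modified:", event.src_path)


if __name__ == "__main__":
    observer = Observer()
    observer.schedule(PyFileHandler(), path=".", recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()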
gpl-3.0
-7,516,226,166,804,324,000
27.55477
99
0.619602
false
4.19139
false
false
false
igorgai/django-custom-user
custom_user/forms.py
1
3880
"""EmailUser forms.""" import django from django import forms from django.contrib.auth import get_user_model from django.contrib.auth.forms import ReadOnlyPasswordHashField from django.utils.translation import ugettext_lazy as _ class EmailUserCreationForm(forms.ModelForm): """A form for creating new users. Includes all the required fields, plus a repeated password. """ error_messages = { 'duplicate_email': _("A user with that email already exists."), 'password_mismatch': _("The two password fields didn't match."), } password1 = forms.CharField( label=_("Password"), widget=forms.PasswordInput) password2 = forms.CharField( label=_("Password confirmation"), widget=forms.PasswordInput, help_text=_("Enter the same password as above, for verification.")) class Meta: model = get_user_model() fields = ('email',) def clean_email(self): """Clean form email. :return str email: cleaned email :raise forms.ValidationError: Email is duplicated """ # Since EmailUser.email is unique, this check is redundant, # but it sets a nicer error message than the ORM. See #13147. email = self.cleaned_data["email"] try: get_user_model()._default_manager.get(email=email) except get_user_model().DoesNotExist: return email raise forms.ValidationError( self.error_messages['duplicate_email'], code='duplicate_email', ) def clean_password2(self): """Check that the two password entries match. :return str password2: cleaned password2 :raise forms.ValidationError: password2 != password1 """ password1 = self.cleaned_data.get("password1") password2 = self.cleaned_data.get("password2") if password1 and password2 and password1 != password2: raise forms.ValidationError( self.error_messages['password_mismatch'], code='password_mismatch', ) return password2 def save(self, commit=True): """Save user. Save the provided password in hashed format. :return custom_user.models.EmailUser: user """ user = super(EmailUserCreationForm, self).save(commit=False) user.set_password(self.cleaned_data["password1"]) if commit: user.save() return user # Different password reset link in Django 1.9 if django.VERSION[:2] < (1, 9): password_reset_link = "password" else: password_reset_link = "../password" class EmailUserChangeForm(forms.ModelForm): """A form for updating users. Includes all the fields on the user, but replaces the password field with admin's password hash display field. """ password = ReadOnlyPasswordHashField( label=_("Password"), help_text=_( "Raw passwords are not stored, so there is no way to see this " "user's password, but you can change the password using " "<a href=\"{0}/\">this form</a>.".format(password_reset_link) ), ) class Meta: model = get_user_model() exclude = () def __init__(self, *args, **kwargs): """Init the form.""" super(EmailUserChangeForm, self).__init__(*args, **kwargs) f = self.fields.get('user_permissions') if f is not None: f.queryset = f.queryset.select_related('content_type') def clean_password(self): """Clean password. Regardless of what the user provides, return the initial value. This is done here, rather than on the field, because the field does not have access to the initial value. :return str password: """ return self.initial["password"]
bsd-3-clause
-1,206,752,125,892,193,000
28.846154
75
0.617268
false
4.439359
false
false
false
quater/calico-containers
calicoctl/calico_ctl/status.py
1
9262
# Copyright (c) 2015-2016 Tigera, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Usage: calicoctl status [--runtime=<RUNTIME>] Description: Print current status information regarding calico-node container and the BIRD routing daemon. Options: --runtime=<RUNTIME> Specify the runtime used to run the calico/node container, either "docker" or "rkt". [default: docker] """ import re import sys import subprocess32 from prettytable import PrettyTable from pycalico.datastore_errors import DataStoreError from requests import ConnectionError from subprocess32 import Popen, PIPE from connectors import docker_client, client from utils import hostname, RKT_CONTAINER_RE, enforce_root def status(arguments): """ Main dispatcher for status commands. Calls the corresponding helper function. :param arguments: A dictionary of arguments already processed through this file's docstring with docopt :return: None """ # Check runtime. runtime = arguments.get("--runtime") if not runtime in ["docker", "rkt"]: print "Invalid runtime specified: '%s'" % runtime sys.exit(1) # Start by locating the calico-node container and querying the package # summary file. if runtime == "rkt": enforce_root() check_container_status_rkt() else: check_container_status_docker() # Now query the host BGP details. If the AS number is not specified on the # host then it must be inheriting the default. try: bgp_ipv4, bgp_ipv6 = client.get_host_bgp_ips(hostname) bgp_as = client.get_host_as(hostname) if bgp_as is None: bgp_as = client.get_default_node_as() bgp_as += " (inherited)" except DataStoreError: print "Error connecting to etcd. Ensure ETCD_ENDPOINTS or ETCD_AUTHORITY is set properly." bgp_ipv4 = bgp_ipv6 = "unknown" bgp_as = "unknown" # TODO: Add additional information to the BIRD section: # TODO: - Include AS numbers of peers # TODO: - Include host name of peers when the peer is a calico-node # TODO: - Include details of peers configured multiple times print "\nIPv4 BGP status" if bgp_ipv4: print "IP: %s AS Number: %s" % (bgp_ipv4, bgp_as) pprint_bird_protocols(4) else: print "No IPv4 address configured.\n" print "IPv6 BGP status" if bgp_ipv6: print "IP: %s AS Number: %s" % (bgp_ipv6, bgp_as) pprint_bird_protocols(6) else: print "No IPv6 address configured.\n" def check_container_status_docker(): """ Checks and prints the calico/node container status when running in Docker. """ try: calico_node_info = filter(lambda container: "/calico-node" in container["Names"], docker_client.containers()) if len(calico_node_info) == 0: print "calico-node container not running" sys.exit(1) else: print "calico-node container is running. 
Status: %s" % \ calico_node_info[0]["Status"] libraries_cmd = docker_client.exec_create("calico-node", ["sh", "-c", "cat libraries.txt"]) libraries_out = docker_client.exec_start(libraries_cmd) result = re.search(r"^calico\s*\((.*)\)\s*$", libraries_out, re.MULTILINE) if result is not None: print "Running felix version %s" % result.group(1) except ConnectionError: print "Docker is not running" sys.exit(1) def check_container_status_rkt(): """ Checks and prints the calico/node container status when running in rkt. """ list_cmd = ["sudo", "rkt", "list"] p = Popen(list_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE) stdout, stderr = p.communicate() containers = RKT_CONTAINER_RE.findall(stdout) if p.returncode: print "Unable to list rkt containers: '%s'" % stderr.strip() sys.exit(1) if len(containers) == 0: print "calico-node container not running" sys.exit(1) else: # Get statuses for all calico/node containers, and determine # if any are running. statuses = [c[2] for c in containers] running = "running" in statuses # If one is running, status is "running". Else, use the status of # the first container. status = "running" if running else statuses[0] # Print status. If it at least one is running, this will display # "running" status. print "calico-node container status: %s" % status def pprint_bird_protocols(version): """ Pretty print the output from the BIRD "show protocols". This parses the existing output and lays it out in pretty printed table. :param version: The IP version (4 or 6). :return: None. """ # This needs to be run as root to access the bird data in /var/run/calico enforce_root() # Based on the IP version, run the appropriate BIRD command, and select # the appropriate separator char for an IP address. if getattr(sys, 'frozen', False): # We're running under pyinstaller birdcl = sys._MEIPASS + "/birdcl" else: birdcl = "birdcl" try: if version == 4: results = subprocess32.check_output( "echo show protocols | %s -s /var/run/calico/bird.ctl" % birdcl, shell=True) ip_sep = "." else: results = subprocess32.check_output( "echo show protocols | %s -s /var/run/calico/bird6.ctl" % birdcl, shell=True) ip_sep = ":" except subprocess32.CalledProcessError: print "Couldn't connect to bird." return # Parse the output from BIRD to extract the values in the protocol status # table. We'll further parse the name since that includes details about # the type of peer and the peer IP address. x = PrettyTable(["Peer address", "Peer type", "State", "Since", "Info"]) lines = results.split("\n") found_table = False for line in lines: # When BIRD displays its protocol table, it prints the bird> prompt and # then shifts the cursor to print back over the prompt. However, this # means that we get rogue prompts when parsing the output. For this # processing just remove the prompt if it is present. if line.startswith("bird>"): line = line[5:] # Skip blank lines. line = line.strip() if not line: continue # Split the line into columns based on whitespace separators. We split # a maximum of 5 times because the 6th "info" column may contain a # string that itself includes whitespace that should be maintained. columns = re.split("\s+", line.strip(), 5) # Loop until we find the table heading. if columns == ["name", "proto", "table", "state", "since", "info"]: found_table = True continue elif not found_table: continue # We expect either 5 or 6 columns depending on whether there was a # value in the info column. Anything else is not handled, so revert # to displaying the raw BIRD output. 
if not (5 <= len(columns) <= 6): found_table = False break # Parse the name, we name our BGP peers as "Mesh", "Node" or "Global" # followed by the IP address. Extract the info so we can pretty # print it. combined = columns[0] if combined.startswith("Mesh_"): name = combined[5:].replace("_", ip_sep) ptype = "node-to-node mesh" elif combined.startswith("Node_"): name = combined[5:].replace("_", ip_sep) ptype = "node specific" elif combined.startswith("Global_"): name = combined[7:].replace("_", ip_sep) ptype = "global" else: # This is not a BGP Peer, so do not include in the output. continue x.add_row([name, ptype, columns[3], columns[4], columns[5] if len(columns) == 6 else ""]) # If we parsed the table then pretty print the table, otherwise just output # the BIRD output directly. The first line of the BIRD output provides an # overall BIRD status. if found_table: print str(x) + "\n" else: print results + "\n"
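pprint_bird_protocols() above lays out the parsed BIRD peers with prettytable; the table-building pattern in isolation looks roughly like this (the peer rows are invented sample data):

from prettytable import PrettyTable

x = PrettyTable(["Peer address", "Peer type", "State", "Since", "Info"])
x.add_row(["172.17.8.102", "node-to-node mesh", "up", "2016-01-04", "Established"])
x.add_row(["10.20.30.40", "global", "start", "2016-01-05", "Idle"])

# str(x) renders the ASCII table exactly as status() prints it.
print(str(x) + "\n")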
apache-2.0
-3,644,788,646,197,634,000
35.464567
99
0.603865
false
4.109139
false
false
false
niwinz/Green-Mine
src/greenmine/wiki/views.py
1
4981
# -*- coding: utf-8 -*- from __future__ import absolute_import from django.core.urlresolvers import reverse from django.template.defaultfilters import slugify from django.shortcuts import get_object_or_404 from ..core.utils.slug import slugify_uniquely from ..core.generic import GenericView from ..core.decorators import login_required from ..scrum.models import Project from .models import WikiPage, WikiPageHistory from .forms import WikiPageEditForm class WikiPageView(GenericView): menu = ['wiki'] template_path = 'wiki-page.html' @login_required def get(self, request, pslug, wslug): project = get_object_or_404(Project, slug=pslug) self.check_role(request.user, project, [ ('project', 'view'), ('wiki', 'view'), ]) try: wikipage = project.wiki_pages.get(slug=slugify(wslug)) except WikiPage.DoesNotExist: return self.render_redirect(reverse('wiki-page-edit', args=[project.slug, slugify(wslug)])) context = { 'project': project, 'wikipage': wikipage, } return self.render_to_response(self.template_path, context) class WikiPageEditView(GenericView): menu = ['wiki'] template_path = 'wiki-page-edit.html' @login_required def get(self, request, pslug, wslug): project = get_object_or_404(Project, slug=pslug) self.check_role(request.user, project, [ ('project', 'view'), ('wiki', ('view', 'create', 'edit')), ]) try: wikipage = project.wiki_pages.get(slug=slugify(wslug)) except WikiPage.DoesNotExist: wikipage = None form = WikiPageEditForm(instance=wikipage) context = { 'form': form, 'project': project, } return self.render_to_response(self.template_path, context) @login_required def post(self, request, pslug, wslug): project = get_object_or_404(Project, slug=pslug) self.check_role(request.user, project, [ ('project', 'view'), ('wiki', ('view', 'create', 'edit')), ]) try: wikipage = project.wiki_pages.get(slug=slugify(wslug)) except WikiPage.DoesNotExist: wikipage = None form = WikiPageEditForm(request.POST, instance=wikipage) if not form.is_valid(): return self.render_json_errors(form.errors) wikipage_new = form.save(commit=False) if wikipage is not None: old_wikipage = WikiPage.objects.get(pk=wikipage.pk) history_entry = WikiPageHistory( wikipage = old_wikipage, content = old_wikipage.content, owner = old_wikipage.owner, created_date = old_wikipage.created_date, ) history_entry.save() if not wikipage_new.slug: wikipage_new.slug = slugify_uniquely(wslug, wikipage_new.__class__) if not wikipage_new.project_id: wikipage_new.project = project wikipage_new.owner = request.user wikipage_new.save() return self.render_json({'redirect_to': wikipage_new.get_view_url()}) class WikiPageHistoryView(GenericView): menu = ['wiki'] template_path = 'wiki-page-history-view.html' @login_required def get(self, request, pslug, wslug, hpk): project = get_object_or_404(Project, slug=pslug) self.check_role(request.user, project, [ ('project', 'view'), ('wiki', 'view'), ]) wikipage = get_object_or_404(project.wiki_pages, slug=wslug) history_entry = get_object_or_404(wikipage.history_entries, pk=hpk) context = { 'project': project, 'wikipage': wikipage, 'history_entry': history_entry, } return self.render_to_response(self.template_path, context) class WikipageDeleteView(GenericView): template_path = 'wiki-page-delete.html' def get_context(self): project = get_object_or_404(Project, slug=self.kwargs['pslug']) self.check_role(self.request.user, project, [ ('project', 'view'), ('wiki', ('view', 'delete')), ]) wikipage = get_object_or_404(project.wiki_pages, slug=self.kwargs['wslug']) context = { 'project': project, 'wikipage': 
wikipage, } return context @login_required def get(self, request, **kwargs): context = self.get_context() return self.render_to_response(self.template_path, context) @login_required def post(self, request, **kwargs): context = self.get_context() context['wikipage'].history_entries.all().delete() context['wikipage'].delete() return self.render_redirect(reverse('wiki-page', args = [context['project'].slug, 'home']))
bsd-3-clause
2,264,128,126,756,378,400
28.64881
83
0.592853
false
3.79939
false
false
false
akesterson/dpath-python
tests/test_util_set.py
1
1710
import dpath.util def test_set_existing_separator(): dict = { "a": { "b": 0, }, } dpath.util.set(dict, ';a;b', 1, separator=";") assert(dict['a']['b'] == 1) dict['a']['b'] = 0 dpath.util.set(dict, ['a', 'b'], 1, separator=";") assert(dict['a']['b'] == 1) def test_set_existing_dict(): dict = { "a": { "b": 0, }, } dpath.util.set(dict, '/a/b', 1) assert(dict['a']['b'] == 1) dict['a']['b'] = 0 dpath.util.set(dict, ['a', 'b'], 1) assert(dict['a']['b'] == 1) def test_set_existing_list(): dict = { "a": [ 0, ], } dpath.util.set(dict, '/a/0', 1) assert(dict['a'][0] == 1) dict['a'][0] = 0 dpath.util.set(dict, ['a', '0'], 1) assert(dict['a'][0] == 1) def test_set_filter(): def afilter(x): if int(x) == 31: return True return False dict = { "a": { "b": 0, "c": 1, "d": 31, } } dpath.util.set(dict, '/a/*', 31337, afilter=afilter) assert (dict['a']['b'] == 0) assert (dict['a']['c'] == 1) assert (dict['a']['d'] == 31337) dict = { "a": { "b": 0, "c": 1, "d": 31, } } dpath.util.set(dict, ['a', '*'], 31337, afilter=afilter) assert (dict['a']['b'] == 0) assert (dict['a']['c'] == 1) assert (dict['a']['d'] == 31337) def test_set_existing_path_with_separator(): dict = { "a": { 'b/c/d': 0, }, } dpath.util.set(dict, ['a', 'b/c/d'], 1) assert(len(dict['a']) == 1) assert(dict['a']['b/c/d'] == 1)
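The behaviour these tests exercise can be condensed into a short standalone snippet (the config dict is illustrative); as the tests suggest, dpath.util.set overwrites paths that already exist and accepts either a glob string or a list of keys:

import dpath.util

config = {"server": {"port": 8000, "debug": False}}

# Default '/' separator, glob addressing an existing leaf.
dpath.util.set(config, "/server/port", 9090)

# A custom separator and a list-style path work the same way.
dpath.util.set(config, ";server;debug", True, separator=";")
dpath.util.set(config, ["server", "port"], 8080)

assert config == {"server": {"port": 8080, "debug": True}}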
mit
6,502,466,740,530,720,000
17.791209
60
0.391813
false
2.807882
false
false
false
openlawlibrary/pygls
examples/json-extension/server/tests/unit/test_features.py
1
3751
############################################################################ # Copyright(c) Open Law Library. All rights reserved. # # See ThirdPartyNotices.txt in the project root for additional notices. # # # # Licensed under the Apache License, Version 2.0 (the "License") # # you may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # # # http: // www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable law or agreed to in writing, software # # distributed under the License is distributed on an "AS IS" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # # See the License for the specific language governing permissions and # # limitations under the License. # ############################################################################ import json from typing import Optional import pytest from mock import Mock from pygls.lsp.types import (DidCloseTextDocumentParams, DidOpenTextDocumentParams, TextDocumentIdentifier, TextDocumentItem) from pygls.workspace import Document, Workspace from ...server import completions, did_close, did_open class FakeServer(): """We don't need real server to unit test features.""" publish_diagnostics = None show_message = None show_message_log = None def __init__(self): self.workspace = Workspace('', None) fake_document_uri = 'file://fake_doc.txt' fake_document_content = 'text' fake_document = Document(fake_document_uri, fake_document_content) server = FakeServer() server.publish_diagnostics = Mock() server.show_message = Mock() server.show_message_log = Mock() server.workspace.get_document = Mock(return_value=fake_document) def _reset_mocks(): server.publish_diagnostics.reset_mock() server.show_message.reset_mock() server.show_message_log.reset_mock() def test_completions(): completion_list = completions() labels = [i.label for i in completion_list.items] assert '"' in labels assert '[' in labels assert ']' in labels assert '{' in labels assert '}' in labels def test_did_close(): _reset_mocks() params = DidCloseTextDocumentParams( text_document=TextDocumentIdentifier(uri=fake_document_uri)) did_close(server, params) # Check if show message is called server.show_message.assert_called_once() @pytest.mark.asyncio async def test_did_open(): _reset_mocks() expected_msg = None # Get expected error message try: json.loads(fake_document_content) except json.JSONDecodeError as err: expected_msg = err.msg params = DidOpenTextDocumentParams( text_document=TextDocumentItem(uri=fake_document_uri, language_id='json', version=1, text=fake_document_content)) await did_open(server, params) # Check publish diagnostics is called server.publish_diagnostics.assert_called_once() # Check publish diagnostics args message args = server.publish_diagnostics.call_args assert args[0][1][0].message is expected_msg # Check other methods are called server.show_message.assert_called_once() server.show_message_log.assert_called_once()
apache-2.0
6,875,330,818,496,659,000
33.1
79
0.582245
false
4.546667
true
false
false
ustuehler/git-cvs
cvsgit/command/clone.py
1
3887
"""Command to clone a CVS repository or module as a Git repository.""" import os.path import shutil from cvsgit.main import Command, Conduit from cvsgit.i18n import _ from cvsgit.command.verify import Verify class Clone(Command): __doc__ = _( """Clone a CVS repository or module into a Git repository. Usage: %prog [options] <repository> [<directory>] Clones an entire CVS repository or a module into a Git repository. The source argument <repository> must be a local path pointing at the CVS repository root or a module directory within. The destination argument <directory> is selected automatically, based on the last component of the source path. """) def initialize_options(self): self.repository = None self.directory = None self.add_option('--bare', action='store_true', help=\ _("Create a bare Git repository without work tree.")) self.add_option('--limit', type='int', metavar='COUNT', help=\ _("Stop importing after COUNT new commits.")) self.add_option('--domain', metavar='DOMAIN', help=\ _("Set the e-mail domain to use for unknown authors.")) self.add_option('--verify', action='store_true', help=\ _("Run the verify command after cloning (does not work " "with --bare).")) self.add_option('--no-repack', action='store_true', help=\ _("Don't run \"git repack -adF\" after cloning (so you " "end up with an uncompressed pack file).")) self.add_quiet_option() self.add_verbose_option() self.add_no_skip_latest_option() self.add_authors_option() self.add_stop_on_unknown_author_option() def finalize_options(self): if len(self.args) < 1: self.usage_error(_('missing CVS repository path')) elif len(self.args) == 1: self.repository = os.path.abspath(self.args[0]) self.directory = os.path.basename(self.repository) elif len(self.args) == 2: self.repository, self.directory = self.args else: self.usage_error(_('too many arguments')) self.finalize_authors_option() def run(self): if os.path.exists(self.directory): self.fatal(_("destination path '%s' already exists") % \ self.directory) conduit = Conduit(self.directory) conduit.init(self.repository, bare=self.options.bare, domain=self.options.domain, quiet=self.options.quiet) try: conduit.fetch(limit=self.options.limit, quiet=self.options.quiet, verbose=self.options.verbose, flush=self.options.no_skip_latest, authors=self.options.authors, stop_on_unknown_author=\ self.options.stop_on_unknown_author) git = conduit.git if not self.options.no_repack: git.check_command('repack', '-adF') head_branch = git.symbolic_ref('HEAD') if head_branch == 'refs/heads/master': if self.options.bare: git.check_command('branch', '-f', 'master', conduit.branch) else: git.check_command('reset', '-q', '--hard', conduit.branch) except: shutil.rmtree(self.directory) raise # Verify after the above rmtree, because someone likely wants # to inspect the repository if the verification fails. if self.options.verify: try: olddir = os.getcwd() os.chdir(git.git_work_tree) Verify().eval() finally: os.chdir(olddir)
isc
4,724,368,370,476,854,000
38.262626
79
0.565475
false
4.285557
false
false
false
Clinical-Genomics/scout
scout/server/blueprints/dashboard/controllers.py
1
12437
import logging from flask import flash, redirect, request, url_for from flask_login import current_user from scout.server.extensions import store from scout.server.utils import user_institutes from .forms import DashboardFilterForm LOG = logging.getLogger(__name__) def institute_select_choices(): """Return a list of tuples with institute _id, institute names to populate a form select. Returns: institute_choices(list). Example:[(cust000, "Institute 1"), ..] """ institute_choices = [("All", "All institutes")] if current_user.is_admin else [] # Collect only institutes available to the user institute_objs = user_institutes(store, current_user) for inst in institute_objs: institute_choices.append((inst["_id"], inst["display_name"])) return institute_choices def dashboard_form(request_form=None): """Retrieve data to be displayed on dashboard page""" form = DashboardFilterForm(request_form) form.search_institute.choices = institute_select_choices() return form def compose_slice_query(search_type, search_term): """Extract a filter query given a form search term and search type Args: search_type(str): example -> "case:" search_term(str): example -> "17867" Returns: slice_query(str): example case:17867 """ slice_query = None if search_term and search_type: slice_query = "".join([search_type, search_term]) return slice_query def populate_dashboard_data(request): """Prepate data display object to be returned to the view Args: request(flask.rquest): request received by the view Returns: data(dict): data to be diplayed in the template """ data = {"dashboard_form": dashboard_form(request.form)} if request.method == "GET": return data allowed_insititutes = [inst[0] for inst in institute_select_choices()] institute_id = request.form.get( "search_institute", allowed_insititutes[0] ) # GET request has no institute, select the first option of the select if institute_id and institute_id not in allowed_insititutes: flash("Your user is not allowed to visualize this data", "warning") redirect(url_for("dashboard.index")) if institute_id == "All": institute_id = None slice_query = compose_slice_query( request.form.get("search_type"), request.form.get("search_term") ) get_dashboard_info(store, data, institute_id, slice_query) return data def get_dashboard_info(adapter, data={}, institute_id=None, slice_query=None): """Append case data stats to data display object Args: adapter(adapter.MongoAdapter) data(dict): data dictionary to be passed to template institute_id(str): institute id slice_query(str): example case:55888 Returns: data(dict): data to be diplayed in the template """ # If a slice_query is present then numbers in "General statistics" and "Case statistics" will # reflect the data available for the query general_sliced_info = get_general_case_info( adapter, institute_id=institute_id, slice_query=slice_query ) total_sliced_cases = general_sliced_info["total_cases"] data["total_cases"] = total_sliced_cases if total_sliced_cases == 0: return data data["pedigree"] = [] for ped_info in general_sliced_info["pedigree"].values(): ped_info["percent"] = ped_info["count"] / total_sliced_cases data["pedigree"].append(ped_info) data["cases"] = get_case_groups( adapter, total_sliced_cases, institute_id=institute_id, slice_query=slice_query ) data["analysis_types"] = get_analysis_types( adapter, total_sliced_cases, institute_id=institute_id, slice_query=slice_query ) overview = [ { "title": "Phenotype terms", "count": general_sliced_info["phenotype_cases"], "percent": general_sliced_info["phenotype_cases"] / 
total_sliced_cases, }, { "title": "Causative variants", "count": general_sliced_info["causative_cases"], "percent": general_sliced_info["causative_cases"] / total_sliced_cases, }, { "title": "Pinned variants", "count": general_sliced_info["pinned_cases"], "percent": general_sliced_info["pinned_cases"] / total_sliced_cases, }, { "title": "Cohort tag", "count": general_sliced_info["cohort_cases"], "percent": general_sliced_info["cohort_cases"] / total_sliced_cases, }, ] # Data from "Variant statistics tab" is not filtered by slice_query and numbers will # reflect verified variants in all available cases for an institute general_info = get_general_case_info(adapter, institute_id=institute_id) total_cases = general_info["total_cases"] sliced_case_ids = general_sliced_info["case_ids"] verified_query = { "verb": {"$in": ["validate", "sanger"]}, } if institute_id: # filter by institute if users wishes so verified_query["institute"] = institute_id # Case level information sliced_validation_cases = set() sliced_validated_cases = set() # Variant level information validated_tp = set() validated_fp = set() var_valid_orders = ( 0 # use this counter to count 'True Positive', 'False positive' and 'Not validated' vars ) validate_events = adapter.event_collection.find(verified_query) for validate_event in list(validate_events): case_id = validate_event.get("case") var_obj = adapter.variant(case_id=case_id, document_id=validate_event["variant_id"]) if var_obj: # Don't take into account variants which have been removed from db var_valid_orders += 1 if case_id in sliced_case_ids: sliced_validation_cases.add( case_id ) # add to the set. Can't add same id twice since it'a a set validation = var_obj.get("validation") if validation and validation in ["True positive", "False positive"]: if case_id in sliced_case_ids: sliced_validated_cases.add(case_id) if validation == "True positive": validated_tp.add(var_obj["_id"]) elif validation == "False positive": validated_fp.add(var_obj["_id"]) n_validation_cases = len(sliced_validation_cases) n_validated_cases = len(sliced_validated_cases) # append overview.append( { "title": "Validation ordered", "count": n_validation_cases, "percent": n_validation_cases / total_sliced_cases, } ) overview.append( { "title": "Validated cases (TP + FP)", "count": n_validated_cases, "percent": n_validated_cases / total_sliced_cases, } ) data["overview"] = overview variants = [] nr_validated = len(validated_tp) + len(validated_fp) variants.append({"title": "Validation ordered", "count": var_valid_orders, "percent": 1}) # taking into account that var_valid_orders might be 0: percent_validated_tp = 0 percent_validated_fp = 0 if var_valid_orders: percent_validated_tp = len(validated_tp) / var_valid_orders percent_validated_fp = len(validated_fp) / var_valid_orders variants.append( { "title": "Validated True Positive", "count": len(validated_tp), "percent": percent_validated_tp, } ) variants.append( { "title": "Validated False Positive", "count": len(validated_fp), "percent": percent_validated_fp, } ) data["variants"] = variants return data def get_general_case_info(adapter, institute_id=None, slice_query=None): """Return general information about cases Args: adapter(adapter.MongoAdapter) institute_id(str) slice_query(str): Query to filter cases to obtain statistics for. 
Returns: general(dict) """ general = {} # Potentially sensitive slice queries are assumed allowed if we have got this far name_query = slice_query cases = adapter.cases(owner=institute_id, name_query=name_query) phenotype_cases = 0 causative_cases = 0 pinned_cases = 0 cohort_cases = 0 pedigree = { 1: {"title": "Single", "count": 0}, 2: {"title": "Duo", "count": 0}, 3: {"title": "Trio", "count": 0}, "many": {"title": "Many", "count": 0}, } case_ids = set() total_cases = 0 for total_cases, case in enumerate(cases, 1): case_ids.add(case["_id"]) if case.get("phenotype_terms"): phenotype_cases += 1 if case.get("causatives"): causative_cases += 1 if case.get("suspects"): pinned_cases += 1 if case.get("cohorts"): cohort_cases += 1 nr_individuals = len(case.get("individuals", [])) if nr_individuals == 0: continue if nr_individuals > 3: pedigree["many"]["count"] += 1 else: pedigree[nr_individuals]["count"] += 1 general["total_cases"] = total_cases general["phenotype_cases"] = phenotype_cases general["causative_cases"] = causative_cases general["pinned_cases"] = pinned_cases general["cohort_cases"] = cohort_cases general["pedigree"] = pedigree general["case_ids"] = case_ids return general def get_case_groups(adapter, total_cases, institute_id=None, slice_query=None): """Return the information about case groups Args: store(adapter.MongoAdapter) total_cases(int): Total number of cases slice_query(str): Query to filter cases to obtain statistics for. Returns: cases(dict): """ # Create a group with all cases in the database cases = [{"status": "all", "count": total_cases, "percent": 1}] # Group the cases based on their status pipeline = [] group = {"$group": {"_id": "$status", "count": {"$sum": 1}}} subquery = {} if institute_id and slice_query: subquery = adapter.cases(owner=institute_id, name_query=slice_query, yield_query=True) elif institute_id: subquery = adapter.cases(owner=institute_id, yield_query=True) elif slice_query: subquery = adapter.cases(name_query=slice_query, yield_query=True) query = {"$match": subquery} if subquery else {} if query: pipeline.append(query) pipeline.append(group) res = adapter.case_collection.aggregate(pipeline) for status_group in res: cases.append( { "status": status_group["_id"], "count": status_group["count"], "percent": status_group["count"] / total_cases, } ) return cases def get_analysis_types(adapter, total_cases, institute_id=None, slice_query=None): """Return information about analysis types. Group cases based on analysis type for the individuals. Args: adapter(adapter.MongoAdapter) total_cases(int): Total number of cases institute_id(str) slice_query(str): Query to filter cases to obtain statistics for. Returns: analysis_types array of hashes with name: analysis_type(str), count: count(int) """ # Group cases based on analysis type of the individuals query = {} subquery = {} if institute_id and slice_query: subquery = adapter.cases(owner=institute_id, name_query=slice_query, yield_query=True) elif institute_id: subquery = adapter.cases(owner=institute_id, yield_query=True) elif slice_query: subquery = adapter.cases(name_query=slice_query, yield_query=True) query = {"$match": subquery} pipeline = [] if query: pipeline.append(query) pipeline.append({"$unwind": "$individuals"}) pipeline.append({"$group": {"_id": "$individuals.analysis_type", "count": {"$sum": 1}}}) analysis_query = adapter.case_collection.aggregate(pipeline) analysis_types = [{"name": group["_id"], "count": group["count"]} for group in analysis_query] return analysis_types
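The grouping used by get_case_groups() is an ordinary MongoDB aggregation; shown standalone with pymongo it is roughly the following (connection string, database, collection and owner value are placeholders):

from pymongo import MongoClient

case_collection = MongoClient("mongodb://localhost:27017")["scout"]["case"]

pipeline = [
    {"$match": {"owner": "cust000"}},                      # optional subquery
    {"$group": {"_id": "$status", "count": {"$sum": 1}}},  # cases per status
]

for status_group in case_collection.aggregate(pipeline):
    print(status_group["_id"], status_group["count"])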
bsd-3-clause
-1,192,204,301,845,954,000
31.643045
98
0.617512
false
3.768788
false
false
false
Hitachi-Data-Systems/org-chart-builder
openpyxl/worksheet/dimensions.py
1
6673
from __future__ import absolute_import # Copyright (c) 2010-2014 openpyxl from openpyxl.compat import safe_string from openpyxl.cell import get_column_interval, column_index_from_string from openpyxl.descriptors import Integer, Float, Bool, Strict, String, Alias from openpyxl.compat import OrderedDict class Dimension(Strict): """Information about the display properties of a row or column.""" __fields__ = ('index', 'hidden', 'outlineLevel', 'collapsed',) index = Integer() hidden = Bool() outlineLevel = Integer(allow_none=True) outline_level = Alias('outlineLevel') collapsed = Bool() _style = None def __init__(self, index, hidden, outlineLevel, collapsed, worksheet, visible=True, style=None): self.index = index self.hidden = hidden self.outlineLevel = outlineLevel self.collapsed = collapsed self.worksheet = worksheet if style is not None: # accept pointer when parsing self._style = int(style) def __iter__(self): for key in self.__fields__[1:]: value = getattr(self, key) if value: yield key, safe_string(value) @property def visible(self): return not self.hidden @property def style(self): if self._style is not None: return self.worksheet.parent.shared_styles[self._style] @style.setter def style(self, style): if style is not None: self._style = self.worksheet.parent.shared_styles.add(style) class RowDimension(Dimension): """Information about the display properties of a row.""" __fields__ = Dimension.__fields__ + ('ht', 'customFormat', 'customHeight', 's') r = Alias('index') ht = Float(allow_none=True) height = Alias('ht') thickBot = Bool() thickTop = Bool() def __init__(self, index=0, ht=None, customHeight=None, # do not write s=None, customFormat=None, # do not write hidden=False, outlineLevel=0, outline_level=None, collapsed=False, visible=None, height=None, r=None, spans=None, thickBot=None, thickTop=None, worksheet=None): if r is not None: index = r if height is not None: ht = height self.ht = ht if visible is not None: hidden = not visible if outline_level is not None: outlineLevel = outlineLevel super(RowDimension, self).__init__(index, hidden, outlineLevel, collapsed, worksheet, style=s) @property def customFormat(self): """Always true if there is a style for the row""" return self._style is not None @property def customHeight(self): """Always true if there is a height for the row""" return self.ht is not None @property def s(self): return self.style @s.setter def s(self, style): self.style = style def __iter__(self): for key in self.__fields__[1:]: if key == 's': value = getattr(self, '_style') else: value = getattr(self, key) if value: yield key, safe_string(value) class ColumnDimension(Dimension): """Information about the display properties of a column.""" width = Float(allow_none=True) bestFit = Bool() auto_size = Alias('bestFit') index = String() min = Integer(allow_none=True) max = Integer(allow_none=True) collapsed = Bool() __fields__ = Dimension.__fields__ + ('width', 'bestFit', 'customWidth', 'style', 'min', 'max') def __init__(self, index='A', width=None, bestFit=False, hidden=False, outlineLevel=0, outline_level=None, collapsed=False, style=None, min=None, max=None, customWidth=False, # do not write visible=None, auto_size=None, worksheet=None): self.width = width self.min = min self.max = max if visible is not None: hidden = not visible if auto_size is not None: bestFit = auto_size self.bestFit = bestFit if outline_level is not None: outlineLevel = outline_level self.collapsed = collapsed super(ColumnDimension, self).__init__(index, hidden, outlineLevel, collapsed, worksheet, 
style=style) @property def customWidth(self): """Always true if there is a width for the column""" return self.width is not None def __iter__(self): for key in self.__fields__[1:]: if key == 'style': value = getattr(self, '_style') else: value = getattr(self, key) if value: yield key, safe_string(value) #@property # def col_label(self): # return get_column_letter(self.index) class DimensionHolder(OrderedDict): "hold (row|column)dimensions and allow operations over them" def __init__(self, direction, *args, **kwargs): self.direction = direction super(DimensionHolder, self).__init__(*args, **kwargs) def group(self, start, end=None, outline_level=1, hidden=False): """allow grouping a range of consecutive columns together :param start: first column to be grouped (mandatory) :param end: last column to be grouped (optional, default to start) :param outline_level: outline level :param hidden: should the group be hidden on workbook open or not """ if end is None: end = start if start in self: new_dim = self.pop(start) else: new_dim = ColumnDimension(index=start) work_sequence = get_column_interval(start, end) for column_letter in work_sequence: if column_letter in self: del self[column_letter] new_dim.min, new_dim.max = map(column_index_from_string, (start, end)) new_dim.outline_level = outline_level new_dim.hidden = hidden self[start] = new_dim
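A hedged usage sketch of the dimension API defined above, written against a stock openpyxl workbook (the file name and sizes are arbitrary):

from openpyxl import Workbook

wb = Workbook()
ws = wb.active

# column_dimensions is a DimensionHolder; group() collapses A..D behind
# an outline level, as implemented above.
ws.column_dimensions.group('A', 'D', hidden=True)

# Individual columns and rows expose the Dimension properties directly.
ws.column_dimensions['E'].width = 25
ws.row_dimensions[1].height = 30

wb.save("grouped.xlsx")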
apache-2.0
5,091,376,572,354,088,000
30.476415
84
0.542335
false
4.433887
false
false
false
timberline-secondary/hackerspace
src/portfolios/views.py
1
8722
import os from django.urls import reverse from django.http import Http404 from django.contrib import messages from django.contrib.auth.models import User from django.contrib.auth.decorators import login_required from django.contrib.auth.mixins import LoginRequiredMixin from django.contrib.messages.views import SuccessMessageMixin from django.shortcuts import render, get_object_or_404, redirect from django.views.generic import ListView, CreateView, DetailView, UpdateView, DeleteView from comments.models import Document from portfolios.models import Portfolio, Artwork from tenant.views import allow_non_public_view, AllowNonPublicViewMixin from portfolios.forms import PortfolioForm, ArtworkForm class PortfolioList(AllowNonPublicViewMixin, LoginRequiredMixin, ListView): model = Portfolio template_name = 'portfolios/list.html' class PortfolioCreate(AllowNonPublicViewMixin, LoginRequiredMixin, CreateView): model = Portfolio form_class = PortfolioForm template_name = 'portfolios/form.html' def form_valid(self, form): data = form.save(commit=False) data.user = self.request.user data.save() return super(PortfolioCreate, self).form_valid(form) def get_context_data(self, **kwargs): # Call the base implementation first to get a context context = super(PortfolioCreate, self).get_context_data(**kwargs) context['heading'] = "Create " + self.request.user.get_username() + "'s Portfolio" context['submit_btn_value'] = "Create" return context class PortfolioDetail(AllowNonPublicViewMixin, LoginRequiredMixin, DetailView): model = Portfolio def dispatch(self, *args, **kwargs): # only allow admins or the users to see their own portfolios, unless they are shared portfolio = get_object_or_404(Portfolio, pk=self.kwargs.get('pk')) if portfolio.listed_locally or portfolio.user == self.request.user or self.request.user.is_staff: return super(PortfolioDetail, self).dispatch(*args, **kwargs) else: raise Http404("Sorry, this portfolio isn't shared!") @allow_non_public_view @login_required def detail(request, pk=None): if pk is None: pk = request.user.id user = get_object_or_404(User, id=pk) p, created = Portfolio.objects.get_or_create(user=user) # only allow admins or the users to see their own portfolios, unless they are shared if request.user.is_staff or p.pk == request.user.id or p.listed_locally: context = { "p": p, } return render(request, 'portfolios/detail.html', context) else: raise Http404("Sorry, this portfolio isn't shared!") def public_list(request): public_portfolios = Portfolio.objects.all().filter(listed_publicly=True) return render(request, 'portfolios/public_list.html', {"portfolios": public_portfolios}) def public(request, uuid): p = get_object_or_404(Portfolio, uuid=uuid) return render(request, 'portfolios/public.html', {"p": p}) @allow_non_public_view @login_required def edit(request, pk=None): # portfolio pk is portfolio.user.id if pk is None: pk = request.user.id user = get_object_or_404(User, id=pk) p = get_object_or_404(Portfolio, user=user) # if user submitted the Portfolio form to make changes: form = PortfolioForm(request.POST or None, instance=p) if form.is_valid(): form.save() messages.success(request, "Portfolio updated.") # only allow admins or the users to edit their own portfolios if request.user.is_staff or request.user == p.user: context = { "p": p, "form": form, } return render(request, 'portfolios/edit.html', context) else: raise Http404("Sorry, this portfolio isn't yours!") ###################################### # # ARTWORK VIEWS # ###################################### class 
ArtworkCreate(AllowNonPublicViewMixin, LoginRequiredMixin, SuccessMessageMixin, CreateView): model = Artwork form_class = ArtworkForm template_name = 'portfolios/art_form.html' success_message = "The art was added to the Portfolio" def get_success_url(self): return reverse('portfolios:edit', kwargs={'pk': self.object.portfolio.pk}) def form_valid(self, form): data = form.save(commit=False) data.portfolio = get_object_or_404(Portfolio, pk=self.kwargs.get('pk')) data.save() return super(ArtworkCreate, self).form_valid(form) def get_context_data(self, **kwargs): context = super(ArtworkCreate, self).get_context_data(**kwargs) portfolio = get_object_or_404(Portfolio, pk=self.kwargs.get('pk')) context['heading'] = "Add Art to " + portfolio.user.get_username() + "'s Portfolio" context['submit_btn_value'] = "Create" context['portfolio'] = portfolio return context def dispatch(self, *args, **kwargs): portfolio = get_object_or_404(Portfolio, pk=self.kwargs.get('pk')) # only allow the user or staff to edit if portfolio.user == self.request.user or self.request.user.is_staff: return super(ArtworkCreate, self).dispatch(*args, **kwargs) else: raise Http404("Sorry, this isn't your portfolio!") class ArtworkUpdate(AllowNonPublicViewMixin, LoginRequiredMixin, SuccessMessageMixin, UpdateView): model = Artwork form_class = ArtworkForm template_name = 'portfolios/art_form.html' success_message = "Art updated!" def get_success_url(self): return reverse('portfolios:edit', kwargs={'pk': self.object.portfolio.pk}) def get_context_data(self, **kwargs): # Call the base implementation first to get a context context = super(ArtworkUpdate, self).get_context_data(**kwargs) context['heading'] = "Edit " + self.object.portfolio.user.get_username() + "'s Portfolio Art" context['submit_btn_value'] = "Update" context['portfolio'] = self.object.portfolio return context def dispatch(self, *args, **kwargs): art = get_object_or_404(Artwork, pk=self.kwargs.get('pk')) # only allow the user or staff to edit if art.portfolio.user == self.request.user or self.request.user.is_staff: return super(ArtworkUpdate, self).dispatch(*args, **kwargs) else: raise Http404("Sorry, this isn't your art!") class ArtworkDelete(AllowNonPublicViewMixin, LoginRequiredMixin, DeleteView): model = Artwork def get_success_url(self): return reverse('portfolios:edit', kwargs={'pk': self.object.portfolio.pk}) # @login_required # def art_detail(request, pk): # art = get_object_or_404(Artwork, pk=pk) # # only allow admins or the users to view # if request.user.is_staff or art.portfolio.user == request.user: # context = { # "art": art, # } # return render(request, 'portfolios/art_detail.html', context) # else: # raise Http404("Sorry, this isn't your art!") def is_acceptable_image_type(filename): # Get extension from filename to determine filetype...very hacky... # TODO use MIMETYPES name, ext = os.path.splitext(filename) img_ext_list = [".png", ".gif", ".jpg"] return ext in img_ext_list def is_acceptable_vid_type(filename): # Get extension from filename to determine filetype...very hacky... 
name, ext = os.path.splitext(filename) vid_ext_list = [".ogg", ".avi", ".mp4", ".mkv", ".webm", ".ogv"] return ext in vid_ext_list @allow_non_public_view @login_required def art_add(request, doc_id): doc = get_object_or_404(Document, id=doc_id) doc_user = doc.comment.user if request.user.is_staff or doc_user == request.user: filename = os.path.basename(doc.docfile.name) if is_acceptable_image_type(filename): image_file = doc.docfile video_file = None elif is_acceptable_vid_type(filename): image_file = None video_file = doc.docfile else: raise Http404("Unsupported image or video format. See your teacher if" " you think this format should be supported.") portfolio, created = Portfolio.objects.get_or_create(user=doc_user) Artwork.create( title=os.path.splitext(filename)[0], image_file=image_file, video_file=video_file, portfolio=portfolio, date=doc.comment.timestamp.date(), ) return redirect('portfolios:detail', pk=portfolio.pk) else: raise Http404("I don't think you're supposed to be here....")
gpl-3.0
-2,756,887,367,504,752,600
35.957627
105
0.658794
false
3.608606
false
false
false
bzamecnik/sms-tools
tests/sprModel_test.py
1
1710
import math import numpy as np from scipy.signal import get_window from smst.utils.math import rmse from smst.utils import audio from smst.models import spr from .common import sound_path # TODO: the test needs fixing after the model is fixed def test_reconstruct_sound(): fs, x = audio.read_wav(sound_path("sax-phrase-short.wav")) window_size, fft_size, hop_size = 2001, 2048, 128 window = get_window('hamming', window_size) # fix the random seed for reproducibility np.random.seed(42) xtfreq, xtmag, xtphase, x_residual = spr.from_audio( x, fs, window, fft_size, hop_size, t=-80, maxnSines=100, minSineDur=.01, freqDevOffset=20, freqDevSlope=0.01) x_reconstructed, x_sine = spr.to_audio(xtfreq, xtmag, xtphase, x_residual, 512, hop_size, fs) assert 138746 == len(x) assert len(x) == len(x_residual) expected_frame_count = int(math.ceil(float(len(x)) / hop_size)) assert expected_frame_count == len(xtfreq) assert expected_frame_count == len(xtmag) assert expected_frame_count == len(xtphase) assert xtfreq.shape[1] <= 100 # statistics of the model for regression testing without explicitly storing the whole data assert np.allclose(799.3384358567838, xtfreq.mean()) assert np.allclose(-24.080251067421795, xtmag.mean()) assert np.allclose(1.0900513921895467, xtphase.mean()) # TODO: this is completely off, it should be equal to len(x)! assert 1083 * 128 == len(x_reconstructed) assert 1083 * 128 == len(x_sine) assert np.allclose(2.1079553110776107e-17, rmse(x[:len(x_reconstructed)], x_reconstructed)) assert np.allclose(0.0043912712540510645, rmse(x[:len(x_reconstructed)], x_sine))
agpl-3.0
-4,352,714,113,328,351,000
34.625
97
0.70117
false
3.092224
false
false
false
hojel/epubia
markdown2pdf.py
1
3489
# -*- coding: utf-8 -*- import markdown import ho.pisa as pisa import StringIO import os import re from Cheetah.Template import Template from tempfile import NamedTemporaryFile debug = False def markdown2pdf(text, pdffile, cssfile='xhtml2pdf.css', src_dir='.', fontfile='arial.ttf', skipTo1st=False): global debug md = markdown.Markdown(extensions=['meta','footnotes']) html = md.convert(text) # post-process unofficial markup # 1) <p>*</p> --> <p class="blankpara">&#160;</p> # 2) quotation mark html = html.replace('<p>*</p>', '<p class="blankpara">&#160;</p>') html = re.sub(u'“ ?', "&#8220;", html) html = html.replace(u'”',"&#8221;") html = re.sub(u"‘ ?", "&#8216;", html) html = html.replace(u"’","&#8217;") if debug: open('test.html','w').write(html.encode('utf-8')) htmline = [] #-- Cover & Title Page cover_file = None title = None author = None cif = None if 'cover_url' in md.Meta: cover_url = md.Meta['cover_url'][0] if cover_url.startswith('http://'): import urllib cif = NamedTemporaryFile(delete=False) cif.write( urllib.urlopen(cover_url).read() ) cif.close() cover_file = cif.name else: cover_file = cover_url if cover_url.startswith('file://'): cover_file = cover_url[7:] if 'title' in md.Meta: title = md.Meta['title'][0].replace(', ','<br />') if 'author' in md.Meta: author = md.Meta['author'][0].replace(', ','<br />') cover_tmpl = open(os.path.join('template','pdf','coverpage.html'), 'r').read().decode('utf-8') coverpg_htm = str( Template(cover_tmpl, searchList=[ {'cover_url':cover_file,'title':title,'author':author} ]) ) htmline.append( unicode(coverpg_htm,'utf-8') ) #-- Body # correct image path for url in re.compile('<img [^>]*src="(.*?)"').findall(html): if url.startswith('http://') or os.path.isabs(url): pass else: html = html.replace(url, os.path.normpath(src_dir+'/'+url)) if skipTo1st: html = html[ html.find('<h1'): ] html = html.replace('<h1 />','<h1></h1>') htmline.append(html) #-- PDF generation css_tmpl = open(os.path.join('template','pdf',cssfile), 'r').read().decode('utf-8') target_css = str( Template(css_tmpl, searchList=[ {'font':'fonts/'+fontfile} ]) ) fp = file(pdffile,'wb') pdf = pisa.pisaDocument( StringIO.StringIO('\n'.join(htmline).encode('utf-8')), fp, #path=src_dir, # not working! #link_callback=fetch_resources, default_css=target_css, #xhtml=True, encoding='utf-8') fp.close() if cif and os.path.exists(cif.name): os.remove(cif.name) #if debug and not pdf.err: # pisa.startViewer(pdffile) # suppress ho.pisa loggin message import logging class PisaNullHandler(logging.Handler): def emit(self, record): pass logging.getLogger("ho.pisa").addHandler(PisaNullHandler()) if __name__ == "__main__": debug = True import os, sys outfile = os.path.splitext(sys.argv[1])[0] + ".pdf" text = unicode(open(sys.argv[1],'r'),'utf-8')[1:] markdown2pdf(text, outfile, fontfile='SeoulHangang.ttf') # vim:sw=4:ts=4:et
mit
-3,050,844,844,239,022,000
34.642105
116
0.554726
false
3.327916
false
false
false
kickapoo/prometheus
prometheus/__init__.py
1
1867
from flask import Flask from flask.ext.sqlalchemy import SQLAlchemy from flask.ext.login import LoginManager from flask.ext.moment import Moment from flask.ext.bootstrap import Bootstrap import pyowm app = Flask(__name__, instance_relative_config=True) # config.default app.config.from_object('config.default') # config.prometheues-settings.py app.config.from_object('config.prometheus-settings') db = SQLAlchemy(app) login_manager = LoginManager(app) moment = Moment(app) bootstrap = Bootstrap(app) owm = pyowm.OWM(app.config['OWM_KEY']) # Landing_page hold a single view from blueprints.landing_page import landing_page as landing_page_blueprint app.register_blueprint(landing_page_blueprint, url_prefix='/') # Auth holds all login/logout/registration actions. # Auth uses 'User' model with NO relation mapping to app.models.core # Authentication is made with the help of Flask-Login from blueprints.auth import auth as auth_blueprint app.register_blueprint(auth_blueprint, url_prefix='/auth') # Coordinator holds an 'admin' panel for coordinators # A coordinator can create/edit/delete (CRUD), # Team/Voluntter/Needs to database from blueprints.coordinators import coordinator as coordinator_blueprint app.register_blueprint(coordinator_blueprint, url_prefix='/coordinator') # Volunter holds a single page in order potential volunteer to select his/her # daily contribution in Needs. from blueprints.volunteers import volunteer as volunteer_blueprint app.register_blueprint(volunteer_blueprint, url_prefix='/volunteer') # Flatpages holds terms-of-use, policy etc from blueprints.flatpages import flatpages as flatpages_blueprint app.register_blueprint(flatpages_blueprint, url_prefix='/flatpages') # Prometheus api using Basic AuthO Authentication from blueprints.api import api as api_blueprint app.register_blueprint(api_blueprint, url_prefix='/api/v1')
gpl-3.0
4,680,024,369,101,816,000
38.723404
77
0.803964
false
3.660784
true
false
false
shuitian/pokemon_rpg
Assets/server/sqldata.py
1
2027
#-*- coding:utf-8 –*- import sqlite3,os,time,json class sql(object): """处理数据库的类""" def __init__(self): """获取数据库连接""" super(sql, self).__init__() db = 'monsters.db' self.conn = sqlite3.connect(db) print "Open",db,"Success" def __del__(self): """关闭数据库连接""" self.conn.close() def show_table(self, table_name): """显示表中所有数据""" if table_name == None: table_name = "None" table = self.execute("SELECT * from " + table_name) if table != None: print table.fetchall() def execute(self, seq): """执行数据库语句""" # print seq return self.conn.execute(seq) def get_monster(self, id): """在MONSTER表中查询数据""" table = self.execute("SELECT * from MONSTER where id =" + str(id)) if table != None: return table.fetchone() def get_item(self, id): """在ITEM表中查询数据""" table = self.execute("SELECT * from item where id =" + str(id)) if table != None: return table.fetchone() keys = ["id",'name','hp','attack','defence','gold'] def get_item_json(self, id): d = {"type":"item"} l = self.get_item(id) if(l != None): d['body'] = dict(zip(self.keys,l)) return json.dumps(d) def get_monster_json(self, id): d = {"type":"monster"} l = self.get_monster(id) if(l != None): d['body'] = dict(zip(self.keys,l)) return json.dumps(d) def get_json_from_message(connection, string): d = json.loads(string) if d["type"] == 'item': return connection.get_item_json(d['body']['id']) elif d["type"] == 'monster': return connection.get_monster_json(d['body']['id']) if __name__ == '__main__': """创建怪物表""" s = sql() # s.get_monster(1) # s.get_item(1) # dict1 = {} # dict1['type'] = "monster" # table = s.execute("SELECT * from MONSTER where id =" + str(1)) # dict1['body'] = dict(zip(["id",'name','hp','attack','defence','gold'],table.fetchone())) # print json.dumps(dict1) print s.get_item_json(1), print get_json_from_message(s, s.get_item_json(1))
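# --- Editor's note (added): English gloss of the Chinese docstrings above ---
# class sql: "class that handles the database"
# __init__: "open the database connection"; __del__: "close the database connection"
# show_table: "display all rows of the given table"
# execute: "run a SQL statement"
# get_monster / get_item: "look up a row by id in the MONSTER / item table"
# The comment in the __main__ block reads "create the monster table", although
# the block as shipped only exercises the JSON query helpers.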
mit
71,420,953,494,727,550
23.551282
91
0.602089
false
2.45828
false
false
false
jsonchin/nba_stats_scraper_db_storage
nba_ss_db/scrape/query_param_values.py
1
2097
""" Contains functions to retrieve fillable values for API request jobs. """ import datetime from .. import db, CONFIG from ..scrape.utils import get_date_before, format_date_for_api_request, PROPER_DATE_FORMAT QUERY_PARAM_VALUES = {} def get_possible_query_param_values(query_param, is_daily): """ Valid query parameters are: - {SEASON} - {PLAYER_POSITION} - {GAME_ID} - {PLAYER_ID} - {GAME_DATE} - {DATE_TO} The last four return a dictionary mapping season to possible values. All other query parameters return a list of values to iterate through. """ if query_param not in QUERY_PARAM_VALUES: if query_param == '{SEASON}': values = CONFIG['SEASONS'] elif query_param == '{PLAYER_ID}': values = db.retrieve.fetch_player_ids() elif query_param == '{GAME_DATE}': values = db.retrieve.fetch_game_dates() elif query_param == '{DATE_TO}': values = db.retrieve.fetch_game_dates() for season in values: for i in range(len(values[season])): game_date = values[season][i] date_before = get_date_before(game_date) values[season][i] = format_date_for_api_request(date_before) elif query_param == '{GAME_ID}': values = db.retrieve.fetch_game_ids() elif query_param == '{PLAYER_POSITION}': values = ['G', 'F', 'C'] else: raise ValueError( 'Unsupported fillable type: {}'.format(query_param)) QUERY_PARAM_VALUES[query_param] = values if is_daily: if query_param == '{SEASON}': return [CONFIG['CURRENT_SEASON']] elif query_param == '{DATE_TO}': today_date = datetime.datetime.today().strftime(PROPER_DATE_FORMAT) prev_dates = QUERY_PARAM_VALUES[query_param][CONFIG['CURRENT_SEASON']] return {CONFIG['CURRENT_SEASON']: prev_dates + [format_date_for_api_request(get_date_before(today_date))]} return QUERY_PARAM_VALUES[query_param]
apache-2.0
-6,235,656,609,852,025,000
35.155172
118
0.592752
false
3.724689
false
false
false
wheeler-microfluidics/open-drop
pavement.py
1
1896
from collections import OrderedDict import sys from importlib import import_module from paver.easy import task, needs, path, sh, cmdopts, options from paver.setuputils import setup, find_package_data, install_distutils_tasks try: from base_node_rpc.pavement_base import * except ImportError: pass sys.path.insert(0, '.') import version install_distutils_tasks() DEFAULT_ARDUINO_BOARDS = ['uno'] PROJECT_PREFIX = [d for d in path('.').dirs() if d.joinpath('Arduino').isdir() and d.name not in ('build', )][0].name name = PROJECT_PREFIX.replace('_', '-') package_name = name rpc_module = import_module(PROJECT_PREFIX) VERSION = version.getVersion() URL='http://github.com/wheeler-microfluidics/%s.git' % name PROPERTIES = OrderedDict([('name', PROJECT_PREFIX), ('manufacturer', 'GaudiLabs'), ('software_version', VERSION), ('url', URL)]) options( rpc_module=rpc_module, PROPERTIES=PROPERTIES, base_classes=['BaseNodeSerialHandler', 'BaseNodeEeprom', 'BaseNodeI2c', 'BaseNodeI2cHandler<Handler>', 'BaseNodeConfig<ConfigMessage, Address>', 'BaseNodeState<StateMessage>'], rpc_classes=['open_drop::Node'], DEFAULT_ARDUINO_BOARDS=DEFAULT_ARDUINO_BOARDS, setup=dict(name=PROJECT_PREFIX.replace('_', '-'), version=VERSION, description='Arduino RPC node packaged as Python package.', author='Christian Fobel', author_email='[email protected]', url=URL, license='GPLv2', install_requires=['base-node-rpc>=0.11.post21', 'arduino-helpers>=0.3.post10'], include_package_data=True, packages=[str(PROJECT_PREFIX)]))
gpl-3.0
-3,173,363,006,376,368,600
36.92
78
0.594937
false
3.885246
false
false
false
DiegoCorrea/ouvidoMusical
apps/similarities/Cosine/algorithm/views.py
1
2656
from .algorithm import CosineSimilarity from apps.data.songs.models import Song, SongSimilarity from apps.similarities.Cosine.benchmark.models import BenchCosine_SongTitle from django.db import transaction from django.utils import timezone from multiprocessing.dummy import Pool as ThreadPool from apps.CONSTANTS import MAX_THREAD from random import sample import numpy as np import logging logger = logging.getLogger(__name__) songInterator = {} similarityMatrix = [] def saveTitleSimilarity(sBase): global similarityMatrix global songInterator logger.info("++ Song Psition: " + str(songInterator[sBase]['pos'])) for sComp in songInterator: if songInterator[sBase]['pos'] >= songInterator[sComp]['pos']: continue try: SongSimilarity.objects.create( songBase=songInterator[sBase]['obj'], songCompare=songInterator[sComp]['obj'], similarity=similarityMatrix[songInterator[sBase]['pos']][songInterator[sComp]['pos']] ) except Exception as e: logger.error(str(e)) continue def TitleSimilarity(): logger.info("[Start Title Similarity]") global similarityMatrix global songInterator allSongs = Song.objects.all() line = 0 similarityMatrix = CosineSimilarity([song.title for song in allSongs]) for song in allSongs: songInterator.setdefault(song.id, { 'obj': song, 'pos': line } ) line += 1 # Persiste Title similarity logger.info("Start to persiste Title similarity") pool = ThreadPool(MAX_THREAD) with transaction.atomic(): pool.map(saveTitleSimilarity, songInterator) pool.close() pool.join() logger.info("[Finish Title Similarity]") def TitleSimilarityWithObserver(setSize): logger.info("[Start Title Similarity]") allSongs = sample(set(Song.objects.all()), setSize) line = 0 similarityVale = [] startedAt = timezone.now() similarityMatrix = CosineSimilarity([song.title for song in allSongs]) finishedAt = timezone.now() for i in range(len(allSongs)): for j in range(i, len(allSongs)): if j == i: continue line += 1 similarityVale.append(similarityMatrix[i][j]) BenchCosine_SongTitle.objects.create( setSize=setSize, similarity=np.mean(similarityVale), started_at=startedAt, finished_at=finishedAt ) logger.info( "Benchmark: Start at - " + str(startedAt) + " || Finished at -" + str(finishedAt) )
mit
6,042,651,033,651,817,000
30.247059
101
0.643825
false
3.970105
false
false
false
Acimaz/Google_Apple_Financial_Reporter
Reporter.py
1
5765
# Reporting tool for querying Sales- and Financial Reports from iTunes Connect and Google Developer Console # # This tool can be used to download financial reports from both Google and Apple # for the app of your choice (of course i assume that you are the owner of this app) # # Copyright (c) 2017 Ayhan Sakarya, Kaasa health GmbH <[email protected]> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import sys import getopt import csv import os from Utility import ReportDate from GoogleReports import GoogleReporter from AppleReporter import ApplePythonReport currentDate = None googleReporter = None appleReporter = None def UpdateMainReportFile(date): global googleReporter global appleReporter fileExists = os.path.isfile('financialReport.csv') with open('financialReport.csv', 'r') as csvFileRead: print 'Updating financialReport.csv..' 
dateExists = False deleteFirstRows = False headers = ['Date', 'Platform', 'newSubscriptions', 'cancelledSubscriptions', 'activeSubscriptions'] reader = csv.DictReader(csvFileRead, delimiter=',') #print 'Length: ' + len(list(reader)).__str__() readerList = list(reader) csvFileRead.seek(0) listLength = 0 for line in reader: listLength += 1 if date == line['Date']: dateExists = True if listLength > 118: #118 because we want to have the data of the past 60 days and we have 2 rows for each day (google, apple) deleteFirstRows = True csvFileRead.seek(0) with open('financialReport.csv', 'w') as csvFileWriter: writer = csv.DictWriter(csvFileWriter, delimiter=',', lineterminator='\n', fieldnames=headers) writer.writeheader() replaced = False startIndex = 2 if deleteFirstRows else 0 for line in readerList[startIndex:]: if date == line['Date']: if line['Platform'] == 'Apple': writer.writerow( {'Date': date, 'Platform': 'Apple', 'newSubscriptions': appleReporter.subscribers, 'cancelledSubscriptions': appleReporter.cancellations, 'activeSubscriptions': appleReporter.activeSubscribers}) if line['Platform'] == 'Google': writer.writerow( {'Date': date, 'Platform': 'Google', 'newSubscriptions': googleReporter.subscribers, 'cancelledSubscriptions': googleReporter.cancellations, 'activeSubscriptions': googleReporter.activeSubscribers}) replaced = True else: writer.writerow(line) if not replaced: writer.writerow( {'Date': date, 'Platform': 'Apple', 'newSubscriptions': appleReporter.subscribers, 'cancelledSubscriptions': appleReporter.cancellations, 'activeSubscriptions': appleReporter.activeSubscribers}) writer.writerow( {'Date': date, 'Platform': 'Google', 'newSubscriptions': googleReporter.subscribers, 'cancelledSubscriptions': googleReporter.cancellations, 'activeSubscriptions': googleReporter.activeSubscribers}) def main(argv): global currentDate global googleReporter global appleReporter try: opts, args = getopt.getopt(argv, "d:", ["days="]) except getopt.GetoptError: print 'Reporter.py -d <daysBefore>' sys.exit(2) for opt, arg in opts: if opt in ("-d", "--days"): currentDate = ReportDate(int(arg)) print 'Downloading financial reports for ' + currentDate.ToString() + "..." googleReporter = GoogleReporter( currentDate.year.__str__() + currentDate.month.__str__() + currentDate.day.__str__()) appleReporter = ApplePythonReport(currentDate.year.__str__() + currentDate.month.__str__() + currentDate.day.__str__()) # print '\nGoogle\nSubscribers: ' + googleReporter.subscribers.__str__() + ' Cancellations: ' + googleReporter.cancellations.__str__() + ' Active Users: ' + googleReporter.activeSubscribers.__str__() # print 'Apple\nSubscribers: ' + appleReporter.subscribers.__str__() + ' Cancellations: ' + appleReporter.cancellations.__str__() + ' Active Users: ' + appleReporter.activeSubscribers.__str__() UpdateMainReportFile(currentDate.year.__str__() + '-' + currentDate.month.__str__() + '-' + currentDate.day.__str__()) print 'Financial Reports are now up to date!\n' if __name__ == "__main__": main(sys.argv[1:])
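# --- Usage sketch (editor's addition, not part of the original tool) ---
# The script is driven entirely by the -d/--days flag, e.g. to pull the report
# for two days ago and fold it into financialReport.csv:
#     python Reporter.py -d 2
# GoogleReports.GoogleReporter and AppleReporter.ApplePythonReport are expected
# to expose .subscribers, .cancellations and .activeSubscribers, which is all
# that UpdateMainReportFile() reads from them.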
mit
5,068,535,535,105,347,000
48.273504
203
0.646487
false
4.462074
false
false
false
openstack/swift
swift/common/middleware/recon.py
1
17726
# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import errno import json import os import time from resource import getpagesize from swift import __version__ as swiftver from swift import gettext_ as _ from swift.common.constraints import check_mount from swift.common.storage_policy import POLICIES from swift.common.swob import Request, Response from swift.common.utils import get_logger, SWIFT_CONF_FILE, md5_hash_for_file from swift.common.recon import RECON_OBJECT_FILE, RECON_CONTAINER_FILE, \ RECON_ACCOUNT_FILE, RECON_DRIVE_FILE, RECON_RELINKER_FILE, \ DEFAULT_RECON_CACHE_PATH class ReconMiddleware(object): """ Recon middleware used for monitoring. /recon/load|mem|async... will return various system metrics. Needs to be added to the pipeline and requires a filter declaration in the [account|container|object]-server conf file: [filter:recon] use = egg:swift#recon recon_cache_path = /var/cache/swift """ def __init__(self, app, conf, *args, **kwargs): self.app = app self.devices = conf.get('devices', '/srv/node') swift_dir = conf.get('swift_dir', '/etc/swift') self.logger = get_logger(conf, log_route='recon') self.recon_cache_path = conf.get('recon_cache_path', DEFAULT_RECON_CACHE_PATH) self.object_recon_cache = os.path.join(self.recon_cache_path, RECON_OBJECT_FILE) self.container_recon_cache = os.path.join(self.recon_cache_path, RECON_CONTAINER_FILE) self.account_recon_cache = os.path.join(self.recon_cache_path, RECON_ACCOUNT_FILE) self.drive_recon_cache = os.path.join(self.recon_cache_path, RECON_DRIVE_FILE) self.relink_recon_cache = os.path.join(self.recon_cache_path, RECON_RELINKER_FILE) self.account_ring_path = os.path.join(swift_dir, 'account.ring.gz') self.container_ring_path = os.path.join(swift_dir, 'container.ring.gz') self.rings = [self.account_ring_path, self.container_ring_path] # include all object ring files (for all policies) for policy in POLICIES: self.rings.append(os.path.join(swift_dir, policy.ring_name + '.ring.gz')) def _from_recon_cache(self, cache_keys, cache_file, openr=open, ignore_missing=False): """retrieve values from a recon cache file :params cache_keys: list of cache items to retrieve :params cache_file: cache file to retrieve items from. :params openr: open to use [for unittests] :params ignore_missing: Some recon stats are very temporary, in this case it would be better to not log if things are missing. 
:return: dict of cache items and their values or none if not found """ try: with openr(cache_file, 'r') as f: recondata = json.load(f) return {key: recondata.get(key) for key in cache_keys} except IOError as err: if err.errno == errno.ENOENT and ignore_missing: pass else: self.logger.exception(_('Error reading recon cache file')) except ValueError: self.logger.exception(_('Error parsing recon cache file')) except Exception: self.logger.exception(_('Error retrieving recon data')) return dict((key, None) for key in cache_keys) def get_version(self): """get swift version""" verinfo = {'version': swiftver} return verinfo def get_mounted(self, openr=open): """get ALL mounted fs from /proc/mounts""" mounts = [] with openr('/proc/mounts', 'r') as procmounts: for line in procmounts: mount = {} mount['device'], mount['path'], opt1, opt2, opt3, \ opt4 = line.rstrip().split() mounts.append(mount) return mounts def get_load(self, openr=open): """get info from /proc/loadavg""" loadavg = {} with openr('/proc/loadavg', 'r') as f: onemin, fivemin, ftmin, tasks, procs = f.read().rstrip().split() loadavg['1m'] = float(onemin) loadavg['5m'] = float(fivemin) loadavg['15m'] = float(ftmin) loadavg['tasks'] = tasks loadavg['processes'] = int(procs) return loadavg def get_mem(self, openr=open): """get info from /proc/meminfo""" meminfo = {} with openr('/proc/meminfo', 'r') as memlines: for i in memlines: entry = i.rstrip().split(":") meminfo[entry[0]] = entry[1].strip() return meminfo def get_async_info(self): """get # of async pendings""" return self._from_recon_cache(['async_pending', 'async_pending_last'], self.object_recon_cache) def get_driveaudit_error(self): """get # of drive audit errors""" return self._from_recon_cache(['drive_audit_errors'], self.drive_recon_cache) def get_sharding_info(self): """get sharding info""" return self._from_recon_cache(["sharding_stats", "sharding_time", "sharding_last"], self.container_recon_cache) def get_replication_info(self, recon_type): """get replication info""" replication_list = ['replication_time', 'replication_stats', 'replication_last'] if recon_type == 'account': return self._from_recon_cache(replication_list, self.account_recon_cache) elif recon_type == 'container': return self._from_recon_cache(replication_list, self.container_recon_cache) elif recon_type == 'object': replication_list += ['object_replication_time', 'object_replication_last'] return self._from_recon_cache(replication_list, self.object_recon_cache) else: return None def get_device_info(self): """get devices""" try: return {self.devices: os.listdir(self.devices)} except Exception: self.logger.exception(_('Error listing devices')) return {self.devices: None} def get_updater_info(self, recon_type): """get updater info""" if recon_type == 'container': return self._from_recon_cache(['container_updater_sweep'], self.container_recon_cache) elif recon_type == 'object': return self._from_recon_cache(['object_updater_sweep'], self.object_recon_cache) else: return None def get_expirer_info(self, recon_type): """get expirer info""" if recon_type == 'object': return self._from_recon_cache(['object_expiration_pass', 'expired_last_pass'], self.object_recon_cache) def get_auditor_info(self, recon_type): """get auditor info""" if recon_type == 'account': return self._from_recon_cache(['account_audits_passed', 'account_auditor_pass_completed', 'account_audits_since', 'account_audits_failed'], self.account_recon_cache) elif recon_type == 'container': return self._from_recon_cache(['container_audits_passed', 
'container_auditor_pass_completed', 'container_audits_since', 'container_audits_failed'], self.container_recon_cache) elif recon_type == 'object': return self._from_recon_cache(['object_auditor_stats_ALL', 'object_auditor_stats_ZBF'], self.object_recon_cache) else: return None def get_unmounted(self): """list unmounted (failed?) devices""" mountlist = [] for entry in os.listdir(self.devices): if not os.path.isdir(os.path.join(self.devices, entry)): continue try: check_mount(self.devices, entry) except OSError as err: mounted = str(err) except ValueError: mounted = False else: continue mountlist.append({'device': entry, 'mounted': mounted}) return mountlist def get_diskusage(self): """get disk utilization statistics""" devices = [] for entry in os.listdir(self.devices): if not os.path.isdir(os.path.join(self.devices, entry)): continue try: check_mount(self.devices, entry) except OSError as err: devices.append({'device': entry, 'mounted': str(err), 'size': '', 'used': '', 'avail': ''}) except ValueError: devices.append({'device': entry, 'mounted': False, 'size': '', 'used': '', 'avail': ''}) else: path = os.path.join(self.devices, entry) disk = os.statvfs(path) capacity = disk.f_bsize * disk.f_blocks available = disk.f_bsize * disk.f_bavail used = disk.f_bsize * (disk.f_blocks - disk.f_bavail) devices.append({'device': entry, 'mounted': True, 'size': capacity, 'used': used, 'avail': available}) return devices def get_ring_md5(self): """get all ring md5sum's""" sums = {} for ringfile in self.rings: if os.path.exists(ringfile): try: sums[ringfile] = md5_hash_for_file(ringfile) except IOError as err: sums[ringfile] = None if err.errno != errno.ENOENT: self.logger.exception(_('Error reading ringfile')) return sums def get_swift_conf_md5(self): """get md5 of swift.conf""" hexsum = None try: hexsum = md5_hash_for_file(SWIFT_CONF_FILE) except IOError as err: if err.errno != errno.ENOENT: self.logger.exception(_('Error reading swift.conf')) return {SWIFT_CONF_FILE: hexsum} def get_quarantine_count(self): """get obj/container/account quarantine counts""" qcounts = {"objects": 0, "containers": 0, "accounts": 0, "policies": {}} qdir = "quarantined" for device in os.listdir(self.devices): qpath = os.path.join(self.devices, device, qdir) if os.path.exists(qpath): for qtype in os.listdir(qpath): qtgt = os.path.join(qpath, qtype) linkcount = os.lstat(qtgt).st_nlink if linkcount > 2: if qtype.startswith('objects'): if '-' in qtype: pkey = qtype.split('-', 1)[1] else: pkey = '0' qcounts['policies'].setdefault(pkey, {'objects': 0}) qcounts['policies'][pkey]['objects'] \ += linkcount - 2 qcounts['objects'] += linkcount - 2 else: qcounts[qtype] += linkcount - 2 return qcounts def get_socket_info(self, openr=open): """ get info from /proc/net/sockstat and sockstat6 Note: The mem value is actually kernel pages, but we return bytes allocated based on the systems page size. 
""" sockstat = {} try: with openr('/proc/net/sockstat', 'r') as proc_sockstat: for entry in proc_sockstat: if entry.startswith("TCP: inuse"): tcpstats = entry.split() sockstat['tcp_in_use'] = int(tcpstats[2]) sockstat['orphan'] = int(tcpstats[4]) sockstat['time_wait'] = int(tcpstats[6]) sockstat['tcp_mem_allocated_bytes'] = \ int(tcpstats[10]) * getpagesize() except IOError as e: if e.errno != errno.ENOENT: raise try: with openr('/proc/net/sockstat6', 'r') as proc_sockstat6: for entry in proc_sockstat6: if entry.startswith("TCP6: inuse"): sockstat['tcp6_in_use'] = int(entry.split()[2]) except IOError as e: if e.errno != errno.ENOENT: raise return sockstat def get_time(self): """get current time""" return time.time() def get_relinker_info(self): """get relinker info, if any""" stat_keys = ['devices', 'workers'] return self._from_recon_cache(stat_keys, self.relink_recon_cache, ignore_missing=True) def GET(self, req): root, rcheck, rtype = req.split_path(1, 3, True) all_rtypes = ['account', 'container', 'object'] if rcheck == "mem": content = self.get_mem() elif rcheck == "load": content = self.get_load() elif rcheck == "async": content = self.get_async_info() elif rcheck == 'replication' and rtype in all_rtypes: content = self.get_replication_info(rtype) elif rcheck == 'replication' and rtype is None: # handle old style object replication requests content = self.get_replication_info('object') elif rcheck == "devices": content = self.get_device_info() elif rcheck == "updater" and rtype in ['container', 'object']: content = self.get_updater_info(rtype) elif rcheck == "auditor" and rtype in all_rtypes: content = self.get_auditor_info(rtype) elif rcheck == "expirer" and rtype == 'object': content = self.get_expirer_info(rtype) elif rcheck == "mounted": content = self.get_mounted() elif rcheck == "unmounted": content = self.get_unmounted() elif rcheck == "diskusage": content = self.get_diskusage() elif rcheck == "ringmd5": content = self.get_ring_md5() elif rcheck == "swiftconfmd5": content = self.get_swift_conf_md5() elif rcheck == "quarantined": content = self.get_quarantine_count() elif rcheck == "sockstat": content = self.get_socket_info() elif rcheck == "version": content = self.get_version() elif rcheck == "driveaudit": content = self.get_driveaudit_error() elif rcheck == "time": content = self.get_time() elif rcheck == "sharding": content = self.get_sharding_info() elif rcheck == "relinker": content = self.get_relinker_info() else: content = "Invalid path: %s" % req.path return Response(request=req, status="404 Not Found", body=content, content_type="text/plain") if content is not None: return Response(request=req, body=json.dumps(content), content_type="application/json") else: return Response(request=req, status="500 Server Error", body="Internal server error.", content_type="text/plain") def __call__(self, env, start_response): req = Request(env) if req.path.startswith('/recon/'): return self.GET(req)(env, start_response) else: return self.app(env, start_response) def filter_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) def recon_filter(app): return ReconMiddleware(app, conf) return recon_filter
apache-2.0
-7,486,379,650,210,433,000
40.415888
79
0.518278
false
4.36601
false
false
false
eragonruan/text-detection-ctpn
utils/text_connector/detectors.py
1
2097
# coding:utf-8 import numpy as np from utils.bbox.nms import nms from .text_connect_cfg import Config as TextLineCfg from .text_proposal_connector import TextProposalConnector from .text_proposal_connector_oriented import TextProposalConnector as TextProposalConnectorOriented class TextDetector: def __init__(self, DETECT_MODE="H"): self.mode = DETECT_MODE if self.mode == "H": self.text_proposal_connector = TextProposalConnector() elif self.mode == "O": self.text_proposal_connector = TextProposalConnectorOriented() def detect(self, text_proposals, scores, size): # 删除得分较低的proposal keep_inds = np.where(scores > TextLineCfg.TEXT_PROPOSALS_MIN_SCORE)[0] text_proposals, scores = text_proposals[keep_inds], scores[keep_inds] # 按得分排序 sorted_indices = np.argsort(scores.ravel())[::-1] text_proposals, scores = text_proposals[sorted_indices], scores[sorted_indices] # 对proposal做nms keep_inds = nms(np.hstack((text_proposals, scores)), TextLineCfg.TEXT_PROPOSALS_NMS_THRESH) text_proposals, scores = text_proposals[keep_inds], scores[keep_inds] # 获取检测结果 text_recs = self.text_proposal_connector.get_text_lines(text_proposals, scores, size) keep_inds = self.filter_boxes(text_recs) return text_recs[keep_inds] def filter_boxes(self, boxes): heights = np.zeros((len(boxes), 1), np.float) widths = np.zeros((len(boxes), 1), np.float) scores = np.zeros((len(boxes), 1), np.float) index = 0 for box in boxes: heights[index] = (abs(box[5] - box[1]) + abs(box[7] - box[3])) / 2.0 + 1 widths[index] = (abs(box[2] - box[0]) + abs(box[6] - box[4])) / 2.0 + 1 scores[index] = box[8] index += 1 return np.where((widths / heights > TextLineCfg.MIN_RATIO) & (scores > TextLineCfg.LINE_MIN_SCORE) & (widths > (TextLineCfg.TEXT_PROPOSALS_WIDTH * TextLineCfg.MIN_NUM_PROPOSALS)))[0]
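# --- Editor's note (added): English gloss of the Chinese comments in detect() ---
# "删除得分较低的proposal"  -> drop proposals whose score is below
#                              TextLineCfg.TEXT_PROPOSALS_MIN_SCORE
# "按得分排序"              -> sort the remaining proposals by score, descending
# "对proposal做nms"         -> run non-maximum suppression over the proposals
# "获取检测结果"            -> join the surviving proposals into text lines and
#                              keep only those passing filter_boxes()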
mit
-1,105,246,956,184,877,400
41.854167
108
0.633933
false
3.107251
false
false
false
ashleyjsands/machine-learning
benchmark.py
1
1576
""" This script is used to benchmark neural network performance to determine which optimisations are useful. """ from neural_network import * from data import * from neural_network import get_index_of_maximum_value import time def print_intro(): print "Benchmarking neural network implementation" def get_neural_network(): number_of_inputs = 28 * 28 number_of_ouputs = 10 sizes = [128, number_of_ouputs] return create_random_neural_network(number_of_inputs, sizes) def benchmark_neural_network(neural_network, training_set, validation_set, test_set): runs = 10 batch_size = 10 learning_rate = 3.0 durations = [] for i in range(runs): random.shuffle(training_set) batch = training_set[0:batch_size] start = time.clock() error = neural_network.train_batch(batch, learning_rate) end = time.clock() durations.append(end - start) return sum(durations) / len(durations) def main(): """ Benchmark a specific type of neural network a number of times and print out the average duration. """ print_intro() neural_network = get_neural_network() training_set, validation_set, test_set = load_digit_data() average_duration = benchmark_neural_network(neural_network, convert_data_set_into_data_points_and_labels(training_set), convert_data_set_into_data_points_and_labels(validation_set), convert_data_set_into_data_points_and_labels(test_set)) print "The benchmark took an average %s seconds per run." % average_duration if __name__ == "__main__": main()
mit
7,466,109,914,126,193,000
34.818182
241
0.695431
false
3.533632
false
false
false
amanmehara/programming-app-data
Python/ScrappingHackerNewsWebsite/ScrappingHackerNewsWebsite.py
1
3141
''' Copyright [2020] [Arun Kumar G] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' ''' Scraping the first 2 pages of Hacker news website which gives lot of Tech news(as a articles) which has upvotes more than 100.User can just click on story link to see the article. ''' ''' Program uses requests module to get web data from URL and BeautifulSoup module to parse the web data as HTML using html parser. Install requests and BeautifulSoup module before executing! ''' import requests from bs4 import BeautifulSoup import pprint # prints the Final output in pretty manner which is inbuilt module in Python response1 = requests.get("https://news.ycombinator.com/news") #Storing response of first page of website response2 = requests.get("https://news.ycombinator.com/news?p=2") # Storing response of Second page of website response1_html_parser = BeautifulSoup(response1.text,'html.parser') #parsing the received web data by html parser response2_html_parser = BeautifulSoup(response2.text,'html.parser') linksInPage1 = response1_html_parser.select('.storylink') #All links of tech news are included in class "Storylink" linksInPage2 = response2_html_parser.select('.storylink') votesInPage1 = response1_html_parser.select('.subtext') #All votes are stored inside subclass "score" of class "subtext" votesInPage2 = response2_html_parser.select('.subtext') mega_link = linksInPage1 + linksInPage2 # Combining links of both pages #print(mega_link) mega_votes = votesInPage1 + votesInPage2 def sorted_stories_list(hackerNewsList): """Sorting the list in decreasing order with respect to votes""" return sorted(hackerNewsList,key=lambda x:x['votes'],reverse=True) def create_custom_hackernews(mega_link,mega_votes): hackerNews =[] for index,item in enumerate(mega_link): title = mega_link[index].getText() #To get title of the story(news) href = mega_link[index].get('href',None) # To get link of stroy(news).If no link is present, default is None vote = mega_votes[index].select('.score') # points are stored inside class "score" of class subtext,if points/votes not available, then class score wont be present. if len(vote): #To check if class "score" exists or not points = int(vote[0].getText().replace(' points', '')) if points > 100: # To get votes/points more than 100 hackerNews.append({'title': title, 'link': href,'votes': points}) return sorted_stories_list(hackerNews) if __name__ == '__main__': # Prints story link, story title and its votes in a pretty manner pprint.pprint(create_custom_hackernews(mega_link,mega_votes))
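# --- Illustrative note (editor's addition, hypothetical data) ---
# create_custom_hackernews() returns a list of story dicts sorted by votes,
# highest first, keeping only stories with more than 100 points.
# sorted_stories_list() on its own behaves like this:
#     sorted_stories_list([{'title': 'A', 'link': 'a', 'votes': 120},
#                          {'title': 'B', 'link': 'b', 'votes': 310}])
#     -> [{'title': 'B', ...}, {'title': 'A', ...}]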
apache-2.0
-5,395,644,967,868,740,000
45.191176
172
0.737663
false
3.734839
false
false
false
Bezoar/surrender-rides
bp_content/themes/default/handlers/forms.py
1
5264
# *-* coding: UTF-8 *-* """ Created on June 10, 2012 @author: peta15 """ __author__ = 'coto' from datetime import datetime from wtforms import fields from wtforms import Form from wtforms import validators, ValidationError from webapp2_extras.i18n import lazy_gettext as _ from webapp2_extras.i18n import ngettext, gettext from bp_includes.lib import utils from bp_includes.forms import BaseForm, PasswordConfirmMixin, UsernameMixin FIELD_MAXLENGTH = 80 # intended to stop maliciously long input class FormTranslations(object): def gettext(self, string): return gettext(string) def ngettext(self, singular, plural, n): return ngettext(singular, plural, n) class EmailMixin(BaseForm): email = fields.TextField(_('Email'), [validators.Required(), validators.Length(min=8, max=FIELD_MAXLENGTH, message=_( "Field must be between %(min)d and %(max)d characters long.")), validators.regexp(utils.EMAIL_REGEXP, message=_('Invalid email address.'))]) pass # ==== Forms ==== class DeleteAccountForm(BaseForm): password = fields.TextField(_('Password'), [validators.Required(), validators.Length(max=FIELD_MAXLENGTH, message=_( "Field cannot be longer than %(max)d characters."))], id='l_password') pass class ContactForm(EmailMixin): name = fields.TextField(_('Name'), [validators.Required(), validators.Length(max=FIELD_MAXLENGTH, message=_( "Field cannot be longer than %(max)d characters.")), validators.regexp(utils.NAME_LASTNAME_REGEXP, message=_( "Name invalid. Use only letters and numbers."))]) message = fields.TextAreaField(_('Message'), [validators.Required(), validators.Length(max=65536)]) pass def inbound_date_range_check(form, field): if (None not in (form.inbound_departure_dt.data, form.inbound_arrival_dt.data) and (form.inbound_departure_dt.data > form.inbound_arrival_dt.data)): raise ValidationError("Inbound departure time, if provided, must be before your planned arrival at Surrender.") def outbound_date_range_check(form, field): if (None not in (form.outbound_departure_dt.data, form.outbound_arrival_dt.data) and (form.outbound_departure_dt.data > form.outbound_arrival_dt.data)): raise ValidationError("Outbound arrival time, if provided, must be after your planned departure from Surrender.") class RequiredNameMixin(BaseForm): NAME_LASTNAME_REGEXP = "^[0-9a-zA-ZàáâäãåąćęèéêëìíîïłńòóôöõøùúûüÿýżźñçčšžÀÁÂÄÃÅĄĆĘÈÉÊËÌÍÎÏŁŃÒÓÔÖÕØÙÚÛÜŸÝŻŹÑßÇŒÆČŠŽ∂ð ,.'-]*$" FIELD_MAXLENGTH = 80 name = fields.TextField(_('First name'), [validators.Required(), validators.Length(max=FIELD_MAXLENGTH, message=_("Field cannot be longer than %(max)d characters.")), validators.regexp(NAME_LASTNAME_REGEXP, message=_( "First name invalid. Use only letters and numbers."))]) last_name = fields.TextField(_('Last name'), [validators.Required(), validators.Length(max=FIELD_MAXLENGTH, message=_("Field cannot be longer than %(max)d characters.")), validators.regexp(NAME_LASTNAME_REGEXP, message=_( "Last name invalid. 
Use only letters and numbers."))]) pass class RequiredCityStateMixin(BaseForm): city = fields.TextField(_('City'), [validators.Required()]) state = fields.TextField(_('State/Province'), [validators.Required()]) pass class SurrenderRegisterForm(PasswordConfirmMixin, RequiredCityStateMixin, UsernameMixin, RequiredNameMixin, EmailMixin): country = fields.SelectField(_('Country'), choices=[]) tz = fields.SelectField(_('Timezone'), choices=[]) pass class EditProfileForm(UsernameMixin, RequiredCityStateMixin, RequiredNameMixin): DT_FORMAT = '%m/%d/%Y %I:%M %p' # for use with jquery-ui country = fields.SelectField(_('Country'), choices=[]) tz = fields.SelectField(_('Timezone'), choices=[]) inbound_departure_dt = fields.DateTimeField(_('Estimated departure for Surrender'), [validators.optional(), inbound_date_range_check], format=DT_FORMAT) inbound_arrival_dt = fields.DateTimeField(_('Estimated arrival at Surrender'), [validators.optional()], format=DT_FORMAT) outbound_departure_dt = fields.DateTimeField(_('Estimated departure from Surrender'), [validators.optional()], format=DT_FORMAT) outbound_arrival_dt = fields.DateTimeField(_('Estimated arrival at home'), [validators.optional(), outbound_date_range_check], format=DT_FORMAT) needs = fields.TextAreaField(_('Needs')) needs_met = fields.BooleanField(_('Needs met')) offers = fields.TextAreaField(_('Offers')) offers_taken = fields.BooleanField(_('Offers taken')) notes = fields.TextAreaField(_('Notes')) # No methods, just field definitions pass
mit
4,451,003,861,496,569,000
48.352381
156
0.650328
false
3.861401
false
false
false
shengqh/ngsperl
lib/GATK/mergeMutect.py
1
4103
import argparse import sys import logging import os import errno import gzip from asyncore import read from Mutect import MutectItem, MutectResult def check_file_exists(file): if not os.path.exists(file): raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), file) def readFileMap(fileName): check_file_exists(fileName) result = {} with open(fileName) as fh: for line in fh: filepath, name = line.strip().split('\t', 1) result[name] = filepath.strip() return(result) def checkFileMap(fileMap): for sname in fileMap.keys(): sfile = fileMap[sname] check_file_exists(sfile) def mergeMutect(logger, listFile, outputFile): fileMap = readFileMap(listFile) checkFileMap(fileMap) fileValueMap = {} chroms = [] comments = [] fileNames = sorted(fileMap.keys()) for fileName in fileNames: filePath = fileMap[fileName] logger.info("Reading %s ..." % filePath) mutect = MutectResult() mutect.readFromFile(logger, fileName, filePath) fileValueMap[fileName] = mutect if len(chroms) == 0: chroms = mutect.findChromosomeFromComments() comments = mutect.Comments has_normal = any(v.NormalSampleName != None for v in fileValueMap.values()) logger.info("Output result to %s ..." % outputFile) with open(outputFile, "wt") as fout: for comment in comments: if comment.startswith("##INFO=<ID=LOD"): if has_normal: fout.write('##FORMAT=<ID=ND,Number=1,Type=Integer,Description="Approximate normal sample read depth (reads with MQ=255 or with bad mates are filtered)">\n') fout.write("%s\n" % comment.replace("##INFO=", "##FORMAT=")) else: fout.write("%s\n" % comment) fout.write("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t%s\n" % "\t".join(fileNames)) for chrom in chroms: items = [] for mutect in fileValueMap.values(): if chrom in mutect.ChromosomeItemMap: items.extend(mutect.ChromosomeItemMap[chrom]) posMap = {} for item in items: posMap.setdefault(item.POS, {}).setdefault(item.LocusKey, {})[item.SampleName] = item for pos in sorted(posMap.keys()): locusMap = posMap[pos] for locus in sorted(locusMap.keys()): sampleMap = locusMap[locus] item = [v for v in sampleMap.values()][0] if has_normal: fout.write("%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s:ND:LOD" % (item.CHROM, item.POS, item.ID, item.REF, item.ALT, item.QUAL, item.FILTER, item.INFO, item.FORMAT)) else: fout.write("%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s:LOD" % (item.CHROM, item.POS, item.ID, item.REF, item.ALT, item.QUAL, item.FILTER, item.INFO, item.FORMAT)) for sampleName in fileNames: if sampleName in sampleMap: item = sampleMap[sampleName] if has_normal: fout.write("\t%s:%d:%s" % (item.TumorData, item.NormalDepth, item.LOD)) else: fout.write("\t%s:%s" % (item.TumorData, item.LOD)) else: fout.write("\t./.") fout.write("\n") def main(): DEBUG=False NotDEBUG=not DEBUG parser = argparse.ArgumentParser(description="merge mutect result and keep tumor sample only.", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('-i', '--input', action='store', nargs='?', help='Input vcf list file', required=NotDEBUG) parser.add_argument('-o', '--output', action='store', nargs='?', help="Output vcf file", required=NotDEBUG) args = parser.parse_args() if DEBUG: args.input = "H:/shengquanhu/projects/20190610_Ciombior_ExomeSeq/Ciombor_ExomeSeq__fileList1.list" args.output = "H:/shengquanhu/projects/20190610_Ciombior_ExomeSeq/combined.tumor.vcf" logger = logging.getLogger('mergeMutect') logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)-8s - %(message)s') mergeMutect(logger, args.input, args.output) logger.info("done.") if __name__ 
== "__main__": main()
apache-2.0
5,800,809,459,799,280,000
33.771186
169
0.639288
false
3.235804
false
false
false
gprMax/gprMax
setup.py
1
7984
# Copyright (C) 2015-2020: The University of Edinburgh # Authors: Craig Warren and Antonis Giannopoulos # # This file is part of gprMax. # # gprMax is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # gprMax is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with gprMax. If not, see <http://www.gnu.org/licenses/>. try: from setuptools import setup, Extension except ImportError: from distutils.core import setup from distutils.extension import Extension try: import numpy as np except ImportError: raise ImportError('gprMax requires the NumPy package.') import glob import os import pathlib import re import shutil import sys # Importing _version__.py before building can cause issues. with open('gprMax/_version.py', 'r') as fd: version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE).group(1) # Parse package name from init file. Importing __init__.py / gprMax will break as gprMax depends on compiled .pyx files. with open('gprMax/__init__.py', 'r') as fd: packagename = re.search(r'^__name__\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE).group(1) packages = [packagename, 'tests', 'tools', 'user_libs'] # Parse long_description from README.rst file. with open('README.rst','r') as fd: long_description = fd.read() # Python version if sys.version_info[:2] < (3, 4): sys.exit('\nExited: Requires Python 3.4 or newer!\n') # Process 'build' command line argument if 'build' in sys.argv: print("Running 'build_ext --inplace'") sys.argv.remove('build') sys.argv.append('build_ext') sys.argv.append('--inplace') # Process '--no-cython' command line argument - either Cythonize or just compile the .c files if '--no-cython' in sys.argv: USE_CYTHON = False sys.argv.remove('--no-cython') else: USE_CYTHON = True # Build a list of all the files that need to be Cythonized looking in gprMax directory cythonfiles = [] for root, dirs, files in os.walk(os.path.join(os.getcwd(), packagename), topdown=True): for file in files: if file.endswith('.pyx'): cythonfiles.append(os.path.relpath(os.path.join(root, file))) # Process 'cleanall' command line argument - cleanup Cython files if 'cleanall' in sys.argv: USE_CYTHON = False for file in cythonfiles: filebase = os.path.splitext(file)[0] # Remove Cython C files if os.path.isfile(filebase + '.c'): try: os.remove(filebase + '.c') print('Removed: {}'.format(filebase + '.c')) except OSError: print('Could not remove: {}'.format(filebase + '.c')) # Remove compiled Cython modules libfile = glob.glob(os.path.join(os.getcwd(), os.path.splitext(file)[0]) + '*.pyd') + glob.glob(os.path.join(os.getcwd(), os.path.splitext(file)[0]) + '*.so') if libfile: libfile = libfile[0] try: os.remove(libfile) print('Removed: {}'.format(os.path.abspath(libfile))) except OSError: print('Could not remove: {}'.format(os.path.abspath(libfile))) # Remove build, dist, egg and __pycache__ directories shutil.rmtree(os.path.join(os.getcwd(), 'build'), ignore_errors=True) shutil.rmtree(os.path.join(os.getcwd(), 'dist'), ignore_errors=True) shutil.rmtree(os.path.join(os.getcwd(), 'gprMax.egg-info'), ignore_errors=True) for p in 
pathlib.Path(os.getcwd()).rglob('__pycache__'): shutil.rmtree(p, ignore_errors=True) print('Removed: {}'.format(p)) # Now do a normal clean sys.argv[1] = 'clean' # this is what distutils understands # Set compiler options # Windows if sys.platform == 'win32': compile_args = ['/O2', '/openmp', '/w'] # No static linking as no static version of OpenMP library; /w disables warnings linker_args = [] extra_objects = [] libraries=[] # Mac OS X - needs gcc (usually via HomeBrew) because the default compiler LLVM (clang) does not support OpenMP # - with gcc -fopenmp option implies -pthread elif sys.platform == 'darwin': gccpath = glob.glob('/usr/local/bin/gcc-[4-9]*') gccpath += glob.glob('/usr/local/bin/gcc-[10-11]*') if gccpath: # Use newest gcc found os.environ['CC'] = gccpath[-1].split(os.sep)[-1] rpath = '/usr/local/opt/gcc/lib/gcc/' + gccpath[-1].split(os.sep)[-1][-1] + '/' else: raise('Cannot find gcc 4-10 in /usr/local/bin. gprMax requires gcc to be installed - easily done through the Homebrew package manager (http://brew.sh). Note: gcc with OpenMP support is required.') compile_args = ['-O3', '-w', '-fopenmp', '-march=native'] # Sometimes worth testing with '-fstrict-aliasing', '-fno-common' linker_args = ['-fopenmp', '-Wl,-rpath,' + rpath] libraries = ['iomp5', 'pthread'] extra_objects = [] # Linux elif sys.platform == 'linux': compile_args = ['-O3', '-w', '-fopenmp', '-march=native'] linker_args = ['-fopenmp'] extra_objects = [] libraries=[] # Build a list of all the extensions extensions = [] for file in cythonfiles: tmp = os.path.splitext(file) if USE_CYTHON: fileext = tmp[1] else: fileext = '.c' extension = Extension(tmp[0].replace(os.sep, '.'), [tmp[0] + fileext], language='c', include_dirs=[np.get_include()], libraries=libraries, extra_compile_args=compile_args, extra_link_args=linker_args, extra_objects=extra_objects) extensions.append(extension) # Cythonize (build .c files) if USE_CYTHON: from Cython.Build import cythonize extensions = cythonize(extensions, compiler_directives={ 'boundscheck': False, 'wraparound': False, 'initializedcheck': False, 'embedsignature': True, 'language_level': 3 }, annotate=False) # SetupTools Required to make package import setuptools setup(name=packagename, version=version, author='Craig Warren and Antonis Giannopoulos', url='http://www.gprmax.com', description='Electromagnetic Modelling Software based on the Finite-Difference Time-Domain (FDTD) method', long_description=long_description, long_description_content_type="text/x-rst", license='GPLv3+', classifiers=[ 'Environment :: Console', 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)', 'Operating System :: MacOS', 'Operating System :: Microsoft :: Windows', 'Operating System :: POSIX :: Linux', 'Programming Language :: Cython', 'Programming Language :: Python :: 3', 'Topic :: Scientific/Engineering' ], #requirements python_requires=">3.6", install_requires=[ "colorama", "cython", "h5py", "jupyter", "matplotlib", "numpy", "psutil", "scipy", "terminaltables", "tqdm", ], ext_modules=extensions, packages=packages, include_package_data=True, include_dirs=[np.get_include()], zip_safe=False)
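# --- Usage sketch (editor's addition) ---
# Because of the custom command-line handling above, the extensions are
# typically built in place with either of:
#     python setup.py build                              # rewritten internally to build_ext --inplace
#     python setup.py build_ext --inplace --no-cython    # compile the shipped .c files instead of Cythonizing
# and all generated .c files, compiled modules and build directories are
# removed with:
#     python setup.py cleanall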
gpl-3.0
-5,500,311,628,205,997,000
37.200957
204
0.600952
false
3.766038
false
false
false
lare-team/django-lare
django_lare/models.py
1
2035
from django_lare import VERSION class Lare(object): enabled = False current_namespace = "" previous_namespace = "" version = VERSION supported_version = "1.0.0" def __init__(self, request): super(Lare, self).__init__() if 'HTTP_X_LARE' in request.META: if 'HTTP_X_LARE_VERSION' in request.META: frontend_version = request.META['HTTP_X_LARE_VERSION'] frontend_versions = frontend_version.split('.') supported_versions = self.supported_version.split('.') i = 0 while i < len(supported_versions): if frontend_versions[i] < supported_versions[i]: self.enabled = False return i += 1 self.enabled = True self.previous_namespace = request.META['HTTP_X_LARE'] def set_current_namespace(self, namespace): self.current_namespace = namespace def get_current_namespace(self): return self.current_namespace def is_enabled(self): return self.enabled def get_matching_count(self, extension_namespace=None): if not self.enabled: return 0 if extension_namespace is None: extension_namespace = self.current_namespace matching_count = 0 previous_namespaces = self.previous_namespace.split('.') extension_namespaces = extension_namespace.split('.') while matching_count < len(previous_namespaces) and matching_count < len(extension_namespaces): if previous_namespaces[matching_count] == extension_namespaces[matching_count]: matching_count += 1 else: break return matching_count def matches(self, extension_namespace=None): if extension_namespace is None: extension_namespace = self.current_namespace return self.get_matching_count(extension_namespace) == len(extension_namespace.split('.'))
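# --- Illustrative sketch (editor's addition, not part of the package) ---
# Lare only reads request.META, so a bare stand-in object is enough to show
# how namespace matching works. The namespaces below are made up.
if __name__ == '__main__':
    class _FakeRequest(object):
        META = {'HTTP_X_LARE': 'shop.cart.checkout',
                'HTTP_X_LARE_VERSION': '1.0.0'}

    lare = Lare(_FakeRequest())
    lare.set_current_namespace('shop.cart.payment')
    print(lare.is_enabled())          # True  - frontend version is supported
    print(lare.get_matching_count())  # 2     - 'shop' and 'cart' match
    print(lare.matches())             # False - third component differs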
mit
3,086,362,984,252,951,600
34.701754
103
0.594595
false
4.625
false
false
false
previtus/MGR-Project-Code
Downloader/PreprocessData/GenListOfUrls.py
1
1499
# GenListOfUrls.py import sys sys.path.append('..') def GenListOfUrls(Segments, PIXELS_X, PIXELS_Y, PrependPath='', minimal_length=20, custom=False): ''' Iterates over the segment list and returns a list of urls needed for download Outputs list of tripples in [ (<url>, <filename>, <edge id>), ... ] ''' FilenameMap = [] verbose = False num_of_segments_with_score = 0 num_of_image_urls_to_attempt_to_down = 0 for segment in Segments: if verbose: segment.displaySegment() if custom or not segment.hasUnknownScore(): # We only care about scored segments now... num_of_segments_with_score += 1 if custom: [urls, filenames] = segment.getGoogleViewUrls(PIXELS_X,PIXELS_Y) else: [urls, filenames] = segment.getGoogleViewUrls_whileUsingFractionsOfMinEdgeLen(PIXELS_X, PIXELS_Y, minimal_length) #print len(urls), urls num_of_image_urls_to_attempt_to_down += len(urls) for i_nth_image in range(0, len(urls)): if verbose: print urls, '\n', filenames, '\n' #print filenames[i_nth_image] FilenameMap.append((urls[i_nth_image], PrependPath+filenames[i_nth_image], segment.SegmentId, i_nth_image)) print "num_of_segments_with_score", num_of_segments_with_score print "num_of_image_urls_to_attempt_to_down", num_of_image_urls_to_attempt_to_down return FilenameMap
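# --- Editor's note (added): shape of the return value ---
# Despite the docstring saying "tripples", each FilenameMap entry is a 4-tuple:
#     (url, PrependPath + filename, segment.SegmentId, i_nth_image)
# i.e. the index of the image within its segment is carried along as well.
# Each element of Segments is expected to provide hasUnknownScore(),
# getGoogleViewUrls(PIXELS_X, PIXELS_Y) and
# getGoogleViewUrls_whileUsingFractionsOfMinEdgeLen(PIXELS_X, PIXELS_Y, minimal_length).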
mit
9,217,931,892,103,464,000
36.475
129
0.623082
false
3.486047
false
false
false
yeleman/snisi
snisi_core/migrations/0005_auto_20150205_1516.py
1
1889
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations from django.conf import settings class Migration(migrations.Migration): dependencies = [ ('snisi_core', '0004_auto_20150114_1650'), ] operations = [ migrations.CreateModel( name='Accreditation', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('location', models.ForeignKey(to='snisi_core.Entity')), ], options={ 'verbose_name': 'Accreditation', 'verbose_name_plural': 'Accreditations', }, bases=(models.Model,), ), migrations.CreateModel( name='Privilege', fields=[ ('slug', models.SlugField(serialize=False, verbose_name='Slug', primary_key=True)), ('name', models.CharField(max_length=100, verbose_name='Name')), ], options={ 'verbose_name': 'Privilege', 'verbose_name_plural': 'Privileges', }, bases=(models.Model,), ), migrations.AddField( model_name='accreditation', name='privilege', field=models.ForeignKey(to='snisi_core.Privilege'), preserve_default=True, ), migrations.AddField( model_name='accreditation', name='provider', field=models.ForeignKey(to=settings.AUTH_USER_MODEL), preserve_default=True, ), migrations.AddField( model_name='provider', name='privileges', field=models.ManyToManyField(to='snisi_core.Privilege', through='snisi_core.Accreditation'), preserve_default=True, ), ]
mit
-7,704,077,489,148,694,000
32.140351
114
0.535733
false
4.562802
false
false
false
DomBennett/pG-lt
pglt/tools/setup_tools.py
1
16996
#! /bin/usr/env python # D.J. Bennett # 07/11/2014 """ pglt setup tools """ # PACKAGES import argparse import sys import os import re import pickle import csv import logging import platform from datetime import datetime from reseter_tools import Reseter from special_tools import clean from special_tools import stats from special_tools import getThreads from pglt import _PTHREADS as pthreads # GLOBALS PARS = None # both set at init GPARS = None pglt_version = None # set at run_pglt.py pglt_doc = None pglt_year = None description = """ ---------------------------------------------------------------------- pG-lt version {0}, Copyright (C) {1} Bennett ---------------------------------------------------------------------- This program comes with ABSOLUTELY NO WARRANTY. This is free software, and you are welcome to redistribute it under certain conditions. For more details, type `run_pglt.py --details`. ---------------------------------------------------------------------- """ # MESSAGES nonamestxt_msg = '\nERROR: No folders containing \'names.txt\' files \ found! All taxonomic names should be placed in subdirectories and \ called: \'names.txt\'' ioerror_msg = "[{0}] file could not be opened in [{1}]. Check that \ it is not opened by another program" priming_msg = '\nERROR: The program was unable to start due to a \ problem with the files and folders in the study directory. Check the \ parameters and gene parameters .csv for any potential conflicts.' # PROGRESS DICT progress = {'1': 'not run', '2': 'not run', '3': 'not run', '4': 'not run'} # ERROR CLASSES class PrimingError(Exception): pass # FUNCTIONS def printHeader(): """Print a nice program description header""" print description.format(pglt_version, pglt_year) def calcWorkers(threads, nfolders, min_threads_per_worker=2, max_threads_per_worker=100): """Calculate the number of workers for parallel running of folders""" # get available threads on machine available_threads = getThreads() if available_threads: # make sure threads arg is not greater than those available if threads > available_threads: sys.exit('More threads specified than avaiable on machine') if threads == -1: threads = available_threads # make sure threads is absolute threads = abs(threads) # calc min_threads_per_worker if it is greater than threads if min_threads_per_worker > threads: min_threads_per_worker = threads # calc max_threads_per_worker if it is greater than threads if max_threads_per_worker > threads: max_threads_per_worker = threads # calc nworkers and threads_per_worker # increase workers before threads_per_worker threads_per_worker = min_threads_per_worker for i in range(nfolders): if (float(i)*threads_per_worker) > threads: nworkers = i-1 break else: nworkers = nfolders for i in range(min_threads_per_worker, max_threads_per_worker): if (float(nworkers)*i) > threads: threads_per_worker = i-1 break else: threads_per_worker = max_threads_per_worker spare_threads = int(threads - (float(nworkers)*threads_per_worker)) return nworkers, threads_per_worker, spare_threads def parseArguments(args=None): """Read command-line arguments""" # TODO: too complex stages_err_msg = 'Invalid stage argument. Use \'-s [from]-[to]\' for \ numbers 1 through 4.' 
# get args if not args: args = createParser().parse_args() if args.details: print '\nThis is pG-lt version: ', pglt_version print pglt_doc sys.exit() # check them if args.stats: stats() sys.exit() if args.clean: clean() sys.exit('Files and folders deleted') if args.reset: # first read default paradict and genedict paradict = readInPars('') genedict = readInGenePars('') reseter = Reseter(paradict=paradict, genedict=genedict) reseter.run() if args.restart: if args.retry: print('Restarting and retrying folders that failed ....') else: print('Restarting ....') return True, args.retry, None, None, None, None, None if not args.email: # stop if no email sys.exit('An email address must be provided. Use \'-e\'.') # extract stages if not re.match('[1-4]-[1-4]', args.stages): sys.exit(stages_err_msg) startend = [int(e) for e in args.stages.split('-')] stages = [str(e) for e in range(startend[0], startend[1]+1)] if not stages: sys.exit(stages_err_msg) # check threads is a valid argument if args.threads == 0 or args.threads < -1: sys.exit('Invalid threads argument, must be -1 or >0.') if pthreads and args.threads < 2: sys.exit('pG-lt is set to use a parallelised version of RAxML, threads must be >= 2') return False, False, args.email, args.threads, args.verbose, args.debug,\ stages def getFolders(): """Return folders in directory with names.txt files""" # list all folders unchecked_dirs = [f for f in os.listdir('.') if not os.path.isfile(f)] # remove hidden folders unchecked_dirs = [d for d in unchecked_dirs if not re.match('^\.', d)] # loop through each and check they contain a names.txt checked_dirs = [] for each in unchecked_dirs: path = os.path.join(os.getcwd(), each) files = os.listdir(path) if 'names.txt' in files: checked_dirs.append(each) # TODO: change this to have folders with any of the stage folders too if len(checked_dirs) > 0: return checked_dirs else: sys.exit(nonamestxt_msg) def setUpLogging(verbose, debug, logname, directory=os.getcwd()): """Set up logging : direct and control log statements""" # get logger logger = logging.getLogger(logname) if debug: # log all statements above DEBUG level logger.setLevel(logging.DEBUG) else: # log all statements above INFO level # (which is higher than DEBUG) logger.setLevel(logging.INFO) # add file hander to root logfile = os.path.join(directory, 'log.txt') loghandler = logging.FileHandler(logfile, 'a') # set statement format -- I only want the message loghandler.setFormatter(logging.Formatter('%(message)s')) logger.addHandler(loghandler) if verbose: # if verbose, copy all info statements to console console = logging.StreamHandler() console.setFormatter(logging.Formatter('%(message)s')) logger.addHandler(console) logger.propagate = False return logger def tearDownLogging(logname): """Remove a logger""" # get logger logger = logging.getLogger(logname) # remove handlers handlers = logger.handlers[:] for h in handlers: logger.removeHandler(h) def createParser(): """Create parser for command-line""" parser = argparse.ArgumentParser() parser.add_argument("-email", "-e", help="please provide email \ for NCBI") parser.add_argument('--restart', help='restart pipeline if stopped', action='store_true') parser.add_argument('--retry', help='if restarting, retry failed stages \ and folders?', action='store_true') parser.add_argument('--reset', help='open reset mode to change files and \ folders', action='store_true') parser.add_argument('--stats', help='get stats on status of folders', action='store_true') parser.add_argument("-threads", "-t", help="number of 
threads, default\ \'-1\', will use all available on machine", default=-1, type=int) parser.add_argument("-stages", "-s", help="stages to run, default \ \'1-4\'", default='1-4') parser.add_argument("--verbose", help="increase output verbosity", action="store_true") parser.add_argument('--details', help='display information about the \ program', action='store_true') parser.add_argument("--debug", help="log warnings (developer only)", action="store_true") parser.add_argument("--clean", help="remove all pG-lt files and \ folders (developer only)", action="store_true") return parser def logMessage(phase, logger, folders=None, stage=None, threads=None, spare_threads=None, email=None, stages=None, counter=None, retry=None): # TODO: too complex if phase == 'program-start': logger.info(description.format(pglt_version, pglt_year)) logger.info('-' * 28 + ' Run details ' + '-' * 29) logger.info('Running on [{0}] [{1}]'.format(platform.node(), platform.platform())) logger.info('Python [{0}]'.format(sys.version)) logger.info('Using [{0}] threads with [{1}] spare'. format(threads, spare_threads)) logger.info('Using [{0}] as Entrez email'.format(email)) logger.info('Running stages {0}'.format(stages)) logger.info('Working with the following [{0}] folders:'. format(len(folders))) # convert folders to string folder_string = '' chars_counter = 0 for each in folders[:-1]: chars_counter += len(each) if chars_counter > 70: # stop at 70 columns folder_string += each + ',\n' chars_counter = 0 else: folder_string += each + ', ' folder_string += folders[-1] logger.info('[{0}]'.format(folder_string)) logger.info('-' * 70 + '\n') logger.info('-' * 31 + ' Start ' + '-' * 32) elif phase == 'program-end': logger.info('-' * 32 + ' End ' + '-' * 33) elif phase == 'stage-start': logger.info('Stage [{0}] started at [{1}]'.format(stage, timestamp())) elif phase == 'stage-end': logger.info('Stage [{0}] finished at [{1}] for [{2}] folders'. format(stage, timestamp(), counter)) elif phase == 'program-restart': if retry: logger.info('{0}- Restarting and retrying [{1}] {0}'. format('-' * 6, timestamp())) else: logger.info('{0}- Restarting [{1}] {0}'. 
format('-' * 11, timestamp())) else: raise(ValueError('Unrecognised phase')) def prime(directory, arguments, threads): """Write pickle files, print arguments""" # Write pickle files temp_dir = os.path.join(directory, 'tempfiles') if not os.path.isdir(temp_dir): os.mkdir(temp_dir) with open(os.path.join(temp_dir, "genedict.p"), "wb") as file: pickle.dump(arguments['genedict'], file) with open(os.path.join(temp_dir, "paradict.p"), "wb") as file: pickle.dump(arguments['paradict'], file) with open(os.path.join(temp_dir, "terms.p"), "wb") as file: pickle.dump(arguments['terms'], file) with open(os.path.join(temp_dir, 'threads.p'), "wb") as file: pickle.dump(threads, file) with open(os.path.join(temp_dir, 'progress.p'), "wb") as file: pickle.dump(progress, file) # Print arguments and parameters to file record = 'Working with [{0}] names\n'.format(len(arguments['terms'])) record += recordPars(arguments['paradict']) record += recordGpars(arguments['genedict']) with open(os.path.join(directory, 'info.txt'), 'w') as file: file.write(record) def timestamp(): timestamp = datetime.today().strftime("%A, %d %B %Y %I:%M%p") return timestamp def recordPars(paradict): """Return pglt parameters string""" record = '\nUsing the following parameters:\n' for key in paradict.keys(): record += ' [{0}] = [{1}]\n'.format(key, paradict[key]) return record def recordGpars(genedict): """Return gene parameters string""" record = '\nUsing the following genes and gene parameters:\n' for gene in genedict.keys(): record += ' Gene: [{0}]\n'.format(gene) for par in genedict[gene]: record += ' [{0}] = [{1}]\n'.format(par, genedict[gene][par]) return record def readInNames(directory): """Read names from text file in dir""" terms = [] with open(os.path.join(directory, 'names.txt')) as names: for name in names: terms.append(name.strip()) terms = [term for term in terms if not term == ''] return terms def readInGenePars(gpars_file): """Read gene_parameters.csv. Return list of dictionaries.""" # TODO: too complex, consider breaking up def _read(gpars_file, template, genes=None): # open csv file and replace parameters in template # if they are None. If genes specified, only read # rows for those genes. with open(gpars_file, 'rb') as csvfile: reader = csv.DictReader(csvfile) for row in reader: if genes: if not row['gene'] in genes: continue temp = template.copy() for key in temp.keys(): if row[key]: if temp[key] is None: if key == 'names': # for names, split into a list of syns temp[key] = row[key].split(':') else: temp[key] = row[key] genedict[row['gene']] = temp return genedict # check if file exists, else use default if not os.path.isfile(gpars_file): return readInGenePars(GPARS) # genedicts genedict = {} # template of dict in genedict template = {'names': None, 'taxid': None, 'minlen': None, 'maxlen': None, 'maxgaps': None, 'minoverlap': None, 'maxfails': None, 'maxtrys': None, 'minseedsize': None, 'maxseedsize': None, 'maxseedtrys': None, 'partition': None, 'type': None} # open file, read each row and fill in template genedict = _read(gpars_file, template) # if Nones, use defaults nones = False for gene in genedict.keys(): for par in genedict[gene].keys(): if genedict[gene][par] is None: nones = True break if nones: # run _read for defaults and limit to genes in genedict genedict = _read(GPARS, template, genedict.keys()) return genedict def readInPars(pars_file): """Read gene_parameters.csv. 
Return dictionary.""" def _read(pars_file, paradict): # open csv, and replace all Nones with open(pars_file, 'rb') as csvfile: reader = csv.DictReader(csvfile) for row in reader: if paradict[row["Parameter"]] is None: paradict[row["Parameter"]] = row["Value"] return paradict # check if file exists, else use default if not os.path.isfile(pars_file): return readInPars(PARS) # template paradict = {'nseqs': None, 'naligns': None, 'nphylos': None, 'thoroughness': None, 'maxtrys': None, 'rttstat': None, 'parentid': None, 'outgroupid': None, 'constraint': None, 'minspecies': None, 'minspecies_gene': None, 'minnseqs_gene': None, 'target_ngenes': None, 'maxpn': None, 'votesize': None, 'maxvotetrys': None, 'taxonomic_constraint': None} # open file, read each row, extract value paradict = _read(pars_file, paradict) # if Nones remain, use default nones = False for key in paradict.keys(): if paradict[key] is None: nones = True break if nones: paradict = _read(PARS, paradict) return paradict def sortArgs(directory, email, logger): """Search for relevant files in dir, return list of arguments""" # find text file and read, raise error if fail try: terms = readInNames(directory) except IOError: logger.error(ioerror_msg.format('names.txt', directory)) raise PrimingError() # find gene parameter file and read, raise error if fail try: genedict = readInGenePars(os.path.join(directory, 'gene_parameters.csv')) except IOError: logger.error(ioerror_msg.format('gene_parameters.csv', directory)) raise PrimingError() # find parameter file and read, raise error if fail try: paradict = readInPars(os.path.join(directory, 'parameters.csv')) except IOError: logger.error(ioerror_msg.format('parameters.csv', directory)) raise PrimingError() # add email to paradict paradict['email'] = email return {'terms': terms, 'genedict': genedict, 'paradict': paradict}
gpl-2.0
-6,247,521,024,659,388,000
36.853007
93
0.595552
false
3.878594
false
false
false
mediafactory/yats
sites/web/web/models.py
1
3394
# -*- coding: utf-8 -*-
from django.db import models
from django.utils.functional import lazy
from django.core.cache import cache
from django.conf import settings
from django.utils.translation import ugettext_lazy as _

from yats.models import tickets
from yats.models import base

import datetime
import base64
import httplib2
try:
    import json
except ImportError:
    from django.utils import simplejson as json


class ticket_component(base):
    name = models.CharField(max_length=255)

    def __str__(self):
        return self.name

    class Meta:
        verbose_name = _('module')
        verbose_name_plural = _(u'modules')
        ordering = ['name']


def getGibthubTags():
    owner = settings.GITHUB_OWNER
    repo = settings.GITHUB_REPO
    user = settings.GITHUB_USER
    password = settings.GITHUB_PASS

    if not owner or not repo:
        return ()

    cache_name = 'yats.%s.%s.tags.github' % (owner, repo)
    tags = cache.get(cache_name)
    if tags:
        return tuple(reversed(sorted(tags)))

    # https://developer.github.com/v3/repos/#list-tags
    result = []
    headers = {
        'Accept': 'application/vnd.github.v3+json',
        'User-Agent': 'yats'
    }
    if user:
        headers['Authorization'] = 'Basic %s' % base64.b64encode('%s:%s' % (user, password))

    try:
        h = httplib2.Http()
        header, content = h.request('https://api.github.com/repos/%s/%s/tags' % (owner, repo), 'GET', headers=headers)
        if header['status'] != '200':
            print('ERROR fetching data from GitHub: %s' % content)
            return ()
    except Exception:
        print('ERROR fetching data from GitHub')
        return ()

    tags = json.loads(content)
    for tag in tags:
        result.append((tag['name'], tag['name'],))

    cache.set(cache_name, result, 60 * 10)
    return tuple(reversed(sorted(result)))


BILLING_TYPE_CHOICES = (
    ('service', 'service'),
    ('development', 'development'),
)


class test(tickets):
    component = models.ForeignKey(ticket_component, on_delete=models.CASCADE, verbose_name=_('component'))
    version = models.CharField(_('version'), max_length=255, choices=lazy(getGibthubTags, tuple)())
    keywords = models.CharField(_('keywords'), max_length=255, blank=True)
    reproduction = models.TextField(_('reproduction'), null=True)
    billing_needed = models.NullBooleanField(_('billing needed'), default=True)
    billing_done = models.NullBooleanField(_('billing done'), default=None)
    billing_reason = models.TextField(_('billing reason'), null=True, blank=True)
    billing_estimated_time = models.FloatField(_('billing estimated time'), null=True, blank=True)
    billing_time_taken = models.FloatField(_('billing tike taken'), null=True, blank=True)
    billing_type = models.CharField(_('billing type'), max_length=255, choices=BILLING_TYPE_CHOICES, null=True, blank=True)
    solution = models.TextField(_('solution'), null=True, blank=True)
    fixed_in_version = models.CharField(_('fixed in version'), max_length=255, choices=lazy(getGibthubTags, tuple)(), blank=True)
    deadline = models.DateTimeField(_('deadline'), null=True, blank=True)

    def is_late(self):
        if self.deadline < datetime.date.today():
            return 2
        if self.deadline < datetime.date.today() + datetime.timedelta(days=7):
            return 1
        return 0
mit
-4,443,041,059,347,932,700
33.632653
129
0.652033
false
3.822072
false
false
false
hh-italian-group/h-tautau
Production/crab/split_dataset.py
1
2726
#!/usr/bin/env python
# Create json files to split dataset into several parts.
# This file is part of https://github.com/hh-italian-group/h-tautau.

import argparse
from sets import Set
from FWCore.PythonUtilities.LumiList import LumiList
from dbs.apis.dbsClient import DbsApi

parser = argparse.ArgumentParser(description='Create json files to split dataset into several parts.',
                                 formatter_class = lambda prog: argparse.HelpFormatter(prog,width=90))
parser.add_argument('--dataset', required=True, dest='dataset', type=str, help="Dataset name")
parser.add_argument('--output-prefix', required=True, dest='output_prefix', type=str,
                    help="Prefix for output splitted json files")
parser.add_argument('--output-suffix', required=False, dest='output_suffix', type=str, default='sub',
                    help="Prefix for output splitted json files")
parser.add_argument('--n-splits', required=True, dest='n_splits', type=int, help="Number of splits")
args = parser.parse_args()

if args.n_splits < 1:
    raise RuntimeError('Number of splits should be >= 1.')

def FindMaxLumi(dbs, dataset):
    blocks = dbs.listBlocks(dataset=dataset)
    max_lumi = 0
    for block_entry in blocks:
        block_lumis = dbs.listFileLumis(block_name=block_entry['block_name'])
        for file_entry in block_lumis:
            file_lumis = file_entry['lumi_section_num']
            max_file_lumi = max(file_lumis)
            max_lumi = max(max_lumi, max_file_lumi)
    return max_lumi

def GetRunList(dbs, dataset):
    runs = dbs.listRuns(dataset=dataset)
    run_list = []
    for run in runs:
        run_list.extend(run['run_num'])
    run_set = Set(run_list)
    return list(run_set)

def SaveLumis(file_name, lumis):
    lumi_file = open(file_name, 'w')
    lumi_file.write(str(lumis))
    lumi_file.close()

dbs = DbsApi('https://cmsweb.cern.ch/dbs/prod/global/DBSReader')

print("Loading runs...")
runs = GetRunList(dbs, args.dataset)
if len(runs) != 1:
    raise RuntimeError('Only datasets with one run are currently supported.')

print("Loading lumis...")
max_lumi = FindMaxLumi(dbs, args.dataset)
splits = [ int(float(n + 1) / args.n_splits * max_lumi) for n in range(0, args.n_splits) ]
print("Max lumi: {}".format(max_lumi))
print("Lumi splits: {}".format(splits))

last_lumi = 0
for split_number in range(0, len(splits)):
    split = splits[split_number]
    lumis = {}
    lumis[runs[0]] = []
    lumis[runs[0]].append([last_lumi + 1, split])
    file_name = '{}_{}{}.json'.format(args.output_prefix, args.output_suffix, split_number + 1)
    SaveLumis(file_name, LumiList(compactList=lumis))
    last_lumi = split

print("Dataset lumis are split into {} parts.".format(args.n_splits))
gpl-2.0
-1,129,987,620,451,551,900
37.394366
102
0.676816
false
3.173458
false
false
false
UPDDI/mps-database-server
assays/forms.py
1
247404
import datetime from django import forms from django.contrib.auth.models import Group from django.forms.models import ( BaseInlineFormSet, inlineformset_factory, BaseModelFormSet, modelformset_factory, ) from cellsamples.models import Biosensor from assays.models import ( AssayStudyConfiguration, AssayStudy, AssayStudySupportingData, AssayStudyAssay, AssayMatrix, AssayCategory, TEST_TYPE_CHOICES, PhysicalUnits, AssaySampleLocation, AssaySetting, AssaySetupCompound, AssaySetupCell, AssaySetupSetting, AssayMatrixItem, AssayStudyStakeholder, AssayTarget, AssayMethod, AssayStudyModel, AssayStudySet, AssayReference, AssayStudyReference, AssayStudySetReference, AssayTarget, AssayMeasurementType, AssayMethod, AssaySetting, AssaySupplier, AssayCategory, AssayPlateReaderMap, AssayPlateReaderMapItem, AssayPlateReaderMapItemValue, AssayPlateReaderMapDataFile, AssayPlateReaderMapDataFileBlock, # ... AssayGroup, AssayGroupCell, AssayGroupCompound, AssayGroupSetting, assay_plate_reader_time_unit_choices, assay_plate_reader_main_well_use_choices, assay_plate_reader_blank_well_use_choices, assay_plate_reader_map_info_plate_size_choices, assay_plate_reader_volume_unit_choices, assay_plate_reader_file_delimiter_choices, upload_file_location, AssayOmicDataFileUpload, AssayOmicDataPoint, AssayOmicAnalysisTarget, AssayOmicSampleMetadata, # AssayOmicDataGroup, AssayDataFileUpload, assay_omic_data_type_choices, ) from compounds.models import Compound, CompoundInstance, CompoundSupplier from microdevices.models import ( MicrophysiologyCenter, Microdevice, OrganModel, OrganModelProtocol, OrganModelLocation, ) from mps.forms import SignOffMixin, BootstrapForm, tracking import string from captcha.fields import CaptchaField from .utils import ( # validate_file, # get_chip_details, # get_plate_details, TIME_CONVERSIONS, # EXCLUDED_DATA_POINT_CODE, AssayFileProcessor, get_user_accessible_studies, plate_reader_data_file_process_data, CALIBRATION_CURVE_MASTER_DICT, calibration_choices, omic_data_file_processing_data_main_for_all_data_types, COLUMN_HEADERS, omic_data_quality_clean_check_for_omic_file_upload, omic_metadata_find_the_labels_needed_for_the_indy_metadata_table, sck_general_convert_time_from_mintues_to_unit_given, sck_general_convert_time_unit_given_to_minutes, sck_general_given_pk_of_organ_model_make_dictionary_with_location_pk_and_location_name, data_quality_clean_check_for_omic_metadata_empty_fields, omic_process_the_omic_sample_metadata, omic_data_quality_clean_check_for_omic_form_fields, ) from mps.utils import ( get_split_times, ) from django.utils import timezone from mps.templatetags.custom_filters import is_group_admin, filter_groups, ADMIN_SUFFIX from django.core.exceptions import NON_FIELD_ERRORS, ValidationError from mps.settings import MEDIA_ROOT import ujson as json import os import csv import re import operator # TODO REFACTOR WHITTLING TO BE HERE IN LIEU OF VIEW # TODO REFACTOR FK QUERYSETS TO AVOID N+1 # These are all of the tracking fields # tracking = ( # 'created_by', # 'created_on', # 'modified_on', # 'modified_by', # 'signed_off_by', # 'signed_off_date', # 'locked', # 'restricted' # ) # Excluding restricted is likewise useful restricted = ('restricted',) # Group group = ('group',) # For flagging flag_group = ( 'flagged', 'reason_for_flag' ) def get_dic_for_custom_choice_field(form, filters=None): dic = {} fields = form.custom_fields parent = form.model for field in fields: model = parent._meta.get_field(field).related_model if filters and filters.get(field, None): dic.update({ field: 
{str(instance.id): instance for instance in model.objects.filter(**filters.get(field))} }) else: dic.update({ field: {str(instance.id): instance for instance in model.objects.all()} }) return dic class SetupFormsMixin(BootstrapForm): ### ADDING SETUP CELLS cell_cell_sample = forms.IntegerField(required=False) cell_biosensor = forms.ModelChoiceField( queryset=Biosensor.objects.all().prefetch_related('supplier'), required=False, # Default is naive initial=2 ) cell_density = forms.FloatField(required=False) # TODO THIS IS TO BE HAMMERED OUT cell_density_unit = forms.ModelChoiceField( queryset=PhysicalUnits.objects.filter( availability__contains='cell' ).order_by('unit'), required=False ) cell_passage = forms.CharField(required=False) cell_addition_location = forms.ModelChoiceField( # Avoid duplicate query queryset=AssaySampleLocation.objects.all().order_by('name'), # queryset=AssaySampleLocation.objects.none(), required=False ) ### ?ADDING SETUP SETTINGS setting_setting = forms.ModelChoiceField( queryset=AssaySetting.objects.all().order_by('name'), required=False ) setting_unit = forms.ModelChoiceField( queryset=PhysicalUnits.objects.all().order_by('base_unit','scale_factor'), required=False ) setting_value = forms.CharField(required=False) setting_addition_location = forms.ModelChoiceField( # Avoid duplicate query queryset=AssaySampleLocation.objects.all().order_by('name'), # queryset=AssaySampleLocation.objects.none(), required=False ) ### ADDING COMPOUNDS compound_compound = forms.ModelChoiceField( queryset=Compound.objects.all().order_by('name'), required=False ) # Notice the special exception for % compound_concentration_unit = forms.ModelChoiceField( queryset=(PhysicalUnits.objects.filter( unit_type__unit_type='Concentration' ).order_by( 'base_unit__unit', 'scale_factor' ) | PhysicalUnits.objects.filter(unit='%')), required=False, initial=4 ) compound_concentration = forms.FloatField(required=False) compound_addition_location = forms.ModelChoiceField( # Avoid duplicate query queryset=AssaySampleLocation.objects.all().order_by('name'), # queryset=AssaySampleLocation.objects.none(), required=False ) # Text field (un-saved) for supplier compound_supplier_text = forms.CharField( required=False, initial='' ) # Text field (un-saved) for lot compound_lot_text = forms.CharField( required=False, initial='' ) # Receipt date compound_receipt_date = forms.DateField(required=False) # For MPS Models etc. 
test_type = forms.ChoiceField( initial='control', choices=TEST_TYPE_CHOICES, required=False ) organ_model_full = forms.ModelChoiceField( queryset=OrganModel.objects.all().order_by('name'), required=False, label='Matrix Item MPS Model' ) organ_model_protocol_full = forms.ModelChoiceField( queryset=OrganModelProtocol.objects.all().order_by('name'), required=False, label='Matrix Item MPS Model Version' ) def __init__(self, *args, **kwargs): super(SetupFormsMixin, self).__init__(*args, **kwargs) sections_with_times = ( 'compound', 'cell', 'setting' ) for time_unit in list(TIME_CONVERSIONS.keys()): for current_section in sections_with_times: # Create fields for Days, Hours, Minutes self.fields[current_section + '_addition_time_' + time_unit] = forms.FloatField( initial=0, required=False, widget=forms.NumberInput(attrs={ 'class': 'form-control required', 'style': 'width:75px;' }) ) self.fields[current_section + '_duration_' + time_unit] = forms.FloatField( initial=0, required=False, widget=forms.NumberInput(attrs={ 'class': 'form-control required', 'style': 'width:75px;' }) ) self.fields['cell_cell_sample'].widget.attrs['style'] = 'width:75px;' self.fields['cell_passage'].widget.attrs['style'] = 'width:75px;' # DUMB, BAD (can't have them be "actually" required or they prevent submission add_required_to = [ 'cell_cell_sample', 'cell_biosensor', 'cell_density', 'cell_density_unit', 'cell_addition_location', 'setting_setting', 'setting_unit', 'setting_value', 'setting_addition_location', 'compound_compound', 'compound_concentration_unit', 'compound_concentration', 'compound_addition_location', ] for current_field in add_required_to: self.fields[current_field].widget.attrs['class'] += ' required' # Sloppy if hasattr(self.fields[current_field], '_queryset'): if hasattr(self.fields[current_field]._queryset, 'model'): # Usually one would use a hyphen rather than an underscore # self.fields[field].widget.attrs['data-app'] = self.fields[field]._queryset.model._meta.app_label self.fields[current_field].widget.attrs['data_app'] = self.fields[current_field]._queryset.model._meta.app_label # self.fields[field].widget.attrs['data-model'] = self.fields[field]._queryset.model._meta.object_name self.fields[current_field].widget.attrs['data_model'] = self.fields[current_field]._queryset.model._meta.object_name self.fields[current_field].widget.attrs['data_verbose_name'] = self.fields[current_field]._queryset.model._meta.verbose_name # Possibly dumber # In Bootstrap Form # if hasattr(self.fields[current_field]._queryset.model, 'get_add_url_manager'): # self.fields[current_field].widget.attrs['data_add_url'] = self.fields[current_field]._queryset.model.get_add_url_manager() # Avoid duplicate queries for the sample locations # sample_locations = AssaySampleLocation.objects.all().order_by('name') # self.fields['cell_addition_location'].queryset = sample_locations # self.fields['compound_addition_location'].queryset = sample_locations # self.fields['setting_addition_location'].queryset = sample_locations # CRUDE: MAKE SURE NO SELECTIZE INTERFERING self.fields['organ_model_full'].widget.attrs['class'] = 'no-selectize' self.fields['organ_model_protocol_full'].widget.attrs['class'] = 'no-selectize' self.fields['test_type'].widget.attrs['class'] = 'no-selectize' # DEPRECATED NO LONGER NEEDED AS CHARFIELDS NOW STRIP AUTOMATICALLY class ModelFormStripWhiteSpace(BootstrapForm): """Strips the whitespace from char and text fields""" def clean(self): cd = self.cleaned_data for field_name, field in list(self.fields.items()): if 
isinstance(field, forms.CharField): if self.fields[field_name].required and not cd.get(field_name, None): self.add_error(field_name, "This is a required field.") else: cd[field_name] = cd[field_name].strip() return super(ModelFormStripWhiteSpace, self).clean() class ModelFormSplitTime(BootstrapForm): def __init__(self, *args, **kwargs): super(ModelFormSplitTime, self).__init__(*args, **kwargs) for time_unit in list(TIME_CONVERSIONS.keys()): if self.fields.get('addition_time', None): # Create fields for Days, Hours, Minutes self.fields['addition_time_' + time_unit] = forms.FloatField( initial=0, widget=forms.NumberInput(attrs={ 'class': 'form-control', 'style': 'width:75px;' }) ) # Set default self.fields['addition_time_' + time_unit].widget.attrs['data-default'] = 0 if self.fields.get('duration', None): self.fields['duration_' + time_unit] = forms.FloatField( initial=0, widget=forms.NumberInput(attrs={ 'class': 'form-control', 'style': 'width:75px;' }) ) # Set default self.fields['duration_' + time_unit].widget.attrs['data-default'] = 0 # Fill additional time if self.fields.get('addition_time', None): addition_time_in_minutes_remaining = getattr(self.instance, 'addition_time', 0) if not addition_time_in_minutes_remaining: addition_time_in_minutes_remaining = 0 for time_unit, conversion in list(TIME_CONVERSIONS.items()): initial_time_for_current_field = int(addition_time_in_minutes_remaining / conversion) if initial_time_for_current_field: self.fields['addition_time_' + time_unit].initial = initial_time_for_current_field addition_time_in_minutes_remaining -= initial_time_for_current_field * conversion # Add fractions of minutes if necessary if addition_time_in_minutes_remaining: self.fields['addition_time_minute'].initial += addition_time_in_minutes_remaining # Fill duration if self.fields.get('duration', None): duration_in_minutes_remaining = getattr(self.instance, 'duration', 0) if not duration_in_minutes_remaining: duration_in_minutes_remaining = 0 for time_unit, conversion in list(TIME_CONVERSIONS.items()): initial_time_for_current_field = int(duration_in_minutes_remaining / conversion) if initial_time_for_current_field: self.fields['duration_' + time_unit].initial = initial_time_for_current_field duration_in_minutes_remaining -= initial_time_for_current_field * conversion # Add fractions of minutes if necessary if duration_in_minutes_remaining: self.fields['duration_minute'].initial += duration_in_minutes_remaining def clean(self): cleaned_data = super(ModelFormSplitTime, self).clean() if cleaned_data and not cleaned_data.get('DELETE', False): cleaned_data.update({ 'addition_time': 0, 'duration': 0 }) for time_unit, conversion in list(TIME_CONVERSIONS.items()): cleaned_data.update({ 'addition_time': cleaned_data.get('addition_time') + cleaned_data.get('addition_time_' + time_unit, 0) * conversion, 'duration': cleaned_data.get('duration') + cleaned_data.get('duration_' + time_unit, 0) * conversion }) return cleaned_data # TODO TODO TODO PLEASE, PLEASE GET RID OF THIS TRASH! class BaseModelFormSetForcedUniqueness(BaseModelFormSet): def clean(self): self.validate_unique() def validate_unique(self): # Collect unique_checks and date_checks to run from all the forms. 
all_unique_checks = set() all_date_checks = set() forms_to_delete = self.deleted_forms valid_forms = [form for form in self.forms if form not in forms_to_delete and form.is_valid()] for form in valid_forms: # exclude = form._get_validation_exclusions() # unique_checks, date_checks = form.instance._get_unique_checks(exclude=exclude) unique_checks, date_checks = form.instance._get_unique_checks() all_unique_checks = all_unique_checks.union(set(unique_checks)) all_date_checks = all_date_checks.union(set(date_checks)) errors = [] # Do each of the unique checks (unique and unique_together) for uclass, unique_check in all_unique_checks: seen_data = set() for form in valid_forms: # PLEASE NOTE: SPECIAL EXCEPTION FOR FORMS WITH NO ID TO AVOID TRIGGERING ID DUPLICATE if unique_check == ('id',) and not form.cleaned_data.get('id', ''): # IN POOR TASTE, BUT EXPEDIENT continue # get data for each field of each of unique_check # PLEASE NOTE THAT THIS GETS ALL FIELDS, EVEN IF NOT IN THE FORM row_data = ( form.cleaned_data[field] if field in form.cleaned_data else getattr(form.instance, field, None) for field in unique_check ) # Reduce Model instances to their primary key values row_data = tuple(d._get_pk_val() if hasattr(d, '_get_pk_val') else d for d in row_data) # if row_data and None not in row_data: # if we've already seen it then we have a uniqueness failure if row_data in seen_data: # poke error messages into the right places and mark # the form as invalid errors.append(self.get_unique_error_message(unique_check)) form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()]) # remove the data from the cleaned_data dict since it was invalid for field in unique_check: if field in form.cleaned_data: del form.cleaned_data[field] # mark the data as seen seen_data.add(row_data) # iterate over each of the date checks now for date_check in all_date_checks: seen_data = set() uclass, lookup, field, unique_for = date_check for form in valid_forms: # see if we have data for both fields if (form.cleaned_data and form.cleaned_data[field] is not None and form.cleaned_data[unique_for] is not None): # if it's a date lookup we need to get the data for all the fields if lookup == 'date': date = form.cleaned_data[unique_for] date_data = (date.year, date.month, date.day) # otherwise it's just the attribute on the date/datetime # object else: date_data = (getattr(form.cleaned_data[unique_for], lookup),) data = (form.cleaned_data[field],) + date_data # if we've already seen it then we have a uniqueness failure if data in seen_data: # poke error messages into the right places and mark # the form as invalid errors.append(self.get_date_error_message(date_check)) form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()]) # remove the data from the cleaned_data dict since it was invalid del form.cleaned_data[field] # mark the data as seen seen_data.add(data) if errors: raise forms.ValidationError(errors) # TODO TODO TODO WILL NEED TO CHANGE THIS WITH DJANGO VERSION NO DOUBT class BaseInlineFormSetForcedUniqueness(BaseModelFormSetForcedUniqueness, BaseInlineFormSet): def clean(self): self.validate_unique() class DicModelChoiceField(forms.Field): """Special field using dictionary instead of queryset as choices This is to prevent ludicrous numbers of queries """ widget = forms.TextInput def __init__(self, name, parent, dic, *args, **kwargs): self.name = name self.parent = parent self.dic = dic self.model = self.parent._meta.get_field(self.name).related_model super(DicModelChoiceField, 
self).__init__(*args, **kwargs) # Make sure required is set properly self.required = self.widget.required = not ( self.parent._meta.get_field(self.name).null and self.parent._meta.get_field(self.name).blank ) def to_python(self, value): if value in self.empty_values: return None try: value = self.dic.get(self.name).get(value) except: raise forms.ValidationError(self.error_messages['invalid_choice'], code='invalid_choice') return value def valid_value(self, value): """Check to see if the provided value is a valid choice""" if str(value.id) in self.dic.get(self.name): return True return False class AssayStudyConfigurationForm(SignOffMixin, BootstrapForm): """Frontend Form for Study Configurations""" class Meta(object): model = AssayStudyConfiguration widgets = { 'name': forms.Textarea(attrs={'cols': 50, 'rows': 1}), 'media_composition': forms.Textarea(attrs={'cols': 50, 'rows': 3}), 'hardware_description': forms.Textarea(attrs={'cols': 50, 'rows': 3}), } exclude = tracking class AssayStudyModelForm(BootstrapForm): class Meta(object): model = AssayStudyModel exclude = ('' ,) def __init__(self, *args, **kwargs): super(AssayStudyModelForm, self).__init__(*args, **kwargs) self.fields['label'].widget.attrs.update({ 'size': '4', 'max_length': '2' }) self.fields['sequence_number'].widget.attrs.update({ 'size': '4', 'max_length': '2' }) self.fields['output'].widget.attrs.update({ 'size': '20', 'max_length': '20' }) # FormSet for Study Models AssayStudyModelFormSet = inlineformset_factory( AssayStudyConfiguration, AssayStudyModel, extra=1, form=AssayStudyModelForm, widgets={ 'label': forms.TextInput(attrs={'size': 2}), 'sequence_number': forms.TextInput(attrs={'size': 2}) } ) def label_to_number(label): """Returns a numeric index from an alphabetical index""" num = 0 for char in label: if char in string.ascii_letters: num = num * 26 + (ord(char.upper()) - ord('A')) + 1 return num # Now uses unicode instead of string def stringify_excel_value(value): """Given an excel value, return a unicode cast of it This also converts floats to integers when possible """ # If the value is just a string literal, return it if type(value) == str or type(value) == str: return str(value) else: try: # If the value can be an integer, make it into one if int(value) == float(value): return str(int(value)) else: return str(float(value)) except: return str(value) class AssayStudyAssayInlineFormSet(BaseInlineFormSet): def __init__(self, *args, **kwargs): """Init APRA inline Filters units so that only units marked 'readout' appear """ super(AssayStudyAssayInlineFormSet, self).__init__(*args, **kwargs) target_queryset = AssayTarget.objects.all().order_by('name') method_queryset = AssayMethod.objects.all().order_by('name') # unit_queryset = PhysicalUnits.objects.filter( # availability__icontains='readout' # ).order_by('unit_type__unit_type', 'base_unit__unit', 'scale_factor') unit_queryset = PhysicalUnits.objects.order_by('unit_type__unit_type', 'base_unit__unit', 'scale_factor') category_queryset = AssayCategory.objects.all().order_by('name') for form in self.forms: form.fields['target'].queryset = target_queryset form.fields['method'].queryset = method_queryset form.fields['unit'].queryset = unit_queryset form.fields['category'] = forms.ModelChoiceField( queryset=category_queryset, required=False, empty_label='All' ) class ReadyForSignOffForm(forms.Form): captcha = CaptchaField() message = forms.TextInput() # TODO PLEASE REVIEW class AssayStudyForm(SignOffMixin, BootstrapForm): def __init__(self, *args, **kwargs): """Init the 
Study Form Kwargs: groups -- a queryset of groups (allows us to avoid N+1 problem) """ super(AssayStudyForm, self).__init__(*args, **kwargs) self.fields['group'].queryset = filter_groups(self.user) # Crudely force required class for current_field in ['total_device_volume', 'flow_rate', 'number_of_relevant_cells']: self.fields[current_field].widget.attrs['class'] += ' required' class Meta(object): model = AssayStudy widgets = { 'assay_run_id': forms.Textarea(attrs={'rows': 1}), 'name': forms.Textarea(attrs={'rows': 1}), 'description': forms.Textarea(attrs={'rows': 5, 'cols': 100}), } exclude = tracking + restricted + ('access_groups', 'signed_off_notes', 'bulk_file', 'collaborator_groups') def clean(self): """Checks for at least one study type""" # clean the form data, before validation data = super(AssayStudyForm, self).clean() if not any([data['toxicity'], data['efficacy'], data['disease'], data['cell_characterization'], data['omics'], data['pbpk_steady_state'], data['pbpk_bolus']]): raise forms.ValidationError('Please select at least one study type') if data.get('pbpk_steady_state', '') and (not data.get('number_of_relevant_cells', '') or not data.get('flow_rate', '')): raise forms.ValidationError('Continuous Infusion PBPK Requires Number of Cells Per MPS Model and Flow Rate') if data.get('pbpk_bolus', '') and (not data.get('number_of_relevant_cells', '') or not data.get('total_device_volume', '')): raise forms.ValidationError('Bolus PBPK Requires Number of Cells Per MPS Model and Total Device Volume') return data class AssayStudyDetailForm(SignOffMixin, BootstrapForm): def __init__(self, *args, **kwargs): super(AssayStudyDetailForm, self).__init__(*args, **kwargs) # Get valid groups for the dropdown self.fields['group'].queryset = filter_groups(self.user) # Crudely force required class for current_field in ['total_device_volume', 'flow_rate', 'number_of_relevant_cells']: self.fields[current_field].widget.attrs['class'] += ' required' class Meta(object): model = AssayStudy widgets = { 'name': forms.Textarea(attrs={'rows': 1}), 'description': forms.Textarea(attrs={'rows': 5, 'cols': 100}), } # Since we are splitting into multiple forms, includes are safer fields = ( 'group', 'toxicity', 'efficacy', 'disease', 'cell_characterization', 'omics', 'diseases', 'start_date', 'use_in_calculations', 'protocol', 'image', 'pbpk_steady_state', 'pbpk_bolus', 'number_of_relevant_cells', 'total_device_volume', 'flow_rate', 'name', 'description', ) + flag_group def clean(self): """Checks for at least one study type""" # clean the form data, before validation data = super(AssayStudyDetailForm, self).clean() if not any([data['toxicity'], data['efficacy'], data['disease'], data['cell_characterization'], data['omics'], data['pbpk_steady_state'], data['pbpk_bolus']]): raise forms.ValidationError('Please select at least one study type') if data.get('pbpk_steady_state', '') and (not data.get('number_of_relevant_cells', '') or not data.get('flow_rate', '')): raise forms.ValidationError('Continuous Infusion PBPK Requires Number of Cells Per MPS Model and Flow Rate') if data.get('pbpk_bolus', '') and (not data.get('number_of_relevant_cells', '') or not data.get('total_device_volume', '')): raise forms.ValidationError('Bolus PBPK Requires Number of Cells Per MPS Model and Total Device Volume') return data # TODO: OBVIOUSLY, WE WOULD RATHER NOT USE THIS FOR DETAIL PAGES ETC. # Instead we could hook up an AJAX request that does this? 
We would need to revise the difference table generation class AssayStudyGroupForm(SetupFormsMixin, SignOffMixin, BootstrapForm): # CONTRIVANCES # test_type = forms.ChoiceField( # initial='control', # choices=TEST_TYPE_CHOICES, # required=False # ) # organ_model_full = forms.ModelChoiceField( # queryset=OrganModel.objects.all().order_by('name'), # required=False, # label='Matrix Item MPS Model' # ) # organ_model_protocol_full = forms.ModelChoiceField( # queryset=OrganModelProtocol.objects.all().order_by('name'), # required=False, # label='Matrix Item MPS Model Version' # ) # number_of_items = forms.CharField( # initial='', # required=False # ) # group_name = forms.CharField( # initial='', # required=False # ) # CONTRIVED! series_data = forms.CharField(required=False) # Contrivance organ_model = forms.ModelChoiceField( queryset=OrganModel.objects.all().order_by('name'), required=False, label='Matrix Item MPS Model' ) update_group_fields = [ 'name', 'test_type', 'organ_model_id', 'organ_model_protocol_id', ] class Meta(object): model = AssayStudy # Since we are splitting into multiple forms, includes are safer # Only temporary, will change when finished fields = ( # TEMPORARY -> 'series_data', # <- TEMPORARY 'test_type', 'organ_model', 'organ_model_full', # 'group_name', # TEMP! 'organ_model_protocol', 'organ_model_protocol_full', 'cell_cell_sample', 'cell_biosensor', 'cell_density', 'cell_density_unit', 'cell_passage', 'cell_addition_location', 'setting_setting', 'setting_unit', 'setting_value', 'setting_addition_location', 'compound_compound', 'compound_concentration_unit', 'compound_concentration', 'compound_addition_location', 'compound_supplier_text', 'compound_lot_text', 'compound_receipt_date', ) + flag_group def __init__(self, *args, **kwargs): # TODO TODO TODO REVISE REVISE REVISE # WE PROBABLY WON'T NEED THIS KWARG AFTER WE FIX DIFFERENCE TABLE DISPLAY TO NO LONGER USE KLUDGE special_filter = kwargs.pop('special_filter', '') # LIKEWISE CONTRIVED get_chips = kwargs.pop('get_chips', True) super(AssayStudyGroupForm, self).__init__(*args, **kwargs) # Contrivances self.fields['test_type'].widget.attrs['class'] = 'no-selectize required form-control' # Prepopulate series_data from thing # PLEASE NOTE: The special_filter is contrived self.fields['series_data'].initial = self.instance.get_group_data_string(get_chips=get_chips, special_filter=special_filter) def clean(self): """Checks for at least one study type""" # clean the form data, before validation data = super(AssayStudyGroupForm, self).clean() # SLOPPY NOT DRY new_setup_data = {} # This matrix is only for chips # WARNING: THIS WILL BREAK IN STUDIES WITH MULTIPLE CHIP SETS # IMPORTANT NOTE: WHEN BACK-FILLING, WE WILL NEED TO CONSOLIDATE CHIP MATRICES! Otherwise this flow will not work correctly... 
current_matrix = AssayMatrix.objects.filter( # The study must exist in order to visit this page, so getting the id this was is fine study_id=self.instance.id, representation='chips' ) # Current group ids so that we can match for deletes and edits current_groups = AssayGroup.objects.filter( study_id=self.instance.id ) current_group_ids = { group.id: group for group in current_groups } # Exceedingly stupid: kludge for the "re-order" problem special_group_handling_required = False # Crude, but we need current groups current_group_names = {group.name: group.id for group in current_group_ids.values()} if current_matrix: current_matrix = current_matrix[0].id else: current_matrix = None # Ditto for items (chips, in this case as wells are managed elsewhere) # We only care about chips, at the moment current_items = AssayMatrixItem.objects.filter( study_id=self.instance.id, matrix_id=current_matrix ) current_item_ids = { item.id: item for item in current_items } # For auto-name assignation current_item_names = { item.name: True for item in current_items } # TODO TODO TODO CONTRIVED: CHECK MAX FOR COLUMN INDEX ASSIGNATION current_column_max = max([int(column_index) for column_index in current_items.values_list('column_index', flat=True)], default=-1) # Need to get the current groups (this could be an edit of groups) new_groups = None # Note that the instance is None for new adds, of course current_groups = AssayGroup.objects.filter(study_id=self.instance.id) new_items = None # Likewise with chips, some may need to be edited or removed etc. # DO NOT DUPLICATE QUERY # current_items = AssayMatrixItem.objects.filter(matrix_id=current_matrix) # This is supposed to contain data for cells, compounds, and settings (perhaps more later) new_related = None # Just have the errors be non-field errors for the moment all_errors = {'series_data': [], '__all__': []} current_errors = all_errors.get('series_data') non_field_errors = all_errors.get('__all__') # Am I sticking with the name 'series_data'? if self.cleaned_data.get('series_data', None): all_data = json.loads(self.cleaned_data.get('series_data', '[]')) else: # Contrived defaults all_data = { 'series_data': [], 'chips': [], 'plates': {} } # The data for groups is currently stored in series_data all_setup_data = all_data.get('series_data') all_chip_data = all_data.get('chips') # Catch technically empty setup data setup_data_is_empty = True for group_set in all_setup_data: if group_set: setup_data_is_empty = not any(group_set.values()) if setup_data_is_empty: all_setup_data = [] # if commit and all_setup_data: # SEE BASE MODELS FOR WHY COMMIT IS NOT HERE if all_setup_data: created_by = self.user created_on = timezone.now() # current_item_number = 1 # CRUDE: JUST MAKE ONE LARGE ROW? number_of_items = 0 for setup_group in all_setup_data: if setup_group.get('number_of_items'): number_of_items += int(setup_group.get('number_of_items', '0')) # Alternative for one row per group # # Find max for number of columns # number_of_columns = 0 # for setup_group in all_setup_data: # if int(setup_group.get('number_of_items', '0')) > number_of_columns: # number_of_columns = int(setup_group.get('number_of_items', '0')) if not current_matrix: new_matrix = AssayMatrix( # Just name the chip matrix the same thing as the study? 
# PLEASE NOTE: Study names can be huge for some reason # FOR NOW: just make sure they can save # We should edit max length of study name later name=self.instance.name[:255], # Does not work with plates at the moment representation='chips', study=self.instance, # Doesn't matter for chips device=None, organ_model=None, # Alternative that looks nicer, but these matrices probably won't be accessible anyway # number_of_rows=len(all_setup_data), # number_of_columns=number_of_columns, number_of_rows=1, number_of_columns=number_of_items, created_by=created_by, created_on=created_on, modified_by=created_by, modified_on=created_on, ) try: new_matrix.full_clean() except forms.ValidationError as e: non_field_errors.append(e) else: new_matrix = None # COMPOUND STUFF BECAUSE COMPOUND SCHEMA IS MISERABLE # Get all chip setup assay compound instances assay_compound_instances = {} # Get all Compound Instances compound_instances = { ( instance.compound.id, instance.supplier.id, instance.lot, str(instance.receipt_date) ): instance for instance in CompoundInstance.objects.all().prefetch_related( 'compound', 'supplier' ) } # Get all suppliers suppliers = { supplier.name: supplier for supplier in CompoundSupplier.objects.all() } # SLOPPY TODO TODO TODO # Would be much neater to have this in an object or something new_groups = [] update_groups = [] deleted_groups = [] # CHECK UNIQUENESS group_names = {} new_items = [] update_items = [] deleted_items = [] # Superfluous? new_item_to_group_name = {} new_cells = [] update_cells = [] deleted_cells = [] new_compounds = [] update_compounds = [] deleted_compounds = [] new_settings = [] update_settings = [] deleted_settings = [] # For now, chips are are all in one row for setup_row, setup_group in enumerate(all_setup_data): if setup_group.get('number_of_items') is None or setup_group.get('number_of_items') is '': continue items_in_group = int(setup_group.get('number_of_items', '0')) test_type = setup_group.get('test_type', '') # To break out to prevent repeat errors group_has_error = False # Make the group # Add the group to the new_groups # TODO DIFFERENTIATE NEW AND EXISTING GROUPS HERE # We can identify and existing group by checking for an id current_group = current_group_ids.get(int(setup_group.get('id', 0)), None) if current_group: # ??? # new_group = current_group if setup_group.get('deleted', False): deleted_groups.append(current_group.id) else: # TODO LOGIC FOR UPDATE HERE? # We need to update name, test_type, organ_model, and organ_model_protocol # IDEALLY ONLY IF THEY NEED TO BE UPDATED group_needs_to_be_updated = False for field in self.update_group_fields: if getattr(current_group, field) != setup_group.get(field, None): # Contrived: Replace empty string with None if field.endswith('id') and setup_group.get(field, None) is '': setattr(current_group, field, None) else: setattr(current_group, field, setup_group.get(field, None)) group_needs_to_be_updated = True if group_needs_to_be_updated: try: # Interestingly, we must exclude name # Please see the Chips form about the the re-order-kludge current_group.full_clean( exclude=['name'] ) update_groups.append(current_group) # MUST BE MODIFIED TO ADD TO CORRECT ROW (we could display all above too?) except forms.ValidationError as e: current_errors.append( process_error_with_annotation( 'group', setup_row, 0, e ) ) group_has_error = True # Add to group names # Check uniqueness if setup_group.get('name', '') in group_names: non_field_errors.append('The Group name "{}" is duplicated. 
The names of Groups must be unique.'.format( setup_group.get('name', '') )) else: group_names.update({ setup_group.get('name', ''): True }) # Note that we need special handling when there is a conflict in current group names if setup_group.get('name', '') in current_group_names and setup_group.get('id', 0) != current_group_names.get(setup_group.get('name', '')): special_group_handling_required = True else: # CRUDE current_organ_model_id = setup_group.get('organ_model_id', None) if current_organ_model_id: current_organ_model_id = int(current_organ_model_id) else: current_organ_model_id = None current_organ_model_protocol_id = setup_group.get('organ_model_protocol_id', None) if current_organ_model_protocol_id: current_organ_model_protocol_id = int(current_organ_model_protocol_id) else: current_organ_model_protocol_id = None new_group = AssayGroup( # Study should just be instance study=self.instance, name=setup_group.get('name', ''), test_type=setup_group.get('test_type', ''), organ_model_id=current_organ_model_id, organ_model_protocol_id=current_organ_model_protocol_id, ) # TODO Logic for first clean and adding to new_groups here try: # I think we are fine with no exclusions new_group.full_clean( exclude=['name'] ) new_groups.append(new_group) # MUST BE MODIFIED TO ADD TO CORRECT ROW (we could display all above too?) except forms.ValidationError as e: current_errors.append( process_error_with_annotation( 'group', setup_row, 0, e ) ) group_has_error = True # Add to group names # Check uniqueness if setup_group.get('name', '') in group_names: non_field_errors.append('The Group name "{}" is duplicated. The names of Groups must be unique.'.format( setup_group.get('name', '') )) else: group_names.update({ setup_group.get('name', ''): True }) # Note that we need special handling when there is a conflict in current group names if setup_group.get('name', '') in current_group_names and setup_group.get('id', 0) != current_group_names.get(setup_group.get('name', '')): special_group_handling_required = True # Always iterate for cells, compounds, and settings # Keep in mind that to decrease sparsity related data is now tied to a group for prefix, current_objects in setup_group.items(): # Related are tied to group, not item # Groups are INDEX DEPENDENT, *NOT* by ID (group may or may not exist) # If we don't want to wipe things every time, we CAN'T JUST DO THIS! # Obviously, we would need to differentiate adds, updates, and deletes # current_related_list = new_related.setdefault( # str(setup_row), [] # ) if prefix in ['cell', 'compound', 'setting'] and setup_group[prefix]: for setup_column, current_object in enumerate(current_objects): # Just to filter out anything that isn't related data we need # TODO: NOTE: The big problem here is that we do not differentiate updates and adds! # That is, we would need to wipe all of the existing related data for this to work... # That is *possible*, but unwise # We could, alternatively, see if there is an entry at the INDEX (setup_column) # This can get quite convoluted! AND ALSO NEEDS TO ACCOMMODATE DELETIONS! # TODO TODO TODO NOTE: Basically to deal with deletions, we could see if number_of_deletions > number_of_new_columns # Of course, we get number_of_new_columns from the passed data # On the other hand, if we didn't really care about maximizing efficiency we could just kill anything marked for deletion # The performance hit for adding a new entry instead of updating an existing would be negligible # Why bother twisiting oneself into a knot to do so? 
# Besides, if we mark deletions rather than totally removing them, we would know for sure whether "they were in the right column" and thus whether they needed to be added # Anyway, we would need a query that matched the data to "columns" # The query for this, hopefully, shouldn't be too big! # We only care about that which is associated with groups in THIS study, so it should be fine? # Skip if nothing if not current_object: continue # Crudely convert to int for current_field, current_value in current_object.items(): if current_field.endswith('_id'): if current_value: current_object.update({ current_field: int(current_value) }) else: current_object.update({ current_field: None }) # NOTE TODO TODO TODO # I am probably just going to blow up all of the old related data for the moment and always add # This is much faster to write but more expensive than it needs to be # On the bright side, it won't orphan any data because data is bound to a Group rather than the constituent pieces... current_object.update({ # GROUP NOT ITEM # 'group_id': new_group, # SOMEWHAT TRICKY: START WITH NAME AND OVERWRITE 'group_id': setup_group.get('name', '') }) # Breaks rule of 3 if prefix == 'cell': new_cell = AssayGroupCell(**current_object) try: new_cell.full_clean(exclude=['group']) # current_related_list.append(new_cell) new_cells.append(new_cell) except forms.ValidationError as e: # May need to revise process_error current_errors.append( process_error_with_annotation( prefix, setup_row, setup_column, e ) ) group_has_error = True elif prefix == 'setting': new_setting = AssayGroupSetting(**current_object) try: new_setting.full_clean(exclude=['group']) # current_related_list.append(new_setting) new_settings.append(new_setting) except forms.ValidationError as e: current_errors.append( process_error_with_annotation( prefix, setup_row, setup_column, e ) ) group_has_error = True elif prefix == 'compound': # CONFUSING NOT DRY BAD try: compound = int(current_object.get('compound_id', '0')) except TypeError: current_errors.append( process_error_with_annotation( prefix, setup_row, setup_column, { 'compound': ['A compound is required'] } ) ) # CRUDE! 
Break here to prevent further processing break supplier_text = current_object.get('supplier_text', 'N/A').strip() lot_text = current_object.get('lot_text', 'N/A').strip() receipt_date = current_object.get('receipt_date', '') # NOTE THE DEFAULT, PLEASE DO THIS IN A WAY THAT IS MORE DRY if not supplier_text: supplier_text = 'N/A' if not lot_text: lot_text = 'N/A' # Check if the supplier already exists supplier = suppliers.get(supplier_text, '') concentration = current_object.get('concentration', '0') # Annoying, bad if not concentration: concentration = 0.0 else: concentration = float(concentration) concentration_unit_id = current_object.get('concentration_unit_id', '0') if concentration_unit_id: concentration_unit_id = int(concentration_unit_id) else: concentration_unit_id = None addition_location_id = current_object.get('addition_location_id', '0') if addition_location_id: addition_location_id = int(addition_location_id) else: addition_location_id = None addition_time = current_object.get('addition_time', '0') duration = current_object.get('duration', '0') if not addition_time: addition_time = 0.0 else: addition_time = float(addition_time) if not duration: duration = 0.0 else: duration = float(duration) # Otherwise create the supplier if not supplier: supplier = CompoundSupplier( name=supplier_text, created_by=created_by, created_on=created_on, modified_by=created_by, modified_on=created_on, ) try: supplier.full_clean() supplier.save() except forms.ValidationError as e: current_errors.append( process_error_with_annotation( prefix, setup_row, setup_column, e ) ) group_has_error = True suppliers.update({ supplier_text: supplier }) # FRUSTRATING EXCEPTION if not receipt_date: receipt_date = None # Check if compound instance exists compound_instance = compound_instances.get((compound, supplier.id, lot_text, str(receipt_date)), '') if not compound_instance: compound_instance = CompoundInstance( compound_id=compound, supplier=supplier, lot=lot_text, receipt_date=receipt_date, created_by=created_by, created_on=created_on, modified_by=created_by, modified_on=created_on, ) try: compound_instance.full_clean() compound_instance.save() except forms.ValidationError as e: current_errors.append( process_error_with_annotation( prefix, setup_row, setup_column, e ) ) group_has_error = True compound_instances.update({ (compound, supplier.id, lot_text, str(receipt_date)): compound_instance }) # Save the AssayCompoundInstance conflicting_assay_compound_instance = assay_compound_instances.get( ( # NOPE! HAVE TO USE # Hmmm... not sure what to use... # We need something that is id agnostic # But we can't use just the name! # We need the study id and group name # Should be enough! 
self.instance.id, setup_group.get('name', ''), compound_instance.id, concentration, concentration_unit_id, addition_time, duration, addition_location_id ), None ) if not conflicting_assay_compound_instance: new_compound = AssayGroupCompound( # matrix_item_id=new_item.id, compound_instance_id=compound_instance.id, concentration=concentration, concentration_unit_id=concentration_unit_id, addition_time=addition_time, duration=duration, addition_location_id=addition_location_id, # MANUALLY ADD THE GROUP NAME AS A CONTRIVED VALUE group_id=setup_group.get('name', '') ) try: new_compound.full_clean(exclude=['group']) # current_related_list.append(new_compound) new_compounds.append(new_compound) except forms.ValidationError as e: current_errors.append( process_error_with_annotation( prefix, setup_row, setup_column, e ) ) group_has_error = True assay_compound_instances.update({ ( self.instance.id, setup_group.get('name', ''), compound_instance.id, concentration, concentration_unit_id, addition_time, duration, addition_location_id ): True }) # We ought to process items separately for a number of reasons # TODO # Start of numbering for items # We begin with the number of current_items + 1 # We assume, for the moment, that there will not be name collisions # TODO: FOR TOTAL ASSURANCE, PREVENT NAME COLLISIONS current_item_number = current_items.count() + 1 for current_chip in all_chip_data: # Terminate early if no group # BE CAREFUL, ZERO IS FALSY if current_chip.get('group_index', None) is not None and len(all_setup_data) > current_chip.get('group_index'): setup_group = all_setup_data[current_chip.get('group_index')] else: continue # We know whether this is a current item if the id matches one in our list current_item = current_item_ids.get(int(current_chip.get('id', 0)), None) # TODO if current_item: # ?? # new_item = current_item # TODO LOGIC FOR UPDATE HERE? # It might be overkill, but user could change the organ model and protocol # So just always pass the organ model, protocol, test_type, modified on and by # NEVER BOTHER WITH THE GROUP # WE KNOW WHAT THE GROUP IS BECAUSE YOU CANNOT CHANGE IT HERE # WE AREN'T BOTHERING WITH UPDATING ITEMS HERE # We will delete them, though if current_chip.get('deleted', False): deleted_items.append(current_item.id) else: # TODO NOTE: New chip names *theoretically* can conflict with existing chips # For instance, someone can rename their 3 chips "3,4,5" and add three new chips, bad news! 
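# Worked example of the collision guarded against just below (names are hypothetical):
# if existing chips were renamed to "3", "4", "5" and current_item_number is 4,
# the candidate name "4" is already taken, so the loop settles on "NEW-4";
# were "NEW-4" somehow taken as well, it would try "NEW-NEW-4", and so on.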
# CRUDE # We add "NEW-" until there is a valid name new_item_name = str(current_item_number) while new_item_name in current_item_names: new_item_name = 'NEW-' + new_item_name new_item = AssayMatrixItem( # study=study, # matrix=new_matrix, name=new_item_name, # JUST MAKE SETUP DATE THE STUDY DATE FOR NOW setup_date=self.instance.start_date, # Alternative row and column # row_index=setup_row, # column_index=iteration, row_index=0, column_index=current_column_max+1, # Irrelevant (but required, unfortunately, maybe will remove later) # device=study.organ_model.device, organ_model_id=setup_group.get('organ_model_id', None), # Some nuances here that we will gloss over organ_model_protocol_id=setup_group.get('organ_model_protocol_id', None), test_type=setup_group.get('test_type', ''), created_by=created_by, created_on=created_on, modified_by=created_by, modified_on=created_on, study_id=self.instance.id, # SOMEWHAT UNORTHODOX: # We put the group name here # THEN OVERRIDE IT WITH THE ID LATER group_id=setup_group.get('name', ''), ) current_item_names.update({ new_item_name: True }) current_column_max += 1 try: new_item.full_clean(exclude=[ # The matrix needs to be excluded because it might not exist yet 'matrix', # DEFINITELY EXCLUDE GROUP 'group', # Why exclude these? 'device', # 'organ_model', # 'organ_model_protocol', ]) new_items.append(new_item) except forms.ValidationError as e: non_field_errors.append(e) group_has_error = True # CAN CAUSE UNUSUAL BEHAVIOR DURING UPDATES! current_item_number += 1 if current_errors or non_field_errors: non_field_errors.append(['Please review the table below for errors.']) raise forms.ValidationError(all_errors) # Kind of odd at first blush, but we reverse to save in order # new_items = list(reversed(new_items)) new_setup_data.update({ 'new_matrix': new_matrix, 'new_items': new_items, # NO LONGER HOW THINGS ARE HANDLED: # 'new_related': new_related, 'new_compounds': new_compounds, 'new_cells': new_cells, 'new_settings': new_settings, 'new_groups': new_groups, # TODO current_matrix? # TODO updates? # We PROBABLY don't need to modify the matrix # I mean, if someone REALLY wanted to look at it, then it would be messed up if the number of chips changed # 'update_matrix': update_matrix, # Swapping groups etc (renaming is in a different interface) # Maybe we ought to overkill update all in current items? # Or, I suppose we can avoid superfluous updates by doing a comparison prior? 'update_groups': update_groups, # NEEDED FOR KLUDGE: 'special_group_handling_required': special_group_handling_required, # WE DON'T REALLY HAVE TO UPDATE ITEMS, BUT WE DO HAVE TO DELETE THEM # Probably not needed here 'update_items': update_items, # TODO TODO TODO # First pass we are not going to bother with this 'update_compounds': update_compounds, 'update_cells': update_cells, 'update_settings': update_settings, # WE NEED THE GROUP IDS! 'current_group_ids': current_group_ids, # WE WOULD WANT TO KNOW IF THERE IS ALREADY A MATRIX! 'current_matrix': current_matrix, # Probably not needed # 'item_ids': item_ids, 'deleted_groups': deleted_groups, 'deleted_items': deleted_items, # THESE GET TRICKY! 
IDEALLY WE WANT TO DELETE AS FEW RELATED AS POSSIBLE # TODO TODO TODO # First pass we are not going to bother with this 'deleted_compounds': deleted_compounds, 'deleted_cells': deleted_cells, 'deleted_settings': deleted_settings, }) data.update({ 'processed_setup_data': new_setup_data }) return data # TODO: REVISE TO USE bulk_create # TODO: REVISE TO PROPERLY DEAL WITH UPDATES WITH bulk_update def save(self, commit=True): all_setup_data = self.cleaned_data.get('processed_setup_data', None) # Sloppy study = self.instance if all_setup_data and commit: # VERY SLOPPY created_by = self.user created_on = timezone.now() study.modified_by = created_by study.modified_on = created_on study.save() # SLOPPY: REVISE study_id = study.id # TODO TODO TODO: STUPID, BUT ONE WAY TO DEAL WITH THE DEVICE ISSUE # Otherwise I would need to cut it out and immediately revise every place it was called... # Light query anyway (relative to the others) I guess organ_model_id_to_device_id = { organ_model.id: organ_model.device_id for organ_model in OrganModel.objects.all() } new_matrix = all_setup_data.get('new_matrix', None) new_groups = all_setup_data.get('new_groups', None) update_groups = all_setup_data.get('update_groups', None) current_group_ids = all_setup_data.get('current_group_ids', None) new_items = all_setup_data.get('new_items', None) update_items = all_setup_data.get('update_items', None) # Why? new_compounds = all_setup_data.get('new_compounds', None) new_cells = all_setup_data.get('new_cells', None) new_settings = all_setup_data.get('new_settings', None) update_compounds = all_setup_data.get('update_compounds', None) update_cells = all_setup_data.get('update_cells', None) update_settings = all_setup_data.get('update_settings', None) current_matrix = all_setup_data.get('current_matrix', None) deleted_groups = all_setup_data.get('deleted_groups', None) deleted_items = all_setup_data.get('deleted_items', None) deleted_compounds = all_setup_data.get('deleted_compounds', None) deleted_cells = all_setup_data.get('deleted_cells', None) deleted_settings = all_setup_data.get('deleted_settings', None) if new_matrix: new_matrix.study_id = study_id new_matrix.save() new_matrix_id = new_matrix.id current_matrix_id = new_matrix_id # Uh... why use an elif here? # elif not new_matrix and (new_items or update_items): else: # TODO TODO TODO # Have some way to get the "current matrix" if there isn't a new matrix # TODO Be careful, if there are no chips, don't bother! current_matrix_id = all_setup_data.get('current_matrix') # UPDATE CURRENT GROUPS IF NECESSARY # Bulk update should be relatively fast if update_groups: if not all_setup_data.get('special_group_handling_required'): AssayGroup.objects.bulk_update(update_groups, self.update_group_fields) else: # Stupid, each of these is a query # (Quite expensive, hopefully this edge case is rare) for index, group in enumerate(update_groups): # Store the desired name desired_name = group.name # Just make a dumb temp name to get rid of conflicts # If a user is seriously using this... temp_name = 'WALLA1029XYZABCTEMPTODEALWITHCONFLICTS-{}'.format(index) # We may need more temp names (hopefully not...) group.name = temp_name group.save() group.name = desired_name # NOW try performing the bulk save AssayGroup.objects.bulk_update(update_groups, self.update_group_fields) # These dics match names (which are specific to this study! They are not from queries!) 
to ids # We want to avoid superfluous queries, and we might as well take advantage of the uniqueness constraints new_group_ids = {} # Combine current_group ids into new_group_ids # Way more verbose than it needs to be for current_group_id, current_group in current_group_ids.items(): new_group_ids.update({ current_group.name: current_group_id }) # new_item_ids = {} # TODO # for new_group in new_groups: # We don't really need to tie to anything? # Be careful with conditionals if new_groups: # Bulk create the groups BE SURE ONLY NEW GROUPS IN THIS LIST AssayGroup.objects.bulk_create(new_groups) # TODO NOTE: WE NEED ALL GROUPS, NOT JUST THESE for new_group in new_groups: new_group_ids.update({ new_group.name: new_group.id }) # NOTE: WE MAY HAVE NEW ITEMS WITHOUT A NEW MATRIX for new_item in new_items: # ADD MATRIX and tracking # TODO TODO TODO TODO DO NOT USE new_matrix_id HERE new_item.matrix_id = current_matrix_id # IDEALLY WE WILL JUST CUT THESE ANYWAY?? new_item.device_id = organ_model_id_to_device_id.get(new_item.organ_model_id) # TODO TODO TODO # We perform a little bit of sleight of hand here! current_group_name = new_item.group_id # Assign the correct group # "new_group_ids" is a misnomer! new_group_id = new_group_ids.get(current_group_name, None) new_item.group_id = new_group_id # To get the group id with have to use a dic TODO TODO # Ideally we won't save this way! We want to use bulk_create # new_item.save() # Ideally we would do this after the bulk_create # new_item_ids.update({ # new_item.name: new_item.id # }) if new_items: AssayMatrixItem.objects.bulk_create(new_items) # We shouldn't actually need these for anything # for new_item in new_items: # new_item_ids.update({ # new_item.name: new_item.id # }) # WE WILL WANT TO SAVE EACH COMPONENT SEPARATELY FOR BULK CREATE/UPDATE (otherwise gets odd) # Additionally, this data is no longer tied to an individual item # for current_item_name, new_related_data_set in new_related.items(): # new_item_id = new_item_ids.get(current_item_name, None) # if new_item_id: # for new_related_data in new_related_data_set: # # ADD MATRIX ITEM # new_related_data.matrix_item_id = new_item_id # new_related_data.save() # Can't use ids due to the fact that some ids are new! # Be sure you can access ALL groups for this! # (EXCLUDING DELETED GROUPS, WHICH OUGHT NOT TO BE ACCESSED) # Barbaric, just obscene in its grotesque cruelty # How could one do such a thing and not be deemed heartless? # KILL ALL COMPOUNDS, CELL, AND SETTINGS: AssayGroupCompound.objects.filter(group_id__in=new_group_ids.values()).delete() AssayGroupCell.objects.filter(group_id__in=new_group_ids.values()).delete() AssayGroupSetting.objects.filter(group_id__in=new_group_ids.values()).delete() # Breaks rule of three # Stupid for new_compound in new_compounds: # We perform a little bit of sleight of hand here! current_group_name = new_compound.group_id # Assign the correct group # "new_group_ids" is a misnomer! new_group_id = new_group_ids.get(current_group_name, None) new_compound.group_id = new_group_id if new_compounds: AssayGroupCompound.objects.bulk_create(reversed(new_compounds)) for new_cell in new_cells: # We perform a little bit of sleight of hand here! current_group_name = new_cell.group_id # Assign the correct group # "new_group_ids" is a misnomer! 
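# (Illustrative aside: this name-to-id remapping only works because bulk_create()
# above populated primary keys on the freshly created groups, which Django only
# guarantees on backends that can return ids from bulk inserts, e.g. PostgreSQL.)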
new_group_id = new_group_ids.get(current_group_name, None) new_cell.group_id = new_group_id if new_cells: AssayGroupCell.objects.bulk_create(reversed(new_cells)) for new_setting in new_settings: # We perform a little bit of sleight of hand here! current_group_name = new_setting.group_id # Assign the correct group # "new_group_ids" is a misnomer! new_group_id = new_group_ids.get(current_group_name, None) new_setting.group_id = new_group_id if new_settings: AssayGroupSetting.objects.bulk_create(reversed(new_settings)) # Perform deletions if deleted_items: AssayMatrixItem.objects.filter(id__in=deleted_items, matrix_id=current_matrix_id).delete() if deleted_groups: AssayGroup.objects.filter(id__in=deleted_groups, study_id=self.instance.id).delete() return study class AssayStudyChipForm(SetupFormsMixin, SignOffMixin, BootstrapForm): series_data = forms.CharField(required=False) class Meta(object): model = AssayStudy # Since we are splitting into multiple forms, includes are safer fields = ( 'series_data', 'organ_model_full', 'organ_model_protocol_full' ) + flag_group def __init__(self, *args, **kwargs): super(AssayStudyChipForm, self).__init__(*args, **kwargs) # Prepopulate series_data from thing self.fields['series_data'].initial = self.instance.get_group_data_string(get_chips=True) # TODO NEEDS TO BE REVISED # TODO TODO TODO CLEAN AND SAVE def clean(self): cleaned_data = super(AssayStudyChipForm, self).clean() # TODO TODO TODO NOTE CRAMMED IN # Am I sticking with the name 'series_data'? if self.cleaned_data.get('series_data', None): all_data = json.loads(self.cleaned_data.get('series_data', '[]')) else: # Contrived defaults all_data = { 'series_data': [], 'chips': [], 'plates': {} } # The data for groups is currently stored in series_data all_setup_data = all_data.get('series_data') all_chip_data = all_data.get('chips') # Catch technically empty setup data setup_data_is_empty = True for group_set in all_setup_data: if group_set: setup_data_is_empty = not any(group_set.values()) if setup_data_is_empty: all_setup_data = [] # Variables must always exist chip_data = [] current_errors = [] # if commit and all_setup_data: # SEE BASE MODELS FOR WHY COMMIT IS NOT HERE if all_chip_data: chip_names = {} # Interestingly, we need to prevent overlap with wells as well well_names = { well.name: True for well in AssayMatrixItem.objects.filter(study_id=self.instance.id).exclude(matrix__representation='chips') } current_matrix = AssayMatrix.objects.filter( # The study must exist in order to visit this page, so getting the id this was is fine study_id=self.instance.id, representation='chips' ) # Get the group ids group_ids = { group.id: True for group in AssayGroup.objects.filter( study_id=self.instance.id ) } chip_id_to_chip = { chip.id: chip for chip in AssayMatrixItem.objects.filter( study_id=self.instance.id, matrix_id=current_matrix[0].id ) } special_handling_required = False # Crude, but we need current chips current_chip_names = {chip.name: chip.id for chip in chip_id_to_chip.values()} # We basically just modify group_id and name # TODO MAKE SURE THE GROUP IDS ARE VALID # That is, check against the group ids of the study for chip in all_chip_data: if chip_id_to_chip.get(chip.get('id', 0)): current_chip = chip_id_to_chip.get(chip.get('id', 0)) else: current_chip = False current_errors.append('A Chip is missing, please refresh and try again.') if current_chip: # Add to group names # Check uniqueness if chip.get('name', '') in chip_names: current_errors.append('The Chip name "{}" is duplicated. 
The names of Chips must be unique.'.format( chip.get('name', '') )) elif chip.get('name', '') in well_names: current_errors.append('The Chip name "{}" is the name of a Well. Either rename the well or choose a different name for the Chip.'.format( chip.get('name', '') )) else: chip_names.update({ chip.get('name', ''): True }) current_chip.name = chip.get('name', '') # Note that we need special handling when there is a conflict in current chip names if chip.get('name', '') in current_chip_names and current_chip.id != current_chip_names.get(chip.get('name', '')): special_handling_required = True if chip.get('group_id', '') in group_ids: current_chip.group_id = chip.get('group_id', '') try: # INTERESTINGLY: We actually exclude name! # Why? Because we don't want it erroring out from *existing* names # It is an edge case, but someone might have, say, 100 chips # If they rename the chips starting with 50... # There would be an error because "50" was already a chosen name! current_chip.full_clean( exclude=['name'] ) chip_data.append(current_chip) except forms.ValidationError as e: current_errors.append(e) self.cleaned_data.update({ 'chip_data': chip_data, 'special_handling_required': special_handling_required }) if current_errors: raise forms.ValidationError(current_errors) return cleaned_data def save(self, commit=True): # Just do the bulk update # We don't need to do anything else # TODO TODO TODO # NOTE NOTE NOTE # WE TECHNICALLY SHOULD CHANGE THE SHARED VALUES HERE (organ_model, test_type, etc.) # ON THE OTHER HAND, IT IS PROBABLY BEST TO LEAVE THEM OUT if commit: # Due to the nature of bulk_update and uniqueness checks... # We need to catch integrity errors and handle them if not self.cleaned_data.get('special_handling_required'): AssayMatrixItem.objects.bulk_update( self.cleaned_data.get('chip_data', None), [ 'group_id', 'name' ] ) else: # Stupid, each of these is a query # (Quite expensive, hopefully this edge case is rare) for index, chip in enumerate(self.cleaned_data.get('chip_data', None)): # Store the desired name desired_name = chip.name # Just make a dumb temp name to get rid of conflicts # If a user is seriously using this... temp_name = 'WALLA1029XYZABCTEMPTODEALWITHCONFLICTS-{}'.format(index) # We may need more temp names (hopefully not...) chip.name = temp_name chip.save() chip.name = desired_name # NOW try performing the bulk save AssayMatrixItem.objects.bulk_update( self.cleaned_data.get('chip_data', None), [ 'group_id', 'name' ] ) return self.instance class AssayStudyPlateForm(SetupFormsMixin, SignOffMixin, BootstrapForm): series_data = forms.CharField(required=False) class Meta(object): model = AssayMatrix fields = ( 'name', 'notes', # Don't care about device anymore, I guess # 'device', # DO care about organ model, I guess 'organ_model', # Maybe a bit unorthodox 'number_of_columns', 'number_of_rows', # TODO 'series_data', ) + flag_group widgets = { 'name': forms.Textarea(attrs={'rows': 1}), 'notes': forms.Textarea(attrs={'rows': 10}), } def __init__(self, *args, **kwargs): self.study = kwargs.pop('study', None) super(AssayStudyPlateForm, self).__init__(*args, **kwargs) if self.study: self.instance.study = self.study else: self.study = self.instance.study # Crude! 
TEMPORARY if self.instance.id: self.fields['series_data'].initial = self.study.get_group_data_string(plate_id=self.instance.id) else: self.fields['series_data'].initial = self.study.get_group_data_string() # Predicate the organ model options on the current groups # Dumb query plate_groups = AssayGroup.objects.filter( study_id=self.instance.study.id, # See above organ_model__device__device_type='plate' ).prefetch_related('organ_model__device') self.fields['organ_model'].queryset = OrganModel.objects.filter( id__in=plate_groups.values_list('organ_model_id', flat=True) ) # Improper, but one method to make organ model required self.fields['organ_model'].widget.attrs['class'] += ' required' self.fields['organ_model'].required = True # FORCE UNIQUENESS CHECK def clean(self): # RATHER CRUDE: WE FORCE THE PLATE TO HAVE A REPRESENTATION OF PLATE self.instance.representation = 'plate' cleaned_data = super(AssayStudyPlateForm, self).clean() # VERY SLOPPY created_by = self.user created_on = timezone.now() # TODO TODO TODO NOTE CRAMMED IN # Am I sticking with the name 'series_data'? if self.cleaned_data.get('series_data', None): all_data = json.loads(self.cleaned_data.get('series_data', '[]')) else: # Contrived defaults all_data = { 'series_data': [], 'chips': [], 'plates': {} } # The data for groups is currently stored in series_data all_setup_data = all_data.get('series_data') all_plate_data = all_data.get('plates') # Catch technically empty setup data setup_data_is_empty = True for group_set in all_setup_data: if group_set: setup_data_is_empty = not any(group_set.values()) if setup_data_is_empty: all_setup_data = [] # Plate name if AssayMatrix.objects.filter( study_id=self.instance.study.id, name=self.cleaned_data.get('name', '') ).exclude(pk=self.instance.pk).count(): raise forms.ValidationError({'name': ['Plate name must be unique within Study.']}) current_wells = { well.id: well for well in AssayMatrixItem.objects.filter( matrix_id=self.instance.id ) } # Technically ought to restrict to JUST PLATE GROUPS # However, that makes the query uglier # First pass, we won't make sure a check current_groups = { group.id: group for group in AssayGroup.objects.filter( study_id=self.instance.study.id ) } new_wells = [] update_wells = [] delete_wells = [] # NOTE: We will be alerted during clean for anything that ISN'T INTERNAL to the plate taken_names = {} current_errors = [] for row_column, well in all_plate_data.items(): row_column_split = row_column.split('_') row = int(row_column_split[0]) column = int(row_column_split[1]) current_well = current_wells.get(well.get('id', 0), None) current_group = current_groups.get(well.get('group_id', 0), None) # Junk data if not current_group: continue # BE SURE TO TEST THE NAMES! 
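# (Illustrative note: the keys of all_plate_data encode the well position, e.g. a
# hypothetical key "2_5" parses above into row 2, column 5. The name check below
# only guards uniqueness within this plate; per the note above, conflicts with
# anything outside the plate are expected to be flagged elsewhere during clean.)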
current_name = well.get('name', '') if current_name in taken_names: current_errors.append('The name "{}" is already in use, please make sure well names are unique.'.format( current_name )) else: taken_names.update({ current_name: True }) # Update if already exists if current_well: # If slated for deletion if well.get('deleted', ''): delete_wells.append(current_well.id) else: # Update name current_well.name = current_name if well.get('group_id', '') in current_groups: # Update group id current_well.group_id = well.get('group_id', '') # Make sure nothing broke try: current_well.full_clean() # Add it to those slated to update update_wells.append(current_well) except forms.ValidationError as e: current_errors.append(e) # Add otherwise else: new_item = AssayMatrixItem( # We know this one study=self.instance.study, # TRICKY! # matrix=new_matrix, name=current_name, # JUST MAKE SETUP DATE THE STUDY DATE FOR NOW setup_date=self.instance.study.start_date, row_index=row, column_index=column, # Irrelevant (but required, unfortunately, maybe will remove later) # device=study.organ_model.device, organ_model_id=current_group.organ_model_id, # Some nuances here that we will gloss over organ_model_protocol_id=current_group.organ_model_protocol_id, test_type=current_group.test_type, created_by=created_by, created_on=created_on, modified_by=created_by, modified_on=created_on, group_id=current_group.id ) try: new_item.full_clean(exclude=[ # The matrix needs to be excluded because it might not exist yet 'matrix', # Why exclude these? # Get rid of device for now because it is still required 'device', # 'organ_model', # 'organ_model_protocol', ]) new_wells.append(new_item) except forms.ValidationError as e: current_errors.append(e) group_has_error = True cleaned_data.update({ 'new_wells': new_wells, 'update_wells': update_wells, 'delete_wells': delete_wells, }) if current_errors: raise forms.ValidationError(current_errors) return cleaned_data # CRUDE, TEMPORARY # TODO REVISE ASAP def save(self, commit=True): # Takes care of saving the Plate in and of itself matrix = super(AssayStudyPlateForm, self).save(commit) if commit: matrix_id = self.instance.id study_id = self.instance.study.id # TODO TODO TODO: STUPID, BUT ONE WAY TO DEAL WITH THE DEVICE ISSUE # Otherwise I would need to cut it out and immediately revise every place it was called... # Light query anyway (relative to the others) I guess organ_model_id_to_device_id = { organ_model.id: organ_model.device_id for organ_model in OrganModel.objects.all() } new_wells = self.cleaned_data.get('new_wells') update_wells = self.cleaned_data.get('update_wells') delete_wells = self.cleaned_data.get('delete_wells') # Add new wells if new_wells: # Need to iterate through the new wells and add the matrix id # (The matrix might not exist yet) for well in new_wells: well.matrix_id = matrix_id # IDEALLY WE WILL JUST CUT THESE ANYWAY?? well.device_id = organ_model_id_to_device_id.get(well.organ_model_id) AssayMatrixItem.objects.bulk_create(new_wells) # Update wells # TODO TODO TODO # NOTE NOTE NOTE # WE TECHNICALLY SHOULD CHANGE THE SHARED VALUES HERE (organ_model, test_type, etc.) 
# ON THE OTHER HAND, IT IS PROBABLY BEST TO LEAVE THEM OUT if update_wells: AssayMatrixItem.objects.bulk_update( update_wells, [ 'name', 'group_id' ] ) if delete_wells: AssayMatrixItem.objects.filter(id__in=delete_wells).delete() return matrix # Need to make plural to distinguish # CONTRIVED ANYWAY class AssayStudyAssaysForm(BootstrapForm): class Meta(object): model = AssayStudy # Since we are splitting into multiple forms, includes are safer fields = flag_group class AssayStudyFormAdmin(BootstrapForm): """Admin Form for Assay Runs (now referred to as Studies)""" class Meta(object): model = AssayStudy widgets = { 'assay_run_id': forms.Textarea(attrs={'rows': 1}), 'name': forms.Textarea(attrs={'rows': 1}), 'description': forms.Textarea(attrs={'rows': 10}), 'signed_off_notes': forms.Textarea(attrs={'rows': 10}), } exclude = ('',) def __init__(self, *args, **kwargs): super(AssayStudyFormAdmin, self).__init__(*args, **kwargs) groups_with_center = MicrophysiologyCenter.objects.all().values_list('groups', flat=True) groups_with_center_full = Group.objects.filter( id__in=groups_with_center ).order_by( 'name' ) self.fields['group'].queryset = groups_with_center_full groups_without_repeat = groups_with_center_full if self.instance and getattr(self.instance, 'group', ''): groups_without_repeat.exclude(pk=self.instance.group.id) self.fields['access_groups'].queryset = groups_without_repeat self.fields['collaborator_groups'].queryset = groups_without_repeat # Crudely force required class for current_field in ['total_device_volume', 'flow_rate', 'number_of_relevant_cells']: self.fields[current_field].widget.attrs['class'] += ' required' def clean(self): # clean the form data, before validation data = super(AssayStudyFormAdmin, self).clean() if not any([data['toxicity'], data['efficacy'], data['disease'], data['cell_characterization'], data['omics'], data['pbpk_steady_state'], data['pbpk_bolus']]): raise forms.ValidationError('Please select at least one study type') if data.get('pbpk_steady_state', '') and (not data.get('number_of_relevant_cells', '') or not data.get('flow_rate', '')): raise forms.ValidationError('Continuous Infusion PBPK Requires Number of Cells Per MPS Model and Flow Rate') if data.get('pbpk_bolus', '') and (not data.get('number_of_relevant_cells', '') or not data.get('total_device_volume', '')): raise forms.ValidationError('Bolus PBPK Requires Number of Cells Per MPS Model and Total Device Volume') return data class AssayStudyAccessForm(forms.ModelForm): """Form for changing access to studies""" def __init__(self, *args, **kwargs): super(AssayStudyAccessForm, self).__init__(*args, **kwargs) # NEED A MORE ELEGANT WAY TO GET THIS first_center = self.instance.group.center_groups.first() groups_without_repeat = Group.objects.filter( id__in=first_center.accessible_groups.all().values_list('id', flat=True), ).order_by( 'name' ).exclude( id=self.instance.group.id ) self.fields['access_groups'].queryset = groups_without_repeat self.fields['collaborator_groups'].queryset = groups_without_repeat class Meta(object): model = AssayStudy fields = ( 'collaborator_groups', 'access_groups', ) class AssayStudySupportingDataForm(BootstrapForm): class Meta(object): model = AssayStudySupportingData exclude = ('',) class AssayStudyAssayForm(BootstrapForm): class Meta(object): model = AssayStudyAssay exclude = ('',) class AssayStudySupportingDataInlineFormSet(BaseInlineFormSet): """Form for Study Supporting Data (as part of an inline)""" class Meta(object): model = AssayStudySupportingData exclude = ('',) 
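# Hedged sketch, not referenced by anything above or below: the "temporary throwaway
# name, then bulk_update" dance that several save() methods in this module repeat by
# hand could be factored into a helper along these lines. The helper name and the
# temp-name format are illustrative only, and the fields list must include 'name'
# for the final pass to restore the real names.
def _bulk_update_with_unique_names(model, objects, fields):
    """Two-phase bulk_update that sidesteps unique-name collisions.

    Pass one writes a throwaway unique name per object (one query each, so only
    worth doing when a collision was actually detected); pass two puts the desired
    names back and commits everything with a single bulk_update.
    """
    desired_names = [current_object.name for current_object in objects]
    for index, current_object in enumerate(objects):
        current_object.name = 'TEMP-NAME-TO-AVOID-CONFLICTS-{}'.format(index)
        current_object.save()
    for current_object, desired_name in zip(objects, desired_names):
        current_object.name = desired_name
    model.objects.bulk_update(objects, fields)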
AssayStudySupportingDataFormSetFactory = inlineformset_factory( AssayStudy, AssayStudySupportingData, form=AssayStudySupportingDataForm, formset=AssayStudySupportingDataInlineFormSet, extra=1, exclude=[], widgets={ 'description': forms.Textarea(attrs={'rows': 3}), } ) AssayStudyAssayFormSetFactory = inlineformset_factory( AssayStudy, AssayStudyAssay, form=AssayStudyAssayForm, formset=AssayStudyAssayInlineFormSet, extra=1, exclude=[] ) # TODO ADD STUDY class AssayMatrixForm(SetupFormsMixin, SignOffMixin, BootstrapForm): class Meta(object): model = AssayMatrix exclude = ('study',) + tracking widgets = { 'number_of_columns': forms.NumberInput(attrs={'style': 'width: 100px;'}), 'number_of_rows': forms.NumberInput(attrs={'style': 'width: 100px;'}), 'name': forms.Textarea(attrs={'rows': 1}), 'notes': forms.Textarea(attrs={'rows': 3}), 'variance_from_organ_model_protocol': forms.Textarea(attrs={'rows': 3}), } def __init__(self, *args, **kwargs): self.study = kwargs.pop('study', None) # self.user = kwargs.pop('user', None) super(AssayMatrixForm, self).__init__(*args, **kwargs) if self.study: self.instance.study = self.study # sections_with_times = ( # 'compound', # 'cell', # 'setting' # ) # # for time_unit in list(TIME_CONVERSIONS.keys()): # for current_section in sections_with_times: # # Create fields for Days, Hours, Minutes # self.fields[current_section + '_addition_time_' + time_unit] = forms.FloatField( # initial=0, # required=False, # widget=forms.NumberInput(attrs={ # 'class': 'form-control', # 'style': 'width:75px;' # }) # ) # self.fields[current_section + '_duration_' + time_unit] = forms.FloatField( # initial=0, # required=False, # widget=forms.NumberInput(attrs={ # 'class': 'form-control', # 'style': 'width:75px;' # }) # ) # Changing these things in init is bad self.fields['matrix_item_notebook_page'].widget.attrs['style'] = 'width:75px;' # self.fields['cell_cell_sample'].widget.attrs['style'] = 'width:75px;' # self.fields['cell_passage'].widget.attrs['style'] = 'width:75px;' # Make sure no selectize # CONTRIVED self.fields['matrix_item_full_organ_model'].widget.attrs['class'] = 'no-selectize' self.fields['matrix_item_full_organ_model_protocol'].widget.attrs['class'] = 'no-selectize' # No selectize on action either (hides things, looks odd) # CONTRIVED # self.fields['action'].widget.attrs['class'] += ' no-selectize' # DUMB, BAD (can't have them be "actually" required or they prevent submission add_required_to = [ 'matrix_item_name', 'matrix_item_setup_date', 'matrix_item_test_type', 'matrix_item_name', 'matrix_item_device', 'matrix_item_organ_model', ] for current_field in add_required_to: self.fields[current_field].widget.attrs['class'] += ' required' ### ADDITIONAL MATRIX FIELDS (unsaved) number_of_items = forms.IntegerField(required=False) ### ITEM FIELD HELPERS # action = forms.ChoiceField(choices=( # ('', 'Please Select an Action'), # ('add_name', 'Add Names/IDs*'), # ('add_test_type', 'Add Test Type*'), # ('add_date', 'Add Setup Date*'), # ('add_device', 'Add Device/MPS Model Information*'), # ('add_settings', 'Add Settings'), # ('add_compounds', 'Add Compounds'), # ('add_cells', 'Add Cells'), # ('add_notes', 'Add Notes/Notebook Information'), # # ADD BACK LATER # # ('copy', 'Copy Contents'), # # TODO TODO TODO TENTATIVE # # ('clear', 'Clear Contents'), # ('delete', 'Delete Selected'), # ), required=False) # The matrix_item isn't just to be annoying, I want to avoid conflicts with other fields ### ADDING ITEM FIELDS matrix_item_name = forms.CharField( required=False, 
widget=forms.Textarea(attrs={'rows': 1}), label='Matrix Item Name' ) matrix_item_setup_date = forms.DateField( required=False, label='Matrix Item Setup Date' ) # Foolish! matrix_item_setup_date_popup = forms.DateField(required=False) matrix_item_test_type = forms.ChoiceField( required=False, choices=TEST_TYPE_CHOICES, label='Matrix Item Test Type' ) matrix_item_scientist = forms.CharField( required=False, widget=forms.Textarea(attrs={'rows': 1}), label='Scientist' ) matrix_item_notebook = forms.CharField( required=False, label='Notebook' ) matrix_item_notebook_page = forms.CharField( required=False, label='Notebook Page' ) matrix_item_notes = forms.CharField( required=False, widget=forms.Textarea(attrs={'rows': 3}), label='Notes' ) ### ADDING SETUP FIELDS matrix_item_device = forms.ModelChoiceField( queryset=Microdevice.objects.all().order_by('name'), required=False, label='Matrix Item Device' ) matrix_item_organ_model = forms.ModelChoiceField( queryset=OrganModel.objects.all().order_by('name'), required=False, label='Matrix Item MPS Model' ) matrix_item_organ_model_protocol = forms.ModelChoiceField( queryset=OrganModelProtocol.objects.all().order_by('version'), required=False, label='Matrix Item MPS Model Version' ) matrix_item_variance_from_organ_model_protocol = forms.CharField( required=False, widget=forms.Textarea(attrs={'rows': 3}), label='Matrix Item Variance from Protocol' ) matrix_item_full_organ_model = forms.ModelChoiceField( queryset=OrganModel.objects.all().order_by('name'), required=False ) matrix_item_full_organ_model_protocol = forms.ModelChoiceField( queryset=OrganModelProtocol.objects.all(), required=False ) ### INCREMENTER compound_concentration_increment = forms.FloatField(required=False, initial=1) compound_concentration_increment_type = forms.ChoiceField( choices=( ('/', 'Divide'), ('*', 'Multiply'), ('+', 'Add'), ('-', 'Subtract') ), required=False ) compound_concentration_increment_direction = forms.ChoiceField( choices=( ('lr', 'Left to Right'), ('d', 'Down'), ('rl', 'Right to Left'), ('u', 'Up'), ('lrd', 'Left to Right and Down'), ('rlu', 'Right to Left and Up') ), required=False, initial='lr' ) # Options for deletion delete_option = forms.ChoiceField( required=False, choices=( ('all', 'Everything'), ('cell', 'Cells'), ('compound', 'Compounds'), ('setting', 'Settings'), ), label='Delete Option' ) # FORCE UNIQUENESS CHECK def clean(self): super(AssayMatrixForm, self).clean() if AssayMatrix.objects.filter( study_id=self.instance.study.id, name=self.cleaned_data.get('name', '') ).exclude(pk=self.instance.pk).count(): raise forms.ValidationError({'name': ['Matrix name must be unique within study.']}) class AssaySetupCompoundForm(ModelFormSplitTime): compound = forms.CharField() class Meta(object): model = AssaySetupCompound exclude = tracking # TODO: IDEALLY THE CHOICES WILL BE PASSED VIA A KWARG class AssaySetupCompoundFormSet(BaseModelFormSetForcedUniqueness): custom_fields = ( 'matrix_item', 'compound_instance', 'concentration_unit', 'addition_location' ) def __init__(self, *args, **kwargs): # TODO EVENTUALLY PASS WITH KWARG # self.suppliers = kwargs.pop('suppliers', None) # self.compound_instances = kwargs.pop('compound_instances', None) # self.compound_instances_dic = kwargs.pop('compound_instances_dic', None) # self.setup_compounds = kwargs.pop('setup_compounds', None) # Get all chip setup assay compound instances self.matrix = kwargs.pop('matrix', None) self.setup_compounds = { ( instance.matrix_item_id, instance.compound_instance_id, instance.concentration, 
instance.concentration_unit_id, instance.addition_time, instance.duration, instance.addition_location_id ): True for instance in AssaySetupCompound.objects.filter( matrix_item__matrix=self.matrix ) } self.compound_instances = {} self.compound_instances_dic = {} for instance in CompoundInstance.objects.all().prefetch_related('supplier'): self.compound_instances.update({ ( instance.compound_id, instance.supplier_id, instance.lot, instance.receipt_date ): instance }) # NOTE use of name instead of id! self.compound_instances_dic.update({ instance.id: ( instance.compound_id, instance.supplier.name, instance.lot, instance.receipt_date ) }) # Get all suppliers self.suppliers = { supplier.name: supplier for supplier in CompoundSupplier.objects.all() } super(AssaySetupCompoundFormSet, self).__init__(*args, **kwargs) filters = {'matrix_item': {'matrix_id': self.matrix.id}} self.dic = get_dic_for_custom_choice_field(self, filters=filters) for form in self.forms: for field in self.custom_fields: form.fields[field] = DicModelChoiceField(field, self.model, self.dic) # Purge all classes for field in form.fields: form.fields[field].widget.attrs['class'] = '' def _construct_form(self, i, **kwargs): form = super(AssaySetupCompoundFormSet, self)._construct_form(i, **kwargs) # Text field (un-saved) for supplier form.fields['supplier_text'] = forms.CharField(initial='N/A', required=False) # Text field (un-saved) for lot form.fields['lot_text'] = forms.CharField(initial='N/A', required=False) # Receipt date form.fields['receipt_date'] = forms.DateField(required=False) if form.instance: current_compound_instance_id = form.instance.compound_instance_id else: current_compound_instance_id = None if current_compound_instance_id: current_compound_instance = self.compound_instances_dic.get(current_compound_instance_id) # form.fields['compound'].initial = current_compound_instance.compound # form.fields['supplier_text'].initial = current_compound_instance.supplier.name # form.fields['lot_text'].initial = current_compound_instance.lot # form.fields['receipt_date'].initial = current_compound_instance.receipt_date form.fields['compound'].initial = current_compound_instance[0] form.fields['supplier_text'].initial = current_compound_instance[1] form.fields['lot_text'].initial = current_compound_instance[2] form.fields['receipt_date'].initial = current_compound_instance[3] return form # TODO TODO TODO # Will either have to decouple compound instance and supplier or else have a dic ALL FORMSETS reference # Ostensibly, I can pass a pointer to a dictionary so that all of the formsets see the same thing def save(self, commit=True): # Get forms_data (excluding those with delete or no data) forms_data = [f for f in self.forms if f.cleaned_data and not f.cleaned_data.get('DELETE', False)] forms_to_delete = [f for f in self.forms if f.cleaned_data and f.cleaned_data.get('DELETE', False)] # Forms to be deleted for form in forms_to_delete: try: instance = BootstrapForm.save(form, commit=False) if instance and instance.id and commit: instance.delete() # ValueError here indicates that the instance couldn't even validate and so should be ignored except ValueError: pass # Forms to save for form in forms_data: instance = BootstrapForm.save(form, commit=False) matrix_item = instance.matrix_item current_data = form.cleaned_data # Bad if not current_data.get('supplier_text'): current_data['supplier_text'] = 'N/A' if not current_data.get('lot_text'): current_data['lot_text'] = 'N/A' compound_id = int(current_data.get('compound')) supplier_text 
= current_data.get('supplier_text').strip() lot_text = current_data.get('lot_text').strip() receipt_date = current_data.get('receipt_date') # Should be acquired straight from form # concentration = current_data.get('concentration') # concentration_unit = current_data.get('concentration_unit') addition_time = 0 duration = 0 for time_unit, conversion in list(TIME_CONVERSIONS.items()): addition_time += current_data.get('addition_time_' + time_unit, 0) * conversion duration += current_data.get('duration_' + time_unit, 0) * conversion # Check if the supplier already exists supplier = self.suppliers.get(supplier_text, '') # Otherwise create the supplier if not supplier: supplier = CompoundSupplier( name=supplier_text, created_by=matrix_item.created_by, created_on=matrix_item.created_on, modified_by=matrix_item.modified_by, modified_on=matrix_item.modified_on ) # if commit: # supplier.save() # Always save the supplier supplier.save() self.suppliers.update({ supplier_text: supplier }) # Check if compound instance exists compound_instance = self.compound_instances.get((compound_id, supplier.id, lot_text, receipt_date), '') if not compound_instance: compound_instance = CompoundInstance( compound_id=compound_id, supplier=supplier, lot=lot_text, receipt_date=receipt_date, created_by=matrix_item.created_by, created_on=matrix_item.created_on, modified_by=matrix_item.modified_by, modified_on=matrix_item.modified_on ) # if commit: # compound_instance.save() # ALWAYS MAKE A NEW COMPOUND INSTANCE compound_instance.save() self.compound_instances.update({ (compound_id, supplier.id, lot_text, receipt_date): compound_instance }) # Update the instance with new data # instance.matrix_item = matrix_item instance.compound_instance = compound_instance instance.addition_time = addition_time instance.duration = duration # Save the instance if commit: conflicting_assay_compound_instance = self.setup_compounds.get( ( instance.matrix_item_id, instance.compound_instance_id, instance.concentration, instance.concentration_unit_id, instance.addition_time, instance.duration, instance.addition_location_id ), None ) # If there is not conflict or if this is an update if not conflicting_assay_compound_instance: instance.save() # Do nothing otherwise (it already exists) self.setup_compounds.update({ ( instance.matrix_item_id, instance.compound_instance_id, instance.concentration, instance.concentration_unit_id, instance.addition_time, instance.duration, instance.addition_location_id ): True }) # UGLY SOLUTION # DEPRECATED class AssaySetupCompoundInlineFormSet(BaseInlineFormSet): """Frontend Inline FormSet for Compound Instances""" class Meta(object): model = AssaySetupCompound exclude = ('',) def __init__(self, *args, **kwargs): """Init Chip Setup Form Filters physical units to include only Concentration """ super(AssaySetupCompoundInlineFormSet, self).__init__(*args, **kwargs) # Filter compound instances compound_instances = CompoundInstance.objects.all().prefetch_related( 'compound', 'supplier' ) compound_instances_dic = { instance.id: instance for instance in compound_instances } # Filter on concentration but make a special exception for percent (%) concentration_unit_queryset = PhysicalUnits.objects.filter( unit_type__unit_type='Concentration' ).order_by( 'base_unit__unit', 'scale_factor' ) | PhysicalUnits.objects.filter(unit='%') for form in self.forms: # form.fields['start_time_unit'].queryset = time_unit_queryset # form.fields['duration_unit'].queryset = time_unit_queryset form.fields['concentration_unit'].queryset = 
concentration_unit_queryset form.fields['compound_instance'].queryset = compound_instances # All available compounds form.fields['compound'] = forms.ModelChoiceField( queryset=Compound.objects.all(), widget=forms.Select(attrs={'class': 'form-control'}) ) # Text field (un-saved) for supplier form.fields['supplier_text'] = forms.CharField( initial='', widget=forms.TextInput(attrs={'class': 'form-control'}), required=False ) # Text field (un-saved) for lot form.fields['lot_text'] = forms.CharField( initial='', widget=forms.TextInput(attrs={'class': 'form-control'}), required=False ) # Receipt date form.fields['receipt_date'] = forms.DateField( required=False, widget=forms.DateInput(attrs={ 'class': 'form-control datepicker-input', 'autocomplete': 'off' }) ) # If instance, apply initial values if form.instance.compound_instance_id: current_compound_instance = compound_instances_dic.get(form.instance.compound_instance_id) form.fields['compound'].initial = current_compound_instance.compound form.fields['supplier_text'].initial = current_compound_instance.supplier.name form.fields['lot_text'].initial = current_compound_instance.lot form.fields['receipt_date'].initial = current_compound_instance.receipt_date # VERY SLOPPY form.fields['compound'].widget.attrs['class'] += ' required' current_field = 'compound' if hasattr(form.fields[current_field], '_queryset'): if hasattr(form.fields[current_field]._queryset, 'model'): # Usually one would use a hyphen rather than an underscore # form.fields[field].widget.attrs['data-app'] = form.fields[field]._queryset.model._meta.app_label form.fields[current_field].widget.attrs['data_app'] = form.fields[current_field]._queryset.model._meta.app_label # form.fields[field].widget.attrs['data-model'] = form.fields[field]._queryset.model._meta.object_name form.fields[current_field].widget.attrs['data_model'] = form.fields[current_field]._queryset.model._meta.object_name form.fields[current_field].widget.attrs['data_verbose_name'] = form.fields[current_field]._queryset.model._meta.verbose_name # Possibly dumber if hasattr(form.fields[current_field]._queryset.model, 'get_add_url_manager'): form.fields[current_field].widget.attrs['data_add_url'] = form.fields[current_field]._queryset.model.get_add_url_manager() # TODO THIS IS NOT DRY def save(self, commit=True): # Get forms_data (excluding those with delete or no data) forms_data = [f for f in self.forms if f.cleaned_data and not f.cleaned_data.get('DELETE', False)] forms_to_delete = [f for f in self.forms if f.cleaned_data and f.cleaned_data.get('DELETE', False)] # Forms to be deleted for form in forms_to_delete: instance = super(BootstrapForm, form).save(commit=False) if instance and instance.id and commit: instance.delete() matrix_item = self.instance # Get all chip setup assay compound instances assay_compound_instances = { ( instance.compound_instance.id, instance.concentration, instance.concentration_unit.id, instance.addition_time, instance.duration, instance.addition_location_id ): True for instance in AssaySetupCompound.objects.filter( matrix_item_id=matrix_item.id ).prefetch_related( 'compound_instance__compound', 'concentration_unit' ) } # Get all Compound Instances compound_instances = { ( instance.compound.id, instance.supplier.id, instance.lot, instance.receipt_date ): instance for instance in CompoundInstance.objects.all().prefetch_related( 'compound', 'supplier' ) } # Get all suppliers suppliers = { supplier.name: supplier for supplier in CompoundSupplier.objects.all() } # Forms to save for form in 
forms_data: instance = super(BootstrapForm, form).save(commit=False) current_data = form.cleaned_data # Bad if not current_data.get('supplier_text'): current_data['supplier_text'] = 'N/A' if not current_data.get('lot_text'): current_data['lot_text'] = 'N/A' compound = current_data.get('compound') supplier_text = current_data.get('supplier_text').strip() lot_text = current_data.get('lot_text').strip() receipt_date = current_data.get('receipt_date') # Should be acquired straight from form # concentration = current_data.get('concentration') # concentration_unit = current_data.get('concentration_unit') addition_time = 0 duration = 0 for time_unit, conversion in list(TIME_CONVERSIONS.items()): addition_time += current_data.get('addition_time_' + time_unit, 0) * conversion duration += current_data.get('duration_' + time_unit, 0) * conversion # Check if the supplier already exists supplier = suppliers.get(supplier_text, '') # Otherwise create the supplier if not supplier: supplier = CompoundSupplier( name=supplier_text, created_by=matrix_item.created_by, created_on=matrix_item.created_on, modified_by=matrix_item.modified_by, modified_on=matrix_item.modified_on ) # if commit: # supplier.save() # Always save the supplier supplier.save() suppliers.update({ supplier_text: supplier }) # Check if compound instance exists compound_instance = compound_instances.get((compound.id, supplier.id, lot_text, receipt_date), '') if not compound_instance: compound_instance = CompoundInstance( compound=compound, supplier=supplier, lot=lot_text, receipt_date=receipt_date, created_by=matrix_item.created_by, created_on=matrix_item.created_on, modified_by=matrix_item.modified_by, modified_on=matrix_item.modified_on ) # if commit: # compound_instance.save() # ALWAYS MAKE A NEW COMPOUND INSTANCE compound_instance.save() compound_instances.update({ (compound.id, supplier.id, lot_text, receipt_date): compound_instance }) # Update the instance with new data instance.matrix_item = matrix_item instance.compound_instance = compound_instance instance.addition_time = addition_time instance.duration = duration # Save the AssayCompoundInstance if commit: conflicting_assay_compound_instance = assay_compound_instances.get( ( instance.compound_instance.id, instance.concentration, instance.concentration_unit.id, instance.addition_time, instance.duration, instance.addition_location_id ), None ) if not conflicting_assay_compound_instance: instance.save() assay_compound_instances.update({ ( instance.compound_instance.id, instance.concentration, instance.concentration_unit.id, instance.addition_time, instance.duration, instance.addition_location_id ): True }) class AssaySetupCellForm(ModelFormSplitTime): class Meta(object): model = AssaySetupCell exclude = tracking def __init__(self, *args, **kwargs): # self.static_choices = kwargs.pop('static_choices', None) super(AssaySetupCellForm, self).__init__(*args, **kwargs) # Change widget size self.fields['cell_sample'].widget.attrs['style'] = 'width:75px;' self.fields['passage'].widget.attrs['style'] = 'width:75px;' self.fields['density_unit'].queryset = PhysicalUnits.objects.filter(availability__contains='cell').order_by('unit') # TODO: IDEALLY THE CHOICES WILL BE PASSED VIA A KWARG class AssaySetupCellFormSet(BaseModelFormSetForcedUniqueness): custom_fields = ( 'matrix_item', 'cell_sample', 'biosensor', 'density_unit', 'addition_location' ) def __init__(self, *args, **kwargs): self.matrix = kwargs.pop('matrix', None) super(AssaySetupCellFormSet, self).__init__(*args, **kwargs) filters = 
{'matrix_item': {'matrix_id': self.matrix.id}} self.dic = get_dic_for_custom_choice_field(self, filters=filters) for form in self.forms: for field in self.custom_fields: form.fields[field] = DicModelChoiceField(field, self.model, self.dic) # Purge all classes for field in form.fields: form.fields[field].widget.attrs['class'] = '' class AssaySetupSettingForm(ModelFormSplitTime): class Meta(object): model = AssaySetupCell exclude = tracking class AssaySetupSettingFormSet(BaseModelFormSetForcedUniqueness): custom_fields = ( 'matrix_item', 'setting', 'unit', 'addition_location' ) def __init__(self, *args, **kwargs): self.matrix = kwargs.pop('matrix', None) super(AssaySetupSettingFormSet, self).__init__(*args, **kwargs) filters = {'matrix_item': {'matrix_id': self.matrix.id }} self.dic = get_dic_for_custom_choice_field(self, filters=filters) for form in self.forms: for field in self.custom_fields: form.fields[field] = DicModelChoiceField(field, self.model, self.dic) # Purge all classes for field in form.fields: form.fields[field].widget.attrs['class'] = '' def _construct_form(self, i, **kwargs): form = super(AssaySetupSettingFormSet, self)._construct_form(i, **kwargs) for time_unit in list(TIME_CONVERSIONS.keys()): # Create fields for Days, Hours, Minutes form.fields['addition_time_' + time_unit] = forms.FloatField(initial=0) form.fields['duration_' + time_unit] = forms.FloatField(initial=0) # Change style # form.fields['addition_time_' + time_unit].widget.attrs['style'] = 'width:75px;' # form.fields['duration_' + time_unit].widget.attrs['style'] = 'width:75px;' if form.instance.addition_time: # Fill additional time addition_time_in_minutes_remaining = form.instance.addition_time for time_unit, conversion in list(TIME_CONVERSIONS.items()): initial_time_for_current_field = int(addition_time_in_minutes_remaining / conversion) if initial_time_for_current_field: form.fields['addition_time_' + time_unit].initial = initial_time_for_current_field addition_time_in_minutes_remaining -= initial_time_for_current_field * conversion # Add fractions of minutes if necessary if addition_time_in_minutes_remaining: form.fields['addition_time_minute'].initial += addition_time_in_minutes_remaining if form.instance.duration: # Fill duration duration_in_minutes_remaining = form.instance.duration for time_unit, conversion in list(TIME_CONVERSIONS.items()): initial_time_for_current_field = int(duration_in_minutes_remaining / conversion) if initial_time_for_current_field: form.fields['duration_' + time_unit].initial = initial_time_for_current_field duration_in_minutes_remaining -= initial_time_for_current_field * conversion # Add fractions of minutes if necessary if duration_in_minutes_remaining: form.fields['duration_minute'].initial += duration_in_minutes_remaining return form AssaySetupCompoundFormSetFactory = modelformset_factory( AssaySetupCompound, extra=1, exclude=[tracking], form=AssaySetupCompoundForm, formset=AssaySetupCompoundFormSet, can_delete=True ) AssaySetupCellFormSetFactory = modelformset_factory( AssaySetupCell, extra=1, exclude=[tracking], form=AssaySetupCellForm, formset=AssaySetupCellFormSet, can_delete=True ) AssaySetupSettingFormSetFactory = modelformset_factory( AssaySetupSetting, extra=1, exclude=[tracking], form=AssaySetupSettingForm, formset=AssaySetupSettingFormSet, can_delete=True ) AssaySetupCompoundInlineFormSetFactory = inlineformset_factory( AssayMatrixItem, AssaySetupCompound, extra=1, exclude=[tracking], form=AssaySetupCompoundForm, formset=AssaySetupCompoundInlineFormSet, 
can_delete=True ) AssaySetupCellInlineFormSetFactory = inlineformset_factory( AssayMatrixItem, AssaySetupCell, extra=1, exclude=[tracking], form=AssaySetupCellForm, # formset=AssaySetupCellFormSet, can_delete=True ) AssaySetupSettingInlineFormSetFactory = inlineformset_factory( AssayMatrixItem, AssaySetupSetting, extra=1, exclude=[tracking], form=AssaySetupSettingForm, # formset=AssaySetupSettingFormSet, can_delete=True ) class AssayMatrixItemFullForm(SignOffMixin, BootstrapForm): """Frontend form for Items""" class Meta(object): model = AssayMatrixItem widgets = { 'concentration': forms.NumberInput(attrs={'style': 'width:75px;'}), 'notebook_page': forms.NumberInput(attrs={'style': 'width:75px;'}), 'notes': forms.Textarea(attrs={'cols': 50, 'rows': 3}), 'variance_from_organ_model_protocol': forms.Textarea(attrs={'cols': 50, 'rows': 2}), } # Assay Run ID is always bound to the parent Study exclude = ('study', 'matrix', 'column_index', 'row_index') + tracking + restricted def clean(self): """Cleans the Chip Setup Form Ensures the the name is unique in the current study Ensures that the data for a compound is complete Prevents changes to the chip if data has been uploaded (avoiding conflicts between data and entries) """ super(AssayMatrixItemFullForm, self).clean() # Make sure the barcode/ID is unique in the study if AssayMatrixItem.objects.filter( study_id=self.instance.study.id, name=self.cleaned_data.get('name') ).exclude(id=self.instance.id): raise forms.ValidationError({'name': ['ID/Barcode must be unique within study.']}) # Make sure the device matches if necessary if self.instance.matrix.device and (self.instance.matrix.device != self.cleaned_data.get('device')): raise forms.ValidationError( {'device': ['The item\'s device must match the one specified in the Matrix: "{}"'.format(self.instance.matrix.device)]} ) # SetupFormsMixin is unfortunate, but expedient class AssayMatrixItemForm(SetupFormsMixin, SignOffMixin, BootstrapForm): # CONTRIVED! series_data = forms.CharField(required=False) class Meta(object): model = AssayMatrixItem # WE OUGHT TO BE ABLE TO EDIT A SELECT FEW THINGS fields = ( 'name', 'group', # Notes stuff worth keeping?? 'scientist', 'notebook', 'notebook_page', 'notes' ) + flag_group def __init__(self, *args, **kwargs): super(AssayMatrixItemForm, self).__init__(*args, **kwargs) # Gee, it might be nice to have a better way to query groups! # Use chip groups if chip if self.instance.matrix.representation == 'chips': self.fields['group'].queryset = AssayGroup.objects.filter( # We will always know the study, this can never be an add page study_id=self.instance.study_id, organ_model__device__device_type='chip' ).prefetch_related('organ_model__device') # UGLY: DO NOT LIKE THIS # Prepopulate series_data self.fields['series_data'].initial = self.instance.study.get_group_data_string(get_chips=True) # Otherwise use plate groups # TODO: IF WE ARE BINDING PLATES TO MODELS, WE CANNOT DO THIS! else: self.fields['group'].queryset = AssayGroup.objects.filter( study_id=self.instance.study_id, # See above # OOPS! WE NEED TO RESPECT THE ORGAN MODEL OR WHATEVER # organ_model__device__device_type='plate' # MATCH THE ORGAN MODEL ID OF CURRENT GROUP! 
organ_model_id=self.instance.group.organ_model_id ).prefetch_related('organ_model__device') # UGLY: DO NOT LIKE THIS # Prepopulate series_data self.fields['series_data'].initial = self.instance.study.get_group_data_string(plate_id=self.instance.matrix_id) # DEPRECATED JUNK class AssayMatrixItemInlineForm(forms.ModelForm): class Meta(object): model = AssayMatrixItem exclude = ('study', 'matrix') + tracking # TODO NEED TO TEST (NOTE FROM THE FUTURE: "NOT ANYMORE I DON'T, THIS IS DEPRECATED TRASH!") class AssayMatrixItemFormSet(BaseInlineFormSetForcedUniqueness): custom_fields = ( 'device', 'organ_model', 'organ_model_protocol', 'failure_reason' ) def __init__(self, *args, **kwargs): # Get the study self.study = kwargs.pop('study', None) self.user = kwargs.pop('user', None) super(AssayMatrixItemFormSet, self).__init__(*args, **kwargs) if not self.study: self.study = self.instance.study self.dic = get_dic_for_custom_choice_field(self) for form in self.forms: for field in self.custom_fields: form.fields[field] = DicModelChoiceField(field, self.model, self.dic) if self.study: form.instance.study = self.study if form.instance.pk: form.instance.modified_by = self.user else: form.instance.created_by = self.user self.invalid_matrix_item_names = { item.name: item.id for item in AssayMatrixItem.objects.filter(study_id=self.study.id) } def clean(self): super(AssayMatrixItemFormSet, self).clean() for index, form in enumerate(self.forms): current_data = form.cleaned_data if current_data and not current_data.get('DELETE', False): if self.instance.number_of_columns: if current_data.get('column_index') > self.instance.number_of_columns: raise forms.ValidationError( 'An Item extends beyond the columns of the Matrix.' ' Increase the size of the Matrix and/or delete the offending Item if necessary.' ) if current_data.get('row_index') > self.instance.number_of_rows: raise forms.ValidationError( 'An Item extends beyond the rows of the Matrix.' ' Increase the size of the Matrix and/or delete the offending Item if necessary.' 
) # Make sure the barcode/ID is unique in the study conflicting_name_item_id = self.invalid_matrix_item_names.get(current_data.get('name'), None) if conflicting_name_item_id and conflicting_name_item_id != form.instance.pk: form.add_error('name', 'This name conflicts with existing Item names in this Study.') # Make sure the device matches if necessary if self.instance.device and (self.instance.device != current_data.get('device')): form.add_error('device', 'This device conflicts with the one listed in the Matrix.') AssayMatrixItemFormSetFactory = inlineformset_factory( AssayMatrix, AssayMatrixItem, formset=AssayMatrixItemFormSet, form=AssayMatrixItemInlineForm, extra=1, exclude=('study',) + tracking ) # CONTRIVED class AssayStudyDeleteForm(forms.ModelForm): class Meta(object): model = AssayStudy fields = [] class AssayStudySignOffForm(SignOffMixin, BootstrapForm): class Meta(object): model = AssayStudy fields = [ 'signed_off', 'signed_off_notes', 'release_date', ] widgets = { 'signed_off_notes': forms.Textarea(attrs={'cols': 50, 'rows': 2}), } class AssayStudyStakeholderSignOffForm(SignOffMixin, BootstrapForm): class Meta(object): model = AssayStudyStakeholder fields = ['signed_off', 'signed_off_notes'] widgets = { 'signed_off_notes': forms.Textarea(attrs={'cols': 50, 'rows': 2}), } class AssayStudyStakeholderFormSet(BaseInlineFormSet): class Meta(object): model = AssayStudyStakeholder def __init__(self, *args, **kwargs): self.user = kwargs.pop('user', None) super(AssayStudyStakeholderFormSet, self).__init__(*args, **kwargs) def get_queryset(self): if not hasattr(self, '_queryset'): # TODO FILTER OUT THOSE USER ISN'T ADMIN OF # TODO REVIEW user_admin_groups = self.user.groups.filter(name__contains=ADMIN_SUFFIX) potential_groups = [group.name.replace(ADMIN_SUFFIX, '') for group in user_admin_groups] queryset = super(AssayStudyStakeholderFormSet, self).get_queryset() # Only include unsigned off forms that user is admin of! 
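# Illustrative sketch of the group-name handling used in this queryset (the concrete
# suffix value lives in ADMIN_SUFFIX elsewhere in the project and is an assumption here):
# if a user belongs to the groups ['Lab A Admin', 'Lab B'], only 'Lab A Admin' matches
# the admin filter, stripping the suffix yields potential_groups == ['Lab A'], and so
# only stakeholder rows for the 'Lab A' group that are still unsigned survive the filter:
#
#     user_admin_groups = user.groups.filter(name__contains=ADMIN_SUFFIX)
#     potential_groups = [g.name.replace(ADMIN_SUFFIX, '') for g in user_admin_groups]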
self._queryset = queryset.filter( group__name__in=potential_groups, signed_off_by=None ) return self._queryset def save(self, commit=True): for form in self.forms: signed_off = form.cleaned_data.get('signed_off', False) if signed_off and is_group_admin(self.user, form.instance.group.name): form.instance.signed_off_by = self.user form.instance.signed_off_date = timezone.now() form.save(commit=True) # Really, all factories should be declared like so (will have to do this for upcoming revision) AssayStudyStakeholderFormSetFactory = inlineformset_factory( AssayStudy, AssayStudyStakeholder, form=AssayStudyStakeholderSignOffForm, formset=AssayStudyStakeholderFormSet, extra=0, can_delete=False ) class AssayStudyDataUploadForm(BootstrapForm): """Form for Bulk Uploads""" # Excluded for now # overwrite_option = OVERWRITE_OPTIONS_BULK # EVIL WAY TO GET PREVIEW DATA preview_data = forms.BooleanField(initial=False, required=False) class Meta(object): model = AssayStudy fields = ('bulk_file',) def __init__(self, *args, **kwargs): """Init the Bulk Form kwargs: request -- the current request """ self.request = kwargs.pop('request', None) super(AssayStudyDataUploadForm, self).__init__(*args, **kwargs) def clean(self): data = super(AssayStudyDataUploadForm, self).clean() # Get the study in question study = self.instance # test_file = None # TODO TODO TODO TODO TODO if self.request and self.request.FILES and data.get('bulk_file'): # Make sure that this isn't the current file if not study.bulk_file or study.bulk_file != data.get('bulk_file'): test_file = data.get('bulk_file', '') file_processor = AssayFileProcessor(test_file, study, self.request.user) # Process the file file_processor.process_file() # Evil attempt to acquire preview data self.cleaned_data['preview_data'] = file_processor.preview_data return self.cleaned_data class AssayStudySetForm(SignOffMixin, BootstrapForm): class Meta(object): model = AssayStudySet exclude = tracking widgets = { 'description': forms.Textarea(attrs={'rows': 10}) } def __init__(self, *args, **kwargs): super(AssayStudySetForm, self).__init__(*args, **kwargs) study_queryset = get_user_accessible_studies( self.user ).prefetch_related( 'group__center_groups', ) assay_queryset = AssayStudyAssay.objects.filter( study_id__in=study_queryset.values_list('id', flat=True) ).prefetch_related( 'target', 'method', 'unit' ) self.fields['studies'].queryset = study_queryset self.fields['assays'].queryset = assay_queryset # CONTRIVED self.fields['studies'].widget.attrs['class'] = 'no-selectize' self.fields['assays'].widget.attrs['class'] = 'no-selectize' class AssayReferenceForm(BootstrapForm): query_term = forms.CharField( initial='', required=False, label='PubMed ID / DOI' ) class Meta(object): model = AssayReference exclude = tracking widgets = { 'query_term': forms.Textarea(attrs={'rows': 1}), 'title': forms.Textarea(attrs={'rows': 2}), 'authors': forms.Textarea(attrs={'rows': 1}), 'abstract': forms.Textarea(attrs={'rows': 10}), 'publication': forms.Textarea(attrs={'rows': 1}), } AssayStudyReferenceFormSetFactory = inlineformset_factory( AssayStudy, AssayStudyReference, extra=1, exclude=[] ) AssayStudySetReferenceFormSetFactory = inlineformset_factory( AssayStudySet, AssayStudySetReference, extra=1, exclude=[] ) # Convoluted def process_error_with_annotation(prefix, row, column, full_error): current_error = dict(full_error) modified_error = [] for error_field, error_values in current_error.items(): for error_value in error_values: modified_error.append([ '|'.join([str(x) for x in [ 
prefix, row, column, error_field ]]) + '-' + error_value ]) return modified_error class AssayMatrixFormNew(SetupFormsMixin, SignOffMixin, BootstrapForm): # ADD test_types test_type = forms.ChoiceField( initial='control', choices=TEST_TYPE_CHOICES ) class Meta(object): model = AssayMatrix # ODD fields = [] def __init__(self, *args, **kwargs): """Init the Study Form Kwargs: user -- the user in question """ # PROBABLY DON'T NEED THIS? # self.user = kwargs.pop('user', None) super(AssayMatrixFormNew, self).__init__(*args, **kwargs) # SLOPPY self.fields['test_type'].widget.attrs['class'] += ' no-selectize test-type' # Bad self.fields['test_type'].widget.attrs['style'] = 'width:100px;' # PLEASE NOTE CRUDE HANDLING OF m2m class AssayTargetForm(BootstrapForm): # For adding to category m2m category = forms.ModelMultipleChoiceField( queryset=AssayCategory.objects.all().order_by('name'), # Should this be required? required=False, # empty_label='All' ) class Meta(object): model = AssayTarget exclude = tracking widgets = { 'description': forms.Textarea(attrs={'cols': 50, 'rows': 3}), } def __init__(self, *args, **kwargs): super(AssayTargetForm, self).__init__(*args, **kwargs) # Get category if possible if self.instance and self.instance.id: self.initial_categories = AssayCategory.objects.filter( targets__id=self.instance.id ) self.fields['category'].initial = ( self.initial_categories ) else: self.initial_categories = AssayCategory.objects.none() # Sort the methods # Would it be better to have this applied to all method queries? self.fields['methods'].queryset = AssayMethod.objects.all().order_by('name') def save(self, commit=True): new_target = super(AssayTargetForm, self).save(commit) if commit: if self.cleaned_data.get('category', None): for current_category in self.cleaned_data.get('category', None): current_category.targets.add(self.instance) # Permit removals for the moment # Crude removal for initial_category in self.initial_categories: if initial_category not in self.cleaned_data.get('category', None): initial_category.targets.remove(self.instance) return new_target class AssayTargetRestrictedForm(BootstrapForm): # For adding to category m2m category = forms.ModelMultipleChoiceField( queryset=AssayCategory.objects.all().order_by('name'), # Should this be required? 
required=False, # empty_label='All' ) # We don't actually want restricted users to meddle with the methods straight (they could remove methods) method_proxy = forms.ModelMultipleChoiceField( queryset=AssayMethod.objects.all().order_by('name'), label='Methods' ) class Meta(object): model = AssayTarget fields = ['category', 'method_proxy'] def __init__(self, *args, **kwargs): super(AssayTargetRestrictedForm, self).__init__(*args, **kwargs) # Get category if possible # (It should always be possible, this form is for editing) if self.instance and self.instance.id: self.fields['category'].initial = ( AssayCategory.objects.filter( targets__id=self.instance.id ) ) self.fields['method_proxy'].initial = ( self.instance.methods.all() ) def save(self, commit=True): new_target = super(AssayTargetRestrictedForm, self).save(commit) if commit: if self.cleaned_data.get('category', None): for current_category in self.cleaned_data.get('category', None): current_category.targets.add(self.instance) if self.cleaned_data.get('method_proxy', None): for current_method in self.cleaned_data.get('method_proxy', None): self.instance.methods.add(current_method) return new_target class AssayMethodForm(BootstrapForm): # For adding to target m2m targets = forms.ModelMultipleChoiceField( queryset=AssayTarget.objects.all().order_by('name'), # No longer required to prevent circularity with Target required=False ) class Meta(object): model = AssayMethod exclude = tracking widgets = { 'description': forms.Textarea(attrs={'cols': 50, 'rows': 3}), } def __init__(self, *args, **kwargs): super(AssayMethodForm, self).__init__(*args, **kwargs) # Get target if possible if self.instance and self.instance.id: self.initial_targets = AssayTarget.objects.filter( methods__id=self.instance.id ) self.fields['targets'].initial = ( self.initial_targets ) else: self.initial_targets = AssayTarget.objects.none() def save(self, commit=True): new_method = super(AssayMethodForm, self).save(commit) if commit: for current_target in self.cleaned_data.get('targets', None): current_target.methods.add(self.instance) # Permit removals for the moment # Crude removal for initial_target in self.initial_targets: if initial_target not in self.cleaned_data.get('targets', None): initial_target.methods.remove(self.instance) return new_method class AssayMethodRestrictedForm(BootstrapForm): # For adding to target m2m targets = forms.ModelMultipleChoiceField( queryset=AssayTarget.objects.all().order_by('name'), # No longer required to prevent circularity with Target required=False ) class Meta(object): model = AssayMethod # Only include the target, we don't want anything else to change fields = ['targets'] def __init__(self, *args, **kwargs): super(AssayMethodRestrictedForm, self).__init__(*args, **kwargs) # Get target if possible # (It should always be possible, this form is only for editing) if self.instance and self.instance.id: self.fields['targets'].initial = ( AssayTarget.objects.filter( methods__id=self.instance.id ) ) def save(self, commit=True): new_method = super(AssayMethodRestrictedForm, self).save(commit) if commit: # In the restricted form, one is allowed to add targets ONLY for current_target in self.cleaned_data.get('targets', None): current_target.methods.add(self.instance) return new_method class PhysicalUnitsForm(BootstrapForm): class Meta(object): model = PhysicalUnits exclude = tracking + ('availability',) widgets = { 'description': forms.Textarea(attrs={'cols': 50, 'rows': 3}), } class AssayMeasurementTypeForm(BootstrapForm): class 
Meta(object): model = AssayMeasurementType exclude = tracking widgets = { 'description': forms.Textarea(attrs={'cols': 50, 'rows': 3}), } class AssaySampleLocationForm(BootstrapForm): class Meta(object): model = AssaySampleLocation exclude = tracking widgets = { 'description': forms.Textarea(attrs={'cols': 50, 'rows': 3}), } class AssaySettingForm(BootstrapForm): class Meta(object): model = AssaySetting exclude = tracking widgets = { 'description': forms.Textarea(attrs={'cols': 50, 'rows': 3}), } class AssaySupplierForm(BootstrapForm): class Meta(object): model = AssaySupplier exclude = tracking widgets = { 'description': forms.Textarea(attrs={'cols': 50, 'rows': 3}), } # CONTRIVED class AssayDataFileUploadDeleteForm(forms.ModelForm): class Meta(object): model = AssayDataFileUpload fields = [] ##### # sck - ASSAY PLATE MAP START # monkey patch to display method target and unit combo as needed in the assay plate map page class AbstractClassAssayStudyAssay(AssayStudyAssay): class Meta: proxy = True def __str__(self): # return 'TARGET: {0} METHOD: {1} UNIT: {2}'.format(self.target, self.method, self.unit) return '{2} --- TARGET: {0} by METHOD: {1}'.format(self.target, self.method, self.unit) # Get info to populate pick lists; no qc needed on this form, just to use on webpage to allow user selections class AssayPlateReadMapAdditionalInfoForm(forms.Form): """Form for Assay Plate Reader Map add/update/view extra info for dropdowns that are just used in GUI (not saved).""" def __init__(self, *args, **kwargs): study_id = kwargs.pop('study_id', None) self.user = kwargs.pop('user', None) super(AssayPlateReadMapAdditionalInfoForm, self).__init__(*args, **kwargs) # note that the non-selectized versions are manipulated in javascript to facilitate the plate map # they are not displayed to the user (they are hidden) # something did very early in development...probably would do differently now self.fields['se_matrix_item'].queryset = AssayMatrixItem.objects.filter(study_id=study_id).order_by('name',) self.fields['ns_matrix_item'].queryset = AssayMatrixItem.objects.filter(study_id=study_id).order_by('name',) self.fields['ns_matrix_item'].widget.attrs.update({'class': 'no-selectize'}) self.fields['ns_location'].widget.attrs.update({'class': 'no-selectize'}) self.fields['se_matrix'].queryset = AssayMatrix.objects.filter( study_id=study_id ).order_by('name',) self.fields['se_matrix'].widget.attrs.update({'class': ' required'}) self.fields['se_platemap'].queryset = AssayPlateReaderMap.objects.filter( study_id=study_id ).order_by('name',) self.fields['se_platemap'].widget.attrs.update({'class': ' required'}) # before got to development of calibration/processing the data ns_matrix_item = forms.ModelChoiceField( queryset=AssayMatrixItem.objects.none(), required=False, ) se_matrix_item = forms.ModelChoiceField( queryset=AssayMatrixItem.objects.none(), required=False, ) se_matrix = forms.ModelChoiceField( queryset=AssayMatrix.objects.none(), required=False, ) se_platemap = forms.ModelChoiceField( queryset=AssayPlateReaderMap.objects.none(), required=False, ) se_main_well_use = forms.ChoiceField( choices=assay_plate_reader_main_well_use_choices ) se_blank_well_use = forms.ChoiceField( choices=assay_plate_reader_blank_well_use_choices ) se_time_unit = forms.ChoiceField( choices=assay_plate_reader_time_unit_choices ) se_location = forms.ModelChoiceField( queryset=AssaySampleLocation.objects.all().order_by( 'name' ), required=False, ) ns_location = forms.ModelChoiceField( queryset=AssaySampleLocation.objects.all(), 
required=False, ) se_increment_operation = forms.ChoiceField( choices=(('divide', 'Divide'), ('multiply', 'Multiply'), ('subtract', 'Subtract'), ('add', 'Add')) ) form_number_time = forms.DecimalField( required=False, initial=1, ) form_number_time.widget.attrs.update({'class': 'form-control'}) form_number_default_time = forms.DecimalField( required=False, initial=1, ) form_number_default_time.widget.attrs.update({'class': 'form-control'}) form_number_standard_value = forms.DecimalField( required=False, initial=0, ) form_number_standard_value.widget.attrs.update({'class': 'form-control'}) form_number_dilution_factor = forms.DecimalField( required=False, initial=1, ) form_number_dilution_factor.widget.attrs.update({'class': 'form-control'}) form_number_collection_volume = forms.DecimalField( required=False, initial=1, ) form_number_collection_volume.widget.attrs.update({'class': 'form-control'}) form_number_collection_time = forms.DecimalField( required=False, initial=1, ) form_number_collection_time.widget.attrs.update({'class': 'form-control'}) form_number_increment_value = forms.DecimalField( required=False, initial=1, ) form_number_increment_value.widget.attrs.update({'class': 'form-control'}) # Parent for plate reader map page class AssayPlateReaderMapForm(BootstrapForm): """Form for Assay Plate Reader Map""" class Meta(object): model = AssayPlateReaderMap fields = [ # 'id', do not need in queryset 'name', 'description', 'device', 'study_assay', 'time_unit', 'volume_unit', 'standard_unit', 'cell_count', 'standard_molecular_weight', 'well_volume' ] widgets = { 'description': forms.Textarea(attrs={'cols': 50, 'rows': 3}), } def __init__(self, *args, **kwargs): self.study = kwargs.pop('study', None) # self.user = kwargs.pop('user', None) super(AssayPlateReaderMapForm, self).__init__(*args, **kwargs) # need these or get blank study_assay in the update page (add page worked okay without) if not self.study and self.instance.study: self.study = self.instance.study if self.study: self.instance.study = self.study # plate map instance - note, based on model, this is the name, use .id for the pk my_instance = self.instance # note that, if leave selectize on, will need to change js file $("#id_device").val(matrix_size); # for tips on working with selectize, search in path for HANDY # self.fields['device'].widget.attrs['class'] += ' no-selectize' self.fields['name'].initial = "map-" + datetime.datetime.now().strftime("%Y%m%d")+"-"+datetime.datetime.now().strftime('%H:%M:%S') self.fields['name'].widget.attrs['class'] += ' required' self.fields['device'].widget.attrs['class'] += ' required' self.fields['time_unit'].widget.attrs['class'] += ' required' self.fields['standard_unit'].widget.attrs['class'] += ' required' self.fields['study_assay'].queryset = AbstractClassAssayStudyAssay.objects.filter( study_id=self.study ).prefetch_related( 'target', 'method', 'unit', ) self.fields['study_assay'].widget.attrs['class'] += ' required' # the selectize was causing PROBLEMS, I turned it off this field # HANDY - turn of selectize at the form level # self.fields['volume_unit'].widget.attrs.update({'class': 'no-selectize'}) # self.fields['volume_unit'].widget.attrs['class'] += ' form-control' self.fields['standard_molecular_weight'].widget.attrs['class'] += ' form-control' ###### # START section to deal with raw data showing in the plate map after file assignment # this will populate a dropdown that lets the user pick which file block to see on the page (map and calibrate) # For the dropdown, only look for those 
file blocks that have a populated file block id # get a record in the table with the plate index of 0 and that have a file block id as_value_formset_with_file_block = AssayPlateReaderMapItemValue.objects.filter( assayplatereadermap=my_instance.id ).prefetch_related( 'assayplatereadermapitem', ).filter( assayplatereadermapitem__plate_index=0 ).filter( assayplatereadermapdatafileblock__isnull=False ).order_by( 'assayplatereadermapdatafileblock__id', ) distinct_plate_map_with_select_string = [] distinct_plate_map_with_block_pk = [] number_filed_combos = len(as_value_formset_with_file_block) # print("print number of filed combos-forms.py: ", number_filed_combos) # queryset should have one record for each value SET that HAS a file-block associated to it # make a choice list/field for the file-block combos for this plate map if number_filed_combos > 0: i = 0 for record in as_value_formset_with_file_block: short_file_name = os.path.basename(str(record.assayplatereadermapdatafile.plate_reader_file)) data_block_label = str(record.assayplatereadermapdatafileblock.data_block) data_block_metadata = record.assayplatereadermapdatafileblock.data_block_metadata # data_file_id_str = str(record.assayplatereadermapdatafile.id) data_file_block_id_str = str(record.assayplatereadermapdatafileblock.id) # make a choice tuple list for showing selections and a choice tuple list of containing the file pk and block pk for javascript pick_value = str(i) pick_string = 'FILE: ' + short_file_name + ' BLOCK: ' + data_block_label + ' ' + data_block_metadata # pick_string_pk = data_file_id_str + '-' + data_file_block_id_str pick_string_block_pk = data_file_block_id_str distinct_plate_map_with_select_string.append((pick_value, pick_string)) distinct_plate_map_with_block_pk.append((pick_value, pick_string_block_pk)) # print("looking for unique blocks counter ", i) i = i + 1 # self.fields['ns_file_pk_block_pk'].widget.attrs['class'] += ' no-selectize' self.fields['form_number_file_block_combos'].required = False self.fields['form_number_file_block_combos'].initial = number_filed_combos # file block options associated with a specific plate map self.fields['se_block_select_string'].required = False self.fields['se_block_select_string'].widget.attrs['class'] += ' required' self.fields['se_block_select_string'].choices = distinct_plate_map_with_select_string self.fields['ns_block_select_pk'].required = False self.fields['ns_block_select_pk'].widget.attrs.update({'class': 'no-selectize'}) self.fields['ns_block_select_pk'].choices = distinct_plate_map_with_block_pk self.fields['se_form_calibration_curve'].widget.attrs.update({'class': ' required'}) self.fields['form_make_mifc_on_submit'].widget.attrs.update({'class': ' big-checkbox'}) self.fields['se_form_calibration_curve'].required = False self.fields['se_form_blank_handling'].required = False self.fields['radio_replicate_handling_average_or_not'].required = False # HANDY - save problems, this is likely the cause (required fields!) 
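# Worked example of the file-block choice lists built above (file name and metadata are
# invented for illustration): for a plate map with two assigned file blocks, the loop
# produces parallel choice tuples such as
#
#     distinct_plate_map_with_select_string = [
#         ('0', 'FILE: reader_output.csv BLOCK: 0 kinetic read 1'),
#         ('1', 'FILE: reader_output.csv BLOCK: 1 kinetic read 2'),
#     ]
#     distinct_plate_map_with_block_pk = [('0', '57'), ('1', '58')]
#
# The first list feeds the visible se_block_select_string dropdown; the second keeps the
# database pk of each file block lined up at the same index for the hidden
# ns_block_select_pk field.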
# self.fields['form_data_processing_multiplier_string'].required = False # # self.fields['form_data_processing_multiplier_string_short'].required = False # self.fields['form_data_processing_multiplier_value_short'].required = False # these multiplier fields were added to explain the multiplier in a table # the long string was unacceptable to the project PI # these really don't have to be form fields (not needed for data processing), but it was just easier/faster # self.fields['form_data_processing_multiplier_string1'].required = False # self.fields['form_data_processing_multiplier_string2'].required = False # self.fields['form_data_processing_multiplier_string3'].required = False # self.fields['form_data_processing_multiplier_string4'].required = False # self.fields['form_data_processing_multiplier_string5'].required = False # self.fields['form_data_processing_multiplier_string6'].required = False # self.fields['form_data_processing_multiplier_string7'].required = False # self.fields['form_data_processing_multiplier_string8'].required = False # self.fields['form_data_processing_multiplier_string9'].required = False # calibration fields - only a few are really needed as form fields (eg the calibration curve used, bounds) # many are not really needed in the data processing and could be handled differently self.fields['form_data_parsable_message'].required = False self.fields['form_calibration_curve_method_used'].required = False # self.fields['form_calibration_equation'].required = False # self.fields['form_calibration_rsquared'].required = False # self.fields['form_calibration_parameter_1_string'].required = False # self.fields['form_calibration_parameter_2_string'].required = False # self.fields['form_calibration_parameter_3_string'].required = False # self.fields['form_calibration_parameter_4_string'].required = False # self.fields['form_calibration_parameter_5_string'].required = False # self.fields['form_calibration_parameter_1_value'].required = False # self.fields['form_calibration_parameter_2_value'].required = False # self.fields['form_calibration_parameter_3_value'].required = False # self.fields['form_calibration_parameter_4_value'].required = False # self.fields['form_calibration_parameter_5_value'].required = False self.fields['form_calibration_standard_fitted_min_for_e'].required = False self.fields['form_calibration_standard_fitted_max_for_e'].required = False self.fields['form_calibration_sample_blank_average'].required = False self.fields['form_calibration_standard_standard0_average'].required = False self.fields['form_calibration_method'].required = False self.fields['form_calibration_target'].required = False self.fields['form_calibration_unit'].required = False self.fields['form_number_standards_this_plate'].required = False self.fields['form_hold_the_data_block_metadata_string'].required = False self.fields['form_hold_the_omits_string'].required = False self.fields['form_hold_the_notes_string'].required = False # Need a valid choice field. # When the selected plate map has standards, the user will never see this field and will not need it. # If the plate does not have standards, the user will need the option to pick to borrow standards from another plate. # Lab representative (ie Richard) indicated that standards, standard blanks, and sample blanks would all be borrowed from the same plate! # does this plate map have standards? 
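# Equivalent intent of the standards check below, sketched for clarity (the code keeps
# the full queryset because the count is reused afterwards): the question being asked is
# simply whether any item on this plate map is a standard well, e.g.
#
#     AssayPlateReaderMapItem.objects.filter(
#         assayplatereadermap=my_instance.id,
#         well_use='standard',
#     ).exists()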
does_this_plate_have_standards = AssayPlateReaderMapItem.objects.filter( assayplatereadermap=my_instance.id ).filter( well_use='standard' ) number_standards_wells_on_plate = len(does_this_plate_have_standards) choiceBorrowData = (0, 'Select One'), choiceBorrowDataToPlateMap = (0, 0), if number_standards_wells_on_plate > 0: # left - file block pk in both # right is a string of the data block meta data for selection of data block pk (left) choiceBorrowData = choiceBorrowData # right is plate map pk choiceBorrowDataToPlateMap = choiceBorrowDataToPlateMap else: # if we have to borrow standards, need a list to pick from - add to choiceBorrowData # need to borrow standards from another plate # 20200510 - moving this to here from ajax call. Might move back depending on performance. # most users will not do it this way.... as_value_formset_with_file_block_standard = AssayPlateReaderMapItemValue.objects.filter( study_id=self.study ).filter( assayplatereadermapdatafileblock__isnull=False ).prefetch_related( 'assayplatereadermapdatafileblock', 'assayplatereadermap', 'assayplatereadermapitem', ).filter( assayplatereadermapitem__well_use='standard' ).order_by( 'assayplatereadermapdatafileblock__id', 'assayplatereadermapitem__well_use' ) # print('as_value_formset_with_file_block_standard') # print(as_value_formset_with_file_block_standard) prev_file = "none" prev_data_block_file_specific_pk = 0 # queryset should have one record for each value SET that HAS a file-block and at least one standard associated to it if len(as_value_formset_with_file_block_standard) > 0: for record in as_value_formset_with_file_block_standard: short_file_name = os.path.basename(str(record.assayplatereadermapdatafile.plate_reader_file)) # this is the data block of the file (for file 0 to something...) 
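# Worked example of the borrow-standards choices accumulated in the loop below (names
# and pks are invented for illustration): each distinct file/block pair that contains
# standard wells adds one entry keyed by its AssayPlateReaderMapDataFileBlock pk, on top
# of the (0, 'Select One') placeholder, e.g.
#
#     choiceBorrowData = (
#         (0, 'Select One'),
#         (41, 'PLATEMAP: map-20200510 FILE: reader_output.csv BLOCK: standards run (0)'),
#     )
#     choiceBorrowDataToPlateMap = ((0, 0), (41, 12))
#
# so a block picked in se_block_standard_borrow_string can later be mapped back to the
# plate map (pk 12 in this sketch) that the standards are borrowed from.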
data_block_file_specific_pk = record.assayplatereadermapdatafileblock.data_block if prev_file == short_file_name and prev_data_block_file_specific_pk == data_block_file_specific_pk: pass else: data_platemap_pk = record.assayplatereadermap_id data_platemap_name = record.assayplatereadermap.name data_block_metadata = record.assayplatereadermapdatafileblock.data_block_metadata data_block_database_pk = record.assayplatereadermapdatafileblock.id # make a choice tuple list for showing selections and a choice tuple list of containing the file pk and block pk for javascript pick_string = 'PLATEMAP: ' + data_platemap_name + ' FILE: ' + short_file_name + ' BLOCK: ' + data_block_metadata + ' (' + str( data_block_file_specific_pk) + ')' addString1 = (data_block_database_pk, pick_string), choiceBorrowData = choiceBorrowData + addString1 addString2 = (data_block_database_pk, data_platemap_pk), choiceBorrowDataToPlateMap = choiceBorrowDataToPlateMap + (addString2) prev_file = short_file_name prev_data_block_file_specific_pk = data_block_file_specific_pk # print('choiceBorrowData') # print(choiceBorrowData) # print('choiceBorrowDataToPlateMap') # print(choiceBorrowDataToPlateMap) self.fields['se_block_standard_borrow_string'].choices = choiceBorrowData self.fields['ns_block_standard_borrow_string_to_block_pk_back_to_platemap_pk'].choices = choiceBorrowDataToPlateMap self.fields['ns_block_standard_borrow_string_to_block_pk_back_to_platemap_pk'].required = False self.fields['se_block_standard_borrow_string'].widget.attrs['class'] += ' required' self.fields['se_block_standard_borrow_string'].required = False # enable the selection of a plate to borrow standards from by letting the user see a string of info about the DATA BLOCK (not just the plate map!) se_block_standard_borrow_string = forms.ChoiceField() ns_block_standard_borrow_string_to_block_pk_back_to_platemap_pk = forms.ChoiceField() # pk of the file block borrowing when no standards on the current plate (store it here) form_block_standard_borrow_pk_single_for_storage = forms.IntegerField( required=False, ) # pk of the plate map associated with the file block borrowing when no standards on the current plate (store it here) form_block_standard_borrow_pk_platemap_single_for_storage = forms.IntegerField( required=False, ) # here here, remove these next two after checking other way works # form_hold_the_study_id = forms.IntegerField( # required=False, # ) # form_hold_the_platemap_id = forms.IntegerField( # required=False, # ) form_hold_the_data_block_metadata_string = forms.CharField( widget=forms.TextInput(attrs={'readonly': 'readonly', 'initial': '-'}) ) form_hold_the_omits_string = forms.CharField( widget=forms.TextInput(attrs={'readonly': 'readonly', 'initial': '-'}) ) form_hold_the_notes_string = forms.CharField( widget=forms.TextInput(attrs={'readonly': 'readonly', 'initial': '-'}) ) form_block_file_data_block_selected_pk_for_storage = forms.IntegerField( required=False, ) form_number_file_block_combos = forms.CharField(widget=forms.TextInput(attrs={'readonly': 'readonly'})) # string of selected file block (selected in dropdown) se_block_select_string = forms.ChoiceField() # pk of selected file block (stays lined up with the string) ns_block_select_pk = forms.ChoiceField() # END section to deal with raw data showing in the plate map after file assignment and deal with standard in a different file block # print(calibration_choices) # processing the data fields added se_form_calibration_curve = forms.ChoiceField( choices=( calibration_choices # 
('select_one', 'Select One'), # ('no_calibration', 'No Calibration'), # ('best_fit', 'Best Fit'), # ('logistic4', '4 Parameter Logistic w/fitted bounds'), # ('logistic4a0', '4 Parameter Logistic w/lower bound = 0'), # ('logistic4f', '4 Parameter Logistic w/user specified bound(s)'), # ('linear', 'Linear w/fitted intercept'), # ('linear0', 'Linear w/intercept = 0'), # ('log', 'Logarithmic'), # ('poly2', 'Quadratic Polynomial'), # ('select_one', 'Select One (n = standard concentration, s = signal)'), # ('no_calibration', 'No Calibration'), # ('best_fit', 'Best Fit'), # ('logistic4', '4 Parameter Logistic (s = ((A-D)/(1.0+((n/C)**B))) + D)'), # ('linear', 'Linear w/fitted intercept (s = B*n + A)'), # ('linear0', 'Linear w/intercept = 0 (s = B*n)'), # ('log', 'Logarithmic (s = B*ln(n) + A)'), # ('poly2', 'Polynomial (s = C*n**2 + B*n + A)'), ) ) # forms.CharField(widget=forms.TextInput(attrs={'readonly': 'readonly'})) # se_form_blank_handling = forms.ChoiceField(widget=forms.RadioSelect(attrs={'disabled': 'disabled'}), se_form_blank_handling = forms.ChoiceField( choices=(('subtracteachfromeach', 'Subtracting Average STANDARD Blanks from STANDARDS and Average SAMPLE Blanks from SAMPLES'), ('subtractstandardfromstandard', 'Subtracting Average STANDARD Blanks from STANDARDS (ignore sample blanks)'), ('subtractsamplefromsample', 'Subtracting Average SAMPLE Blanks from SAMPLES (ignore standard blanks)'), ('subtractstandardfromall', 'Subtracting Average STANDARD Blanks from the STANDARDS and SAMPLES'), ('subtractsamplefromall', 'Subtracting Average SAMPLE Blanks from the STANDARDS and SAMPLES'), ('ignore', 'Ignoring the Blanks')), initial='subtracteachfromeach' ) form_min_standard = forms.DecimalField( required=False, ) form_min_standard.widget.attrs.update({'class': 'form-control'}) form_max_standard = forms.DecimalField( required=False, ) form_max_standard.widget.attrs.update({'class': 'form-control'}) form_logistic4_A = forms.DecimalField( required=False, ) form_logistic4_A.widget.attrs.update({'class': 'form-control'}) form_logistic4_D = forms.DecimalField( required=False, ) form_logistic4_D.widget.attrs.update({'class': 'form-control'}) form_data_processing_multiplier = forms.DecimalField( required=False, initial=1, ) form_data_processing_multiplier.widget.attrs.update({'class': 'form-control'}) # works but only one line # form_data_processing_multiplier_string = forms.CharField( # required=False, # initial="", # ) # works but only one line # form_data_processing_multiplier_string = forms.CharField() # form_data_processing_multiplier_string.widget.attrs.update({'required': False, 'initial': ""}) # HANDY - how to make an extra field a widget so can manipulate it eg readonly # form_data_processing_multiplier_string = forms.CharField( # widget=forms.Textarea(attrs={'rows': 3, 'readonly': 'readonly', 'required': False}) # ) # # form_data_processing_multiplier_string_short = forms.CharField( # widget=forms.Textarea(attrs={'rows': 1, 'readonly': 'readonly'})) # form_data_processing_multiplier_value_short = forms.CharField( # widget=forms.Textarea(attrs={'rows': 1, 'readonly': 'readonly'})) # # form_data_processing_multiplier_string1 = forms.CharField( # widget=forms.Textarea(attrs={'rows': 2, 'readonly': 'readonly'})) # form_data_processing_multiplier_string2 = forms.CharField( # widget=forms.Textarea(attrs={'rows': 2, 'readonly': 'readonly'})) # form_data_processing_multiplier_string3 = forms.CharField( # widget=forms.Textarea(attrs={'rows': 2, 'readonly': 'readonly'})) # 
form_data_processing_multiplier_string4 = forms.CharField( # widget=forms.Textarea(attrs={'rows': 2, 'readonly': 'readonly'})) # form_data_processing_multiplier_string5 = forms.CharField( # widget=forms.Textarea(attrs={'rows': 2, 'readonly': 'readonly'})) # form_data_processing_multiplier_string6 = forms.CharField( # widget=forms.Textarea(attrs={'rows': 2, 'readonly': 'readonly'})) # form_data_processing_multiplier_string7 = forms.CharField( # widget=forms.Textarea(attrs={'rows': 2, 'readonly': 'readonly'})) # form_data_processing_multiplier_string8 = forms.CharField( # widget=forms.Textarea(attrs={'rows': 2, 'readonly': 'readonly'})) # form_data_processing_multiplier_string9 = forms.CharField( # widget=forms.Textarea(attrs={'rows': 2, 'readonly': 'readonly'})) form_data_parsable_message = forms.CharField( widget=forms.Textarea(attrs={'rows': 6, 'readonly': 'readonly', 'required': False}) ) form_calibration_curve_method_used = forms.CharField( widget=forms.TextInput(attrs={'readonly': 'readonly', 'required': False, 'initial': '-'}) ) # form_calibration_equation = forms.CharField( # widget=forms.TextInput(attrs={'readonly': 'readonly', 'required': False, 'initial': '-'}) # ) # form_calibration_rsquared = forms.CharField( # widget=forms.TextInput(attrs={'readonly': 'readonly','initial': '-'}) # ) radio_replicate_handling_average_or_not = forms.ChoiceField( # widget=forms.RadioSelect(attrs={'id': 'value'}), widget=forms.RadioSelect, choices=[ ('average', 'Show Averages the Replicate Samples'), ('each', 'Show Each Sample')]) # ('average', 'Send the Average of the Replicates to the Study Summary'), # ('each', 'Send Each Replicates Value to the Study Summary')]) radio_standard_option_use_or_not = forms.ChoiceField( required=False, widget=forms.RadioSelect, choices=[('no_calibration', 'No Calibration'), ('pick_block', 'Pick a Block of Data with Standards')]) # going to need to pass some calibration parameters # think the max I will need is 5 for 5 parameter logistic # going to need to keep track of order # form_calibration_parameter_1_string = forms.CharField( # widget=forms.TextInput(attrs={'readonly': 'readonly','initial': '-'}) # ) # form_calibration_parameter_2_string = forms.CharField( # widget=forms.TextInput(attrs={'readonly': 'readonly', 'initial': '-'}) # ) # form_calibration_parameter_3_string = forms.CharField( # widget=forms.TextInput(attrs={'readonly': 'readonly', 'initial': '-'}) # ) # form_calibration_parameter_4_string = forms.CharField( # widget=forms.TextInput(attrs={'readonly': 'readonly', 'initial': '-'}) # ) # form_calibration_parameter_5_string = forms.CharField( # widget=forms.TextInput(attrs={'readonly': 'readonly','initial': '-'}) # ) # form_calibration_parameter_1_value = forms.CharField( # widget=forms.TextInput(attrs={'readonly': 'readonly','initial': '-'}) # ) # form_calibration_parameter_2_value = forms.CharField( # widget=forms.TextInput(attrs={'readonly': 'readonly','initial': '-'}) # ) # form_calibration_parameter_3_value = forms.CharField( # widget=forms.TextInput(attrs={'readonly': 'readonly','initial': '-'}) # ) # form_calibration_parameter_4_value = forms.CharField( # widget=forms.TextInput(attrs={'readonly': 'readonly','initial': '-'}) # ) # form_calibration_parameter_5_value = forms.CharField( # widget=forms.TextInput(attrs={'readonly': 'readonly','initial': '-'}) # ) form_calibration_standard_fitted_min_for_e = forms.CharField( widget=forms.TextInput(attrs={'readonly': 'readonly','initial': '-'}) ) form_calibration_standard_fitted_max_for_e = 
forms.CharField( widget=forms.TextInput(attrs={'readonly': 'readonly','initial': '-'}) ) form_calibration_sample_blank_average = forms.CharField( widget=forms.TextInput(attrs={'readonly': 'readonly','initial': '-'}) ) form_calibration_standard_standard0_average = forms.CharField( widget=forms.TextInput(attrs={'readonly': 'readonly','initial': '-'}) ) form_calibration_method = forms.CharField( widget=forms.TextInput(attrs={'readonly': 'readonly', 'initial': '-'}) ) form_calibration_target = forms.CharField( widget=forms.TextInput(attrs={'readonly': 'readonly', 'initial': '-'}) ) form_calibration_unit = forms.CharField( widget=forms.TextInput(attrs={'readonly': 'readonly', 'initial': '-'}) ) form_number_standards_this_plate = forms.IntegerField( required=False, initial=1, ) form_make_mifc_on_submit = forms.BooleanField(required=False) # Let them name the maps the same if they really want to. Does not really matter to me # def clean(self): # # HANDY FORCE UNIQUE - this will return back to the form instead of showing the user an error # cleaned_data = super(AssayPlateReaderMapForm, self).clean() # # if AssayPlateReaderMap.objects.filter( # study_id=self.instance.study.id, # name=self.cleaned_data.get('name', '') # ).exclude(pk=self.instance.pk).count(): # raise forms.ValidationError({'name': ['Plate Map name must be unique within study. This plate map is now corrupted. Go back to the Plate Map List and click to Add Plate Map and start again.']}) # # return cleaned_data def clean(self): # First thing in clean # Call super for data data = super(AssayPlateReaderMapForm, self).clean() # After initial stuff done self.process_file(save=False, called_from='clean') return data def save(self, commit=True): # First thing in save # Make sure to pass commit to the super call (don't want accidental saves) map = super(AssayPlateReaderMapForm, self).save(commit=commit) # Only save the file if commit is true if commit: self.process_file(save=True, called_from="save") return map def process_file(self, save=False, called_from="c"): #### START When saving AssayPlateReaderMapUpdate after a calibration # if user checked the box to send to study summary, make that happen data = self.cleaned_data # study = get_object_or_404(AssayStudy, pk=self.kwargs['study_id']) if data.get('form_make_mifc_on_submit'): # print(".unit ",data.get('standard_unit').unit) # print(".id ", data.get('standard_unit').id) # .unit # µg / mL # .id # 6 # print(".unit ",data.get('standard_unit').unit) # print(".id ", data.get('standard_unit').id) if data.get('form_block_standard_borrow_pk_single_for_storage') == None: borrowed_block_pk = -1 else: borrowed_block_pk = data.get('form_block_standard_borrow_pk_single_for_storage') if data.get('form_block_standard_borrow_pk_platemap_single_for_storage') == None: borrowed_platemap_pk = -1 else: borrowed_platemap_pk = data.get( 'form_block_standard_borrow_pk_platemap_single_for_storage') # 20201104 when no_calibration is selected, the _used field does not get populated..deal with it here use_curve = 'no_calibration' use_curve_long = data.get('form_calibration_curve_method_used') if data.get('se_form_calibration_curve') == 'no_calibration' or data.get('se_form_calibration_curve') == 'select_one': use_curve_long = 'no_calibration' else: use_curve = find_a_key_by_value_in_dictionary(CALIBRATION_CURVE_MASTER_DICT, use_curve_long) if len(use_curve.strip()) == 0: err_msg = "The calibration method " + use_curve_long + " was not found in the cross reference list." 
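# Context for the reverse lookup above (the dictionary contents shown here are an
# assumption for illustration; the real mapping lives in CALIBRATION_CURVE_MASTER_DICT):
# the form stores the long human-readable method name, so it is reverse-looked-up to the
# short key the processing code expects, e.g.
#
#     # assume CALIBRATION_CURVE_MASTER_DICT contains
#     # {'linear': 'Linear w/fitted intercept (s = B*n + A)'}
#     use_curve = find_a_key_by_value_in_dictionary(
#         CALIBRATION_CURVE_MASTER_DICT, 'Linear w/fitted intercept (s = B*n + A)'
#     )   # -> 'linear'; an empty string means the long name was not recognized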
# print(err_msg) raise forms.ValidationError(err_msg) # form.instance.study # make a dictionary to send to the utils.py when call the function set_dict = { 'called_from': 'form_save', 'study': self.instance.study.id, 'pk_platemap': self.instance.id, 'pk_data_block': data.get('form_block_file_data_block_selected_pk_for_storage'), 'plate_name': data.get('name'), 'form_calibration_curve': use_curve, 'multiplier': data.get('form_data_processing_multiplier'), 'unit': data.get('form_calibration_unit'), 'standard_unit': data.get('standard_unit').unit, 'form_min_standard': data.get('form_calibration_standard_fitted_min_for_e'), 'form_max_standard': data.get('form_calibration_standard_fitted_max_for_e'), 'form_logistic4_A': data.get('form_logistic4_A'), 'form_logistic4_D': data.get('form_logistic4_D'), 'form_blank_handling': data.get('se_form_blank_handling'), 'radio_standard_option_use_or_not': data.get('radio_standard_option_use_or_not'), 'radio_replicate_handling_average_or_not_0': data.get( 'radio_replicate_handling_average_or_not'), 'borrowed_block_pk': borrowed_block_pk, 'borrowed_platemap_pk': borrowed_platemap_pk, 'count_standards_current_plate': data.get('form_number_standards_this_plate'), 'target': data.get('form_calibration_target'), 'method': data.get('form_calibration_method'), 'time_unit': data.get('time_unit'), 'volume_unit': data.get('volume_unit'), 'user_notes': data.get('form_hold_the_notes_string'), 'user_omits': data.get('form_hold_the_omits_string'), 'plate_size': data.get('device'), } # this function is in utils.py that returns data data_mover = plate_reader_data_file_process_data(set_dict) # 20201105 one row of data mover # {'matrix_item_name': '13', 'cross_reference': 'Plate Reader Tool', 'plate_name': 'map-20201105-07:47:13', # 'well_name': 'D7 C7 E7', 'day': '1.0', 'hour': '0', 'minute': '0', 'target': 'Decay Time', # 'subtarget': 'none', 'method': 'EarlyTox Cardiotoxicity Kit (Molecular Devices: R8211)', # 'location_name': 'Basolateral', 'processed_value': '25195871.42980029', 'unit': 'ng/mL', 'replicate': 1, # 'caution_flag': '', 'exclude': ' ', 'notes': '', # 'sendmessage': 'Fitting method: linear; Standard minimum: 0.0; Standard maximum: 100.0; '}, { # 'matrix_item_name': '13', 'cross_reference': 'Plate Reader Tool', 'plate_name': 'map-20201105-07:47:13', # 'well_name': 'C8 E8 D8', 'day': '2.0', 'hour': '0', 'minute': '0', 'target': 'Decay Time', # 'subtarget': 'none', 'method': 'EarlyTox Cardiotoxicity Kit (Molecular Devices: R8211)', # 'location_name': 'Basolateral', 'processed_value': '24630641.60638611', 'unit': 'ng/mL', 'replicate': 1, # 'caution_flag': '', 'exclude': ' ', 'notes': '', # 'sendmessage': 'Fitting method: linear; Standard minimum: 0.0; Standard maximum: 100.0; '}, { # 'matrix_item_name': '13', 'cross_reference': 'Plate Reader Tool', 'plate_name': 'map-20201105-07:47:13', # 'well_name': 'C9 E9 D9', 'day': '3.0', 'hour': '0', 'minute': '0', 'target': 'Decay Time', # 'subtarget': 'none', 'method': 'EarlyTox Cardiotoxicity Kit (Molecular Devices: R8211)', # 'location_name': 'Basolateral', 'processed_value': '34903839.32472848', 'unit': 'ng/mL', 'replicate': 1, # 'caution_flag': '', 'exclude': ' ', 'notes': '', # 'sendmessage': 'Fitting method: linear; Standard minimum: 0.0; Standard maximum: 100.0; '} utils_key_column_header = { 'matrix_item_name': COLUMN_HEADERS[0], 'cross_reference': COLUMN_HEADERS[1], 'plate_name': COLUMN_HEADERS[2], 'well_name': COLUMN_HEADERS[3], 'day': COLUMN_HEADERS[4], 'hour': COLUMN_HEADERS[5], 'minute': COLUMN_HEADERS[6], 
'target': COLUMN_HEADERS[7], 'subtarget': COLUMN_HEADERS[8], 'method': COLUMN_HEADERS[9], 'location_name': COLUMN_HEADERS[10], 'processed_value': COLUMN_HEADERS[11], 'unit': COLUMN_HEADERS[12], 'replicate': COLUMN_HEADERS[13], 'caution_flag': COLUMN_HEADERS[14], 'exclude': COLUMN_HEADERS[15], 'notes': COLUMN_HEADERS[16], 'sendmessage': 'Processing Details' } column_table_headers_average = list(COLUMN_HEADERS) column_table_headers_average.append('Processing Details') # what comes back in 9 is a dictionary of data rows with dict keys as shown in utils_key_column_header list_of_dicts = data_mover[9] list_of_lists_mifc_headers_row_0 = [None] * (len(list_of_dicts) + 1) list_of_lists_mifc_headers_row_0[0] = column_table_headers_average i = 1 for each_dict_in_list in list_of_dicts: list_each_row = [] for this_mifc_header in column_table_headers_average: # find the key in the dictionary that we need utils_dict_header = find_a_key_by_value_in_dictionary(utils_key_column_header, this_mifc_header) # get the value that is associated with this header in the dict this_value = each_dict_in_list.get(utils_dict_header) # add the value to the list for this dict in the list of dicts list_each_row.append(this_value) # when down with the dictionary, add the complete list for this row to the list of lists list_of_lists_mifc_headers_row_0[i] = list_each_row i = i + 1 # First make a csv from the list_of_lists (using list_of_lists_mifc_headers_row_0) # or self.objects.study my_study = self.instance.study # my_user = self.request.user my_user = self.user my_platemap = self.instance my_data_block_pk = data.get('form_block_file_data_block_selected_pk_for_storage') platenamestring1 = str(my_platemap) metadatastring1 = str(data.get('form_hold_the_data_block_metadata_string')) # Specify the file for use with the file uploader class # some of these caused errors in the file name so remove them # Luke and Quinn voted for all the symbols out instead of a few platenamestring = re.sub('[^a-zA-Z0-9_]', '', platenamestring1) metadatastring = re.sub('[^a-zA-Z0-9_]', '', metadatastring1) name_the_file = 'PLATE-{}-{}--METADATA-{}-{}'.format( my_platemap.id, platenamestring, my_data_block_pk, metadatastring ) # PLEASE NOTE THE NAIVE TRUNCATION HERE # Revise soon bulk_location = upload_file_location( my_study, name_the_file )[:97] # Make sure study has directories if not os.path.exists(MEDIA_ROOT + '/data_points/{}'.format(my_study.id)): os.makedirs(MEDIA_ROOT + '/data_points/{}'.format(my_study.id)) # Need to import from models # Avoid magic string, use media location file_location = MEDIA_ROOT.replace('mps/../', '', 1) + '/' + bulk_location + '.csv' # Should make a csv writer to avoid repetition file_to_write = open(file_location, 'w') csv_writer = csv.writer(file_to_write, dialect=csv.excel) # Add the UTF-8 BOM list_of_lists_mifc_headers_row_0[0][0] = '\ufeff' + list_of_lists_mifc_headers_row_0[0][0] # Write the lines here here uncomment this for one_line_of_data in list_of_lists_mifc_headers_row_0: csv_writer.writerow(one_line_of_data) file_to_write.close() new_mifc_file = open(file_location, 'rb') file_processor = AssayFileProcessor( new_mifc_file, my_study, my_user, save=save, full_path='/media/' + bulk_location + '.csv' ) # Process the file file_processor.process_file() #### END When saving AssayPlateReaderMapUpdate after a calibration def find_a_key_by_value_in_dictionary(this_dict, this_header): """This is a function to find a key by value.""" my_key = '' for key, value in this_dict.items(): if value == this_header: my_key 
= key break return my_key # There should be a complete set of items for each saved plate map (one for each well in the selected plate) class AssayPlateReaderMapItemForm(forms.ModelForm): """Form for Assay Plate Reader Map Item""" class Meta(object): model = AssayPlateReaderMapItem # exclude = tracking + ('study',) fields = [ # 'id', do not need 'matrix_item', 'location', 'name', # 'row_index', # 'column_index', 'plate_index', 'standard_value', 'dilution_factor', 'collection_volume', 'collection_time', 'default_time', 'well_use', ] # keep here for reference of what not to do if want form to be selectized # def __init__(self, *args, **kwargs): # super(AssayPlateReaderMapItemForm, self).__init__(*args, **kwargs) # self.fields['name'].widget.attrs.update({'class': ' no-selectize'}) # # 20200428 for user entered information # # 20200609 these are adding too much overhead, and did not use in data process, so remove these # form_user_entered_notes = forms.CharField( # initial='-', # required=False, # widget=forms.Textarea(attrs={'cols': 10, 'rows': 1}), # ) # form_user_entered_omit_from_average = forms.BooleanField(required=False, ) ########### # 20200522 getting rid of the value form all together since not allowing editing after values attached to plate map. # GET RID OF THIS # # Item VALUES are sets that correspond to items. Each set should have a match to a well in the plate map. # # If not file/blocks attached to plate map, will have one set of values (with one value for each item) # # If one file/block attached to plate map, will have two sets of values (one for the file, one null file) etc. # class AssayPlateReaderMapItemValueForm(forms.ModelForm): # """Form for Assay Plate Reader Map Item Value""" # # # 20200113 - changing so this formset is only called when adding and when update or view when no data are yet attached # # class Meta(object): # model = AssayPlateReaderMapItemValue # # it is worth noting that there is a nuance to excluding or setting fields # # exclude = tracking + ('study', ) # fields = [ # # 'id', do not need # # 'assayplatereadermapdatafile', do not need # # 'assayplatereadermapitem', do not need # # next item - can remove later - do not need since, if there are matches, this formset will not be called # # but check rest is working first since will also affect formset (the custom_fields) # # 'assayplatereadermapdatafileblock', # 'plate_index', # 'raw_value', # 'time', # 'well_use', # ] ########### # Formset for items # IMPORTANT - custom_fields remove the select options for all the formsets - saves ALOT of page load time is long lists class AssayPlateReaderMapItemFormSet(BaseInlineFormSetForcedUniqueness): custom_fields = ( 'matrix_item', 'location', ) def __init__(self, *args, **kwargs): self.study = kwargs.pop('study', None) self.user = kwargs.pop('user', None) super(AssayPlateReaderMapItemFormSet, self).__init__(*args, **kwargs) if not self.study: self.study = self.instance.study # use the filter to get matrix items in this study ONLY - makes the dic much smaller # HANDY - this speed up the custom_fields filters = {'matrix_item': {'study_id': self.study.id}} self.dic = get_dic_for_custom_choice_field(self, filters=filters) for form in self.forms: for field in self.custom_fields: form.fields[field] = DicModelChoiceField(field, self.model, self.dic) if self.study: form.instance.study = self.study if form.instance.pk: form.instance.modified_by = self.user else: form.instance.created_by = self.user ########### # 20200522 getting rid of the value form all together since not 
allowing editing after values attached to plate map. # # GET RID OF THIS # # Formset for item values # class AssayPlateReaderMapItemValueFormSet(BaseInlineFormSetForcedUniqueness): # # changed way this worked on 20200114 and do not need this field any more # # custom_fields = ( # # 'assayplatereadermapdatafileblock', # # ) # # def __init__(self, *args, **kwargs): # self.study = kwargs.pop('study', None) # self.user = kwargs.pop('user', None) # super(AssayPlateReaderMapItemValueFormSet, self).__init__(*args, **kwargs) # # if not self.study: # self.study = self.instance.study # # # changed way this worked on 20200114 and do not need this field any more - skip making the dic... # # # use the filter to get matrix items in this study ONLY - makes the dic much smaller # # # this speed up the custom_fields # # filters = {'assayplatereadermapdatafileblock': {'study_id': self.study.id}} # # self.dic = get_dic_for_custom_choice_field(self, filters=filters) # # # print(self.dic) # # # for form in self.forms: # # for field in self.custom_fields: # # form.fields[field] = DicModelChoiceField(field, self.model, self.dic) # # if self.study: # form.instance.study = self.study # if form.instance.pk: # form.instance.modified_by = self.user # else: # form.instance.created_by = self.user # # # HANDY had this up before the self.forms loop, but needed to move it down to work # # HANDY to know how to print a queryset to the console # # self.queryset = self.queryset.order_by('assayplatereadermapdatafile', 'assayplatereadermapdatafileblock', 'plate_index') # # https://stackoverflow.com/questions/13387446/changing-the-display-order-of-forms-in-a-formset # # print(self.queryset) # self.queryset = self.queryset.order_by('assayplatereadermapdatafileblock', 'plate_index') # # print(self.queryset) ########### # Formset factory for item and value # https://stackoverflow.com/questions/29881734/creating-django-form-from-more-than-two-models AssayPlateReaderMapItemFormSetFactory = inlineformset_factory( AssayPlateReaderMap, AssayPlateReaderMapItem, formset=AssayPlateReaderMapItemFormSet, form=AssayPlateReaderMapItemForm, extra=1, exclude=tracking + ('study',), ) ########### # 20200522 getting rid of the value form all together since not allowing editing after values attached to plate map. 
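# Minimal usage sketch for AssayPlateReaderMapItemFormSetFactory defined above (the view
# code and variable names are hypothetical): because the item formset's __init__ pops
# 'study' and 'user', those are passed as extra kwargs when the factory is instantiated,
# e.g.
#
#     item_formset = AssayPlateReaderMapItemFormSetFactory(
#         request.POST or None,
#         instance=plate_reader_map,
#         study=study,
#         user=request.user,
#     )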
# # GET RID OF THIS # AssayPlateReaderMapItemValueFormSetFactory = inlineformset_factory( # AssayPlateReaderMap, # AssayPlateReaderMapItemValue, # formset=AssayPlateReaderMapItemValueFormSet, # form=AssayPlateReaderMapItemValueForm, # extra=1, # exclude=tracking + ('study',), # ) ########## # end plate reader map page ##### ##### # Start plate reader file page # Add a plate reader file to the study (just add the file and check the file extension, no data processing) class AssayPlateReaderMapDataFileAddForm(BootstrapForm): """Form for Plate Reader Data File Upload""" class Meta(object): model = AssayPlateReaderMapDataFile fields = ('plate_reader_file', ) def __init__(self, *args, **kwargs): self.request = kwargs.pop('request', None) self.study = kwargs.pop('study', None) super(AssayPlateReaderMapDataFileAddForm, self).__init__(*args, **kwargs) # need or blank study_assay in the update page (add page worked okay) if not self.study and self.instance.study: self.study = self.instance.study if self.study: self.instance.study = self.study # check the file extension of the loaded file to make sure the user is not adding spreadsheet files # https://medium.com/@literallywords/server-side-file-extension-validation-in-django-2-1-b8c8bc3245a0 def clean_plate_reader_file(self): data = self.cleaned_data['plate_reader_file'] # Run file extension check file_extension = os.path.splitext(data.name)[1] if file_extension not in ['.csv', '.tsv', '.txt']: if '.xl' in file_extension or '.wk' in file_extension or '.12' in file_extension: raise ValidationError( 'This appears to be an spreadsheet file. To upload, export to a tab delimited file and try again.', code='invalid' ) else: raise ValidationError( 'Invalid file extension - must be in [.csv, .tsv, .txt]', code='invalid' ) return data # UPDATE and VIEW (ADD is separate - above) - user routed here after adding a file to complete other needed info class AssayPlateReaderMapDataFileForm(BootstrapForm): """Form for Assay Plate Reader Map Data File""" class Meta(object): model = AssayPlateReaderMapDataFile fields = ['id', 'description', 'file_delimiter', 'upload_plate_size', 'plate_reader_file', ] widgets = { 'description': forms.Textarea(attrs={'cols': 50, 'rows': 3}), 'upload_plate_size': forms.TextInput(attrs={'readonly': 'readonly', 'style': 'border-style: none;', 'style': 'background-color: transparent;' }), } def __init__(self, *args, **kwargs): self.study = kwargs.pop('study', None) # self.user = kwargs.pop('user', None) # # ?? 
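# Illustrative note on the extension check in clean_plate_reader_file above (file names
# are invented): os.path.splitext keeps the final suffix including the dot, so
#
#     os.path.splitext('readings.txt')[1]   # -> '.txt'  (accepted)
#     os.path.splitext('readings.xlsx')[1]  # -> '.xlsx' (contains '.xl', spreadsheet error)
#     os.path.splitext('readings.dat')[1]   # -> '.dat'  (generic invalid-extension error)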
# filename_only = kwargs.pop('extra', 0) super(AssayPlateReaderMapDataFileForm, self).__init__(*args, **kwargs) # need this because, remember, the plate map doesn't come WITH a study, must tell it which if not self.study and self.instance.study: self.study = self.instance.study if self.study: self.instance.study = self.study my_instance = self.instance # to display the file name without the whole path form_filename_only = os.path.basename(str(my_instance.plate_reader_file)) self.fields['form_filename_only'].initial = form_filename_only se_form_plate_size = forms.ChoiceField( required=False, choices=assay_plate_reader_map_info_plate_size_choices ) form_number_blocks = forms.IntegerField( required=False, initial=1, ) form_number_blank_columns = forms.IntegerField( required=False, initial=0, ) form_number_blank_rows = forms.IntegerField( required=False, initial=0, ) form_filename_only = forms.CharField( required=False, ) # PI wants to select options for file processing # Currently, the choices for file formats are HARDCODED here # if we actually iron out the 'sanctioned' file formats, these could go into a table and be available in the admin # BUT, reading/processing of the format would still need to be build, so maybe better NOT to put in admin.... se_file_format_select = forms.ChoiceField( required=False, initial=0, choices=( (0, 'COMPUTERS BEST GUESS'), (1, 'Softmax Pro 5.3 Molecular Devices M5 (UPDDI DoO)'), (10, 'Single data block with 1 column of row labels and 1 row of column headers'), # (96, 'One 96 plate (8 lines by 12 columns) starting at line 1 column 1 (CSV) - requested by Larry V.'), # (384, 'One 384 plate (16 lines by 24 columns) starting at line 1 column 1 (CSV) - requested by Larry V.'), # (2, 'Wallac EnVision Manager Version 1.12 (EnVision)'), (9999, 'USER CUSTOMIZES by Setting Format Information'), ) ) class AssayPlateReaderMapDataFileBlockForm(forms.ModelForm): """Form for Assay Plate Reader Data File Block """ class Meta(object): model = AssayPlateReaderMapDataFileBlock # fields = ('id', 'data_block', 'data_block_metadata', 'line_start', 'line_end', 'delimited_start', 'delimited_end', 'over_write_sample_time', 'assayplatereadermap']) exclude = tracking + ('study',) # this could go in AssayPlateReaderMapDataFileBlockForm or AssayPlateReaderMapFileBlockFormSet # but if do in formset, the widgets down get the form control! 
# fields = ('id', 'data_block', 'data_block_metadata', 'line_start', 'line_end', 'delimited_start', 'delimited_end', 'over_write_sample_time', 'assayplatereadermap') widgets = { # 'form_selected_plate_map_time_unit': forms.TextInput(attrs={'readonly': 'readonly', # 'style': 'background-color: transparent;', # }), 'data_block': forms.NumberInput(attrs={'readonly': 'readonly', # 'style': 'box-shadow:inset 0px, 0px 0px ;', # 'style': 'border-style: none;', # 'style': 'border-width: 0;', # 'style': 'border-color: transparent;', 'style': 'background-color: transparent;', }), 'line_start': forms.NumberInput(attrs={'class': 'form-control '}), 'line_end': forms.NumberInput(attrs={'class': 'form-control'}), # 'line_end': forms.NumberInput(attrs={'class': 'form-control required'}), # 'line_end': forms.NumberInput(attrs={'readonly': 'readonly', # 'style': 'background-color: transparent;',}), 'delimited_start': forms.NumberInput(attrs={'class': 'form-control '}), 'delimited_end': forms.NumberInput(attrs={'class': 'form-control'}), # 'delimited_end': forms.NumberInput(attrs={'readonly': 'readonly', # 'style': 'background-color: transparent;',}), 'over_write_sample_time': forms.NumberInput(attrs={'class': 'form-control'}), 'form_selected_plate_map_time_unit': forms.NumberInput(attrs={'readonly': 'readonly', 'style': 'background-color: transparent;',}), 'data_block_metadata': forms.Textarea(attrs={'cols': 80, 'rows': 1, 'class': 'form-control'}), } def __init__(self, *args, **kwargs): # Get the study self.study = kwargs.pop('study', None) self.user = kwargs.pop('user', None) super(AssayPlateReaderMapDataFileBlockForm, self).__init__(*args, **kwargs) # this made the dropdown behave when copied with the formset! # SUPER IMPORTANT and HANDY when need to copy formsets with dropdowns - if have selectized, it is a big mess self.fields['assayplatereadermap'].widget.attrs.update({'class': ' no-selectize required'}) # not currently using to limit what is removed from the map item table - consider added this feature later form_changed_something_in_block = forms.IntegerField( initial=0, required=False, ) form_selected_plate_map_time_unit = forms.CharField( required=False, ) # formsets class AssayPlateReaderMapFileBlockFormSet(BaseInlineFormSetForcedUniqueness): custom_fields_for_limiting_list = ( 'assayplatereadermap', ) # tried putting this in the Form, but had some issues # print(self.fields['assayplatereadermap'].queryset) # # # # next line makes it work # self.study = 293 # print(self.study) # self.fields['assayplatereadermap'].queryset = AssayPlateReaderMap.objects.filter( # study_id=self.study # ) # # # print(self.fields['assayplatereadermap'].queryset) def __init__(self, *args, **kwargs): # Get the study self.study = kwargs.pop('study', None) self.user = kwargs.pop('user', None) super(AssayPlateReaderMapFileBlockFormSet, self).__init__(*args, **kwargs) if not self.study: self.study = self.instance.study idx = 0 for formset in self.forms: for field in self.custom_fields_for_limiting_list: formset.fields[field].queryset = AssayPlateReaderMap.objects.filter( study_id=self.study # study_id=293 ) if self.study: formset.instance.study = self.study if formset.instance.pk: formset.instance.modified_by = self.user else: formset.instance.created_by = self.user idx = idx + 1 AssayPlateReaderMapDataFileBlockFormSetFactory = inlineformset_factory( AssayPlateReaderMapDataFile, AssayPlateReaderMapDataFileBlock, formset=AssayPlateReaderMapFileBlockFormSet, form=AssayPlateReaderMapDataFileBlockForm, extra=1, exclude=tracking 
+ ('study',), ) # ASSAY PLATE MAP END ##### ##### # Start omics section # to work correctly, there is a study method, target, unit that is stored in the study setup # these are saved with the uploaded file # the only thing we care about is that the target selected is associated with category "Gene Expression" # OMIC RULES - All method, target, unit (for both category "Gene Expression" and "Computational") must be IN a priori # OMIC RULES - the target selected in the assay setup must have category "Gene Expression" # example: # make/confirm methods, such as TempO-Seq and DESeq2 # make/confirm targets, such as Human 1500+ and assign to method TempO-Seq (category: Gene Expression) # make/confirm targets, such as baseMean and assign to method DESeq2 (category: Computational) # make/confirm categories Gene Expression and Computational and assign the targets to them (as indicated above) # OMIC RULES - The table AssayOmicAnalysisTarget must have a row for each computational target a priori # OMIC RULES - The table AssayOmicAnalysisTarget field data_type content must match exactly to the hard coded options in assay_omic_data_type_choices # OMIC RULES - The table AssayOmicAnalysisTarget field name content must match exactly the column headers of the input file (INCLUDING THE CASE - at least, as of 20200902) # OMIC RULES - The table AssayOmicAnalysisTarget field method content must match exactly method selected in the GUI as the Data Analysis Method # monkey patch to display method target and unit combo as needed in the assay omic page # originally was going to display this, but not sure if will need to display anywhere, but, since already used for querying, just keep it class AbstractClassAssayStudyAssayOmic(AssayStudyAssay): class Meta: proxy = True def __str__(self): return 'TARGET: {0} METHOD: {1} UNIT: {2}'.format(self.target, self.method, self.unit) omic_upload_fields_require_file_reprocessing = ['omic_data_file', 'data_type', 'analysis_method'] class AssayOmicDataFileUploadForm(BootstrapForm): """Form Upload an AssayOmicDataFileUpload file and associated metadata """ # since the metadata for the log2fc is by group, and is collected once for each file, it is stored with the upload file # the metadata for the count data is stored separately (and linked by sample name/file column header) # this was partly due to the evolution of the project and partly due to the expressed preference of a project PI class Meta(object): model = AssayOmicDataFileUpload exclude = tracking + ('study',) def __init__(self, *args, **kwargs): self.study = kwargs.pop('study', None) super(AssayOmicDataFileUploadForm, self).__init__(*args, **kwargs) if not self.study and self.instance.study: self.study = self.instance.study if self.study: self.instance.study = self.study # for now, limit to the same study - we may need to revisit this when we think about inter-study data_groups_filtered = AssayGroup.objects.filter( study_id=self.instance.study.id ) # The rules for getting the list of study assays in the upload GUI # Rule 1: category = gene expression; Rule 2 the target must be associated to that category study_assay_queryset = AbstractClassAssayStudyAssayOmic.objects.filter( study_id=self.instance.study.id, ).filter( target__in=AssayTarget.objects.filter(assaycategory__name="Gene Expression") ).prefetch_related( 'target', 'method', 'unit', ) number_of_omic_targets_in_study = study_assay_queryset.count() # above, tried to get the omic targets, but, if did not find any, get all the assays if number_of_omic_targets_in_study == 
0: study_assay_queryset = AbstractClassAssayStudyAssayOmic.objects.filter( study_id=self.study ).prefetch_related( 'target', 'method', 'unit', ) self.fields['study_assay'].queryset = study_assay_queryset # making the best guess, based on what is in Assays tab, on what should be the form initials initial_study_assay = None initial_data_type = None for each in study_assay_queryset: #note that the unit table does not use name, it uses unit, hence unit.unit this_unit = each.unit.unit.lower() # may need to change this to give something else a priority (this is just to get an initial one) # Mark had units of 'Fold Change' and 'Count', then switched to not specified # Tongying used 'Unitless' for all omic data. if 'ct' in this_unit: # get one of the counts initial_study_assay = each.id initial_data_type = 'normcounts' break elif 'fold' in this_unit or 'fc' in this_unit: # so a unit with a fold in it will get priority initial_study_assay = each.id initial_data_type = 'log2fc' break else: # result will be it just gets the last one initial_study_assay = each.id initial_data_type = 'log2fc' self.fields['study_assay'].initial = initial_study_assay self.fields['default_data_type'].initial = initial_data_type omic_computational_methods_s = AssayOmicAnalysisTarget.objects.values('method') omic_computational_methods = AssayMethod.objects.filter( id__in=omic_computational_methods_s ) initial_computational_method = None # just get the first one for the default, if there is one if len(omic_computational_methods) > 0: for each in omic_computational_methods: initial_computational_method = each break initial_computational_methods = omic_computational_methods self.fields['analysis_method'].queryset = initial_computational_methods self.fields['analysis_method'].initial = initial_computational_method # HANDY to limit options in a dropdown on a model field in a form self.fields['group_1'].queryset = data_groups_filtered self.fields['group_2'].queryset = data_groups_filtered # when these are visible, they should be class required # HANDY for adding classes in forms # the following could remove other classes, so stick with the below # NO self.fields['group_1'].widget.attrs.update({'class': ' required'}) # YES self.fields['group_1'].widget.attrs['class'] += 'required' self.fields['group_1'].widget.attrs['class'] += ' required' self.fields['group_2'].widget.attrs['class'] += ' required' if self.instance.time_1: time_1_instance = self.instance.time_1 times_1 = get_split_times(time_1_instance) self.fields['time_1_day'].initial = times_1.get('day') self.fields['time_1_hour'].initial = times_1.get('hour') self.fields['time_1_minute'].initial = times_1.get('minute') if self.instance.time_2: time_2_instance = self.instance.time_2 times_2 = get_split_times(time_2_instance) self.fields['time_2_day'].initial = times_2.get('day') self.fields['time_2_hour'].initial = times_2.get('hour') self.fields['time_2_minute'].initial = times_2.get('minute') # HANDY for adding classes in forms # NO self.fields['group_1'].widget.attrs.update({'class': ' required'}) # YES self.fields['group_1'].widget.attrs['class'] += 'required' # BUT, the above does not work on selectized, just do addClass in javascript # i.e.: $('#id_time_unit').next().addClass('required'); # Luke wanted to use DHM, so, went back to that. 
Hold in case gets outvoted # self.fields['time_1_display'].widget.attrs.update({'class': ' form-control required'}) # self.fields['time_2_display'].widget.attrs.update({'class': ' form-control required'}) # time_unit_instance = self.instance.time_unit # if self.instance.time_1: # time_1_instance = self.instance.time_1 # ctime = sck_general_convert_time_from_minutes_to_unit_given(time_1_instance, time_unit_instance) # self.fields['time_1_display'].initial = ctime # # if self.instance.time_2: # time_2_instance = self.instance.time_2 # ctime = sck_general_convert_time_from_minutes_to_unit_given(time_2_instance, time_unit_instance) # self.fields['time_2_display'].initial = ctime # not using right now, but may want later if do something different by computational method # assay_omic_analysis_target_queryset = AssayOmicAnalysisTarget.objects.all() # data_type_to_computational_method_dict = {} # for index, each in enumerate(assay_omic_analysis_target_queryset): # data_type_to_computational_method_dict[each.data_type] = each.method.name # self.fields['data_type_to_computational_method_dict'].initial = json.dumps(data_type_to_computational_method_dict) # for the inline help table to show what is programmed to handle assay_omic_analysis_table_rows = AssayOmicAnalysisTarget.objects.all().order_by( 'data_type', 'method', 'method_order', 'name', 'target', ).prefetch_related( 'target', 'method', 'unit', ) list_of_dicts_of_assay_omic_analysis_table_rows = [] for each in assay_omic_analysis_table_rows: data_type = [item[1] for item in assay_omic_data_type_choices if item[0] == each.data_type] name = each.name target = each.target.name method = each.method.name method_order = each.method_order dict1 = { 'data_type': data_type, 'file_header': name, 'computational_target': target, 'method': method, 'method_order': method_order, } list_of_dicts_of_assay_omic_analysis_table_rows.append(dict1) self.fields['list_of_dicts_of_assay_omic_analysis_table_rows'].initial = json.dumps(list_of_dicts_of_assay_omic_analysis_table_rows) # for the template generator indy_sample_labels, dict_of_method_to_name, dict_of_method_to_header_list = get_dict_of_method_to_header_list(self.study) self.fields['indy_sample_labels'].initial = indy_sample_labels self.fields['dict_of_method_to_name'].initial = json.dumps(dict_of_method_to_name) self.fields['dict_of_method_to_header_list'].initial = json.dumps(dict_of_method_to_header_list) default_data_type = forms.CharField(required=False,) time_1_day = forms.DecimalField( required=False, label='Day' ) time_1_hour = forms.DecimalField( required=False, label='Hour' ) time_1_minute = forms.DecimalField( required=False, label='Minute' ) time_2_day = forms.DecimalField( required=False, label='Day' ) time_2_hour = forms.DecimalField( required=False, label='Hour' ) time_2_minute = forms.DecimalField( required=False, label='Minute' ) # time_1_display = forms.DecimalField( # required=False, # label='Sample Time 1*' # ) # time_2_display = forms.DecimalField( # required=False, # label='Sample Time 2*' # ) # not using right now # data_type_to_computational_method_dict = forms.CharField(widget=forms.TextInput(), required=False, ) # using for the long page help table list_of_dicts_of_assay_omic_analysis_table_rows = forms.CharField(widget=forms.TextInput(), required=False, ) indy_sample_labels = forms.CharField(widget=forms.TextInput(), required=False, ) dict_of_method_to_header_list = forms.CharField(widget=forms.TextInput(), required=False, ) dict_of_method_to_name = 
forms.CharField(widget=forms.TextInput(), required=False, ) def clean(self): data = super(AssayOmicDataFileUploadForm, self).clean() if self.instance.id: previous_data_type = self.instance.data_type else: previous_data_type = 'adding' # If saving a new instance data['previous_data_type'] = previous_data_type # data are changed here, so NEED to return the data data['time_1'] = 0 for time_unit, conversion in list(TIME_CONVERSIONS.items()): if data.get('time_1_' + time_unit) is not None: int_time = (data.get('time_1_' + time_unit)) data.update({'time_1': data.get('time_1') + int_time * conversion,}) data['time_2'] = 0 for time_unit, conversion in list(TIME_CONVERSIONS.items()): if data.get('time_2_' + time_unit) is not None: int_time = data.get('time_2_' + time_unit) data.update({'time_2': data.get('time_2') + int_time * conversion,}) # there is QC that needs run on the form fields and, if not passed, do not even bother with the file QC # do that QC first true_to_continue = self.qc_form_fields_only(save=False, called_from='clean') if not true_to_continue: validation_message = 'This did not pass form field QC (the file QC cannot be run until the form field QC passes).' raise ValidationError(validation_message, code='invalid') # only want to run huge code (and replace data in the point data files) # if something was changed that affected the point data # so, let us check that first # what are form fields REQUIRE that we look again/reprocess (delete and read) the data point data? need_to_run_long = False if true_to_continue: # we are continuing if self.instance.id: # this is a update form for each in omic_upload_fields_require_file_reprocessing: if each in self.changed_data: # one of the critical fields was changed, must run the long version need_to_run_long = True break else: # this is an add form, have to run the long version need_to_run_long = True if true_to_continue: # we are continuing, run the QC that affects the reprocessing of the data file if need_to_run_long: # run the QC that affects the file reprocessing true_to_continue = self.qc_file(save=False, called_from='clean') if not true_to_continue: validation_message = 'This did not pass file upload QC.' 
raise ValidationError(validation_message, code='invalid') else: # passed the file QC, so process the file in clean self.process_file(save=False, called_from='clean') return data def save(self, commit=True): new_file = None need_to_run_long = False if commit: if self.instance.id: # an update page for each in omic_upload_fields_require_file_reprocessing: if each in self.changed_data: # a critical field was found in the changed list need_to_run_long = True break else: # add page, have to run the long version need_to_run_long = True new_file = super(AssayOmicDataFileUploadForm, self).save(commit=commit) if need_to_run_long: self.process_file(save=True, called_from='save') return new_file def qc_form_fields_only(self, save=False, called_from='clean'): data = self.cleaned_data data_file_pk = 0 if self.instance.id: data_file_pk = self.instance.id true_to_continue = omic_data_quality_clean_check_for_omic_form_fields(self, data, data_file_pk) return true_to_continue def qc_file(self, save=False, called_from='clean'): data = self.cleaned_data data_file_pk = 0 # self.instance.id is None for the add form if self.instance.id: data_file_pk = self.instance.id # the data_type specific QC is in the utils.py true_to_continue = omic_data_quality_clean_check_for_omic_file_upload(self, data, data_file_pk, called_from) return true_to_continue def process_file(self, save=False, called_from='clean'): data = self.cleaned_data data_file_pk = 0 if self.instance.id: data_file_pk = self.instance.id file_extension = os.path.splitext(data.get('omic_data_file').name)[1] data_type = data['data_type'] analysis_method = data['analysis_method'] # HANDY for getting a file object and a file queryset when doing clean vrs save # this has to be different because the file is not saved yet when add form or when updated the file if called_from == 'clean': data_file = data.get('omic_data_file') a_returned = omic_data_file_processing_data_main_for_all_data_types(save, self.study.id, data_file_pk, data_file, file_extension, called_from, data_type, analysis_method) else: queryset = AssayOmicDataFileUpload.objects.get(id=data_file_pk) data_file = queryset.omic_data_file.open() a_returned = omic_data_file_processing_data_main_for_all_data_types(save, self.study.id, data_file_pk, data_file, file_extension, called_from, data_type, analysis_method) return data # sck need for more than one form, so make is a function def get_dict_of_method_to_header_list(study_id): indy_list_of_sample_labels = list(AssayOmicSampleMetadata.objects.filter( study_id=study_id ).values_list('sample_name', flat=True)) # exclude the name field so can deal with it all in one place indy_samples = '' for each in indy_list_of_sample_labels: indy_samples = indy_samples + ', ' + each indy_sample_labels = indy_samples[2:] assay_omic_analysis_target_queryset = AssayOmicAnalysisTarget.objects.filter( data_type='log2fc' ).order_by( 'method', 'method_order', ).prefetch_related( 'method', ) dict_of_method_to_header_list = {} dict_of_method_to_name = {} prev_method = '' prev_method_name = '' list_of_headers = [] for index, each in enumerate(assay_omic_analysis_target_queryset): name = each.name method = each.method.pk method_name = each.method.name if method == prev_method: list_of_headers.append(name) else: # save the last set if not null if prev_method != '': dict_of_method_to_header_list[prev_method] = list_of_headers dict_of_method_to_name[prev_method] = prev_method_name # reset the empties for restarting the next set list_of_headers = [] list_of_headers.append(name) 
prev_method = method prev_method_name = method_name # do the last one dict_of_method_to_header_list[method] = list_of_headers dict_of_method_to_name[method] = method_name return indy_sample_labels, dict_of_method_to_name, dict_of_method_to_header_list # End Omic Data File Upload Section # Start Omic Metadata Collection Section sample_option_choices = ( ('clt', 'Chip/Well - Location - Time'), ('cus1', 'Enter a prefix and starting counter'), ('cus2', 'Enter a prefix and/or suffix for Chip/Well'), ('sn1', 'Sample-1 to Sample-99999 etc'), ('sn2', 'Sample-01 to Sample-99'), ('sn3', 'Sample-001 to Sample-999'), ) # Form to use to collect the omic sample metadata integrated with workflow tabs # following the pattern for AssayStudyAssaysForm # This was formerly class AssayOmicSampleMetadataAdditionalInfoForm(BootstrapForm): # The actual metadata will be stuffed into a field for performance # NOTE TO SCK - this will be one record per form (the rest will be crammed in a field...) # when done as a button, before tab workflow - the form will not have an index page, so, there is a conditional in the call (click to go there) and # this uses the AssayStudy model so that the study id is easily passed in and out class AssayStudySamplesForm(BootstrapForm): class Meta(object): model = AssayStudy fields1 = flag_group fields2 = ( 'indy_list_of_dicts_of_table_rows', 'indy_list_of_column_labels', 'indy_list_of_column_labels_show_hide', 'indy_sample_location', 'indy_sample_location_all', 'indy_matrix_item', 'indy_matrix_item_list', 'indy_matrix_item_name_to_pk_dict', 'indy_list_time_units_to_include_initially', 'indy_dict_time_units_to_table_column', 'indy_add_or_update' ) fields = fields1 + fields2 def __init__(self, *args, **kwargs): super(AssayStudySamplesForm, self).__init__(*args, **kwargs) # self.instance will be the study self.instance.id is the study id indy_table_labels = omic_metadata_find_the_labels_needed_for_the_indy_metadata_table('form', self.instance.id) indy_list_of_column_labels = indy_table_labels.get('indy_list_of_column_labels') indy_list_of_column_labels_show_hide = indy_table_labels.get('indy_list_of_column_labels_show_hide') indy_list_of_dicts_of_table_rows = indy_table_labels.get('indy_list_of_dicts_of_table_rows') indy_list_time_units_to_include_initially = indy_table_labels.get('indy_list_time_units_to_include_initially') indy_dict_time_units_to_table_column = indy_table_labels.get('indy_dict_time_units_to_table_column') indy_add_or_update = indy_table_labels.get('indy_add_or_update') self.fields['indy_list_of_column_labels'].initial = json.dumps(indy_list_of_column_labels) self.fields['indy_list_of_column_labels_show_hide'].initial = json.dumps(indy_list_of_column_labels_show_hide) self.fields['indy_list_of_dicts_of_table_rows'].initial = json.dumps(indy_list_of_dicts_of_table_rows) self.fields['indy_list_time_units_to_include_initially'].initial = json.dumps(indy_list_time_units_to_include_initially) self.fields['indy_dict_time_units_to_table_column'].initial = json.dumps(indy_dict_time_units_to_table_column) self.fields['indy_add_or_update'].initial = json.dumps(indy_add_or_update) # get the queryset of matrix items in this study matrix_item_queryset = AssayMatrixItem.objects.filter(study_id=self.instance.id).order_by('name', ) self.fields['indy_matrix_item'].queryset = matrix_item_queryset # get the matrix items names in this study matrix_item_list = list(matrix_item_queryset.values_list('name', flat=True)) self.fields['indy_matrix_item_list'].initial = json.dumps(matrix_item_list) 
matrix_item_name_and_pk = {} for index, each in enumerate(matrix_item_queryset): matrix_item_name_and_pk[each.name] = each.id self.fields['indy_matrix_item_name_to_pk_dict'].initial = json.dumps(matrix_item_name_and_pk) # if, the models in the study have locations, pull them organ_models_in_study_list = list(AssayGroup.objects.filter( # We will always know the study, this can never be an add page study_id=self.instance, ).prefetch_related( 'organ_model', ).distinct().only( 'organ_model' ).values_list('organ_model__id', flat=True)) sample_locations_in_list = list(OrganModelLocation.objects.filter( organ_model__in=organ_models_in_study_list ).prefetch_related( 'sample_location', # ).values('sample_location__id', 'sample_location__name') # < QuerySet[{'sample_location__id': 31, 'sample_location__name': 'Well'}, {'sample_location__id': 30, 'sample_location__name': 'Media'}] > ).values_list('sample_location__id', flat=True)) sample_locations_queryset = AssaySampleLocation.objects.filter( id__in=sample_locations_in_list ).order_by( 'name', ) # what if the study has more than one model, # and one has locations and one does not, # the queryset len will be > 0, # but not all the locations would be in the sub list # might have to deal with this, (location needed might not be in list) but # for now, if there is a sub list, use it if len(sample_locations_queryset) > 0: self.fields['indy_sample_location'].queryset = sample_locations_queryset # for the template generator indy_sample_labels, dict_of_method_to_name, dict_of_method_to_header_list = get_dict_of_method_to_header_list(self.instance) self.fields['indy_sample_labels'].initial = indy_sample_labels self.fields['dict_of_method_to_name'].initial = json.dumps(dict_of_method_to_name) self.fields['dict_of_method_to_header_list'].initial = json.dumps(dict_of_method_to_header_list) self.fields['indy_custom_sample_label_starter_prefix'].initial = 'A' self.fields['indy_custom_sample_label_starter_counter'].initial = '01' self.fields['indy_custom_sample_label_prefix'].initial = '' self.fields['indy_custom_sample_label_suffix'].initial = '' indy_list_of_dicts_of_table_rows = forms.CharField(widget=forms.TextInput(), required=False,) indy_list_of_column_labels = forms.CharField(widget=forms.TextInput(), required=False,) indy_list_of_column_labels_show_hide = forms.CharField(widget=forms.TextInput(), required=False, ) indy_list_time_units_to_include_initially = forms.CharField(widget=forms.TextInput(), required=False, ) indy_dict_time_units_to_table_column = forms.CharField(widget=forms.TextInput(), required=False, ) list_of_dicts_of_assay_omic_analysis_table_rows = forms.CharField(widget=forms.TextInput(), required=False, ) indy_sample_labels = forms.CharField(widget=forms.TextInput(), required=False, ) dict_of_method_to_header_list = forms.CharField(widget=forms.TextInput(), required=False, ) dict_of_method_to_name = forms.CharField(widget=forms.TextInput(), required=False, ) # this will return all the sample locations indy_sample_location = forms.ModelChoiceField( queryset=AssaySampleLocation.objects.all().order_by( 'name' ), required=False, ) indy_sample_location_all = forms.ModelChoiceField( queryset=AssaySampleLocation.objects.all().order_by( 'name' ), required=False, ) indy_matrix_item = forms.ModelChoiceField( queryset=AssayMatrixItem.objects.none(), required=False, ) indy_matrix_item_list = forms.CharField(widget=forms.TextInput(), required=False,) indy_matrix_item_name_to_pk_dict = forms.CharField(widget=forms.TextInput(), required=False, ) 
indy_sample_label_options = forms.ChoiceField( choices=sample_option_choices, required=False, ) indy_add_or_update = forms.CharField(widget=forms.TextInput(), required=False,) indy_sample_labels = forms.CharField(widget=forms.TextInput(), required=False, ) dict_of_method_to_header_list = forms.CharField(widget=forms.TextInput(), required=False, ) dict_of_method_to_name = forms.CharField(widget=forms.TextInput(), required=False, ) indy_custom_sample_label_starter_prefix = forms.CharField(widget=forms.TextInput(), required=False, ) indy_custom_sample_label_starter_counter = forms.CharField(widget=forms.TextInput(), required=False, ) indy_custom_sample_label_prefix = forms.CharField(widget=forms.TextInput(), required=False, ) indy_custom_sample_label_suffix = forms.CharField(widget=forms.TextInput(), required=False, ) def clean(self): data = super(AssayStudySamplesForm, self).clean() if 'indy_list_of_dicts_of_table_rows' in self.changed_data or data['indy_add_or_update'].replace('"', '') == 'add': # print('indy_list_of_dicts_of_table_rows') # print(data['indy_list_of_dicts_of_table_rows']) error_message = data_quality_clean_check_for_omic_metadata_empty_fields(self, data) # print("error message returned ", error_message) if len(error_message) > 0: validation_message = 'This did not pass QC. ' + error_message raise ValidationError(validation_message, code='invalid') else: self.process_metadata(save=False, called_from='clean') return data def save(self, commit=True): # note: even though we are not really saving anything to the AssayStudy model/table # the save expects a model instance to be returned, or an error will result study = self.instance if commit: self.process_metadata(save=True, called_from='save') return study def process_metadata(self, save=False, called_from='clean'): data = self.cleaned_data a_returned = omic_process_the_omic_sample_metadata(self, called_from, data) # print(a_returned) return data # End omic sample metadata collection section
mit
1,930,131,250,192,173,600
41.53103
207
0.56811
false
4.19384
false
false
false
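The clean() method in the omic upload form above folds the split day/hour/minute inputs into a single minute total via a TIME_CONVERSIONS mapping that is imported elsewhere in this repository. A minimal, self-contained sketch of that accumulation pattern, assuming the conventional day/hour/minute factors (the project's actual constant may differ):

# Sketch of the split-time accumulation used in the form's clean();
# TIME_CONVERSIONS here is an assumed stand-in for the project's constant.
TIME_CONVERSIONS = {'day': 1440, 'hour': 60, 'minute': 1}

def combine_split_time(data, prefix):
    """Sum e.g. data['time_1_day'], data['time_1_hour'], data['time_1_minute'] into minutes."""
    total = 0
    for unit, factor in TIME_CONVERSIONS.items():
        value = data.get(prefix + '_' + unit)
        if value is not None:
            total += value * factor
    return total

# Example: 1 day, 2 hours, 30 minutes -> 1590 minutes, the value stored as time_1.
print(combine_split_time({'time_1_day': 1, 'time_1_hour': 2, 'time_1_minute': 30}, 'time_1'))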
google-research/tensor2robot
hooks/async_export_hook_builder_tpu_test.py
1
2533
# coding=utf-8 # Copyright 2021 The Tensor2Robot Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for TD3 Hooks.""" import os import gin from tensor2robot.hooks import async_export_hook_builder from tensor2robot.predictors import exported_savedmodel_predictor from tensor2robot.preprocessors import noop_preprocessor from tensor2robot.utils import mocks from tensor2robot.utils import train_eval import tensorflow.compat.v1 as tf # tf _EXPORT_DIR = 'export_dir' _BATCH_SIZES_FOR_EXPORT = [128] _MAX_STEPS = 4 _BATCH_SIZE = 4 class AsyncExportHookBuilderTest(tf.test.TestCase): def test_with_mock_training(self): model_dir = self.create_tempdir().full_path mock_t2r_model = mocks.MockT2RModel( preprocessor_cls=noop_preprocessor.NoOpPreprocessor, device_type='tpu', use_avg_model_params=True) mock_input_generator = mocks.MockInputGenerator(batch_size=_BATCH_SIZE) export_dir = os.path.join(model_dir, _EXPORT_DIR) hook_builder = async_export_hook_builder.AsyncExportHookBuilder( export_dir=export_dir, create_export_fn=async_export_hook_builder.default_create_export_fn) gin.parse_config('tf.contrib.tpu.TPUConfig.iterations_per_loop=1') gin.parse_config('tf.estimator.RunConfig.save_checkpoints_steps=1') # We optimize our network. train_eval.train_eval_model( t2r_model=mock_t2r_model, input_generator_train=mock_input_generator, train_hook_builders=[hook_builder], model_dir=model_dir, max_train_steps=_MAX_STEPS) self.assertNotEmpty(tf.io.gfile.listdir(model_dir)) self.assertNotEmpty(tf.io.gfile.listdir(export_dir)) for exported_model_dir in tf.io.gfile.listdir(export_dir): self.assertNotEmpty( tf.io.gfile.listdir(os.path.join(export_dir, exported_model_dir))) predictor = exported_savedmodel_predictor.ExportedSavedModelPredictor( export_dir=export_dir) self.assertTrue(predictor.restore()) if __name__ == '__main__': tf.test.main()
apache-2.0
-8,682,825,229,037,684,000
35.710145
76
0.731938
false
3.441576
true
false
false
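The test above configures the estimator entirely through gin binding strings before calling train_eval_model. A tiny illustration of that binding mechanism in isolation; make_run_config is a made-up function for the example, while gin.configurable and gin.parse_config are the real gin-config APIs used in the test:

import gin

@gin.configurable
def make_run_config(save_checkpoints_steps=100, iterations_per_loop=200):
    # Returns plain values so the effect of the parsed bindings is easy to see.
    return {'save_checkpoints_steps': save_checkpoints_steps,
            'iterations_per_loop': iterations_per_loop}

# Same pattern as the test: bind parameters by parsing literal config strings.
gin.parse_config('make_run_config.save_checkpoints_steps = 1')
gin.parse_config('make_run_config.iterations_per_loop = 1')
print(make_run_config())  # {'save_checkpoints_steps': 1, 'iterations_per_loop': 1}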
mrven/origin_align_addon
OriginAlign.py
1
5309
#02.05.2015 #Ivan "mrven" Vostrikov bl_info = { "name": "Origin Align", 'author': 'Ivan Vostrikov', 'version': (1, 0, 1), 'blender': (2, 7, 4), 'location': '3d view > Object > Origin Align X/Y/Z', 'description': 'In object mode, sets object origin to selected axis of 3D Cursor', 'wiki_url': 'lowpolyart3d.blogspot.ru', 'tracker_url': '', "category": "Object", } import bpy #-------------------------------------------------------- # Align Selected object's origin points for Z Axis class OriginAlignZ(bpy.types.Operator): """Origin Align Z""" bl_idname = "object.origin_align_z" bl_label = "Origin Align Z" bl_options = {'REGISTER', 'UNDO'} def execute(self, context): # Save selected objects and current position of 3D Cursor current_selected_obj = bpy.context.selected_objects saved_cursor_loc = bpy.context.scene.cursor_location.copy() bpy.ops.object.mode_set(mode = 'OBJECT') # Change individual origin point for x in current_selected_obj: # Select only current object (for setting origin) bpy.ops.object.select_all(action='DESELECT') x.select = True # Save current origin and relocate 3D Cursor to (X-Origin, Y-Origin, Z-Cursor) saved_origin_loc = x.location.copy() bpy.context.scene.cursor_location = [saved_origin_loc[0], saved_origin_loc[1], saved_cursor_loc[2]] # Apply origin to Cursor position bpy.ops.object.origin_set(type='ORIGIN_CURSOR') # Reset 3D Cursor position bpy.context.scene.cursor_location = saved_cursor_loc # Select again objects for j in current_selected_obj: j.select = True; return {'FINISHED'} #------------------------------------------------------------ # Align Selected object's origin points for Y Axis class OriginAlignY(bpy.types.Operator): """Origin Align Y""" bl_idname = "object.origin_align_y" bl_label = "Origin Align Y" bl_options = {'REGISTER', 'UNDO'} def execute(self, context): # Save selected objects and current position of 3D Cursor current_selected_obj = bpy.context.selected_objects saved_cursor_loc = bpy.context.scene.cursor_location.copy() bpy.ops.object.mode_set(mode = 'OBJECT') # Change individual origin point for x in current_selected_obj: # Select only current object (for setting origin) bpy.ops.object.select_all(action='DESELECT') x.select = True # Save current origin and relocate 3D Cursor to (X-Origin, Y-Cursor, Z-Origin) saved_origin_loc = x.location.copy() bpy.context.scene.cursor_location = [saved_origin_loc[0], saved_cursor_loc[1], saved_origin_loc[2]] # Apply origin to Cursor position bpy.ops.object.origin_set(type='ORIGIN_CURSOR') # Reset 3D Cursor position bpy.context.scene.cursor_location = saved_cursor_loc # Select again objects for j in current_selected_obj: j.select = True; return {'FINISHED'} #------------------------------------------------------------ # Align Selected object's origin points for X Axis class OriginAlignX(bpy.types.Operator): """Origin Align X""" bl_idname = "object.origin_align_x" bl_label = "Origin Align X" bl_options = {'REGISTER', 'UNDO'} def execute(self, context): # Save selected objects and current position of 3D Cursor current_selected_obj = bpy.context.selected_objects saved_cursor_loc = bpy.context.scene.cursor_location.copy() bpy.ops.object.mode_set(mode = 'OBJECT') # Change individual origin point for x in current_selected_obj: # Select only current object (for setting origin) bpy.ops.object.select_all(action='DESELECT') x.select = True # Save current origin and relocate 3D Cursor to (X-Cursor, Y-Origin, Z-Origin) saved_origin_loc = x.location.copy() bpy.context.scene.cursor_location = [saved_cursor_loc[0], 
saved_origin_loc[1], saved_origin_loc[2]] # Apply origin to Cursor position bpy.ops.object.origin_set(type='ORIGIN_CURSOR') # Reset 3D Cursor position bpy.context.scene.cursor_location = saved_cursor_loc # Select again objects for j in current_selected_obj: j.select = True; return {'FINISHED'} def menu_func(self, context): self.layout.operator(OriginAlignX.bl_idname) self.layout.operator(OriginAlignY.bl_idname) self.layout.operator(OriginAlignZ.bl_idname) def register(): bpy.utils.register_class(OriginAlignZ) bpy.utils.register_class(OriginAlignY) bpy.utils.register_class(OriginAlignX) bpy.types.VIEW3D_MT_object.prepend(menu_func) def unregister(): bpy.utils.unregister_class(OriginAlignZ) bpy.utils.unregister_class(OriginAlignY) bpy.utils.unregister_class(OriginAlignX) bpy.types.VIEW3D_MT_object.remove(menu_func) if __name__ == "__main__": register()
gpl-2.0
-5,504,240,410,435,975,000
38.619403
111
0.600301
false
3.833213
false
false
false
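Once registered, the three operators above are reachable both from the 3D View's Object menu and from scripts via their bl_idname values. A short script-side sketch, assuming a Blender 2.7x session (matching the add-on's bl_info) with the add-on enabled:

import bpy

# Place the 3D cursor at the height the origins should share,
# select the objects to align, then invoke the operator by its bl_idname.
bpy.context.scene.cursor_location = (0.0, 0.0, 1.5)
bpy.ops.object.origin_align_z()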
googleads/google-ads-python
google/ads/googleads/v6/services/services/google_ads_field_service/client.py
1
21308
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict from distutils import util import os import re from typing import Dict, Optional, Sequence, Tuple, Type, Union from google.api_core import client_options as client_options_lib # type: ignore from google.api_core import exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore from google.ads.googleads.v6.resources.types import google_ads_field from google.ads.googleads.v6.services.services.google_ads_field_service import ( pagers, ) from google.ads.googleads.v6.services.types import google_ads_field_service from .transports.base import GoogleAdsFieldServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import GoogleAdsFieldServiceGrpcTransport class GoogleAdsFieldServiceClientMeta(type): """Metaclass for the GoogleAdsFieldService client. This provides class-level methods for building and retrieving support objects (e.g. transport) without polluting the client instance objects. """ _transport_registry = ( OrderedDict() ) # type: Dict[str, Type[GoogleAdsFieldServiceTransport]] _transport_registry["grpc"] = GoogleAdsFieldServiceGrpcTransport def get_transport_class( cls, label: str = None, ) -> Type[GoogleAdsFieldServiceTransport]: """Return an appropriate transport class. Args: label: The name of the desired transport. If none is provided, then the first transport in the registry is used. Returns: The transport class to use. """ # If a specific transport is requested, return that one. if label: return cls._transport_registry[label] # No transport is requested; return the default (that is, the first one # in the dictionary). return next(iter(cls._transport_registry.values())) class GoogleAdsFieldServiceClient(metaclass=GoogleAdsFieldServiceClientMeta): """Service to fetch Google Ads API fields.""" @staticmethod def _get_default_mtls_endpoint(api_endpoint): """Convert api endpoint to mTLS endpoint. Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: api_endpoint (Optional[str]): the api endpoint to convert. Returns: str: converted mTLS api endpoint. """ if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" 
) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "googleads.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials info. Args: info (dict): The service account private key info. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: GoogleAdsFieldServiceClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_info( info ) kwargs["credentials"] = credentials return cls(*args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: GoogleAdsFieldServiceClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_file( filename ) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @property def transport(self) -> GoogleAdsFieldServiceTransport: """Return the transport used by the client instance. Returns: GoogleAdsFieldServiceTransport: The transport used by the client instance. 
""" return self._transport @staticmethod def google_ads_field_path(google_ads_field: str,) -> str: """Return a fully-qualified google_ads_field string.""" return "googleAdsFields/{google_ads_field}".format( google_ads_field=google_ads_field, ) @staticmethod def parse_google_ads_field_path(path: str) -> Dict[str, str]: """Parse a google_ads_field path into its component segments.""" m = re.match(r"^googleAdsFields/(?P<google_ads_field>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_billing_account_path(billing_account: str,) -> str: """Return a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @staticmethod def parse_common_billing_account_path(path: str) -> Dict[str, str]: """Parse a billing_account path into its component segments.""" m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_folder_path(folder: str,) -> str: """Return a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod def parse_common_folder_path(path: str) -> Dict[str, str]: """Parse a folder path into its component segments.""" m = re.match(r"^folders/(?P<folder>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_organization_path(organization: str,) -> str: """Return a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod def parse_common_organization_path(path: str) -> Dict[str, str]: """Parse a organization path into its component segments.""" m = re.match(r"^organizations/(?P<organization>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_project_path(project: str,) -> str: """Return a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod def parse_common_project_path(path: str) -> Dict[str, str]: """Parse a project path into its component segments.""" m = re.match(r"^projects/(?P<project>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_location_path(project: str, location: str,) -> str: """Return a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( project=project, location=location, ) @staticmethod def parse_common_location_path(path: str) -> Dict[str, str]: """Parse a location path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path ) return m.groupdict() if m else {} def __init__( self, *, credentials: Optional[credentials.Credentials] = None, transport: Union[str, GoogleAdsFieldServiceTransport, None] = None, client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiate the google ads field service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, ~.GoogleAdsFieldServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (google.api_core.client_options.ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. 
(1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint) and "auto" (auto switch to the default mTLS endpoint if client certificate is present, this is the default value). However, the ``api_endpoint`` property takes precedence if provided. (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used to provide client certificate for mutual TLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. """ if isinstance(client_options, dict): client_options = client_options_lib.from_dict(client_options) if client_options is None: client_options = client_options_lib.ClientOptions() # Create SSL credentials for mutual TLS if needed. use_client_cert = bool( util.strtobool( os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") ) ) ssl_credentials = None is_mtls = False if use_client_cert: if client_options.client_cert_source: import grpc # type: ignore cert, key = client_options.client_cert_source() ssl_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) is_mtls = True else: creds = SslCredentials() is_mtls = creds.is_mtls ssl_credentials = creds.ssl_credentials if is_mtls else None # Figure out which api endpoint to use. if client_options.api_endpoint is not None: api_endpoint = client_options.api_endpoint else: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") if use_mtls_env == "never": api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": api_endpoint = ( self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT ) else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" ) # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. if isinstance(transport, GoogleAdsFieldServiceTransport): # transport is a GoogleAdsFieldServiceTransport instance. if credentials: raise ValueError( "When providing a transport instance, " "provide its credentials directly." 
) self._transport = transport elif isinstance(transport, str): Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, host=self.DEFAULT_ENDPOINT ) else: self._transport = GoogleAdsFieldServiceGrpcTransport( credentials=credentials, host=api_endpoint, ssl_channel_credentials=ssl_credentials, client_info=client_info, ) def get_google_ads_field( self, request: google_ads_field_service.GetGoogleAdsFieldRequest = None, *, resource_name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> google_ads_field.GoogleAdsField: r"""Returns just the requested field. Args: request (:class:`google.ads.googleads.v6.services.types.GetGoogleAdsFieldRequest`): The request object. Request message for [GoogleAdsFieldService.GetGoogleAdsField][google.ads.googleads.v6.services.GoogleAdsFieldService.GetGoogleAdsField]. resource_name (:class:`str`): Required. The resource name of the field to get. This corresponds to the ``resource_name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.ads.googleads.v6.resources.types.GoogleAdsField: A field or resource (artifact) used by GoogleAdsService. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. if request is not None and any([resource_name]): raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a google_ads_field_service.GetGoogleAdsFieldRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance( request, google_ads_field_service.GetGoogleAdsFieldRequest ): request = google_ads_field_service.GetGoogleAdsFieldRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if resource_name is not None: request.resource_name = resource_name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[ self._transport.get_google_ads_field ] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata( (("resource_name", request.resource_name),) ), ) # Send the request. response = rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # Done; return the response. return response def search_google_ads_fields( self, request: google_ads_field_service.SearchGoogleAdsFieldsRequest = None, *, query: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.SearchGoogleAdsFieldsPager: r"""Returns all fields that match the search query. Args: request (:class:`google.ads.googleads.v6.services.types.SearchGoogleAdsFieldsRequest`): The request object. Request message for [GoogleAdsFieldService.SearchGoogleAdsFields][google.ads.googleads.v6.services.GoogleAdsFieldService.SearchGoogleAdsFields]. query (:class:`str`): Required. The query string. 
This corresponds to the ``query`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.ads.googleads.v6.services.services.google_ads_field_service.pagers.SearchGoogleAdsFieldsPager: Response message for [GoogleAdsFieldService.SearchGoogleAdsFields][google.ads.googleads.v6.services.GoogleAdsFieldService.SearchGoogleAdsFields]. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. if request is not None and any([query]): raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a google_ads_field_service.SearchGoogleAdsFieldsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance( request, google_ads_field_service.SearchGoogleAdsFieldsRequest ): request = google_ads_field_service.SearchGoogleAdsFieldsRequest( request ) # If we have keyword arguments corresponding to fields on the # request, apply these. if query is not None: request.query = query # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[ self._transport.search_google_ads_fields ] # Send the request. response = rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.SearchGoogleAdsFieldsPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response __all__ = ("GoogleAdsFieldServiceClient",)
apache-2.0
3,742,854,204,615,069,700
40.055877
140
0.62324
false
4.535547
false
false
false
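A minimal calling sketch for the client defined above, bypassing the higher-level GoogleAdsClient wrapper and using only constructors and methods visible in the excerpt; the key-file path and query string are placeholders, and real requests additionally need the usual Google Ads API headers configured:

from google.ads.googleads.v6.services.services.google_ads_field_service.client import (
    GoogleAdsFieldServiceClient,
)

# Hypothetical service-account key; any google-auth credentials object also works
# via the credentials argument of the constructor.
client = GoogleAdsFieldServiceClient.from_service_account_file('service_account_key.json')

# search_google_ads_fields returns a pager that fetches further pages on iteration.
pager = client.search_google_ads_fields(query="SELECT name WHERE name LIKE 'campaign.%'")
for ads_field in pager:
    print(ads_field.name)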
labase/brython_crafty
src/crafty/graphics.py
1
7243
#! /usr/bin/env python
# -*- coding: UTF8 -*-
"""
############################################################
Graphic handling classes
############################################################

:Author: *Carlo E. T. Oliveira*
:Contact: [email protected]
:Date: 2014/09/17
:Status: This is a "work in progress"
:Revision: 0.1.0
:Home: `Labase <http://labase.selfip.org/>`__
:Copyright: 2013, `GPL <http://is.gd/3Udt>`__.

.. moduleauthor:: Carlo Oliveira <[email protected]>
"""


class Canvas:
    """Canvas. :ref:`canvas`

    When this component is added to an entity it will be drawn to the global canvas element.
    The canvas element (and hence all Canvas entities) is always rendered below any DOM entities.
    Crafty.canvas.init() will be automatically called if it is not called already to initialize
    the canvas element.
    """

    def __init__(self, stage, cmp):
        self.__stage = stage
        self.__elt = stage.e(cmp)

    def draw(self, ctx, x, y, w, h):
        """draw([[Context ctx, ]Number x, Number y, Number w, Number h])."""
        self.__elt.draw(ctx, x, y, w, h)
        return self  # .__elt  # .attr(dict(**kwarg)).fourway(4)


class Sprite:
    """Sprite. :ref:`sprite`

    Component for using tiles in a sprite map.
    """

    def __init__(self, ent):  # , x, y, w, h):
        self.__ent = ent

    def sprite(self, x, y, w, h):
        self.__ent.requires('Sprite')
        self.__ent.sprite(x, y, w, h)
        return self

    @property
    def coord(self):
        """The coordinate of the slide within the sprite in the format of [x, y, w, h]."""
        return self.__ent.__coord

    def crop(self, x, y, w, h):
        """Crop the sprite.

        If the entity needs to be smaller than the tile size, use this method to crop it.
        The values should be in pixels rather than tiles.

        :param x: Offset x position
        :param y: Offset y position
        :param w: New width
        :param h: New height
        :returns: Self, this same entity
        """
        self.__ent.requires('Sprite')
        self.__ent.crop(x, y, w, h)
        return self

    def reel(self, reelId, duration, fromX, fromY, frameCount):
        """Create animation reel.

        :param: String reelId, Duration duration, Number fromX, Number fromY, Number frameCount
        :returns: Self, this same entity
        """
        self.__ent.requires('SpriteAnimation')
        self.__ent.reel(reelId, duration, fromX, fromY, frameCount)
        return self

    def animate(self, reelId=None, loopCount=1):
        """Animate Entity.

        :param reelId: String reel identification
        :param loopCount: Integer number of loops, default 1, indefinite if -1
        :returns: Self, this same entity
        """
        self.__ent.requires('SpriteAnimation')
        if reelId:
            self.__ent.animate(reelId, loopCount)
        else:
            self.__ent.animate(loopCount)
        return self

    def isPlaying(self, reelId=''):
        """Return whether the reel is playing.

        :param reelId: The reelId of the reel we wish to examine; if missing, default to the current reel
        :returns: The current animation state
        """
        self.__ent.requires('SpriteAnimation')
        if reelId:
            return self.__ent.isPlaying(reelId)
        else:
            return self.__ent.isPlaying()

    def resumeAnimation(self):
        """Resume animation of the current reel from its current state.

        If a reel is already playing, or there is no current reel, there will be no effect.
        """
        self.__ent.resumeAnimation()

    def pauseAnimation(self):
        """Pause the currently playing animation, or do nothing if no animation is playing."""
        self.__ent.pauseAnimation()

    def resetAnimation(self):
        """Reset the current animation to its initial state.

        Resets the number of loops to the last specified value, which defaults to 1.
        Neither pauses nor resumes the current animation.
        """
        self.__ent.resetAnimation()

    def loops(self, loopCount=None):
        """Set or return the number of loops.

        Sets the number of times the animation will loop for. If called while an animation
        is in progress, the current state will be considered the first loop.

        :param loopCount: The number of times to play the animation; if missing, return the loops left.
        :returns: The number of loops left. Returns 0 if no reel is active.
        """
        if loopCount is None:
            return self.__ent.loops()
        else:
            return self.__ent.loops(loopCount)

    def reelPosition(self, position=None):
        """Set or return the position of the current reel.

        :param position: The frame to jump to. This is zero-indexed; a negative value counts back
            from the last frame. A float sets the position by percent progress. A string jumps to
            a named position; the only currently accepted value is "end", which jumps to the end
            of the reel. If missing, the current position is returned.
        :returns: The current frame number
        """
        if position is None:
            return self.__ent.reelPosition()
        else:
            return self.__ent.reelPosition(position)

    def tween(self, duration, **properties):
        """Animate numeric properties over the specified duration.

        These include x, y, w, h, alpha and rotation in degrees.

        :param properties: Object of numeric properties and what they should animate to
        :param duration: Duration to animate the properties over, in milliseconds.
        """
        self.__ent.requires('Tween')
        self.__ent.tween(dict(**properties), duration)


class Draggable:
    """Enable drag and drop of the entity. :ref:`draggable` """

    def __init__(self, ent):
        self.__ent = ent

    def dragDirection(self, degrees=None, x=None, y=None):
        """Specify the dragging direction. If no parameters are given, remove dragging.

        :param degrees: A number, the degree (clockwise) of the move direction with respect to the x axis.
        :param x: the x component of the vector (valx, valy) denoting the move direction.
        :param y: the y component of the vector (valx, valy) denoting the move direction.
        """
        if degrees is not None:
            self.__ent.dragDirection(degrees)
        elif x is not None:
            self.__ent.dragDirection(dict(x=x, y=y))
        else:
            self.__ent.dragDirection()

    def startDrag(self):
        """Make the entity follow the mouse positions."""
        self.__ent.startDrag()

    def stopDrag(self):
        """Stop the entity from dragging. Essentially reproducing the drop."""
        self.__ent.stopDrag()

    def enableDrag(self):
        """Rebind the mouse events. Use if .disableDrag has been called."""
        self.__ent.enableDrag()

    def disableDrag(self):
        """Stop the entity from being draggable. Reenable with .enableDrag()."""
        self.__ent.disableDrag()
gpl-2.0
-4,888,606,017,845,747
31.195556
115
0.597542
false
4.057703
false
false
false
joa/haxe-sublime2-bundle
features/haxe_create_type.py
1
3178
import sublime_plugin import sublime import os try: # Python 3 from ..HaxeHelper import HaxeComplete_inst, isType except (ValueError): # Python 2 from HaxeHelper import HaxeComplete_inst, isType class HaxeCreateType( sublime_plugin.WindowCommand ): classpath = None currentFile = None currentSrc = None currentType = None def run( self , paths = [] , t = "class" ) : builds = HaxeComplete_inst().builds HaxeCreateType.currentType = t view = sublime.active_window().active_view() scopes = view.scope_name(view.sel()[0].end()).split() fn = view.file_name() pack = []; if fn is None : return if len(builds) == 0 : HaxeComplete_inst().extract_build_args(view) if len(paths) == 0 : paths.append(fn) for path in paths : if os.path.isfile( path ) : path = os.path.dirname( path ) if HaxeCreateType.classpath is None : HaxeCreateType.classpath = path for b in builds : for cp in b.classpaths : if path.startswith( cp ) : HaxeCreateType.classpath = path[0:len(cp)] for p in path[len(cp):].split(os.sep) : if "." in p : break elif p : pack.append(p) if HaxeCreateType.classpath is None : if len(builds) > 0 : HaxeCreateType.classpath = builds[0].classpaths[0] # so default text ends with . if len(pack) > 0 : pack.append("") win = sublime.active_window() sublime.status_message( "Current classpath : " + HaxeCreateType.classpath ) win.show_input_panel("Enter "+t+" name : " , ".".join(pack) , self.on_done , self.on_change , self.on_cancel ) def on_done( self , inp ) : fn = self.classpath; parts = inp.split(".") pack = [] cl = "${2:ClassName}" while( len(parts) > 0 ): p = parts.pop(0) fn = os.path.join( fn , p ) if isType.match( p ) : cl = p break; else : pack.append(p) if len(parts) > 0 : cl = parts[0] fn += ".hx" HaxeCreateType.currentFile = fn t = HaxeCreateType.currentType src = "\npackage " + ".".join(pack) + ";\n\n"+t+" "+cl+" " if t == "typedef" : src += "= " src += "\n{\n\n\t$1\n\n}" HaxeCreateType.currentSrc = src v = sublime.active_window().open_file( fn ) @staticmethod def on_activated( view ) : if view.file_name() == HaxeCreateType.currentFile and view.size() == 0 : view.run_command( "insert_snippet" , { "contents" : HaxeCreateType.currentSrc }) def on_change( self , inp ) : sublime.status_message( "Current classpath : " + HaxeCreateType.classpath ) #print( inp ) def on_cancel( self ) : None
apache-2.0
42,504,940,573,944,810
27.890909
118
0.499056
false
3.801435
false
false
false
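The on_done() handler above turns a dotted input such as "com.example.Foo" into a file path and a Haxe type stub. The helper below re-creates only that snippet template so the generated stub can be inspected outside Sublime; the function name and the sample package/class are illustrative and not part of the plugin.

def haxe_stub(pack, cl, t="class"):
    # same template string HaxeCreateType.on_done() feeds to insert_snippet
    src = "\npackage " + ".".join(pack) + ";\n\n" + t + " " + cl + " "
    if t == "typedef":
        src += "= "
    src += "\n{\n\n\t$1\n\n}"
    return src

print(haxe_stub(["com", "example"], "Foo"))  # a "class Foo" body inside package com.example, with the $1 tab-stop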
hasgeek/funnel
funnel/models/reorder_mixin.py
1
5123
from __future__ import annotations from typing import TypeVar, Union from uuid import UUID from coaster.sqlalchemy import Query from . import db __all__ = ['ReorderMixin'] # Use of TypeVar for subclasses of ReorderMixin as defined in this mypy ticket: # https://github.com/python/mypy/issues/1212 Reorderable = TypeVar('Reorderable', bound='ReorderMixin') class ReorderMixin: """Adds support for re-ordering sequences within a parent container.""" #: Subclasses must have a created_at column created_at: db.Column #: Subclass must have a primary key that is int or uuid id: Union[int, UUID] # noqa: A003 #: Subclass must declare a parent_id synonym to the parent model fkey column parent_id: Union[int, UUID] #: Subclass must declare a seq column or synonym, holding a sequence id. It need not #: be unique, but reordering is meaningless when both items have the same number seq: db.Column #: Subclass must offer a SQLAlchemy query (this is standard from base classes) query: Query @property def parent_scoped_reorder_query_filter(self: Reorderable): """ Return a query filter that includes a scope limitation to the parent. Used alongside the :attr:`seq` column to retrieve a sequence value. Subclasses may need to override if they have additional criteria relative to the parent, such as needing to exclude revoked membership records. """ cls = self.__class__ return cls.parent_id == self.parent_id def reorder_item(self: Reorderable, other: Reorderable, before: bool) -> None: """Reorder self before or after other item.""" cls = self.__class__ # Safety checks if other.__class__ is not cls: raise TypeError("Other must be of the same type") if other.parent_id != self.parent_id: raise ValueError("Other must have the same parent") if self.seq is None or other.seq is None: raise ValueError("Sequence numbers must be pre-assigned to reorder") if before: if self.seq <= other.seq: # We're already before or equal. Nothing to do. return order_columns = (cls.seq.desc(), cls.created_at.desc()) else: if self.seq >= other.seq: # We're already after or equal. Nothing to do. return order_columns = (cls.seq.asc(), cls.created_at.asc()) # Get all sequence numbers between self and other inclusive. Use: # descending order if moving up (before other), # ascending order if moving down (after other) items_to_reorder = ( cls.query.filter( self.parent_scoped_reorder_query_filter, cls.seq >= min(self.seq, other.seq), cls.seq <= max(self.seq, other.seq), ) .options(db.load_only(cls.id, cls.seq)) .order_by(*order_columns) .all() ) # Pop-off items that share a sequence number and don't need to be moved while items_to_reorder[0].id != self.id: items_to_reorder.pop(0) # Reordering! Move down the list (reversed if `before`), reassigning numbers. # This list will always start with `self` and end with `other` (with a possible # tail of items that share the same sequence number as `other`). We assign # self's sequence number to the next item in the list, and that one's to the # next and so on until we reach `other`. Then we assign other's sequence # number to self and we're done. 
new_seq_number = self.seq # Temporarily give self an out-of-bounds number self.seq = ( db.select([db.func.coalesce(db.func.max(cls.seq) + 1, 1)]) .where(self.parent_scoped_reorder_query_filter) .scalar_subquery() ) # Flush it so the db doesn't complain when there's a unique constraint db.session.flush() # Reassign all remaining sequence numbers for reorderable_item in items_to_reorder[1:]: # Skip 0, which is self reorderable_item.seq, new_seq_number = new_seq_number, reorderable_item.seq # Flush to force execution order. This does not expunge SQLAlchemy cache as # of SQLAlchemy 1.3.x. Should that behaviour change, a switch to # bulk_update_mappings will be required db.session.flush() if reorderable_item.id == other.id: # Don't bother reordering anything after `other` break # Assign other's previous sequence number to self self.seq = new_seq_number db.session.flush() def reorder_before(self: Reorderable, other: Reorderable) -> None: """Reorder to be before another item's sequence number.""" self.reorder_item(other, True) def reorder_after(self: Reorderable, other: Reorderable) -> None: """Reorder to be after another item's sequence number.""" self.reorder_item(other, False)
agpl-3.0
1,974,439,064,453,346,300
40.314516
88
0.626586
false
4.16843
false
false
false
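ReorderMixin.reorder_item() works by rotating sequence numbers through the items that sit between the two positions. The snippet below is a dependency-free sketch of that rotation on plain Python objects so the idea can be tried without SQLAlchemy; Item and this reorder_before are stand-ins invented for the example, not the model code itself.

from dataclasses import dataclass


@dataclass
class Item:
    id: int
    seq: int


def reorder_before(items, moving_id, other_id):
    """Move `moving` before `other` by rotating seq values between them (mirrors reorder_item)."""
    by_id = {i.id: i for i in items}
    moving, other = by_id[moving_id], by_id[other_id]
    if moving.seq <= other.seq:
        return  # already before the other item, nothing to do
    span = sorted((i for i in items if other.seq <= i.seq <= moving.seq),
                  key=lambda i: i.seq, reverse=True)  # starts with `moving`, ends with `other`
    new_seq = moving.seq
    for nxt in span[1:]:
        nxt.seq, new_seq = new_seq, nxt.seq  # hand the sequence number down the chain
        if nxt.id == other.id:
            break
    moving.seq = new_seq  # take over other's old sequence number


items = [Item(1, 1), Item(2, 2), Item(3, 3)]
reorder_before(items, moving_id=3, other_id=1)
print(sorted((i.seq, i.id) for i in items))  # [(1, 3), (2, 1), (3, 2)]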
csparkresearch/ExpEYES17-Qt
SPARK17/textManual/MarkdownPP/Processor.py
1
2387
# Copyright 2015 John Reese # Licensed under the MIT license from __future__ import absolute_import from __future__ import print_function from __future__ import unicode_literals import sys if sys.version_info[0] != 2: basestring = str class Processor: """ Framework for allowing modules to modify the input data as a set of transforms. Once the original input data is loaded, the preprocessor iteratively allows Modules to inspect the data and generate a list of Transforms against the data. The Transforms are applied in descending order by line number, and the resulting data is used for the next pass. Once all modules have transformed the data, it is ready for writing out to a file. """ data = [] transforms = {} modules = [] def register(self, module): """ This method registers an individual module to be called when processing """ self.modules.append(module) def input(self, file): """ This method reads the original data from an object following the file interface. """ self.data = file.readlines() def process(self): """ This method handles the actual processing of Modules and Transforms """ self.modules.sort(key=lambda x: x.priority) for module in self.modules: transforms = module.transform(self.data) transforms.sort(key=lambda x: x.linenum, reverse=True) for transform in transforms: linenum = transform.linenum if isinstance(transform.data, basestring): transform.data = [transform.data] if transform.oper == "prepend": self.data[linenum:linenum] = transform.data elif transform.oper == "append": self.data[linenum+1:linenum+1] = transform.data elif transform.oper == "swap": self.data[linenum:linenum+1] = transform.data elif transform.oper == "drop": self.data[linenum:linenum+1] = [] elif transform.oper == "noop": pass def output(self, file): """ This method writes the resulting data to an object following the file interface. """ file.writelines(self.data)
mit
-5,170,128,148,573,232,000
29.602564
79
0.600335
false
4.802817
false
false
false
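The Processor above only relies on a registered module exposing priority and transform(), and on transforms exposing linenum, oper and data. The sketch below drives it end to end with a made-up module that uppercases H1 lines; Transform and UppercaseTitles are minimal stand-ins for MarkdownPP's own classes, and the import path is an assumption based on the file's location.

import io

from MarkdownPP.Processor import Processor  # assumed import path for the class above


class Transform:
    def __init__(self, linenum, oper, data):
        self.linenum, self.oper, self.data = linenum, oper, data


class UppercaseTitles:
    priority = 1

    def transform(self, data):
        # swap every markdown H1 line for an uppercased copy
        return [Transform(i, "swap", line.upper())
                for i, line in enumerate(data) if line.startswith("# ")]


pp = Processor()
pp.register(UppercaseTitles())
pp.input(io.StringIO("# title\nbody text\n"))
pp.process()
out = io.StringIO()
pp.output(out)
print(out.getvalue())  # "# TITLE" followed by the untouched body line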
rocky/python3-trepan
trepan/processor/command/ipython.py
1
6554
# -*- coding: utf-8 -*- # Copyright (C) 2009-2010, 2013, 2015, 2017, 2020 Rocky Bernstein # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import code, sys # Our local modules from trepan.processor.command.base_cmd import DebuggerCommand from traitlets.config.loader import Config class IPythonCommand(DebuggerCommand): """**ipython** [**-d**] Run IPython as a command subshell. If *-d* is passed, you can access debugger state via local variable *debugger*. To issue a debugger command use function *dbgr()*. For example: dbgr('info program') See also: --------- `python`, `bpython` """ short_help = "Run IPython as a command subshell" DebuggerCommand.setup(locals(), category="support", max_args=1) def dbgr(self, string): """Invoke a debugger command from inside a IPython shell called inside the debugger. """ self.proc.cmd_queue.append(string) self.proc.process_command() return def run(self, args): # See if python's code module is around # Python does it's own history thing. # Make sure it doesn't damage ours. have_line_edit = self.debugger.intf[-1].input.line_edit if have_line_edit: try: self.proc.write_history_file() except IOError: pass pass cfg = Config() banner_tmpl = """IPython trepan3k shell%s Use dbgr(*string*) to issue non-continuing debugger command: *string*""" debug = len(args) > 1 and args[1] == "-d" if debug: banner_tmpl += "\nVariable 'debugger' contains a trepan " "debugger object." pass try: from IPython.terminal.embed import InteractiveShellEmbed except ImportError: from IPython.frontend.terminal.embed import InteractiveShellEmbed # Now create an instance of the embeddable shell. The first # argument is a string with options exactly as you would type them # if you were starting IPython at the system command line. Any # parameters you want to define for configuration can thus be # specified here. # Add common classes and methods our namespace here so that # inside the ipython shell users don't have run imports my_locals = {} my_globals = None if self.proc.curframe: my_globals = self.proc.curframe.f_globals if self.proc.curframe.f_locals: my_locals = self.proc.curframe.f_locals pass pass # Give IPython and the user a way to get access to the debugger. if debug: my_locals["debugger"] = self.debugger my_locals["dbgr"] = self.dbgr cfg.TerminalInteractiveShell.confirm_exit = False # sys.ps1 = 'trepan3 >>> ' if len(my_locals): banner = banner_tmpl % " with locals" else: banner = banner_tmpl % "" pass InteractiveShellEmbed( config=cfg, banner1=banner, user_ns=my_locals, module=my_globals, exit_msg="IPython exiting to trepan3k...", )() # restore completion and our history if we can do so. 
if hasattr(self.proc.intf[-1], "complete"): try: from readline import set_completer, parse_and_bind parse_and_bind("tab: complete") set_completer(self.proc.intf[-1].complete) except ImportError: pass pass if have_line_edit: self.proc.read_history_file() pass return pass # Monkey-patched from code.py # FIXME: get changes into Python. def interact(banner=None, readfunc=None, my_locals=None, my_globals=None): """Almost a copy of code.interact Closely emulate the interactive Python interpreter. This is a backwards compatible interface to the InteractiveConsole class. When readfunc is not specified, it attempts to import the readline module to enable GNU readline if it is available. Arguments (all optional, all default to None): banner -- passed to InteractiveConsole.interact() readfunc -- if not None, replaces InteractiveConsole.raw_input() local -- passed to InteractiveInterpreter.__init__() """ console = code.InteractiveConsole(my_locals, filename="<trepan>") console.runcode = lambda code_obj: runcode(console, code_obj) setattr(console, "globals", my_globals) if readfunc is not None: console.raw_input = readfunc else: try: import readline except ImportError: pass console.interact(banner) pass # Also monkey-patched from code.py # FIXME: get changes into Python. def runcode(obj, code_obj): """Execute a code object. When an exception occurs, self.showtraceback() is called to display a traceback. All exceptions are caught except SystemExit, which is reraised. A note about KeyboardInterrupt: this exception may occur elsewhere in this code, and may not always be caught. The caller should be prepared to deal with it. """ try: exec(code_obj, obj.locals, obj.globals) except SystemExit: raise except: obj.showtraceback() else: if code.softspace(sys.stdout, 0): print() pass pass return if __name__ == "__main__": from trepan.debugger import Trepan d = Trepan() command = IPythonCommand(d.core.processor) command.proc.frame = sys._getframe() command.proc.setup() if len(sys.argv) > 1: print("Type Python commands and exit to quit.") print(sys.argv[1]) if sys.argv[1] == "-d": print(command.run(["bpython", "-d"])) else: print(command.run(["bpython"])) pass pass pass
gpl-3.0
5,708,448,502,736,158,000
29.769953
88
0.623741
false
4.158629
false
false
false
alcides/rdflib
rdflib/syntax/parsers/rdfa/state.py
1
20868
# -*- coding: utf-8 -*- """ Parser's execution context (a.k.a. state) object and handling. The state includes: - dictionary for namespaces. Keys are the namespace prefixes, values are RDFLib Namespace instances - language, retrieved from C{@xml:lang} - URI base, determined by <base> (or set explicitly). This is a little bit superfluous, because the current RDFa syntax does not make use of C{@xml:base}; ie, this could be a global value. But the structure is prepared to add C{@xml:base} easily, if needed. - options, in the form of an L{Options<pyRdfa.Options>} instance The execution context object is also used to turn relative URI-s and CURIES into real URI references. @summary: RDFa core parser processing step @requires: U{RDFLib package<http://rdflib.net>} @organization: U{World Wide Web Consortium<http://www.w3.org>} @author: U{Ivan Herman<a href="http://www.w3.org/People/Ivan/">} @license: This software is available for use under the U{W3C® SOFTWARE NOTICE AND LICENSE<href="http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231">} @var XHTML_PREFIX: prefix for the XHTML vocabulary namespace @var XHTML_URI: URI prefix of the XHTML vocabulary @var RDFa_PROFILE: the official RDFa profile URI @var RDFa_VERSION: the official version string of RDFa @var usual_protocols: list of "usual" protocols (used to generate warnings when CURIES are not protected) @var _predefined_rel: list of predefined C{@rev} and C{@rel} values that should be mapped onto the XHTML vocabulary URI-s. @var _predefined_property: list of predefined C{@property} values that should be mapped onto the XHTML vocabulary URI-s. (At present, this list is empty, but this has been an ongoing question in the group, so the I{mechanism} of checking is still there.) @var __bnodes: dictionary of blank node names to real blank node @var __empty_bnode: I{The} Bnode to be associated with the CURIE of the form "C{_:}". """ from rdflib.namespace import Namespace, RDF, RDFS from rdflib.term import BNode, URIRef from rdflib.syntax.parsers.rdfa.options import Options, GENERIC_XML, XHTML_RDFA, HTML5_RDFA import re import random import urlparse RDFa_PROFILE = "http://www.w3.org/1999/xhtml/vocab" RDFa_VERSION = "XHTML+RDFa 1.0" RDFa_PublicID = "-//W3C//DTD XHTML+RDFa 1.0//EN" RDFa_SystemID = "http://www.w3.org/MarkUp/DTD/xhtml-rdfa-1.dtd" usual_protocols = ["http", "https", "mailto", "ftp", "urn", "gopher", "tel", "ldap", "doi", "news"] ####Predefined @rel/@rev/@property values # predefined values for the @rel and @rev values. These are considered to be part of a specific # namespace, defined by the RDFa document. # At the moment, there are no predefined @property values, but the code is there in case # some will be defined XHTML_PREFIX = "xhv" XHTML_URI = "http://www.w3.org/1999/xhtml/vocab#" _predefined_rel = ['alternate', 'appendix', 'cite', 'bookmark', 'chapter', 'contents', 'copyright', 'glossary', 'help', 'icon', 'index', 'meta', 'next', 'p3pv1', 'prev', 'role', 'section', 'subsection', 'start', 'license', 'up', 'last', 'stylesheet', 'first', 'top'] _predefined_property = [] #### Managing blank nodes for CURIE-s __bnodes = {} __empty_bnode = BNode() def _get_bnode_from_Curie(var): """ 'Var' gives the string after the coloumn in a CURIE of the form C{_:XXX}. If this variable has been used before, then the corresponding BNode is returned; otherwise a new BNode is created and associated to that value. 
@param var: CURIE BNode identifier @return: BNode """ if len(var) == 0: return __empty_bnode if var in __bnodes: return __bnodes[var] else: retval = BNode() __bnodes[var] = retval return retval #### Quote URI-s import urllib # 'safe' characters for the URI quoting, ie, characters that can safely stay as they are. Other # special characters are converted to their %.. equivalents for namespace prefixes _unquotedChars = ':/\?=#' _warnChars = [' ', '\n', '\r', '\t'] def _quote(uri, options): """ 'quote' a URI, ie, exchange special characters for their '%..' equivalents. Some of the characters may stay as they are (listed in L{_unquotedChars}. If one of the characters listed in L{_warnChars} is also in the uri, an extra warning is also generated. @param uri: URI @param options: @type options: L{Options<pyRdfa.Options>} """ suri = uri.strip() for c in _warnChars: if suri.find(c) != -1: if options != None: options.comment_graph.add_warning('Unusual character in uri:%s; possible error?' % suri) break return urllib.quote(suri, _unquotedChars) #### Core Class definition class ExecutionContext(object): """State at a specific node, including the current set of namespaces in the RDFLib sense, the current language, and the base. The class is also used to interpret URI-s and CURIE-s to produce URI references for RDFLib. @ivar options: reference to the overall options @type ivar: L{Options.Options} @ivar base: the 'base' URI @ivar defaultNS: default namespace @ivar lang: language tag (possibly None) @ivar ns: dictionary of namespaces @type ns: dictionary, each value is an RDFLib Namespace object """ def __init__(self, node, graph, inherited_state=None, base="", options=None): """ @param node: the current DOM Node @param graph: the RDFLib Graph @keyword inherited_state: the state as inherited from upper layers. This inherited_state is mixed with the state information retrieved from the current node. @type inherited_state: L{State.ExecutionContext} @keyword base: string denoting the base URI for the specific node. This overrides the possible base inherited from the upper layers. The current XHTML+RDFa syntax does not allow the usage of C{@xml:base}, but SVG1.2 does, so this is necessary for SVG (and other possible XML dialects that accept C{@xml:base}) @keyword options: invocation option @type options: L{Options<pyRdfa.Options>} """ #----------------------------------------------------------------- # settling the base # note that, strictly speaking, it is not necessary to add the base to the # context, because there is only one place to set it (<base> element of the <header>). # It is done because it is prepared for a possible future change in direction of # accepting xml:base on each element. # At the moment, it is invoked with a 'None' at the top level of parsing, that is # when the <base> element is looked for. if inherited_state: self.base = inherited_state.base self.options = inherited_state.options # for generic XML versions the xml:base attribute should be handled if self.options.host_language == GENERIC_XML and node.hasAttribute("xml:base"): self.base = node.getAttribute("xml:base") else: # this is the branch called from the very top self.base = "" for bases in node.getElementsByTagName("base"): if bases.hasAttribute("href"): self.base = bases.getAttribute("href") continue if self.base == "": self.base = base # this is just to play safe. I believe this branch should actually not happen... 
if options == None: from pyRdfa import Options self.options = Options() else: self.options = options # xml:base is not part of XHTML+RDFa, but it is a valid setting for, say, SVG1.2 if self.options.host_language == GENERIC_XML and node.hasAttribute("xml:base"): self.base = node.getAttribute("xml:base") self.options.comment_graph.set_base_URI(URIRef(_quote(base, self.options))) # check the the presense of the @profile and or @version attribute for the RDFa profile... # This whole branch is, however, irrelevant if the host language is a generic XML one (eg, SVG) if self.options.host_language != GENERIC_XML: doctype = None try: # I am not 100% sure the HTML5 minidom implementation has this, so let us just be # cautious here... doctype = node.ownerDocument.doctype except: pass if doctype == None or not( doctype.publicId == RDFa_PublicID and doctype.systemId == RDFa_SystemID ): # next level: check the version html = node.ownerDocument.documentElement if not( html.hasAttribute("version") and RDFa_VERSION == html.getAttribute("version") ): # see if least the profile has been set # Find the <head> element head = None for index in range(0, html.childNodes.length-1): if html.childNodes.item(index).nodeName == "head": head = html.childNodes.item(index) break if not( head != None and head.hasAttribute("profile") and RDFa_PROFILE in head.getAttribute("profile").strip().split() ): if self.options.host_language == HTML5_RDFA: self.options.comment_graph.add_info("RDFa profile or RFDa version has not been set (for a correct identification of RDFa). This is not a requirement for RDFa, but it is advised to use one of those nevertheless. Note that in the case of HTML5, the DOCTYPE setting may not work...") else: self.options.comment_graph.add_info("None of the RDFa DOCTYPE, RDFa profile, or RFDa version has been set (for a correct identification of RDFa). This is not a requirement for RDFa, but it is advised to use one of those nevertheless.") #----------------------------------------------------------------- # Settling the language tags # check first the lang or xml:lang attribute # RDFa does not allow the lang attribute. HTML5 relies :-( on @lang; # I just want to be prepared here... if options != None and options.host_language == HTML5_RDFA and node.hasAttribute("lang"): self.lang = node.getAttribute("lang") if len(self.lang) == 0 : self.lang = None elif node.hasAttribute("xml:lang"): self.lang = node.getAttribute("xml:lang") if len(self.lang) == 0 : self.lang = None elif inherited_state: self.lang = inherited_state.lang else: self.lang = None #----------------------------------------------------------------- # Handling namespaces # First get the local xmlns declarations/namespaces stuff. dict = {} for i in range(0, node.attributes.length): attr = node.attributes.item(i) if attr.name.find('xmlns:') == 0 : # yep, there is a namespace setting key = attr.localName if key != "" : # exclude the top level xmlns setting... if key == "_": if warning: self.options.comment_graph.add_error("The '_' local CURIE prefix is reserved for blank nodes, and cannot be changed" ) elif key.find(':') != -1: if warning: self.options.comment_graph.add_error("The character ':' is not valid in a CURIE Prefix" ) else : # quote the URI, ie, convert special characters into %.. This is # true, for example, for spaces uri = _quote(attr.value, self.options) # 1. create a new Namespace entry ns = Namespace(uri) # 2. 'bind' it in the current graph to # get a nicer output graph.bind(key, uri) # 3. 
Add an entry to the dictionary dict[key] = ns # See if anything has been collected at all. # If not, the namespaces of the incoming state is # taken over self.ns = {} if len(dict) == 0 and inherited_state: self.ns = inherited_state.ns else: if inherited_state: for k in inherited_state.ns : self.ns[k] = inherited_state.ns[k] # copying the newly found namespace, possibly overwriting # incoming values for k in dict : self.ns[k] = dict[k] else: self.ns = dict # see if the xhtml core vocabulary has been set self.xhtml_prefix = None for key in self.ns.keys(): if XHTML_URI == str(self.ns[key]): self.xhtml_prefix = key break if self.xhtml_prefix == None: if XHTML_PREFIX not in self.ns: self.ns[XHTML_PREFIX] = Namespace(XHTML_URI) self.xhtml_prefix = XHTML_PREFIX else: # the most disagreeable thing, the user has used # the prefix for something else... self.xhtml_prefix = XHTML_PREFIX + '_' + ("%d" % random.randint(1, 1000)) self.ns[self.xhtml_prefix] = Namespace(XHTML_URI) graph.bind(self.xhtml_prefix, XHTML_URI) # extra tricks for unusual usages... # if the 'rdf' prefix is not used, it is artificially added... if "rdf" not in self.ns: self.ns["rdf"] = RDF if "rdfs" not in self.ns: self.ns["rdfs"] = RDFS # Final touch: setting the default namespace... if node.hasAttribute("xmlns"): self.defaultNS = node.getAttribute("xmlns") elif inherited_state and inherited_state.defaultNS != None: self.defaultNS = inherited_state.defaultNS else: self.defaultNS = None def _get_predefined_rels(self, val, warning): """Get the predefined URI value for the C{@rel/@rev} attribute. @param val: attribute name @param warning: whether a warning should be generated or not @type warning: boolean @return: URIRef for the predefined URI (or None) """ vv = val.strip().lower() if vv in _predefined_rel: return self.ns[self.xhtml_prefix][vv] else: if warning: self.options.comment_graph.add_warning("invalid @rel/@rev value: '%s'" % val) return None def _get_predefined_properties(self, val, warning): """Get the predefined value for the C{@property} attribute. @param val: attribute name @param warning: whether a warning should be generated or not @type warning: boolean @return: URIRef for the predefined URI (or None) """ vv = val.strip().lower() if vv in _predefined_property: return self.ns[self.xhtml_prefix][vv] else: if warning: self.options.comment_graph.add_warning("invalid @property value: '%s'" % val) return None def get_resource(self, val, rel=False, prop=False, warning=True): """Get a resource for a CURIE. The input argument is a CURIE; this is interpreted via the current namespaces and the corresponding URI Reference is returned @param val: string of the form "prefix:lname" @keyword rel: whether the predefined C{@rel/@rev} values should also be interpreted @keyword prop: whether the predefined C{@property} values should also be interpreted @return: an RDFLib URIRef instance (or None) """ if val == "": return None elif val.find(":") != -1: key = val.split(":", 1)[0] lname = val.split(":", 1)[1] if key == "_": # A possible error: this method is invoked for property URI-s, which # should not refer to a blank node. 
This case is checked and a possible # error condition is handled self.options.comment_graph.add_error("Blank node CURIE cannot be used in property position: _:%s" % lname) return None if key == "": # This is the ":blabla" case key = self.xhtml_prefix else: # if the resources correspond to a @rel or @rev or @property, then there # may be one more possibility here, namely that it is one of the # predefined values if rel: return self._get_predefined_rels(val, warning) elif prop: return self._get_predefined_properties(val, warning) else: self.options.comment_graph.add_warning("Invalid CURIE (without prefix): '%s'" % val) return None if key not in self.ns: self.options.comment_graph.add_error("CURIE used with non declared prefix: %s" % key) return None else: if lname == "": return URIRef(str(self.ns[key])) else: return self.ns[key][lname] def get_resources(self, val, rel=False, prop=False): """Get a series of resources encoded in CURIE-s. The input argument is a list of CURIE-s; these are interpreted via the current namespaces and the corresponding URI References are returned. @param val: strings of the form prefix':'lname, separated by space @keyword rel: whether the predefined C{@rel/@rev} values should also be interpreted @keyword prop: whether the predefined C{@property} values should also be interpreted @return: a list of RDFLib URIRef instances (possibly empty) """ val.strip() resources = [ self.get_resource(v, rel, prop) for v in val.split() if v != None ] return [ r for r in resources if r != None ] def get_URI_ref(self, val): """Create a URI RDFLib resource for a URI. The input argument is a URI. It is checked whether it is a local reference with a '#' or not. If yes, a URIRef combined with the stored base value is returned. In both cases a URIRef for a full URI is created and returned @param val: URI string @return: an RDFLib URIRef instance """ if val == "": return URIRef(self.base) elif val[0] == '[' and val[-1] == ']': self.options.comment_graph.add_error("Illegal usage of CURIE: %s" % val) return None else: return URIRef(urlparse.urljoin(self.base, val)) def get_Curie_ref(self, val): """Create a URI RDFLib resource for a CURIE. The input argument is a CURIE. This means that it is - either of the form [a:b] where a:b should be resolved as an 'unprotected' CURIE, or - it is a traditional URI (relative or absolute) If the second case the URI value is also compared to 'usual' URI protocols ('http', 'https', 'ftp', etc) (see L{usual_protocols}). If there is no match, a warning is generated (indeed, a frequent mistake in authoring RDFa is to forget the '[' and ']' characters to "protect" CURIE-s.) @param val: CURIE string @return: an RDFLib URIRef instance """ if len(val) == 0: return URIRef(self.base) elif val[0] == "[": if val[-1] == "]": curie = val[1:-1] # A possible Blank node reference should be separated here: if len(curie) >= 2 and curie[0] == "_" and curie[1] == ":": return _get_bnode_from_Curie(curie[2:]) else: return self.get_resource(val[1:-1]) else: # illegal CURIE... self.options.comment_graph.add_error("Illegal CURIE: %s" % val) return None else: # check the value, to see if an error may have been made... # Usual protocol values in the URI v = val.strip().lower() protocol = urlparse.urlparse(val)[0] if protocol != "" and protocol not in usual_protocols: err = "Possible URI error with '%s'; the intention may have been to use a protected CURIE" % val self.options.comment_graph.add_warning(err) return self.get_URI_ref(val)
bsd-3-clause
117,368,649,494,514,380
47.983568
312
0.592658
false
4.122284
false
false
false
Connexions/openstax-cms
openstax/settings/base.py
1
11195
# Django settings for openstax project. import os import sys import raven import logging.config from django.utils.log import DEFAULT_LOGGING PROJECT_ROOT = os.path.join(os.path.dirname(__file__), '..', '..') BASE_DIR = PROJECT_ROOT # check if running local dev server - else default to DEBUG=False if len(sys.argv) > 1: DEBUG = (sys.argv[1] == 'runserver') else: DEBUG = False # These should both be set to true. The openstax.middleware will handle resolving the URL # without a redirect if needed. APPEND_SLASH = True WAGTAIL_APPEND_SLASH=True # urls.W002 warns about slashes at the start of URLs. But we need those so # we don't have to have slashes at the end of URLs. So ignore. SILENCED_SYSTEM_CHECKS = ['urls.W002'] ADMINS = ( ('Michael Harrison', '[email protected]'), ) # Default to dummy email backend. Configure dev/production/local backend EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql', 'NAME': 'oscms_prodcms', } } # Local time zone for this installation. TIME_ZONE = 'America/Chicago' # Language code for this installation. LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale. # Note that with this set to True, Wagtail will fall back on using numeric dates # in date fields, as opposed to 'friendly' dates like "24 Sep 2013", because # Python's strptime doesn't support localised month names: https://code.djangoproject.com/ticket/13339 USE_L10N = False # If you set this to False, Django will not use timezone-aware datetimes. USE_TZ = True DATE_FORMAT = 'j F Y' # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/home/media/media.lawrence.com/media/" MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media') # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: "http://media.lawrence.com/media/", "http://example.com/media/" MEDIA_URL = '/media/' # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/home/media/media.lawrence.com/static/" STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static') # URL prefix for static files. # Example: "http://media.lawrence.com/static/" STATIC_URL = '/static/' # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = [ 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 'compressor.finders.CompressorFinder', ] # ** You would never normally put the SECRET_KEY in a public repository, # ** however this is a demo app so we're using the default settings. # ** Don't use this key in any non-demo usage! # Make this unique, and don't share it with anybody. 
SECRET_KEY = 'wq21wtjo3@d_qfjvd-#td!%7gfy2updj2z+nev^k$iy%=m4_tr' MIDDLEWARE = [ 'whitenoise.middleware.WhiteNoiseMiddleware', 'openstax.middleware.CommonMiddlewareAppendSlashWithoutRedirect', 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'wagtail.contrib.redirects.middleware.RedirectMiddleware', ] AUTHENTICATION_BACKENDS = ( 'oxauth.backend.OpenStax', 'django.contrib.auth.backends.ModelBackend', ) SOCIAL_AUTH_PIPELINE = ( 'social_core.pipeline.social_auth.social_details', 'social_core.pipeline.social_auth.social_uid', #'social_core.pipeline.social_auth.social_user', 'oxauth.pipelines.social_user', 'social_core.pipeline.user.create_user', 'oxauth.pipelines.save_profile', 'oxauth.pipelines.update_email', 'social_core.pipeline.social_auth.associate_user', 'social_core.pipeline.social_auth.load_extra_data', 'social_core.pipeline.user.user_details', ) IMPORT_USER_PIPELINE = ( 'social_django.pipeline.social_auth.social_user', 'social_django.pipeline.user.create_user', 'oxauth.pipelines.save_profile', 'social_django.pipeline.social_auth.associate_user', 'social_django.pipeline.user.user_details', ) SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.contrib.auth.context_processors.auth', 'django.template.context_processors.debug', 'django.template.context_processors.i18n', 'django.template.context_processors.media', 'django.template.context_processors.static', 'django.template.context_processors.tz', 'django.contrib.messages.context_processors.messages', 'django.template.context_processors.request', 'social_django.context_processors.backends', 'social_django.context_processors.login_redirect', ], }, }, ] ROOT_URLCONF = 'openstax.urls' # Python dotted path to the WSGI application used by Django's runserver. 
WSGI_APPLICATION = 'openstax.wsgi.application' INSTALLED_APPS = [ 'scout_apm.django', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.postgres', 'django.contrib.admin', 'django.contrib.sitemaps', # contrib 'compressor', 'taggit', 'modelcluster', 'rest_framework', 'rest_framework.authtoken', 'rest_auth', 'raven.contrib.django.raven_compat', 'django_filters', 'social_django', 'storages', 'django_ses', 'import_export', 'django_extensions', 'inline_actions', # custom 'accounts', 'admin_templates', # this overrides the admin templates 'api', 'pages', 'books', 'news', 'allies', 'snippets', 'salesforce', 'mail', 'global_settings', 'errata', 'extraadminfilters', 'rangefilter', 'reversion', 'redirects', 'oxauth', 'events', 'webinars', # wagtail 'wagtail.core', 'wagtail.admin', 'wagtail.documents', 'wagtail.snippets', 'wagtail.users', 'wagtail.images', 'wagtail.embeds', 'wagtail.search', 'wagtail.contrib.redirects', 'wagtail.contrib.forms', 'wagtail.sites', #'wagtail.contrib.wagtailapi', 'wagtail.api.v2', 'wagtail.contrib.settings', 'wagtail.contrib.modeladmin', 'wagtailimportexport', 'flags', 'duplicatebooks' ] EMAIL_SUBJECT_PREFIX = '[openstax] ' INTERNAL_IPS = ('127.0.0.1', '10.0.2.2') # django-compressor settings COMPRESS_PRECOMPILERS = ( ('text/x-scss', 'django_libsass.SassCompiler'), ) #django rest framework settings REST_FRAMEWORK = { 'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',), 'DEFAULT_AUTHENTICATION_CLASSES': ( 'rest_framework.authentication.TokenAuthentication', ), 'DEFAULT_PERMISSION_CLASSES': ( 'rest_framework.permissions.AllowAny', ), # Schools API is timing out, use this to paginate the results #'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination', #'PAGE_SIZE': 100 } LOGGING_CONFIG = None LOGLEVEL = os.environ.get('LOGLEVEL', 'error').upper() logging.config.dictConfig({ 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'default': { # exact format is not important, this is the minimum information 'format': '%(asctime)s %(name)-12s %(levelname)-8s %(message)s', }, 'django.server': DEFAULT_LOGGING['formatters']['django.server'], }, 'handlers': { #disable logs set with null handler 'null': { 'class': 'logging.NullHandler', }, # console logs to stderr 'console': { 'class': 'logging.StreamHandler', 'formatter': 'default', }, 'django.server': DEFAULT_LOGGING['handlers']['django.server'], }, 'loggers': { # default for all undefined Python modules '': { 'level': 'ERROR', 'handlers': ['console'], }, # Our application code 'openstax': { 'level': LOGLEVEL, 'handlers': ['console'], 'propagate': False, }, 'django.security.DisallowedHost': { 'handlers': ['null'], 'propagate': False, }, 'django.request': { 'level': 'ERROR', 'handlers': ['console'], 'propagate': False, }, # Default runserver request logging 'django.server': DEFAULT_LOGGING['loggers']['django.server'], }, }) # FLAGS FLAGS = { 'hide_faculty_resources': [], } # WAGTAIL SETTINGS WAGTAIL_SITE_NAME = 'openstax' # Wagtail API number of results WAGTAILAPI_LIMIT_MAX = None WAGTAILUSERS_PASSWORD_ENABLED = False WAGTAIL_USAGE_COUNT_ENABLED = False # used in page.models to retrieve book information CNX_ARCHIVE_URL = 'https://archive.cnx.org' # Server host (used to populate links in the email) HOST_LINK = 'https://openstax.org' WAGTAIL_GRAVATAR_PROVIDER_URL = '//www.gravatar.com/avatar' MAPBOX_TOKEN = '' # should be the sk from mapbox, put in the appropriate 
settings file # Openstax Accounts ACCOUNTS_URL = 'https://accounts.openstax.org' AUTHORIZATION_URL = 'https://accounts.openstax.org/oauth/authorize' ACCESS_TOKEN_URL = 'https://accounts.openstax.org/oauth/token' USER_QUERY = 'https://accounts.openstax.org/api/user?' USERS_QUERY = 'https://accounts.openstax.org/api/users?' SOCIAL_AUTH_LOGIN_REDIRECT_URL = 'https://openstax.org' SOCIAL_AUTH_SANITIZE_REDIRECTS = False SSO_COOKIE_NAME = 'oxa' BYPASS_SSO_COOKIE_CHECK = False SIGNATURE_PUBLIC_KEY = "-----BEGIN PUBLIC KEY-----\nMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDjvO/E8lO+ZJ7JMglbJyiF5/Ae\nIIS2NKbIAMLBMPVBQY7mSqo6j/yxdVNKZCzYAMDWc/VvEfXQQJ2ipIUuDvO+SOwz\nMewQ70hC71hC4s3dmOSLnixDJlnsVpcnKPEFXloObk/fcpK2Vw27e+yY+kIFmV2X\nzrvTnmm9UJERp6tVTQIDAQAB\n-----END PUBLIC KEY-----\n" ENCRYPTION_PRIVATE_KEY = "c6d9b8683fddce8f2a39ac0565cf18ee" ENCRYPTION_METHOD = 'A256GCM' SIGNATURE_ALGORITHM = 'RS256' DATA_UPLOAD_MAX_NUMBER_FIELDS = 10240 from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True STATIC_HOST = 'https://d3bxy9euw4e147.cloudfront.net' if not DEBUG else '' STATIC_URL = STATIC_HOST + '/static/' AWS_HEADERS = { 'Access-Control-Allow-Origin': '*' } # to override any of the above settings use a local.py file in this directory try: from .local import * except ImportError: pass
agpl-3.0
-222,336,772,868,262,720
30.183844
303
0.677177
false
3.424595
false
false
false
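base.py ends by importing an optional local.py for machine-specific overrides. A hypothetical developer override might look like the following; every value here is an example for local development, not a documented OpenStax configuration.

# openstax/settings/local.py (picked up by the trailing "from .local import *")
DEBUG = True
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'oscms_dev',        # local database name, example only
        'USER': 'postgres',
        'HOST': 'localhost',
    }
}
SECRET_KEY = 'dev-only-secret-key'  # never reuse the committed demo key outside local runs
MAPBOX_TOKEN = 'sk.example'         # base.py notes the mapbox sk token belongs in this file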
LTD-Beget/sprutio-rpc
lib/FileManager/workers/local/moveToWebDav.py
1
5842
import os import shutil import threading import time import traceback from lib.FileManager.FM import REQUEST_DELAY from lib.FileManager.WebDavConnection import WebDavConnection from lib.FileManager.workers.baseWorkerCustomer import BaseWorkerCustomer class MoveToWebDav(BaseWorkerCustomer): def __init__(self, source, target, paths, overwrite, *args, **kwargs): super(MoveToWebDav, self).__init__(*args, **kwargs) self.source = source self.target = target self.paths = paths self.overwrite = overwrite self.operation_progress = { "total_done": False, "total": 0, "operation_done": False, "processed": 0, "previous_percent": 0 } def run(self): try: self.preload() success_paths = [] error_paths = [] source_path = self.source.get('path') target_path = self.target.get('path') if source_path is None: raise Exception("Source path empty") if target_path is None: raise Exception("Target path empty") source_path = self.get_abs_path(source_path) webdav = WebDavConnection.create(self.login, self.target.get('server_id'), self.logger) self.logger.info("MoveToWebDav process run source = %s , target = %s" % (source_path, target_path)) t_total = threading.Thread(target=self.get_total, args=(self.operation_progress, self.paths)) t_total.start() for path in self.paths: try: abs_path = self.get_abs_path(path) file_basename = os.path.basename(abs_path) uploading_path = abs_path if os.path.isdir(abs_path): uploading_path += '/' file_basename += '/' result_upload = webdav.upload(uploading_path, target_path, self.overwrite, file_basename, self.uploading_progress) if result_upload['success']: success_paths.append(path) if os.path.isfile(abs_path): os.remove(abs_path) elif os.path.islink(abs_path): os.unlink(abs_path) elif os.path.isdir(abs_path): shutil.rmtree(abs_path) else: error_paths.append(abs_path) break except Exception as e: self.logger.error( "Error copy %s , error %s , %s" % (str(path), str(e), traceback.format_exc())) error_paths.append(path) self.operation_progress["operation_done"] = True result = { "success": success_paths, "errors": error_paths } # иначе пользователям кажется что скопировалось не полностью ) progress = { 'percent': round(float(len(success_paths)) / float(len(self.paths)), 2), 'text': str(int(round(float(len(success_paths)) / float(len(self.paths)), 2) * 100)) + '%' } time.sleep(REQUEST_DELAY) self.on_success(self.status_id, data=result, progress=progress, pid=self.pid, pname=self.name) except Exception as e: result = { "error": True, "message": str(e), "traceback": traceback.format_exc() } self.on_error(self.status_id, result, pid=self.pid, pname=self.name) def get_total(self, progress_object, paths, count_files=True): self.logger.debug("start get_total() files = %s" % count_files) for path in paths: try: abs_path = self.get_abs_path(path) for current, dirs, files in os.walk(abs_path): if count_files: progress_object["total"] += len(files) if os.path.isfile(abs_path): progress_object["total"] += 1 except Exception as e: self.logger.error("Error get_total file %s , error %s" % (str(path), str(e))) continue progress_object["total_done"] = True self.logger.debug("done get_total(), found %s objects" % progress_object.get("total")) return def uploading_progress(self, download_t, download_d, upload_t, upload_d): try: percent_upload = 0 if upload_t != 0: percent_upload = round(float(upload_d) / float(upload_t), 2) if percent_upload != self.operation_progress.get("previous_percent"): if percent_upload == 0 and self.operation_progress.get("previous_percent") != 0: 
self.operation_progress["processed"] += 1 self.operation_progress["previous_percent"] = percent_upload total_percent = percent_upload + self.operation_progress.get("processed") percent = round(float(total_percent) / float(self.operation_progress.get("total")), 2) progress = { 'percent': percent, 'text': str(int(percent * 100)) + '%' } self.on_running(self.status_id, progress=progress, pid=self.pid, pname=self.name) except Exception as ex: self.logger.error("Error in MoveToWebDav uploading_progress(): %s, traceback = %s" % (str(ex), traceback.format_exc()))
gpl-3.0
-9,210,427,962,494,208,000
37.85906
111
0.518135
false
4.254225
false
false
false
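uploading_progress() reports overall progress as the current file's upload fraction plus the count of files already processed, divided by the total. A stand-alone version of that arithmetic, with made-up numbers, is below.

def overall_progress(upload_d, upload_t, processed, total):
    # same formula MoveToWebDav.uploading_progress() feeds to the progress callback
    percent_upload = round(float(upload_d) / float(upload_t), 2) if upload_t else 0
    return round((percent_upload + processed) / float(total), 2)


# 3 of 10 files finished and the 4th is 40% uploaded -> 0.34, reported as "34%"
print(overall_progress(40, 100, 3, 10))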
dhylands/upy-examples
rotary.py
1
2290
import pyb class rotary(): def __init__(self,Apin='X21',Bpin='X22'): self.B = pyb.Pin(Bpin) self.A = pyb.Pin(Apin) self.prevA = self.A.value() self.prevB = self.B.value() self.CWcount = 0 self.CCWcount = 0 self.position = 0 self.Bint = pyb.ExtInt(self.B,pyb.ExtInt.IRQ_RISING_FALLING,pyb.Pin.PULL_UP,self.callback) self.Aint = pyb.ExtInt(self.A,pyb.ExtInt.IRQ_RISING_FALLING,pyb.Pin.PULL_UP,self.callback) def callback(self,line): # self.Bint.disable() # self.Aint.disable() A = self.A.value() B = self.B.value() #previous state 11 if self.prevA==1 and self.prevB==1: if A==1 and B==0: #print( "CCW 11 to 10") self.CCWcount += 1 self.prevA = A self.prevB = B elif A==0 and B==0: #print ("CW 11 to 00") self.CWcount += 1 self.prevA = A self.prevB = B #previous state 10 elif self.prevA==1 and self.prevB==0: if A==1 and B==1: #print ("CW 10 to 11") self.CWcount += 1 self.prevA = A self.prevB = B elif A==0 and B==0: #print ("CCW 10 to 00") self.CCWcount += 1 self.prevA = A self.prevB = B #previous state 00 elif self.prevA==0 and self.prevB==0: if A==1 and B==1: #print ("CCW 00 to 11") self.CCWcount += 1 self.prevA = A self.prevB = B elif A==1 and B==0: #print ("CW 00 to 10") self.CWcount+=1 self.prevA = A self.prevB = B # self.Bint.enable() # self.Aint.enable() if A==1 and B==1: if self.CWcount>=3 and self.CWcount>self.CCWcount: self.position+=1 print (self.position) if self.CCWcount>=3 and self.CCWcount>self.CWcount: self.position-=1 print(self.position) self.CCWcount = 0 self.CWcount = 0
mit
4,238,425,825,374,999,600
26.926829
98
0.448472
false
3.453997
false
false
false
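A minimal way to exercise the rotary class above on a pyboard; it assumes the file is saved as rotary.py on the board and uses the default X21/X22 pins, so it only runs under MicroPython where the pyb module exists.

import pyb

from rotary import rotary

enc = rotary()             # defaults to Apin='X21', Bpin='X22'
while True:
    pyb.delay(100)         # the edge interrupts update enc.position in the background
    print('position:', enc.position)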
mozvip/Sick-Beard
sickbeard/versionChecker.py
1
18840
# Author: Nic Wolfe <[email protected]> # URL: http://code.google.com/p/sickbeard/ # # This file is part of Sick Beard. # # Sick Beard is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Sick Beard is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Sick Beard. If not, see <http://www.gnu.org/licenses/>. import sickbeard from sickbeard import version, ui from sickbeard import logger from sickbeard import scene_exceptions from sickbeard.exceptions import ex import os, platform, shutil import subprocess, re import urllib, urllib2 import zipfile, tarfile from urllib2 import URLError import gh_api as github class CheckVersion(): """ Version check class meant to run as a thread object with the SB scheduler. """ def __init__(self): self.install_type = self.find_install_type() if self.install_type == 'win': self.updater = WindowsUpdateManager() elif self.install_type == 'git': self.updater = GitUpdateManager() elif self.install_type == 'source': self.updater = SourceUpdateManager() else: self.updater = None def run(self): self.check_for_new_version() # refresh scene exceptions too scene_exceptions.retrieve_exceptions() def find_install_type(self): """ Determines how this copy of SB was installed. returns: type of installation. Possible values are: 'win': any compiled windows build 'git': running from source using git 'source': running from source without git """ # check if we're a windows build if version.SICKBEARD_VERSION.startswith('build '): install_type = 'win' elif os.path.isdir(os.path.join(sickbeard.PROG_DIR, '.git')): install_type = 'git' else: install_type = 'source' return install_type def check_for_new_version(self, force=False): """ Checks the internet for a newer version. returns: bool, True for new version or False for no new version. force: if true the VERSION_NOTIFY setting will be ignored and a check will be forced """ if not sickbeard.VERSION_NOTIFY and not force: logger.log(u"Version checking is disabled, not checking for the newest version") return False logger.log(u"Checking if "+self.install_type+" needs an update") if not self.updater.need_update(): logger.log(u"No update needed") if force: ui.notifications.message('No update needed') return False self.updater.set_newest_text() return True def update(self): if self.updater.need_update(): return self.updater.update() class UpdateManager(): def get_update_url(self): return sickbeard.WEB_ROOT+"/home/update/?pid="+str(sickbeard.PID) class WindowsUpdateManager(UpdateManager): def __init__(self): self._cur_version = None self._cur_commit_hash = None self._newest_version = None self.gc_url = 'http://code.google.com/p/sickbeard/downloads/list' self.version_url = 'https://raw.github.com/sarakha63/Sick-Beard/windows_binaries/updates.txt' def _find_installed_version(self): return int(sickbeard.version.SICKBEARD_VERSION[6:]) def _find_newest_version(self, whole_link=False): """ Checks git for the newest Windows binary build. Returns either the build number or the entire build URL depending on whole_link's value. whole_link: If True, returns the entire URL to the release. 
If False, it returns only the build number. default: False """ regex = ".*SickBeard\-win32\-alpha\-build(\d+)(?:\.\d+)?\.zip" svnFile = urllib.urlopen(self.version_url) for curLine in svnFile.readlines(): logger.log(u"checking line "+curLine, logger.DEBUG) match = re.match(regex, curLine) if match: logger.log(u"found a match", logger.DEBUG) if whole_link: return curLine.strip() else: return int(match.group(1)) return None def need_update(self): self._cur_version = self._find_installed_version() self._newest_version = self._find_newest_version() logger.log(u"newest version: "+repr(self._newest_version), logger.DEBUG) if self._newest_version and self._newest_version > self._cur_version: return True def set_newest_text(self): new_str = 'There is a <a href="'+self.gc_url+'" onclick="window.open(this.href); return false;">newer version available</a> (build '+str(self._newest_version)+')' new_str += "&mdash; <a href=\""+self.get_update_url()+"\">Update Now</a>" sickbeard.NEWEST_VERSION_STRING = new_str def update(self): new_link = self._find_newest_version(True) logger.log(u"new_link: " + repr(new_link), logger.DEBUG) if not new_link: logger.log(u"Unable to find a new version link on google code, not updating") return False # download the zip try: logger.log(u"Downloading update file from "+str(new_link)) (filename, headers) = urllib.urlretrieve(new_link) #@UnusedVariable # prepare the update dir sb_update_dir = os.path.join(sickbeard.PROG_DIR, 'sb-update') logger.log(u"Clearing out update folder "+sb_update_dir+" before unzipping") if os.path.isdir(sb_update_dir): shutil.rmtree(sb_update_dir) # unzip it to sb-update logger.log(u"Unzipping from "+str(filename)+" to "+sb_update_dir) update_zip = zipfile.ZipFile(filename, 'r') update_zip.extractall(sb_update_dir) update_zip.close() # find update dir name update_dir_contents = os.listdir(sb_update_dir) if len(update_dir_contents) != 1: logger.log("Invalid update data, update failed. Maybe try deleting your sb-update folder?", logger.ERROR) return False content_dir = os.path.join(sb_update_dir, update_dir_contents[0]) old_update_path = os.path.join(content_dir, 'updater.exe') new_update_path = os.path.join(sickbeard.PROG_DIR, 'updater.exe') logger.log(u"Copying new update.exe file from "+old_update_path+" to "+new_update_path) shutil.move(old_update_path, new_update_path) # delete the zip logger.log(u"Deleting zip file from "+str(filename)) os.remove(filename) except Exception, e: logger.log(u"Error while trying to update: "+ex(e), logger.ERROR) return False return True class GitUpdateManager(UpdateManager): def __init__(self): self._cur_commit_hash = None self._newest_commit_hash = None self._num_commits_behind = 0 self.git_url = 'http://code.google.com/p/sickbeard/downloads/list' self.branch = self._find_git_branch() def _git_error(self): error_message = 'Unable to find your git executable - either delete your .git folder and run from source OR <a href="http://code.google.com/p/sickbeard/wiki/AdvancedSettings" onclick="window.open(this.href); return false;">set git_path in your config.ini</a> to enable updates.' 
sickbeard.NEWEST_VERSION_STRING = error_message return None def _run_git(self, args): if sickbeard.GIT_PATH: git_locations = ['"'+sickbeard.GIT_PATH+'"'] else: git_locations = ['git'] # osx people who start SB from launchd have a broken path, so try a hail-mary attempt for them if platform.system().lower() == 'darwin': git_locations.append('/usr/local/git/bin/git') output = err = None for cur_git in git_locations: cmd = cur_git+' '+args try: logger.log(u"Executing "+cmd+" with your shell in "+sickbeard.PROG_DIR, logger.DEBUG) p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, cwd=sickbeard.PROG_DIR) output, err = p.communicate() logger.log(u"git output: "+output, logger.DEBUG) except OSError: logger.log(u"Command "+cmd+" didn't work, couldn't find git.") continue if p.returncode != 0 or 'not found' in output or "not recognized as an internal or external command" in output: logger.log(u"Unable to find git with command "+cmd, logger.DEBUG) output = None elif 'fatal:' in output or err: logger.log(u"Git returned bad info, are you sure this is a git installation?", logger.ERROR) output = None elif output: break return (output, err) def _find_installed_version(self): """ Attempts to find the currently installed version of Sick Beard. Uses git show to get commit version. Returns: True for success or False for failure """ output, err = self._run_git('rev-parse HEAD') #@UnusedVariable if not output: return self._git_error() logger.log(u"Git output: "+str(output), logger.DEBUG) cur_commit_hash = output.strip() if not re.match('^[a-z0-9]+$', cur_commit_hash): logger.log(u"Output doesn't look like a hash, not using it", logger.ERROR) return self._git_error() self._cur_commit_hash = cur_commit_hash return True def _find_git_branch(self): branch_info = self._run_git('symbolic-ref -q HEAD') if not branch_info or not branch_info[0]: return 'master' branch = branch_info[0].strip().replace('refs/heads/', '', 1) return branch or 'master' def _check_github_for_update(self): """ Uses pygithub to ask github if there is a newer version that the provided commit hash. If there is a newer version it sets Sick Beard's version text. 
commit_hash: hash that we're checking against """ self._num_commits_behind = 0 self._newest_commit_hash = None gh = github.GitHub() # find newest commit for curCommit in gh.commits('sarakha63', 'Sick-Beard', self.branch): if not self._newest_commit_hash: self._newest_commit_hash = curCommit['sha'] if not self._cur_commit_hash: break if curCommit['sha'] == self._cur_commit_hash: break self._num_commits_behind += 1 logger.log(u"newest: "+str(self._newest_commit_hash)+" and current: "+str(self._cur_commit_hash)+" and num_commits: "+str(self._num_commits_behind), logger.DEBUG) def set_newest_text(self): # if we're up to date then don't set this if self._num_commits_behind == 100: message = "or else you're ahead of master" elif self._num_commits_behind > 0: message = "you're %d commit" % self._num_commits_behind if self._num_commits_behind > 1: message += 's' message += ' behind' else: return if self._newest_commit_hash: url = 'http://github.com/sarakha63/Sick-Beard/compare/'+self._cur_commit_hash+'...'+self._newest_commit_hash else: url = 'http://github.com/sarakha63/Sick-Beard/commits/' new_str = 'There is a <a href="'+url+'" onclick="window.open(this.href); return false;">newer version available</a> ('+message+')' new_str += "&mdash; <a href=\""+self.get_update_url()+"\">Update Now</a>" sickbeard.NEWEST_VERSION_STRING = new_str def need_update(self): self._find_installed_version() try: self._check_github_for_update() except Exception, e: logger.log(u"Unable to contact github, can't check for update: "+repr(e), logger.ERROR) return False logger.log(u"After checking, cur_commit = "+str(self._cur_commit_hash)+", newest_commit = "+str(self._newest_commit_hash)+", num_commits_behind = "+str(self._num_commits_behind), logger.DEBUG) if self._num_commits_behind > 0: return True return False def update(self): """ Calls git pull origin <branch> in order to update Sick Beard. Returns a bool depending on the call's success. """ self._run_git('config remote.origin.url git://github.com/sarakha63/Sick-Beard.git') self._run_git('stash') output, err = self._run_git('pull git://github.com/sarakha63/Sick-Beard.git '+self.branch) #@UnusedVariable if not output: return self._git_error() pull_regex = '(\d+) .+,.+(\d+).+\(\+\),.+(\d+) .+\(\-\)' (files, insertions, deletions) = (None, None, None) for line in output.split('\n'): if 'Already up-to-date.' 
in line: logger.log(u"No update available, not updating") logger.log(u"Output: "+str(output)) return False elif line.endswith('Aborting.'): logger.log(u"Unable to update from git: "+line, logger.ERROR) logger.log(u"Output: "+str(output)) return False match = re.search(pull_regex, line) if match: (files, insertions, deletions) = match.groups() break if None in (files, insertions, deletions): logger.log(u"Didn't find indication of success in output, assuming git pull failed", logger.ERROR) logger.log(u"Output: "+str(output)) return False return True class SourceUpdateManager(GitUpdateManager): def _find_installed_version(self): version_file = os.path.join(sickbeard.PROG_DIR, 'version.txt') if not os.path.isfile(version_file): self._cur_commit_hash = None return fp = open(version_file, 'r') self._cur_commit_hash = fp.read().strip(' \n\r') fp.close() if not self._cur_commit_hash: self._cur_commit_hash = None def need_update(self): parent_result = GitUpdateManager.need_update(self) if not self._cur_commit_hash: return True else: return parent_result def set_newest_text(self): if not self._cur_commit_hash: logger.log(u"Unknown current version, don't know if we should update or not", logger.DEBUG) new_str = "Unknown version: If you've never used the Sick Beard upgrade system then I don't know what version you have." new_str += "&mdash; <a href=\""+self.get_update_url()+"\">Update Now</a>" sickbeard.NEWEST_VERSION_STRING = new_str else: GitUpdateManager.set_newest_text(self) def update(self): """ Downloads the latest source tarball from github and installs it over the existing version. """ tar_download_url = 'https://github.com/sarakha63/Sick-Beard/tarball/'+version.SICKBEARD_VERSION sb_update_dir = os.path.join(sickbeard.PROG_DIR, 'sb-update') version_path = os.path.join(sickbeard.PROG_DIR, 'version.txt') # retrieve file try: logger.log(u"Downloading update from "+tar_download_url) data = urllib2.urlopen(tar_download_url) except (IOError, URLError): logger.log(u"Unable to retrieve new version from "+tar_download_url+", can't update", logger.ERROR) return False download_name = data.geturl().split('/')[-1].split('?')[0] tar_download_path = os.path.join(sickbeard.PROG_DIR, download_name) # save to disk f = open(tar_download_path, 'wb') f.write(data.read()) f.close() # extract to temp folder logger.log(u"Extracting file "+tar_download_path) tar = tarfile.open(tar_download_path) tar.extractall(sb_update_dir) tar.close() # delete .tar.gz logger.log(u"Deleting file "+tar_download_path) os.remove(tar_download_path) # find update dir name update_dir_contents = [x for x in os.listdir(sb_update_dir) if os.path.isdir(os.path.join(sb_update_dir, x))] if len(update_dir_contents) != 1: logger.log(u"Invalid update data, update failed: "+str(update_dir_contents), logger.ERROR) return False content_dir = os.path.join(sb_update_dir, update_dir_contents[0]) # walk temp folder and move files to main folder for dirname, dirnames, filenames in os.walk(content_dir): #@UnusedVariable dirname = dirname[len(content_dir)+1:] for curfile in filenames: old_path = os.path.join(content_dir, dirname, curfile) new_path = os.path.join(sickbeard.PROG_DIR, dirname, curfile) if os.path.isfile(new_path): os.remove(new_path) os.renames(old_path, new_path) # update version.txt with commit hash try: ver_file = open(version_path, 'w') ver_file.write(self._newest_commit_hash) ver_file.close() except IOError, e: logger.log(u"Unable to write version file, update not complete: "+ex(e), logger.ERROR) return False return True
gpl-3.0
-4,442,054,767,377,110,000
34.941176
286
0.578556
false
4.001699
false
false
false
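The versionChecker record above decides between three update paths (Windows build number, git commit hash, source tarball) and, for git installs, counts how many commits the local checkout is behind. Below is a minimal hedged sketch of that behind-count idea using plain `git rev-list`; it is not the record's GitUpdateManager (which shells out with shell=True and walks the GitHub commits API), and the repository path, remote, and branch names are placeholders.

# Hedged sketch, not the record's implementation: count how far a checkout
# lags behind its remote tracking branch, assuming only that git is on PATH.
import subprocess

def commits_behind(repo_dir, remote="origin", branch="master"):
    """Return (local_hash, behind_count); either element is None on failure."""
    def git(*args):
        try:
            done = subprocess.run(["git"] + list(args), cwd=repo_dir,
                                  capture_output=True, text=True, check=True)
            return done.stdout.strip()
        except (OSError, subprocess.CalledProcessError):
            return None

    local = git("rev-parse", "HEAD")
    if local is None:
        return None, None
    if git("fetch", remote) is None:          # refresh remote refs first
        return local, None
    behind = git("rev-list", "--count", "HEAD.." + remote + "/" + branch)
    return local, (int(behind) if behind is not None else None)

# Example (repository path is a placeholder):
# print(commits_behind("/path/to/checkout"))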
Transkribus/TranskribusDU
TranskribusDU/gcn/avg_checkpoints.py
1
4652
# Taken from https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/avg_checkpoints.py # coding=utf-8 # Copyright 2018 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Script to average values of variables in a list of checkpoint files.""" from __future__ import division from __future__ import print_function import os # Dependency imports import numpy as np import six from six.moves import zip # pylint: disable=redefined-builtin import tensorflow as tf flags = tf.flags FLAGS = flags.FLAGS flags.DEFINE_string("checkpoints", "", "Comma-separated list of checkpoints to average.") flags.DEFINE_integer("num_last_checkpoints", 0, "Averages the last N saved checkpoints." " If the checkpoints flag is set, this is ignored.") flags.DEFINE_string("prefix", "", "Prefix (e.g., directory) to append to each checkpoint.") flags.DEFINE_string("output_path", "/tmp/averaged.ckpt", "Path to output the averaged checkpoint to.") def checkpoint_exists(path): return (tf.gfile.Exists(path) or tf.gfile.Exists(path + ".meta") or tf.gfile.Exists(path + ".index")) def main(_): if FLAGS.checkpoints: # Get the checkpoints list from flags and run some basic checks. checkpoints = [c.strip() for c in FLAGS.checkpoints.split(",")] checkpoints = [c for c in checkpoints if c] if not checkpoints: raise ValueError("No checkpoints provided for averaging.") if FLAGS.prefix: checkpoints = [FLAGS.prefix + c for c in checkpoints] else: assert FLAGS.num_last_checkpoints >= 1, "Must average at least one model" assert FLAGS.prefix, ("Prefix must be provided when averaging last" " N checkpoints") checkpoint_state = tf.train.get_checkpoint_state( os.path.dirname(FLAGS.prefix)) # Checkpoints are ordered from oldest to newest. checkpoints = checkpoint_state.all_model_checkpoint_paths[ -FLAGS.num_last_checkpoints:] checkpoints = [c for c in checkpoints if checkpoint_exists(c)] if not checkpoints: if FLAGS.checkpoints: raise ValueError( "None of the provided checkpoints exist. %s" % FLAGS.checkpoints) else: raise ValueError("Could not find checkpoints at %s" % os.path.dirname(FLAGS.prefix)) # Read variables from all checkpoints and average them. tf.logging.info("Reading variables and averaging checkpoints:") for c in checkpoints: tf.logging.info("%s ", c) var_list = tf.contrib.framework.list_variables(checkpoints[0]) var_values, var_dtypes = {}, {} for (name, shape) in var_list: if not name.startswith("global_step"): var_values[name] = np.zeros(shape) for checkpoint in checkpoints: reader = tf.contrib.framework.load_checkpoint(checkpoint) for name in var_values: tensor = reader.get_tensor(name) var_dtypes[name] = tensor.dtype var_values[name] += tensor tf.logging.info("Read from checkpoint %s", checkpoint) for name in var_values: # Average. 
var_values[name] /= len(checkpoints) tf_vars = [ tf.get_variable(v, shape=var_values[v].shape, dtype=var_dtypes[name]) for v in var_values ] placeholders = [tf.placeholder(v.dtype, shape=v.shape) for v in tf_vars] assign_ops = [tf.assign(v, p) for (v, p) in zip(tf_vars, placeholders)] global_step = tf.Variable( 0, name="global_step", trainable=False, dtype=tf.int64) saver = tf.train.Saver(tf.all_variables()) # Build a model consisting only of variables, set them to the average values. with tf.Session() as sess: sess.run(tf.initialize_all_variables()) for p, assign_op, (name, value) in zip(placeholders, assign_ops, six.iteritems(var_values)): sess.run(assign_op, {p: value}) # Use the built saver to save the averaged checkpoint. saver.save(sess, FLAGS.output_path, global_step=global_step) tf.logging.info("Averaged checkpoints saved in %s", FLAGS.output_path) if __name__ == "__main__": tf.app.run()
bsd-3-clause
-3,850,041,144,974,980,000
37.775
107
0.678848
false
3.785191
false
false
false
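The avg_checkpoints record reads N TensorFlow checkpoints and element-wise averages every variable except global_step. The same core idea, stripped of the TF1 session/saver machinery, is sketched below with NumPy only; here a "checkpoint" is just a name-to-array dict, which is an assumption of the example rather than the real checkpoint format.

# Framework-free sketch of the averaging step, assuming each "checkpoint" is a
# plain dict mapping variable name -> numpy array (not the real TF format).
import numpy as np

def average_checkpoints(checkpoints, skip_prefix="global_step"):
    """Element-wise average of same-named variables across checkpoints."""
    if not checkpoints:
        raise ValueError("No checkpoints provided for averaging.")
    names = [n for n in checkpoints[0] if not n.startswith(skip_prefix)]
    averaged = {}
    for name in names:
        # Accumulate in float64, then cast back to the variable's own dtype.
        total = np.zeros_like(checkpoints[0][name], dtype=np.float64)
        for ckpt in checkpoints:
            total += ckpt[name]
        averaged[name] = (total / len(checkpoints)).astype(checkpoints[0][name].dtype)
    return averaged

# Example: two toy "checkpoints" with a single weight matrix.
c1 = {"w": np.array([[1.0, 2.0]]), "global_step": np.array(100)}
c2 = {"w": np.array([[3.0, 4.0]]), "global_step": np.array(200)}
print(average_checkpoints([c1, c2])["w"])   # [[2. 3.]]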
necozay/tulip-control
tulip/transys/export/graph2dot.py
1
17106
# Copyright (c) 2013-2014 by California Institute of Technology # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the California Institute of Technology nor # the names of its contributors may be used to endorse or promote # products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH # OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF # USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT # OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. """Convert labeled graph to dot using pydot and custom filtering """ from __future__ import division import logging import re from collections import Iterable from textwrap import fill from cStringIO import StringIO import numpy as np import networkx as nx from networkx.utils import make_str import pydot # inline: # # import webcolors logger = logging.getLogger(__name__) def _states2dot_str(graph, to_pydot_graph, wrap=10, tikz=False, rankdir='TB'): """Copy nodes to given Pydot graph, with attributes for dot export.""" # TODO generate LaTeX legend table for edge labels states = graph.states # get labeling def if hasattr(graph, '_state_label_def'): label_def = graph._state_label_def if hasattr(graph, '_state_dot_label_format'): label_format = graph._state_dot_label_format else: label_format = {'type?label': '', 'separator': '\n'} for u, d in graph.nodes_iter(data=True): # initial state ? 
is_initial = u in states.initial is_accepting = _is_accepting(graph, u) # state annotation node_dot_label = _form_node_label( u, d, label_def, label_format, wrap, tikz=tikz ) # node_dot_label = fill(str(state), width=wrap) rim_color = d.get('color', 'black') if tikz: _state2tikz(graph, to_pydot_graph, u, is_initial, is_accepting, rankdir, rim_color, d, node_dot_label) else: _state2dot(graph, to_pydot_graph, u, is_initial, is_accepting, rim_color, d, node_dot_label) def _state2dot(graph, to_pydot_graph, state, is_initial, is_accepting, rim_color, d, node_dot_label): if is_initial: _add_incoming_edge(to_pydot_graph, state) normal_shape = graph.dot_node_shape['normal'] accept_shape = graph.dot_node_shape.get('accepting', '') shape = accept_shape if is_accepting else normal_shape corners = 'rounded' if shape is 'rectangle' else '' rim_color = '"' + _format_color(rim_color, 'dot') + '"' fc = d.get('fillcolor', 'none') filled = '' if fc is 'none' else 'filled' if fc is 'gradient': # top/bottom colors not supported for dot lc = d.get('left_color', d['top_color']) rc = d.get('right_color', d['bottom_color']) if isinstance(lc, basestring): fillcolor = lc elif isinstance(lc, dict): fillcolor = lc.keys()[0] else: raise TypeError('left_color must be str or dict.') if isinstance(rc, basestring): fillcolor += ':' + rc elif isinstance(rc, dict): fillcolor += ':' + rc.keys()[0] else: raise TypeError('right_color must be str or dict.') else: fillcolor = _format_color(fc, 'dot') if corners and filled: node_style = '"' + corners + ', ' + filled + '"' elif corners: node_style = '"' + corners + '"' else: node_style = '"' + filled + '"' to_pydot_graph.add_node( state, label=node_dot_label, shape=shape, style=node_style, color=rim_color, fillcolor='"' + fillcolor + '"') def _state2tikz(graph, to_pydot_graph, state, is_initial, is_accepting, rankdir, rim_color, d, node_dot_label): style = 'state' if rankdir is 'LR': init_dir = 'initial left' elif rankdir is 'RL': init_dir = 'initial right' elif rankdir is 'TB': init_dir = 'initial above' elif rankdir is 'BT': init_dir = 'initial below' else: raise ValueError('Unknown rankdir') if is_initial: style += ', initial by arrow, ' + init_dir + ', initial text=' if is_accepting: style += ', accepting' if graph.dot_node_shape['normal'] is 'rectangle': style += ', shape = rectangle, rounded corners' # darken the rim if 'black' in rim_color: c = _format_color(rim_color, 'tikz') else: c = _format_color(rim_color, 'tikz') + '!black!30' style += ', draw = ' + c fill = d.get('fillcolor') if fill is 'gradient': s = {'top_color', 'bottom_color', 'left_color', 'right_color'} for x in s: if x in d: style += ', ' + x + ' = ' + _format_color(d[x], 'tikz') elif fill is not None: # not gradient style += ', fill = ' + _format_color(fill, 'tikz') else: logger.debug('fillcolor is None') to_pydot_graph.add_node( state, texlbl=node_dot_label, style=style) def _format_color(color, prog='tikz'): """Encode color in syntax for given program. @type color: - C{str} for single color or - C{dict} for weighted color mix @type prog: 'tikz' or 'dot' """ if isinstance(color, basestring): return color if not isinstance(color, dict): raise Exception('color must be str or dict') if prog is 'tikz': s = '!'.join([k + '!' 
+ str(v) for k, v in color.iteritems()]) elif prog is 'dot': t = sum(color.itervalues()) try: import webcolors # mix them result = np.array((0.0, 0.0, 0.0)) for c, w in color.iteritems(): result += w/t * np.array(webcolors.name_to_rgb(c)) s = webcolors.rgb_to_hex(result) except: logger.warn('failed to import webcolors') s = ':'.join([k + ';' + str(v/t) for k, v in color.iteritems()]) else: raise ValueError('Unknown program: ' + str(prog) + '. ' "Available options are: 'dot' or 'tikz'.") return s def _place_initial_states(trs_graph, pd_graph, tikz): init_subg = pydot.Subgraph('initial') init_subg.set_rank('source') for node in trs_graph.states.initial: pd_node = pydot.Node(make_str(node)) init_subg.add_node(pd_node) phantom_node = 'phantominit' + str(node) pd_node = pydot.Node(make_str(phantom_node)) init_subg.add_node(pd_node) pd_graph.add_subgraph(init_subg) def _add_incoming_edge(g, state): phantom_node = 'phantominit' + str(state) g.add_node(phantom_node, label='""', shape='none', width='0') g.add_edge(phantom_node, state) def _form_node_label(state, state_data, label_def, label_format, width=10, tikz=False): # node itself state_str = str(state) state_str = state_str.replace("'", "") # rm parentheses to reduce size of states in fig if tikz: state_str = state_str.replace('(', '') state_str = state_str.replace(')', '') # make indices subscripts if tikz: pattern = '([a-zA-Z]\d+)' make_subscript = lambda x: x.group(0)[0] + '_' + x.group(0)[1:] state_str = re.sub(pattern, make_subscript, state_str) # SVG requires breaking the math environment into # one math env per line. Just make 1st line math env # if latex: # state_str = '$' + state_str + '$' # state_str = fill(state_str, width=width) node_dot_label = state_str # newline between state name and label, only if state is labeled if len(state_data) != 0: node_dot_label += r'\n' # add node annotations from action, AP sets etc # other key,values in state attr_dict ignored pieces = list() for (label_type, label_value) in state_data.iteritems(): if label_type not in label_def: continue # label formatting type_name = label_format[label_type] sep_type_value = label_format['type?label'] # avoid turning strings to lists, # or non-iterables to lists if isinstance(label_value, str): label_str = fill(label_value, width=width) elif isinstance(label_value, Iterable): # and not str s = ', '.join([str(x) for x in label_value]) label_str = r'\\{' + fill(s, width=width) + r'\\}' else: label_str = fill(str(label_value), width=width) pieces.append(type_name + sep_type_value + label_str) sep_label_sets = label_format['separator'] node_dot_label += sep_label_sets.join(pieces) if tikz: # replace LF by latex newline node_dot_label = node_dot_label.replace(r'\n', r'\\\\ ') # dot2tex math mode doesn't handle newlines properly node_dot_label = ( r'$\\begin{matrix} ' + node_dot_label + r'\\end{matrix}$' ) return node_dot_label def _is_accepting(graph, state): """accepting state ?""" # no accepting states defined ? if not hasattr(graph.states, 'accepting'): return False return state in graph.states.accepting def _transitions2dot_str(trans, to_pydot_graph, tikz=False): """Convert transitions to dot str. 
@rtype: str """ if not hasattr(trans.graph, '_transition_label_def'): return if not hasattr(trans.graph, '_transition_dot_label_format'): return if not hasattr(trans.graph, '_transition_dot_mask'): return # get labeling def label_def = trans.graph._transition_label_def label_format = trans.graph._transition_dot_label_format label_mask = trans.graph._transition_dot_mask for (u, v, key, edge_data) in trans.graph.edges_iter( data=True, keys=True ): edge_dot_label = _form_edge_label( edge_data, label_def, label_format, label_mask, tikz ) edge_color = edge_data.get('color', 'black') to_pydot_graph.add_edge(u, v, key=key, label=edge_dot_label, color=edge_color) def _form_edge_label(edge_data, label_def, label_format, label_mask, tikz): label = '' # dot label for edge sep_label_sets = label_format['separator'] for label_type, label_value in edge_data.iteritems(): if label_type not in label_def: continue # masking defined ? # custom filter hiding based on value if label_type in label_mask: # not show ? if not label_mask[label_type](label_value): continue # label formatting if label_type in label_format: type_name = label_format[label_type] sep_type_value = label_format['type?label'] else: type_name = ':' sep_type_value = r',\n' # format iterable containers using # mathematical set notation: {...} if isinstance(label_value, basestring): # str is Iterable: avoid turning it to list label_str = label_value elif isinstance(label_value, Iterable): s = ', '.join([str(x) for x in label_value]) label_str = r'\\{' + fill(s) + r'\\}' else: label_str = str(label_value) if tikz: type_name = r'\mathrm' + '{' + type_name + '}' label += (type_name + sep_type_value + label_str + sep_label_sets) if tikz: label = r'\\begin{matrix}' + label + r'\\end{matrix}' label = '"' + label + '"' return label def _graph2pydot(graph, wrap=10, tikz=False, rankdir='TB'): """Convert (possibly labeled) state graph to dot str. @type graph: L{LabeledDiGraph} @rtype: str """ dummy_nx_graph = nx.MultiDiGraph() _states2dot_str(graph, dummy_nx_graph, wrap=wrap, tikz=tikz, rankdir=rankdir) _transitions2dot_str(graph.transitions, dummy_nx_graph, tikz=tikz) pydot_graph = nx.drawing.nx_pydot.to_pydot(dummy_nx_graph) _place_initial_states(graph, pydot_graph, tikz) pydot_graph.set_overlap('false') # pydot_graph.set_size('"0.25,1"') # pydot_graph.set_ratio('"compress"') pydot_graph.set_nodesep(0.5) pydot_graph.set_ranksep(0.1) return pydot_graph def graph2dot_str(graph, wrap=10, tikz=False): """Convert graph to dot string. Requires pydot. @type graph: L{LabeledDiGraph} @param wrap: textwrap width @rtype: str """ pydot_graph = _graph2pydot(graph, wrap=wrap, tikz=tikz) return pydot_graph.to_string() def save_dot(graph, path, fileformat, rankdir, prog, wrap, tikz=False): """Save state graph to dot file. @type graph: L{LabeledDiGraph} @return: True upon success @rtype: bool """ pydot_graph = _graph2pydot(graph, wrap=wrap, tikz=tikz, rankdir=rankdir) if pydot_graph is None: # graph2dot must have printed warning already return False pydot_graph.set_rankdir(rankdir) pydot_graph.set_splines('true') # turn off graphviz warnings caused by tikz labels if tikz: prog = [prog, '-q 1'] pydot_graph.write(path, format=fileformat, prog=prog) return True def plot_pydot(graph, prog='dot', rankdir='LR', wrap=10, ax=None): """Plot a networkx or pydot graph using dot. No files written or deleted from the disk. 
Note that all networkx graph classes are inherited from networkx.Graph See Also ======== dot & pydot documentation @param graph: to plot @type graph: networkx.Graph | pydot.Graph @param prog: GraphViz programto use @type prog: 'dot' | 'neato' | 'circo' | 'twopi' | 'fdp' | 'sfdp' | etc @param rankdir: direction to layout nodes @type rankdir: 'LR' | 'TB' @param ax: axes """ try: pydot_graph = _graph2pydot(graph, wrap=wrap) except: if isinstance(graph, nx.Graph): pydot_graph = nx.drawing.nx_pydot.to_pydot(graph) else: raise TypeError( 'graph not networkx or pydot class.' + 'Got instead: ' + str(type(graph))) pydot_graph.set_rankdir(rankdir) pydot_graph.set_splines('true') pydot_graph.set_bgcolor('gray') png_str = pydot_graph.create_png(prog=prog) # installed ? try: from IPython.display import display, Image logger.debug('IPython installed.') # called by IPython ? try: cfg = get_ipython().config logger.debug('Script called by IPython.') # Caution!!! : not ordinary dict, # but IPython.config.loader.Config # qtconsole ? if cfg['IPKernelApp']: logger.debug('Within IPython QtConsole.') display(Image(data=png_str)) return True except: print('IPython installed, but not called from it.') except ImportError: logger.warn('IPython not found.\nSo loaded dot images not inline.') # not called from IPython QtConsole, try Matplotlib... # installed ? try: import matplotlib.pyplot as plt import matplotlib.image as mpimg except: logger.debug('Matplotlib not installed.') logger.warn('Neither IPython QtConsole nor Matplotlib available.') return None logger.debug('Matplotlib installed.') if ax is None: fig = plt.figure() ax = fig.add_subplot(111) sio = StringIO() sio.write(png_str) sio.seek(0) img = mpimg.imread(sio) ax.imshow(img, aspect='equal') plt.show(block=False) return ax
bsd-3-clause
-4,901,562,265,867,219,000
29.169312
76
0.592365
false
3.661387
false
false
false
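In the graph2dot record, _format_color's dot branch blends a weight dict of colour names into one hex colour via the optional webcolors package. The sketch below shows the same weighted RGB mix but takes RGB triples directly, so it needs no third-party lookup; the input format is an assumption of the example, not the record's API.

# Sketch of a weighted colour mix, assuming the caller supplies RGB triples.
def mix_rgb(weighted_colors):
    """weighted_colors: dict mapping (r, g, b) in 0-255 to a positive weight."""
    total = float(sum(weighted_colors.values()))
    mixed = [0.0, 0.0, 0.0]
    for rgb, weight in weighted_colors.items():
        for i in range(3):
            mixed[i] += weight / total * rgb[i]
    # Render as the '#rrggbb' hex string dot expects for a single colour.
    return "#%02x%02x%02x" % tuple(int(round(c)) for c in mixed)

# Two parts red, one part blue -> a reddish purple.
print(mix_rgb({(255, 0, 0): 2, (0, 0, 255): 1}))  # #aa0055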
moto-timo/robotframework
src/robot/utils/robotpath.py
1
5342
# Copyright 2008-2015 Nokia Solutions and Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import os.path import sys try: from urllib import pathname2url except ImportError: from urllib.request import pathname2url from robot.errors import DataError from .encoding import decode_from_system from .platform import WINDOWS from .robottypes import is_unicode if sys.version_info < (2,7): _abspath = lambda path: os.path.join(os.getcwdu(), path) else: _abspath = os.path.abspath if WINDOWS: CASE_INSENSITIVE_FILESYSTEM = True else: try: CASE_INSENSITIVE_FILESYSTEM = os.listdir('/tmp') == os.listdir('/TMP') except OSError: CASE_INSENSITIVE_FILESYSTEM = False def normpath(path, case_normalize=False): """Replacement for os.path.normpath with some enhancements. 1. Non-Unicode paths are converted to Unicode using file system encoding. 2. Optionally lower-case paths on case-insensitive file systems. That includes Windows and also OSX in default configuration. 3. Turn ``c:`` into ``c:\\`` on Windows instead of keeping it as ``c:``. """ if not is_unicode(path): path = decode_from_system(path) path = os.path.normpath(path) if case_normalize and CASE_INSENSITIVE_FILESYSTEM: path = path.lower() if WINDOWS and len(path) == 2 and path[1] == ':': return path + '\\' return path def abspath(path, case_normalize=False): """Replacement for os.path.abspath with some enhancements and bug fixes. 1. Non-Unicode paths are converted to Unicode using file system encoding. 2. Optionally lower-case paths on case-insensitive file systems. That includes Windows and also OSX in default configuration. 3. Turn ``c:`` into ``c:\\`` on Windows instead of ``c:\\current\\path``. 4. Handle non-ASCII characters on working directory with Python < 2.6.5: http://bugs.python.org/issue3426 """ path = normpath(path, case_normalize) if os.path.isabs(path): return path return normpath(_abspath(path), case_normalize) # TODO: Investigate could this be replaced with os.path.relpath in RF 2.9. def get_link_path(target, base): """Returns a relative path to a target from a base. If base is an existing file, then its parent directory is considered. Otherwise, base is assumed to be a directory. 
Rationale: os.path.relpath is not available before Python 2.6 """ path = _get_pathname(target, base) url = pathname2url(path.encode('UTF-8')) if os.path.isabs(path): url = 'file:' + url # At least Jython seems to use 'C|/Path' and not 'C:/Path' if os.sep == '\\' and '|/' in url: url = url.replace('|/', ':/', 1) return url.replace('%5C', '/').replace('%3A', ':').replace('|', ':') def _get_pathname(target, base): target = abspath(target) base = abspath(base) if os.path.isfile(base): base = os.path.dirname(base) if base == target: return os.path.basename(target) base_drive, base_path = os.path.splitdrive(base) # if in Windows and base and link on different drives if os.path.splitdrive(target)[0] != base_drive: return target common_len = len(_common_path(base, target)) if base_path == os.sep: return target[common_len:] if common_len == len(base_drive) + len(os.sep): common_len -= len(os.sep) dirs_up = os.sep.join([os.pardir] * base[common_len:].count(os.sep)) return os.path.join(dirs_up, target[common_len + len(os.sep):]) def _common_path(p1, p2): """Returns the longest path common to p1 and p2. Rationale: as os.path.commonprefix is character based, it doesn't consider path separators as such, so it may return invalid paths: commonprefix(('/foo/bar/', '/foo/baz.txt')) -> '/foo/ba' (instead of /foo) """ while p1 and p2: if p1 == p2: return p1 if len(p1) > len(p2): p1 = os.path.dirname(p1) else: p2 = os.path.dirname(p2) return '' def find_file(path, basedir='.', file_type=None): path = os.path.normpath(path.replace('/', os.sep)) for base in [basedir] + sys.path: if not (base and os.path.isdir(base)): continue if not is_unicode(base): base = decode_from_system(base) ret = os.path.abspath(os.path.join(base, path)) if os.path.isfile(ret): return ret if os.path.isdir(ret) and os.path.isfile(os.path.join(ret, '__init__.py')): return ret default = file_type or 'File' file_type = {'Library': 'Test library', 'Variables': 'Variable file', 'Resource': 'Resource file'}.get(file_type, default) raise DataError("%s '%s' does not exist." % (file_type, path))
apache-2.0
160,849,017,757,947,400
35.094595
83
0.647885
false
3.556591
false
false
false
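The robotpath record's _common_path docstring points out that os.path.commonprefix compares characters, not path components, so it can return a path that does not exist. The standalone snippet below demonstrates that pitfall and mirrors the record's own dirname-walking fix.

# Illustration of the pitfall _common_path works around.
import os.path

print(os.path.commonprefix(['/foo/bar/', '/foo/baz.txt']))   # '/foo/ba'

def common_path(p1, p2):
    # Walk up directory by directory instead of comparing characters.
    while p1 and p2:
        if p1 == p2:
            return p1
        if len(p1) > len(p2):
            p1 = os.path.dirname(p1)
        else:
            p2 = os.path.dirname(p2)
    return ''

print(common_path('/foo/bar', '/foo/baz.txt'))                # '/foo'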
Edraak/edraak-platform
lms/djangoapps/course_api/tests/test_forms.py
1
4312
""" Tests for Course API forms. """ from itertools import product from urllib import urlencode import ddt from django.contrib.auth.models import AnonymousUser from django.http import QueryDict from openedx.core.djangoapps.util.test_forms import FormTestMixin from student.tests.factories import UserFactory from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase from xmodule.modulestore.tests.factories import CourseFactory from ..forms import CourseDetailGetForm, CourseListGetForm class UsernameTestMixin(object): """ Tests the username Form field. """ shard = 4 def test_no_user_param_anonymous_access(self): self.set_up_data(AnonymousUser()) self.form_data.pop('username') self.assert_valid(self.cleaned_data) def test_no_user_param(self): self.set_up_data(AnonymousUser()) self.form_data.pop('username') self.assert_valid(self.cleaned_data) @ddt.ddt class TestCourseListGetForm(FormTestMixin, UsernameTestMixin, SharedModuleStoreTestCase): """ Tests for CourseListGetForm """ shard = 4 FORM_CLASS = CourseListGetForm @classmethod def setUpClass(cls): super(TestCourseListGetForm, cls).setUpClass() cls.course = CourseFactory.create() def setUp(self): super(TestCourseListGetForm, self).setUp() self.student = UserFactory.create() self.set_up_data(self.student) def set_up_data(self, user): """ Sets up the initial form data and the expected clean data. """ self.initial = {'requesting_user': user} self.form_data = QueryDict( urlencode({ 'username': user.username, }), mutable=True, ) self.cleaned_data = { 'username': user.username, 'org': '', 'mobile': None, 'search_term': '', 'filter_': None, 'ids': None, } def test_basic(self): self.assert_valid(self.cleaned_data) def test_org(self): org_value = 'test org name' self.form_data['org'] = org_value self.cleaned_data['org'] = org_value self.assert_valid(self.cleaned_data) @ddt.data( *product( [('mobile', 'mobile_available')], [(True, True), (False, False), ('1', True), ('0', False), (None, None)], ) ) @ddt.unpack def test_filter(self, param_field_name, param_field_value): param_name, field_name = param_field_name param_value, field_value = param_field_value self.form_data[param_name] = param_value self.cleaned_data[param_name] = field_value if field_value is not None: self.cleaned_data['filter_'] = {field_name: field_value} self.assert_valid(self.cleaned_data) class TestCourseDetailGetForm(FormTestMixin, UsernameTestMixin, SharedModuleStoreTestCase): """ Tests for CourseDetailGetForm """ shard = 4 FORM_CLASS = CourseDetailGetForm @classmethod def setUpClass(cls): super(TestCourseDetailGetForm, cls).setUpClass() cls.course = CourseFactory.create() def setUp(self): super(TestCourseDetailGetForm, self).setUp() self.student = UserFactory.create() self.set_up_data(self.student) def set_up_data(self, user): """ Sets up the initial form data and the expected clean data. """ self.initial = {'requesting_user': user} self.form_data = QueryDict( urlencode({ 'username': user.username, 'course_key': unicode(self.course.id), }), mutable=True, ) self.cleaned_data = { 'username': user.username, 'course_key': self.course.id, } def test_basic(self): self.assert_valid(self.cleaned_data) #-- course key --# def test_no_course_key_param(self): self.form_data.pop('course_key') self.assert_error('course_key', "This field is required.") def test_invalid_course_key(self): self.form_data['course_key'] = 'invalid_course_key' self.assert_error('course_key', "'invalid_course_key' is not a valid course key.")
agpl-3.0
-8,221,865,995,090,685,000
27.368421
91
0.612013
false
3.974194
true
false
false
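The course_api test record feeds itertools.product into ddt.data/ddt.unpack so a single test method fans out over every parameter combination. A tiny standalone illustration of that expansion follows; the parse rule inside the test is a made-up stand-in, not the form's actual cleaning logic.

# Standalone illustration of the ddt + itertools.product fan-out.
from itertools import product
import unittest
import ddt

@ddt.ddt
class TestFlagParsing(unittest.TestCase):

    @ddt.data(*product(["mobile"], [("1", True), ("0", False), (None, None)]))
    @ddt.unpack
    def test_flag(self, param_name, raw_and_expected):
        raw, expected = raw_and_expected
        # Stand-in parse rule: '1' -> True, '0' -> False, missing -> None.
        parsed = None if raw is None else {"1": True, "0": False}[raw]
        self.assertEqual(parsed, expected, param_name)

if __name__ == "__main__":
    unittest.main()   # runs three generated test cases, one per combination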
tastyproject/tasty
tasty/tastyc/tastyc.py
1
9949
# -*- coding: utf-8 -*- """tastyc configuration module""" import copy import sys import os.path from ast import * import gc from tasty import state from tasty.exc import TastySyntaxError from tasty.types import * from tasty.tastyc import bases from tasty.tastyc.codegen import to_source from tasty.tastyc.analyzation import Parenter from tasty.tastyc.analyzation import Qualificator from tasty.tastyc.analyzation import Symbolizer from tasty.tastyc.analyzation import AttributePropagator from tasty.tastyc.analyzation import ConstantSymbolizer from tasty.tastyc.pass_dispatching import OnlinePassDispatcher, SetupPassDispatcher, OnlinePruner, SetupPruner from tasty.tastyc.transformation import DriverParameterPropagator from tasty.tastyc.transformation import OnlineTransformer, SetupTransformer from tasty.tastyc.transformation import KwargsPropagator from tasty.tastyc.transformation import SimpleEvaluator from tasty.tastyc.transformation import PlainTypeConverter from tasty.tastyc.transformation import TypeCompletionTransformer from tasty.tastyc.transformation import ConstantPropagator from tasty.tastyc.transformation import DanglingGarbledBinder from tasty.tastyc.analyze_costs import analyze_costs __all__ = ["compiler_start", "compiler_start_driver_mode"] state.my_globals = globals() def compile_protocol(): """creates custom protocol versions tailored for setup and online phase""" config = state.config full_ast = bases.TastyCBase.full_ast setup_ast = copy.deepcopy(full_ast) online_ast = copy.deepcopy(full_ast) setup_symbol_table = copy.deepcopy(bases.TastyCBase.symbol_table) online_symbol_table = copy.deepcopy(bases.TastyCBase.symbol_table) if "types" not in bases.TastyCBase.imports: types_import = ImportFrom(module='tasty.types', names=[alias(name='*', asname=None)], level=0) setup_ast.body.insert(0, types_import) online_ast.body.insert(0, types_import) if "conversions" not in bases.TastyCBase.imports: con_import = ImportFrom(module='tasty.types', names=[alias(name='conversions', asname=None)], level=0) setup_ast.body.insert(0, con_import) online_ast.body.insert(0, con_import) if __debug__: state.log.info("\ncompiling setup protocol version...") setup_ast = SetupTransformer(setup_symbol_table).visit(setup_ast) SetupPassDispatcher(setup_symbol_table).visit(setup_ast) setup_ast = SetupPruner(setup_symbol_table).visit(setup_ast) setup_ast = TypeCompletionTransformer(setup_symbol_table).visit(setup_ast) fix_missing_locations(setup_ast) setup_filename = protocol_path("{0}.py".format(config.final_setup_protocol)) f = open(setup_filename, "w") f.write(to_source(setup_ast)) f.close() if __debug__: state.log.info("\ncompiling online protocol version...") OnlineTransformer(online_symbol_table).visit(online_ast) OnlinePassDispatcher(online_symbol_table).visit(online_ast) OnlinePruner(online_symbol_table).visit(online_ast) TypeCompletionTransformer(online_symbol_table).visit(online_ast) fix_missing_locations(online_ast) online_filename = protocol_path("{0}.py".format(config.final_online_protocol)) f = open(online_filename, "w") f.write(to_source(online_ast)) f.close() return setup_ast, online_ast def propagate_constants(ast): p = ConstantPropagator() ast = p.visit(ast) p.cleanup_symbol_table() p.visit_Assign = p.visit_Assign_2nd_pass p.visit_Name = p.visit_Name_2nd_pass ast = p.visit(ast) return ast def bind_dangling_garbleds(ast): p = DanglingGarbledBinder() full_ast = p.visit(ast) p.finish() return full_ast def do_driver_selection(original_ast): log = state.log config = state.config num_drivers = 
len(state.driver_classes) if num_drivers > 1: if config.driver_name in state.driver_classes: state.driver_class = config.driver_name else: while 1: chosen_driver = int(raw_input("Found %d different 'Driver' implementations.\nPlease select intended driver via -D <DriverName> flag, or choose from the following list:\n%s\n:" % (num_drivers, "\n".join("%d - %s" % (ix, cname) for ix,cname in enumerate(state.driver_classes))) )) if 0 <= chosen_driver < len(state.driver_classes): state.driver_class = state.driver_classes[chosen_driver] break elif num_drivers == 1: state.driver_class = state.driver_classes[0] if config.test_mode: config.driver_mode = True bases.assign_driver(original_ast, "TestDriver") state.use_driver = True if "TestDriver" not in bases.TastyCBase.imports: driver_import = ImportFrom(module='tasty.types.driver', names=[alias(name='TestDriver', asname=None)], level=0) bases.TastyCBase.imports.add("TestDriver") original_ast.body.insert(0, driver_import) elif config.use_driver: if not state.driver_class: state.log.error("You selected driver mode without implementing a test driver.\nPlease provide one by subclassing from 'Driver' in the protocol!") sys.exit(-1) if not bases.check_driver_assignment(state.driver_class): bases.assign_driver(original_ast, state.driver_class) if not state.protocol_instrumentated: state.log.error("Error: You requested driver mode, but provided a protocol without the 3rd formal parameter 'params'.\nPlease provide a protocol with the signature 'protocol(client, server, params)'") sys.exit(-1) elif state.driver_class or state.protocol_instrumentated: if not bases.check_driver_assignment("IODriver"): bases.assign_driver(original_ast, "IODriver", True) if "IODriver" not in bases.TastyCBase.imports: driver_import = ImportFrom(module='tasty.types.driver', names=[alias(name='IODriver', asname=None)], level=0) bases.TastyCBase.imports.add("IODriver") original_ast.body.insert(0, driver_import) def clean_protocol_environment(): """cleaning possibly created modules and memory""" bases.TastyCBase.symbol_table.clear() try: del sys.modules[state.config.final_setup_protocol] except KeyError: pass try: del sys.modules[state.config.final_online_protocol] except KeyError: pass gc.collect() def compiler_start(): """analyzes protocol structure, runs several optimization technics, retrieves abstract costs and transforms tasty protocols into internal representation. 
For now we have implemented constant propagation, partial evaluation and dead code elimination.""" log = state.log config = state.config #if config.exclude_compiler: #return if __debug__: log.info("starting tasty compiler...") # this can be important if there are defined and registered new tasty # primitives get available at analyzation time in tasty protocols old_path = sys.path sys.path = [config.protocol_dir, ] + sys.path g = globals() protocol = __import__("protocol", g, g, []) sys.path = old_path state.my_globals.update(protocol.__dict__) bases.TastyCBase.symbol_table.clear() text = open(config.protocol_file_path).read().replace("\r\n", "\n") bases.TastyCBase.original_ast = original_ast = compile( text, config.protocol_file_path, "exec", PyCF_ONLY_AST) Parenter().visit(original_ast) Qualificator().visit(original_ast) do_driver_selection(original_ast) AttributePropagator().visit(original_ast) fix_missing_locations(original_ast) f = open(protocol_path("protocol_final.py"), "w") f.write(to_source(original_ast)) f.close() protocol = __import__("protocol_final", g, g, []) full_ast = bases.TastyCBase.full_ast = original_ast if not config.use_driver: if state.assigned_driver_node: bases.TastyCBase.full_ast = full_ast = copy.deepcopy( original_ast) bases.TastyCBase.full_ast = full_ast = DriverParameterPropagator( protocol.driver.next_params().next()).visit(full_ast) ConstantSymbolizer().visit(full_ast) full_ast = propagate_constants(full_ast) full_ast = SimpleEvaluator().visit(full_ast) full_ast = PlainTypeConverter().visit(full_ast) fix_missing_locations(full_ast) symbolizer = Symbolizer(state.my_globals) symbolizer.visit(full_ast) try: symbolizer.check() except Exception, e: state.log.exception(e) sys.exit(-1) full_ast = bind_dangling_garbleds(full_ast) setup_ast, online_ast = compile_protocol() analyze_costs(setup_ast, online_ast) if __debug__: log.info("tasty compiler done") def compiler_start_driver_mode(kwargs): """called before each driver run iteration""" # cleanup clean_protocol_environment() bases.TastyCBase.full_ast = full_ast = copy.deepcopy( bases.TastyCBase.original_ast) DriverParameterPropagator(kwargs).visit(full_ast) ConstantSymbolizer().visit(full_ast) full_ast = propagate_constants(full_ast) full_ast = SimpleEvaluator().visit(full_ast) full_ast = PlainTypeConverter().visit(full_ast) fix_missing_locations(full_ast) symbolizer = Symbolizer(state.my_globals) symbolizer.visit(full_ast) symbolizer.check() full_ast = bind_dangling_garbleds(full_ast) # compile ast into internal representation (actually real python code) setup_ast, online_ast = compile_protocol() # static cost analyzation analyze_costs(setup_ast, online_ast)
gpl-3.0
-1,735,557,266,108,462,600
34.280142
212
0.679666
false
3.663108
true
false
false
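The tastyc record chains AST passes such as ConstantPropagator and SimpleEvaluator over the protocol source. As a much-simplified, hedged stand-in for what such a pass does, here is a Python 3 ast.NodeTransformer that folds literal numeric binary operations; the record's real passes target Python 2 ASTs and do considerably more than this.

# Much-simplified stand-in for a constant-folding/partial-evaluation pass.
import ast
import operator

_OPS = {ast.Add: operator.add, ast.Sub: operator.sub,
        ast.Mult: operator.mul, ast.Div: operator.truediv}

class ConstantFolder(ast.NodeTransformer):
    def visit_BinOp(self, node):
        self.generic_visit(node)                       # fold children first
        op = _OPS.get(type(node.op))
        if (op is not None
                and isinstance(node.left, ast.Constant)
                and isinstance(node.right, ast.Constant)
                and isinstance(node.left.value, (int, float))
                and isinstance(node.right.value, (int, float))):
            try:
                value = op(node.left.value, node.right.value)
            except ZeroDivisionError:
                return node                            # leave 1/0 etc. alone
            return ast.copy_location(ast.Constant(value), node)
        return node

tree = ast.fix_missing_locations(ConstantFolder().visit(ast.parse("x = (2 + 3) * 7")))
print(ast.unparse(tree))                               # x = 35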
mitsei/dlkit
dlkit/json_/authorization/searches.py
1
11337
"""JSON implementations of authorization searches.""" # pylint: disable=no-init # Numerous classes don't require __init__. # pylint: disable=too-many-public-methods,too-few-public-methods # Number of methods are defined in specification # pylint: disable=protected-access # Access to protected methods allowed in package json package scope # pylint: disable=too-many-ancestors # Inheritance defined in specification from . import objects from . import queries from .. import utilities from ..osid import searches as osid_searches from ..primitives import Id from ..utilities import get_registry from dlkit.abstract_osid.authorization import searches as abc_authorization_searches from dlkit.abstract_osid.osid import errors class AuthorizationSearch(abc_authorization_searches.AuthorizationSearch, osid_searches.OsidSearch): """``AuthorizationSearch`` defines the interface for specifying authorization search options.""" def __init__(self, runtime): self._namespace = 'authorization.Authorization' self._runtime = runtime record_type_data_sets = get_registry('RESOURCE_RECORD_TYPES', runtime) self._record_type_data_sets = record_type_data_sets self._all_supported_record_type_data_sets = record_type_data_sets self._all_supported_record_type_ids = [] self._id_list = None for data_set in record_type_data_sets: self._all_supported_record_type_ids.append(str(Id(**record_type_data_sets[data_set]))) osid_searches.OsidSearch.__init__(self, runtime) @utilities.arguments_not_none def search_among_authorizations(self, authorization_ids): """Execute this search among the given list of authorizations. arg: authorization_ids (osid.id.IdList): list of authorizations raise: NullArgument - ``authorization_ids`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ self._id_list = authorization_ids @utilities.arguments_not_none def order_authorization_results(self, authorization_search_order): """Specify an ordering to the search results. arg: authorization_search_order (osid.authorization.AuthorizationSearchOrder): authorization search order raise: NullArgument - ``authorization_search_order`` is ``null`` raise: Unsupported - ``authorization_search_order`` is not of this service *compliance: mandatory -- This method must be implemented.* """ raise errors.Unimplemented() @utilities.arguments_not_none def get_authorization_search_record(self, authorization_search_record_type): """Gets the authorization search record corresponding to the given authorization search record ``Type``. This method is used to retrieve an object implementing the requested record. 
arg: authorization_search_record_type (osid.type.Type): an authorization search record type return: (osid.authorization.records.AuthorizationSearchRecord) - the authorization search record raise: NullArgument - ``authorization_search_record_type`` is ``null`` raise: OperationFailed - unable to complete request raise: Unsupported - ``has_record_type(authorization_search_record_type)`` is ``false`` *compliance: mandatory -- This method must be implemented.* """ raise errors.Unimplemented() class AuthorizationSearchResults(abc_authorization_searches.AuthorizationSearchResults, osid_searches.OsidSearchResults): """This interface provides a means to capture results of a search.""" def __init__(self, results, query_terms, runtime): # if you don't iterate, then .count() on the cursor is an inaccurate representation of limit / skip # self._results = [r for r in results] self._namespace = 'authorization.Authorization' self._results = results self._query_terms = query_terms self._runtime = runtime self.retrieved = False def get_authorizations(self): """Gets the authorization list resulting from the search. return: (osid.authorization.AuthorizationList) - the authorization list raise: IllegalState - list has already been retrieved *compliance: mandatory -- This method must be implemented.* """ if self.retrieved: raise errors.IllegalState('List has already been retrieved.') self.retrieved = True return objects.AuthorizationList(self._results, runtime=self._runtime) authorizations = property(fget=get_authorizations) def get_authorization_query_inspector(self): """Gets the inspector for the query to examine the terms used in the search. return: (osid.authorization.AuthorizationQueryInspector) - the query inspector *compliance: mandatory -- This method must be implemented.* """ return queries.AuthorizationQueryInspector(self._query_terms, runtime=self._runtime) authorization_query_inspector = property(fget=get_authorization_query_inspector) @utilities.arguments_not_none def get_authorization_search_results_record(self, authorization_search_record_type): """Gets the authorization search results record corresponding to the given authorization search record ``Type``. This method is used to retrieve an object implementing the requested record. arg: authorization_search_record_type (osid.type.Type): an authorization search record type return: (osid.authorization.records.AuthorizationSearchResultsRe cord) - the authorization search results record raise: NullArgument - ``authorization_search_record_type`` is ``null`` raise: OperationFailed - unable to complete request raise: Unsupported - ``has_record_type(authorization_search_record_type)`` is ``false`` *compliance: mandatory -- This method must be implemented.* """ raise errors.Unimplemented() class VaultSearch(abc_authorization_searches.VaultSearch, osid_searches.OsidSearch): """The interface for governing vault searches.""" def __init__(self, runtime): self._namespace = 'authorization.Vault' self._runtime = runtime record_type_data_sets = get_registry('RESOURCE_RECORD_TYPES', runtime) self._record_type_data_sets = record_type_data_sets self._all_supported_record_type_data_sets = record_type_data_sets self._all_supported_record_type_ids = [] self._id_list = None for data_set in record_type_data_sets: self._all_supported_record_type_ids.append(str(Id(**record_type_data_sets[data_set]))) osid_searches.OsidSearch.__init__(self, runtime) @utilities.arguments_not_none def search_among_vaults(self, vault_ids): """Execute this search among the given list of vaults. 
arg: vault_ids (osid.id.IdList): list of vaults raise: NullArgument - ``vault_ids`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ self._id_list = vault_ids @utilities.arguments_not_none def order_vault_results(self, vault_search_order): """Specify an ordering to the search results. arg: vault_search_order (osid.authorization.VaultSearchOrder): vault search order raise: NullArgument - ``vault_search_order`` is ``null`` raise: Unsupported - ``vault_search_order`` is not of this service *compliance: mandatory -- This method must be implemented.* """ raise errors.Unimplemented() @utilities.arguments_not_none def get_vault_search_record(self, vault_search_record_type): """Gets the vault search record corresponding to the given vault search record ``Type``. This method is used to retrieve an object implementing the requested record. arg: vault_search_record_type (osid.type.Type): a vault search record type return: (osid.authorization.records.VaultSearchRecord) - the vault search record raise: NullArgument - ``vault_search_record_type`` is ``null`` raise: OperationFailed - unable to complete request raise: Unsupported - ``has_record_type(vault_search_record_type)`` is ``false`` *compliance: mandatory -- This method must be implemented.* """ raise errors.Unimplemented() class VaultSearchResults(abc_authorization_searches.VaultSearchResults, osid_searches.OsidSearchResults): """This interface provides a means to capture results of a search.""" def __init__(self, results, query_terms, runtime): # if you don't iterate, then .count() on the cursor is an inaccurate representation of limit / skip # self._results = [r for r in results] self._namespace = 'authorization.Vault' self._results = results self._query_terms = query_terms self._runtime = runtime self.retrieved = False def get_vaults(self): """Gets the vault list resulting from the search. return: (osid.authorization.VaultList) - the vault list raise: IllegalState - list has already been retrieved *compliance: mandatory -- This method must be implemented.* """ if self.retrieved: raise errors.IllegalState('List has already been retrieved.') self.retrieved = True return objects.VaultList(self._results, runtime=self._runtime) vaults = property(fget=get_vaults) def get_vault_query_inspector(self): """Gets the inspector for the query to examine the terms used in the search. return: (osid.authorization.VaultQueryInspector) - the vault query inspector *compliance: mandatory -- This method must be implemented.* """ return queries.VaultQueryInspector(self._query_terms, runtime=self._runtime) vault_query_inspector = property(fget=get_vault_query_inspector) @utilities.arguments_not_none def get_vault_search_results_record(self, vault_search_record_type): """Gets the vault search results record corresponding to the given vault search record ``Type``. This method is used to retrieve an object implementing the requested record. arg: vault_search_record_type (osid.type.Type): a vault search record type return: (osid.authorization.records.VaultSearchResultsRecord) - the vault search results record raise: NullArgument - ``vault_search_record_type`` is ``null`` raise: OperationFailed - unable to complete request raise: Unsupported - ``has_record_type(vault_search_record_type)`` is ``false`` *compliance: mandatory -- This method must be implemented.* """ raise errors.Unimplemented()
mit
-2,990,941,319,276,016,000
40.988889
121
0.659963
false
4.49168
false
false
false
chrisguiney/mudpy
mudpy/character/__init__.py
1
1901
from functools import partial from .. import races from .. import classes from ..magic import SpellBook class Character(object): def __init__(self, name, description, equipment, inventory, attributes, level=1, race="Human", base_class="Fighter", spellbook=None): """ @param name: Name of Character @type name: str @param description: Description of Character @type description: str @param equipment: Dictionary of item slot to equipped item @type equipment: dict @param inventory: List of items @type inventory: list of item @param attributes: Dictionary of attributes to values @type attributes: dict @param level: Integer level number @type level: int @param race: Name of race as string @type race: str @param base_class: Name of base character class as string @type base_class: str @param spellbook: Spellbook object of memorized spells @type spellbook: SpellBook @return: None """ self.name = name self.level = level self.description = description self.attributes = attributes self.equipment = equipment self.inventory = inventory self.race = getattr(races, race, partial(races.raise_invalid_race, race))() self.base_class = getattr(classes, base_class)() self.spellbook = spellbook if self.base_class.is_caster and spellbook is None: self.spellbook = SpellBook() def cast(self, spell, on_target): return (spell.base_damage.roll() + spell.level_modifier(self.level)) - on_target.resistance(spell.damage_type) def resistance(self, damage_type): pass def equip(self, item): assert(item in self.inventory) self.equipment[item.slot] = item
apache-2.0
-1,466,427,402,006,687,000
27.818182
120
0.624934
false
4.150655
false
false
false
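The mudpy Character record resolves a race class by name with getattr(races, race, partial(races.raise_invalid_race, race))(), so a hit and a miss are both simply called. Below is a self-contained sketch of that dispatch pattern using a stand-in namespace instead of the game's races module.

# Sketch of the getattr-with-callable-fallback dispatch used for race lookup.
from functools import partial
from types import SimpleNamespace

class Human:
    pass

def raise_invalid_race(name):
    raise ValueError("Unknown race: %s" % name)

# Stand-in for the game's races module.
races = SimpleNamespace(Human=Human, raise_invalid_race=raise_invalid_race)

def make_race(name):
    # getattr falls back to a partial that raises when called, so both the
    # found class and the error path are invoked the same way.
    return getattr(races, name, partial(races.raise_invalid_race, name))()

print(type(make_race("Human")).__name__)    # Human
try:
    make_race("Elf")
except ValueError as exc:
    print(exc)                              # Unknown race: Elf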
kevinkahn/softconsole
screens/specificscreens/weatherscreen.py
1
6148
from collections import OrderedDict import debug import logsupport from screens import screen import screens.__screens as screens from keyspecs import toucharea from utils import utilities, fonts, hw from stores import valuestore from utils.utilfuncs import wc, fmt from utils.weatherfromatting import CreateWeathBlock fsizes = ((20, False, False), (30, True, False), (45, True, True)) class WeatherScreenDesc(screen.ScreenDesc): def __init__(self, screensection, screenname): debug.debugPrint('Screen', "New WeatherScreenDesc ", screenname) super().__init__(screensection, screenname) butsize = self.ButSize(1, 1, 0) self.Keys = OrderedDict({'condorfcst': toucharea.TouchPoint('condorfcst', ( self.HorizBorder + .5 * butsize[0], self.TopBorder + .5 * butsize[1]), butsize, proc=self.CondOrFcst)}) self.currentconditions = True # show conditions or forecast screen.AddUndefaultedParams(self, screensection, location='', LocationSize=0) # default to no location now that screen title in use self.SetScreenTitle(screen.FlatenScreenLabel(self.label), 50, self.CharColor) self.condformat = u"{d[0]} {d[1]}\u00B0F", u" Feels like: {d[2]}\u00B0", "Wind {d[3]}@{d[4]}" self.condfields = list(((self.location, 'Cond', x) for x in ('Sky', 'Temp', 'Feels', 'WindDir', 'WindMPH'))) # self.dayformat = "Sunrise: {d[0]:02d}:{d[1]:02d}","Sunset: {d[2]:02d}:{d[3]:02d}","Moon rise: {d[4]} set: {d[5]}","{d[6]}% illuminated" # self.dayfields = list(((self.location, 'Cond', x) for x in ('SunriseH','SunriseM','SunsetH','SunsetM','Moonrise','Moonset','MoonPct'))) self.dayformat = "Sunrise: {d[0]}", "Sunset: {d[1]}" # , "Moon rise: {d[2]} set: {d[3]}" self.dayfields = list(((self.location, 'Cond', x) for x in ('Sunrise', 'Sunset'))) # , 'Moonrise', 'Moonset'))) self.footformat = "Readings as of {d[0]}", self.footfields = ((self.location, 'Cond', 'Age'),) self.fcstformat = u"{d[0]} {d[1]}\u00B0/{d[2]}\u00B0 {d[3]}", "Wind: {d[4]}" self.fcstfields = list(((self.location, 'Fcst', x) for x in ('Day', 'High', 'Low', 'Sky', 'WindSpd'))) try: self.store = valuestore.ValueStores[self.location] except KeyError: logsupport.Logs.Log("Weather screen {} using non-existent location {}".format(screenname, self.location), severity=logsupport.ConsoleWarning) raise ValueError utilities.register_example("WeatherScreenDesc", self) # noinspection PyUnusedLocal def CondOrFcst(self): self.currentconditions = not self.currentconditions self.ReInitDisplay() def ScreenContentRepaint(self): # todo given the useable vert space change should check for overflow or auto size font vert_off = self.startvertspace if not self.store.ValidWeather: renderedlines = [ fonts.fonts.Font(45, "").render(x, 0, wc(self.CharColor)) for x in self.store.Status] for l in renderedlines: hw.screen.blit(l, ((hw.screenwidth - l.get_width()) / 2, vert_off)) vert_off = vert_off + 60 # todo use useable space stuff and vert start else: renderedlines = [] if self.LocationSize != 0: locblk = fonts.fonts.Font(self.LocationSize, "").render( fmt.format("{d}", d=self.store.GetVal(('Cond', 'Location'))), 0, wc(self.CharColor)) hw.screen.blit(locblk, ((hw.screenwidth - locblk.get_width()) / 2, vert_off)) vert_off = vert_off + locblk.get_height() + 10 # todo gap of 10 pixels is arbitrary h = vert_off if self.currentconditions: # todo add max width and wrap renderedlines.append( CreateWeathBlock(self.condformat, self.condfields, "", [45, 25, 35], self.CharColor, (self.location, 'Cond', 'Icon'), False)) h = h + renderedlines[-1].get_height() renderedlines.append( CreateWeathBlock(self.dayformat, 
self.dayfields, "", [30], self.CharColor, None, True)) h = h + renderedlines[-1].get_height() renderedlines.append( CreateWeathBlock(self.footformat, self.footfields, "", [25], self.CharColor, None, True)) h = h + renderedlines[-1].get_height() s = (self.useablevertspace - h) / (len(renderedlines) - 1) if len(renderedlines) > 1 else 0 for l in renderedlines: hw.screen.blit(l, ((hw.screenwidth - l.get_width()) / 2, vert_off)) vert_off = vert_off + l.get_height() + s else: fcstlines = 0 if hw.screenwidth > 350: screenmaxfcstwidth = self.useablehorizspace // 2 - 10 else: screenmaxfcstwidth = self.useablehorizspace fcstdays = min(valuestore.GetVal((self.location, 'FcstDays')), 14) # cap at 2 weeks maxfcstwidth = 0 maxfcstheight = 0 if fcstdays > 0: for i in range(fcstdays): renderedlines.append( CreateWeathBlock(self.fcstformat, self.fcstfields, "", [25], self.CharColor, # todo compute font size based on useable (self.location, 'Fcst', 'Icon'), False, day=i, maxhorizwidth=screenmaxfcstwidth)) if renderedlines[-1].get_width() > maxfcstwidth: maxfcstwidth = renderedlines[-1].get_width() if renderedlines[-1].get_height() > maxfcstheight: maxfcstheight = renderedlines[ -1].get_height() fcstlines += 1 else: renderedlines.append(fonts.fonts.Font(35, "").render("No Forecast Available", 0, wc(self.CharColor))) if hw.screenwidth > 350: h = h + renderedlines[-1].get_height() * 5 fcstlines = 2 + (fcstlines + 1) / 2 usewidth = hw.screenwidth / 2 else: h = h + renderedlines[-1].get_height() * 5 fcstlines = 5 usewidth = hw.screenwidth s = (self.useablevertspace - h) / (fcstlines + 1) startvert = vert_off horiz_off = (usewidth - maxfcstwidth) / 2 swcol = -int(-fcstdays // 2) - 1 for dy, fcst in enumerate(renderedlines): hw.screen.blit(fcst, (horiz_off, vert_off)) vert_off = vert_off + s + maxfcstheight if (dy == swcol) and (hw.screenwidth > 350): horiz_off = horiz_off + usewidth vert_off = startvert def InitDisplay(self, nav): self.currentconditions = True super().InitDisplay(nav) def ReInitDisplay(self): super().ReInitDisplay() screens.screentypes["Weather"] = WeatherScreenDesc
apache-2.0
1,973,004,741,115,924,700
40.540541
141
0.658913
false
2.901369
false
false
false
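The display templates in the WeatherScreenDesc record above (for example u"{d[0]} {d[1]}\u00B0F") are plain str.format templates indexed into a sequence of store values. A minimal sketch of that pattern, using made-up readings rather than the real valuestore:

# Illustrative only: 'readings' stands in for values pulled from the weather store.
conditions_template = u"{d[0]} {d[1]}\u00B0F"
wind_template = u"Wind {d[3]}@{d[4]}"

readings = ("Cloudy", 72, 70, "NW", 9)  # Sky, Temp, Feels, WindDir, WindMPH (hypothetical values)

print(conditions_template.format(d=readings))  # -> Cloudy 72°F
print(wind_template.format(d=readings))        # -> Wind NW@9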
TotalVerb/territory
territory/recurser.py
1
3793
# ------------------------------------------------------------------------
#
# This file is part of Territory.
#
# Territory is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Territory is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Territory. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright Territory Development Team
# <https://github.com/TotalVerb/territory>
# Copyright Conquer Development Team (http://code.google.com/p/pyconquer/)
#
# ------------------------------------------------------------------------


class Recurser:
    def __init__(self, board):
        self.board = board

    def count_dumps_on_island(self, x, y):
        dumps_coord_list = []
        player = self.board.data[x, y]
        # Crawl island from (x, y)
        land_area = self.crawl(x, y, [player])
        # Let's iterate through crawled places
        for coordinate in land_area:
            # Check if current coordinate has a dump
            # (data can take the coordinate-string)
            actor = self.board.actor_at(coordinate)
            if actor and actor.dump:
                assert actor.side == player
                dumps_coord_list.append(coordinate)
        return [dumps_coord_list, land_area]

    def find_land(self):
        """Find a square with land."""
        for x in range(self.board.width):
            for y in range(self.board.height):
                if self.board.data[x, y] > 0:
                    return x, y

    def iscontiguous(self):
        """Return true if every land is connected to every other."""
        # Check if there's at least one land. No point handling vacuous truth.
        land_area = self.board.count_world_area()
        assert land_area > 0
        x, y = self.find_land()
        return len(self.crawl(x, y, [1, 2, 3, 4, 5, 6])) == land_area

    def get_island_border_lands(self, x, y):
        land_area_set = set()
        island_owner = self.board.data[x, y]
        self.crawl(x, y, [island_owner], land_area_set)
        border_area_set = set()
        for xy in land_area_set:
            x1, y1 = xy
            for nx, ny in self.board.neighbours(x1, y1):
                if self.board.isvalid(nx, ny) \
                        and self.board.data[nx, ny] != island_owner \
                        and self.board.data[nx, ny] != 0:
                    # This works because set can't have duplicates
                    border_area_set.add((nx, ny))
        return border_area_set

    def island_size(self, x, y):
        """Count the amount of land of the specified island."""
        return len(self.crawl(x, y, [self.board.data[x, y]]))

    def crawl(self, x, y, find_list, crawled=None):
        """
        x,y -> coordinates to start "crawling"
        recursion_set -> set to hold already "crawled" coordinates
        find_list -> list of players whose lands are to be searched
        """
        crawled = crawled if crawled is not None else set()
        if self.board.isvalid(x, y) and \
                self.board.data[x, y] in find_list and \
                (x, y) not in crawled:
            crawled.add((x, y))
            # Crawl neighbours
            for nx, ny in self.board.neighbours(x, y):
                self.crawl(nx, ny, find_list, crawled)
        return crawled  # places crawled
gpl-3.0
-9,116,925,893,547,967,000
39.351064
78
0.561033
false
3.770378
false
false
false
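The crawl method in the recurser.py record above is a depth-first flood fill over a player-ownership grid. A minimal self-contained sketch of the same idea, with a plain dict standing in for the game board (the Board class itself is not part of this file, so its interface here is assumed):

# Hypothetical stand-in for the Territory board: (x, y) -> owner id, 0 = water.
board_data = {
    (0, 0): 1, (1, 0): 1, (2, 0): 0,
    (0, 1): 1, (1, 1): 0, (2, 1): 2,
}

def neighbours(x, y):
    # 4-connected neighbours, in the same spirit as board.neighbours()
    return [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)]

def crawl(x, y, find_list, crawled=None):
    """Collect every connected square whose owner is in find_list."""
    crawled = crawled if crawled is not None else set()
    if board_data.get((x, y), 0) in find_list and (x, y) not in crawled:
        crawled.add((x, y))
        for nx, ny in neighbours(x, y):
            crawl(nx, ny, find_list, crawled)
    return crawled

print(crawl(0, 0, [1]))  # -> {(0, 0), (1, 0), (0, 1)}: the island owned by player 1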
CERNDocumentServer/invenio
modules/weblinkback/lib/weblinkback_dblayer.py
1
13993
# -*- coding: utf-8 -*- # # This file is part of Invenio. # Copyright (C) 2011, 2012 CERN. # # Invenio is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """WebLinkback - Database Layer""" from invenio.dbquery import run_sql from invenio.weblinkback_config import CFG_WEBLINKBACK_STATUS, \ CFG_WEBLINKBACK_ORDER_BY_INSERTION_TIME, \ CFG_WEBLINKBACK_DEFAULT_USER, \ CFG_WEBLINKBACK_PAGE_TITLE_STATUS from invenio.textutils import xml_entities_to_utf8 def get_all_linkbacks(recid=None, status=None, order=CFG_WEBLINKBACK_ORDER_BY_INSERTION_TIME["ASC"], linkback_type=None, limit=None, full_count_only=False): """ Get all linkbacks @param recid: of one record, of all if None @param status: with a certain status, of all if None @param order: order by insertion time either "ASC" or "DESC" @param linkback_type: of a certain type, of all if None @param limit: maximum result count, all if None @param full_count_only: return only full result count (does not consider "limit"), result set if False @return [(linkback_id, origin_url, recid, additional_properties, linkback_type, linkback_status, insert_time)] in order by id, up to "limited" results OR integer if count_only """ header_sql = """SELECT id, origin_url, id_bibrec, additional_properties, type, status, insert_time FROM lnkENTRY""" if full_count_only: header_sql = 'SELECT count(id) FROM lnkENTRY' conditions = [] params = [] def add_condition(column, value): if value: if not conditions: conditions.append('WHERE %s=%%s' % column) else: conditions.append('AND %s=%%s' % column) params.append(value) add_condition('id_bibrec', recid) add_condition('status', status) add_condition('type', linkback_type) order_sql = 'ORDER by id %s' % order limit_sql = '' if limit: limit_sql = 'LIMIT %s' % limit res = run_sql('%s %s %s %s' % (header_sql, ' '.join(conditions), order_sql, limit_sql), tuple(params)) if full_count_only: return int(res[0][0]) else: return res def approve_linkback(linkbackid, user_info): """ Approve linkback @param linkbackid: linkback id @param user_info: user info """ update_linkback_status(linkbackid, CFG_WEBLINKBACK_STATUS['APPROVED'], user_info) def reject_linkback(linkbackid, user_info): """ Reject linkback @param linkbackid: linkback id @param user_info: user info """ update_linkback_status(linkbackid, CFG_WEBLINKBACK_STATUS['REJECTED'], user_info) def update_linkback_status(linkbackid, new_status, user_info = None): """ Update status of a linkback @param linkbackid: linkback id @param new_status: new status @param user_info: user info """ if user_info == None: user_info = {} user_info['uid'] = CFG_WEBLINKBACK_DEFAULT_USER run_sql("""UPDATE lnkENTRY SET status=%s WHERE id=%s """, (new_status, linkbackid)) logid = run_sql("""INSERT INTO lnkLOG (id_user, action, log_time) VALUES (%s, %s, NOW()); SELECT LAST_INSERT_ID(); """, (user_info['uid'], new_status)) run_sql("""INSERT INTO lnkENTRYLOG (id_lnkENTRY , id_lnkLOG) VALUES (%s, %s); 
""", (linkbackid, logid)) def create_linkback(origin_url, recid, additional_properties, linkback_type, user_info): """ Create linkback @param origin_url: origin URL, @param recid: recid @param additional_properties: additional properties @param linkback_type: linkback type @param user_info: user info @return id of the created linkback """ linkbackid = run_sql("""INSERT INTO lnkENTRY (origin_url, id_bibrec, additional_properties, type, status, insert_time) VALUES (%s, %s, %s, %s, %s, NOW()); SELECT LAST_INSERT_ID(); """, (origin_url, recid, str(additional_properties), linkback_type, CFG_WEBLINKBACK_STATUS['PENDING'])) logid = run_sql("""INSERT INTO lnkLOG (id_user, action, log_time) VALUES (%s, %s, NOW()); SELECT LAST_INSERT_ID(); """, (user_info['uid'], CFG_WEBLINKBACK_STATUS['INSERTED'])) run_sql("""INSERT INTO lnkENTRYLOG (id_lnkENTRY, id_lnkLOG) VALUES (%s, %s); """, (linkbackid, logid)) # add url title entry if necessary if len(run_sql("""SELECT url FROM lnkENTRYURLTITLE WHERE url=%s """, (origin_url, ))) == 0: manual_set_title = 0 title = "" if additional_properties != "" and 'title' in additional_properties.keys(): manual_set_title = 1 title = additional_properties['title'] run_sql("""INSERT INTO lnkENTRYURLTITLE (url, title, manual_set) VALUES (%s, %s, %s) """, (origin_url, title, manual_set_title)) return linkbackid def get_approved_latest_added_linkbacks(count): """ Get approved latest added linkbacks @param count: count of the linkbacks @return [(linkback_id, origin_url, recid, additional_properties, type, status, insert_time)] in descending order by insert_time """ return run_sql("""SELECT id, origin_url, id_bibrec, additional_properties, type, status, insert_time FROM lnkENTRY WHERE status=%s ORDER BY insert_time DESC LIMIT %s """, (CFG_WEBLINKBACK_STATUS['APPROVED'], count)) def get_url_list(list_type): """ @param list_type: of CFG_WEBLINKBACK_LIST_TYPE @return (url0, ..., urln) in ascending order by url """ result = run_sql("""SELECT url FROM lnkADMINURL WHERE list=%s ORDER by url ASC """, (list_type, )) return tuple(url[0] for (url) in result) def get_urls(): """ Get all URLs and the corresponding listType @return ((url, CFG_WEBLINKBACK_LIST_TYPE), ..., (url, CFG_WEBLINKBACK_LIST_TYPE)) in ascending order by url """ return run_sql("""SELECT url, list FROM lnkADMINURL ORDER by url ASC """) def url_exists(url, list_type=None): """ Check if url exists @param url @param list_type: specific list of CFG_WEBLINKBACK_LIST_TYPE, all if None @return True or False """ header_sql = """SELECT url FROM lnkADMINURL WHERE url=%s """ optional_sql = " AND list=%s" result = None if list_type: result = run_sql(header_sql + optional_sql, (url, list_type)) else: result = run_sql(header_sql, (url, )) if result != (): return True else: return False def add_url_to_list(url, list_type, user_info): """ Add a URL to a list @param url: unique URL string for all lists @param list_type: of CFG_WEBLINKBACK_LIST_TYPE @param user_info: user info @return id of the created url """ urlid = run_sql("""INSERT INTO lnkADMINURL (url, list) VALUES (%s, %s); SELECT LAST_INSERT_ID(); """, (url, list_type)) logid = run_sql("""INSERT INTO lnkLOG (id_user, action, log_time) VALUES (%s, %s, NOW()); SELECT LAST_INSERT_ID(); """, (user_info['uid'], CFG_WEBLINKBACK_STATUS['INSERTED'])) run_sql("""INSERT INTO lnkADMINURLLOG (id_lnkADMINURL, id_lnkLOG) VALUES (%s, %s); """, (urlid, logid)) return urlid def remove_url(url): """ Remove a URL from list @param url: unique URL string for all lists """ # get ids urlid = 
run_sql("""SELECT id FROM lnkADMINURL WHERE url=%s """, (url, ))[0][0] logids = run_sql("""SELECT log.id FROM lnkLOG log JOIN lnkADMINURLLOG url_log ON log.id=url_log.id_lnkLOG WHERE url_log.id_lnkADMINURL=%s """, (urlid, )) # delete url and url log run_sql("""DELETE FROM lnkADMINURL WHERE id=%s; DELETE FROM lnkADMINURLLOG WHERE id_lnkADMINURL=%s """, (urlid, urlid)) # delete log for logid in logids: run_sql("""DELETE FROM lnkLOG WHERE id=%s """, (logid[0], )) def get_urls_and_titles(title_status=None): """ Get URLs and their corresponding title @param old_new: of CFG_WEBLINKBACK_PAGE_TITLE_STATUS or None @return ((url, title, manual_set),...), all rows of the table if None """ top_query = """SELECT url, title, manual_set, broken_count FROM lnkENTRYURLTITLE WHERE """ where_sql = "" if title_status == CFG_WEBLINKBACK_PAGE_TITLE_STATUS['NEW']: where_sql = " title='' AND manual_set=0 AND" elif title_status == CFG_WEBLINKBACK_PAGE_TITLE_STATUS['OLD']: where_sql = " title<>'' AND manual_set=0 AND" elif title_status == CFG_WEBLINKBACK_PAGE_TITLE_STATUS['MANUALLY_SET']: where_sql = " manual_set=1 AND" where_sql += " broken=0" return run_sql(top_query + where_sql) def update_url_title(url, title): """ Update the corresponding title of a URL @param url: URL @param title: new title """ run_sql("""UPDATE lnkENTRYURLTITLE SET title=%s, manual_set=0, broken_count=0, broken=0 WHERE url=%s """, (title, url)) def remove_url_title(url): """ Remove URL title @param url: URL """ run_sql("""DELETE FROM lnkENTRYURLTITLE WHERE url=%s """, (url, )) def set_url_broken(url): """ Set URL broken @param url: URL """ linkbackids = run_sql("""SELECT id FROM lnkENTRY WHERE origin_url=%s """, (url, )) run_sql("""UPDATE lnkENTRYURLTITLE SET title=%s, broken=1 WHERE url=%s """, (CFG_WEBLINKBACK_STATUS['BROKEN'], url)) # update all linkbacks for linkbackid in linkbackids: update_linkback_status(linkbackid[0], CFG_WEBLINKBACK_STATUS['BROKEN']) def get_url_title(url): """ Get URL title or URL if title does not exist (empty string) @param url: URL @return title or URL if titles does not exist (empty string) """ title = run_sql("""SELECT title FROM lnkENTRYURLTITLE WHERE url=%s and title<>"" and broken=0 """, (url, )) res = url if len(title) != 0: res = title[0][0] return xml_entities_to_utf8(res) def increment_broken_count(url): """ Increment broken count a URL @param url: URL """ run_sql("""UPDATE lnkENTRYURLTITLE SET broken_count=broken_count+1 WHERE url=%s """, (url, )) def remove_linkback(linkbackid): """ Remove a linkback database @param linkbackid: unique URL string for all lists """ # get ids logids = run_sql("""SELECT log.id FROM lnkLOG log JOIN lnkENTRYLOG entry_log ON log.id=entry_log.id_lnkLOG WHERE entry_log.id_lnkENTRY=%s """, (linkbackid, )) # delete linkback entry and entry log run_sql("""DELETE FROM lnkENTRY WHERE id=%s; DELETE FROM lnkENTRYLOG WHERE id_lnkENTRY=%s """, (linkbackid, linkbackid)) # delete log for logid in logids: run_sql("""DELETE FROM lnkLOG WHERE id=%s """, (logid[0], ))
gpl-2.0
-5,230,928,770,171,948,000
30.874715
128
0.517616
false
4.116799
false
false
false
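get_all_linkbacks in the weblinkback record above assembles its WHERE clause incrementally with a small add_condition helper, so optional filters only appear when a value is supplied. A generic sketch of that pattern (table and column names here are placeholders, not the real lnkENTRY schema):

def build_query(filters):
    """Return ('SELECT ... WHERE a=%s AND b=%s', params) from a dict of optional filters."""
    sql = "SELECT id FROM some_table"  # placeholder table name
    conditions, params = [], []
    for column, value in filters.items():
        if value is not None:
            # First condition opens the WHERE clause, later ones are ANDed on.
            conditions.append(("WHERE %s=%%s" if not conditions else "AND %s=%%s") % column)
            params.append(value)
    return "%s %s" % (sql, " ".join(conditions)), tuple(params)

query, params = build_query({"status": "APPROVED", "id_bibrec": None, "type": "trackback"})
# query  -> 'SELECT id FROM some_table WHERE status=%s AND type=%s'
# params -> ('APPROVED', 'trackback')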
lem9/weblate
openshift/wsgi_install.py
1
3249
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright © 2014 Daniel Tschan <[email protected]> # # This file is part of Weblate <https://weblate.org/> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. # import os from string import Template VIRTUALENV = os.path.join( os.environ['OPENSHIFT_PYTHON_DIR'], 'virtenv', 'bin', 'activate_this.py' ) with open(VIRTUALENV) as handle: code = compile(handle.read(), 'activate_this.py', 'exec') exec(code, dict(__file__=VIRTUALENV)) # noqa def application(environ, start_response): ctype = 'text/html' response_body = Template('''<!doctype html> <html lang="en"> <head> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <meta charset="utf-8"> <title>Installing Weblate</title> <style> html { background: #f5f5f5; height: 100%; } body { color: #404040; font-family: "Helvetica Neue",Helvetica,"Liberation Sans",Arial,sans-serif; font-size: 14px; line-height: 1.4; } h1 { color: #000; line-height: 1.38em; margin: .4em 0 .5em; font-size: 25px; font-weight: 300; border-bottom: 1px solid #fff; } h1:after { content: ""; display: block; height: 1px; background-color: #ddd; } p { margin: 0 0 2em; } pre { padding: 13.333px 20px; margin: 0 0 20px; font-size: 13px; line-height: 1.4; background-color: #fff; border-left: 2px solid rgba(120,120,120,0.35); font-family: Menlo,Monaco,"Liberation Mono",Consolas,monospace !important; } .content { display: table; margin-left: -15px; margin-right: -15px; position: relative; min-height: 1px; padding-left: 30px; padding-right: 30px; } </style> </head> <body> <div class="content"> <h1>$action1 Weblate</h1> <p> Weblate is being $action2. Please wait a few minutes and refresh this page. </p> $log </div> </body> </html>''') context = {} if os.path.exists(os.environ['OPENSHIFT_DATA_DIR'] + '/.installed'): context['action1'] = 'Updating' context['action2'] = 'updated' context['log'] = '' else: context['action1'] = 'Installing' context['action2'] = 'installed' log_msg = os.popen( r"cat ${OPENSHIFT_PYTHON_LOG_DIR}/install.log |" r" grep '^[^ ]\|setup.py install' |" r" sed 's,/var/lib/openshift/[a-z0-9]\{24\},~,g'" ).read() context['log'] = '<pre>' + log_msg + '</pre>' response_body = response_body.substitute(context) status = '200 OK' response_headers = [ ('Content-Type', ctype), ('Content-Length', str(len(response_body))) ] start_response(status, response_headers) return [response_body]
gpl-3.0
-2,372,140,518,733,032,000
24.178295
77
0.641933
false
3.120077
false
false
false
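The install page in the wsgi_install.py record above is built with string.Template: a $-placeholder HTML skeleton is filled from a context dict and returned through the WSGI application(environ, start_response) entry point. A stripped-down sketch of the same flow (the page text here is invented, not Weblate's):

from string import Template
from wsgiref.simple_server import make_server  # only needed to try the app locally

PAGE = Template("<html><body><h1>$action1 Example</h1><p>Being $action2...</p>$log</body></html>")

def application(environ, start_response):
    body = PAGE.substitute(action1="Installing", action2="installed", log="").encode("utf-8")
    start_response("200 OK", [("Content-Type", "text/html"),
                              ("Content-Length", str(len(body)))])
    return [body]

if __name__ == "__main__":
    make_server("", 8000, application).serve_forever()  # serve on http://localhost:8000/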
gibsjose/SpectrumSite
Utilities/SteeringGenerator.py
1
3470
#!/usr/bin/python

import sys

# Input dictionary from KVP file
d = {}

# Defaults dictionary
defaults = {}

# Checks whether the key exists in the input dictionary
def Write(_key, _file):
    if _key in d:
        if not d[_key].strip():
            _file.write("; ")
        _file.write(_key + ' = ' + d[_key])
    elif _key in defaults:
        _file.write(_key + ' = ' + defaults[_key] + '\n')

inputPath = sys.argv[1]
outputPath = sys.argv[2]

# Open the key-value-pair file and create a dictionary out of it
#with open(inputPath, 'r') as f:
#print "inputPath = " inputPath
f = open(inputPath, 'r')

for line in f:
    # Split the line based on the '='
    (key, val) = line.split(' = ')

    # Strip newlines from the value
    val.rstrip('\n');

    #Store in the dictionary
    d[key] = val

#Close the file
f.close()

# Create default dictionary
# [GEN]
defaults['debug'] = 'false'

# [GRAPH]
defaults['plot_band'] = 'false'
defaults['plot_error_ticks'] = 'false'
defaults['plot_marker'] = 'true'
defaults['plot_staggered'] = 'true'
defaults['match_binning'] = 'true'
defaults['grid_corr'] = 'false'
defaults['label_sqrt_s'] = 'true'
defaults['x_legend'] = '0.9'
defaults['y_legend'] = '0.9'
# defaults['y_overlay_min'] = ''
# defaults['y_overlay_max'] = ''
# defaults['y_ratio_min'] = ''
# defaults['y_ratio_max'] = ''
defaults['band_with_pdf'] = 'true'
defaults['band_with_alphas'] = 'false'
defaults['band_with_scale'] = 'false'
defaults['band_total'] = 'false'

# [PLOT_0]
defaults['plot_type'] = 'data, grid, pdf'
defaults['desc'] = ''
defaults['data_directory'] = '.'
defaults['grid_directory'] = '.'
defaults['pdf_directory'] = '.'
defaults['data_steering_files'] = 'none'
defaults['grid_steering_files'] = 'none'
defaults['pdf_steering_files'] = 'none'
# defaults['data_marker_style'] = '20'
# defaults['data_marker_color'] = '1'
# defaults['pdf_fill_style'] = ''
# defaults['pdf_fill_color'] = ''
# defaults['pdf_marker_style'] = ''
defaults['x_scale'] = '1.0'
defaults['y_scale'] = '1.0'
defaults['x_log'] = 'true'
defaults['y_log'] = 'true'
defaults['display_style'] = 'overlay'
defaults['overlay_style'] = 'data, convolute'
defaults['ratio_title'] = 'Ratio'

# Write the Steering File
#with open(outputPath, 'w') as f:
f = open(outputPath, 'w')

# [GEN]
f.write('[GEN]\n')
Write('debug', f)

# [GRAPH]
f.write('\n[GRAPH]\n')
Write('plot_band', f)
Write('plot_error_ticks', f)
Write('plot_marker', f)
Write('plot_staggered', f)
Write('match_binning', f)
Write('grid_corr', f)
Write('label_sqrt_s', f)
Write('x_legend', f)
Write('y_legend', f)
Write('y_overlay_min', f)
Write('y_overlay_max', f)
Write('y_ratio_min', f)
Write('y_ratio_max', f)
Write('band_with_pdf', f)
Write('band_with_alphas', f)
Write('band_with_scale', f)
Write('band_total', f)

# [PLOT_0]
f.write('\n[PLOT_0]\n')
Write('plot_type', f)
Write('desc', f)
Write('data_directory', f)
Write('grid_directory', f)
Write('pdf_directory', f)
Write('data_steering_files', f)
Write('grid_steering_files', f)
Write('pdf_steering_files', f)
Write('data_marker_style', f)
Write('data_marker_color', f)
Write('pdf_fill_style', f)
Write('pdf_fill_color', f)
Write('pdf_marker_style', f)
Write('x_scale', f)
Write('y_scale', f)
Write('x_log', f)
Write('y_log', f)
Write('display_style', f)
Write('overlay_style', f)
Write('ratio_title', f)

#Look for up to 10 ratios
for i in range(0, 10):
    rs = 'ratio_style_' + str(i)
    r = 'ratio_' + str(i)
    Write(rs, f)
    Write(r, f)

f.close()
mit
-3,632,527,644,091,879,400
22.605442
64
0.627089
false
2.71518
false
false
false
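SteeringGenerator.py above reads "key = value" lines into a dict and falls back to a defaults dict when a key is absent. The same lookup can be expressed compactly with dict.get; a small sketch (file name and values are hypothetical):

defaults = {"debug": "false", "x_log": "true"}

def load_kvp(path):
    """Parse lines of the form 'key = value' into a dict, skipping malformed lines."""
    d = {}
    with open(path) as fh:
        for line in fh:
            if " = " in line:
                key, val = line.split(" = ", 1)
                d[key.strip()] = val.strip()
    return d

# user_values = load_kvp("input.kvp")  # hypothetical input file
user_values = {"debug": "true"}        # inline stand-in so the sketch runs as-is
for key in ("debug", "x_log"):
    print("%s = %s" % (key, user_values.get(key, defaults[key])))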
mclaughlin6464/pearce
bin/mcmc/pearce_mcmc_xigg_emu1_shot.py
1
2151
from pearce.emulator import OriginalRecipe, ExtraCrispy, SpicyBuffalo
from pearce.inference import run_mcmc_iterator
import numpy as np
from os import path
import cPickle as pickle

#training_file = '/u/ki/swmclau2/des/xi_cosmo_trainer/PearceRedMagicXiCosmoFixedNd.hdf5'
training_file = '/scratch/users/swmclau2/xi_zheng07_cosmo_lowmsat/PearceRedMagicXiCosmoFixedNd.hdf5'

em_method = 'gp'
split_method = 'random'

load_fixed_params = {'z':0.0}#, 'HOD': 0}

np.random.seed(0)
emu = SpicyBuffalo(training_file, method = em_method, fixed_params=load_fixed_params,
                   custom_mean_function = 'linear', downsample_factor = 0.1)

fixed_params = {}#'f_c':1.0}#,'logM1': 13.8 }# 'z':0.0}

emulation_point = [('logM0', 14.0), ('sigma_logM', 0.2),
                   ('alpha', 1.083),('logM1', 13.7)]#, ('logMmin', 12.233)]

em_params = dict(emulation_point)
em_params.update(fixed_params)

with open('cosmo_param_dict.pkl', 'r') as f:
    cosmo_param_dict = pickle.load(f)

y = np.loadtxt('xi_gg_true_jk.npy')
emu1_cov = emu.ycov
shot_cov = np.loadtxt('xi_gg_shot_cov_true.npy')
#jk_cov = np.loadtxt('xi_gg_cov_true_jk.npy')
#sample_cov = np.loadtxt('xigg_scov_log.npy')
cov = emu1_cov + shot_cov

#em_params.update( cosmo_param_dict)
fixed_params.update(em_params)
#fixed_params.update(cosmo_param_dict)
em_params = cosmo_param_dict

param_names = [k for k in em_params.iterkeys() if k not in fixed_params]

nwalkers = 500
nsteps = 20000
nburn = 0

savedir = '/scratch/users/swmclau2/PearceMCMC/'
#chain_fname = path.join(savedir, '%d_walkers_%d_steps_chain_cosmo_zheng_xi_lowmsat.npy'%(nwalkers, nsteps ))
chain_fname = path.join(savedir, '%d_walkers_%d_steps_xigg_m3_1_lin_emu1_shot.npy'%(nwalkers, nsteps))

with open(chain_fname, 'w') as f:
    f.write('#' + '\t'.join(param_names)+'\n')

print 'starting mcmc'
rpoints = emu.scale_bin_centers

np.random.seed(0)
for pos in run_mcmc_iterator([emu], param_names, [y], [cov], rpoints, fixed_params = fixed_params,nwalkers = nwalkers,\
                             nsteps = nsteps):#, nburn = nburn, ncores = 1):#, resume_from_previous = chain_fname):

    with open(chain_fname, 'a') as f:
        np.savetxt(f, pos)
mit
-2,329,669,638,456,525,000
32.609375
143
0.686657
false
2.478111
false
false
false
m4rx9/rna-pdb-tools
rna_tools/tools/PyMOL4RNA/libs/show_contacts.py
1
15707
#!/usr/bin/python """ PyMOL plugin that provides show_contacts command and GUI for highlighting good and bad polar contacts. Factored out of clustermols by Matthew Baumgartner. The advantage of this package is it requires many fewer dependencies. Modified: Marcin Magnus 2020 Source <https://pymolwiki.org/index.php/Pymol-script-repo> """ from __future__ import print_function import sys import os from pymol import cmd print("""show_contacts ------------------------------------- _polar: good polar interactions according to PyMOL _polar_ok: compute possibly suboptimal polar interactions using the user specified distance _aa: acceptors acceptors _dd: donors donors _all is all ;-) above!""") DEBUG=1 def show_contacts(selection='*', selection2='*', result="contacts", cutoff=3.6, bigcutoff = 4.0, labels=False, SC_DEBUG = DEBUG): """ USAGE show_contacts selection, selection2, [result=contacts],[cutoff=3.6],[bigcutoff=4.0] Show various polar contacts, the good, the bad, and the ugly. Edit MPB 6-26-14: The distances are heavy atom distances, so I upped the default cutoff to 4.0 Returns: True/False - if False, something went wrong """ if SC_DEBUG > 4: print('Starting show_contacts') print('selection = "' + selection + '"') print('selection2 = "' + selection2 + '"') result = cmd.get_legal_name(result) #if the group of contacts already exist, delete them cmd.delete(result) # ensure only N and O atoms are in the selection all_don_acc1 = selection + " and (donor or acceptor)" all_don_acc2 = selection2 + " and (donor or acceptor)" if SC_DEBUG > 4: print('all_don_acc1 = "' + all_don_acc1 + '"') print('all_don_acc2 = "' + all_don_acc2 + '"') #if theses selections turn out not to have any atoms in them, pymol throws cryptic errors when calling the dist function like: #'Selector-Error: Invalid selection name' #So for each one, manually perform the selection and then pass the reference to the distance command and at the end, clean up the selections #the return values are the count of the number of atoms all1_sele_count = cmd.select('all_don_acc1_sele', all_don_acc1) all2_sele_count = cmd.select('all_don_acc2_sele', all_don_acc2) #print out some warnings if DEBUG > 3: if not all1_sele_count: print('Warning: all_don_acc1 selection empty!') if not all2_sele_count: print('Warning: all_don_acc2 selection empty!') ######################################## allres = result + "_all" if all1_sele_count and all2_sele_count: #print(allres) #print(cmd.get_distance(allres, 'all_don_acc1_sele', 'all_don_acc2_sele', bigcutoff, mode = 0)) any = cmd.distance(allres, 'all_don_acc1_sele', 'all_don_acc2_sele', bigcutoff, mode = 0) # if any is 0 it seems that there is no distance! if any: cmd.set("dash_radius", "0.05", allres) if not labels: cmd.hide("labels", allres) else: # just do nothing and clena up print('no contacts') cmd.delete('all_don_acc1_sele') cmd.delete('all_don_acc2_sele') cmd.delete(result + "_all") return None ######################################## # compute good polar interactions according to pymol polres = result + "_polar" if all1_sele_count and all2_sele_count: cmd.distance(polres, 'all_don_acc1_sele', 'all_don_acc2_sele', cutoff, mode = 2) #hopefully this checks angles? 
Yes #cmd.set("dash_color", "marine", allres) #cmd.set('dash_gap', '0') cmd.set("dash_radius","0.2", polres) #"0.126" #cmd.set("dash_color", "marine", allres) if not labels: cmd.hide("labels", polres) ######################################## # When running distance in mode=2, the cutoff parameter is ignored if set higher then the default of 3.6 # so set it to the passed in cutoff and change it back when you are done. old_h_bond_cutoff_center = cmd.get('h_bond_cutoff_center') # ideal geometry old_h_bond_cutoff_edge = cmd.get('h_bond_cutoff_edge') # minimally acceptable geometry cmd.set('h_bond_cutoff_center', bigcutoff) cmd.set('h_bond_cutoff_edge', bigcutoff) # compute possibly suboptimal polar interactions using the user specified distance pol_ok_res = result + "_polar_ok" if all1_sele_count and all2_sele_count: cmd.distance(pol_ok_res, 'all_don_acc1_sele', 'all_don_acc2_sele', bigcutoff, mode = 2) cmd.set("dash_radius", "0.06", pol_ok_res) if not labels: cmd.hide("labels", pol_ok_res) #now reset the h_bond cutoffs cmd.set('h_bond_cutoff_center', old_h_bond_cutoff_center) cmd.set('h_bond_cutoff_edge', old_h_bond_cutoff_edge) ######################################## onlyacceptors1 = selection + " and (acceptor and !donor)" onlyacceptors2 = selection2 + " and (acceptor and !donor)" onlydonors1 = selection + " and (!acceptor and donor)" onlydonors2 = selection2 + " and (!acceptor and donor)" #perform the selections onlyacceptors1_sele_count = cmd.select('onlyacceptors1_sele', onlyacceptors1) onlyacceptors2_sele_count = cmd.select('onlyacceptors2_sele', onlyacceptors2) onlydonors1_sele_count = cmd.select('onlydonors1_sele', onlydonors1) onlydonors2_sele_count = cmd.select('onlydonors2_sele', onlydonors2) #print out some warnings if SC_DEBUG > 2: if not onlyacceptors1_sele_count: print('Warning: onlyacceptors1 selection empty!') if not onlyacceptors2_sele_count: print('Warning: onlyacceptors2 selection empty!') if not onlydonors1_sele_count: print('Warning: onlydonors1 selection empty!') if not onlydonors2_sele_count: print('Warning: onlydonors2 selection empty!') # acceptors acceptors accres = result+"_aa" if onlyacceptors1_sele_count and onlyacceptors2_sele_count: aa_dist_out = cmd.distance(accres, 'onlyacceptors1_sele', 'onlyacceptors2_sele', cutoff, 0) if aa_dist_out < 0: print('\n\nCaught a pymol selection error in acceptor-acceptor selection of show_contacts') print('accres:', accres) print('onlyacceptors1', onlyacceptors1) print('onlyacceptors2', onlyacceptors2) return False cmd.set("dash_color","red",accres) cmd.set("dash_radius","0.125",accres) if not labels: cmd.hide("labels", accres) ######################################## # donors donors donres = result+"_dd" if onlydonors1_sele_count and onlydonors2_sele_count: dd_dist_out = cmd.distance(donres, 'onlydonors1_sele', 'onlydonors2_sele', cutoff, 0) #try to catch the error state if dd_dist_out < 0: print('\n\nCaught a pymol selection error in dd selection of show_contacts') print('donres:', donres) print('onlydonors1', onlydonors1) print('onlydonors2', onlydonors2) print("cmd.distance('" + donres + "', '" + onlydonors1 + "', '" + onlydonors2 + "', " + str(cutoff) + ", 0)") return False cmd.set("dash_color","red",donres) cmd.set("dash_radius","0.125",donres) if not labels: cmd.hide("labels", donres) ########################################################## ##### find the buried unpaired atoms of the receptor ##### ########################################################## #initialize the variable for when CALC_SASA is False 
unpaired_atoms = '' ## Group print(allres) # contacts_all cmd.group(result,"%s %s %s %s %s %s" % (polres, allres, accres, donres, pol_ok_res, unpaired_atoms)) ## Clean up the selection objects #if the show_contacts debug level is high enough, don't delete them. if SC_DEBUG < 5: cmd.delete('all_don_acc1_sele') cmd.delete('all_don_acc2_sele') cmd.delete('onlyacceptors1_sele') cmd.delete('onlyacceptors2_sele') cmd.delete('onlydonors1_sele') cmd.delete('onlydonors2_sele') cmd.disable('contacts_all') cmd.disable('contacts_polar_ok') cmd.disable('contacts_aa') cmd.disable('contacts_dd') return True cmd.extend('contacts', show_contacts) #contacts to avoid clashing with cluster_mols version ################################################################################# ########################### Start of pymol plugin code ########################## ################################################################################# about_text = '''show_contacts was factored out of the much more full-featured cluster_mols by Dr. Matt Baumgartner (https://pymolwiki.org/index.php/Cluster_mols). It provides an easy way to highlight polar contacts (and clashes) between two selections without requiring the installation of additional dependencies. ''' class Show_Contacts: ''' Tk version of the Plugin GUI ''' def __init__(self, app): parent = app.root self.parent = parent self.app = app import Pmw ############################################################################################ ### Open a window with options to select to loaded objects ### ############################################################################################ self.select_dialog = Pmw.Dialog(parent, buttons = ('Ok','Cancel'), title = 'Show Contacts Plugin', command = self.button_pressed ) self.select_dialog.withdraw() #allow the user to select from objects already loaded in pymol self.select_object_combo_box = Pmw.ComboBox(self.select_dialog.interior(), scrolledlist_items=[], labelpos='w', label_text='Select loaded object:', listbox_height = 2, dropdown=True) self.select_object_combo_box2 = Pmw.ComboBox(self.select_dialog.interior(), scrolledlist_items=[], labelpos='w', label_text='Select loaded object:', listbox_height = 2, dropdown=True) self.select_object_combo_box.grid(column=1, row=0) self.select_object_combo_box2.grid(column=2, row=0) self.populate_ligand_select_list() self.select_dialog.show() def button_pressed(self, result): if hasattr(result,'keycode'): if result.keycode == 36: print('keycode:', result.keycode) elif result == 'Ok' or result == 'Exit' or result == None: s1 = self.select_object_combo_box.get() s2 = self.select_object_combo_box2.get() show_contacts(s1,s2,'%s_%s'%(s1,s2)) self.select_dialog.withdraw() elif result == 'Cancel' or result == None: self.select_dialog.withdraw() def populate_ligand_select_list(self): ''' Go thourgh the loaded objects in PyMOL and add them to the selected list. 
''' #get the loaded objects loaded_objects = _get_select_list() self.select_object_combo_box.clear() self.select_object_combo_box2.clear() for ob in loaded_objects: self.select_object_combo_box.insert('end', ob) self.select_object_combo_box2.insert('end', ob) def _get_select_list(): ''' Get either a list of object names, or a list of chain selections ''' loaded_objects = [name for name in cmd.get_names('all', 1) if '_cluster_' not in name] # if single object, try chain selections if len(loaded_objects) == 1: chains = cmd.get_chains(loaded_objects[0]) if len(chains) > 1: loaded_objects = ['{} & chain {}'.format(loaded_objects[0], chain) for chain in chains] return loaded_objects class Show_Contacts_Qt_Dialog(object): ''' Qt version of the Plugin GUI ''' def __init__(self): from pymol.Qt import QtWidgets dialog = QtWidgets.QDialog() self.setupUi(dialog) self.populate_ligand_select_list() dialog.accepted.connect(self.accept) dialog.exec_() def accept(self): s1 = self.select_object_combo_box.currentText() s2 = self.select_object_combo_box2.currentText() show_contacts(s1, s2, '%s_%s' % (s1, s2)) def populate_ligand_select_list(self): loaded_objects = _get_select_list() self.select_object_combo_box.clear() self.select_object_combo_box2.clear() self.select_object_combo_box.addItems(loaded_objects) self.select_object_combo_box2.addItems(loaded_objects) if len(loaded_objects) > 1: self.select_object_combo_box2.setCurrentIndex(1) def setupUi(self, Dialog): # Based on auto-generated code from ui file from pymol.Qt import QtCore, QtWidgets Dialog.resize(400, 50) self.gridLayout = QtWidgets.QGridLayout(Dialog) label = QtWidgets.QLabel("Select loaded object:", Dialog) self.gridLayout.addWidget(label, 0, 0, 1, 1) self.select_object_combo_box = QtWidgets.QComboBox(Dialog) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed) self.select_object_combo_box.setSizePolicy(sizePolicy) self.select_object_combo_box.setEditable(True) self.gridLayout.addWidget(self.select_object_combo_box, 0, 1, 1, 1) label = QtWidgets.QLabel("Select loaded object:", Dialog) self.gridLayout.addWidget(label, 1, 0, 1, 1) self.select_object_combo_box2 = QtWidgets.QComboBox(Dialog) self.select_object_combo_box2.setSizePolicy(sizePolicy) self.select_object_combo_box2.setEditable(True) self.gridLayout.addWidget(self.select_object_combo_box2, 1, 1, 1, 1) self.buttonBox = QtWidgets.QDialogButtonBox(Dialog) self.buttonBox.setOrientation(QtCore.Qt.Horizontal) self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok) self.gridLayout.addWidget(self.buttonBox, 2, 0, 1, 2) self.buttonBox.accepted.connect(Dialog.accept) self.buttonBox.rejected.connect(Dialog.reject) def __init__(self): try: from pymol.plugins import addmenuitemqt addmenuitemqt('Show Contacts', Show_Contacts_Qt_Dialog) return except Exception as e: print(e) self.menuBar.addmenuitem('Plugin', 'command', 'Show Contacts', label = 'Show Contacts', command = lambda s=self : Show_Contacts(s))
mit
992,679,597,646,326,900
39.903646
144
0.569364
false
3.886909
false
false
false
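Because the show_contacts module above registers the function with cmd.extend('contacts', show_contacts), it can be driven either from a Python script or from the PyMOL prompt. A hedged usage sketch based only on the signature shown above (object and selection names are made up):

# Inside a PyMOL session, after this plugin has been loaded:
from pymol import cmd
import show_contacts  # assumes the file is importable under this module name

cmd.load("complex.pdb", "complex")                       # hypothetical structure
show_contacts.show_contacts("complex and chain A",       # first selection
                            "complex and chain B",       # second selection
                            result="AB_contacts",        # group name for the distance objects
                            cutoff=3.6, bigcutoff=4.0)   # defaults from the docstring
# or, equivalently, at the PyMOL prompt:
#   contacts complex and chain A, complex and chain B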
Axelrod-Python/axelrod-evolver
tests/integration/test_cycler_integration.py
1
1543
import os
import tempfile
import unittest

import axelrod as axl

import axelrod_dojo as axl_dojo


class TestCyclerParams(unittest.TestCase):
    def setUp(self):
        pass

    def test_default_single_opponent_e2e(self):
        temp_file = tempfile.NamedTemporaryFile()
        # we will set the objective to be
        cycler_objective = axl_dojo.prepare_objective(name="score", turns=10, repetitions=1)

        # Lets use an opponent_list of just one:
        opponent_list = [axl.TitForTat(), axl.Calculator()]
        cycler = axl.EvolvableCycler

        # params to pass through
        cycler_kwargs = {
            "cycle_length": 10
        }

        # assert file is empty to start
        self.assertEqual(temp_file.readline(), b'')  # note that .readline() reads bytes hence b''

        population = axl_dojo.Population(player_class=cycler,
                                         params_kwargs=cycler_kwargs,
                                         size=20,
                                         objective=cycler_objective,
                                         output_filename=temp_file.name,
                                         opponents=opponent_list)

        generations = 5
        population.run(generations, print_output=False)

        # assert the output file exists and is not empty
        self.assertTrue(os.path.exists(temp_file.name))
        self.assertNotEqual(temp_file.readline(), b'')  # note that .readline() reads bytes hence b''

        # close the temp file
        temp_file.close()
mit
2,311,307,298,383,570,400
33.288889
101
0.573558
false
4.227397
false
false
false
airelil/pywinauto
pywinauto/findbestmatch.py
1
20748
# GUI Application automation and testing library # Copyright (C) 2006-2018 Mark Mc Mahon and Contributors # https://github.com/pywinauto/pywinauto/graphs/contributors # http://pywinauto.readthedocs.io/en/latest/credits.html # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of pywinauto nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Module to find the closest match of a string in a list""" from __future__ import unicode_literals import re import difflib import six #import ctypes #import ldistance #levenshtein_distance = ctypes.cdll.levenshtein.levenshtein_distance #levenshtein_distance = ldistance.distance find_best_control_match_cutoff = .6 #==================================================================== class MatchError(IndexError): """A suitable match could not be found""" def __init__(self, items = None, tofind = ''): """Init the parent with the message""" self.tofind = tofind self.items = items if self.items is None: self.items = [] IndexError.__init__(self, "Could not find '{0}' in '{1}'".format(tofind, self.items)) _cache = {} # given a list of texts return the match score for each # and the best score and text with best score #==================================================================== def _get_match_ratios(texts, match_against): """Get the match ratio of how each item in texts compared to match_against""" # now time to figure out the matching ratio_calc = difflib.SequenceMatcher() ratio_calc.set_seq1(match_against) ratios = {} best_ratio = 0 best_text = '' for text in texts: if 0: pass if (text, match_against) in _cache: ratios[text] = _cache[(text, match_against)] elif(match_against, text) in _cache: ratios[text] = _cache[(match_against, text)] else: # set up the SequenceMatcher with other text ratio_calc.set_seq2(text) # try using the levenshtein distance instead #lev_dist = levenshtein_distance(six.text_type(match_against), six.text_type(text)) #ratio = 1 - lev_dist / 10.0 #ratios[text] = ratio # calculate ratio and store it ratios[text] = ratio_calc.ratio() _cache[(match_against, text)] = ratios[text] # if this is the best so far then update best stats if ratios[text] > best_ratio: best_ratio = ratios[text] 
best_text = text return ratios, best_ratio, best_text #==================================================================== def find_best_match(search_text, item_texts, items, limit_ratio = .5): """Return the item that best matches the search_text * **search_text** The text to search for * **item_texts** The list of texts to search through * **items** The list of items corresponding (1 to 1) to the list of texts to search through. * **limit_ratio** How well the text has to match the best match. If the best match matches lower then this then it is not considered a match and a MatchError is raised, (default = .5) """ search_text = _cut_at_eol(_cut_at_tab(search_text)) text_item_map = UniqueDict() # Clean each item, make it unique and map to # to the item index for text, item in zip(item_texts, items): text_item_map[_cut_at_eol(_cut_at_tab(text))] = item ratios, best_ratio, best_text = \ _get_match_ratios(text_item_map.keys(), search_text) if best_ratio < limit_ratio: raise MatchError(items = text_item_map.keys(), tofind = search_text) return text_item_map[best_text] #==================================================================== _after_tab = re.compile(r"\t.*", re.UNICODE) _after_eol = re.compile(r"\n.*", re.UNICODE) _non_word_chars = re.compile(r"\W", re.UNICODE) def _cut_at_tab(text): """Clean out non characters from the string and return it""" # remove anything after the first tab return _after_tab.sub("", text) def _cut_at_eol(text): """Clean out non characters from the string and return it""" # remove anything after the first EOL return _after_eol.sub("", text) def _clean_non_chars(text): """Remove non word characters""" # should this also remove everything after the first tab? # remove non alphanumeric characters return _non_word_chars.sub("", text) def is_above_or_to_left(ref_control, other_ctrl): """Return true if the other_ctrl is above or to the left of ref_control""" text_r = other_ctrl.rectangle() ctrl_r = ref_control.rectangle() # skip controls where text win is to the right of ctrl if text_r.left >= ctrl_r.right: return False # skip controls where text win is below ctrl if text_r.top >= ctrl_r.bottom: return False # text control top left corner is below control # top left corner - so not to the above or left :) if text_r.top >= ctrl_r.top and text_r.left >= ctrl_r.left: return False return True #==================================================================== distance_cuttoff = 999 def get_non_text_control_name(ctrl, controls, text_ctrls): """ return the name for this control by finding the closest text control above and to its left """ names = [] # simply look for an instance of the control in the list, # we don't use list.index() method as it invokes __eq__ ctrl_index = 0 for i, c in enumerate(controls): if c is ctrl: ctrl_index = i break ctrl_friendly_class_name = ctrl.friendly_class_name() if ctrl_index != 0: prev_ctrl = controls[ctrl_index-1] prev_ctrl_text = prev_ctrl.window_text() if prev_ctrl.friendly_class_name() == "Static" and \ prev_ctrl.is_visible() and prev_ctrl_text and \ is_above_or_to_left(ctrl, prev_ctrl): names.append( prev_ctrl_text + ctrl_friendly_class_name) best_name = '' closest = distance_cuttoff # now for each of the visible text controls for text_ctrl in text_ctrls: # get aliases to the control rectangles text_r = text_ctrl.rectangle() ctrl_r = ctrl.rectangle() # skip controls where text win is to the right of ctrl if text_r.left >= ctrl_r.right: continue # skip controls where text win is below ctrl if text_r.top >= ctrl_r.bottom: continue # 
calculate the distance between the controls # at first I just calculated the distance from the top left # corner of one control to the top left corner of the other control # but this was not best, so as a text control should either be above # or to the left of the control I get the distance between # the top left of the non text control against the # Top-Right of the text control (text control to the left) # Bottom-Left of the text control (text control above) # then I get the min of these two # We do not actually need to calculate the difference here as we # only need a comparative number. As long as we find the closest one # the actual distance is not all that important to us. # this reduced the unit tests run on my by about 1 second # (from 61 ->60 s) # (x^2 + y^2)^.5 #distance = ( # (text_r.left - ctrl_r.left) ** 2 + # (x^2 + y^2) # (text_r.bottom - ctrl_r.top) ** 2) \ # ** .5 # ^.5 #distance2 = ( # (text_r.right - ctrl_r.left) ** 2 + # (x^2 + y^2) # (text_r.top - ctrl_r.top) ** 2) \ # ** .5 # ^.5 distance = abs(text_r.left - ctrl_r.left) + abs(text_r.bottom - ctrl_r.top) distance2 = abs(text_r.right - ctrl_r.left) + abs(text_r.top - ctrl_r.top) distance = min(distance, distance2) # UpDown control should use Static text only because edit box text is often useless if ctrl_friendly_class_name == "UpDown" and \ text_ctrl.friendly_class_name() == "Static" and distance < closest: # TODO: use search in all text controls for all non-text ones # (like Dijkstra algorithm vs Floyd one) closest = distance ctrl_text = text_ctrl.window_text() if ctrl_text is None: # the control probably doesn't exist so skip it continue best_name = ctrl_text + ctrl_friendly_class_name # if this distance was closer than the last one elif distance < closest: closest = distance #if text_ctrl.window_text() == '': # best_name = ctrl_friendly_class_name + ' '.join(text_ctrl.texts()[1:2]) #else: ctrl_text = text_ctrl.window_text() if ctrl_text is None: # the control probably doesn't exist so skip it continue best_name = ctrl_text + ctrl_friendly_class_name names.append(best_name) return names #==================================================================== def get_control_names(control, allcontrols, textcontrols): """Returns a list of names for this control""" names = [] # if it has a reference control - then use that #if hasattr(control, 'ref') and control.ref: # control = control.ref # Add the control based on it's friendly class name friendly_class_name = control.friendly_class_name() names.append(friendly_class_name) # if it has some character text then add it base on that # and based on that with friendly class name appended cleaned = control.window_text() # Todo - I don't like the hardcoded classnames here! if cleaned and control.has_title: names.append(cleaned) names.append(cleaned + friendly_class_name) elif control.has_title and friendly_class_name != 'TreeView': try: for text in control.texts()[1:]: names.append(friendly_class_name + text) except Exception: #import traceback #from .actionlogger import ActionLogger pass #ActionLogger().log('Warning! 
Cannot get control.texts()') #\nTraceback:\n' + traceback.format_exc()) # so find the text of the nearest text visible control non_text_names = get_non_text_control_name(control, allcontrols, textcontrols) # and if one was found - add it if non_text_names: names.extend(non_text_names) # it didn't have visible text else: # so find the text of the nearest text visible control non_text_names = get_non_text_control_name(control, allcontrols, textcontrols) # and if one was found - add it if non_text_names: names.extend(non_text_names) # return the names - and make sure there are no duplicates or empty values cleaned_names = set(names) - set([None, ""]) return cleaned_names #==================================================================== class UniqueDict(dict): """A dictionary subclass that handles making its keys unique""" def __setitem__(self, text, item): """Set an item of the dictionary""" # this text is already in the map # so we need to make it unique if text in self: # find next unique text after text1 unique_text = text counter = 2 while unique_text in self: unique_text = text + str(counter) counter += 1 # now we also need to make sure the original item # is under text0 and text1 also! if text + '0' not in self: dict.__setitem__(self, text+'0', self[text]) dict.__setitem__(self, text+'1', self[text]) # now that we don't need original 'text' anymore # replace it with the uniq text text = unique_text # add our current item dict.__setitem__(self, text, item) def find_best_matches( self, search_text, clean = False, ignore_case = False): """Return the best matches for search_text in the items * **search_text** the text to look for * **clean** whether to clean non text characters out of the strings * **ignore_case** compare strings case insensitively """ # now time to figure out the matching ratio_calc = difflib.SequenceMatcher() if ignore_case: search_text = search_text.lower() ratio_calc.set_seq1(search_text) ratios = {} best_ratio = 0 best_texts = [] ratio_offset = 1 if clean: ratio_offset *= .9 if ignore_case: ratio_offset *= .9 for text_ in self: # make a copy of the text as we need the original later text = text_ if clean: text = _clean_non_chars(text) if ignore_case: text = text.lower() # check if this item is in the cache - if yes, then retrieve it if (text, search_text) in _cache: ratios[text_] = _cache[(text, search_text)] elif(search_text, text) in _cache: ratios[text_] = _cache[(search_text, text)] # not in the cache - calculate it and add it to the cache else: # set up the SequenceMatcher with other text ratio_calc.set_seq2(text) # if a very quick check reveals that this is not going # to match then ratio = ratio_calc.real_quick_ratio() * ratio_offset if ratio >= find_best_control_match_cutoff: ratio = ratio_calc.quick_ratio() * ratio_offset if ratio >= find_best_control_match_cutoff: ratio = ratio_calc.ratio() * ratio_offset # save the match we got and store it in the cache ratios[text_] = ratio _cache[(text, search_text)] = ratio # try using the levenshtein distance instead #lev_dist = levenshtein_distance(six.text_type(search_text), six.text_type(text)) #ratio = 1 - lev_dist / 10.0 #ratios[text_] = ratio #print "%5s" %("%0.2f"% ratio), search_text, `text` # if this is the best so far then update best stats if ratios[text_] > best_ratio and \ ratios[text_] >= find_best_control_match_cutoff: best_ratio = ratios[text_] best_texts = [text_] elif ratios[text_] == best_ratio: best_texts.append(text_) #best_ratio *= ratio_offset return best_ratio, best_texts 
#==================================================================== def build_unique_dict(controls): """Build the disambiguated list of controls Separated out to a different function so that we can get the control identifiers for printing. """ name_control_map = UniqueDict() # get the visible text controls so that we can get # the closest text if the control has no text text_ctrls = [ctrl_ for ctrl_ in controls if ctrl_.can_be_label and ctrl_.is_visible() and ctrl_.window_text()] # collect all the possible names for all controls # and build a list of them for ctrl in controls: ctrl_names = get_control_names(ctrl, controls, text_ctrls) # for each of the names for name in ctrl_names: name_control_map[name] = ctrl return name_control_map #==================================================================== def find_best_control_matches(search_text, controls): """Returns the control that is the the best match to search_text This is slightly differnt from find_best_match in that it builds up the list of text items to search through using information from each control. So for example for there is an OK, Button then the following are all added to the search list: "OK", "Button", "OKButton" But if there is a ListView (which do not have visible 'text') then it will just add "ListView". """ name_control_map = build_unique_dict(controls) #print ">>>>>>>", repr(name_control_map).decode("ascii", "ignore") # # collect all the possible names for all controls # # and build a list of them # for ctrl in controls: # ctrl_names = get_control_names(ctrl, controls) # # # for each of the names # for name in ctrl_names: # name_control_map[name] = ctrl search_text = six.text_type(search_text) best_ratio, best_texts = name_control_map.find_best_matches(search_text) best_ratio_ci, best_texts_ci = \ name_control_map.find_best_matches(search_text, ignore_case = True) best_ratio_clean, best_texts_clean = \ name_control_map.find_best_matches(search_text, clean = True) best_ratio_clean_ci, best_texts_clean_ci = \ name_control_map.find_best_matches( search_text, clean = True, ignore_case = True) if best_ratio_ci > best_ratio: best_ratio = best_ratio_ci best_texts = best_texts_ci if best_ratio_clean > best_ratio: best_ratio = best_ratio_clean best_texts = best_texts_clean if best_ratio_clean_ci > best_ratio: best_ratio = best_ratio_clean_ci best_texts = best_texts_clean_ci if best_ratio < find_best_control_match_cutoff: raise MatchError(items = name_control_map.keys(), tofind = search_text) return [name_control_map[best_text] for best_text in best_texts] # #def GetControlMatchRatio(text, ctrl): # # get the texts for the control # ctrl_names = get_control_names(ctrl) # # #get the best match for these # matcher = UniqueDict() # for name in ctrl_names: # matcher[name] = ctrl # # best_ratio, unused = matcher.find_best_matches(text) # # return best_ratio # # # #def get_controls_ratios(search_text, controls): # name_control_map = UniqueDict() # # # collect all the possible names for all controls # # and build a list of them # for ctrl in controls: # ctrl_names = get_control_names(ctrl) # # # for each of the names # for name in ctrl_names: # name_control_map[name] = ctrl # # match_ratios, best_ratio, best_text = \ # _get_match_ratios(name_control_map.keys(), search_text) # # return match_ratios, best_ratio, best_text,
bsd-3-clause
5,249,343,812,821,854,000
34.272727
118
0.581309
false
4.02952
false
false
false
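The fuzzy control matching in the pywinauto findbestmatch record above ultimately rests on difflib.SequenceMatcher ratios, with a cache and a 0.6 cutoff. A minimal stand-alone sketch of ratio-based best matching against a list of control names (the candidate names are invented for the example):

import difflib

def best_match(search_text, candidates, cutoff=0.6):
    """Return (best_candidate, ratio), or (None, ratio) when nothing clears the cutoff."""
    matcher = difflib.SequenceMatcher()
    matcher.set_seq1(search_text)
    best, best_ratio = None, 0.0
    for text in candidates:
        matcher.set_seq2(text)
        ratio = matcher.ratio()
        if ratio > best_ratio:
            best, best_ratio = text, ratio
    return (best, best_ratio) if best_ratio >= cutoff else (None, best_ratio)

print(best_match("OKButton", ["OK", "CancelButton", "OKButton2", "ListView"]))
# -> ('OKButton2', 0.94...) with the default 0.6 cutoff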
RIKILT/CITESspeciesDetect
CheckCriteriaBlastSingleSample.py
1
5204
#!/usr/bin/env python # Script which test the different filtering thresholds per barcode # Returns per barcode the detected species which match the criteria import sys import os ### Get the OTU abundance from the file (This is per barcode) def GetOTUabundance(statFile, pOTU): # Local variables f = open(statFile) abundance={} #OTUabun=100 for line in f: # Remove the enter from the end of the line line = line.rstrip() ### Get the different barcode from the statistics file if (line.startswith("############ Statistics for barcode: ")): barcode=line.split("############ Statistics for barcode: ")[1].replace(" ############", "") if not(barcode in abundance.keys()): abundance[barcode]=1 #print barcode else: if (line.startswith("# combined file: ")): assignedReads=int(line.split("\t")[1]) OTUabun=assignedReads*(pOTU/100) #print barcode+"\t"+str(assignedReads)+"\t"+str(OTUabun) abundance[barcode]=OTUabun ### Close the file and return the dictionary f.close() return abundance ### Function to retrieve the different organisms from the blast summary def GetHitsPerBarcode(abundance, InFile, pident, OutFile): # Local variables f = open(InFile, "r") output = open(OutFile, "w") CountSpec={} OTU="" qlen=0 for line in f: # Remove the enter from the end of the line line = line.rstrip() ### Get barcodes but ignore title lines if (line.startswith("#####")): if (line.startswith("##### Results for:")): output.write("\n"+line+"\n") barcode=line.split("##### Results for: ")[1].replace(" #####", "") output.write("OTU abun "+barcode+":\t"+str(abundance[barcode])+"\n") ### Get a different length per barcode if ( barcode == "ITS2" ): qlen=100 elif (barcode == "rbcL-mini"): qlen=140 elif ( barcode == "trnL_P6loop" ): qlen=10 else: qlen=200 else: ### Ignore the blast line of the output if (line.startswith("OTU")): splitLine = line.split("\t") ### Check if the size of the OTU is above the OTU abundance if (abundance[barcode] <= int(splitLine[0].split("size=")[1].replace(";",""))): ### Get the top hit (based on bitscore) if (OTU == splitLine[0]): if not (splitLine[4] < bitscore): ### Is your line matching the criteria (Query length and percentage of identity) if ( (int(splitLine[1] ) >= qlen) and (float(splitLine[3]) >= pident) ): output.write(line+"\n") else: ### Get the next values OTU=splitLine[0] bitscore=splitLine[4] ### Is your line matching the criteria (Query length and percentage of identity) if ( (int(splitLine[1] ) >= qlen) and (float(splitLine[3]) >= pident) ): output.write(line+"\n") else: ### Skip the empty lines if (line != ""): ### Only get the title lines from the blast output if (line.startswith("qseqid")): #print line output.write(line+"\n") ### Close the files output.close() f.close() ### Retrieve the hits per barcode def GetAllHitsPerBarcode(abundance, InFile, pident, OutFile): # Local variables f = open(InFile, "r") output = open(OutFile, "w") CountSpec={} OTU="" qlen=0 for line in f: # Remove the enter from the end of the line line = line.rstrip() ### Get barcodes but ignore title lines if (line.startswith("#####")): if (line.startswith("##### Results for:")): output.write("\n"+line+"\n") barcode=line.split("##### Results for: ")[1].replace(" #####", "") output.write("OTU abun "+barcode+":\t"+str(abundance[barcode])+"\n") ### Get a different length per barcode if ( barcode == "ITS2" ): qlen=100 elif (barcode == "rbcL-mini"): qlen=140 elif ( barcode == "trnL_P6loop" ): qlen=10 else: qlen=200 else: ### Ignore the blast line of the output if (line.startswith("OTU")): splitLine = line.split("\t") ### 
Check if the size of the OTU is above the OTU abundance if (abundance[barcode] <= int(splitLine[0].split("size=")[1].replace(";",""))): if ( (int(splitLine[1] ) >= qlen) and (float(splitLine[3]) >= pident) ): output.write(line+"\n") else: ### Skip the empty lines if (line != ""): ### Only get the title lines from the blast output if (line.startswith("qseqid")): output.write(line+"\n") ### Close the files output.close() f.close() ### Check all the input and call all the functions def main(argv): ### Check the input if (len(argv) == 6 ): ### Catch the variable files statFile=argv[0] InFile=argv[1] FullInFile=argv[2] OutName=argv[3] ### Variables pOTU=float(argv[4]) pident=int(argv[5]) ### Local variables OutFile=OutName+"_"+str(pident)+"_"+str(pOTU)+".tsv" FullOutFile=OutName+"_"+str(pident)+"_"+str(pOTU)+"_Full.tsv" ### Call your functions abundance=GetOTUabundance(statFile, pOTU) GetHitsPerBarcode(abundance, InFile, pident, OutFile) GetAllHitsPerBarcode(abundance, FullInFile, pident, FullOutFile) else: print "Wrong type of arguments: python CheckCriteriaBlastSingleFile.py <inFile> <OutFile>" ### Call your main function if __name__ == "__main__": main(sys.argv[1:])
bsd-3-clause
-4,412,462,166,043,438,600
26.828877
94
0.62452
false
3.048623
false
false
false
jberci/resolwe
resolwe/flow/views/data.py
1
8515
"""Data viewset.""" from elasticsearch_dsl.query import Q from django.db import transaction from django.db.models import Count from rest_framework import exceptions, mixins, status, viewsets from rest_framework.decorators import list_route from rest_framework.response import Response from resolwe.elastic.composer import composer from resolwe.elastic.viewsets import ElasticSearchCombinedViewSet from resolwe.flow.models import Collection, Data, Entity, Process from resolwe.flow.models.utils import fill_with_defaults from resolwe.flow.serializers import DataSerializer from resolwe.flow.utils import get_data_checksum from resolwe.permissions.loader import get_permissions_class from resolwe.permissions.mixins import ResolwePermissionsMixin from resolwe.permissions.shortcuts import get_objects_for_user from resolwe.permissions.utils import assign_contributor_permissions, copy_permissions from ..elastic_indexes import DataDocument from .mixins import ResolweCheckSlugMixin, ResolweCreateModelMixin, ResolweUpdateModelMixin class DataViewSet(ElasticSearchCombinedViewSet, ResolweCreateModelMixin, mixins.RetrieveModelMixin, ResolweUpdateModelMixin, mixins.DestroyModelMixin, ResolwePermissionsMixin, ResolweCheckSlugMixin, viewsets.GenericViewSet): """API view for :class:`Data` objects.""" queryset = Data.objects.all().prefetch_related('process', 'descriptor_schema', 'contributor') serializer_class = DataSerializer permission_classes = (get_permissions_class(),) document_class = DataDocument filtering_fields = ('id', 'slug', 'version', 'name', 'created', 'modified', 'contributor', 'owners', 'status', 'process', 'process_type', 'type', 'process_name', 'tags', 'collection', 'parents', 'children', 'entity', 'started', 'finished', 'text') filtering_map = { 'name': 'name.raw', 'contributor': 'contributor_id', 'owners': 'owner_ids', 'process_name': 'process_name.ngrams', } ordering_fields = ('id', 'created', 'modified', 'started', 'finished', 'name', 'contributor', 'process_name', 'process_type', 'type') ordering_map = { 'name': 'name.raw', 'process_type': 'process_type.raw', 'type': 'type.raw', 'process_name': 'process_name.raw', 'contributor': 'contributor_sort', } ordering = '-created' def get_always_allowed_arguments(self): """Return query arguments which are always allowed.""" return super().get_always_allowed_arguments() + [ 'hydrate_data', 'hydrate_collections', 'hydrate_entities', ] def custom_filter_tags(self, value, search): """Support tags query.""" if not isinstance(value, list): value = value.split(',') filters = [Q('match', **{'tags': item}) for item in value] search = search.query('bool', must=filters) return search def custom_filter_text(self, value, search): """Support general query using the 'text' attribute.""" if isinstance(value, list): value = ' '.join(value) should = [ Q('match', slug={'query': value, 'operator': 'and', 'boost': 10.0}), Q('match', **{'slug.ngrams': {'query': value, 'operator': 'and', 'boost': 5.0}}), Q('match', name={'query': value, 'operator': 'and', 'boost': 10.0}), Q('match', **{'name.ngrams': {'query': value, 'operator': 'and', 'boost': 5.0}}), Q('match', contributor_name={'query': value, 'operator': 'and', 'boost': 5.0}), Q('match', **{'contributor_name.ngrams': {'query': value, 'operator': 'and', 'boost': 2.0}}), Q('match', owner_names={'query': value, 'operator': 'and', 'boost': 5.0}), Q('match', **{'owner_names.ngrams': {'query': value, 'operator': 'and', 'boost': 2.0}}), Q('match', process_name={'query': value, 'operator': 'and', 'boost': 5.0}), Q('match', 
**{'process_name.ngrams': {'query': value, 'operator': 'and', 'boost': 2.0}}), Q('match', status={'query': value, 'operator': 'and', 'boost': 2.0}), Q('match', type={'query': value, 'operator': 'and', 'boost': 2.0}), ] # Add registered text extensions. for extension in composer.get_extensions(self): if hasattr(extension, 'text_filter'): should += extension.text_filter(value) search = search.query('bool', should=should) return search def create(self, request, *args, **kwargs): """Create a resource.""" collections = request.data.get('collections', []) # check that user has permissions on all collections that Data # object will be added to for collection_id in collections: try: collection = Collection.objects.get(pk=collection_id) except Collection.DoesNotExist: return Response({'collections': ['Invalid pk "{}" - object does not exist.'.format(collection_id)]}, status=status.HTTP_400_BAD_REQUEST) if not request.user.has_perm('add_collection', obj=collection): if request.user.has_perm('view_collection', obj=collection): raise exceptions.PermissionDenied( "You don't have `ADD` permission on collection (id: {}).".format(collection_id) ) else: raise exceptions.NotFound( "Collection not found (id: {}).".format(collection_id) ) self.define_contributor(request) if kwargs.pop('get_or_create', False): response = self.perform_get_or_create(request, *args, **kwargs) if response: return response return super().create(request, *args, **kwargs) @list_route(methods=['post']) def get_or_create(self, request, *args, **kwargs): """Get ``Data`` object if similar already exists, otherwise create it.""" kwargs['get_or_create'] = True return self.create(request, *args, **kwargs) def perform_get_or_create(self, request, *args, **kwargs): """Perform "get_or_create" - return existing object if found.""" serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) process = serializer.validated_data.get('process') process_input = request.data.get('input', {}) fill_with_defaults(process_input, process.input_schema) checksum = get_data_checksum(process_input, process.slug, process.version) data_qs = Data.objects.filter( checksum=checksum, process__persistence__in=[Process.PERSISTENCE_CACHED, Process.PERSISTENCE_TEMP], ) data_qs = get_objects_for_user(request.user, 'view_data', data_qs) if data_qs.exists(): data = data_qs.order_by('created').last() serializer = self.get_serializer(data) return Response(serializer.data) def perform_create(self, serializer): """Create a resource.""" process = serializer.validated_data.get('process') if not process.is_active: raise exceptions.ParseError( 'Process retired (id: {}, slug: {}/{}).'.format(process.id, process.slug, process.version) ) with transaction.atomic(): instance = serializer.save() assign_contributor_permissions(instance) # Entity is added to the collection only when it is # created - when it only contains 1 Data object. entities = Entity.objects.annotate(num_data=Count('data')).filter(data=instance, num_data=1) # Assign data object to all specified collections. collection_pks = self.request.data.get('collections', []) for collection in Collection.objects.filter(pk__in=collection_pks): collection.data.add(instance) copy_permissions(collection, instance) # Add entities to which data belongs to the collection. for entity in entities: entity.collections.add(collection) copy_permissions(collection, entity)
apache-2.0
-4,215,530,313,429,322,000
43.348958
116
0.601996
false
4.232107
false
false
false
astroclark/bhextractor
bin/bhex_scalemassdemo.py
1
4019
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2015 James Clark <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
bhextractor_plotpca.py

Construct waveform catalogues and PCA for plotting and diagnostics
"""

import numpy as np
from matplotlib import pyplot as pl
import bhextractor_pca as bhex

import pycbc.types
import pycbc.filter
from pycbc.psd import aLIGOZeroDetHighPower

# -------------------------------
# USER INPUT

catalogue_name='Q'
theta=90.0

# END USER INPUT
# -------------------------------

# -------------------------------
# ANALYSIS

catlen=4

#
# Setup and then build the catalogue
#
catalogue = bhex.waveform_catalogue(catalogue_name=catalogue_name, fs=2048,
                                    catalogue_len=catlen, mtotal_ref=250,
                                    Dist=1., theta=theta)

oriwave250 = np.copy(catalogue.aligned_catalogue[0,:])

#
# Do the PCA
#
pca = bhex.waveform_pca(catalogue)

#
# Build a 350 solar mass waveform from the 250 Msun PCs#

# Just use the first waveform
betas = pca.projection_plus[catalogue.waveform_names[0]]

times = np.arange(0,len(catalogue.aligned_catalogue[0,:])/2048.,1./2048)

recwave350 = bhex.reconstruct_waveform(pca.pca_plus, betas,
                                       len(catalogue.waveform_names),
                                       mtotal_target=350.0)

#
# Now make a catalogue at 350 solar masses and then compute the overlap
#
catalogue350 = bhex.waveform_catalogue(catalogue_name=catalogue_name, fs=2048,
                                       catalogue_len=catlen, mtotal_ref=350,
                                       Dist=1., theta=theta)

oriwave350 = np.copy(catalogue350.aligned_catalogue[0,:])

# Finally, compute the match between the reconstructed 350 Msun system and the
# system we generated at that mass in the first place
recwave350_pycbc = pycbc.types.TimeSeries(np.real(recwave350), delta_t=1./2048)
oriwave250_pycbc = pycbc.types.TimeSeries(np.real(oriwave250), delta_t=1./2048)
oriwave350_pycbc = pycbc.types.TimeSeries(np.real(oriwave350), delta_t=1./2048)

psd = aLIGOZeroDetHighPower(len(recwave350_pycbc.to_frequencyseries()),
                            recwave350_pycbc.to_frequencyseries().delta_f,
                            low_freq_cutoff=10.0)

match_cat = pycbc.filter.match(oriwave250_pycbc.to_frequencyseries(),
                               oriwave350_pycbc.to_frequencyseries(),
                               psd=psd, low_frequency_cutoff=10)[0]

match_rec = pycbc.filter.match(recwave350_pycbc.to_frequencyseries(),
                               oriwave350_pycbc.to_frequencyseries(),
                               psd=psd, low_frequency_cutoff=10)[0]

print 'Match between 250 and 350 Msun catalogue waves: ', match_cat
print 'Match between 350 reconstruction and 350 catalogue wave: ', match_rec

#
# Make plots
#
if 1:
    print "Plotting reconstructions"

    fig, ax = pl.subplots(nrows=2,ncols=1)

    ax[0].plot(times,np.real(oriwave250), 'b', label='250 M$_{\odot}$ catalogue')
    ax[0].plot(times,np.real(oriwave350), 'g', label='350 M$_{\odot}$ catalogue')
    ax[0].set_xlim(0,2.5)
    ax[0].set_title('Match = %f'% match_cat)
    ax[0].legend(loc='upper left',prop={'size':10})

    ax[1].plot(times,np.real(oriwave350), 'g', label='350 M$_{\odot}$ catalogue')
    ax[1].plot(times,np.real(recwave350), 'r', label='350 M$_{\odot}$ reconstruction')
    ax[1].set_xlim(0,2.5)
    ax[1].set_xlabel('Time (s)')
    ax[1].set_title('Match = %f'% match_rec)
    ax[1].legend(loc='upper left',prop={'size':10})

    fig.tight_layout()

    fig.savefig('scalemassdemo.png')
gpl-2.0
-3,245,627,533,540,735,000
30.645669
90
0.693705
false
3.120342
false
false
false
jeremy24/rnn-classifier
letter_tools.py
1
1759
import glob
import imghdr
import os

import PIL
from PIL import ImageFont
from PIL import Image
from PIL import ImageDraw


def _is_ascii(s):
    return all(ord(c) < 128 for c in s)


def text2png(text, fullpath, color="#FFF", bgcolor="#000",
             fontfullpath="assets/fonts/Oswald-Bold.ttf", fontsize=35,
             leftpadding=3, rightpadding=3, width=20, height=None):
    REPLACEMENT_CHARACTER = u'\uFFFD'
    NEWLINE_REPLACEMENT_STRING = ' ' + REPLACEMENT_CHARACTER + ' '

    font = ImageFont.load_default() if fontfullpath == None else ImageFont.truetype(fontfullpath, fontsize)
    text = text.replace('\n', NEWLINE_REPLACEMENT_STRING)

    lines = []
    line = u""

    if len(text) == 0:
        print("\tNo valid text, bailing out...")
        return

    for word in text.split():
        if word == REPLACEMENT_CHARACTER:  # give a blank line
            lines.append(line[1:])  # slice the white space in the begining of the line
            line = u""
            lines.append(u"")  # the blank line
        elif font.getsize(line + ' ' + word)[0] <= (width - rightpadding - leftpadding):
            line += ' ' + word
        else:  # start a new line
            lines.append(line[1:])  # slice the white space in the begining of the line
            line = u""
            # TODO: handle too long words at this point
            line += ' ' + word  # for now, assume no word alone can exceed the line width

    if len(line) != 0:
        lines.append(line[1:])  # add the last line

    line_height = font.getsize(text)[1]

    width = font.getsize(text)[0]
    width += int(width * .10)

    if height is not None:
        line_height = height

    img_height = line_height * (len(lines) + 1)

    img = Image.new("RGBA", (width, img_height), bgcolor)
    draw = ImageDraw.Draw(img)

    y = 0
    for line in lines:
        draw.text((leftpadding, y), line, color, font=font)
        y += line_height

    img.save(fullpath)
mit
-5,027,793,800,516,839,000
25.253731
104
0.667425
false
3.032759
false
false
false
mihaisoloi/conpaas
conpaas-director/setup.py
1
2293
#!/usr/bin/env python

import os
import sys
import shutil

from pwd import getpwnam
from grp import getgrnam

from setuptools import setup
from pkg_resources import Requirement, resource_filename

CPSVERSION = '1.2.0'

CONFDIR = '/etc/cpsdirector'

if not os.geteuid() == 0:
    CONFDIR = 'cpsdirectorconf'

long_description = """
ConPaaS: an integrated runtime environment for elastic Cloud applications
=========================================================================
"""

setup(name='cpsdirector',
      version=CPSVERSION,
      description='ConPaaS director',
      author='Emanuele Rocca',
      author_email='[email protected]',
      url='http://www.conpaas.eu/',
      download_url='http://www.conpaas.eu/download/',
      license='BSD',
      packages=[ 'cpsdirector', ],
      include_package_data=True,
      zip_safe=False,
      package_data={ 'cpsdirector': [ 'ConPaaS.tar.gz', ] },
      data_files=[ ( CONFDIR, [ 'director.cfg.example', 'director.cfg.multicloud-example', 'ConPaaS.tar.gz' ] ), ],
      scripts=[ 'cpsadduser.py', 'director.wsgi', 'cpsconf.py', 'cpscheck.py' ],
      install_requires=[ 'cpslib', 'flask-sqlalchemy', 'apache-libcloud', 'netaddr' ],
      dependency_links=[ 'http://www.linux.it/~ema/conpaas/cpslib-%s.tar.gz' % CPSVERSION, ],)

if __name__ == "__main__" and sys.argv[1] == "install":
    # overwrite /etc/cpsdirector/{config,scripts}
    for what in 'config', 'scripts':
        targetdir = os.path.join(CONFDIR, what)
        if os.path.isdir(targetdir):
            shutil.rmtree(targetdir)
        shutil.copytree(os.path.join('conpaas', what), targetdir)

    if not os.path.exists(os.path.join(CONFDIR, "director.cfg")):
        # copy director.cfg.example under CONFDIR/director.cfg
        conffile = resource_filename(Requirement.parse("cpsdirector"),
                                     "director.cfg.example")
        shutil.copyfile(conffile, os.path.join(CONFDIR, "director.cfg"))

    # create 'certs' dir
    if not os.path.exists(os.path.join(CONFDIR, "certs")):
        os.mkdir(os.path.join(CONFDIR, "certs"))

    # set www-data as the owner of CONFDIR
    try:
        os.chown(CONFDIR, getpwnam('www-data').pw_uid, getgrnam('www-data').gr_gid)
    except OSError:
        print "W: 'chown www-data:www-data %s' failed" % CONFDIR
bsd-3-clause
3,412,883,459,429,648,400
34.828125
115
0.627126
false
3.289813
false
false
false
sssilver/angler
rod/rod/handler/lesson.py
1
3347
import flask
import decimal
import dateutil.parser
import flask.ext.login

import rod
import rod.model.student
import rod.model.lesson
import rod.model.group
import rod.model.company
import rod.model.transaction
import rod.model.schemas


lesson_handler = flask.Blueprint('lesson', __name__)


@lesson_handler.route('/lesson', methods=['GET'])
def list_lesson():
    teacher_id = flask.request.args.get('teacher_id')
    query = rod.model.lesson.Lesson.query.filter_by(is_deleted=False)

    if teacher_id:
        lessons = query.filter_by(teacher_id=teacher_id).all()
    else:
        lessons = query.all()

    return flask.jsonify({
        'items': rod.model.schemas.LessonSchema(many=True).dump(lessons).data,
        'count': len(lessons)
    })


@lesson_handler.route('/group/<int:group_id>/lessons', methods=['POST'])
def file_lesson(group_id):
    lesson_data = flask.request.json

    # File the lesson
    lesson = rod.model.lesson.Lesson()
    lesson.time = dateutil.parser.parse(lesson_data['datetime'])
    lesson.teacher_id = flask.ext.login.current_user.id
    lesson.group_id = group_id
    rod.model.db.session.add(lesson)

    companies = set()  # Companies that had students in this lesson

    # Record attendance
    for student_id, is_absent in lesson_data['attendance'].iteritems():
        student_id = int(student_id)  # Cast to int, as JSON keys are always strings

        # Get each student
        student = rod.model.db.session.query(rod.model.student.Student).get(student_id)

        # Get their membership in this group
        membership_query = rod.model.db.session.query(rod.model.student.Membership)
        membership = membership_query.filter_by(student_id=student_id).filter_by(group_id=group_id).one()

        if membership.tariff.type == 'student':  # Student tariff?
            # For personal tariffs, we wanna update the student's balance
            student.balance -= membership.tariff.price

            student_transaction = rod.model.transaction.StudentTransaction()
            student_transaction.staff_id = lesson.teacher_id
            student_transaction.amount = membership.tariff.price
            student_transaction.student_id = student_id
            student_transaction.type = 'payment'
            rod.model.db.session.add(student_transaction)
        elif membership.tariff.type == 'company':  # Company tariff?
            # For corporate tariffs, we just wanna collect the companies that had students
            # in this lesson. We'll update their balances separately down the road.
            companies.add(membership.company)

    # Corporate balances are updated once,
    # regardless of how many students were in the group during this lesson
    for company in companies:
        # Update the corporate balance
        company.balance -= membership.tariff.price

        company_transaction = rod.model.transaction.CompanyTransaction()
        company_transaction.staff_id = lesson.teacher_id
        company_transaction.amount = membership.tariff.price
        company_transaction.company_id = company.id
        company_transaction.type = 'payment'
        rod.model.db.session.add(company_transaction)

    # Finally, commit the entire big transaction
    rod.model.db.session.commit()

    return flask.jsonify(rod.model.schemas.LessonSchema().dump(lesson).data)
bsd-3-clause
-5,314,708,541,440,655,000
35.78022
105
0.689573
false
3.614471
false
false
false
henry-ngo/VIP
vip_hci/preproc/skysubtraction.py
1
4110
#! /usr/bin/env python

"""
Module with sky subtraction function.
"""

from __future__ import division

__author__ = 'C. Gomez @ ULg'
__all__ = ['cube_subtract_sky_pca']

import numpy as np


def cube_subtract_sky_pca(sci_cube, sky_cube, mask, ref_cube=None, ncomp=2):
    """ PCA based sky subtraction.

    Parameters
    ----------
    sci_cube : array_like
        3d array of science frames.
    sky_cube : array_like
        3d array of sky frames.
    mask : array_like
        Mask indicating the region for the analysis. Can be created with the
        function vip_hci.var.create_ringed_spider_mask.
    ref_cube : array_like or None
        Reference cube.
    ncomp : int
        Sets the number of PCs you want to use in the sky subtraction.

    Returns
    -------
    Sky subtracted cube.
    """
    from ..pca import prepare_matrix, svd_wrapper

    if sci_cube.shape[1] != sky_cube.shape[1] or sci_cube.shape[2] != \
            sky_cube.shape[2]:
        raise TypeError('Science and Sky frames sizes do not match')
    if ref_cube is not None:
        if sci_cube.shape[1] != ref_cube.shape[1] or sci_cube.shape[2] != \
                ref_cube.shape[2]:
            raise TypeError('Science and Reference frames sizes do not match')

    # Getting the EVs from the sky cube
    Msky = prepare_matrix(sky_cube, scaling=None, verbose=False)
    sky_pcs = svd_wrapper(Msky, 'lapack', sky_cube.shape[0], False, False)
    sky_pcs_cube = sky_pcs.reshape(sky_cube.shape[0], sky_cube.shape[1],
                                   sky_cube.shape[1])

    # Masking the science cube
    sci_cube_masked = np.zeros_like(sci_cube)
    ind_masked = np.where(mask == 0)
    for i in range(sci_cube.shape[0]):
        masked_image = np.copy(sci_cube[i])
        masked_image[ind_masked] = 0
        sci_cube_masked[i] = masked_image
    Msci_masked = prepare_matrix(sci_cube_masked, scaling=None, verbose=False)

    # Masking the PCs learned from the skies
    sky_pcs_cube_masked = np.zeros_like(sky_pcs_cube)
    for i in range(sky_pcs_cube.shape[0]):
        masked_image = np.copy(sky_pcs_cube[i])
        masked_image[ind_masked] = 0
        sky_pcs_cube_masked[i] = masked_image

    # Project the masked frames onto the sky PCs to get the coefficients
    transf_sci = np.zeros((sky_cube.shape[0], Msci_masked.shape[0]))
    for i in range(Msci_masked.shape[0]):
        transf_sci[:, i] = np.inner(sky_pcs, Msci_masked[i].T)

    Msky_pcs_masked = prepare_matrix(sky_pcs_cube_masked, scaling=None,
                                     verbose=False)

    mat_inv = np.linalg.inv(np.dot(Msky_pcs_masked, Msky_pcs_masked.T))
    transf_sci_scaled = np.dot(mat_inv, transf_sci)

    # Obtaining the optimized sky and subtraction
    sci_cube_skysub = np.zeros_like(sci_cube)
    for i in range(Msci_masked.shape[0]):
        sky_opt = np.array([np.sum(
            transf_sci_scaled[j, i] * sky_pcs_cube[j] for j in range(ncomp))])
        sci_cube_skysub[i] = sci_cube[i] - sky_opt

    # Processing the reference cube (if any)
    if ref_cube is not None:
        ref_cube_masked = np.zeros_like(ref_cube)
        for i in range(ref_cube.shape[0]):
            masked_image = np.copy(ref_cube[i])
            masked_image[ind_masked] = 0
            ref_cube_masked[i] = masked_image
        Mref_masked = prepare_matrix(ref_cube_masked, scaling=None,
                                     verbose=False)
        transf_ref = np.zeros((sky_cube.shape[0], Mref_masked.shape[0]))
        for i in range(Mref_masked.shape[0]):
            transf_ref[:, i] = np.inner(sky_pcs, Mref_masked[i].T)

        transf_ref_scaled = np.dot(mat_inv, transf_ref)

        ref_cube_skysub = np.zeros_like(ref_cube)
        for i in range(Mref_masked.shape[0]):
            sky_opt = np.array([np.sum(
                transf_ref_scaled[j, i] * sky_pcs_cube[j] for j in range(ncomp))])
            ref_cube_skysub[i] = ref_cube[i] - sky_opt

        return sci_cube_skysub, ref_cube_skysub
    else:
        return sci_cube_skysub
mit
7,069,799,869,590,812,000
36.027027
82
0.592457
false
3.16641
false
false
false
justrypython/EAST
svm_model_v2.py
1
2801
#encoding:UTF-8
import os
import numpy as np
import sys
import cv2
import matplotlib.pyplot as plt
from sklearn.svm import NuSVC, SVC
import datetime
import pickle

#calculate the area
def area(p):
    p = p.reshape((-1, 2))
    return 0.5 * abs(sum(x0*y1 - x1*y0
                         for ((x0, y0), (x1, y1)) in segments(p)))

def segments(p):
    return zip(p, np.concatenate((p[1:], [p[0]])))

def calc_xy(p0, p1, p2):
    cos = calc_cos(p0, p1, p2)
    dis = calc_dis(p0, p2)
    return dis * cos, dis * np.sqrt(1 - np.square(cos))

def calc_dis(p0, p1):
    return np.sqrt(np.sum(np.square(p0-p1)))

def calc_cos(p0, p1, p2):
    A = p1 - p0
    B = p2 - p0
    num = np.dot(A, B)
    demon = np.linalg.norm(A) * np.linalg.norm(B)
    return num / demon

def calc_new_xy(boxes):
    box0 = boxes[:8]
    box1 = boxes[8:]
    x, y = calc_xy(box1[4:6], box1[6:], box0[:2])
    dis = calc_dis(box1[4:6], box1[6:])
    area0 = area(box0)
    area1 = area(box1)
    return x/dis, y/dis

if __name__ == '__main__':
    test = True
    path = '/media/zhaoke/b0685ee4-63e3-4691-ae02-feceacff6996/data/'
    paths = os.listdir(path)
    paths = [i for i in paths if '.txt' in i]
    boxes = np.empty((800000, 9))
    cnt = 0
    for txt in paths:
        f = open(path+txt, 'r')
        lines = f.readlines()
        f.close()
        lines = [i.replace('\n', '').split(',') for i in lines]
        lines = np.array(lines).astype(np.uint32)
        boxes[cnt*10:cnt*10+len(lines)] = lines
        cnt += 1
    zeros = boxes==[0, 0, 0, 0, 0, 0, 0, 0, 0]
    zeros_labels = zeros.all(axis=1)
    zeros_labels = np.where(zeros_labels==True)
    idboxes = boxes[boxes[:, 8]==7]
    idboxes = np.tile(idboxes[:, :8], (1, 10))
    idboxes = idboxes.reshape((-1, 8))
    boxes = np.delete(boxes, zeros_labels[0], axis=0)
    idboxes = np.delete(idboxes, zeros_labels[0], axis=0)
    boxes_idboxes = np.concatenate((boxes[:, :8], idboxes), axis=1)
    start_time = datetime.datetime.now()
    print start_time
    new_xy = np.apply_along_axis(calc_new_xy, 1, boxes_idboxes)
    end_time = datetime.datetime.now()
    print end_time - start_time
    if test:
        with open('clf_address_v2.pickle', 'rb') as f:
            clf = pickle.load(f)
        cnt = 0
        for i, xy in enumerate(new_xy):
            cls = int(clf.predict([xy])[0])
            if cls == int(boxes[i, 8]):
                cnt += 1
            if i % 10000 == 0 and i != 0:
                print i, ':', float(cnt) / i
    else:
        clf = SVC()
        start_time = datetime.datetime.now()
        print start_time
        clf.fit(new_xy[:], boxes[:, 8])
        end_time = datetime.datetime.now()
        print end_time - start_time
        with open('clf.pickle', 'wb') as f:
            pickle.dump(clf, f)
    print 'end'
gpl-3.0
3,479,348,540,319,028,700
29.129032
69
0.551946
false
2.843655
false
false
false
BorgERP/borg-erp-6of3
l10n_hr/l10n_hr_fiskal/__openerp__.py
1
2505
# -*- encoding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Module: l10n_hr_fiskal
#    Author: Davor Bojkić
#    mail:   [email protected]
#    Copyright (C) 2012- Daj Mi 5,
#    http://www.dajmi5.com
#    Contributions: Hrvoje ThePython - Free Code!
#                   Goran Kliska (AT) Slobodni Programi
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    "name" : "Croatian localization - Fiscalization module",
    "description" : """
Fiskalizacija izdanih računa
====================================

Author: Davor Bojkić - Bole @ DAJ MI 5
        www.dajmi5.com
Contributions: Hrvoje ThePython - Free Code!
               Goran Kliska @ Slobodni Programi

Preduvjeti:
na serveru instalirati: python-dev, python-ms2crypto, libxmlsec1-dev
build/install pyxmlsec-0.3.2!

    """,
    "version" : "1.02",
    "author" : "DAJ MI 5",
    "category" : "Localisation/Croatia",
    "website": "http://www.dajmi5.com",
    'depends': [
        'base_vat',
        'account_storno',
        'l10n_hr_account',
        'openerp_crypto',
    ],
    #'external_dependencies':{'python':['m2crypto','pyxmlsec'],
    #                         'bin':'libxmlsec-dev'},
    'update_xml': [
        'certificate_view.xml',
        'fiskalizacija_view.xml',
        'security/ir.model.access.csv',
        'account_view.xml',
        'account_invoice_view.xml',
        'l10n_hr_fiskal_data.xml',
    ],
    "active": False,
    "installable": True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
3,083,689,587,987,897,000
35.26087
78
0.544764
false
3.706667
false
false
false
summychou/CSForOSS
CA/OSSQt_DataMasterRigster.py
1
2535
# -*- coding: utf-8 -*- # import sqlite3 as sqlite import sys import uuid from pysqlcipher3 import dbapi2 as sqlite def main(): print("***************** Welcome to OSS DataMaster-Rigster System *******************") print("* *") print("******************************************************************************") conn = sqlite.connect('DataMasterSystem.db') c = conn.cursor() c.execute("PRAGMA key='data_master_system'") # 对加密的sqlite文件进行解密 try: c.execute('create table data_master_system (data_master_name text, password text, unique_id text)') except sqlite.OperationalError as e: pass unique_id = uuid.uuid1() data_masters = c.execute("select * from data_master_system").fetchall() if len(data_masters) != 0: data_master_name = input("[*] Input your data master name:\n") for col in data_masters: if data_master_name.strip() == col[0]: print("[!] Data Master Name has existed!") print("******************************************************************************") print("* *") print("*********************** Data Master Rigster Is Failed! ***********************") sys.exit(-1) else: data_master_name = input("[*] Input your data master name:\n") password = input("[*] Input your password:\n") repeat_password = input("[*] Input your password again:\n") if password.strip() != repeat_password.strip(): print("[!] Password is not equal to RePassword!") print("******************************************************************************") print("* *") print("*********************** Data Master Rigster Is Failed! ***********************") sys.exit(-1) c.execute('insert into data_master_system values ("{}", "{}", "{}")'.format(data_master_name, password, unique_id)) conn.commit() c.close() print("******************************************************************************") print("* *") print("********************* Data Master Rigster Is Successful! *********************") if __name__ == '__main__': main()
mit
9,171,648,892,522,189,000
45.592593
119
0.394036
false
4.960552
false
false
false
Microvellum/Fluid-Designer
win64-vc/2.78/scripts/startup/fluid_operators/fd_api_doc.py
1
10496
''' Created on Jan 27, 2017 @author: montes ''' import bpy from inspect import * import mv import os import math from reportlab.pdfgen import canvas from reportlab.lib.pagesizes import legal,inch,cm from reportlab.platypus import Image from reportlab.platypus import Paragraph,Table,TableStyle from reportlab.platypus import SimpleDocTemplate, Table, TableStyle, Paragraph, Frame, Spacer, PageTemplate, PageBreak from reportlab.lib import colors from reportlab.lib.pagesizes import A3, A4, landscape, portrait from reportlab.lib.styles import ParagraphStyle, getSampleStyleSheet from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY from reportlab.platypus.flowables import HRFlowable class OPS_create_api_doc(bpy.types.Operator): bl_idname = "fd_api_doc.create_api_doc" bl_label = "Create Fluid API Documentation" output_path = bpy.props.StringProperty(name="Output Path") def esc_uscores(self, string): if string: return string.replace("_", "\_") else: return def exclude_builtins(self, classes, module): new_classes = [] for cls in classes: if module in cls[1].__module__: new_classes.append(cls) return new_classes def write_sidebar(self, modules): filepath = os.path.join(self.output_path, "FD_Sidebar.md") file = open(filepath, "w") fw = file.write fw("# Fluid Designer\n") fw("* [Home](Home)\n") fw("* [Understanding the User Interface](Understanding-the-User-Interface)\n") fw("* [Navigating the 3D Viewport](Navigating-the-3D-Viewport)\n") fw("* [Navigating the Library Browser](Navigating-the-Library-Browser)\n") fw("* [The Room Builder Panel](The-Room-Builder-Panel)\n") fw("* [Hotkeys](Fluid-Designer-Hot-Keys)\n\n") fw("# API Documentation\n") for mod in modules: fw("\n## mv.{}\n".format(mod[0])) classes = self.exclude_builtins(getmembers(mod[1], predicate=isclass), mod[0]) if len(classes) > 0: for cls in classes: fw("* [{}()]({})\n".format(self.esc_uscores(cls[0]), self.esc_uscores(cls[0]))) else: fw("* [mv.{}]({})\n".format(mod[0], mod[0])) file.close() def write_class_doc(self, cls): filepath = os.path.join(self.output_path, cls[0] + ".md") file = open(filepath, "w") fw = file.write fw("# class {}{}{}{}\n\n".format(cls[1].__module__, ".", cls[0], "():")) if getdoc(cls[1]): fw(self.esc_uscores(getdoc(cls[1])) + "\n\n") for func in getmembers(cls[1], predicate=isfunction): if cls[0] in func[1].__qualname__: args = getargspec(func[1])[0] args_str = ', '.join(item for item in args if item != 'self') fw("## {}{}{}{}\n\n".format(self.esc_uscores(func[0]), "(", self.esc_uscores(args_str) if args_str else " ", ")")) if getdoc(func[1]): fw(self.esc_uscores(getdoc(func[1])) + "\n") else: fw("Undocumented.\n\n") file.close() def write_mod_doc(self, mod): filepath = os.path.join(self.output_path, mod[0] + ".md") file = open(filepath, "w") fw = file.write fw("# module {}{}:\n\n".format("mv.", mod[0])) if getdoc(mod[1]): fw(self.esc_uscores(getdoc(mod[1])) + "\n\n") for func in getmembers(mod[1], predicate=isfunction): args = getargspec(func[1])[0] args_str = ', '.join(item for item in args if item != 'self') fw("## {}{}{}{}\n\n".format(self.esc_uscores(func[0]), "(", self.esc_uscores(args_str if args_str else " "), ")")) if getdoc(func[1]): fw(self.esc_uscores(getdoc(func[1])) + "\n") else: fw("Undocumented.\n\n") file.close() def execute(self, context): modules = getmembers(mv, predicate=ismodule) self.write_sidebar(modules) for mod in modules: classes = self.exclude_builtins(getmembers(mod[1], predicate=isclass), mod[0]) if len(classes) > 0: for cls in classes: self.write_class_doc(cls) else: 
self.write_mod_doc(mod) return {'FINISHED'} class OPS_create_content_overview_doc(bpy.types.Operator): bl_idname = "fd_api_doc.create_content_overview" bl_label = "Create Fluid Content Overview Documentation" INCLUDE_FILE_NAME = "doc_include.txt" write_path = bpy.props.StringProperty(name="Write Path", default="") elements = [] package = None def write_html(self): pass def read_include_file(self, path): dirs = [] file_path = os.path.join(path, self.INCLUDE_FILE_NAME) if os.path.exists(file_path): file = open(os.path.join(path, self.INCLUDE_FILE_NAME), "r") dirs_raw = list(file) for dir in dirs_raw: dirs.append(dir.replace("\n", "")) return dirs def create_hdr(self, name, font_size): hdr_style = TableStyle([('TEXTCOLOR', (0, 0), (-1, -1), colors.black), ('BOTTOMPADDING', (0, 0), (-1, -1), 15), ('TOPPADDING', (0, 0), (-1, -1), 15), ('FONTSIZE', (0, 0), (-1, -1), 8), ('VALIGN', (0, 0), (-1, -1), 'TOP'), ('ALIGN', (0, 0), (-1, 0), 'LEFT'), ('LINEBELOW', (0, 0), (-1, -1), 2, colors.black), ('BACKGROUND', (0, 1), (-1, -1), colors.white)]) name_p = Paragraph(name, ParagraphStyle("Category name style", fontSize=font_size)) hdr_tbl = Table([[name_p]], colWidths = 500, rowHeights = None, repeatRows = 1) hdr_tbl.setStyle(hdr_style) self.elements.append(hdr_tbl) def create_img_table(self, dir): item_tbl_data = [] item_tbl_row = [] for i, file in enumerate(os.listdir(dir)): last_item = len(os.listdir(dir)) - 1 if ".png" in file: img = Image(os.path.join(dir, file), inch, inch) img_name = file.replace(".png", "") if len(item_tbl_row) == 4: item_tbl_data.append(item_tbl_row) item_tbl_row = [] elif i == last_item: item_tbl_data.append(item_tbl_row) i_tbl = Table([[img], [Paragraph(img_name, ParagraphStyle("item name style", wordWrap='CJK'))]]) item_tbl_row.append(i_tbl) if len(item_tbl_data) > 0: item_tbl = Table(item_tbl_data, colWidths=125) self.elements.append(item_tbl) self.elements.append(Spacer(1, inch * 0.5)) def search_dir(self, path): thumb_dir = False for file in os.listdir(path): if ".png" in file: thumb_dir = True if thumb_dir: self.create_img_table(path) for file in os.listdir(path): if os.path.isdir(os.path.join(path, file)): self.create_hdr(file, font_size=14) self.search_dir(os.path.join(path, file)) def write_pdf(self, mod): file_path = os.path.join(self.write_path if self.write_path != "" else mod.__path__[0], "doc") file_name = mod.__package__ + ".pdf" if not os.path.exists(file_path): os.mkdir(file_path) doc = SimpleDocTemplate(os.path.join(file_path, file_name), pagesize = A4, leftMargin = 0.25 * inch, rightMargin = 0.25 * inch, topMargin = 0.25 * inch, bottomMargin = 0.25 * inch) lib_name = mod.__package__.replace("_", " ") self.create_hdr(lib_name, font_size=24) print("\n", lib_name, "\n") dirs = self.read_include_file(os.path.join(mod.__path__[0], "doc")) if len(dirs) > 0: for d in dirs: path = os.path.join(mod.__path__[0], d) if os.path.exists(path): self.create_hdr(d.title(), font_size=18) self.search_dir(path) else: products_path = os.path.join(mod.__path__[0], "products") if os.path.exists(products_path): self.create_hdr("Products", font_size=18) self.search_dir(products_path) inserts_path = os.path.join(mod.__path__[0], "inserts") if os.path.exists(inserts_path): self.create_hdr("Inserts", font_size=18) self.search_dir(inserts_path) doc.build(self.elements) def execute(self, context): packages = mv.utils.get_library_packages(context) for p in packages: mod = __import__(p) self.write_pdf(mod) return {'FINISHED'} classes = [ OPS_create_api_doc, OPS_create_content_overview_doc, ] 
def register(): for c in classes: bpy.utils.register_class(c) def unregister(): for c in classes: bpy.utils.unregister_class(c) if __name__ == "__main__": register()
gpl-3.0
-1,779,006,538,429,056,000
35.447917
118
0.477229
false
3.864507
false
false
false
Ramyak/CodingPractice
algo_practice/sort/merge_sort.py
1
1251
#!/usr/bin/env python

a = [5, 3, 6, 3, 1, 2]


def merge_sort(in_array, left, right):
    print 'Before: ({} , {}) : {}'.format(str(left), str(right), (in_array[left:right + 1]))
    if right - left >= 1:
        mid = ((right - left) / 2) + left
        if mid > right:
            return
        merge_sort(in_array, left, mid)
        merge_sort(in_array, mid + 1, right)
        # Merge
        tmp_array = [None] * (right - left + 1)
        l_start = left
        r_start = mid + 1
        i = 0
        for i in range(right + 1 - left):
            if l_start > mid or r_start > right:
                break
            if in_array[l_start] < in_array[r_start]:
                tmp_array[i] = in_array[l_start]
                l_start += 1
            else:
                tmp_array[i] = in_array[r_start]
                r_start += 1
        if l_start <= mid:
            tmp_array[i:right + 1] = in_array[l_start:mid + 1]
        else:
            tmp_array[i:right + 1] = in_array[r_start:right + 1]
        in_array[left:right + 1] = tmp_array
        print 'After: ({} , {}) : {}'.format(str(left), str(right), (in_array[left:right + 1]))
    return in_array


if __name__ == '__main__':
    print merge_sort(a, 0, len(a) - 1)
gpl-2.0
6,766,558,600,085,191,000
31.076923
92
0.46283
false
3.135338
false
false
false
AMOboxTV/AMOBox.LegoBuild
plugin.video.exodus/resources/lib/sources/ninemovies_mv_tv.py
1
7333
# -*- coding: utf-8 -*- ''' Exodus Add-on Copyright (C) 2016 Exodus This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ''' import re,urllib,urlparse,json,time from resources.lib.modules import cleantitle from resources.lib.modules import client from resources.lib.modules import cache from resources.lib.modules import directstream class source: def __init__(self): self.domains = ['9movies.to'] self.base_link = 'http://9movies.to' self.search_link = '/sitemap' def movie(self, imdb, title, year): try: url = {'imdb': imdb, 'title': title, 'year': year} url = urllib.urlencode(url) return url except: return def tvshow(self, imdb, tvdb, tvshowtitle, year): try: url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year} url = urllib.urlencode(url) return url except: return def episode(self, url, imdb, tvdb, title, premiered, season, episode): try: if url == None: return url = urlparse.parse_qs(url) url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url]) url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode url = urllib.urlencode(url) return url except: return def ninemovies_cache(self): try: url = urlparse.urljoin(self.base_link, self.search_link) result = client.source(url) result = result.split('>Movies and TV-Shows<')[-1] result = client.parseDOM(result, 'ul', attrs = {'class': 'sub-menu'})[0] result = re.compile('href="(.+?)">(.+?)<').findall(result) result = [(re.sub('http.+?//.+?/','/', i[0]), re.sub('&#\d*;','', i[1])) for i in result] return result except: return def sources(self, url, hostDict, hostprDict): try: sources = [] if url == None: return sources try: result = '' data = urlparse.parse_qs(url) data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data]) title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title'] title = cleantitle.get(title) try: episode = data['episode'] except: pass url = cache.get(self.ninemovies_cache, 120) url = [(i[0], i[1], cleantitle.get(i[1])) for i in url] url = [(i[0], i[1], i[2], re.sub('\d*$', '', i[2])) for i in url] url = [i for i in url if title == i[2]] + [i for i in url if title == i[3]] if 'season' in data and int(data['season']) > 1: url = [(i[0], re.compile('\s+(\d*)$').findall(i[1])) for i in url] url = [(i[0], i[1][0]) for i in url if len(i[1]) > 0] url = [i for i in url if '%01d' % int(data['season']) == '%01d' % int(i[1])] url = url[0][0] url = urlparse.urljoin(self.base_link, url) result = client.source(url) years = re.findall('(\d{4})', data['premiered'])[0] if 'tvshowtitle' in data else data['year'] years = ['%s' % str(years), '%s' % str(int(years)+1), '%s' % str(int(years)-1)] year = re.compile('<dd>(\d{4})</dd>').findall(result)[0] if not year in years: return sources except: pass try: if not result == '': raise Exception() url = urlparse.urljoin(self.base_link, url) try: url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(url)[0] except: pass result = client.source(url) except: pass try: 
quality = client.parseDOM(result, 'dd', attrs = {'class': 'quality'})[0].lower() except: quality = 'hd' if quality == 'cam' or quality == 'ts': quality = 'CAM' elif quality == 'hd' or 'hd ' in quality: quality = 'HD' else: quality = 'SD' result = client.parseDOM(result, 'ul', attrs = {'class': 'episodes'}) result = zip(client.parseDOM(result, 'a', ret='data-id'), client.parseDOM(result, 'a')) result = [(i[0], re.findall('(\d+)', i[1])) for i in result] result = [(i[0], ''.join(i[1][:1])) for i in result] try: result = [i for i in result if '%01d' % int(i[1]) == '%01d' % int(episode)] except: pass links = [urllib.urlencode({'hash_id': i[0], 'referer': url}) for i in result] for i in links: sources.append({'source': 'gvideo', 'quality': quality, 'provider': 'Ninemovies', 'url': i, 'direct': True, 'debridonly': False}) try: if not quality == 'HD': raise Exception() quality = directstream.googletag(self.resolve(links[0]))[0]['quality'] if not quality == 'SD': raise Exception() for i in sources: i['quality'] = 'SD' except: pass return sources except: return sources def resolve(self, url): try: data = urlparse.parse_qs(url) data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data]) headers = {'X-Requested-With': 'XMLHttpRequest'} now = time.localtime() url = '/ajax/film/episode?hash_id=%s&f=&p=%s' % (data['hash_id'], now.tm_hour + now.tm_min) url = urlparse.urljoin(self.base_link, url) result = client.source(url, headers=headers, referer=data['referer']) result = json.loads(result) grabber = {'flash': 1, 'json': 1, 's': now.tm_min, 'link': result['videoUrlHash'], '_': int(time.time())} grabber = result['grabber'] + '?' + urllib.urlencode(grabber) result = client.source(grabber, headers=headers, referer=url) result = json.loads(result) url = [(re.findall('(\d+)', i['label']), i['file']) for i in result if 'label' in i and 'file' in i] url = [(int(i[0][0]), i[1]) for i in url if len(i[0]) > 0] url = sorted(url, key=lambda k: k[0]) url = url[-1][1] url = client.request(url, output='geturl') if 'requiressl=yes' in url: url = url.replace('http://', 'https://') else: url = url.replace('https://', 'http://') return url except: return
gpl-2.0
-7,445,599,848,500,218,000
35.849246
157
0.517114
false
3.762442
false
false
false
LibraryOfCongress/gazetteer
etl/parser/hmdb.py
1
1998
import sys, json, os, datetime
from shapely.geometry import asShape, mapping
from fiona import collection
from core import Dump
import core
import codecs

#name, cmt, desc, link1_href

def extract_shapefile(shapefile, uri_name, simplify_tolerance=None):
    for feature in collection(shapefile, "r"):
        geometry = feature["geometry"]
        properties = feature["properties"]

        #calculate centroid
        geom_obj = asShape(geometry)

        centroid = feature["geometry"]["coordinates"]

        name = properties["name"]

        address = {
            "street" : feature.get("cmt")
        }

        #alternate names
        alternates = []

        feature_code = "HSTS"

        source = properties  #keep all fields anyhow

        # unique URI which internally gets converted to the place id.
        uri = properties.get("link1_href") + "#"+feature["id"]

        timeframe = {}

        updated = datetime.datetime.utcnow().replace(second=0, microsecond=0).isoformat()

        place = {
            "name":name,
            "centroid":centroid,
            "feature_code": feature_code,
            "geometry":geometry,
            "is_primary": True,
            "source": source,
            "alternate": alternates,
            "updated": updated,
            "uris":[uri],
            "relationships": [],
            "timeframe":timeframe,
            "admin":[]
        }

        dump.write(uri, place)


if __name__ == "__main__":
    shapefile, dump_path = sys.argv[1:3]
    uri_name = "http://www.hmdb.org/"

    #simplify_tolerance = .01  # ~ 11km (.001 = 111m)
    simplify_tolerance = None

    dump_basename = os.path.basename(shapefile)
    dump = Dump(dump_path + "/shapefile/"+ dump_basename + ".%04d.json.gz")
    dump.max_rows = "1000"

    extract_shapefile(shapefile, uri_name, simplify_tolerance)

    dump.close()

#python hmdb.py ../../../hmdb.shp hmdbdump
mit
-4,251,280,113,043,998,000
24.615385
89
0.553554
false
4.102669
false
false
false
lucasmello/Driloader
driloader/browser/internet_explorer.py
1
4135
# pylint: disable=anomalous-backslash-in-string, too-many-locals,
# pylint: disable=multiple-statements

"""
Module that abstract operations to handle Internet Explorer versions.
"""

import os
import platform
import re
import xml.etree.ElementTree as ET

import requests

from driloader.browser.exceptions import BrowserDetectionError
from driloader.http.proxy import Proxy
from driloader.utils.commands import Commands
from .basebrowser import BaseBrowser
from .drivers import Driver
from ..http.operations import HttpOperations
from ..utils.file import FileHandler


class IE(BaseBrowser):
    """ Implements all BaseBrowser methods to find the proper
    Internet Explorer version. """

    _find_version_32_regex = r'IEDriverServer_Win32_([\d]+\.[\d]+\.[\d])'
    _find_version_64_regex = r'IEDriverServer_x64_([\d]+\.[\d]+\.[\d])'

    def __init__(self, driver: Driver):
        super().__init__('IE')
        self.x64 = IE._is_windows_x64()
        self._driver = driver

    def _latest_driver(self):
        """ Gets the latest ie driver version.

        :return: the latest ie driver version.
        """
        resp = requests.get(self._config.latest_release_url(),
                            proxies=Proxy().urls)
        xml_dl = ET.fromstring(resp.text)
        root = ET.ElementTree(xml_dl)
        tag = root.getroot().tag
        tag = tag.rpartition('}')[0] + tag.rpartition('}')[1]
        contents = root.findall(tag + 'Contents')
        last_version = 0
        version_str = '0.0.0'
        last_version_str = '0.0.0'
        if self.x64:
            pattern = IE._find_version_64_regex
        else:
            pattern = IE._find_version_32_regex
        os_type = 'x64' if self.x64 else 'Win32'
        for content in contents:
            key = content.find(tag + 'Key').text
            driver_section = 'IEDriverServer_{}_'.format(os_type) in key
            if driver_section:
                version_nbr = re.search(pattern, key)
                if version_nbr is not None:
                    version_str = version_nbr.group(1)
                try:
                    if version_str is not None:
                        version = float(version_str.rpartition('.')[0])
                    else:
                        version = 0
                except ValueError:
                    version = 0
                if version >= last_version:
                    last_version = version
                    last_version_str = version_str
        return last_version_str

    def _driver_matching_installed_version(self):
        # TODO: Version matcher for IE.
        return self._latest_driver()

    def installed_browser_version(self):
        """ Returns Internet Explorer version.

        Args:

        Returns:
            Returns an int with the browser version.

        Raises:
            BrowserDetectionError: Case something goes wrong when getting browser version.

        """
        if os.name != "nt":
            raise BrowserDetectionError('Unable to retrieve IE version.',
                                        'System is not Windows.')
        cmd = ['reg', 'query',
               'HKEY_LOCAL_MACHINE\Software\Microsoft\Internet Explorer',
               '/v', 'svcVersion']
        try:
            output = Commands.run(cmd)
            reg = re.search(self._config.search_regex_pattern(), str(output))
            str_version = reg.group(0)
            int_version = int(str_version.partition(".")[0])
        except Exception as error:
            raise BrowserDetectionError('Unable to retrieve IE version '
                                        'from system.', error) from error
        return int_version

    @staticmethod
    def _is_windows_x64():
        return platform.machine().endswith('64')

    def get_driver(self):
        """ API to expose to client to download the driver and unzip it. """
        self._driver.version = self._driver_matching_installed_version()
        return self._download_and_unzip(HttpOperations(), self._driver,
                                        FileHandler())
mit
-4,724,605,588,114,314,000
32.893443
77
0.564692
false
4.361814
false
false
false
PyBossa/pybossa
pybossa/auth/webhook.py
1
1833
# -*- coding: utf8 -*-
# This file is part of PYBOSSA.
#
# Copyright (C) 2015 SF Isle of Man Limited
#
# PYBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PYBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PYBOSSA.  If not, see <http://www.gnu.org/licenses/>.


class WebhookAuth(object):

    _specific_actions = []

    def __init__(self, project_repo):
        self.project_repo = project_repo

    @property
    def specific_actions(self):
        return self._specific_actions

    def can(self, user, action, webhook=None, project_id=None):
        action = ''.join(['_', action])
        return getattr(self, action)(user, webhook, project_id)

    def _create(self, user, webhook, project_id=None):
        return False

    def _read(self, user, webhook=None, project_id=None):
        if user.is_anonymous() or (webhook is None and project_id is None):
            return False
        project = self._get_project(webhook, project_id)
        return user.admin or user.id in project.owners_ids

    def _update(self, user, webhook, project_id=None):
        return False

    def _delete(self, user, webhook, project_id=None):
        return False

    def _get_project(self, webhook, project_id):
        if webhook is not None:
            return self.project_repo.get(webhook.project_id)
        return self.project_repo.get(project_id)
agpl-3.0
-6,669,273,108,467,857,000
33.584906
77
0.67976
false
3.826722
false
false
false
spring01/libPSI
lib/python/grendel/chemistry/molecule_stub.py
1
11089
# FAILED ATTEMPT AT REFORMING MOLECULESTUB # Perhaps I'll come back to this later... #from grendel import type_checking_enabled, sanity_checking_enabled #from grendel.chemistry.atom import Atom #from grendel.gmath import magnitude, angle_between_vectors #from grendel.gmath.matrix import Matrix #from grendel.util.decorators import with_flexible_arguments, typechecked, IterableOf #from grendel.util.exceptions import ChemistryError #from grendel.util.overloading import overloaded, OverloadedFunctionCallError #from grendel.util.strings import indented #from grendel.util.units import strip_units, DistanceUnit, AngularUnit, Radians, Degrees, Angstroms, isunit ## Immutable "friend class" of Molecule #class MoleculeStub(object): # """ # Immutable "friend" class of `Molecule`, used for hashing. # """ # # #################### # # Class Attributes # # #################### # # eq_precision = 8 # same_internal_tol = {AngularUnit: 0.0001*Degrees, DistanceUnit: 1e-7*Angstroms} # # ############## # # Attributes # # ############## # # multiplicity = None # """ The multiplicity of the electronic state of the molecule. # (i.e. 2S+1 where S is the total spin). Defaults to singlet. """ # # charge = None # """ The charge on the molecule. Defaults to neutral. """ # # reoriented_matrix = None # # ###################### # # Private Attributes # # ###################### # # _hash = None # _cartesian_representation = None # _cartesian_units = None # _internal_representation = None # _from_molecule = None # _xyz = None # _element_list = None # # ################## # # Initialization # # ################## # # @overloaded # def __init__(self, *args, **kwargs): # raise OverloadedFunctionCallError # # @__init__.overload_with( # atoms=IterableOf('Atom'), # ) # def __init__(self, # atoms, # **kwargs): # self.__init__( # [(atom.element, atom.isotope) for atom in atoms], # Matrix([atom.position for atom in atoms]), # **kwargs) # # @__init__.overload_with( # cartesian_units=isunit, # charge=(int, None), # multiplicity=(int, None) # ) # def __init__(self, # elements_and_isotopes, # xyz, # cartesian_units=DistanceUnit.default, # charge=None, # multiplicity=None): # self._cartesian_units = cartesian_units # self.charge = charge if charge is not None else Molecule.default_charge # self.multiplicity = multiplicity if multiplicity is not None else Molecule.default_multiplicity # self._xyz = xyz # self._element_list = elements_and_isotopes # # TODO strip units # tmpmol = Molecule( # [Atom(el, iso, pos) for (el, iso), pos in zip(self._element_list, self._xyz.iter_rows)], # charge=self.charge, # multiplicity=self.multiplicity # ) # self.reoriented_matrix = tmpmol.reoriented().xyz # # ################### # # Special Methods # # ################### # # def __hash__(self): # if self._hash is not None: # return self._hash # self._hash = MoleculeDict.hash_for(self) # return self._hash # # def __eq__(self, other): # if isinstance(other, MoleculeStub): # if [a.isotope for a in self] != [a.isotope for a in other]: # return False # elif (self.multiplicity, self.charge) != (other.multiplicity, other.charge): # return False # else: # reoriented = self.reoriented_matrix * self._cartesian_units.to(DistanceUnit.default) # rounded = [round(v, MoleculeStub.eq_precision) for v in reoriented.ravel()] # other_oriented = other.reoriented_matrix * other._cartesian_units.to(DistanceUnit.default) # other_rounded = [round(v, MoleculeStub.eq_precision) for v in other_oriented.ravel()] # return rounded == other_rounded # else: # return NotImplemented # # # 
########### # # Methods # # ########### # # # TODO document this! # # TODO class variables for default tolerances # def is_valid_stub_for(self, other, cart_tol=None, internal_tol=None, ang_tol=None): # """ # """ # # cart_tol is used for comparison between cartesian positions # cart_tol = cart_tol or 1e-8*Angstroms # # internal_tol should be unitless, since the difference between internal coordinates could have multiple # # units, and we're taking the magnitude across these units # internal_tol = internal_tol or MoleculeStub.same_internal_tol # # ang_tol is used when comparing cartesian geometries. If the angle between two corresponding atoms # # in self and other differs from the angle between the first two corresponding atoms in self and other # # by more than ang_tol, we assume they do not have the same geometry and thus return False # ang_tol = ang_tol or 1e-5*Degrees # #--------------------------------------------------------------------------------# # if type_checking_enabled: # if not isinstance(other, Molecule): # raise TypeError # if isinstance(other, MoleculeStub): # raise TypeError # #--------------------------------------------------------------------------------# # if self.multiplicity != other.multiplicity: # return False # elif self.charge != other.charge: # return False # else: # if len(self._element_list) != other.natoms: # for num, (element, isotope) in enumerate(self._element_list): # if (other[num].element, other[num].isotope) != (element, isotope): # return False # if other.natoms <= 1: # # if we have 1 or 0 atoms and we've gotten this far, we have a match # return True # #--------------------------------------------------------------------------------# # # no failures yet, so we have to compare geometries # # if self has an internal_representation, use it # if self._internal_representation is not None: # diff = self._internal_representation.values - self._internal_representation.values_for_molecule(other) # if any(abs(d) > internal_tol[c.units.genre].in_units(c.units) for d, c in zip(diff, self._internal_representation)): # return False # return True # # if mol has an internal representation, use it: # elif other.internal_representation is not None: # diff = other.internal_representation.values - other.internal_representation.values_for_molecule(self) # if any(abs(d) > internal_tol[c.units.genre].in_units(c.units) for d, c in zip(diff, other.internal_representation)): # return False # return True # else: # # They're both fully cartesian. This could take a while... 
# # We should first try to short-circuit as many ways as possible # #----------------------------------------# # # first strip units and store stripped versions to speed up the rest of the work # # strip the units off of ang_tol # ang_tol = strip_units(ang_tol, Radians) # # strip the units off of cart_tol # cart_tol = strip_units(cart_tol, self._cartesian_units) # # make a list of positions with stripped units, since we'll use it up to three times # stripped = [strip_units(atom, self._cartesian_units) for atom in (self if self.is_centered() else self.recentered()) ] # other_stripped = [strip_units(atom, self._cartesian_units) for atom in (other if other.is_centered() else other.recentered())] # #----------------------------------------# # # Try to short-circuit negatively by looking for an inconsistancy in the angles # # between pairs of corresponding atoms # # If the first atom is at the origin, use the second one # offset = 1 # if stripped[0].is_zero(): # if magnitude(stripped[0] - other_stripped[0]) > cart_tol: # return False # else: # if sanity_checking_enabled and stripped[1].is_zero(): # raise ChemistryError, "FrozenMolecule:\n{}\nhas two atoms on top of each other.".format(indented(str(self))) # if other_stripped[1].is_zero(): # return False # else: # offset = 2 # first_ang = angle_between_vectors(self.atoms[1].pos, other.atoms[1].pos) # else: # if other_stripped[0].is_zero(): # return False # else: # first_ang = angle_between_vectors(self.atoms[0].pos, other.atoms[0].pos) # for apos, opos in zip(stripped[offset:], other_stripped[offset:]): # if apos.is_zero(): # if magnitude(apos - opos) > cart_tol: # return False # elif opos.is_zero(): # # Try again, since Tensor.zero_cutoff could smaller than cart_tol, causing a false zero # if magnitude(apos - opos) > cart_tol: # return False # else: # ang = angle_between_vectors(apos, opos) # if abs(ang - first_ang) > ang_tol: # return False # # Also, the magnitude of the distance from the center of mass should be the same: # if abs(apos.magnitude() - opos.magnitude()) > cart_tol: # return False # #----------------------------------------# # # Try to short-circuit positively: # exact_match = True # for apos, opos in zip(stripped, other_stripped): # if magnitude(apos - opos) > cart_tol: # exact_match = False # break # if exact_match: # return True # exact_match = True # # Check negative version # for apos, opos in zip(stripped, other_stripped): # if magnitude(apos + opos) > cart_tol: # exact_match = False # break # if exact_match: # return True # #----------------------------------------# # # We can't short-circuit, so this is the only means we have left # # It's far more expensive than the rest, but it always works. # return self.has_same_geometry(other, cart_tol) # # ###################### ## Dependent Imports # ###################### # #from grendel.chemistry.molecule_dict import MoleculeDict #from grendel.chemistry.molecule import Molecule
gpl-2.0
2,493,225,813,903,470,000
43.534137
139
0.541257
false
3.800206
false
false
false
flying-sheep/omnitool
version.py
1
1888
from functools import total_ordering


@total_ordering
class Version():
    """Organization Class for comparable Version System

    Version integer uses decimal shift:
    2 digits major version, 2 digits minor version, 2 digits micro version
    170100 -> 17.1.0
    """
    def __init__(self, integer):
        if type(integer) == str:
            self.int = int(integer)
        elif type(integer) == int:
            self.int = integer
        else:
            raise TypeError("Version accepts int or str, not "+str(type(integer)))

    def get_version_tuple(self):
        major, minor = divmod(self.int, 10000)
        minor, micro = divmod(minor, 100)
        return major, minor, micro

    def get_name(self):
        major, minor, micro = tup = self.get_version_tuple()
        return ".".join((str(i) for i in tup))

    def __repr__(self):
        return self.name

    def __str__(self):
        return str(self.int)

    def __eq__(self, other):
        if isinstance(other, Version):
            return self.int == other.int
        return self.int == other

    def __lt__(self, other):
        if isinstance(other, Version):
            return self.int < other.int
        return self.int < other

    def __int__(self):
        return self.int

    name = property(get_name)
    as_tuple = property(get_version_tuple)


current = Version(100)

if __name__ == "__main__":
    print (current)
    print (current > 200)
    print (current < 100)
    print (current > Version(50))
    assert(Version(100) > 99)
    assert(99 < Version(100))
    assert(100 == Version(100))
    assert(100 != Version(99))
    assert(Version(100) == Version(100))
    assert(Version(str(Version(100))) == Version(100))
mit
-4,095,968,977,931,674,000
29.95082
86
0.533898
false
4.300683
false
false
false
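
A minimal usage sketch for the Version class in the omnitool record above; this is illustrative only (not part of that repository's file) and assumes the class has been imported:

v = Version(170100)
assert v.as_tuple == (17, 1, 0)      # decimal shift: 2 digits each for major, minor, micro
assert v.name == "17.1.0"
assert Version("170100") == 170100   # str input and plain-int comparison both work
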
domthu/gasistafelice
gasistafelice/auth/__init__.py
1
2945
from django.utils.translation import ugettext as _, ugettext_lazy
from django.db.models.signals import post_syncdb

import permissions
from permissions.utils import register_role, register_permission

## role-related constants

NOBODY = 'NOBODY'
GAS_MEMBER = 'GAS_MEMBER'
GAS_REFERRER_SUPPLIER = 'GAS_REFERRER_SUPPLIER'
GAS_REFERRER_ORDER = 'GAS_REFERRER_ORDER'
GAS_REFERRER_WITHDRAWAL = 'GAS_REFERRER_WITHDRAWAL'
GAS_REFERRER_DELIVERY = 'GAS_REFERRER_DELIVERY'
GAS_REFERRER_CASH = 'GAS_REFERRER_CASH'
GAS_REFERRER_TECH = 'GAS_REFERRER_TECH'
SUPPLIER_REFERRER = 'SUPPLIER_REFERRER'

ROLES_LIST = [
    (NOBODY, _('Nobody')),
    (SUPPLIER_REFERRER, _('Supplier referrer')),
    (GAS_MEMBER, _('GAS member')),
    (GAS_REFERRER_SUPPLIER, _('GAS supplier referrer')),
    (GAS_REFERRER_ORDER, _('GAS order referrer')),
    (GAS_REFERRER_WITHDRAWAL, _('GAS withdrawal referrer')),
    (GAS_REFERRER_DELIVERY, _('GAS delivery referrer')),
    (GAS_REFERRER_CASH, _('GAS cash referrer')),
    (GAS_REFERRER_TECH, _('GAS technical referrer')),
]

valid_params_for_roles = (
    ## format
    # (Role's codename, allowed model for 1st param, allowed model for 2nd param)
    (SUPPLIER_REFERRER, 'supplier.Supplier', ''),
    (GAS_MEMBER, 'gas.GAS', ''),
    (GAS_REFERRER_CASH, 'gas.GAS', ''),
    (GAS_REFERRER_TECH, 'gas.GAS', ''),
    (GAS_REFERRER_SUPPLIER, 'gas.GAS', 'supplier.Supplier'),
    (GAS_REFERRER_ORDER, 'gas.GASSupplierOrder', ''),
    (GAS_REFERRER_WITHDRAWAL, 'gas.Withdrawal', ''),
    (GAS_REFERRER_DELIVERY, 'gas.Delivery', ''),
)

## permission-related constants

VIEW = 'view'
LIST = 'list'
CREATE = 'create'
EDIT = 'edit'
DELETE = 'delete'
ALL = 'all'  # catchall

PERMISSIONS_LIST = [
    (VIEW, _('View')),
    (LIST, _('List')),
    (CREATE, _('Create')),
    (EDIT, _('Edit')),
    (DELETE, _('Delete')),
    (ALL, _('All')),  # catchall
]


class PermissionsRegister(object):
    """Support global register to hold Role and Permissions dicts"""

    # a dictionary holding Roles model instances, keyed by name
    roles_dict = {}

    # a dictionary holding Permission model instances, keyed by Permission's codename
    perms_dict = {}

    # NOTE: the accessors below take the instance (named `cls` here) rather than
    # being classmethods; they read the class-level dictionaries either way.

    @property
    def roles(cls):
        return cls.roles_dict.values()

    @property
    def perms(cls):
        return cls.perms_dict.values()

    @property
    def role_names(cls):
        return cls.roles_dict.keys()

    @property
    def perm_names(cls):
        return cls.perms_dict.keys()

    def get_role(cls, code):
        return cls.roles_dict[code]

    def get_perm(cls, code):
        return cls.perms_dict[code]


def init_permissions(sender, **kwargs):
    ## register project-level Roles
    for (name, description) in ROLES_LIST:
        PermissionsRegister.roles_dict[name] = register_role(name)
    ## register project-level Permissions
    for (codename, name) in PERMISSIONS_LIST:
        PermissionsRegister.perms_dict[codename] = register_permission(name, codename)
    return


post_syncdb.connect(init_permissions, sender=permissions.models)
agpl-3.0
7,373,385,643,594,439,000
26.268519
86
0.683531
false
2.977755
false
false
false
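
A hedged usage sketch for the gasistafelice record above: once syncdb has fired init_permissions, the register can be read back through a PermissionsRegister instance. This is illustrative only and assumes the module's names are in scope:

reg = PermissionsRegister()
gas_member_role = reg.get_role(GAS_MEMBER)   # Role instance created by register_role
view_perm = reg.get_perm(VIEW)               # Permission instance created by register_permission
print(list(reg.role_names))                  # all registered role codenames
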
cheral/orange3
Orange/canvas/application/outputview.py
6
6738
""" """ import traceback from AnyQt.QtWidgets import QWidget, QPlainTextEdit, QVBoxLayout, QSizePolicy from AnyQt.QtGui import QTextCursor, QTextCharFormat, QFont from AnyQt.QtCore import Qt, QObject, QCoreApplication, QThread, QSize from AnyQt.QtCore import pyqtSignal as Signal class TerminalView(QPlainTextEdit): def __init__(self, *args, **kwargs): QPlainTextEdit.__init__(self, *args, **kwargs) self.setFrameStyle(QPlainTextEdit.NoFrame) self.setTextInteractionFlags(Qt.TextBrowserInteraction) self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn) self.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred) font = self.font() font.setStyleHint(QFont.Monospace) font.setFamily("Monospace") self.setFont(font) def sizeHint(self): metrics = self.fontMetrics() width = metrics.boundingRect("_" * 81).width() height = metrics.lineSpacing() scroll_width = self.verticalScrollBar().width() size = QSize(width + scroll_width, height * 25) return size class OutputView(QWidget): def __init__(self, parent=None, **kwargs): QWidget.__init__(self, parent, **kwargs) self.__lines = 5000 self.setLayout(QVBoxLayout()) self.layout().setContentsMargins(0, 0, 0, 0) self.__text = TerminalView() self.__currentCharFormat = self.__text.currentCharFormat() self.layout().addWidget(self.__text) def setMaximumLines(self, lines): """ Set the maximum number of lines to keep displayed. """ if self.__lines != lines: self.__lines = lines self.__text.setMaximumBlockCount(lines) def maximumLines(self): """ Return the maximum number of lines in the display. """ return self.__lines def clear(self): """ Clear the displayed text. """ self.__text.clear() def setCurrentCharFormat(self, charformat): """Set the QTextCharFormat to be used when writing. """ if self.__currentCharFormat != charformat: self.__currentCharFormat = charformat def currentCharFormat(self): return self.__currentCharFormat def toPlainText(self): """ Return the full contents of the output view. """ return self.__text.toPlainText() # A file like interface. def write(self, string): self.__text.moveCursor(QTextCursor.End, QTextCursor.MoveAnchor) self.__text.setCurrentCharFormat(self.__currentCharFormat) self.__text.insertPlainText(string) def writelines(self, lines): self.write("".join(lines)) def flush(self): pass def writeWithFormat(self, string, charformat): self.__text.moveCursor(QTextCursor.End, QTextCursor.MoveAnchor) self.__text.setCurrentCharFormat(charformat) self.__text.insertPlainText(string) def writelinesWithFormat(self, lines, charformat): self.writeWithFormat("".join(lines), charformat) def formated(self, color=None, background=None, weight=None, italic=None, underline=None, font=None): """ Return a formated file like object proxy. """ charformat = update_char_format( self.currentCharFormat(), color, background, weight, italic, underline, font ) return formater(self, charformat) def update_char_format(baseformat, color=None, background=None, weight=None, italic=None, underline=None, font=None): """ Return a copy of `baseformat` :class:`QTextCharFormat` with updated color, weight, background and font properties. 
""" charformat = QTextCharFormat(baseformat) if color is not None: charformat.setForeground(color) if background is not None: charformat.setBackground(background) if font is not None: charformat.setFont(font) else: font = update_font(baseformat.font(), weight, italic, underline) charformat.setFont(font) return charformat def update_font(basefont, weight=None, italic=None, underline=None, pixelSize=None, pointSize=None): """ Return a copy of `basefont` :class:`QFont` with updated properties. """ font = QFont(basefont) if weight is not None: font.setWeight(weight) if italic is not None: font.setItalic(italic) if underline is not None: font.setUnderline(underline) if pixelSize is not None: font.setPixelSize(pixelSize) if pointSize is not None: font.setPointSize(pointSize) return font class formater(object): def __init__(self, outputview, charformat): self.outputview = outputview self.charformat = charformat def write(self, string): self.outputview.writeWithFormat(string, self.charformat) def writelines(self, lines): self.outputview.writelines(lines, self.charformat) def flush(self): self.outputview.flush() def formated(self, color=None, background=None, weight=None, italic=None, underline=None, font=None): charformat = update_char_format(self.charformat, color, background, weight, italic, underline, font) return formater(self.outputview, charformat) def __enter__(self): return self def __exit__(self, *args): self.outputview = None self.charformat = None class TextStream(QObject): stream = Signal(str) flushed = Signal() def __init__(self, parent=None): QObject.__init__(self, parent) def write(self, string): self.stream.emit(string) def writelines(self, lines): self.stream.emit("".join(lines)) def flush(self): self.flushed.emit() class ExceptHook(QObject): handledException = Signal(object) def __init__(self, parent=None, stream=None, canvas=None, **kwargs): QObject.__init__(self, parent, **kwargs) self._stream = stream self._canvas = canvas def __call__(self, exc_type, exc_value, tb): if self._stream: header = exc_type.__name__ + ' Exception' if QThread.currentThread() != QCoreApplication.instance().thread(): header += " (in non-GUI thread)" text = traceback.format_exception(exc_type, exc_value, tb) text.insert(0, '{:-^79}\n'.format(' ' + header + ' ')) text.append('-' * 79 + '\n') self._stream.writelines(text) self.handledException.emit(((exc_type, exc_value, tb), self._canvas))
bsd-2-clause
1,927,550,279,089,523,200
28.552632
79
0.624221
false
4.020286
false
false
false
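
A hedged usage sketch for the Orange canvas outputview record above, showing how the formated() proxy is meant to be used to write colored text into an OutputView. It is illustrative only, assumes OutputView is importable from that module, and needs a running Qt application:

from AnyQt.QtWidgets import QApplication
from AnyQt.QtGui import QColor

app = QApplication([])
view = OutputView()
view.write("plain text\n")
err = view.formated(color=QColor("red"), weight=75)  # file-like proxy with a bold red char format
err.write("something went wrong\n")
view.show()
app.exec_()
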
sumpfgottheit/arps
arps/views.py
1
3504
from flask import render_template, request
import numbers
from pprint import pprint

from arps.restserver import app, db, ApiException, apiview
from arps.globals import *
from arps.validation import get_schemas_for_endpoint
from arps.models import *

METHODS = ['GET', 'POST', 'PUT', 'DELETE']


@app.route('/')
def main():
    l = []
    rules = [rule for rule in sorted(list(app.url_map.iter_rules()), key=lambda rule: rule.rule) if rule.rule.startswith('/api/')]
    for rule in rules:
        schema_request, schema_response = get_schemas_for_endpoint(rule.endpoint)
        l.append({
            'path': rule.rule,
            'methods': sorted([method for method in rule.methods if method in METHODS]),
            'endpoint': rule.endpoint,
            'schema_request': schema_request,
            'schema_response': schema_response,
            'doc': str(app.view_functions[rule.endpoint].__doc__).strip()
        })
    return render_template('apidoc.html', rules=l)


def populate_object(o: object, d: dict):
    changed = set()
    unchanged = set()
    unkown = set()
    for key, value in d.items():
        if not isinstance(value, (str, numbers.Number, bool)):
            unkown.add(key)
            continue
        if hasattr(o, key):
            if getattr(o, key) == value:
                unchanged.add(key)
            else:
                setattr(o, key, value)
                changed.add(key)
        else:
            unkown.add(key)
    return changed, unchanged, unkown


def get_object_or_404(model, *criterion, message=""):
    r = db.session.query(model).get(criterion)
    if r is None:
        raise ApiException(message, code=404)
    else:
        return r


@app.route('/api/v1.0/users/', methods=['GET'], endpoint=endpoint_user_list)
@apiview()
def user_list():
    """
    Return a list of all users
    """
    users = db.session.query(User).all()
    message = [{**user.as_dict, **{'roles': [role.name for role in user.roles]}} for user in users]
    return message


@app.route('/api/v1.0/users/<int:user_id>', methods=['GET'], endpoint=endpoint_user_get)
@apiview()
def user_get(user_id):
    """
    Return the user with a specific id.
    """
    user = get_object_or_404(User, user_id, message='No User with id %s found' % user_id)
    return {**user.as_dict, **{'roles': [role.name for role in user.roles]}}


@app.route('/api/v1.0/users/<int:user_id>', methods=['PUT', 'OPTIONS'], endpoint=endpoint_user_update)
@apiview(needs_json_in_request=True)
def user_update(user_id):
    """
    Update the user with the given id with the dictionary provided. All fields are optional.
    If the id field is given, it must be the same value as the url leaf.

    When updating the user, no fields are required.
    """
    data = request.json['content']
    if data.get('id', user_id) != user_id:
        raise ApiException("User ID in json body and in url must be the same.")
    user = get_object_or_404(User, user_id, message='No User with id %s found' % user_id)
    populate_object(user, data)
    if 'roles' in data:
        user.set_roles(data['roles'])
    db.session.commit()
    return {**user.as_dict, **{'roles': [role.name for role in user.roles]}}


@app.route('/api/v1.0/roles/', methods=['GET'], endpoint=endpoint_role_list)
@apiview()
def role_list():
    """
    Return a list of all roles
    """
    roles = Role.query.all()
    # Build the response from the roles themselves; the original comprehension iterated over
    # an undefined `users` variable (an apparent copy-paste from user_list). This assumes
    # Role exposes an as_dict property like User does.
    message = [role.as_dict for role in roles]
    return message
mit
2,719,949,691,329,565,000
33.019417
130
0.619863
false
3.469307
false
false
false
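
A hedged illustration of what populate_object in the arps record above returns: three sets of keys (changed, unchanged, and unknown/skipped, spelled "unkown" in the file). The Thing class below is made up for the example:

class Thing:
    name = "old"
    count = 1

changed, unchanged, unkown = populate_object(Thing(), {"name": "new", "count": 1, "tags": ["a"]})
# changed == {"name"}, unchanged == {"count"}, unkown == {"tags"}  (non-scalar values are skipped)
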
repotvsupertuga/tvsupertuga.repository
script.module.streamtvsupertuga/lib/resources/lib/sources/pl/ekinomaniak.py
1
4540
# -*- coding: UTF-8 -*-
import urlparse

from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import source_utils

import urllib


class source:
    def __init__(self):
        self.priority = 1
        self.language = ['pl']
        self.domains = ['ekinomaniak.tv']
        self.base_link = 'http://ekinomaniak.tv'
        self.search_link = '/search_movies'

    def search(self, localtitle, year, search_type):
        try:
            url = urlparse.urljoin(self.base_link, self.search_link)
            r = client.request(url, redirect=False, post={'q': cleantitle.query(localtitle), 'sb': ''})
            r = client.parseDOM(r, 'div', attrs={'class': 'small-item'})
            local_simple = cleantitle.get(localtitle)
            for row in r:
                name_found = client.parseDOM(row, 'a')[1]
                year_found = name_found[name_found.find("(") + 1:name_found.find(")")]
                url = client.parseDOM(row, 'a', ret='href')[1]
                if not search_type in url:
                    continue
                if cleantitle.get(name_found) == local_simple and year_found == year:
                    return url
        except:
            return

    def movie(self, imdb, title, localtitle, aliases, year):
        return self.search(localtitle, year, 'watch-movies')

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        return self.search(localtvshowtitle, year, 'watch-tv-shows')

    def demix(self, e):
        # Letter-pair substitution table. The original literal repeated the keys "a" and "b"
        # where "A" and "B" were evidently intended (every other pair maps all four case
        # variants), so those two entries are restored here.
        result = {"d": "A", "D": "a", "a": "D", "A": "d",
                  "c": "B", "C": "b", "b": "C", "B": "c",
                  "h": "E", "H": "e", "e": "H", "E": "h",
                  "g": "F", "G": "f", "f": "G", "F": "g",
                  "l": "I", "L": "i", "i": "L", "I": "l",
                  "k": "J", "K": "j", "j": "K", "J": "k",
                  "p": "M", "P": "m", "m": "P", "M": "p",
                  "o": "N", "O": "n", "n": "O", "N": "o",
                  "u": "R", "U": "r", "r": "U", "R": "u",
                  "t": "S", "T": "s", "s": "T", "S": "t",
                  "z": "W", "Z": "w", "w": "Z", "W": "z",
                  "y": "X", "Y": "x", "x": "Y", "X": "y",
                  "3": "1", "1": "3", "4": "2", "2": "4",
                  "8": "5", "5": "8", "7": "6", "6": "7",
                  "0": "9", "9": "0"}.get(e)
        if result == None:
            result = '%'
        return result

    def decodwrd(self, e):
        r = ""
        for i in range(len(e)):
            r += self.demix(e[i])
        return r

    def decodeURIComponent(self, r):
        return urllib.unquote(r.encode("utf-8"))

    def shwp(self, e):
        r = self.decodwrd(e)
        return self.decodeURIComponent(r)

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        url = urlparse.urljoin(self.base_link, url)
        r = client.request(url)
        r = client.parseDOM(r, 'li', attrs={'class': 'active'})
        for row in r:
            span_season = client.parseDOM(row, 'span')[0]
            span_season = span_season.split(' ')[1]
            if span_season == season:
                eps = client.parseDOM(row, 'li')
                for ep in eps:
                    ep_no = client.parseDOM(ep, 'a')[0].split(' ')[1]
                    if ep_no == episode:
                        return client.parseDOM(ep, 'a', ret='href')[0]
        return None

    def get_lang_by_type(self, lang_type):
        if 'Lektor' in lang_type:
            return 'Lektor'
        if 'Dubbing' in lang_type:
            return 'Dubbing'
        if 'Napisy' in lang_type:
            return 'Napisy'
        return None

    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url == None:
                return sources
            r = client.request(urlparse.urljoin(self.base_link, url), redirect=False)
            info = self.get_lang_by_type(client.parseDOM(r, 'title')[0])
            r = client.parseDOM(r, 'div', attrs={'class': 'tab-pane active'})[0]
            r = client.parseDOM(r, 'script')[0]
            script = r.split('"')[1]
            decoded = self.shwp(script)
            link = client.parseDOM(decoded, 'iframe', ret='src')[0]
            valid, host = source_utils.is_host_valid(link, hostDict)
            if not valid:
                return sources
            q = source_utils.check_sd_url(link)
            sources.append({'source': host, 'quality': q, 'language': 'pl', 'url': link,
                            'info': info, 'direct': False, 'debridonly': False})
            return sources
        except:
            return sources

    def resolve(self, url):
        return url
gpl-2.0
-6,697,926,578,966,888,000
38.137931
327
0.490969
false
3.252149
false
false
false
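
For the ekinomaniak record above, a hedged stand-alone sketch of the decoding idea behind demix/decodwrd/shwp: a fixed character-substitution pass followed by URL decoding. It is independent of the Kodi add-on modules and only mirrors the pairing table from the file:

try:
    from urllib import unquote          # Python 2, as in the add-on
except ImportError:
    from urllib.parse import unquote    # Python 3 fallback for the sketch

TABLE = {}
for a, b in ["da", "cb", "he", "gf", "li", "kj", "pm", "on", "ur", "ts", "zw", "yx"]:
    TABLE[a] = b.upper(); TABLE[a.upper()] = b
    TABLE[b] = a.upper(); TABLE[b.upper()] = a
for a, b in ["31", "42", "85", "76", "09"]:
    TABLE[a] = b; TABLE[b] = a

def decode(s):
    # unknown characters fall back to '%', exactly as demix does
    return unquote("".join(TABLE.get(ch, "%") for ch in s))
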
Brazelton-Lab/bio_utils
bio_utils/__init__.py
1
1086
#! /usr/bin/env python3

"""Software library containing common bioinformatic functions

Copyright:
    __init__.py  software library containing common bioinformatic functions
    Copyright (C) 2015  William Brazelton, Alex Hyer

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""

__author__ = 'Alex Hyer, William Brazelton, Christopher Thornton'
__email__ = '[email protected]'
__license__ = 'GPLv3'
__maintainer__ = 'Alex Hyer'
__status__ = 'Production'
__version__ = '1.1.1'
gpl-3.0
-5,207,140,348,901,563,000
36.448276
74
0.726519
false
3.892473
false
false
false
iamgp/pyCa
pyCa/Graph.py
1
2559
from . import *

# Graphics Stuff
import matplotlib.pyplot as plt


class Graph(object):
    """docstring for Graph"""

    def __init__(self, Experiment):
        self.Experiment = Experiment
        self.numberOfStimulantsAdded = 0
        self.nameToUse = 0

    def plot(self):
        print ''
        log(self.Experiment.name, colour="yellow")
        log('==================', colour="yellow")

        for i, col in self.Experiment.data.iteritems():
            if i == 0:
                col.name = "time"
            if col.name == "time":
                continue

            fig, ax = plt.subplots(1)
            plt.plot(self.Experiment.data.time, col, '-')
            plt.title(col.name)
            ax.set_ylim(
                col.min() - (0.1 * col.min()),
                col.max() + (0.1 * col.max()))

            self.nameToUse = 0

            print ''
            log(col.name, colour="red")
            log('--------------------------------------', colour="red")

            def onclick(event):
                if self.numberOfStimulantsAdded == 0:
                    x1 = event.xdata
                    y1 = event.ydata
                    log(' > 1st point, adding x1:{} y1:{} to {}'.format(
                        x1, y1, self.Experiment.names[self.nameToUse]),
                        colour="black")
                    self.Experiment.currentCell.addFirstPoint(x1, y1)
                    self.numberOfStimulantsAdded = 1

                elif self.numberOfStimulantsAdded == 1:
                    x2 = event.xdata
                    y2 = event.ydata
                    log(' > 2nd point, adding x2:{} y2:{} to {}'.format(
                        x2, y2, self.Experiment.names[self.nameToUse]),
                        colour="black")
                    self.Experiment.currentCell.addSecondPointWithName(
                        x2, y2, self.Experiment.names[self.nameToUse])
                    self.numberOfStimulantsAdded = 0
                    self.nameToUse = self.nameToUse + 1

            fig.canvas.mpl_connect('button_press_event', onclick)

            for t in self.Experiment.times:
                plt.axvspan(t, t + 5, color='red', alpha=0.1)

            plt.show()

            self.Experiment.currentCell.cellname = col.name
            self.Experiment.cells.append(self.Experiment.currentCell)
            if self.Experiment.currentCell.describe() is not None:
                log(self.Experiment.currentCell.describe(), colour="black")
            self.Experiment.currentCell = Cell()
gpl-3.0
8,551,664,604,400,707,000
31.392405
77
0.490035
false
4.243781
false
false
false
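
A minimal, self-contained sketch of the matplotlib pattern Graph.plot in the pyCa record above relies on: registering a button_press_event callback and reading the click coordinates. It is independent of the pyCa package:

import matplotlib.pyplot as plt

clicks = []

def onclick(event):
    # event.xdata / event.ydata are None when the click lands outside the axes
    if event.xdata is not None:
        clicks.append((event.xdata, event.ydata))

fig, ax = plt.subplots(1)
ax.plot([0, 1, 2], [0, 1, 0], '-')
fig.canvas.mpl_connect('button_press_event', onclick)
plt.show()  # coordinates are collected interactively while the window is open
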