Dataset schema (column, dtype, observed range / number of classes):

column          dtype           range / classes
repo_name       stringlengths   5 to 92
path            stringlengths   4 to 232
copies          stringclasses   19 values
size            stringlengths   4 to 7
content         stringlengths   721 to 1.04M
license         stringclasses   15 values
hash            int64           -9,223,277,421,539,062,000 to 9,223,102,107B
line_mean       float64         6.51 to 99.9
line_max        int64           15 to 997
alpha_frac      float64         0.25 to 0.97
autogenerated   bool            1 class
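The columns above form the per-file schema of a source-code dump: each record carries a repository name, a file path, the flattened file contents, a license tag, and a few summary statistics (mean and maximum line length, alphabetic fraction, an autogenerated flag). A minimal sketch of loading and filtering such a table with the Hugging Face datasets library; the dataset identifier used here is hypothetical, since the dump does not name its source:

from datasets import load_dataset

# Hypothetical identifier -- substitute the actual source of this dump.
ds = load_dataset("some-org/source-code-dump", split="train")

# The features should mirror the schema above: repo_name, path, copies, size,
# content, license, hash, line_mean, line_max, alpha_frac, autogenerated.
print(ds.features)

# Example: keep only GPL-2.0 files that are not flagged as autogenerated.
subset = ds.filter(lambda row: row["license"] == "gpl-2.0" and not row["autogenerated"])
print(len(subset), "matching rows")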
repo_name: edineicolli/daruma-exemplo-python
path: scripts/fiscal/ui_fiscal_icfefetuarpagamentoformatado.py
copies: 1
size: 4753
content:
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'ui_fiscal_icfefetuarpagamentoformatado.ui'
#
# Created: Mon Nov 24 22:25:42 2014
#      by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!

from PySide import QtCore, QtGui
from pydaruma.pydaruma import iCFEfetuarPagamentoFormatado_ECF_Daruma
from scripts.fiscal.retornofiscal import tratarRetornoFiscal

class Ui_ui_FISCAL_iCFEfetuarPagamentoFormatado(QtGui.QWidget):
    def __init__(self):
        super(Ui_ui_FISCAL_iCFEfetuarPagamentoFormatado, self).__init__()
        self.setupUi(self)
        self.pushButtonEnviar.clicked.connect(self.on_pushButtonEnviar_clicked)
        self.pushButtonCancelar.clicked.connect(self.on_pushButtonCancelar_clicked)

    def on_pushButtonEnviar_clicked(self):
        StrFPGTO = self.lineEditFormaPGTO.text()
        StrValor = self.lineEditValor.text()
        tratarRetornoFiscal(iCFEfetuarPagamentoFormatado_ECF_Daruma(StrFPGTO, StrValor), self)

    def on_pushButtonCancelar_clicked(self):
        self.close()

    def setupUi(self, ui_FISCAL_iCFEfetuarPagamentoFormatado):
        ui_FISCAL_iCFEfetuarPagamentoFormatado.setObjectName("ui_FISCAL_iCFEfetuarPagamentoFormatado")
        ui_FISCAL_iCFEfetuarPagamentoFormatado.resize(309, 132)
        ui_FISCAL_iCFEfetuarPagamentoFormatado.setMinimumSize(QtCore.QSize(309, 132))
        ui_FISCAL_iCFEfetuarPagamentoFormatado.setMaximumSize(QtCore.QSize(309, 132))
        self.verticalLayout = QtGui.QVBoxLayout(ui_FISCAL_iCFEfetuarPagamentoFormatado)
        self.verticalLayout.setObjectName("verticalLayout")
        self.gridLayout = QtGui.QGridLayout()
        self.gridLayout.setObjectName("gridLayout")
        self.labelForma = QtGui.QLabel(ui_FISCAL_iCFEfetuarPagamentoFormatado)
        self.labelForma.setObjectName("labelForma")
        self.gridLayout.addWidget(self.labelForma, 0, 0, 1, 1)
        self.lineEditFormaPGTO = QtGui.QLineEdit(ui_FISCAL_iCFEfetuarPagamentoFormatado)
        self.lineEditFormaPGTO.setMaximumSize(QtCore.QSize(100, 16777215))
        self.lineEditFormaPGTO.setObjectName("lineEditFormaPGTO")
        self.gridLayout.addWidget(self.lineEditFormaPGTO, 0, 1, 1, 1)
        self.labelValor = QtGui.QLabel(ui_FISCAL_iCFEfetuarPagamentoFormatado)
        self.labelValor.setObjectName("labelValor")
        self.gridLayout.addWidget(self.labelValor, 1, 0, 1, 1)
        self.lineEditValor = QtGui.QLineEdit(ui_FISCAL_iCFEfetuarPagamentoFormatado)
        self.lineEditValor.setMaximumSize(QtCore.QSize(70, 25))
        self.lineEditValor.setObjectName("lineEditValor")
        self.gridLayout.addWidget(self.lineEditValor, 1, 1, 1, 1)
        self.verticalLayout.addLayout(self.gridLayout)
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.pushButtonEnviar = QtGui.QPushButton(ui_FISCAL_iCFEfetuarPagamentoFormatado)
        self.pushButtonEnviar.setObjectName("pushButtonEnviar")
        self.horizontalLayout.addWidget(self.pushButtonEnviar)
        self.pushButtonCancelar = QtGui.QPushButton(ui_FISCAL_iCFEfetuarPagamentoFormatado)
        self.pushButtonCancelar.setObjectName("pushButtonCancelar")
        self.horizontalLayout.addWidget(self.pushButtonCancelar)
        spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem1)
        self.verticalLayout.addLayout(self.horizontalLayout)

        self.retranslateUi(ui_FISCAL_iCFEfetuarPagamentoFormatado)
        QtCore.QMetaObject.connectSlotsByName(ui_FISCAL_iCFEfetuarPagamentoFormatado)

    def retranslateUi(self, ui_FISCAL_iCFEfetuarPagamentoFormatado):
        ui_FISCAL_iCFEfetuarPagamentoFormatado.setWindowTitle(QtGui.QApplication.translate("ui_FISCAL_iCFEfetuarPagamentoFormatado", "iCFEfetuarPagamentoFormatado_ECF_Daruma", None, QtGui.QApplication.UnicodeUTF8))
        self.labelForma.setText(QtGui.QApplication.translate("ui_FISCAL_iCFEfetuarPagamentoFormatado", "Forma Pagto:", None, QtGui.QApplication.UnicodeUTF8))
        self.labelValor.setText(QtGui.QApplication.translate("ui_FISCAL_iCFEfetuarPagamentoFormatado", "Valor:", None, QtGui.QApplication.UnicodeUTF8))
        self.pushButtonEnviar.setText(QtGui.QApplication.translate("ui_FISCAL_iCFEfetuarPagamentoFormatado", "Enviar", None, QtGui.QApplication.UnicodeUTF8))
        self.pushButtonCancelar.setText(QtGui.QApplication.translate("ui_FISCAL_iCFEfetuarPagamentoFormatado", "Cancelar", None, QtGui.QApplication.UnicodeUTF8))
license: gpl-2.0
hash: 5,616,585,045,271,257,000
line_mean: 59.164557
line_max: 214
alpha_frac: 0.764149
autogenerated: false
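Because each record pairs repo_name and path with the complete file contents, a row such as the one above can be written back into a working tree for inspection. A minimal sketch under the schema shown earlier; the rows iterable and the output directory are placeholders, not part of the dump:

import os

def write_row_to_tree(row, out_dir="checkout"):
    # Recreate <out_dir>/<repo_name>/<path> and write the flattened contents.
    dest = os.path.join(out_dir, row["repo_name"], row["path"])
    os.makedirs(os.path.dirname(dest), exist_ok=True)
    with open(dest, "w", encoding="utf-8") as fh:
        fh.write(row["content"])
    return dest

# Usage (rows is whatever iterable holds records like the one above):
# for row in rows:
#     write_row_to_tree(row)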
repo_name: komlenic/drubs
path: drubs/drubs.py
copies: 1
size: 9647
content:
import yaml import tasks from os.path import isfile, isdir, dirname, abspath, join, basename, normpath, realpath from os import getcwd from fabric.state import env, output from fabric.tasks import execute from fabric.colors import red, yellow, green, cyan from fabric.contrib.console import confirm from fabric.api import lcd from fabric.operations import local, prompt from pprint import pprint def load_config_file(config_file): ''' Returns yaml file contents as an object. Also sets the following fabric/global env vars: env.config_file - the supplied path to the project config file. Under typical usage without the -f parameter, this will be 'project.yml' env.config_dir - the absolute path to the project config directory env.config - the actual contents of the config file Accepts one parameter 'config_file': the relative or absolute path to a drubs project config file. ''' if isfile(config_file): env.config_file = config_file env.config_dir = dirname(abspath(config_file)) with open(config_file, 'r') as stream: env.config = yaml.load(stream) # If env.config evaluates to false, nothing parseable existed in the file. if not env.config: print(red("The project config file '%s' does not contain anything or is not valid. Exiting..." % (config_file))) exit(1) if 'nodes' not in env.config: print(red("The project config file '%s' does not contain a 'nodes' section. Exiting..." % (config_file))) exit(1) return env.config else: if config_file == 'project.yml': print(red("No project config file found in current working directory. Either run drubs from a directory containing a valid project config file (named 'project.yml'), or use '-f'.")) exit(1) else: print(red("The project config file '%s' does not exist or could not be read." % (config_file))) exit(1) def check_config_requirements_per_node(nodes): ''' Checks for required values per nodes supplied. ''' for node in nodes: if node not in env.config['nodes']: print(red("No node named '%s' found in drubs project config file '%s'. Exiting..." % (node, env.config_file))) exit(1) required_node_keys = [ 'account_mail', 'account_name', 'account_pass', 'backup_directory', 'backup_lifetime_days', 'backup_minimum_count', 'db_host', 'db_name', 'db_pass', 'db_user', 'destructive_action_protection', 'make_file', 'py_file', 'server_host', 'server_port', 'server_user', 'site_mail', 'site_name', 'site_root', ] for key in required_node_keys: if key not in env.config['nodes'][node]: print(red("No key named '%s' for node '%s' found. Exiting..." % (key, node))) exit(1) elif env.config['nodes'][node][key].strip() == '': print(red("No value for '%s' for node '%s' found. Exiting..." % (key, node))) exit(1) def get_fabric_hosts(nodes): ''' Gets fabric hosts from associated node names. Returns a list of fabric host strings (user@host:port) for the supplied nodes. Passing 'all' for nodes, returns a list of fabric host strings for all nodes found in the project's config file. ''' hosts = [] for node in nodes: user = env.config['nodes'][node]['server_user'].strip() host = env.config['nodes'][node]['server_host'].strip() port = env.config['nodes'][node]['server_port'].strip() host_string = '%s@%s:%s' % (user, host, port) hosts.append(host_string) return hosts def set_flags(args): env.verbose = args.verbose env.debug = args.debug env.cache = args.cache env.no_backup = args.no_backup env.no_restore = args.no_restore env.yes = args.yes # If --no-backup is set, also always set --no-restore. 
if env.no_backup: env.no_restore = True if args.fab_debug: output.debug = True def drubs(args): ''' Main entry point from __init__.py and argparser. ''' env.drubs_dir = dirname(abspath(__file__)) env.drubs_data_dir = join(env.drubs_dir, 'data') set_flags(args) if args.action == 'init': drubs_init(args) else: # Return error if more than one node is specified. if len(args.nodes) > 1: if args.action == 'status': print(red("More than one node parameter specified. Please specify exactly one node name (or the keyword 'all' to get the status of all nodes). Exiting...")) else: print(red("More than one node parameter specified. Please specify exactly one node name. Exiting...")) exit(1) # Return error if 'all' keyword is being attempted to be used on any action # other than 'status'. if args.action != 'status' and args.nodes[0] == 'all': print(red("Cannot use the keyword 'all' with the action '%s' Exiting..." % ( args.action, ) )) exit(1) load_config_file(args.file) # If 'all' has been supplied for the 'nodes' parameter, set 'nodes' to a # list of all nodes found in the project config file. if args.nodes[0] == 'all': args.nodes = env.config['nodes'].keys() check_config_requirements_per_node(args.nodes) # Build/set fabric host strings. hosts = get_fabric_hosts(args.nodes) # Execute the requested task on the specified hosts. For passing variable # task/action names to execute(), getattr() is used to load the tasks from # tasks.py. See: http://stackoverflow.com/questions/23605418/in-fabric- # how-%20can-i-execute-tasks-from-another-python-file execute(getattr(tasks, args.action), hosts=hosts) def drubs_init(args): ''' Stubs out project configuration files. @todo Make this work with -f argument. Presently it is only designed to work if pwd = the project config directory. With -f pwd should be able to be anything. ''' project = dict() if args.file == 'project.yml': # No -f option supplied (or 'project.yml' supplied to -f). project['location'] = realpath(normpath(getcwd())) project['config_filename'] = 'project.yml' else: # -f option supplied and not 'project.yml'. project['location'] = dirname(realpath(normpath(args.file))) project['config_filename'] = basename(realpath(normpath(args.file))) project['name'] = basename(normpath(project['location'])) project['config_file_abs_path'] = join(project['location'], project['config_filename']) # If file exists, ask for confirmation before overwriting. if isfile(args.file): if not confirm(yellow("STOP! A project config file named '%s' already exists. Overwrite?" % args.file), default=False): print(yellow('Exiting...')) exit(0) if not isdir(project['location']): if confirm(yellow("'%s' location does not already exist. Create it and proceed?") % project['location'], default=True): print(cyan("Creating '%s'...") % (project['location'])) local('mkdir -p %s' % (project['location'])) # Ask which drupal core version this project will use. prompt(yellow("What major version of Drupal will this project use? (6,7,8)"), key="drupal_core_version", validate=r'^[6,7,8]{1}$', default="7") # Create config file. print(cyan("Creating a new project config file named '%s' file in '%s' with node(s) %s..." 
% ( project['config_filename'], project['location'], args.nodes ))) node_output = dict() for node in args.nodes: node_output[node] = dict( db_host = 'localhost', db_name = project['name'], db_user = '', db_pass = '', destructive_action_protection = 'off', backup_directory = "", backup_lifetime_days = "30", backup_minimum_count = "3", server_host = '', site_root = '', server_user = '', server_port = '22', site_name = '', site_mail = '', account_name = 'admin', account_pass = '', account_mail = '', make_file = '%s.make' % (node), py_file = '%s.py' % (node), ) data = dict( nodes = node_output, project_settings = dict ( project_name = project['name'], drupal_core_version = env.drupal_core_version, central_config_repo = '', ) ) with open(project['config_file_abs_path'], 'w') as outfile: outfile.write('# Drubs config file\n') outfile.write(yaml.dump(data, default_flow_style=False, default_style='"')) with lcd(project['location']): # Create make files. print(cyan("Creating drush make files...")) for node in args.nodes: local('cp %s/templates/d%s.make %s.make' % ( env.drubs_data_dir, env.drupal_core_version, node )) # Create py files. print(cyan("Creating python files...")) for node in args.nodes: local('cp %s/templates/d%s.py %s.py' % ( env.drubs_data_dir, env.drupal_core_version, node )) # Make a 'files' directory. local('mkdir files') local('touch files/.gitignore') # Create a .gitignore file for the config repo. print(cyan("Setting up gitignore file...")) local('cp %s/templates/gitignore.txt .gitignore' % (env.drubs_data_dir)) # Create a new repository and commit all config files. print(cyan("Creating new repository for the project...")) local('git init') local('git add -A .') local('git commit -m "Initial commit." --author="Drubs <>" --quiet') print(green("Complete. Before proceeding with further operations such as installing this project, you should manually edit the configuration files (particularly '%s')." % (project['config_file_abs_path']))) exit(0)
license: gpl-2.0
hash: -1,242,480,681,415,232,500
line_mean: 32.849123
line_max: 209
alpha_frac: 0.638022
autogenerated: false
repo_name: zibawa/zibawa
path: zibawa/urls.py
copies: 1
size: 1400
content:
"""zibawa URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.10/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import url from django.contrib import admin from django.conf.urls import include from rest_framework import routers from rest_framework.documentation import include_docs_urls from IoT_pki import views router = routers.DefaultRouter() urlpatterns = [ url(r'^devices/', include('devices.urls',namespace='devices')), url(r'^front/', include('front.urls',namespace='front')), url(r'^admin/', admin.site.urls), url(r'^', include('front.urls')), url(r'^IoT_pki/', include('IoT_pki.urls',namespace='IoT_pki')), url(r'^', include(router.urls)), url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')), url(r'^docs/', include_docs_urls(title='zibawa_PKI')) ]
license: gpl-3.0
hash: -6,997,898,641,479,709,000
line_mean: 30.111111
line_max: 83
alpha_frac: 0.681429
autogenerated: false
repo_name: plotly/plotly.py
path: packages/python/plotly/plotly/validators/_splom.py
copies: 1
size: 12883
content:
import _plotly_utils.basevalidators class SplomValidator(_plotly_utils.basevalidators.CompoundValidator): def __init__(self, plotly_name="splom", parent_name="", **kwargs): super(SplomValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, data_class_str=kwargs.pop("data_class_str", "Splom"), data_docs=kwargs.pop( "data_docs", """ customdata Assigns extra data each datum. This may be useful when listening to hover, click and selection events. Note that, "scatter" traces also appends customdata items in the markers DOM elements customdatasrc Sets the source reference on Chart Studio Cloud for customdata . diagonal :class:`plotly.graph_objects.splom.Diagonal` instance or dict with compatible properties dimensions A tuple of :class:`plotly.graph_objects.splom.Dimension` instances or dicts with compatible properties dimensiondefaults When used in a template (as layout.template.data.splom.dimensiondefaults), sets the default property values to use for elements of splom.dimensions hoverinfo Determines which trace information appear on hover. If `none` or `skip` are set, no information is displayed upon hovering. But, if `none` is set, click and hover events are still fired. hoverinfosrc Sets the source reference on Chart Studio Cloud for hoverinfo . hoverlabel :class:`plotly.graph_objects.splom.Hoverlabel` instance or dict with compatible properties hovertemplate Template string used for rendering the information that appear on hover box. Note that this will override `hoverinfo`. Variables are inserted using %{variable}, for example "y: %{y}" as well as %{xother}, {%_xother}, {%_xother_}, {%xother_}. When showing info for several points, "xother" will be added to those with different x positions from the first point. An underscore before or after "(x|y)other" will add a space on that side, only when this field is shown. Numbers are formatted using d3-format's syntax %{variable:d3-format}, for example "Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api- reference/blob/master/Formatting.md#d3_format for details on the formatting syntax. Dates are formatted using d3-time-format's syntax %{variable|d3-time-format}, for example "Day: %{2019-01-01|%A}". https://github.com/d3/d3-time- format#locale_format for details on the date formatting syntax. The variables available in `hovertemplate` are the ones emitted as event data described at this link https://plotly.com/javascript/plotlyjs- events/#event-data. Additionally, every attributes that can be specified per-point (the ones that are `arrayOk: true`) are available. Anything contained in tag `<extra>` is displayed in the secondary box, for example "<extra>{fullData.name}</extra>". To hide the secondary box completely, use an empty tag `<extra></extra>`. hovertemplatesrc Sets the source reference on Chart Studio Cloud for hovertemplate . hovertext Same as `text`. hovertextsrc Sets the source reference on Chart Studio Cloud for hovertext . ids Assigns id labels to each datum. These ids for object constancy of data points during animation. Should be an array of strings, not numbers or any other type. idssrc Sets the source reference on Chart Studio Cloud for ids . legendgroup Sets the legend group for this trace. Traces part of the same legend group hide/show at the same time when toggling legend items. legendgrouptitle :class:`plotly.graph_objects.splom.Legendgroupt itle` instance or dict with compatible properties legendrank Sets the legend rank for this trace. 
Items and groups with smaller ranks are presented on top/left side while with `*reversed* `legend.traceorder` they are on bottom/right side. The default legendrank is 1000, so that you can use ranks less than 1000 to place certain items before all unranked items, and ranks greater than 1000 to go after all unranked items. marker :class:`plotly.graph_objects.splom.Marker` instance or dict with compatible properties meta Assigns extra meta information associated with this trace that can be used in various text attributes. Attributes such as trace `name`, graph, axis and colorbar `title.text`, annotation `text` `rangeselector`, `updatemenues` and `sliders` `label` text all support `meta`. To access the trace `meta` values in an attribute in the same trace, simply use `%{meta[i]}` where `i` is the index or key of the `meta` item in question. To access trace `meta` in layout attributes, use `%{data[n[.meta[i]}` where `i` is the index or key of the `meta` and `n` is the trace index. metasrc Sets the source reference on Chart Studio Cloud for meta . name Sets the trace name. The trace name appear as the legend item and on hover. opacity Sets the opacity of the trace. selected :class:`plotly.graph_objects.splom.Selected` instance or dict with compatible properties selectedpoints Array containing integer indices of selected points. Has an effect only for traces that support selections. Note that an empty array means an empty selection where the `unselected` are turned on for all points, whereas, any other non-array values means no selection all where the `selected` and `unselected` styles have no effect. showlegend Determines whether or not an item corresponding to this trace is shown in the legend. showlowerhalf Determines whether or not subplots on the lower half from the diagonal are displayed. showupperhalf Determines whether or not subplots on the upper half from the diagonal are displayed. stream :class:`plotly.graph_objects.splom.Stream` instance or dict with compatible properties text Sets text elements associated with each (x,y) pair to appear on hover. If a single string, the same string appears over all the data points. If an array of string, the items are mapped in order to the this trace's (x,y) coordinates. textsrc Sets the source reference on Chart Studio Cloud for text . uid Assign an id to this trace, Use this to provide object constancy between traces during animations and transitions. uirevision Controls persistence of some user-driven changes to the trace: `constraintrange` in `parcoords` traces, as well as some `editable: true` modifications such as `name` and `colorbar.title`. Defaults to `layout.uirevision`. Note that other user- driven trace attribute changes are controlled by `layout` attributes: `trace.visible` is controlled by `layout.legend.uirevision`, `selectedpoints` is controlled by `layout.selectionrevision`, and `colorbar.(x|y)` (accessible with `config: {editable: true}`) is controlled by `layout.editrevision`. Trace changes are tracked by `uid`, which only falls back on trace index if no `uid` is provided. So if your app can add/remove traces before the end of the `data` array, such that the same trace has a different index, you can still preserve user- driven changes if you give each trace a `uid` that stays with it as it moves. unselected :class:`plotly.graph_objects.splom.Unselected` instance or dict with compatible properties visible Determines whether or not this trace is visible. 
If "legendonly", the trace is not drawn, but can appear as a legend item (provided that the legend itself is visible). xaxes Sets the list of x axes corresponding to dimensions of this splom trace. By default, a splom will match the first N xaxes where N is the number of input dimensions. Note that, in case where `diagonal.visible` is false and `showupperhalf` or `showlowerhalf` is false, this splom trace will generate one less x-axis and one less y-axis. xhoverformat Sets the hover text formatting rulefor `x` using d3 formatting mini-languages which are very similar to those in Python. For numbers, see: https://github.com/d3/d3-3.x-api- reference/blob/master/Formatting.md#d3_format. And for dates see: https://github.com/d3/d3-time- format#locale_format. We add two items to d3's date formatter: "%h" for half of the year as a decimal number as well as "%{n}f" for fractional seconds with n digits. For example, *2016-10-13 09:15:23.456* with tickformat "%H~%M~%S.%2f" would display *09~15~23.46*By default the values are formatted using `xaxis.hoverformat`. yaxes Sets the list of y axes corresponding to dimensions of this splom trace. By default, a splom will match the first N yaxes where N is the number of input dimensions. Note that, in case where `diagonal.visible` is false and `showupperhalf` or `showlowerhalf` is false, this splom trace will generate one less x-axis and one less y-axis. yhoverformat Sets the hover text formatting rulefor `y` using d3 formatting mini-languages which are very similar to those in Python. For numbers, see: https://github.com/d3/d3-3.x-api- reference/blob/master/Formatting.md#d3_format. And for dates see: https://github.com/d3/d3-time- format#locale_format. We add two items to d3's date formatter: "%h" for half of the year as a decimal number as well as "%{n}f" for fractional seconds with n digits. For example, *2016-10-13 09:15:23.456* with tickformat "%H~%M~%S.%2f" would display *09~15~23.46*By default the values are formatted using `yaxis.hoverformat`. """, ), **kwargs )
license: mit
hash: 1,610,731,165,411,505,000
line_mean: 48.55
line_max: 70
alpha_frac: 0.54731
autogenerated: false
repo_name: nakagami/reportlab
path: src/reportlab/platypus/flowables.py
copies: 1
size: 68383
content:
#Copyright ReportLab Europe Ltd. 2000-2012 #see license.txt for license details #history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/platypus/flowables.py __version__=''' $Id: flowables.py 3959 2012-09-27 14:39:39Z robin $ ''' __doc__=""" A flowable is a "floating element" in a document whose exact position is determined by the other elements that precede it, such as a paragraph, a diagram interspersed between paragraphs, a section header, etcetera. Examples of non-flowables include page numbering annotations, headers, footers, fixed diagrams or logos, among others. Flowables are defined here as objects which know how to determine their size and which can draw themselves onto a page with respect to a relative "origin" position determined at a higher level. The object's draw() method should assume that (0,0) corresponds to the bottom left corner of the enclosing rectangle that will contain the object. The attributes vAlign and hAlign may be used by 'packers' as hints as to how the object should be placed. Some Flowables also know how to "split themselves". For example a long paragraph might split itself between one page and the next. Packers should set the canv attribute during wrap, split & draw operations to allow the flowable to work out sizes etc in the proper context. The "text" of a document usually consists mainly of a sequence of flowables which flow into a document from top to bottom (with column and page breaks controlled by higher level components). """ import os from copy import deepcopy, copy from reportlab.lib.colors import red, gray, lightgrey from reportlab.lib.utils import fp_str, isStrType from reportlab.lib.enums import TA_LEFT, TA_CENTER, TA_RIGHT, TA_JUSTIFY from reportlab.lib.styles import _baseFontName from reportlab.pdfbase import pdfutils from reportlab.pdfbase.pdfmetrics import stringWidth from reportlab.rl_config import _FUZZ, overlapAttachedSpace, ignoreContainerActions __all__=('TraceInfo','Flowable','XBox','Preformatted','Image','Spacer','PageBreak','SlowPageBreak', 'CondPageBreak','KeepTogether','Macro','CallerMacro','ParagraphAndImage', 'FailOnWrap','HRFlowable','PTOContainer','KeepInFrame','UseUpSpace', 'ListFlowable','ListItem','DDIndenter','LIIndenter', 'DocAssign', 'DocExec', 'DocAssert', 'DocPara', 'DocIf', 'DocWhile', ) class TraceInfo: "Holder for info about where an object originated" def __init__(self): self.srcFile = '(unknown)' self.startLineNo = -1 self.startLinePos = -1 self.endLineNo = -1 self.endLinePos = -1 ############################################################# # Flowable Objects - a base class and a few examples. # One is just a box to get some metrics. We also have # a paragraph, an image and a special 'page break' # object which fills the space. ############################################################# class Flowable: """Abstract base class for things to be drawn. Key concepts: 1. It knows its size 2. It draws in its own coordinate system (this requires the base API to provide a translate() function. """ _fixedWidth = 0 #assume wrap results depend on arguments? _fixedHeight = 0 def __init__(self): self.width = 0 self.height = 0 self.wrapped = 0 #these are hints to packers/frames as to how the floable should be positioned self.hAlign = 'LEFT' #CENTER/CENTRE or RIGHT self.vAlign = 'BOTTOM' #MIDDLE or TOP #optional holder for trace info self._traceInfo = None self._showBoundary = None #many flowables handle text and must be processed in the #absence of a canvas. 
tagging them with their encoding #helps us to get conversions right. Use Python codec names. self.encoding = None def _drawOn(self,canv): '''ensure canv is set on and then draw''' self.canv = canv self.draw()#this is the bit you overload del self.canv def _hAlignAdjust(self,x,sW=0): if sW and hasattr(self,'hAlign'): a = self.hAlign if a in ('CENTER','CENTRE', TA_CENTER): x += 0.5*sW elif a in ('RIGHT',TA_RIGHT): x += sW elif a not in ('LEFT',TA_LEFT): raise ValueError("Bad hAlign value "+str(a)) return x def drawOn(self, canvas, x, y, _sW=0): "Tell it to draw itself on the canvas. Do not override" x = self._hAlignAdjust(x,_sW) canvas.saveState() canvas.translate(x, y) self._drawOn(canvas) if hasattr(self, '_showBoundary') and self._showBoundary: #diagnostic tool support canvas.setStrokeColor(gray) canvas.rect(0,0,self.width, self.height) canvas.restoreState() def wrapOn(self, canv, aW, aH): '''intended for use by packers allows setting the canvas on during the actual wrap''' self.canv = canv w, h = self.wrap(aW,aH) del self.canv return w, h def wrap(self, availWidth, availHeight): """This will be called by the enclosing frame before objects are asked their size, drawn or whatever. It returns the size actually used.""" return (self.width, self.height) def minWidth(self): """This should return the minimum required width""" return getattr(self,'_minWidth',self.width) def splitOn(self, canv, aW, aH): '''intended for use by packers allows setting the canvas on during the actual split''' self.canv = canv S = self.split(aW,aH) del self.canv return S def split(self, availWidth, availheight): """This will be called by more sophisticated frames when wrap fails. Stupid flowables should return []. Clever flowables should split themselves and return a list of flowables. If they decide that nothing useful can be fitted in the available space (e.g. if you have a table and not enough space for the first row), also return []""" return [] def getKeepWithNext(self): """returns boolean determining whether the next flowable should stay with this one""" if hasattr(self,'keepWithNext'): return self.keepWithNext elif hasattr(self,'style') and hasattr(self.style,'keepWithNext'): return self.style.keepWithNext else: return 0 def getSpaceAfter(self): """returns how much space should follow this item if another item follows on the same page.""" if hasattr(self,'spaceAfter'): return self.spaceAfter elif hasattr(self,'style') and hasattr(self.style,'spaceAfter'): return self.style.spaceAfter else: return 0 def getSpaceBefore(self): """returns how much space should precede this item if another item precedess on the same page.""" if hasattr(self,'spaceBefore'): return self.spaceBefore elif hasattr(self,'style') and hasattr(self.style,'spaceBefore'): return self.style.spaceBefore else: return 0 def isIndexing(self): """Hook for IndexingFlowables - things which have cross references""" return 0 def identity(self, maxLen=None): ''' This method should attempt to return a string that can be used to identify a particular flowable uniquely. The result can then be used for debugging and or error printouts ''' if hasattr(self, 'getPlainText'): r = self.getPlainText(identify=1) elif hasattr(self, 'text'): r = str(self.text) else: r = '...' 
if r and maxLen: r = r[:maxLen] return "<%s at %s%s>%s" % (self.__class__.__name__, hex(id(self)), self._frameName(), r) def _doctemplateAttr(self,a): return getattr(getattr(getattr(self,'canv',None),'_doctemplate',None),a,None) def _frameName(self): f = getattr(self,'_frame',None) if not f: f = self._doctemplateAttr('frame') if f and f.id: return ' frame=%s' % f.id return '' class XBox(Flowable): """Example flowable - a box with an x through it and a caption. This has a known size, so does not need to respond to wrap().""" def __init__(self, width, height, text = 'A Box'): Flowable.__init__(self) self.width = width self.height = height self.text = text def __repr__(self): return "XBox(w=%s, h=%s, t=%s)" % (self.width, self.height, self.text) def draw(self): self.canv.rect(0, 0, self.width, self.height) self.canv.line(0, 0, self.width, self.height) self.canv.line(0, self.height, self.width, 0) #centre the text self.canv.setFont(_baseFontName,12) self.canv.drawCentredString(0.5*self.width, 0.5*self.height, self.text) def _trimEmptyLines(lines): #don't want the first or last to be empty while len(lines) and lines[0].strip() == '': lines = lines[1:] while len(lines) and lines[-1].strip() == '': lines = lines[:-1] return lines def _dedenter(text,dedent=0): ''' tidy up text - carefully, it is probably code. If people want to indent code within a source script, you can supply an arg to dedent and it will chop off that many character, otherwise it leaves left edge intact. ''' lines = text.split('\n') if dedent>0: templines = _trimEmptyLines(lines) lines = [] for line in templines: line = line[dedent:].rstrip() lines.append(line) else: lines = _trimEmptyLines(lines) return lines SPLIT_CHARS = "[{( ,.;:/\\-" def splitLines(lines, maximum_length, split_characters, new_line_characters): if split_characters is None: split_characters = SPLIT_CHARS if new_line_characters is None: new_line_characters = "" # Return a table of lines lines_splitted = [] for line in lines: if len(line) > maximum_length: splitLine(line, lines_splitted, maximum_length, \ split_characters, new_line_characters) else: lines_splitted.append(line) return lines_splitted def splitLine(line_to_split, lines_splitted, maximum_length, \ split_characters, new_line_characters): # Used to implement the characters added #at the beginning of each new line created first_line = True # Check if the text can be splitted while line_to_split and len(line_to_split)>0: # Index of the character where we can split split_index = 0 # Check if the line length still exceeds the maximum length if len(line_to_split) <= maximum_length: # Return the remaining of the line split_index = len(line_to_split) else: # Iterate for each character of the line for line_index in range(maximum_length): # Check if the character is in the list # of allowed characters to split on if line_to_split[line_index] in split_characters: split_index = line_index + 1 # If the end of the line was reached # with no character to split on if split_index==0: split_index = line_index + 1 if first_line: lines_splitted.append(line_to_split[0:split_index]) first_line = False maximum_length -= len(new_line_characters) else: lines_splitted.append(new_line_characters + \ line_to_split[0:split_index]) # Remaining text to split line_to_split = line_to_split[split_index:] class Preformatted(Flowable): """This is like the HTML <PRE> tag. It attempts to display text exactly as you typed it in a fixed width "typewriter" font. 
By default the line breaks are exactly where you put them, and it will not be wrapped. You can optionally define a maximum line length and the code will be wrapped; and extra characters to be inserted at the beginning of each wrapped line (e.g. '> '). """ def __init__(self, text, style, bulletText = None, dedent=0, maxLineLength=None, splitChars=None, newLineChars=""): """text is the text to display. If dedent is set then common leading space will be chopped off the front (for example if the entire text is indented 6 spaces or more then each line will have 6 spaces removed from the front). """ self.style = style self.bulletText = bulletText self.lines = _dedenter(text,dedent) if text and maxLineLength: self.lines = splitLines( self.lines, maxLineLength, splitChars, newLineChars ) def __repr__(self): bT = self.bulletText H = "Preformatted(" if bT is not None: H = "Preformatted(bulletText=%s," % repr(bT) return "%s'''\\ \n%s''')" % (H, '\n'.join(self.lines)) def wrap(self, availWidth, availHeight): self.width = availWidth self.height = self.style.leading*len(self.lines) return (self.width, self.height) def minWidth(self): style = self.style fontSize = style.fontSize fontName = style.fontName return max([stringWidth(line,fontName,fontSize) for line in self.lines]) def split(self, availWidth, availHeight): #returns two Preformatted objects #not sure why they can be called with a negative height if availHeight < self.style.leading: return [] linesThatFit = int(availHeight * 1.0 / self.style.leading) text1 = '\n'.join(self.lines[0:linesThatFit]) text2 = '\n'.join(self.lines[linesThatFit:]) style = self.style if style.firstLineIndent != 0: style = deepcopy(style) style.firstLineIndent = 0 return [Preformatted(text1, self.style), Preformatted(text2, style)] def draw(self): #call another method for historical reasons. Besides, I #suspect I will be playing with alternate drawing routines #so not doing it here makes it easier to switch. cur_x = self.style.leftIndent cur_y = self.height - self.style.fontSize self.canv.addLiteral('%PreformattedPara') if self.style.textColor: self.canv.setFillColor(self.style.textColor) tx = self.canv.beginText(cur_x, cur_y) #set up the font etc. tx.setFont( self.style.fontName, self.style.fontSize, self.style.leading) for text in self.lines: tx.textLine(text) self.canv.drawText(tx) class Image(Flowable): """an image (digital picture). Formats supported by PIL/Java 1.4 (the Python/Java Imaging Library are supported. At the present time images as flowables are always centered horozontally in the frame. We allow for two kinds of lazyness to allow for many images in a document which could lead to file handle starvation. lazy=1 don't open image until required. lazy=2 open image when required then shut it. 
""" _fixedWidth = 1 _fixedHeight = 1 def __init__(self, filename, width=None, height=None, kind='direct', mask="auto", lazy=1): """If size to draw at not specified, get it from the image.""" self.hAlign = 'CENTER' self._mask = mask fp = hasattr(filename,'read') if fp: self._file = filename self.filename = repr(filename) else: self._file = self.filename = filename if not fp and os.path.splitext(filename)[1] in ['.jpg', '.JPG', '.jpeg', '.JPEG']: # if it is a JPEG, will be inlined within the file - # but we still need to know its size now from reportlab.lib.utils import open_for_read f = open_for_read(filename, 'b') try: try: info = pdfutils.readJPEGInfo(f) except: #couldn't read as a JPEG, try like normal self._setup(width,height,kind,lazy) return finally: f.close() self.imageWidth = info[0] self.imageHeight = info[1] self._img = None self._setup(width,height,kind,0) elif fp: self._setup(width,height,kind,0) else: self._setup(width,height,kind,lazy) def _setup(self,width,height,kind,lazy): self._lazy = lazy self._width = width self._height = height self._kind = kind if lazy<=0: self._setup_inner() def _setup_inner(self): width = self._width height = self._height kind = self._kind img = self._img if img: self.imageWidth, self.imageHeight = img.getSize() if self._lazy>=2: del self._img if kind in ['direct','absolute']: self.drawWidth = width or self.imageWidth self.drawHeight = height or self.imageHeight elif kind in ['percentage','%']: self.drawWidth = self.imageWidth*width*0.01 self.drawHeight = self.imageHeight*height*0.01 elif kind in ['bound','proportional']: factor = min(float(width)/self.imageWidth,float(height)/self.imageHeight) self.drawWidth = self.imageWidth*factor self.drawHeight = self.imageHeight*factor def _restrictSize(self,aW,aH): if self.drawWidth>aW+_FUZZ or self.drawHeight>aH+_FUZZ: self._oldDrawSize = self.drawWidth, self.drawHeight factor = min(float(aW)/self.drawWidth,float(aH)/self.drawHeight) self.drawWidth *= factor self.drawHeight *= factor return self.drawWidth, self.drawHeight def _unRestrictSize(self): dwh = getattr(self,'_oldDrawSize',None) if dwh: self.drawWidth, self.drawHeight = dwh def __getattr__(self,a): if a=='_img': from reportlab.lib.utils import ImageReader #this may raise an error self._img = ImageReader(self._file) del self._file return self._img elif a in ('drawWidth','drawHeight','imageWidth','imageHeight'): self._setup_inner() return self.__dict__[a] raise AttributeError("<Image @ 0x%x>.%s" % (id(self),a)) def wrap(self, availWidth, availHeight): #the caller may decide it does not fit. return self.drawWidth, self.drawHeight def draw(self): lazy = self._lazy if lazy>=2: self._lazy = 1 self.canv.drawImage( self._img or self.filename, getattr(self,'_offs_x',0), getattr(self,'_offs_y',0), self.drawWidth, self.drawHeight, mask=self._mask, ) if lazy>=2: self._img = None self._lazy = lazy def identity(self,maxLen=None): r = Flowable.identity(self,maxLen) if r[-4:]=='>...' 
and isStrType(self.filename): r = "%s filename=%s>" % (r[:-4],self.filename) return r class NullDraw(Flowable): def draw(self): pass class Spacer(NullDraw): """A spacer just takes up space and doesn't draw anything - it guarantees a gap between objects.""" _fixedWidth = 1 _fixedHeight = 1 def __init__(self, width, height, isGlue=False): self.width = width if isGlue: self.height = 1e-4 self.spacebefore = height self.height = height def __repr__(self): return "%s(%s, %s)" % (self.__class__.__name__,self.width, self.height) class UseUpSpace(NullDraw): def __init__(self): pass def __repr__(self): return "%s()" % self.__class__.__name__ def wrap(self, availWidth, availHeight): self.width = availWidth self.height = availHeight return (availWidth,availHeight-1e-8) #step back a point class PageBreak(UseUpSpace): """Move on to the next page in the document. This works by consuming all remaining space in the frame!""" class SlowPageBreak(PageBreak): pass class CondPageBreak(Spacer): """use up a frame if not enough vertical space effectively CondFrameBreak""" def __init__(self, height): self.height = height def __repr__(self): return "CondPageBreak(%s)" %(self.height,) def wrap(self, availWidth, availHeight): if availHeight<self.height: f = self._doctemplateAttr('frame') if not f: return availWidth, availHeight from reportlab.platypus.doctemplate import FrameBreak f.add_generated_content(FrameBreak) return 0, 0 def identity(self,maxLen=None): return repr(self).replace(')',',frame=%s)'%self._frameName()) def _listWrapOn(F,availWidth,canv,mergeSpace=1,obj=None,dims=None): '''return max width, required height for a list of flowables F''' doct = getattr(canv,'_doctemplate',None) cframe = getattr(doct,'frame',None) if cframe: from reportlab.platypus.doctemplate import _addGeneratedContent doct_frame = cframe cframe = doct.frame = deepcopy(doct_frame) cframe._generated_content = None del cframe._generated_content try: W = 0 H = 0 pS = 0 atTop = 1 F = F[:] while F: f = F.pop(0) if hasattr(f,'frameAction'): continue w,h = f.wrapOn(canv,availWidth,0xfffffff) if dims is not None: dims.append((w,h)) if cframe: _addGeneratedContent(F,cframe) if w<=_FUZZ or h<=_FUZZ: continue W = max(W,w) H += h if not atTop: h = f.getSpaceBefore() if mergeSpace: h = max(h-pS,0) H += h else: if obj is not None: obj._spaceBefore = f.getSpaceBefore() atTop = 0 pS = f.getSpaceAfter() H += pS if obj is not None: obj._spaceAfter = pS return W, H-pS finally: if cframe: doct.frame = doct_frame def _flowableSublist(V): "if it isn't a list or tuple, wrap it in a list" if not isinstance(V,(list,tuple)): V = V is not None and [V] or [] from reportlab.platypus.doctemplate import LCActionFlowable assert not [x for x in V if isinstance(x,LCActionFlowable)],'LCActionFlowables not allowed in sublists' return V class _ContainerSpace: #Abstract some common container like behaviour def getSpaceBefore(self): for c in self._content: if not hasattr(c,'frameAction'): return c.getSpaceBefore() return 0 def getSpaceAfter(self,content=None): #this needs 2.4 #for c in reversed(content or self._content): reverseContent = (content or self._content)[:] reverseContent.reverse() for c in reverseContent: if not hasattr(c,'frameAction'): return c.getSpaceAfter() return 0 class KeepTogether(_ContainerSpace,Flowable): def __init__(self,flowables,maxHeight=None): self._content = _flowableSublist(flowables) self._maxHeight = maxHeight def __repr__(self): f = self._content L = map(repr,f) L = "\n"+"\n".join(L) L = L.replace("\n", "\n ") return "%s(%s,maxHeight=%s)" % 
(self.__class__.__name__,L,self._maxHeight) def wrap(self, aW, aH): dims = [] W,H = _listWrapOn(self._content,aW,self.canv,dims=dims) self._H = H self._H0 = dims and dims[0][1] or 0 self._wrapInfo = aW,aH return W, 0xffffff # force a split def split(self, aW, aH): if getattr(self,'_wrapInfo',None)!=(aW,aH): self.wrap(aW,aH) S = self._content[:] atTop = getattr(self,'_frame',None) if atTop: atTop = getattr(atTop,'_atTop',None) C0 = self._H>aH and (not self._maxHeight or aH>self._maxHeight) C1 = (self._H0>aH) or C0 and atTop if C0 or C1: if C0: from reportlab.platypus.doctemplate import FrameBreak A = FrameBreak else: from reportlab.platypus.doctemplate import NullActionFlowable A = NullActionFlowable S.insert(0,A()) return S def identity(self, maxLen=None): msg = "<%s at %s%s> containing :%s" % (self.__class__.__name__,hex(id(self)),self._frameName(),"\n".join([f.identity() for f in self._content])) if maxLen: return msg[0:maxLen] else: return msg class Macro(Flowable): """This is not actually drawn (i.e. it has zero height) but is executed when it would fit in the frame. Allows direct access to the canvas through the object 'canvas'""" def __init__(self, command): self.command = command def __repr__(self): return "Macro(%s)" % repr(self.command) def wrap(self, availWidth, availHeight): return (0,0) def draw(self): exec(self.command) in globals(), {'canvas':self.canv} class CallerMacro(Flowable): ''' like Macro, but with callable command(s) drawCallable(self) wrapCallable(self,aW,aH) ''' def __init__(self, drawCallable=None, wrapCallable=None): _ = lambda *args: None self._drawCallable = drawCallable or _ self._wrapCallable = wrapCallable or _ def __repr__(self): return "CallerMacro(%s)" % repr(self.command) def wrap(self, aW, aH): self._wrapCallable(self,aW,aH) return (0,0) def draw(self): self._drawCallable(self) class ParagraphAndImage(Flowable): '''combine a Paragraph and an Image''' def __init__(self,P,I,xpad=3,ypad=3,side='right'): self.P = P self.I = I self.xpad = xpad self.ypad = ypad self._side = side def getSpaceBefore(self): return max(self.P.getSpaceBefore(),self.I.getSpaceBefore()) def getSpaceAfter(self): return max(self.P.getSpaceAfter(),self.I.getSpaceAfter()) def wrap(self,availWidth,availHeight): wI, hI = self.I.wrap(availWidth,availHeight) self.wI = wI self.hI = hI # work out widths array for breaking self.width = availWidth P = self.P style = P.style xpad = self.xpad ypad = self.ypad leading = style.leading leftIndent = style.leftIndent later_widths = availWidth - leftIndent - style.rightIndent intermediate_widths = later_widths - xpad - wI first_line_width = intermediate_widths - style.firstLineIndent P.width = 0 nIW = int((hI+ypad)/(leading*1.0)) P.blPara = P.breakLines([first_line_width] + nIW*[intermediate_widths]+[later_widths]) if self._side=='left': self._offsets = [wI+xpad]*(1+nIW)+[0] P.height = len(P.blPara.lines)*leading self.height = max(hI,P.height) return (self.width, self.height) def split(self,availWidth, availHeight): P, wI, hI, ypad = self.P, self.wI, self.hI, self.ypad if hI+ypad>availHeight or len(P.frags)<=0: return [] S = P.split(availWidth,availHeight) if not S: return S P = self.P = S[0] del S[0] style = P.style P.height = len(self.P.blPara.lines)*style.leading self.height = max(hI,P.height) return [self]+S def draw(self): canv = self.canv if self._side=='left': self.I.drawOn(canv,0,self.height-self.hI) self.P._offsets = self._offsets try: self.P.drawOn(canv,0,0) finally: del self.P._offsets else: 
self.I.drawOn(canv,self.width-self.wI-self.xpad,self.height-self.hI) self.P.drawOn(canv,0,0) class FailOnWrap(NullDraw): def wrap(self, availWidth, availHeight): raise ValueError("FailOnWrap flowable wrapped and failing as ordered!") class FailOnDraw(Flowable): def wrap(self, availWidth, availHeight): return 0,0 def draw(self): raise ValueError("FailOnDraw flowable drawn, and failing as ordered!") class HRFlowable(Flowable): '''Like the hr tag''' def __init__(self, width="80%", thickness=1, lineCap='round', color=lightgrey, spaceBefore=1, spaceAfter=1, hAlign='CENTER', vAlign='BOTTOM', dash=None): Flowable.__init__(self) self.width = width self.lineWidth = thickness self.lineCap=lineCap self.spaceBefore = spaceBefore self.spaceAfter = spaceAfter self.color = color self.hAlign = hAlign self.vAlign = vAlign self.dash = dash def __repr__(self): return "HRFlowable(width=%s, height=%s)" % (self.width, self.height) def wrap(self, availWidth, availHeight): w = self.width if type(w) is type(''): w = w.strip() if w.endswith('%'): w = availWidth*float(w[:-1])*0.01 else: w = float(w) w = min(w,availWidth) self._width = w return w, self.lineWidth def draw(self): canv = self.canv canv.saveState() canv.setLineWidth(self.lineWidth) canv.setLineCap({'butt':0,'round':1, 'square': 2}[self.lineCap.lower()]) canv.setStrokeColor(self.color) if self.dash: canv.setDash(self.dash) canv.line(0, 0, self._width, self.height) canv.restoreState() class _PTOInfo: def __init__(self,trailer,header): self.trailer = _flowableSublist(trailer) self.header = _flowableSublist(header) def cdeepcopy(obj): if hasattr(obj,'deepcopy'): return obj.deepcopy() else: return deepcopy(obj) class _Container(_ContainerSpace): #Abstract some common container like behaviour def drawOn(self, canv, x, y, _sW=0, scale=1.0, content=None, aW=None): '''we simulate being added to a frame''' from reportlab.platypus.doctemplate import ActionFlowable pS = 0 if aW is None: aW = self.width aW *= scale if content is None: content = self._content x = self._hAlignAdjust(x,_sW*scale) y += self.height*scale for c in content: if not ignoreContainerActions and isinstance(c,ActionFlowable): c.apply(self.canv._doctemplate) continue w, h = c.wrapOn(canv,aW,0xfffffff) if (w<_FUZZ or h<_FUZZ) and not getattr(c,'_ZEROSIZE',None): continue if c is not content[0]: h += max(c.getSpaceBefore()-pS,0) y -= h c.drawOn(canv,x,y,_sW=aW-w) if c is not content[-1]: pS = c.getSpaceAfter() y -= pS def copyContent(self,content=None): C = [].append for c in (content or self._content): C(cdeepcopy(c)) self._content = C.__self__ class PTOContainer(_Container,Flowable): '''PTOContainer(contentList,trailerList,headerList) A container for flowables decorated with trailer & header lists. If the split operation would be called then the trailer and header lists are injected before and after the split. 
This allows specialist "please turn over" and "continued from previous" like behaviours.''' def __init__(self,content,trailer=None,header=None): I = _PTOInfo(trailer,header) self._content = C = [] for _ in _flowableSublist(content): if isinstance(_,PTOContainer): C.extend(_._content) else: C.append(_) if not hasattr(_,'_ptoinfo'): _._ptoinfo = I def wrap(self,availWidth,availHeight): self.width, self.height = _listWrapOn(self._content,availWidth,self.canv) return self.width,self.height def split(self, availWidth, availHeight): if availHeight<0: return [] canv = self.canv C = self._content x = i = H = pS = hx = 0 n = len(C) I2W = {} for x in range(n): c = C[x] I = c._ptoinfo if I not in I2W.keys(): T = I.trailer Hdr = I.header tW, tH = _listWrapOn(T, availWidth, self.canv) if len(T): #trailer may have no content tSB = T[0].getSpaceBefore() else: tSB = 0 I2W[I] = T,tW,tH,tSB else: T,tW,tH,tSB = I2W[I] _, h = c.wrapOn(canv,availWidth,0xfffffff) if x: hx = max(c.getSpaceBefore()-pS,0) h += hx pS = c.getSpaceAfter() H += h+pS tHS = tH+max(tSB,pS) if H+tHS>=availHeight-_FUZZ: break i += 1 #first retract last thing we tried H -= (h+pS) #attempt a sub split on the last one we have aH = (availHeight-H-tHS-hx)*0.99999 if aH>=0.05*availHeight: SS = c.splitOn(canv,availWidth,aH) else: SS = [] if not SS: j = i while i>1 and C[i-1].getKeepWithNext(): i -= 1 C[i].keepWithNext = 0 if i==1 and C[0].getKeepWithNext(): #robin's black sheep i = j C[0].keepWithNext = 0 F = [UseUpSpace()] if len(SS)>1: R1 = C[:i] + SS[:1] + T + F R2 = Hdr + SS[1:]+C[i+1:] elif not i: return [] else: R1 = C[:i]+T+F R2 = Hdr + C[i:] T = R1 + [PTOContainer(R2,[copy(x) for x in I.trailer],[copy(x) for x in I.header])] return T #utility functions used by KeepInFrame def _hmodel(s0,s1,h0,h1): # calculate the parameters in the model # h = a/s**2 + b/s a11 = 1./s0**2 a12 = 1./s0 a21 = 1./s1**2 a22 = 1./s1 det = a11*a22-a12*a21 b11 = a22/det b12 = -a12/det b21 = -a21/det b22 = a11/det a = b11*h0+b12*h1 b = b21*h0+b22*h1 return a,b def _qsolve(h,ab): '''solve the model v = a/s**2 + b/s for an s which gives us v==h''' a,b = ab if abs(a)<=_FUZZ: return b/h t = 0.5*b/a from math import sqrt f = -h/a r = t*t-f if r<0: return None r = sqrt(r) if t>=0: s1 = -t - r else: s1 = -t + r s2 = f/s1 return max(1./s1, 1./s2) class KeepInFrame(_Container,Flowable): def __init__(self, maxWidth, maxHeight, content=[], mergeSpace=1, mode='shrink', name='',hAlign='LEFT',vAlign='BOTTOM'): '''mode describes the action to take when overflowing error raise an error in the normal way continue ignore ie just draw it and report maxWidth, maxHeight shrink shrinkToFit truncate fit as much as possible ''' self.name = name self.maxWidth = maxWidth self.maxHeight = maxHeight self.mode = mode assert mode in ('error','overflow','shrink','truncate'), '%s invalid mode value %s' % (self.identity(),mode) assert maxHeight>=0, '%s invalid maxHeight value %s' % (self.identity(),maxHeight) if mergeSpace is None: mergeSpace = overlapAttachedSpace self.mergespace = mergeSpace self._content = content or [] self.vAlign = vAlign self.hAlign = hAlign def _getAvailableWidth(self): return self.maxWidth - self._leftExtraIndent - self._rightExtraIndent def identity(self, maxLen=None): return "<%s at %s%s%s> size=%sx%s" % (self.__class__.__name__, hex(id(self)), self._frameName(), getattr(self,'name','') and (' name="%s"'% getattr(self,'name','')) or '', getattr(self,'maxWidth','') and (' maxWidth=%s'%fp_str(getattr(self,'maxWidth',0))) or '', getattr(self,'maxHeight','')and (' 
maxHeight=%s' % fp_str(getattr(self,'maxHeight')))or '') def wrap(self,availWidth,availHeight): from reportlab.platypus.doctemplate import LayoutError mode = self.mode maxWidth = float(min(self.maxWidth or availWidth,availWidth)) maxHeight = float(min(self.maxHeight or availHeight,availHeight)) W, H = _listWrapOn(self._content,maxWidth,self.canv) if (mode=='error' and (W>maxWidth+_FUZZ or H>maxHeight+_FUZZ)): ident = 'content %sx%s too large for %s' % (W,H,self.identity(30)) #leave to keep apart from the raise raise LayoutError(ident) elif W<=maxWidth+_FUZZ and H<=maxHeight+_FUZZ: self.width = W-_FUZZ #we take what we get self.height = H-_FUZZ elif mode in ('overflow','truncate'): #we lie self.width = min(maxWidth,W)-_FUZZ self.height = min(maxHeight,H)-_FUZZ else: def func(x): W, H = _listWrapOn(self._content,x*maxWidth,self.canv) W /= x H /= x return W, H W0 = W H0 = H s0 = 1 if W>maxWidth+_FUZZ: #squeeze out the excess width and or Height s1 = W/maxWidth W, H = func(s1) if H<=maxHeight+_FUZZ: self.width = W-_FUZZ self.height = H-_FUZZ self._scale = s1 return W,H s0 = s1 H0 = H W0 = W s1 = H/maxHeight W, H = func(s1) self.width = W-_FUZZ self.height = H-_FUZZ self._scale = s1 if H<min(0.95*maxHeight,maxHeight-10) or H>=maxHeight+_FUZZ: #the standard case W should be OK, H is short we want #to find the smallest s with H<=maxHeight H1 = H for f in 0, 0.01, 0.05, 0.10, 0.15: #apply the quadratic model s = _qsolve(maxHeight*(1-f),_hmodel(s0,s1,H0,H1)) W, H = func(s) if H<=maxHeight+_FUZZ and W<=maxWidth+_FUZZ: self.width = W-_FUZZ self.height = H-_FUZZ self._scale = s break return self.width, self.height def drawOn(self, canv, x, y, _sW=0): scale = getattr(self,'_scale',1.0) truncate = self.mode=='truncate' ss = scale!=1.0 or truncate if ss: canv.saveState() if truncate: p = canv.beginPath() p.rect(x, y, self.width,self.height) canv.clipPath(p,stroke=0) else: canv.translate(x,y) x=y=0 canv.scale(1.0/scale, 1.0/scale) _Container.drawOn(self, canv, x, y, _sW=_sW, scale=scale) if ss: canv.restoreState() class ImageAndFlowables(_Container,Flowable): '''combine a list of flowables and an Image''' def __init__(self,I,F,imageLeftPadding=0,imageRightPadding=3,imageTopPadding=0,imageBottomPadding=3, imageSide='right', imageHref=None): self._content = _flowableSublist(F) self._I = I self._irpad = imageRightPadding self._ilpad = imageLeftPadding self._ibpad = imageBottomPadding self._itpad = imageTopPadding self._side = imageSide self.imageHref = imageHref def deepcopy(self): c = copy(self) #shallow self._reset() c.copyContent() #partially deep? 
return c def getSpaceAfter(self): if hasattr(self,'_C1'): C = self._C1 elif hasattr(self,'_C0'): C = self._C0 else: C = self._content return _Container.getSpaceAfter(self,C) def getSpaceBefore(self): return max(self._I.getSpaceBefore(),_Container.getSpaceBefore(self)) def _reset(self): for a in ('_wrapArgs','_C0','_C1'): try: delattr(self,a) except: pass def wrap(self,availWidth,availHeight): canv = self.canv I = self._I if hasattr(self,'_wrapArgs'): if self._wrapArgs==(availWidth,availHeight) and getattr(I,'_oldDrawSize',None) is None: return self.width,self.height self._reset() I._unRestrictSize() self._wrapArgs = availWidth, availHeight I.wrap(availWidth,availHeight) wI, hI = I._restrictSize(availWidth,availHeight) self._wI = wI self._hI = hI ilpad = self._ilpad irpad = self._irpad ibpad = self._ibpad itpad = self._itpad self._iW = availWidth - irpad - wI - ilpad aH = itpad + hI + ibpad W,H0,self._C0,self._C1 = self._findSplit(canv,self._iW,aH) if W>self._iW+_FUZZ: self._C0 = [] self._C1 = self._content aH = self._aH = max(aH,H0) self.width = availWidth if not self._C1: self.height = aH else: W1,H1 = _listWrapOn(self._C1,availWidth,canv) self.height = aH+H1 return self.width, self.height def split(self,availWidth, availHeight): if hasattr(self,'_wrapArgs'): I = self._I if self._wrapArgs!=(availWidth,availHeight) or getattr(I,'_oldDrawSize',None) is not None: self._reset() I._unRestrictSize() W,H=self.wrap(availWidth,availHeight) if self._aH>availHeight: return [] C1 = self._C1 if C1: S = C1[0].split(availWidth,availHeight-self._aH) if not S: _C1 = [] else: _C1 = [S[0]] C1 = S[1:]+C1[1:] else: _C1 = [] return [ImageAndFlowables( self._I, self._C0+_C1, imageLeftPadding=self._ilpad, imageRightPadding=self._irpad, imageTopPadding=self._itpad, imageBottomPadding=self._ibpad, imageSide=self._side, imageHref=self.imageHref) ]+C1 def drawOn(self, canv, x, y, _sW=0): if self._side=='left': Ix = x + self._ilpad Fx = Ix+ self._irpad + self._wI else: Ix = x + self.width-self._wI-self._irpad Fx = x self._I.drawOn(canv,Ix,y+self.height-self._itpad-self._hI) if self.imageHref: canv.linkURL(self.imageHref, (Ix, y+self.height-self._itpad-self._hI, Ix + self._wI, y+self.height), relative=1) if self._C0: _Container.drawOn(self, canv, Fx, y, content=self._C0, aW=self._iW) if self._C1: _Container.drawOn(self, canv, x, y-self._aH,content=self._C1) def _findSplit(self,canv,availWidth,availHeight,mergeSpace=1,obj=None): '''return max width, required height for a list of flowables F''' W = 0 H = 0 pS = sB = 0 atTop = 1 F = self._content for i,f in enumerate(F): w,h = f.wrapOn(canv,availWidth,0xfffffff) if w<=_FUZZ or h<=_FUZZ: continue W = max(W,w) if not atTop: s = f.getSpaceBefore() if mergeSpace: s = max(s-pS,0) H += s else: if obj is not None: obj._spaceBefore = f.getSpaceBefore() atTop = 0 if H>=availHeight or w>availWidth: return W, availHeight, F[:i],F[i:] H += h if H>availHeight: from reportlab.platypus.paragraph import Paragraph aH = availHeight-(H-h) if isinstance(f,(Paragraph,Preformatted)): leading = f.style.leading nH = leading*int(aH/float(leading))+_FUZZ if nH<aH: nH += leading availHeight += nH-aH aH = nH S = cdeepcopy(f).splitOn(canv,availWidth,aH) if not S: return W, availHeight, F[:i],F[i:] else: return W,availHeight,F[:i]+S[:1],S[1:]+F[i+1:] pS = f.getSpaceAfter() H += pS if obj is not None: obj._spaceAfter = pS return W, H-pS, F, [] class AnchorFlowable(Spacer): '''create a bookmark in the pdf''' _ZEROSIZE=1 def __init__(self,name): Spacer.__init__(self,0,0) self._name = name def 
__repr__(self): return "%s(%s)" % (self.__class__.__name__,self._name) def wrap(self,aW,aH): return 0,0 def draw(self): self.canv.bookmarkHorizontal(self._name,0,0) class FrameSplitter(NullDraw): '''When encountered this flowable should either switch directly to nextTemplate if remaining space in the current frame is less than gap+required or it should temporarily modify the current template to have the frames from nextTemplate that are listed in nextFrames and switch to the first of those frames. ''' _ZEROSIZE=1 def __init__(self,nextTemplate,nextFrames=[],gap=10,required=72): self.nextTemplate=nextTemplate self.nextFrames=nextFrames or [] self.gap=gap self.required=required def wrap(self,aW,aH): frame = self._frame from reportlab.platypus.doctemplate import NextPageTemplate,CurrentFrameFlowable,LayoutError G=[NextPageTemplate(self.nextTemplate)] if aH<self.gap+self.required-_FUZZ: #we are going straight to the nextTemplate with no attempt to modify the frames G.append(PageBreak()) else: #we are going to modify the incoming templates templates = self._doctemplateAttr('pageTemplates') if templates is None: raise LayoutError('%s called in non-doctemplate environment'%self.identity()) T=[t for t in templates if t.id==self.nextTemplate] if not T: raise LayoutError('%s.nextTemplate=%s not found' % (self.identity(),self.nextTemplate)) T=T[0] F=[f for f in T.frames if f.id in self.nextFrames] N=[f.id for f in F] N=[f for f in self.nextFrames if f not in N] if N: raise LayoutError('%s frames=%r not found in pageTemplate(%s)\n%r has frames %r' % (self.identity(),N,T.id,T,[f.id for f in T.frames])) T=self._doctemplateAttr('pageTemplate') def unwrap(canv,doc,T=T,onPage=T.onPage,oldFrames=T.frames): T.frames=oldFrames T.onPage=onPage onPage(canv,doc) T.onPage=unwrap h=aH-self.gap for i,f in enumerate(F): f=copy(f) f.height=h f._reset() F[i]=f T.frames=F G.append(CurrentFrameFlowable(F[0].id)) frame.add_generated_content(*G) return 0,0 from reportlab.lib.sequencer import _type2formatter _bulletNames = dict( circle=u'\u25cf', square=u'\u25a0', disc=u'\u25cf', diamond=u'\u25c6', rarrowhead=u'\u27a4', ) def _bulletFormat(value,type='1',format=None): if type=='bullet': s = _bulletNames.get(value,value) else: s = _type2formatter[type](int(value)) if format: if isinstance(format,basestring): s = format % s elif callable(format): s = format(s) else: raise ValueError('unexpected BulletDrawer format %r' % format) return s class BulletDrawer: def __init__(self, value='0', bulletAlign='left', bulletType='1', bulletColor='black', bulletFontName='Helvetica', bulletFontSize=12, bulletOffsetY=0, bulletDedent=0, bulletDir='ltr', bulletFormat=None, ): self.value = value self._bulletAlign = bulletAlign self._bulletType = bulletType self._bulletColor = bulletColor self._bulletFontName = bulletFontName self._bulletFontSize = bulletFontSize self._bulletOffsetY = bulletOffsetY self._bulletDedent = bulletDedent self._bulletDir = bulletDir self._bulletFormat = bulletFormat def drawOn(self,indenter,canv,x,y,_sW=0): value = self.value if not value: return canv.saveState() canv.translate(x, y) y = indenter.height-self._bulletFontSize+self._bulletOffsetY if self._bulletDir=='rtl': x = indenter.width - indenter._rightIndent + self._bulletDedent else: x = indenter._leftIndent - self._bulletDedent canv.setFont(self._bulletFontName,self._bulletFontSize) canv.setFillColor(self._bulletColor) bulletAlign = self._bulletAlign value = _bulletFormat(value,self._bulletType,self._bulletFormat) if bulletAlign=='left': 
canv.drawString(x,y,value) elif bulletAlign=='right': canv.drawRightString(x,y,value) elif bulletAlign in ('center','centre'): canv.drawCentredString(x,y,value) elif bulletAlign.startswith('numeric') or bulletAlign.startswith('decimal'): pc = bulletAlign[7:].strip() or '.' canv.drawAlignedString(x,y,value,pc) else: raise ValueError('Invalid bulletAlign: %r' % bulletAlign) canv.restoreState() def _computeBulletWidth(b,value): value = _bulletFormat(value,b._bulletType,b._bulletFormat) return stringWidth(value,b._bulletFontName,b._bulletFontSize) class DDIndenter(Flowable): _IndenterAttrs = '_flowable _leftIndent _rightIndent width height'.split() def __init__(self,flowable,leftIndent=0,rightIndent=0): self._flowable = flowable self._leftIndent = leftIndent self._rightIndent = rightIndent self.width = None self.height = None def split(self, aW, aH): S = self._flowable.split(aW-self._leftIndent-self._rightIndent, aH) return [ DDIndenter(s, leftIndent=self._leftIndent, rightIndent=self._rightIndent, ) for s in S ] def drawOn(self, canv, x, y, _sW=0): self._flowable.drawOn(canv,x+self._leftIndent,y,max(0,_sW-self._leftIndent-self._rightIndent)) def wrap(self, aW, aH): w,h = self._flowable.wrap(aW-self._leftIndent-self._rightIndent, aH) self.width = w+self._leftIndent+self._rightIndent self.height = h return self.width,h def __getattr__(self,a): if a in self._IndenterAttrs: try: return self.__dict__[a] except KeyError: if a not in ('spaceBefore','spaceAfter'): raise return getattr(self._flowable,a) def __setattr__(self,a,v): if a in self._IndenterAttrs: self.__dict__[a] = v else: setattr(self._flowable,a,v) def __delattr__(self,a): if a in self._IndenterAttrs: del self.__dict__[a] else: delattr(self._flowable,a) def identity(self,maxLen=None): return '%s containing %s' % (self.__class__.__name__,self._flowable.identity(maxLen)) class LIIndenter(DDIndenter): _IndenterAttrs = '_flowable _bullet _leftIndent _rightIndent width height spaceBefore spaceAfter'.split() def __init__(self,flowable,leftIndent=0,rightIndent=0,bullet=None, spaceBefore=None, spaceAfter=None): self._flowable = flowable self._bullet = bullet self._leftIndent = leftIndent self._rightIndent = rightIndent self.width = None self.height = None if spaceBefore is not None: self.spaceBefore = spaceBefore if spaceAfter is not None: self.spaceAfter = spaceAfter def split(self, aW, aH): S = self._flowable.split(aW-self._leftIndent-self._rightIndent, aH) return [ LIIndenter(s, leftIndent=self._leftIndent, rightIndent=self._rightIndent, bullet = (s is S[0] and self._bullet or None), ) for s in S ] def drawOn(self, canv, x, y, _sW=0): if self._bullet: self._bullet.drawOn(self,canv,x,y,0) self._flowable.drawOn(canv,x+self._leftIndent,y,max(0,_sW-self._leftIndent-self._rightIndent)) from reportlab.lib.styles import ListStyle class ListItem: def __init__(self, flowables, #the initial flowables style=None, #leftIndent=18, #rightIndent=0, #spaceBefore=None, #spaceAfter=None, #bulletType='1', #bulletColor='black', #bulletFontName='Helvetica', #bulletFontSize=12, #bulletOffsetY=0, #bulletDedent='auto', #bulletDir='ltr', #bulletFormat=None, **kwds ): if not isinstance(flowables,(list,tuple)): flowables = (flowables,) self._flowables = flowables params = self._params = {} if style: if not isinstance(style,ListStyle): raise ValueError('%s style argument (%r) not a ListStyle' % (self.__class__.__name__,style)) self._style = style for k in ListStyle.defaults: if k in kwds: v = kwds.get(k) elif style: v = getattr(style,k) else: continue params[k] = v for 
k in ('value', 'spaceBefore','spaceAfter'): v = kwds.get(k,getattr(style,k,None)) if v is not None: params[k] = v class _LIParams: def __init__(self,flowable,params,value,first): self.flowable = flowable self.params = params self.value = value self.first= first class ListFlowable(_Container,Flowable): def __init__(self, flowables, #the initial flowables start=1, style=None, #leftIndent=18, #rightIndent=0, #spaceBefore=None, #spaceAfter=None, #bulletType='1', #bulletColor='black', #bulletFontName='Helvetica', #bulletFontSize=12, #bulletOffsetY=0, #bulletDedent='auto', #bulletDir='ltr', #bulletFormat=None, **kwds ): self._flowables = flowables if style: if not isinstance(style,ListStyle): raise ValueError('%s style argument not a ListStyle' % self.__class__.__name__) self.style = style for k,v in ListStyle.defaults.items(): setattr(self,'_'+k,kwds.get(k,getattr(style,k,v))) if start is None: start = getattr(self,'_start',None) if start is None: if getattr(self,'_bulletType','1')=='bullet': start = 'circle' else: start = '1' self._start = start for k in ('spaceBefore','spaceAfter'): v = kwds.get(k,getattr(style,k,None)) if v is not None: setattr(self,k,v) self._content = self._getContent() del self._flowables self._dims = None def wrap(self,aW,aH): if self._dims!=aW: self.width, self.height = _listWrapOn(self._content,aW,self.canv) self._dims = aW return self.width,self.height def split(self,aW,aH): return self._content def _flowablesIter(self): for f in self._flowables: if isinstance(f,(list,tuple)): if f: for i, z in enumerate(f): yield i==0 and not isinstance(z,LIIndenter), z elif isinstance(f,ListItem): params = f._params if not params: #meerkat simples just a list like object for i, z in enumerate(f._flowables): if isinstance(z,LIIndenter): raise ValueError('LIIndenter not allowed in ListItem') yield i==0, z else: params = params.copy() value = params.pop('value',None) spaceBefore = params.pop('spaceBefore',None) spaceAfter = params.pop('spaceAfter',None) n = len(f._flowables) - 1 for i, z in enumerate(f._flowables): P = params.copy() if not i and spaceBefore is not None: P['spaceBefore'] = spaceBefore if i==n and spaceAfter is not None: P['spaceAfter'] = spaceAfter if i: value=None yield 0, _LIParams(z,P,value,i==0) else: yield not isinstance(f,LIIndenter), f def _makeLIIndenter(self,flowable, bullet, params=None): if params: leftIndent = params.get('leftIndent',self._leftIndent) rightIndent = params.get('rightIndent',self._rightIndent) spaceBefore = params.get('spaceBefore',None) spaceAfter = params.get('spaceAfter',None) return LIIndenter(flowable,leftIndent,rightIndent,bullet,spaceBefore=spaceBefore,spaceAfter=spaceAfter) else: return LIIndenter(flowable,self._leftIndent,self._rightIndent,bullet) def _makeBullet(self,value,params=None): if params is None: def getp(a): return getattr(self,'_'+a) else: style = getattr(params,'style',None) def getp(a): if a in params: return params[a] if style and a in style.__dict__: return getattr(self,a) return getattr(self,'_'+a) return BulletDrawer( value=value, bulletAlign=getp('bulletAlign'), bulletType=getp('bulletType'), bulletColor=getp('bulletColor'), bulletFontName=getp('bulletFontName'), bulletFontSize=getp('bulletFontSize'), bulletOffsetY=getp('bulletOffsetY'), bulletDedent=getp('calcBulletDedent'), bulletDir=getp('bulletDir'), bulletFormat=getp('bulletFormat'), ) def _getContent(self): value = self._start bt = self._bulletType inc = int(bt in '1aAiI') if inc: value = int(value) bd = self._bulletDedent if bd=='auto': align = 
self._bulletAlign dir = self._bulletDir if dir=='ltr' and align=='left': bd = self._leftIndent elif align=='right': bd = self._rightIndent else: #we need to work out the maximum width of any of the labels tvalue = value maxW = 0 for d,f in self._flowablesIter(): if d: maxW = max(maxW,_computeBulletWidth(self,tvalue)) if inc: tvalue += inc elif isinstance(f,LIIndenter): b = f._bullet if b: if b.bulletType==bt: maxW = max(maxW,_computeBulletWidth(b,b.value)) tvalue = int(b.value) else: maxW = max(maxW,_computeBulletWidth(self,tvalue)) if inc: tvalue += inc if dir=='ltr': if align=='right': bd = self._leftIndent - maxW else: bd = self._leftIndent - maxW*0.5 elif align=='left': bd = self._rightIndent - maxW else: bd = self._rightIndent - maxW*0.5 self._calcBulletDedent = bd S = [] aS = S.append i=0 for d,f in self._flowablesIter(): fparams = {} if not i: i += 1 spaceBefore = getattr(self,'spaceBefore',None) if spaceBefore is not None: fparams['spaceBefore'] = spaceBefore if d: aS(self._makeLIIndenter(f,bullet=self._makeBullet(value),params=fparams)) if inc: value += inc elif isinstance(f,LIIndenter): b = f._bullet if b: if b.bulletType!=bt: raise ValueError('Included LIIndenter bulletType=%s != OrderedList bulletType=%s' % (b.bulletType,bt)) value = int(b.value) else: f._bullet = self._makeBullet(value,params=getattr(f,'params',None)) if fparams: f.__dict__['spaceBefore'] = max(f.__dict__.get('spaceBefore',0),spaceBefore) aS(f) if inc: value += inc elif isinstance(f,_LIParams): fparams.update(f.params) z = self._makeLIIndenter(f.flowable,bullet=None,params=fparams) if f.first: if f.value is not None: value = f.value if inc: value = int(value) z._bullet = self._makeBullet(value,f.params) if inc: value += inc aS(z) else: aS(self._makeLIIndenter(f,bullet=None,params=fparams)) spaceAfter = getattr(self,'spaceAfter',None) if spaceAfter is not None: f=S[-1] f.__dict__['spaceAfter'] = max(f.__dict__.get('spaceAfter',0),spaceAfter) return S class TopPadder(Flowable): '''wrap a single flowable so that its first bit will be padded to fill out the space so that it appears at the bottom of its frame''' def __init__(self,f): self.__dict__['_TopPadder__f'] = f def wrap(self,aW,aH): w,h = self.__f.wrap(aW,aH) self.__dict__['_TopPadder__dh'] = aH-h return w,h def split(self,aW,aH): S = self.__f.split(aW,aH) if len(S)>1: S[0] = TopPadder(S[0]) return S def drawOn(self, canvas, x, y, _sW=0): self.__f.drawOn(canvas,x,y-max(0,self.__dh-1e-8),_sW) def __setattr__(self,a,v): setattr(self.__f,a,v) def __getattr__(self,a): return getattr(self.__f,a) def __delattr__(self,a): delattr(self.__f,a) class DocAssign(NullDraw): '''At wrap time this flowable evaluates var=expr in the doctemplate namespace''' _ZEROSIZE=1 def __init__(self,var,expr,life='forever'): Flowable.__init__(self) self.args = var,expr,life def funcWrap(self,aW,aH): NS=self._doctemplateAttr('_nameSpace') NS.update(dict(availableWidth=aW,availableHeight=aH)) try: return self.func() finally: for k in 'availableWidth','availableHeight': try: del NS[k] except: pass def func(self): return self._doctemplateAttr('d'+self.__class__.__name__[1:])(*self.args) def wrap(self,aW,aH): self.funcWrap(aW,aH) return 0,0 class DocExec(DocAssign): '''at wrap time exec stmt in doc._nameSpace''' def __init__(self,stmt,lifetime='forever'): Flowable.__init__(self) self.args=stmt,lifetime class DocPara(DocAssign): '''at wrap time create a paragraph with the value of expr as text if format is specified it should use %(__expr__)s for string interpolation of the expression expr (if 
any). It may also use %(name)s interpolations for other variables in the namespace. suitable defaults will be used if style and klass are None ''' def __init__(self,expr,format=None,style=None,klass=None,escape=True): Flowable.__init__(self) self.expr=expr self.format=format self.style=style self.klass=klass self.escape=escape def func(self): expr = self.expr if expr: if not isStrType(expr): expr = str(expr) return self._doctemplateAttr('docEval')(expr) def add_content(self,*args): self._doctemplateAttr('frame').add_generated_content(*args) def get_value(self,aW,aH): value = self.funcWrap(aW,aH) if self.format: NS=self._doctemplateAttr('_nameSpace').copy() NS.update(dict(availableWidth=aW,availableHeight=aH)) NS['__expr__'] = value value = self.format % NS else: value = str(value) return value def wrap(self,aW,aH): value = self.get_value(aW,aH) P = self.klass if not P: from reportlab.platypus.paragraph import Paragraph as P style = self.style if not style: from reportlab.lib.styles import getSampleStyleSheet style=getSampleStyleSheet()['Code'] if self.escape: from xml.sax.saxutils import escape value=escape(value) self.add_content(P(value,style=style)) return 0,0 class DocAssert(DocPara): def __init__(self,cond,format=None): Flowable.__init__(self) self.expr=cond self.format=format def funcWrap(self,aW,aH): self._cond = DocPara.funcWrap(self,aW,aH) return self._cond def wrap(self,aW,aH): value = self.get_value(aW,aH) if not bool(self._cond): raise AssertionError(value) return 0,0 class DocIf(DocPara): def __init__(self,cond,thenBlock,elseBlock=[]): Flowable.__init__(self) self.expr = cond self.blocks = elseBlock or [],thenBlock def checkBlock(self,block): if not isinstance(block,(list,tuple)): block = (block,) return block def wrap(self,aW,aH): self.add_content(*self.checkBlock(self.blocks[int(bool(self.funcWrap(aW,aH)))])) return 0,0 class DocWhile(DocIf): def __init__(self,cond,whileBlock): Flowable.__init__(self) self.expr = cond self.block = self.checkBlock(whileBlock) def wrap(self,aW,aH): if bool(self.funcWrap(aW,aH)): self.add_content(*(list(self.block)+[self])) return 0,0
bsd-3-clause
-4,955,683,830,492,444,000
35.490395
152
0.553047
false
catharsis/gerrit-notify
gerrit_notify/main.py
1
2887
#coding=utf-8
from gerrit_notify import GerritNotify
from gi.repository import GObject, Gtk
from os.path import expanduser
import subprocess
from ConfigParser import SafeConfigParser


class TrayiconPlugin (GObject.Object):

    notify = None

    def do_activate (self, notify):
        self.notify = notify
        self.staticon = Gtk.StatusIcon.new_from_file(self.notify.icon)
        self.staticon.connect ("activate", self.trayicon_activate)
        self.staticon.connect ("popup_menu", self.trayicon_popup)
        self.staticon.set_tooltip_markup("<span font_size='small'>Gerrit Notifier</span>")
        self.staticon.set_visible (True)

    def trayicon_activate (self, widget, data = None):
        pass

    def trayicon_quit (self, widget, data = None):
        Gtk.main_quit()

    def populate_menuitem_submenu(self, menuitem, changes):
        menu = Gtk.Menu ()
        menuitem.set_submenu(menu)
        if len(changes) == 0:
            menuitem.set_sensitive(False)
            return
        for c in changes:
            menutitem_change = Gtk.MenuItem (str(c))
            menutitem_change.connect("activate", self.change_clicked, c)
            menu.append(menutitem_change)

    def new_change_menuitem(self, name, populator_func):
        new_menuitem = Gtk.MenuItem(name)
        self.populate_menuitem_submenu(new_menuitem, populator_func())
        return new_menuitem

    def change_clicked(self, widget, change = None):
        subprocess.Popen(["xdg-open", change.permalink()])

    def trayicon_popup (self, widget, button, time, data = None):
        self.menu = Gtk.Menu ()
        menuitem_quit = Gtk.MenuItem ("Quit")
        self.menu.append(self.new_change_menuitem("Incoming changes", self.notify.incoming_changes))
        self.menu.append(self.new_change_menuitem("Outgoing changes", self.notify.outgoing_changes))
        self.menu.append(self.new_change_menuitem("Open changes", self.notify.open_changes))
        menuitem_quit.connect ("activate", self.trayicon_quit)
        self.menu.append (menuitem_quit)
        self.menu.show_all ()
        self.menu.popup(None, None, lambda w,x: self.staticon.position_menu(self.menu, self.staticon), self.staticon, 3, time)

    def do_deactivate (self):
        self.staticon.set_visible (False)
        del self.staticon


def main():
    # add a 'Settings' menu item to edit these values
    default_config = {
        'url': 'http://update-your-settings.net',
        'username': None,
        'password': None
    }
    config = SafeConfigParser(default_config)
    p = expanduser('~/.config/gerrit-notify/config')
    config.readfp(open(p))
    notify = GerritNotify(
        config.get('server', 'url'),
        config.get('server', 'username'),
        config.get('server', 'password')
    )
    TrayiconPlugin().do_activate(notify)
    Gtk.main()
bsd-2-clause
5,062,877,376,304,711,000
34.641975
126
0.641496
false
stoilov/Programming101
week3/HackBulgariaAPI/team_matcher.py
1
2715
import requests
import random


class MatchCourse:
    def __init__(self):
        self.url = "https://hackbulgaria.com/api/students/"
        self.records = []
        self.courses = None

    def get_info(self):
        self.records = requests.get(self.url, verify=False)
        if self.records.status_code != 200:
            return False
        self.records = self.records.json()
        return self.records

    def print_messages(self):
        print("\nHello, you can use one the following commands")
        print("list_courses - this lists all the courses that are available now.")
        print("match_teams <course_id>, <team_size>, <group_time>\n\n")

    def list_courses(self):
        if self.records is False:
            return False
        self.courses = set()
        for record in self.records:
            for course in record["courses"]:
                self.courses.add(course["name"])
        self.courses = list(self.courses)
        for key, course in enumerate(self.courses):
            print("[{}] {}".format(key + 1, course))

    def match_teams(self, course_id, team_size, group_time):
        people_in_teams = []
        for record in self.records:
            for course in record["courses"]:
                course_group = course["group"] == group_time
                course_name = course["name"] == self.courses[course_id - 1]
                available = record["available"] is True
                if course_name and course_group and available:
                    people_in_teams.append(record["name"])
        random.shuffle(people_in_teams)
        for key, student in enumerate(people_in_teams):
            print(student)
            if (key + 1) % team_size == 0:
                print("==========")

    def get_input(self):
        command = input("Enter command> ")
        command = command.split(" ")
        return command

    def interface(self):
        command = self.get_input()
        while command[0] != "exit":
            if command[0] == "list_courses":
                self.list_courses()
                command = self.get_input()
            elif command[0] == "match_teams":
                command[1] = int(command[1])
                command[2] = int(command[2])
                command[3] = int(command[3])
                self.match_teams(command[1], command[2], command[3])
                command = self.get_input()
            else:
                print("Bad input!")
                command = self.get_input()
        else:
            print("Goodbye!")


def main():
    hackbulgaria = MatchCourse()
    hackbulgaria.get_info()
    hackbulgaria.print_messages()
    hackbulgaria.interface()


if __name__ == "__main__":
    main()
mit
-4,352,641,921,128,700,400
30.569767
82
0.539963
false
PnX-SI/GeoNature
backend/geonature/utils/module.py
1
4711
import os
import sys
from pathlib import Path
from importlib import import_module

from pkg_resources import load_entry_point, get_entry_info, iter_entry_points

from geonature.utils.utilstoml import load_and_validate_toml
from geonature.utils.config_schema import ManifestSchemaProdConf
from geonature.utils.env import GN_EXTERNAL_MODULE
from geonature.core.gn_commons.models import TModules


class NoManifestFound(Exception):
    pass


def import_legacy_module(module_object):
    sys.path.insert(0, str(GN_EXTERNAL_MODULE))  # to be able to import non-packaged modules
    try:
        # module dist is module_code.lower() because the symlink is created like this
        # in utils.gn_module_import.copy_in_external_mods
        module_dist = module_object.module_code.lower()
        module_dir = GN_EXTERNAL_MODULE / module_dist
        manifest_path = module_dir / 'manifest.toml'
        if not manifest_path.is_file():
            raise NoManifestFound()
        module_manifest = load_and_validate_toml(manifest_path, ManifestSchemaProdConf)
        module_blueprint = import_module(f'{module_dist}.backend.blueprint').blueprint
        module_config = {
            'ID_MODULE': module_object.id_module,
            'MODULE_CODE': module_object.module_code,
            'MODULE_URL': '/' + module_object.module_path.replace(' ', ''),
            'FRONTEND_PATH': str(module_dir / 'frontend'),
        }
        module_schema = import_module(f'{module_object.module_code.lower()}.config.conf_schema_toml').GnModuleSchemaConf
        config_path = module_dir / "config/conf_gn_module.toml"
        module_config.update(load_and_validate_toml(config_path, module_schema))
        module_blueprint.config = module_config
        return module_config, module_blueprint
    finally:
        sys.path.pop(0)


def import_packaged_module(module_dist, module_object):
    module_code = module_object.module_code
    module_dir = GN_EXTERNAL_MODULE / module_object.module_path
    frontend_path = os.environ.get(f'GEONATURE_{module_code}_FRONTEND_PATH', str(module_dir / 'frontend'))
    module_config = {
        'MODULE_CODE': module_code,
        'MODULE_URL': '/' + module_object.module_path,
        'FRONTEND_PATH': frontend_path,
    }
    module_schema = load_entry_point(module_dist, 'gn_module', 'config_schema')
    config_path = os.environ.get(f'GEONATURE_{module_object.module_code}_CONFIG_FILE')
    if not config_path:
        # fallback to legacy conf path guessing
        config_path = str(module_dir / 'config/conf_gn_module.toml')
    module_config.update(load_and_validate_toml(config_path, module_schema))
    blueprint_entry_point = get_entry_info(module_dist, 'gn_module', 'blueprint')
    if blueprint_entry_point:
        module_blueprint = blueprint_entry_point.load()
        module_blueprint.config = module_config
    else:
        module_blueprint = None
    return (module_object, module_config, module_blueprint)


def get_dist_from_code(module_code):
    for entry_point in iter_entry_points('gn_module', 'code'):
        if module_code == entry_point.load():
            return entry_point.dist


def import_gn_module(module_object):
    """
    return (module_object, module_config, module_blueprint)
    module_blueprint may be None in case of front-only module
    """
    # try to find a packaged module with the given code
    module_dist = get_dist_from_code(module_object.module_code)
    if module_dist:
        return import_packaged_module(module_dist, module_object)
    else:
        module_config, module_blueprint = import_legacy_module(module_object)
        return (module_object, module_config, module_blueprint)


def import_backend_enabled_modules():
    """
    yield (module_object, module_config, module_blueprint)
    for backend-enabled modules in gn_commons.t_modules
    """
    enabled_modules = TModules.query.filter_by(active_backend=True).all()
    for module_object in enabled_modules:
        # ignore internal module (i.e. without symlink in external module directory)
        if not Path(GN_EXTERNAL_MODULE / module_object.module_code.lower()).exists():
            continue
        yield import_gn_module(module_object)


def list_frontend_enabled_modules():
    """
    yield module_config for frontend-enabled modules in gn_commons.t_modules
    """
    enabled_modules = TModules.query.filter_by(active_frontend=True).all()
    for module_object in enabled_modules:
        # ignore internal module (i.e. without symlink in external module directory)
        if not Path(GN_EXTERNAL_MODULE / module_object.module_code.lower()).exists():
            continue
        yield module_object
gpl-3.0
4,306,982,387,413,549,600
40.690265
120
0.680959
false
alexanderfefelov/nav
python/nav/eventengine/topology.py
1
7788
# # Copyright (C) 2012 UNINETT # # This file is part of Network Administration Visualized (NAV). # # NAV is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License version 2 as published by # the Free Software Foundation. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. You should have received a copy of the GNU General Public License # along with NAV. If not, see <http://www.gnu.org/licenses/>. # """Topology evaluation functions for event processing""" import socket import datetime import networkx from networkx.exception import NetworkXException from nav.models.manage import SwPortVlan, Netbox, Prefix, Arp, Cam import logging _logger = logging.getLogger(__name__) def netbox_appears_reachable(netbox): """Returns True if netbox appears to be reachable through the known topology. """ target_path = get_path_to_netbox(netbox) nav = NAVServer.make_for(netbox.ip) nav_path = get_path_to_netbox(nav) if nav else True _logger.debug("reachability paths, target_path=%(target_path)r, " "nav_path=%(nav_path)r", locals()) return bool(target_path and nav_path) def get_path_to_netbox(netbox): """Returns a likely path from netbox to its apparent gateway/router. If any switches on the path, or the router itself is down, no current path exists and a False value is returned. However, if there is insufficient information for NAV to find a likely path, a True value is returned. """ prefix = netbox.get_prefix() if not prefix: _logger.warning("couldn't find prefix for %s", netbox) return True router_ports = prefix.get_router_ports() if router_ports: router_port = router_ports[0] else: _logger.warning("couldn't find router ports for %s", prefix) return True router = router_port.interface.netbox _logger.debug("reachability check for %s on %s (router: %s)", netbox, prefix, router) graph = get_graph_for_vlan(prefix.vlan) try: netbox.add_to_graph(graph) except AttributeError: pass strip_down_nodes_from_graph(graph, keep=netbox) if netbox not in graph or router not in graph: if router.up == router.UP_UP: _logger.warning("%(netbox)s topology problem: router %(router)s " "is up, but not in VLAN graph for %(prefix)r. " "Defaulting to 'reachable' status.", locals()) return True _logger.debug("%s not reachable, router or box not in graph: %r", netbox, graph.edges()) return False try: path = networkx.shortest_path(graph, netbox, router) except NetworkXException as error: _logger.debug("an internal networkx exception was raised in " "shortest_path, assuming no path was found: %s", error) path = [] else: _logger.debug("path to %s: %r", netbox, path) return path def get_graph_for_vlan(vlan): """Builds a simple topology graph of the active netboxes in vlan. Any netbox that seems to be down at the moment will not be included in the graph. :returns: A networkx.Graph object. 
""" swpvlan = SwPortVlan.objects.filter(vlan=vlan).select_related( 'interface', 'interface__netbox', 'interface__to_netbox', 'interface__to_interface') graph = networkx.MultiGraph(name='graph for vlan %s' % vlan) for swp in swpvlan: source = swp.interface.netbox source_ifc = swp.interface target = swp.interface.to_netbox target_ifc = swp.interface.to_interface if target: key = tuple(sorted( (source_ifc.id, target_ifc.id if target_ifc else None))) data = set([source_ifc, target_ifc]) graph.add_edge(source, target, key=key, data=data) return graph def strip_down_nodes_from_graph(graph, keep=None): """Strips all nodes (netboxes) from graph that are currently down. :param keep: A node to keep regardless of its current status. """ removable = set(node for node in graph.nodes_iter() if node.up != node.UP_UP and node != keep) graph.remove_nodes_from(removable) return len(removable) def strip_down_links_from_graph(graph): """Strips all edges (links) from graph where any of the involved interfaces are down. """ def _is_down(data): ifcs = data.get('data', []) return any(ifc and ifc.ifoperstatus == ifc.OPER_DOWN for ifc in ifcs) removable = set( (u, v, key) for u, v, key, data in graph.edges_iter(data=True, keys=True) if _is_down(data) ) graph.remove_edges_from(removable) return len(removable) ### ### Functions for locating the NAV server itself ### class NAVServer(object): """A simple mockup of a Netbox representing the NAV server itself""" UP_UP = Netbox.UP_UP @classmethod def make_for(cls, dest): """Creates a NAVServer instance with the source IP address of the local host used for routing traffic to dest. :param dest: An IP address """ ipaddr = get_source_address_for(dest) if ipaddr: return cls(ipaddr) def __init__(self, ip): self.sysname = "NAV" self.ip = ip self.up = Netbox.UP_UP def get_prefix(self): """Gets the prefix for the NAV servers ip""" matches = Prefix.objects.contains_ip(self.ip) if matches: return matches[0] def add_to_graph(self, graph): """Adds edge between myself and all neighboring switches""" for switch in self.get_switches_from_cam(): graph.add_edge(self, switch) def get_switches_from_cam(self): """Gets all neighboring switches""" mac = self.get_mac_from_arp() if mac: records = Cam.objects.filter( mac=mac, end_time__gte=datetime.datetime.max ).select_related('netbox') return list(set(cam.netbox for cam in records)) else: return [] def get_mac_from_arp(self): """Finds the NAV server's MAC address based on its IP address""" arp = Arp.objects.extra( where=['ip = %s'], params=[self.ip] ).filter(end_time__gte=datetime.datetime.max) if arp: return arp[0].mac def __repr__(self): return "{self.__class__.__name__}({self.ip!r})".format(self=self) def get_source_address_for(dest): """Gets the source IP address used by this host when attempting to contact the destination host. :param dest: An IP address string. :return: And IP address string, or None if no address was found. """ family, sockaddr = _get_target_dgram_addr(dest) sock = socket.socket(family, socket.SOCK_DGRAM) try: sock.connect(sockaddr) except socket.error, err: _logger.warning("Error when getting NAV's source address for " "connecting to %(dest)s: %(err)s", locals()) return addrinfo = sock.getsockname() sock.close() return addrinfo[0] def _get_target_dgram_addr(target): """Returns a (family, sockaddr) tuple for the target address for a SOCK_DGRAM socket type. """ for (family, socktype, _proto, _canonname, sockaddr) in socket.getaddrinfo(target, 1): if socktype == socket.SOCK_DGRAM: return family, sockaddr
gpl-2.0
4,639,183,928,962,233,000
31.45
79
0.626605
false
MikeLaptev/sandbox_python
mera/unittest_example/generate_and_load_unittest_update_four.py
1
4101
'''
Created on Jul 30, 2015

@author: Mikhail
'''
import unittest
import re
from json_file_generator import MyOwnJSONProcessing as json_processing
from json_file_generator import __version__ as json_file_generator_version
from unittest.case import skip, skipIf


class GenerateAndLoadJSONTestUpdateFour(unittest.TestCase):

    expected_data = {}

    @classmethod
    def setUpClass(cls):
        print "{} for {} has been called".format(cls.setUpClass.__name__, cls.__name__)
        cls.expected_data = json_processing.generate_data_for_json_obj()

    def setUp(self):
        print "{} for {} has been called".format(self.setUp.__name__, self._testMethodName)
        self.file_name = "generate_and_load_unittest.json"
        self.original_name = json_processing.generate_json_file_with_data(self.file_name, self.expected_data)

    def tearDown(self):
        print "{} for {} has been called".format(self.tearDown.__name__, self._testMethodName)

    @classmethod
    def tearDownClass(cls):
        print "{} for {} has been called".format(cls.tearDownClass.__name__, cls.__name__)
        json_processing.clean_up()

    def testGenerateAndLoadJSONValidKeys(self):
        print "Processing file {}".format(self.original_name)
        actual_data = json_processing.load_data_from_json_file(self.original_name)
        for exp_key in self.expected_data.keys():
            self.assertTrue(actual_data.has_key(exp_key), "Expected key '{}' has not been found in loaded json".format(exp_key))
        for act_key in actual_data.keys():
            self.assertTrue(self.expected_data.has_key(act_key), "Loaded key '{}' has not been found in dumped json".format(act_key))

    # General version of skip
    @skip("old functionality")
    def testGenerateAndLoadJSONValidKeysHasOnlyLetters1(self):
        print "Processing file {}".format(self.original_name)
        actual_data = json_processing.load_data_from_json_file(self.original_name)
        for act_key in actual_data.keys():
            self.assertTrue(re.match("[^a-zA-Z]", act_key) is None, "Key should contains only alpha symbols: {}".format(act_key))

    # Version of skip that check version of our json_file_generator
    @skipIf(json_file_generator_version > 1, "This functionality is not supported in this version on the json file generator")
    def testGenerateAndLoadJSONValidKeysHasOnlyLetters2(self):
        print "Processing file {}".format(self.original_name)
        actual_data = json_processing.load_data_from_json_file(self.original_name)
        for act_key in actual_data.keys():
            self.assertIsNone(re.match("[^a-zA-Z]", act_key), "Key should contains only alpha symbols: {}".format(act_key))

    def testGenerateAndLoadJSONValidValues(self):
        print "Processing file {}".format(self.original_name)
        actual_data = json_processing.load_data_from_json_file(self.original_name)
        for exp_key, exp_value in self.expected_data.items():
            self.assertEquals(exp_value, actual_data.get(exp_key), "Dictionaries have different values '{}' for first and '{}' for second for the same key".format(exp_value, actual_data.get(exp_key)))
        for act_key, act_value in actual_data.items():
            self.assertEquals(act_value, self.expected_data.get(act_key), "Dictionaries have different values '{}' for first and '{}' for second for the same key".format(act_value, self.expected_data.get(act_key)))

    def testGenerateAndLoadJSONForInvalidFile(self):
        """
        This test checks that valid exception will be raised
        if required file will not be found
        """
        invalid_name = "invalid_" + self.original_name
        print "Processing file {}".format(invalid_name)
        with self.assertRaises(IOError) as io_exception:
            # attempt to read file that doesn't exist
            json_processing.load_data_from_json_file(invalid_name)
        self.assertEqual(io_exception.exception.errno, 2)
        self.assertEqual(io_exception.exception.strerror, 'No such file or directory')


if __name__ == "__main__":
    unittest.main(verbosity=2)
apache-2.0
-1,147,400,482,822,408,300
50.275
214
0.683248
false
AutorestCI/azure-sdk-for-python
azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/express_route_circuits_routes_table_list_result.py
1
1252
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class ExpressRouteCircuitsRoutesTableListResult(Model):
    """Response for ListRoutesTable associated with the Express Route Circuits
    API.

    :param value: The list of routes table.
    :type value:
     list[~azure.mgmt.network.v2017_09_01.models.ExpressRouteCircuitRoutesTable]
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[ExpressRouteCircuitRoutesTable]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, value=None, next_link=None):
        super(ExpressRouteCircuitsRoutesTableListResult, self).__init__()
        self.value = value
        self.next_link = next_link
mit
-8,715,857,573,506,023,000
35.823529
80
0.610224
false
appleseedhq/cortex
python/IECoreScene/RemovePrimitiveVariables.py
5
2937
########################################################################## # # Copyright (c) 2007-2010, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # * Neither the name of Image Engine Design nor the names of any # other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## from fnmatch import fnmatchcase import IECore import IECoreScene class RemovePrimitiveVariables( IECoreScene.PrimitiveOp ) : def __init__( self ) : IECoreScene.PrimitiveOp.__init__( self, "Removes variables from primitives" ) self.parameters().addParameters( [ IECore.StringParameter( name = "mode", description = """This chooses whether or not the names parameter specifies the names of variables to keep or the names of variables to remove.""", defaultValue = "remove", presets = ( ( "keep", "keep" ), ( "remove", "remove" ) ), presetsOnly = True ), IECore.StringVectorParameter( name = "names", description = "The names of variables. These can include * or ? characters to match many names.", defaultValue = IECore.StringVectorData() ) ] ) def modifyPrimitive( self, primitive, args ) : keep = args["mode"].value == "keep" for key in primitive.keys() : for n in args["names"] : m = fnmatchcase( key, n ) if (m and not keep) or (not m and keep) : del primitive[key] IECore.registerRunTimeTyped( RemovePrimitiveVariables )
bsd-3-clause
5,771,326,003,735,331,000
36.177215
102
0.677562
false
eJRF/ejrf
questionnaire/migrations/0002_copy_question_text_to_export_label.py
1
25463
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import DataMigration from django.db import models class Migration(DataMigration): def forwards(self, orm): "Write your forwards methods here." for question in orm.question.objects.filter(export_label=''): question.export_label = question.text question.save() def backwards(self, orm): "Write your backwards methods here." models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'questionnaire.answer': { 'Meta': {'object_name': 'Answer'}, 'code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}), 'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.Country']", 'null': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], 
{'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'null': 'True', 'to': "orm['questionnaire.Question']"}), 'questionnaire': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'null': 'True', 'to': "orm['questionnaire.Questionnaire']"}), 'status': ('django.db.models.fields.CharField', [], {'default': "'Draft'", 'max_length': '15'}), 'version': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'}) }, 'questionnaire.answergroup': { 'Meta': {'object_name': 'AnswerGroup'}, 'answer': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'answergroup'", 'null': 'True', 'to': "orm['questionnaire.Answer']"}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'grouped_question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answer_groups'", 'null': 'True', 'to': "orm['questionnaire.QuestionGroup']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'row': ('django.db.models.fields.CharField', [], {'max_length': '6'}) }, 'questionnaire.comment': { 'Meta': {'object_name': 'Comment'}, 'answer_group': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'comments'", 'symmetrical': 'False', 'to': "orm['questionnaire.AnswerGroup']"}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'text': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}) }, 'questionnaire.country': { 'Meta': {'object_name': 'Country'}, 'code': ('django.db.models.fields.CharField', [], {'max_length': '5', 'null': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}), 'regions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'countries'", 'null': 'True', 'to': "orm['questionnaire.Region']"}) }, 'questionnaire.countryquestionnairesubmission': { 'Meta': {'object_name': 'CountryQuestionnaireSubmission'}, 'country': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submissions'", 'to': "orm['questionnaire.Country']"}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'questionnaire': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submissions'", 'to': "orm['questionnaire.Questionnaire']"}), 
'version': ('django.db.models.fields.IntegerField', [], {'default': '1'}) }, 'questionnaire.dateanswer': { 'Meta': {'object_name': 'DateAnswer', '_ormbases': ['questionnaire.Answer']}, u'answer_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['questionnaire.Answer']", 'unique': 'True', 'primary_key': 'True'}), 'response': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}) }, 'questionnaire.multichoiceanswer': { 'Meta': {'object_name': 'MultiChoiceAnswer', '_ormbases': ['questionnaire.Answer']}, u'answer_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['questionnaire.Answer']", 'unique': 'True', 'primary_key': 'True'}), 'response': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answer'", 'null': 'True', 'to': "orm['questionnaire.QuestionOption']"}) }, 'questionnaire.multipleresponseanswer': { 'Meta': {'object_name': 'MultipleResponseAnswer', '_ormbases': ['questionnaire.Answer']}, u'answer_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['questionnaire.Answer']", 'unique': 'True', 'primary_key': 'True'}), 'response': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'answers'", 'null': 'True', 'to': "orm['questionnaire.QuestionOption']"}) }, 'questionnaire.numericalanswer': { 'Meta': {'object_name': 'NumericalAnswer', '_ormbases': ['questionnaire.Answer']}, u'answer_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['questionnaire.Answer']", 'unique': 'True', 'primary_key': 'True'}), 'response': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}) }, 'questionnaire.organization': { 'Meta': {'object_name': 'Organization'}, 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}) }, 'questionnaire.question': { 'Meta': {'object_name': 'Question'}, 'UID': ('django.db.models.fields.CharField', [], {'max_length': '6'}), 'answer_sub_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}), 'answer_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'export_label': ('django.db.models.fields.TextField', [], {'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'instructions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'is_primary': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'child'", 'null': 'True', 'to': "orm['questionnaire.Question']"}), 'region': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'null': 'True', 'to': "orm['questionnaire.Region']"}), 'text': ('django.db.models.fields.TextField', [], {}), 'theme': ('django.db.models.fields.related.ForeignKey', [], 
{'related_name': "'questions'", 'null': 'True', 'to': "orm['questionnaire.Theme']"}) }, 'questionnaire.questiongroup': { 'Meta': {'ordering': "('order',)", 'object_name': 'QuestionGroup'}, 'allow_multiples': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'display_all': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'grid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'hybrid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'instructions': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'is_core': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}), 'order': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sub_group'", 'null': 'True', 'to': "orm['questionnaire.QuestionGroup']"}), 'question': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'question_group'", 'symmetrical': 'False', 'to': "orm['questionnaire.Question']"}), 'subsection': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_group'", 'to': "orm['questionnaire.SubSection']"}) }, 'questionnaire.questiongrouporder': { 'Meta': {'ordering': "('order',)", 'unique_together': "(('order', 'question_group', 'question'),)", 'object_name': 'QuestionGroupOrder'}, 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'order': ('django.db.models.fields.PositiveIntegerField', [], {}), 'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'orders'", 'to': "orm['questionnaire.Question']"}), 'question_group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'orders'", 'null': 'True', 'to': "orm['questionnaire.QuestionGroup']"}) }, 'questionnaire.questionnaire': { 'Meta': {'object_name': 'Questionnaire'}, 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['questionnaire.Questionnaire']"}), 'region': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'questionnaire'", 'null': 'True', 'to': "orm['questionnaire.Region']"}), 'status': ('model_utils.fields.StatusField', [], {'default': "'draft'", 'max_length': '100', u'no_check_for_status': 'True'}), 'year': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 
'True'}) }, 'questionnaire.questionoption': { 'Meta': {'ordering': "('modified',)", 'object_name': 'QuestionOption'}, 'UID': ('django.db.models.fields.CharField', [], {'max_length': '6', 'unique': 'True', 'null': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'instructions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}), 'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['questionnaire.Question']"}), 'text': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'questionnaire.region': { 'Meta': {'object_name': 'Region'}, 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'description': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}), 'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'regions'", 'null': 'True', 'to': "orm['questionnaire.Organization']"}) }, 'questionnaire.section': { 'Meta': {'ordering': "('order',)", 'object_name': 'Section'}, 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_core': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}), 'order': ('django.db.models.fields.IntegerField', [], {}), 'questionnaire': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sections'", 'to': "orm['questionnaire.Questionnaire']"}), 'region': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sections'", 'null': 'True', 'to': "orm['questionnaire.Region']"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}) }, 'questionnaire.skipquestion': { 'Meta': {'object_name': 'SkipQuestion', '_ormbases': ['questionnaire.SkipRule']}, 'skip_question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'skip_rules'", 'to': "orm['questionnaire.Question']"}), u'skiprule_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['questionnaire.SkipRule']", 'unique': 'True', 'primary_key': 'True'}) }, 'questionnaire.skiprule': { 'Meta': {'object_name': 'SkipRule'}, 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'region': ('django.db.models.fields.related.ForeignKey', [], 
{'blank': 'True', 'related_name': "'skip_rules'", 'null': 'True', 'to': "orm['questionnaire.Region']"}), 'response': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'skip_rules'", 'to': "orm['questionnaire.QuestionOption']"}), 'root_question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'root_skip_rules'", 'to': "orm['questionnaire.Question']"}), 'subsection': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'skip_rules'", 'to': "orm['questionnaire.SubSection']"}) }, 'questionnaire.skipsubsection': { 'Meta': {'object_name': 'SkipSubsection', '_ormbases': ['questionnaire.SkipRule']}, 'skip_subsection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.SubSection']"}), u'skiprule_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['questionnaire.SkipRule']", 'unique': 'True', 'primary_key': 'True'}) }, 'questionnaire.subsection': { 'Meta': {'ordering': "('order',)", 'object_name': 'SubSection'}, 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_core': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'order': ('django.db.models.fields.IntegerField', [], {}), 'region': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sub_sections'", 'null': 'True', 'to': "orm['questionnaire.Region']"}), 'section': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sub_sections'", 'to': "orm['questionnaire.Section']"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}) }, 'questionnaire.supportdocument': { 'Meta': {'object_name': 'SupportDocument'}, 'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.Country']"}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'path': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}), 'questionnaire': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'support_documents'", 'to': "orm['questionnaire.Questionnaire']"}) }, 'questionnaire.textanswer': { 'Meta': {'object_name': 'TextAnswer', '_ormbases': ['questionnaire.Answer']}, u'answer_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['questionnaire.Answer']", 'unique': 'True', 'primary_key': 'True'}), 'response': ('django.db.models.fields.TextField', [], {'null': 'True'}) }, 'questionnaire.theme': { 'Meta': {'object_name': 'Theme'}, 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 
'region': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'themes'", 'null': 'True', 'to': "orm['questionnaire.Region']"}) }, 'questionnaire.userprofile': { 'Meta': {'object_name': 'UserProfile'}, 'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.Country']", 'null': 'True', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}), 'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.Organization']", 'null': 'True', 'blank': 'True'}), 'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['questionnaire.Region']", 'null': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'user_profile'", 'unique': 'True', 'to': u"orm['auth.User']"}) } } complete_apps = ['questionnaire'] symmetrical = True
bsd-3-clause
-352,401,073,382,339,500
88.031469
195
0.571535
false
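For readers skimming the questionnaire migration that ends above: South's frozen ORM stores every model field as a (field class path, positional args, keyword options) triple. The snippet below is only a hypothetical illustration of how to unpack one such triple, using the Theme.region entry shown above; none of the names introduced here belong to the original migration.

# Illustrative only: unpack one frozen-field triple from the migration above.
frozen = ('django.db.models.fields.related.ForeignKey', [],
          {'blank': 'True', 'related_name': "'themes'", 'null': 'True',
           'to': "orm['questionnaire.Region']"})

field_path, positional_args, field_options = frozen
module_path, _, field_class = field_path.rpartition('.')

print(field_class)            # ForeignKey
print(sorted(field_options))  # ['blank', 'null', 'related_name', 'to']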
ishidawataru/gobgp
tools/pyang_plugins/bgpyang2golang.py
1
24971
# Copyright (C) 2014,2015 Nippon Telegraph and Telephone Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import optparse import StringIO import sys from pyang import plugin from collections import namedtuple _COPYRIGHT_NOTICE = """ // DO NOT EDIT // generated by pyang using OpenConfig https://github.com/openconfig/public // // Copyright (C) 2014-2016 Nippon Telegraph and Telephone Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. // See the License for the specific language governing permissions and // limitations under the License. """ emitted_type_names = {} EQUAL_TYPE_LEAF = 0 EQUAL_TYPE_ARRAY = 1 EQUAL_TYPE_MAP = 2 EQUAL_TYPE_CONTAINER = 3 def pyang_plugin_init(): plugin.register_plugin(GolangPlugin()) class GolangPlugin(plugin.PyangPlugin): def add_output_format(self, fmts): self.multiple_modules = True fmts['golang'] = self def emit(self, ctx, modules, fd): ctx.golang_identity_map = {} ctx.golang_typedef_map = {} ctx.golang_struct_def = [] ctx.golang_struct_names = {} ctx.prefix_rel = {} ctx.module_deps = [] for m in modules: check_module_deps(ctx, m) # visit yang statements visit_modules(ctx) # emit bgp_configs emit_go(ctx) def visit_modules(ctx): # visit typedef and identity for module in ctx.module_deps: visit_typedef(ctx, module) visit_identity(ctx, module) # visit container for module in ctx.module_deps: visit_children(ctx, module, module.i_children) def emit_go(ctx): ctx.golang_struct_def.reverse() done = set() # emit generate_header(ctx) generate_common_functions(ctx) for mod in ctx.module_deps: if mod not in _module_excluded: emit_typedef(ctx, mod) emit_identity(ctx, mod) for struct in ctx.golang_struct_def: struct_name = struct.uniq_name if struct_name in done: continue emit_class_def(ctx, struct, struct_name, struct.module_prefix) done.add(struct_name) def check_module_deps(ctx, module): own_prefix = module.i_prefix for k, v in module.i_prefixes.items(): mod = ctx.get_module(v[0]) if mod.i_prefix != own_prefix: check_module_deps(ctx, mod) ctx.prefix_rel[mod.i_prefix] = k if mod not in ctx.module_deps \ and mod.i_modulename not in _module_excluded: ctx.module_deps.append(mod) def dig_leafref(type_obj): reftype = type_obj.i_type_spec.i_target_node.search_one('type') if is_leafref(reftype): return dig_leafref(reftype) else: return reftype def emit_class_def(ctx, yang_statement, struct_name, prefix): o = StringIO.StringIO() if len(yang_statement.i_children) == 1 and is_list(yang_statement.i_children[0]): return print >> o, '//struct for container %s:%s' % (prefix, yang_statement.arg) print >> o, 'type %s struct {' % convert_to_golang(struct_name) equal_elems = [] for 
child in yang_statement.i_children: if child.path in _path_exclude: continue container_or_list_name = child.uniq_name val_name_go = convert_to_golang(child.arg) child_prefix = get_orig_prefix(child.i_orig_module) tag_name = child.uniq_name.lower() equal_type = EQUAL_TYPE_LEAF equal_data = None print >> o, ' // original -> %s:%s' % \ (child_prefix, container_or_list_name) # case leaf if is_leaf(child): type_obj = child.search_one('type') type_name = type_obj.arg # case identityref if type_name == 'identityref': emit_type_name = convert_to_golang(type_obj.search_one('base').arg.split(':')[-1]) # case leafref elif type_name == 'leafref': if type_obj.search_one('path').arg.startswith('../config'): continue t = dig_leafref(type_obj) if is_translation_required(t): print >> o, ' //%s:%s\'s original type is %s' \ % (child_prefix, container_or_list_name, t.arg) emit_type_name = translate_type(t.arg) elif is_identityref(t): emit_type_name = convert_to_golang(t.search_one('base').arg.split(':')[-1]) else: emit_type_name = t.arg # case embeded enumeration elif type_name == 'enumeration': emit_type_name = val_name_go # case translation required elif is_translation_required(type_obj): print >> o, ' //%s:%s\'s original type is %s'\ % (child_prefix, container_or_list_name, type_name) emit_type_name = translate_type(type_name) # case other primitives elif is_builtin_type(type_obj): emit_type_name = type_name # default else: base_module = type_obj.i_orig_module.i_prefix t = lookup_typedef(ctx, base_module, type_name) # print(t) emit_type_name = t.golang_name # case 'case' if is_case(child): continue if is_choice(child) and is_enum_choice(child): emit_type_name = val_name_go # case leaflist if is_leaflist(child): type_obj = child.search_one('type') type_name = type_obj.arg val_name_go = val_name_go + 'List' tag_name += '-list' equal_type = EQUAL_TYPE_ARRAY # case leafref if type_name == 'leafref': t = type_obj.i_type_spec.i_target_node.search_one('type') emit_type_name = '[]'+t.arg elif type_name == 'identityref': emit_type_name = '[]'+convert_to_golang(type_obj.search_one('base').arg.split(':')[-1]) # case translation required elif is_translation_required(type_obj): print >> o, ' // original type is list of %s' % (type_obj.arg) emit_type_name = '[]'+translate_type(type_name) # case other primitives elif is_builtin_type(type_obj): emit_type_name = '[]'+type_name # default else: base_module = type_obj.i_orig_module.i_prefix t = lookup_typedef(ctx, base_module, type_name) emit_type_name = '[]'+t.golang_name # case container elif is_container(child) or (is_choice(child) and not is_enum_choice(child)): key = child_prefix+':'+container_or_list_name t = ctx.golang_struct_names[key] val_name_go = t.golang_name if len(t.i_children) == 1 and is_list(t.i_children[0]): l = t.i_children[0] emit_type_name = '[]' + l.golang_name equal_type = EQUAL_TYPE_MAP equal_data = l.search_one('key').arg leaf = l.search_one('leaf').search_one('type') if leaf.arg == 'leafref' and leaf.search_one('path').arg.startswith('../config'): equal_data = 'config.' 
+ equal_data else: emit_type_name = t.golang_name equal_type = EQUAL_TYPE_CONTAINER # case list elif is_list(child): key = child_prefix+':'+container_or_list_name t = ctx.golang_struct_names[key] val_name_go = val_name_go + 'List' tag_name += '-list' emit_type_name = '[]' + t.golang_name equal_type = EQUAL_TYPE_MAP equal_data = child.search_one('key').arg if is_container(child): name = emit_type_name if name.startswith(convert_to_golang(struct_name)) and name.endswith("Config"): tag_name = 'config' val_name_go = 'Config' elif name.startswith(convert_to_golang(struct_name)) and name.endswith("State"): tag_name = 'state' val_name_go = 'State' print >> o, ' {0}\t{1} `mapstructure:"{2}" json:"{2},omitempty"`'.format(val_name_go, emit_type_name, tag_name) equal_elems.append((val_name_go, emit_type_name, equal_type, equal_data)) print >> o, '}' if not struct_name.endswith('state'): print >> o, 'func (lhs *{0}) Equal(rhs *{0}) bool {{'.format(convert_to_golang(struct_name)) print >> o, 'if lhs == nil || rhs == nil {' print >> o, 'return false' print >> o, '}' for val_name, type_name, typ, elem in equal_elems: if val_name == 'State': continue if typ == EQUAL_TYPE_LEAF: if type_name == '[]byte': print >> o, 'if bytes.Compare(lhs.{0}, rhs.{0}) != 0 {{'.format(val_name) else: print >> o, 'if lhs.{0} != rhs.{0} {{'.format(val_name) print >> o, 'return false' print >> o, '}' elif typ == EQUAL_TYPE_CONTAINER: print >> o, 'if !lhs.{0}.Equal(&(rhs.{0})) {{'.format(val_name) print >> o, 'return false' print >> o, '}' elif typ == EQUAL_TYPE_ARRAY: print >> o, 'if len(lhs.{0}) != len(rhs.{0}) {{'.format(val_name) print >> o, 'return false' print >> o, '}' print >> o, 'for idx, l := range lhs.{0} {{'.format(val_name) if type_name == '[][]byte': print >> o, 'if bytes.Compare(l, rhs.{0}[idx]) != 0 {{'.format(val_name) else: print >> o, 'if l != rhs.{0}[idx] {{'.format(val_name) print >> o, 'return false' print >> o, '}' print >> o, '}' elif typ == EQUAL_TYPE_MAP: print >> o, 'if len(lhs.{0}) != len(rhs.{0}) {{'.format(val_name) print >> o, 'return false' print >> o, '}' print >> o, '{' print >> o, 'lmap := make(map[string]*{0})'.format(type_name[2:]) print >> o, 'for i, l := range lhs.{0} {{'.format(val_name) print >> o, 'lmap[mapkey(i, string({0}))] = &lhs.{1}[i]'.format(' + '.join('l.{0}'.format(convert_to_golang(v)) for v in elem.split(' ')), val_name) print >> o, '}' print >> o, 'for i, r := range rhs.{0} {{'.format(val_name) print >> o, 'if l, y := lmap[mapkey(i, string({0}))]; !y {{'.format('+'.join('r.{0}'.format(convert_to_golang(v)) for v in elem.split(' '))) print >> o, 'return false' print >> o, '} else if !r.Equal(l) {' print >> o, 'return false' print >> o, '}' print >> o, '}' print >> o, '}' else: sys.stderr.write("invalid equal type %s", typ) print >> o, 'return true' print >> o, '}' print o.getvalue() def get_orig_prefix(module): orig = module.i_orig_module if orig: get_orig_prefix(orig) else: return module.i_prefix def get_path(c): path = '' if c.parent is not None: p = '' if hasattr(c, 'i_module'): mod = c.i_module prefix = mod.search_one('prefix') p = prefix.arg + ":" if prefix else '' path = get_path(c.parent) + "/" + p + c.arg return path def visit_children(ctx, module, children): for c in children: prefix = '' if is_case(c): prefix = get_orig_prefix(c.parent.i_orig_module) c.i_orig_module = c.parent.i_orig_module else: prefix = get_orig_prefix(c.i_orig_module) c.uniq_name = c.arg if c.arg == 'config': c.uniq_name = c.parent.uniq_name + '-config' if c.arg == 'state': c.uniq_name = 
c.parent.uniq_name + '-state' if c.arg == 'graceful-restart' and prefix == 'bgp-mp': c.uniq_name = 'mp-graceful-restart' t = c.search_one('type') # define container embeded enums def define_enum(c): prefix = module.i_prefix c.path = get_path(c) c.golang_name = convert_to_golang(c.arg) if prefix in ctx.golang_typedef_map: ctx.golang_typedef_map[prefix][c.arg] = c else: ctx.golang_typedef_map[prefix] = {c.arg: c} if is_leaf(c) and c.search_one('type').arg == 'enumeration': define_enum(c) elif is_list(c) or is_container(c) or is_choice(c): c.golang_name = convert_to_golang(c.uniq_name) if is_choice(c): picks = pickup_choice(c) c.i_children = picks if is_enum_choice(c): define_enum(c) continue if ctx.golang_struct_names.get(prefix+':'+c.uniq_name): ext_c = ctx.golang_struct_names.get(prefix+':'+c.uniq_name) ext_c_child_count = len(getattr(ext_c, "i_children")) current_c_child_count = len(getattr(c, "i_children")) if ext_c_child_count < current_c_child_count: c.module_prefix = prefix ctx.golang_struct_names[prefix+':'+c.uniq_name] = c idx = ctx.golang_struct_def.index(ext_c) ctx.golang_struct_def[idx] = c else: c.module_prefix = prefix ctx.golang_struct_names[prefix+':'+c.uniq_name] = c ctx.golang_struct_def.append(c) c.path = get_path(c) # print(c.path) if hasattr(c, 'i_children'): visit_children(ctx, module, c.i_children) def pickup_choice(c): element = [] for child in c.i_children: if is_case(child): element = element + child.i_children return element def get_type_spec(stmt): for s in stmt.substmts: if hasattr(s, 'i_type_spec'): type_sp = s.i_type_spec return type_sp.name return None def visit_typedef(ctx, module): prefix = module.i_prefix child_map = {} for stmts in module.substmts: if stmts.keyword == 'typedef': stmts.path = get_path(stmts) # print(stmts.path) name = stmts.arg stmts.golang_name = convert_to_golang(name) child_map[name] = stmts ctx.golang_typedef_map[prefix] = child_map if ctx.prefix_rel[prefix] != prefix: ctx.golang_typedef_map[ctx.prefix_rel[prefix]] = child_map def visit_identity(ctx, module): prefix = module.i_prefix child_map = {} for stmts in module.substmts: if stmts.keyword == 'identity': name = stmts.arg stmts.golang_name = convert_to_golang(name) child_map[name] = stmts base = stmts.search_one('base') if base: elems = base.arg.split(':') if len(elems) > 1: ctx.golang_identity_map[elems[0]][elems[1]].substmts.append(stmts) else: child_map[base.arg].substmts.append(stmts) ctx.golang_identity_map[prefix] = child_map def lookup_identity(ctx, default_prefix, identity_name): result = lookup(ctx.golang_identity_map, default_prefix, identity_name) return result def lookup_typedef(ctx, default_prefix, type_name): result = lookup(ctx.golang_typedef_map, default_prefix, type_name) return result def lookup(basemap, default_prefix, key): if ':' in key: pref, name = key.split(':') else: pref = default_prefix name = key if pref in basemap: return basemap[pref].get(name, None) else: return key def emit_enum(prefix, name, stmt, substmts): type_name_org = name type_name = stmt.golang_name o = StringIO.StringIO() print >> o, '// typedef for identity %s:%s' % (prefix, type_name_org) print >> o, 'type %s string' % (type_name) const_prefix = convert_const_prefix(type_name_org) print >> o, 'const (' m = {} if is_choice(stmt) and is_enum_choice(stmt): n = namedtuple('Statement', ['arg']) n.arg = 'none' substmts = [n] + substmts for sub in substmts: enum_name = '%s_%s' % (const_prefix, convert_const_prefix(sub.arg)) m[sub.arg.lower()] = enum_name print >> o, ' %s %s = "%s"' % (enum_name, 
type_name, sub.arg.lower()) print >> o, ')\n' print >> o, 'var %sToIntMap = map[%s]int {' % (type_name, type_name) for i, sub in enumerate(substmts): enum_name = '%s_%s' % (const_prefix, convert_const_prefix(sub.arg)) print >> o, ' %s: %d,' % (enum_name, i) print >> o, '}\n' print >> o, 'func (v %s) ToInt() int {' % (type_name) print >> o, 'i, ok := %sToIntMap[v]' % (type_name) print >> o, 'if !ok {' print >> o, 'return -1' print >> o, '}' print >> o, 'return i' print >> o, '}' print >> o, 'var IntTo%sMap = map[int]%s {' % (type_name, type_name) for i, sub in enumerate(substmts): enum_name = '%s_%s' % (const_prefix, convert_const_prefix(sub.arg)) print >> o, ' %d: %s,' % (i, enum_name) print >> o, '}\n' print >> o, 'func (v %s) Validate() error {' % (type_name) print >> o, 'if _, ok := %sToIntMap[v]; !ok {' % (type_name) print >> o, 'return fmt.Errorf("invalid %s: %%s", v)' % (type_name) print >> o, '}' print >> o, 'return nil' print >> o, '}\n' if stmt.search_one('default'): default = stmt.search_one('default') print >> o, 'func (v %s) Default() %s {' % (type_name, type_name) print >> o, 'return %s' % m[default.arg.lower()] print >> o, '}\n' print >> o, 'func (v %s) DefaultAsNeeded() %s {' % (type_name, type_name) print >> o, ' if string(v) == "" {' print >> o, ' return v.Default()' print >> o, '}' print >> o, ' return v' print >> o, '}' print o.getvalue() def emit_typedef(ctx, module): prefix = module.i_prefix t_map = ctx.golang_typedef_map[prefix] for name, stmt in t_map.items(): if stmt.path in _typedef_exclude: continue # skip identityref type because currently skip identity if get_type_spec(stmt) == 'identityref': continue type_name_org = name type_name = stmt.golang_name if type_name in emitted_type_names: warn = "warning %s: %s has already been emitted from %s.\n"\ % (prefix+":"+type_name_org, type_name_org, emitted_type_names[type_name]) sys.stderr.write(warn) continue emitted_type_names[type_name] = prefix+":"+type_name_org t = stmt.search_one('type') o = StringIO.StringIO() if not t and is_choice(stmt): emit_enum(prefix, type_name_org, stmt, stmt.i_children) elif t.arg == 'enumeration': emit_enum(prefix, type_name_org, stmt, t.substmts) elif t.arg == 'union': print >> o, '// typedef for typedef %s:%s'\ % (prefix, type_name_org) print >> o, 'type %s string' % (type_name) else: print >> o, '// typedef for typedef %s:%s'\ % (prefix, type_name_org) if not is_builtin_type(t): m = ctx.golang_typedef_map for k in t.arg.split(':'): m = m[k] print >> o, 'type %s %s' % (type_name, m.golang_name) else: print >> o, 'type %s %s' % (type_name, t.arg) print o.getvalue() def emit_identity(ctx, module): prefix = module.i_prefix i_map = ctx.golang_identity_map[prefix] for name, stmt in i_map.items(): enums = stmt.search('identity') if len(enums) > 0: emit_enum(prefix, name, stmt, enums) def is_reference(s): return s.arg in ['leafref', 'identityref'] def is_leafref(s): return s.arg in ['leafref'] def is_identityref(s): return s.arg in ['identityref'] def is_leaf(s): return s.keyword in ['leaf'] def is_leaflist(s): return s.keyword in ['leaf-list'] def is_list(s): return s.keyword in ['list'] def is_container(s): return s.keyword in ['container'] def is_case(s): return s.keyword in ['case'] def is_choice(s): return s.keyword in ['choice'] def is_enum_choice(s): return all(e.search_one('type').arg in _type_enum_case for e in s.i_children) _type_enum_case = [ 'empty', ] def is_builtin_type(t): return t.arg in _type_builtin def is_translation_required(t): return t.arg in _type_translation_map.keys() 
_type_translation_map = { 'union': 'string', 'decimal64': 'float64', 'boolean': 'bool', 'empty': 'bool', 'inet:ip-address': 'string', 'inet:ip-prefix': 'string', 'inet:ipv4-address': 'string', 'inet:as-number': 'uint32', 'bgp-set-community-option-type': 'string', 'inet:port-number': 'uint16', 'yang:timeticks': 'int64', 'ptypes:install-protocol-type': 'string', 'binary': '[]byte', 'bgp-capability': 'bgp.ParameterCapabilityInterface', 'bgp-open-message': '*bgp.BGPMessage', } _type_builtin = ["union", "int8", "int16", "int32", "int64", "string", "uint8", "uint16", "uint32", "uint64", ] _module_excluded = ["ietf-inet-types", "ietf-yang-types", ] _path_exclude = ["/rpol:routing-policy/rpol:defined-sets/rpol:neighbor-sets/rpol:neighbor-set/rpol:neighbor", "/rpol:routing-policy/rpol:defined-sets/bgp-pol:bgp-defined-sets/bgp-pol:community-sets/bgp-pol:community-set/bgp-pol:community-member", "/rpol:routing-policy/rpol:defined-sets/bgp-pol:bgp-defined-sets/bgp-pol:ext-community-sets/bgp-pol:ext-community-set/bgp-pol:ext-community-member", "/rpol:routing-policy/rpol:defined-sets/bgp-pol:bgp-defined-sets/bgp-pol:as-path-sets/bgp-pol:as-path-set/bgp-pol:as-path-set-member"] _typedef_exclude =["/gobgp:bgp-capability", "/gobgp:bgp-open-message"] def generate_header(ctx): print _COPYRIGHT_NOTICE print 'package config' print '' print 'import (' print '"fmt"' print '' print '"github.com/osrg/gobgp/packet/bgp"' print ')' print '' def generate_common_functions(ctx): print 'func mapkey(index int, name string) string {' print 'if name != "" {' print 'return name' print '}' print 'return fmt.Sprintf("%v", index)' print '}' def translate_type(key): if key in _type_translation_map.keys(): return _type_translation_map[key] else: return key # 'hoge-hoge' -> 'HogeHoge' def convert_to_golang(type_string): a = type_string.split('.') a = map(lambda x: x.capitalize(), a) # XXX locale sensitive return '.'.join( ''.join(t.capitalize() for t in x.split('-')) for x in a) # 'hoge-hoge' -> 'HOGE_HOGE' def convert_const_prefix(type_string): a = type_string.split('-') a = map(lambda x: x.upper(), a) # XXX locale sensitive return '_'.join(a) def chop_suf(s, suf): if not s.endswith(suf): return s return s[:-len(suf)]
apache-2.0
853,701,812,380,600,200
31.514323
165
0.53446
false
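The pyang plugin that ends above leans on two small name-mangling helpers: convert_to_golang ('hoge-hoge' -> 'HogeHoge') and convert_const_prefix ('hoge-hoge' -> 'HOGE_HOGE'). A lightly simplified, self-contained Python 3 rendering of both is sketched below for quick experimentation; the sample inputs are invented, and the original plugin additionally capitalizes each dot-separated segment before splitting on dashes.

# Sketch of the plugin's name-mangling helpers (simplified, Python 3).
def convert_to_golang(type_string):
    # 'bgp-neighbor.config' -> 'BgpNeighbor.Config'
    return '.'.join(
        ''.join(part.capitalize() for part in segment.split('-'))
        for segment in type_string.split('.')
    )


def convert_const_prefix(type_string):
    # 'graceful-restart' -> 'GRACEFUL_RESTART'
    return '_'.join(part.upper() for part in type_string.split('-'))


if __name__ == '__main__':
    print(convert_to_golang('graceful-restart'))    # GracefulRestart
    print(convert_const_prefix('route-reflector'))  # ROUTE_REFLECTOR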
SaschaMester/delicium
tools/telemetry/telemetry/core/platform/profiler/java_heap_profiler.py
1
3432
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import logging
import os
import subprocess
import threading

from telemetry.core.platform import profiler
from telemetry.core import util
from telemetry.internal.backends.chrome import android_browser_finder

util.AddDirToPythonPath(util.GetChromiumSrcDir(), 'build', 'android')
try:
  from pylib import constants  # pylint: disable=F0401
  from pylib.device import device_errors  # pylint: disable=F0401
except ImportError:
  constants = None
  device_errors = None


class JavaHeapProfiler(profiler.Profiler):
  """Android-specific, trigger and fetch java heap dumps."""
  _DEFAULT_DEVICE_DIR = '/data/local/tmp/javaheap'
  # TODO(bulach): expose this as a command line option somehow.
  _DEFAULT_INTERVAL = 20

  def __init__(self, browser_backend, platform_backend, output_path, state):
    super(JavaHeapProfiler, self).__init__(
        browser_backend, platform_backend, output_path, state)
    self._run_count = 1

    self._DumpJavaHeap(False)

    self._timer = threading.Timer(self._DEFAULT_INTERVAL, self._OnTimer)
    self._timer.start()

  @classmethod
  def name(cls):
    return 'java-heap'

  @classmethod
  def is_supported(cls, browser_type):
    if browser_type == 'any':
      return android_browser_finder.CanFindAvailableBrowsers()
    return browser_type.startswith('android')

  def CollectProfile(self):
    self._timer.cancel()
    self._DumpJavaHeap(True)
    try:
      self._browser_backend.adb.device().PullFile(
          self._DEFAULT_DEVICE_DIR, self._output_path)
    except:
      logging.exception('New exception caused by DeviceUtils conversion')
      raise
    self._browser_backend.adb.RunShellCommand(
        'rm ' + os.path.join(self._DEFAULT_DEVICE_DIR, '*'))
    output_files = []
    for f in os.listdir(self._output_path):
      if os.path.splitext(f)[1] == '.aprof':
        input_file = os.path.join(self._output_path, f)
        output_file = input_file.replace('.aprof', '.hprof')
        hprof_conv = os.path.join(constants.ANDROID_SDK_ROOT,
                                  'tools', 'hprof-conv')
        subprocess.call([hprof_conv, input_file, output_file])
        output_files.append(output_file)
    return output_files

  def _OnTimer(self):
    self._DumpJavaHeap(False)

  def _DumpJavaHeap(self, wait_for_completion):
    if not self._browser_backend.adb.device().FileExists(
        self._DEFAULT_DEVICE_DIR):
      self._browser_backend.adb.RunShellCommand(
          'mkdir -p ' + self._DEFAULT_DEVICE_DIR)
      self._browser_backend.adb.RunShellCommand(
          'chmod 777 ' + self._DEFAULT_DEVICE_DIR)

    device_dump_file = None
    for pid in self._GetProcessOutputFileMap().iterkeys():
      device_dump_file = '%s/%s.%s.aprof' % (self._DEFAULT_DEVICE_DIR, pid,
                                             self._run_count)
      self._browser_backend.adb.RunShellCommand(
          'am dumpheap %s %s' % (pid, device_dump_file))

    if device_dump_file and wait_for_completion:
      util.WaitFor(lambda: self._FileSize(device_dump_file) > 0, timeout=2)
    self._run_count += 1

  def _FileSize(self, file_name):
    try:
      return self._browser_backend.adb.device().Stat(file_name).st_size
    except device_errors.CommandFailedError:
      return 0
bsd-3-clause
5,593,076,294,993,118,000
34.75
76
0.666084
false
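One step worth isolating from JavaHeapProfiler.CollectProfile above is the dump post-processing: each pulled '.aprof' file is paired with a '.hprof' output path before hprof-conv is called. The sketch below re-implements just that filename pairing with the standard library; hprof_targets and the sample names are invented for illustration and are not part of the original profiler.

import os


def hprof_targets(output_dir, file_names):
    # Mirror CollectProfile's extension handling: only '.aprof' dumps are
    # converted, each keeping its base name with a '.hprof' suffix.
    pairs = []
    for name in file_names:
        if os.path.splitext(name)[1] == '.aprof':
            input_file = os.path.join(output_dir, name)
            pairs.append((input_file, input_file.replace('.aprof', '.hprof')))
    return pairs


if __name__ == '__main__':
    # Hypothetical dump names, as produced by 'am dumpheap <pid> <file>'.
    print(hprof_targets('/tmp/profile', ['1234.1.aprof', 'log.txt', '5678.2.aprof']))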
HaydenFaulkner/phd
tensorflow_code/word2vec_basic.py
1
11354
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import math import os import random import zipfile import numpy as np # from six.moves import urllib # from six.moves import xrange # pylint: disable=redefined-builtin import tensorflow as tf # Step 1: Download the data. # url = 'http://mattmahoney.net/dc/' # def maybe_download(filename, expected_bytes): # """Download a file if not present, and make sure it's the right size.""" # if not os.path.exists(filename): # filename, _ = urllib.request.urlretrieve(url + filename, filename) # statinfo = os.stat(filename) # if statinfo.st_size == expected_bytes: # print('Found and verified', filename) # else: # print(statinfo.st_size) # raise Exception( # 'Failed to verify ' + filename + '. Can you get to it with a browser?') # return filename # filename = maybe_download('text8.zip', 31344016) # filename = '/home/hayden/Downloads/text8.zip' # # Read the data into a list of strings. # def read_data(filename): # """Extract the first file enclosed in a zip file as a list of words""" # with zipfile.ZipFile(filename) as f: # data = tf.compat.as_str(f.read(f.namelist()[0])).split() # return data # # words = read_data(filename) def word2_vec_basic(sentence_paths, extra_path=None, plot_path=None): def get_tennis_words(): words = [] for sentence_path in sentence_paths: with open(sentence_path) as f: lines = f.readlines() for line in lines: for word in ('<BOS> '+line.split('\t')[1].rstrip()+' <EOS>').split(): words.append(word) if extra_path is not None: with open(extra_path) as f: lines = f.readlines() for line in lines: for word in ('<BOS> ' + line.split('\t')[1].rstrip() + ' <EOS>').split(): words.append(word) return words words = get_tennis_words() print('Data size', len(words)) # Step 2: Build the dictionary and replace rare words with UNK token. vocabulary_size = min(len(collections.Counter(words)), 50000) def build_dataset(words): count = [['<UNK>', -1]] count.extend(collections.Counter(words).most_common(vocabulary_size - 1)) dictionary = dict() for word, _ in count: dictionary[word] = len(dictionary) data = list() unk_count = 0 for word in words: if word in dictionary: index = dictionary[word] else: index = 0 # dictionary['UNK'] unk_count += 1 data.append(index) count[0][1] = unk_count reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys())) return data, count, dictionary, reverse_dictionary data, count, dictionary, reverse_dictionary = build_dataset(words) del words # Hint to reduce memory. print('Most common words (+<UNK>)', count[:5]) print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]]) global data_index data_index = 0 # Step 3: Function to generate a training batch for the skip-gram model. 
def generate_batch(batch_size, num_skips, skip_window): global data_index assert batch_size % num_skips == 0 assert num_skips <= 2 * skip_window batch = np.ndarray(shape=(batch_size), dtype=np.int32) labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32) span = 2 * skip_window + 1 # [ skip_window target skip_window ] buffer = collections.deque(maxlen=span) for _ in range(span): buffer.append(data[data_index]) data_index = (data_index + 1) % len(data) for i in range(batch_size // num_skips): target = skip_window # target label at the center of the buffer targets_to_avoid = [skip_window] for j in range(num_skips): while target in targets_to_avoid: target = random.randint(0, span - 1) targets_to_avoid.append(target) batch[i * num_skips + j] = buffer[skip_window] labels[i * num_skips + j, 0] = buffer[target] buffer.append(data[data_index]) data_index = (data_index + 1) % len(data) return batch, labels batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1) for i in range(8): print(batch[i], reverse_dictionary[batch[i]], '->', labels[i, 0], reverse_dictionary[labels[i, 0]]) # Step 4: Build and train a skip-gram model. batch_size = 128 embedding_size = 64 # Dimension of the embedding vector. skip_window = 1 # How many words to consider left and right. num_skips = 2 # How many times to reuse an input to generate a label. # We pick a random validation set to sample nearest neighbors. Here we limit the # validation samples to the words that have a low numeric ID, which by # construction are also the most frequent. valid_size = 16 # Random set of words to evaluate similarity on. valid_window = 100 # Only pick dev samples in the head of the distribution. valid_examples = np.random.choice(valid_window, valid_size, replace=False) num_sampled = 64 # Number of negative examples to sample. graph = tf.Graph() with graph.as_default(): # Input data. train_inputs = tf.placeholder(tf.int32, shape=[batch_size]) train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1]) valid_dataset = tf.constant(valid_examples, dtype=tf.int32) # Ops and variables pinned to the CPU because of missing GPU implementation with tf.device('/cpu:0'): # Look up embeddings for inputs. embeddings = tf.Variable( tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0)) embed = tf.nn.embedding_lookup(embeddings, train_inputs) # Construct the variables for the NCE loss nce_weights = tf.Variable( tf.truncated_normal([vocabulary_size, embedding_size], stddev=1.0 / math.sqrt(embedding_size))) nce_biases = tf.Variable(tf.zeros([vocabulary_size])) # Compute the average NCE loss for the batch. # tf.nce_loss automatically draws a new sample of the negative labels each # time we evaluate the loss. loss = tf.reduce_mean( tf.nn.nce_loss(weights=nce_weights, biases=nce_biases, labels=train_labels, inputs=embed, num_sampled=num_sampled, num_classes=vocabulary_size)) # Construct the SGD optimizer using a learning rate of 1.0. optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss) # Compute the cosine similarity between minibatch examples and all embeddings. norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True)) normalized_embeddings = embeddings / norm valid_embeddings = tf.nn.embedding_lookup( normalized_embeddings, valid_dataset) similarity = tf.matmul( valid_embeddings, normalized_embeddings, transpose_b=True) # Add variable initializer. init = tf.global_variables_initializer() # Step 5: Begin training. 
num_steps = 50001 with tf.Session(graph=graph) as session: # We must initialize all variables before we use them. init.run() print("Initialized") average_loss = 0 for step in range(num_steps): batch_inputs, batch_labels = generate_batch( batch_size, num_skips, skip_window) feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels} # We perform one update step by evaluating the optimizer op (including it # in the list of returned values for session.run() _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict) average_loss += loss_val if step % 1000 == 0: if step > 0: average_loss /= 2000 # The average loss is an estimate of the loss over the last 2000 batches. print("Average loss at step ", step, ": ", average_loss) average_loss = 0 # Note that this is expensive (~20% slowdown if computed every 500 steps) if step % 10000 == 0: sim = similarity.eval() for i in range(valid_size): valid_word = reverse_dictionary[valid_examples[i]] top_k = 8 # number of nearest neighbors nearest = (-sim[i, :]).argsort()[1:top_k + 1] log_str = "Nearest to %s:" % valid_word for k in range(top_k): close_word = reverse_dictionary[nearest[k]] log_str = "%s %s," % (log_str, close_word) print(log_str) final_embeddings = normalized_embeddings.eval() embeds = {} for i in range(len(reverse_dictionary)): embeds[reverse_dictionary[i]] = final_embeddings[i] # Step 6: Visualize the embeddings. def plot_with_labels(low_dim_embs, labels, filename=plot_path+'tsne.png'): assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings" plt.figure(figsize=(18, 18)) # in inches for i, label in enumerate(labels): x, y = low_dim_embs[i, :] plt.scatter(x, y) plt.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom') plt.savefig(filename) if plot_path is not None: try: from sklearn.manifold import TSNE import matplotlib.pyplot as plt tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000) plot_only = min(vocabulary_size, 500) low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :]) labels = [reverse_dictionary[i] for i in range(plot_only)] plot_with_labels(low_dim_embs, labels) except ImportError: print("Please install sklearn, matplotlib, and scipy to visualize embeddings.") return len(embeds), embeds, None
mit
5,478,328,674,893,871,000
39.123675
91
0.59239
false
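The word2vec script above derives its vocabulary with a build_dataset helper that keeps the vocabulary_size most frequent words and maps everything else to '<UNK>'. Below is a trimmed, self-contained version of that mapping, without the TensorFlow training that follows it in the original; the toy sentence is invented for the example.

import collections


def build_dataset(words, vocabulary_size):
    # Keep the (vocabulary_size - 1) most common words; bucket the rest as '<UNK>'.
    count = [['<UNK>', -1]]
    count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
    dictionary = {word: idx for idx, (word, _) in enumerate(count)}
    data, unk_count = [], 0
    for word in words:
        if word in dictionary:
            index = dictionary[word]
        else:
            index = 0  # the '<UNK>' bucket
            unk_count += 1
        data.append(index)
    count[0][1] = unk_count
    reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    return data, count, dictionary, reverse_dictionary


if __name__ == '__main__':
    toy = '<BOS> nice serve by federer <EOS> <BOS> nice return by nadal <EOS>'.split()
    data, count, dictionary, reverse_dictionary = build_dataset(toy, vocabulary_size=6)
    print(count)
    print([reverse_dictionary[i] for i in data])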
jacob-meacham/chain-cli
chain/cli.py
1
4526
"""CLI for chain. This module is not intended to be used programmatically - if this is something you want, use chain.client instead. """ import click from termcolor import colored from chain.chain import ChainClient, Frequency, NoChainExistsException, ChainExistsException # No docstrings for this file, as the functions are not meant to be called directly. # pylint: disable=missing-docstring DEFAULT_DATA_PATH = '~/.chain/chains.json' DONT_BREAK_TEXT = colored("Don't break the chain!", 'red', attrs=['underline']) # This is idiomatic for click # pylint: disable=C0103 pass_chain_context = click.make_pass_decorator(ChainClient) def _format_chain_name(name): return colored('"{}"'.format(name), 'green', attrs=['bold']) @click.group() @click.option('--file', metavar='FILE', help='Data file path, default is ~/.chain/chains.json', type=click.Path(), default=DEFAULT_DATA_PATH) @click.version_option('0.3.2') @click.pass_context def cli(ctx, file): ctx.obj = ChainClient(file) @cli.command(name='new', help='add a new chain') @click.argument('name') @click.option('--title', '-t', help='Title of this chain. If not specified, the title will be the name') @click.option('--daily', is_flag=True, help='Add daily links (Default)') @click.option('--weekly', is_flag=True, help='Add weekly links') @click.option('--monthly', is_flag=True, help='Add monthly links') @click.option('--required', help='Number of links required for the chain to be considered unbroken', default=1) @click.option('--description', '-d', help='Description of this chain', default='') @pass_chain_context def new_chain(client, name, title, daily, weekly, monthly, required, description): if [daily, weekly, monthly].count(True) > 1: raise click.BadArgumentUsage('One and only one of --daily, --weekly, --monthly must be set.') # Pylint has bugs with enums # pylint: disable=redefined-variable-type if weekly: frequency = Frequency.weekly elif monthly: frequency = Frequency.monthly else: frequency = Frequency.daily try: client.new_chain(name, title=title, frequency=frequency, description=description, num_required=required) except ChainExistsException as e: raise click.BadArgumentUsage(e.message) click.echo("New chain {} created. {}".format(_format_chain_name(name), DONT_BREAK_TEXT)) @cli.command(name='add', help='add a link to the chain') @click.argument('name') @click.option('--num', '-n', help='Number of links to add', default=1) @click.option('--message', '-m', help='Message attached to the added link', default='') @pass_chain_context def add_link(client, name, num, message): try: client.add_link_to_chain(name, num, message=message) except NoChainExistsException as e: raise click.BadArgumentUsage(e.message) num_links_text = colored('{}'.format(num), "blue", attrs=['bold']) link_pluralization = 'link' if num == 1 else 'links' click.echo('Added {} {} to chain {}. 
{}'.format(num_links_text, link_pluralization, _format_chain_name(name), DONT_BREAK_TEXT)) @cli.command(name='ls', help='List chains') @click.option('-q', help='List name only', is_flag=True) @click.option('--prefix', help='List only those chains whose name matches this prefix') @pass_chain_context def list_chains(client, q, prefix): try: chains = [c for c in client.list_chains() if prefix is None or c['id'].startswith(prefix)] if q: for c in chains: click.echo(c['id']) else: for c in chains: # TODO: List them using termtable click.echo(c) except NoChainExistsException as e: raise click.BadArgumentUsage(e.message) @cli.command(name='archive', help='Archive a chain') @click.argument('name') @pass_chain_context def archive_chain(client, name): try: client.archive_chain(name) except NoChainExistsException as e: raise click.BadArgumentUsage(e.message) click.echo('Archived chain {}'.format(_format_chain_name(name))) @cli.command(name='rm', help='Remove a chain') @click.argument('name') @pass_chain_context def remove_chain(client, name): try: client.remove_chain(name) except NoChainExistsException as e: raise click.BadArgumentUsage(e.message) click.echo('Removed chain {}'.format(_format_chain_name(name))) if __name__ == '__main__': # pylint: disable=E1120 cli()
mit
-6,670,421,807,501,067,000
35.208
114
0.67057
false
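The new_chain command in chain/cli.py above first checks that at most one of --daily, --weekly and --monthly is set, then picks the matching Frequency member. The standalone sketch below mirrors only that selection logic; the enum defined here is a stand-in for chain.chain.Frequency and may not match the real class.

import enum


class Frequency(enum.Enum):
    # Stand-in for chain.chain.Frequency; the real members may differ.
    daily = 1
    weekly = 2
    monthly = 3


def pick_frequency(daily=False, weekly=False, monthly=False):
    # Mirrors new_chain's flag handling: daily is the default.
    if [daily, weekly, monthly].count(True) > 1:
        raise ValueError('One and only one of --daily, --weekly, --monthly must be set.')
    if weekly:
        return Frequency.weekly
    if monthly:
        return Frequency.monthly
    return Frequency.daily


if __name__ == '__main__':
    print(pick_frequency(weekly=True))  # Frequency.weekly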
DiCarloLab-Delft/PycQED_py3
pycqed/analysis_v2/randomized_benchmarking_analysis.py
1
94884
import lmfit from uncertainties import ufloat import pandas as pd from copy import deepcopy from pycqed.analysis import analysis_toolbox as a_tools from collections import OrderedDict from pycqed.analysis import measurement_analysis as ma_old import pycqed.analysis_v2.base_analysis as ba import numpy as np import logging from scipy.stats import sem from pycqed.analysis.tools.data_manipulation import populations_using_rate_equations from pycqed.analysis.tools.plotting import set_xlabel, set_ylabel, plot_fit from pycqed.utilities.general import format_value_string import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap, PowerNorm from sklearn import linear_model from matplotlib import colors as c from pycqed.analysis_v2.tools import geometry_utils as geo log = logging.getLogger(__name__) class RandomizedBenchmarking_SingleQubit_Analysis(ba.BaseDataAnalysis): def __init__( self, t_start: str = None, t_stop: str = None, label="", options_dict: dict = None, auto=True, close_figs=True, classification_method="rates", rates_I_quad_ch_idx: int = 0, rates_Q_quad_ch_idx: int = None, rates_ch_idx=None, # Deprecated cal_pnts_in_dset: list = np.repeat(["0", "1", "2"], 2), ignore_f_cal_pts: bool = False, do_fitting: bool = True, **kwargs ): """ Analysis for single qubit randomized benchmarking. For basic options see docstring of BaseDataAnalysis Args: classification_method ["rates", ] sets method to determine populations of g,e and f states. Currently only supports "rates" rates: uses calibration points and rate equation from Asaad et al. to determine populations rates_I_quad_ch_idx (int) : sets the I quadrature channel from which to use the data for the rate equations, `rates_I_quad_ch_idx + 1` is assumed to be the Q quadrature, both quadratures are used in the rate equation, this analysis expects the RO mode to be "optimal IQ" ignore_f_cal_pts (bool) : if True, ignores the f-state calibration points and instead makes the approximation that the f-state looks the same as the e-state in readout. This is useful when the ef-pulse is not calibrated. """ if options_dict is None: options_dict = dict() super().__init__( t_start=t_start, t_stop=t_stop, label=label, options_dict=options_dict, close_figs=close_figs, do_fitting=do_fitting, **kwargs ) # used to determine how to determine 2nd excited state population self.classification_method = classification_method # [2020-07-09 Victor] RB has been used with the "optimal IQ" RO mode # for a while in the lab, both quadratures are necessary for plotting # and correct calculation using the rates equation if rates_ch_idx is not None: log.warning( "`rates_ch_idx` is deprecated `rates_I_quad_ch_idx` " + "and `rates_I_quad_ch_idx + 1` are used for population " + "rates calculation! Please apply changes to `pycqed`." ) self.rates_I_quad_ch_idx = rates_I_quad_ch_idx self.rates_Q_quad_ch_idx = rates_Q_quad_ch_idx if self.rates_Q_quad_ch_idx is None: self.rates_Q_quad_ch_idx = rates_I_quad_ch_idx + 1 self.d1 = 2 self.cal_pnts_in_dset = np.array(cal_pnts_in_dset) self.ignore_f_cal_pts = ignore_f_cal_pts # Allows to run this analysis for different qubits in same dataset self.overwrite_qois = False if auto: self.run_analysis() # NB all the fit_res, plot_dicts, qois are appended the `value_name` # corresponding to `rates_I_quad_ch_idx` so that this analysis can be # run several times targeting a different measured qubit def extract_data(self): """ Custom data extraction for this specific experiment. 
""" self.raw_data_dict = OrderedDict() self.timestamps = a_tools.get_timestamps_in_range( self.t_start, self.t_stop, label=self.labels ) a = ma_old.MeasurementAnalysis( timestamp=self.timestamps[0], auto=False, close_file=False ) a.get_naming_and_values() if "bins" in a.data_file["Experimental Data"]["Experimental Metadata"].keys(): bins = a.data_file["Experimental Data"]["Experimental Metadata"]["bins"][()] num_cal_pnts = len(self.cal_pnts_in_dset) self.raw_data_dict["ncl"] = bins[:-num_cal_pnts:2] self.raw_data_dict["bins"] = bins self.raw_data_dict["value_names"] = a.value_names self.raw_data_dict["value_units"] = a.value_units self.raw_data_dict["measurementstring"] = a.measurementstring self.raw_data_dict["timestamp_string"] = a.timestamp_string self.raw_data_dict["binned_vals"] = OrderedDict() self.raw_data_dict["cal_pts_zero"] = OrderedDict() self.raw_data_dict["cal_pts_one"] = OrderedDict() self.raw_data_dict["cal_pts_two"] = OrderedDict() self.raw_data_dict["measured_values_I"] = OrderedDict() self.raw_data_dict["measured_values_X"] = OrderedDict() # [2020-07-08 Victor] don't know why is this here, seems like # a nasty hack... will keep it to avoid braking some more stuff... selection = a.measured_values[0] == 0 for i in range(1, len(a.measured_values)): selection &= a.measured_values[i] == 0 invalid_idxs = np.where(selection)[0] if len(invalid_idxs): log.warning( "Found zero values at {} indices!".format(len(invalid_idxs)) ) log.warning(invalid_idxs[:10]) a.measured_values[:, invalid_idxs] = np.array( [[np.nan] * len(invalid_idxs)] * len(a.value_names) ) zero_idxs = np.where(self.cal_pnts_in_dset == "0")[0] - num_cal_pnts one_idxs = np.where(self.cal_pnts_in_dset == "1")[0] - num_cal_pnts two_idxs = np.where(self.cal_pnts_in_dset == "2")[0] - num_cal_pnts for i, val_name in enumerate(a.value_names): binned_yvals = np.reshape( a.measured_values[i], (len(bins), -1), order="F" ) self.raw_data_dict["binned_vals"][val_name] = binned_yvals vlns = a.value_names if val_name in ( vlns[self.rates_I_quad_ch_idx], vlns[self.rates_Q_quad_ch_idx], ): self.raw_data_dict["cal_pts_zero"][val_name] = binned_yvals[ zero_idxs, : ].flatten() self.raw_data_dict["cal_pts_one"][val_name] = binned_yvals[ one_idxs, : ].flatten() if self.ignore_f_cal_pts: self.raw_data_dict["cal_pts_two"][ val_name ] = self.raw_data_dict["cal_pts_one"][val_name] else: self.raw_data_dict["cal_pts_two"][val_name] = binned_yvals[ two_idxs, : ].flatten() self.raw_data_dict["measured_values_I"][val_name] = binned_yvals[ :-num_cal_pnts:2, : ] self.raw_data_dict["measured_values_X"][val_name] = binned_yvals[ 1:-num_cal_pnts:2, : ] else: bins = None self.raw_data_dict["folder"] = a.folder self.raw_data_dict["timestamps"] = self.timestamps a.finish() # closes data file def process_data(self): rdd = self.raw_data_dict self.proc_data_dict = deepcopy(rdd) pdd = self.proc_data_dict for key in [ "V0", "V1", "V2", "SI", "SI_corr", "SX", "SX_corr", "P0", "P1", "P2", "M_inv", "M0", "X1", ]: # Nesting dictionaries allows to generate all this quantities # for different qubits by just running the analysis several times # with different rates_I_quad_ch_idx and cal points pdd[key] = OrderedDict() val_name_I = rdd["value_names"][self.rates_I_quad_ch_idx] val_name_Q = rdd["value_names"][self.rates_Q_quad_ch_idx] V0_I = np.nanmean(rdd["cal_pts_zero"][val_name_I]) V1_I = np.nanmean(rdd["cal_pts_one"][val_name_I]) V2_I = np.nanmean(rdd["cal_pts_two"][val_name_I]) V0_Q = np.nanmean(rdd["cal_pts_zero"][val_name_Q]) V1_Q = 
np.nanmean(rdd["cal_pts_one"][val_name_Q]) V2_Q = np.nanmean(rdd["cal_pts_two"][val_name_Q]) pdd["V0"][val_name_I] = V0_I pdd["V1"][val_name_I] = V1_I pdd["V2"][val_name_I] = V2_I pdd["V0"][val_name_Q] = V0_Q pdd["V1"][val_name_Q] = V1_Q pdd["V2"][val_name_Q] = V2_Q SI_I = np.nanmean(rdd["measured_values_I"][val_name_I], axis=1) SX_I = np.nanmean(rdd["measured_values_X"][val_name_I], axis=1) SI_Q = np.nanmean(rdd["measured_values_I"][val_name_Q], axis=1) SX_Q = np.nanmean(rdd["measured_values_X"][val_name_Q], axis=1) pdd["SI"][val_name_I] = SI_I pdd["SX"][val_name_I] = SX_I pdd["SI"][val_name_Q] = SI_Q pdd["SX"][val_name_Q] = SX_Q cal_triangle = np.array([[V0_I, V0_Q], [V1_I, V1_Q], [V2_I, V2_Q]]) pdd["cal_triangle"] = cal_triangle # [2020-07-11 Victor] # Here we correct for the cases when the measured points fall outside # the triangle of the calibration points, such a case breaks the # assumptions that S = V0 * P0 + V1 * P1 + V2 * P2 SI_I_corr, SI_Q_corr = geo.constrain_to_triangle(cal_triangle, SI_I, SI_Q) SX_I_corr, SX_Q_corr = geo.constrain_to_triangle(cal_triangle, SX_I, SX_Q) pdd["SI_corr"][val_name_I] = SI_I_corr pdd["SX_corr"][val_name_I] = SX_I_corr pdd["SI_corr"][val_name_Q] = SI_Q_corr pdd["SX_corr"][val_name_Q] = SX_Q_corr P0, P1, P2, M_inv = populations_using_rate_equations( SI_I_corr + 1j * SI_Q_corr, SX_I_corr + 1j * SX_Q_corr, V0_I + 1j * V0_Q, V1_I + 1j * V1_Q, V2_I + 1j * V2_Q, ) # There might be other qubits being measured at some point so we keep # the results with the I quadrature label pdd["P0"][val_name_I] = P0 pdd["P1"][val_name_I] = P1 pdd["P2"][val_name_I] = P2 pdd["M_inv"][val_name_I] = M_inv # [2020-07-09 Victor] This is not being used for anything... # classifier = logisticreg_classifier_machinelearning( # pdd["cal_pts_zero"], # pdd["cal_pts_one"], # pdd["cal_pts_two"], # ) # pdd["classifier"] = classifier if self.classification_method == "rates": pdd["M0"][val_name_I] = P0 pdd["X1"][val_name_I] = 1 - P2 else: raise NotImplementedError() def run_fitting(self, fit_input_tag: str = None): """ Args: fit_input_tag (str): allows to fit specific M0 and X1 intended for use in 2Q RBs """ super().run_fitting() rdd = self.raw_data_dict pdd = self.proc_data_dict if fit_input_tag is None: # Default value for single qubit RB analysis fit_input_tag = rdd["value_names"][self.rates_I_quad_ch_idx] leak_mod = lmfit.Model(leak_decay, independent_vars="m") leak_mod.set_param_hint("A", value=0.95, min=0, vary=True) leak_mod.set_param_hint("B", value=0.1, min=0, vary=True) leak_mod.set_param_hint("lambda_1", value=0.99, vary=True) leak_mod.set_param_hint("L1", expr="(1-A)*(1-lambda_1)") leak_mod.set_param_hint("L2", expr="A*(1-lambda_1)") leak_mod.set_param_hint("L1_cz", expr="1-(1-(1-A)*(1-lambda_1))**(1/1.5)") leak_mod.set_param_hint("L2_cz", expr="1-(1-(A*(1-lambda_1)))**(1/1.5)") params = leak_mod.make_params() try: fit_res_leak = leak_mod.fit( data=pdd["X1"][fit_input_tag], m=pdd["ncl"], params=params, ) self.fit_res["leakage_decay_" + fit_input_tag] = fit_res_leak lambda_1 = fit_res_leak.best_values["lambda_1"] L1 = fit_res_leak.params["L1"].value except Exception as e: log.warning("Fitting {} failed!".format("leakage_decay")) log.warning(e) lambda_1 = 1 L1 = 0 self.fit_res["leakage_decay_" + fit_input_tag] = {} fit_res_rb = self.fit_rb_decay( fit_input_tag, lambda_1=lambda_1, L1=L1, simple=False ) self.fit_res["rb_decay_" + fit_input_tag] = fit_res_rb fit_res_rb_simple = self.fit_rb_decay( fit_input_tag, lambda_1=1, L1=0, simple=True ) self.fit_res["rb_decay_simple_" + 
fit_input_tag] = fit_res_rb_simple def safe_get_par_from_fit_result(fit_res, par_name): """ Ensures an `lmfit.Parameter` is always returned even when the fit failed and an empty dict is provided """ if fit_res: # Check for empty dict params = fit_res.params par = params[par_name] else: par = lmfit.Parameter(par_name) par.value = np.NaN par.stderr = np.NaN return par fr_rb_dict = self.fit_res["rb_decay_" + fit_input_tag] eps = safe_get_par_from_fit_result(fr_rb_dict, "eps") fr_rb_simple_dict = self.fit_res["rb_decay_simple_" + fit_input_tag] eps_simple = safe_get_par_from_fit_result(fr_rb_simple_dict, "eps") fr_dec = self.fit_res["leakage_decay_" + fit_input_tag] L1 = safe_get_par_from_fit_result(fr_dec, "L1") L2 = safe_get_par_from_fit_result(fr_dec, "L2") text_msg = "Summary: \n" text_msg += format_value_string( r"$\epsilon_{{\mathrm{{simple}}}}$", eps_simple, "\n" ) text_msg += format_value_string(r"$\epsilon_{{\chi_1}}$", eps, "\n") text_msg += format_value_string(r"$L_1$", L1, "\n") text_msg += format_value_string(r"$L_2$", L2, "\n") pdd["rb_msg_" + fit_input_tag] = text_msg pdd["quantities_of_interest"] = {} qoi = pdd["quantities_of_interest"] qoi["eps_simple_" + fit_input_tag] = ufloat( eps_simple.value, eps_simple.stderr or np.NaN ) qoi["eps_X1_" + fit_input_tag] = ufloat(eps.value, eps.stderr or np.NaN) qoi["L1_" + fit_input_tag] = ufloat(L1.value, L1.stderr or np.NaN) qoi["L2_" + fit_input_tag] = ufloat(L2.value, L2.stderr or np.NaN) def fit_rb_decay( self, val_name: str, lambda_1: float, L1: float, simple: bool = False ): """ Fits the data """ pdd = self.proc_data_dict fit_mod_rb = lmfit.Model(full_rb_decay, independent_vars="m") fit_mod_rb.set_param_hint("A", value=0.5, min=0, vary=True) if simple: fit_mod_rb.set_param_hint("B", value=0, vary=False) else: fit_mod_rb.set_param_hint("B", value=0.1, min=0, vary=True) fit_mod_rb.set_param_hint("C", value=0.4, min=0, max=1, vary=True) fit_mod_rb.set_param_hint("lambda_1", value=lambda_1, vary=False) fit_mod_rb.set_param_hint("lambda_2", value=0.95, vary=True) # d1 = dimensionality of computational subspace fit_mod_rb.set_param_hint("d1", value=self.d1, vary=False) fit_mod_rb.set_param_hint("L1", value=L1, vary=False) # Note that all derived quantities are expressed directly in fit_mod_rb.set_param_hint("F", expr="1/d1*((d1-1)*lambda_2+1-L1)", vary=True) fit_mod_rb.set_param_hint("eps", expr="1-(1/d1*((d1-1)*lambda_2+1-L1))") # Only valid for single qubit RB assumption equal error rates fit_mod_rb.set_param_hint( "F_g", expr="(1/d1*((d1-1)*lambda_2+1-L1))**(1/1.875)" ) fit_mod_rb.set_param_hint( "eps_g", expr="1-(1/d1*((d1-1)*lambda_2+1-L1))**(1/1.875)" ) # Only valid for two qubit RB assumption all error in CZ fit_mod_rb.set_param_hint("F_cz", expr="(1/d1*((d1-1)*lambda_2+1-L1))**(1/1.5)") fit_mod_rb.set_param_hint( "eps_cz", expr="1-(1/d1*((d1-1)*lambda_2+1-L1))**(1/1.5)" ) params = fit_mod_rb.make_params() try: fit_res_rb = fit_mod_rb.fit( data=pdd["M0"][val_name], m=pdd["ncl"], params=params ) except Exception as e: log.warning("Fitting failed!") log.warning(e) fit_res_rb = {} return fit_res_rb def prepare_plots(self, fit_input_tag: str = None): """ Args: fit_input_tag (str): allows to fit specific M0 and X1 intended for use in 2Q RBs """ rdd = self.raw_data_dict pdd = self.proc_data_dict if fit_input_tag is None: val_name_I = rdd["value_names"][self.rates_I_quad_ch_idx] fit_input_tag = val_name_I val_names = rdd["value_names"] for i, val_name in enumerate(val_names): self.plot_dicts["binned_data_{}".format(val_name)] = { 
"plotfn": self.plot_line, "xvals": rdd["bins"], "yvals": np.nanmean(rdd["binned_vals"][val_name], axis=1), "yerr": sem(rdd["binned_vals"][val_name], axis=1), "xlabel": "Number of Cliffords", "xunit": "#", "ylabel": val_name, "yunit": rdd["value_units"][i], "title": rdd["timestamp_string"] + "\n" + rdd["measurementstring"], } fs = plt.rcParams["figure.figsize"] fig_id_hex = "cal_points_hexbin_{}".format(val_name_I) self.plot_dicts[fig_id_hex] = { "plotfn": plot_cal_points_hexbin, "shots_0": ( rdd["cal_pts_zero"][val_names[self.rates_I_quad_ch_idx]], rdd["cal_pts_zero"][val_names[self.rates_Q_quad_ch_idx]], ), "shots_1": ( rdd["cal_pts_one"][val_names[self.rates_I_quad_ch_idx]], rdd["cal_pts_one"][val_names[self.rates_Q_quad_ch_idx]], ), "shots_2": ( rdd["cal_pts_two"][val_names[self.rates_I_quad_ch_idx]], rdd["cal_pts_two"][val_names[self.rates_Q_quad_ch_idx]], ), "xlabel": val_names[self.rates_I_quad_ch_idx], "xunit": rdd["value_units"][0], "ylabel": val_names[self.rates_Q_quad_ch_idx], "yunit": rdd["value_units"][1], "title": rdd["timestamp_string"] + "\n" + rdd["measurementstring"] + " hexbin plot", "plotsize": (fs[0] * 1.5, fs[1]), } num_cal_pnts = len(pdd["cal_triangle"]) fig_id_RB_on_IQ = "rb_on_iq_{}".format(val_name_I) for ax_id in [fig_id_hex, fig_id_RB_on_IQ]: self.plot_dicts[ax_id + "_cal_pnts"] = { "plotfn": self.plot_line, "ax_id": ax_id, "xvals": pdd["cal_triangle"].T[0].reshape(num_cal_pnts, 1), "yvals": pdd["cal_triangle"].T[1].reshape(num_cal_pnts, 1), "setlabel": [ r"V$_{\left |" + str(i) + r"\right >}$" for i in range(num_cal_pnts) ], "marker": "d", "line_kws": {"markersize": 14, "markeredgecolor": "white"}, "do_legend": True, # "legend_title": "Calibration points", "legend_ncol": 3, "linestyle": "", } # define figure and axes here to have custom layout self.figs[fig_id_RB_on_IQ], axs = plt.subplots( ncols=2, figsize=(fs[0] * 2.0, fs[1]) ) self.figs[fig_id_RB_on_IQ].patch.set_alpha(0) self.axs[fig_id_RB_on_IQ] = axs[0] fig_id_RB_on_IQ_det = fig_id_RB_on_IQ + "_detailed" self.axs[fig_id_RB_on_IQ_det] = axs[1] axs[1].yaxis.set_label_position("right") axs[1].yaxis.tick_right() close_triangle = list(range(num_cal_pnts)) + [0] self.plot_dicts[fig_id_RB_on_IQ] = { "ax_id": fig_id_RB_on_IQ, "plotfn": self.plot_line, "xvals": pdd["cal_triangle"].T[0][close_triangle], "yvals": pdd["cal_triangle"].T[1][close_triangle], "xlabel": val_names[self.rates_I_quad_ch_idx], "xunit": rdd["value_units"][0], "ylabel": val_names[self.rates_Q_quad_ch_idx], "yunit": rdd["value_units"][1], "title": rdd["timestamp_string"] + "\n" + rdd["measurementstring"] + " hexbin plot", "marker": "", "color": "black", "line_kws": {"linewidth": 1}, "setlabel": "NONE", } self.plot_dicts[fig_id_RB_on_IQ_det] = { "ax_id": fig_id_RB_on_IQ_det, "plotfn": self.plot_line, "xvals": pdd["cal_triangle"].T[0][:2], "yvals": pdd["cal_triangle"].T[1][:2], "xlabel": val_names[self.rates_I_quad_ch_idx], "xunit": rdd["value_units"][0], "ylabel": val_names[self.rates_Q_quad_ch_idx], "yunit": rdd["value_units"][1], "title": r"Detailed view", "marker": "", "color": "black", "line_kws": {"linewidth": 1}, "setlabel": "NONE", } val_name_Q = rdd["value_names"][self.rates_Q_quad_ch_idx] rb_SI = (pdd["SI"][val_name_I], pdd["SI"][val_name_Q]) rb_SX = (pdd["SX"][val_name_I], pdd["SX"][val_name_Q]) rb_SI_corr = (pdd["SI_corr"][val_name_I], pdd["SI_corr"][val_name_Q]) rb_SX_corr = (pdd["SX_corr"][val_name_I], pdd["SX_corr"][val_name_Q]) sigs = (rb_SI, rb_SI_corr, rb_SX, rb_SX_corr) ids = ("SI", "SI_corr", "SX", "SX_corr") labels = ("SI", "SI 
corrected", "SX", "SX corrected") cols = ["royalblue", "dodgerblue", "red", "salmon"] mks = [8, 4, 8, 4] for ax_id, do_legend in zip( [fig_id_RB_on_IQ, fig_id_RB_on_IQ_det], [True, False] ): for S, col, mk_size, ID, label in zip(sigs, cols, mks, ids, labels): self.plot_dicts[ax_id + "_{}".format(ID)] = { "plotfn": self.plot_line, "ax_id": ax_id, "xvals": S[0], "yvals": S[1], "setlabel": label, "marker": "o", "line_kws": {"markersize": mk_size}, "color": col, "do_legend": do_legend, "legend_ncol": 3, "linestyle": "", } for idx in [self.rates_I_quad_ch_idx, self.rates_Q_quad_ch_idx]: val_name = rdd["value_names"][idx] self.plot_dicts["raw_RB_curve_data_{}".format(val_name)] = { "plotfn": plot_raw_RB_curve, "ncl": pdd["ncl"], "SI": pdd["SI"][val_name], "SX": pdd["SX"][val_name], "V0": pdd["V0"][val_name], "V1": pdd["V1"][val_name], "V2": pdd["V2"][val_name], "xlabel": "Number of Cliffords", "xunit": "#", "ylabel": val_name, "yunit": pdd["value_units"][idx], "title": pdd["timestamp_string"] + "\n" + pdd["measurementstring"], } self.plot_dicts["rb_rate_eq_pops_{}".format(val_name_I)] = { "plotfn": plot_populations_RB_curve, "ncl": pdd["ncl"], "P0": pdd["P0"][val_name_I], "P1": pdd["P1"][val_name_I], "P2": pdd["P2"][val_name_I], "title": pdd["timestamp_string"] + "\n" + "Population using rate equations ch{}".format(val_name_I), } # [2020-07-09 Victor] This is not being used for anything... # self.plot_dicts["logres_decision_bound"] = { # "plotfn": plot_classifier_decission_boundary, # "classifier": pdd["classifier"], # "shots_0": ( # pdd["cal_pts_zero"][val_names[ch_idx_0]], # pdd["cal_pts_zero"][val_names[ch_idx_1]], # ), # "shots_1": ( # pdd["cal_pts_one"][val_names[ch_idx_0]], # pdd["cal_pts_one"][val_names[ch_idx_1]], # ), # "shots_2": ( # pdd["cal_pts_two"][val_names[ch_idx_0]], # pdd["cal_pts_two"][val_names[ch_idx_1]], # ), # "xlabel": val_names[ch_idx_0], # "xunit": pdd["value_units"][0], # "ylabel": val_names[ch_idx_1], # "yunit": pdd["value_units"][1], # "title": pdd["timestamp_string"] # + "\n" # + pdd["measurementstring"] # + " Decision boundary", # "plotsize": (fs[0] * 1.5, fs[1]), # } # ##################################################################### # End of plots for single qubit only # ##################################################################### if self.do_fitting: # define figure and axes here to have custom layout rb_fig_id = "main_rb_decay_{}".format(fit_input_tag) leak_fig_id = "leak_decay_{}".format(fit_input_tag) self.figs[rb_fig_id], axs = plt.subplots( nrows=2, sharex=True, gridspec_kw={"height_ratios": (2, 1)} ) self.figs[rb_fig_id].patch.set_alpha(0) self.axs[rb_fig_id] = axs[0] self.axs[leak_fig_id] = axs[1] self.plot_dicts[rb_fig_id] = { "plotfn": plot_rb_decay_woods_gambetta, "ncl": pdd["ncl"], "M0": pdd["M0"][fit_input_tag], "X1": pdd["X1"][fit_input_tag], "ax1": axs[1], "title": pdd["timestamp_string"] + "\n" + pdd["measurementstring"], } self.plot_dicts["fit_leak"] = { "plotfn": self.plot_fit, "ax_id": leak_fig_id, "fit_res": self.fit_res["leakage_decay_" + fit_input_tag], "setlabel": "Leakage fit", "do_legend": True, "color": "C2", } self.plot_dicts["fit_rb_simple"] = { "plotfn": self.plot_fit, "ax_id": rb_fig_id, "fit_res": self.fit_res["rb_decay_simple_" + fit_input_tag], "setlabel": "Simple RB fit", "do_legend": True, } self.plot_dicts["fit_rb"] = { "plotfn": self.plot_fit, "ax_id": rb_fig_id, "fit_res": self.fit_res["rb_decay_" + fit_input_tag], "setlabel": "Full RB fit", "do_legend": True, "color": "C2", } self.plot_dicts["rb_text"] = { 
"plotfn": self.plot_text, "text_string": pdd["rb_msg_" + fit_input_tag], "xpos": 1.05, "ypos": 0.6, "ax_id": rb_fig_id, "horizontalalignment": "left", } class RandomizedBenchmarking_TwoQubit_Analysis( RandomizedBenchmarking_SingleQubit_Analysis ): def __init__( self, t_start: str = None, t_stop: str = None, label="", options_dict: dict = None, auto=True, close_figs=True, classification_method="rates", rates_I_quad_ch_idxs: list = [0, 2], ignore_f_cal_pts: bool = False, extract_only: bool = False, ): if options_dict is None: options_dict = dict() super(RandomizedBenchmarking_SingleQubit_Analysis, self).__init__( t_start=t_start, t_stop=t_stop, label=label, options_dict=options_dict, close_figs=close_figs, do_fitting=True, extract_only=extract_only, ) self.d1 = 4 self.rates_I_quad_ch_idxs = rates_I_quad_ch_idxs # used to determine how to determine 2nd excited state population self.classification_method = classification_method # The interleaved analysis does a bit of nasty things and this becomes # necessary self.overwrite_qois = True if auto: self.run_analysis() def extract_data(self): """ Custom data extraction for this specific experiment. """ self.raw_data_dict = OrderedDict() # We run the single qubit analysis twice for each qubit # It will generate all the quantities we want for each qubit cal_2Q = ["00", "01", "10", "11", "02", "20", "22"] rates_I_quad_ch_idx = self.rates_I_quad_ch_idxs[0] cal_1Q = [state[rates_I_quad_ch_idx // 2] for state in cal_2Q] a_q0 = RandomizedBenchmarking_SingleQubit_Analysis( t_start=self.t_start, rates_I_quad_ch_idx=rates_I_quad_ch_idx, cal_pnts_in_dset=cal_1Q, do_fitting=False, extract_only=self.extract_only, ) rates_I_quad_ch_idx = self.rates_I_quad_ch_idxs[1] cal_1Q = [state[rates_I_quad_ch_idx // 2] for state in cal_2Q] a_q1 = RandomizedBenchmarking_SingleQubit_Analysis( t_start=self.t_start, rates_I_quad_ch_idx=rates_I_quad_ch_idx, cal_pnts_in_dset=cal_1Q, do_fitting=False, extract_only=self.extract_only, ) # Upwards and downwards hierarchical compatibilities rdd = self.raw_data_dict self.timestamps = a_q0.timestamps rdd["analyses"] = {"q0": a_q0, "q1": a_q1} rdd["folder"] = a_q0.raw_data_dict["folder"] rdd["timestamps"] = a_q0.raw_data_dict["timestamps"] rdd["timestamp_string"] = a_q0.raw_data_dict["timestamp_string"] rdd["measurementstring"] = a_q1.raw_data_dict["measurementstring"] def process_data(self): self.proc_data_dict = OrderedDict() pdd = self.proc_data_dict for key in ["M0", "X1"]: # Keeping it compatible with 1Q on purpose pdd[key] = OrderedDict() rdd = self.raw_data_dict pdd["folder"] = rdd["folder"] pdd["timestamps"] = rdd["timestamps"] pdd["timestamp_string"] = rdd["timestamp_string"] pdd["measurementstring"] = rdd["measurementstring"] val_names = rdd["analyses"]["q0"].raw_data_dict["value_names"] if self.classification_method == "rates": val_name_q0 = val_names[self.rates_I_quad_ch_idxs[0]] val_name_q1 = val_names[self.rates_I_quad_ch_idxs[1]] fit_input_tag = "2Q" self.proc_data_dict["M0"][fit_input_tag] = ( rdd["analyses"]["q0"].proc_data_dict["P0"][val_name_q0] * rdd["analyses"]["q1"].proc_data_dict["P0"][val_name_q1] ) self.proc_data_dict["X1"][fit_input_tag] = ( 1 - rdd["analyses"]["q0"].proc_data_dict["P2"][val_name_q0] - rdd["analyses"]["q1"].proc_data_dict["P2"][val_name_q1] ) else: raise NotImplementedError() # Required for the plotting in super() pdd["ncl"] = rdd["analyses"]["q0"].raw_data_dict["ncl"] def run_fitting(self): # Call the prepare plots of the class above fit_input_tag = "2Q" 
super().run_fitting(fit_input_tag=fit_input_tag) def prepare_plots(self): # Call the prepare plots of the class above fit_input_tag = "2Q" super().prepare_plots(fit_input_tag=fit_input_tag) class UnitarityBenchmarking_TwoQubit_Analysis( RandomizedBenchmarking_SingleQubit_Analysis ): def __init__( self, t_start: str = None, t_stop: str = None, label="", options_dict: dict = None, auto=True, close_figs=True, classification_method="rates", rates_ch_idxs: list = [0, 2], ignore_f_cal_pts: bool = False, nseeds: int = None, **kwargs ): """Analysis for unitarity benchmarking. This analysis is based on """ log.error( "[2020-07-12 Victor] This analysis requires to be " "upgraded to the new version of the 1Q-RB analysis." ) if nseeds is None: raise TypeError("You must specify number of seeds!") self.nseeds = nseeds if options_dict is None: options_dict = dict() super(RandomizedBenchmarking_SingleQubit_Analysis, self).__init__( t_start=t_start, t_stop=t_stop, label=label, options_dict=options_dict, close_figs=close_figs, do_fitting=True, **kwargs ) self.d1 = 4 # used to determine how to determine 2nd excited state population self.classification_method = classification_method self.rates_ch_idxs = rates_ch_idxs self.ignore_f_cal_pts = ignore_f_cal_pts if auto: self.run_analysis() def extract_data(self): """Custom data extraction for Unitarity benchmarking. To determine the unitarity data is acquired in different bases. This method extracts that data and puts it in specific bins. """ self.raw_data_dict = OrderedDict() self.timestamps = a_tools.get_timestamps_in_range( self.t_start, self.t_stop, label=self.labels ) a = ma_old.MeasurementAnalysis( timestamp=self.timestamps[0], auto=False, close_file=False ) a.get_naming_and_values() if "bins" in a.data_file["Experimental Data"]["Experimental Metadata"].keys(): bins = a.data_file["Experimental Data"]["Experimental Metadata"]["bins"][()] self.raw_data_dict["ncl"] = bins[:-7:10] # 7 calibration points self.raw_data_dict["bins"] = bins self.raw_data_dict["value_names"] = a.value_names self.raw_data_dict["value_units"] = a.value_units self.raw_data_dict["measurementstring"] = a.measurementstring self.raw_data_dict["timestamp_string"] = a.timestamp_string self.raw_data_dict["binned_vals"] = OrderedDict() self.raw_data_dict["cal_pts_x0"] = OrderedDict() self.raw_data_dict["cal_pts_x1"] = OrderedDict() self.raw_data_dict["cal_pts_x2"] = OrderedDict() self.raw_data_dict["cal_pts_0x"] = OrderedDict() self.raw_data_dict["cal_pts_1x"] = OrderedDict() self.raw_data_dict["cal_pts_2x"] = OrderedDict() self.raw_data_dict["measured_values_ZZ"] = OrderedDict() self.raw_data_dict["measured_values_XZ"] = OrderedDict() self.raw_data_dict["measured_values_YZ"] = OrderedDict() self.raw_data_dict["measured_values_ZX"] = OrderedDict() self.raw_data_dict["measured_values_XX"] = OrderedDict() self.raw_data_dict["measured_values_YX"] = OrderedDict() self.raw_data_dict["measured_values_ZY"] = OrderedDict() self.raw_data_dict["measured_values_XY"] = OrderedDict() self.raw_data_dict["measured_values_YY"] = OrderedDict() self.raw_data_dict["measured_values_mZmZ"] = OrderedDict() for i, val_name in enumerate(a.value_names): invalid_idxs = np.where( (a.measured_values[0] == 0) & (a.measured_values[1] == 0) & (a.measured_values[2] == 0) & (a.measured_values[3] == 0) )[0] a.measured_values[:, invalid_idxs] = np.array( [[np.nan] * len(invalid_idxs)] * 4 ) binned_yvals = np.reshape( a.measured_values[i], (len(bins), -1), order="F" ) self.raw_data_dict["binned_vals"][val_name] = binned_yvals # 
7 cal points: [00, 01, 10, 11, 02, 20, 22] # col_idx: [-7, -6, -5, -4, -3, -2, -1] self.raw_data_dict["cal_pts_x0"][val_name] = binned_yvals[ (-7, -5), : ].flatten() self.raw_data_dict["cal_pts_x1"][val_name] = binned_yvals[ (-6, -4), : ].flatten() self.raw_data_dict["cal_pts_x2"][val_name] = binned_yvals[ (-3, -1), : ].flatten() self.raw_data_dict["cal_pts_0x"][val_name] = binned_yvals[ (-7, -6), : ].flatten() self.raw_data_dict["cal_pts_1x"][val_name] = binned_yvals[ (-5, -4), : ].flatten() self.raw_data_dict["cal_pts_2x"][val_name] = binned_yvals[ (-2, -1), : ].flatten() self.raw_data_dict["measured_values_ZZ"][val_name] = binned_yvals[ 0:-7:10, : ] self.raw_data_dict["measured_values_XZ"][val_name] = binned_yvals[ 1:-7:10, : ] self.raw_data_dict["measured_values_YZ"][val_name] = binned_yvals[ 2:-7:10, : ] self.raw_data_dict["measured_values_ZX"][val_name] = binned_yvals[ 3:-7:10, : ] self.raw_data_dict["measured_values_XX"][val_name] = binned_yvals[ 4:-7:10, : ] self.raw_data_dict["measured_values_YX"][val_name] = binned_yvals[ 5:-7:10, : ] self.raw_data_dict["measured_values_ZY"][val_name] = binned_yvals[ 6:-7:10, : ] self.raw_data_dict["measured_values_XY"][val_name] = binned_yvals[ 7:-7:10, : ] self.raw_data_dict["measured_values_YY"][val_name] = binned_yvals[ 8:-7:10, : ] self.raw_data_dict["measured_values_mZmZ"][val_name] = binned_yvals[ 9:-7:10, : ] else: bins = None self.raw_data_dict["folder"] = a.folder self.raw_data_dict["timestamps"] = self.timestamps a.finish() # closes data file def process_data(self): """Averages shot data and calculates unitarity from raw_data_dict. Note: this doe not correct the outcomes for leakage. """ self.proc_data_dict = deepcopy(self.raw_data_dict) keys = [ "Vx0", "V0x", "Vx1", "V1x", "Vx2", "V2x", "SI", "SX", "Px0", "P0x", "Px1", "P1x", "Px2", "P2x", "M_inv_q0", "M_inv_q1", ] keys += [ "XX", "XY", "XZ", "YX", "YY", "YZ", "ZX", "ZY", "ZZ", "XX_sq", "XY_sq", "XZ_sq", "YX_sq", "YY_sq", "YZ_sq", "ZX_sq", "ZY_sq", "ZZ_sq", "unitarity_shots", "unitarity", ] keys += [ "XX_q0", "XY_q0", "XZ_q0", "YX_q0", "YY_q0", "YZ_q0", "ZX_q0", "ZY_q0", "ZZ_q0", ] keys += [ "XX_q1", "XY_q1", "XZ_q1", "YX_q1", "YY_q1", "YZ_q1", "ZX_q1", "ZY_q1", "ZZ_q1", ] for key in keys: self.proc_data_dict[key] = OrderedDict() for val_name in self.raw_data_dict["value_names"]: for idx in ["x0", "x1", "x2", "0x", "1x", "2x"]: self.proc_data_dict["V{}".format(idx)][val_name] = np.nanmean( self.raw_data_dict["cal_pts_{}".format(idx)][val_name] ) SI = np.nanmean(self.raw_data_dict["measured_values_ZZ"][val_name], axis=1) SX = np.nanmean( self.raw_data_dict["measured_values_mZmZ"][val_name], axis=1 ) self.proc_data_dict["SI"][val_name] = SI self.proc_data_dict["SX"][val_name] = SX Px0, Px1, Px2, M_inv_q0 = populations_using_rate_equations( SI, SX, self.proc_data_dict["Vx0"][val_name], self.proc_data_dict["Vx1"][val_name], self.proc_data_dict["Vx2"][val_name], ) P0x, P1x, P2x, M_inv_q1 = populations_using_rate_equations( SI, SX, self.proc_data_dict["V0x"][val_name], self.proc_data_dict["V1x"][val_name], self.proc_data_dict["V2x"][val_name], ) for key, val in [ ("Px0", Px0), ("Px1", Px1), ("Px2", Px2), ("P0x", P0x), ("P1x", P1x), ("P2x", P2x), ("M_inv_q0", M_inv_q0), ("M_inv_q1", M_inv_q1), ]: self.proc_data_dict[key][val_name] = val for key in ["XX", "XY", "XZ", "YX", "YY", "YZ", "ZX", "ZY", "ZZ"]: Vmeas = self.raw_data_dict["measured_values_" + key][val_name] Px2 = self.proc_data_dict["Px2"][val_name] V0 = self.proc_data_dict["Vx0"][val_name] V1 = self.proc_data_dict["Vx1"][val_name] V2 
= self.proc_data_dict["Vx2"][val_name] val = Vmeas + 0 # - (Px2*V2 - (1-Px2)*V1)[:,None] val -= V1 val /= V0 - V1 val = np.mean(np.reshape(val, (val.shape[0], self.nseeds, -1)), axis=2) self.proc_data_dict[key + "_q0"][val_name] = val * 2 - 1 P2x = self.proc_data_dict["P2x"][val_name] V0 = self.proc_data_dict["V0x"][val_name] V1 = self.proc_data_dict["V1x"][val_name] # Leakage is ignored in this analysis. # V2 = self.proc_data_dict['V2x'][val_name] val = Vmeas + 0 # - (P2x*V2 - (1-P2x)*V1)[:,None] val -= V1 val /= V0 - V1 val = np.mean(np.reshape(val, (val.shape[0], self.nseeds, -1)), axis=2) self.proc_data_dict[key + "_q1"][val_name] = val * 2 - 1 if self.classification_method == "rates": val_name_q0 = self.raw_data_dict["value_names"][self.rates_ch_idxs[0]] val_name_q1 = self.raw_data_dict["value_names"][self.rates_ch_idxs[1]] self.proc_data_dict["M0"] = ( self.proc_data_dict["Px0"][val_name_q0] * self.proc_data_dict["P0x"][val_name_q1] ) self.proc_data_dict["X1"] = ( 1 - self.proc_data_dict["Px2"][val_name_q0] - self.proc_data_dict["P2x"][val_name_q1] ) # The unitarity is calculated here. self.proc_data_dict["unitarity_shots"] = ( self.proc_data_dict["ZZ_q0"][val_name_q0] * 0 ) # Unitarity according to Eq. (10) Wallman et al. New J. Phys. 2015 # Pj = d/(d-1)*|n(rho_j)|^2 # Note that the dimensionality prefix is ignored here as it # should drop out in the fits. for key in ["XX", "XY", "XZ", "YX", "YY", "YZ", "ZX", "ZY", "ZZ"]: self.proc_data_dict[key] = ( self.proc_data_dict[key + "_q0"][val_name_q0] * self.proc_data_dict[key + "_q1"][val_name_q1] ) self.proc_data_dict[key + "_sq"] = self.proc_data_dict[key] ** 2 self.proc_data_dict["unitarity_shots"] += self.proc_data_dict[ key + "_sq" ] self.proc_data_dict["unitarity"] = np.mean( self.proc_data_dict["unitarity_shots"], axis=1 ) else: raise NotImplementedError() def run_fitting(self): super().run_fitting() self.fit_res["unitarity_decay"] = self.fit_unitarity_decay() unitarity_dec = self.fit_res["unitarity_decay"].params text_msg = "Summary: \n" text_msg += format_value_string( "Unitarity\n" + r"$u$", unitarity_dec["u"], "\n" ) text_msg += format_value_string( "Error due to\nincoherent mechanisms\n" + r"$\epsilon$", unitarity_dec["eps"], ) self.proc_data_dict["unitarity_msg"] = text_msg def fit_unitarity_decay(self): """Fits the data using the unitarity model.""" fit_mod_unitarity = lmfit.Model(unitarity_decay, independent_vars="m") fit_mod_unitarity.set_param_hint("A", value=0.1, min=0, max=1, vary=True) fit_mod_unitarity.set_param_hint("B", value=0.8, min=0, max=1, vary=True) fit_mod_unitarity.set_param_hint("u", value=0.9, min=0, max=1, vary=True) fit_mod_unitarity.set_param_hint("d1", value=self.d1, vary=False) # Error due to incoherent sources # Feng Phys. Rev. Lett. 117, 260501 (2016) eq. 
(4) fit_mod_unitarity.set_param_hint("eps", expr="((d1-1)/d1)*(1-u**0.5)") params = fit_mod_unitarity.make_params() fit_mod_unitarity = fit_mod_unitarity.fit( data=self.proc_data_dict["unitarity"], m=self.proc_data_dict["ncl"], params=params, ) return fit_mod_unitarity def prepare_plots(self): val_names = self.proc_data_dict["value_names"] for i, val_name in enumerate(val_names): self.plot_dicts["binned_data_{}".format(val_name)] = { "plotfn": self.plot_line, "xvals": self.proc_data_dict["bins"], "yvals": np.nanmean( self.proc_data_dict["binned_vals"][val_name], axis=1 ), "yerr": sem(self.proc_data_dict["binned_vals"][val_name], axis=1), "xlabel": "Number of Cliffords", "xunit": "#", "ylabel": val_name, "yunit": self.proc_data_dict["value_units"][i], "title": self.proc_data_dict["timestamp_string"] + "\n" + self.proc_data_dict["measurementstring"], } fs = plt.rcParams["figure.figsize"] # define figure and axes here to have custom layout self.figs["rb_populations_decay"], axs = plt.subplots( ncols=2, sharex=True, sharey=True, figsize=(fs[0] * 1.5, fs[1]) ) self.figs["rb_populations_decay"].suptitle( self.proc_data_dict["timestamp_string"] + "\n" + "Population using rate equations", y=1.05, ) self.figs["rb_populations_decay"].patch.set_alpha(0) self.axs["rb_pops_q0"] = axs[0] self.axs["rb_pops_q1"] = axs[1] val_name_q0 = val_names[self.rates_ch_idxs[0]] val_name_q1 = val_names[self.rates_ch_idxs[1]] self.plot_dicts["rb_rate_eq_pops_{}".format(val_name_q0)] = { "plotfn": plot_populations_RB_curve, "ncl": self.proc_data_dict["ncl"], "P0": self.proc_data_dict["Px0"][val_name_q0], "P1": self.proc_data_dict["Px1"][val_name_q0], "P2": self.proc_data_dict["Px2"][val_name_q0], "title": " {}".format(val_name_q0), "ax_id": "rb_pops_q0", } self.plot_dicts["rb_rate_eq_pops_{}".format(val_name_q1)] = { "plotfn": plot_populations_RB_curve, "ncl": self.proc_data_dict["ncl"], "P0": self.proc_data_dict["P0x"][val_name_q1], "P1": self.proc_data_dict["P1x"][val_name_q1], "P2": self.proc_data_dict["P2x"][val_name_q1], "title": " {}".format(val_name_q1), "ax_id": "rb_pops_q1", } self.plot_dicts["cal_points_hexbin_q0"] = { "plotfn": plot_cal_points_hexbin, "shots_0": ( self.proc_data_dict["cal_pts_x0"][val_names[0]], self.proc_data_dict["cal_pts_x0"][val_names[1]], ), "shots_1": ( self.proc_data_dict["cal_pts_x1"][val_names[0]], self.proc_data_dict["cal_pts_x1"][val_names[1]], ), "shots_2": ( self.proc_data_dict["cal_pts_x2"][val_names[0]], self.proc_data_dict["cal_pts_x2"][val_names[1]], ), "xlabel": val_names[0], "xunit": self.proc_data_dict["value_units"][0], "ylabel": val_names[1], "yunit": self.proc_data_dict["value_units"][1], "common_clims": False, "title": self.proc_data_dict["timestamp_string"] + "\n" + self.proc_data_dict["measurementstring"] + " hexbin plot q0", "plotsize": (fs[0] * 1.5, fs[1]), } self.plot_dicts["cal_points_hexbin_q1"] = { "plotfn": plot_cal_points_hexbin, "shots_0": ( self.proc_data_dict["cal_pts_0x"][val_names[2]], self.proc_data_dict["cal_pts_0x"][val_names[3]], ), "shots_1": ( self.proc_data_dict["cal_pts_1x"][val_names[2]], self.proc_data_dict["cal_pts_1x"][val_names[3]], ), "shots_2": ( self.proc_data_dict["cal_pts_2x"][val_names[2]], self.proc_data_dict["cal_pts_2x"][val_names[3]], ), "xlabel": val_names[2], "xunit": self.proc_data_dict["value_units"][2], "ylabel": val_names[3], "yunit": self.proc_data_dict["value_units"][3], "common_clims": False, "title": self.proc_data_dict["timestamp_string"] + "\n" + self.proc_data_dict["measurementstring"] + " hexbin plot q1", "plotsize": 
(fs[0] * 1.5, fs[1]), } # define figure and axes here to have custom layout self.figs["main_rb_decay"], axs = plt.subplots( nrows=2, sharex=True, gridspec_kw={"height_ratios": (2, 1)} ) self.figs["main_rb_decay"].patch.set_alpha(0) self.axs["main_rb_decay"] = axs[0] self.axs["leak_decay"] = axs[1] self.plot_dicts["main_rb_decay"] = { "plotfn": plot_rb_decay_woods_gambetta, "ncl": self.proc_data_dict["ncl"], "M0": self.proc_data_dict["M0"], "X1": self.proc_data_dict["X1"], "ax1": axs[1], "title": self.proc_data_dict["timestamp_string"] + "\n" + self.proc_data_dict["measurementstring"], } self.plot_dicts["fit_leak"] = { "plotfn": self.plot_fit, "ax_id": "leak_decay", "fit_res": self.fit_res["leakage_decay"], "setlabel": "Leakage fit", "do_legend": True, "color": "C2", } self.plot_dicts["fit_rb_simple"] = { "plotfn": self.plot_fit, "ax_id": "main_rb_decay", "fit_res": self.fit_res["rb_decay_simple"], "setlabel": "Simple RB fit", "do_legend": True, } self.plot_dicts["fit_rb"] = { "plotfn": self.plot_fit, "ax_id": "main_rb_decay", "fit_res": self.fit_res["rb_decay"], "setlabel": "Full RB fit", "do_legend": True, "color": "C2", } self.plot_dicts["rb_text"] = { "plotfn": self.plot_text, "text_string": self.proc_data_dict["rb_msg"], "xpos": 1.05, "ypos": 0.6, "ax_id": "main_rb_decay", "horizontalalignment": "left", } self.plot_dicts["correlated_readouts"] = { "plotfn": plot_unitarity_shots, "ncl": self.proc_data_dict["ncl"], "unitarity_shots": self.proc_data_dict["unitarity_shots"], "xlabel": "Number of Cliffords", "xunit": "#", "ylabel": "Unitarity", "yunit": "", "title": self.proc_data_dict["timestamp_string"] + "\n" + self.proc_data_dict["measurementstring"], } self.figs["unitarity"] = plt.subplots(nrows=1) self.plot_dicts["unitarity"] = { "plotfn": plot_unitarity, "ax_id": "unitarity", "ncl": self.proc_data_dict["ncl"], "P": self.proc_data_dict["unitarity"], "xlabel": "Number of Cliffords", "xunit": "#", "ylabel": "Unitarity", "yunit": "frac", "title": self.proc_data_dict["timestamp_string"] + "\n" + self.proc_data_dict["measurementstring"], } self.plot_dicts["fit_unitarity"] = { "plotfn": self.plot_fit, "ax_id": "unitarity", "fit_res": self.fit_res["unitarity_decay"], "setlabel": "Simple unitarity fit", "do_legend": True, } self.plot_dicts["unitarity_text"] = { "plotfn": self.plot_text, "text_string": self.proc_data_dict["unitarity_msg"], "xpos": 0.6, "ypos": 0.8, "ax_id": "unitarity", "horizontalalignment": "left", } class InterleavedRandomizedBenchmarkingAnalysis(ba.BaseDataAnalysis): """ Analysis for two qubit interleaved randomized benchmarking of a CZ gate. [2020-07-12 Victor] upgraded to allow for analysis of iRB for the parked qubit during CZ on the other qubits This is a meta-analysis. It runs "RandomizedBenchmarking_TwoQubit_Analysis" for each of the individual datasets in the "extract_data" method and uses the quantities of interest to create the combined figure. The figure as well as the quantities of interest are stored in the interleaved data file. 
""" def __init__( self, ts_base: str = None, ts_int: str = None, ts_int_idle: str = None, label_base: str = "", label_int: str = "", label_int_idle: str = "", options_dict: dict = {}, auto=True, close_figs=True, rates_I_quad_ch_idxs: list = [0, 2], ignore_f_cal_pts: bool = False, plot_label="", extract_only=False, ): super().__init__( do_fitting=True, close_figs=close_figs, options_dict=options_dict, extract_only=extract_only, ) self.ts_base = ts_base self.ts_int = ts_int self.ts_int_idle = ts_int_idle self.label_base = label_base self.label_int = label_int self.label_int_idle = label_int_idle self.include_idle = self.ts_int_idle or self.label_int_idle assert ts_base or label_base assert ts_int or label_int self.rates_I_quad_ch_idxs = rates_I_quad_ch_idxs self.options_dict = options_dict self.close_figs = close_figs self.ignore_f_cal_pts = ignore_f_cal_pts self.plot_label = plot_label # For other classes derived from this one this will change self.fit_tag = "2Q" self.int_name = "CZ" if auto: self.run_analysis() def extract_data(self): self.raw_data_dict = OrderedDict() a_base = RandomizedBenchmarking_TwoQubit_Analysis( t_start=self.ts_base, label=self.label_base, options_dict=self.options_dict, auto=True, close_figs=self.close_figs, rates_I_quad_ch_idxs=self.rates_I_quad_ch_idxs, extract_only=True, ignore_f_cal_pts=self.ignore_f_cal_pts, ) a_int = RandomizedBenchmarking_TwoQubit_Analysis( t_start=self.ts_int, label=self.label_int, options_dict=self.options_dict, auto=True, close_figs=self.close_figs, rates_I_quad_ch_idxs=self.rates_I_quad_ch_idxs, extract_only=True, ignore_f_cal_pts=self.ignore_f_cal_pts, ) if self.include_idle: a_int_idle = RandomizedBenchmarking_TwoQubit_Analysis( t_start=self.ts_int_idle, label=self.label_int_idle, options_dict=self.options_dict, auto=True, close_figs=self.close_figs, rates_I_quad_ch_idxs=self.rates_I_quad_ch_idxs, extract_only=True, ignore_f_cal_pts=self.ignore_f_cal_pts, ) # order is such that any information (figures, quantities of interest) # are saved in the interleaved file. 
self.timestamps = [a_int.timestamps[0], a_base.timestamps[0]] self.raw_data_dict["timestamps"] = self.timestamps self.raw_data_dict["timestamp_string"] = a_int.proc_data_dict[ "timestamp_string" ] self.raw_data_dict["folder"] = a_int.proc_data_dict["folder"] a_dict = {"base": a_base, "int": a_int} if self.include_idle: a_dict["int_idle"] = a_int_idle self.raw_data_dict["analyses"] = a_dict if not self.plot_label: self.plot_label = a_int.proc_data_dict["measurementstring"] def process_data(self): self.proc_data_dict = OrderedDict() self.proc_data_dict["quantities_of_interest"] = {} qoi = self.proc_data_dict["quantities_of_interest"] qoi_base = self.raw_data_dict["analyses"]["base"].proc_data_dict[ "quantities_of_interest" ] qoi_int = self.raw_data_dict["analyses"]["int"].proc_data_dict[ "quantities_of_interest" ] self.overwrite_qois = True qoi.update({k + "_ref": v for k, v in qoi_base.items()}) qoi.update({k + "_int": v for k, v in qoi_int.items()}) # The functionality of this analysis was extended to make it usable for # interleaved parking idle flux pulse fit_tag = self.fit_tag int_name = self.int_name qoi["eps_%s_X1" % int_name] = interleaved_error( eps_int=qoi_int["eps_X1_%s" % fit_tag], eps_base=qoi_base["eps_X1_%s" % fit_tag], ) qoi["eps_%s_simple" % int_name] = interleaved_error( eps_int=qoi_int["eps_simple_%s" % fit_tag], eps_base=qoi_base["eps_simple_%s" % fit_tag], ) qoi["L1_%s" % int_name] = interleaved_error( eps_int=qoi_int["L1_%s" % fit_tag], eps_base=qoi_base["L1_%s" % fit_tag] ) if self.include_idle: qoi_int_idle = self.raw_data_dict["analyses"]["int_idle"].proc_data_dict[ "quantities_of_interest" ] qoi.update({k + "_int_idle": v for k, v in qoi_int_idle.items()}) qoi["eps_idle_X1"] = interleaved_error( eps_int=qoi_int_idle["eps_X1_%s" % fit_tag], eps_base=qoi_base["eps_X1_%s" % fit_tag], ) qoi["eps_idle_simple"] = interleaved_error( eps_int=qoi_int_idle["eps_simple_%s" % fit_tag], eps_base=qoi_base["eps_simple_%s" % fit_tag], ) qoi["L1_idle"] = interleaved_error( eps_int=qoi_int_idle["L1_%s" % fit_tag], eps_base=qoi_base["L1_%s" % fit_tag], ) if int_name == "CZ": # This is the naive estimate, when all observed error is assigned # to the CZ gate try: qoi["L1_%s_naive" % int_name] = 1 - ( 1 - qoi_base["L1_%s" % fit_tag] ) ** (1 / 1.5) qoi["eps_%s_simple_naive" % int_name] = 1 - ( 1 - qoi_base["eps_simple_%s" % fit_tag] ) ** (1 / 1.5) qoi["eps_%s_X1_naive" % int_name] = 1 - ( 1 - qoi_base["eps_X1_%s" % fit_tag] ) ** (1 / 1.5) except ValueError: # prevents the analysis from crashing if the fits are bad. 
qoi["L1_%s_naive" % int_name] = ufloat(np.NaN, np.NaN) qoi["eps_%s_simple_naive" % int_name] = ufloat(np.NaN, np.NaN) qoi["eps_%s_X1_naive" % int_name] = ufloat(np.NaN, np.NaN) def prepare_plots(self): # Might seem that are not used but there is an `eval` below dd_ref = self.raw_data_dict["analyses"]["base"].proc_data_dict dd_int = self.raw_data_dict["analyses"]["int"].proc_data_dict fr_ref = self.raw_data_dict["analyses"]["base"].fit_res fr_int = self.raw_data_dict["analyses"]["int"].fit_res dds = { "int": dd_int, "ref": dd_ref, } frs = { "int": fr_int, "ref": fr_ref, } if self.include_idle: fr_int_idle = self.raw_data_dict["analyses"]["int_idle"].fit_res dd_int_idle = self.raw_data_dict["analyses"]["int_idle"].proc_data_dict dds["int_idle"] = dd_int_idle frs["int_idle"] = fr_int_idle fs = plt.rcParams["figure.figsize"] self.figs["main_irb_decay"], axs = plt.subplots( nrows=2, sharex=True, gridspec_kw={"height_ratios": (2, 1)}, figsize=(fs[0] * 1.3, fs[1] * 1.3), ) self.figs["main_irb_decay"].patch.set_alpha(0) self.axs["main_irb_decay"] = axs[0] self.axs["leak_decay"] = axs[1] self.plot_dicts["main_irb_decay"] = { "plotfn": plot_irb_decay_woods_gambetta, "ncl": dd_ref["ncl"], "include_idle": self.include_idle, "fit_tag": self.fit_tag, "int_name": self.int_name, "qoi": self.proc_data_dict["quantities_of_interest"], "ax1": axs[1], "title": "{} - {}\n{}".format( self.timestamps[0], self.timestamps[1], self.plot_label ), } def add_to_plot_dict( plot_dict: dict, tag: str, dd_quantities: list, fit_quantities: list, dds: dict, frs: dict, ): for dd_q in dd_quantities: plot_dict[dd_q + "_" + tag] = dds[tag][dd_q][self.fit_tag] for fit_q in fit_quantities: trans = { "rb_decay": "fr_M0", "rb_decay_simple": "fr_M0_simple", "leakage_decay": "fr_X1", } plot_dict[trans[fit_q] + "_" + tag] = frs[tag][ fit_q + "_{}".format(self.fit_tag) ] tags = ["ref", "int"] if self.include_idle: tags.append("int_idle") for tag in tags: add_to_plot_dict( self.plot_dicts["main_irb_decay"], tag=tag, dd_quantities=["M0", "X1"], fit_quantities=["rb_decay", "rb_decay_simple", "leakage_decay"], dds=dds, frs=frs, ) class InterleavedRandomizedBenchmarkingParkingAnalysis( InterleavedRandomizedBenchmarkingAnalysis, ba.BaseDataAnalysis ): """ Analysis for single qubit interleaved randomized benchmarking where the interleaved gate is a parking identity (with the corresponding CZ being applied on the other two qubits) This is a meta-analysis. It runs "RandomizedBenchmarking_SingleQubit_Analysis" for each of the individual datasets in the "extract_data" method and uses the quantities of interest to create the combined figure. The figure as well as the quantities of interest are stored in the interleaved data file. 
""" def __init__( self, ts_base: str = None, ts_int: str = None, label_base: str = "", label_int: str = "", options_dict: dict = {}, auto=True, close_figs=True, rates_I_quad_ch_idx: int = -2, rates_Q_quad_ch_idx: int = None, ignore_f_cal_pts: bool = False, plot_label="", ): # Here we don't want to run the __init__ of the Interleaved analysis, # only the __init__ of the base class ba.BaseDataAnalysis.__init__( self, do_fitting=True, close_figs=close_figs, options_dict=options_dict ) self.ts_base = ts_base self.ts_int = ts_int self.label_base = label_base self.label_int = label_int assert ts_base or label_base assert ts_int or label_int self.rates_I_quad_ch_idx = rates_I_quad_ch_idx self.rates_Q_quad_ch_idx = rates_Q_quad_ch_idx if self.rates_Q_quad_ch_idx is None: self.rates_Q_quad_ch_idx = rates_I_quad_ch_idx + 1 self.options_dict = options_dict self.close_figs = close_figs self.ignore_f_cal_pts = ignore_f_cal_pts self.plot_label = plot_label # For other classes derived from this one this will change self.fit_tag = None # to be set in the extract data self.int_name = "Idle flux" self.include_idle = False if auto: self.run_analysis() def extract_data(self): self.raw_data_dict = OrderedDict() a_base = RandomizedBenchmarking_SingleQubit_Analysis( t_start=self.ts_base, label=self.label_base, options_dict=self.options_dict, auto=True, close_figs=self.close_figs, rates_I_quad_ch_idx=self.rates_I_quad_ch_idx, extract_only=True, ignore_f_cal_pts=self.ignore_f_cal_pts, ) a_int = RandomizedBenchmarking_SingleQubit_Analysis( t_start=self.ts_int, label=self.label_int, options_dict=self.options_dict, auto=True, close_figs=self.close_figs, rates_I_quad_ch_idx=self.rates_I_quad_ch_idx, extract_only=True, ignore_f_cal_pts=self.ignore_f_cal_pts, ) self.fit_tag = a_base.raw_data_dict["value_names"][self.rates_I_quad_ch_idx] # order is such that any information (figures, quantities of interest) # are saved in the interleaved file. self.timestamps = [a_int.timestamps[0], a_base.timestamps[0]] self.raw_data_dict["timestamps"] = self.timestamps self.raw_data_dict["timestamp_string"] = a_int.proc_data_dict[ "timestamp_string" ] self.raw_data_dict["folder"] = a_int.proc_data_dict["folder"] self.raw_data_dict["analyses"] = {"base": a_base, "int": a_int} if not self.plot_label: self.plot_label = a_int.proc_data_dict["measurementstring"] class CharacterBenchmarking_TwoQubit_Analysis(ba.BaseDataAnalysis): """ Analysis for character benchmarking. 
""" def __init__( self, t_start: str = None, t_stop: str = None, label="", options_dict: dict = None, auto=True, close_figs=True, ch_idxs: list = [0, 2], ): if options_dict is None: options_dict = dict() super().__init__( t_start=t_start, t_stop=t_stop, label=label, options_dict=options_dict, close_figs=close_figs, do_fitting=True, ) self.d1 = 4 self.ch_idxs = ch_idxs if auto: self.run_analysis() def extract_data(self): self.raw_data_dict = OrderedDict() self.timestamps = a_tools.get_timestamps_in_range( self.t_start, self.t_stop, label=self.labels ) a = ma_old.MeasurementAnalysis( timestamp=self.timestamps[0], auto=False, close_file=False ) a.get_naming_and_values() bins = a.data_file["Experimental Data"]["Experimental Metadata"]["bins"][()] a.finish() self.raw_data_dict["measurementstring"] = a.measurementstring self.raw_data_dict["timestamp_string"] = a.timestamp_string self.raw_data_dict["folder"] = a.folder self.raw_data_dict["timestamps"] = self.timestamps df = pd.DataFrame( columns={"ncl", "pauli", "I_q0", "Q_q0", "I_q1", "Q_q1", "interleaving_cl"} ) df["ncl"] = bins # Assumptions on the structure of the datafile are made here. # For every Clifford, 4 random pauli's are sampled from the different # sub sets: paulis = [ "II", # 'IZ', 'ZI', 'ZZ', # P00 "IX", # 'IY', 'ZX', 'ZY', # P01 "XI", # 'XZ', 'YI', 'YZ', # P10 "XX", ] # 'XY', 'YX', 'YY'] # P11 paulis_df = np.tile(paulis, 34)[: len(bins)] # The calibration points do not correspond to a Pauli paulis_df[-7:] = np.nan df["pauli"] = paulis_df # The four different random Pauli's are performed both with # and without the interleaving CZ gate. df["interleaving_cl"] = np.tile([""] * 4 + ["CZ"] * 4, len(bins) // 8 + 1)[ : len(bins) ] # Data is grouped and single shots are averaged. for i, ch in enumerate(["I_q0", "Q_q0", "I_q1", "Q_q1"]): binned_yvals = np.reshape(a.measured_values[i], (len(bins), -1), order="F") yvals = np.mean(binned_yvals, axis=1) df[ch] = yvals self.raw_data_dict["df"] = df def process_data(self): self.proc_data_dict = OrderedDict() df = self.raw_data_dict["df"] cal_points = [ # calibration point indices are when ignoring the f-state cal pts [[-7, -5], [-6, -4], [-3, -1]], # q0 [[-7, -5], [-6, -4], [-3, -1]], # q0 [[-7, -6], [-5, -4], [-2, -1]], # q1 [[-7, -6], [-5, -4], [-2, -1]], # q1 ] for ch, cal_pt in zip(["I_q0", "Q_q0", "I_q1", "Q_q1"], cal_points): df[ch + "_normed"] = a_tools.normalize_data_v3( df[ch].values, cal_zero_points=cal_pt[0], cal_one_points=cal_pt[1] ) df["P_|00>"] = (1 - df["I_q0_normed"]) * (1 - df["Q_q1_normed"]) P00 = ( df.loc[df["pauli"].isin(["II", "IZ", "ZI", "ZZ"])] .loc[df["interleaving_cl"] == ""] .groupby("ncl") .mean() ) P01 = ( df.loc[df["pauli"].isin(["IX", "IY", "ZX", "ZY"])] .loc[df["interleaving_cl"] == ""] .groupby("ncl") .mean() ) P10 = ( df.loc[df["pauli"].isin(["XI", "XZ", "YI", "YZ"])] .loc[df["interleaving_cl"] == ""] .groupby("ncl") .mean() ) P11 = ( df.loc[df["pauli"].isin(["XX", "XY", "YX", "YY"])] .loc[df["interleaving_cl"] == ""] .groupby("ncl") .mean() ) P00_CZ = ( df.loc[df["pauli"].isin(["II", "IZ", "ZI", "ZZ"])] .loc[df["interleaving_cl"] == "CZ"] .groupby("ncl") .mean() ) P01_CZ = ( df.loc[df["pauli"].isin(["IX", "IY", "ZX", "ZY"])] .loc[df["interleaving_cl"] == "CZ"] .groupby("ncl") .mean() ) P10_CZ = ( df.loc[df["pauli"].isin(["XI", "XZ", "YI", "YZ"])] .loc[df["interleaving_cl"] == "CZ"] .groupby("ncl") .mean() ) P11_CZ = ( df.loc[df["pauli"].isin(["XX", "XY", "YX", "YY"])] .loc[df["interleaving_cl"] == "CZ"] .groupby("ncl") .mean() ) # Calculate the character 
function # Eq. 7 of Xue et al. ArXiv 1811.04002v1 C1 = P00["P_|00>"] - P01["P_|00>"] + P10["P_|00>"] - P11["P_|00>"] C2 = P00["P_|00>"] + P01["P_|00>"] - P10["P_|00>"] - P11["P_|00>"] C12 = P00["P_|00>"] - P01["P_|00>"] - P10["P_|00>"] + P11["P_|00>"] C1_CZ = ( P00_CZ["P_|00>"] - P01_CZ["P_|00>"] + P10_CZ["P_|00>"] - P11_CZ["P_|00>"] ) C2_CZ = ( P00_CZ["P_|00>"] + P01_CZ["P_|00>"] - P10_CZ["P_|00>"] - P11_CZ["P_|00>"] ) C12_CZ = ( P00_CZ["P_|00>"] - P01_CZ["P_|00>"] - P10_CZ["P_|00>"] + P11_CZ["P_|00>"] ) char_df = pd.DataFrame( { "P00": P00["P_|00>"], "P01": P01["P_|00>"], "P10": P10["P_|00>"], "P11": P11["P_|00>"], "P00_CZ": P00_CZ["P_|00>"], "P01_CZ": P01_CZ["P_|00>"], "P10_CZ": P10_CZ["P_|00>"], "P11_CZ": P11_CZ["P_|00>"], "C1": C1, "C2": C2, "C12": C12, "C1_CZ": C1_CZ, "C2_CZ": C2_CZ, "C12_CZ": C12_CZ, } ) self.proc_data_dict["char_df"] = char_df def run_fitting(self): super().run_fitting() char_df = self.proc_data_dict["char_df"] # Eq. 8 of Xue et al. ArXiv 1811.04002v1 for char_key in ["C1", "C2", "C12", "C1_CZ", "C2_CZ", "C12_CZ"]: char_mod = lmfit.Model(char_decay, independent_vars="m") char_mod.set_param_hint("A", value=1, vary=True) char_mod.set_param_hint("alpha", value=0.95) params = char_mod.make_params() self.fit_res[char_key] = char_mod.fit( data=char_df[char_key].values, m=char_df.index, params=params ) def analyze_fit_results(self): fr = self.fit_res self.proc_data_dict["quantities_of_interest"] = {} qoi = self.proc_data_dict["quantities_of_interest"] qoi["alpha1"] = ufloat( fr["C1"].params["alpha"].value, fr["C1"].params["alpha"].stderr ) qoi["alpha2"] = ufloat( fr["C2"].params["alpha"].value, fr["C2"].params["alpha"].stderr ) qoi["alpha12"] = ufloat( fr["C12"].params["alpha"].value, fr["C12"].params["alpha"].stderr ) # eq. 9 from Xue et al. ArXiv 1811.04002v1 qoi["alpha_char"] = ( 3 / 15 * qoi["alpha1"] + 3 / 15 * qoi["alpha2"] + 9 / 15 * qoi["alpha12"] ) qoi["alpha1_CZ_int"] = ufloat( fr["C1_CZ"].params["alpha"].value, fr["C1_CZ"].params["alpha"].stderr ) qoi["alpha2_CZ_int"] = ufloat( fr["C2_CZ"].params["alpha"].value, fr["C2_CZ"].params["alpha"].stderr ) qoi["alpha12_CZ_int"] = ufloat( fr["C12_CZ"].params["alpha"].value, fr["C12_CZ"].params["alpha"].stderr ) qoi["alpha_char_CZ_int"] = ( 3 / 15 * qoi["alpha1_CZ_int"] + 3 / 15 * qoi["alpha2_CZ_int"] + 9 / 15 * qoi["alpha12_CZ_int"] ) qoi["eps_ref"] = depolarizing_par_to_eps(qoi["alpha_char"], d=4) qoi["eps_int"] = depolarizing_par_to_eps(qoi["alpha_char_CZ_int"], d=4) # Interleaved error calculation Magesan et al. 
PRL 2012 qoi["eps_CZ"] = 1 - (1 - qoi["eps_int"]) / (1 - qoi["eps_ref"]) def prepare_plots(self): char_df = self.proc_data_dict["char_df"] # self.figs['puali_decays'] self.plot_dicts["pauli_decays"] = { "plotfn": plot_char_RB_pauli_decays, "ncl": char_df.index.values, "P00": char_df["P00"].values, "P01": char_df["P01"].values, "P10": char_df["P10"].values, "P11": char_df["P11"].values, "P00_CZ": char_df["P00_CZ"].values, "P01_CZ": char_df["P01_CZ"].values, "P10_CZ": char_df["P10_CZ"].values, "P11_CZ": char_df["P11_CZ"].values, "title": self.raw_data_dict["measurementstring"] + "\n" + self.raw_data_dict["timestamp_string"] + "\nPauli decays", } self.plot_dicts["char_decay"] = { "plotfn": plot_char_RB_decay, "ncl": char_df.index.values, "C1": char_df["C1"].values, "C2": char_df["C2"].values, "C12": char_df["C12"].values, "C1_CZ": char_df["C1_CZ"].values, "C2_CZ": char_df["C2_CZ"].values, "C12_CZ": char_df["C12_CZ"].values, "fr_C1": self.fit_res["C1"], "fr_C2": self.fit_res["C2"], "fr_C12": self.fit_res["C12"], "fr_C1_CZ": self.fit_res["C1_CZ"], "fr_C2_CZ": self.fit_res["C2_CZ"], "fr_C12_CZ": self.fit_res["C12_CZ"], "title": self.raw_data_dict["measurementstring"] + "\n" + self.raw_data_dict["timestamp_string"] + "\nCharacter decay", } self.plot_dicts["quantities_msg"] = { "plotfn": plot_char_rb_quantities, "ax_id": "char_decay", "qoi": self.proc_data_dict["quantities_of_interest"], } def plot_cal_points_hexbin( shots_0, shots_1, shots_2, xlabel: str, xunit: str, ylabel: str, yunit: str, title: str, ax, common_clims: bool = True, **kw ): # Choose colormap cmaps = [plt.cm.Blues, plt.cm.Reds, plt.cm.Greens] alpha_cmaps = [] for cmap in cmaps: my_cmap = cmap(np.arange(cmap.N)) my_cmap[:, -1] = np.linspace(0, 1, cmap.N) my_cmap = ListedColormap(my_cmap) alpha_cmaps.append(my_cmap) f = plt.gcf() mincnt = 1 hbs = [] shots_list = [shots_0, shots_1, shots_2] for i, shots in enumerate(shots_list): hb = ax.hexbin( x=shots[0], y=shots[1], cmap=alpha_cmaps[i], mincnt=mincnt, norm=PowerNorm(gamma=0.25), ) cb = f.colorbar(hb, ax=ax) cb.set_label(r"Counts $|{}\rangle$".format(i)) hbs.append(hb) if common_clims: clims = [hb.get_clim() for hb in hbs] clim = np.min(clims), np.max(clims) for hb in hbs: hb.set_clim(clim) set_xlabel(ax, xlabel, xunit) set_ylabel(ax, ylabel, yunit) ax.set_title(title) def plot_raw_RB_curve( ncl, SI, SX, V0, V1, V2, title, ax, xlabel, xunit, ylabel, yunit, **kw ): ax.plot(ncl, SI, label="SI", marker="o") ax.plot(ncl, SX, label="SX", marker="o") ax.plot(ncl[-1] + 0.5, V0, label="V0", marker="d", c="C0") ax.plot(ncl[-1] + 1.5, V1, label="V1", marker="d", c="C1") ax.plot(ncl[-1] + 2.5, V2, label="V2", marker="d", c="C2") ax.set_title(title) set_xlabel(ax, xlabel, xunit) set_ylabel(ax, ylabel, yunit) ax.legend() def plot_populations_RB_curve(ncl, P0, P1, P2, title, ax, **kw): ax.axhline(0.5, c="k", lw=0.5, ls="--") ax.plot(ncl, P0, c="C0", label=r"P($|g\rangle$)", marker="v") ax.plot(ncl, P1, c="C3", label=r"P($|e\rangle$)", marker="^") ax.plot(ncl, P2, c="C2", label=r"P($|f\rangle$)", marker="d") ax.set_xlabel("Number of Cliffords (#)") ax.set_ylabel("Population") ax.grid(axis="y") ax.legend() ax.set_ylim(-0.05, 1.05) ax.set_title(title) def plot_unitarity_shots(ncl, unitarity_shots, title, ax=None, **kw): ax.axhline(0.5, c="k", lw=0.5, ls="--") ax.plot(ncl, unitarity_shots, ".") ax.set_xlabel("Number of Cliffords (#)") ax.set_ylabel("unitarity") ax.grid(axis="y") ax.legend() ax.set_ylim(-1.05, 1.05) ax.set_title(title) def plot_unitarity(ncl, P, title, ax=None, **kw): ax.plot(ncl, P, 
"o") ax.set_xlabel("Number of Cliffords (#)") ax.set_ylabel("unitarity") ax.grid(axis="y") ax.legend() ax.set_ylim(-0.05, 1.05) ax.set_title(title) def plot_char_RB_pauli_decays( ncl, P00, P01, P10, P11, P00_CZ, P01_CZ, P10_CZ, P11_CZ, title, ax, **kw ): """ Plots the raw recovery probabilities for a character RB experiment. """ ax.plot(ncl, P00, c="C0", label=r"$P_{00}$", marker="o", ls="--") ax.plot(ncl, P01, c="C1", label=r"$P_{01}$", marker="o", ls="--") ax.plot(ncl, P10, c="C2", label=r"$P_{10}$", marker="o", ls="--") ax.plot(ncl, P11, c="C3", label=r"$P_{11}$", marker="o", ls="--") ax.plot( ncl, P00_CZ, c="C0", label=r"$P_{00}$-int. CZ", marker="d", alpha=0.5, ls=":" ) ax.plot( ncl, P01_CZ, c="C1", label=r"$P_{01}$-int. CZ", marker="d", alpha=0.5, ls=":" ) ax.plot( ncl, P10_CZ, c="C2", label=r"$P_{10}$-int. CZ", marker="d", alpha=0.5, ls=":" ) ax.plot( ncl, P11_CZ, c="C3", label=r"$P_{11}$-int. CZ", marker="d", alpha=0.5, ls=":" ) ax.set_xlabel("Number of Cliffords (#)") ax.set_ylabel(r"$P |00\rangle$") ax.legend(loc=(1.05, 0)) ax.set_ylim(-0.05, 1.05) ax.set_title(title) def plot_char_RB_decay( ncl, C1, C2, C12, C1_CZ, C2_CZ, C12_CZ, fr_C1, fr_C2, fr_C12, fr_C1_CZ, fr_C2_CZ, fr_C12_CZ, title, ax, **kw ): ncl_fine = np.linspace(np.min(ncl), np.max(ncl), 101) plot_fit(ncl_fine, fr_C1, ax, ls="-", c="C0") ax.plot( ncl, C1, c="C0", label=r"$C_1$: $A_1\cdot {\alpha_{1|2}}^m$", marker="o", ls="" ) plot_fit(ncl_fine, fr_C2, ax, ls="-", c="C1") ax.plot( ncl, C2, c="C1", label=r"$C_2$: $A_1\cdot {\alpha_{2|1}}^m$", marker="o", ls="" ) plot_fit(ncl_fine, fr_C12, ax, ls="-", c="C2") ax.plot( ncl, C12, c="C2", label=r"$C_{12}$: $A_1\cdot {\alpha_{12}}^m$", marker="o", ls="", ) plot_fit(ncl_fine, fr_C1_CZ, ax, ls="--", c="C0", alpha=0.5) ax.plot( ncl, C1_CZ, c="C0", label=r"$C_1^{int.}$: $A_1' \cdot {\alpha_{1|2}'}^m$", marker="d", ls="", alpha=0.5, ) plot_fit(ncl_fine, fr_C2_CZ, ax, ls="--", c="C1", alpha=0.5) ax.plot( ncl, C2_CZ, c="C1", label=r"$C_2^{int.}$: $A_2' \cdot {\alpha_{2|1}'}^m$", marker="d", ls="", alpha=0.5, ) plot_fit(ncl_fine, fr_C12_CZ, ax, ls="--", c="C2", alpha=0.5) ax.plot( ncl, C12_CZ, c="C2", label=r"$C_{12}^{int.}$: $A_{12}' \cdot {\alpha_{12}'}^m$", marker="d", ls="", alpha=0.5, ) ax.set_xlabel("Number of Cliffords (#)") ax.set_ylabel("Population") ax.legend(title="Character decay", ncol=2, loc=(1.05, 0.6)) ax.set_title(title) def plot_char_rb_quantities(ax, qoi, **kw): """ Plots a text message of the main quantities extracted from char rb """ def gen_val_str(alpha, alpha_p): val_str = " {:.3f}$\pm${:.3f} {:.3f}$\pm${:.3f}" return val_str.format( alpha.nominal_value, alpha.std_dev, alpha_p.nominal_value, alpha_p.std_dev ) alpha_msg = " Reference Interleaved" alpha_msg += "\n" r"$\alpha_{1|2}$" + "\t" alpha_msg += gen_val_str(qoi["alpha1"], qoi["alpha1_CZ_int"]) alpha_msg += "\n" r"$\alpha_{2|1}$" + "\t" alpha_msg += gen_val_str(qoi["alpha2"], qoi["alpha2_CZ_int"]) alpha_msg += "\n" r"$\alpha_{12}$" + "\t" alpha_msg += gen_val_str(qoi["alpha12"], qoi["alpha12_CZ_int"]) alpha_msg += "\n" + "_" * 40 + "\n" alpha_msg += "\n" r"$\epsilon_{Ref.}$" + "\t" alpha_msg += "{:.3f}$\pm${:.3f}%".format( qoi["eps_ref"].nominal_value * 100, qoi["eps_ref"].std_dev * 100 ) alpha_msg += "\n" r"$\epsilon_{Int.}$" + "\t" alpha_msg += "{:.3f}$\pm${:.3f}%".format( qoi["eps_int"].nominal_value * 100, qoi["eps_int"].std_dev * 100 ) alpha_msg += "\n" r"$\epsilon_{CZ.}$" + "\t" alpha_msg += "{:.3f}$\pm${:.3f}%".format( qoi["eps_CZ"].nominal_value * 100, qoi["eps_CZ"].std_dev * 100 ) ax.text(1.05, 
0.0, alpha_msg, transform=ax.transAxes) def logisticreg_classifier_machinelearning(shots_0, shots_1, shots_2): """ """ # reshaping of the entries in proc_data_dict shots_0 = np.array(list(zip(list(shots_0.values())[0], list(shots_0.values())[1]))) shots_1 = np.array(list(zip(list(shots_1.values())[0], list(shots_1.values())[1]))) shots_2 = np.array(list(zip(list(shots_2.values())[0], list(shots_2.values())[1]))) shots_0 = shots_0[~np.isnan(shots_0[:, 0])] shots_1 = shots_1[~np.isnan(shots_1[:, 0])] shots_2 = shots_2[~np.isnan(shots_2[:, 0])] X = np.concatenate([shots_0, shots_1, shots_2]) Y = np.concatenate( [ 0 * np.ones(shots_0.shape[0]), 1 * np.ones(shots_1.shape[0]), 2 * np.ones(shots_2.shape[0]), ] ) logreg = linear_model.LogisticRegression(C=1e5) logreg.fit(X, Y) return logreg def plot_classifier_decission_boundary( shots_0, shots_1, shots_2, classifier, xlabel: str, xunit: str, ylabel: str, yunit: str, title: str, ax, **kw ): """ Plot decision boundary on top of the hexbin plot of the training dataset. """ grid_points = 200 x_min = np.nanmin([shots_0[0], shots_1[0], shots_2[0]]) x_max = np.nanmax([shots_0[0], shots_1[0], shots_2[0]]) y_min = np.nanmin([shots_0[1], shots_1[1], shots_2[1]]) y_max = np.nanmax([shots_0[1], shots_1[1], shots_2[1]]) xx, yy = np.meshgrid( np.linspace(x_min, x_max, grid_points), np.linspace(y_min, y_max, grid_points) ) Z = classifier.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) plot_cal_points_hexbin( shots_0=shots_0, shots_1=shots_1, shots_2=shots_2, xlabel=xlabel, xunit=xunit, ylabel=ylabel, yunit=yunit, title=title, ax=ax, ) ax.pcolormesh(xx, yy, Z, cmap=c.ListedColormap(["C0", "C3", "C2"]), alpha=0.2) def plot_rb_decay_woods_gambetta(ncl, M0, X1, ax, ax1, title="", **kw): ax.plot(ncl, M0, marker="o", linestyle="") ax1.plot(ncl, X1, marker="d", linestyle="") ax.grid(axis="y") ax1.grid(axis="y") ax.set_ylim(-0.05, 1.05) ax1.set_ylim(min(min(0.97 * X1), 0.92), 1.01) ax.set_ylabel(r"$M_0$ probability") ax1.set_ylabel(r"$\chi_1$ population") ax1.set_xlabel("Number of Cliffords") ax.set_title(title) def plot_irb_decay_woods_gambetta( ncl, M0_ref, M0_int, X1_ref, X1_int, fr_M0_ref, fr_M0_int, fr_M0_simple_ref, fr_M0_simple_int, fr_X1_ref, fr_X1_int, qoi, ax, ax1, fit_tag, int_name, title="", include_idle=False, M0_int_idle=None, X1_int_idle=None, fr_M0_int_idle=None, fr_M0_simple_int_idle=None, fr_X1_int_idle=None, **kw ): ncl_fine = np.linspace(ncl[0], ncl[-1], 1001) ax.plot(ncl, M0_ref, marker="o", linestyle="", c="C0", label="Reference") plot_fit(ncl_fine, fr_M0_ref, ax=ax, c="C0") ax.plot( ncl, M0_int, marker="d", linestyle="", c="C1", label="Interleaved {}".format(int_name), ) plot_fit(ncl_fine, fr_M0_int, ax=ax, c="C1") if include_idle: ax.plot( ncl, M0_int_idle, marker="^", linestyle="", c="C2", label="Interleaved Idle" ) plot_fit(ncl_fine, fr_M0_int_idle, ax=ax, c="C2") ax.grid(axis="y") ax.set_ylim(-0.05, 1.05) ax.set_ylabel(r"$M_0$ probability") ax1.plot(ncl, X1_ref, marker="o", linestyle="", c="C0") ax1.plot(ncl, X1_int, marker="d", linestyle="", c="C1") plot_fit(ncl_fine, fr_X1_ref, ax=ax1, c="C0") plot_fit(ncl_fine, fr_X1_int, ax=ax1, c="C1") if include_idle: ax1.plot(ncl, X1_int_idle, marker="^", linestyle="", c="C2") plot_fit(ncl_fine, fr_X1_int_idle, ax=ax1, c="C2") ax1.grid(axis="y") ax1.set_ylim(min(min(0.97 * X1_int), 0.92), 1.01) ax1.set_ylabel(r"$\chi_1$ population") ax1.set_xlabel("Number of Cliffords") ax.set_title(title) ax.legend(loc="best") collabels = [r"$\epsilon_{\chi1}~(\%)$", r"$\epsilon~(\%)$", r"$L_1~(\%)$"] 
idle_r_labels0 = ["Interl. Idle curve"] if include_idle else [] idle_r_labels1 = ["Idle-interleaved"] if include_idle else [] rowlabels = ( ["Ref. curve"] + idle_r_labels0 + ["Interl. {} curve".format(int_name)] + idle_r_labels1 + ["{}-interleaved".format(int_name)] ) if int_name == "CZ": rowlabels += ["{}-naive".format(int_name)] idle_r_extracted = ( [[qoi["eps_idle_X1"] * 100, qoi["eps_idle_simple"] * 100, qoi["L1_idle"] * 100]] if include_idle else [] ) idle_r_fit = ( [ [ qoi["eps_X1_{}_int_idle".format(fit_tag)] * 100, qoi["eps_simple_{}_int_idle".format(fit_tag)] * 100, qoi["L1_{}_int_idle".format(fit_tag)] * 100, ] ] if include_idle else [] ) table_data = ( [ [ qoi["eps_X1_{}_ref".format(fit_tag)] * 100, qoi["eps_simple_{}_ref".format(fit_tag)] * 100, qoi["L1_{}_ref".format(fit_tag)] * 100, ] ] + idle_r_fit + [ [ qoi["eps_X1_{}_int".format(fit_tag)] * 100, qoi["eps_simple_{}_int".format(fit_tag)] * 100, qoi["L1_{}_int".format(fit_tag)] * 100, ] ] + idle_r_extracted + [ [ qoi["eps_{}_X1".format(int_name)] * 100, qoi["eps_{}_simple".format(int_name)] * 100, qoi["L1_{}".format(int_name)] * 100, ] ] ) if int_name == "CZ": table_data += [ [ qoi["eps_{}_X1_naive".format(int_name)] * 100, qoi["eps_{}_simple_naive".format(int_name)] * 100, qoi["L1_{}_naive".format(int_name)] * 100, ] ] # Avoid too many digits when the uncertainty is np.nan for i, row in enumerate(table_data): for j, u_val in enumerate(row): if np.isnan(u_val.n) and np.isnan(u_val.s): table_data[i][j] = "nan+/-nan" elif np.isnan(u_val.s): # Keep 3 significant digits only table_data[i][j] = "{:.3g}+/-nan".format(u_val.n) ax1.table( cellText=table_data, colLabels=collabels, rowLabels=rowlabels, transform=ax1.transAxes, cellLoc="center", rowLoc="center", bbox=(0.1, -2.5, 1, 2), ) def interleaved_error(eps_int, eps_base): # Interleaved error calculation Magesan et al. PRL 2012 eps = 1 - (1 - eps_int) / (1 - eps_base) return eps def leak_decay(A, B, lambda_1, m): """ Eq. (9) of Wood Gambetta 2018. A ~= L2/ (L1+L2) B ~= L1/ (L1+L2) + eps_m lambda_1 = 1 - L1 - L2 """ return A + B * lambda_1 ** m def full_rb_decay(A, B, C, lambda_1, lambda_2, m): """Eq. (15) of Wood Gambetta 2018.""" return A + B * lambda_1 ** m + C * lambda_2 ** m def unitarity_decay(A, B, u, m): """Eq. (8) of Wallman et al. New J. Phys. 2015.""" return A + B * u ** m def char_decay(A, alpha, m): """ From Helsen et al. A new class of efficient RB protocols. Theory in Helsen et al. arXiv:1806.02048 Eq. 8 of Xue et al. ArXiv 1811.04002v1 (experimental implementation) Parameters ---------- A (float): Scaling factor of the decay alpha (float): depolarizing parameter to be estimated m (array) number of cliffords returns: A * α**m """ return A * alpha ** m def depolarizing_par_to_eps(alpha, d): """ Convert depolarizing parameter to infidelity. Dugas et al. arXiv:1610.05296v2 contains a nice overview table of common RB paramater conversions. Parameters ---------- alpha (float): depolarizing parameter, also commonly referred to as lambda or p. d (int): dimension of the system, 2 for a single qubit, 4 for two-qubits. Returns ------- eps = (1-alpha)*(d-1)/d """ return (1 - alpha) * (d - 1) / d
mit
681,477,638,738,316,000
35.15968
88
0.494314
false
mathstuf/ranger
ranger/gui/widgets/taskview.py
1
2838
# Copyright (C) 2009-2013 Roman Zimbelmann <[email protected]> # This software is distributed under the terms of the GNU GPL version 3. """The TaskView allows you to modify what the loader is doing.""" from . import Widget from ranger.ext.accumulator import Accumulator class TaskView(Widget, Accumulator): old_lst = None def __init__(self, win): Widget.__init__(self, win) Accumulator.__init__(self) self.scroll_begin = 0 def draw(self): base_clr = [] base_clr.append('in_taskview') lst = self.get_list() if self.old_lst != lst: self.old_lst = lst self.need_redraw = True if self.need_redraw: self.win.erase() if not self.pointer_is_synced(): self.sync_index() if self.hei <= 0: return self.addstr(0, 0, "Task View") self.color_at(0, 0, self.wid, tuple(base_clr), 'title') if lst: for i in range(self.hei - 1): i += self.scroll_begin try: obj = lst[i] except IndexError: break y = i + 1 clr = list(base_clr) if self.pointer == i: clr.append('selected') descr = obj.get_description() if obj.progressbar_supported and obj.percent >= 0 \ and obj.percent <= 100: self.addstr(y, 0, "%3.2f%% - %s" % \ (obj.percent, descr), self.wid) wid = int(self.wid / 100.0 * obj.percent) self.color_at(y, 0, self.wid, tuple(clr)) self.color_at(y, 0, wid, tuple(clr), 'loaded') else: self.addstr(y, 0, descr, self.wid) self.color_at(y, 0, self.wid, tuple(clr)) else: if self.hei > 1: self.addstr(1, 0, "No task in the queue.") self.color_at(1, 0, self.wid, tuple(base_clr), 'error') self.color_reset() def finalize(self): y = self.y + 1 + self.pointer - self.scroll_begin self.fm.ui.win.move(y, self.x) def task_remove(self, i=None): if i is None: i = self.pointer if self.fm.loader.queue: self.fm.loader.remove(index=i) def task_move(self, to, i=None): if i is None: i = self.pointer self.fm.loader.move(_from=i, to=to) def press(self, key): self.fm.ui.keymaps.use_keymap('taskview') self.fm.ui.press(key) def get_list(self): return self.fm.loader.queue
gpl-3.0
602,735,406,371,060,500
29.516129
75
0.471811
false
eReuse/DeviceHub
setup.py
1
3827
from setuptools import find_packages, setup from setuptools.command.develop import develop from setuptools.command.install import install tests_require = [ 'assertpy' ] def install_r_packages(): """Installs required R packages""" from rpy2.robjects import packages as rpackages, r # Install R packages # Adapted from https://rpy2.github.io/doc/v2.9.x/html/introduction.html#installing-packages if not rpackages.isinstalled('devtools'): utils = rpackages.importr('utils') utils.chooseCRANmirror(ind=1) utils.install_packages('devtools') r.library('devtools') r.install_github('eReuse/Rdevicescore', ref='1.0') r.install_github('eReuse/Rdeviceprice', ref='1.0.1') class PostInstall(install): def run(self): result = super().run() install_r_packages() return result class PostInstallDevelop(develop): def run(self): result = super().run() install_r_packages() return result setup( name='eReuse-DeviceHub', version='0.7', packages=find_packages(exclude=('contrib', 'docs', 'scripts')), url='https://github.com/eReuse/DeviceHub', license='AGPLv3 License', author='eReuse.org team', author_email='[email protected]', description='DeviceHub is a system to manage devices focused in reusing them. ', # Updated in 2017-07-29 install_requires=[ 'inflection>=0.3.1', 'eve>=0.6.3,<0.8', 'werkzeug>=0.9.4,<=0.11.15', 'flask>=0.11,<0.12', # eve < 0.8 breaks with flask 0.12 'passlib>=1.6.5,<2.0', 'validators>=0.10,<0.20', 'requests>=2.9.1,<3.0', 'python-gnupg>=0.3.8', # To use gnupg, install gpg2 # Superior versions have a version mismatch of werkzeug with eve # Only update flask-caching when updating eve to >=0.8 'flask-caching<=1.2.0', 'python-gnupg', 'iso3166', 'flask-excel>=0.0.7,<0.0.8', 'pyexcel-ods', 'pyexcel-xlsx', 'pydash>=4.0.1,<5.0', 'sortedcontainers>=1.5.7,<1.6', 'geojson_utils', 'geojson>=1.3.4,<1.4', 'geoip2>=2.4.2,<3', 'flask-cors>=3.0.2,<4', 'shortid>=0.1.2,<0.2', 'beautifulsoup4>=4.6,<5', 'wikipedia>=1.4,<2', 'Flask-WeasyPrint', 'toolz>=0.8,<1.0', 'Flask-Mail', 'rpy2', 'ereuse-utils [naming]', 'lxml' ], keywords='eReuse.org DeviceHub devices devicehub reuse recycle it asset management', # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-extras-optional-features-with-their-own-dependencies # Install it with pip install .[docs] or pip install -e .[docs] extras_require={ 'docs': [ 'sphinx>=1.4.7', 'sphinxcontrib-httpdomain>=1.5' ], 'test': tests_require }, # Use `python setup.py test` to run the tests # http://setuptools.readthedocs.io/en/latest/setuptools.html#test-build-package-and-run-a-unittest-suite test_suite='ereuse_devicehub.tests', tests_require=tests_require, include_package_data=True, classifiers=[ 'Development Status :: 3 - Alpha', 'Environment :: Web Environment', 'Framework :: Flask', 'Intended Audience :: Manufacturing', 'License :: OSI Approved :: GNU Affero General Public License v3', 'Natural Language :: English', 'Operating System :: OS Independent', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Topic :: Office/Business', 'Topic :: Internet :: WWW/HTTP', 'Topic :: Internet :: WWW/HTTP :: Dynamic Content' ], cmdclass={ 'install': PostInstall, 'develop': PostInstallDevelop } )
agpl-3.0
-3,400,840,527,315,715,600
32.570175
127
0.600209
false
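The setup.py above hooks one-off work (installing R packages through rpy2) into installation by subclassing the setuptools install and develop commands and running the extra step after the standard one. Below is a stripped-down, self-contained sketch of that hook pattern; extra_work is a placeholder and is not part of the original project.

from setuptools import setup
from setuptools.command.develop import develop
from setuptools.command.install import install


def extra_work():
    # placeholder for one-off post-install steps (the original installs R packages)
    print("running post-install work")


class PostInstall(install):
    def run(self):
        result = super().run()
        extra_work()
        return result


class PostInstallDevelop(develop):
    def run(self):
        result = super().run()
        extra_work()
        return result


setup(
    name="example-package",
    version="0.1",
    cmdclass={"install": PostInstall, "develop": PostInstallDevelop},
)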
maizy/zaglushka
zaglushka_tests/test_proxied_response.py
1
5833
# coding: utf-8 import time import asyncio from http import HTTPStatus from os import path import socket from io import StringIO from tornado.httpclient import HTTPResponse import zaglushka from zaglushka_tests import ZaglushkaAsyncHTTPTestCase, TEST_RESOURCES_DIR class ProxiedResponseTestCase(ZaglushkaAsyncHTTPTestCase): def __init__(self, *args, **kwargs): super(ProxiedResponseTestCase, self).__init__(*args, **kwargs) self._orig_fetch = zaglushka._fetch_request def setUp(self): self._fetch_called = False self._fetch_request = None super(ProxiedResponseTestCase, self).setUp() def stub_response(self, code, emulated_delay=0.1, **response_args): self._fetch_called = False self._fetch_request = None async def _fetch_stub(http_client, request): response = HTTPResponse(request=request, code=code, **response_args) self._fetch_request = request self._fetch_called = True await asyncio.sleep(emulated_delay) return response zaglushka._fetch_request = _fetch_stub def assertFetchCalled(self): self.assertTrue(self._fetch_called) def assertFetchUrl(self, url): self.assertIsNotNone(self._fetch_request) self.assertEqual(self._fetch_request.url, url) def tearDown(self): zaglushka._fetch_request = self._orig_fetch super(ProxiedResponseTestCase, self).tearDown() def get_zaglushka_config(self): return { 'urls': [ { 'path': '/fixed_proxy', 'method': 'POST', 'response_proxy': 'http://example.com/path.json', }, { 'path_regexp': r'^/re_proxy/(\d+)/(\w+).json$', 'response_proxy': 'http://re.example.com/resource/$2/$1/$1.js', }, { 'path_regexp': '^/re_proxy2/(.*)$', 'response_proxy': 'http://re2.example.com/resource/$1.js', 'headers': { 'Overwrite': 'yes', 'Other': ['a', 'b'] } }, { 'path': '/fixed_proxy2', 'response_proxy': 'http://f2.example.com:8008/resp', 'headers_file': path.join(TEST_RESOURCES_DIR, 'issue11.headers') }, { 'path': '/delayed_proxy', 'method': 'PUT', 'response_proxy': 'http://example.com/path.json', 'delay': 0.5, }, ] } def test_fixed_proxy(self): expected_headers = {'Host': 'my.example.com'} self.stub_response(code=200, buffer=StringIO('ok, ggl'), headers=expected_headers) response = self.fetch('/fixed_proxy', method='POST', body='') self.assertFetchCalled() self.assertResponseBody('ok, ggl', response) self.assertResponseHeaders(expected_headers, response) self.assertFetchUrl('http://example.com/path.json') def test_delayed_response(self): self.stub_response(code=HTTPStatus.NOT_FOUND, buffer=StringIO(':(')) start = time.time() response = self.fetch('/delayed_proxy', method='PUT', body='') end = time.time() self.assertFetchCalled() self.assertResponseBody(':(', response) self.assertEqual(response.code, HTTPStatus.NOT_FOUND) self.assertGreaterEqual(end - start, 0.5) def test_regexp_proxy(self): self.stub_response(code=HTTPStatus.OK, buffer=StringIO('yup')) response = self.fetch('/re_proxy/12345/abcd.json') self.assertFetchCalled() self.assertResponseBody('yup', response) self.assertEqual(response.code, HTTPStatus.OK) self.assertFetchUrl('http://re.example.com/resource/abcd/12345/12345.js') def test_hardcoded_headers_overwrite(self): self.stub_response(code=HTTPStatus.OK, buffer=StringIO('over'), headers={'Unique': '1234', 'Overwrite': 'no'}) response = self.fetch('/re_proxy2/ab/cd.html') self.assertFetchCalled() self.assertResponseBody('over', response) self.assertResponseHeaders( { 'Unique': '1234', 'Overwrite': 'yes', 'Other': 'a,b', }, response) self.assertFetchUrl('http://re2.example.com/resource/ab/cd.html.js') def test_filebased_headers_overwrite(self): self.stub_response( 
code=HTTPStatus.OK, buffer=StringIO(''), headers={'X-GITHUB-REQUEST-ID': 'abc', 'X-ID': '123'} ) response = self.fetch('/fixed_proxy2') self.assertFetchCalled() self.assertResponseBody('', response) self.assertResponseHeaders( { 'Date': 'Wed, 23 Apr 2014 06: 13: 20 GMT', 'X-GitHub-Request-Id': '53950898: 2E4E: 2AD562C: 535759FD', 'X-Id': '123', }, response) self.assertFetchUrl('http://f2.example.com:8008/resp') def test_response_error(self): self.stub_response(code=599, error=socket.error(61, 'Connection refused')) response = self.fetch('/fixed_proxy', method='POST', body='') self.assertFetchCalled() self.assertResponseBody('', response) self.assertResponseHeaders({'X-Zaglushka-Failed-Response': 'true'}, response) self.assertFetchUrl('http://example.com/path.json') self.assertInLogRecords( 'Unable to proxy response to "http://example.com/path.json": [Errno 61] Connection refused', logger_name='zaglushka' )
mit
7,428,599,609,537,569,000
36.876623
118
0.564032
false
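The test case above swaps zaglushka's request coroutine for an async stub in setUp, records what was requested, and restores the original in tearDown. The snippet below is a generic, self-contained sketch of that stub-and-restore technique; the services namespace and fetch coroutine are hypothetical names, not part of zaglushka.

import asyncio
import types
import unittest

# Hypothetical stand-in for the module attribute that gets patched.
services = types.SimpleNamespace()


async def _real_fetch(url):
    raise RuntimeError("network disabled in tests")

services.fetch = _real_fetch


class StubbedFetchTestCase(unittest.TestCase):
    def setUp(self):
        self._orig_fetch = services.fetch
        self.calls = []

        async def _fetch_stub(url):
            self.calls.append(url)
            await asyncio.sleep(0)  # emulate a small delay
            return {"code": 200, "body": "ok"}

        services.fetch = _fetch_stub

    def tearDown(self):
        services.fetch = self._orig_fetch

    def test_fetch_is_stubbed(self):
        result = asyncio.run(services.fetch("http://example.com/x"))
        self.assertEqual(result["code"], 200)
        self.assertEqual(self.calls, ["http://example.com/x"])


if __name__ == "__main__":
    unittest.main()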
altsen/diandiyun-platform
common/lib/xmodule/xmodule/html_module.py
1
11807
import copy from fs.errors import ResourceNotFoundError import logging import os import sys from lxml import etree from path import path from pkg_resources import resource_string from xblock.fields import Scope, String, Boolean, List from xmodule.editing_module import EditingDescriptor from xmodule.html_checker import check_html from xmodule.stringify import stringify_children from xmodule.x_module import XModule from xmodule.xml_module import XmlDescriptor, name_to_pathname import textwrap from xmodule.contentstore.content import StaticContent from xblock.core import XBlock log = logging.getLogger("edx.courseware") class HtmlFields(object): display_name = String( display_name="Display Name", help="This name appears in the horizontal navigation at the top of the page.", scope=Scope.settings, # it'd be nice to have a useful default but it screws up other things; so, # use display_name_with_default for those default="Text" ) data = String(help="Html contents to display for this module", default=u"", scope=Scope.content) source_code = String(help="Source code for LaTeX documents. This feature is not well-supported.", scope=Scope.settings) use_latex_compiler = Boolean( help="Enable LaTeX templates?", default=False, scope=Scope.settings ) class HtmlModule(HtmlFields, XModule): js = { 'coffee': [ resource_string(__name__, 'js/src/javascript_loader.coffee'), resource_string(__name__, 'js/src/collapsible.coffee'), resource_string(__name__, 'js/src/html/display.coffee') ], 'js': [ resource_string(__name__, 'js/src/html/imageModal.js'), resource_string(__name__, 'js/common_static/js/vendor/draggabilly.pkgd.js') ] } js_module_name = "HTMLModule" css = {'scss': [resource_string(__name__, 'css/html/display.scss')]} def get_html(self): if self.system.anonymous_student_id: return self.data.replace("%%USER_ID%%", self.system.anonymous_student_id) return self.data class HtmlDescriptor(HtmlFields, XmlDescriptor, EditingDescriptor): """ Module for putting raw html in a course """ mako_template = "widgets/html-edit.html" module_class = HtmlModule filename_extension = "xml" template_dir_name = "html" js = {'coffee': [resource_string(__name__, 'js/src/html/edit.coffee')]} js_module_name = "HTMLEditingDescriptor" css = {'scss': [resource_string(__name__, 'css/editor/edit.scss'), resource_string(__name__, 'css/html/edit.scss')]} # VS[compat] TODO (cpennington): Delete this method once all fall 2012 course # are being edited in the cms @classmethod def backcompat_paths(cls, path): if path.endswith('.html.xml'): path = path[:-9] + '.html' # backcompat--look for html instead of xml if path.endswith('.html.html'): path = path[:-5] # some people like to include .html in filenames.. candidates = [] while os.sep in path: candidates.append(path) _, _, path = path.partition(os.sep) # also look for .html versions instead of .xml nc = [] for candidate in candidates: if candidate.endswith('.xml'): nc.append(candidate[:-4] + '.html') return candidates + nc @classmethod def filter_templates(cls, template, course): """ Filter template that contains 'latex' from templates. Show them only if use_latex_compiler is set to True in course settings. 
""" return (not 'latex' in template['template_id'] or course.use_latex_compiler) def get_context(self): """ an override to add in specific rendering context, in this case we need to add in a base path to our c4x content addressing scheme """ _context = EditingDescriptor.get_context(self) # Add some specific HTML rendering context when editing HTML modules where we pass # the root /c4x/ url for assets. This allows client-side substitutions to occur. _context.update({ 'base_asset_url': StaticContent.get_base_url_path_for_course_assets(self.location) + '/', 'enable_latex_compiler': self.use_latex_compiler, }) return _context # NOTE: html descriptors are special. We do not want to parse and # export them ourselves, because that can break things (e.g. lxml # adds body tags when it exports, but they should just be html # snippets that will be included in the middle of pages. @classmethod def load_definition(cls, xml_object, system, location): '''Load a descriptor from the specified xml_object: If there is a filename attribute, load it as a string, and log a warning if it is not parseable by etree.HTMLParser. If there is not a filename attribute, the definition is the body of the xml_object, without the root tag (do not want <html> in the middle of a page) ''' filename = xml_object.get('filename') if filename is None: definition_xml = copy.deepcopy(xml_object) cls.clean_metadata_from_xml(definition_xml) return {'data': stringify_children(definition_xml)}, [] else: # html is special. cls.filename_extension is 'xml', but # if 'filename' is in the definition, that means to load # from .html # 'filename' in html pointers is a relative path # (not same as 'html/blah.html' when the pointer is in a directory itself) pointer_path = "{category}/{url_path}".format( category='html', url_path=name_to_pathname(location.name) ) base = path(pointer_path).dirname() # log.debug("base = {0}, base.dirname={1}, filename={2}".format(base, base.dirname(), filename)) filepath = "{base}/{name}.html".format(base=base, name=filename) # log.debug("looking for html file for {0} at {1}".format(location, filepath)) # VS[compat] # TODO (cpennington): If the file doesn't exist at the right path, # give the class a chance to fix it up. The file will be written out # again in the correct format. This should go away once the CMS is # online and has imported all current (fall 2012) courses from xml if not system.resources_fs.exists(filepath): candidates = cls.backcompat_paths(filepath) # log.debug("candidates = {0}".format(candidates)) for candidate in candidates: if system.resources_fs.exists(candidate): filepath = candidate break try: with system.resources_fs.open(filepath) as file: html = file.read().decode('utf-8') # Log a warning if we can't parse the file, but don't error if not check_html(html) and len(html) > 0: msg = "Couldn't parse html in {0}, content = {1}".format(filepath, html) log.warning(msg) system.error_tracker("Warning: " + msg) definition = {'data': html} # TODO (ichuang): remove this after migration # for Fall 2012 LMS migration: keep filename (and unmangled filename) definition['filename'] = [filepath, filename] return definition, [] except (ResourceNotFoundError) as err: msg = 'Unable to load file contents at path {0}: {1} '.format( filepath, err) # add more info and re-raise raise Exception(msg), None, sys.exc_info()[2] # TODO (vshnayder): make export put things in the right places. 
def definition_to_xml(self, resource_fs): ''' Write <html filename="" [meta-attrs="..."]> to filename.xml, and the html string to filename.html. ''' # Write html to file, return an empty tag pathname = name_to_pathname(self.url_name) filepath = u'{category}/{pathname}.html'.format( category=self.category, pathname=pathname ) resource_fs.makedir(os.path.dirname(filepath), recursive=True, allow_recreate=True) with resource_fs.open(filepath, 'w') as filestream: html_data = self.data.encode('utf-8') filestream.write(html_data) # write out the relative name relname = path(pathname).basename() elt = etree.Element('html') elt.set("filename", relname) return elt @property def non_editable_metadata_fields(self): non_editable_fields = super(HtmlDescriptor, self).non_editable_metadata_fields non_editable_fields.append(HtmlDescriptor.use_latex_compiler) return non_editable_fields class AboutFields(object): display_name = String( help="Display name for this module", scope=Scope.settings, default="overview", ) data = String( help="Html contents to display for this module", default="", scope=Scope.content ) @XBlock.tag("detached") class AboutModule(AboutFields, HtmlModule): """ Overriding defaults but otherwise treated as HtmlModule. """ pass @XBlock.tag("detached") class AboutDescriptor(AboutFields, HtmlDescriptor): """ These pieces of course content are treated as HtmlModules but we need to overload where the templates are located in order to be able to create new ones """ template_dir_name = "about" module_class = AboutModule class StaticTabFields(object): """ The overrides for Static Tabs """ display_name = String( display_name="Display Name", help="This name appears in the horizontal navigation at the top of the page.", scope=Scope.settings, default="Empty", ) data = String( default=textwrap.dedent("""\ <p>This is where you can add additional pages to your courseware. Click the 'edit' button to begin editing.</p> """), scope=Scope.content, help="HTML for the additional pages" ) @XBlock.tag("detached") class StaticTabModule(StaticTabFields, HtmlModule): """ Supports the field overrides """ pass @XBlock.tag("detached") class StaticTabDescriptor(StaticTabFields, HtmlDescriptor): """ These pieces of course content are treated as HtmlModules but we need to overload where the templates are located in order to be able to create new ones """ template_dir_name = None module_class = StaticTabModule class CourseInfoFields(object): """ Field overrides """ items = List( help="List of course update items", default=[], scope=Scope.content ) data = String( help="Html contents to display for this module", default="<ol></ol>", scope=Scope.content ) @XBlock.tag("detached") class CourseInfoModule(CourseInfoFields, HtmlModule): """ Just to support xblock field overrides """ # statuses STATUS_VISIBLE = 'visible' STATUS_DELETED = 'deleted' @XBlock.tag("detached") class CourseInfoDescriptor(CourseInfoFields, HtmlDescriptor): """ These pieces of course content are treated as HtmlModules but we need to overload where the templates are located in order to be able to create new ones """ template_dir_name = None module_class = CourseInfoModule
agpl-3.0
-4,910,680,462,322,662,000
35.329231
123
0.621665
false
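load_definition above keeps "the body of the xml_object, without the root tag" when no filename attribute is present. The following is a tiny standalone illustration of that idea with plain lxml; the helper name inner_xml is invented for the example and is not the stringify_children helper the module actually imports.

from lxml import etree


def inner_xml(element):
    """Serialize an element's text and children without the enclosing tag."""
    parts = [element.text or ""]
    for child in element:
        parts.append(etree.tostring(child, encoding="unicode"))  # includes tails
    return "".join(parts)


root = etree.fromstring("<html>Hello <b>world</b>!</html>")
print(inner_xml(root))  # -> Hello <b>world</b>!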
jashworth-isb/cmonkey-python
cmonkey/cmonkey_run.py
1
18160
# vi: sw=4 ts=4 et: import logging import microarray import membership as memb import meme import motif import util import rsat import microbes_online import organism as org import scoring import network as nw import stringdb import os from datetime import date, datetime import json import numpy as np import gc import sizes KEGG_FILE_PATH = 'testdata/KEGG_taxonomy' GO_FILE_PATH = 'testdata/proteome2taxid' RSAT_BASE_URL = 'http://rsat.ccb.sickkids.ca' COG_WHOG_URL = 'ftp://ftp.ncbi.nih.gov/pub/COG/COG/whog' STRING_URL_PATTERN = "http://networks.systemsbiology.net/string9/%s.gz" CACHE_DIR = 'cache' LOG_FORMAT = '%(asctime)s %(levelname)-8s %(message)s' STATS_FREQ = 10 RESULT_FREQ = 10 class CMonkeyRun: def __init__(self, organism_code, ratio_matrix, string_file=None, num_clusters=None): logging.basicConfig(format=LOG_FORMAT, datefmt='%Y-%m-%d %H:%M:%S', level=logging.DEBUG) self.__membership = None self.__organism = None self.config_params = {} self.ratio_matrix = ratio_matrix.sorted_by_row_name() # membership update default parameters # these come first, since a lot depends on clustering numbers self['memb.clusters_per_row'] = 2 if num_clusters == None: num_clusters = int(round(self.ratio_matrix.num_rows() * self['memb.clusters_per_row'] / 20.0)) self['memb.clusters_per_col'] = int(round(num_clusters * 2.0 / 3.0)) self['memb.prob_row_change'] = 0.5 self['memb.prob_col_change'] = 1.0 self['memb.max_changes_per_row'] = 1 self['memb.max_changes_per_col'] = 5 self['organism_code'] = organism_code self['num_clusters'] = num_clusters logging.info("# CLUSTERS: %d", self['num_clusters']) # defaults self.row_seeder = memb.make_kmeans_row_seeder(num_clusters) self.column_seeder = microarray.seed_column_members self['row_scaling'] = 6.0 self['string_file'] = None self['cache_dir'] = CACHE_DIR self['output_dir'] = 'out' self['start_iteration'] = 1 self['num_iterations'] = 2000 self['multiprocessing'] = True # Quantile normalization is false by default in cMonkey-R self['quantile_normalize'] = True # used to select sequences and MEME self['sequence_types'] = ['upstream'] self['search_distances'] = {'upstream': (-20, 150)} # used for background distribution and MAST self['scan_distances'] = {'upstream': (-30, 250)} # membership default parameters self['memb.min_cluster_rows_allowed'] = 3 self['memb.max_cluster_rows_allowed'] = 70 self['string_file'] = string_file today = date.today() self.CHECKPOINT_INTERVAL = None self.__checkpoint_basename = "cmonkey-checkpoint-%s-%d%d%d" % ( organism_code, today.year, today.month, today.day) def report_params(self): logging.info('cmonkey_run config_params:') for param,value in self.config_params.items(): logging.info('%s=%s' %(param,str(value))) def __getitem__(self, key): return self.config_params[key] def __setitem__(self, key, value): self.config_params[key] = value def __make_membership(self): """returns the seeded membership on demand""" return memb.ClusterMembership.create( self.ratio_matrix, self.row_seeder, self.column_seeder, self.config_params) def make_column_scoring(self): """returns the column scoring function""" return scoring.ColumnScoringFunction( self.membership(), self.ratio_matrix, config_params=self.config_params) def make_row_scoring(self): """makes a row scoring function on demand""" # Default row scoring functions row_scoring = microarray.RowScoringFunction( self.membership(), self.ratio_matrix, scaling_func=lambda iteration: self['row_scaling'], config_params=self.config_params) self.row_scoring = row_scoring meme_suite = meme.MemeSuite430() 
sequence_filters = [ motif.unique_filter, motif.get_remove_low_complexity_filter(meme_suite), motif.get_remove_atgs_filter(self['search_distances']['upstream'])] motif_scaling_fun = scoring.get_default_motif_scaling(self['num_iterations']) motif_scoring = motif.MemeScoringFunction( self.organism(), self.membership(), self.ratio_matrix, meme_suite, sequence_filters=sequence_filters, scaling_func=motif_scaling_fun, num_motif_func=motif.default_nmotif_fun, #update_in_iteration=scoring.schedule(601, 3), #motif_in_iteration=scoring.schedule(600, 100), update_in_iteration=scoring.schedule(100, 10), motif_in_iteration=scoring.schedule(100, 100), config_params=self.config_params) self.motif_scoring = motif_scoring network_scaling_fun = scoring.get_default_network_scaling(self['num_iterations']) network_scoring = nw.ScoringFunction(self.organism(), self.membership(), self.ratio_matrix, scaling_func=network_scaling_fun, run_in_iteration=scoring.schedule(1, 7), config_params=self.config_params) self.network_scoring = network_scoring row_scoring_functions = [row_scoring, motif_scoring, network_scoring] return scoring.ScoringFunctionCombiner(self.membership(), row_scoring_functions, config_params=self.config_params, log_subresults=True) def membership(self): if self.__membership == None: logging.info("creating and seeding memberships") self.__membership = self.__make_membership() return self.__membership def organism(self): """returns the organism object to work on""" if self.__organism == None: self.__organism = self.make_microbe() return self.__organism def make_microbe(self): """returns the organism object to work on""" keggfile = util.DelimitedFile.read(KEGG_FILE_PATH, comment='#') gofile = util.DelimitedFile.read(GO_FILE_PATH) rsatdb = rsat.RsatDatabase(RSAT_BASE_URL, self['cache_dir']) mo_db = microbes_online.MicrobesOnline() stringfile = self.config_params['string_file'] kegg_mapper = org.make_kegg_code_mapper(keggfile) rsat_mapper = org.make_rsat_organism_mapper(rsatdb) # automatically download STRING file if stringfile == None: rsat_info = rsat_mapper(kegg_mapper(self['organism_code'])) ncbi_code = rsat_info.taxonomy_id print "NCBI CODE IS: ", ncbi_code url = STRING_URL_PATTERN % ncbi_code stringfile = "%s/%s.gz" % (self['cache_dir'], ncbi_code) self['string_file'] = stringfile logging.info("Automatically using STRING file in '%s'", stringfile) util.get_url_cached(url, stringfile) nw_factories = [] if stringfile != None: nw_factories.append(stringdb.get_network_factory2(stringfile, 0.5)) else: logging.warn("no STRING file specified !") nw_factories.append(microbes_online.get_network_factory( mo_db, max_operon_size=self.ratio_matrix.num_rows() / 20, weight=0.5)) org_factory = org.MicrobeFactory(kegg_mapper, rsat_mapper, org.make_go_taxonomy_mapper(gofile), mo_db, nw_factories) return org_factory.create(self['organism_code'], self['search_distances'], self['scan_distances']) def __make_dirs_if_needed(self): output_dir = self['output_dir'] if not os.path.exists(output_dir): os.mkdir(output_dir) cache_dir = self['cache_dir'] if not os.path.exists(cache_dir): os.mkdir(cache_dir) def __clear_output_dir(self): output_dir = self['output_dir'] if os.path.exists(output_dir): outfiles = os.listdir(output_dir) for filename in outfiles: os.remove('/'.join([output_dir, filename])) def run(self): self.__make_dirs_if_needed() self.__clear_output_dir() # write the normalized ratio matrix for stats and visualization output_dir = self['output_dir'] if not os.path.exists(output_dir + '/ratios.tsv'): 
self.ratio_matrix.write_tsv_file(output_dir + '/ratios.tsv') row_scoring = self.make_row_scoring() col_scoring = self.make_column_scoring() self.run_iterations(row_scoring, col_scoring) def run_from_checkpoint(self, checkpoint_filename): row_scoring = self.make_row_scoring() col_scoring = self.make_column_scoring() self.__make_dirs_if_needed() self.init_from_checkpoint(checkpoint_filename, row_scoring, col_scoring) self.run_iterations(row_scoring, col_scoring) def residual_for(self, row_names, column_names): if len(column_names) <= 1 or len(row_names) <= 1: return 1.0 else: matrix = self.ratio_matrix.submatrix_by_name(row_names, column_names) return matrix.residual() def write_results(self, iteration_result): # Write a snapshot iteration = iteration_result['iteration'] iteration_result['columns'] = {} iteration_result['rows'] = {} iteration_result['residuals'] = {} for cluster in range(1, self['num_clusters'] + 1): column_names = self.membership().columns_for_cluster(cluster) row_names = self.membership().rows_for_cluster(cluster) iteration_result['columns'][cluster] = column_names iteration_result['rows'][cluster] = row_names residual = self.residual_for(row_names, column_names) iteration_result['residuals'][cluster] = residual # write results with open('%s/%d-results.json' % (self['output_dir'], iteration), 'w') as outfile: outfile.write(json.dumps(iteration_result)) def write_stats(self, iteration_result): # write stats for this iteration iteration = iteration_result['iteration'] residuals = [] cluster_stats = {} network_scores = iteration_result['networks'] if 'motif-pvalue' in iteration_result: motif_pvalue = iteration_result['motif-pvalue'] else: motif_pvalue = 0.0 if 'fuzzy-coeff' in iteration_result: fuzzy_coeff = iteration_result['fuzzy-coeff'] else: fuzzy_coeff = 0.0 for cluster in range(1, self['num_clusters'] + 1): row_names = iteration_result['rows'][cluster] column_names = iteration_result['columns'][cluster] residual = self.residual_for(row_names, column_names) residuals.append(residual) cluster_stats[cluster] = {'num_rows': len(row_names), 'num_columns': len(column_names), 'residual': residual } stats = {'cluster': cluster_stats, 'median_residual': np.median(residuals), 'motif-pvalue': motif_pvalue, 'network-scores': network_scores, 'fuzzy-coeff': fuzzy_coeff} with open('%s/%d-stats.json' % (self['output_dir'], iteration), 'w') as outfile: try: outfile.write(json.dumps(stats)) except: logging.error("Could not write stats - probably non-serializable values found") # print stats object, likely there is something that is not serializable print stats def write_runlog(self, row_scoring, iteration): logging.info("Writing run map for this iteration") run_infos = [run_log.to_json() for run_log in row_scoring.run_logs()] with open('%s/%d-runlog.json' % (self['output_dir'], iteration), 'w') as outfile: try: outfile.write(json.dumps(run_infos)) except: logging.error("Could not run map - probably non-serializable values found") # print run_infos object, likely there is something that is not serializable print run_infos def write_start_info(self): start_info = { 'start_time': str(datetime.now()), 'num_iterations': self['num_iterations'], 'organism-code': self.organism().code, 'species': self.organism().species(), 'num_rows': self.ratio_matrix.num_rows(), 'num_columns': self.ratio_matrix.num_columns() } with open('%s/start.json' % self['output_dir'], 'w') as outfile: outfile.write(json.dumps(start_info)) def write_finish_info(self): finish_info = { 'finish_time': str(datetime.now()) } 
with open('%s/finish.json' % self['output_dir'], 'w') as outfile: outfile.write(json.dumps(finish_info)) def run_iterations(self, row_scoring, col_scoring): self.report_params() self.write_start_info() for iteration in range(self['start_iteration'], self['num_iterations'] + 1): logging.info("Iteration # %d", iteration) iteration_result = {'iteration': iteration} rscores = row_scoring.compute(iteration_result) start_time = util.current_millis() cscores = col_scoring.compute(iteration_result) elapsed = util.current_millis() - start_time logging.info("computed column_scores in %f s.", elapsed / 1000.0) self.membership().update(self.ratio_matrix, rscores, cscores, self['num_iterations'], iteration_result) if iteration > 0 and self.CHECKPOINT_INTERVAL and iteration % self.CHECKPOINT_INTERVAL == 0: self.save_checkpoint_data(iteration, row_scoring, col_scoring) mean_net_score = 0.0 mean_mot_pvalue = 0.0 if 'networks' in iteration_result.keys(): mean_net_score = iteration_result['networks'] mean_mot_pvalue = "NA" if 'motif-pvalue' in iteration_result.keys(): mean_mot_pvalue = "" mean_mot_pvalues = iteration_result['motif-pvalue'] mean_mot_pvalue = "" for seqtype in mean_mot_pvalues.keys(): mean_mot_pvalue = mean_mot_pvalue + (" '%s' = %f" % (seqtype, mean_mot_pvalues[seqtype])) logging.info('mean net = %s | mean mot = %s', str(mean_net_score), mean_mot_pvalue) if iteration == 1 or (iteration % RESULT_FREQ == 0): self.write_results(iteration_result) if iteration == 1 or (iteration % STATS_FREQ == 0): self.write_stats(iteration_result) # run infos should be written with the same frequency as stats self.write_runlog(row_scoring, iteration) gc.collect() #print "# ROW SCORING: ", sizes.asizeof(self.row_scoring) #print "# MOT SCORING: ", sizes.asizeof(self.motif_scoring) #print "# NET SCORING: ", sizes.asizeof(self.network_scoring) #print "# COL SCORING: ", sizes.asizeof(col_scoring) #print "# MEMBERSHIP: ", sizes.asizeof(self.membership()) logging.info("Postprocessing: Adjusting the clusters....") self.membership().postadjust() iteration = self['num_iterations'] + 1 iteration_result = {'iteration': iteration } logging.info("Adjusted. Now re-run scoring (iteration: %d)", iteration_result['iteration']) row_scoring.compute_force(iteration_result) self.write_results(iteration_result) self.write_stats(iteration_result) self.write_finish_info() print "Done !!!!" ############################################################ ###### CHECKPOINTING ############################## def save_checkpoint_data(self, iteration, row_scoring, col_scoring): """save checkpoint data for the specified iteration""" with util.open_shelf("%s.%d" % (self.__checkpoint_basename, iteration)) as shelf: shelf['config'] = self.config_params shelf['iteration'] = iteration self.membership().store_checkpoint_data(shelf) row_scoring.store_checkpoint_data(shelf) col_scoring.store_checkpoint_data(shelf) def init_from_checkpoint(self, checkpoint_filename, row_scoring, col_scoring): """initialize this object from a checkpoint file""" logging.info("Continue run using checkpoint file '%s'", checkpoint_filename) with util.open_shelf(checkpoint_filename) as shelf: self.config_params = shelf['config'] self['start_iteration'] = shelf['iteration'] + 1 self.__membership = memb.ClusterMembership.restore_from_checkpoint( self.config_params, shelf) row_scoring.restore_checkpoint_data(shelf) col_scoring.restore_checkpoint_data(shelf) #return row_scoring, col_scoring necessary??
lgpl-3.0
-5,790,948,944,277,834,000
42.444976
109
0.576101
false
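The checkpointing methods at the end of cmonkey_run.py shelve the run state (config, iteration number, membership and scoring data) so that a long run can be resumed. A minimal, generic sketch of that save/restore round trip with the standard shelve module follows; the file name and keys are illustrative only and do not match cMonkey's checkpoint layout.

import shelve


def save_checkpoint(filename, iteration, config, state):
    with shelve.open(filename) as shelf:
        shelf["iteration"] = iteration
        shelf["config"] = config
        shelf["state"] = state


def load_checkpoint(filename):
    with shelve.open(filename) as shelf:
        # resume from the iteration after the last completed one
        return shelf["iteration"] + 1, shelf["config"], shelf["state"]


save_checkpoint("demo-checkpoint", 42, {"num_clusters": 300}, {"scores": [1, 2, 3]})
print(load_checkpoint("demo-checkpoint"))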
taohungyang/cloud-custodian
tests/test_validation.py
1
1757
# Copyright 2017 Capital One Services, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import, division, print_function, unicode_literals import argparse from .common import BaseTest from c7n.commands import validate as validate_yaml_policies class CommandsValidateTest(BaseTest): def test_failed_validation(self): yaml_validate_options = argparse.Namespace( command="c7n.commands.validate", config=None, configs=[ "tests/data/test_policies/ebs-BADVALIDATION.yml", "tests/data/test_policies/ami-GOODVALIDATION.yml", ], debug=False, subparser="validate", verbose=False, ) with self.assertRaises((SystemExit, ValueError)) as exit: validate_yaml_policies(yaml_validate_options) # if there is a bad policy in the batch being validated, there should be an exit 1 self.assertEqual(exit.exception.code, 1) yaml_validate_options.configs.remove( "tests/data/test_policies/ebs-BADVALIDATION.yml" ) # if there are only good policy, it should exit none self.assertIsNone(validate_yaml_policies(yaml_validate_options))
apache-2.0
-1,632,874,657,305,417,700
38.931818
90
0.689812
false
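The validation test above expects an exit code of 1 when a bad policy file is in the batch and a plain None return when every policy is valid. Below is a generic sketch of that exit-code assertion pattern, detached from Cloud Custodian; validate and the file names are made up for the example.

import unittest


def validate(paths):
    if any("BAD" in p for p in paths):
        raise SystemExit(1)  # at least one bad input -> non-zero exit
    return None              # everything valid -> no exit


class ExitCodeTest(unittest.TestCase):
    def test_bad_input_exits_with_1(self):
        with self.assertRaises(SystemExit) as ctx:
            validate(["ok.yml", "ebs-BADVALIDATION.yml"])
        self.assertEqual(ctx.exception.code, 1)

    def test_good_input_returns_none(self):
        self.assertIsNone(validate(["ok.yml"]))


if __name__ == "__main__":
    unittest.main()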
euphi/homie-esp8266
scripts/ota_updater/ota_updater.py
1
6619
#!/usr/bin/env python from __future__ import division, print_function import paho.mqtt.client as mqtt import base64, sys, math from hashlib import md5 # The callback for when the client receives a CONNACK response from the server. def on_connect(client, userdata, flags, rc): if rc != 0: print("Connection Failed with result code {}".format(rc)) client.disconnect() else: print("Connected with result code {}".format(rc)) # calcluate firmware md5 firmware_md5 = md5(userdata['firmware']).hexdigest() userdata.update({'md5': firmware_md5}) # Subscribing in on_connect() means that if we lose the connection and # reconnect then subscriptions will be renewed. client.subscribe("{base_topic}{device_id}/$implementation/ota/status".format(**userdata)) client.subscribe("{base_topic}{device_id}/$implementation/ota/enabled".format(**userdata)) client.subscribe("{base_topic}{device_id}/$fw/#".format(**userdata)) # Wait for device info to come in and invoke the on_message callback where update will continue print("Waiting for device info...") # The callback for when a PUBLISH message is received from the server. def on_message(client, userdata, msg): # decode string for python2/3 compatiblity msg.payload = msg.payload.decode() if msg.topic.endswith('$implementation/ota/status'): status = int(msg.payload.split()[0]) if userdata.get("published"): if status == 206: # in progress # state in progress, print progress bar progress, total = [int(x) for x in msg.payload.split()[1].split('/')] bar_width = 30 bar = int(bar_width*(progress/total)) print("\r[", '+'*bar, ' '*(bar_width-bar), "] ", msg.payload.split()[1], end='', sep='') if (progress == total): print() sys.stdout.flush() elif status == 304: # not modified print("Device firmware already up to date with md5 checksum: {}".format(userdata.get('md5'))) client.disconnect() elif status == 403: # forbidden print("Device ota disabled, aborting...") client.disconnect() elif msg.topic.endswith('$fw/checksum'): checksum = msg.payload if userdata.get("published"): if checksum == userdata.get('md5'): print("Device back online. 
Update Successful!") else: print("Expecting checksum {}, got {}, update failed!".format(userdata.get('md5'), checksum)) client.disconnect() else: if checksum != userdata.get('md5'): # save old md5 for comparison with new firmware userdata.update({'old_md5': checksum}) else: print("Device firmware already up to date with md5 checksum: {}".format(checksum)) client.disconnect() elif msg.topic.endswith('ota/enabled'): if msg.payload == 'true': userdata.update({'ota_enabled': True}) else: print("Device ota disabled, aborting...") client.disconnect() if ( not userdata.get("published") ) and ( userdata.get('ota_enabled') ) and \ ( 'old_md5' in userdata.keys() ) and ( userdata.get('md5') != userdata.get('old_md5') ): # push the firmware binary userdata.update({"published": True}) topic = "{base_topic}{device_id}/$implementation/ota/firmware/{md5}".format(**userdata) print("Publishing new firmware with checksum {}".format(userdata.get('md5'))) client.publish(topic, userdata['firmware']) def main(broker_host, broker_port, broker_username, broker_password, base_topic, device_id, firmware): # initialise mqtt client and register callbacks client = mqtt.Client() client.on_connect = on_connect client.on_message = on_message # set username and password if given if broker_username and broker_password: client.username_pw_set(broker_username, broker_password) # save data to be used in the callbacks client.user_data_set({ "base_topic": base_topic, "device_id": device_id, "firmware": firmware }) # start connection print("Connecting to mqtt broker {} on port {}".format(broker_host, broker_port)) client.connect(broker_host, broker_port, 60) # Blocking call that processes network traffic, dispatches callbacks and handles reconnecting. client.loop_forever() if __name__ == '__main__': import argparse parser = argparse.ArgumentParser( description='ota firmware update scirpt for ESP8226 implemenation of the Homie mqtt IoT convention.') # ensure base topic always ends with a '/' def base_topic_arg(s): s = str(s) if not s.endswith('/'): s = s + '/' return s # specify arguments parser.add_argument('-l', '--broker-host', type=str, required=False, help='host name or ip address of the mqtt broker', default="127.0.0.1") parser.add_argument('-p', '--broker-port', type=int, required=False, help='port of the mqtt broker', default=1883) parser.add_argument('-u', '--broker-username', type=str, required=False, help='username used to authenticate with the mqtt broker') parser.add_argument('-d', '--broker-password', type=str, required=False, help='password used to authenticate with the mqtt broker') parser.add_argument('-t', '--base-topic', type=base_topic_arg, required=False, help='base topic of the homie devices on the broker', default="homie/") parser.add_argument('-i', '--device-id', type=str, required=True, help='homie device id') parser.add_argument('firmware', type=argparse.FileType('rb'), help='path to the firmware to be sent to the device') # workaround for http://bugs.python.org/issue9694 parser._optionals.title = "arguments" # get and validate arguments args = parser.parse_args() # read the contents of firmware into buffer fw_buffer = args.firmware.read() args.firmware.close() firmware = bytearray() firmware.extend(fw_buffer) # Invoke the business logic main(args.broker_host, args.broker_port, args.broker_username, args.broker_password, args.base_topic, args.device_id, firmware)
mit
6,659,864,722,361,300,000
41.703226
109
0.610666
false
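The OTA script above identifies a firmware build by its md5 hex digest and publishes the binary to .../$implementation/ota/firmware/<md5> once the device reports a different checksum. A small sketch of just the checksum step; the file name is a placeholder.

from hashlib import md5


def firmware_checksum(path):
    """Return the md5 hex digest of a firmware binary."""
    with open(path, "rb") as fh:
        return md5(fh.read()).hexdigest()


# e.g. publish topic:
# "homie/<device-id>/$implementation/ota/firmware/" + firmware_checksum("firmware.bin")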
Saluev/cocos2d-gui
cocosgui/css/__init__.py
1
2523
__all__ = [ 'styles', 'Style', 'CSSNode', 'evaluate' ] # importing basic names to publish them from .style import styles, Style from .node import CSSNode # importing extensions import border, borderimage, background, font import rendering def evaluate(window, element = None): if element is None: element = window element.evaluate_style() children = element.get_nodes() for child in children: assert(child.parent is element) evaluate(window, child) _evaluate_node(element) def _evaluate_node(node): parent, style = node.parent, node.evaluated_style left, bottom = style['left'], style['top'] left = 0 if left == 'auto' else left bottom = 0 if bottom == 'auto' else bottom position = style['position'] if position == 'absolute': raise NotImplementedError # TODO fixed? margin_offset = [left, bottom] border_offset = [margin_offset[0] + style['margin-left'], margin_offset[1] + style['margin-bottom' ]] padding_offset = [border_offset[0] + style['border-left-width'], border_offset[1] + style['border-bottom-width' ]] content_offset = [padding_offset[0] + style['padding-left'], padding_offset[1] + style['padding-bottom' ]] content_box = content_offset + list(node.get_content_size()) padding_box = padding_offset + [sum(( content_box[2], style['padding-left' ], style['padding-right' ], )), sum(( content_box[3], style['padding-top' ], style['padding-bottom'], ))] border_box = border_offset + [sum(( padding_box[2], style['border-left-width' ], style['border-right-width' ], )), sum(( padding_box[3], style['border-top-width' ], style['border-bottom-width'], ))] margin_box = margin_offset + [sum(( border_box[2], style['margin-left' ], style['margin-right' ], )), sum(( border_box[3], style['margin-top' ], style['margin-bottom'], ))] #width, height = style['width'], style['height'] # TODO percentages? #width = margin_box[2] if width == 'auto' else width #height = margin_box[3] if height == 'auto' else height #dw, dh = width - margin_box[2], height - margin_box[3] #if dw != 0 or dh != 0: #for box in [margin_box, border_box, padding_box, content_box]: #box[2] += dw #box[3] += dh info = { 'node': node, 'margin_box' : margin_box, 'border_box' : border_box, 'padding_box': padding_box, 'content_box': content_box, } node.apply_style(**info)
mit
-5,700,135,919,683,113,000
29.768293
70
0.613555
false
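_evaluate_node above builds the margin, border, padding and content boxes by accumulating offsets from the outside in, then summing edge widths back up for the box sizes. Read outward, every box is the inner box grown by the corresponding edges. A hedged numeric illustration of that arithmetic, with invented values:

def expand(box, left, bottom, right, top):
    """Grow an [x, y, width, height] box outward by the given edge widths."""
    x, y, w, h = box
    return [x - left, y - bottom, w + left + right, h + top + bottom]


content_box = [8, 8, 100, 40]                  # content offset + content size
padding_box = expand(content_box, 4, 4, 4, 4)  # + padding
border_box = expand(padding_box, 2, 2, 2, 2)   # + border widths
margin_box = expand(border_box, 2, 2, 2, 2)    # + margins
print(margin_box)  # -> [0, 0, 116, 56]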
mairin/anaconda
pyanaconda/bootloader.py
1
83933
# bootloader.py # Anaconda's bootloader configuration module. # # Copyright (C) 2011 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # the GNU General Public License v.2, or (at your option) any later version. # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY expressed or implied, including the implied warranties of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. You should have received a copy of the # GNU General Public License along with this program; if not, write to the # Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. Any Red Hat trademarks that are incorporated in the # source code or documentation are not subject to the GNU General Public # License and may only be used or replicated with the express permission of # Red Hat, Inc. # # Red Hat Author(s): David Lehman <[email protected]> # Matthew Miller <[email protected]> (extlinux portion) # import collections import os import re import struct import blivet from parted import PARTITION_BIOS_GRUB from pyanaconda import iutil from blivet.devicelibs import raid from pyanaconda.isys import sync from pyanaconda.product import productName from pyanaconda.flags import flags from pyanaconda.constants import ROOT_PATH from blivet.errors import StorageError from blivet.fcoe import fcoe import pyanaconda.network from pyanaconda.nm import nm_device_hwaddress from blivet import platform from blivet.size import Size from pyanaconda.i18n import _, N_ import logging log = logging.getLogger("anaconda") def get_boot_block(device, seek_blocks=0): status = device.status if not status: try: device.setup() except StorageError: return "" block_size = device.partedDevice.sectorSize fd = os.open(device.path, os.O_RDONLY) if seek_blocks: os.lseek(fd, seek_blocks * block_size, 0) block = os.read(fd, 512) os.close(fd) if not status: try: device.teardown(recursive=True) except StorageError: pass return block def is_windows_boot_block(block): try: windows = (len(block) >= 512 and struct.unpack("H", block[0x1fe: 0x200]) == (0xaa55,)) except struct.error: windows = False return windows def has_windows_boot_block(device): return is_windows_boot_block(get_boot_block(device)) class serial_opts(object): def __init__(self): self.speed = None self.parity = None self.word = None self.stop = None self.flow = None def parse_serial_opt(arg): """Parse and split serial console options. Documentation/kernel-parameters.txt says: ttyS<n>[,options] Use the specified serial port. The options are of the form "bbbbpnf", where "bbbb" is the baud rate, "p" is parity ("n", "o", or "e"), "n" is number of bits, and "f" is flow control ("r" for RTS or omit it). Default is "9600n8". but note that everything after the baud rate is optional, so these are all valid: 9600, 19200n, 38400n8, 9600e7r. Also note that the kernel assumes 1 stop bit; this can't be changed. 
""" opts = serial_opts() m = re.match(r'\d+', arg) if m is None: return opts opts.speed = m.group() idx = len(opts.speed) try: opts.parity = arg[idx+0] opts.word = arg[idx+1] opts.flow = arg[idx+2] except IndexError: pass return opts def _is_on_iscsi(device): """Tells whether a given device is on an iSCSI disk or not.""" return all(isinstance(disk, blivet.devices.iScsiDiskDevice) for disk in device.disks) class BootLoaderError(Exception): pass class Arguments(set): ordering_dict = { "rhgb" : 99, "quiet" : 100 } def _merge_ip(self): """ Find ip= arguments targetting the same interface and merge them. """ # partition the input def partition_p(arg): # we are only interested in ip= parameters that use some kind of # automatic network setup: return arg.startswith("ip=") and arg.count(":") == 1 ip_params = filter(partition_p, self) rest = set(filter(lambda p: not partition_p(p), self)) # split at the colon: ip_params = map(lambda p: p.split(":"), ip_params) # create mapping from nics to their configurations config = collections.defaultdict(list) for (nic, cfg) in ip_params: config[nic].append(cfg) # generate the new parameters: ip_params = set() for nic in config: ip_params.add("%s:%s" % (nic, ",".join(sorted(config[nic])))) # update the set self.clear() self.update(rest) self.update(ip_params) return self def __str__(self): self._merge_ip() # sort the elements according to their values in ordering_dict. The # higher the number the closer to the final string the argument # gets. The default is 50. lst = sorted(self, key=lambda s: self.ordering_dict.get(s, 50)) return " ".join(lst) class BootLoaderImage(object): """ Base class for bootloader images. Suitable for non-linux OS images. """ def __init__(self, device=None, label=None, short=None): self.label = label self.short_label = short self.device = device class LinuxBootLoaderImage(BootLoaderImage): def __init__(self, device=None, label=None, short=None, version=None): super(LinuxBootLoaderImage, self).__init__(device=device, label=label) self.label = label # label string self.short_label = short # shorter label string self.device = device # StorageDevice instance self.version = version # kernel version string self._kernel = None # filename string self._initrd = None # filename string @property def kernel(self): filename = self._kernel if self.version and not filename: filename = "vmlinuz-%s" % self.version return filename @property def initrd(self): filename = self._initrd if self.version and not filename: filename = "initramfs-%s.img" % self.version return filename class TbootLinuxBootLoaderImage(LinuxBootLoaderImage): _multiboot = "tboot.gz" # filename string _mbargs = ["logging=vga,serial,memory"] _args = ["intel_iommu=on"] def __init__(self, device=None, label=None, short=None, version=None): super(TbootLinuxBootLoaderImage, self).__init__( device=device, label=label, short=short, version=version) @property def multiboot(self): return self._multiboot @property def mbargs(self): return self._mbargs @property def args(self): return self._args class BootLoader(object): name = "Generic Bootloader" packages = [] config_file = None config_file_mode = 0600 can_dual_boot = False can_update = False image_label_attr = "label" encryption_support = False stage2_is_valid_stage1 = False # requirements for stage2 devices stage2_device = None stage2_device_types = [] stage2_raid_levels = [] stage2_raid_metadata = [] stage2_raid_member_types = [] stage2_mountpoints = ["/boot", "/"] stage2_bootable = False stage2_must_be_primary = True stage2_description = 
N_("/boot filesystem") stage2_max_end = Size(spec="2 TiB") @property def stage2_format_types(self): return ["ext4", "ext3", "ext2"] # this is so stupid... global_preserve_args = ["speakup_synth", "apic", "noapic", "apm", "ide", "noht", "acpi", "video", "pci", "nodmraid", "nompath", "nomodeset", "noiswmd", "fips", "selinux", "biosdevname"] preserve_args = [] _trusted_boot = False def __init__(self): self.boot_args = Arguments() self.dracut_args = Arguments() self.disks = [] self._disk_order = [] # timeout in seconds self._timeout = None self.password = None # console/serial stuff self.console = "" self.console_options = "" self._set_console() # list of BootLoaderImage instances representing bootable OSs self.linux_images = [] self.chain_images = [] # default image self._default_image = None self._update_only = False self.skip_bootloader = False self.errors = [] self.warnings = [] self.reset() def reset(self): """ Reset stage1 and stage2 values """ # the device the bootloader will be installed on self.stage1_device = None # the "boot disk", meaning the disk stage1 _will_ go on self.stage1_disk = None self.stage2_device = None self.stage2_is_preferred_stage1 = False self.errors = [] self.problems = [] self.warnings = [] # # disk list access # @property def disk_order(self): """Potentially partial order for disks.""" return self._disk_order @disk_order.setter def disk_order(self, order): log.debug("new disk order: %s", order) self._disk_order = order if self.disks: self._sort_disks() def _sort_disks(self): """Sort the internal disk list. """ for name in reversed(self.disk_order): try: idx = [d.name for d in self.disks].index(name) except ValueError: log.error("bios order specified unknown disk %s", name) continue self.disks.insert(0, self.disks.pop(idx)) def set_disk_list(self, disks): self.disks = disks[:] self._sort_disks() # # image list access # @property def default(self): """The default image.""" if not self._default_image and self.linux_images: self._default_image = self.linux_images[0] return self._default_image @default.setter def default(self, image): if image not in self.images: raise ValueError("new default image not in image list") log.debug("new default image: %s", image) self._default_image = image @property def images(self): """ List of OS images that will be included in the configuration. 
""" all_images = self.linux_images all_images.extend(i for i in self.chain_images if i.label) return all_images def clear_images(self): """Empty out the image list.""" self.linux_images = [] self.chain_images = [] def add_image(self, image): """Add a BootLoaderImage instance to the image list.""" if isinstance(image, LinuxBootLoaderImage): self.linux_images.append(image) else: self.chain_images.append(image) def image_label(self, image): """Return the appropriate image label for this bootloader.""" return getattr(image, self.image_label_attr) # # platform-specific data access # @property def disklabel_types(self): return platform.platform._disklabel_types @property def device_descriptions(self): return platform.platform.bootStage1ConstraintDict["descriptions"] # # constraint checking for target devices # def _is_valid_md(self, device, raid_levels=None, metadata=None, member_types=None, desc=""): ret = True if device.type != "mdarray": return ret if raid_levels and device.level not in raid_levels: levels_str = ",".join("%s" % l for l in raid_levels) self.errors.append(_("RAID sets that contain '%(desc)s' must have one " "of the following raid levels: %(raid_level)s.") % {"desc" : desc, "raid_level" : levels_str}) ret = False # new arrays will be created with an appropriate metadata format if device.exists and \ metadata and device.metadataVersion not in metadata: self.errors.append(_("RAID sets that contain '%(desc)s' must have one " "of the following metadata versions: %(metadata_versions)s.") % {"desc": desc, "metadata_versions": ",".join(metadata)}) ret = False if member_types: for member in device.devices: if not self._device_type_match(member, member_types): self.errors.append(_("RAID sets that contain '%(desc)s' must " "have one of the following device " "types: %(types)s.") % {"desc" : desc, "types" : ",".join(member_types)}) ret = False log.debug("_is_valid_md(%s) returning %s", device.name, ret) return ret def _is_valid_disklabel(self, device, disklabel_types=None): ret = True if self.disklabel_types: for disk in device.disks: label_type = getattr(disk.format, "labelType", None) if not label_type or label_type not in self.disklabel_types: types_str = ",".join(disklabel_types) self.errors.append(_("%(name)s must have one of the following " "disklabel types: %(types)s.") % {"name" : device.name, "types" : types_str}) ret = False log.debug("_is_valid_disklabel(%s) returning %s", device.name, ret) return ret def _is_valid_format(self, device, format_types=None, mountpoints=None, desc=""): ret = True if format_types and device.format.type not in format_types: self.errors.append(_("%(desc)s cannot be of type %(type)s.") % {"desc" : desc, "type" : device.format.type}) ret = False if mountpoints and hasattr(device.format, "mountpoint") \ and device.format.mountpoint not in mountpoints: self.errors.append(_("%(desc)s must be mounted on one of %(mountpoints)s.") % {"desc" : desc, "mountpoints" : ", ".join(mountpoints)}) ret = False log.debug("_is_valid_format(%s) returning %s", device.name, ret) return ret def _is_valid_size(self, device, desc=""): ret = True msg = None errors = [] if device.format.minSize and device.format.maxSize: msg = (_("%(desc)s must be between %(min)d and %(max)d MB in size") % {"desc" : desc, "min" : device.format.minSize, "max" : device.format.maxSize}) if device.format.minSize and device.size < device.format.minSize: if msg is None: errors.append(_("%(desc)s must not be smaller than %(min)dMB.") % {"desc" : desc, "min" : device.format.minSize}) else: 
errors.append(msg) ret = False if device.format.maxSize and device.size > device.format.maxSize: if msg is None: errors.append(_("%(desc)s must not be larger than %(max)dMB.") % {"desc" : desc, "max" : device.format.maxSize}) elif msg not in errors: # don't add the same error string twice errors.append(msg) ret = False log.debug("_is_valid_size(%s) returning %s", device.name, ret) return ret def _is_valid_location(self, device, max_end=None, desc=""): ret = True if max_end and device.type == "partition" and device.partedPartition: end_sector = device.partedPartition.geometry.end sector_size = device.partedPartition.disk.device.sectorSize end = Size(bytes=sector_size * end_sector) if end > max_end: self.errors.append(_("%(desc)s must be within the first %(max_end)s of " "the disk.") % {"desc": desc, "max_end": max_end}) ret = False log.debug("_is_valid_location(%s) returning %s", device.name, ret) return ret def _is_valid_partition(self, device, primary=None, desc=""): ret = True if device.type == "partition" and primary and not device.isPrimary: self.errors.append(_("%s must be on a primary partition.") % desc) ret = False log.debug("_is_valid_partition(%s) returning %s", device.name, ret) return ret # # target/stage1 device access # def _device_type_index(self, device, types): """ Return the index of the matching type in types to device's type. Return None if no match is found. """ index = None try: index = types.index(device.type) except ValueError: if "disk" in types and device.isDisk: index = types.index("disk") return index def _device_type_match(self, device, types): """ Return True if device is of one of the types in the list types. """ return self._device_type_index(device, types) is not None def device_description(self, device): device_types = self.device_descriptions.keys() idx = self._device_type_index(device, device_types) if idx is None: raise ValueError("No description available for %s" % device.type) # this looks unnecessarily complicated, but it handles the various # device types that we treat as disks return self.device_descriptions[device_types[idx]] def set_preferred_stage1_type(self, preferred): """ Set a preferred type of stage1 device. """ if not self.stage2_is_valid_stage1: # "partition" means first sector of stage2 and is only meaningful # for bootloaders that can use stage2 as stage1 return if preferred == "mbr": # "mbr" is already the default return # partition means "use the stage2 device for a stage1 device" self.stage2_is_preferred_stage1 = True def is_valid_stage1_device(self, device, early=False): """ Return True if the device is a valid stage1 target device. Also collect lists of errors and warnings. The criteria for being a valid stage1 target device vary from platform to platform. On some platforms a disk with an msdos disklabel is a valid stage1 target, while some platforms require a special device. Some examples of these special devices are EFI system partitions on EFI machines, PReP boot partitions on iSeries, and Apple bootstrap partitions on Mac. The 'early' keyword argument is a boolean flag indicating whether or not this check is being performed at a point where the mountpoint cannot be expected to be set for things like EFI system partitions. 
""" self.errors = [] self.warnings = [] valid = True constraint = platform.platform.bootStage1ConstraintDict if device is None: return False if not self._device_type_match(device, constraint["device_types"]): log.debug("stage1 device cannot be of type %s", device.type) return False if blivet.arch.isS390() and _is_on_iscsi(device): log.debug("stage1 device cannot be on an iSCSI disk on s390(x)") return False description = self.device_description(device) if self.stage2_is_valid_stage1 and device == self.stage2_device: # special case valid = (self.stage2_is_preferred_stage1 and self.is_valid_stage2_device(device)) # we'll be checking stage2 separately so don't duplicate messages self.problems = [] self.warnings = [] return valid if device.protected: valid = False if not self._is_valid_disklabel(device, disklabel_types=self.disklabel_types): valid = False if not self._is_valid_size(device, desc=description): valid = False if not self._is_valid_location(device, max_end=constraint["max_end"], desc=description): valid = False if not self._is_valid_md(device, raid_levels=constraint["raid_levels"], metadata=constraint["raid_metadata"], member_types=constraint["raid_member_types"], desc=description): valid = False if not self.stage2_bootable and not getattr(device, "bootable", True): log.warning("%s not bootable", device.name) # XXX does this need to be here? if getattr(device.format, "label", None) in ("ANACONDA", "LIVE"): log.info("ignoring anaconda boot disk") valid = False if early: mountpoints = [] else: mountpoints = constraint["mountpoints"] if not self._is_valid_format(device, format_types=constraint["format_types"], mountpoints=mountpoints, desc=description): valid = False if not self.encryption_support and device.encrypted: self.errors.append(_("%s cannot be on an encrypted block " "device.") % description) valid = False log.debug("is_valid_stage1_device(%s) returning %s", device.name, valid) return valid def set_stage1_device(self, devices): self.stage1_device = None if not self.stage1_disk: self.reset() raise BootLoaderError("need stage1 disk to set stage1 device") if self.stage2_is_preferred_stage1: self.stage1_device = self.stage2_device return for device in devices: if self.stage1_disk not in device.disks: continue if self.is_valid_stage1_device(device): if flags.imageInstall and device.isDisk: # GRUB2 will install to /dev/loop0 but not to # /dev/mapper/<image_name> self.stage1_device = device.parents[0] else: self.stage1_device = device break if not self.stage1_device: self.reset() raise BootLoaderError("failed to find a suitable stage1 device") # # boot/stage2 device access # def is_valid_stage2_device(self, device, linux=True, non_linux=False): """ Return True if the device is suitable as a stage2 target device. Also collect lists of errors and warnings. 
""" self.errors = [] self.warnings = [] valid = True if device is None: return False if device.protected: valid = False if blivet.arch.isS390() and _is_on_iscsi(device): self.errors.append(_("%s cannot be on an iSCSI disk on s390(x)") % self.stage2_description) valid = False if not self._device_type_match(device, self.stage2_device_types): self.errors.append(_("%(desc)s cannot be of type %(type)s") % {"desc" : _(self.stage2_description), "type" : device.type}) valid = False if not self._is_valid_disklabel(device, disklabel_types=self.disklabel_types): valid = False if not self._is_valid_size(device, desc=_(self.stage2_description)): valid = False if not self._is_valid_location(device, max_end=self.stage2_max_end, desc=_(self.stage2_description)): valid = False if not self._is_valid_partition(device, primary=self.stage2_must_be_primary): valid = False if not self._is_valid_md(device, raid_levels=self.stage2_raid_levels, metadata=self.stage2_raid_metadata, member_types=self.stage2_raid_member_types, desc=_(self.stage2_description)): valid = False if linux and \ not self._is_valid_format(device, format_types=self.stage2_format_types, mountpoints=self.stage2_mountpoints, desc=_(self.stage2_description)): valid = False non_linux_format_types = platform.platform._non_linux_format_types if non_linux and \ not self._is_valid_format(device, format_types=non_linux_format_types): valid = False if not self.encryption_support and device.encrypted: self.errors.append(_("%s cannot be on an encrypted block " "device.") % _(self.stage2_description)) valid = False log.debug("is_valid_stage2_device(%s) returning %s", device.name, valid) return valid # # miscellaneous # def has_windows(self, devices): return False @property def timeout(self): """Bootloader timeout in seconds.""" if self._timeout is not None: t = self._timeout else: t = 5 return t def check(self): """ Run additional bootloader checks """ return True @timeout.setter def timeout(self, seconds): self._timeout = seconds @property def update_only(self): return self._update_only @update_only.setter def update_only(self, value): if value and not self.can_update: raise ValueError("this bootloader does not support updates") elif self.can_update: self._update_only = value def set_boot_args(self, *args, **kwargs): """ Set up the boot command line. Keyword Arguments: storage - a blivet.Storage instance All other arguments are expected to have a dracutSetupArgs() method. """ storage = kwargs.pop("storage", None) # # FIPS # if flags.cmdline.get("fips") == "1": self.boot_args.add("boot=%s" % self.stage2_device.fstabSpec) # # dracut # # storage from blivet.devices import NetworkStorageDevice dracut_devices = [storage.rootDevice] if self.stage2_device != storage.rootDevice: dracut_devices.append(self.stage2_device) dracut_devices.extend(storage.fsset.swapDevices) # Does /usr have its own device? 
If so, we need to tell dracut usr_device = storage.mountpoints.get("/usr") if usr_device: dracut_devices.extend([usr_device]) done = [] for device in dracut_devices: for dep in storage.devices: if dep in done: continue if device != dep and not device.dependsOn(dep): continue setup_args = dep.dracutSetupArgs() if not setup_args: continue self.boot_args.update(setup_args) self.dracut_args.update(setup_args) done.append(dep) # network storage # XXX this is nothing to be proud of if isinstance(dep, NetworkStorageDevice): setup_args = pyanaconda.network.dracutSetupArgs(dep) self.boot_args.update(setup_args) self.dracut_args.update(setup_args) # passed-in objects for cfg_obj in list(args) + kwargs.values(): if hasattr(cfg_obj, "dracutSetupArgs"): setup_args = cfg_obj.dracutSetupArgs() self.boot_args.update(setup_args) self.dracut_args.update(setup_args) else: setup_string = cfg_obj.dracutSetupString() self.boot_args.add(setup_string) self.dracut_args.add(setup_string) # This is needed for FCoE, bug #743784. The case: # We discover LUN on an iface which is part of multipath setup. # If the iface is disconnected after discovery anaconda doesn't # write dracut ifname argument for the disconnected iface path # (in Network.dracutSetupArgs). # Dracut needs the explicit ifname= because biosdevname # fails to rename the iface (because of BFS booting from it). for nic, _dcb, _auto_vlan in fcoe().nics: try: hwaddr = nm_device_hwaddress(nic) except ValueError: continue self.boot_args.add("ifname=%s:%s" % (nic, hwaddr.lower())) # # preservation of some of our boot args # FIXME: this is stupid. # for opt in self.global_preserve_args + self.preserve_args: if opt not in flags.cmdline: continue arg = flags.cmdline.get(opt) new_arg = opt if arg: new_arg += "=%s" % arg self.boot_args.add(new_arg) # # configuration # @property def boot_prefix(self): """ Prefix, if any, to paths in /boot. """ if self.stage2_device.format.mountpoint == "/": prefix = "/boot" else: prefix = "" return prefix def _set_console(self): """ Set console options based on boot arguments. """ console = flags.cmdline.get("console", "") console = os.path.basename(console) self.console, _x, self.console_options = console.partition(",") def write_config_console(self, config): """Write console-related configuration lines.""" pass def write_config_password(self, config): """Write password-related configuration lines.""" pass def write_config_header(self, config): """Write global configuration lines.""" self.write_config_console(config) self.write_config_password(config) def write_config_images(self, config): """Write image configuration entries.""" raise NotImplementedError() def write_config_post(self): try: os.chmod(ROOT_PATH + self.config_file, self.config_file_mode) except OSError as e: log.error("failed to set config file permissions: %s", e) def write_config(self): """ Write the bootloader configuration. """ if not self.config_file: raise BootLoaderError("no config file defined for this bootloader") config_path = os.path.normpath(ROOT_PATH + self.config_file) if os.access(config_path, os.R_OK): os.rename(config_path, config_path + ".anacbak") config = open(config_path, "w") self.write_config_header(config) self.write_config_images(config) config.close() self.write_config_post() @property def trusted_boot(self): return self._trusted_boot @trusted_boot.setter def trusted_boot(self, trusted_boot): self._trusted_boot = trusted_boot # # installation # def write(self): """ Write the bootloader configuration and install the bootloader. 
""" if self.skip_bootloader: return if self.update_only: self.update() return self.write_config() sync() self.stage2_device.format.sync(root=ROOT_PATH) self.install() def install(self, args=None): raise NotImplementedError() def update(self): """ Update an existing bootloader configuration. """ pass class GRUB(BootLoader): name = "GRUB" _config_dir = "grub" _config_file = "grub.conf" _device_map_file = "device.map" can_dual_boot = True can_update = True stage2_is_valid_stage1 = True stage2_bootable = True stage2_must_be_primary = False # list of strings representing options for boot device types stage2_device_types = ["partition", "mdarray"] stage2_raid_levels = [raid.RAID1] stage2_raid_member_types = ["partition"] stage2_raid_metadata = ["0", "0.90", "1.0"] packages = ["grub"] def __init__(self): super(GRUB, self).__init__() self.encrypted_password = "" # # grub-related conveniences # def grub_device_name(self, device): """ Return a grub-friendly representation of device. """ disk = getattr(device, "disk", device) name = "(hd%d" % self.disks.index(disk) if hasattr(device, "disk"): name += ",%d" % (device.partedPartition.number - 1,) name += ")" return name @property def grub_config_dir(self): """ Config dir, adjusted for grub's view of the world. """ return self.boot_prefix + self._config_dir # # configuration # @property def config_dir(self): """ Full path to configuration directory. """ return "/boot/" + self._config_dir @property def config_file(self): """ Full path to configuration file. """ return "%s/%s" % (self.config_dir, self._config_file) @property def device_map_file(self): """ Full path to device.map file. """ return "%s/%s" % (self.config_dir, self._device_map_file) @property def grub_conf_device_line(self): return "" @property def splash_dir(self): """ relative path to splash image directory.""" return GRUB._config_dir @property def serial_command(self): command = "" if self.console and self.console.startswith("ttyS"): unit = self.console[-1] command = ["serial"] s = parse_serial_opt(self.console_options) if unit and unit != '0': command.append("--unit=%s" % unit) if s.speed and s.speed != '9600': command.append("--speed=%s" % s.speed) if s.parity: if s.parity == 'o': command.append("--parity=odd") elif s.parity == 'e': command.append("--parity=even") if s.word and s.word != '8': command.append("--word=%s" % s.word) if s.stop and s.stop != '1': command.append("--stop=%s" % s.stop) command = " ".join(command) return command def write_config_console(self, config): """ Write console-related configuration. """ if not self.console: return if self.console.startswith("ttyS"): config.write("%s\n" % self.serial_command) config.write("terminal --timeout=%s serial console\n" % self.timeout) console_arg = "console=%s" % self.console if self.console_options: console_arg += ",%s" % self.console_options self.boot_args.add(console_arg) def _encrypt_password(self): """ Make sure self.encrypted_password is set up correctly. """ if self.encrypted_password: return if not self.password: raise BootLoaderError("cannot encrypt empty password") import string import crypt import random salt = "$6$" salt_len = 16 salt_chars = string.letters + string.digits + './' rand_gen = random.SystemRandom() salt += "".join(rand_gen.choice(salt_chars) for i in range(salt_len)) self.encrypted_password = crypt.crypt(self.password, salt) def write_config_password(self, config): """ Write password-related configuration. 
""" if not self.password and not self.encrypted_password: return self._encrypt_password() password_line = "--encrypted " + self.encrypted_password config.write("password %s\n" % password_line) def write_config_header(self, config): """Write global configuration information. """ if self.boot_prefix: have_boot = "do not " else: have_boot = "" s = """# grub.conf generated by anaconda # Note that you do not have to rerun grub after making changes to this file. # NOTICE: You %(do)shave a /boot partition. This means that all kernel and # initrd paths are relative to %(boot)s, eg. # root %(grub_target)s # kernel %(prefix)s/vmlinuz-version ro root=%(root_device)s # initrd %(prefix)s/initrd-[generic-]version.img """ % {"do": have_boot, "boot": self.stage2_device.format.mountpoint, "root_device": self.stage2_device.path, "grub_target": self.grub_device_name(self.stage1_device), "prefix": self.boot_prefix} config.write(s) config.write("boot=%s\n" % self.stage1_device.path) config.write(self.grub_conf_device_line) # find the index of the default image try: default_index = self.images.index(self.default) except ValueError: e = "Failed to find default image (%s)" % self.default.label raise BootLoaderError(e) config.write("default=%d\n" % default_index) config.write("timeout=%d\n" % self.timeout) self.write_config_console(config) if iutil.isConsoleOnVirtualTerminal(self.console): splash = "splash.xpm.gz" splash_path = os.path.normpath("%s/boot/%s/%s" % (ROOT_PATH, self.splash_dir, splash)) if os.access(splash_path, os.R_OK): grub_root_grub_name = self.grub_device_name(self.stage2_device) config.write("splashimage=%s/%s/%s\n" % (grub_root_grub_name, self.splash_dir, splash)) config.write("hiddenmenu\n") self.write_config_password(config) def write_config_images(self, config): """ Write image entries into configuration file. """ for image in self.images: args = Arguments() if isinstance(image, LinuxBootLoaderImage): grub_root = self.grub_device_name(self.stage2_device) args.update(["ro", "root=%s" % image.device.fstabSpec]) args.update(self.boot_args) if isinstance(image, TbootLinuxBootLoaderImage): args.update(image.args) snippet = ("\tkernel %(prefix)s/%(multiboot)s %(mbargs)s\n" "\tmodule %(prefix)s/%(kernel)s %(args)s\n" "\tmodule %(prefix)s/%(initrd)s\n" % {"prefix": self.boot_prefix, "multiboot": image.multiboot, "mbargs": image.mbargs, "kernel": image.kernel, "args": args, "initrd": image.initrd}) else: snippet = ("\tkernel %(prefix)s/%(kernel)s %(args)s\n" "\tinitrd %(prefix)s/%(initrd)s\n" % {"prefix": self.boot_prefix, "kernel": image.kernel, "args": args, "initrd": image.initrd}) stanza = ("title %(label)s (%(version)s)\n" "\troot %(grub_root)s\n" "%(snippet)s" % {"label": image.label, "version": image.version, "grub_root": grub_root, "snippet": snippet}) else: stanza = ("title %(label)s\n" "\trootnoverify %(grub_root)s\n" "\tchainloader +1\n" % {"label": image.label, "grub_root": self.grub_device_name(image.device)}) log.info("bootloader.py: used boot args: %s ", args) config.write(stanza) def write_device_map(self): """ Write out a device map containing all supported devices. 
""" map_path = os.path.normpath(ROOT_PATH + self.device_map_file) if os.access(map_path, os.R_OK): os.rename(map_path, map_path + ".anacbak") dev_map = open(map_path, "w") dev_map.write("# this device map was generated by anaconda\n") for disk in self.disks: dev_map.write("%s %s\n" % (self.grub_device_name(disk), disk.path)) dev_map.close() def write_config_post(self): """ Perform additional configuration after writing config file(s). """ super(GRUB, self).write_config_post() # make symlink for menu.lst (grub's default config file name) menu_lst = "%s%s/menu.lst" % (ROOT_PATH, self.config_dir) if os.access(menu_lst, os.R_OK): try: os.rename(menu_lst, menu_lst + '.anacbak') except OSError as e: log.error("failed to back up %s: %s", menu_lst, e) try: os.symlink(self._config_file, menu_lst) except OSError as e: log.error("failed to create grub menu.lst symlink: %s", e) # make symlink to grub.conf in /etc since that's where configs belong etc_grub = "%s/etc/%s" % (ROOT_PATH, self._config_file) if os.access(etc_grub, os.R_OK): try: os.unlink(etc_grub) except OSError as e: log.error("failed to remove %s: %s", etc_grub, e) try: os.symlink("..%s" % self.config_file, etc_grub) except OSError as e: log.error("failed to create /etc/grub.conf symlink: %s", e) def write_config(self): """ Write bootloader configuration to disk. """ # write device.map self.write_device_map() # this writes the actual configuration file super(GRUB, self).write_config() # # installation # @property def install_targets(self): """ List of (stage1, stage2) tuples representing install targets. """ targets = [] if self.stage2_device.type == "mdarray" and \ self.stage2_device.level == 1: # make sure we have stage1 and stage2 installed with redundancy # so that boot can succeed even in the event of failure or removal # of some of the disks containing the member partitions of the # /boot array for stage2dev in self.stage2_device.parents: if self.stage1_device.isDisk: # install to mbr if self.stage2_device.dependsOn(self.stage1_device): # if target disk contains any of /boot array's member # partitions, set up stage1 on each member's disk # and stage2 on each member partition stage1dev = stage2dev.disk else: # if target disk does not contain any of /boot array's # member partitions, install stage1 to the target disk # and stage2 to each of the member partitions stage1dev = self.stage1_device else: # target is /boot device and /boot is raid, so install # grub to each of /boot member partitions stage1dev = stage2dev targets.append((stage1dev, stage2dev)) else: targets.append((self.stage1_device, self.stage2_device)) return targets def install(self, args=None): rc = iutil.execWithRedirect("grub-install", ["--just-copy"], root=ROOT_PATH) if rc: raise BootLoaderError("bootloader install failed") for (stage1dev, stage2dev) in self.install_targets: cmd = ("root %(stage2dev)s\n" "install --stage2=%(config_dir)s/stage2" " /%(grub_config_dir)s/stage1 d %(stage1dev)s" " /%(grub_config_dir)s/stage2 p" " %(stage2dev)s/%(grub_config_dir)s/%(config_basename)s\n" % {"grub_config_dir": self.grub_config_dir, "config_dir": self.config_dir, "config_basename": self._config_file, "stage1dev": self.grub_device_name(stage1dev), "stage2dev": self.grub_device_name(stage2dev)}) (pread, pwrite) = os.pipe() os.write(pwrite, cmd) os.close(pwrite) args = ["--batch", "--no-floppy", "--device-map=%s" % self.device_map_file] rc = iutil.execWithRedirect("grub", args, stdin=pread, root=ROOT_PATH) os.close(pread) if rc: raise BootLoaderError("bootloader install 
failed") def update(self): self.install() # # miscellaneous # def has_windows(self, devices): """ Potential boot devices containing non-linux operating systems. """ # make sure we don't clobber error/warning lists errors = self.errors[:] warnings = self.warnings[:] ret = [d for d in devices if self.is_valid_stage2_device(d, linux=False, non_linux=True)] self.errors = errors self.warnings = warnings return bool(ret) class GRUB2(GRUB): """ GRUBv2 - configuration - password (insecure), password_pbkdf2 - http://www.gnu.org/software/grub/manual/grub.html#Invoking-grub_002dmkpasswd_002dpbkdf2 - --users per-entry specifies which users can access, otherwise entry is unrestricted - /etc/grub/custom.cfg - how does grub resolve names of md arrays? - disable automatic use of grub-mkconfig? - on upgrades? - BIOS boot partition (GPT) - parted /dev/sda set <partition_number> bios_grub on - can't contain a filesystem - 31KiB min, 1MiB recommended """ name = "GRUB2" packages = ["grub2"] _config_file = "grub.cfg" _config_dir = "grub2" config_file_mode = 0600 defaults_file = "/etc/default/grub" can_dual_boot = True can_update = True terminal_type = "console" # requirements for boot devices stage2_device_types = ["partition", "mdarray"] stage2_raid_levels = [raid.RAID0, raid.RAID1, raid.RAID4, raid.RAID5, raid.RAID6, raid.RAID10] stage2_raid_metadata = ["0", "0.90", "1.0", "1.2"] @property def stage2_format_types(self): if productName.startswith("Red Hat Enterprise Linux"): return ["xfs", "ext4", "ext3", "ext2", "btrfs"] else: return ["ext4", "ext3", "ext2", "btrfs", "xfs"] def __init__(self): super(GRUB2, self).__init__() self.boot_args.add("$([ -x /usr/sbin/rhcrashkernel-param ] && "\ "/usr/sbin/rhcrashkernel-param || :)") # XXX we probably need special handling for raid stage1 w/ gpt disklabel # since it's unlikely there'll be a bios boot partition on each disk # # grub-related conveniences # def grub_device_name(self, device): """ Return a grub-friendly representation of device. Disks and partitions use the (hdX,Y) notation, while lvm and md devices just use their names. """ disk = None name = "(%s)" % device.name if device.isDisk: disk = device elif hasattr(device, "disk"): disk = device.disk if disk is not None: name = "(hd%d" % self.disks.index(disk) if hasattr(device, "disk"): lt = device.disk.format.labelType name += ",%s%d" % (lt, device.partedPartition.number) name += ")" return name def write_config_console(self, config): if not self.console: return console_arg = "console=%s" % self.console if self.console_options: console_arg += ",%s" % self.console_options self.boot_args.add(console_arg) def write_device_map(self): """ Write out a device map containing all supported devices. 
""" map_path = os.path.normpath(ROOT_PATH + self.device_map_file) if os.access(map_path, os.R_OK): os.rename(map_path, map_path + ".anacbak") devices = self.disks if self.stage1_device not in devices: devices.append(self.stage1_device) for disk in self.stage2_device.disks: if disk not in devices: devices.append(disk) devices = [d for d in devices if d.isDisk] if len(devices) == 0: return dev_map = open(map_path, "w") dev_map.write("# this device map was generated by anaconda\n") for drive in devices: dev_map.write("%s %s\n" % (self.grub_device_name(drive), drive.path)) dev_map.close() def write_defaults(self): defaults_file = "%s%s" % (ROOT_PATH, self.defaults_file) defaults = open(defaults_file, "w+") defaults.write("GRUB_TIMEOUT=%d\n" % self.timeout) defaults.write("GRUB_DISTRIBUTOR=\"$(sed 's, release .*$,,g' /etc/system-release)\"\n") defaults.write("GRUB_DEFAULT=saved\n") defaults.write("GRUB_DISABLE_SUBMENU=true\n") if self.console and self.console.startswith("ttyS"): defaults.write("GRUB_TERMINAL=\"serial console\"\n") defaults.write("GRUB_SERIAL_COMMAND=\"%s\"\n" % self.serial_command) else: defaults.write("GRUB_TERMINAL_OUTPUT=\"%s\"\n" % self.terminal_type) # this is going to cause problems for systems containing multiple # linux installations or even multiple boot entries with different # boot arguments log.info("bootloader.py: used boot args: %s ", self.boot_args) defaults.write("GRUB_CMDLINE_LINUX=\"%s\"\n" % self.boot_args) defaults.write("GRUB_DISABLE_RECOVERY=\"true\"\n") #defaults.write("GRUB_THEME=\"/boot/grub2/themes/system/theme.txt\"\n") defaults.close() def _encrypt_password(self): """ Make sure self.encrypted_password is set up properly. """ if self.encrypted_password: return if not self.password: raise RuntimeError("cannot encrypt empty password") (pread, pwrite) = os.pipe() os.write(pwrite, "%s\n%s\n" % (self.password, self.password)) os.close(pwrite) buf = iutil.execWithCapture("grub2-mkpasswd-pbkdf2", [], stdin=pread, root=ROOT_PATH) os.close(pread) self.encrypted_password = buf.split()[-1].strip() if not self.encrypted_password.startswith("grub.pbkdf2."): raise BootLoaderError("failed to encrypt bootloader password") def write_password_config(self): if not self.password and not self.encrypted_password: return users_file = ROOT_PATH + "/etc/grub.d/01_users" header = open(users_file, "w") header.write("#!/bin/sh -e\n\n") header.write("cat << EOF\n") # XXX FIXME: document somewhere that the username is "root" header.write("set superusers=\"root\"\n") header.write("export superusers\n") self._encrypt_password() password_line = "password_pbkdf2 root " + self.encrypted_password header.write("%s\n" % password_line) header.write("EOF\n") header.close() os.chmod(users_file, 0700) def write_config(self): self.write_config_console(None) # See if we have a password and if so update the boot args before we # write out the defaults file. 
if self.password or self.encrypted_password: self.boot_args.add("rd.shell=0") self.write_defaults() # if we fail to setup password auth we should complete the # installation so the system is at least bootable try: self.write_password_config() except (BootLoaderError, OSError, RuntimeError) as e: log.error("bootloader password setup failed: %s", e) # make sure the default entry is the OS we are installing entry_title = "%s Linux, with Linux %s" % (productName, self.default.version) rc = iutil.execWithRedirect("grub2-set-default", [entry_title], root=ROOT_PATH) if rc: log.error("failed to set default menu entry to %s", productName) # now tell grub2 to generate the main configuration file rc = iutil.execWithRedirect("grub2-mkconfig", ["-o", self.config_file], root=ROOT_PATH) if rc: raise BootLoaderError("failed to write bootloader configuration") # # installation # def install(self, args=None): if args is None: args = [] # XXX will installing to multiple drives work as expected with GRUBv2? for (stage1dev, stage2dev) in self.install_targets: grub_args = args + ["--no-floppy", stage1dev.path] if stage1dev == stage2dev: # This is hopefully a temporary hack. GRUB2 currently refuses # to install to a partition's boot block without --force. grub_args.insert(0, '--force') rc = iutil.execWithRedirect("grub2-install", grub_args, root=ROOT_PATH, env_prune=['MALLOC_PERTURB_']) if rc: raise BootLoaderError("bootloader install failed") def write(self): """ Write the bootloader configuration and install the bootloader. """ if self.skip_bootloader: return if self.update_only: self.update() return self.write_device_map() self.stage2_device.format.sync(root=ROOT_PATH) sync() self.install() sync() self.stage2_device.format.sync(root=ROOT_PATH) self.write_config() sync() self.stage2_device.format.sync(root=ROOT_PATH) def check(self): """ When installing to the mbr of a disk grub2 needs enough space before the first partition in order to embed its core.img Until we have a way to ask grub2 what the size is we check to make sure it starts >= 512K, otherwise return an error. """ ret = True base_gap_bytes = 32256 # 31.5KiB advanced_gap_bytes = 524288 # 512KiB self.errors = [] self.warnings = [] if self.stage1_device == self.stage2_device: return ret # These are small enough to fit if self.stage2_device.type == "partition": min_start = base_gap_bytes else: min_start = advanced_gap_bytes if not self.stage1_disk: return False # If the first partition starts too low show an error. 
parts = self.stage1_disk.format.partedDisk.partitions for p in parts: start = p.geometry.start * p.disk.device.sectorSize if not p.getFlag(PARTITION_BIOS_GRUB) and start < min_start: msg = _("%(deviceName)s may not have enough space for grub2 to embed " "core.img when using the %(fsType)s filesystem on %(deviceType)s") \ % {"deviceName": self.stage1_device.name, "fsType": self.stage2_device.format.type, "deviceType": self.stage2_device.type} log.error(msg) self.errors.append(msg) ret = False break return ret class EFIGRUB(GRUB2): packages = ["grub2-efi", "efibootmgr", "shim"] can_dual_boot = False stage2_is_valid_stage1 = False stage2_bootable = False @property def _config_dir(self): return "efi/EFI/%s" % (self.efi_dir,) def __init__(self): super(EFIGRUB, self).__init__() self.efi_dir = 'BOOT' def efibootmgr(self, *args, **kwargs): if kwargs.pop("capture", False): exec_func = iutil.execWithCapture else: exec_func = iutil.execWithRedirect if "root" not in kwargs: kwargs["root"] = ROOT_PATH return exec_func("efibootmgr", list(args), **kwargs) # # installation # def remove_efi_boot_target(self): buf = self.efibootmgr(capture=True) for line in buf.splitlines(): try: (slot, _product) = line.split(None, 1) except ValueError: continue if _product == productName: slot_id = slot[4:8] # slot_id is hex, we can't use .isint and use this regex: if not re.match("^[0-9a-fA-F]+$", slot_id): log.warning("failed to parse efi boot slot (%s)", slot) continue rc = self.efibootmgr("-b", slot_id, "-B") if rc: raise BootLoaderError("failed to remove old efi boot entry") @property def efi_dir_as_efifs_dir(self): ret = self._config_dir.replace('efi/', '') return "\\" + ret.replace('/', '\\') def add_efi_boot_target(self): if self.stage1_device.type == "partition": boot_disk = self.stage1_device.disk boot_part_num = self.stage1_device.partedPartition.number elif self.stage1_device.type == "mdarray": # FIXME: I'm just guessing here. This probably needs the full # treatment, ie: multiple targets for each member. boot_disk = self.stage1_device.parents[0].disk boot_part_num = self.stage1_device.parents[0].partedPartition.number boot_part_num = str(boot_part_num) rc = self.efibootmgr("-c", "-w", "-L", productName, "-d", boot_disk.path, "-p", boot_part_num, "-l", self.efi_dir_as_efifs_dir + "\\shim.efi") if rc: raise BootLoaderError("failed to set new efi boot target") def install(self, args=None): if not flags.leavebootorder: self.remove_efi_boot_target() self.add_efi_boot_target() def update(self): self.install() # # installation # def write(self): """ Write the bootloader configuration and install the bootloader. """ if self.skip_bootloader: return if self.update_only: self.update() return sync() self.stage2_device.format.sync(root=ROOT_PATH) self.install() self.write_config() def check(self): return True # FIXME: We need to include grubby, and omit efibootmgr and shim packages # on aarch64 until we get all the EFI bits in place. 
class Aarch64EFIGRUB(EFIGRUB): packages = ["grub2-efi", "grubby"] class MacEFIGRUB(EFIGRUB): def mactel_config(self): if os.path.exists(ROOT_PATH + "/usr/libexec/mactel-boot-setup"): rc = iutil.execWithRedirect("/usr/libexec/mactel-boot-setup", [], root=ROOT_PATH) if rc: log.error("failed to configure Mac bootloader") def install(self, args=None): super(MacEFIGRUB, self).install() self.mactel_config() def is_valid_stage1_device(self, device, early=False): valid = super(MacEFIGRUB, self).is_valid_stage1_device(device, early) # Make sure we don't pick the OSX root partition if valid and getattr(device.format, "name", "") != "Linux HFS+ ESP": valid = False if hasattr(device.format, "name"): log.debug("device.format.name is '%s'", device.format.name) log.debug("MacEFIGRUB.is_valid_stage1_device(%s) returning %s", device.name, valid) return valid # Inherit abstract methods from BootLoader # pylint: disable-msg=W0223 class YabootBase(BootLoader): def write_config_password(self, config): if self.password: config.write("password=%s\n" % self.password) config.write("restricted\n") def write_config_images(self, config): for image in self.images: if not isinstance(image, LinuxBootLoaderImage): # mac os images are handled specially in the header on mac continue args = Arguments() if self.password: args.add("rd.shell=0") if image.initrd: initrd_line = "\tinitrd=%s/%s\n" % (self.boot_prefix, image.initrd) else: initrd_line = "" root_device_spec = image.device.fstabSpec if root_device_spec.startswith("/"): root_line = "\troot=%s\n" % root_device_spec else: args.add("root=%s" % root_device_spec) root_line = "" args.update(self.boot_args) log.info("bootloader.py: used boot args: %s ", args) stanza = ("image=%(boot_prefix)s%(kernel)s\n" "\tlabel=%(label)s\n" "\tread-only\n" "%(initrd_line)s" "%(root_line)s" "\tappend=\"%(args)s\"\n\n" % {"kernel": image.kernel, "initrd_line": initrd_line, "label": self.image_label(image), "root_line": root_line, "args": args, "boot_prefix": self.boot_prefix}) config.write(stanza) class Yaboot(YabootBase): name = "Yaboot" _config_file = "yaboot.conf" prog = "ybin" image_label_attr = "short_label" packages = ["yaboot"] # stage2 device requirements stage2_device_types = ["partition", "mdarray"] stage2_device_raid_levels = [raid.RAID1] # # configuration # @property def config_dir(self): conf_dir = "/etc" if self.stage2_device.format.mountpoint == "/boot": conf_dir = "/boot/etc" return conf_dir @property def config_file(self): return "%s/%s" % (self.config_dir, self._config_file) def write_config_header(self, config): if self.stage2_device.type == "mdarray": boot_part_num = self.stage2_device.parents[0].partedPartition.number else: boot_part_num = self.stage2_device.partedPartition.number # yaboot.conf timeout is in tenths of a second. Brilliant. 
header = ("# yaboot.conf generated by anaconda\n\n" "boot=%(stage1dev)s\n" "init-message=\"Welcome to %(product)s!\\nHit <TAB> for " "boot options\"\n\n" "partition=%(part_num)d\n" "timeout=%(timeout)d\n" "install=/usr/lib/yaboot/yaboot\n" "delay=5\n" "enablecdboot\n" "enableofboot\n" "enablenetboot\n" % {"stage1dev": self.stage1_device.path, "product": productName, "part_num": boot_part_num, "timeout": self.timeout * 10}) config.write(header) self.write_config_variant_header(config) self.write_config_password(config) config.write("\n") def write_config_variant_header(self, config): config.write("nonvram\n") config.write("mntpoint=/boot/yaboot\n") config.write("usemount\n") def write_config_post(self): super(Yaboot, self).write_config_post() # make symlink in /etc to yaboot.conf if config is in /boot/etc etc_yaboot_conf = ROOT_PATH + "/etc/yaboot.conf" if not os.access(etc_yaboot_conf, os.R_OK): try: os.symlink("../boot/etc/yaboot.conf", etc_yaboot_conf) except OSError as e: log.error("failed to create /etc/yaboot.conf symlink: %s", e) def write_config(self): if not os.path.isdir(ROOT_PATH + self.config_dir): os.mkdir(ROOT_PATH + self.config_dir) # this writes the config super(Yaboot, self).write_config() # # installation # def install(self, args=None): args = ["-f", "-C", self.config_file] rc = iutil.execWithRedirect(self.prog, args, root=ROOT_PATH) if rc: raise BootLoaderError("bootloader installation failed") class IPSeriesYaboot(Yaboot): prog = "mkofboot" # # configuration # def write_config_variant_header(self, config): config.write("nonvram\n") # only on pSeries? config.write("fstype=raw\n") # # installation # def install(self, args=None): self.updatePowerPCBootList() super(IPSeriesYaboot, self).install() def updatePowerPCBootList(self): log.debug("updatePowerPCBootList: self.stage1_device.path = %s", self.stage1_device.path) buf = iutil.execWithCapture("nvram", ["--print-config=boot-device"]) if len(buf) == 0: log.error ("FAIL: nvram --print-config=boot-device") return boot_list = buf.strip().split() log.debug("updatePowerPCBootList: boot_list = %s", boot_list) buf = iutil.execWithCapture("ofpathname", [self.stage1_device.path]) if len(buf) > 0: boot_disk = buf.strip() log.debug("updatePowerPCBootList: boot_disk = %s", boot_disk) else: log.error("FAIL: ofpathname %s", self.stage1_device.path) return # Place the disk containing the PReP partition first. # Remove all other occurances of it. boot_list = [boot_disk] + filter(lambda x: x != boot_disk, boot_list) log.debug("updatePowerPCBootList: updated boot_list = %s", boot_list) update_value = "boot-device=%s" % " ".join(boot_list) rc = iutil.execWithRedirect("nvram", ["--update-config", update_value]) if rc: log.error("FAIL: nvram --update-config %s", update_value) else: log.info("Updated PPC boot list with the command: nvram --update-config %s", update_value) class IPSeriesGRUB2(GRUB2): # GRUB2 sets /boot bootable and not the PReP partition. This causes the Open Firmware BIOS not # to present the disk as a bootable target. If stage2_bootable is False, then the PReP partition # will be marked bootable. Confusing. stage2_bootable = False terminal_type = "ofconsole" # # installation # def install(self, args=None): if flags.leavebootorder: log.info("leavebootorder passed as an option. 
Will not update the NVRAM boot list.") else: self.updateNVRAMBootList() super(IPSeriesGRUB2, self).install(args=["--no-nvram"]) # This will update the PowerPC's (ppc) bios boot devive order list def updateNVRAMBootList(self): log.debug("updateNVRAMBootList: self.stage1_device.path = %s", self.stage1_device.path) buf = iutil.execWithCapture("nvram", ["--print-config=boot-device"]) if len(buf) == 0: log.error ("Failed to determine nvram boot device") return boot_list = buf.strip().replace("\"", "").split() log.debug("updateNVRAMBootList: boot_list = %s", boot_list) buf = iutil.execWithCapture("ofpathname", [self.stage1_device.path]) if len(buf) > 0: boot_disk = buf.strip() else: log.error("Failed to translate boot path into device name") return # Place the disk containing the PReP partition first. # Remove all other occurances of it. boot_list = [boot_disk] + filter(lambda x: x != boot_disk, boot_list) update_value = "boot-device=%s" % " ".join(boot_list) rc = iutil.execWithRedirect("nvram", ["--update-config", update_value]) if rc: log.error("Failed to update new boot device order") # # In addition to the normal grub configuration variable, add one more to set the size of the # console's window to a standard 80x24 # def write_defaults(self): super(IPSeriesGRUB2, self).write_defaults() defaults_file = "%s%s" % (ROOT_PATH, self.defaults_file) defaults = open(defaults_file, "a+") # The terminfo's X and Y size, and output location could change in the future defaults.write("GRUB_TERMINFO=\"terminfo -g 80x24 console\"\n") defaults.close() class MacYaboot(Yaboot): prog = "mkofboot" can_dual_boot = True # # configuration # def write_config_variant_header(self, config): try: mac_os = [i for i in self.chain_images if i.label][0] except IndexError: pass else: config.write("macosx=%s\n" % mac_os.device.path) config.write("magicboot=/usr/lib/yaboot/ofboot\n") class ZIPL(BootLoader): name = "ZIPL" config_file = "/etc/zipl.conf" packages = ["s390utils-base"] # stage2 device requirements stage2_device_types = ["partition", "mdarray", "lvmlv"] stage2_device_raid_levels = [raid.RAID1] @property def stage2_format_types(self): if productName.startswith("Red Hat Enterprise Linux"): return ["xfs", "ext4", "ext3", "ext2"] else: return ["ext4", "ext3", "ext2", "xfs"] image_label_attr = "short_label" preserve_args = ["cio_ignore"] def __init__(self): super(ZIPL, self).__init__() self.stage1_name = None # # configuration # @property def boot_dir(self): return "/boot" def write_config_images(self, config): for image in self.images: args = Arguments() if image.initrd: initrd_line = "\tramdisk=%s/%s\n" % (self.boot_dir, image.initrd) else: initrd_line = "" args.add("root=%s" % image.device.fstabSpec) args.update(self.boot_args) log.info("bootloader.py: used boot args: %s ", args) stanza = ("[%(label)s]\n" "\timage=%(boot_dir)s/%(kernel)s\n" "%(initrd_line)s" "\tparameters=\"%(args)s\"\n" % {"label": self.image_label(image), "kernel": image.kernel, "initrd_line": initrd_line, "args": args, "boot_dir": self.boot_dir}) config.write(stanza) def write_config_header(self, config): header = ("[defaultboot]\n" "defaultauto\n" "prompt=1\n" "timeout=%(timeout)d\n" "default=%(default)s\n" "target=/boot\n" % {"timeout": self.timeout, "default": self.image_label(self.default)}) config.write(header) # # installation # def install(self, args=None): buf = iutil.execWithCapture("zipl", [], root=ROOT_PATH) for line in buf.splitlines(): if line.startswith("Preparing boot device: "): # Output here may look like: # Preparing boot device: 
dasdb (0200). # Preparing boot device: dasdl. # We want to extract the device name and pass that. name = re.sub(r".+?: ", "", line) self.stage1_name = re.sub(r"(\s\(.+\))?\.$", "", name) if not self.stage1_name: raise BootLoaderError("could not find IPL device") # do the reipl iutil.reIPL(self.stage1_name) class EXTLINUX(BootLoader): name = "EXTLINUX" _config_file = "extlinux.conf" _config_dir = "/boot/extlinux" # stage1 device requirements stage1_device_types = ["disk"] # stage2 device requirements stage2_format_types = ["ext4", "ext3", "ext2"] stage2_device_types = ["partition"] stage2_bootable = True packages = ["syslinux-extlinux"] @property def config_file(self): return "%s/%s" % (self._config_dir, self._config_file) @property def boot_prefix(self): """ Prefix, if any, to paths in /boot. """ if self.stage2_device.format.mountpoint == "/": prefix = "/boot" else: prefix = "" return prefix def write_config_console(self, config): if not self.console: return console_arg = "console=%s" % self.console if self.console_options: console_arg += ",%s" % self.console_options self.boot_args.add(console_arg) def write_config_images(self, config): self.write_config_console(config) for image in self.images: args = Arguments() args.update(["root=%s" % image.device.fstabSpec, "ro"]) if image.device.type == "btrfs subvolume": args.update(["rootflags=subvol=%s" % image.device.name]) args.update(self.boot_args) log.info("bootloader.py: used boot args: %s ", args) stanza = ("label %(label)s (%(version)s)\n" "\tkernel %(boot_prefix)s/%(kernel)s\n" "\tinitrd %(boot_prefix)s/%(initrd)s\n" "\tappend %(args)s\n\n" % {"label": self.image_label(image), "version": image.version, "kernel": image.kernel, "initrd": image.initrd, "args": args, "boot_prefix": self.boot_prefix}) config.write(stanza) def write_config_header(self, config): header = ("# extlinux.conf generated by anaconda\n\n" "ui menu.c32\n\n" "menu autoboot Welcome to %(productName)s. Automatic boot in # second{,s}. 
Press a key for options.\n" "menu title %(productName)s Boot Options.\n" "menu hidden\n\n" "timeout %(timeout)d\n" "#totaltimeout 9000\n\n" "default %(default)s\n\n" % { "productName": productName, "timeout": self.timeout *10, "default": self.image_label(self.default)}) config.write(header) self.write_config_password(config) def write_config_password(self, config): if self.password: config.write("menu master passwd %s\n" % self.password) config.write("menu notabmsg Press [Tab] and enter the password to edit options") def write_config_post(self): etc_extlinux = os.path.normpath(ROOT_PATH + "/etc/" + self._config_file) if not os.access(etc_extlinux, os.R_OK): try: os.symlink("../boot/%s" % self._config_file, etc_extlinux) except OSError as e: log.warning("failed to create /etc/extlinux.conf symlink: %s", e) def write_config(self): super(EXTLINUX, self).write_config() # # installation # def install(self, args=None): args = ["--install", self._config_dir] rc = iutil.execWithRedirect("extlinux", args, root=ROOT_PATH) if rc: raise BootLoaderError("bootloader install failed") # every platform that wants a bootloader needs to be in this dict bootloader_by_platform = {platform.X86: GRUB2, platform.EFI: EFIGRUB, platform.MacEFI: MacEFIGRUB, platform.PPC: GRUB2, platform.IPSeriesPPC: IPSeriesGRUB2, platform.NewWorldPPC: MacYaboot, platform.S390: ZIPL, platform.Aarch64EFI: Aarch64EFIGRUB, platform.ARM: EXTLINUX, platform.omapARM: EXTLINUX} def get_bootloader(): platform_name = platform.platform.__class__.__name__ if flags.extlinux: cls = EXTLINUX else: cls = bootloader_by_platform.get(platform.platform.__class__, BootLoader) log.info("bootloader %s on %s platform", cls.__name__, platform_name) return cls() # anaconda-specific functions def writeSysconfigKernel(storage, version): # get the name of the default kernel package based on the version kernel_basename = "vmlinuz-" + version kernel_file = "/boot/%s" % kernel_basename if not os.path.isfile(ROOT_PATH + kernel_file): kernel_file = "/boot/efi/EFI/redhat/%s" % kernel_basename if not os.path.isfile(ROOT_PATH + kernel_file): log.error("failed to recreate path to default kernel image") return try: import rpm except ImportError: log.error("failed to import rpm python module") return ts = rpm.TransactionSet(ROOT_PATH) mi = ts.dbMatch('basenames', kernel_file) try: h = mi.next() except StopIteration: log.error("failed to get package name for default kernel") return kernel = h.name f = open(ROOT_PATH + "/etc/sysconfig/kernel", "w+") f.write("# UPDATEDEFAULT specifies if new-kernel-pkg should make\n" "# new kernels the default\n") # only update the default if we're setting the default to linux (#156678) if storage.bootloader.default.device == storage.rootDevice: f.write("UPDATEDEFAULT=yes\n") else: f.write("UPDATEDEFAULT=no\n") f.write("\n") f.write("# DEFAULTKERNEL specifies the default kernel package type\n") f.write("DEFAULTKERNEL=%s\n" % kernel) if storage.bootloader.trusted_boot: f.write("# HYPERVISOR specifies the default multiboot kernel\n") f.write("HYPERVISOR=/boot/tboot.gz\n") f.write("HYPERVISOR_ARGS=logging=vga,serial,memory\n") f.close() def writeBootLoader(storage, payload, instClass, ksdata): """ Write bootloader configuration to disk. When we get here, the bootloader will already have a default linux image. We only have to add images for the non-default kernels and adjust the default to reflect whatever the default variant is. 
""" from pyanaconda.errors import errorHandler, ERROR_RAISE if not storage.bootloader.skip_bootloader: stage1_device = storage.bootloader.stage1_device log.info("bootloader stage1 target device is %s", stage1_device.name) stage2_device = storage.bootloader.stage2_device log.info("bootloader stage2 target device is %s", stage2_device.name) # get a list of installed kernel packages kernel_versions = payload.kernelVersionList if not kernel_versions: log.warning("no kernel was installed -- bootloader config unchanged") return # all the linux images' labels are based on the default image's base_label = productName base_short_label = "linux" # The first one is the default kernel. Update the bootloader's default # entry to reflect the details of the default kernel. version = kernel_versions.pop(0) default_image = LinuxBootLoaderImage(device=storage.rootDevice, version=version, label=base_label, short=base_short_label) storage.bootloader.add_image(default_image) storage.bootloader.default = default_image if hasattr(storage.bootloader, 'efi_dir'): storage.bootloader.efi_dir = instClass.efi_dir # write out /etc/sysconfig/kernel writeSysconfigKernel(storage, version) if storage.bootloader.skip_bootloader: log.info("skipping bootloader install per user request") return # now add an image for each of the other kernels for version in kernel_versions: label = "%s-%s" % (base_label, version) short = "%s-%s" % (base_short_label, version) if storage.bootloader.trusted_boot: image = TbootLinuxBootLoaderImage( device=storage.rootDevice, version=version, label=label, short=short) else: image = LinuxBootLoaderImage(device=storage.rootDevice, version=version, label=label, short=short) storage.bootloader.add_image(image) # set up dracut/fips boot args # XXX FIXME: do this from elsewhere? #storage.bootloader.set_boot_args(keyboard=anaconda.keyboard, # storage=anaconda.storage, # language=anaconda.instLanguage, # network=anaconda.network) storage.bootloader.set_boot_args(storage=storage, payload=payload, keyboard=ksdata.keyboard) try: storage.bootloader.write() except BootLoaderError as e: if errorHandler.cb(e) == ERROR_RAISE: raise
gpl-2.0
-3,447,852,310,971,648,500
34.65548
120
0.555288
false
pclubuiet/website
home/views.py
1
3396
from django import views
from django.shortcuts import render, get_object_or_404
from django.views.generic import TemplateView
from django.views.generic.edit import CreateView
from .models import *
from .forms import *
import requests
import http
from django.urls import reverse_lazy
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse


class Template404(TemplateView):
    template_name = "404.html"


class Home(TemplateView):
    template_name = 'home/home.html'


class Topics(views.View):
    def get(self, request, *args, **kwargs):
        return render(request, "home/resources/topics.html", {'topics': Topic.objects.all()})


class Resources(views.View):
    def get(self, request, pk, *args, **kwargs):
        topic = get_object_or_404(Topic, pk=pk)
        return render(request, "home/resources/resources.html", {'resources': topic.resource_set.all(), 'topic' : topic})


class BlogPostList(views.View):
    def get(self, request, *args, **kwargs):
        posts = BlogPost.objects.all()
        return render(request, "home/blog/index.html", {'posts': posts})


class BlogPostView(views.View):
    def get(self, request, pk, *args, **kwargs):
        post = get_object_or_404(BlogPost, pk=pk)
        return render(request, "home/blog/blog_post.html", {'post': post})


class Leaderboard(views.View):
    def get(self, request, *args, **kwargs):
        users = Users.objects.all()
        for user in users:
            connected = False
            while not connected:
                try:
                    user_name = user.github_handle
                    response = requests.get('https://api.github.com/search/issues?sort=created&q=author:{}&type:pr&per_page=100'.format(user_name), verify = False).json()
                    pr_count = 0
                    print(response)
                    for obj in response['items']:
                        if('pull_request' in obj):
                            if('2018-09-30T00:00:00Z'<obj['created_at']<'2018-10-31T23:59:59Z'):
                                pr_count += 1
                    user.pr_count = pr_count
                    user.save()
                    connected = True
                except:
                    pass
        return render(request, 'home/leaderboard.html', {'users': users})


class RegisterUser(CreateView):
    form_class = RegisterUserForm
    template_name = "home/registeruser.html"
    success_url = reverse_lazy('home:home')


@csrf_exempt
def GithubEmailCheck(request):
    github_handle = request.POST.get('github_handle')
    email = request.POST.get('email')
    print("Received ", github_handle)
    users = Users.objects.all()
    for user in users:
        if user.github_handle == github_handle:
            return JsonResponse({'message' : 'Duplicate Github Handle'})
        if user.email == email:
            return JsonResponse({'message' : 'Duplicate Email'})
    return JsonResponse({'message' : 'New'})


@csrf_exempt
def GithubCheck(request):
    github_handle = request.POST.get('github_handle')
    response = requests.get("https://api.github.com/users/{}".format(github_handle), verify = False).json()
    print("https://api.github.com/users/{}".format(github_handle))
    if ('login' in response):
        print("Found")
        return JsonResponse({'message' : 'Found'})
    else:
        return JsonResponse({'message' : 'Not Found'})
gpl-3.0
-1,718,221,211,592,258,300
38.045977
170
0.620436
false
brain-research/acai
lib/eval.py
1
4490
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Evaluation functions.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from lib import data
import numpy as np
import scipy.spatial


def closest_line(query_lines, metric='cosine'):
    """Compute the distance to, and parameters for, the closest line to each
    line in query_lines.

    Args:
    - query_lines: Array of lines to compute closest matches for,
      shape (n_lines, width, height, 1)
    - metric: String to pass to scipy.spatial.distance.cdist to choose which
      distance metric to use

    Returns:
    - min_dist, starts, ends: Arrays of shape (n_lines,) denoting the distance
      to the nearest ``true'' line and the start and end points.
    """
    h, w = query_lines.shape[1:-1]
    # Construct 10000 lines with these dimensions
    angles = np.linspace(0, 2*np.pi - 2*np.pi/10000, 10000)
    all_lines = np.array(
        [(data.draw_line(angle, h, w)) for angle in angles])
    # Produce vectorized versions of both for use with scipy.spatial
    flat_query = query_lines.reshape(query_lines.shape[0], -1)
    flat_all = all_lines.reshape(all_lines.shape[0], -1)
    # Compute pairwise distance matrix of query lines with all valid lines
    distances = scipy.spatial.distance.cdist(flat_query, flat_all, metric)
    min_dist_idx = np.argmin(distances, axis=-1)
    min_dist = distances[np.arange(distances.shape[0]), min_dist_idx]
    angles = np.array([angles[n] for n in min_dist_idx])
    return min_dist, angles


def smoothness_score(angles):
    """Computes the smoothness score of a line interpolation according to the
    angles of each line.

    Args:
    - angles: Array of shape (n_interpolations, n_lines_per_interpolation)
      giving the angle of each line in each interpolation.

    Returns:
    - smoothness_scores: Array of shape (n_interpolations,) giving the average
      smoothness score for all of the provided interpolations.
    """
    angles = np.atleast_2d(angles)
    # Remove discontinuities larger than np.pi
    angles = np.unwrap(angles)
    diffs = np.abs(np.diff(angles, axis=-1))
    # Compute the angle difference from the first and last point
    total_diff = np.abs(angles[:, :1] - angles[:, -1:])
    # When total_diff is zero, there's no way to compute this score
    zero_diff = (total_diff < 1e-4).flatten()
    normalized_diffs = diffs/total_diff
    deviation = np.max(normalized_diffs, axis=-1) - 1./(angles.shape[1] - 1)
    # Set score to NaN when we aren't able to compute it
    deviation[zero_diff] = np.nan
    return deviation


def line_eval(interpolated_lines):
    """Given a group of line interpolations, compute mean nearest line
    distance and mean smoothness score for all of the interpolations.
    This version of this metric is meant for vertical lines only.

    Args:
    - interpolated_lines: Collection of line interpolation images, shape
      (n_interpolations, n_lines_per_interpolation, height, width, 1)

    Returns:
    - mean_distance: Average distance to closest ``real'' line.
    - mean_smoothness: Average interpolation smoothness
    """
    original_shape = interpolated_lines.shape
    min_dist, angles = closest_line(
        interpolated_lines.reshape((-1,) + original_shape[2:]))
    mean_distance = np.mean(min_dist)
    smoothness_scores = smoothness_score(
        angles.reshape(original_shape[0], original_shape[1]))
    nan_scores = np.isnan(smoothness_scores)
    # If all scores were NaN, set the mean score to NaN
    if np.all(nan_scores):
        mean_smoothness = np.nan
    # Otherwise only compute mean for non-NaN scores
    else:
        sum_smoothness = np.sum(smoothness_scores[np.logical_not(nan_scores)])
        mean_smoothness = sum_smoothness/float(len(nan_scores))
    return np.float32(mean_distance), np.float32(mean_smoothness)
apache-2.0
3,405,448,565,287,546,400
39.089286
79
0.688641
false
mentaal/r_map
tests/test_data_structure.py
1
1802
from copy import deepcopy


def test_get_item(data):
    "test the different ways that an item can be referenced"
    #key based lookup
    spi = data['spi']
    #attribute based
    cfg0 = spi.cfg0
    print(f'cfg: {cfg0}')
    field = spi.cfg0['bf0']
    field = spi.cfg0.bf0.bf
    #field = spi.bf0
    print(field)
    print("Updating field value to spi_enabled")
    field.value = field.spi_enabled.value
    print(field)
    print(spi.cfg0.bf0)
    assert field.annotation == field.spi_enabled.name
    print(f'bf0 annotation: {field.annotation}')

def test_list_node(data):
    "display the node's public attributes and the names of its children"
    print(dir(data))

def test_walk(data):
    "obtain an iterator from the root object and get the next item"
    i = iter(data)
    block = next(i)
    for item in block._walk(levels=3):
        print(item)

def test_copy(data):
    c = deepcopy(data)
    i = iter(data)
    n = next(i)
    print(dir(c))
    i = iter(c)
    item = next(i)
    print(item)
    print(item.parent)

def test_repr(data):
    item = next(iter(data))
    print(repr(item))

def test_bad_name(data):
    assert data.name != data['name']
    assert data['name'] is data._children['name']

def test_bit_reg_linkage(data):
    #get first available register
    w = (m for m in data)
    m = next(w)
    rs = (r for r in m)
    r = next(rs)
    print("Register: ", r)
    print(f"Register access: {r.access}")
    v = 0x12345678
    r.value = v
    for ref in r:
        print("bitfield: ", ref.bf)
        field_expected_value = (r.value >> ref.reg_offset) & ref.bf.mask
        assert field_expected_value == ref.bf.value

    new_value = 12345678
    for ref in r:
        ref.bf.value = (new_value >> ref.reg_offset) & ref.bf.mask

    assert r.value == new_value
mit
1,948,170,371,187,965,700
23.026667
72
0.614872
false
bklakew/OpenAgClassifier
src/model/server.py
1
6199
""" # Copyright 2017 Foundation Center. All Rights Reserved. # # Licensed under the Foundation Center Public License, Version 1.0 (the “License”); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://gis.foundationcenter.org/licenses/LICENSE-1.0.html # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an “AS IS” BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """ from base.prediction import Predictor from base.model import TextClassifier from nltk.data import load from base.database import MySqlDataBase from base.graph import run, bfs from base import config as c import json import os import time import warnings import itertools from concurrent.futures import ThreadPoolExecutor, as_completed from flask import Flask, request, Response from flask_cors import cross_origin app = Flask(__name__) warnings.simplefilter("ignore", UserWarning) def _load_to_memory(name, level): clf = TextClassifier() clf.load(path='model/clf_data/', name=name, in_db=False) del clf.doc_ids return Predictor(classifier=clf, high_t=c.high_threshold[level], low_t=c.low_threshold[level]) def _get_lookup(): db = MySqlDataBase(c.db) query = """ SELECT Code, ifnull(ifnull(ifnull(ifnull(ifnull(L7, L6), L5), L4), L3), L2) AS `description` FROM ( SELECT Code, nullif(L7, '') AS L7, nullif(L6, '') AS L6, nullif(L5, '') AS L5 , nullif(L4, '') AS L4, nullif(L3, '') AS L3, nullif(L2, '') AS L2 , nullif(L1, '') AS L1 FROM agrovoc_autocode.agrovoc_terms WHERE `Use?` = 'Y' ) as a """ db.execute(query) d = {} for row in db.cursor: code = row["Code"].strip() description = row["description"].strip() d[code] = description db.teardown() return d def _validate(js, k): return isinstance(js, dict) and k in js print("[INFO] Loading AGROVOC classifiers") p1 = _load_to_memory(name='hierarchy_1_76021167-b4ce-463d-bab0-bc7fb044b74b', level=1) p2 = _load_to_memory(name='hierarchy_2_2fd8b6a0-6786-42ef-9eea-66ea02a1dfdd', level=2) p3 = _load_to_memory(name='hierarchy_3_2b946288-5eeb-4d35-a1fe-6987c118c3b5', level=3) p4 = _load_to_memory(name='hierarchy_4_3e787d47-5183-4df2-ba4b-509926f029d3', level=4) lookup = _get_lookup() graph = run(MySqlDataBase(c.db)) sentence_detector = load("tokenizers/punkt/english.pickle") def taxonomy_rollup(results): """ Does the taxonomy rollup using a graph breadth-first-search algorithm :param results: (list of dictionaries) :return: (list of dictionaries) """ all_codes = set([r["code"] for r in results]) to_keep = set() node_check = all_codes - to_keep for n in node_check: to_keep.add(n) k = bfs(graph=graph, start=n, to_check=node_check, keep=to_keep) to_keep.add(k) return [r for r in results if r["code"] in to_keep if r["code"] is not None] @app.route('/predict', methods=['POST', 'GET']) @cross_origin(origin='*', headers=['Content-Type', 'Authorization']) def predict(): """ Single text predictions :return: (JSON) """ j = request.get_json() if j is None: j = request.args if not j: j = request.form if _validate(j, 'text'): st = time.time() text = j['text'] threshold = 0 chunk = False if 'chunk' in j and j['chunk'].lower() == 'true': text = [sub for sent in sentence_detector.tokenize(text) for sub in sent.split(';')] chunk = True if 'threshold' in j and j['threshold'] == 
'high': threshold = 1 # get all predictions, for every hierarchy asynchronously results = [] with ThreadPoolExecutor(max_workers=4) as executor: future_results = {executor.submit(func, (text, lookup, threshold)): idx + 1 for idx, func in enumerate([p1.predict, p2.predict, p3.predict, p4.predict ])} for future in as_completed(future_results): results.extend(future.result()) # resolve duplication that arises due to chunking (accept the result with the maximum confidence per class) if chunk: results_sort = sorted(results, key=lambda x: (x["code"], x["confidence"])) grouped = itertools.groupby(results_sort, lambda s: s["code"]) results = [max(v, key=lambda x: x["confidence"]) for k, v in grouped] # add logic to toggle the agrovoc graph roll up on and off if 'roll_up' in j and j['roll_up'].lower() == 'false': agg = [r for r in results if r["code"] is not None] else: agg = taxonomy_rollup(results) if not agg: agg = [{"code": None, "description": None, "confidence": 0.0}] agg = sorted(agg, key=lambda s: s["confidence"], reverse=True) return Response(response=json.dumps({"success": True, "duration": time.time() - st, "data": agg}, indent=4), status=200, mimetype='application/json') return Response(response=json.dumps({"success": False, "status": "Incorrect parameters"}, indent=4), status=404, mimetype='application/json') if __name__ == '__main__': debug = os.environ.get('DEBUG', False) port = os.environ.get('PORT', 9091) testing = os.environ.get('TESTING', False) app.run(host='0.0.0.0', port=port, debug=debug)
mpl-2.0
8,984,252,178,927,677,000
33.786127
116
0.581812
false
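A minimal client sketch for the /predict service in the record above; the host name is an assumption, while the default port 9091, the parameter names ('text', 'chunk', 'threshold', 'roll_up') and the shape of the response come from the code itself:

# Minimal client sketch for the /predict endpoint defined above.
# The host is hypothetical; port 9091 matches the service's fallback PORT.
import requests

payload = {
    "text": "Improving maize drought tolerance through irrigation management.",
    "chunk": "true",       # split the text into sentences before classifying
    "threshold": "high",   # use the stricter confidence threshold
    "roll_up": "true",     # collapse predictions via the AGROVOC graph
}
resp = requests.post("http://localhost:9091/predict", json=payload)
resp.raise_for_status()
for item in resp.json()["data"]:
    print(item["code"], item["description"], item["confidence"])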
snipsco/snipsskills
snipsmanager/commands/setup/systemd/snipsmanager.py
1
1887
# -*-: coding utf-8 -*-

import os
import time

from ...base import Base
from ....utils.os_helpers import is_raspi_os, which
from ....utils.systemd import Systemd
from .... import DEFAULT_SNIPSFILE_PATH

from snipsmanagercore import pretty_printer as pp


class SystemdSnipsManagerException(Exception):
    pass


class SystemdSnipsManager(Base):

    SNIPSMANAGER_SERVICE_NAME = "snipsmanager"
    SNIPSMANAGER_COMMAND = "snipsmanager"

    def run(self):
        snipsfile_path = self.options['--snipsfile_path'] or os.getcwd()
        try:
            SystemdSnipsManager.setup(snipsfile_path=snipsfile_path)
        except Exception as e:
            pp.perror(str(e))

    @staticmethod
    def setup(snipsfile_path=None):
        pp.pcommand("Setting up Snips Manager as a Systemd service")
        snipsfile_path = snipsfile_path or DEFAULT_SNIPSFILE_PATH
        working_directory = os.path.dirname(snipsfile_path)
        if not is_raspi_os():
            raise SystemdSnipsManagerException("Snips Systemd configuration is only available on Raspberry Pi. Skipping Systemd setup")
        snipsmanager_path = which('snipsmanager')
        if snipsmanager_path is None:
            raise SystemdSnipsManagerException("Error: cannot find command 'snipsmanager' on the system. Make sure the Snips Manager CLI is correctly installed. Skipping Systemd setup")
        contents = Systemd.get_template(SystemdSnipsManager.SNIPSMANAGER_SERVICE_NAME)
        contents = contents.replace("{{SNIPSMANAGER_COMMAND}}", snipsmanager_path)
        contents = contents.replace("{{WORKING_DIRECTORY}}", working_directory)
        Systemd.write_systemd_file(SystemdSnipsManager.SNIPSMANAGER_SERVICE_NAME, None, contents)
        Systemd.enable_service(None, SystemdSnipsManager.SNIPSMANAGER_SERVICE_NAME)
        pp.psuccess("Successfully set up Snips Manager as a Systemd service")
mit
9,124,192,898,552,893,000
36
185
0.711182
false
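The setup() routine in the record above renders a Systemd unit template by substituting the {{SNIPSMANAGER_COMMAND}} and {{WORKING_DIRECTORY}} placeholders. A rough sketch of that substitution follows; the template text and paths below are invented for illustration, and the real unit file returned by Systemd.get_template("snipsmanager") may look different:

# Illustrative only: the template string is made up; only the two placeholder
# names come from the code above.
template = """[Unit]
Description=Snips Manager

[Service]
ExecStart={{SNIPSMANAGER_COMMAND}} run
WorkingDirectory={{WORKING_DIRECTORY}}
Restart=always

[Install]
WantedBy=multi-user.target
"""

contents = template.replace("{{SNIPSMANAGER_COMMAND}}", "/usr/local/bin/snipsmanager")
contents = contents.replace("{{WORKING_DIRECTORY}}", "/home/pi")
print(contents)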
haricot/djangocms-bs4forcascade
cmsplugin_bs4forcascade/bootstrap4/utils.py
1
11099
# -*- coding: utf-8 -*- from __future__ import unicode_literals import logging from collections import OrderedDict from django.forms import widgets from cmsplugin_cascade import app_settings from cmsplugin_cascade.plugin_base import CascadePluginBase from cmsplugin_cascade.utils import compute_aspect_ratio, get_image_size, parse_responsive_length __all__ = ['reduce_breakpoints', 'compute_media_queries', 'get_image_tags', 'get_picture_elements', 'get_widget_choices'] logger = logging.getLogger('cascade') BS4_BREAKPOINTS = OrderedDict(app_settings.CMSPLUGIN_CASCADE['bootstrap4']['breakpoints']) BS4_BREAKPOINT_KEYS = list(tp[0] for tp in app_settings.CMSPLUGIN_CASCADE['bootstrap4']['breakpoints']) def get_widget_choices(): breakpoints = list(BS4_BREAKPOINTS) i = 0 widget_choices = [] for br, br_options in BS4_BREAKPOINTS.items(): if i == 0: widget_choices.append((br, '{} (<{}px)'.format(br_options[2], br_options[0]))) elif i == len(breakpoints[:-1]): widget_choices.append((br, '{} (≥{}px)'.format(br_options[2], br_options[0]))) else: widget_choices.append((br, '{} (≥{}px and <{}px)'.format(br_options[2], br_options[0], BS4_BREAKPOINTS[breakpoints[(i + 1)]][0]))) i += 1 return widget_choices def reduce_breakpoints(plugin, field_name, request=None, obj=None): """ Narrow down the number of breakpoints in the widget of the named glossary_field. This is useful in case the container was defined with a subset of these breakpoints: xs, sm, md, lg. """ if not isinstance(plugin, CascadePluginBase): raise ValueError('Plugin is not of type CascadePluginBase') parent_instance = plugin.get_parent_instance(request, obj) if not parent_instance: return complete_glossary = parent_instance.get_complete_glossary() if 'breakpoints' not in complete_glossary: return try: # find the glossary_field named field_name and restrict its breakpoint to the available ones widget = [f for f in plugin.glossary_fields if f.name == field_name][0].widget except IndexError: return if not isinstance(widget, widgets.MultiWidget): raise ValueError('Widget for glossary_field {0} is not a multiple value field') temp = [(l, widget.widgets[k]) for k, l in enumerate(widget.labels) if l in complete_glossary['breakpoints']] widget.labels, widget.widgets = (list(t) for t in zip(*temp)) def compute_media_queries(element): """ For e given Cascade element, compute the current media queries for each breakpoint, even for nested containers, rows and columns. 
""" parent_glossary = element.get_parent_glossary() # compute the max width and the required media queries for each chosen breakpoint element.glossary['container_max_widths'] = max_widths = {} element.glossary['media_queries'] = media_queries = {} breakpoints = element.glossary.get('breakpoints', parent_glossary.get('breakpoints', [])) last_index = len(breakpoints) - 1 fluid = element.glossary.get('fluid') for index, bp in enumerate(breakpoints): try: key = 'container_fluid_max_widths' if fluid else 'container_max_widths' max_widths[bp] = parent_glossary[key][bp] except KeyError: max_widths[bp] = BS4_BREAKPOINTS[bp][4 if fluid else 3] if last_index > 0: if index == 0: next_bp = breakpoints[1] media_queries[bp] = ['(max-width: {0}px)'.format(BS4_BREAKPOINTS[next_bp][0])] elif index == last_index: media_queries[bp] = ['(min-width: {0}px)'.format(BS4_BREAKPOINTS[bp][0])] else: next_bp = breakpoints[index + 1] media_queries[bp] = ['(min-width: {0}px)'.format(BS4_BREAKPOINTS[bp][0]), '(max-width: {0}px)'.format(BS4_BREAKPOINTS[next_bp][0])] def get_image_tags(context, instance, options): """ Create a context returning the tags to render an <img ...> element: ``sizes``, ``srcset``, a fallback ``src`` and if required inline styles. """ try: aspect_ratio = compute_aspect_ratio(instance.image) except Exception as e: # if accessing the image file fails, abort here return is_responsive = options.get('is_responsive', False) resize_options = options.get('resize_options', {}) crop = 'crop' in resize_options upscale = 'upscale' in resize_options subject_location = instance.image.subject_location if 'subject_location' in resize_options else False resolutions = (False, True) if 'high_resolution' in resize_options else (False,) tags = {'sizes': [], 'srcsets': {}, 'is_responsive': is_responsive, 'extra_styles': {}} if is_responsive: image_width = parse_responsive_length(options.get('image_width_responsive') or '100%') assert(image_width[1]), "The given image has no valid width" if image_width[1] != 1.0: tags['extra_styles'].update({'max-width': '{:.0f}%'.format(100 * image_width[1])}) else: image_width = parse_responsive_length(options['image_width_fixed']) if not image_width[0]: image_width = (instance.image.width, image_width[1]) try: image_height = parse_responsive_length(options['image_height']) except KeyError: image_height = (None, None) set_defaults(options) if is_responsive: max_width = 0 for bp in options['breakpoints']: if bp not in options['container_max_widths']: continue width = int(image_width[1] * options['container_max_widths'][bp]) max_width = max(max_width, width) size = get_image_size(width, image_height, aspect_ratio) if bp in options['media_queries']: tags['sizes'].append('{0} {1}px'.format(' and '.join(options['media_queries'][bp]), width)) for high_res in resolutions: if high_res: size = (size[0] * 2, size[1] * 2) key = '{0}w'.format(size[0]) tags['srcsets'][key] = {'size': size, 'crop': crop, 'upscale': upscale, 'subject_location': subject_location} # use an existing image as fallback for the <img ...> element if not max_width > 0: logger.warning('image tags: image max width is zero') size = (int(round(max_width)), int(round(max_width * aspect_ratio))) else: size = get_image_size(image_width[0], image_height, aspect_ratio) if len(resolutions) > 1: for high_res in resolutions: if high_res: tags['srcsets']['2x'] = {'size': (size[0] * 2, size[1] * 2), 'crop': crop, 'upscale': upscale, 'subject_location': subject_location} else: tags['srcsets']['1x'] = {'size': size, 'crop': crop, 
'upscale': upscale, 'subject_location': subject_location} tags['src'] = {'size': size, 'crop': crop, 'upscale': upscale, 'subject_location': subject_location} return tags def set_defaults(options): options.setdefault('breakpoints', ['xs', 'sm', 'md', 'lg', 'xl']) options.setdefault('container_max_widths', {'xs': 576, 'sm': 767, 'md': 991, 'lg': 1199, 'xl': 1980}) options.setdefault('fluid', False) options.setdefault('media_queries', { 'xs': ['(max-width: 576px)'], 'sm': ['(min-width: 576px)', '(max-width: 767px)'], 'md': ['(min-width: 768px)', '(max-width: 991px)'], 'lg': ['(min-width: 992px)','(max-width: 1199px)'], 'xl': ['(min-width: 1200px)'], }) def get_picture_elements(context, instance): """ Create a context, used to render a <picture> together with all its ``<source>`` elements: It returns a list of HTML elements, each containing the information to render a ``<source>`` element. The purpose of this HTML entity is to display images with art directions. For normal images use the ``<img>`` element. """ if not instance.image: return complete_glossary = instance.get_complete_glossary() aspect_ratio = compute_aspect_ratio(instance.image) container_max_heights = complete_glossary.get('container_max_heights', {}) resize_options = instance.glossary.get('resize_options', {}) crop = 'crop' in resize_options upscale = 'upscale' in resize_options subject_location = instance.image.subject_location if 'subject_location' in resize_options else False max_width = 0 max_zoom = 0 elements = [] for bp in complete_glossary['breakpoints']: try: width = float(complete_glossary['container_max_widths'][bp]) except KeyError: width = 0 max_width = max(max_width, round(width)) size = None try: image_height = parse_responsive_length(instance.glossary['responsive_heights'][bp]) except KeyError: image_height = (None, None) if image_height[0]: # height was given in px size = (int(width), image_height[0]) elif image_height[1]: # height was given in % size = (int(width), int(round(width * aspect_ratio * image_height[1]))) elif bp in container_max_heights: container_height = parse_responsive_length(container_max_heights[bp]) if container_height[0]: size = (int(width), container_height[0]) elif container_height[1]: size = (int(width), int(round(width * aspect_ratio * container_height[1]))) try: zoom = int( instance.glossary['responsive_zoom'][bp].strip().rstrip('%') ) except (AttributeError, KeyError, ValueError): zoom = 0 max_zoom = max(max_zoom, zoom) if size is None: # as fallback, adopt height to current width size = (int(width), int(round(width * aspect_ratio))) try: media_queries = complete_glossary['media_queries'][bp][:] except KeyError: media_queries = [] media = ' and '.join(media_queries) elem = {'tag': 'source', 'size': size, 'zoom': zoom, 'crop': crop, 'upscale': upscale, 'subject_location': subject_location, 'media': media} if 'high_resolution' in resize_options: elem['size2'] = (size[0] * 2, size[1] * 2) elements.append(elem) # add a fallback image for old browsers which can't handle the <picture> element if image_height[1]: size = (int(max_width), int(round(max_width * aspect_ratio * image_height[1]))) else: size = (int(max_width), int(round(max_width * aspect_ratio))) elements.append({'tag': 'img', 'size': size, 'zoom': max_zoom, 'crop': crop, 'upscale': upscale, 'subject_location': subject_location}) return elements
mit
-605,115,729,172,687,700
45.422594
142
0.605588
false
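The compute_media_queries() helper in the record above derives min-width/max-width media queries from an ordered breakpoint table: the first breakpoint only gets a max-width bound, the last only a min-width bound, and the ones in between get both. A stand-alone sketch of the same idea; the breakpoint widths here are illustrative, not cmsplugin-cascade's configured values:

# Stand-alone sketch of the media-query derivation used above.
from collections import OrderedDict

BREAKPOINTS = OrderedDict([('xs', 576), ('sm', 768), ('md', 992), ('lg', 1200)])

def media_queries(breakpoints):
    keys = list(breakpoints)
    queries = {}
    for index, bp in enumerate(keys):
        if len(keys) < 2:
            queries[bp] = []  # a single breakpoint needs no media query
        elif index == 0:
            queries[bp] = ['(max-width: {0}px)'.format(breakpoints[keys[1]])]
        elif index == len(keys) - 1:
            queries[bp] = ['(min-width: {0}px)'.format(breakpoints[bp])]
        else:
            queries[bp] = ['(min-width: {0}px)'.format(breakpoints[bp]),
                           '(max-width: {0}px)'.format(breakpoints[keys[index + 1]])]
    return queries

print(media_queries(BREAKPOINTS))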
lcpt/xc
verif/tests/elements/shell/test_shell_mitc4_11.py
1
3573
# -*- coding: utf-8 -*-
''' Taken from example 2-005 of the SAP 2000 verification manual.'''

# The obtained error is near 1.8%; it may be due to the aspect ratio
# of the element. See comments on page EXAMPLE 2-005 - 7
# in the SAP 2000 manual.

__author__= "Luis C. Pérez Tato (LCPT) and Ana Ortega (AOO)"
__copyright__= "Copyright 2015, LCPT and AOO"
__license__= "GPL"
__version__= "3.0"
__email__= "[email protected]"

# feProblem.setVerbosityLevel(0)
NumDivI= 32
NumDivJ= 32
CooMaxX= 10
CooMaxY= 2
E= 17472000 # Elastic modulus in lb/in2
nu= 0.3 # Poisson's ratio
G= 6720000
thickness= 0.0001 # Cross section depth expressed in inches.
unifLoad= 0.0001 # Uniform load in lb/in2.
ptLoad= 0.0004 # Punctual load in lb.

import xc_base
import geom
import xc
from solution import predefined_solutions
from model import predefined_spaces
from materials import typical_materials

# Problem type
feProblem= xc.FEProblem()
preprocessor= feProblem.getPreprocessor
nodes= preprocessor.getNodeHandler
modelSpace= predefined_spaces.StructuralMechanics3D(nodes)

# Define materials
elast= typical_materials.defElasticMaterial(preprocessor, "elast",E)

nodes.newSeedNode()

# Define materials
nmb1= typical_materials.defElasticMembranePlateSection(preprocessor, "memb1",E,nu,0.0,thickness)

seedElemHandler= preprocessor.getElementHandler.seedElemHandler
seedElemHandler.defaultMaterial= "memb1"
seedElemHandler.defaultTag= 1
elem= seedElemHandler.newElement("ShellMITC4",xc.ID([0,0,0,0]))

points= preprocessor.getMultiBlockTopology.getPoints
pt= points.newPntIDPos3d(1,geom.Pos3d(0.0,0.0,0.0))
pt= points.newPntIDPos3d(2,geom.Pos3d(CooMaxX,0.0,0.0))
pt= points.newPntIDPos3d(3,geom.Pos3d(CooMaxX,CooMaxY,0.0))
pt= points.newPntIDPos3d(4,geom.Pos3d(0.0,CooMaxY,0.0))
surfaces= preprocessor.getMultiBlockTopology.getSurfaces
surfaces.defaultTag= 1
s= surfaces.newQuadSurfacePts(1,2,3,4)
s.nDivI= NumDivI
s.nDivJ= NumDivJ

# Constraints
f1= preprocessor.getSets.getSet("f1")
f1.genMesh(xc.meshDir.I)
sides= s.getEdges
# Edge iterator
for l in sides:
    for i in l.getEdge.getNodeTags():
        modelSpace.fixNode000_000(i)

# Loads definition
loadHandler= preprocessor.getLoadHandler
lPatterns= loadHandler.getLoadPatterns
# Load modulation.
ts= lPatterns.newTimeSeries("constant_ts","ts")
lPatterns.currentTimeSeries= "ts"
# Load case definition
lp0= lPatterns.newLoadPattern("default","0")
# lPatterns.currentLoadPattern= "0"

f1= preprocessor.getSets.getSet("f1")
nNodes= f1.getNumNodes
node= f1.getNodeIJK(1,NumDivI/2+1,NumDivJ/2+1)
# print "Central node: ", node.tag
# print "Central node coordinates: ", node.getCoo
lp0.newNodalLoad(node.tag,xc.Vector([0,0,-ptLoad,0,0,0])) # Concentrated load

nElems= f1.getNumElements
# We add the load case to domain.
lPatterns.addToDomain("0")

# Solution procedure
analisis= predefined_solutions.simple_static_linear(feProblem)
analOk= analisis.analyze(1)

f1= preprocessor.getSets.getSet("f1")
nodes= preprocessor.getNodeHandler
node= f1.getNodeIJK(1,NumDivI/2+1,NumDivJ/2+1)
# print "Central node: ", node.tag
# print "Central node coordinates: ", node.getCoo
# print "Central node displacements: ", node.getDisp
UZ= node.getDisp[2]

UZTeor= -7.25
ratio1= (abs((UZ-UZTeor)/UZTeor))
ratio2= (abs((nElems-1024)/1024))

'''
print "UZ= ",UZ
print "Number of nodes: ",nNodes
print "Number of elements: ",nElems
print "ratio1: ",ratio1
'''

import os
from miscUtils import LogMessages as lmsg
fname= os.path.basename(__file__)
if (abs(ratio1)<2e-2) & (abs(ratio2)<1e-9):
    print "test ",fname,": ok."
else:
    lmsg.error(fname+' ERROR.')
gpl-3.0
-1,432,100,207,234,984,400
26.060606
96
0.755879
false
pdelsante/thug
thug/Analysis/virustotal/VirusTotal.py
1
3786
#!/usr/bin/env python # # VirusTotal.py # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, # MA 02111-1307 USA import os import json import tempfile import logging import requests import six.moves.configparser as ConfigParser log = logging.getLogger("Thug") class VirusTotal(object): def __init__(self): self.enabled = True self.opts = dict() self.__init_config() def __init_config(self): conf_file = os.path.join(log.configuration_path, 'thug.conf') if not os.path.exists(conf_file): log.warning("[WARNING] VirusTotal disabled (no configuration file found)") self.enabled = False return config = ConfigParser.ConfigParser() config.read(conf_file) for option in config.options('virustotal'): self.opts[option] = config.get('virustotal', option) runtime_apikey = log.ThugOpts.get_vt_runtime_apikey() if runtime_apikey: self.opts['apikey'] = runtime_apikey if not self.opts.get('apikey', None): self.enabled = False def save_report(self, response_dict, basedir, sample): log_dir = os.path.join(basedir, 'analysis', 'virustotal') content = json.dumps(response_dict) log.ThugLogging.log_virustotal(log_dir, sample, content) positives = str(response_dict.get("positives", {})) total = str(response_dict.get("total", {})) log.warning("[VirusTotal] Sample %s analysis ratio: %s/%s", response_dict['md5'], positives, total) def get_report(self, report): params = { "resource": report, "allinfo" : 1, "apikey" : self.opts['apikey']} response = requests.get(self.opts["reporturl"], params = params) return response def query(self, sample, basedir): md5 = sample['md5'] response = self.get_report(md5) response_dict = response.json() response_code = response_dict.get(u"response_code") if response.ok: if response_code == 1: self.save_report(response_dict, basedir, sample) return True log.warning("[VirusTotal] %s", response_dict['verbose_msg']) return False def submit(self, data, sample): md5 = sample['md5'] fd, s = tempfile.mkstemp() with open(s, "wb") as fd: fd.write(data) params = {'apikey': self.opts['apikey']} files = {'file' : (md5, open(s, "rb"))} response = requests.post(self.opts["scanurl"], files = files, params = params) if response.ok: log.warning("[VirusTotal] Sample %s submitted", md5) os.remove(s) def analyze(self, data, sample, basedir): if not self.enabled: return if not self.opts['apikey']: return if sample.get('md5', None) and log.ThugOpts.vt_query and self.query(sample, basedir): return if log.ThugOpts.vt_submit: self.submit(data, sample)
gpl-2.0
2,766,726,902,379,941,000
30.084746
107
0.591125
false
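The get_report() method in the record above issues a GET with resource, allinfo and apikey parameters against the report URL taken from thug.conf. A minimal stand-alone sketch of the same lookup; the endpoint URL and API key are placeholders (Thug reads both from its configuration), and the md5 used is the well-known EICAR test-file hash:

# Stand-alone sketch of the report lookup performed by get_report() above.
import requests

REPORT_URL = "https://www.virustotal.com/vtapi/v2/file/report"  # assumed v2 endpoint
params = {"resource": "44d88612fea8a8f36de82e1278abb02f",  # EICAR md5, as an example
          "allinfo": 1,
          "apikey": "YOUR_API_KEY"}
response = requests.get(REPORT_URL, params=params)
report = response.json()
if response.ok and report.get("response_code") == 1:
    print("{}/{} engines flagged the sample".format(report.get("positives"),
                                                    report.get("total")))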
dsalazarr/pfc_ii
pfc/config/urls.py
1
2260
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views

from pfc.applications.views import ApplicationConfigurationView
from pfc.dashboard import MyLoginForm
from pfc.users.views import UserListRestView, UserPermissionsView, UserMe, LoginView

admin.site.login_form = MyLoginForm

urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),

    # Django Admin, use {% url 'admin:index' %}
    url(settings.ADMIN_URL, admin.site.urls),
    url('^jet/', include('jet.urls', 'jet')),
    url('^jet/dashboard/', include('jet.dashboard.urls', 'jet-dashboard')),

    # User management
    url(r'^users/', include('pfc.users.urls', namespace='users')),
    url(r'^accounts/login/', LoginView.as_view()),
    url(r'^accounts/', include('allauth.urls')),

    # OAuth2
    url(r'oauth2/', include('oauth2_provider.urls', namespace='oauth2_provider')),

    # Your stuff: custom urls includes go here
    url(r'configuration/', ApplicationConfigurationView.as_view()),
    url(r'rest-users/', UserListRestView.as_view()),
    url(r'user-permissions/', UserPermissionsView.as_view()),
    url(r'users-me/', UserMe.as_view()),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

if settings.DEBUG:
    # This allows the error pages to be debugged during development; just visit
    # these urls in the browser to see how the error pages look.
    urlpatterns += [
        url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
        url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
        url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
        url(r'^500/$', default_views.server_error),
    ]

    if 'debug_toolbar' in settings.INSTALLED_APPS:
        import debug_toolbar

        urlpatterns = [
            url(r'^__debug__/', include(debug_toolbar.urls)),
        ] + urlpatterns
gpl-3.0
-2,400,147,867,395,149,300
42.461538
110
0.690265
false
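A quick way to sanity-check the URLconf above is Django's test client. The sketch below is an assumption-laden smoke test: it presumes DJANGO_SETTINGS_MODULE points at this project's settings, django.setup() has run, and the two templates referenced by the TemplateViews exist:

# Smoke-test sketch for the 'home' and 'about' routes defined above.
from django.test import Client

client = Client()
assert client.get('/').status_code == 200        # 'home' TemplateView
assert client.get('/about/').status_code == 200  # 'about' TemplateView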
rhinstaller/libblockdev
tests/kbd_test.py
1
22929
import unittest import os import re import time from contextlib import contextmanager from distutils.version import LooseVersion from distutils.spawn import find_executable from utils import create_sparse_tempfile, create_lio_device, delete_lio_device, wipe_all, fake_path, read_file, TestTags, tag_test from bytesize import bytesize import overrides_hack from gi.repository import BlockDev, GLib def _can_load_zram(): """Test if we can load the zram module""" if os.system("lsmod|grep zram >/dev/null") != 0: # not loaded return True elif os.system("rmmod zram") == 0: # successfully unloaded return True else: # loaded and failed to unload return False @contextmanager def _track_module_load(test_case, mod_name, loaded_attr): setattr(test_case, loaded_attr, os.system("lsmod|grep %s > /dev/null" % mod_name) == 0) try: yield finally: setattr(test_case, loaded_attr, os.system("lsmod|grep %s > /dev/null" % mod_name) == 0) def _wait_for_bcache_setup(bcache_dev): i = 0 cache_dir = "/sys/block/%s/bcache/cache" % bcache_dev while not os.access(cache_dir, os.R_OK): time.sleep(1) i += 1 if i >= 30: print("WARNING: Giving up waiting for bcache setup!!!") break class KbdZRAMTestCase(unittest.TestCase): requested_plugins = BlockDev.plugin_specs_from_names(("kbd", "swap")) @classmethod def setUpClass(cls): if not BlockDev.is_initialized(): BlockDev.init(cls.requested_plugins, None) else: BlockDev.reinit(cls.requested_plugins, True, None) def setUp(self): self.addCleanup(self._clean_up) self._loaded_zram_module = False def _clean_up(self): # make sure we unload the module if we loaded it if self._loaded_zram_module: os.system("rmmod zram") class KbdZRAMDevicesTestCase(KbdZRAMTestCase): @unittest.skipUnless(_can_load_zram(), "cannot load the 'zram' module") @tag_test(TestTags.SLOW) def test_create_destroy_devices(self): # the easiest case with _track_module_load(self, "zram", "_loaded_zram_module"): self.assertTrue(BlockDev.kbd_zram_create_devices(2, [10 * 1024**2, 10 * 1024**2], [1, 2])) time.sleep(1) self.assertTrue(BlockDev.kbd_zram_destroy_devices()) time.sleep(1) # no nstreams specified with _track_module_load(self, "zram", "_loaded_zram_module"): self.assertTrue(BlockDev.kbd_zram_create_devices(2, [10 * 1024**2, 10 * 1024**2], None)) time.sleep(1) self.assertTrue(BlockDev.kbd_zram_destroy_devices()) time.sleep(1) # with module pre-loaded, but unsed self.assertEqual(os.system("modprobe zram num_devices=2"), 0) time.sleep(1) with _track_module_load(self, "zram", "_loaded_zram_module"): self.assertTrue(BlockDev.kbd_zram_create_devices(2, [10 * 1024**2, 10 * 1024**2], [1, 1])) time.sleep(1) self.assertTrue(BlockDev.kbd_zram_destroy_devices()) time.sleep(1) # with module pre-loaded, and devices used (as active swaps) self.assertEqual(os.system("modprobe zram num_devices=2"), 0) self.assertEqual(os.system("echo 10M > /sys/class/block/zram0/disksize"), 0) self.assertEqual(os.system("echo 10M > /sys/class/block/zram1/disksize"), 0) time.sleep(1) for zram_dev in ("/dev/zram0", "/dev/zram1"): self.assertTrue(BlockDev.swap_mkswap(zram_dev, None)) self.assertTrue(BlockDev.swap_swapon(zram_dev, -1)) with _track_module_load(self, "zram", "_loaded_zram_module"): with self.assertRaises(GLib.GError): self.assertTrue(BlockDev.kbd_zram_create_devices(2, [10 * 1024**2, 10 * 1024**2], [1, 1])) for zram_dev in ("/dev/zram0", "/dev/zram1"): self.assertTrue(BlockDev.swap_swapoff(zram_dev)) self.assertEqual(os.system("rmmod zram"), 0) # should work just fine now self.assertTrue(BlockDev.kbd_zram_create_devices(2, [10 * 1024**2, 10 
* 1024**2], [1, 1])) time.sleep(1) self.assertTrue(BlockDev.kbd_zram_destroy_devices()) time.sleep(1) @unittest.skipUnless(_can_load_zram(), "cannot load the 'zram' module") @tag_test(TestTags.SLOW) def test_zram_add_remove_device(self): """Verify that it is possible to add and remove a zram device""" # the easiest case with _track_module_load(self, "zram", "_loaded_zram_module"): succ, device = BlockDev.kbd_zram_add_device (10 * 1024**2, 4) self.assertTrue(succ) self.assertTrue(device.startswith("/dev/zram")) time.sleep(5) self.assertTrue(BlockDev.kbd_zram_remove_device(device)) # no nstreams specified with _track_module_load(self, "zram", "_loaded_zram_module"): succ, device = BlockDev.kbd_zram_add_device (10 * 1024**2, 0) self.assertTrue(succ) self.assertTrue(device.startswith("/dev/zram")) time.sleep(5) self.assertTrue(BlockDev.kbd_zram_remove_device(device)) # create two devices with _track_module_load(self, "zram", "_loaded_zram_module"): succ, device = BlockDev.kbd_zram_add_device (10 * 1024**2, 4) self.assertTrue(succ) self.assertTrue(device.startswith("/dev/zram")) succ, device2 = BlockDev.kbd_zram_add_device (10 * 1024**2, 4) self.assertTrue(succ) self.assertTrue(device2.startswith("/dev/zram")) time.sleep(5) self.assertTrue(BlockDev.kbd_zram_remove_device(device)) self.assertTrue(BlockDev.kbd_zram_remove_device(device2)) # mixture of multiple devices and a single device with _track_module_load(self, "zram", "_loaded_zram_module"): self.assertTrue(BlockDev.kbd_zram_create_devices(2, [10 * 1024**2, 10 * 1024**2], [1, 2])) time.sleep(5) succ, device = BlockDev.kbd_zram_add_device (10 * 1024**2, 4) self.assertTrue(succ) self.assertTrue(device.startswith("/dev/zram")) time.sleep(5) self.assertTrue(BlockDev.kbd_zram_destroy_devices()) time.sleep(5) class KbdZRAMStatsTestCase(KbdZRAMTestCase): @unittest.skipUnless(_can_load_zram(), "cannot load the 'zram' module") def test_zram_get_stats(self): """Verify that it is possible to get stats for a zram device""" # location of some sysfs files we use is different since linux 4.11 kernel_version = os.uname()[2] if LooseVersion(kernel_version) >= LooseVersion("4.11"): self._zram_get_stats_new() else: self._zram_get_stats_old() def _zram_get_stats_new(self): with _track_module_load(self, "zram", "_loaded_zram_module"): self.assertTrue(BlockDev.kbd_zram_create_devices(1, [10 * 1024**2], [2])) time.sleep(1) # XXX: this needs to get more complex/serious stats = BlockDev.kbd_zram_get_stats("zram0") self.assertTrue(stats) # /dev/zram0 should work too stats = BlockDev.kbd_zram_get_stats("/dev/zram0") self.assertTrue(stats) self.assertEqual(stats.disksize, 10 * 1024**2) # XXX: 'max_comp_streams' is currently broken on rawhide # https://bugzilla.redhat.com/show_bug.cgi?id=1352567 # self.assertEqual(stats.max_comp_streams, 2) self.assertTrue(stats.comp_algorithm) # read 'num_reads' and 'num_writes' from '/sys/block/zram0/stat' sys_stats = read_file("/sys/block/zram0/stat").strip().split() self.assertGreaterEqual(len(sys_stats), 11) # 15 stats since 4.19 num_reads = int(sys_stats[0]) num_writes = int(sys_stats[4]) self.assertEqual(stats.num_reads, num_reads) self.assertEqual(stats.num_writes, num_writes) # read 'orig_data_size', 'compr_data_size', 'mem_used_total' and # 'zero_pages' from '/sys/block/zram0/mm_stat' sys_stats = read_file("/sys/block/zram0/mm_stat").strip().split() self.assertGreaterEqual(len(sys_stats), 7) # since 4.18 we have 8 stats orig_data_size = int(sys_stats[0]) compr_data_size = int(sys_stats[1]) mem_used_total = int(sys_stats[2]) 
zero_pages = int(sys_stats[5]) self.assertEqual(stats.orig_data_size, orig_data_size) self.assertEqual(stats.compr_data_size, compr_data_size) self.assertEqual(stats.mem_used_total, mem_used_total) self.assertEqual(stats.zero_pages, zero_pages) # read 'invalid_io' and 'num_writes' from '/sys/block/zram0/io_stat' sys_stats = read_file("/sys/block/zram0/io_stat").strip().split() self.assertEqual(len(sys_stats), 4) invalid_io = int(sys_stats[2]) self.assertEqual(stats.invalid_io, invalid_io) with _track_module_load(self, "zram", "_loaded_zram_module"): self.assertTrue(BlockDev.kbd_zram_destroy_devices()) def _zram_get_stats_old(self): with _track_module_load(self, "zram", "_loaded_zram_module"): self.assertTrue(BlockDev.kbd_zram_create_devices(1, [10 * 1024**2], [2])) time.sleep(1) stats = BlockDev.kbd_zram_get_stats("zram0") self.assertTrue(stats) # /dev/zram0 should work too stats = BlockDev.kbd_zram_get_stats("/dev/zram0") self.assertTrue(stats) self.assertEqual(stats.disksize, 10 * 1024**2) self.assertEqual(stats.max_comp_streams, 2) self.assertTrue(stats.comp_algorithm) num_reads = int(read_file("/sys/block/zram0/num_reads").strip()) self.assertEqual(stats.num_reads, num_reads) num_writes = int(read_file("/sys/block/zram0/num_writes").strip()) self.assertEqual(stats.num_writes, num_writes) orig_data_size = int(read_file("/sys/block/zram0/orig_data_size").strip()) self.assertEqual(stats.orig_data_size, orig_data_size) compr_data_size = int(read_file("/sys/block/zram0/compr_data_size").strip()) self.assertEqual(stats.compr_data_size, compr_data_size) mem_used_total = int(read_file("/sys/block/zram0/mem_used_total").strip()) self.assertEqual(stats.mem_used_total, mem_used_total) zero_pages = int(read_file("/sys/block/zram0/zero_pages").strip()) self.assertEqual(stats.zero_pages, zero_pages) invalid_io = int(read_file("/sys/block/zram0/invalid_io").strip()) self.assertEqual(stats.invalid_io, invalid_io) with _track_module_load(self, "zram", "_loaded_zram_module"): self.assertTrue(BlockDev.kbd_zram_destroy_devices()) class KbdBcacheNodevTestCase(unittest.TestCase): # no setUp/tearDown methods needed requested_plugins = BlockDev.plugin_specs_from_names(("kbd", "swap")) @classmethod def setUpClass(cls): if not find_executable("make-bcache"): raise unittest.SkipTest("make-bcache executable not found in $PATH, skipping.") if not BlockDev.is_initialized(): BlockDev.init(cls.requested_plugins, None) else: BlockDev.reinit(cls.requested_plugins, True, None) @tag_test(TestTags.NOSTORAGE) def test_bcache_mode_str_bijection(self): """Verify that it's possible to transform between cache modes and their string representations""" mode_mapping = ((BlockDev.KBDBcacheMode.WRITETHROUGH, "writethrough"), (BlockDev.KBDBcacheMode.WRITEBACK, "writeback"), (BlockDev.KBDBcacheMode.WRITEAROUND, "writearound"), (BlockDev.KBDBcacheMode.NONE, "none"), (BlockDev.KBDBcacheMode.UNKNOWN, "unknown"), ) for (mode, mode_str) in mode_mapping: self.assertEqual(mode, BlockDev.kbd_bcache_get_mode_from_str(mode_str)) self.assertEqual(mode_str, BlockDev.kbd_bcache_get_mode_str(mode)) self.assertEqual(mode_str, BlockDev.kbd_bcache_get_mode_str(BlockDev.kbd_bcache_get_mode_from_str(mode_str))) self.assertEqual(mode, BlockDev.kbd_bcache_get_mode_from_str(BlockDev.kbd_bcache_get_mode_str(mode))) class KbdBcacheTestCase(unittest.TestCase): requested_plugins = BlockDev.plugin_specs_from_names(("kbd", "swap")) @classmethod def setUpClass(cls): if not find_executable("make-bcache"): raise unittest.SkipTest("make-bcache executable not found 
in $PATH, skipping.") if not BlockDev.is_initialized(): BlockDev.init(cls.requested_plugins, None) else: BlockDev.reinit(cls.requested_plugins, True, None) def setUp(self): self.addCleanup(self._clean_up) self.dev_file = create_sparse_tempfile("lvm_test", 10 * 1024**3) self.dev_file2 = create_sparse_tempfile("lvm_test", 10 * 1024**3) try: self.loop_dev = create_lio_device(self.dev_file) except RuntimeError as e: raise RuntimeError("Failed to setup loop device for testing: %s" % e) try: self.loop_dev2 = create_lio_device(self.dev_file2) except RuntimeError as e: raise RuntimeError("Failed to setup loop device for testing: %s" % e) self.bcache_dev = None def _clean_up(self): if self.bcache_dev: try: BlockDev.kbd_bcache_destroy(self.bcache_dev) except: pass try: delete_lio_device(self.loop_dev) except RuntimeError: # just move on, we can do no better here pass os.unlink(self.dev_file) try: delete_lio_device(self.loop_dev2) except RuntimeError: # just move on, we can do no better here pass os.unlink(self.dev_file2) class KbdTestBcacheCreate(KbdBcacheTestCase): @tag_test(TestTags.UNSTABLE) def test_bcache_create_destroy(self): """Verify that it's possible to create and destroy a bcache device""" succ, dev = BlockDev.kbd_bcache_create(self.loop_dev, self.loop_dev2, None) self.assertTrue(succ) self.assertTrue(dev) self.bcache_dev = dev _wait_for_bcache_setup(dev) succ = BlockDev.kbd_bcache_destroy(self.bcache_dev) self.assertTrue(succ) self.bcache_dev = None time.sleep(5) wipe_all(self.loop_dev, self.loop_dev2) @tag_test(TestTags.UNSTABLE) def test_bcache_create_destroy_full_path(self): """Verify that it's possible to create and destroy a bcache device with full device path""" succ, dev = BlockDev.kbd_bcache_create(self.loop_dev, self.loop_dev2, None) self.assertTrue(succ) self.assertTrue(dev) self.bcache_dev = dev _wait_for_bcache_setup(dev) succ = BlockDev.kbd_bcache_destroy("/dev/" + self.bcache_dev) self.assertTrue(succ) self.bcache_dev = None time.sleep(5) wipe_all(self.loop_dev, self.loop_dev2) class KbdTestBcacheAttachDetach(KbdBcacheTestCase): @tag_test(TestTags.UNSTABLE) def test_bcache_attach_detach(self): """Verify that it's possible to detach/attach a cache from/to a bcache device""" succ, dev = BlockDev.kbd_bcache_create(self.loop_dev, self.loop_dev2, None) self.assertTrue(succ) self.assertTrue(dev) self.bcache_dev = dev _wait_for_bcache_setup(dev) succ, c_set_uuid = BlockDev.kbd_bcache_detach(self.bcache_dev) self.assertTrue(succ) self.assertTrue(c_set_uuid) succ = BlockDev.kbd_bcache_attach(c_set_uuid, self.bcache_dev) self.assertTrue(succ) succ = BlockDev.kbd_bcache_destroy(self.bcache_dev) self.assertTrue(succ) self.bcache_dev = None time.sleep(1) wipe_all(self.loop_dev, self.loop_dev2) @tag_test(TestTags.UNSTABLE) def test_bcache_attach_detach_full_path(self): """Verify that it's possible to detach/attach a cache from/to a bcache device with full device path""" succ, dev = BlockDev.kbd_bcache_create(self.loop_dev, self.loop_dev2, None) self.assertTrue(succ) self.assertTrue(dev) self.bcache_dev = dev _wait_for_bcache_setup(dev) succ, c_set_uuid = BlockDev.kbd_bcache_detach("/dev/" + self.bcache_dev) self.assertTrue(succ) self.assertTrue(c_set_uuid) succ = BlockDev.kbd_bcache_attach(c_set_uuid, "/dev/" + self.bcache_dev) self.assertTrue(succ) succ = BlockDev.kbd_bcache_destroy(self.bcache_dev) self.assertTrue(succ) self.bcache_dev = None time.sleep(1) wipe_all(self.loop_dev, self.loop_dev2) @tag_test(TestTags.UNSTABLE) def test_bcache_detach_destroy(self): """Verify that it's 
possible to destroy a bcache device with no cache attached""" succ, dev = BlockDev.kbd_bcache_create(self.loop_dev, self.loop_dev2, None) self.assertTrue(succ) self.assertTrue(dev) self.bcache_dev = dev _wait_for_bcache_setup(dev) succ, c_set_uuid = BlockDev.kbd_bcache_detach(self.bcache_dev) self.assertTrue(succ) self.assertTrue(c_set_uuid) succ = BlockDev.kbd_bcache_destroy(self.bcache_dev) self.assertTrue(succ) self.bcache_dev = None time.sleep(1) wipe_all(self.loop_dev, self.loop_dev2) class KbdTestBcacheGetSetMode(KbdBcacheTestCase): @tag_test(TestTags.UNSTABLE) def test_bcache_get_set_mode(self): """Verify that it is possible to get and set Bcache mode""" succ, dev = BlockDev.kbd_bcache_create(self.loop_dev, self.loop_dev2, None) self.assertTrue(succ) self.assertTrue(dev) self.bcache_dev = dev _wait_for_bcache_setup(dev) mode = BlockDev.kbd_bcache_get_mode(self.bcache_dev) self.assertNotEqual(mode, BlockDev.KBDBcacheMode.UNKNOWN) for mode_str in ("writethrough", "writeback", "writearound", "none"): mode = BlockDev.kbd_bcache_get_mode_from_str(mode_str) succ = BlockDev.kbd_bcache_set_mode(self.bcache_dev, mode) self.assertTrue(succ) new_mode = BlockDev.kbd_bcache_get_mode(self.bcache_dev) self.assertEqual(mode, new_mode) self.assertEqual(mode_str, BlockDev.kbd_bcache_get_mode_str(new_mode)) mode_str = "unknown" mode = BlockDev.kbd_bcache_get_mode_from_str(mode_str) with self.assertRaises(GLib.GError): # cannot set mode to "uknown" BlockDev.kbd_bcache_set_mode(self.bcache_dev, mode) mode_str = "bla" with self.assertRaises(GLib.GError): mode = BlockDev.kbd_bcache_get_mode_from_str(mode_str) # set back to some caching mode mode_str = "writethrough" mode = BlockDev.kbd_bcache_get_mode_from_str(mode_str) succ = BlockDev.kbd_bcache_set_mode(self.bcache_dev, mode) self.assertTrue(succ) _wait_for_bcache_setup(dev) succ = BlockDev.kbd_bcache_destroy(self.bcache_dev) self.assertTrue(succ) self.bcache_dev = None time.sleep(1) wipe_all(self.loop_dev, self.loop_dev2) class KbdTestBcacheStatusTest(KbdBcacheTestCase): def _get_size(self, bcache_name): cache_dir = '/sys/block/%s/bcache/cache' % bcache_name # sum sizes from all caches caches = ['%s/%s' % (cache_dir, d) for d in os.listdir(cache_dir) if re.match('cache[0-9]*$', d)] return sum(int(read_file(os.path.realpath(c) + '/../size')) for c in caches) @tag_test(TestTags.UNSTABLE) def test_bcache_status(self): succ, dev = BlockDev.kbd_bcache_create(self.loop_dev, self.loop_dev2, None) self.assertTrue(succ) self.assertTrue(dev) self.bcache_dev = dev _wait_for_bcache_setup(dev) # should work with both "bcacheX" and "/dev/bcacheX" status = BlockDev.kbd_bcache_status(self.bcache_dev) self.assertTrue(status) status = BlockDev.kbd_bcache_status("/dev/" + self.bcache_dev) self.assertTrue(status) # check some basic values self.assertTrue(status.state) sys_state = read_file("/sys/block/%s/bcache/state" % self.bcache_dev).strip() self.assertEqual(status.state, sys_state) sys_block = read_file("/sys/block/%s/bcache/cache/block_size" % self.bcache_dev).strip() self.assertEqual(status.block_size, int(bytesize.Size(sys_block))) sys_size = self._get_size(self.bcache_dev) self.assertGreater(status.cache_size, sys_size) succ = BlockDev.kbd_bcache_destroy(self.bcache_dev) self.assertTrue(succ) self.bcache_dev = None time.sleep(1) wipe_all(self.loop_dev, self.loop_dev2) class KbdTestBcacheBackingCacheDevTest(KbdBcacheTestCase): @tag_test(TestTags.UNSTABLE) def test_bcache_backing_cache_dev(self): """Verify that is is possible to get the backing and cache devices for 
a Bcache""" succ, dev = BlockDev.kbd_bcache_create(self.loop_dev, self.loop_dev2, None) self.assertTrue(succ) self.assertTrue(dev) self.bcache_dev = dev _wait_for_bcache_setup(dev) self.assertEqual("/dev/" + BlockDev.kbd_bcache_get_backing_device(self.bcache_dev), self.loop_dev) self.assertEqual("/dev/" + BlockDev.kbd_bcache_get_cache_device(self.bcache_dev), self.loop_dev2) succ = BlockDev.kbd_bcache_destroy(self.bcache_dev) self.assertTrue(succ) self.bcache_dev = None time.sleep(1) wipe_all(self.loop_dev, self.loop_dev2) class KbdUnloadTest(KbdBcacheTestCase): def setUp(self): # make sure the library is initialized with all plugins loaded for other # tests self.addCleanup(BlockDev.reinit, self.requested_plugins, True, None) @tag_test(TestTags.NOSTORAGE) def test_check_no_bcache_progs(self): """Verify that checking the availability of make-bcache works as expected""" # unload all plugins first self.assertTrue(BlockDev.reinit([], True, None)) with fake_path(all_but="make-bcache"): with self.assertRaises(GLib.GError): BlockDev.reinit(self.requested_plugins, True, None) self.assertNotIn("kbd", BlockDev.get_available_plugin_names()) # load the plugins back self.assertTrue(BlockDev.reinit(self.requested_plugins, True, None)) self.assertIn("kbd", BlockDev.get_available_plugin_names())
lgpl-2.1
7,868,184,235,334,977,000
38.601036
130
0.627066
false
vidartf/hyperspyUI
hyperspyui/uiprogressbar.py
1
10235
# -*- coding: utf-8 -*- # Copyright 2014-2016 The HyperSpyUI developers # # This file is part of HyperSpyUI. # # HyperSpyUI is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # HyperSpyUI is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with HyperSpyUI. If not, see <http://www.gnu.org/licenses/>. """ Created on Wed Nov 26 19:11:19 2014 @author: Vidar Tonaas Fauske """ from __future__ import division, absolute_import # future division is important to divide integers and get as # a result precise floating numbers (instead of truncated int) # import compatibility functions and utilities import sys from time import time from QtCore import QObject, Signal, SIGNAL import hyperspy.external.progressbar from tqdm import tqdm from hyperspyui.exceptions import ProcessCanceled # Create signal object which will handle all events signaler = QObject() signaler.created = Signal(object) signaler.progress = Signal((object, int), (object, int, str)) signaler.finished = Signal(int) signaler.cancel = Signal(int) # This is necessary as it bugs out if not (it's a daisy chained event) def _on_cancel(pid): signaler.emit(SIGNAL('cancel(int)'), pid) signaler.on_cancel = _on_cancel # Hook function def _wrap(*args, **kwargs): """ Replacement function for hyperspy.external.progressbar.progressbar(). Causes a UIProgressBar() to be made, which the MainWindow can connect to in order to create a progress indicator. It is important that the connection is made with QtCore.Signals, as they are thread aware, and the signal is processed on the GUI main event loop, i.e. the main thread. This is necessary as all UI operations have to happen on the main thread, and the hyperspy processing might be pushed to a worker thread "threaded.py". """ return UIProgressBar(*args, **kwargs) # Override hyperspy prgoressbar implementation orig = hyperspy.external.progressbar.progressbar def takeover_progressbar(): """ Replace hyperspy.external.progressbar.progressbar() with uiprogressbar.wrap(). The main_window will be connected to all the events whenever a progressbar is created. """ hyperspy.external.progressbar.progressbar = _wrap def reset_progressbar(): hyperspy.external.progressbar.progressbar = orig class UIProgressBar(tqdm): """ Connector between hyperspy process with a progressbar, and the UI. See also the doc for wrap() for more details. """ uid = 1 @classmethod def write(cls, s, file=sys.stdout, end="\n"): """ Print a message via tqdm_gui (just an alias for print) """ # TODO: print text on GUI? 
file.write(s) file.write(end) def __init__(self, *args, **kwargs): self.id = self.uid self.uid += 1 kwargs['gui'] = True self.cancelled = False super().__init__(*args, **kwargs) # Initialize the GUI display if self.disable or not kwargs['gui']: return self.mininterval = max(self.mininterval, 0.5) # assert maxval >= 0 # self.maxval = maxval self.signal_set = False global signaler signaler.connect(signaler, SIGNAL('cancel(int)'), self.cancel) self.currval = 0 self.finished = False self.start_time = None self.seconds_elapsed = 0 signaler.emit(SIGNAL('created(int, int, QString)'), self.id, self.total, "") def cancel(self, pid): """ Slot for the UI to call if it wants to cancel the process. Thread safe. """ if pid == self.id: self.cancelled = True @staticmethod def format_string(n, total, elapsed, rate=None): return "ETA: " + (tqdm.format_interval((total - n) / rate) if rate else '?') def __iter__(self): iterable = self.iterable if self.disable: for obj in iterable: if self.cancelled is True: raise ProcessCanceled("User cancelled operation") yield obj return # ncols = self.ncols mininterval = self.mininterval maxinterval = self.maxinterval miniters = self.miniters dynamic_miniters = self.dynamic_miniters start_t = self.start_t last_print_t = self.last_print_t last_print_n = self.last_print_n n = self.n # dynamic_ncols = self.dynamic_ncols smoothing = self.smoothing avg_time = self.avg_time for obj in iterable: if self.cancelled is True: raise ProcessCanceled("User cancelled operation") yield obj # Update and print the progressbar. # Note: does not call self.update(1) for speed optimisation. n += 1 delta_it = n - last_print_n # check the counter first (avoid calls to time()) if delta_it >= miniters: cur_t = time() delta_t = cur_t - last_print_t if delta_t >= mininterval: elapsed = cur_t - start_t # EMA (not just overall average) if smoothing and delta_t: avg_time = delta_t / delta_it \ if avg_time is None \ else smoothing * delta_t / delta_it + \ (1 - smoothing) * avg_time txt = self.format_string( n, self.total, elapsed, 1 / avg_time if avg_time else None) global signaler signaler.emit(SIGNAL('progress(int, int, QString)'), self.id, n, txt) # If no `miniters` was specified, adjust automatically # to the maximum iteration rate seen so far. if dynamic_miniters: if maxinterval and delta_t > maxinterval: # Set miniters to correspond to maxinterval miniters = delta_it * maxinterval / delta_t elif mininterval and delta_t: # EMA-weight miniters to converge # towards the timeframe of mininterval miniters = smoothing * delta_it * mininterval \ / delta_t + (1 - smoothing) * miniters else: miniters = smoothing * delta_it + \ (1 - smoothing) * miniters # Store old values for next call last_print_n = n last_print_t = cur_t # Closing the progress bar. # Update some internal variables for close(). self.last_print_n = last_print_n self.n = n self.close() def update(self, n=1): """ Updates the progress bar to a new value. Called by the hyperspy side. Not safe to call from UI. """ if self.disable: return if self.cancelled is True: raise ProcessCanceled("User cancelled operation") if n < 0: n = 1 self.n += n delta_it = self.n - self.last_print_n # should be n? 
if delta_it >= self.miniters: # We check the counter first, to reduce the overhead of time() cur_t = time() delta_t = cur_t - self.last_print_t if delta_t >= self.mininterval: elapsed = cur_t - self.start_t # EMA (not just overall average) if self.smoothing and delta_t: self.avg_time = delta_t / delta_it \ if self.avg_time is None \ else self.smoothing * delta_t / delta_it + \ (1 - self.smoothing) * self.avg_time txt = self.format_string( self.n, self.total, elapsed, 1 / self.avg_time if self.avg_time else None) global signaler signaler.emit(SIGNAL('progress(int, int, QString)'), self.id, self.n, txt) # If no `miniters` was specified, adjust automatically to the # maximum iteration rate seen so far. # e.g.: After running `tqdm.update(5)`, subsequent # calls to `tqdm.update()` will only cause an update after # at least 5 more iterations. if self.dynamic_miniters: if self.maxinterval and delta_t > self.maxinterval: self.miniters = self.miniters * self.maxinterval \ / delta_t elif self.mininterval and delta_t: self.miniters = self.smoothing * delta_it \ * self.mininterval / delta_t + \ (1 - self.smoothing) * self.miniters else: self.miniters = self.smoothing * delta_it + \ (1 - self.smoothing) * self.miniters # Store old values for next call self.last_print_n = self.n self.last_print_t = cur_t def close(self): if self.disable: return self.disable = True self.finish() self._instances.remove(self) def finish(self): """ Used to tell the progress is finished. Called by hyperspy side. """ global signaler signaler.emit(SIGNAL('finished(int)'), self.id)
gpl-3.0
-3,904,834,961,505,757,700
34.538194
82
0.557792
false
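On the UI side, a widget can subscribe to the module-level signaler with the same old-style signal signatures the code above emits. The sketch below mirrors the connect() call the module itself uses for 'cancel(int)'; the slot functions are hypothetical, and the bare QtCore import simply copies the module's own import style:

# Sketch of a UI-side consumer for the signaler defined above.
from QtCore import SIGNAL
from hyperspyui.uiprogressbar import signaler

def on_created(pid, maxval, txt):
    print("progressbar %d created, total=%s" % (pid, maxval))

def on_progress(pid, value, txt):
    print("progressbar %d: %s (%s)" % (pid, value, txt))

def on_finished(pid):
    print("progressbar %d finished" % pid)

signaler.connect(signaler, SIGNAL('created(int, int, QString)'), on_created)
signaler.connect(signaler, SIGNAL('progress(int, int, QString)'), on_progress)
signaler.connect(signaler, SIGNAL('finished(int)'), on_finished)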
google-research/tensor2robot
layers/resnet_test.py
1
3049
# coding=utf-8
# Copyright 2021 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as python3
"""Tests for tensor2robot.layers.resnet."""

import functools
from absl.testing import parameterized
from six.moves import range
from tensor2robot.layers import resnet
import tensorflow.compat.v1 as tf


class ResnetTest(tf.test.TestCase, parameterized.TestCase):

  @parameterized.parameters(('',), ('fubar',), ('dummy/scope'))
  def test_intermediate_values(self, scope):
    with tf.variable_scope(scope):
      image = tf.zeros((2, 224, 224, 3), dtype=tf.float32)
      end_points = resnet.resnet_model(image,
                                       is_training=True,
                                       num_classes=1001,
                                       return_intermediate_values=True)
    tensors = ['initial_conv', 'initial_max_pool', 'pre_final_pool',
               'final_reduce_mean', 'final_dense']
    tensors += ['block_layer{}'.format(i + 1) for i in range(4)]
    self.assertEqual(set(tensors), set(end_points.keys()))

  @parameterized.parameters(
      (18, [True, True, True, True]),
      (50, [True, False, True, False]))
  def test_film(self, resnet_size, enabled_blocks):
    image = tf.zeros((2, 224, 224, 3), dtype=tf.float32)
    embedding = tf.zeros((2, 100), dtype=tf.float32)
    film_generator_fn = functools.partial(
        resnet.linear_film_generator, enabled_block_layers=enabled_blocks)
    _ = resnet.resnet_model(image,
                            is_training=True,
                            num_classes=1001,
                            resnet_size=resnet_size,
                            return_intermediate_values=True,
                            film_generator_fn=film_generator_fn,
                            film_generator_input=embedding)

  def test_malformed_film_raises(self):
    image = tf.zeros((2, 224, 224, 3), dtype=tf.float32)
    embedding = tf.zeros((2, 100), dtype=tf.float32)
    film_generator_fn = functools.partial(
        resnet.linear_film_generator, enabled_block_layers=[True]*5)
    with self.assertRaises(ValueError):
      _ = resnet.resnet_model(image,
                              is_training=True,
                              num_classes=1001,
                              resnet_size=18,
                              return_intermediate_values=True,
                              film_generator_fn=film_generator_fn,
                              film_generator_input=embedding)


if __name__ == '__main__':
  tf.test.main()
apache-2.0
-7,691,718,284,358,885,000
40.767123
74
0.602493
false
jaryn/vomit
actions.py
1
14309
#!/usr/bin/python # Copyright (C) 2015 Jaroslav Henner # # This file is part of pyvmomi ansible module. # # pyvmomi_ansible module is free software: you can redistribute it and/or # modify it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License. # # pyvmomi ansible module is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with pyvmomi ansible more. If not, see <http://www.gnu.org/licenses/>. from abc import abstractmethod from contextlib import contextmanager import logging from time import sleep import uuid from pyVim import connect from pyVmomi import vim from pyVmomi import vmodl LOG = logging.getLogger(__name__) def wait_for_tasks(service_instance, tasks): """Given the service instance si and tasks, it returns after all the tasks are complete """ property_collector = service_instance.content.propertyCollector task_list = [str(task) for task in tasks] # Create filter obj_specs = [vmodl.query.PropertyCollector.ObjectSpec(obj=task) for task in tasks] property_spec = vmodl.query.PropertyCollector.PropertySpec(type=vim.Task, pathSet=[], all=True) filter_spec = vmodl.query.PropertyCollector.FilterSpec() filter_spec.objectSet = obj_specs filter_spec.propSet = [property_spec] pcfilter = property_collector.CreateFilter(filter_spec, True) try: version, state = None, None # Loop looking for updates till the state moves to a completed state. while len(task_list): update = property_collector.WaitForUpdates(version) for filter_set in update.filterSet: for obj_set in filter_set.objectSet: task = obj_set.obj for change in obj_set.changeSet: if change.name == 'info': state = change.val.state elif change.name == 'info.state': state = change.val else: continue if not str(task) in task_list: continue if state == vim.TaskInfo.State.success: # Remove task from taskList task_list.remove(str(task)) elif state == vim.TaskInfo.State.error: raise task.info.error # Move to next version version = update.version finally: if pcfilter: pcfilter.Destroy() @contextmanager def disconnecting(connection): try: yield connection finally: connect.Disconnect(connection) class NotFound(Exception): pass class Action(object): def __init__(self, si): self.si = si self.tasks = [] def _find_obj(self, path): obj = self.si.content.searchIndex.FindByInventoryPath(path) if not obj: raise NotFound(str(path)) return obj @abstractmethod def start(self): LOG.info("The action %s started.", self) return self def wait(self): if self.tasks: wait_for_tasks(self.si, self.tasks) LOG.info("The action %s have finished all the tasks.", self) def make_so(self): self.start().wait() class CreateCluster(Action): def name(self, name): self.name = name return self def host_folder(self, path): self.host_folder = self._find_obj(path) return self def start(self): Action.start(self) self.host_folder.CreateCluster(self.name, vim.cluster.ConfigSpec()) return self class CloneVm(Action): def name(self, name): self.name_ = name return self def vm_folder_path(self, path): self.folder = self._find_obj(path) return self def source_path(self, path): self.source = self._find_obj(path) return self def resource_pool_path(self, path): self.resource_pool = self._find_obj(path) return self def to_template(self, to_template): self.to_template = 
to_template return self def _mac(self, mac): if mac: self.mac_ = str(mac) return self def _memory(self, memoryMB): if memoryMB: self.memoryMB = int(memoryMB) return self def start(self): Action.start(self) cs = vim.vm.ConfigSpec(deviceChange=[]) mac = getattr(self, 'mac_', None) memoryMB = getattr(self, 'memoryMB', None) if mac: nics = [vm_device for vm_device in self.source.config.hardware.device if isinstance(vm_device, vim.vm.device.VirtualEthernetCard)] LOG.debug('Found ethernet devices %s', nics) device = nics[0] nicspec = vim.vm.device.VirtualDeviceSpec() nicspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit nicspec.device = device device.addressType = "manual" device.macAddress = str(self.mac_) cs.deviceChange.append(nicspec) if memoryMB: cs.memoryMB = memoryMB clone_spec = vim.vm.CloneSpec( location=vim.vm.RelocateSpec(pool=self.resource_pool), template=self.to_template, config=cs) self.tasks.append(self.source.Clone( self.folder, self.name_, clone_spec)) return self class PowerOnVm(Action): def vm_path(self, path): self.vm = self._find_obj(path) return self def source_path(self, path): self.source = self._find_obj(path) return self def resource_pool_path(self, path): self.resource_pool = self._find_obj(path) return self def to_template(self, to_template): self.to_template = to_template return self def start(self): Action.start(self) self.tasks.append(self.vm.PowerOn()) return self class PowerOffVm(PowerOnVm): def start(self): Action.start(self) self.tasks.append(self.vm.PowerOff()) return self class CreateVm(Action): def __init__(self, si): Action.__init__(self, si) self.spec = vim.vm.ConfigSpec() self.spec.memoryMB = 512 self.spec.cpuHotAddEnabled = True self.spec.deviceChange = [] self.spec.guestId = "rhel6_64Guest" self._disk_no = 1 self._disk_controller_no = 1 def name(self, name): self.spec.name = name return self def placement(self, vm_folder_path, host_path, datastore_name): self.vm_folder_path(vm_folder_path) self.host_path(host_path) self.datastore_name(datastore_name) return self def vm_folder_path(self, path): self.vm_folder = self._find_obj(path) return self def host_path(self, path): self.host = self._find_obj(path) return self def datastore_name(self, name): self.datastore_name = name return self def start(self): Action.start(self) self.spec.files = vim.vm.FileInfo( vmPathName="[{datastore_name}] {name}".format( datastore_name=self.datastore_name, name=self.spec.name)) self.tasks.append(self.vm_folder.CreateVm( config=self.spec, pool=self.host.resourcePool)) return self def network(self, net_name, mac=None): device = vim.vm.device.VirtualVmxnet3() device.backing = ( vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()) if mac: device.addressType = "manual" device.macAddress = str(mac) device.backing.deviceName = net_name self._add_dev(device) return self def disk(self, size): dev = vim.vm.device disk_uuid = uuid.uuid4() diskspec = dev.VirtualDisk(capacityInKB=int(size)) diskspec.controllerKey = self._disk_controller_no diskspec.unitNumber = self._disk_no self._disk_no += 1 diskspec.backing = dev.VirtualDisk.FlatVer2BackingInfo() diskspec.backing.diskMode = \ vim.vm.device.VirtualDiskOption.DiskMode.persistent diskspec.backing.uuid = str(uuid) diskspec.backing.fileName = ( '[{store}] {vm_name}/{disk_name}.vmdk'.format( store=self.datastore_name, vm_name=self.spec.name, disk_name="disk-{}".format(disk_uuid))) self._add_dev(diskspec).fileOperation = ( dev.VirtualDeviceSpec.FileOperation.create) return self def scsi(self): vctl1 = 
vim.vm.device.ParaVirtualSCSIController() vctl1.key = 1 vctl1.sharedBus = "noSharing" self._add_dev(vctl1) return self def _add_dev(self, device): spec = vim.vm.device.VirtualDeviceSpec() spec.device = device spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add self.spec.deviceChange.append(spec) return spec class CreateHost(Action): ANY_THUMBPRINT = object() def __init__(self, si): super(CreateHost, self).__init__(si) self.spec = vim.host.ConnectSpec() def name(self, name): self.spec.hostName = name return self def creds(self, user, password): self.spec.userName = user self.spec.password = password return self def cluster_path(self, path): self.cluster = self._find_obj(path) return self def thumbprint(self, thumbprint): self.thumbprint = thumbprint return self def start(self): Action.start(self) if self.thumbprint is self.ANY_THUMBPRINT: self.spec.sslThumbprint = self.get_host_thumbprint() self.tasks.append(self.cluster.AddHost(self.spec, True)) else: self.spec.sslThumbprint = self.thumbprint self.tasks.append(self.cluster.AddHost(self.spec, True)) return self def get_host_thumbprint(self): task = self.cluster.AddHost(self.spec, True) while task.info.state == "running": sleep(1) if isinstance(task.info.error, vim.fault.SSLVerifyFault): thumbprint = task.info.error.thumbprint LOG.warning("Using thumbprint '{}' for host {} from non-secured " "source.".format(thumbprint, self.spec.hostName)) return thumbprint else: raise task.info.error class CreateDVSwitch(Action): def name(self, name): spec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec(name=name) self.switch_spec = vim.DistributedVirtualSwitch.CreateSpec( configSpec=spec) return self def target(self, path): self.network_folder = self._find_obj(path) return self def start(self): Action.start(self) self.task = self.network_folder.CreateDistributedVirtualSwitch( self.switch_spec) return self class CreateDVSwitchPortGroup(Action): def __init__(self, si): Action.__init__(self, si) self.spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec() def name(self, name): self.spec.name = name return self def type(self, type): self.spec.type = type return self def target(self, path): self.vswitch = self._find_obj(path) return self def vlan(self, vlan): dvs_vim = vim.dvs.VmwareDistributedVirtualSwitch vlan = dvs_vim.VlanIdSpec(vlanId=vlan) self.spec.defaultPortConfig = dvs_vim.VmwarePortConfigPolicy(vlan=vlan) return self def start(self): Action.start(self) self.task = self.vswitch.AddPortgroup(self.spec) return self class DestroyEntity(Action): def path(self, path, must_exist=True): try: self.entity = self._find_obj(path) except NotFound: self.entity = None pass return self def start(self): Action.start(self) if self.entity: self.tasks.append(self.entity.Destroy()) return self class DestroyVM(DestroyEntity): pass class DestroyHost(DestroyEntity): pass class DisconnectHost(DestroyEntity): def start(self): Action.start(self) if self.entity: self.tasks.append(self.entity.Disconnect()) return self class DestroyCluster(DestroyEntity): pass class DestroyDVSwitch(DestroyEntity): pass class ChangeMAC(Action): def path(self, path): self.entity = self._find_obj(path) return self def mac(self, mac): self._mac = mac return self def start(self): Action.start(self) device = [device for device in self.entity.config.hardware.device if isinstance(device, vim.vm.device.VirtualEthernetCard)][0] cs = vim.vm.ConfigSpec(deviceChange=[]) nicspec = vim.vm.device.VirtualDeviceSpec() nicspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit nicspec.device 
= device device.addressType = "manual" device.macAddress = self._mac cs.deviceChange.append(nicspec) self.tasks.append(self.entity.Reconfigure(cs)) return self class BatchExecutor(object): def __enter__(self): self.actions = [] return self def __exit__(self, exc_type, exc_value, traceback): while self.actions: action = self.actions.pop() action.wait() def submit(self, action): action.start() self.actions.append(action)
gpl-2.0
7,803,763,654,408,510,000
28.202041
79
0.594311
false
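The pyvmomi record above defines a fluent Action/BatchExecutor wrapper; the following is a minimal usage sketch, not part of the original module — the vCenter host, credentials and inventory paths are invented placeholders.

# Hypothetical usage of the Action API defined in the module above.
# Host, credentials and inventory paths are placeholders, not real values.
from pyVim import connect

si = connect.SmartConnect(host='vcenter.example.com',
                          user='administrator@vsphere.local',
                          pwd='secret')
with disconnecting(si):
    # Clone a VM and wait for the task (make_so = start() + wait()).
    (CloneVm(si)
        .name('demo-vm-01')
        .vm_folder_path('dc1/vm')
        .source_path('dc1/vm/templates/rhel6-template')
        .resource_pool_path('dc1/host/cluster1/Resources')
        .to_template(False)
        ._memory(1024)
        .make_so())

    # Independent actions can be started together and awaited on exit.
    with BatchExecutor() as batch:
        batch.submit(CreateCluster(si).name('cl-a').host_folder('dc1/host'))
        batch.submit(CreateCluster(si).name('cl-b').host_folder('dc1/host'))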
umax/diabetto2
category/views.py
1
1429
# -*- coding: utf-8 -*- from django.core.urlresolvers import reverse_lazy from django.views.generic import (ListView, DetailView, CreateView, DeleteView, UpdateView) from . import forms from . import models __all__ = ( 'CategoryIndexView', 'CategoryDetailView', 'CategoryCreateView', 'CategoryDeleteView', 'CategoryUpdateView', ) class CategoryIndexView(ListView): context_object_name = 'categories' template_name = 'category/index.html' def get_queryset(self): return models.Category.objects.all().prefetch_related('products') class CategoryDetailView(DetailView): context_object_name = 'category' template_name = 'category/detail.html' def get_queryset(self): return models.Category.objects.all().prefetch_related('products') class CategoryCreateView(CreateView): form_class = forms.CategoryForm template_name = 'category/create.html' success_url = reverse_lazy('index_category') class CategoryUpdateView(UpdateView): model = models.Category form_class = forms.CategoryForm context_object_name = 'category' template_name = 'category/update.html' success_url = reverse_lazy('index_category') class CategoryDeleteView(DeleteView): model = models.Category context_object_name = 'category' template_name = 'category/delete.html' success_url = reverse_lazy('index_category')
gpl-2.0
5,028,435,370,511,008,000
25.962264
73
0.69909
false
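The Django class-based views in the category/views.py record above reverse the URL name 'index_category'; a possible URLconf wiring is sketched below — only that name is taken from the record, the other route names and patterns are guesses.

# Sketch of a matching category/urls.py; only 'index_category' is required by
# the reverse_lazy() calls above, the remaining names/patterns are illustrative.
from django.conf.urls import url   # pre-2.0 Django, matching django.core.urlresolvers above

from . import views

urlpatterns = [
    url(r'^$', views.CategoryIndexView.as_view(), name='index_category'),
    url(r'^(?P<pk>\d+)/$', views.CategoryDetailView.as_view(), name='detail_category'),
    url(r'^add/$', views.CategoryCreateView.as_view(), name='create_category'),
    url(r'^(?P<pk>\d+)/edit/$', views.CategoryUpdateView.as_view(), name='update_category'),
    url(r'^(?P<pk>\d+)/delete/$', views.CategoryDeleteView.as_view(), name='delete_category'),
]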
dimtruck/magnum
magnum/api/controllers/base.py
1
3932
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from webob import exc import wsme from wsme import types as wtypes from magnum.i18n import _ class APIBase(wtypes.Base): created_at = wsme.wsattr(datetime.datetime, readonly=True) """The time in UTC at which the object is created""" updated_at = wsme.wsattr(datetime.datetime, readonly=True) """The time in UTC at which the object is updated""" def as_dict(self): """Render this object as a dict of its fields.""" return dict((k, getattr(self, k)) for k in self.fields if hasattr(self, k) and getattr(self, k) != wsme.Unset) def unset_fields_except(self, except_list=None): """Unset fields so they don't appear in the message body. :param except_list: A list of fields that won't be touched. """ if except_list is None: except_list = [] for k in self.as_dict(): if k not in except_list: setattr(self, k, wsme.Unset) class Version(object): """API Version object.""" string = 'X-OpenStack-Magnum-API-Version' """HTTP Header string carrying the requested version""" min_string = 'X-OpenStack-Magnum-API-Minimum-Version' """HTTP response header""" max_string = 'X-OpenStack-Magnum-API-Maximum-Version' """HTTP response header""" def __init__(self, headers, default_version, latest_version): """Create an API Version object from the supplied headers. :param headers: webob headers :param default_version: version to use if not specified in headers :param latest_version: version to use if latest is requested :raises: webob.HTTPNotAcceptable """ (self.major, self.minor) = Version.parse_headers(headers, default_version, latest_version) def __repr__(self): return '%s.%s' % (self.major, self.minor) @staticmethod def parse_headers(headers, default_version, latest_version): """Determine the API version requested based on the headers supplied. :param headers: webob headers :param default_version: version to use if not specified in headers :param latest_version: version to use if latest is requested :returns: a tuple of (major, minor) version numbers :raises: webob.HTTPNotAcceptable """ version_str = headers.get(Version.string, default_version) if version_str.lower() == 'latest': parse_str = latest_version else: parse_str = version_str try: version = tuple(int(i) for i in parse_str.split('.')) except ValueError: version = () if len(version) != 2: raise exc.HTTPNotAcceptable(_( "Invalid value for %s header") % Version.string) return version def __lt__(a, b): if (a.major < b.major): return True if (a.major == b.major and a.minor < b.minor): return True return False def __gt__(a, b): if (a.major > b.major): return True if (a.major == b.major and a.minor > b.minor): return True return False
apache-2.0
-7,404,643,373,537,274,000
32.042017
78
0.600458
false
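Version.parse_headers in the magnum record above resolves the requested API version from request headers; a few illustrative calls follow (header values are fabricated, and a plain dict stands in for the webob headers object).

# Illustrative only: exercising Version.parse_headers with made-up values.
assert Version.parse_headers(
    {'X-OpenStack-Magnum-API-Version': '1.3'}, '1.1', '1.4') == (1, 3)

# 'latest' resolves to whatever latest_version the caller passes in.
assert Version.parse_headers(
    {'X-OpenStack-Magnum-API-Version': 'latest'}, '1.1', '1.4') == (1, 4)

# A missing header falls back to default_version.
assert Version.parse_headers({}, '1.1', '1.4') == (1, 1)

# Anything that does not parse into exactly two integers raises webob's HTTPNotAcceptable.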
papedaniel/oioioi
oioioi/contests/handlers.py
1
4452
import json import logging import traceback import pprint import socket import time from smtplib import SMTPException from django.core.mail import mail_admins from django.db import transaction from oioioi.contests.models import Contest, ProblemInstance, Submission, \ SubmissionReport, FailureReport logger = logging.getLogger(__name__) WAIT_FOR_SUBMISSION_RETRIES = 9 WAIT_FOR_SUBMISSION_SLEEP_SECONDS = 1 def wait_for_submission_in_db(env, **kwargs): """Celery may start handling a submission before it is actually saved in the DB. This is a workaround for this. """ for _i in xrange(WAIT_FOR_SUBMISSION_RETRIES): with transaction.atomic(): if bool(Submission.objects.filter(id=env['submission_id'])): break time.sleep(WAIT_FOR_SUBMISSION_SLEEP_SECONDS) return env @transaction.atomic def update_report_statuses(env, **kwargs): submission = Submission.objects.get(id=env['submission_id']) problem_instance = submission.problem_instance reports = SubmissionReport.objects.filter(submission=submission) problem_instance.controller.update_report_statuses(submission, reports) return env @transaction.atomic def update_submission_score(env, **kwargs): submission = Submission.objects.get(id=env['submission_id']) problem_instance = submission.problem_instance problem_instance.controller.update_submission_score(submission) return env def update_user_results(env, **kwargs): with transaction.atomic(): submission = Submission.objects.get(id=env['submission_id']) user = submission.user if not user: return env problem_instance = \ ProblemInstance.objects.get(id=env['problem_instance_id']) round = problem_instance.round contest = None if round is not None: assert round.id == env['round_id'] contest = round.contest assert contest.id == env['contest_id'] else: assert 'round_id' not in env assert 'contest_id' not in env problem_instance.controller.update_user_results(user, problem_instance) return env @transaction.atomic def call_submission_judged(env, **kwargs): submission = Submission.objects.get(id=env['submission_id']) contest = submission.problem_instance.contest if contest is None: assert 'contest_id' not in env return env assert contest.id == env['contest_id'] contest.controller.submission_judged(submission, rejudged=env['is_rejudge']) contest.controller.submission_unqueued(submission, env['job_id']) return env @transaction.atomic def create_error_report(env, exc_info, **kwargs): """Builds a :class:`oioioi.contests.models.SubmissionReport` for an evaulation which have failed. USES * `env['submission_id']` """ logger.error("System Error evaluating submission #%s:\n%s", env.get('submission_id', '???'), pprint.pformat(env, indent=4), exc_info=exc_info) if 'submission_id' not in env: return env try: submission = Submission.objects.get(id=env['submission_id']) except Submission.DoesNotExist: return env submission_report = SubmissionReport(submission=submission) submission_report.kind = 'FAILURE' submission_report.save() failure_report = FailureReport(submission_report=submission_report) failure_report.json_environ = json.dumps(env) failure_report.message = traceback.format_exc(exc_info) failure_report.save() return env def mail_admins_on_error(env, exc_info, **kwargs): """Sends email to all admins defined in settings.ADMINS on each grading error occurrence. USES * `env['submission_id']` """ # We don't want to spam admins when the evaluation of a deleted # submission fails. See also SIO-1254. 
try: if 'submission_id' in env: Submission.objects.get(id=env['submission_id']) except Submission.DoesNotExist: return env try: mail_admins("System Error evaluating submission #%s" % env.get('submission_id', '???'), traceback.format_exc(exc_info)) except (socket.error, SMTPException), e: logger.error("An error occurred while sending email: %s", e.message) return env
gpl-3.0
4,431,240,814,176,651,300
29.703448
75
0.66442
false
joaormatos/anaconda
mmfparser/data/checksum.py
1
2357
# Copyright (c) Mathias Kaerlev 2012. # This file is part of Anaconda. # Anaconda is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # Anaconda is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with Anaconda. If not, see <http://www.gnu.org/licenses/>. from mmfparser.bytereader import ByteReader import struct def wrap(value): return value & 0xFFFFFFFF def wrap_signed_char(value): value = value & 0xFF if value > 127: value -= 256 return value def make_checksum(data): result = 0 bufferOffset = 0 numberOfBytes = len(data) numberOfReads = numberOfBytes >> 2 for _ in xrange(numberOfReads): newInt, = struct.unpack_from('<I', data, bufferOffset) result = newInt + (wrap(result) >> 31) + 2 * result result = wrap(result) bufferOffset += 4 for _ in xrange(numberOfBytes & 3): v7 = (wrap(result) >> 31) + struct.unpack_from('<B', data, bufferOffset)[0] bufferOffset += 1 result = wrap(v7 + 2*result) return wrap(result) GROUP_WORDS = list('mqojhm:qskjhdsmkjsmkdjhq\x63clkcdhdlkjhd') def make_group_checksum(password, group_name): v4 = 57 for c in group_name: v4 += ord(c) ^ 0x7F v5 = 0 for c in password: v4 += wrap_signed_char(ord(GROUP_WORDS[v5]) + (ord(c) ^ 0xC3)) ^ 0xF3 v5 += 1 if v5 > len(GROUP_WORDS): v5 = 0 return v4 def make_pame_checksum(data): checksum = make_checksum(data) lastByte = checksum & 0x000000FF # get last byte xorByte = lastByte ^ 13 checksum = checksum & 0xFFFFFF00 | xorByte return int(checksum) class Checksum(object): data = None def __init__(self, data = None): if data: self.data = data def getChecksum(self): return make_pame_checksum(self.data) if __name__ == '__main__': print hex(make_group_checksum('klonoafan', 'yay'))
gpl-3.0
1,744,356,720,394,358,800
29.230769
83
0.647857
false
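The checksum helpers in the record above operate on raw byte strings; a quick calling sketch (Python 2, matching the module; the sample data and password are arbitrary, and the import path is assumed from the record's listed path).

# Sketch only: assumes the module above is importable at its listed path.
from mmfparser.data.checksum import (make_checksum, make_pame_checksum,
                                     make_group_checksum)

data = 'CHUNK' * 10                              # any byte string
print hex(make_checksum(data))                   # raw 32-bit rolling checksum
print hex(make_pame_checksum(data))              # same value with its last byte XORed with 13
print make_group_checksum('secret', 'MyGroup')   # password/group variant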
rlbabyuk/integration_tests
cfme/containers/provider/__init__.py
1
12639
from functools import partial from random import sample from navmazing import NavigateToSibling, NavigateToAttribute from cfme.common.provider import BaseProvider from cfme.fixtures import pytest_selenium as sel from cfme.web_ui import ( Quadicon, Form, AngularSelect, form_buttons, Input, toolbar as tb, InfoBlock, Region, paginator, match_location, PagedTable, CheckboxTable) from cfme.web_ui.tabstrip import TabStripForm from utils import deferred_verpick, version from utils.appliance import Navigatable from utils.appliance.implementations.ui import navigator, CFMENavigateStep, navigate_to from utils.browser import ensure_browser_open from utils.pretty import Pretty from utils.varmeth import variable paged_tbl = PagedTable(table_locator="//div[@id='list_grid']//table") cfg_btn = partial(tb.select, 'Configuration') mon_btn = partial(tb.select, 'Monitoring') pol_btn = partial(tb.select, 'Policy') details_page = Region(infoblock_type='detail') properties_form = Form( fields=[ ('type_select', AngularSelect('server_emstype')), ('name_text', Input('name')), ('hostname_text', Input('hostname')), ('port_text', Input('port')) ]) properties_form_56 = TabStripForm( fields=[ ('type_select', AngularSelect('ems_type')), ('name_text', Input('name')) ], tab_fields={ "Default": [ ('hostname_text', Input("default_hostname")), ('port_text', Input("default_api_port")), ('sec_protocol', AngularSelect("default_security_protocol", exact=True)), ], "Hawkular": [ ('hawkular_hostname', Input("hawkular_hostname")), ('hawkular_api_port', Input("hawkular_api_port")) ], }) properties_form_58 = TabStripForm( fields=[ ('type_select', AngularSelect('ems_type')), ('name_text', Input('name')) ], tab_fields={ "Default": [ ('hostname_text', Input("default_hostname")), ('port_text', Input("default_api_port")), ('sec_protocol', AngularSelect("default_security_protocol", exact=True)), ('trusted_ca_certificates', Input("default_tls_ca_certs")) ], "Hawkular": [ ('hawkular_hostname', Input("hawkular_hostname")), ('hawkular_api_port', Input("hawkular_api_port")), ('hawkular_sec_protocol', AngularSelect("hawkular_security_protocol", exact=True)), ('hawkular_ca_certificates', Input("hawkular_tls_ca_certs")) ], }) prop_region = Region( locators={ 'properties_form': { version.LOWEST: properties_form, '5.6': properties_form_56, '5.8': properties_form_58 } } ) match_page = partial(match_location, controller='ems_container', title='Containers Providers') class ContainersProvider(BaseProvider, Pretty): provider_types = {} in_version = ('5.5', version.LATEST) category = "container" pretty_attrs = ['name', 'key', 'zone'] STATS_TO_MATCH = [ 'num_project', 'num_service', 'num_replication_controller', 'num_pod', 'num_node', 'num_image_registry', 'num_container'] # TODO add 'num_volume' string_name = "Containers" page_name = "containers" detail_page_suffix = 'provider_detail' edit_page_suffix = 'provider_edit_detail' refresh_text = "Refresh items and relationships" quad_name = None db_types = ["ContainerManager"] _properties_region = prop_region # This will get resolved in common to a real form add_provider_button = deferred_verpick( {version.LOWEST: form_buttons.FormButton("Add this Containers Provider"), '5.6': form_buttons.add}) save_button = deferred_verpick( {version.LOWEST: form_buttons.save, '5.6': form_buttons.angular_save}) def __init__( self, name=None, credentials=None, key=None, zone=None, hostname=None, port=None, sec_protocol=None, hawkular_sec_protocol=None, provider_data=None, appliance=None): Navigatable.__init__(self, 
appliance=appliance) if not credentials: credentials = {} self.name = name self.credentials = credentials self.key = key self.zone = zone self.hostname = hostname self.port = port self.sec_protocol = sec_protocol self.hawkular_sec_protocol = hawkular_sec_protocol self.provider_data = provider_data def _on_detail_page(self): """ Returns ``True`` if on the providers detail page, ``False`` if not.""" ensure_browser_open() return sel.is_displayed( '//div//h1[contains(., "{} (Summary)")]'.format(self.name)) def load_details(self, refresh=False): navigate_to(self, 'Details') if refresh: tb.refresh() def get_detail(self, *ident): """ Gets details from the details infoblock Args: *ident: An InfoBlock title, followed by the Key name, e.g. "Relationships", "Images" Returns: A string representing the contents of the InfoBlock's value. """ navigate_to(self, 'Details') return details_page.infoblock.text(*ident) @variable(alias='db') def num_project(self): return self._num_db_generic('container_projects') @num_project.variant('ui') def num_project_ui(self): return int(self.get_detail("Relationships", "Projects")) @variable(alias='db') def num_service(self): return self._num_db_generic('container_services') @num_service.variant('ui') def num_service_ui(self): if self.appliance.version < "5.7": name = "Services" else: name = "Container Services" return int(self.get_detail("Relationships", name)) @variable(alias='db') def num_replication_controller(self): return self._num_db_generic('container_replicators') @num_replication_controller.variant('ui') def num_replication_controller_ui(self): return int(self.get_detail("Relationships", "Replicators")) @variable(alias='db') def num_container_group(self): return self._num_db_generic('container_groups') @num_container_group.variant('ui') def num_container_group_ui(self): return int(self.get_detail("Relationships", "Pods")) @variable(alias='db') def num_pod(self): # potato tomato return self.num_container_group() @num_pod.variant('ui') def num_pod_ui(self): # potato tomato return self.num_container_group(method='ui') @variable(alias='db') def num_node(self): return self._num_db_generic('container_nodes') @num_node.variant('ui') def num_node_ui(self): return int(self.get_detail("Relationships", "Nodes")) @variable(alias='db') def num_container(self): # Containers are linked to providers through container definitions and then through pods res = self.appliance.db.engine.execute( "SELECT count(*) " "FROM ext_management_systems, container_groups, container_definitions, containers " "WHERE containers.container_definition_id=container_definitions.id " "AND container_definitions.container_group_id=container_groups.id " "AND container_groups.ems_id=ext_management_systems.id " "AND ext_management_systems.name='{}'".format(self.name)) return int(res.first()[0]) @num_container.variant('ui') def num_container_ui(self): return int(self.get_detail("Relationships", "Containers")) @variable(alias='db') def num_image(self): return self._num_db_generic('container_images') @num_image.variant('ui') def num_image_ui(self): if self.appliance.version < "5.7": name = "Images" else: name = "Container Images" return int(self.get_detail("Relationships", name)) @variable(alias='db') def num_image_registry(self): return self._num_db_generic('container_image_registries') @num_image_registry.variant('ui') def num_image_registry_ui(self): return int(self.get_detail("Relationships", "Image Registries")) @navigator.register(ContainersProvider, 'All') class All(CFMENavigateStep): prerequisite = 
NavigateToAttribute('appliance.server', 'LoggedIn') def am_i_here(self): return match_page(summary='Pods') def step(self): self.prerequisite_view.navigation.select('Compute', 'Containers', 'Providers') def resetter(self): # Reset view and selection tb.select("Grid View") sel.check(paginator.check_all()) sel.uncheck(paginator.check_all()) @navigator.register(ContainersProvider, 'Add') class Add(CFMENavigateStep): prerequisite = NavigateToSibling('All') def step(self): cfg_btn(version.pick({ version.LOWEST: 'Add a New Containers Provider', '5.7': 'Add Existing Containers Provider' })) @navigator.register(ContainersProvider, 'Details') class Details(CFMENavigateStep): prerequisite = NavigateToSibling('All') def am_i_here(self): return match_page(summary="{} (Summary)".format(self.obj.name)) def step(self): sel.click(Quadicon(self.obj.name, self.obj.quad_name)) def resetter(self): tb.select("Summary View") @navigator.register(ContainersProvider, 'Edit') class Edit(CFMENavigateStep): prerequisite = NavigateToSibling('All') def step(self): sel.check(Quadicon(self.obj.name, self.obj.quad_name).checkbox()) cfg_btn('Edit Selected Containers Provider') @navigator.register(ContainersProvider, 'EditFromDetails') class EditFromDetails(CFMENavigateStep): prerequisite = NavigateToSibling('Details') def step(self): cfg_btn('Edit this Containers Provider') @navigator.register(ContainersProvider, 'EditTags') class EditTags(CFMENavigateStep): prerequisite = NavigateToSibling('All') def step(self): sel.check(Quadicon(self.obj.name, self.obj.quad_name).checkbox()) pol_btn('Edit Tags') @navigator.register(ContainersProvider, 'EditTagsFromDetails') class EditTagsFromDetails(CFMENavigateStep): prerequisite = NavigateToSibling('Details') def step(self): pol_btn('Edit Tags') @navigator.register(ContainersProvider, 'TimelinesFromDetails') class TimelinesFromDetails(CFMENavigateStep): prerequisite = NavigateToSibling('Details') def step(self): mon_btn('Timelines') @navigator.register(ContainersProvider, 'TopologyFromDetails') class TopologyFromDetails(CFMENavigateStep): prerequisite = NavigateToSibling('Details') def step(self): sel.click(InfoBlock('Overview', 'Topology')) # Common methods: class ContainersTestItem(object): """This is a generic test item. Especially used for parametrized functions """ __test__ = False def __init__(self, obj, polarion_id, **additional_attrs): """Args: * obj: The container object in this test (e.g. Image) * The polarion test case ID """ self.obj = obj self.polarion_id = polarion_id for name, value in additional_attrs.items(): self.__setattr__(name, value) def pretty_id(self): return '{} ({})'.format( getattr(self.obj, '__name__', str(self.obj)), self.polarion_id) def navigate_and_get_rows(provider, obj, count, table_class=CheckboxTable, silent_failure=False): """Get <count> random rows from the obj list table, if <count> is greater that the number of rows, return number of rows. Args: provider: containers provider obj: the containers object table: the object's Table object count: number of random rows to return silent_failure: If True and no records found for obj, it'll return None instead of raise exception return: list of rows""" navigate_to(obj, 'All') tb.select('List View') if sel.is_displayed_text("No Records Found.") and silent_failure: return paginator.results_per_page(1000) table = table_class(table_locator="//div[@id='list_grid']//table") rows = table.rows_as_list() if not rows: return [] return sample(rows, min(count, len(rows)))
gpl-2.0
-922,744,435,905,846,400
30.997468
96
0.630192
false
mdraeger/gmapcatcher
gmapcatcher/widgets/widComboBoxEntry.py
1
4319
# -*- coding: utf-8 -*- ## @package gmapcatcher.widgets.widComboBoxEntry # ComboBoxEntry widget used to collect data to search import gtk import re from gmapcatcher.mapConst import * ## This widget is where we collect data to search class ComboBoxEntry(gtk.ComboBoxEntry): DEFAULT_TEXT = "Enter location here!" def __init__(self, confirm_clicked, conf): super(ComboBoxEntry, self).__init__() self.connect('changed', self.changed_combo, confirm_clicked) self.connect('key-press-event', self.key_press_combo) # Launch clean_entry for all the signals/events below self.child.connect("button-press-event", self.clean_entry) self.child.connect("cut-clipboard", self.clean_entry) self.child.connect("copy-clipboard", self.clean_entry) self.child.connect("paste-clipboard", self.clean_entry) self.child.connect("move-cursor", self.clean_entry) self.child.connect("populate-popup", self.populate_popup, conf) # Launch the default_entry on the focus out self.child.connect("focus-out-event", self.default_entry) # Start search after hit 'ENTER' self.child.connect('activate', confirm_clicked) ## Clean out the entry box if text = default def clean_entry(self, *args): if (self.child.get_text() == self.DEFAULT_TEXT): self.child.set_text("") self.child.grab_focus() ## Reset the default text if entry is empty def default_entry(self, *args): if (self.child.get_text().strip() == ''): self.child.set_text(self.DEFAULT_TEXT) ## Add a new item to the menu of the EntryBox def populate_popup(self, w, menu, conf): def menuitem_response(w, string, conf): conf.match_func = string subMenu = gtk.Menu() for item in ENTRY_SUB_MENU: iMenuItem = gtk.RadioMenuItem(None, item) iMenuItem.set_active(item == conf.match_func) iMenuItem.connect("activate", menuitem_response, item, conf) subMenu.append(iMenuItem) menuItem = gtk.MenuItem() menu.append(menuItem) menuItem = gtk.MenuItem('Auto-Completion Method') menuItem.set_submenu(subMenu) menu.append(menuItem) menu.show_all() ## Show the combo list if is not empty def combo_popup(self): if self.get_model().get_iter_root() is not None: self.popup() ## Handles the pressing of arrow keys def key_press_combo(self, w, event): if event.keyval in [65362, 65364]: self.combo_popup() return True ## Handles the change event of the ComboBox def changed_combo(self, w, confirm_clicked): str = self.child.get_text() if (str.endswith(SEPARATOR)): self.child.set_text(str.strip()) confirm_clicked(None) ## Set the auto-completion for the entry box def set_completion(self, ctx_map, confirm_clicked, conf): completion = gtk.EntryCompletion() completion.connect('match-selected', self.on_completion_match, confirm_clicked) self.child.set_completion(completion) completion.set_model(ctx_map.completion_model()) completion.set_text_column(0) completion.set_minimum_key_length(3) completion.set_match_func(self.match_func, conf) # Populate the dropdownlist self.set_model(ctx_map.completion_model(SEPARATOR)) self.set_text_column(0) ## Automatically display after selecting def on_completion_match(self, completion, model, iter, confirm_clicked): self.child.set_text(model[iter][0]) confirm_clicked(None) ## Match function for the auto-completion def match_func(self, completion, key, iter, conf): model = completion.get_model() key = key.lower() text = model.get_value(iter, 0).lower() if conf.match_func == ENTRY_SUB_MENU[STARTS_WITH]: return text.startswith(key) elif conf.match_func == ENTRY_SUB_MENU[ENDS_WITH]: return text.endswith(key) elif conf.match_func == ENTRY_SUB_MENU[REGULAR_EXPRESSION]: p = re.compile(key, re.IGNORECASE) 
return (p.search(text) is not None) else: return (text.find(key) != -1)
gpl-2.0
-1,651,217,596,343,355,100
38.623853
87
0.634869
false
Som-Energia/somenergia-tomatic
tomatic_sandbox.py
1
2204
#!/usr/bin/env python # -*- coding: utf-8 -*- import click import re from consolemsg import warn, step, error, u from datetime import datetime, timedelta from shutil import copyfile from pathlib import Path from slugify import slugify @click.command() @click.help_option() @click.option('-d', '--description', help="Description tagline to add to the schedule", ) @click.option('--fromdate', default=datetime.today().strftime("%Y-%m-%d"), help="Choose a monday for computing schedules. Format: YYYY-MM-DD", ) @click.option('--linenumber', default=7, help="Choose the numer of lines to attend calls", ) def tomatic_sandbox(fromdate, description, linenumber): try: step("Generating graella sandbox for week {}",fromdate) fromdate = datetime.strptime(fromdate, '%Y-%m-%d') if not fromdate.weekday() == 0: fromdate = fromdate + timedelta(days=-fromdate.weekday(), weeks=1) graellaFolder = fromdate.strftime("%Y-%m-%d") if description: graellaFolder = '{}-{}'.format(graellaFolder, slugify(description)) step("Generating directory {}", graellaFolder) Path(graellaFolder).mkdir() linkCertificate = Path(graellaFolder+'/drive-certificate.json') step("Creating certificate link {}", linkCertificate) linkCertificate.symlink_to('../drive-certificate.json') source = Path('config.yaml') destination = Path(graellaFolder+'/config.yaml') step("Creating file {}", source) copyfile(u(source), u(destination)) if linenumber: step("Adding number of lines {} to file {}", linenumber, source) text = destination.read_text() text2fix = re.compile(r'nTelefons: \d+') text = text.replace(text2fix.findall(text)[0], "nTelefons: "+str(linenumber)) destination.write_text(text) source = Path('holidays.conf') destination = Path(graellaFolder+'/holidays.conf') step("Creating {} file", source) copyfile(u(source), u(destination)) except Exception as e: error(e) raise if __name__ == '__main__': tomatic_sandbox() # vim: et ts=4 sw=4
gpl-3.0
4,193,262,625,800,079,000
31.411765
89
0.635662
false
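The nTelefons substitution in the tomatic_sandbox.py record above rewrites one line of the copied config; the same findall/replace idea in isolation, on a fabricated config text:

# Illustrative only: the pattern used above, applied to fake data.
import re

text = u"names: [alice, bob]\nnTelefons: 5\n"
text2fix = re.compile(r'nTelefons: \d+')
text = text.replace(text2fix.findall(text)[0], "nTelefons: " + str(9))
assert "nTelefons: 9" in text
# re.sub(r'nTelefons: \d+', 'nTelefons: 9', text) would achieve the same in one call.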
mellenburg/dcos
dcos_installer/test_backend.py
1
13478
import json import logging import os import subprocess import textwrap import uuid import boto3 import passlib.hash import pytest from dcos_installer import backend from dcos_installer.config import Config, make_default_config_if_needed, to_config os.environ["BOOTSTRAP_ID"] = "12345" def test_password_hash(): """Tests that the password hashing method creates de-cryptable hash """ password = 'DcosTestingPassword!@#' # only reads from STDOUT hash_pw = subprocess.check_output(['dcos_installer', '--hash-password', password]) print(hash_pw) hash_pw = hash_pw.decode('ascii').strip('\n') assert passlib.hash.sha512_crypt.verify(password, hash_pw), 'Hash does not match password' def test_set_superuser_password(tmpdir): """Test that --set-superuser-hash works""" with tmpdir.as_cwd(): tmpdir.join('genconf').ensure(dir=True) # TODO(cmaloney): Add tests for the behavior around a non-existent config.yaml # Setting in a non-empty config.yaml which has no password set make_default_config_if_needed('genconf/config.yaml') assert 'superuser_password_hash' not in Config('genconf/config.yaml').config # Set the password create_fake_build_artifacts(tmpdir) subprocess.check_call(['dcos_installer', '--set-superuser-password', 'foo'], cwd=str(tmpdir)) # Check that config.yaml has the password set config = Config('genconf/config.yaml') assert passlib.hash.sha512_crypt.verify('foo', config['superuser_password_hash']) def test_generate_node_upgrade_script(tmpdir, monkeypatch): upgrade_config = """ --- # The name of your DC/OS cluster. Visable in the DC/OS user interface. cluster_name: 'DC/OS' master_discovery: static exhibitor_storage_backend: 'static' resolvers: - 8.8.8.8 - 8.8.4.4 ssh_port: 22 process_timeout: 10000 bootstrap_url: file:///opt/dcos_install_tmp master_list: ['10.0.0.1', '10.0.0.2', '10.0.0.5'] """ monkeypatch.setenv('BOOTSTRAP_VARIANT', '') create_config(upgrade_config, tmpdir) create_fake_build_artifacts(tmpdir) output = subprocess.check_output(['dcos_installer', '--generate-node-upgrade-script', 'fake'], cwd=str(tmpdir)) assert output.decode('utf-8').splitlines()[-1].split("Node upgrade script URL: ", 1)[1]\ .endswith("dcos_node_upgrade.sh") try: subprocess.check_output(['dcos_installer', '--generate-node-upgrade-script'], cwd=str(tmpdir)) except subprocess.CalledProcessError as e: print(e.output) assert e.output.decode('ascii') == "Must provide the version of the cluster upgrading from\n" else: raise Exception("Test passed, this should not pass without specifying a version number") def test_version(monkeypatch): monkeypatch.setenv('BOOTSTRAP_VARIANT', 'some-variant') version_data = subprocess.check_output(['dcos_installer', '--version']).decode() assert json.loads(version_data) == { 'version': '1.12-dev', 'variant': 'some-variant' } def test_good_create_config_from_post(tmpdir): """ Test that it creates the config """ # Create a temp config workspace = tmpdir.strpath temp_config_path = workspace + '/config.yaml' make_default_config_if_needed(temp_config_path) temp_ip_detect_path = workspace + '/ip-detect' f = open(temp_ip_detect_path, "w") f.write("#/bin/bash foo") good_post_data = { "agent_list": ["10.0.0.2"], "master_list": ["10.0.0.1"], "cluster_name": "Good Test", "resolvers": ["4.4.4.4"], "ip_detect_filename": temp_ip_detect_path } expected_good_messages = {} create_fake_build_artifacts(tmpdir) with tmpdir.as_cwd(): messages = backend.create_config_from_post( post_data=good_post_data, config_path=temp_config_path) assert messages == expected_good_messages def 
test_bad_create_config_from_post(tmpdir): # Create a temp config workspace = tmpdir.strpath temp_config_path = workspace + '/config.yaml' make_default_config_if_needed(temp_config_path) bad_post_data = { "agent_list": "foo", "master_list": ["foo"], } expected_bad_messages = { "agent_list": "Must be a JSON formatted list, but couldn't be parsed the given value `foo` as " "one because of: Expecting value: line 1 column 1 (char 0)", "master_list": 'Invalid IPv4 addresses in list: foo', } create_fake_build_artifacts(tmpdir) with tmpdir.as_cwd(): messages = backend.create_config_from_post( post_data=bad_post_data, config_path=temp_config_path) assert messages == expected_bad_messages def test_do_validate_config(tmpdir, monkeypatch): monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant') # Create a temp config genconf_dir = tmpdir.join('genconf') genconf_dir.ensure(dir=True) temp_config_path = str(genconf_dir.join('config.yaml')) # Initialize with defautls make_default_config_if_needed(temp_config_path) create_fake_build_artifacts(tmpdir) expected_output = { 'ip_detect_contents': 'ip-detect script `genconf/ip-detect` must exist', 'ssh_user': 'Must set ssh_user, no way to calculate value.', 'master_list': 'Must set master_list, no way to calculate value.', 'ssh_key_path': 'could not find ssh private key: genconf/ssh_key' } with tmpdir.as_cwd(): assert Config(config_path='genconf/config.yaml').do_validate(include_ssh=True) == expected_output def test_get_config(tmpdir): workspace = tmpdir.strpath temp_config_path = workspace + '/config.yaml' expected_data = { 'cluster_name': 'DC/OS', 'master_discovery': 'static', 'exhibitor_storage_backend': 'static', 'resolvers': ['8.8.8.8', '8.8.4.4'], 'ssh_port': 22, 'process_timeout': 10000, 'bootstrap_url': 'file:///opt/dcos_install_tmp' } make_default_config_if_needed(temp_config_path) config = Config(temp_config_path) assert expected_data == config.config def test_determine_config_type(tmpdir): # Ensure the default created config is of simple type workspace = tmpdir.strpath temp_config_path = workspace + '/config.yaml' make_default_config_if_needed(temp_config_path) got_output = backend.determine_config_type(config_path=temp_config_path) expected_output = { 'message': '', 'type': 'minimal', } assert got_output == expected_output def test_success(): mock_config = to_config({ 'master_list': ['10.0.0.1', '10.0.0.2', '10.0.0.5'], 'agent_list': ['10.0.0.3', '10.0.0.4'] }) expected_output = { "success": "http://10.0.0.1", "master_count": 3, "agent_count": 2 } expected_output_bad = { "success": "", "master_count": 0, "agent_count": 0 } got_output, code = backend.success(mock_config) mock_config.update({'master_list': '', 'agent_list': ''}) bad_out, bad_code = backend.success(mock_config) assert got_output == expected_output assert code == 200 assert bad_out == expected_output_bad assert bad_code == 400 def test_accept_overrides_for_undefined_config_params(tmpdir): temp_config_path = tmpdir.strpath + '/config.yaml' param = ('fake_test_param_name', 'fake_test_param_value') make_default_config_if_needed(temp_config_path) create_fake_build_artifacts(tmpdir) with tmpdir.as_cwd(): messages = backend.create_config_from_post( post_data=dict([param]), config_path=temp_config_path) assert not messages, "unexpected validation error: {}".format(messages) assert Config(config_path=temp_config_path)[param[0]] == param[1] simple_full_config = """--- cluster_name: DC/OS master_discovery: static exhibitor_storage_backend: static master_list: - 127.0.0.1 bootstrap_url: 
http://example.com """ def test_do_configure(tmpdir, monkeypatch): monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant') create_config(simple_full_config, tmpdir) create_fake_build_artifacts(tmpdir) with tmpdir.as_cwd(): assert backend.do_configure(config_path='genconf/config.yaml') == 0 aws_base_config = """--- # NOTE: Taking advantage of what isn't talked about not being validated so we don't need valid AWS / # s3 credentials in this configuration. aws_template_storage_bucket: psychic aws_template_storage_bucket_path: mofo-the-gorilla aws_template_storage_region_name: us-west-2 aws_template_upload: false """ def test_do_aws_configure(release_config_aws, tmpdir, monkeypatch): monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant') create_config(aws_base_config, tmpdir) create_fake_build_artifacts(tmpdir) with tmpdir.as_cwd(): assert backend.do_aws_cf_configure() == 0 @pytest.fixture def valid_storage_config(release_config_aws): """ Uses the settings from dcos-release.config.yaml ['testing'] to create a new upload and then deletes it when the test is over """ s3_bucket_name = release_config_aws['bucket'] bucket_path = str(uuid.uuid4()) yield """--- master_list: - 127.0.0.1 aws_template_storage_bucket: {bucket} aws_template_storage_bucket_path: {bucket_path} aws_template_upload: true """.format( bucket=release_config_aws['bucket'], bucket_path=bucket_path) session = boto3.session.Session() s3 = session.resource('s3') s3_bucket = s3.Bucket(s3_bucket_name) for o in s3_bucket.objects.filter(Prefix=bucket_path): o.delete() def test_do_aws_cf_configure_valid_storage_config(release_config_aws, valid_storage_config, tmpdir, monkeypatch): assert aws_cf_configure(valid_storage_config, tmpdir, monkeypatch) == 0 # TODO: add an assertion that the config that was resolved inside do_aws_cf_configure # ended up with the correct region where the above testing bucket was created. def test_override_aws_template_storage_region_name(release_config_aws, valid_storage_config, tmpdir, monkeypatch): config_str = valid_storage_config config_str += '\naws_template_storage_region_name: {}'.format(os.environ['AWS_DEFAULT_REGION']) assert aws_cf_configure(config_str, tmpdir, monkeypatch) == 0 def aws_cf_configure(config, tmpdir, monkeypatch): monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant') create_config(config, tmpdir) create_fake_build_artifacts(tmpdir) with tmpdir.as_cwd(): return backend.do_aws_cf_configure() def test_do_configure_valid_config_no_duplicate_logging(tmpdir, monkeypatch, caplog): """ Log messages are logged exactly once. """ monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant') create_config(simple_full_config, tmpdir) create_fake_build_artifacts(tmpdir) with tmpdir.as_cwd(): assert backend.do_configure(config_path='genconf/config.yaml') == 0 # The message comes from gen.get_dcosconfig_source_target_and_templates() function expected_message = 'Generating configuration files...' filtered_messages = [rec.message for rec in caplog.records if rec.message == expected_message] assert [expected_message] == filtered_messages def test_do_configure_logs_validation_errors(tmpdir, monkeypatch, caplog): """ Configuration validation errors are logged as `error` messages. 
""" monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant') invalid_config = textwrap.dedent("""--- cluster_name: DC/OS master_discovery: static # Remove `exhibitor_storage_backend` from configuration # exhibitor_storage_backend: static master_list: - 127.0.0.1 bootstrap_url: http://example.com """) create_config(invalid_config, tmpdir) create_fake_build_artifacts(tmpdir) with tmpdir.as_cwd(): assert backend.do_configure(config_path='genconf/config.yaml') == 1 expected_error_message = ( 'exhibitor_storage_backend: Must set exhibitor_storage_backend, ' 'no way to calculate value.' ) error_logs = [rec for rec in caplog.records if rec.message == expected_error_message] assert len(error_logs) == 1 error_log = error_logs[0] assert error_log.levelno == logging.ERROR def create_config(config_str, tmpdir): genconf_dir = tmpdir.join('genconf') genconf_dir.ensure(dir=True) config_path = genconf_dir.join('config.yaml') config_path.write(config_str) genconf_dir.join('ip-detect').write('#!/bin/bash\necho 127.0.0.1') def create_fake_build_artifacts(tmpdir): artifact_dir = tmpdir.join('artifacts/bootstrap') artifact_dir.ensure(dir=True) artifact_dir.join('12345.bootstrap.tar.xz').write('contents_of_bootstrap', ensure=True) artifact_dir.join('12345.active.json').write('["package--version"]', ensure=True) artifact_dir.join('test_variant.bootstrap.latest').write("12345") tmpdir.join('artifacts/complete/test_variant.complete.latest.json').write( '{"bootstrap": "12345", "packages": ["package--version"]}', ensure=True, ) tmpdir.join('artifacts/complete/complete.latest.json').write( '{"bootstrap": "12345", "packages": ["package--version"]}', ensure=True, ) tmpdir.join('artifacts/packages/package/package--version.tar.xz').write('contents_of_package', ensure=True)
apache-2.0
-808,577,008,452,427,300
33.647815
115
0.667606
false
kain88-de/mdanalysis
testsuite/MDAnalysisTests/test_failure.py
1
1352
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*- # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDAnalysis --- http://www.mdanalysis.org # Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors # (see the file AUTHORS for the full list of names) # # Released under the GNU Public Licence, v2 or any higher version # # Please cite your use of MDAnalysis in published work: # # R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler, # D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein. # MDAnalysis: A Python package for the rapid analysis of molecular dynamics # simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th # Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy. # # N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein. # MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations. # J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787 # from __future__ import absolute_import import os def test_failure(): """Fail if the MDA_FAILURE_TEST environment variable is set. """ # Have a file open to trigger an output from the open_files plugin. f = open('./failure.txt', 'w') if u'MDA_FAILURE_TEST' in os.environ: assert False
gpl-2.0
7,582,456,156,020,400,000
39.969697
79
0.701923
false
hooram/ownphotos-backend
densecap/webcam/server2.py
1
3086
import argparse, random, os, time, json from PIL import Image from io import BytesIO import base64 from flask import Flask, request from flask.ext.cors import CORS from flask_restful import Resource, Api import ipdb app = Flask(__name__) app.config['DEBUG'] = True ext2conttype2 = { "jpg": "JPEG", "jpeg": "JPEG", "png": "PNG", "gif": "GIF", "image/jpeg": "JPEG", "image/png": "PNG", "image/gif": "GIF" } ext2conttype = { "jpg": "image/jpeg", "jpeg": "image/jpeg", "png": "image/png", "gif": "image/gif" } input_dir = 'webcam/inputs' output_dir = 'webcam/outputs' @app.route('/media/upload',methods=['POST','GET']) def densecap(): if request.method=='POST': ipdb.set_trace() file = request.files['file'] if file and file.filename: img_id = random.randint(1,1000000) img_path = os.path.join(input_dir, '%d.jpg' % img_id) filename = file.filename extension = filename[filename.rfind(".")+1:].lower() content_type = ext2conttype[extension] image = Image.open(file) image.save(img_path) json_name = os.path.join(output_dir, '%d,json' % img_id) while not os.path.isfile(json_name): time.sleep(0.05) with open(json_name, 'r') as f: ann = json.load(f) os.remove(json_name) return ann else: return 'error 2' else: return 'running' class DenseCap(Resource): def get(self): return 'The DenseCap server seems to be running!' def post(self): img_id = random.randint(1, 1000000) img_name = os.path.join(input_dir, '%d.jpg' % img_id) # Get the base64 image data out of the request. # for some reason Flask doesn't parse this out at all for use, so we'll just # do it manually. There is a prefix telling us that this is an image and the # type of the image, then a comma, then the raw base64 data for the image. # We just grab the part after the comma and decode it. idx = request.data.find(',') + 1 img_data = request.data[idx:] im = Image.open(BytesIO(base64.b64decode(img_data))) im.save(img_name) # request.files['image'].save(img_name) json_name = os.path.join(output_dir, '%d.json' % img_id) while not os.path.isfile(json_name): time.sleep(0.05) with open(json_name, 'r') as f: ann = json.load(f) os.remove(json_name) return ann if __name__ == '__main__': app.run(debug=True) # from tornado.wsgi import WSGIContainer # from tornado.httpserver import HTTPServer # from tornado.ioloop import IOLoop # # http_server = HTTPServer(WSGIContainer(app), ssl_options={ # 'certfile': 'webcam/ssl/server.crt', # 'keyfile': 'webcam/ssl/server.key' # }) # # http_server.listen(5000) # # # We have to do a little weirdness to make the server actually die # # when we hit CTRL+C # try: # IOLoop.instance().start() # except KeyboardInterrupt: # IOLoop.instance().stop()
mit
3,539,640,382,857,798,700
25.152542
80
0.602722
false
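The DenseCap resource in the record above reads a base64 data-URI from the raw request body and decodes everything after the first comma; a hypothetical client sketch follows — the resource is not registered to a route in the snippet shown, so the URL and image path are placeholders.

# Hypothetical client; endpoint URL and image path are placeholders.
import base64
import requests

with open('photo.jpg', 'rb') as f:
    body = 'data:image/jpeg;base64,' + base64.b64encode(f.read()).decode('ascii')

# The server splits on the first ',' and base64-decodes the remainder.
resp = requests.post('http://localhost:5000/densecap', data=body)
print(resp.json())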
Lucretiel/genetics
test/test_dna/test_binary.py
1
2361
import random from genetics.dna.binary import DNABinary def test_basic_init(): init = (True, False, True, True, False, False, False, True, True, False) x = DNABinary(init) for component, b in zip(x, init): assert component == b def test_string_init(): init = '1000101011110101010010100101001010101110101010000000111' x = DNABinary(init) def convert_ones_zeroes(c): if c == '1': return True elif c == '0': return False return None init_bools = [convert_ones_zeroes(c) for c in init] for component, b in zip(x, init_bools): assert component == b def test_length_init(): x = DNABinary(100) assert len(x) == 100 for component in x: assert component is True or component is False def test_total_length(): x = DNABinary(100) assert x.total_length() == 100 def test_deterministic_combine(): dna1 = DNABinary(True for _ in range(100)) dna2 = DNABinary(False for _ in range(100)) combine_mask = [True if i < 25 else False for i in range(100)] dna3, dna4 = dna1.combine(dna2, combine_mask) for component1, component2, mask in zip(dna3, dna4, combine_mask): if mask: assert component1 is True assert component2 is False else: assert component1 is False assert component2 is True def test_random_combine(): dna1 = DNABinary(100) dna2 = DNABinary(100) combine_mask = [random.choice((True, False)) for _ in range(100)] dna3, dna4 = dna1.combine(dna2, combine_mask) for parent1, parent2, child1, child2, mask in zip(dna1, dna2, dna3, dna4, combine_mask): if mask: assert child1 == parent1 assert child2 == parent2 else: assert child1 == parent2 assert child2 == parent1 def test_mutation(): dna = DNABinary(False for _ in range(100)) mask = [True if i % 2 == 0 else False for i in range(100)] mutated = dna.mutate(mask) # Test that the original DNA is untouched for b in dna: assert b is False for b, mask in zip(mutated, mask): if not mask: assert b is False def test_element_access(): x = [True, False, False, True] dna = DNABinary(x) for i in range(4): assert dna[i] == x[i]
lgpl-2.1
940,507,718,833,275,900
22.61
92
0.603981
false
asntech/jaspar
portal/migrations/0002_auto_20170617_1217.py
1
1491
# -*- coding: utf-8 -*- # Generated by Django 1.11 on 2017-06-17 12:17 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('portal', '0001_initial'), ] operations = [ migrations.CreateModel( name='NewsAndUpdate', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=25)), ('body', models.TextField()), ('category', models.CharField(choices=[('realese', 'New release'), ('bug', 'Bug fix'), ('announcement', 'Announcement')], max_length=150)), ('date', models.DateTimeField(auto_now_add=True)), ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), # migrations.AlterModelOptions( # name='matrixannotation', # options={'managed': False}, # ), # migrations.AlterModelOptions( # name='matrixprotein', # options={'managed': False}, # ), # migrations.AlterModelOptions( # name='matrixspecies', # options={'managed': False}, # ), ]
bsd-3-clause
3,353,032,720,810,922,000
35.365854
155
0.574782
false
gwind/YWeb
yweb/yweb/utils/translation/trans_real.py
1
25606
"""Translation helper functions.""" from __future__ import unicode_literals import locale import os import re import sys import gettext as gettext_module from threading import local import warnings from yweb.utils.importlib import import_module from yweb.utils.datastructures import SortedDict from yweb.utils.encoding import force_str, force_text from yweb.utils.functional import memoize from yweb.utils._os import upath from yweb.utils.safestring import mark_safe, SafeData from yweb.utils import six from yweb.utils.six import StringIO from yweb.utils.translation import TranslatorCommentWarning # Translations are cached in a dictionary for every language+app tuple. # The active translations are stored by threadid to make them thread local. _translations = {} _active = local() # The default translation is based on the settings file. _default = None # This is a cache for normalized accept-header languages to prevent multiple # file lookups when checking the same locale on repeated requests. _accepted = {} _checked_languages = {} # magic gettext number to separate context from message CONTEXT_SEPARATOR = "\x04" # Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9 # and RFC 3066, section 2.1 accept_language_re = re.compile(r''' ([A-Za-z]{1,8}(?:-[A-Za-z0-9]{1,8})*|\*) # "en", "en-au", "x-y-z", "es-419", "*" (?:\s*;\s*q=(0(?:\.\d{,3})?|1(?:.0{,3})?))? # Optional "q=1.00", "q=0.8" (?:\s*,\s*|$) # Multiple accepts per header. ''', re.VERBOSE) language_code_prefix_re = re.compile(r'^/([\w-]+)(/|$)') def to_locale(language, to_lower=False): """ Turns a language name (en-us) into a locale name (en_US). If 'to_lower' is True, the last component is lower-cased (en_us). """ p = language.find('-') if p >= 0: if to_lower: return language[:p].lower()+'_'+language[p+1:].lower() else: # Get correct locale for sr-latn if len(language[p+1:]) > 2: return language[:p].lower()+'_'+language[p+1].upper()+language[p+2:].lower() return language[:p].lower()+'_'+language[p+1:].upper() else: return language.lower() def to_language(locale): """Turns a locale name (en_US) into a language name (en-us).""" p = locale.find('_') if p >= 0: return locale[:p].lower()+'-'+locale[p+1:].lower() else: return locale.lower() class DjangoTranslation(gettext_module.GNUTranslations): """ This class sets up the GNUTranslations context with regard to output charset. """ def __init__(self, *args, **kw): gettext_module.GNUTranslations.__init__(self, *args, **kw) self.set_output_charset('utf-8') self.__language = '??' def merge(self, other): self._catalog.update(other._catalog) def set_language(self, language): self.__language = language self.__to_language = to_language(language) def language(self): return self.__language def to_language(self): return self.__to_language def __repr__(self): return "<DjangoTranslation lang:%s>" % self.__language def translation(language): """ Returns a translation object. This translation object will be constructed out of multiple GNUTranslations objects by merging their catalogs. It will construct a object for the requested language and add a fallback to the default language, if it's different from the requested language. 
""" global _translations t = _translations.get(language, None) if t is not None: return t from yweb.conf import settings globalpath = os.path.join(os.path.dirname(upath(sys.modules[settings.__module__].__file__)), 'locale') def _fetch(lang, fallback=None): global _translations res = _translations.get(lang, None) if res is not None: return res loc = to_locale(lang) def _translation(path): try: t = gettext_module.translation('yweb', path, [loc], DjangoTranslation) t.set_language(lang) return t except IOError: return None res = _translation(globalpath) # We want to ensure that, for example, "en-gb" and "en-us" don't share # the same translation object (thus, merging en-us with a local update # doesn't affect en-gb), even though they will both use the core "en" # translation. So we have to subvert Python's internal gettext caching. base_lang = lambda x: x.split('-', 1)[0] if base_lang(lang) in [base_lang(trans) for trans in list(_translations)]: res._info = res._info.copy() res._catalog = res._catalog.copy() def _merge(path): t = _translation(path) if t is not None: if res is None: return t else: res.merge(t) return res for appname in reversed(settings.INSTALLED_APPS): app = import_module(appname) apppath = os.path.join(os.path.dirname(upath(app.__file__)), 'locale') if os.path.isdir(apppath): res = _merge(apppath) for localepath in reversed(settings.LOCALE_PATHS): if os.path.isdir(localepath): res = _merge(localepath) if res is None: if fallback is not None: res = fallback else: return gettext_module.NullTranslations() _translations[lang] = res return res default_translation = _fetch(settings.LANGUAGE_CODE) current_translation = _fetch(language, fallback=default_translation) return current_translation def activate(language): """ Fetches the translation object for a given tuple of application name and language and installs it as the current translation object for the current thread. """ _active.value = translation(language) def deactivate(): """ Deinstalls the currently active translation object so that further _ calls will resolve against the default translation object, again. """ if hasattr(_active, "value"): del _active.value def deactivate_all(): """ Makes the active translation object a NullTranslations() instance. This is useful when we want delayed translations to appear as the original string for some reason. """ _active.value = gettext_module.NullTranslations() def get_language(): """Returns the currently selected language.""" t = getattr(_active, "value", None) if t is not None: try: return t.to_language() except AttributeError: pass # If we don't have a real translation object, assume it's the default language. from yweb.conf import settings return settings.LANGUAGE_CODE def get_language_bidi(): """ Returns selected language's BiDi layout. * False = left-to-right layout * True = right-to-left layout """ from yweb.conf import settings base_lang = get_language().split('-')[0] return base_lang in settings.LANGUAGES_BIDI def catalog(): """ Returns the current active catalog for further processing. This can be used if you need to modify the catalog or want to access the whole message catalog instead of just translating one string. """ global _default t = getattr(_active, "value", None) if t is not None: return t if _default is None: from yweb.conf import settings _default = translation(settings.LANGUAGE_CODE) return _default def do_translate(message, translation_function): """ Translates 'message' using the given 'translation_function' name -- which will be either gettext or ugettext. 
It uses the current thread to find the translation object to use. If no current translation is activated, the message will be run through the default translation object. """ global _default # str() is allowing a bytestring message to remain bytestring on Python 2 eol_message = message.replace(str('\r\n'), str('\n')).replace(str('\r'), str('\n')) t = getattr(_active, "value", None) if t is not None: result = getattr(t, translation_function)(eol_message) else: if _default is None: from yweb.conf import settings _default = translation(settings.LANGUAGE_CODE) result = getattr(_default, translation_function)(eol_message) if isinstance(message, SafeData): return mark_safe(result) return result def gettext(message): """ Returns a string of the translation of the message. Returns a string on Python 3 and an UTF-8-encoded bytestring on Python 2. """ return do_translate(message, 'gettext') if six.PY3: ugettext = gettext else: def ugettext(message): return do_translate(message, 'ugettext') def pgettext(context, message): msg_with_ctxt = "%s%s%s" % (context, CONTEXT_SEPARATOR, message) result = ugettext(msg_with_ctxt) if CONTEXT_SEPARATOR in result: # Translation not found # force unicode, because lazy version expects unicode result = force_text(message) return result def gettext_noop(message): """ Marks strings for translation but doesn't translate them now. This can be used to store strings in global variables that should stay in the base language (because they might be used externally) and will be translated later. """ return message def do_ntranslate(singular, plural, number, translation_function): global _default t = getattr(_active, "value", None) if t is not None: return getattr(t, translation_function)(singular, plural, number) if _default is None: from yweb.conf import settings _default = translation(settings.LANGUAGE_CODE) return getattr(_default, translation_function)(singular, plural, number) def ngettext(singular, plural, number): """ Returns a string of the translation of either the singular or plural, based on the number. Returns a string on Python 3 and an UTF-8-encoded bytestring on Python 2. """ return do_ntranslate(singular, plural, number, 'ngettext') if six.PY3: ungettext = ngettext else: def ungettext(singular, plural, number): """ Returns a unicode strings of the translation of either the singular or plural, based on the number. """ return do_ntranslate(singular, plural, number, 'ungettext') def npgettext(context, singular, plural, number): msgs_with_ctxt = ("%s%s%s" % (context, CONTEXT_SEPARATOR, singular), "%s%s%s" % (context, CONTEXT_SEPARATOR, plural), number) result = ungettext(*msgs_with_ctxt) if CONTEXT_SEPARATOR in result: # Translation not found result = ungettext(singular, plural, number) return result def all_locale_paths(): """ Returns a list of paths to user-provides languages files. """ from yweb.conf import settings globalpath = os.path.join( os.path.dirname(upath(sys.modules[settings.__module__].__file__)), 'locale') return [globalpath] + list(settings.LOCALE_PATHS) def check_for_language(lang_code): """ Checks whether there is a global language file for the given language code. This is used to decide whether a user-provided language is available. This is only used for language codes from either the cookies or session and during format localization. 
""" for path in all_locale_paths(): if gettext_module.find('django', path, [to_locale(lang_code)]) is not None: return True return False check_for_language = memoize(check_for_language, _checked_languages, 1) def get_supported_language_variant(lang_code, supported=None, strict=False): """ Returns the language-code that's listed in supported languages, possibly selecting a more generic variant. Raises LookupError if nothing found. If `strict` is False (the default), the function will look for an alternative country-specific variant when the currently checked is not found. """ if supported is None: from yweb.conf import settings supported = SortedDict(settings.LANGUAGES) if lang_code: # if fr-CA is not supported, try fr-ca; if that fails, fallback to fr. generic_lang_code = lang_code.split('-')[0] variants = (lang_code, lang_code.lower(), generic_lang_code, generic_lang_code.lower()) for code in variants: if code in supported and check_for_language(code): return code if not strict: # if fr-fr is not supported, try fr-ca. for supported_code in supported: if supported_code.startswith((generic_lang_code + '-', generic_lang_code.lower() + '-')): return supported_code raise LookupError(lang_code) def get_language_from_path(path, supported=None, strict=False): """ Returns the language-code if there is a valid language-code found in the `path`. If `strict` is False (the default), the function will look for an alternative country-specific variant when the currently checked is not found. """ if supported is None: from yweb.conf import settings supported = SortedDict(settings.LANGUAGES) regex_match = language_code_prefix_re.match(path) if not regex_match: return None lang_code = regex_match.group(1) try: return get_supported_language_variant(lang_code, supported, strict=strict) except LookupError: return None def get_language_from_request(request, check_path=False): """ Analyzes the request to find what language the user wants the system to show. Only languages listed in settings.LANGUAGES are taken into account. If the user requests a sublanguage where we have a main language, we send out the main language. If check_path is True, the URL path prefix will be checked for a language code, otherwise this is skipped for backwards compatibility. """ global _accepted from yweb.conf import settings supported = SortedDict(settings.LANGUAGES) if check_path: lang_code = get_language_from_path(request.path_info, supported) if lang_code is not None: return lang_code if hasattr(request, 'session'): lang_code = request.session.get('django_language', None) if lang_code in supported and lang_code is not None and check_for_language(lang_code): return lang_code lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME) try: return get_supported_language_variant(lang_code, supported) except LookupError: pass accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '') for accept_lang, unused in parse_accept_lang_header(accept): if accept_lang == '*': break # 'normalized' is the root name of the locale in POSIX format (which is # the format used for the directories holding the MO files). normalized = locale.locale_alias.get(to_locale(accept_lang, True)) if not normalized: continue # Remove the default encoding from locale_alias. normalized = normalized.split('.')[0] if normalized in _accepted: # We've seen this locale before and have an MO file for it, so no # need to check again. 
return _accepted[normalized] try: accept_lang = get_supported_language_variant(accept_lang, supported) except LookupError: continue else: _accepted[normalized] = accept_lang return accept_lang try: return get_supported_language_variant(settings.LANGUAGE_CODE, supported) except LookupError: return settings.LANGUAGE_CODE dot_re = re.compile(r'\S') def blankout(src, char): """ Changes every non-whitespace character to the given char. Used in the templatize function. """ return dot_re.sub(char, src) context_re = re.compile(r"""^\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?'))\s*""") inline_re = re.compile(r"""^\s*trans\s+((?:"[^"]*?")|(?:'[^']*?'))(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?\s*""") block_re = re.compile(r"""^\s*blocktrans(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?(?:\s+|$)""") endblock_re = re.compile(r"""^\s*endblocktrans$""") plural_re = re.compile(r"""^\s*plural$""") constant_re = re.compile(r"""_\(((?:".*?")|(?:'.*?'))\)""") one_percent_re = re.compile(r"""(?<!%)%(?!%)""") def templatize(src, origin=None): """ Turns a Django template into something that is understood by xgettext. It does so by translating the Django translation tags into standard gettext function invocations. """ from yweb.conf import settings from yweb.template import (Lexer, TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK, TOKEN_COMMENT, TRANSLATOR_COMMENT_MARK) src = force_text(src, settings.FILE_CHARSET) out = StringIO() message_context = None intrans = False inplural = False singular = [] plural = [] incomment = False comment = [] lineno_comment_map = {} comment_lineno_cache = None for t in Lexer(src, origin).tokenize(): if incomment: if t.token_type == TOKEN_BLOCK and t.contents == 'endcomment': content = ''.join(comment) translators_comment_start = None for lineno, line in enumerate(content.splitlines(True)): if line.lstrip().startswith(TRANSLATOR_COMMENT_MARK): translators_comment_start = lineno for lineno, line in enumerate(content.splitlines(True)): if translators_comment_start is not None and lineno >= translators_comment_start: out.write(' # %s' % line) else: out.write(' #\n') incomment = False comment = [] else: comment.append(t.contents) elif intrans: if t.token_type == TOKEN_BLOCK: endbmatch = endblock_re.match(t.contents) pluralmatch = plural_re.match(t.contents) if endbmatch: if inplural: if message_context: out.write(' npgettext(%r, %r, %r,count) ' % (message_context, ''.join(singular), ''.join(plural))) else: out.write(' ngettext(%r, %r, count) ' % (''.join(singular), ''.join(plural))) for part in singular: out.write(blankout(part, 'S')) for part in plural: out.write(blankout(part, 'P')) else: if message_context: out.write(' pgettext(%r, %r) ' % (message_context, ''.join(singular))) else: out.write(' gettext(%r) ' % ''.join(singular)) for part in singular: out.write(blankout(part, 'S')) message_context = None intrans = False inplural = False singular = [] plural = [] elif pluralmatch: inplural = True else: filemsg = '' if origin: filemsg = 'file %s, ' % origin raise SyntaxError("Translation blocks must not include other block tags: %s (%sline %d)" % (t.contents, filemsg, t.lineno)) elif t.token_type == TOKEN_VAR: if inplural: plural.append('%%(%s)s' % t.contents) else: singular.append('%%(%s)s' % t.contents) elif t.token_type == TOKEN_TEXT: contents = one_percent_re.sub('%%', t.contents) if inplural: plural.append(contents) else: singular.append(contents) else: # Handle comment tokens (`{# ... 
#}`) plus other constructs on # the same line: if comment_lineno_cache is not None: cur_lineno = t.lineno + t.contents.count('\n') if comment_lineno_cache == cur_lineno: if t.token_type != TOKEN_COMMENT: for c in lineno_comment_map[comment_lineno_cache]: filemsg = '' if origin: filemsg = 'file %s, ' % origin warn_msg = ("The translator-targeted comment '%s' " "(%sline %d) was ignored, because it wasn't the last item " "on the line.") % (c, filemsg, comment_lineno_cache) warnings.warn(warn_msg, TranslatorCommentWarning) lineno_comment_map[comment_lineno_cache] = [] else: out.write('# %s' % ' | '.join(lineno_comment_map[comment_lineno_cache])) comment_lineno_cache = None if t.token_type == TOKEN_BLOCK: imatch = inline_re.match(t.contents) bmatch = block_re.match(t.contents) cmatches = constant_re.findall(t.contents) if imatch: g = imatch.group(1) if g[0] == '"': g = g.strip('"') elif g[0] == "'": g = g.strip("'") g = one_percent_re.sub('%%', g) if imatch.group(2): # A context is provided context_match = context_re.match(imatch.group(2)) message_context = context_match.group(1) if message_context[0] == '"': message_context = message_context.strip('"') elif message_context[0] == "'": message_context = message_context.strip("'") out.write(' pgettext(%r, %r) ' % (message_context, g)) message_context = None else: out.write(' gettext(%r) ' % g) elif bmatch: for fmatch in constant_re.findall(t.contents): out.write(' _(%s) ' % fmatch) if bmatch.group(1): # A context is provided context_match = context_re.match(bmatch.group(1)) message_context = context_match.group(1) if message_context[0] == '"': message_context = message_context.strip('"') elif message_context[0] == "'": message_context = message_context.strip("'") intrans = True inplural = False singular = [] plural = [] elif cmatches: for cmatch in cmatches: out.write(' _(%s) ' % cmatch) elif t.contents == 'comment': incomment = True else: out.write(blankout(t.contents, 'B')) elif t.token_type == TOKEN_VAR: parts = t.contents.split('|') cmatch = constant_re.match(parts[0]) if cmatch: out.write(' _(%s) ' % cmatch.group(1)) for p in parts[1:]: if p.find(':_(') >= 0: out.write(' %s ' % p.split(':',1)[1]) else: out.write(blankout(p, 'F')) elif t.token_type == TOKEN_COMMENT: if t.contents.lstrip().startswith(TRANSLATOR_COMMENT_MARK): lineno_comment_map.setdefault(t.lineno, []).append(t.contents) comment_lineno_cache = t.lineno else: out.write(blankout(t.contents, 'X')) return force_str(out.getvalue()) def parse_accept_lang_header(lang_string): """ Parses the lang_string, which is the body of an HTTP Accept-Language header, and returns a list of (lang, q-value), ordered by 'q' values. Any format errors in lang_string results in an empty list being returned. """ result = [] pieces = accept_language_re.split(lang_string) if pieces[-1]: return [] for i in range(0, len(pieces) - 1, 3): first, lang, priority = pieces[i : i + 3] if first: return [] if priority: priority = float(priority) if not priority: # if priority is 0.0 at this point make it 1.0 priority = 1.0 result.append((lang, priority)) result.sort(key=lambda k: k[1], reverse=True) return result
mit
615,606,380,099,209,600
36.878698
143
0.573772
false
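The to_locale/to_language helpers above define the round trip between HTTP-style language codes ("en-us", "sr-latn") and POSIX locale names ("en_US", "sr_Latn"). A minimal, self-contained sketch of that mapping follows; the function bodies mirror the module's logic and the sample codes are arbitrary, so this is an illustration, not the module itself.

# Illustrative sketch of the language-code <-> locale-name conversion used above.
def to_locale(language, to_lower=False):
    p = language.find('-')
    if p < 0:
        return language.lower()
    if to_lower:
        return language[:p].lower() + '_' + language[p + 1:].lower()
    if len(language[p + 1:]) > 2:
        # e.g. "sr-latn" -> "sr_Latn": capitalise only the first letter of a long variant
        return language[:p].lower() + '_' + language[p + 1].upper() + language[p + 2:].lower()
    return language[:p].lower() + '_' + language[p + 1:].upper()

def to_language(locale):
    p = locale.find('_')
    if p < 0:
        return locale.lower()
    return locale[:p].lower() + '-' + locale[p + 1:].lower()

assert to_locale('en-us') == 'en_US'
assert to_locale('sr-latn') == 'sr_Latn'
assert to_language('en_US') == 'en-us'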
john123951/SmartQQBot
MsgHandler.py
1
6793
# -*- coding: utf-8 -*- # Code by Yinzo: https://github.com/Yinzo # Origin repository: https://github.com/Yinzo/SmartQQBot from Group import * from Pm import * from Sess import * import threading logging.basicConfig( filename='smartqq.log', level=logging.DEBUG, format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s', datefmt='%a, %d %b %Y %H:%M:%S', ) class MsgHandler: def __init__(self, operator): if not isinstance(operator, QQ): raise TypeError("Operator must be a logined QQ instance") self.__operator = operator self.process_threads = {} self.__group_list = {} self.__pm_list = {} self.__sess_list = {} def handle(self, msg_list): assert isinstance(msg_list, list), "msg_list is NOT a LIST" for msg in msg_list: # 仅处理程序管理层面上的操作 Only do the operation of the program management if not isinstance(msg, (Msg, Notify)): logging.error("Handler received a not a Msg or Notify instance.") return elif isinstance(msg, MsgWithContent): logging.info(str(self.__get_account(msg)) + ":" + msg.content) if isinstance(msg, GroupMsg): # 群聊信息的处理 # 判断群对象是否存在,info_seq实际上为群号 if msg.info_seq not in self.__group_list: self.__group_list[msg.info_seq] = Group(self.__operator, msg) # 维护一个线程队列,然后每一个线程处理各自的信息 self.process_threads[msg.info_seq] = MsgHandleQueue(self.__group_list[msg.info_seq]) self.process_threads[msg.info_seq].start() logging.debug("Now group list: " + str(self.__group_list)) tgt_group = self.__group_list[msg.info_seq] if len(tgt_group.msg_list) >= 1 and msg.seq == tgt_group.msg_list[-1].seq: # 若如上一条seq重复则抛弃此条信息不处理 logging.info("消息重复,抛弃") return tgt_group.msg_id = msg.msg_id self.process_threads[msg.info_seq].append(msg) elif isinstance(msg, PmMsg): # 私聊信息处理 tid = self.__get_account(msg) if tid not in self.__pm_list: self.__pm_list[tid] = Pm(self.__operator, msg) # 维护一个线程队列,然后每一个线程处理各自的信息 self.process_threads[tid] = MsgHandleQueue(self.__pm_list[tid]) self.process_threads[tid].start() logging.debug("Now pm thread list: " + str(self.__pm_list)) tgt_pm = self.__pm_list[tid] if len(tgt_pm.msg_list) >= 1 and msg.time == tgt_pm.msg_list[-1].time \ and msg.from_uin == tgt_pm.msg_list[-1].from_uin \ and msg.content == tgt_pm.msg_list[-1].content: # 私聊没有seq可用于判断重复,只能抛弃同一个人在同一时间戳发出的内容相同的消息。 logging.info("消息重复,抛弃") return tgt_pm.msg_id = msg.msg_id self.process_threads[tid].append(msg) elif isinstance(msg, SessMsg): # 临时会话的处理 tid = self.__get_account(msg) if tid not in self.__sess_list: self.__sess_list[tid] = Sess(self.__operator, msg) self.process_threads[tid] = MsgHandleQueue(self.__sess_list[tid]) self.process_threads[tid].start() logging.debug("Now sess thread list: " + str(self.__sess_list)) tgt_sess = self.__sess_list[tid] if len(tgt_sess.msg_list) >= 1 and msg.time == tgt_sess.msg_list[-1].time \ and msg.from_uin == tgt_sess.msg_list[-1].from_uin \ and msg.content == tgt_sess.msg_list[-1].content: # 私聊没有seq可用于判断重复,只能抛弃同一个人在同一时间戳发出的同一内容的消息。 logging.info("消息重复,抛弃") return tgt_sess.msg_id = msg.msg_id self.process_threads[tid].append(msg) elif isinstance(msg, InputNotify): self.__input_notify_handler(msg) elif isinstance(msg, BuddiesStatusChange): self.__buddies_status_change_handler(msg) elif isinstance(msg, KickMessage): self.__kick_message(msg) else: logging.warning("Unsolved Msg type :" + str(msg.poll_type)) return def __get_account(self, msg): assert isinstance(msg, (Msg, Notify)), "function get_account received a not Msg or Notify parameter." 
if isinstance(msg, (PmMsg, SessMsg, InputNotify)): # 如果消息的发送者的真实QQ号码不在FriendList中,则自动去取得真实的QQ号码并保存到缓存中 tuin = msg.from_uin account = self.__operator.uin_to_account(tuin) return account elif isinstance(msg, GroupMsg): return str(msg.info_seq).join("[]") + str(self.__operator.uin_to_account(msg.send_uin)) def __input_notify_handler(self, inputNotify): logging.info(str(self.__get_account(inputNotify)) + " is typing...") if isinstance(inputNotify, GroupAddMessage): pass return def __buddies_status_change_handler(self, buddiesStatusChange): pass def __kick_message(self, kickMessage): logging.warning(str(kickMessage.to_uin) + " is kicked. Reason: " + str(kickMessage.reason)) logging.warning("[{0}]{1} is kicked. Reason: {2}".format( str(kickMessage.to_uin), self.__operator.username, str(kickMessage.reason), )) raise KeyboardInterrupt("Kicked") # 为了加速程序处理消息,采用了多线程技术 class MsgHandleQueue(threading.Thread): def __init__(self, handler): super(MsgHandleQueue, self).__init__() self.handler = handler self.msg_queue = [] self.setDaemon(True) def run(self): while 1: if len(self.msg_queue): self.handler.handle(self.msg_queue.pop(0)) logging.debug("queue handling.Now queue length:" + str(len(self.msg_queue))) else: time.sleep(1) def append(self, msg): self.msg_queue.append(msg)
gpl-3.0
-3,575,861,079,491,298,300
37.962733
109
0.545194
false
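MsgHandler above fans incoming messages out to one MsgHandleQueue thread per group or contact. Below is a stand-alone sketch of that per-key worker pattern with illustrative names (not SmartQQBot code); it uses a blocking queue.Queue instead of the original's sleep()-based polling loop.

# Each key gets one daemon worker thread draining its own queue.
import queue
import threading
import time

class Worker(threading.Thread):
    def __init__(self, handle):
        super().__init__(daemon=True)
        self.handle = handle            # callable applied to each message
        self.messages = queue.Queue()

    def run(self):
        while True:
            # Queue.get() blocks until a message arrives, so no busy waiting.
            self.handle(self.messages.get())

workers = {}

def dispatch(key, msg, handle=print):
    # Lazily create one worker per conversation key, then enqueue the message.
    if key not in workers:
        workers[key] = Worker(handle)
        workers[key].start()
    workers[key].messages.put(msg)

dispatch('group-42', 'hello')
dispatch('group-42', 'world')
time.sleep(0.2)                          # give the daemon worker time to print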
zemon1/CrawfoSys
weather.py
1
2138
#!/usr/bin/env python2 #weather.py #Original author: Josh McSavaney ([email protected]) #Current maintainer: Jeff Haak ([email protected]) #A script used to scrape and parse weather information import urllib, re, argparse if __name__ == "__main__": parser = argparse.ArgumentParser(description='Gets weather info from weather.gov') parser.add_argument('--noTroll' , help='Display temp in Kelvin' , default=False , required=False) args = vars(parser.parse_args()) #print args # get the file from the site file = urllib.urlopen('http://www.weather.gov/data/current_obs/KROC.xml') # make the file into a string data = file.read() weather = "N/A" temp = "N/A" windchill = "N/A" # search the file for the weather and store the string try: re2 = re.search(r'<weather>(.*?)</weather>', data) weather = re2.group(1) except (AttributeError): pass # search the file for the temp and store the string try: re3 = re.search(r'<temperature_string>(.*?)</temperature_string>', data) temp = re3.group(1) except (AttributeError): pass # search the file for the windchill and store the string try: re4 = re.search(r'<windchill_string>(.*?)</windchill_string>', data) windchill = re4.group(1) except (AttributeError): pass #use Kelvin if not args['noTroll']: windchill = float(windchill.split()[2][1:]) + 273.15 temp = float(temp.split()[2][1:]) + 273.15 windchill = "Windchill:" + str(windchill) + "K" temp = "Temp:" + str(temp) + "K" res = temp + " " + windchill + " " + weather else: windchill = int(windchill.split()[0].split(".")[0]) temp = int(temp.split()[0].split(".")[0]) windchill = "Windchill:" + str(windchill) + "F" temp = "Temp:" + str(temp) + "F" res = temp + " " + windchill + " " + weather print res
apache-2.0
-8,801,125,068,099,630,000
25.395062
86
0.546305
false
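The Kelvin branch above works by pulling the Celsius figure out of the parenthesised part of the temperature string and adding 273.15. A small sketch of that string handling, assuming the XML field looks like "39.0 F (3.9 C)" (this exact format is implied by the split()[2][1:] indexing, not verified here):

temperature_string = "39.0 F (3.9 C)"

celsius = float(temperature_string.split()[2][1:])             # "(3.9" -> 3.9
kelvin = celsius + 273.15
fahrenheit = int(temperature_string.split()[0].split(".")[0])  # "39.0" -> 39

print("Temp:%.2fK" % kelvin)    # Temp:277.05K
print("Temp:%dF" % fahrenheit)  # Temp:39F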
EachenKuang/PythonRepository
MedicineSCI/Tools/Dao.py
1
1155
# -*- coding: utf-8 -*-
import pymssql


class Dao:
    def __init__(self):
        self.conn = None
        self.cur = None

    def connect(self):
        # Database connection settings
        self.conn = pymssql.connect(host="localhost:59318", user="eachen", password="123456", database="mydata",
                                    charset="utf8")
        # host = "localhost:59318", user = "eachen", pwd = "123456", db = "mydata"
        self.cur = self.conn.cursor()

        if not self.cur:
            raise NameError("Database connection failed")
        else:
            print("Database connection succeeded")

    def create(self, sql):
        # print(sql)
        try:
            self.cur.execute(sql)
            self.conn.commit()
        except Exception:
            print('create failed')
        else:
            print('create succeed')

    def insert(self, sql):
        # print(sql)
        self.cur.execute(sql)
        self.conn.commit()

    def select(self, sql):
        # print(sql)
        self.cur.execute(sql)
        # fetchall() returns all remaining result rows
        return self.cur.fetchall()

    def close(self):
        self.conn.close()
apache-2.0
-7,258,135,829,876,773,000
23.795455
112
0.507791
false
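A hypothetical usage of the Dao class above; the import path, table name and SQL are invented, and it assumes pymssql is installed and the hard-coded SQL Server instance is reachable.

from Dao import Dao   # assumed module name, matching Tools/Dao.py above

dao = Dao()
dao.connect()
dao.create("CREATE TABLE demo (id INT, name NVARCHAR(50))")
dao.insert("INSERT INTO demo VALUES (1, 'example')")
for row in dao.select("SELECT id, name FROM demo"):
    print(row)
dao.close()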
forseti-security/forseti-security
google/cloud/forseti/common/gcp_api/api_helpers.py
1
5251
# Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helper functions for API clients.""" import google.auth from google.auth import iam from google.auth.credentials import with_scopes_if_required from google.auth.transport import requests from google.oauth2 import service_account from google.cloud.forseti.common.gcp_api._base_repository import CLOUD_SCOPES _TOKEN_URI = 'https://accounts.google.com/o/oauth2/token' def get_delegated_credential(delegated_account, scopes): """Build delegated credentials required for accessing the gsuite APIs. Args: delegated_account (str): The account to delegate the service account to use. scopes (list): The list of required scopes for the service account. Returns: service_account.Credentials: Credentials as built by google.oauth2.service_account. """ request = requests.Request() # Get the "bootstrap" credentials that will be used to talk to the IAM # API to sign blobs. bootstrap_credentials, _ = google.auth.default() bootstrap_credentials = with_scopes_if_required( bootstrap_credentials, list(CLOUD_SCOPES)) # Refresh the boostrap credentials. This ensures that the information about # this account, notably the email, is populated. bootstrap_credentials.refresh(request) # Create an IAM signer using the bootstrap credentials. signer = iam.Signer(request, bootstrap_credentials, bootstrap_credentials.service_account_email) # Create OAuth 2.0 Service Account credentials using the IAM-based signer # and the bootstrap_credential's service account email. delegated_credentials = service_account.Credentials( signer, bootstrap_credentials.service_account_email, _TOKEN_URI, scopes=scopes, subject=delegated_account) return delegated_credentials def flatten_list_results(paged_results, item_key): """Flatten a split-up list as returned by list_next() API. GCE 'list' APIs return results in the form: {item_key: [...]} with one dictionary for each "page" of results. This method flattens that to a simple list of items. Args: paged_results (list): A list of paged API response objects. [{page 1 results}, {page 2 results}, {page 3 results}, ...] item_key (str): The name of the key within the inner "items" lists containing the objects of interest. Returns: list: A list of items. """ results = [] for page in paged_results: results.extend(page.get(item_key, [])) return results def flatten_aggregated_list_results(paged_results, item_key): """Flatten a split-up list as returned by GCE "aggregatedList" API. The compute API's aggregatedList methods return a structure in the form: { items: { $group_value_1: { $item_key: [$items] }, $group_value_2: { $item_key: [$items] }, $group_value_3: { "warning": { message: "There are no results for ..." } }, ..., $group_value_n, { $item_key: [$items] }, } } where each "$group_value_n" is a particular element in the aggregation, e.g. a particular zone or group or whatever, and "$item_key" is some type-specific resource name, e.g. "backendServices" for an aggregated list of backend services. 
This method takes such a structure and yields a simple list of all $items across all of the groups. Args: paged_results (list): A list of paged API response objects. [{page 1 results}, {page 2 results}, {page 3 results}, ...] item_key (str): The name of the key within the inner "items" lists containing the objects of interest. Returns: list: A list of items. """ items = [] for page in paged_results: aggregated_items = page.get('items', {}) for items_for_grouping in list(aggregated_items.values()): for item in items_for_grouping.get(item_key, []): items.append(item) return items def get_ratelimiter_config(global_configs, api_name): """Get rate limiter configuration. Args: global_configs (dict): Global configurations. api_name (String): The name of the api. Returns: float: Max calls float: quota period) """ max_calls = global_configs.get(api_name, {}).get('max_calls') quota_period = global_configs.get(api_name, {}).get('period') return max_calls, quota_period
apache-2.0
-1,306,014,316,624,759,000
32.660256
79
0.652828
false
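A self-contained illustration of the aggregatedList flattening described above, run against a fabricated one-page response (zone names and instance names are made up; the helper is re-stated here so the snippet runs on its own):

# Re-statement of the flattening logic, for illustration only.
def flatten_aggregated_list_results(paged_results, item_key):
    items = []
    for page in paged_results:
        for items_for_grouping in page.get('items', {}).values():
            items.extend(items_for_grouping.get(item_key, []))
    return items

pages = [{
    'items': {
        'zones/us-central1-a': {'instances': [{'name': 'vm-1'}, {'name': 'vm-2'}]},
        'zones/europe-west1-b': {'warning': {'message': 'There are no results for ...'}},
        'zones/asia-east1-a': {'instances': [{'name': 'vm-3'}]},
    }
}]

print(flatten_aggregated_list_results(pages, 'instances'))
# [{'name': 'vm-1'}, {'name': 'vm-2'}, {'name': 'vm-3'}]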
skycucumber/Messaging-Gateway
src/Command/HeartBeat.py
1
1072
'''
Created on 2013-8-12

@author: E525649
'''
from BaseCommand import CBaseCommand
from twisted.internet import threads
import BaseCommand
from DB import SBDB


class CHeartBeat(CBaseCommand):
    '''
    classdocs
    '''
    command_id = 0x00000002

    def __init__(self, data=None, protocol=None):
        '''
        Constructor
        '''
        CBaseCommand.__init__(self, data, protocol)

    def Run(self):
        with self.protocol.lockCmd:
            if self.Authorized():
                CBaseCommand.Run(self)
                self.SendResp()
                if self.protocol.role == BaseCommand.PV_ROLE_HUMAN:
                    threads.deferToThread(SBDB.UpdateActiveTime, self.protocol.role, self.protocol.client_id, id(self.protocol.transport))
                elif self.protocol.role == BaseCommand.PV_ROLE_SUPERBOX:
                    threads.deferToThread(SBDB.UpdateActiveTime, self.protocol.role, self.protocol.superbox_id, id(self.protocol.transport))
            else:
                self.SendUnauthorizedResp()
gpl-2.0
-7,245,791,321,416,922,000
29.529412
137
0.60541
false
martinggww/lucasenlights
MachineLearning/sklearn/mrjbq7-ta-lib-c553531/setup.py
1
3712
#!/usr/bin/env python import sys import os import warnings from distutils.dist import Distribution display_option_names = Distribution.display_option_names + ['help', 'help-commands'] query_only = any('--' + opt in sys.argv for opt in display_option_names) or len(sys.argv) < 2 or sys.argv[1] == 'egg_info' # Use setuptools for querying the package, normal builds use distutils if query_only: try: from setuptools import setup except ImportError: from distutils.core import setup else: from distutils.core import setup from distutils.extension import Extension lib_talib_name = 'ta_lib' # the underlying C library's name platform_supported = False for prefix in ['darwin', 'linux', 'bsd', 'sunos']: if prefix in sys.platform: platform_supported = True include_dirs = [ '/usr/include', '/usr/local/include', '/opt/include', '/opt/local/include', ] if 'TA_INCLUDE_PATH' in os.environ: include_dirs.append(os.environ['TA_INCLUDE_PATH']) lib_talib_dirs = [ '/usr/lib', '/usr/local/lib', '/usr/lib64', '/usr/local/lib64', '/opt/lib', '/opt/local/lib', ] if 'TA_LIBRARY_PATH' in os.environ: lib_talib_dirs.append(os.environ['TA_LIBRARY_PATH']) break if sys.platform == "win32": platform_supported = True lib_talib_name = 'ta_libc_cdr' include_dirs = [r"c:\ta-lib\c\include"] lib_talib_dirs = [r"c:\ta-lib\c\lib"] if not platform_supported: raise NotImplementedError(sys.platform) # Do not require numpy or cython for just querying the package if not query_only: import numpy include_dirs.insert(0, numpy.get_include()) try: from Cython.Distutils import build_ext has_cython = True except ImportError: has_cython = False for lib_talib_dir in lib_talib_dirs: try: files = os.listdir(lib_talib_dir) if any(lib_talib_name in f for f in files): break except OSError: pass else: warnings.warn('Cannot find ta-lib library, installation may fail.') cmdclass = {} if has_cython: cmdclass['build_ext'] = build_ext ext_modules = [ Extension( 'talib._ta_lib', ['talib/_ta_lib.pyx' if has_cython else 'talib/_ta_lib.c'], include_dirs=include_dirs, library_dirs=lib_talib_dirs, libraries=[lib_talib_name] ) ] setup( name = 'TA-Lib', version = '0.4.10', description = 'Python wrapper for TA-Lib', author = 'John Benediktsson', author_email = '[email protected]', url = 'http://github.com/mrjbq7/ta-lib', download_url = 'https://github.com/mrjbq7/ta-lib/releases', classifiers = [ "License :: OSI Approved :: BSD License", "Development Status :: 4 - Beta", "Operating System :: Unix", "Operating System :: POSIX", "Operating System :: MacOS :: MacOS X", "Operating System :: Microsoft :: Windows", "Programming Language :: Python", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Cython", "Topic :: Office/Business :: Financial", "Topic :: Scientific/Engineering :: Mathematics", "Intended Audience :: Developers", "Intended Audience :: Science/Research", "Intended Audience :: Financial and Insurance Industry", ], packages = ['talib'], ext_modules = ext_modules, cmdclass = cmdclass, requires = ['numpy'], )
cc0-1.0
-1,881,101,097,156,988,400
28.935484
122
0.60695
false
endreszabo/pdnsapp
dns.py
1
6190
#!/usr/bin/env python from sys import exit, stdin, stderr, argv, stdout from inspect import stack from config import * import os import csv CONT=0 FINAL=1 default_ttl=60 loglevel=3 class istr(str): def __eq__(self, text): return str.__eq__(self.lower(), text.lower()) class qname(istr): def __new__(cls, value, *args, **kwargs): return istr.__new__(cls, value) def _domain_parts(self,request): return map(lambda x: istr(x), filter(lambda x: x!='', self.split('.'))) def _domain_parts_len(self,request): return len(domain_parts(request)) def _tld(self, count=2): return istr('.'.join(self.domain_parts[-count:])) def __init__(self, value, minlen=None, maxlen=None): self.domain_parts=self._domain_parts(value) self.domain_parts_count=len(self.domain_parts) self.tld=self._tld() def host_part(self, substring): try: if self.lower().index(substring+'.')==0: return True except ValueError: return False return False def is_subdomain(string, substring): try: return (string.lower().rindex('.'+substring)+len(substring)+1 == len(string)) except ValueError: return False return False def logd(level=loglevel, message=None, kwargs={}): if level>=loglevel: print("LOG\t%s(): %s" % (stack()[1][3],'; '.join([message,', '.join(map(lambda (k,v): "%s='%s'" % (k,v), kwargs.iteritems()))]))) def log(level=loglevel, message=None, **kwargs): if level>=loglevel: print( "LOG\t%s(): %s" % ( stack()[1][3], '; '.join( [ message, ', '.join( map(lambda (k,v): "%s='%s'" % (k,v), kwargs.iteritems()) ) ] ) ) ) def MX(priority=0, data=None, ttl=default_ttl): if data: return { 'qtype': 'MX', 'data':"%s\t%s" % (priority, data), 'ttl': ttl } else: return {} def LOG(msg): pass def A(data=None, ttl=default_ttl): if data: return { 'qtype': 'A', 'data': data, 'ttl': ttl } else: return {} def match_domain(name, domain): if name[-len(domain):] == domain or name[-len(domain)-1:] == '.'+domain: return True return False matches=[] def match(host=None, fqdn=None, domain=None, dns_class=None, type=None, remote_ip=None, local_ip=None, cache=True): params=locals() def wrapper(f): matches.append([f, params]) return wrapper def represent(response): return "\t".join([ 'DATA', response['qname'], response['qclass'], response['qtype'], str(response['ttl']), response['id'], response['data'] ]) def route(request): retval=[] if request['qname'] in skip_zones: retval.append("LOG\tqname '%s' is in skipped zones list, skipping" % request['qname']) return retval for f, conditions in matches: if (conditions['fqdn'] is None or conditions['fqdn'] == request['qname']) and \ (conditions['domain'] is None or match_domain(request['qname'], conditions['domain'])) and \ (conditions['type'] is None or conditions['type'] == request['qtype'] or request['qtype'] == 'ANY') and \ (conditions['dns_class'] is None or conditions['dns_class'] == request['qclass']) and \ (conditions['remote_ip'] is None or conditions['remote_ip'] == request['remote-ip']) and \ (conditions['local_ip'] is None or conditions['local_ip'] == request['local-ip']): returned=f(request) if returned: if returned[1]: if type(returned[1]) is list: for item in returned[1]: retval.append( represent( dict(request.items() + item.items()) ) ) else: retval.append( represent( dict(request.items() + returned[1].items()) ) ) if returned[0] == FINAL: break return retval def run(f_in=stdin, f_out=stdout): line = f_in.readline().strip() if not line.startswith('HELO'): print >>f_out, 'FAIL' f_out.flush() f_in.readline() else: print >>f_out, "OK\tapp firing up" f_out.flush() while True: line = f_in.readline().strip() if not 
line: break #request = line.split('\t') request = dict( zip( ['cmd','qname','qclass','qtype','id','remote-ip','local-ip','edns-subnet-address'], line.split('\t') ) ) request['qname']=qname(request['qname']) #request['id']=1 #logd(3, 'Processing request', request) if request['cmd'] == 'Q': if request['qname'] != '': datas=route(request) if datas: print >>f_out, "\n".join(datas) #print >>f_out, "LOG\t"+"\nLOG\t".join(datas) print >>f_out, "END" f_out.flush() elif request['cmd'] == 'PING': print >>f_out, "LOG\tPONG" f_out.flush() continue elif request['cmd'] == 'HELO': print >>f_out, "OK\trunning" f_out.flush() continue elif request['cmd'] == 'AXFR': print >>f_out, "END" f_out.flush() continue else: print >>f_out, "LOG\tUnprocessed" def acme_b64encode(acme_challenge): return acme_challenge.replace('_','_u').replace('-','_h') def acme_b64decode(acme_challenge): return acme_challenge.replace('_h','-').replace('_u','_')
gpl-2.0
1,141,596,529,849,517,300
30.907216
137
0.485784
false
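run() above speaks the PowerDNS pipe-backend ABI: tab-separated lines such as HELO, Q and PING arrive on stdin, and DATA/END/LOG replies go to stdout. The snippet below shows how one fabricated "Q" line becomes the request dict the router sees (the field values are invented; the field order is the one run() assumes):

line = "Q\twww.example.com\tIN\tA\t-1\t203.0.113.7\t198.51.100.1\t203.0.113.0/24"

fields = ['cmd', 'qname', 'qclass', 'qtype', 'id',
          'remote-ip', 'local-ip', 'edns-subnet-address']
request = dict(zip(fields, line.strip().split('\t')))

print(request['cmd'], request['qname'], request['qtype'])
# Q www.example.com A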
nixingyang/Kaggle-Competitions
Face Verification/Extra/Cross Validation/Cross_Validation.py
1
5595
from joblib import Parallel, delayed from sklearn.cross_validation import KFold import numpy as np import prepare_data import pylab import solution_basic def inspect_final_data_set_without_labels(image_index_list, seed): np.random.seed(seed) image_index_array = np.array(image_index_list) # Cross Validation fold_num = 5 label_kfold = KFold(image_index_array.size, n_folds=fold_num, shuffle=True) true_records_num_list = [] false_records_num_list = [] for _, fold_item in enumerate(label_kfold): # Generate final data set selected_index_array = image_index_array[fold_item[0]] _, Y_train = solution_basic.get_record_map(selected_index_array, None) true_records = Y_train == 1 true_records_num = np.sum(true_records) false_records_num = Y_train.size - true_records_num true_records_num_list.append(true_records_num) false_records_num_list.append(false_records_num) return (true_records_num_list, false_records_num_list) def inspect_final_data_set_with_labels(image_index_list, seed): np.random.seed(seed) # Cross Validation fold_num = 5 unique_label_values = np.unique(image_index_list) selected_label_values = np.random.choice(unique_label_values, \ size=np.ceil(unique_label_values.size * (fold_num - 1) / fold_num), \ replace=False) selected_index_list = [] for single_image_index in image_index_list: if single_image_index in selected_label_values: selected_index_list.append(single_image_index) selected_index_array = np.array(selected_index_list) _, Y_train = solution_basic.get_record_map(selected_index_array, None) true_records = Y_train == 1 true_records_num = np.sum(true_records) false_records_num = Y_train.size - true_records_num return ([true_records_num], [false_records_num]) def inspect_number_of_occurrences(): # Get image paths in the training and testing datasets _, training_image_index_list = prepare_data.get_image_paths_in_training_dataset( ) repeated_num = 20 seed_array = np.random.choice(range(repeated_num), size=repeated_num, replace=False) records_list = (Parallel(n_jobs=-1)(delayed( inspect_final_data_set_without_labels)(training_image_index_list, seed) for seed in seed_array)) # repeated_num = 100 # seed_array = np.random.choice(range(repeated_num), size=repeated_num, replace=False) # records_list = (Parallel(n_jobs=-1)(delayed(inspect_final_data_set_with_labels)(training_image_index_list, seed) for seed in seed_array)) true_records_num_list = [] false_records_num_list = [] for single_true_records_num_list, single_false_records_num_list in records_list: for value in single_true_records_num_list: true_records_num_list.append(value) for value in single_false_records_num_list: false_records_num_list.append(value) for single_list in [true_records_num_list, false_records_num_list]: repeated_times_list = [] min_value_list = [] max_value_list = [] mean_value_list = [] for end_index in range(len(single_list)): current_list = single_list[0:end_index + 1] repeated_times_list.append(len(current_list)) min_value_list.append(np.min(current_list)) max_value_list.append(np.max(current_list)) mean_value_list.append(np.mean(current_list)) pylab.figure() pylab.plot(repeated_times_list, min_value_list, color="yellowgreen", label="Minimum") pylab.plot(repeated_times_list, max_value_list, color="lightskyblue", label="Maximum") pylab.plot(repeated_times_list, mean_value_list, color="darkorange", label="Mean") pylab.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=3, mode="expand", borderaxespad=0.) 
pylab.xlabel("Repeated Times", fontsize="large") pylab.ylabel("Number of Occurrences", fontsize="large") pylab.grid() pylab.show() def inspect_number_of_images(): # Get image paths in the training and testing datasets _, training_image_index_list = prepare_data.get_image_paths_in_training_dataset( ) images_number_list = [] for current_image_index in np.unique(training_image_index_list): images_number_list.append( np.sum(np.array(training_image_index_list) == current_image_index)) # the histogram of the data with histtype="step" bins = np.arange(np.min(images_number_list), np.max(images_number_list) + 2) - 0.5 _, _, patches = pylab.hist(images_number_list, bins=bins) pylab.setp(patches, "facecolor", "yellowgreen", "alpha", 0.75) pylab.xlim([bins[0], bins[-1]]) pylab.xticks( np.arange(np.min(images_number_list), np.max(images_number_list) + 1)) pylab.xlabel("Number of Images from the Same Person", fontsize="large") pylab.ylabel("Number of Occurrences", fontsize="large") pylab.title("Histogram of Number of Images from the Same Person") pylab.show() inspect_number_of_occurrences()
mit
-2,729,778,496,074,134,000
36.3
143
0.611796
false
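The experiment above repeatedly splits the image-index array into 5 folds before counting true/false record pairs. A minimal sketch of such a 5-fold index split using numpy only (the deprecated sklearn.cross_validation API is not required for the illustration):

import numpy as np

image_index_array = np.arange(20)          # stand-in for the real index list
rng = np.random.RandomState(0)
shuffled = rng.permutation(image_index_array)

fold_num = 5
for fold, test_idx in enumerate(np.array_split(shuffled, fold_num)):
    train_idx = np.setdiff1d(shuffled, test_idx)
    print("fold %d: %d train / %d test indices" % (fold, train_idx.size, test_idx.size))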
DMSalesman/Nemris
modules/pkgutils.py
1
3330
"""Module with functions for management of installed APK lists.""" import glob import re import subprocess import apkutils # needed for AndroidManifest.xml dump import utils # needed for sudo # Creates a APK/path dictionary to avoid the sluggish "pm path" def create_pkgdict(): """Creates a dict for fast path lookup from /data/system/packages.xml; returns dict.""" (out, err) = utils.sudo("cat /data/system/packages.xml") if err: return False xml_dump = [i for i in out.decode("utf-8").split("\n") if "<package name=" in i] pkgdict = {} for i in xml_dump: pkgname = re.findall("<package name=\"(.*?)\"", i)[0] pkgpath = re.findall("codePath=\"(.*?)\"", i)[0] # Normalizes each entry if not pkgpath.endswith(".apk"): try: pkgpath = glob.glob(pkgpath + "/*.apk")[0] except: continue pkgdict[pkgname] = pkgpath return pkgdict def list_installed_pkgs(args): """Lists the members of a given category of packages; returns list.""" prefix = "pm list packages" if args.user: suffix = "-3" elif args.system: suffix = "-s" elif args.disabled: suffix = "-d" else: suffix = "" pkgs = [i[8:] for i in subprocess.Popen("{0} {1}".format(prefix, suffix), stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell = True).communicate()[0].decode("utf-8").split("\n") if i] return pkgs def list_installed_pkgs_nougat(args): """Uses Nougat's cmd command to query the package service (faster); returns list.""" prefix = "cmd package list packages" if args.user: suffix = "-3" elif args.system: suffix = "-s" elif args.disabled: suffix = "-d" else: suffix = "" pkgs = [i[8:] for i in utils.sudo("{0} {1}".format(prefix, suffix))[0].decode("utf-8").split("\n") if i] return pkgs def check_substratum(nougat): """Checks if the Substratum engine is installed; returns bool.""" if nougat: user_pkgs = [i[8:] for i in utils.sudo("cmd package list packages -3")[0].decode("utf-8").split("\n") if i] else: user_pkgs = [i[8:] for i in subprocess.Popen("pm list packages -3", stdout = subprocess.PIPE, shell = True).communicate()[0].decode("utf-8").split("\n") if i] substratum_installed = True if "projekt.substratum" in user_pkgs else False return substratum_installed def exclude_overlays(aapt, pkgdict, pkgs): """Excludes Substratum overlays from the packages to extract; returns nothing.""" for i in pkgs: pkgpath = pkgdict.get(i) out = apkutils.get_pkgxml(aapt, pkgpath)[0].decode("utf-8") if "Substratum_Parent" in out: pkgs.remove(i) def exclude_arcus_variants(pkgs): """Excludes Arcus theme variants from the packages to extract; returns nothing.""" for i in pkgs: if "pixkart.arcus.user" in i: pkgs.remove(i) def check_already_extracted(pkgpath, md5sums): """Checks if an APK has already been extracted; returns bool, str.""" pkgsum = utils.compute_md5sum(pkgpath) already_extracted = True if pkgsum in md5sums else False return already_extracted, pkgsum
unlicense
-4,136,320,089,613,756,400
30.714286
194
0.606607
false
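create_pkgdict() above avoids the slow "pm path" lookup by scraping /data/system/packages.xml with regexes. The extraction step, shown on a fabricated entry (the real file requires root to read):

import re

xml_line = '<package name="com.example.app" codePath="/data/app/com.example.app-1/base.apk">'

pkgname = re.findall('<package name="(.*?)"', xml_line)[0]
pkgpath = re.findall('codePath="(.*?)"', xml_line)[0]

print(pkgname, '->', pkgpath)
# com.example.app -> /data/app/com.example.app-1/base.apk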
zhongwcool/Muzei
web/handlers/backroomarthelper.py
1
6229
# Copyright 2014 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import re import sys import webapp2 from google.appengine.api import images from google.appengine.api import urlfetch sys.path.append(os.path.join(os.path.dirname(__file__),'../lib')) from bs4 import BeautifulSoup import cloudstorage as gcs from handlers.common import * from models import FeaturedArtwork THUMB_HEIGHT=600 NO_CROP_TUPLE=(0, 0, 1, 1) def add_art_from_external_details_url(publish_date, url): if FeaturedArtwork.all().filter('publish_date =', publish_date).get() != None: webapp2.abort(409, message='Artwork already exists for this date.') result = urlfetch.fetch(url) if result.status_code < 200 or result.status_code >= 300: webapp2.abort(400, message='Error processing URL: HTTP %d. Content: %s' % (result.status_code, result.content)) soup = BeautifulSoup(result.content, 'html.parser') attribution = None if re.search(r'wikiart.org', url, re.I) or re.search(r'wikipaintings.org', url, re.I): attribution = 'wikiart.org' details_url = re.sub(r'#.+', '', url, re.I | re.S) + '?utm_source=Muzei&utm_campaign=Muzei' title = soup.find('h1').get_text() author = soup.find('a', class_='artist-name').get_text() completion_year = None try: completion_year = unicode(soup .find(text='Date:') .parent .find_next_sibling('span') .text).strip() except: pass byline = author + ((', ' + completion_year) if completion_year else '') image_url = get_wikiart_image_url(soup) elif re.search(r'metmuseum.org', url, re.I): attribution = 'metmuseum.org' details_url = re.sub(r'[#?].+', '', url, re.I | re.S) + '?utm_source=Muzei&utm_campaign=Muzei' title = soup.find('h2').get_text() author = '' try: author = unicode(soup.find(text='Artist:').parent.next_sibling).strip() except: pass author = re.sub(r'\s*\(.*', '', author) completion_year = None try: completion_year = unicode(soup.find(text='Date:').parent.next_sibling).strip() except: pass byline = author + ((', ' + completion_year) if completion_year else '') image_url = soup.find('a', class_='download').attrs['href'] else: webapp2.abort(400, message='Unrecognized URL') if not title or not author or not image_url: webapp2.abort(500, message='Could not parse HTML') image_url, thumb_url = maybe_process_image(image_url, NO_CROP_TUPLE, publish_date.strftime('%Y%m%d') + ' ' + title + ' ' + byline) # create the artwork entry new_artwork = FeaturedArtwork( title=title.strip(), byline=byline.strip(), attribution=attribution, image_url=image_url, thumb_url=thumb_url, details_url=details_url, publish_date=publish_date) new_artwork.save() return new_artwork def get_wikiart_image_url(soup): # TODO: use a cleaner method :( tmp = soup.find(class_='thumbnails_ref')['onclick'] thumb_html_url = re.search(r'(/en.+?)\'', tmp).group(1) thumb_html_url = "http://www.wikiart.org%s" % thumb_html_url result = urlfetch.fetch(thumb_html_url) if result.status_code < 200 or result.status_code >= 300: webapp2.abort(400, message='Error processing URL: HTTP %d. 
Content: %s' % (result.status_code, result.content)) thumb_html = json.loads(result.content) thumb_soup = BeautifulSoup(thumb_html, 'html.parser') max_thumb_width = 0 max_thumb_url = None for thumb_title_el in thumb_soup.select('.thumbnail_title'): thumb_width = int(re.search(r'(\d+)x\d+', thumb_title_el.get_text()).group(1)) if thumb_width > max_thumb_width: max_thumb_width = thumb_width max_thumb_url = thumb_title_el.parent.find('a')['href'] return max_thumb_url def maybe_process_image(image_url, crop_tuple, base_name): if CLOUD_STORAGE_ROOT_URL in image_url and crop_tuple == NO_CROP_TUPLE: return (image_url, None) image_result = urlfetch.fetch(image_url, deadline=20) if image_result.status_code < 200 or image_result.status_code >= 300: raise IOError('Error downloading image: HTTP %d.' % image_result.status_code) filename = re.sub(r'[^\w]+', '-', base_name.strip().lower()) + '.jpg' # main image image_gcs_path = CLOUD_STORAGE_BASE_PATH + '/fullres/' + filename # resize to max width 4000 or max height 2000 image_contents = image_result.content image = images.Image(image_contents) edited = False if image.height > 2000: image.resize(width=(image.width * 2000 / image.height), height=2000) edited = True elif image.width > 4000: image.resize(width=4000, height=(image.height * 4000 / image.width)) edited = True if crop_tuple != NO_CROP_TUPLE: image.crop(*crop_tuple) edited = True if edited: image_contents = image.execute_transforms(output_encoding=images.JPEG, quality=80) # upload with default ACLs set on the bucket # or use options={'x-goog-acl': 'public-read'}) gcs_file = gcs.open(image_gcs_path, 'w', content_type='image/jpeg') gcs_file.write(image_contents) gcs_file.close() # thumb thumb_gcs_path = CLOUD_STORAGE_BASE_PATH + '/thumbs/' + filename thumb = images.Image(image_result.content) thumb.resize(width=(thumb.width * THUMB_HEIGHT / thumb.height), height=THUMB_HEIGHT) if crop_tuple != NO_CROP_TUPLE: thumb.crop(*crop_tuple) edited = True thumb_contents = thumb.execute_transforms(output_encoding=images.JPEG, quality=40) gcs_file = gcs.open(thumb_gcs_path, 'w', content_type='image/jpeg') gcs_file.write(thumb_contents) gcs_file.close() return (CLOUD_STORAGE_ROOT_URL + image_gcs_path, CLOUD_STORAGE_ROOT_URL + thumb_gcs_path)
apache-2.0
525,997,306,480,910,340
33.798883
98
0.67266
false
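The wikiart.org branch above reads the title from the first h1 element and the artist from the link with class "artist-name". The same BeautifulSoup calls on a fabricated HTML fragment (requires beautifulsoup4; the markup and artwork are made up for illustration):

from bs4 import BeautifulSoup

html = '''
<html><body>
  <h1>Starry Night Over the Rhone</h1>
  <a class="artist-name" href="/en/vincent-van-gogh">Vincent van Gogh</a>
</body></html>
'''

soup = BeautifulSoup(html, 'html.parser')
title = soup.find('h1').get_text()
author = soup.find('a', class_='artist-name').get_text()
print('%s, by %s' % (title, author))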
realgam3/SubtitlesClient
SubtitlesClient.py
1
3702
#!/usr/bin/env python #-*- coding:utf-8 -*- ######################################################## # Name: Subtitles Client # Site: http://RealGame.co.il __author__ = 'RealGame (Tomer Zait)' __license__ = 'GPL v3' __version__ = '1.0' __email__ = '[email protected]' ######################################################## from os import path from sys import argv from docopt import docopt from engines.engine import SubtitleSite, SUBTITLE_SITE_LIST, DEFAULTS __doc__ = \ """ Subtitles Client Usage: {prog} download <releases_path>... [--lang=<language> --engine=<subtitle_site>...] {prog} exist <releases_path>... [--lang=<language> --engine=<subtitle_site>...] {prog} test [<engines>...] {prog} (-l | --list) {prog} (-h | --help) {prog} (-v | --version) Options: -l --list Show subtitles engine list. -h --help Show this screen. -v --version Show version. --lang=<language> Subtitle language (alpha2) [default: {def_language}]. --engine=<subtitle_site> Subtitle site [default: {def_engine}]. """.format(prog=path.basename(argv[0]), def_language=DEFAULTS['subtitle_language'], def_engine=DEFAULTS['subtitle_engine']) def download_subtitles(releases, engines=[DEFAULTS['subtitle_engine']], lang=DEFAULTS['subtitle_language']): if releases: for release in releases: for engine in engines: subtitle_release = SubtitleSite.get_file_properties(release)['release_name'] print "[{engine: ^15}] Trying To Download Subtitles For: '{release}'".format(engine=engine, release=subtitle_release) sub_obj = SubtitleSite.class_factory(engine) subtitle_path = sub_obj.download_subtitle(release, lang) if subtitle_path: print "{0:17} Download Success: ({file_path}).\n".format("", file_path=subtitle_path) else: print "{0:17} Subtitles Not Found.\n".format("") def is_subtitles_exist(releases, engines=[DEFAULTS['subtitle_engine']], lang=DEFAULTS['subtitle_language']): if releases: for release in releases: for engine in engines: subtitle_release = SubtitleSite.get_file_properties(release)['release_name'] sub_obj = SubtitleSite.class_factory(engine) exist_flag = sub_obj.is_subtitle_exist(release, lang) res = "Exist" if not exist_flag: res = "Does Not " + res print "[{engine: ^15}] '{release}' - {res}.".format(engine=engine, release=subtitle_release, res=res) def test_engines(engines): if not engines: engines = SUBTITLE_SITE_LIST.keys() for engine_key in engines: t = SubtitleSite.class_factory(engine_key) t.test_engine() def main(): args = docopt(__doc__, help=True, version='Subtitles Client %s' % __version__) if args['download']: download_subtitles(args['<releases_path>'], args['--engine'], args['--lang']) elif args['exist']: is_subtitles_exist(args['<releases_path>'], args['--engine'], args['--lang']) elif args['test']: test_engines(args['<engines>']) elif args['--list']: for sub_site in SUBTITLE_SITE_LIST.keys(): sub_dict = SUBTITLE_SITE_LIST.get(sub_site) print sub_dict.get('class_name') if __name__ == "__main__": main()
gpl-3.0
1,652,222,305,396,797,700
36.77551
118
0.537007
false
eReuse/DeviceHub
ereuse_devicehub/resources/account/settings.py
1
5490
from ereuse_devicehub.resources.account.role import Role from ereuse_devicehub.resources.resource import ResourceSettings from ereuse_devicehub.resources.schema import Thing from ereuse_devicehub.security.perms import DB_PERMS from ereuse_devicehub.validation.validation import ALLOWED_WRITE_ROLE class Account(Thing): """ An account represents a physical person or an organization. """ email = { 'type': 'email', 'required': True, 'unique': True, 'sink': 5 } password = { 'type': 'string', # 'required': True, todo active OR password required 'minlength': 4, 'sink': 4, 'doc': 'Users can only see their own passwords.' } role = { 'type': 'string', 'allowed': set(Role.ROLES), 'default': Role.USER, 'doc': 'See the Roles section to get more info.', ALLOWED_WRITE_ROLE: Role(Role.ADMIN) } token = { 'type': 'string', 'readonly': True, } name = { 'type': 'string', 'sink': 3, 'description': 'The name of an account, if it is a person or an organization.' } organization = { 'type': 'string', 'sink': 1, 'description': 'The name of the organization the account is in. Organizations can be inside others.' } active = { 'type': 'boolean', 'default': True, 'sink': -1, 'description': 'Activate the account so you can start using it.', 'doc': 'Inactive accounts cannot login, and they are created through regular events.' } blocked = { 'type': 'boolean', 'default': True, 'sink': -2, 'description': 'As a manager, you need to specifically accept the user by unblocking it\'s account.', ALLOWED_WRITE_ROLE: Role(Role.ADMIN) } isOrganization = { 'type': 'boolean', 'sink': 2 } databases = { # todo make admin worthy 'type': 'dict', 'valueschema': { 'type': 'string', 'allowed': list(DB_PERMS) }, 'required': True, ALLOWED_WRITE_ROLE: Role(Role.ADMIN), 'teaser': False, 'sink': -4, } defaultDatabase = { 'type': 'string', # todo If this is not set, the first databased in 'databases' it should be used ALLOWED_WRITE_ROLE: Role(Role.ADMIN), 'teaser': False, 'sink': -5 } shared = { 'type': 'list', 'schema': { 'type': 'dict', 'schema': { 'db': { 'type': 'string' }, '@type': { 'type': 'string' }, 'label': { 'type': 'string' }, '_id': { 'type': 'string' }, 'baseUrl': { 'type': 'url', 'doc': 'The scheme, domain, any path to reach the DeviceHub.' } } }, 'default': [], 'materialized': True, 'description': 'The groups (eg: lots, packages...) other people shared to this account.' } fingerprints = { 'type': 'list', 'readonly': True, } publicKey = { 'type': 'string', 'writeonly': True } class AccountSettings(ResourceSettings): resource_methods = ['GET', 'POST'] item_methods = ['PATCH', 'DELETE', 'GET'] # the standard account entry point is defined as # '/accounts/<ObjectId>'. We define an additional read-only entry # point accessible at '/accounts/<username>'. # Note that this regex is weak; it will accept more string that are not emails, which is fine; it is fast. additional_lookup = { 'url': 'regex("[^@]+@[^@]+\.[^@]+")', 'field': 'email', } # 'public_methods': ['POST'], # Everyone can create an account, which will be blocked (not active) datasource = { 'projection': {'token': 0}, # We exclude from showing tokens to everyone 'source': 'accounts' } # We also disable endpoint caching as we don't want client apps to # cache account data. cache_control = '' cache_expires = 0 # Allow 'token' to be returned with POST responses extra_response_fields = ResourceSettings.extra_response_fields + ['email', 'active', 'name', 'databases', 'defaultDatabase', 'organization', 'isOrganization'] # Finally, let's add the schema definition for this endpoint. 
_schema = Account allowed_write_roles = {Role.ADMIN} # Only admins or above can POST, PUT or DELETE use_default_database = True # We have a common shared database with accounts fa = 'fa-user-o' unregistered_user = { 'email': Account.email, 'name': Account.name, 'organization': Account.organization, 'isOrganization': Account.isOrganization } unregistered_user_doc = 'It can be a reference to an account, or a basic account object. ' \ + 'The object has to contain at least an e-mail. If the e-mail does ' \ + 'not match to an existing one, an account is created. If the e-mail exists, ' \ + 'that account is used, and the rest of the data (name, org...) is discarded.'
agpl-3.0
5,062,361,657,323,126,000
32.680982
117
0.532058
false
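A fabricated account document of the shape this Cerberus/Eve-style schema describes. All values are invented; the legal strings for 'role' and for the 'databases' permissions come from Role.ROLES and DB_PERMS, which are defined outside this file.

example_account = {
    'email': 'ops@example.org',
    'password': 'correct-horse-battery',
    'role': 'placeholder-role',                 # must be one of Role.ROLES
    'name': 'Example Operator',
    'organization': 'Example Org',
    'isOrganization': False,
    'active': True,
    'databases': {'db1': 'placeholder-perm'},   # db name -> a value from DB_PERMS
    'defaultDatabase': 'db1',
}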
codedsk/hubcheck
hubcheck/pageobjects/widgets/groups_wiki_edit_form.py
1
3453
from hubcheck.pageobjects.widgets.groups_wiki_new_form import \ GroupsWikiNewForm1, GroupsWikiNewForm1_Locators_Base, \ GroupsWikiNewForm2, GroupsWikiNewForm2_Locators_Base, \ GroupsWikiNewForm3, GroupsWikiNewForm3_Locators_Base from hubcheck.pageobjects.basepageelement import Link class GroupsWikiEditForm1(GroupsWikiNewForm1): """ GroupsWikiNewForm with TextArea widget for pagetext """ def __init__(self, owner, locatordict={}): super(GroupsWikiEditForm1,self).__init__(owner,locatordict) # load hub's classes GroupsWikiEditForm_Locators = self.load_class('GroupsWikiEditForm_Locators') # update this object's locator self.locators.update(GroupsWikiEditForm_Locators.locators) # update the locators with those from the owner self.update_locators_from_owner() # setup page object's components self.rename = Link(self,{'base':'rename'}) # update the component's locators with this objects overrides self._updateLocators() def goto_rename(self): """click the rename link""" self.rename.click() class GroupsWikiEditForm1_Locators_Base(object): """locators for GroupsWikiEditForm1 object""" locators = { 'rename' : "xpath=//a[text()='here']", } class GroupsWikiEditForm2(GroupsWikiNewForm2): """ GroupsWikiEditForm that uses an IframeWrap widget for pagetext """ def __init__(self, owner, locatordict={}): super(GroupsWikiEditForm2,self).__init__(owner,locatordict) # load hub's classes GroupsWikiEditForm_Locators = self.load_class('GroupsWikiEditForm_Locators') # update this object's locator self.locators.update(GroupsWikiEditForm_Locators.locators) # update the locators with those from the owner self.update_locators_from_owner() # setup page object's components self.rename = Link(self,{'base':'rename'}) # update the component's locators with this objects overrides self._updateLocators() def goto_rename(self): """click the rename link""" self.rename.click() class GroupsWikiEditForm2_Locators_Base(object): """locators for GroupsWikiEditForm2 object""" locators = { 'rename' : "xpath=//a[text()='here']", } class GroupsWikiEditForm3(GroupsWikiNewForm3): """GroupsWikiEditForm TextArea widget for pagetext Upload3 file upload widget with embedded iframes """ def __init__(self, owner, locatordict={}): super(GroupsWikiEditForm3,self).__init__(owner,locatordict) # load hub's classes GroupsWikiEditForm_Locators = self.load_class('GroupsWikiEditForm_Locators') # update this object's locator self.locators.update(GroupsWikiEditForm_Locators.locators) # update the locators with those from the owner self.update_locators_from_owner() # setup page object's components self.rename = Link(self,{'base':'rename'}) # update the component's locators with this objects overrides self._updateLocators() def goto_rename(self): """click the rename link""" self.rename.click() class GroupsWikiEditForm3_Locators_Base(object): """locators for GroupsWikiEditForm3 object""" locators = { 'rename' : "xpath=//a[text()='here']", }
mit
2,063,670,874,993,383,400
27.073171
84
0.660875
false
couchbaselabs/devguide-examples
python/cas.py
1
1612
from __future__ import print_function
from couchbase.bucket import Bucket
from couchbase.bucket import LOCKMODE_WAIT
from threading import Thread
from couchbase.exceptions import KeyExistsError

cb = Bucket('couchbase://10.0.0.31/default', lockmode=LOCKMODE_WAIT)

cb.upsert('a_list', [])

print('Will attempt concurrent document mutations without CAS')


def add_item_to_list(client, new_item):
    l = client.get('a_list').value
    l.append(new_item)
    client.replace('a_list', l)


threads = [Thread(target=add_item_to_list, args=(cb, "item_" + str(x)))
           for x in range(0, 10)]

[t.start() for t in threads]
[t.join() for t in threads]

cur_list = cb.get('a_list').value
print('Current list has {0} elements'.format(len(cur_list)))
if len(cur_list) != 10:
    print('Concurrent modifications removed some of our items!', cur_list)


# The same as above, but using CAS
def add_item_to_list_safe(client, new_item):
    while True:
        rv = client.get('a_list')
        l = rv.value
        l.append(new_item)
        try:
            cb.replace('a_list', l, cas=rv.cas)
            return
        except KeyExistsError:
            print("Cas mismatch for item", new_item)
            continue


# Reset the list again
cb.upsert('a_list', [])

print('Will attempt concurrent modifications using CAS')

threads = [Thread(target=add_item_to_list_safe, args=(cb, "item_" + str(x)))
           for x in range(0, 10)]

[t.start() for t in threads]
[t.join() for t in threads]

cur_list = cb.get('a_list').value
print('Current list has {0} elements'.format(len(cur_list)))
assert len(cur_list) == 10
apache-2.0
2,503,954,480,258,937,300
26.322034
76
0.653226
false
tanonev/codewebs
src/dataBaseTools/PrecomputeNN.py
1
3695
import sys
import os.path
sys.path.append(os.path.abspath('../../'))
sys.path.append(os.path.abspath('../../site/cwsite'))

import src.util.DBSetup
from src.util.MLClass import MLClass
from src.util.FileSystem import FileSystem
from src.util.AstNetwork import AstNetwork
from src.util.Assignment import Assignment
from models.models import Octave
from operator import itemgetter
import logging
from sets import Set


class PrecomputeNN(object):

    def getASTs(self, assn, label):
        dataDir = FileSystem.getDataDir()
        outputDir = os.path.join(dataDir, 'incorrects')
        fileName = label + '_' + str(assn) + '.txt'
        path = os.path.join(outputDir, fileName)
        astList = []
        astFile = open(path)
        for line in astFile.readlines():
            astList.append(int(line))
        return Set(astList)

    def getAllParts(self):
        return [(4,1), (4,2), (4,3), (4,4), (4,5)]

    def getNN(self, corrects, incorrects, astNetwork):
        NNmap = {}
        numASTs = len(corrects) + len(incorrects)
        row = 0
        astNetwork.matrixFile.seek(0)
        while(True):
            if row % 100 == 0:
                logging.info(str(row) + ' of ' + str(numASTs))
            line = astNetwork.matrixFile.readline()
            if not line:
                break
            rowValues = map(int, line.strip().split())
            for col in range(row+1, len(rowValues)):
                value = rowValues[col]
                if value == -1:
                    continue
                if col in corrects:
                    try:
                        if value < NNmap[row][1]:
                            NNmap[row] = (col, value)
                    except KeyError:
                        NNmap[row] = (col, value)
                if row in corrects:
                    try:
                        if value < NNmap[col][1]:
                            NNmap[col] = (row, value)
                    except KeyError:
                        NNmap[col] = (row, value)
            row += 1
        return NNmap

    def writeNN(self, path, NNmap):
        fid = open(path,'wt')
        NNmaptuples = sorted(NNmap.iteritems(), key = itemgetter(0))
        for t in NNmaptuples:
            fid.write(str(t[0]) + ', ' + str(t[1][0]) + ', ' + str(t[1][1]) + '\n')
        fid.close()

    def initializeLog(self):
        logDir = os.path.join(FileSystem.getLogDir(),'PrecomputeNN')
        if not os.path.exists(logDir):
            os.makedirs(logDir)
        logFileName = os.path.join(logDir,'log')
        logging.basicConfig(filename = logFileName, format = '%(asctime)s %(message)s', \
                            datefmt = '%m/%d/%Y %I:%M:%S %p', level = logging.INFO)

    def run(self):
        self.initializeLog()
        for (h,p) in self.getAllParts():
            assn = Assignment(h,p)
            logging.info('PrecomputeNN (hw,part): ' + str(assn))
            corrects = self.getASTs(assn, 'corrects')
            incorrects = self.getASTs(assn, 'incorrects')
            distanceMatrix = FileSystem.loadDistanceMatrix(assn.getTuple(),False)
            subIdMap = FileSystem.loadSubmissionIdMap(assn.getTuple())
            astNetwork = AstNetwork(assn.getTuple(), distanceMatrix, subIdMap)
            NNmap = self.getNN(corrects, incorrects, astNetwork)
            outputDir = os.path.join(FileSystem.getDataDir(), 'nearestNeighbors')
            if not os.path.exists(outputDir):
                os.makedirs(outputDir)
            outputPath = os.path.join(outputDir, 'NNmap_' + str(assn) + '.txt')
            self.writeNN(outputPath, NNmap)


if __name__ == '__main__':
    PrecomputeNN().run()
mit
-6,443,573,087,008,330,000
35.584158
89
0.540731
false
oVirt/mom
mom/__init__.py
1
9505
from six.moves import configparser import os import time import re import logging.handlers from mom.LogUtils import * from mom.HostMonitor import HostMonitor from mom.HypervisorInterfaces.HypervisorInterface import ConnectionError from mom.GuestManager import GuestManager from mom.PolicyEngine import PolicyEngine from mom.RPCServer import RPCServer, enable_i8 from mom.MOMFuncs import MOMFuncs, EXPORTED_ATTRIBUTE class MOM: def __init__(self, conf_file, conf_overrides=None): self._load_config(conf_file, conf_overrides) self.logger = self._configure_logger() def run(self): if not self._validate_config(): self.logger.error("Invalid configuration. Unable to start") return # Start threads self.logger.info("MOM starting") self.config.set('__int__', 'running', '1') try: host_monitor = HostMonitor(self.config) hypervisor_iface = self.get_hypervisor_interface() if not hypervisor_iface: self.shutdown() guest_manager = GuestManager(self.config, hypervisor_iface) guest_manager.start() policy_engine = PolicyEngine(self.config, hypervisor_iface, host_monitor, guest_manager) threads = {'host_monitor': host_monitor, 'guest_manager': guest_manager, 'policy_engine': policy_engine} momFuncs = MOMFuncs(self.config, threads) self._setupAPI(momFuncs) rpc_server = RPCServer(self.config, momFuncs) except ConnectionError as e: self.logger.error( "Cannot connect to VDSM. " "This can happen when VDSM is starting. Error: %s", str(e) ) return except Exception as e: self.logger.exception("Failed to initialize MOM threads") return interval = self.config.getint('main', 'main-loop-interval') while self.config.getint('__int__', 'running') == 1: time.sleep(interval) if not self._threads_ok((host_monitor,guest_manager,policy_engine)): self.config.set('__int__', 'running', '0') # Check the RPC server separately from the other threads since it # can be disabled. if not rpc_server.thread_ok(): self.config.set('__int__', 'running', '0') self.logger.info("Shutting down RPC server.") rpc_server.shutdown() self.logger.info("Waiting for RPC server thread.") self._wait_for_thread(rpc_server, 5) self.logger.info("Waiting for policy engine thread.") self._wait_for_thread(policy_engine, 10) self.logger.info("Waiting for guest manager thread.") self._wait_for_thread(guest_manager, 5) self.logger.info("Waiting for host monitor thread.") self._wait_for_thread(host_monitor, 5) self.logger.info("MOM ending") def shutdown(self): self.config.set('__int__', 'running', '0') def _setupAPI(self, funcs): """ Initialize the public API in the MOMFuncs class and add its members to this MOM instance so they can be called by our owner as well. 
""" for funcName in dir(funcs): funcObj = getattr(funcs, funcName) if hasattr(funcObj, EXPORTED_ATTRIBUTE) and callable(funcObj): setattr(self, funcName, funcObj) def _load_config(self, fname, overrides): self.config = configparser.SafeConfigParser() # Set built-in defaults self.config.add_section('main') self.config.set('main', 'main-loop-interval', '5') self.config.set('main', 'host-monitor-interval', '5') self.config.set('main', 'guest-manager-interval', '5') self.config.set('main', 'hypervisor-interface', 'libvirt') self.config.set('main', 'guest-monitor-interval', '5') self.config.set('main', 'policy-engine-interval', '10') self.config.set('main', 'sample-history-length', '10') self.config.set('main', 'libvirt-hypervisor-uri', '') self.config.set('main', 'controllers', 'Balloon') self.config.set('main', 'plot-dir', '') self.config.set('main', 'rpc-port', '-1') self.config.set('main', 'policy', '') self.config.set('main', 'policy-dir', '') self.config.set('main', 'guest-manager-multi-thread', 'true') self.config.add_section('logging') self.config.set('logging', 'log', 'stdio') self.config.set('logging', 'verbosity', 'info') self.config.set('logging', 'max-bytes', '2097152') self.config.set('logging', 'backup-count', '5') self.config.add_section('host') self.config.set('host', 'collectors', 'HostMemory') self.config.add_section('guest') self.config.set('guest', 'collectors', 'GuestQemuProc, GuestMemory') # Override defaults from the config file self.config.read(fname) # Process configuration overrides from our owner. For example, momd # allows certain settings to be overriden via its command line. if overrides is not None: for sect in overrides.sections(): if sect not in self.config.sections(): continue for (item, value) in overrides.items(sect): self.config.set(sect, item, value) # Add non-customizable thread-global variables # The supplied config file must not contain a '__int__' section if self.config.has_section('__int__'): self.config.remove_section('__int__') self.config.add_section('__int__') self.config.set('__int__', 'running', '0') plot_subdir = self._get_plot_subdir(self.config.get('main', 'plot-dir')) self.config.set('__int__', 'plot-subdir', plot_subdir) def _validate_config(self): policy = self.config.get('main', 'policy') policy_dir = self.config.get('main', 'policy-dir') if policy and policy_dir: self.logger.error("Only one of 'policy' and 'policy-dir' may be" "specified") return False return True def _configure_logger(self): logger = logging.getLogger('mom') # MOM is a module with its own logging facility. Don't impact any # logging that might be done by the program that loads this. logger.propagate = False verbosity = self.config.get('logging', 'verbosity').lower() level = log_set_verbosity(logger, verbosity) log = self.config.get('logging', 'log') if log.lower() == 'stdio': handler = logging.StreamHandler() else: bytes = self.config.getint('logging', 'max-bytes') backups = self.config.getint('logging', 'backup-count') handler = logging.handlers.RotatingFileHandler(log, 'a', bytes, backups) handler.setLevel(level) formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") handler.setFormatter(formatter) logger.addHandler(handler) return logger def _get_plot_subdir(self, basedir): """ Create a new directory for plot files inside basedir. The name is in the format: momplot-NNN where NNN is an ascending sequence number. Return: The new directory name or '' on error. 
""" if basedir == '': return '' regex = re.compile(r'^momplot-(\d{3})$') try: names = os.listdir(basedir) except OSError as e: self.logger.warn("Cannot read plot-basedir %s: %s", basedir, e.strerror) return '' seq_num = -1 for name in names: m = regex.match(name) if m is not None: num = int(m.group(1)) if num > seq_num: seq_num = num seq_num = seq_num + 1 dir = "%s/momplot-%03d" % (basedir, seq_num) if seq_num > 999: self.logger.warn("Cannot create plot-dir because the sequence number "\ "is out of range. Clear the directory or choose a different one") return '' try: os.mkdir(dir) except OSError as e: self.logger.warn("Cannot create plot-dir %s: %s", dir, e.strerror) return '' return dir def _threads_ok(self, threads): """ Check to make sure a list of expected threads are still alive """ for t in threads: if not t.is_alive(): self.logger.error("Thread '%s' has exited" % t.getName()) return False return True def _wait_for_thread(self, t, timeout): """ Join a thread only if it is still running """ if t.is_alive(): t.join(timeout) def get_hypervisor_interface(self): name = self.config.get('main', 'hypervisor-interface').lower() self.logger.info("hypervisor interface %s",name) try: module = __import__('mom.HypervisorInterfaces.' + name + 'Interface', None, None, name) return module.instance(self.config) except ImportError: self.logger.error("Unable to import hypervisor interface: %s", name) return None
gpl-2.0
-7,535,552,825,987,476,000
39.105485
99
0.581273
false
zandao/stn3270
stn3270/field.py
1
1639
# -*- coding: utf-8 -*-
"""
******************
Super TN3270 Field
******************

Super TN3270 Field - stn3270.field - implements the field manipulation
on a 3270 virtual screen: read, write, verify and find fields.
"""


class Field:
    """It's a representation of a 3270 field, with a *start of field*
    sequence, its position (*row* and *column*), raw *text* and its
    ASCII *data* representation

    :param start_of_field: a 3270 SF sequence
    :param row: starting row of the field
    :param col: starting column of the field
    :param text: raw text of the field
    :param filler: ASCII character used to fill empty editable field
    :type start_of_field: string
    :type row: int
    :type col: int
    :type text: string
    :type filler: string
    """

    def __init__(self, start_of_field, row=None, col=None, text="", filler="_"):
        self.filler = filler
        self.start_of_field = self._SF(start_of_field)
        self.row = row
        self.col = col
        self.set_text(text)
        self.is_visible = ("c0=cd" not in start_of_field)
        self.is_editable = False
        for sf in self.start_of_field:
            self.is_editable = self.is_editable or sf in ("c0=c1","c0=cd")

    def set_text(self, text):
        """Sets the text of the field and the filtered data
        (text without filler characters)

        :param text: raw text of field
        :type text: string
        """
        self.text = text
        self.length = len(text)
        self.data = text.replace(self.filler," ").rstrip()
        return self.data

    def _SF(self, char):
        return char[3:-1].split(',')
lgpl-2.1
-6,518,581,165,051,224,000
31.78
92
0.597315
false
kfcpaladin/sze-the-game
renpy/debug.py
1
1941
# Copyright 2004-2017 Tom Rothamel <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

# This file contains debugging code that isn't enabled in normal Ren'Py
# operation.

from __future__ import print_function

import renpy

import __builtin__
import threading
import datetime
import os

real_open = __builtin__.open
__builtin__.real_file = __builtin__.file


def replacement_open(*args, **kwargs):

    rv = real_open(*args, **kwargs)

    if not renpy.game.contexts:
        return rv

    if renpy.game.context().init_phase:
        return rv

    if threading.current_thread().name != "MainThread":
        return rv

    print(datetime.datetime.now().strftime("%H:%M:%S"), "In main thread: open" + repr(args))

    return rv


def init_main_thread_open():
    if not "RENPY_DEBUG_MAIN_THREAD_OPEN" in os.environ:
        return

    __builtin__.open = replacement_open
    __builtin__.file = replacement_open
mit
1,905,923,962,222,076,000
31.898305
92
0.729006
false
pdebuyl/cg_md_polymerization
code/epoxy_setup.py
1
3569
import espressopp
import mpi4py.MPI as MPI


def get_velocity(system, n):
    """Obtain total velocity of an espressopp system."""
    total_v = espressopp.Real3D(0.)
    total_m = 0.
    for i in range(n):
        p = system.storage.getParticle(i)
        total_v += p.v*p.mass
        total_m += p.mass
    return total_v/total_m


def reset_velocity(system, n):
    """Reset the total velocity of an espressopp system."""
    excess_v = get_velocity(system, n)
    for i in range(n):
        v = system.storage.getParticle(i).v
        system.storage.modifyParticle(i, 'v', v-excess_v)


# LJ settings
sigma = 1.0
epsilon=1.0
caprad_LJ=0.85
rc = pow(2., 1./6.)

# FENE settings
K=30.
rMax=1.5
caprad_FENE=1.4

# Polymer chain settings
bondlen=0.97

# General settings
skin = 0.3


def chains_x_system(num_chains, monomers_per_chain, num_X, density=0.8, seed=None):
    num_particles = num_chains*monomers_per_chain + num_X
    L = pow(num_particles/density, 1./3.)
    box = (L, L, L)

    # Initialize the espressopp system
    system = espressopp.System()
    if seed is not None:
        system.rng = espressopp.esutil.RNG(seed)
    else:
        system.rng = espressopp.esutil.RNG()
    system.bc = espressopp.bc.OrthorhombicBC(system.rng, box)
    system.skin = skin
    nodeGrid = espressopp.tools.decomp.nodeGrid(MPI.COMM_WORLD.size)
    cellGrid = espressopp.tools.decomp.cellGrid(box, nodeGrid, rc, skin)
    system.storage = espressopp.storage.DomainDecomposition(system, nodeGrid, cellGrid)

    def normal_v():
        return espressopp.Real3D(system.rng.normal()*0.5, system.rng.normal()*0.5, system.rng.normal()*0.5)

    # Add the chains
    chainFPL = espressopp.FixedPairList(system.storage)
    pid = 0
    for i in range(num_chains):
        chain=[]
        startpos = system.bc.getRandomPos()
        positions, bonds = espressopp.tools.topology.polymerRW(pid, startpos, monomers_per_chain, bondlen)
        for k in range(monomers_per_chain):
            part = [pid + k, positions[k], normal_v()]
            chain.append(part)
        pid += monomers_per_chain
        system.storage.addParticles(chain, 'id', 'pos', 'v')
        chainFPL.addBonds(bonds)

    # Add the individual particles
    Xs = []
    for i in range(num_X):
        pos = system.bc.getRandomPos()
        v = espressopp.Real3D(system.rng.normal(),system.rng.normal(),system.rng.normal())
        Xs.append([pid, pos, v])
        pid += 1
    system.storage.addParticles(Xs, 'id', 'pos', 'v')

    # Define capped LJ potential
    verletList = espressopp.VerletList(system, cutoff=rc)
    LJCapped = espressopp.interaction.VerletListLennardJonesCapped(verletList)
    LJCapped.setPotential(type1=0, type2=0, potential=espressopp.interaction.LennardJonesCapped(epsilon=epsilon, sigma=sigma, cutoff=rc, caprad=caprad_LJ))
    system.addInteraction(LJCapped)

    # Define capped FENE potential
    potFENE = espressopp.interaction.FENECapped(K=K, r0=0.0, rMax=rMax, caprad=caprad_FENE)
    FENECapped = espressopp.interaction.FixedPairListFENECapped(system, chainFPL, potFENE)
    system.addInteraction(FENECapped)

    # Define integrator and StochasticVelocityRescaling thermostat
    integrator = espressopp.integrator.VelocityVerlet(system)
    thermostat = espressopp.integrator.StochasticVelocityRescaling(system)
    thermostat.temperature = 1.0
    integrator.addExtension(thermostat)

    system.storage.decompose()

    return system, integrator, LJCapped, verletList, FENECapped, chainFPL, thermostat, num_particles
bsd-3-clause
-3,066,971,447,088,541,000
34.336634
155
0.674979
false
hpparvi/PyTransit
pytransit/lpf/tessoclttvlpf.py
1
3050
#  PyTransit: fast and easy exoplanet transit modelling in Python.
#  Copyright (C) 2010-2019  Hannu Parviainen
#
#  This program is free software: you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation, either version 3 of the License, or
#  (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program.  If not, see <https://www.gnu.org/licenses/>.

from pathlib import Path

from astropy.table import Table
from numba import njit, prange
from numpy import atleast_2d, zeros, log, concatenate, pi, transpose, sum, compress, ones
from uncertainties import UFloat, ufloat

from .oclttvlpf import OCLTTVLPF
from ..utils.keplerlc import KeplerLC
from .baselines.legendrebaseline import LegendreBaseline


@njit(parallel=True, cache=False, fastmath=True)
def lnlike_normal_v(o, m, e):
    m = atleast_2d(m)
    npv = m.shape[0]
    npt = o.size
    lnl = zeros(npv)
    for i in prange(npv):
        lnl[i] = -npt*log(e[i, 0]) - 0.5*log(2*pi) - 0.5*sum(((o-m[i, :])/e[i ,0])**2)
    return lnl


class TESSCLTTVLPF(LegendreBaseline, OCLTTVLPF):
    def __init__(self, name: str, dfile: Path, zero_epoch: float, period: float, nsamples: int = 10,
                 trdur: float = 0.125, bldur: float = 0.3, nlegendre: int = 2, ctx = None, queue = None):
        zero_epoch = zero_epoch if isinstance(zero_epoch, UFloat) else ufloat(zero_epoch, 1e-5)
        period = period if isinstance(period, UFloat) else ufloat(period, 1e-7)

        tb = Table.read(dfile)
        self.bjdrefi = tb.meta['BJDREFI']
        zero_epoch = zero_epoch - self.bjdrefi

        df = tb.to_pandas().dropna(subset=['TIME', 'SAP_FLUX', 'PDCSAP_FLUX'])
        self.lc = lc = KeplerLC(df.TIME.values, df.SAP_FLUX.values, zeros(df.shape[0]),
                                zero_epoch.n, period.n, trdur, bldur)

        LegendreBaseline.__init__(self, nlegendre)
        OCLTTVLPF.__init__(self, name, zero_epoch, period, ['TESS'],
                           times=lc.time_per_transit, fluxes=lc.normalized_flux_per_transit,
                           pbids=zeros(lc.nt, 'int'), nsamples=nsamples, exptimes=[0.00139],
                           cl_ctx=ctx, cl_queue=queue)

        self.lnlikelihood = self.lnlikelihood_nb

    def create_pv_population(self, npop=50):
        pvp = self.ps.sample_from_prior(npop)
        return pvp

    def flux_model(self, pvp):
        tmodel = transpose(self.transit_model(pvp, copy=True)).copy()
        return tmodel * self.baseline(pvp)

    def lnlikelihood_nb(self, pvp):
        fmodel = self.flux_model(pvp).astype('d')
        err = 10**atleast_2d(pvp)[:, self._sl_err]
        return lnlike_normal_v(self.ofluxa, fmodel, err)
gpl-2.0
-3,970,768,457,033,635,300
40.780822
105
0.65541
false
RJRetro/mame
scripts/build/makedep.py
2
8404
#!/usr/bin/python ## ## license:BSD-3-Clause ## copyright-holders:Miodrag Milanovic from __future__ import with_statement import sys ## to ignore include of emu.h add it always to list files_included = ['src/emu/emu.h'] include_dirs = ['src/emu/', 'src/devices/', 'src/mame/'] mappings = dict() deps_files_included = [ ] deps_include_dirs = ['src/mame/'] components = [ ] drivers = [ ] def file_exists(root, srcfile, folder, inc_dir): includes = [ folder ] includes.extend(inc_dir) for line in includes: try: fp = open(root + line + srcfile, 'r') fp.close() return line + srcfile except IOError: pass return '' def add_c_if_exists(root, fullname): try: fp = open(root + fullname, 'r') fp.close() deps_files_included.append(fullname) except IOError: pass def add_rest_if_exists(root, srcfile,folder): t = srcfile.rsplit('/', 2) if t[1]=='includes': t[2] = t[2].replace('.h','.cpp') t[1] = 'drivers' add_c_if_exists(root,"/".join(t)) parse_file_for_deps(root, "/".join(t), folder) t[1] = 'machine' add_c_if_exists(root,"/".join(t)) parse_file_for_deps(root, "/".join(t), folder) t[1] = 'video' add_c_if_exists(root,"/".join(t)) parse_file_for_deps(root, "/".join(t), folder) t[1] = 'audio' add_c_if_exists(root,"/".join(t)) parse_file_for_deps(root, "/".join(t), folder) def parse_file_for_deps(root, srcfile, folder): try: fp = open(root + srcfile, 'r') except IOError: return 1 in_comment = 0 linenum = 0 for line in fp.readlines(): content = '' linenum+=1 srcptr = 0 while srcptr < len(line): c = line[srcptr] srcptr+=1 if ord(c)==13 or ord(c)==10: if ord(c)==13 and ord(line[srcptr])==10: srcptr+=1 continue if c==' ' or ord(c)==9: continue if in_comment==1 and c=='*' and line[srcptr]=='/' : srcptr+=1 in_comment = 0 continue if in_comment: continue if c=='/' and line[srcptr]=='*' : srcptr+=1 in_comment = 1 continue if c=='/' and line[srcptr]=='/' : break content += c content = content.strip() if len(content)>0: if content.startswith('#include'): name = content[8:] name = name.replace('"','') fullname = file_exists(root, name, folder,deps_include_dirs) if fullname in deps_files_included: continue if fullname!='': deps_files_included.append(fullname) add_c_if_exists(root, fullname.replace('.h','.cpp')) add_rest_if_exists(root, fullname,folder) newfolder = fullname.rsplit('/', 1)[0] + '/' parse_file_for_deps(root, fullname, newfolder) continue fp.close() return 0 def parse_file(root, srcfile, folder): try: fp = open(root + srcfile, 'r') except IOError: return 1 in_comment = 0 linenum = 0 for line in fp.readlines(): content = '' linenum+=1 srcptr = 0 while srcptr < len(line): c = line[srcptr] srcptr+=1 if ord(c)==13 or ord(c)==10: if ord(c)==13 and ord(line[srcptr])==10: srcptr+=1 continue if c==' ' or ord(c)==9: continue if in_comment==1 and c=='*' and line[srcptr]=='/' : srcptr+=1 in_comment = 0 continue if in_comment: continue if c=='/' and line[srcptr]=='*' : srcptr+=1 in_comment = 1 continue if c=='/' and line[srcptr]=='/' : break content += c content = content.strip() if len(content)>0: if content.startswith('#include'): name = content[8:] name = name.replace('"','') fullname = file_exists(root, name, folder,include_dirs) if fullname in files_included: continue if "src/lib/netlist/" in fullname: continue if fullname!='': if fullname in mappings.keys(): if not(mappings[fullname] in components): components.append(mappings[fullname]) files_included.append(fullname) newfolder = fullname.rsplit('/', 1)[0] + '/' parse_file(root, fullname, newfolder) if (fullname.endswith('.h')): parse_file(root, 
fullname.replace('.h','.cpp'), newfolder) continue fp.close() return 0 def parse_file_for_drivers(root, srcfile): srcfile = srcfile.replace('\\','/') if srcfile.startswith('src/mame/drivers'): splitname = srcfile.split('/', 4) drivers.append(splitname[3]) return 0 def parse_lua_file(srcfile): try: fp = open(srcfile, 'r') except IOError: sys.stderr.write("Unable to open source file '%s'\n" % srcfile) return 1 for line in fp.readlines(): content = line.strip() if len(content)>0: if content.startswith('--@'): name = content[3:] mappings[name.rsplit(',', 1)[0]] = name.rsplit(',', 1)[1] return 0 if len(sys.argv) < 5: print('Usage:') print(' makedep <root> <source.c> <type> <target>') sys.exit(0) root = sys.argv[1] + '/' parse_lua_file(root +'scripts/src/bus.lua') parse_lua_file(root +'scripts/src/cpu.lua') parse_lua_file(root +'scripts/src/machine.lua') parse_lua_file(root +'scripts/src/sound.lua') parse_lua_file(root +'scripts/src/video.lua') for filename in sys.argv[2].rsplit(',') : deps_files_included.append(filename.replace('\\','/')) parse_file_for_deps(root,filename,'') for filename in deps_files_included: parse_file(root,filename,'') for filename in sys.argv[2].rsplit(',') : parse_file_for_drivers(root,filename) # display output if sys.argv[3]=='drivers': #output the list of externs first for drv in sorted(drivers): print(drv) print("") if sys.argv[3]=='target': for line in components: sys.stdout.write("%s\n" % line) sys.stdout.write('\n') sys.stdout.write('function createProjects_mame_%s(_target, _subtarget)\n' % sys.argv[4]) sys.stdout.write(' project ("mame_%s")\n' % sys.argv[4]) sys.stdout.write(' targetsubdir(_target .."_" .. _subtarget)\n') sys.stdout.write(' kind (LIBTYPE)\n') sys.stdout.write(' uuid (os.uuid("drv-mame-%s"))\n' % sys.argv[4]) sys.stdout.write(' \n') sys.stdout.write(' includedirs {\n') sys.stdout.write(' MAME_DIR .. "src/osd",\n') sys.stdout.write(' MAME_DIR .. "src/emu",\n') sys.stdout.write(' MAME_DIR .. "src/devices",\n') sys.stdout.write(' MAME_DIR .. "src/mame",\n') sys.stdout.write(' MAME_DIR .. "src/lib",\n') sys.stdout.write(' MAME_DIR .. "src/lib/util",\n') sys.stdout.write(' MAME_DIR .. "src/lib/netlist",\n') sys.stdout.write(' MAME_DIR .. "3rdparty",\n') sys.stdout.write(' GEN_DIR .. "mame/layout",\n') sys.stdout.write(' ext_includedir("zlib"),\n') sys.stdout.write(' ext_includedir("flac"),\n') sys.stdout.write(' }\n') sys.stdout.write('\n') sys.stdout.write(' files{\n') for line in deps_files_included: sys.stdout.write(' MAME_DIR .. "%s",\n' % line) sys.stdout.write(' }\n') sys.stdout.write('end\n') sys.stdout.write('\n') sys.stdout.write('function linkProjects_mame_%s(_target, _subtarget)\n' % sys.argv[4]) sys.stdout.write(' links {\n') sys.stdout.write(' "mame_%s",\n' % sys.argv[4]) sys.stdout.write(' }\n') sys.stdout.write('end\n')
gpl-2.0
5,154,461,549,047,289,000
32.086614
92
0.510114
false
normic/django-data-approvals
tests/test_urls.py
1
1688
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Tests for `django-data-approvals` urls.
"""

from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.test import TestCase

from approvals.models import Approval


class TestURLs(TestCase):

    @classmethod
    def setUpTestData(cls):
        # Set up data for the TestCase
        cls.user = User.objects.create_user(
            username='testuser', email='testuser@...', password='secret'
        )
        # set the User model as content_type
        user_ct = ContentType.objects.get_for_model(cls.user)
        approval = Approval.objects.create(
            state=Approval.REQUESTED,
            created_by=cls.user,
            content_type=user_ct
        )

    def test_index(self):
        url = reverse('approvals:index', args=[])
        self.assertEqual(url, '/approvals/')

    def test_create(self):
        url = reverse('approvals:approval_create')
        self.assertEqual(url, '/approvals/approval/create/')

    def test_detail(self):
        url = reverse('approvals:approval_detail', kwargs={'pk': 1})
        self.assertEqual(url, '/approvals/approval/1/')

    def test_update(self):
        url = reverse('approvals:approval_update', kwargs={'pk': 1})
        self.assertEqual(url, '/approvals/approval/1/update/')

    def test_list(self):
        url = reverse('approvals:approval_list')
        self.assertEqual(url, '/approvals/approval/')

    def test_delete(self):
        url = reverse('approvals:approval_delete', kwargs={'pk': 1})
        self.assertEqual(url, '/approvals/approval/1/delete/')
bsd-3-clause
3,095,041,702,529,421,300
29.142857
72
0.64218
false
izilly/izdvd
izdvd/dvd.py
1
34440
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Copyright (c) 2013 William Adams # Distributed under the terms of the Modified BSD License. # The full license is in the file LICENSE, distributed with this software. # from izdvd.dvdmenu import DVDMenu from izdvd.encoder import Encoder from izdvd import utils from izdvd import user_input from izdvd import config import sys import subprocess import math from datetime import timedelta import os from lxml import etree import glob import re import logging import difflib class DVD (object): def __init__(self, # input in_vids=None, in_dirs=None, in_srts=None, menu_imgs=None, menu_labels=None, menu_bg=None, # input options vid_fmts=['mp4', 'avi', 'mkv'], img_fmts=['png', 'jpg', 'bmp', 'gif'], sub_fmts=['srt'], img_names=['poster', 'folder'], one_vid_per_dir=False, label_from_img=False, label_from_dir=True, strip_label_year=True, no_encode_v=False, no_encode_a=False, unstack_vids=None, # output locations out_name=None, out_dir=None, tmp_dir=None, # output options no_prompt=False, with_menu=True, menu_only=False, with_author_dvd=True, #~ dvd_size_bits=37602983936, dvd_size_bytes=4700372992, # dvd options audio_lang='en', with_subs=False, sub_lang='en', dvd_format='NTSC', dvd_ar=None, vbitrate=None, abitrate=196608, two_pass=True, separate_titles=True, separate_titlesets=False, ar_threshold=1.38, # ------bg opts------ # padding outer_padding=None, inner_padding=None, label_padding=None, # size/shape menu_ar=16/9, # buttons button_border_color=None, button_border_thickness=None, button_highlight_color=None, button_highlight_thickness=None, button_select_color=None, shadow_sigma=None, shadow_x_offset=None, shadow_y_offset=None, # labels with_menu_labels=False, label_line_height=None, label_lines=None, # ------menu opts------ menu_audio=None, no_loop_menu=False, frames=360, mode='dvd'): self.uid = str(id(self)) # input self.in_vids = in_vids self.in_dirs = in_dirs self.in_srts = in_srts self.menu_imgs = menu_imgs self.menu_labels = menu_labels self.menu_bg = menu_bg # input options self.vid_fmts = vid_fmts self.img_fmts = img_fmts self.sub_fmts = sub_fmts self.img_names = img_names self.one_vid_per_dir = one_vid_per_dir self.label_from_img = label_from_img self.label_from_dir = label_from_dir self.strip_label_year = strip_label_year self.no_encode_v = no_encode_v self.no_encode_a = no_encode_a self.unstack_vids = unstack_vids # output locations self.out_name = out_name self.out_dir = out_dir self.tmp_dir = tmp_dir # output options self.no_prompt = no_prompt self.with_menu = with_menu self.menu_only = menu_only self.with_author_dvd = with_author_dvd self.dvd_size_bytes = dvd_size_bytes self.dvd_size_bits = dvd_size_bytes * 8 # dvd options self.audio_lang = audio_lang self.with_subs = with_subs self.sub_lang = sub_lang self.dvd_format = dvd_format self.dvd_ar = dvd_ar self.vbitrate = vbitrate self.abitrate = abitrate self.two_pass = two_pass self.separate_titles = separate_titles self.separate_titlesets = separate_titlesets self.ar_threshold = ar_threshold # menu options if type(menu_ar) == str: ar_w, ar_h = menu_ar.split(':') menu_ar = int(ar_w) / int(ar_h) self.menu_ar=menu_ar self.outer_padding = outer_padding self.inner_padding = inner_padding self.label_padding = label_padding self.button_border_color = button_border_color self.button_border_thickness = button_border_thickness self.button_highlight_color = button_highlight_color self.button_highlight_thickness = button_highlight_thickness self.button_select_color = button_select_color 
self.shadow_sigma = shadow_sigma self.shadow_x_offset = shadow_x_offset self.shadow_y_offset = shadow_y_offset self.with_menu_labels = with_menu_labels self.label_line_height = label_line_height self.label_lines = label_lines self.menu_audio = menu_audio self.no_loop_menu = no_loop_menu self.frames = frames self.mode = mode #------------------------------- if self.menu_ar is None: self.menu_ar = self.dvd_ar self.get_in_vids() self.get_menu_imgs() self.get_menu_labels() self.get_subs() self.get_out_paths() # get information about input video self.get_media_info() self.calculate_vbitrate() self.log_output_info() self.log_input_info() self.log_titlesets() self.log_dvd_info() self.prompt_input_output() # make menu if self.with_menu or self.menu_only: self.get_menu() if self.menu_only: return #~ self.log_menu_info() self.prompt_menu() # prepare mpeg2 files self.encode_video() self.create_dvd_xml() # author DVD if self.with_author_dvd: self.author_dvd() def get_out_paths(self): paths = utils.get_out_paths(config.PROG_NAME, self.out_name, self.out_dir, self.tmp_dir, self.dvd_size_bytes * 1.2) self.out_name, self.out_dir, self.tmp_dir = paths self.out_dvd_dir = os.path.join(self.out_dir, 'DVD') #~ self.out_files_dir = os.path.join(self.out_dir, 'files') self.out_dvd_xml = os.path.join(self.out_dir, '{}_dvd.xml'.format(self.out_name)) self.out_log = os.path.join(self.out_dir, '{}.log'.format(self.out_name)) # make dirs if they don't exist for i in [self.out_dvd_dir, self.tmp_dir]: if not os.path.exists(i): os.makedirs(i) # check available space devices = {} dvd_size = self.dvd_size_bytes for d,s in zip([self.out_dvd_dir, self.tmp_dir], [dvd_size*1.05, dvd_size*1.05]): dev = os.stat(d).st_dev if devices.get(dev): devices[dev] -= s else: devices[dev] = utils.get_space_available(i) - s if min(devices.values()) < 1024*1024: raise self.logger = logging.getLogger('{}.dvd'.format(config.PROG_NAME)) self.logger.addHandler(logging.FileHandler(self.out_log)) self.logger.setLevel(logging.INFO) def get_in_vids(self): if not self.in_vids: if self.unstack_vids is None: self.unstack_vids = True in_vids = [] if not self.in_dirs: raise for d in self.in_dirs: for fmt in self.vid_fmts: pat = os.path.join(d, '*.{}'.format(fmt)) found = sorted(glob.glob(pat)) if found: if self.one_vid_per_dir: in_vids.extend(found[:1]) break else: in_vids.extend(found) self.in_vids = [i for i in in_vids if i is not None] else: if self.unstack_vids is None: self.unstack_vids = False def get_menu_imgs(self): if not self.with_menu: self.menu_imgs = [None for i in self.in_vids] return if not self.menu_imgs: self.menu_imgs = [] for i in self.in_vids: img = self.get_matching_file(i, self.img_fmts, self.img_names) self.menu_imgs.append(img) def get_menu_labels(self): if not self.menu_labels: self.menu_labels = [] # get labels if self.label_from_img: label_list = self.menu_imgs else: label_list = self.in_vids if self.label_from_dir: pt = 0 else: pt = 1 self.menu_labels = [os.path.splitext( os.path.basename(os.path.split(i)[pt]))[0] if i is not None else None for i in label_list] else: self.with_menu_labels = True self.vid_labels = self.menu_labels if self.with_menu and self.with_menu_labels: if self.strip_label_year: pat = r'\s*\([-./\d]{2,12}\)\s*$' self.menu_labels = [re.sub(pat, '', i) for i in self.menu_labels] else: self.menu_labels = [None for i in self.in_vids] def get_subs(self): if self.with_subs: if not self.in_srts: self.in_srts = [] for i in self.in_vids: s = self.get_matching_file(i, self.sub_fmts) self.in_srts.append(s) else: 
self.in_srts = [None for i in self.in_vids] def get_matching_file(self, vid, fmts, names=[]): fmts = [i.lower() for i in fmts] dirname, basename = os.path.split(vid) name, ext = os.path.splitext(basename) for n in [name, basename, '{}*'.format(name)] + names: search_base = os.path.join(dirname, n) for fmt in fmts: search_name = '.'.join([search_base, fmt]) found = sorted(glob.glob(search_name)) if found: return found[0] # if no exact match found, use difflib to get the closest match matches = [] for fmt in fmts: ideal_match = '{}.{}'.format(name, fmt).lower() #~ dir_files = glob.glob(os.path.join(dirname, '*.{}'.format(fmt))) dir_files = [i for i in os.listdir(dirname) if os.path.splitext(i)[1].lstrip('.').lower() == fmt] dir_files_lower = [i.lower() for i in dir_files] similar = difflib.get_close_matches(ideal_match, dir_files_lower) if similar: score = difflib.SequenceMatcher(None, ideal_match, similar[0].lower()) match_basename = dir_files[dir_files_lower.index(similar[0])] match_path = os.path.join(dirname, match_basename) matches.append((score, match_path)) if matches: best_match = sorted(matches, key=lambda i: i[0], reverse=True)[0] return best_match[1] # if still no match found and one_vid_per_dir, we search for # match based only on file extension if self.one_vid_per_dir: matches = [] for fmt in fmts: dir_files = [i for i in os.listdir(dirname) if os.path.splitext(i)[1].lstrip('.').lower() == fmt] if dir_files: # pick the file with the shortest name # (reverse the sorting so that if there is a tie, # the file with the lowest index in dir_files will # be picked) shortest = sorted(dir_files, key=lambda i: len(i), reverse=True)[-1] matches.append(os.path.join(dirname, shortest)) if matches: shortest = sorted(matches, key=lambda i: len(i), reverse=True)[-1] return shortest return None def get_media_info(self): vids = [] fmt = ('--output=Video;%Duration%|^|%Width%|^|%Height%|^|' '%PixelAspectRatio%|^|%DisplayAspectRatio%') for n,i in enumerate(self.in_vids): subs = [self.in_srts[n]] if self.in_srts is not None else [None] if self.unstack_vids: stacked = self.get_stacked_vids(i) if self.with_subs: subs = [self.in_srts[n]] addl_subs = [self.get_matching_file(i, ['srt'], []) for i in stacked[1:]] for sb in addl_subs: if sb not in subs: subs.append(sb) else: stacked = [i] if self.with_subs: subs = [self.in_srts[n]] v = {} duration = 0 for path in stacked: mi = subprocess.check_output(['mediainfo', fmt, path], universal_newlines=True).strip() d_ms,w,h,par,dar = mi.split('|^|') d_s = int(d_ms) / 1000 duration += d_s width = int(w) height = int(h) par = float(par) dar = float(dar) ar = (width/height) * par nv = {'ar': ar, 'dar': dar, 'width': width, 'height': height} if v: if nv != v: raise else: v.update(nv) v['in'] = stacked v['mpeg'] = '' #~ v['srt'] = self.in_srts[n] v['srt'] = subs v['duration'] = duration v['img'] = self.menu_imgs[n] v['menu_label'] = self.menu_labels[n] v['vid_label'] = self.vid_labels[n] vids.append(v) self.vids = vids self.titlesets = self.split_titlesets() self.durations = [i['duration'] for i in vids] self.duration_total = sum(self.durations) def get_stacked_vids(self, vid_path): vid_dir, vid_name = os.path.split(vid_path) paths = [i for i in os.listdir(vid_dir) if i != vid_name] regex = self.get_stacking_regex() matches = [] for r in regex: if not matches: vm = re.search(r, vid_name, re.I) if vm: ve = vm.expand(r'\1\3\4') for p in paths: pm = re.search(r, p, re.I) if pm: pe = pm.expand(r'\1\3\4') if pe == ve: matches.append(os.path.join(vid_dir, p)) return 
[vid_path] + matches def get_stacking_regex(self): re_tem = (r'^(.*?)' # title r'{}' # volume r'(.*?)' # ignore r'(\.[^.]+)' # extension r'$') re_tem_labeled_nums = r'({0}*(?:{1}){0}*{2}+)'.format(config.RE_PARTS_SEP, config.RE_VOL_PREFIXES, config.RE_VOL_NUMS) re_tem_labeled_letters = r'({0}*(?:{1}){0}*{2})'.format(config.RE_PARTS_SEP, config.RE_VOL_PREFIXES, config.RE_VOL_LETTERS) re_tem_bare_letters = r'({0}*{1})'.format(config.RE_PARTS_SEP, config.RE_VOL_LETTERS) re_stacked_labeled_nums = re_tem.format(re_tem_labeled_nums) re_stacked_labeled_letters = re_tem.format(re_tem_labeled_letters) re_stacked_bare_letters = re_tem.format(re_tem_bare_letters) return [re_stacked_labeled_nums, re_stacked_labeled_letters, re_stacked_bare_letters] def log_output_info(self): logs = list(zip(['Name', 'DVD', 'tmp'], [self.out_name, self.out_dvd_dir, self.tmp_dir])) utils.log_items(logs, 'Output Paths', col_width=16, logger=self.logger) def log_input_info(self): utils.log_items(heading='Video Information', items=[], lines_before=1, logger=self.logger) for n,i in enumerate(self.vids): #~ dirs = [i['in'], i['img'], i['srt']] dirs = [p for p in i['in']] if i['srt']: dirs.extend(i['srt']) if i['img']: dirs.append(i['img']) #~ dirs.extend([i['img'], i['srt']]) dirs = [os.path.dirname(i) for i in dirs if i is not None] commonprefix = utils.get_commonprefix(dirs) if len(commonprefix) > 1: in_dir = commonprefix #~ in_vid = os.path.relpath(i['in'], commonprefix) in_vids = [os.path.relpath(v, commonprefix) for v in i['in']] if i['srt']: in_srt = [os.path.relpath(v, commonprefix) if v else None for v in i['srt']] else: in_srt = None if i['img']: in_img = os.path.relpath(i['img'], commonprefix) if i['img'] else None else: in_img = None #~ in_srt = os.path.relpath(i['srt'], commonprefix) else: in_dir = None #~ in_vid = i['in'] in_vids = [v for v in i['in']] in_img = i['img'] in_srt = i['srt'] #~ name = os.path.basename(i['in']) duration = self.get_duration_string(i['duration']) keys = ['In file(s)', 'Image', 'Label', 'Subtitle', 'Aspect Ratio', 'Duration'] vals = [in_vids, in_img, i['menu_label'], in_srt, '{:.2f}'.format(i['ar']), duration] if not self.with_menu_labels: vals.pop(keys.index('Label')) keys.pop(keys.index('Label')) if not self.with_subs: vals.pop(keys.index('Subtitle')) keys.pop(keys.index('Subtitle')) log_data = list(zip(keys, vals)) if in_dir: log_data.append(('In Dir', in_dir)) utils.log_items('#{}: {}:'.format(n+1, i['vid_label']), lines_before=0, sep_pre='-', sep_post='-', logger=self.logger) utils.log_items(log_data, col_width=12, indent=4, lines_before=0, logger=self.logger) def log_titlesets(self): utils.log_items(heading='Titlesets', items=[], lines_before=1, logger=self.logger) for n,i in enumerate(self.titlesets): ar = i['ar'] seconds = sum([d['duration'] for d in i['vids']]) duration = self.get_duration_string(seconds) log_data = list(zip(['Aspect Ratio', 'Duration', 'Titles'], [ar, duration, '{} of {}'.format(len(i['vids']), len(self.vids))])) log_data.append(('Videos', [v['vid_label'] for v in i['vids']])) utils.log_items('Titleset #{} of {}'.format(n+1, len(self.titlesets)), lines_before=0, sep_pre='-', sep_post='-', logger=self.logger) utils.log_items(log_data, col_width=12, indent=4, lines_before=0, logger=self.logger) def log_dvd_info(self): total_bitrate = self.vbitrate + self.abitrate log_data = list(zip(['Total Duration', 'Total Bitrate', 'Video Bitrate', 'Audio Bitrate'], [str(timedelta(seconds=self.duration_total)), '{:.1f} kbps'.format(total_bitrate / 1024), '{:.1f} 
kbps'.format(self.vbitrate / 1024), '{:.1f} kbps'.format(self.abitrate / 1024),])) utils.log_items(log_data, 'DVD Info', col_width=16, logger=self.logger) def prompt_input_output(self): if self.no_prompt: return choices = ['Continue', 'Play a video', 'Display a menu image', 'List contents of a directory'] while True: resp = user_input.prompt_user_list(choices) if resp is False: sys.exit() elif resp == 0: break vids = [i['vid_label'] for i in self.vids] chosen = user_input.prompt_user_list(vids, header=choices[resp]) # TODO: offer choice of files when video is stacked chosen_vid = self.vids[chosen] vid_path = chosen_vid['in'][0] img_path = chosen_vid['img'] if resp == 1: o = subprocess.check_call([config.VIDEO_PLAYER, vid_path], stderr=subprocess.STDOUT, stdout=subprocess.DEVNULL) elif resp == 2: o = subprocess.check_call([config.IMAGE_VIEWER, img_path], stderr=subprocess.STDOUT, stdout=subprocess.DEVNULL) elif resp == 3: o = subprocess.check_output(['ls', '-lhaF', '--color=auto', os.path.dirname(vid_path)], universal_newlines=True) print('\n{}\n\n{}'.format(vid_path, o.strip())) def prompt_menu(self): if self.no_prompt: return choices = ['Continue', 'Display Menu Image', 'Play Menu Video'] while True: resp = user_input.prompt_user_list(choices) if resp is False: sys.exit() elif resp == 0: break if resp == 1: o = subprocess.check_call([config.IMAGE_VIEWER, self.menu.bg.path_bg_img]) elif resp == 2: o = subprocess.check_call([config.VIDEO_PLAYER, self.menu.path_menu_mpg], stderr=subprocess.STDOUT, stdout=subprocess.DEVNULL) def get_duration_string(self, seconds): h,m,s = str(timedelta(seconds=seconds)).split(':') duration = '{:02.0f}:{:02.0f}:{:02.0f}'.format(float(h), float(m), float(s)) return duration def split_titlesets(self): titlesets = [{'ar': 0, 'vids': [] }] for n,i in enumerate(self.vids): if self.split_titlesets: if i['ar'] < self.ar_threshold: ar = '4:3' else: ar = '16:9' else: ar = self.dvd_ar if ar == titlesets[-1]['ar']: titlesets[-1]['vids'].append(i) else: titlesets.append({'ar': ar, 'vids': [i] }) titlesets = [i for i in titlesets if i['vids']] # automatically set dvd_ar if unset if self.dvd_ar is None: ars = [i['ar'] for i in titlesets] if '16:9' in ars: self.dvd_ar = 16/9 else: self.dvd_ar = 4/3 if self.menu_ar is None: self.menu_ar = self.dvd_ar return titlesets def calculate_vbitrate(self): duration = self.duration_total abitrate = self.get_audio_bitrate() available = self.dvd_size_bits / duration available -= available * .05 v_available = available - abitrate if not self.vbitrate: self.vbitrate = math.floor(v_available) else: if self.vbitrate > v_available: print('WARNING: Not enough space to encode at specified', 'audio/video bitrates!') total_bitrate = self.vbitrate + self.abitrate #~ # 9800kbps (dvd max) = 10035200 bits per second #~ if total_bitrate > 10035200: #~ self.vbitrate = math.floor(10035200 - self.abitrate) # 9800kbps (dvd max) = 10035200 bits per second if total_bitrate > 9000000: self.vbitrate = math.floor(9000000 - self.abitrate) def get_audio_bitrate(self): return self.abitrate def get_menu(self): utils.log_items(heading='Making DVD Menu...', items=False, sep=None, sep_post='-', lines_before=2, logger=self.logger) if not self.with_menu_labels: self.menu_label_line_height = 0 self.menu_labels = None menu_args = {} menu_attrs = [ 'out_name', 'tmp_dir', 'outer_padding', 'inner_padding', 'label_padding', 'menu_ar', 'button_border_color', 'button_border_thickness', 'button_highlight_color', 'button_highlight_thickness', 'button_select_color', 
'shadow_sigma', 'shadow_x_offset', 'shadow_y_offset', 'label_line_height', 'label_lines', 'menu_audio', 'frames', 'mode'] for k in menu_attrs: v = getattr(self, k) if v is not None: menu_args[k] = v self.menu = DVDMenu(self.menu_imgs, menu_bg=self.menu_bg, menu_labels=self.menu_labels, out_dir=self.tmp_dir, #~ tmp_dir=self.tmp_dir, dvd_format=self.dvd_format, out_log=self.out_log, **menu_args) self.blank_menu = DVDMenu(menu_imgs=None, out_dir=self.tmp_dir, out_name='blank', tmp_dir=self.tmp_dir, menu_ar=self.menu_ar, dvd_format=self.dvd_format, frames=1, mode=self.mode, no_logging=True) def encode_video(self): # TODO: self.vids[n]['in'] is now a list of paths if self.no_encode_v: utils.log_items('Skipping encoding mpeg2 video...', logger=self.logger) for i in self.vids: i['mpeg'] = i['in'][0] return utils.log_items(heading='Encoding mpeg2 video...', items=False, logger=self.logger) if self.dvd_ar == 16/9: aspect = '16:9' else: aspect = '4:3' for ts in self.titlesets: aspect = ts['ar'] for v in ts['vids']: e = Encoder(v['in'], out_dir=self.tmp_dir, vbitrate=self.vbitrate, abitrate=self.abitrate, two_pass=self.two_pass, aspect=aspect, dvd_format=self.dvd_format, with_subs=self.with_subs, in_srt=v['srt'][0]) mpeg = e.encode() v['mpeg'] = mpeg def create_dvd_xml(self): utils.log_items(heading='Making dvdauthor xml...', items=False, logger=self.logger) if self.dvd_format == 'PAL': fmt = 'pal' else: fmt = 'ntsc' if self.dvd_ar == 16/9: dvd_ar = '16:9' else: dvd_ar = '4:3' if self.menu.menu_ar == 16/9: menu_ar = '16:9' else: menu_ar = '4:3' dvdauthor = etree.Element('dvdauthor', jumppad='on') vmgm = etree.SubElement(dvdauthor, 'vmgm') # vmgm menu if self.menu: menus = etree.SubElement(vmgm, 'menus') menus_vid = etree.SubElement(menus, 'video', format=fmt, aspect=menu_ar) if menu_ar == '16:9': menus_vid.set('widescreen', 'nopanscan') menus_subpicture = etree.SubElement(menus, 'subpicture') sub_stream_ws = etree.SubElement(menus_subpicture, 'stream', id='0', mode='widescreen') sub_stream_lb = etree.SubElement(menus_subpicture, 'stream', id='1', mode='letterbox') menus_pgc = etree.SubElement(menus, 'pgc') #~ for n,i in enumerate(self.menu.buttons): for n,i in enumerate(self.menu.bg.button_imgs): #~ b = etree.SubElement(menus_pgc, 'button', name=i) b = etree.SubElement(menus_pgc, 'button') b.text = 'jump title {};'.format(n+1) menus_vob = etree.SubElement(menus_pgc, 'vob', file=self.menu.path_menu_mpg) menus_post = etree.SubElement(menus_pgc, 'post') if self.no_loop_menu: menus_post.text = 'jump title 1;' else: menus_post.text = 'jump cell 1;' # titlesets for n,ts in enumerate(self.titlesets): titleset = etree.SubElement(dvdauthor, 'titleset') blank_menus = etree.SubElement(titleset, 'menus') blank_menus_pgc = etree.SubElement(blank_menus, 'pgc') blank_menus_pre = etree.SubElement(blank_menus_pgc, 'pre') blank_menus_pre.text = 'jump vmgm menu;' blank_menus_vob = etree.SubElement(blank_menus_pgc, 'vob', file=self.blank_menu.path_menu_mpg) blank_menus_post = etree.SubElement(blank_menus_pgc, 'post') blank_menus_post.text = 'jump vmgm menu;' titles = etree.SubElement(titleset, 'titles') titles_vid = etree.SubElement(titles, 'video', format=fmt, aspect=ts['ar']) titles_audio = etree.SubElement(titles, 'audio', lang=self.audio_lang) if self.with_subs: titles_sub = etree.SubElement(titles, 'subpicture', lang=self.sub_lang) #~ mpeg_files = [v['mpeg'] for i in ts for v in i['vids']] mpeg_files = [v['mpeg'] for v in ts['vids']] if n == len(self.titlesets) - 1: call_target = 'call vmgm menu;' else: call_target 
= 'jump titleset {} title 1;'.format(n+2) titles.extend(self.populate_pgcgroup(mpeg_files, self.separate_titles, call_target)) # write xml to disk tree = etree.ElementTree(dvdauthor) tree.write(self.out_dvd_xml, encoding='UTF-8', pretty_print=True) def populate_pgcgroup(self, in_vids, separate_titles, call_target): groups = [] vobs = [etree.Element('vob', file=i) for i in in_vids] if separate_titles: for n,i in enumerate(vobs): pgc = etree.Element('pgc') pgc.append(i) #~ pgc.extend(i) post = etree.SubElement(pgc, 'post') if n == len(vobs)-1: post.text = call_target else: post.text = 'jump title {};'.format(n+2) groups.append(pgc) else: pgc = etree.Element('pgc') pgc.extend(vobs) post = etree.SubElement(pgc, 'post') post.text = call_target groups.append(pgc) return groups def author_dvd(self): utils.log_items(heading='Authoring DVD...', items=False, logger=self.logger) e = dict(os.environ) e['VIDEO_FORMAT'] = self.dvd_format cmd = ['dvdauthor', '-x', self.out_dvd_xml, '-o', self.out_dvd_dir] o = subprocess.check_output(cmd, env=e, universal_newlines=True)
bsd-3-clause
-7,510,610,660,816,949,000
40.594203
87
0.455749
false
pku9104038/edx-platform
common/lib/capa/capa/capa_problem.py
1
28574
# # File: capa/capa_problem.py # # Nomenclature: # # A capa Problem is a collection of text and capa Response questions. # Each Response may have one or more Input entry fields. # The capa problem may include a solution. # """ Main module which shows problems (of "capa" type). This is used by capa_module. """ from datetime import datetime import logging import os.path import re from lxml import etree from xml.sax.saxutils import unescape from copy import deepcopy from capa.correctmap import CorrectMap import capa.inputtypes as inputtypes import capa.customrender as customrender import capa.responsetypes as responsetypes from capa.util import contextualize_text, convert_files_to_filenames import capa.xqueue_interface as xqueue_interface from capa.safe_exec import safe_exec from pytz import UTC # extra things displayed after "show answers" is pressed solution_tags = ['solution'] # these get captured as student responses response_properties = ["codeparam", "responseparam", "answer", "openendedparam"] # special problem tags which should be turned into innocuous HTML html_transforms = { 'problem': {'tag': 'div'}, 'text': {'tag': 'span'}, 'math': {'tag': 'span'}, } # These should be removed from HTML output, including all subelements html_problem_semantics = [ "codeparam", "responseparam", "answer", "script", "hintgroup", "openendedparam", "openendedrubric" ] log = logging.getLogger(__name__) #----------------------------------------------------------------------------- # main class for this module class LoncapaSystem(object): """ An encapsulation of resources needed from the outside. These interfaces are collected here so that a caller of LoncapaProblem can provide these resources however make sense for their environment, and this code can remain independent. Attributes: i18n: an object implementing the `gettext.Translations` interface so that we can use `.ugettext` to localize strings. See :class:`ModuleSystem` for documentation of other attributes. """ def __init__( # pylint: disable=invalid-name self, ajax_url, anonymous_student_id, cache, can_execute_unsafe_code, DEBUG, # pylint: disable=invalid-name filestore, i18n, node_path, render_template, seed, # Why do we do this if we have self.seed? STATIC_URL, # pylint: disable=invalid-name xqueue, ): self.ajax_url = ajax_url self.anonymous_student_id = anonymous_student_id self.cache = cache self.can_execute_unsafe_code = can_execute_unsafe_code self.DEBUG = DEBUG # pylint: disable=invalid-name self.filestore = filestore self.i18n = i18n self.node_path = node_path self.render_template = render_template self.seed = seed # Why do we do this if we have self.seed? self.STATIC_URL = STATIC_URL # pylint: disable=invalid-name self.xqueue = xqueue class LoncapaProblem(object): """ Main class for capa Problems. """ def __init__(self, problem_text, id, capa_system, state=None, seed=None): """ Initializes capa Problem. Arguments: problem_text (string): xml defining the problem. id (string): identifier for this problem, often a filename (no spaces). capa_system (LoncapaSystem): LoncapaSystem instance which provides OS, rendering, user context, and other resources. 
state (dict): containing the following keys: - `seed` (int) random number generator seed - `student_answers` (dict) maps input id to the stored answer for that input - `correct_map` (CorrectMap) a map of each input to their 'correctness' - `done` (bool) indicates whether or not this problem is considered done - `input_state` (dict) maps input_id to a dictionary that holds the state for that input seed (int): random number generator seed. """ ## Initialize class variables from state self.do_reset() self.problem_id = id self.capa_system = capa_system state = state or {} # Set seed according to the following priority: # 1. Contained in problem's state # 2. Passed into capa_problem via constructor self.seed = state.get('seed', seed) assert self.seed is not None, "Seed must be provided for LoncapaProblem." self.student_answers = state.get('student_answers', {}) if 'correct_map' in state: self.correct_map.set_dict(state['correct_map']) self.done = state.get('done', False) self.input_state = state.get('input_state', {}) # Convert startouttext and endouttext to proper <text></text> problem_text = re.sub(r"startouttext\s*/", "text", problem_text) problem_text = re.sub(r"endouttext\s*/", "/text", problem_text) self.problem_text = problem_text # parse problem XML file into an element tree self.tree = etree.XML(problem_text) # handle any <include file="foo"> tags self._process_includes() # construct script processor context (eg for customresponse problems) self.context = self._extract_context(self.tree) # Pre-parse the XML tree: modifies it to add ID's and perform some in-place # transformations. This also creates the dict (self.responders) of Response # instances for each question in the problem. The dict has keys = xml subtree of # Response, values = Response instance self._preprocess_problem(self.tree) if not self.student_answers: # True when student_answers is an empty dict self.set_initial_display() # dictionary of InputType objects associated with this problem # input_id string -> InputType object self.inputs = {} self.extracted_tree = self._extract_html(self.tree) def do_reset(self): """ Reset internal state to unfinished, with no answers """ self.student_answers = dict() self.correct_map = CorrectMap() self.done = False def set_initial_display(self): """ Set the student's answers to the responders' initial displays, if specified. """ initial_answers = dict() for responder in self.responders.values(): if hasattr(responder, 'get_initial_display'): initial_answers.update(responder.get_initial_display()) self.student_answers = initial_answers def __unicode__(self): return u"LoncapaProblem ({0})".format(self.problem_id) def get_state(self): """ Stored per-user session data neeeded to: 1) Recreate the problem 2) Populate any student answers. """ return {'seed': self.seed, 'student_answers': self.student_answers, 'correct_map': self.correct_map.get_dict(), 'input_state': self.input_state, 'done': self.done} def get_max_score(self): """ Return the maximum score for this problem. """ maxscore = 0 for responder in self.responders.values(): maxscore += responder.get_max_score() return maxscore def get_score(self): """ Compute score for this problem. The score is the number of points awarded. Returns a dictionary {'score': integer, from 0 to get_max_score(), 'total': get_max_score()}. 
""" correct = 0 for key in self.correct_map: try: correct += self.correct_map.get_npoints(key) except Exception: log.error('key=%s, correct_map = %s', key, self.correct_map) raise if (not self.student_answers) or len(self.student_answers) == 0: return {'score': 0, 'total': self.get_max_score()} else: return {'score': correct, 'total': self.get_max_score()} def update_score(self, score_msg, queuekey): """ Deliver grading response (e.g. from async code checking) to the specific ResponseType that requested grading Returns an updated CorrectMap """ cmap = CorrectMap() cmap.update(self.correct_map) for responder in self.responders.values(): if hasattr(responder, 'update_score'): # Each LoncapaResponse will update its specific entries in cmap # cmap is passed by reference responder.update_score(score_msg, cmap, queuekey) self.correct_map.set_dict(cmap.get_dict()) return cmap def ungraded_response(self, xqueue_msg, queuekey): """ Handle any responses from the xqueue that do not contain grades Will try to pass the queue message to all inputtypes that can handle ungraded responses Does not return any value """ # check against each inputtype for the_input in self.inputs.values(): # if the input type has an ungraded function, pass in the values if hasattr(the_input, 'ungraded_response'): the_input.ungraded_response(xqueue_msg, queuekey) def is_queued(self): """ Returns True if any part of the problem has been submitted to an external queue (e.g. for grading.) """ return any(self.correct_map.is_queued(answer_id) for answer_id in self.correct_map) def get_recentmost_queuetime(self): """ Returns a DateTime object that represents the timestamp of the most recent queueing request, or None if not queued """ if not self.is_queued(): return None # Get a list of timestamps of all queueing requests, then convert it to a DateTime object queuetime_strs = [ self.correct_map.get_queuetime_str(answer_id) for answer_id in self.correct_map if self.correct_map.is_queued(answer_id) ] queuetimes = [ datetime.strptime(qt_str, xqueue_interface.dateformat).replace(tzinfo=UTC) for qt_str in queuetime_strs ] return max(queuetimes) def grade_answers(self, answers): """ Grade student responses. Called by capa_module.check_problem. `answers` is a dict of all the entries from request.POST, but with the first part of each key removed (the string before the first "_"). Thus, for example, input_ID123 -> ID123, and input_fromjs_ID123 -> fromjs_ID123 Calls the Response for each question in this problem, to do the actual grading. """ # if answers include File objects, convert them to filenames. self.student_answers = convert_files_to_filenames(answers) return self._grade_answers(answers) def supports_rescoring(self): """ Checks that the current problem definition permits rescoring. More precisely, it checks that there are no response types in the current problem that are not fully supported (yet) for rescoring. This includes responsetypes for which the student's answer is not properly stored in state, i.e. file submissions. At present, we have no way to know if an existing response was actually a real answer or merely the filename of a file submitted as an answer. It turns out that because rescoring is a background task, limiting it to responsetypes that don't support file submissions also means that the responsetypes are synchronous. This is convenient as it permits rescoring to be complete when the rescoring call returns. 
""" return all('filesubmission' not in responder.allowed_inputfields for responder in self.responders.values()) def rescore_existing_answers(self): """ Rescore student responses. Called by capa_module.rescore_problem. """ return self._grade_answers(None) def _grade_answers(self, student_answers): """ Internal grading call used for checking new 'student_answers' and also rescoring existing student_answers. For new student_answers being graded, `student_answers` is a dict of all the entries from request.POST, but with the first part of each key removed (the string before the first "_"). Thus, for example, input_ID123 -> ID123, and input_fromjs_ID123 -> fromjs_ID123. For rescoring, `student_answers` is None. Calls the Response for each question in this problem, to do the actual grading. """ # old CorrectMap oldcmap = self.correct_map # start new with empty CorrectMap newcmap = CorrectMap() # Call each responsetype instance to do actual grading for responder in self.responders.values(): # File objects are passed only if responsetype explicitly allows # for file submissions. But we have no way of knowing if # student_answers contains a proper answer or the filename of # an earlier submission, so for now skip these entirely. # TODO: figure out where to get file submissions when rescoring. if 'filesubmission' in responder.allowed_inputfields and student_answers is None: _ = self.capa_system.i18n.ugettext raise Exception(_(u"Cannot rescore problems with possible file submissions")) # use 'student_answers' only if it is provided, and if it might contain a file # submission that would not exist in the persisted "student_answers". if 'filesubmission' in responder.allowed_inputfields and student_answers is not None: results = responder.evaluate_answers(student_answers, oldcmap) else: results = responder.evaluate_answers(self.student_answers, oldcmap) newcmap.update(results) self.correct_map = newcmap return newcmap def get_question_answers(self): """ Returns a dict of answer_ids to answer values. If we cannot generate an answer (this sometimes happens in customresponses), that answer_id is not included. Called by "show answers" button JSON request (see capa_module) """ # dict of (id, correct_answer) answer_map = dict() for response in self.responders.keys(): results = self.responder_answers[response] answer_map.update(results) # include solutions from <solution>...</solution> stanzas for entry in self.tree.xpath("//" + "|//".join(solution_tags)): answer = etree.tostring(entry) if answer: answer_map[entry.get('id')] = contextualize_text(answer, self.context) log.debug('answer_map = %s', answer_map) return answer_map def get_answer_ids(self): """ Return the IDs of all the responses -- these are the keys used for the dicts returned by grade_answers and get_question_answers. (Though get_question_answers may only return a subset of these. """ answer_ids = [] for response in self.responders.keys(): results = self.responder_answers[response] answer_ids.append(results.keys()) return answer_ids def get_html(self): """ Main method called externally to get the HTML to be rendered for this capa Problem. """ html = contextualize_text(etree.tostring(self._extract_html(self.tree)), self.context) return html def handle_input_ajax(self, data): """ InputTypes can support specialized AJAX calls. 
Find the correct input and pass along the correct data Also, parse out the dispatch from the get so that it can be passed onto the input type nicely """ # pull out the id input_id = data['input_id'] if self.inputs[input_id]: dispatch = data['dispatch'] return self.inputs[input_id].handle_ajax(dispatch, data) else: log.warning("Could not find matching input for id: %s", input_id) return {} # ======= Private Methods Below ======== def _process_includes(self): """ Handle any <include file="foo"> tags by reading in the specified file and inserting it into our XML tree. Fail gracefully if debugging. """ includes = self.tree.findall('.//include') for inc in includes: filename = inc.get('file') if filename is not None: try: # open using LoncapaSystem OSFS filestore ifp = self.capa_system.filestore.open(filename) except Exception as err: log.warning( 'Error %s in problem xml include: %s', err, etree.tostring(inc, pretty_print=True) ) log.warning( 'Cannot find file %s in %s', filename, self.capa_system.filestore ) # if debugging, don't fail - just log error # TODO (vshnayder): need real error handling, display to users if not self.capa_system.DEBUG: raise else: continue try: # read in and convert to XML incxml = etree.XML(ifp.read()) except Exception as err: log.warning( 'Error %s in problem xml include: %s', err, etree.tostring(inc, pretty_print=True) ) log.warning('Cannot parse XML in %s', (filename)) # if debugging, don't fail - just log error # TODO (vshnayder): same as above if not self.capa_system.DEBUG: raise else: continue # insert new XML into tree in place of include parent = inc.getparent() parent.insert(parent.index(inc), incxml) parent.remove(inc) log.debug('Included %s into %s' % (filename, self.problem_id)) def _extract_system_path(self, script): """ Extracts and normalizes additional paths for code execution. For now, there's a default path of data/course/code; this may be removed at some point. script : ?? (TODO) """ DEFAULT_PATH = ['code'] # Separate paths by :, like the system path. raw_path = script.get('system_path', '').split(":") + DEFAULT_PATH # find additional comma-separated modules search path path = [] for dir in raw_path: if not dir: continue # path is an absolute path or a path relative to the data dir dir = os.path.join(self.capa_system.filestore.root_path, dir) # Check that we are within the filestore tree. reldir = os.path.relpath(dir, self.capa_system.filestore.root_path) if ".." in reldir: log.warning("Ignoring Python directory outside of course: %r", dir) continue abs_dir = os.path.normpath(dir) path.append(abs_dir) return path def _extract_context(self, tree): """ Extract content of <script>...</script> from the problem.xml file, and exec it in the context of this problem. Provides ability to randomize problems, and also set variables for problem answer checking. Problem XML goes to Python execution context. Runs everything in script tags. 
""" context = {} context['seed'] = self.seed all_code = '' python_path = [] for script in tree.findall('.//script'): stype = script.get('type') if stype: if 'javascript' in stype: continue # skip javascript if 'perl' in stype: continue # skip perl # TODO: evaluate only python for d in self._extract_system_path(script): if d not in python_path and os.path.exists(d): python_path.append(d) XMLESC = {"&apos;": "'", "&quot;": '"'} code = unescape(script.text, XMLESC) all_code += code if all_code: try: safe_exec( all_code, context, random_seed=self.seed, python_path=python_path, cache=self.capa_system.cache, slug=self.problem_id, unsafely=self.capa_system.can_execute_unsafe_code(), ) except Exception as err: log.exception("Error while execing script code: " + all_code) msg = "Error while executing script code: %s" % str(err).replace('<', '&lt;') raise responsetypes.LoncapaProblemError(msg) # Store code source in context, along with the Python path needed to run it correctly. context['script_code'] = all_code context['python_path'] = python_path return context def _extract_html(self, problemtree): # private """ Main (private) function which converts Problem XML tree to HTML. Calls itself recursively. Returns Element tree of XHTML representation of problemtree. Calls render_html of Response instances to render responses into XHTML. Used by get_html. """ if not isinstance(problemtree.tag, basestring): # Comment and ProcessingInstruction nodes are not Elements, # and we're ok leaving those behind. # BTW: etree gives us no good way to distinguish these things # other than to examine .tag to see if it's a string. :( return if (problemtree.tag == 'script' and problemtree.get('type') and 'javascript' in problemtree.get('type')): # leave javascript intact. return deepcopy(problemtree) if problemtree.tag in html_problem_semantics: return problemid = problemtree.get('id') # my ID if problemtree.tag in inputtypes.registry.registered_tags(): # If this is an inputtype subtree, let it render itself. 
status = "unsubmitted" msg = '' hint = '' hintmode = None input_id = problemtree.get('id') if problemid in self.correct_map: pid = input_id status = self.correct_map.get_correctness(pid) msg = self.correct_map.get_msg(pid) hint = self.correct_map.get_hint(pid) hintmode = self.correct_map.get_hintmode(pid) value = "" if self.student_answers and problemid in self.student_answers: value = self.student_answers[problemid] if input_id not in self.input_state: self.input_state[input_id] = {} # do the rendering state = { 'value': value, 'status': status, 'id': input_id, 'input_state': self.input_state[input_id], 'feedback': { 'message': msg, 'hint': hint, 'hintmode': hintmode, } } input_type_cls = inputtypes.registry.get_class_for_tag(problemtree.tag) # save the input type so that we can make ajax calls on it if we need to self.inputs[input_id] = input_type_cls(self.capa_system, problemtree, state) return self.inputs[input_id].get_html() # let each Response render itself if problemtree in self.responders: overall_msg = self.correct_map.get_overall_message() return self.responders[problemtree].render_html( self._extract_html, response_msg=overall_msg ) # let each custom renderer render itself: if problemtree.tag in customrender.registry.registered_tags(): renderer_class = customrender.registry.get_class_for_tag(problemtree.tag) renderer = renderer_class(self.capa_system, problemtree) return renderer.get_html() # otherwise, render children recursively, and copy over attributes tree = etree.Element(problemtree.tag) for item in problemtree: item_xhtml = self._extract_html(item) if item_xhtml is not None: tree.append(item_xhtml) if tree.tag in html_transforms: tree.tag = html_transforms[problemtree.tag]['tag'] else: # copy attributes over if not innocufying for (key, value) in problemtree.items(): tree.set(key, value) tree.text = problemtree.text tree.tail = problemtree.tail return tree def _preprocess_problem(self, tree): # private """ Assign IDs to all the responses Assign sub-IDs to all entries (textline, schematic, etc.) 
Annoted correctness and value In-place transformation Also create capa Response instances for each responsetype and save as self.responders Obtain all responder answers and save as self.responder_answers dict (key = response) """ response_id = 1 self.responders = {} for response in tree.xpath('//' + "|//".join(responsetypes.registry.registered_tags())): response_id_str = self.problem_id + "_" + str(response_id) # create and save ID for this response response.set('id', response_id_str) response_id += 1 answer_id = 1 input_tags = inputtypes.registry.registered_tags() inputfields = tree.xpath( "|".join(['//' + response.tag + '[@id=$id]//' + x for x in (input_tags + solution_tags)]), id=response_id_str ) # assign one answer_id for each input type or solution type for entry in inputfields: entry.attrib['response_id'] = str(response_id) entry.attrib['answer_id'] = str(answer_id) entry.attrib['id'] = "%s_%i_%i" % (self.problem_id, response_id, answer_id) answer_id = answer_id + 1 # instantiate capa Response responsetype_cls = responsetypes.registry.get_class_for_tag(response.tag) responder = responsetype_cls(response, inputfields, self.context, self.capa_system) # save in list in self self.responders[response] = responder # get responder answers (do this only once, since there may be a performance cost, # eg with externalresponse) self.responder_answers = {} for response in self.responders.keys(): try: self.responder_answers[response] = self.responders[response].get_answers() except: log.debug('responder %s failed to properly return get_answers()', self.responders[response]) # FIXME raise # <solution>...</solution> may not be associated with any specific response; give # IDs for those separately # TODO: We should make the namespaces consistent and unique (e.g. %s_problem_%i). solution_id = 1 for solution in tree.findall('.//solution'): solution.attrib['id'] = "%s_solution_%i" % (self.problem_id, solution_id) solution_id += 1
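# ---------------------------------------------------------------------------
# Hedged usage sketch (editorial addition, not part of the original module).
# It assumes a fully configured LoncapaSystem instance named `capa_system`;
# building one (filestore, cache, i18n, render_template, ...) is
# environment-specific and not shown. The problem id 'demo_1' and the XML
# below are placeholders.
#
#   problem_xml = '''
#   <problem>
#     <numericalresponse answer="42">
#       <textline/>
#     </numericalresponse>
#   </problem>
#   '''
#   problem = LoncapaProblem(problem_xml, id='demo_1',
#                            capa_system=capa_system, seed=1)
#   # Input ids follow the "<problem_id>_<response_id>_<answer_id>" pattern
#   # assigned in _preprocess_problem, e.g. "demo_1_2_1" for the first input.
#   correct_map = problem.grade_answers({'demo_1_2_1': '42'})
#   print(problem.get_score())  # {'score': ..., 'total': problem.get_max_score()}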
agpl-3.0
-9,036,447,143,522,534,000
37.718157
115
0.585042
false
JonathanSalwan/Triton
src/examples/python/proving_opaque_predicates.py
1
3432
#!/usr/bin/env python3 ## -*- coding: utf-8 -*- ## ## Example to detect opaque predicates. This example is based ## on the Tomislav Zubcic's blog post [0,1] =). ## ## Output: ## ## $ python3 proving_opaque_predicates.py ## xor eax, eax ## jo 7 ## opaque predicate: never taken ## ---------------------------------- ## xor eax, eax ## je 7 ## opaque predicate: always taken ## ---------------------------------- ## xor eax, ebx ## je 7 ## not an opaque predicate ## ---------------------------------- ## and eax, 0x3fffffff ## and ebx, 0x3fffffff ## xor ecx, edx ## xor edx, edi ## add eax, ebx ## jo 0x16 ## opaque predicate: never taken ## ---------------------------------- ## and eax, 0x3fffffff ## and ebx, 0x3fffffff ## xor ecx, edx ## xor edx, edi ## xor eax, ebx ## je 0x16 ## not an opaque predicate ## ---------------------------------- ## ## [0] http://zubcic.re/blog/experimenting-with-z3-proving-opaque-predicates ## [1] https://www.reddit.com/r/ReverseEngineering/comments/4yf6tz/experimenting_with_z3_proving_opaque_predicates/ ## ## -- jonathan from __future__ import print_function from triton import TritonContext, ARCH, Instruction import sys trace_1 = [ b"\x31\xC0", # xor eax, eax b"\x0F\x80\x01\x00\x00\x00", # jo 7 ] trace_2 = [ b"\x31\xC0", # xor eax, eax b"\x0F\x84\x01\x00\x00\x00", # je 7 ] trace_3 = [ b"\x31\xD8", # xor eax, ebx b"\x0F\x84\x01\x00\x00\x00", # je 7 ] trace_4 = [ b"\x25\xff\xff\xff\x3f", # and eax, 0x3fffffff b"\x81\xe3\xff\xff\xff\x3f", # and ebx, 0x3fffffff b"\x31\xd1", # xor ecx, edx b"\x31\xfa", # xor edx, edi b"\x01\xd8", # add eax, ebx b"\x0f\x80\x10\x00\x00\x00", # jo 27 ] trace_5 = [ b"\x25\xff\xff\xff\x3f", # and eax, 0x3fffffff b"\x81\xe3\xff\xff\xff\x3f", # and ebx, 0x3fffffff b"\x31\xd1", # xor ecx, edx b"\x31\xfa", # xor edx, edi b"\x31\xD8", # xor eax, ebx b"\x0F\x84\x10\x00\x00\x00", # je 16 ] Triton = TritonContext() def symbolization_init(): Triton.symbolizeRegister(Triton.registers.eax) Triton.symbolizeRegister(Triton.registers.ebx) Triton.symbolizeRegister(Triton.registers.ecx) Triton.symbolizeRegister(Triton.registers.edx) return def test_trace(trace): Triton.setArchitecture(ARCH.X86) symbolization_init() astCtxt = Triton.getAstContext() for opcode in trace: instruction = Instruction() instruction.setOpcode(opcode) Triton.processing(instruction) print(instruction.getDisassembly()) if instruction.isBranch(): # Opaque Predicate AST op_ast = Triton.getPathPredicate() # Try another model model = Triton.getModel(astCtxt.lnot(op_ast)) if model: print("not an opaque predicate") else: if instruction.isConditionTaken(): print("opaque predicate: always taken") else: print("opaque predicate: never taken") print('----------------------------------') return if __name__ == '__main__': test_trace(trace_1) test_trace(trace_2) test_trace(trace_3) test_trace(trace_4) test_trace(trace_5) sys.exit(0)
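## ---------------------------------------------------------------------------
## Hedged extension sketch (editorial addition, not part of the original
## example). Any further trace can be checked the same way, as long as it is
## a list of raw x86 opcode byte strings; trace_6 below is a hypothetical
## case. By analogy with trace_1, "xor eax, eax" forces ZF to 1, so the
## following "jne" should be reported as an opaque predicate that is never
## taken.
##
## trace_6 = [
##     b"\x31\xC0",                 # xor eax, eax
##     b"\x0F\x85\x01\x00\x00\x00", # jne 7
## ]
## test_trace(trace_6)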
apache-2.0
-7,506,264,363,885,100,000
26.238095
115
0.540501
false
mekkablue/Glyphs-Scripts
Kerning/kernanalysis.py
1
7373
# -*- coding: utf-8 -*--- -- from __future__ import print_function from GlyphsApp import Glyphs if Glyphs.versionNumber >= 3.0: from GlyphsApp import LTR from Foundation import NSNotFound intervalList = (1,3,5,10,20) categoryList = ( "Letter:Uppercase", "Letter:Lowercase", "Letter:Smallcaps", "Punctuation", "Symbol:Currency", "Symbol:Math", "Symbol:Other", "Symbol:Arrow", "Number:Decimal Digit", "Number:Small", "Number:Fraction", ) def stringToListOfGlyphsForFont( string, Font, report=True, excludeNonExporting=True, suffix="" ): # parse string into parseList: parseList = [] waitForSeparator = False parsedName = "" # cut off comment: if "#" in string: string = string[:string.find("#")].strip() # parse string: for i,x in enumerate(string): if x in "/ ": if parsedName: parseList.append(parsedName) parsedName = "" if x == "/": waitForSeparator = True else: waitForSeparator = False elif waitForSeparator: parsedName += x if i == len(string)-1: parseList.append(parsedName) else: parsedName = "" parseList.append(x) # go through parseList and find corresponding glyph in Font: glyphList = [] for parsedName in parseList: if parsedName.startswith("@"): # category and subcategory: if ":" in parsedName: category, subcategory = parsedName[1:].split(":") else: category, subcategory = parsedName[1:], None # TODO parse categoryGlyphs = listOfNamesForCategories( Font, category, subcategory, #OK "latin", # requiredScript, # need to implement still None, # excludedGlyphNameParts, # need to implement still excludeNonExporting, #OK suffix=suffix, ) if categoryGlyphs: glyphList += categoryGlyphs if report: print(u"Added glyphs for category %s, subcategory %s: %s" % (category, subcategory, ", ".join(categoryGlyphs))) elif report: print(u"Warning: no glyphs found for category %s, subcategory %s." % (category, subcategory)) else: # actual single glyph names: glyph = Font.glyphForName_(parsedName+suffix) # actual single character: if not glyph and len(parsedName) == 1: unicodeForName = "%04X" % ord(parsedName) glyphInfo = Glyphs.glyphInfoForUnicode(unicodeForName) if glyphInfo: glyphName = "%s%s" % (glyphInfo.name, suffix) glyph = Font.glyphs[glyphName] # check if glyph exists, exports, and collect in glyphList: if glyph: if (glyph.export or not excludeNonExporting): glyphList.append(glyph) elif report: print(u"Ignoring non-exporting glyph '%s'." % (parsedName+suffix)) elif report: print(u"Warning: Could not find glyph for '%s'." % (parsedName+suffix)) return glyphList def nameUntilFirstPeriod( glyphName ): if not "." 
in glyphName: return glyphName else: offset = glyphName.find(".") return glyphName[:offset] def effectiveKerning( leftGlyphName, rightGlyphName, thisFont, thisFontMasterID, directionSensitive=True ): leftLayer = thisFont.glyphs[leftGlyphName].layers[thisFontMasterID] rightLayer = thisFont.glyphs[rightGlyphName].layers[thisFontMasterID] if Glyphs.versionNumber>=3: direction = 0 #LTR if directionSensitive: direction = Glyphs.font.currentTab.direction effectiveKerning = leftLayer.nextKerningForLayer_direction_( rightLayer, direction ) else: effectiveKerning = leftLayer.rightKerningForLayer_( rightLayer ) if effectiveKerning < NSNotFound: return effectiveKerning else: return 0.0 # older version: # def effectiveKerning( leftGlyphName, rightGlyphName, thisFont, thisFontMasterID ): # leftLayer = thisFont.glyphs[leftGlyphName].layers[thisFontMasterID] # rightLayer = thisFont.glyphs[rightGlyphName].layers[thisFontMasterID] # if Glyphs.versionNumber >= 3.0: # effectiveKerning = leftLayer.nextKerningForLayer_direction_( rightLayer, LTR ) # else: # effectiveKerning = leftLayer.rightKerningForLayer_( rightLayer ) # return effectiveKerning # can be NSNotFound # # if effectiveKerning < NSNotFound: # # return effectiveKerning # # else: # # return 0.0 def listOfNamesForCategories( thisFont, requiredCategory, requiredSubCategory, requiredScript, excludedGlyphNameParts, excludeNonExporting, suffix="" ): nameList = [] for thisGlyph in thisFont.glyphs: thisScript = thisGlyph.script glyphName = thisGlyph.name nameIsOK = True if suffix: nameIsOK = glyphName.endswith(suffix) if nameIsOK and excludedGlyphNameParts: for thisNamePart in excludedGlyphNameParts: nameIsOK = nameIsOK and not thisNamePart in glyphName if nameIsOK and (thisGlyph.export or not excludeNonExporting): if thisScript == None or thisScript == requiredScript: if thisGlyph.category == requiredCategory: if requiredSubCategory: if thisGlyph.subCategory == requiredSubCategory: nameList.append( glyphName ) else: nameList.append( glyphName ) return nameList def splitString( string, delimiter=":", minimum=2 ): # split string into a list: returnList = string.split(delimiter) # remove trailing spaces: for i in range(len(returnList)): returnList[i] = returnList[i].strip() # if necessary fill up with None: while len(returnList) < minimum: returnList.append(None) if returnList == [""]: return None return returnList def measureLayerAtHeightFromLeftOrRight( thisLayer, height, leftSide=True ): try: if leftSide: measurement = thisLayer.lsbAtHeight_(height) else: measurement = thisLayer.rsbAtHeight_(height) if measurement < NSNotFound: return measurement else: return None except: return None def isHeightInIntervals( height, ignoreIntervals ): if ignoreIntervals: for interval in ignoreIntervals: if height <= interval[1] and height >= interval[0]: return True return False def minDistanceBetweenTwoLayers( leftLayer, rightLayer, interval=5.0, kerning=0.0, report=False, ignoreIntervals=[] ): # correction = leftLayer.RSB+rightLayer.LSB topY = min( leftLayer.bounds.origin.y+leftLayer.bounds.size.height, rightLayer.bounds.origin.y+rightLayer.bounds.size.height ) bottomY = max( leftLayer.bounds.origin.y, rightLayer.bounds.origin.y ) distance = topY - bottomY minDist = None if kerning > 10000: # NSNotFound kerning = 0 for i in range(int(distance//interval)): height = bottomY + i * interval if not isHeightInIntervals(height, ignoreIntervals) or not ignoreIntervals: left = measureLayerAtHeightFromLeftOrRight( leftLayer, height, leftSide=False ) right = 
measureLayerAtHeightFromLeftOrRight( rightLayer, height, leftSide=True ) try: # avoid gaps like in i or j total = left+right+kerning # +correction if minDist == None or minDist > total: minDist = total except: pass return minDist def sortedIntervalsFromString( intervals="" ): ignoreIntervals = [] if intervals: for interval in intervals.split(","): if interval.find(":") != -1: interval = interval.strip() try: intervalTuple = tuple(sorted([ int(interval.split(":")[0].strip()), int(interval.split(":")[1].strip()), ])) ignoreIntervals.append(intervalTuple) except: print("Warning: could not convert '%s' into a number interval." % interval.strip()) pass else: print("Warning: '%s' is not an interval (missing colon)" % interval.strip()) return ignoreIntervals
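# ---------------------------------------------------------------------------
# Hedged usage sketch (editorial addition, not part of the original module).
# Meant to be run from the Glyphs Macro window with a font open; in Glyphs 3
# an Edit tab should also be open, since effectiveKerning() consults the
# current tab's direction by default. The glyph names "T" and "o" are
# placeholders.
#
#   thisFont = Glyphs.font
#   masterID = thisFont.selectedFontMaster.id
#   kernValue = effectiveKerning("T", "o", thisFont, masterID)
#   leftLayer = thisFont.glyphs["T"].layers[masterID]
#   rightLayer = thisFont.glyphs["o"].layers[masterID]
#   print(minDistanceBetweenTwoLayers(leftLayer, rightLayer, kerning=kernValue))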
apache-2.0
-576,480,600,968,904,300
29.849372
152
0.714092
false
Pablites/W2IO
setup.py
1
1491
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Project: W2IO (https://github.com/Pablites/W2IO)
# Authors: Paweł Wichary & Michał Waleszczuk
# Date: 21 July 2015
# Licence: available on github

"""
Setup of W2IO.
Installs all files of the plugin.
Install directory is: ~/.starcluster/plugins
"""

import logging
import os
import shutil
import sys

from installer.src.files_copy import FilesCopier
from installer.src.folders_dependency import FoldersDependency

logging.basicConfig(filename='log/w2io_setup.log', level=logging.DEBUG)
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)

logging.info('W2IO setup - started')
print 'W2IO setup - started'

if os.path.exists("/root/.starcluster/plugins/w2io"):
    logging.info('W2IO setup - removing old W2IO')
    shutil.rmtree("/root/.starcluster/plugins/w2io")

__current_path = os.path.dirname(os.path.abspath(__file__))

try:
    FoldersDependency(__current_path).run()
except OSError as e:
    logging.error(e)
    print e
    logging.info('W2IO setup - aborting')
    print 'W2IO setup - aborting'
    sys.exit("NoDirectoryFound")

FilesCopier(__current_path).run()

logging.info('W2IO setup - plugin installed at ~/.starcluster/plugins')
print 'W2IO setup - plugin installed at ~/.starcluster/plugins'

logging.info('W2IO setup - remember to read configuration file ~/.starcluster/plugins/w2io/w2io_config')
print 'W2IO setup - remember to read configuration file ~/.starcluster/plugins/w2io/w2io_config'
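# ---------------------------------------------------------------------------
# Hedged usage note (editorial addition, not part of the original script):
# because the plugin files are copied under /root/.starcluster/plugins, the
# installer presumably has to be run with root privileges, e.g.:
#
#   sudo python setup.py
#
# Progress and errors are written both to stdout and to log/w2io_setup.log.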
lgpl-3.0
-7,020,134,948,091,910,000
30.020833
104
0.733378
false
m3h0w/jigsaw_friend
trackbar.py
1
1497
""" usage: threshold_custom = tb.SimpleTrackbar(img, "ImgThresh") """ import cv2 import numpy as np def empty_function(*arg): pass def SimpleTrackbar(img, win_name): trackbar_name = win_name + "Trackbar" cv2.namedWindow(win_name) cv2.createTrackbar(trackbar_name, win_name, 0, 255, empty_function) while True: trackbar_pos = cv2.getTrackbarPos(trackbar_name, win_name) _, img_th = cv2.threshold(img, trackbar_pos, 255, cv2.THRESH_BINARY) cv2.imshow(win_name, img_th) key = cv2.waitKey(1) & 0xFF if key == ord("c"): break cv2.destroyAllWindows() return trackbar_pos def CannyTrackbar(img, win_name): trackbar_name = win_name + "Trackbar" cv2.namedWindow(win_name) cv2.resizeWindow(win_name, 500,100) cv2.createTrackbar("first", win_name, 0, 255, empty_function) cv2.createTrackbar("second", win_name, 0, 255, empty_function) cv2.createTrackbar("third", win_name, 0, 255, empty_function) while True: trackbar_pos1 = cv2.getTrackbarPos("first", win_name) trackbar_pos2 = cv2.getTrackbarPos("second", win_name) trackbar_pos3 = cv2.getTrackbarPos("third", win_name) img_blurred = cv2.GaussianBlur(img.copy(), (7,7), 2) canny = cv2.Canny(img_blurred, trackbar_pos1, trackbar_pos2) cv2.imshow(win_name, canny) key = cv2.waitKey(1) & 0xFF if key == ord("c"): break cv2.destroyAllWindows() return canny
mit
-5,651,194,603,384,155,000
28.372549
76
0.639947
false
eteq/ginga
ginga/Bindings.py
1
66446
# # Bindings.py -- Bindings classes for Ginga FITS viewer. # # Eric Jeschke ([email protected]) # # Copyright (c) Eric R. Jeschke. All rights reserved. # This is open-source software licensed under a BSD license. # Please see the file LICENSE.txt for details. import math from ginga.misc import Bunch, Settings, Callback from ginga import AutoCuts, trcalc from ginga import cmap, imap class ImageViewBindings(object): """ Mouse Operation and Bindings """ def __init__(self, logger, settings=None): super(ImageViewBindings, self).__init__() self.logger = logger self.canpan = False self.canzoom = False self._ispanning = False self.cancut = False self.cancmap = False self.canflip = False self.canrotate = False # For panning self._pantype = 1 self._start_x = None self._start_y = None self._start_panx = 0 self._start_pany = 0 self._start_scale_x = 0 self._start_scale_y = 0 self._start_rot = 0 if settings is None: # No settings passed. Set up defaults. settings = Settings.SettingGroup(name='bindings', logger=self.logger) self.initialize_settings(settings) self.settings = settings self.autocuts = AutoCuts.ZScale(self.logger) self.features = dict( # name, attr pairs pan='canpan', zoom='canzoom', cuts='cancut', cmap='cancmap', flip='canflip', rotate='canrotate') def initialize_settings(self, settings): settings.addSettings( # You should rarely have to change these. btn_nobtn = 0x0, btn_left = 0x1, btn_middle= 0x2, btn_right = 0x4, # Set up our standard modifiers mod_shift = ['shift_l', 'shift_r'], mod_ctrl = ['control_l', 'control_r'], mod_meta = ['meta_right'], # Define our modes dmod_draw = ['space', None, None], dmod_cmap = ['y', None, None], dmod_cuts = ['s', None, None], dmod_dist = ['d', None, None], dmod_contrast = ['t', None, None], dmod_rotate = ['r', None, None], dmod_pan = ['q', None, None], dmod_freepan = ['w', None, None], # KEYBOARD kp_zoom_in = ['+', '='], kp_zoom_out = ['-', '_'], kp_zoom = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0'], kp_zoom_inv = ['!', '@', '#', '$', '%', '^', '&', '*', '(', ')'], kp_zoom_fit = ['backquote'], kp_autozoom_toggle = ['doublequote'], kp_autozoom_override = ['singlequote'], kp_dist_reset = ['D'], kp_pan_set = ['p'], kp_center = ['c'], kp_cut_255 = ['A'], kp_cut_minmax = ['S'], kp_cut_auto = ['a'], kp_autocuts_toggle = [':'], kp_autocuts_override = [';'], kp_autocenter_toggle = ['?'], kp_autocenter_override = ['/'], kp_contrast_restore = ['T'], kp_cmap_reset = ['Y'], kp_imap_reset = [], kp_flip_x = ['[', '{'], kp_flip_y = [']', '}'], kp_swap_xy = ['backslash', '|'], kp_rotate_reset = ['R'], kp_rotate_inc90 = ['.'], kp_rotate_dec90 = [','], kp_orient_lh = ['o'], kp_orient_rh = ['O'], kp_poly_add = ['v', 'draw+v'], kp_poly_del = ['z', 'draw+z'], kp_edit_del = ['draw+x'], kp_reset = ['escape'], kp_lock = ['l'], # SCROLLING/WHEEL sc_pan = [], sc_pan_fine = [], sc_pan_coarse = [], sc_zoom = ['scroll'], sc_zoom_fine = ['shift+scroll'], sc_zoom_coarse = ['ctrl+scroll'], sc_cuts_fine = ['cuts+scroll'], sc_cuts_coarse = [], sc_dist = ['dist+scroll'], sc_cmap = ['cmap+scroll'], sc_imap = [], #sc_draw = ['draw+scroll'], scroll_pan_acceleration = 1.0, # 1.0 is appropriate for a mouse, 0.1 for most trackpads scroll_zoom_acceleration = 1.0, #scroll_zoom_acceleration = 0.1, mouse_zoom_acceleration = 1.085, mouse_rotate_acceleration = 0.75, pan_reverse = False, pan_multiplier = 1.0, zoom_scroll_reverse = False, # MOUSE/BUTTON ms_none = ['nobtn'], ms_cursor = ['left'], ms_wheel = [], ms_draw = ['draw+left', 'meta+left', 'right'], ms_rotate = ['rotate+left'], 
ms_rotate_reset = ['rotate+right'], ms_contrast = ['contrast+left', 'ctrl+right'], ms_contrast_restore = ['contrast+right', 'ctrl+middle'], ms_pan = ['pan+left', 'ctrl+left'], ms_zoom = ['pan+right'], ms_freepan = ['freepan+left', 'middle'], ms_zoom_in = ['freepan+middle'], ms_zoom_out = ['freepan+right'], ## ms_cutlo = ['cutlo+left'], ## ms_cuthi = ['cuthi+left'], ms_cutall = ['cuts+left'], ms_cut_auto = ['cuts+right'], ms_panset = ['pan+middle', 'shift+left'], # GESTURES (some backends only) gs_pinch = [], # Rotate gesture usually doesn't work so well on most platforms # so don't enable by default #gs_rotate = [], gs_pan = [], gs_swipe = [], gs_tap = [], pinch_actions = [], pinch_zoom_acceleration = 1.4, pinch_rotate_acceleration = 1.0, ) def get_settings(self): return self.settings def window_map(self, viewer): self.to_default_mode(viewer) def set_bindings(self, viewer): viewer.add_callback('map', self.window_map) bindmap = viewer.get_bindmap() bindmap.clear_button_map() bindmap.clear_event_map() # Set up bindings self.setup_settings_events(viewer, bindmap) def set_mode(self, viewer, name, mode_type='oneshot'): bindmap = viewer.get_bindmap() bindmap.set_mode(name, mode_type=mode_type) def parse_combo(self, combo, modes_set, modifiers_set): mode, mods, trigger = None, set([]), combo if '+' in combo: if combo.endswith('+'): # special case: probably contains the keystroke '+' trigger, combo = '+', combo[:-1] if '+' in combo: items = set(combo.split('+')) else: items = set(combo) else: # trigger is always specified last items = combo.split('+') trigger, items = items[-1], set(items[:-1]) mods = items.intersection(modifiers_set) mode = items.intersection(modes_set) if len(mode) == 0: mode = None else: mode = mode.pop() return (mode, mods, trigger) def setup_settings_events(self, viewer, bindmap): d = self.settings.getDict() if len(d) == 0: self.initialize_settings(self.settings) d = self.settings.getDict() # First scan settings for buttons and modes bindmap.clear_modifier_map() bindmap.clear_mode_map() for name, value in d.items(): if name.startswith('mod_'): modname = name[4:] for combo in value: # NOTE: for now no chorded combinations keyname = combo bindmap.add_modifier(keyname, modname) elif name.startswith('btn_'): btnname = name[4:] bindmap.map_button(value, btnname) elif name.startswith('dmod_'): mode_name = name[5:] keyname, mode_type, msg = value bindmap.add_mode(keyname, mode_name, mode_type=mode_type, msg=msg) modes_set = bindmap.get_modes() modifiers_set = bindmap.get_modifiers() # Add events for name, value in d.items(): if len(name) <= 3: continue pfx = name[:3] if not pfx in ('kp_', 'ms_', 'sc_', 'gs_'): continue evname = name[3:] for combo in value: mode, modifiers, trigger = self.parse_combo(combo, modes_set, modifiers_set) bindmap.map_event(mode, modifiers, trigger, evname) # Register for this symbolic event if we have a handler for it try: cb_method = getattr(self, name) except AttributeError: self.logger.warn("No method found matching '%s'" % (name)) cb_method = None if pfx == 'kp_': # keyboard event event = 'keydown-%s' % (evname) viewer.enable_callback(event) if cb_method: viewer.add_callback(event, cb_method) elif pfx == 'ms_': # mouse/button event for action in ('down', 'move', 'up'): event = '%s-%s' % (evname, action) viewer.enable_callback(event) if cb_method: viewer.add_callback(event, cb_method) elif pfx == 'sc_': # scrolling event event = '%s-scroll' % evname viewer.enable_callback(event) if cb_method: viewer.add_callback(event, cb_method) elif pfx == 'gs_': 
viewer.set_callback(evname, cb_method) def reset(self, viewer): bindmap = viewer.get_bindmap() bindmap.reset_mode(viewer) self.pan_stop(viewer) viewer.onscreen_message(None) ##### ENABLERS ##### # These methods are a quick way to enable or disable certain user # interface features in a ImageView window def enable_pan(self, tf): """Enable the image to be panned interactively (True/False).""" self.canpan = tf def enable_zoom(self, tf): """Enable the image to be zoomed interactively (True/False).""" self.canzoom = tf def enable_cuts(self, tf): """Enable the cuts levels to be set interactively (True/False).""" self.cancut = tf def enable_cmap(self, tf): """Enable the color map to be warped interactively (True/False).""" self.cancmap = tf def enable_flip(self, tf): """Enable the image to be flipped interactively (True/False).""" self.canflip = tf def enable_rotate(self, tf): """Enable the image to be rotated interactively (True/False).""" self.canrotate = tf def enable(self, **kwdargs): """ General enable function encompassing all user interface features. Usage (e.g.): viewer.enable(rotate=False, flip=True) """ for feat, value in kwdargs: feat = feat.lower() if not feat in self.features: raise ValueError("'%s' is not a feature. Must be one of %s" % ( feat, str(self.features))) attr = self.features[feat] setattr(self, attr, bool(value)) def enable_all(self, tf): for feat, attr in self.features.items(): setattr(self, attr, bool(tf)) ##### Help methods ##### # Methods used by the callbacks to do actions. def get_new_pan(self, viewer, win_x, win_y, ptype=1): if ptype == 1: # This is a "free pan", similar to dragging the "lens" # over the canvas. dat_wd, dat_ht = viewer.get_data_size() win_wd, win_ht = viewer.get_window_size() if (win_x >= win_wd): win_x = win_wd - 1 if (win_y >= win_ht): win_y = win_ht - 1 # Figure out data x,y based on percentage of X axis # and Y axis off_x, off_y = viewer.window_to_offset(win_x, win_y) max_x, max_y = viewer.window_to_offset(win_wd, win_ht) wd_x = abs(max_x) * 2.0 ht_y = abs(max_y) * 2.0 panx = (off_x + abs(max_x)) / float(wd_x) pany = (off_y + abs(max_y)) / float(ht_y) # Account for user preference if self.settings.get('pan_reverse', False): panx = 1.0 - panx pany = 1.0 - pany data_x, data_y = panx * dat_wd, pany * dat_ht return data_x, data_y elif ptype == 2: # This is a "drag pan", similar to dragging the canvas # under the "lens" or "viewport". if self._start_x is None: # user has not held the mouse button yet # return current pan values return (self._start_panx, self._start_pany) scale_x, scale_y = viewer.get_scale_xy() multiplier = self.settings.get('pan_multiplier', 1.0) off_x, off_y = viewer.window_to_offset(win_x, win_y) delta_x = float(self._start_x - off_x) / scale_x * multiplier delta_y = float(self._start_y - off_y) / scale_y * multiplier data_x = self._start_panx + delta_x data_y = self._start_pany + delta_y return (data_x, data_y) def _panset(self, viewer, data_x, data_y, msg=True): try: msg = self.settings.get('msg_panset', msg) if msg: viewer.onscreen_message("Pan position set", delay=0.4) res = viewer.panset_xy(data_x, data_y) return res except Exception as e: viewer.onscreen_message("Pan position set error; see log", delay=2.0) # most likely image does not have a valid wcs self.logger.error("Error setting pan position: %s" % ( str(e))) def get_direction(self, direction, rev=False): """ Translate a direction in compass degrees into 'up' or 'down'. 
""" if (direction < 90.0) or (direction > 270.0): if not rev: return 'up' else: return 'down' elif (90.0 < direction < 270.0): if not rev: return 'down' else: return 'up' else: return 'none' def _tweak_colormap(self, viewer, x, y, mode): win_wd, win_ht = viewer.get_window_size() # translate Y cursor position as a percentage of the window # height into a scaling factor y_pct = (win_ht - y) / float(win_ht) # I tried to mimic ds9's exponential scale feel along the Y-axis def exp_scale(i): return (1.0/(i**3))*0.0002 + (1.0/i)*0.085 scale_pct = exp_scale(1.0 - y_pct) # translate X cursor position as a percentage of the window # width into a shifting factor shift_pct = x / float(win_wd) - 0.5 viewer.scale_and_shift_cmap(scale_pct, shift_pct) def _cutlow_pct(self, viewer, pct, msg=True): msg = self.settings.get('msg_cuts', msg) image = viewer.get_image() minval, maxval = image.get_minmax() spread = maxval - minval loval, hival = viewer.get_cut_levels() loval = loval + (pct * spread) if msg: viewer.onscreen_message("Cut low: %.4f" % (loval)) viewer.cut_levels(loval, hival) def _cutlow_xy(self, viewer, x, y, msg=True): msg = self.settings.get('msg_cuts', msg) win_wd, win_ht = viewer.get_window_size() pct = float(x) / float(win_wd) image = viewer.get_image() minval, maxval = image.get_minmax() spread = maxval - minval loval, hival = viewer.get_cut_levels() loval = minval + (pct * spread) if msg: viewer.onscreen_message("Cut low: %.4f" % (loval)) viewer.cut_levels(loval, hival) def _cuthigh_pct(self, viewer, pct, msg=True): msg = self.settings.get('msg_cuts', msg) image = viewer.get_image() minval, maxval = image.get_minmax() spread = maxval - minval loval, hival = viewer.get_cut_levels() hival = hival - (pct * spread) if msg: viewer.onscreen_message("Cut high: %.4f" % (hival)) viewer.cut_levels(loval, hival) def _cuthigh_xy(self, viewer, x, y, msg=True): msg = self.settings.get('msg_cuts', msg) win_wd, win_ht = viewer.get_window_size() pct = 1.0 - (float(x) / float(win_wd)) image = viewer.get_image() minval, maxval = image.get_minmax() spread = maxval - minval loval, hival = viewer.get_cut_levels() hival = maxval - (pct * spread) if msg: viewer.onscreen_message("Cut high: %.4f" % (hival)) viewer.cut_levels(loval, hival) def _cutboth_xy(self, viewer, x, y, msg=True): msg = self.settings.get('msg_cuts', msg) win_wd, win_ht = viewer.get_window_size() xpct = 1.0 - (float(x) / float(win_wd)) #ypct = 1.0 - (float(y) / float(win_ht)) ypct = (float(win_ht - y) / float(win_ht)) spread = self._hival - self._loval hival = self._hival - (xpct * spread) loval = self._loval + (ypct * spread) if msg: viewer.onscreen_message("Cut low: %.4f high: %.4f" % ( loval, hival)) viewer.cut_levels(loval, hival) def _cut_pct(self, viewer, pct, msg=True): msg = self.settings.get('msg_cuts', msg) image = viewer.get_image() minval, maxval = image.get_minmax() spread = maxval - minval loval, hival = viewer.get_cut_levels() loval = loval + (pct * spread) hival = hival - (pct * spread) if msg: viewer.onscreen_message("Cut low: %.4f high: %.4f" % ( loval, hival), delay=1.0) viewer.cut_levels(loval, hival) def _adjust_cuts(self, viewer, direction, pct, msg=True): direction = self.get_direction(direction) if direction == 'up': self._cut_pct(viewer, pct, msg=msg) elif direction == 'down': self._cut_pct(viewer, -pct, msg=msg) def _scale_image(self, viewer, direction, factor, msg=True): msg = self.settings.get('msg_zoom', msg) rev = self.settings.get('zoom_scroll_reverse', False) scale_x, scale_y = viewer.get_scale_xy() direction = 
self.get_direction(direction, rev=rev) if direction == 'up': mult = 1.0 + factor elif direction == 'down': mult = 1.0 - factor scale_x, scale_y = scale_x * mult, scale_y * mult viewer.scale_to(scale_x, scale_y) if msg: viewer.onscreen_message(viewer.get_scale_text(), delay=0.4) def _zoom_xy(self, viewer, x, y, msg=True): win_wd, win_ht = viewer.get_window_size() delta = float(x - self._start_x) factor = math.fabs(self.settings.get('mouse_zoom_acceleration', 1.085) - 1.0) direction = 0.0 if delta < 0.0: direction = 180.0 #print("factor=%f direction=%f" % (factor, direction)) self._start_x = x self._scale_image(viewer, direction, factor, msg=msg) def _cycle_dist(self, viewer, msg, direction='down'): if self.cancmap: msg = self.settings.get('msg_dist', msg) rgbmap = viewer.get_rgbmap() algs = rgbmap.get_hash_algorithms() algname = rgbmap.get_hash_algorithm() idx = algs.index(algname) if direction == 'down': idx = (idx + 1) % len(algs) else: idx = idx - 1 if idx < 0: idx = len(algs) - 1 algname = algs[idx] rgbmap.set_hash_algorithm(algname) if msg: viewer.onscreen_message("Color dist: %s" % (algname), delay=1.0) def _reset_dist(self, viewer, msg): if self.cancmap: msg = self.settings.get('msg_dist', msg) rgbmap = viewer.get_rgbmap() algname = 'linear' rgbmap.set_hash_algorithm(algname) if msg: viewer.onscreen_message("Color dist: %s" % (algname), delay=1.0) def _cycle_cmap(self, viewer, msg, direction='down'): if self.cancmap: msg = self.settings.get('msg_cmap', msg) rgbmap = viewer.get_rgbmap() cm = rgbmap.get_cmap() cmapname = cm.name cmapnames = cmap.get_names() idx = cmapnames.index(cmapname) if direction == 'down': idx = (idx + 1) % len(cmapnames) else: idx = idx - 1 if idx < 0: idx = len(cmapnames) - 1 cmapname = cmapnames[idx] rgbmap.set_cmap(cmap.get_cmap(cmapname)) if msg: viewer.onscreen_message("Color map: %s" % (cmapname), delay=1.0) def _reset_cmap(self, viewer, msg): if self.cancmap: msg = self.settings.get('msg_cmap', msg) rgbmap = viewer.get_rgbmap() # default cmapname = 'gray' rgbmap.set_cmap(cmap.get_cmap(cmapname)) if msg: viewer.onscreen_message("Color map: %s" % (cmapname), delay=1.0) def _cycle_imap(self, viewer, msg, direction='down'): if self.cancmap: msg = self.settings.get('msg_imap', msg) rgbmap = viewer.get_rgbmap() im = rgbmap.get_imap() imapname = im.name imapnames = imap.get_names() idx = imapnames.index(imapname) if direction == 'down': idx = (idx + 1) % len(imapnames) else: idx = idx - 1 if idx < 0: idx = len(imapnames) - 1 imapname = imapnames[idx] rgbmap.set_imap(imap.get_imap(imapname)) if msg: viewer.onscreen_message("Intensity map: %s" % (imapname), delay=1.0) def _reset_imap(self, viewer, msg): if self.cancmap: msg = self.settings.get('msg_imap', msg) rgbmap = viewer.get_rgbmap() # default imapname = 'ramp' rgbmap.set_imap(imap.get_imap(imapname)) if msg: viewer.onscreen_message("Intensity map: %s" % (imapname), delay=1.0) def _get_pct_xy(self, viewer, x, y): win_wd, win_ht = viewer.get_window_size() x_pct = float(x - self._start_x) / win_wd y_pct = float(y - self._start_y) / win_ht return (x_pct, y_pct) def _rotate_xy(self, viewer, x, y, msg=True): msg = self.settings.get('msg_rotate', msg) x_pct, y_pct = self._get_pct_xy(viewer, x, y) delta_deg = x_pct * 360.0 factor = self.settings.get('mouse_rotate_acceleration', 0.75) deg = math.fmod(self._start_rot + delta_deg * factor, 360.0) if msg: viewer.onscreen_message("Rotate: %.2f" % (deg)) viewer.rotate(deg) def _rotate_inc(self, viewer, inc_deg, msg=True): msg = self.settings.get('msg_rotate_inc', msg) 
cur_rot_deg = viewer.get_rotation() rot_deg = math.fmod(cur_rot_deg + inc_deg, 360.0) viewer.rotate(rot_deg) if msg: viewer.onscreen_message("Rotate Inc: (%.2f) %.2f" % ( inc_deg, rot_deg), delay=1.0) def _orient(self, viewer, righthand=False, msg=True): msg = self.settings.get('msg_orient', msg) image = viewer.get_image() (x, y, xn, yn, xe, ye) = image.calc_compass_center() degn = math.degrees(math.atan2(xn - x, yn - y)) self.logger.info("degn=%f xe=%f ye=%f" % ( degn, xe, ye)) # rotate east point also by degn xe2, ye2 = trcalc.rotate_pt(xe, ye, degn, xoff=x, yoff=y) dege = math.degrees(math.atan2(xe2 - x, ye2 - y)) self.logger.info("dege=%f xe2=%f ye2=%f" % ( dege, xe2, ye2)) # if right-hand image, flip it to make left hand xflip = righthand if dege > 0.0: xflip = not xflip if xflip: degn = - degn viewer.transform(xflip, False, False) viewer.rotate(degn) if msg: viewer.onscreen_message("Orient: rot=%.2f flipx=%s" % ( degn, str(xflip)), delay=1.0) def to_default_mode(self, viewer): self._ispanning = False viewer.switch_cursor('pick') def pan_start(self, viewer, ptype=1): # If already panning then ignore multiple keystrokes if self._ispanning: return self._pantype = ptype viewer.switch_cursor('pan') self._ispanning = True def pan_set_origin(self, viewer, win_x, win_y, data_x, data_y): self._start_x, self._start_y = viewer.window_to_offset(win_x, win_y) self._start_panx, self._start_pany = viewer.get_pan() def pan_stop(self, viewer): self._ispanning = False self._start_x = None self._pantype = 1 self.to_default_mode(viewer) def restore_colormap(self, viewer, msg=True): msg = self.settings.get('msg_cmap', msg) rgbmap = viewer.get_rgbmap() rgbmap.reset_sarr() if msg: viewer.onscreen_message("Restored color map", delay=0.5) return True ##### KEYBOARD ACTION CALLBACKS ##### def kp_pan_set(self, viewer, event, data_x, data_y, msg=True): if self.canpan: self._panset(viewer, data_x, data_y, msg=msg) return True def kp_center(self, viewer, event, data_x, data_y): if self.canpan: viewer.center_image() return True def kp_zoom_out(self, viewer, event, data_x, data_y, msg=True): if self.canzoom: msg = self.settings.get('msg_zoom', msg) viewer.zoom_out() if msg: viewer.onscreen_message(viewer.get_scale_text(), delay=1.0) return True def kp_zoom_in(self, viewer, event, data_x, data_y, msg=True): if self.canzoom: msg = self.settings.get('msg_zoom', msg) viewer.zoom_in() if msg: viewer.onscreen_message(viewer.get_scale_text(), delay=1.0) return True def kp_zoom(self, viewer, event, data_x, data_y, msg=True): if self.canzoom: msg = self.settings.get('msg_zoom', msg) keylist = self.settings.get('kp_zoom') zoomval = (keylist.index(event.key) + 1) viewer.zoom_to(zoomval) if msg: viewer.onscreen_message(viewer.get_scale_text(), delay=1.0) return True def kp_zoom_inv(self, viewer, event, data_x, data_y, msg=True): if self.canzoom: msg = self.settings.get('msg_zoom', msg) keylist = self.settings.get('kp_zoom_inv') zoomval = - (keylist.index(event.key) + 1) viewer.zoom_to(zoomval) if msg: viewer.onscreen_message(viewer.get_scale_text(), delay=1.0) return True def kp_zoom_fit(self, viewer, event, data_x, data_y, msg=True): if self.canzoom: msg = self.settings.get('msg_zoom', msg) viewer.zoom_fit() if msg: viewer.onscreen_message(viewer.get_scale_text(), delay=1.0) return True def kp_autozoom_toggle(self, viewer, event, data_x, data_y, msg=True): if self.canzoom: msg = self.settings.get('msg_zoom', msg) val = viewer.get_settings().get('autozoom') if val == 'off': val = 'on' else: val = 'off' 
viewer.enable_autozoom(val) if msg: viewer.onscreen_message('Autozoom %s' % val, delay=1.0) return True def kp_autozoom_override(self, viewer, event, data_x, data_y, msg=True): if self.canzoom: msg = self.settings.get('msg_zoom', msg) viewer.enable_autozoom('override') if msg: viewer.onscreen_message('Autozoom Override', delay=1.0) return True def kp_cut_255(self, viewer, event, data_x, data_y, msg=True): if self.cancut: msg = self.settings.get('msg_cuts', msg) viewer.cut_levels(0.0, 255.0, no_reset=True) return True def kp_cut_minmax(self, viewer, event, data_x, data_y, msg=True): if self.cancut: msg = self.settings.get('msg_cuts', msg) image = viewer.get_image() mn, mx = image.get_minmax(noinf=True) viewer.cut_levels(mn, mx, no_reset=True) return True def kp_cut_auto(self, viewer, event, data_x, data_y, msg=True): if self.cancut: msg = self.settings.get('msg_cuts', msg) if msg: viewer.onscreen_message("Auto cut levels", delay=1.0) viewer.auto_levels() return True def kp_autocuts_toggle(self, viewer, event, data_x, data_y, msg=True): if self.cancut: msg = self.settings.get('msg_cuts', msg) val = viewer.get_settings().get('autocuts') if val == 'off': val = 'on' else: val = 'off' viewer.enable_autocuts(val) if msg: viewer.onscreen_message('Autocuts %s' % val, delay=1.0) return True def kp_autocuts_override(self, viewer, event, data_x, data_y, msg=True): if self.cancut: msg = self.settings.get('msg_cuts', msg) viewer.enable_autocuts('override') if msg: viewer.onscreen_message('Autocuts Override', delay=1.0) return True def kp_autocenter_toggle(self, viewer, event, data_x, data_y, msg=True): if self.canpan: msg = self.settings.get('msg_pan', msg) val = viewer.get_settings().get('autocenter') if val == 'off': val = 'on' else: val = 'off' viewer.set_autocenter(val) if msg: viewer.onscreen_message('Autocenter %s' % val, delay=1.0) return True def kp_autocenter_override(self, viewer, event, data_x, data_y, msg=True): if self.canpan: msg = self.settings.get('msg_pan', msg) viewer.set_autocenter('override') if msg: viewer.onscreen_message('Autocenter Override', delay=1.0) return True def kp_contrast_restore(self, viewer, event, data_x, data_y, msg=True): if self.cancmap: msg = self.settings.get('msg_cmap', msg) self.restore_colormap(viewer, msg=msg) return True def kp_flip_x(self, viewer, event, data_x, data_y, msg=True): if self.canflip: msg = self.settings.get('msg_transform', msg) flipX, flipY, swapXY = viewer.get_transforms() if event.key == '[': flipx = not flipX else: flipx = False viewer.transform(flipx, flipY, swapXY) if msg: viewer.onscreen_message("Flip X=%s" % flipx, delay=1.0) return True def kp_flip_y(self, viewer, event, data_x, data_y, msg=True): if self.canflip: msg = self.settings.get('msg_transform', msg) flipX, flipY, swapXY = viewer.get_transforms() if event.key == ']': flipy = not flipY else: flipy = False viewer.transform(flipX, flipy, swapXY) if msg: viewer.onscreen_message("Flip Y=%s" % flipy, delay=1.0) return True def kp_swap_xy(self, viewer, event, data_x, data_y, msg=True): if self.canflip: msg = self.settings.get('msg_transform', msg) flipX, flipY, swapXY = viewer.get_transforms() if event.key == 'backslash': swapxy = not swapXY else: swapxy = False viewer.transform(flipX, flipY, swapxy) if msg: viewer.onscreen_message("Swap XY=%s" % swapxy, delay=1.0) return True def kp_dist(self, viewer, event, data_x, data_y, msg=True): self._cycle_dist(viewer, msg) return True def kp_dist_reset(self, viewer, event, data_x, data_y, msg=True): self._reset_dist(viewer, msg) return True 
def kp_cmap_reset(self, viewer, event, data_x, data_y, msg=True): self._reset_cmap(viewer, msg) return True def kp_imap_reset(self, viewer, event, data_x, data_y, msg=True): self._reset_imap(viewer, msg) return True def kp_rotate_reset(self, viewer, event, data_x, data_y): if self.canrotate: viewer.rotate(0.0) # also reset all transforms viewer.transform(False, False, False) return True def kp_rotate_inc90(self, viewer, event, data_x, data_y, msg=True): if self.canrotate: self._rotate_inc(viewer, 90.0, msg=msg) return True def kp_rotate_dec90(self, viewer, event, data_x, data_y, msg=True): if self.canrotate: self._rotate_inc(viewer, -90.0, msg=msg) return True def kp_orient_lh(self, viewer, event, data_x, data_y, msg=True): if self.canrotate: self._orient(viewer, righthand=False, msg=msg) return True def kp_orient_rh(self, viewer, event, data_x, data_y, msg=True): if self.canrotate: self._orient(viewer, righthand=True, msg=msg) return True def kp_reset(self, viewer, event, data_x, data_y): self.reset(viewer) return True def kp_lock(self, viewer, event, data_x, data_y): bm = viewer.get_bindmap() # toggle default mode type to locked/oneshot dfl_modetype = bm.get_default_mode_type() # get current mode mode_name, cur_modetype = bm.current_mode() if dfl_modetype == 'locked': mode_type = 'oneshot' bm.set_default_mode_type(mode_type) # turning off lock also resets the mode bm.reset_mode(viewer) else: mode_type = 'locked' bm.set_default_mode_type(mode_type) bm.set_mode(mode_name, mode_type=mode_type) return True ##### MOUSE ACTION CALLBACKS ##### ## def ms_none(self, viewer, event, data_x, data_y): ## return False ## def ms_cursor(self, viewer, event, data_x, data_y): ## return False ## def ms_wheel(self, viewer, event, data_x, data_y): ## return False ## def ms_draw(self, viewer, event, data_x, data_y): ## return False def ms_zoom(self, viewer, event, data_x, data_y, msg=True): """Zoom the image by dragging the cursor left or right. """ if not self.canzoom: return True msg = self.settings.get('msg_zoom', msg) x, y = viewer.get_last_win_xy() if event.state == 'move': self._zoom_xy(viewer, x, y) elif event.state == 'down': if msg: viewer.onscreen_message("Zoom (drag mouse L-R)", delay=1.0) self._start_x, self._start_y = x, y else: viewer.onscreen_message(None) return True def ms_zoom_in(self, viewer, event, data_x, data_y, msg=True): """Zoom in one level by a mouse click. """ if not self.canzoom: return True if event.state == 'down': viewer.panset_xy(data_x, data_y) viewer.zoom_in() if msg: viewer.onscreen_message(viewer.get_scale_text(), delay=1.0) return True def ms_zoom_out(self, viewer, event, data_x, data_y, msg=True): """Zoom out one level by a mouse click. """ if not self.canzoom: return True if event.state == 'down': viewer.panset_xy(data_x, data_y) viewer.zoom_out() if msg: viewer.onscreen_message(viewer.get_scale_text(), delay=1.0) return True def ms_rotate(self, viewer, event, data_x, data_y, msg=True): """Rotate the image by dragging the cursor left or right. 
""" if not self.canrotate: return True msg = self.settings.get('msg_rotate', msg) x, y = viewer.get_last_win_xy() if event.state == 'move': self._rotate_xy(viewer, x, y) elif event.state == 'down': if msg: viewer.onscreen_message("Rotate (drag mouse L-R)", delay=1.0) self._start_x, self._start_y = x, y self._start_rot = viewer.get_rotation() else: viewer.onscreen_message(None) return True def ms_rotate_reset(self, viewer, event, data_x, data_y, msg=True): if not self.canrotate: return True msg = self.settings.get('msg_rotate', msg) if event.state == 'down': viewer.rotate(0.0) viewer.onscreen_message("Rotation reset", delay=0.5) return True def ms_contrast(self, viewer, event, data_x, data_y, msg=True): """Shift the colormap by dragging the cursor left or right. Stretch the colormap by dragging the cursor up or down. """ if not self.cancmap: return True msg = self.settings.get('msg_contrast', msg) x, y = viewer.get_last_win_xy() if not viewer._originUpper: y = viewer._imgwin_ht - y if event.state == 'move': self._tweak_colormap(viewer, x, y, 'preview') elif event.state == 'down': self._start_x, self._start_y = x, y if msg: viewer.onscreen_message("Shift and stretch colormap (drag mouse)", delay=1.0) else: viewer.onscreen_message(None) return True def ms_contrast_restore(self, viewer, event, data_x, data_y, msg=True): """An interactive way to restore the colormap settings after a warp operation. """ if self.cancmap and (event.state == 'down'): self.restore_colormap(viewer, msg=msg) return True def ms_pan(self, viewer, event, data_x, data_y): """A 'drag' or proportional pan, where the image is panned by 'dragging the canvas' up or down. The amount of the pan is proportionate to the length of the drag. """ if not self.canpan: return True x, y = viewer.get_last_win_xy() if event.state == 'move': data_x, data_y = self.get_new_pan(viewer, x, y, ptype=self._pantype) viewer.panset_xy(data_x, data_y) elif event.state == 'down': self.pan_set_origin(viewer, x, y, data_x, data_y) self.pan_start(viewer, ptype=2) else: self.pan_stop(viewer) return True def ms_freepan(self, viewer, event, data_x, data_y): """A 'free' pan, where the image is panned by dragging the cursor towards the area you want to see in the image. The entire image is pannable by dragging towards each corner of the window. """ if not self.canpan: return True x, y = viewer.get_last_win_xy() if event.state == 'move': data_x, data_y = self.get_new_pan(viewer, x, y, ptype=self._pantype) viewer.panset_xy(data_x, data_y) elif event.state == 'down': self.pan_start(viewer, ptype=1) else: self.pan_stop(viewer) return True def ms_cutlo(self, viewer, event, data_x, data_y): """An interactive way to set the low cut level. """ if not self.cancut: return True x, y = viewer.get_last_win_xy() if event.state == 'move': self._cutlow_xy(viewer, x, y) elif event.state == 'down': self._start_x, self._start_y = x, y self._loval, self._hival = viewer.get_cut_levels() else: viewer.onscreen_message(None) return True def ms_cuthi(self, viewer, event, data_x, data_y): """An interactive way to set the high cut level. """ if not self.cancut: return True x, y = viewer.get_last_win_xy() if event.state == 'move': self._cuthigh_xy(viewer, x, y) elif event.state == 'down': self._start_x, self._start_y = x, y self._loval, self._hival = viewer.get_cut_levels() else: viewer.onscreen_message(None) return True def ms_cutall(self, viewer, event, data_x, data_y): """An interactive way to set the low AND high cut levels. 
""" if not self.cancut: return True x, y = viewer.get_last_win_xy() if not viewer._originUpper: y = viewer._imgwin_ht - y if event.state == 'move': self._cutboth_xy(viewer, x, y) elif event.state == 'down': self._start_x, self._start_y = x, y image = viewer.get_image() #self._loval, self._hival = viewer.get_cut_levels() self._loval, self._hival = self.autocuts.calc_cut_levels(image) else: viewer.onscreen_message(None) return True def ms_cut_auto(self, viewer, event, data_x, data_y, msg=True): return self.kp_cut_auto(viewer, event, data_x, data_y, msg=msg) def ms_panset(self, viewer, event, data_x, data_y, msg=True): """An interactive way to set the pan position. The location (data_x, data_y) will be centered in the window. """ if self.canpan and (event.state == 'down'): self._panset(viewer, data_x, data_y, msg=msg) return True ##### SCROLL ACTION CALLBACKS ##### def sc_cuts_coarse(self, viewer, event, msg=True): """Adjust cuts interactively by setting the low AND high cut levels. This function adjusts it coarsely. """ if self.cancut: self._adjust_cuts(viewer, event.direction, 0.01, msg=msg) return True def sc_cuts_fine(self, viewer, event, msg=True): """Adjust cuts interactively by setting the low AND high cut levels. This function adjusts it finely. """ if self.cancut: self._adjust_cuts(viewer, event.direction, 0.001, msg=msg) return True def sc_zoom(self, viewer, event, msg=True): """Interactively zoom the image by scrolling motion. This zooms by the zoom steps configured under Preferences. """ if self.canzoom: msg = self.settings.get('msg_zoom', msg) rev = self.settings.get('zoom_scroll_reverse', False) direction = self.get_direction(event.direction, rev=rev) if direction == 'up': viewer.zoom_in() elif direction == 'down': viewer.zoom_out() if msg: viewer.onscreen_message(viewer.get_scale_text(), delay=0.4) return True def sc_zoom_new(self, viewer, event, msg=True): return self.sc_zoom_coarse(viewer, event, msg=msg) def sc_zoom_coarse(self, viewer, event, msg=True): """Interactively zoom the image by scrolling motion. This zooms by adjusting the scale in x and y coarsely. """ if self.canzoom: zoom_accel = self.settings.get('scroll_zoom_acceleration', 1.0) amount = zoom_accel * 0.20 self._scale_image(viewer, event.direction, amount, msg=msg) return True def sc_zoom_fine(self, viewer, event, msg=True): """Interactively zoom the image by scrolling motion. This zooms by adjusting the scale in x and y coarsely. """ if self.canzoom: zoom_accel = self.settings.get('scroll_zoom_acceleration', 1.0) amount = zoom_accel * 0.08 self._scale_image(viewer, event.direction, 0.08, msg=msg) return True def sc_pan(self, viewer, event, msg=True): """Interactively pan the image by scrolling motion. """ if not self.canpan: return True # User has "Pan Reverse" preference set? 
rev = self.settings.get('pan_reverse', False) direction = event.direction if rev: direction = math.fmod(direction + 180.0, 360.0) pan_accel = self.settings.get('scroll_pan_acceleration', 1.0) num_degrees = event.amount * pan_accel ang_rad = math.radians(90.0 - direction) # Calculate distance of pan amount, based on current scale wd, ht = viewer.get_data_size() # pageSize = min(wd, ht) ((x0, y0), (x1, y1), (x2, y2), (x3, y3)) = viewer.get_pan_rect() page_size = min(abs(x2 - x0), abs(y2 - y0)) distance = (num_degrees / 360.0) * page_size self.logger.debug("angle=%f ang_rad=%f distance=%f" % ( direction, ang_rad, distance)) # Calculate new pan position pan_x, pan_y = viewer.get_pan() new_x = pan_x + math.cos(ang_rad) * distance new_y = pan_y + math.sin(ang_rad) * distance # cap pan position new_x = min(max(new_x, 0.0), wd) new_y = min(max(new_y, 0.0), ht) # Because pan position is reported +0.5 #new_x, new_y = new_x - 0.5, new_y - 0.5 #print "data x,y=%f,%f new x, y=%f,%f" % (pan_x, pan_y, new_x, new_y) viewer.panset_xy(new_x, new_y) # For checking result #pan_x, pan_y = viewer.get_pan() #print "new pan x,y=%f, %f" % (pan_x, pan_y) return True def sc_pan_coarse(self, viewer, event, msg=True): event.amount = event.amount / 2.0 return self.sc_pan(viewer, event, msg=msg) def sc_pan_fine(self, viewer, event, msg=True): event.amount = event.amount / 5.0 return self.sc_pan(viewer, event, msg=msg) def sc_dist(self, viewer, event, msg=True): direction = self.get_direction(event.direction) self._cycle_dist(viewer, msg, direction=direction) return True def sc_cmap(self, viewer, event, msg=True): direction = self.get_direction(event.direction) self._cycle_cmap(viewer, msg, direction=direction) return True def sc_imap(self, viewer, event, msg=True): direction = self.get_direction(event.direction) self._cycle_imap(viewer, msg, direction=direction) return True ##### GESTURE ACTION CALLBACKS ##### def gs_pinch(self, viewer, state, rot_deg, scale, msg=True): pinch_actions = self.settings.get('pinch_actions', []) if state == 'start': self._start_scale_x, self._start_scale_y = viewer.get_scale_xy() self._start_rot = viewer.get_rotation() else: msg_str = None if self.canzoom and ('zoom' in pinch_actions): scale_accel = self.settings.get('pinch_zoom_acceleration', 1.0) scale = scale * scale_accel scale_x, scale_y = (self._start_scale_x * scale, self._start_scale_y * scale) viewer.scale_to(scale_x, scale_y) msg_str = viewer.get_scale_text() msg = self.settings.get('msg_zoom', True) if self.canrotate and ('rotate' in pinch_actions): deg = self._start_rot - rot_deg rotate_accel = self.settings.get('pinch_rotate_acceleration', 1.0) deg = rotate_accel * deg viewer.rotate(deg) if msg_str is None: msg_str = "Rotate: %.2f" % (deg) msg = self.settings.get('msg_rotate', msg) if msg and (msg_str is not None): viewer.onscreen_message(msg_str, delay=0.4) return True def gs_pan(self, viewer, state, dx, dy): if not self.canpan: return True if state == 'move': scale_x, scale_y = viewer.get_scale_xy() delta_x = float(dx) / scale_x delta_y = float(dy) / scale_y data_x = self._start_panx + delta_x data_y = self._start_pany + delta_y viewer.panset_xy(data_x, data_y) elif state == 'start': self._start_panx, self._start_pany = viewer.get_pan() self.pan_start(viewer, ptype=2) else: self.pan_stop(viewer) return True def gs_rotate(self, viewer, state, rot_deg, msg=True): if state == 'start': self._start_rot = viewer.get_rotation() else: msg_str = None if self.canrotate: deg = self._start_rot - rot_deg rotate_accel = 
self.settings.get('pinch_rotate_acceleration', 1.0) deg = rotate_accel * deg viewer.rotate(deg) if msg_str is None: msg_str = "Rotate: %.2f" % (deg) msg = self.settings.get('msg_rotate', msg) if msg and (msg_str is not None): viewer.onscreen_message(msg_str, delay=0.4) return True class UIEvent(object): pass class KeyEvent(UIEvent): def __init__(self, key=None, state=None, mode=None, modifiers=None, data_x=None, data_y=None, viewer=None): super(KeyEvent, self).__init__() self.key = key self.state = state self.mode = mode self.modifiers = modifiers self.data_x = data_x self.data_y = data_y self.viewer = viewer class PointEvent(UIEvent): def __init__(self, button=None, state=None, mode=None, modifiers=None, data_x=None, data_y=None, viewer=None): super(PointEvent, self).__init__() self.button = button self.state = state self.mode = mode self.modifiers = modifiers self.data_x = data_x self.data_y = data_y self.viewer = viewer class ScrollEvent(UIEvent): def __init__(self, button=None, state=None, mode=None, modifiers=None, direction=None, amount=None, data_x=None, data_y=None, viewer=None): super(ScrollEvent, self).__init__() self.button = button self.state = state self.mode = mode self.modifiers = modifiers self.direction = direction self.amount = amount self.data_x = data_x self.data_y = data_y self.viewer = viewer class BindingMapError(Exception): pass class BindingMapper(Callback.Callbacks): """The BindingMapper class maps physical events (key presses, button clicks, mouse movement, etc) into logical events. By registering for logical events, plugins and other event handling code doesn't need to care about the physical controls bindings. The bindings can be changed and everything continues to work. """ def __init__(self, logger, btnmap=None, mode_map=None, modifier_map=None): Callback.Callbacks.__init__(self) self.logger = logger # For event mapping self.eventmap = {} self._kbdmode = None self._kbdmode_types = ('held', 'oneshot', 'locked') self._kbdmode_type = 'held' self._kbdmode_type_default = 'oneshot' self._delayed_reset = False self._modifiers = frozenset([]) # Set up button mapping if btnmap is None: btnmap = { 0x1: 'cursor', 0x2: 'wheel', 0x4: 'draw' } self.btnmap = btnmap self._button = 0 # Set up modifier mapping if modifier_map is None: self.modifier_map = {} for keyname in ('shift_l', 'shift_r'): self.add_modifier(keyname, 'shift') for keyname in ('control_l', 'control_r'): self.add_modifier(keyname, 'ctrl') for keyname in ('meta_right',): self.add_modifier(keyname, 'meta') else: self.modifier_map = mode_map # Set up mode mapping if mode_map is None: self.mode_map = {} else: self.mode_map = mode_map self._empty_set = frozenset([]) # For callbacks for name in ('mode-set', ): self.enable_callback(name) def add_modifier(self, keyname, modname): bnch = Bunch.Bunch(name=modname) self.modifier_map[keyname] = bnch self.modifier_map['mod_%s' % modname] = bnch def get_modifiers(self): return set([bnch.name for keyname, bnch in self.modifier_map.items()]) def clear_modifier_map(self): self.modifier_map = {} def set_mode_map(self, mode_map): self.mode_map = mode_map def clear_mode_map(self): self.mode_map = {} def current_mode(self): return (self._kbdmode, self._kbdmode_type) def get_modes(self): return set([bnch.name for keyname, bnch in self.mode_map.items()]) def add_mode(self, keyname, mode_name, mode_type='held', msg=None): if mode_type is not None: assert mode_type in self._kbdmode_types, \ ValueError("Bad mode type '%s': must be one of %s" % ( mode_type, self._kbdmode_types)) bnch 
= Bunch.Bunch(name=mode_name, type=mode_type, msg=msg) self.mode_map[keyname] = bnch self.mode_map['mode_%s' % mode_name] = bnch def set_mode(self, name, mode_type=None): if mode_type == None: mode_type = self._kbdmode_type_default assert mode_type in self._kbdmode_types, \ ValueError("Bad mode type '%s': must be one of %s" % ( mode_type, self._kbdmode_types)) self._kbdmode = name if name is None: # like a reset_mode() mode_type = 'held' self._delayed_reset = False self._kbdmode_type = mode_type self.logger.info("set keyboard mode to '%s' type=%s" % (name, mode_type)) self.make_callback('mode-set', self._kbdmode, self._kbdmode_type) def set_default_mode_type(self, mode_type): assert mode_type in self._kbdmode_types, \ ValueError("Bad mode type '%s': must be one of %s" % ( mode_type, self._kbdmode_types)) self._kbdmode_type_default = mode_type def get_default_mode_type(self): return self._kbdmode_type_default def reset_mode(self, viewer): try: bnch = self.mode_map['mode_%s' % self._kbdmode] except: bnch = None self._kbdmode = None self._kbdmode_type = 'held' self._delayed_reset = False self.logger.info("set keyboard mode reset") # clear onscreen message, if any if (bnch is not None) and (bnch.msg is not None): viewer.onscreen_message(None) self.make_callback('mode-set', self._kbdmode, self._kbdmode_type) def clear_button_map(self): self.btnmap = {} def map_button(self, btncode, alias): """For remapping the buttons to different names. 'btncode' is a fixed button code and 'alias' is a logical name. """ self.btnmap[btncode] = alias def get_buttons(self): return set([alias for keyname, alias in self.btnmap.items()]) def clear_event_map(self): self.eventmap = {} def map_event(self, mode, modifiers, alias, eventname): self.eventmap[(mode, frozenset(tuple(modifiers)), alias)] = Bunch.Bunch(name=eventname) def register_for_events(self, viewer): # Add callbacks for interesting events viewer.add_callback('motion', self.window_motion) viewer.add_callback('button-press', self.window_button_press) viewer.add_callback('button-release', self.window_button_release) viewer.add_callback('key-press', self.window_key_press) viewer.add_callback('key-release', self.window_key_release) ## viewer.add_callback('drag-drop', self.window_drag_drop) viewer.add_callback('scroll', self.window_scroll) ## viewer.add_callback('map', self.window_map) ## viewer.add_callback('focus', self.window_focus) ## viewer.add_callback('enter', self.window_enter) ## viewer.add_callback('leave', self.window_leave) def window_map(self, viewer): pass def window_focus(self, viewer, hasFocus): return True def window_enter(self, viewer): return True def window_leave(self, viewer): return True def window_key_press(self, viewer, keyname): self.logger.debug("keyname=%s" % (keyname)) # Is this a modifer key? if keyname in self.modifier_map: bnch = self.modifier_map[keyname] self._modifiers = self._modifiers.union(set([bnch.name])) return True # Is this a mode key? 
elif keyname in self.mode_map: bnch = self.mode_map[keyname] if self._kbdmode_type == 'locked': if bnch.name == self._kbdmode: self.reset_mode(viewer) return True if self._delayed_reset: if bnch.name == self._kbdmode: self._delayed_reset = False return False # if there is not a mode active now, # activate this one if self._kbdmode is None: mode_type = bnch.type if mode_type == None: mode_type = self._kbdmode_type_default self.set_mode(bnch.name, mode_type) if bnch.msg is not None: viewer.onscreen_message(bnch.msg) return True try: # TEMP: hack to get around the issue of how keynames # are generated. if keyname == 'escape': idx = (None, self._empty_set, keyname) else: idx = (self._kbdmode, self._modifiers, keyname) emap = self.eventmap[idx] except KeyError: try: idx = (None, self._empty_set, keyname) emap = self.eventmap[idx] except KeyError: return False self.logger.debug("idx=%s" % (str(idx))) cbname = 'keydown-%s' % (emap.name) last_x, last_y = viewer.get_last_data_xy() event = KeyEvent(key=keyname, state='down', mode=self._kbdmode, modifiers=self._modifiers, viewer=viewer, data_x=last_x, data_y=last_y) return viewer.make_ui_callback(cbname, event, last_x, last_y) def window_key_release(self, viewer, keyname): self.logger.debug("keyname=%s" % (keyname)) # Is this a modifer key? if keyname in self.modifier_map: bnch = self.modifier_map[keyname] self._modifiers = self._modifiers.difference(set([bnch.name])) return True try: idx = (self._kbdmode, self._modifiers, keyname) emap = self.eventmap[idx] except KeyError: try: idx = (None, self._empty_set, keyname) emap = self.eventmap[idx] except KeyError: emap = None # Is this a mode key? if keyname in self.mode_map: bnch = self.mode_map[keyname] if self._kbdmode == bnch.name: # <-- the current mode key is being released if bnch.type == 'held': if self._button == 0: # if no button is being held, then reset mode self.reset_mode(viewer) else: self._delayed_reset = True return True # release mode if this is a oneshot mode ## if self._kbdmode_type == 'oneshot': ## self.reset_mode(viewer) if emap is None: return False cbname = 'keyup-%s' % (emap.name) last_x, last_y = viewer.get_last_data_xy() event = KeyEvent(key=keyname, state='up', mode=self._kbdmode, modifiers=self._modifiers, viewer=viewer, data_x=last_x, data_y=last_y) return viewer.make_ui_callback(cbname, event, last_x, last_y) def window_button_press(self, viewer, btncode, data_x, data_y): self.logger.debug("x,y=%d,%d btncode=%s" % (data_x, data_y, hex(btncode))) self._button |= btncode button = self.btnmap[btncode] try: idx = (self._kbdmode, self._modifiers, button) emap = self.eventmap[idx] except KeyError: # no entry for this mode, try unmodified entry try: idx = (None, self._empty_set, button) emap = self.eventmap[idx] except KeyError: #self.logger.warn("No button map binding for %s" % (str(btncode))) return False self.logger.debug("Event map for %s" % (str(idx))) cbname = '%s-down' % (emap.name) self.logger.debug("making callback for %s (mode=%s)" % ( cbname, self._kbdmode)) event = PointEvent(button=button, state='down', mode=self._kbdmode, modifiers=self._modifiers, viewer=viewer, data_x=data_x, data_y=data_y) return viewer.make_ui_callback(cbname, event, data_x, data_y) def window_motion(self, viewer, btncode, data_x, data_y): button = self.btnmap[btncode] try: idx = (self._kbdmode, self._modifiers, button) emap = self.eventmap[idx] except KeyError: # no entry for this mode, try unmodified entry try: idx = (None, self._empty_set, button) emap = self.eventmap[idx] except KeyError: 
return False self.logger.debug("Event map for %s" % (str(idx))) cbname = '%s-move' % (emap.name) event = PointEvent(button=button, state='move', mode=self._kbdmode, modifiers=self._modifiers, viewer=viewer, data_x=data_x, data_y=data_y) return viewer.make_ui_callback(cbname, event, data_x, data_y) def window_button_release(self, viewer, btncode, data_x, data_y): self.logger.debug("x,y=%d,%d button=%s" % (data_x, data_y, hex(btncode))) self._button &= ~btncode button = self.btnmap[btncode] try: idx = (self._kbdmode, self._modifiers, button) # release mode if this is a oneshot mode if (self._kbdmode_type == 'oneshot') or (self._delayed_reset): self.reset_mode(viewer) emap = self.eventmap[idx] except KeyError: # no entry for this mode, try unmodified entry try: idx = (None, self._empty_set, button) emap = self.eventmap[idx] except KeyError: #self.logger.warn("No button map binding for %s" % (str(btncode))) return False self.logger.debug("Event map for %s" % (str(idx))) cbname = '%s-up' % (emap.name) event = PointEvent(button=button, state='up', mode=self._kbdmode, modifiers=self._modifiers, viewer=viewer, data_x=data_x, data_y=data_y) return viewer.make_ui_callback(cbname, event, data_x, data_y) def window_scroll(self, viewer, direction, amount, data_x, data_y): try: idx = (self._kbdmode, self._modifiers, 'scroll') emap = self.eventmap[idx] except KeyError: # no entry for this mode, try unmodified entry try: idx = (None, self._empty_set, 'scroll') emap = self.eventmap[idx] except KeyError: return False cbname = '%s-scroll' % (emap.name) event = ScrollEvent(button='scroll', state='scroll', mode=self._kbdmode, modifiers=self._modifiers, viewer=viewer, direction=direction, amount=amount, data_x=data_x, data_y=data_y) return viewer.make_ui_callback(cbname, event) #END
bsd-3-clause
6,027,979,708,236,508,000
34.646996
82
0.523658
false
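The BindingMapper class in the ginga Bindings source above maps physical events (key presses, button clicks, scrolling) to logical event names, so handlers subscribe to logical events and stay independent of the concrete controls. A rough wiring sketch under stated assumptions: viewer and logger come from the host application, and registering the handler with viewer.add_callback() is an assumed viewer API; only BindingMapper(), map_event() and register_for_events() are taken from the code above.

# Hypothetical wiring sketch (viewer, logger and viewer.add_callback() are
# assumptions supplied by the host application, not part of the file above).
bindmap = BindingMapper(logger)

# Bind the physical 'q' key, with no modifiers and no active mode, to the
# logical event name 'my_quit'.
bindmap.map_event(None, [], 'q', 'my_quit')

# Route the viewer's raw key/button/scroll callbacks through the mapper.
bindmap.register_for_events(viewer)

def on_my_quit(viewer, event, data_x, data_y):
    # event is a KeyEvent; returning True marks the event as handled.
    print('quit requested at %s,%s' % (data_x, data_y))
    return True

# window_key_press() above emits 'keydown-<logical name>' on the viewer, so
# the handler is registered under that derived callback name.
viewer.add_callback('keydown-my_quit', on_my_quit)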
hotsyk/mailpost
mailpost/management/commands/fetchmail.py
1
1566
""" A package that maps incoming email to HTTP requests Mailpost version 0.1 (C) 2010 oDesk www.oDesk.com """ import os from django.core.management.base import BaseCommand, CommandError from django.conf import settings from django.core.mail import mail_admins from mailpost.handler import Handler class Command(BaseCommand): def handle(self, *args, **options): if settings.DISABLE_FETCHMAIL: print "Fetchmail is disabled" return False if os.path.exists(settings.LOCK_FILENAME): print "Lock file found! Cannot run another process." print "If you believe this is a mistake," + \ " please delete '%s' file manually" % \ os.path.normpath(settings.LOCK_FILENAME) mail_admins('MAILPOST:Lock file found! Cannot run another process',\ "If you believe this is a mistake," + \ " please delete '%s' file manually" % \ os.path.normpath(settings.LOCK_FILENAME), fail_silently=True) return False handler = Handler(config_file=settings.MAILPOST_CONFIG_FILE) f = open(settings.LOCK_FILENAME, 'w') f.close() try: for url, result in handler.process(): print 'Sent to URL: %s' % url if isinstance(result, Exception): print 'Error: ', result else: print 'OK' finally: os.remove(settings.LOCK_FILENAME)
bsd-3-clause
1,583,851,459,839,918,300
33.043478
80
0.574074
false
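The fetchmail management command above serializes runs with a plain lock file: it refuses to start (and mails the admins) if the file already exists, creates it, and removes it in a finally block even when processing fails. A minimal, generic restatement of that pattern as a context manager; the function name, path and RuntimeError are illustrative and not part of mailpost.

import os
from contextlib import contextmanager

@contextmanager
def single_instance_lock(path):
    # Fail fast if a previous run left (or still holds) the lock file.
    if os.path.exists(path):
        raise RuntimeError("Lock file %s found! Cannot run another process." % path)
    open(path, 'w').close()
    try:
        yield
    finally:
        # Always release the lock, even when the body raises.
        os.remove(path)

# Usage sketch (the path is made up):
# with single_instance_lock('/tmp/mailpost-fetchmail.lock'):
#     fetch_and_dispatch()

Like the original command, this is a simple check-then-create guard rather than an atomic lock, which is usually adequate for a cron-driven management command but can race if two processes start at exactly the same moment.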
yotamfr/prot2vec
src/python/pssm3go_model.py
1
11287
import numpy as np import torch import torch.nn as nn from torch.autograd import Variable import torch.nn.functional as F USE_CUDA = False KERN_SIZE = 3 def set_cuda(val): global USE_CUDA USE_CUDA = val def sequence_mask(sequence_length, max_len=None): if max_len is None: max_len = sequence_length.data.max() batch_size = sequence_length.size(0) seq_range = torch.arange(0, max_len).long() seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len) seq_range_expand = Variable(seq_range_expand) # if sequence_length.is_cuda: if USE_CUDA: seq_range_expand = seq_range_expand.cuda() seq_length_expand = (sequence_length.unsqueeze(1) .expand_as(seq_range_expand)) return seq_range_expand < seq_length_expand def masked_cross_entropy(logits, target, length, gamma=0, eps=1e-7): length = Variable(torch.LongTensor(length)) if USE_CUDA: length = length.cuda() """ Args: logits: A Variable containing a FloatTensor of size (batch, max_len, num_classes) which contains the unnormalized probability for each class. target: A Variable containing a LongTensor of size (batch, max_len) which contains the index of the true class for each corresponding step. length: A Variable containing a LongTensor of size (batch,) which contains the length of each data in a batch. Returns: loss: An average loss value masked by the length. """ # logits_flat: (batch * max_len, num_classes) logits_flat = logits.view(-1, logits.size(-1)) # target_flat: (batch * max_len, 1) target_flat = target.view(-1, 1) # probs_flat: (batch * max_len, 1) probs_flat = torch.gather(F.softmax(logits_flat), dim=1, index=target_flat) probs_flat = probs_flat.clamp(eps, 1. - eps) # prob: [0, 1] -> [eps, 1 - eps] # losses_flat: (batch * max_len, 1) losses_flat = -torch.log(probs_flat) * (1 - probs_flat) ** gamma # focal loss # losses: (batch, max_len) losses = losses_flat.view(*target.size()) # mask: (batch, max_len) mask = sequence_mask(sequence_length=length, max_len=target.size(1)) losses = losses * mask.float() loss = losses.sum() / length.float().sum() return loss class EncoderRNN(nn.Module): def __init__(self, input_size, hidden_size, n_layers=1, dropout=0.1): super(EncoderRNN, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.n_layers = n_layers self.dropout = dropout self.gru = nn.GRU(input_size, hidden_size, n_layers, dropout=self.dropout, bidirectional=True) def forward(self, input_seqs, input_lengths, hidden=None): # Note: we run this all at once (over multiple batches of multiple sequences) packed = torch.nn.utils.rnn.pack_padded_sequence(input_seqs, input_lengths) outputs, hidden = self.gru(packed, hidden) outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(outputs) # unpack (back to padded) outputs = outputs[:, :, :self.hidden_size] + outputs[:, :, self.hidden_size:] # Sum bidirectional outputs return outputs, hidden class CNN(nn.Module): def __init__(self, input_size): super(CNN, self).__init__() inp_size = input_size self.features = nn.Sequential( nn.Conv2d(1, 10, kernel_size=(KERN_SIZE, inp_size)), nn.BatchNorm2d(10), nn.ReLU(inplace=True), nn.Conv2d(10, 10, kernel_size=(KERN_SIZE, 1)), nn.BatchNorm2d(10), nn.ReLU(inplace=True), nn.MaxPool2d((2, 1)), nn.Conv2d(10, 20, kernel_size=(KERN_SIZE, 1)), nn.BatchNorm2d(20), nn.ReLU(inplace=True), nn.Conv2d(20, 20, kernel_size=(KERN_SIZE, 1)), nn.BatchNorm2d(20), nn.ReLU(inplace=True), nn.MaxPool2d((2, 1)), nn.Conv2d(20, 40, kernel_size=(KERN_SIZE, 1)), nn.BatchNorm2d(40), nn.ReLU(inplace=True), nn.Conv2d(40, 40, kernel_size=(KERN_SIZE, 1)), 
nn.BatchNorm2d(40), nn.ReLU(inplace=True), nn.MaxPool2d((2, 1)), ) self.n_pool_layers = 3 def forward(self, x): out = self.features(x) out = out.view(out.size(2), out.size(0), out.size(1) * out.size(3)) return out class EncoderCNN(nn.Module): def __init__(self, input_size, hidden_size, n_layers=1, dropout=0.1): super(EncoderCNN, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.n_layers = n_layers self.dropout = dropout self.cnn = CNN(input_size) self.gru = nn.GRU(input_size, hidden_size, n_layers, dropout=self.dropout, bidirectional=True) def forward(self, input_seqs, input_lengths, hidden=None): input_features = self.cnn(input_seqs.transpose(0, 1).unsqueeze(1)) features_length = [(l//(2 ** self.cnn.n_pool_layers)) for l in input_lengths] # features_length = input_lengths # print(input_features.size()) # print(features_length) # Note: we run this all at once (over multiple batches of multiple sequences) packed = torch.nn.utils.rnn.pack_padded_sequence(input_features, features_length) outputs, hidden = self.gru(packed, hidden) outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(outputs) # unpack (back to padded) outputs = outputs[:, :, :self.hidden_size] + outputs[:, :, self.hidden_size:] # Sum bidirectional outputs return outputs, hidden class Attn(nn.Module): def __init__(self, method, hidden_size): super(Attn, self).__init__() self.method = method self.hidden_size = hidden_size if self.method == 'general': self.attn = nn.Linear(self.hidden_size, hidden_size) elif self.method == 'concat': self.attn = nn.Linear(self.hidden_size * 2, hidden_size) self.v = nn.Parameter(torch.FloatTensor(1, hidden_size)) def forward(self, hidden, encoder_outputs): max_len = encoder_outputs.size(0) this_batch_size = encoder_outputs.size(1) # Create variable to store attention energies attn_energies = Variable(torch.zeros(this_batch_size, max_len)) # B x S if USE_CUDA: attn_energies = attn_energies.cuda() # For each batch of encoder outputs for b in range(this_batch_size): # Calculate energy for each encoder output for i in range(max_len): attn_energies[b, i] = self.score(hidden[:, b], encoder_outputs[i, b].unsqueeze(0)) # Normalize energies to weights in range 0 to 1, resize to 1 x B x S return F.softmax(attn_energies).unsqueeze(1) def score(self, hidden, encoder_output): if self.method == 'dot': energy = torch.dot(hidden.view(-1), encoder_output.view(-1)) return energy elif self.method == 'general': energy = self.attn(encoder_output) energy = torch.dot(hidden.view(-1), energy.view(-1)) return energy elif self.method == 'concat': energy = self.attn(torch.cat((hidden, encoder_output), 1)) energy = self.v.dot(energy) return energy class LuongAttnDecoderRNN(nn.Module): def __init__(self, attn_model, hidden_size, output_size, n_layers=1, prior_size=0, dropout=0.1, embedding=None): super(LuongAttnDecoderRNN, self).__init__() # Keep for reference self.attn_model = attn_model self.hidden_size = hidden_size self.output_size = output_size self.prior_size = prior_size self.n_layers = n_layers self.dropout = dropout # Define layers if np.any(embedding): self.embedding_size = embedding_size = embedding.shape[1] self.embedding = nn.Embedding(output_size, embedding_size) self.embedding.weight = nn.Parameter(torch.from_numpy(embedding).float()) self.embedding.requires_grad = True else: self.embedding_size = embedding_size = hidden_size self.embedding = nn.Embedding(output_size, embedding_size) self.embedding_dropout = nn.Dropout(dropout) self.gru = nn.GRU(embedding_size, 
hidden_size, n_layers, dropout=dropout) self.concat = nn.Linear(hidden_size * 2, hidden_size) self.out = nn.Linear(hidden_size + prior_size, output_size) # Choose attention model if attn_model != 'none': self.attn = Attn(attn_model, hidden_size) def forward(self, input_seq, last_hidden, encoder_outputs, prior): # Note: we run this one step at a time # Get the embedding of the current input word (last output word) batch_size = input_seq.size(0) embedded = self.embedding(input_seq) embedded = self.embedding_dropout(embedded) embedded = embedded.view(1, batch_size, -1) # S=1 x B x N # Get current hidden state from input word and last hidden state rnn_output, hidden = self.gru(embedded, last_hidden) # Calculate attention from current RNN state and all encoder outputs; # apply to encoder outputs to get weighted average attn_weights = self.attn(rnn_output, encoder_outputs) context = attn_weights.bmm(encoder_outputs.transpose(0, 1)) # B x S=1 x N # Attentional vector using the RNN hidden state and context vector # concatenated together (Luong eq. 5) rnn_output = rnn_output.squeeze(0) # S=1 x B x N -> B x N context = context.squeeze(1) # B x S=1 x N -> B x N concat_input = torch.cat((rnn_output, context), 1) concat_output = F.tanh(self.concat(concat_input)) # Finally predict next token (Luong eq. 6, without softmax) if prior is None: output = self.out(concat_output) else: output = self.out(torch.cat((concat_output, prior), 1)) # Return final output, hidden state, and attention weights (for visualization) return output, hidden, attn_weights # https://github.com/DingKe/pytorch_workplace/blob/master/focalloss/loss.py def one_hot(index, classes): size = index.size() + (classes,) view = index.size() + (1,) mask = torch.Tensor(*size).fill_(0) index = index.view(*view) ones = 1. if isinstance(index, Variable): ones = Variable(torch.Tensor(index.size()).fill_(1)) mask = Variable(mask, volatile=index.volatile) return mask.scatter_(1, index, ones) class FocalLoss(nn.Module): def __init__(self, gamma=0, eps=1e-7): super(FocalLoss, self).__init__() self.gamma = gamma self.eps = eps def forward(self, input, target): y = one_hot(target, input.size(-1)) logit = F.softmax(input) logit = logit.clamp(self.eps, 1. - self.eps) loss = -1 * y * torch.log(logit) # cross entropy loss = loss * (1 - logit) ** self.gamma # focal loss return loss.sum()
mit
-5,608,129,983,196,193,000
35.409677
116
0.611234
false
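In the pssm3go model code above, masked_cross_entropy() flattens the (batch, max_len, num_classes) logits, gathers the probability of each target class, applies the optional focal-loss weight (1 - p)**gamma, and averages the loss only over the positions covered by each sequence's length. A small usage sketch under the assumption of the old Variable-based PyTorch API that the file targets; the shapes and values are made up.

import torch
from torch.autograd import Variable

batch, max_len, num_classes = 2, 5, 7
# Unnormalized class scores per timestep; requires_grad so the loss can backprop.
logits = Variable(torch.randn(batch, max_len, num_classes), requires_grad=True)
# Integer class index per timestep.
target = Variable(torch.LongTensor(batch, max_len).random_(0, num_classes))
# True sequence lengths: the second sequence only contributes its first 3 steps.
lengths = [5, 3]

loss = masked_cross_entropy(logits, target, lengths)  # scalar loss
loss.backward()  # masked positions contribute zero loss and zero gradient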
eneldoserrata/marcos_openerp
addons/point_of_sale/report/pos_receipt.py
1
3036
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import time
from openerp.report import report_sxw
from openerp import pooler


def titlize(journal_name):
    words = journal_name.split()
    while words.pop() != 'journal':
        continue
    return ' '.join(words)


class order(report_sxw.rml_parse):

    def __init__(self, cr, uid, name, context):
        super(order, self).__init__(cr, uid, name, context=context)
        user = pooler.get_pool(cr.dbname).get('res.users').browse(cr, uid, uid, context=context)
        partner = user.company_id.partner_id
        self.localcontext.update({
            'time': time,
            'disc': self.discount,
            'net': self.netamount,
            'get_journal_amt': self._get_journal_amt,
            'address': partner or False,
            'titlize': titlize
        })

    def netamount(self, order_line_id):
        sql = 'select (qty*price_unit) as net_price from pos_order_line where id = %s'
        self.cr.execute(sql, (order_line_id,))
        res = self.cr.fetchone()
        return res[0]/1.18

    def discount(self, order_id):
        sql = 'select discount, price_unit, qty from pos_order_line where order_id = %s '
        self.cr.execute(sql, (order_id,))
        res = self.cr.fetchall()
        dsum = 0
        for line in res:
            if line[0] != 0:
                dsum = dsum + (line[2] * (line[0]*line[1]/100))
        return dsum

    def _get_journal_amt(self, order_id):
        data = {}
        sql = """ select aj.name,absl.amount as amt from account_bank_statement as abs
                  LEFT JOIN account_bank_statement_line as absl ON abs.id = absl.statement_id
                  LEFT JOIN account_journal as aj ON aj.id = abs.journal_id
                  WHERE absl.pos_statement_id =%d""" % (order_id)
        self.cr.execute(sql)
        data = self.cr.dictfetchall()
        return data


report_sxw.report_sxw('report.pos.receipt', 'pos.order', 'addons/point_of_sale/report/pos_receipt.rml', parser=order, header=False)

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
873,737,431,624,882,400
38.428571
131
0.591897
false
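In the point_of_sale receipt parser above, discount() accumulates qty * (discount * price_unit / 100) over the order lines, and netamount() divides the gross line total by 1.18, which presumably strips an 18% tax included in the unit price. A tiny pure-Python restatement of that arithmetic, detached from the database cursor and with made-up numbers, just to make the computation explicit.

def discount_total(lines):
    # Each line mirrors the SQL result above: (discount_percent, price_unit, qty).
    dsum = 0.0
    for disc, price_unit, qty in lines:
        if disc != 0:
            dsum += qty * (disc * price_unit / 100.0)
    return dsum

# Two units at 50.00 with a 10% discount: 2 * (10 * 50 / 100) = 10.0
assert discount_total([(10.0, 50.0, 2)]) == 10.0
# A 118.00 gross line corresponds to a 100.00 net amount at the assumed 18% rate.
assert abs(118.0 / 1.18 - 100.0) < 1e-9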
baylee-d/osf.io
osf_tests/test_elastic_search.py
1
61588
# -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function, unicode_literals import mock import time import unittest import logging import functools from nose.tools import * # noqa: F403 import pytest from framework.auth.core import Auth from website import settings import website.search.search as search from website.search import elastic_search from website.search.util import build_query from website.search_migration.migrate import migrate from osf.models import ( Retraction, NodeLicense, OSFGroup, Tag, Preprint, QuickFilesNode, ) from addons.wiki.models import WikiPage from addons.osfstorage.models import OsfStorageFile from scripts.populate_institutions import main as populate_institutions from osf_tests import factories from tests.base import OsfTestCase from tests.test_features import requires_search from tests.utils import run_celery_tasks TEST_INDEX = 'test' def query(term, raw=False): results = search.search(build_query(term), index=elastic_search.INDEX, raw=raw) return results def query_collections(name): term = 'category:collectionSubmission AND "{}"'.format(name) return query(term, raw=True) def query_user(name): term = 'category:user AND "{}"'.format(name) return query(term) def query_file(name): term = 'category:file AND "{}"'.format(name) return query(term) def query_tag_file(name): term = 'category:file AND (tags:u"{}")'.format(name) return query(term) def retry_assertion(interval=0.3, retries=3): def test_wrapper(func): t_interval = interval t_retries = retries @functools.wraps(func) def wrapped(*args, **kwargs): try: func(*args, **kwargs) except AssertionError as e: if retries: time.sleep(t_interval) retry_assertion(interval=t_interval, retries=t_retries - 1)(func)(*args, **kwargs) else: raise e return wrapped return test_wrapper @pytest.mark.enable_search @pytest.mark.enable_enqueue_task class TestCollectionsSearch(OsfTestCase): def setUp(self): super(TestCollectionsSearch, self).setUp() search.delete_index(elastic_search.INDEX) search.create_index(elastic_search.INDEX) self.user = factories.UserFactory(fullname='Salif Keita') self.node_private = factories.NodeFactory(creator=self.user, title='Salif Keita: Madan', is_public=False) self.node_public = factories.NodeFactory(creator=self.user, title='Salif Keita: Yamore', is_public=True) self.node_one = factories.NodeFactory(creator=self.user, title='Salif Keita: Mandjou', is_public=True) self.node_two = factories.NodeFactory(creator=self.user, title='Salif Keita: Tekere', is_public=True) self.reg_private = factories.RegistrationFactory(title='Salif Keita: Madan', creator=self.user, is_public=False, archive=True) self.reg_public = factories.RegistrationFactory(title='Salif Keita: Madan', creator=self.user, is_public=True, archive=True) self.reg_one = factories.RegistrationFactory(title='Salif Keita: Madan', creator=self.user, is_public=True, archive=True) self.provider = factories.CollectionProviderFactory() self.reg_provider = factories.RegistrationProviderFactory() self.collection_one = factories.CollectionFactory(creator=self.user, is_public=True, provider=self.provider) self.collection_public = factories.CollectionFactory(creator=self.user, is_public=True, provider=self.provider) self.collection_private = factories.CollectionFactory(creator=self.user, is_public=False, provider=self.provider) self.reg_collection = factories.CollectionFactory(creator=self.user, provider=self.reg_provider, is_public=True) self.reg_collection_private = factories.CollectionFactory(creator=self.user, 
provider=self.reg_provider, is_public=False) def test_only_public_collections_submissions_are_searchable(self): docs = query_collections('Salif Keita')['results'] assert_equal(len(docs), 0) self.collection_public.collect_object(self.node_private, self.user) self.reg_collection.collect_object(self.reg_private, self.user) docs = query_collections('Salif Keita')['results'] assert_equal(len(docs), 0) assert_false(self.node_one.is_collected) assert_false(self.node_public.is_collected) self.collection_one.collect_object(self.node_one, self.user) self.collection_public.collect_object(self.node_public, self.user) self.reg_collection.collect_object(self.reg_public, self.user) assert_true(self.node_one.is_collected) assert_true(self.node_public.is_collected) assert_true(self.reg_public.is_collected) docs = query_collections('Salif Keita')['results'] assert_equal(len(docs), 3) self.collection_private.collect_object(self.node_two, self.user) self.reg_collection_private.collect_object(self.reg_one, self.user) docs = query_collections('Salif Keita')['results'] assert_equal(len(docs), 3) def test_index_on_submission_privacy_changes(self): # test_submissions_turned_private_are_deleted_from_index docs = query_collections('Salif Keita')['results'] assert_equal(len(docs), 0) self.collection_public.collect_object(self.node_one, self.user) self.collection_one.collect_object(self.node_one, self.user) docs = query_collections('Salif Keita')['results'] assert_equal(len(docs), 2) with run_celery_tasks(): self.node_one.is_public = False self.node_one.save() docs = query_collections('Salif Keita')['results'] assert_equal(len(docs), 0) # test_submissions_turned_public_are_added_to_index self.collection_public.collect_object(self.node_private, self.user) docs = query_collections('Salif Keita')['results'] assert_equal(len(docs), 0) with run_celery_tasks(): self.node_private.is_public = True self.node_private.save() docs = query_collections('Salif Keita')['results'] assert_equal(len(docs), 1) def test_index_on_collection_privacy_changes(self): # test_submissions_of_collection_turned_private_are_removed_from_index docs = query_collections('Salif Keita')['results'] assert_equal(len(docs), 0) self.collection_public.collect_object(self.node_one, self.user) self.collection_public.collect_object(self.node_two, self.user) self.collection_public.collect_object(self.node_public, self.user) self.reg_collection.collect_object(self.reg_public, self.user) docs = query_collections('Salif Keita')['results'] assert_equal(len(docs), 4) with run_celery_tasks(): self.collection_public.is_public = False self.collection_public.save() self.reg_collection.is_public = False self.reg_collection.save() docs = query_collections('Salif Keita')['results'] assert_equal(len(docs), 0) # test_submissions_of_collection_turned_public_are_added_to_index self.collection_private.collect_object(self.node_one, self.user) self.collection_private.collect_object(self.node_two, self.user) self.collection_private.collect_object(self.node_public, self.user) self.reg_collection_private.collect_object(self.reg_public, self.user) assert_true(self.node_one.is_collected) assert_true(self.node_two.is_collected) assert_true(self.node_public.is_collected) assert_true(self.reg_public.is_collected) docs = query_collections('Salif Keita')['results'] assert_equal(len(docs), 0) with run_celery_tasks(): self.collection_private.is_public = True self.collection_private.save() self.reg_collection.is_public = True self.reg_collection.save() docs = query_collections('Salif 
Keita')['results'] assert_equal(len(docs), 4) def test_collection_submissions_are_removed_from_index_on_delete(self): docs = query_collections('Salif Keita')['results'] assert_equal(len(docs), 0) self.collection_public.collect_object(self.node_one, self.user) self.collection_public.collect_object(self.node_two, self.user) self.collection_public.collect_object(self.node_public, self.user) self.reg_collection.collect_object(self.reg_public, self.user) docs = query_collections('Salif Keita')['results'] assert_equal(len(docs), 4) self.collection_public.delete() self.reg_collection.delete() assert_true(self.collection_public.deleted) assert_true(self.reg_collection.deleted) docs = query_collections('Salif Keita')['results'] assert_equal(len(docs), 0) def test_removed_submission_are_removed_from_index(self): self.collection_public.collect_object(self.node_one, self.user) self.reg_collection.collect_object(self.reg_public, self.user) assert_true(self.node_one.is_collected) assert_true(self.reg_public.is_collected) docs = query_collections('Salif Keita')['results'] assert_equal(len(docs), 2) self.collection_public.remove_object(self.node_one) self.reg_collection.remove_object(self.reg_public) assert_false(self.node_one.is_collected) assert_false(self.reg_public.is_collected) docs = query_collections('Salif Keita')['results'] assert_equal(len(docs), 0) def test_collection_submission_doc_structure(self): self.collection_public.collect_object(self.node_one, self.user) docs = query_collections('Keita')['results'] assert_equal(docs[0]['_source']['title'], self.node_one.title) with run_celery_tasks(): self.node_one.title = 'Keita Royal Family of Mali' self.node_one.save() docs = query_collections('Keita')['results'] assert_equal(docs[0]['_source']['title'], self.node_one.title) assert_equal(docs[0]['_source']['abstract'], self.node_one.description) assert_equal(docs[0]['_source']['contributors'][0]['url'], self.user.url) assert_equal(docs[0]['_source']['contributors'][0]['fullname'], self.user.fullname) assert_equal(docs[0]['_source']['url'], self.node_one.url) assert_equal(docs[0]['_source']['id'], '{}-{}'.format(self.node_one._id, self.node_one.collecting_metadata_list[0].collection._id)) assert_equal(docs[0]['_source']['category'], 'collectionSubmission') @pytest.mark.enable_search @pytest.mark.enable_enqueue_task class TestUserUpdate(OsfTestCase): def setUp(self): super(TestUserUpdate, self).setUp() search.delete_index(elastic_search.INDEX) search.create_index(elastic_search.INDEX) self.user = factories.UserFactory(fullname='David Bowie') def test_new_user(self): # Verify that user has been added to Elastic Search docs = query_user(self.user.fullname)['results'] assert_equal(len(docs), 1) def test_new_user_unconfirmed(self): user = factories.UnconfirmedUserFactory() docs = query_user(user.fullname)['results'] assert_equal(len(docs), 0) token = user.get_confirmation_token(user.username) user.confirm_email(token) user.save() docs = query_user(user.fullname)['results'] assert_equal(len(docs), 1) @retry_assertion def test_change_name(self): # Add a user, change her name, and verify that only the new name is # found in search. 
user = factories.UserFactory(fullname='Barry Mitchell') fullname_original = user.fullname user.fullname = user.fullname[::-1] user.save() docs_original = query_user(fullname_original)['results'] assert_equal(len(docs_original), 0) docs_current = query_user(user.fullname)['results'] assert_equal(len(docs_current), 1) def test_disabled_user(self): # Test that disabled users are not in search index user = factories.UserFactory(fullname='Bettie Page') user.save() # Ensure user is in search index assert_equal(len(query_user(user.fullname)['results']), 1) # Disable the user user.is_disabled = True user.save() # Ensure user is not in search index assert_equal(len(query_user(user.fullname)['results']), 0) @pytest.mark.enable_quickfiles_creation def test_merged_user(self): user = factories.UserFactory(fullname='Annie Lennox') merged_user = factories.UserFactory(fullname='Lisa Stansfield') user.save() merged_user.save() assert_equal(len(query_user(user.fullname)['results']), 1) assert_equal(len(query_user(merged_user.fullname)['results']), 1) user.merge_user(merged_user) assert_equal(len(query_user(user.fullname)['results']), 1) assert_equal(len(query_user(merged_user.fullname)['results']), 0) def test_employment(self): user = factories.UserFactory(fullname='Helga Finn') user.save() institution = 'Finn\'s Fine Filers' docs = query_user(institution)['results'] assert_equal(len(docs), 0) user.jobs.append({ 'institution': institution, 'title': 'The Big Finn', }) user.save() docs = query_user(institution)['results'] assert_equal(len(docs), 1) def test_education(self): user = factories.UserFactory(fullname='Henry Johnson') user.save() institution = 'Henry\'s Amazing School!!!' docs = query_user(institution)['results'] assert_equal(len(docs), 0) user.schools.append({ 'institution': institution, 'degree': 'failed all classes', }) user.save() docs = query_user(institution)['results'] assert_equal(len(docs), 1) def test_name_fields(self): names = ['Bill Nye', 'William', 'the science guy', 'Sanford', 'the Great'] user = factories.UserFactory(fullname=names[0]) user.given_name = names[1] user.middle_names = names[2] user.family_name = names[3] user.suffix = names[4] user.save() docs = [query_user(name)['results'] for name in names] assert_equal(sum(map(len, docs)), len(docs)) # 1 result each assert_true(all([user._id == doc[0]['id'] for doc in docs])) @pytest.mark.enable_search @pytest.mark.enable_enqueue_task class TestProject(OsfTestCase): def setUp(self): super(TestProject, self).setUp() search.delete_index(elastic_search.INDEX) search.create_index(elastic_search.INDEX) self.user = factories.UserFactory(fullname='John Deacon') self.project = factories.ProjectFactory(title='Red Special', creator=self.user) def test_new_project_private(self): # Verify that a private project is not present in Elastic Search. docs = query(self.project.title)['results'] assert_equal(len(docs), 0) def test_make_public(self): # Make project public, and verify that it is present in Elastic # Search. 
with run_celery_tasks(): self.project.set_privacy('public') docs = query(self.project.title)['results'] assert_equal(len(docs), 1) @pytest.mark.enable_search @pytest.mark.enable_enqueue_task class TestOSFGroup(OsfTestCase): def setUp(self): with run_celery_tasks(): super(TestOSFGroup, self).setUp() search.delete_index(elastic_search.INDEX) search.create_index(elastic_search.INDEX) self.user = factories.UserFactory(fullname='John Deacon') self.user_two = factories.UserFactory(fullname='Grapes McGee') self.group = OSFGroup( name='Cornbread', creator=self.user, ) self.group.save() self.project = factories.ProjectFactory(is_public=True, creator=self.user, title='Biscuits') self.project.save() def test_create_osf_group(self): title = 'Butter' group = OSFGroup(name=title, creator=self.user) group.save() docs = query(title)['results'] assert_equal(len(docs), 1) def test_set_group_name(self): title = 'Eggs' self.group.set_group_name(title) self.group.save() docs = query(title)['results'] assert_equal(len(docs), 1) docs = query('Cornbread')['results'] assert_equal(len(docs), 0) def test_add_member(self): self.group.make_member(self.user_two) docs = query('category:group AND "{}"'.format(self.user_two.fullname))['results'] assert_equal(len(docs), 1) self.group.make_manager(self.user_two) docs = query('category:group AND "{}"'.format(self.user_two.fullname))['results'] assert_equal(len(docs), 1) self.group.remove_member(self.user_two) docs = query('category:group AND "{}"'.format(self.user_two.fullname))['results'] assert_equal(len(docs), 0) def test_connect_to_node(self): self.project.add_osf_group(self.group) docs = query('category:project AND "{}"'.format(self.group.name))['results'] assert_equal(len(docs), 1) self.project.remove_osf_group(self.group) docs = query('category:project AND "{}"'.format(self.group.name))['results'] assert_equal(len(docs), 0) def test_remove_group(self): group_name = self.group.name self.project.add_osf_group(self.group) docs = query('category:project AND "{}"'.format(group_name))['results'] assert_equal(len(docs), 1) self.group.remove_group() docs = query('category:project AND "{}"'.format(group_name))['results'] assert_equal(len(docs), 0) docs = query(group_name)['results'] assert_equal(len(docs), 0) @pytest.mark.enable_search @pytest.mark.enable_enqueue_task class TestPreprint(OsfTestCase): def setUp(self): with run_celery_tasks(): super(TestPreprint, self).setUp() search.delete_index(elastic_search.INDEX) search.create_index(elastic_search.INDEX) self.user = factories.UserFactory(fullname='John Deacon') self.preprint = Preprint( title='Red Special', description='We are the champions', creator=self.user, provider=factories.PreprintProviderFactory() ) self.preprint.save() self.file = OsfStorageFile.create( target=self.preprint, path='/panda.txt', name='panda.txt', materialized_path='/panda.txt') self.file.save() self.published_preprint = factories.PreprintFactory( creator=self.user, title='My Fairy King', description='Under pressure', ) def test_new_preprint_unsubmitted(self): # Verify that an unsubmitted preprint is not present in Elastic Search. title = 'Apple' self.preprint.title = title self.preprint.save() docs = query(title)['results'] assert_equal(len(docs), 0) def test_new_preprint_unpublished(self): # Verify that an unpublished preprint is not present in Elastic Search. 
title = 'Banana' self.preprint = factories.PreprintFactory(creator=self.user, is_published=False, title=title) assert self.preprint.title == title docs = query(title)['results'] assert_equal(len(docs), 0) def test_unsubmitted_preprint_primary_file(self): # Unpublished preprint's primary_file not showing up in Elastic Search title = 'Cantaloupe' self.preprint.title = title self.preprint.set_primary_file(self.file, auth=Auth(self.user), save=True) assert self.preprint.title == title docs = query(title)['results'] assert_equal(len(docs), 0) def test_publish_preprint(self): title = 'Date' self.preprint = factories.PreprintFactory(creator=self.user, is_published=False, title=title) self.preprint.set_published(True, auth=Auth(self.preprint.creator), save=True) assert self.preprint.title == title docs = query(title)['results'] # Both preprint and primary_file showing up in Elastic assert_equal(len(docs), 2) def test_preprint_title_change(self): title_original = self.published_preprint.title new_title = 'New preprint title' self.published_preprint.set_title(new_title, auth=Auth(self.user), save=True) docs = query('category:preprint AND ' + title_original)['results'] assert_equal(len(docs), 0) docs = query('category:preprint AND ' + new_title)['results'] assert_equal(len(docs), 1) def test_preprint_description_change(self): description_original = self.published_preprint.description new_abstract = 'My preprint abstract' self.published_preprint.set_description(new_abstract, auth=Auth(self.user), save=True) docs = query(self.published_preprint.title)['results'] docs = query('category:preprint AND ' + description_original)['results'] assert_equal(len(docs), 0) docs = query('category:preprint AND ' + new_abstract)['results'] assert_equal(len(docs), 1) def test_set_preprint_private(self): # Not currently an option for users, but can be used for spam self.published_preprint.set_privacy('private', auth=Auth(self.user), save=True) docs = query(self.published_preprint.title)['results'] # Both preprint and primary_file showing up in Elastic assert_equal(len(docs), 0) def test_set_primary_file(self): # Only primary_file should be in index, if primary_file is changed, other files are removed from index. 
self.file = OsfStorageFile.create( target=self.published_preprint, path='/panda.txt', name='panda.txt', materialized_path='/panda.txt') self.file.save() self.published_preprint.set_primary_file(self.file, auth=Auth(self.user), save=True) docs = query(self.published_preprint.title)['results'] assert_equal(len(docs), 2) assert_equal(docs[1]['name'], self.file.name) def test_set_license(self): license_details = { 'id': 'NONE', 'year': '2015', 'copyrightHolders': ['Iron Man'] } title = 'Elderberry' self.published_preprint.title = title self.published_preprint.set_preprint_license(license_details, Auth(self.user), save=True) assert self.published_preprint.title == title docs = query(title)['results'] assert_equal(len(docs), 2) assert_equal(docs[0]['license']['copyright_holders'][0], 'Iron Man') assert_equal(docs[0]['license']['name'], 'No license') def test_add_tags(self): tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family'] for tag in tags: docs = query('tags:"{}"'.format(tag))['results'] assert_equal(len(docs), 0) self.published_preprint.add_tag(tag, Auth(self.user), save=True) for tag in tags: docs = query('tags:"{}"'.format(tag))['results'] assert_equal(len(docs), 1) def test_remove_tag(self): tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family'] for tag in tags: self.published_preprint.add_tag(tag, Auth(self.user), save=True) self.published_preprint.remove_tag(tag, Auth(self.user), save=True) docs = query('tags:"{}"'.format(tag))['results'] assert_equal(len(docs), 0) def test_add_contributor(self): # Add a contributor, then verify that project is found when searching # for contributor. user2 = factories.UserFactory(fullname='Adam Lambert') docs = query('category:preprint AND "{}"'.format(user2.fullname))['results'] assert_equal(len(docs), 0) # with run_celery_tasks(): self.published_preprint.add_contributor(user2, save=True) docs = query('category:preprint AND "{}"'.format(user2.fullname))['results'] assert_equal(len(docs), 1) def test_remove_contributor(self): # Add and remove a contributor, then verify that project is not found # when searching for contributor. 
user2 = factories.UserFactory(fullname='Brian May') self.published_preprint.add_contributor(user2, save=True) self.published_preprint.remove_contributor(user2, Auth(self.user)) docs = query('category:preprint AND "{}"'.format(user2.fullname))['results'] assert_equal(len(docs), 0) def test_hide_contributor(self): user2 = factories.UserFactory(fullname='Brian May') self.published_preprint.add_contributor(user2) self.published_preprint.set_visible(user2, False, save=True) docs = query('category:preprint AND "{}"'.format(user2.fullname))['results'] assert_equal(len(docs), 0) self.published_preprint.set_visible(user2, True, save=True) docs = query('category:preprint AND "{}"'.format(user2.fullname))['results'] assert_equal(len(docs), 1) def test_move_contributor(self): user2 = factories.UserFactory(fullname='Brian May') self.published_preprint.add_contributor(user2, save=True) docs = query('category:preprint AND "{}"'.format(user2.fullname))['results'] assert_equal(len(docs), 1) docs[0]['contributors'][0]['fullname'] == self.user.fullname docs[0]['contributors'][1]['fullname'] == user2.fullname self.published_preprint.move_contributor(user2, Auth(self.user), 0) docs = query('category:preprint AND "{}"'.format(user2.fullname))['results'] assert_equal(len(docs), 1) docs[0]['contributors'][0]['fullname'] == user2.fullname docs[0]['contributors'][1]['fullname'] == self.user.fullname def test_tag_aggregation(self): tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family'] for tag in tags: self.published_preprint.add_tag(tag, Auth(self.user), save=True) docs = query(self.published_preprint.title)['tags'] assert len(docs) == 3 for doc in docs: assert doc['key'] in tags @pytest.mark.enable_search @pytest.mark.enable_enqueue_task class TestNodeSearch(OsfTestCase): def setUp(self): super(TestNodeSearch, self).setUp() with run_celery_tasks(): self.node = factories.ProjectFactory(is_public=True, title='node') self.public_child = factories.ProjectFactory(parent=self.node, is_public=True, title='public_child') self.private_child = factories.ProjectFactory(parent=self.node, title='private_child') self.public_subchild = factories.ProjectFactory(parent=self.private_child, is_public=True) self.node.node_license = factories.NodeLicenseRecordFactory() self.node.save() self.query = 'category:project & category:component' @retry_assertion() def test_node_license_added_to_search(self): docs = query(self.query)['results'] node = [d for d in docs if d['title'] == self.node.title][0] assert_in('license', node) assert_equal(node['license']['id'], self.node.node_license.license_id) @unittest.skip('Elasticsearch latency seems to be causing theses tests to fail randomly.') @retry_assertion(retries=10) def test_node_license_propogates_to_children(self): docs = query(self.query)['results'] child = [d for d in docs if d['title'] == self.public_child.title][0] assert_in('license', child) assert_equal(child['license'].get('id'), self.node.node_license.license_id) child = [d for d in docs if d['title'] == self.public_subchild.title][0] assert_in('license', child) assert_equal(child['license'].get('id'), self.node.node_license.license_id) @unittest.skip('Elasticsearch latency seems to be causing theses tests to fail randomly.') @retry_assertion(retries=10) def test_node_license_updates_correctly(self): other_license = NodeLicense.objects.get(name='MIT License') new_license = factories.NodeLicenseRecordFactory(node_license=other_license) self.node.node_license = new_license self.node.save() docs = 
query(self.query)['results'] for doc in docs: assert_equal(doc['license'].get('id'), new_license.license_id) @pytest.mark.enable_search @pytest.mark.enable_enqueue_task class TestRegistrationRetractions(OsfTestCase): def setUp(self): super(TestRegistrationRetractions, self).setUp() self.user = factories.UserFactory(fullname='Doug Bogie') self.title = 'Red Special' self.consolidate_auth = Auth(user=self.user) self.project = factories.ProjectFactory( title=self.title, description='', creator=self.user, is_public=True, ) self.registration = factories.RegistrationFactory(project=self.project, is_public=True) @mock.patch('osf.models.registrations.Registration.archiving', mock.PropertyMock(return_value=False)) def test_retraction_is_searchable(self): self.registration.retract_registration(self.user) self.registration.retraction.state = Retraction.APPROVED self.registration.retraction.save() self.registration.save() self.registration.retraction._on_complete(self.user) docs = query('category:registration AND ' + self.title)['results'] assert_equal(len(docs), 1) @mock.patch('osf.models.registrations.Registration.archiving', mock.PropertyMock(return_value=False)) def test_pending_retraction_wiki_content_is_searchable(self): # Add unique string to wiki wiki_content = {'home': 'public retraction test'} for key, value in wiki_content.items(): docs = query(value)['results'] assert_equal(len(docs), 0) with run_celery_tasks(): WikiPage.objects.create_for_node(self.registration, key, value, self.consolidate_auth) # Query and ensure unique string shows up docs = query(value)['results'] assert_equal(len(docs), 1) # Query and ensure registration does show up docs = query('category:registration AND ' + self.title)['results'] assert_equal(len(docs), 1) # Retract registration self.registration.retract_registration(self.user, '') with run_celery_tasks(): self.registration.save() self.registration.reload() # Query and ensure unique string in wiki doesn't show up docs = query('category:registration AND "{}"'.format(wiki_content['home']))['results'] assert_equal(len(docs), 1) # Query and ensure registration does show up docs = query('category:registration AND ' + self.title)['results'] assert_equal(len(docs), 1) @mock.patch('osf.models.registrations.Registration.archiving', mock.PropertyMock(return_value=False)) def test_retraction_wiki_content_is_not_searchable(self): # Add unique string to wiki wiki_content = {'home': 'public retraction test'} for key, value in wiki_content.items(): docs = query(value)['results'] assert_equal(len(docs), 0) with run_celery_tasks(): WikiPage.objects.create_for_node(self.registration, key, value, self.consolidate_auth) # Query and ensure unique string shows up docs = query(value)['results'] assert_equal(len(docs), 1) # Query and ensure registration does show up docs = query('category:registration AND ' + self.title)['results'] assert_equal(len(docs), 1) # Retract registration self.registration.retract_registration(self.user, '') self.registration.retraction.state = Retraction.APPROVED with run_celery_tasks(): self.registration.retraction.save() self.registration.save() self.registration.update_search() # Query and ensure unique string in wiki doesn't show up docs = query('category:registration AND "{}"'.format(wiki_content['home']))['results'] assert_equal(len(docs), 0) # Query and ensure registration does show up docs = query('category:registration AND ' + self.title)['results'] assert_equal(len(docs), 1) @pytest.mark.enable_search @pytest.mark.enable_enqueue_task class 
TestPublicNodes(OsfTestCase): def setUp(self): with run_celery_tasks(): super(TestPublicNodes, self).setUp() self.user = factories.UserFactory(fullname='Doug Bogie') self.title = 'Red Special' self.consolidate_auth = Auth(user=self.user) self.project = factories.ProjectFactory( title=self.title, description='', creator=self.user, is_public=True, ) self.component = factories.NodeFactory( parent=self.project, description='', title=self.title, creator=self.user, is_public=True ) self.registration = factories.RegistrationFactory( title=self.title, description='', creator=self.user, is_public=True, ) self.registration.archive_job.target_addons.clear() self.registration.archive_job.status = 'SUCCESS' self.registration.archive_job.save() def test_make_private(self): # Make project public, then private, and verify that it is not present # in search. with run_celery_tasks(): self.project.set_privacy('private') docs = query('category:project AND ' + self.title)['results'] assert_equal(len(docs), 0) with run_celery_tasks(): self.component.set_privacy('private') docs = query('category:component AND ' + self.title)['results'] assert_equal(len(docs), 0) def test_search_node_partial(self): self.project.set_title('Blue Rider-Express', self.consolidate_auth) with run_celery_tasks(): self.project.save() find = query('Blue')['results'] assert_equal(len(find), 1) def test_search_node_partial_with_sep(self): self.project.set_title('Blue Rider-Express', self.consolidate_auth) with run_celery_tasks(): self.project.save() find = query('Express')['results'] assert_equal(len(find), 1) def test_search_node_not_name(self): self.project.set_title('Blue Rider-Express', self.consolidate_auth) with run_celery_tasks(): self.project.save() find = query('Green Flyer-Slow')['results'] assert_equal(len(find), 0) def test_public_parent_title(self): self.project.set_title('hello &amp; world', self.consolidate_auth) with run_celery_tasks(): self.project.save() docs = query('category:component AND ' + self.title)['results'] assert_equal(len(docs), 1) assert_equal(docs[0]['parent_title'], 'hello & world') assert_true(docs[0]['parent_url']) def test_make_parent_private(self): # Make parent of component, public, then private, and verify that the # component still appears but doesn't link to the parent in search. 
with run_celery_tasks(): self.project.set_privacy('private') docs = query('category:component AND ' + self.title)['results'] assert_equal(len(docs), 1) assert_false(docs[0]['parent_title']) assert_false(docs[0]['parent_url']) def test_delete_project(self): with run_celery_tasks(): self.component.remove_node(self.consolidate_auth) docs = query('category:component AND ' + self.title)['results'] assert_equal(len(docs), 0) with run_celery_tasks(): self.project.remove_node(self.consolidate_auth) docs = query('category:project AND ' + self.title)['results'] assert_equal(len(docs), 0) def test_change_title(self): title_original = self.project.title with run_celery_tasks(): self.project.set_title( 'Blue Ordinary', self.consolidate_auth, save=True ) docs = query('category:project AND ' + title_original)['results'] assert_equal(len(docs), 0) docs = query('category:project AND ' + self.project.title)['results'] assert_equal(len(docs), 1) def test_add_tags(self): tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family'] with run_celery_tasks(): for tag in tags: docs = query('tags:"{}"'.format(tag))['results'] assert_equal(len(docs), 0) self.project.add_tag(tag, self.consolidate_auth, save=True) for tag in tags: docs = query('tags:"{}"'.format(tag))['results'] assert_equal(len(docs), 1) def test_remove_tag(self): tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family'] for tag in tags: self.project.add_tag(tag, self.consolidate_auth, save=True) self.project.remove_tag(tag, self.consolidate_auth, save=True) docs = query('tags:"{}"'.format(tag))['results'] assert_equal(len(docs), 0) def test_update_wiki(self): """Add text to a wiki page, then verify that project is found when searching for wiki text. """ wiki_content = { 'home': 'Hammer to fall', 'swag': '#YOLO' } for key, value in wiki_content.items(): docs = query(value)['results'] assert_equal(len(docs), 0) with run_celery_tasks(): WikiPage.objects.create_for_node(self.project, key, value, self.consolidate_auth) docs = query(value)['results'] assert_equal(len(docs), 1) def test_clear_wiki(self): # Add wiki text to page, then delete, then verify that project is not # found when searching for wiki text. wiki_content = 'Hammer to fall' wp = WikiPage.objects.create_for_node(self.project, 'home', wiki_content, self.consolidate_auth) with run_celery_tasks(): wp.update(self.user, '') docs = query(wiki_content)['results'] assert_equal(len(docs), 0) def test_add_contributor(self): # Add a contributor, then verify that project is found when searching # for contributor. user2 = factories.UserFactory(fullname='Adam Lambert') docs = query('category:project AND "{}"'.format(user2.fullname))['results'] assert_equal(len(docs), 0) with run_celery_tasks(): self.project.add_contributor(user2, save=True) docs = query('category:project AND "{}"'.format(user2.fullname))['results'] assert_equal(len(docs), 1) def test_remove_contributor(self): # Add and remove a contributor, then verify that project is not found # when searching for contributor. 
user2 = factories.UserFactory(fullname='Brian May') self.project.add_contributor(user2, save=True) self.project.remove_contributor(user2, self.consolidate_auth) docs = query('category:project AND "{}"'.format(user2.fullname))['results'] assert_equal(len(docs), 0) def test_hide_contributor(self): user2 = factories.UserFactory(fullname='Brian May') self.project.add_contributor(user2) with run_celery_tasks(): self.project.set_visible(user2, False, save=True) docs = query('category:project AND "{}"'.format(user2.fullname))['results'] assert_equal(len(docs), 0) with run_celery_tasks(): self.project.set_visible(user2, True, save=True) docs = query('category:project AND "{}"'.format(user2.fullname))['results'] assert_equal(len(docs), 1) def test_wrong_order_search(self): title_parts = self.title.split(' ') title_parts.reverse() title_search = ' '.join(title_parts) docs = query(title_search)['results'] assert_equal(len(docs), 3) def test_tag_aggregation(self): tags = ['stonecoldcrazy', 'just a poor boy', 'from-a-poor-family'] with run_celery_tasks(): for tag in tags: self.project.add_tag(tag, self.consolidate_auth, save=True) docs = query(self.title)['tags'] assert len(docs) == 3 for doc in docs: assert doc['key'] in tags @pytest.mark.enable_search @pytest.mark.enable_enqueue_task class TestAddContributor(OsfTestCase): # Tests of the search.search_contributor method def setUp(self): self.name1 = 'Roger1 Taylor1' self.name2 = 'John2 Deacon2' self.name3 = u'j\xc3\xb3ebert3 Smith3' self.name4 = u'B\xc3\xb3bbert4 Jones4' with run_celery_tasks(): super(TestAddContributor, self).setUp() self.user = factories.UserFactory(fullname=self.name1) self.user3 = factories.UserFactory(fullname=self.name3) def test_unreg_users_dont_show_in_search(self): unreg = factories.UnregUserFactory() contribs = search.search_contributor(unreg.fullname) assert_equal(len(contribs['users']), 0) def test_unreg_users_do_show_on_projects(self): with run_celery_tasks(): unreg = factories.UnregUserFactory(fullname='Robert Paulson') self.project = factories.ProjectFactory( title='Glamour Rock', creator=unreg, is_public=True, ) results = query(unreg.fullname)['results'] assert_equal(len(results), 1) def test_search_fullname(self): # Searching for full name yields exactly one result. contribs = search.search_contributor(self.name1) assert_equal(len(contribs['users']), 1) contribs = search.search_contributor(self.name2) assert_equal(len(contribs['users']), 0) def test_search_firstname(self): # Searching for first name yields exactly one result. contribs = search.search_contributor(self.name1.split(' ')[0]) assert_equal(len(contribs['users']), 1) contribs = search.search_contributor(self.name2.split(' ')[0]) assert_equal(len(contribs['users']), 0) def test_search_partial(self): # Searching for part of first name yields exactly one # result. contribs = search.search_contributor(self.name1.split(' ')[0][:-1]) assert_equal(len(contribs['users']), 1) contribs = search.search_contributor(self.name2.split(' ')[0][:-1]) assert_equal(len(contribs['users']), 0) def test_search_fullname_special_character(self): # Searching for a fullname with a special character yields # exactly one result. contribs = search.search_contributor(self.name3) assert_equal(len(contribs['users']), 1) contribs = search.search_contributor(self.name4) assert_equal(len(contribs['users']), 0) def test_search_firstname_special_charcter(self): # Searching for a first name with a special character yields # exactly one result. 
contribs = search.search_contributor(self.name3.split(' ')[0]) assert_equal(len(contribs['users']), 1) contribs = search.search_contributor(self.name4.split(' ')[0]) assert_equal(len(contribs['users']), 0) def test_search_partial_special_character(self): # Searching for a partial name with a special character yields # exctly one result. contribs = search.search_contributor(self.name3.split(' ')[0][:-1]) assert_equal(len(contribs['users']), 1) contribs = search.search_contributor(self.name4.split(' ')[0][:-1]) assert_equal(len(contribs['users']), 0) def test_search_profile(self): orcid = '123456' user = factories.UserFactory() user.social['orcid'] = orcid user.save() contribs = search.search_contributor(orcid) assert_equal(len(contribs['users']), 1) assert_equal(len(contribs['users'][0]['social']), 1) assert_equal(contribs['users'][0]['social']['orcid'], user.social_links['orcid']) @pytest.mark.enable_search @pytest.mark.enable_enqueue_task class TestProjectSearchResults(OsfTestCase): def setUp(self): self.singular = 'Spanish Inquisition' self.plural = 'Spanish Inquisitions' self.possessive = 'Spanish\'s Inquisition' with run_celery_tasks(): super(TestProjectSearchResults, self).setUp() self.user = factories.UserFactory(fullname='Doug Bogie') self.project_singular = factories.ProjectFactory( title=self.singular, creator=self.user, is_public=True, ) self.project_plural = factories.ProjectFactory( title=self.plural, creator=self.user, is_public=True, ) self.project_possessive = factories.ProjectFactory( title=self.possessive, creator=self.user, is_public=True, ) self.project_unrelated = factories.ProjectFactory( title='Cardinal Richelieu', creator=self.user, is_public=True, ) def test_singular_query(self): # Verify searching for singular term includes singular, # possessive and plural versions in results. time.sleep(1) results = query(self.singular)['results'] assert_equal(len(results), 3) def test_plural_query(self): # Verify searching for singular term includes singular, # possessive and plural versions in results. results = query(self.plural)['results'] assert_equal(len(results), 3) def test_possessive_query(self): # Verify searching for possessive term includes singular, # possessive and plural versions in results. 
results = query(self.possessive)['results'] assert_equal(len(results), 3) def job(**kwargs): keys = [ 'title', 'institution', 'department', 'location', 'startMonth', 'startYear', 'endMonth', 'endYear', 'ongoing', ] job = {} for key in keys: if key[-5:] == 'Month': job[key] = kwargs.get(key, 'December') elif key[-4:] == 'Year': job[key] = kwargs.get(key, '2000') else: job[key] = kwargs.get(key, 'test_{}'.format(key)) return job @pytest.mark.enable_search @pytest.mark.enable_enqueue_task class TestUserSearchResults(OsfTestCase): def setUp(self): with run_celery_tasks(): super(TestUserSearchResults, self).setUp() self.user_one = factories.UserFactory(jobs=[job(institution='Oxford'), job(institution='Star Fleet')], fullname='Date Soong') self.user_two = factories.UserFactory(jobs=[job(institution='Grapes la Picard'), job(institution='Star Fleet')], fullname='Jean-Luc Picard') self.user_three = factories.UserFactory(jobs=[job(institution='Star Fleet'), job(institution='Federation Medical')], fullname='Beverly Crusher') self.user_four = factories.UserFactory(jobs=[job(institution='Star Fleet')], fullname='William Riker') self.user_five = factories.UserFactory(jobs=[job(institution='Traveler intern'), job(institution='Star Fleet Academy'), job(institution='Star Fleet Intern')], fullname='Wesley Crusher') for i in range(25): factories.UserFactory(jobs=[job()]) self.current_starfleet = [ self.user_three, self.user_four, ] self.were_starfleet = [ self.user_one, self.user_two, self.user_three, self.user_four, self.user_five ] @unittest.skip('Cannot guarentee always passes') def test_current_job_first_in_results(self): results = query_user('Star Fleet')['results'] result_names = [r['names']['fullname'] for r in results] current_starfleet_names = [u.fullname for u in self.current_starfleet] for name in result_names[:2]: assert_in(name, current_starfleet_names) def test_had_job_in_results(self): results = query_user('Star Fleet')['results'] result_names = [r['names']['fullname'] for r in results] were_starfleet_names = [u.fullname for u in self.were_starfleet] for name in result_names: assert_in(name, were_starfleet_names) @pytest.mark.enable_search @pytest.mark.enable_enqueue_task class TestSearchExceptions(OsfTestCase): # Verify that the correct exception is thrown when the connection is lost @classmethod def setUpClass(cls): logging.getLogger('website.project.model').setLevel(logging.CRITICAL) super(TestSearchExceptions, cls).setUpClass() if settings.SEARCH_ENGINE == 'elastic': cls._client = search.search_engine.CLIENT search.search_engine.CLIENT = None @classmethod def tearDownClass(cls): super(TestSearchExceptions, cls).tearDownClass() if settings.SEARCH_ENGINE == 'elastic': search.search_engine.CLIENT = cls._client @requires_search def test_connection_error(self): # Ensures that saving projects/users doesn't break as a result of connection errors self.user = factories.UserFactory(fullname='Doug Bogie') self.project = factories.ProjectFactory( title='Tom Sawyer', creator=self.user, is_public=True, ) self.user.save() self.project.save() @pytest.mark.enable_search @pytest.mark.enable_enqueue_task class TestSearchMigration(OsfTestCase): # Verify that the correct indices are created/deleted during migration @classmethod def tearDownClass(cls): super(TestSearchMigration, cls).tearDownClass() search.create_index(settings.ELASTIC_INDEX) def setUp(self): super(TestSearchMigration, self).setUp() populate_institutions(default_args=True) self.es = search.search_engine.CLIENT 
search.delete_index(settings.ELASTIC_INDEX) search.create_index(settings.ELASTIC_INDEX) self.user = factories.UserFactory(fullname='David Bowie') self.project = factories.ProjectFactory( title=settings.ELASTIC_INDEX, creator=self.user, is_public=True ) self.preprint = factories.PreprintFactory( creator=self.user ) def test_first_migration_no_remove(self): migrate(delete=False, remove=False, index=settings.ELASTIC_INDEX, app=self.app.app) var = self.es.indices.get_aliases() assert_equal(list(var[settings.ELASTIC_INDEX + '_v1']['aliases'].keys())[0], settings.ELASTIC_INDEX) def test_multiple_migrations_no_remove(self): for n in range(1, 21): migrate(delete=False, remove=False, index=settings.ELASTIC_INDEX, app=self.app.app) var = self.es.indices.get_aliases() assert_equal(list(var[settings.ELASTIC_INDEX + '_v{}'.format(n)]['aliases'].keys())[0], settings.ELASTIC_INDEX) def test_first_migration_with_remove(self): migrate(delete=False, remove=True, index=settings.ELASTIC_INDEX, app=self.app.app) var = self.es.indices.get_aliases() assert_equal(list(var[settings.ELASTIC_INDEX + '_v1']['aliases'].keys())[0], settings.ELASTIC_INDEX) def test_multiple_migrations_with_remove(self): for n in range(1, 21, 2): migrate(delete=False, remove=True, index=settings.ELASTIC_INDEX, app=self.app.app) var = self.es.indices.get_aliases() assert_equal(list(var[settings.ELASTIC_INDEX + '_v{}'.format(n)]['aliases'].keys())[0], settings.ELASTIC_INDEX) migrate(delete=False, remove=True, index=settings.ELASTIC_INDEX, app=self.app.app) var = self.es.indices.get_aliases() assert_equal(list(var[settings.ELASTIC_INDEX + '_v{}'.format(n + 1)]['aliases'].keys())[0], settings.ELASTIC_INDEX) assert not var.get(settings.ELASTIC_INDEX + '_v{}'.format(n)) def test_migration_institutions(self): migrate(delete=True, index=settings.ELASTIC_INDEX, app=self.app.app) count_query = {} count_query['aggregations'] = { 'counts': { 'terms': { 'field': '_type', } } } institution_bucket_found = False res = self.es.search(index=settings.ELASTIC_INDEX, doc_type=None, search_type='count', body=count_query) for bucket in res['aggregations']['counts']['buckets']: if bucket['key'] == u'institution': institution_bucket_found = True assert_equal(institution_bucket_found, True) def test_migration_collections(self): provider = factories.CollectionProviderFactory() collection_one = factories.CollectionFactory(is_public=True, provider=provider) collection_two = factories.CollectionFactory(is_public=True, provider=provider) node = factories.NodeFactory(creator=self.user, title='Ali Bomaye', is_public=True) collection_one.collect_object(node, self.user) collection_two.collect_object(node, self.user) assert node.is_collected docs = query_collections('*')['results'] assert len(docs) == 2 docs = query_collections('Bomaye')['results'] assert len(docs) == 2 count_query = {} count_query['aggregations'] = { 'counts': { 'terms': { 'field': '_type', } } } migrate(delete=True, index=settings.ELASTIC_INDEX, app=self.app.app) docs = query_collections('*')['results'] assert len(docs) == 2 docs = query_collections('Bomaye')['results'] assert len(docs) == 2 res = self.es.search(index=settings.ELASTIC_INDEX, doc_type='collectionSubmission', search_type='count', body=count_query) assert res['hits']['total'] == 2 @pytest.mark.enable_search @pytest.mark.enable_enqueue_task class TestSearchFiles(OsfTestCase): def setUp(self): super(TestSearchFiles, self).setUp() self.node = factories.ProjectFactory(is_public=True, title='Otis') self.osf_storage = 
self.node.get_addon('osfstorage') self.root = self.osf_storage.get_root() def test_search_file(self): self.root.append_file('Shake.wav') find = query_file('Shake.wav')['results'] assert_equal(len(find), 1) def test_search_file_name_without_separator(self): self.root.append_file('Shake.wav') find = query_file('Shake')['results'] assert_equal(len(find), 1) def test_delete_file(self): file_ = self.root.append_file('I\'ve Got Dreams To Remember.wav') find = query_file('I\'ve Got Dreams To Remember.wav')['results'] assert_equal(len(find), 1) file_.delete() find = query_file('I\'ve Got Dreams To Remember.wav')['results'] assert_equal(len(find), 0) def test_add_tag(self): file_ = self.root.append_file('That\'s How Strong My Love Is.mp3') tag = Tag(name='Redding') tag.save() file_.tags.add(tag) file_.save() find = query_tag_file('Redding')['results'] assert_equal(len(find), 1) def test_remove_tag(self): file_ = self.root.append_file('I\'ve Been Loving You Too Long.mp3') tag = Tag(name='Blue') tag.save() file_.tags.add(tag) file_.save() find = query_tag_file('Blue')['results'] assert_equal(len(find), 1) file_.tags.remove(tag) file_.save() find = query_tag_file('Blue')['results'] assert_equal(len(find), 0) def test_make_node_private(self): self.root.append_file('Change_Gonna_Come.wav') find = query_file('Change_Gonna_Come.wav')['results'] assert_equal(len(find), 1) self.node.is_public = False with run_celery_tasks(): self.node.save() find = query_file('Change_Gonna_Come.wav')['results'] assert_equal(len(find), 0) def test_make_private_node_public(self): self.node.is_public = False self.node.save() self.root.append_file('Try a Little Tenderness.flac') find = query_file('Try a Little Tenderness.flac')['results'] assert_equal(len(find), 0) self.node.is_public = True with run_celery_tasks(): self.node.save() find = query_file('Try a Little Tenderness.flac')['results'] assert_equal(len(find), 1) def test_delete_node(self): node = factories.ProjectFactory(is_public=True, title='The Soul Album') osf_storage = node.get_addon('osfstorage') root = osf_storage.get_root() root.append_file('The Dock of the Bay.mp3') find = query_file('The Dock of the Bay.mp3')['results'] assert_equal(len(find), 1) node.is_deleted = True with run_celery_tasks(): node.save() find = query_file('The Dock of the Bay.mp3')['results'] assert_equal(len(find), 0) def test_file_download_url_guid(self): file_ = self.root.append_file('Timber.mp3') file_guid = file_.get_guid(create=True) file_.save() find = query_file('Timber.mp3')['results'] assert_equal(find[0]['guid_url'], '/' + file_guid._id + '/') def test_file_download_url_no_guid(self): file_ = self.root.append_file('Timber.mp3') path = file_.path deep_url = '/' + file_.target._id + '/files/osfstorage' + path + '/' find = query_file('Timber.mp3')['results'] assert_not_equal(file_.path, '') assert_equal(file_.path, path) assert_equal(find[0]['guid_url'], None) assert_equal(find[0]['deep_url'], deep_url) @pytest.mark.enable_quickfiles_creation def test_quickfiles_files_appear_in_search(self): quickfiles = QuickFilesNode.objects.get(creator=self.node.creator) quickfiles_osf_storage = quickfiles.get_addon('osfstorage') quickfiles_root = quickfiles_osf_storage.get_root() quickfiles_root.append_file('GreenLight.mp3') find = query_file('GreenLight.mp3')['results'] assert_equal(len(find), 1) assert find[0]['node_url'] == '/{}/quickfiles/'.format(quickfiles.creator._id) @pytest.mark.enable_quickfiles_creation def test_qatest_quickfiles_files_not_appear_in_search(self): quickfiles = 
QuickFilesNode.objects.get(creator=self.node.creator) quickfiles_osf_storage = quickfiles.get_addon('osfstorage') quickfiles_root = quickfiles_osf_storage.get_root() file = quickfiles_root.append_file('GreenLight.mp3') tag = Tag(name='qatest') tag.save() file.tags.add(tag) file.save() find = query_file('GreenLight.mp3')['results'] assert_equal(len(find), 0) @pytest.mark.enable_quickfiles_creation def test_quickfiles_spam_user_files_do_not_appear_in_search(self): quickfiles = QuickFilesNode.objects.get(creator=self.node.creator) quickfiles_osf_storage = quickfiles.get_addon('osfstorage') quickfiles_root = quickfiles_osf_storage.get_root() quickfiles_root.append_file('GreenLight.mp3') self.node.creator.disable_account() self.node.creator.confirm_spam() self.node.creator.save() find = query_file('GreenLight.mp3')['results'] assert_equal(len(find), 0)
apache-2.0
-7,887,259,153,225,950,000
38.888601
134
0.621079
false
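The search tests above depend on a retry_assertion decorator to absorb Elasticsearch indexing latency, but its implementation is not part of this record. The sketch below shows one plausible shape for such a wrapper; the retries and delay defaults are assumptions for illustration, not the OSF project's actual code.

import functools
import time


def retry_assertion(retries=5, delay=1.0):
    """Re-run a test body when an AssertionError is raised.

    Intended for assertions against an eventually consistent backend,
    such as an Elasticsearch index that has not finished refreshing.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(retries):
                try:
                    return func(*args, **kwargs)
                except AssertionError:
                    if attempt == retries - 1:
                        raise
                    time.sleep(delay)
        return wrapper
    return decorator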
chrysante87/pyterpol
pyterpol_test/test_hjd/test.hjd.py
1
1829
import numpy as np
import pyterpol


def load_observations(f):
    """
    :param f: file
    :return:
    """
    # load the observations
    flist = np.loadtxt(f, usecols=[0], unpack=True, dtype=str)
    hjd = np.loadtxt(f, usecols=[1], unpack=True).tolist()
    hjd[0] = None

    # create list of observations
    obs = []
    for i, sf in enumerate(flist):
        # wrap the spectrum into observed spectrum class
        # o = pyterpol.ObservedSpectrum(filename=sf, group=dict(rv=0))
        # o = pyterpol.ObservedSpectrum(filename=sf, group=dict(rv=0), hjd=hjd[i])
        o = pyterpol.ObservedSpectrum(filename=sf, group=dict(rv=i), hjd=hjd[i])

        # estimate uncertainty from continuum
        o.get_sigma_from_continuum(6620., 6630.)
        obs.append(o)

    # create ObservedList
    ol = pyterpol.ObservedList()
    ol.add_observations(obs)

    return ol, flist, hjd


def main():
    """
    :return:
    """
    # parameters
    niter = 2

    # 1) Generate region
    rl = pyterpol.RegionList()
    rl.add_region(wmin=6337., wmax=6410.0, groups=dict(lr=0))
    rl.add_region(wmin=6530., wmax=6600.0, groups=dict(lr=0))
    rl.add_region(wmin=6660., wmax=6690.0, groups=dict(lr=0))

    # 2) Load observed data
    ol = load_observations('prekor.lst')[0]

    ## 3) Generate components
    sl = pyterpol.StarList()
    sl.add_component('primary', teff=16000., logg=4.285, lr=1.0, vrot=90., z=1.0)

    ## 4) construct the interface
    itf = pyterpol.Interface(sl=sl, rl=rl, ol=ol)
    itf.set_grid_properties(order=4, step=0.05)
    itf.setup()
    print itf

    ## 5) write rvs
    itf.save('test.sav')
    itf.write_rvs('test.rv.dat')

    # 6) try to load it
    itf.load('test.sav')

    # 7) and save it again
    itf.save('test2.sav')

if __name__ == '__main__':
    main()
gpl-2.0
-9,011,860,902,559,890,000
24.402778
87
0.600328
false
rljacobson/Guru
guru/ServerListDlg.py
1
6741
from PySide.QtGui import (QApplication, QDialog, QMessageBox, QListWidgetItem, QFont) from PySide.QtCore import (SIGNAL) # from Ui_ServerListDlg import Ui_ServerListDlg from guru.Ui_ServerListDlg import Ui_ServerListDlg # from EditSageServerDlg import EditSageServerDlg from guru.EditSageServerDlg import EditSageServerDlg from guru.ServerConfigurations import ServerConfigurations class ServerListDlg(QDialog, Ui_ServerListDlg): def __init__(self, parent=None): super(ServerListDlg, self).__init__(parent) self.setupUi(self) #Hook up the UI. self.connect(self.AddServerBtn, SIGNAL("clicked()"), self.addServer) self.connect(self.EditBtn, SIGNAL("clicked()"), self.editServer) self.connect(self.ServerListView, SIGNAL("itemDoubleClicked(QListWidgetItem*)"), self.editServer) # self.connect(self.MoveUpBtn, SIGNAL("clicked()"), self.moveUp) # self.connect(self.MoveDownBtn, SIGNAL("clicked()"), self.moveDown) self.connect(self.DeleteServerBtn, SIGNAL("clicked()"), self.deleteServer) self.populateServerListView() def addServer(self): dialog = EditSageServerDlg(self) name_collision = True #The while loop needs to run at least once. while name_collision: if not dialog.exec_(): #The user clicked cancel. return #Fetch the data. new_server = dialog.getServerConfiguration() #Check to see if the name is in use. name_collision = ServerConfigurations.getServerByName(new_server["name"]) #If the user set the name to a new name that is already in use, name_collision will #not be None. The loop will continue and the dialog reopened. if name_collision: #The name is already in use. QMessageBox.critical(self, "Name already exists", "A server configuration already exists with that name. Choose a different name.") dialog.txtName.selectAll() dialog.txtName.setFocus() #Add the server configuration to the list. ServerConfigurations.addServer(new_server) item = QListWidgetItem(new_server["name"], self.ServerListView) self.ServerListView.setCurrentItem(item) if new_server["default"]: self.updateListViewDefault() def editServer(self): #Which server configuration is selected? if not self.ServerListView.currentItem(): #Nothing selected. return name = self.ServerListView.currentItem().text() #Find the corresponding server server_config = ServerConfigurations.getServerByName(name) #Create the dialog. It's only shown when we call dialog.exec_(). dialog = EditSageServerDlg(server_info=server_config) name_collision = True #The while loop needs to run at least once. while name_collision: if not dialog.exec_(): #User clicked cancel. return new_server = dialog.getServerConfiguration() name_collision = False #We check it with the 'if' below. #If the user changed the name to a new name that is already in use, the loop will continue #and the dialog reopened. if new_server["name"] != server_config["name"] and ServerConfigurations.getServerByName(new_server["name"]): #The name is already in use. name_collision = True QMessageBox.critical(self, "Name already exists", "A server configuration already exists with that name. Choose a different name.") dialog.txtName.selectAll() dialog.txtName.setFocus() #Update server_config if server_config != new_server: # Replace server_config with new_server. index = ServerConfigurations.server_list.index(server_config) ServerConfigurations.server_list[index] = new_server self.ServerListView.currentItem().setText(new_server["name"]) #When we set the "default" value, we need to also take care of the font of the item in the ListView. 
ServerConfigurations.setDefault(new_server, set=new_server["default"]) #Update the ListView to reflect our possibly new default server settings. self.updateListViewDefault() def deleteServer(self): #Which server configuration is selected? if not self.ServerListView.currentItem(): #Nothing selected, nothing to do. return name = self.ServerListView.currentItem().text() #Remove the corresponding server from the server_list. ServerConfigurations.removeServerByName(name) #And remove it from the ListView as well. self.removeSelectedItem() def removeSelectedItem(self): #For some weird reason, we need to do this in order to delete the selected item. for item in self.ServerListView.selectedItems(): self.ServerListView.takeItem(self.ServerListView.row(item)) def updateListViewDefault(self): #This method updates the font weight of the Listview. #Bold ONLY the default server in the ListView. for j in range(self.ServerListView.count()): item = self.ServerListView.item(j) if ServerConfigurations.default and item.text() == ServerConfigurations.default["name"]: self.setItemFontWeight(item, QFont.Bold) else: self.setItemFontWeight(item, QFont.Normal) def populateServerListView(self): #Remove everything from the list. Unnecessary with current design, but eh. self.ServerListView.clear() #Add each server configuration to the ListView. for server in ServerConfigurations.server_list: item = QListWidgetItem(server["name"], self.ServerListView) if server["default"]: #We bold the default server. self.setItemFontWeight(item, QFont.Bold) self.ServerListView.setCurrentItem(item) def setItemFontWeight(self, item, weight): #Seems like there should be an easier way to do this. font = item.font() font.setWeight(weight) item.setFont(font) def selectServer(self, server): server_name = server["name"] for j in range(self.ServerListView.count()): item = self.ServerListView.item(j) if item.text() == server_name: self.ServerListView.setCurrentItem(item) break if __name__ == "__main__": import sys app = QApplication(sys.argv) dlg = ServerListDlg() dlg.exec_()
mit
-1,040,866,541,898,030,500
41.402516
147
0.64931
false
upcFrost/SDR2_translate
GuiFuncs.py
1
6614
import os, ConfigParser, PIL from PIL import Image, ImageTk, ImageDraw, ImageFont from PakFile import * from GimFile import * from Common import * from clt import * def showSprite(self, GameDataLoc, pars): fn = os.path.join(GameDataLoc,'all','cg', 'bustup_%02d_%02d.gim' % (pars[1][1], pars[2][1])) GimImage = GimFile() GimImage.openGim(fn) GimImage.getImage() pilImage = PIL.Image.new("RGBA", (GimImage.width, GimImage.height)) pilImage.putdata(GimImage.image) self.scene.sprite = ImageTk.PhotoImage(pilImage) POS_X = (2*SCREEN_W - GimImage.width)/2 POS_Y = (2*SCREEN_H - GimImage.height)/2 imagesprite = self._ScreenView.create_image(POS_X,POS_Y,image=self.scene.sprite, tag = 'sprite') pass def showBGD(self, GameDataLoc, pars): fn = os.path.join(GameDataLoc,'all','cg', 'bgd_%03d.gim' % (pars[0][1])) # Show image if (pars[1][1] == 1): GimImage = GimFile() GimImage.openGim(fn) GimImage.getImage() pilImage = PIL.Image.new("RGBA", (GimImage.width, GimImage.height)) pilImage.putdata(GimImage.image) self.scene.bgd = ImageTk.PhotoImage(pilImage) POS_X = (2*SCREEN_W - GimImage.width)/2 POS_Y = (2*SCREEN_H - GimImage.height)/2 imagebgd = self._ScreenView.create_image(POS_X,POS_Y,image=self.scene.bgd, tag = 'bgd') else: self.scene.bgd = []; self._ScreenView.delete('bgd') pass def showFlash(self, GameDataLoc, pars): # Flash types: # If id < 1000, then it's a flash event. # if id >= 1000, then it's ammo # if id >= 1500, then it's an ammo update # if id >= 2000, then it's a present # If id >= 3000, it's a cutin. id = pars[0][1] # Flash ID added_Y = SCREEN_H/2 # Additional display height readfile = True # Flag that we're reading file, not dataarray # Check if that really is a flash if id >= 3000: # Cutin root = os.path.join(GameDataLoc,'all','cg','cutin') fn_tmp = 'cutin_ico_%03d.gim' id = id - 3000 elif id >= 2000: # Present root = os.path.join(GameDataLoc,'all','cg','present') fn_tmp = 'present_ico_%03d.gim' id = id - 2000 elif id >= 1500: # Ammo root = os.path.join(GameDataLoc,'all','cg','kotodama') fn_tmp = 'kotodama_ico_%03d.gim' id = id - 1500 elif id >= 1000: # Also ammo root = os.path.join(GameDataLoc,'all','cg','kotodama') fn_tmp = 'kotodama_ico_%03d.gim' id = id - 1000 # A flash event. 
else: added_Y = 0 # Don't need an additional height here root = os.path.join(GameDataLoc,'all','flash') fn_tmp = 'fla_%03d.pak' file = os.path.join(root, fn_tmp % id) # Check dir because we have 2 of those if not os.path.isfile(file): root = os.path.join(GameDataLoc,'jp','flash') file = os.path.join(root, fn_tmp % id) # Check for file if not os.path.isfile(file): return -1 # Get extension _, ext = os.path.splitext(file) if ext not in ['.pak', '.gmo', '.gim']: return -1 if ext == '.pak': Pak = PakFile(file) Pak.getFiles() # FIXME: need to check its number idx = pars[-1][1] if idx == 255: # Erase everything from the screen self._ScreenView.delete(ALL) self.scene.flash = [] return 0 # Else - check for image, numeration starts with 1 # Note that i'm using the SAME variable for unification file = Pak.files[idx - 1][1] _, ext = os.path.splitext(Pak.files[idx - 1][0]) # Set flag that we're reading data readfile = False # Check extension if ext not in ['.gmo', '.gim']: return -1 if ext == '.gmo': GmoImage = GmoFile() if readfile: GmoImage.openGmo(file) else: GmoImage.fromData(file) GmoImage.extractGim() GimImage = GmoImage.gim if ext == '.gim': GimImage = GimFile() if readfile: GimImage.openGim(file) else: GimImage.fromData(file) # Now working with gim image GimImage.getImage() pilImage = PIL.Image.new("RGBA", (GimImage.width, GimImage.height)) pilImage.putdata(GimImage.image) self.scene.flash.append(ImageTk.PhotoImage(pilImage)) POS_X = (2*SCREEN_W - GimImage.width)/2 POS_Y = (2*SCREEN_H - GimImage.height)/2 - added_Y imagesprite = self._ScreenView.create_image(POS_X,POS_Y,image=self.scene.flash[-1]) # Text should be kept on the top self._ScreenView.tag_raise('text') return 0 def printLine(self): # First delete the old line try: self._ScreenView.delete(self.scene.text_idx) except: print "No old line present on the screen" # I'm using images here because of the following things: positioning, alpha and font pilImage = PIL.Image.new("RGBA", (SCREEN_W, TEXT_H), (32,32,32,192)) draw = PIL.ImageDraw.Draw(pilImage) font = PIL.ImageFont.truetype("Meiryo.ttf", 20) # First - draw the speaker name at (20,0) draw.text((20,0), self.scene.speaker, (255,255,255), font=font) # Default highlighting clt = 0 color = CLT_STYLES[clt].top_color # Regex for finding highlighted regions clt_marker = re.compile(r"\<CLT (\d+)\>(.*?)\<CLT\>", re.DOTALL) clt_counter = 0 # The text is split into a list like [CLT0_TEXT, CLT_NUM, CLT_TEXT, CLT0_TEXT] text = re.split(clt_marker, self.scene.text) # Draw lines with the fixed line spacing attSpacing = 20 x = 20 # Margin y = 20 # Initial y partNum = 0 for part in text: # Reset text color if partNum % 3 == 0: clt = 0 color = CLT_STYLES[clt].top_color # Every first out of 3 - CLT number (look at the list form once again) if partNum % 3 == 1: clt = int(part) color = CLT_STYLES[clt].top_color # Dealing with a string else: # Draw text with the color we need for line in part.splitlines(): draw.text( (x,y), line, color, font=font) y = y + attSpacing # Next part partNum += 1 # Draw the text on canvas self.scene.text_img = ImageTk.PhotoImage(pilImage) self.scene.text_idx = self._ScreenView.create_image(SCREEN_W/2, SCREEN_H - TEXT_H/2,image=self.scene.text_img, tag = 'text') pass
gpl-3.0
-2,265,219,950,478,194,700
34.180851
128
0.579226
false
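The printLine routine above splits dialogue text on <CLT n> ... <CLT> colour markers and then relies on the position of each piece (partNum % 3) to decide whether it is plain text, a colour id, or highlighted text. A small stand-alone run of the same regex, on an invented sample string, shows the alternating structure it depends on:

import re

# Same pattern as in printLine: <CLT n> opens a highlighted span, a bare <CLT> closes it.
clt_marker = re.compile(r"\<CLT (\d+)\>(.*?)\<CLT\>", re.DOTALL)

sample = "Plain intro <CLT 3>highlighted words<CLT> and a plain tail."
print(re.split(clt_marker, sample))
# ['Plain intro ', '3', 'highlighted words', ' and a plain tail.']
# Index % 3 == 1 holds the colour id, % 3 == 2 the highlighted text,
# and % 3 == 0 the surrounding untinted text, which is exactly what
# the partNum bookkeeping in printLine assumes.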
lokeshsaini94/DaysCalculator
src/days_calculator.py
1
2817
# Calculates number of days between two dates.
# Enter your birth date and current date. This program will calculate the number of days

def date_error_check(month1, day1, month2, day2):
    # Checks if dates are correct.
    if month1 > 12 or month2 > 12 or day1 > 31 or day2 > 31:
        return False
    return True

def date_is_before(year1, month1, day1, year2, month2, day2):
    # Checks if birth date is less than current date
    if year1 < year2:
        return True
    if year1 == year2:
        if month1 < month2:
            return True
        if month1 == month2:
            if day1 < day2:
                return True
    return False

def is_leap_year(year1):
    # Checks if the year is a leap year or not
    if (year1 % 400 == 0):
        return True
    if (year1 % 100 == 0):
        return False
    if (year1 % 4 == 0):
        return True
    return False

def days_in_month(year1, month1):
    # Returns the number of days in the given month and year
    if month1 == 1 or month1 == 3 or month1 == 5 or month1 == 7 or month1 == 8 or month1 == 10 or month1 == 12:
        return 31
    if month1 == 2:
        if is_leap_year(year1):
            return 29
        return 28
    return 30

def next_day(year1, month1, day1):
    # Returns the date of next day
    if day1 < days_in_month(year1, month1):
        return year1, month1, day1+1
    else:
        if month1 < 12:
            return year1, month1+1, 1
        else:
            return year1+1, 1, 1

def days_calculator(year1, month1, day1, year2, month2, day2):
    # Calculates the days between birth date and current date
    days = 0
    if not date_error_check(month1, day1, month2, day2):
        return False  # "Wrong date format! try again"
    if not date_is_before(year1, month1, day1, year2, month2, day2):
        return False  # "No Time travelers allowed"
    while date_is_before(year1, month1, day1, year2, month2, day2):
        year1, month1, day1 = next_day(year1, month1, day1)
        days = days + 1
    return days

# Getting user input and printing results
print "Enter Birth date (yyyy-mm-dd)"
year1 = input("Enter year 1: ")
month1 = input("Enter month 1: ")
day1 = input("Enter day 1: ")
print "Enter current date (yyyy-mm-dd)"
year2 = input("Enter year 2: ")
month2 = input("Enter month 2: ")
day2 = input("Enter day 2: ")

if not days_calculator(year1, month1, day1, year2, month2, day2):
    print "Wrong Date! Try again"
else:
    print "Number of days:", days_calculator(year1, month1, day1, year2, month2, day2)
    print "Number of hours:", days_calculator(year1, month1, day1, year2, month2, day2) * 24
    print "Number of minutes:", days_calculator(year1, month1, day1, year2, month2, day2) * 24 * 60
    print "Number of seconds:", days_calculator(year1, month1, day1, year2, month2, day2) * 24 * 60 * 60
apache-2.0
6,824,768,713,936,740,000
34.658228
117
0.629393
false
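days_calculator above counts days by stepping through the calendar one day at a time with next_day. For anyone wanting a quick sanity check of its output, the standard library can compute the same difference directly; this snippet is only an illustrative cross-check and is not part of the original script.

from datetime import date


def days_between(y1, m1, d1, y2, m2, d2):
    # Same quantity as days_calculator, computed with datetime.date arithmetic.
    return (date(y2, m2, d2) - date(y1, m1, d1)).days


print(days_between(2012, 6, 29, 2013, 6, 29))  # 365, matching the day-by-day loop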
moshthepitt/product.co.ke
links/migrations/0001_initial.py
1
1633
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-02 10:06
from __future__ import unicode_literals

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Link',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_on', models.DateTimeField(auto_now_add=True, verbose_name='Created on')),
                ('updated_on', models.DateTimeField(auto_now=True, verbose_name='Updated on')),
                ('title', models.CharField(max_length=255, verbose_name='Title')),
                ('link', models.URLField(max_length=2083, unique=True, verbose_name='Link')),
                ('description', models.TextField(default='', help_text='A short description. Please limit to 300 characters.', verbose_name='Description')),
                ('active', models.BooleanField(default=True, verbose_name='Active')),
                ('ghost', models.BooleanField(default=False, verbose_name='Ghost')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL, verbose_name='User')),
            ],
            options={
                'ordering': ['-created_on'],
                'verbose_name': 'Link',
                'verbose_name_plural': 'Links',
            },
        ),
    ]
mit
4,329,832,834,615,764,500
41.973684
157
0.59951
false
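The initial migration above fully describes the Link table. For readers who find model declarations easier to scan than migration operations, a roughly equivalent models.py is sketched below; the real models module is not included in this record, so treat field order and the Meta options as inferred from the migration rather than authoritative.

from django.conf import settings
from django.db import models


class Link(models.Model):
    created_on = models.DateTimeField('Created on', auto_now_add=True)
    updated_on = models.DateTimeField('Updated on', auto_now=True)
    title = models.CharField('Title', max_length=255)
    link = models.URLField('Link', max_length=2083, unique=True)
    description = models.TextField(
        'Description', default='',
        help_text='A short description. Please limit to 300 characters.')
    active = models.BooleanField('Active', default=True)
    ghost = models.BooleanField('Ghost', default=False)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name='User',
                             on_delete=models.PROTECT)

    class Meta:
        ordering = ['-created_on']
        verbose_name = 'Link'
        verbose_name_plural = 'Links'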
akretion/l10n-brazil
l10n_br_base/tests/test_other_ie.py
1
3687
# -*- coding: utf-8 -*- # @ 2018 Akretion - www.akretion.com.br - # Magno Costa <[email protected]> # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). from odoo.tests.common import TransactionCase import logging _logger = logging.getLogger(__name__) class OtherIETest(TransactionCase): def setUp(self): super(OtherIETest, self).setUp() self.company_model = self.env['res.company'] self.company = self.company_model.create({ 'name': 'Akretion Sao Paulo', 'legal_name': 'Akretion Sao Paulo', 'cnpj_cpf': '26.905.703/0001-52', 'inscr_est': '932.446.119.086', 'street': 'Rua Paulo Dias', 'number': '586', 'district': 'Alumínio', 'state_id': self.ref('base.state_br_sp'), 'l10n_br_city_id': self.ref('l10n_br_base.city_3501152'), 'country_id': self.ref('base.br'), 'city': 'Alumínio', 'zip': '18125-000', 'phone': '+55 (21) 3010 9965', 'email': '[email protected]', 'website': 'www.companytest.com.br' }) def test_included_valid_ie_in_company(self): result = self.company.write({ 'other_inscr_est_lines': [(0, 0, { 'state_id': self.ref('base.state_br_ba'), 'inscr_est': 41902653, })] }) self.assertTrue(result, "Error to included valid IE.") for line in self.company.partner_id.other_inscr_est_lines: result = False if line.inscr_est == '41902653': result = True self.assertTrue( result, "Error in method to update other IE(s) on partner.") try: result = self.company.write({ 'other_inscr_est_lines': [(0, 0, { 'state_id': self.ref('base.state_br_ba'), 'inscr_est': 67729139, })] }) except: result = False self.assertFalse( result, "Error to check included other" " IE to State already informed.") def test_included_invalid_ie(self): try: result = self.company.write({ 'other_inscr_est_lines': [(0, 0, { 'state_id': self.ref('base.state_br_ba'), 'inscr_est': 41902652, })] }) except: result = False self.assertFalse(result, "Error to check included invalid IE.") def test_included_other_valid_ie_to_same_state_of_company(self): try: result = self.company.write({ 'other_inscr_est_lines': [(0, 0, { 'state_id': self.ref('base.state_br_sp'), 'inscr_est': 692015742119, })] }) except: result = False self.assertFalse( result, "Error to check included other valid IE " " in to same state of Company.") def test_included_valid_ie_on_partner(self): result = self.company.partner_id.write({ 'other_inscr_est_lines': [(0, 0, { 'state_id': self.ref('base.state_br_ba'), 'inscr_est': 41902653, })] }) self.assertTrue(result, "Error to included valid IE.") for line in self.company.other_inscr_est_lines: result = False if line.inscr_est == '41902653': result = True self.assertTrue( result, "Error in method to update other IE(s) on Company.")
agpl-3.0
-2,175,972,573,397,180,000
34.095238
76
0.504206
false
DedMemez/ODS-August-2017
toon/DistributedNPCLaffRestock.py
1
2504
# Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: toontown.toon.DistributedNPCLaffRestock from otp.nametag.NametagConstants import CFSpeech, CFTimeout from toontown.toonbase import TTLocalizer, ToontownGlobals from toontown.toon import NPCToons from DistributedNPCToonBase import DistributedNPCToonBase import LaffRestockGlobals, LaffShopGui, time class DistributedNPCLaffRestock(DistributedNPCToonBase): def __init__(self, cr): DistributedNPCToonBase.__init__(self, cr) self.lastCollision = 0 self.laffDialog = None return def disable(self): self.ignoreAll() self.destroyDialog() DistributedNPCToonBase.disable(self) def destroyDialog(self): self.clearChat() if self.laffDialog: self.laffDialog.destroy() self.laffDialog = None return def postToonStateInit(self): self.putOnSuit(ToontownGlobals.cogHQZoneId2deptIndex(self.zoneId), rental=True) def getCollSphereRadius(self): return 1.25 def handleCollisionSphereEnter(self, collEntry): if self.lastCollision > time.time(): return self.lastCollision = time.time() + ToontownGlobals.NPCCollisionDelay if base.localAvatar.getHp() >= base.localAvatar.getMaxHp(): self.setChatAbsolute(TTLocalizer.RestockFullLaffMessage, CFSpeech | CFTimeout) return base.cr.playGame.getPlace().fsm.request('stopped') base.setCellsAvailable(base.bottomCells, 0) self.destroyDialog() self.acceptOnce('laffShopDone', self.__laffShopDone) self.laffDialog = LaffShopGui.LaffShopGui() def freeAvatar(self): base.cr.playGame.getPlace().fsm.request('walk') base.setCellsAvailable(base.bottomCells, 1) def __laffShopDone(self, state, laff): self.freeAvatar() if state == LaffRestockGlobals.TIMER_END: self.setChatAbsolute(TTLocalizer.STOREOWNER_TOOKTOOLONG, CFSpeech | CFTimeout) elif state == LaffRestockGlobals.USER_CANCEL: self.setChatAbsolute(TTLocalizer.STOREOWNER_GOODBYE, CFSpeech | CFTimeout) elif state == LaffRestockGlobals.RESTOCK: self.sendUpdate('restock', [laff]) def restockResult(self, state): if state in LaffRestockGlobals.RestockMessages: self.setChatAbsolute(LaffRestockGlobals.RestockMessages[state], CFSpeech | CFTimeout)
apache-2.0
3,289,223,680,876,199,400
38.419355
98
0.680911
false
coreboot/chrome-ec
util/uart_stress_tester.py
2
18551
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright 2019 The Chromium OS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ChromeOS Uart Stress Test This tester runs the command 'chargen' on EC and/or AP, captures the output, and compares it against the expected output to check any characters lost. Prerequisite: (1) This test needs PySerial. Please check if it is available before test. Can be installed by 'pip install pyserial' (2) If servod is running, turn uart_timestamp off before running this test. e.g. dut-control cr50_uart_timestamp:off """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import atexit import logging import os import stat import sys import threading import time import serial BAUDRATE = 115200 # Default baudrate setting for UART port CROS_USERNAME = 'root' # Account name to login to ChromeOS CROS_PASSWORD = 'test0000' # Password to login to ChromeOS CHARGEN_TXT = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' # The result of 'chargen 62 62' CHARGEN_TXT_LEN = len(CHARGEN_TXT) CR = '\r' # Carriage Return LF = '\n' # Line Feed CRLF = CR + LF FLAG_FILENAME = '/tmp/chargen_testing' TPM_CMD = ('trunks_client --key_create --rsa=2048 --usage=sign' ' --key_blob=/tmp/blob &> /dev/null') # A ChromeOS TPM command for the cr50 stress # purpose. CR50_LOAD_GEN_CMD = ('while [[ -f %s ]]; do %s; done &' % (FLAG_FILENAME, TPM_CMD)) # A command line to run TPM_CMD in background # infinitely. class ChargenTestError(Exception): """Exception for Uart Stress Test Error""" pass class UartSerial(object): """Test Object for a single UART serial device Attributes: UART_DEV_PROFILES char_loss_occurrences: Number that character loss happens cleanup_cli: Command list to perform before the test exits cr50_workload: True if cr50 should be stressed, or False otherwise usb_output: True if output should be generated to USB channel dev_prof: Dictionary of device profile duration: Time to keep chargen running eol: Characters to add at the end of input logger: object that store the log num_ch_exp: Expected number of characters in output num_ch_cap: Number of captured characters in output test_cli: Command list to run for chargen test test_thread: Thread object that captures the UART output serial: serial.Serial object """ UART_DEV_PROFILES = ( # Kernel { 'prompt':'localhost login:', 'device_type':'AP', 'prepare_cmd':[ CROS_USERNAME, # Login CROS_PASSWORD, # Password 'dmesg -D', # Disable console message 'touch ' + FLAG_FILENAME, # Create a temp file ], 'cleanup_cmd':[ 'rm -f ' + FLAG_FILENAME, # Remove the temp file 'dmesg -E', # Enable console message 'logout', # Logout ], 'end_of_input':LF, }, # EC { 'prompt':'> ', 'device_type':'EC', 'prepare_cmd':[ 'chan save', 'chan 0' # Disable console message ], 'cleanup_cmd':['', 'chan restore'], 'end_of_input':CRLF, }, ) def __init__(self, port, duration, timeout=1, baudrate=BAUDRATE, cr50_workload=False, usb_output=False): """Initialize UartSerial Args: port: UART device path. e.g. /dev/ttyUSB0 duration: Time to test, in seconds timeout: Read timeout value. baudrate: Baud rate such as 9600 or 115200. 
cr50_workload: True if a workload should be generated on cr50 usb_output: True if a workload should be generated to USB channel """ # Initialize serial object self.serial = serial.Serial() self.serial.port = port self.serial.timeout = timeout self.serial.baudrate = baudrate self.duration = duration self.cr50_workload = cr50_workload self.usb_output = usb_output self.logger = logging.getLogger(type(self).__name__ + '| ' + port) self.test_thread = threading.Thread(target=self.stress_test_thread) self.dev_prof = {} self.cleanup_cli = [] self.test_cli = [] self.eol = CRLF self.num_ch_exp = 0 self.num_ch_cap = 0 self.char_loss_occurrences = 0 atexit.register(self.cleanup) def run_command(self, command_lines, delay=0): """Run command(s) at UART prompt Args: command_lines: list of commands to run. delay: delay after a command in second """ for cli in command_lines: self.logger.debug('run %r', cli) self.serial.write((cli + self.eol).encode()) self.serial.flush() if delay: time.sleep(delay) def cleanup(self): """Before termination, clean up the UART device.""" self.logger.debug('Closing...') self.serial.open() self.run_command(self.cleanup_cli) # Run cleanup commands self.serial.close() self.logger.debug('Cleanup done') def get_output(self): """Capture the UART output Args: stop_char: Read output buffer until it reads stop_char. Returns: text from UART output. """ if self.serial.inWaiting() == 0: time.sleep(1) return self.serial.read(self.serial.inWaiting()).decode() def prepare(self): """Prepare the test: Identify the type of UART device (EC or Kernel?), then decide what kind of commands to use to generate stress loads. Raises: ChargenTestError if UART source can't be identified. """ try: self.logger.info('Preparing...') self.serial.open() # Prepare the device for test self.serial.flushInput() self.serial.flushOutput() self.get_output() # drain data # Give a couple of line feeds, and capture the prompt text self.run_command(['', '']) prompt_txt = self.get_output() # Detect the device source: EC or AP? # Detect if the device is AP or EC console based on the captured. for dev_prof in self.UART_DEV_PROFILES: if dev_prof['prompt'] in prompt_txt: self.dev_prof = dev_prof break else: # No prompt patterns were found. UART seems not responding or in # an undesirable status. if prompt_txt: raise ChargenTestError('%s: Got an unknown prompt text: %s\n' 'Check manually whether %s is available.' % (self.serial.port, prompt_txt, self.serial.port)) else: raise ChargenTestError('%s: Got no input. Close any other connections' ' to this port, and try it again.' % self.serial.port) self.logger.info('Detected as %s UART', self.dev_prof['device_type']) # Log displays the UART type (AP|EC) instead of device filename. self.logger = logging.getLogger(type(self).__name__ + '| ' + self.dev_prof['device_type']) # Either login to AP or run some commands to prepare the device # for test self.eol = self.dev_prof['end_of_input'] self.run_command(self.dev_prof['prepare_cmd'], delay=2) self.cleanup_cli += self.dev_prof['cleanup_cmd'] # 'chargen' of AP does not have option for USB output. # Force it work on UART. if self.dev_prof['device_type'] == 'AP': self.usb_output = False # Check whether the command 'chargen' is available in the device. # 'chargen 1 4' is supposed to print '0000' self.get_output() # drain data chargen_cmd = 'chargen 1 4' if self.usb_output: chargen_cmd += ' usb' self.run_command([chargen_cmd]) tmp_txt = self.get_output() # Check whether chargen command is available. 
if '0000' not in tmp_txt: raise ChargenTestError('%s: Chargen got an unexpected result: %s' % (self.dev_prof['device_type'], tmp_txt)) self.num_ch_exp = int(self.serial.baudrate * self.duration / 10) chargen_cmd = 'chargen ' + str(CHARGEN_TXT_LEN) + ' ' + \ str(self.num_ch_exp) if self.usb_output: chargen_cmd += ' usb' self.test_cli = [chargen_cmd] self.logger.info('Ready to test') finally: self.serial.close() def stress_test_thread(self): """Test thread Raises: ChargenTestError: if broken character is found. """ try: self.serial.open() self.serial.flushInput() self.serial.flushOutput() # Run TPM command in background to burden cr50. if self.dev_prof['device_type'] == 'AP' and self.cr50_workload: self.run_command([CR50_LOAD_GEN_CMD]) self.logger.debug('run TPM job while %s exists', FLAG_FILENAME) # Run the command 'chargen', one time self.run_command(['']) # Give a line feed self.get_output() # Drain the output self.run_command(self.test_cli) self.serial.readline() # Drain the echoed command line. err_msg = '%s: Expected %r but got %s after %d char received' # Keep capturing the output until the test timer is expired. self.num_ch_cap = 0 self.char_loss_occurrences = 0 data_starve_count = 0 total_num_ch = self.num_ch_exp # Expected number of characters in total ch_exp = CHARGEN_TXT[0] ch_cap = 'z' # any character value is ok for loop initial condition. while self.num_ch_cap < total_num_ch: captured = self.get_output() if captured: # There is some output data. Reset the data starvation count. data_starve_count = 0 else: data_starve_count += 1 if data_starve_count > 1: # If nothing was captured more than once, then terminate the test. self.logger.debug('No more output') break for ch_cap in captured: if ch_cap not in CHARGEN_TXT: # If it is not alpha-numeric, terminate the test. if ch_cap not in CRLF: # If it is neither a CR nor LF, then it is an error case. self.logger.error('Whole captured characters: %r', captured) raise ChargenTestError(err_msg % ('Broken char captured', ch_exp, hex(ord(ch_cap)), self.num_ch_cap)) # Set the loop termination condition true. total_num_ch = self.num_ch_cap if self.num_ch_cap >= total_num_ch: break if ch_exp != ch_cap: # If it is alpha-numeric but not continuous, then some characters # are lost. self.logger.error(err_msg, 'Char loss detected', ch_exp, repr(ch_cap), self.num_ch_cap) self.char_loss_occurrences += 1 # Recalculate the expected number of characters to adjust # termination condition. The loss might be bigger than this # adjustment, but it is okay since it will terminates by either # CR/LF detection or by data starvation. idx_ch_exp = CHARGEN_TXT.find(ch_exp) idx_ch_cap = CHARGEN_TXT.find(ch_cap) if idx_ch_cap < idx_ch_exp: idx_ch_cap += len(CHARGEN_TXT) total_num_ch -= (idx_ch_cap - idx_ch_exp) self.num_ch_cap += 1 # Determine What character is expected next? ch_exp = CHARGEN_TXT[(CHARGEN_TXT.find(ch_cap) + 1) % CHARGEN_TXT_LEN] finally: self.serial.close() def start_test(self): """Start the test thread""" self.logger.info('Test thread starts') self.test_thread.start() def wait_test_done(self): """Wait until the test thread get done and join""" self.test_thread.join() self.logger.info('Test thread is done') def get_result(self): """Display the result Returns: Integer = the number of lost character Raises: ChargenTestError: if the capture is corrupted. """ # If more characters than expected are captured, it means some messages # from other than chargen are mixed. Stop processing further. 
if self.num_ch_exp < self.num_ch_cap: raise ChargenTestError('%s: UART output is corrupted.' % self.dev_prof['device_type']) # Get the count difference between the expected to the captured # as the number of lost character. char_lost = self.num_ch_exp - self.num_ch_cap self.logger.info('%8d char lost / %10d (%.1f %%)', char_lost, self.num_ch_exp, char_lost * 100.0 / self.num_ch_exp) return char_lost, self.num_ch_exp, self.char_loss_occurrences class ChargenTest(object): """UART stress tester Attributes: logger: logging object serials: Dictionary where key is filename of UART device, and the value is UartSerial object """ def __init__(self, ports, duration, cr50_workload=False, usb_output=False): """Initialize UART stress tester Args: ports: List of UART ports to test. duration: Time to keep testing in seconds. cr50_workload: True if a workload should be generated on cr50 usb_output: True if a workload should be generated to USB channel Raises: ChargenTestError: if any of ports is not a valid character device. """ # Save the arguments for port in ports: try: mode = os.stat(port).st_mode except OSError as e: raise ChargenTestError(e) if not stat.S_ISCHR(mode): raise ChargenTestError('%s is not a character device.' % port) if duration <= 0: raise ChargenTestError('Input error: duration is not positive.') # Initialize logging object self.logger = logging.getLogger(type(self).__name__) # Create an UartSerial object per UART port self.serials = {} # UartSerial objects for port in ports: self.serials[port] = UartSerial(port=port, duration=duration, cr50_workload=cr50_workload, usb_output=usb_output) def prepare(self): """Prepare the test for each UART port""" self.logger.info('Prepare ports for test') for _, ser in self.serials.items(): ser.prepare() self.logger.info('Ports are ready to test') def print_result(self): """Display the test result for each UART port Returns: char_lost: Total number of characters lost """ char_lost = 0 for _, ser in self.serials.items(): (tmp_lost, _, _) = ser.get_result() char_lost += tmp_lost # If any characters are lost, then test fails. msg = 'lost %d character(s) from the test' % char_lost if char_lost > 0: self.logger.error('FAIL: %s', msg) else: self.logger.info('PASS: %s', msg) return char_lost def run(self): """Run the stress test on UART port(s) Raises: ChargenTestError: If any characters are lost. """ # Detect UART source type, and decide which command to test. self.prepare() # Run the test on each UART port in thread. self.logger.info('Test starts') for _, ser in self.serials.items(): ser.start_test() # Wait all tests to finish. for _, ser in self.serials.items(): ser.wait_test_done() # Print the result. char_lost = self.print_result() if char_lost: raise ChargenTestError('Test failed: lost %d character(s)' % char_lost) self.logger.info('Test is done') def parse_args(cmdline): """Parse command line arguments. Args: cmdline: list to be parsed Returns: tuple (options, args) where args is a list of cmdline arguments that the parser was unable to match i.e. they're servod controls, not options. """ description = """%(prog)s repeats sending a uart console command to each UART device for a given time, and check if output has any missing characters. 
Examples: %(prog)s /dev/ttyUSB2 --time 3600 %(prog)s /dev/ttyUSB1 /dev/ttyUSB2 --debug %(prog)s /dev/ttyUSB1 /dev/ttyUSB2 --cr50 """ parser = argparse.ArgumentParser(description=description, formatter_class=argparse.RawTextHelpFormatter ) parser.add_argument('port', type=str, nargs='*', help='UART device path to test') parser.add_argument('-c', '--cr50', action='store_true', default=False, help='generate TPM workload on cr50') parser.add_argument('-d', '--debug', action='store_true', default=False, help='enable debug messages') parser.add_argument('-t', '--time', type=int, help='Test duration in second', default=300) parser.add_argument('-u', '--usb', action='store_true', default=False, help='Generate output to USB channel instead') return parser.parse_known_args(cmdline) def main(): """Main function wrapper""" try: (options, _) = parse_args(sys.argv[1:]) # Set Log format log_format = '%(asctime)s %(levelname)-6s | %(name)-25s' date_format = '%Y-%m-%d %H:%M:%S' if options.debug: log_format += ' | %(filename)s:%(lineno)4d:%(funcName)-18s' loglevel = logging.DEBUG else: loglevel = logging.INFO log_format += ' | %(message)s' logging.basicConfig(level=loglevel, format=log_format, datefmt=date_format) # Create a ChargenTest object utest = ChargenTest(options.port, options.time, cr50_workload=options.cr50, usb_output=options.usb) utest.run() # Run except KeyboardInterrupt: sys.exit(0) except ChargenTestError as e: logging.error(str(e)) sys.exit(1) if __name__ == '__main__': main()
bsd-3-clause
-350,908,902,241,869,500
32.24552
80
0.593876
false
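The sizing logic in the chargen tester above boils down to two small calculations: the expected character count follows from the baud rate and test duration (10 bits per character with 8N1 framing), and the reported loss is the shortfall as a percentage. A stand-alone sketch with assumed example numbers:

# Stand-alone sketch of the sizing and loss math used in the tester above.
# The baud rate and duration below are assumed example values.
def expected_chars(baudrate, duration_secs):
    # 8N1 framing: 1 start bit + 8 data bits + 1 stop bit = 10 bits per char.
    return int(baudrate * duration_secs / 10)

def loss_percent(num_expected, num_captured):
    return (num_expected - num_captured) * 100.0 / num_expected

if __name__ == '__main__':
    exp = expected_chars(baudrate=115200, duration_secs=300)  # 3,456,000 chars
    print('expect %d chars, loss %.3f %%' % (exp, loss_percent(exp, exp - 25)))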
cerrno/neurokernel
neurokernel/tools/gpu.py
1
7561
#!/usr/bin/env python import numbers import numpy as np import pycuda.driver as drv import pycuda.elementwise as elementwise import pycuda.gpuarray as gpuarray from pycuda.tools import dtype_to_ctype # List of available numerical types provided by numpy: # XXX This try/except is an ugly hack to prevent the doc build on # ReadTheDocs from failing: try: num_types = [np.typeDict[t] for t in \ np.typecodes['AllInteger']+np.typecodes['AllFloat']] except TypeError: num_types = [] # Numbers of bytes occupied by each numerical type: num_nbytes = dict((np.dtype(t), t(1).nbytes) for t in num_types) def set_realloc(x_gpu, data): """ Transfer data into a GPUArray instance. Copies the contents of a numpy array into a GPUArray instance. If the array has a different type or dimensions than the instance, the GPU memory used by the instance is reallocated and the instance updated appropriately. Parameters ---------- x_gpu : pycuda.gpuarray.GPUArray GPUArray instance to modify. data : numpy.ndarray Array of data to transfer to the GPU. Examples -------- >>> import pycuda.gpuarray as gpuarray >>> import pycuda.autoinit >>> import numpy as np >>> import misc >>> x = np.asarray(np.random.rand(5), np.float32) >>> x_gpu = gpuarray.to_gpu(x) >>> x = np.asarray(np.random.rand(10, 1), np.float64) >>> set_realloc(x_gpu, x) >>> np.allclose(x, x_gpu.get()) True """ # Only reallocate if absolutely necessary: if x_gpu.shape != data.shape or x_gpu.size != data.size or \ x_gpu.strides != data.strides or x_gpu.dtype != data.dtype: # Free old memory: x_gpu.gpudata.free() # Allocate new memory: nbytes = num_nbytes[data.dtype] x_gpu.gpudata = drv.mem_alloc(nbytes*data.size) # Set array attributes: x_gpu.shape = data.shape x_gpu.size = data.size x_gpu.strides = data.strides x_gpu.dtype = data.dtype # Update the GPU memory: x_gpu.set(data) def bufint(a): """ Return buffer interface to GPU array. Parameters ---------- a : pycuda.gpuarray.GPUArray GPU array. Returns ------- b : buffer Buffer interface to array. Returns None if `a` has a length of 0. """ assert isinstance(a, gpuarray.GPUArray) if a.size: return a.gpudata.as_buffer(a.nbytes) else: return None def set_by_inds(dest_gpu, ind, src_gpu, ind_which='dest'): """ Set values in a GPUArray by index. Parameters ---------- dest_gpu : pycuda.gpuarray.GPUArray GPUArray instance to modify. ind : pycuda.gpuarray.GPUArray or numpy.ndarray 1D array of element indices to set. Must have an integer dtype. src_gpu : pycuda.gpuarray.GPUArray GPUArray instance from which to set values. ind_which : str If set to 'dest', set the elements in `dest_gpu` with indices `ind` to the successive values in `src_gpu`; the lengths of `ind` and `src_gpu` must be equal. If set to 'src', set the successive values in `dest_gpu` to the values in `src_gpu` with indices `ind`; the lengths of `ind` and `dest_gpu` must be equal. 
Examples -------- >>> import pycuda.gpuarray as gpuarray >>> import pycuda.autoinit >>> import numpy as np >>> import misc >>> dest_gpu = gpuarray.to_gpu(np.arange(5, dtype=np.float32)) >>> ind = gpuarray.to_gpu(np.array([0, 2, 4])) >>> src_gpu = gpuarray.to_gpu(np.array([1, 1, 1], dtype=np.float32)) >>> misc.set_by_inds(dest_gpu, ind, src_gpu, 'dest') >>> np.allclose(dest_gpu.get(), np.array([1, 1, 1, 3, 1], dtype=np.float32)) True >>> dest_gpu = gpuarray.to_gpu(np.zeros(3, dtype=np.float32)) >>> ind = gpuarray.to_gpu(np.array([0, 2, 4])) >>> src_gpu = gpuarray.to_gpu(np.arange(5, dtype=np.float32)) >>> misc.set_by_inds(dest_gpu, ind, src_gpu) >>> np.allclose(dest_gpu.get(), np.array([0, 2, 4], dtype=np.float32)) True Notes ----- Only supports 1D index arrays. May not be efficient for certain index patterns because of lack of inability to coalesce memory operations. """ # Only support 1D index arrays: assert len(np.shape(ind)) == 1 assert dest_gpu.dtype == src_gpu.dtype assert issubclass(ind.dtype.type, numbers.Integral) N = len(ind) # Manually handle empty index array because it will cause the kernel to # fail if processed: if N == 0: return if ind_which == 'dest': assert N == len(src_gpu) elif ind_which == 'src': assert N == len(dest_gpu) else: raise ValueError('invalid value for `ind_which`') if not isinstance(ind, gpuarray.GPUArray): ind = gpuarray.to_gpu(ind) try: func = set_by_inds.cache[(dest_gpu.dtype, ind.dtype, ind_which)] except KeyError: data_ctype = dtype_to_ctype(dest_gpu.dtype) ind_ctype = dtype_to_ctype(ind.dtype) v = "{data_ctype} *dest, {ind_ctype} *ind, {data_ctype} *src".format(data_ctype=data_ctype, ind_ctype=ind_ctype) if ind_which == 'dest': func = elementwise.ElementwiseKernel(v, "dest[ind[i]] = src[i]") else: func = elementwise.ElementwiseKernel(v, "dest[i] = src[ind[i]]") set_by_inds.cache[(dest_gpu.dtype, ind.dtype, ind_which)] = func func(dest_gpu, ind, src_gpu, range=slice(0, N, 1)) set_by_inds.cache = {} def set_by_inds_from_inds(dest_gpu, ind_dest, src_gpu, ind_src): """ Set values in a GPUArray by index from indexed values in another GPUArray. Parameters ---------- dest_gpu : pycuda.gpuarray.GPUArray GPUArray instance to modify. ind_dest : pycuda.gpuarray.GPUArray or numpy.ndarray 1D array of element indices in `dest_gpu` to set. Must have an integer dtype. src_gpu : pycuda.gpuarray.GPUArray GPUArray instance from which to set values. ind_src : pycuda.gpuarray.GPUArray or numpy.ndarray 1D array of element indices in `src_gpu` to copy. Must have an integer dtype. 
""" assert len(np.shape(ind_dest)) == 1 assert len(np.shape(ind_src)) == 1 assert dest_gpu.dtype == src_gpu.dtype assert ind_dest.dtype == ind_src.dtype assert issubclass(ind_dest.dtype.type, numbers.Integral) assert issubclass(ind_src.dtype.type, numbers.Integral) N = len(ind_src) # Manually handle empty index array because it will cause the kernel to # fail if processed: if N == 0: return assert N == len(ind_dest) if not isinstance(ind_dest, gpuarray.GPUArray): ind_dest = gpuarray.to_gpu(ind_dest) if not isinstance(ind_src, gpuarray.GPUArray): ind_src = gpuarray.to_gpu(ind_src) try: func = set_by_inds_from_inds.cache[(dest_gpu.dtype, ind_dest.dtype)] except KeyError: data_ctype = dtype_to_ctype(dest_gpu.dtype) ind_ctype = dtype_to_ctype(ind_dest.dtype) v = "{data_ctype} *dest, {ind_ctype} *ind_dest,"\ "{data_ctype} *src, {ind_ctype} *ind_src".format(data_ctype=data_ctype, ind_ctype=ind_ctype) func = elementwise.ElementwiseKernel(v, "dest[ind_dest[i]] = src[ind_src[i]]") set_by_inds_from_inds.cache[(dest_gpu.dtype, ind_dest.dtype)] = func func(dest_gpu, ind_dest, src_gpu, ind_src, range=slice(0, N, 1)) set_by_inds_from_inds.cache = {}
bsd-3-clause
-1,037,744,287,174,244,400
33.683486
120
0.622404
false
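set_by_inds above compiles a one-line elementwise kernel per (dtype, ind_which) pair, and its two modes correspond to plain fancy indexing. A CPU-only NumPy analogue (an assumed stand-in, no GPU or PyCUDA required) that mirrors the same semantics and reproduces the docstring's expected results:

# NumPy stand-in for the two ind_which modes of set_by_inds (illustration
# only; the module above does the same thing with a PyCUDA ElementwiseKernel).
import numpy as np

def set_by_inds_cpu(dest, ind, src, ind_which='dest'):
    ind = np.asarray(ind)
    if ind_which == 'dest':
        dest[ind] = src      # dest[ind[i]] = src[i]
    elif ind_which == 'src':
        dest[:] = src[ind]   # dest[i] = src[ind[i]]
    else:
        raise ValueError('invalid value for `ind_which`')

dest = np.arange(5, dtype=np.float32)
set_by_inds_cpu(dest, [0, 2, 4], np.ones(3, dtype=np.float32), 'dest')
assert np.allclose(dest, [1, 1, 1, 3, 1])

dest = np.zeros(3, dtype=np.float32)
set_by_inds_cpu(dest, [0, 2, 4], np.arange(5, dtype=np.float32), 'src')
assert np.allclose(dest, [0, 2, 4])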
TravelModellingGroup/TMGToolbox
TMGToolbox/src/XTMF_internal/delete_scenario.py
1
2229
''' Copyright 2016 Travel Modelling Group, Department of Civil Engineering, University of Toronto This file is part of the TMG Toolbox. The TMG Toolbox is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. The TMG Toolbox is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with the TMG Toolbox. If not, see <http://www.gnu.org/licenses/>. ''' #---METADATA--------------------- ''' Delete Scenario Authors: JamesVaughan Latest revision by: JamesVaughan This tool will allow XTMF to be able to delete a scenario within an EMME Databank. ''' #---VERSION HISTORY ''' 0.0.1 Created on 2016-03-24 by JamesVaughan ''' import inro.modeller as _m import traceback as _traceback _MODELLER = _m.Modeller() #Instantiate Modeller once. class DeleteScenario(_m.Tool()): version = '0.0.1' Scenario = _m.Attribute(int) def page(self): pb = _m.ToolPageBuilder(self, title="Delete Scenario", runnable=False, description="Cannot be called from Modeller.", branding_text="XTMF") return pb.render() def run(self): pass def __call__(self, Scenario): try: self._execute(Scenario) except Exception as e: raise Exception(_traceback.format_exc()) def _execute(self, Scenario): project = _MODELLER.emmebank scenario = project.scenario(str(Scenario)) if scenario is None: print "A delete was requested for scenario " + str(Scenario) + " but the scenario does not exist." return if scenario.delete_protected == True: scenario.delete_protected = False project.delete_scenario(scenario.id)
gpl-3.0
-351,590,633,516,950,140
30.408451
110
0.638852
false
google/nerfactor
third_party/xiuminglib/xiuminglib/imprt.py
1
2311
from importlib import import_module from .log import get_logger logger = get_logger() # For < Python 3.6 try: ModuleNotFoundError except NameError: ModuleNotFoundError = ImportError def preset_import(name, assert_success=False): """A unified importer for both regular and ``google3`` modules, according to specified presets/profiles (e.g., ignoring ``ModuleNotFoundError``). """ if name in ('cv2', 'opencv'): try: # BUILD dep: # "//third_party/py/cvx2", from cvx2 import latest as mod # Or # BUILD dep: # "//third_party/OpenCVX:cvx2", # from google3.third_party.OpenCVX import cvx2 as cv2 except ModuleNotFoundError: mod = import_module_404ok('cv2') elif name in ('tf', 'tensorflow'): mod = import_module_404ok('tensorflow') elif name == 'gfile': # BUILD deps: # "//pyglib:gfile", # "//file/colossus/cns", mod = import_module_404ok('google3.pyglib.gfile') elif name == 'video_api': # BUILD deps: # "//learning/deepmind/video/python:video_api", mod = import_module_404ok( 'google3.learning.deepmind.video.python.video_api') elif name in ('bpy', 'bmesh', 'OpenEXR', 'Imath'): # BUILD deps: # "//third_party/py/Imath", # "//third_party/py/OpenEXR", mod = import_module_404ok(name) elif name in ('Vector', 'Matrix', 'Quaternion'): mod = import_module_404ok('mathutils') mod = _get_module_class(mod, name) elif name == 'BVHTree': mod = import_module_404ok('mathutils.bvhtree') mod = _get_module_class(mod, name) else: raise NotImplementedError(name) if assert_success: assert mod is not None, "Failed in importing '%s'" % name return mod def import_module_404ok(*args, **kwargs): """Returns ``None`` (instead of failing) in the case of ``ModuleNotFoundError``. """ try: mod = import_module(*args, **kwargs) except (ModuleNotFoundError, ImportError) as e: mod = None logger.debug("Ignored: %s", str(e)) return mod def _get_module_class(mod, clsname): if mod is None: return None return getattr(mod, clsname)
apache-2.0
-7,016,370,503,272,079,000
27.182927
77
0.590221
false
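The pattern behind import_module_404ok above is returning None instead of raising when an optional dependency is absent. A minimal stand-alone sketch using only importlib; 'cv2' is just an assumed example module name:

# Minimal version of the "404 OK" import pattern shown above.
from importlib import import_module

def import_404ok(name):
    try:
        return import_module(name)
    except ImportError:
        return None

cv2 = import_404ok('cv2')
if cv2 is None:
    print('cv2 not available; falling back')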
CrankWheel/grit-i18n
grit/tool/xmb.py
1
11671
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """The 'grit xmb' tool. """ import getopt import os from xml.sax import saxutils from grit import grd_reader from grit import lazy_re from grit import tclib from grit import util from grit.tool import interface # Used to collapse presentable content to determine if # xml:space="preserve" is needed. _WHITESPACES_REGEX = lazy_re.compile(ur'\s\s*') # See XmlEscape below. _XML_QUOTE_ESCAPES = { u"'": u'&apos;', u'"': u'&quot;', } _XML_BAD_CHAR_REGEX = lazy_re.compile(u'[^\u0009\u000A\u000D' u'\u0020-\uD7FF\uE000-\uFFFD]') def _XmlEscape(s): """Returns text escaped for XML in a way compatible with Google's internal Translation Console tool. May be used for attributes as well as for contents. """ if not type(s) == unicode: s = unicode(s) result = saxutils.escape(s, _XML_QUOTE_ESCAPES) return _XML_BAD_CHAR_REGEX.sub(u'', result).encode('utf-8') def _WriteAttribute(file, name, value): """Writes an XML attribute to the specified file. Args: file: file to write to name: name of the attribute value: (unescaped) value of the attribute """ if value: file.write(' %s="%s"' % (name, _XmlEscape(value))) def _WriteMessage(file, message): presentable_content = message.GetPresentableContent() assert (type(presentable_content) == unicode or (len(message.parts) == 1 and type(message.parts[0] == tclib.Placeholder))) preserve_space = presentable_content != _WHITESPACES_REGEX.sub( u' ', presentable_content.strip()) file.write('<msg') _WriteAttribute(file, 'desc', message.GetDescription()) _WriteAttribute(file, 'id', message.GetId()) _WriteAttribute(file, 'meaning', message.GetMeaning()) if preserve_space: _WriteAttribute(file, 'xml:space', 'preserve') file.write('>') if not preserve_space: file.write('\n ') parts = message.GetContent() for part in parts: if isinstance(part, tclib.Placeholder): file.write('<ph') _WriteAttribute(file, 'name', part.GetPresentation()) file.write('><ex>') file.write(_XmlEscape(part.GetExample())) file.write('</ex>') file.write(_XmlEscape(part.GetOriginal())) file.write('</ph>') else: file.write(_XmlEscape(part)) if not preserve_space: file.write('\n') file.write('</msg>\n') def WriteXmbFile(file, messages): """Writes the given grit.tclib.Message items to the specified open file-like object in the XMB format. 
""" file.write("""<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE messagebundle [ <!ELEMENT messagebundle (msg)*> <!ATTLIST messagebundle class CDATA #IMPLIED> <!ELEMENT msg (#PCDATA|ph|source)*> <!ATTLIST msg id CDATA #IMPLIED> <!ATTLIST msg seq CDATA #IMPLIED> <!ATTLIST msg name CDATA #IMPLIED> <!ATTLIST msg desc CDATA #IMPLIED> <!ATTLIST msg meaning CDATA #IMPLIED> <!ATTLIST msg obsolete (obsolete) #IMPLIED> <!ATTLIST msg xml:space (default|preserve) "default"> <!ATTLIST msg is_hidden CDATA #IMPLIED> <!ELEMENT source (#PCDATA)> <!ELEMENT ph (#PCDATA|ex)*> <!ATTLIST ph name CDATA #REQUIRED> <!ELEMENT ex (#PCDATA)> ]> <messagebundle> """) for message in messages: _WriteMessage(file, message) file.write('</messagebundle>') # Good resource on POT format: http://pology.nedohodnik.net/doc/user/en_US/ch-poformat.html def WritePotFile(file, cliques, lang='', include_translation=False): def WriteAttribute(prefix, value): if value: file.write('%s%s\n' % (prefix, value)) def WriteExamples(): parts = message.GetContent() for part in parts: if isinstance(part, tclib.Placeholder): if part.GetExample(): file.write(u'#. - placeholder %s, example: %s\n' % (part.GetPresentation(), part.GetExample())) else: file.write(u'#. - placeholder %s, literally replaced with: %s\n' % (part.GetPresentation(), part.GetOriginal())) def PotEscape(text): return text.replace(u'\\', u'\\\\').replace(u'\n', u'\\n').replace(u'\t', u'\\t').replace(u'%', u'\%').encode('utf-8') for clique in cliques: message = clique.GetMessage() WriteAttribute(u'#. - description:', message.GetDescription()) WriteExamples() WriteAttribute(u'#: id: ', message.GetId()) meaning = message.GetMeaning() if meaning: file.write(u'msgctxt "%s"\n' % PotEscape(meaning)) def WriteMessagePart(key, msg): file.write(u'%s "' % key) parts = msg.GetContent() for part in parts: if isinstance(part, tclib.Placeholder): file.write(u'%%{%s}' % part.GetPresentation()) else: file.write(PotEscape(part)) file.write(u'"\n') WriteMessagePart(u'msgid', message) if not include_translation: file.write(u'msgstr ""\n') else: WriteMessagePart(u'msgstr', clique.MessageForLanguage(lang, pseudo_if_no_match=False, fallback_to_english=False)) file.write(u'\n') class OutputXmb(interface.Tool): """Outputs all translateable messages in the .grd input file to an .xmb file, which is the format used to give source messages to Google's internal Translation Console tool. The format could easily be used for other systems. Usage: grit xmb [-i|-h] [-l LIMITFILE] OUTPUTPATH OUTPUTPATH is the path you want to output the .xmb file to. The -l option can be used to output only some of the resources to the .xmb file. LIMITFILE is the path to a file that is used to limit the items output to the xmb file. If the filename extension is .grd, the file must be a .grd file and the tool only output the contents of nodes from the input file that also exist in the limit file (as compared on the 'name' attribute). Otherwise it must contain a list of the IDs that output should be limited to, one ID per line, and the tool will only output nodes with 'name' attributes that match one of the IDs. The -i option causes 'grit xmb' to output an "IDs only" file instead of an XMB file. The "IDs only" file contains the message ID of each message that would normally be output to the XMB file, one message ID per line. It is designed for use with the 'grit transl2tc' tool's -l option. 
Other options: -D NAME[=VAL] Specify a C-preprocessor-like define NAME with optional value VAL (defaults to 1) which will be used to control conditional inclusion of resources. -E NAME=VALUE Set environment variable NAME to VALUE (within grit). """ # The different output formats supported by this tool FORMAT_XMB = 0 FORMAT_IDS_ONLY = 1 FORMAT_POT = 2 def __init__(self, defines=None): super(OutputXmb, self).__init__() self.format = self.FORMAT_XMB self.defines = defines or {} def ShortDescription(self): return 'Exports all translateable messages into an XMB file.' def Run(self, opts, args): self.SetOptions(opts) limit_file = None limit_is_grd = False limit_file_dir = None own_opts, args = getopt.getopt(args, 'l:D:ihp') for key, val in own_opts: if key == '-l': limit_file = open(val, 'r') limit_file_dir = util.dirname(val) if not len(limit_file_dir): limit_file_dir = '.' limit_is_grd = os.path.splitext(val)[1] == '.grd' elif key == '-i': self.format = self.FORMAT_IDS_ONLY elif key == '-p': self.format = self.FORMAT_POT elif key == '-D': name, val = util.ParseDefine(val) self.defines[name] = val elif key == '-E': (env_name, env_value) = val.split('=', 1) os.environ[env_name] = env_value if not len(args) == 1: print ('grit xmb takes exactly one argument, the path to the XMB file ' 'to output.') return 2 xmb_path = args[0] res_tree = grd_reader.Parse(opts.input, debug=opts.extra_verbose) res_tree.SetOutputLanguage('en') res_tree.SetDefines(self.defines) res_tree.OnlyTheseTranslations([]) res_tree.RunGatherers() with open(xmb_path, 'wb') as output_file: self.Process( res_tree, output_file, limit_file, limit_is_grd, limit_file_dir) if limit_file: limit_file.close() print "Wrote %s" % xmb_path def Process(self, res_tree, output_file, limit_file=None, limit_is_grd=False, dir=None): """Writes a document with the contents of res_tree into output_file, limiting output to the IDs specified in limit_file, which is a GRD file if limit_is_grd is true, otherwise a file with one ID per line. The format of the output document depends on this object's format attribute. It can be FORMAT_XMB or FORMAT_IDS_ONLY. The FORMAT_IDS_ONLY format causes this function to write just a list of the IDs of all messages that would have been added to the XMB file, one ID per line. The FORMAT_XMB format causes this function to output the (default) XMB format. Args: res_tree: base.Node() output_file: file open for writing limit_file: None or file open for reading limit_is_grd: True | False dir: Directory of the limit file """ if limit_file: if limit_is_grd: limit_list = [] limit_tree = grd_reader.Parse(limit_file, dir=dir, debug=self.o.extra_verbose) for node in limit_tree: if 'name' in node.attrs: limit_list.append(node.attrs['name']) else: # Not a GRD file, so it's just a file with one ID per line limit_list = [item.strip() for item in limit_file.read().split('\n')] ids_already_done = {} cliques = [] for node in res_tree: if (limit_file and not ('name' in node.attrs and node.attrs['name'] in limit_list)): continue if not node.IsTranslateable(): continue for clique in node.GetCliques(): if not clique.IsTranslateable(): continue if not clique.GetMessage().GetRealContent(): continue # Some explanation is in order here. Note that we can have # many messages with the same ID. # # The way we work around this is to maintain a list of cliques # per message ID (in the UberClique) and select the "best" one # (the first one that has a description, or an arbitrary one # if there is no description) for inclusion in the XMB file. 
# The translations are all going to be the same for messages # with the same ID, although the way we replace placeholders # might be slightly different. id = clique.GetMessage().GetId() if id in ids_already_done: continue ids_already_done[id] = 1 clique = node.UberClique().BestClique(id) cliques += [clique] # Ensure a stable order of messages, to help regression testing. cliques.sort(key=lambda x:x.GetMessage().GetId()) messages = [c.GetMessage() for c in cliques] if self.format == self.FORMAT_IDS_ONLY: # We just print the list of IDs to the output file. for msg in messages: output_file.write(msg.GetId()) output_file.write('\n') elif self.format == self.FORMAT_POT: WritePotFile(output_file, cliques) else: assert self.format == self.FORMAT_XMB WriteXmbFile(output_file, messages)
bsd-2-clause
5,850,516,025,989,369,000
32.927326
122
0.639877
false
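_XmlEscape above layers three steps: saxutils.escape, extra entities for quote characters, and removal of characters outside the XML 1.0 range. A rough Python 3 rendering of that helper, assuming plain re in place of grit's lazy_re:

# Rough Python 3 rendering of the escaping helper above (behavior sketch,
# not the grit source; returns str instead of UTF-8 bytes).
import re
from xml.sax import saxutils

_QUOTES = {"'": '&apos;', '"': '&quot;'}
_BAD = re.compile('[^\u0009\u000A\u000D\u0020-\uD7FF\uE000-\uFFFD]')

def xml_escape(s):
    return _BAD.sub('', saxutils.escape(str(s), _QUOTES))

print(xml_escape('A < B & "C"'))   # A &lt; B &amp; &quot;C&quot;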
carthach/essentia
test/src/unittests/stats/test_mean.py
1
1594
#!/usr/bin/env python # Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra # # This file is part of Essentia # # Essentia is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the Free # Software Foundation (FSF), either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the Affero GNU General Public License # version 3 along with this program. If not, see http://www.gnu.org/licenses/ from essentia_test import * class TestMean(TestCase): def testEmpty(self): self.assertComputeFails(Mean(), []) def testZero(self): result = Mean()([0]*10) self.assertAlmostEqual(result, 0) def testOne(self): result = Mean()([100]) self.assertAlmostEqual(result, 100) def testMulti(self): result = Mean()([5, 8, 4, 9, 1]) self.assertAlmostEqual(result, 5.4) def testNegatives(self): result = Mean()([3, 7, -45, 2, -1, 0]) self.assertAlmostEqual(result, -5.666666666) def testRational(self): result = Mean()([3.1459, -0.4444, .00002]) self.assertAlmostEqual(result, 0.900506666667) suite = allTests(TestMean) if __name__ == '__main__': TextTestRunner(verbosity=2).run(suite)
agpl-3.0
-3,298,976,419,984,662,500
29.075472
79
0.68005
false
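The expected values in these test cases are ordinary arithmetic means, so they can be cross-checked without Essentia. A quick check with the Python 3 statistics module (assumed available):

# Cross-check of the expected means used in the tests above.
from statistics import mean

assert abs(mean([5, 8, 4, 9, 1]) - 5.4) < 1e-9
assert abs(mean([3, 7, -45, 2, -1, 0]) - (-34 / 6)) < 1e-9
assert abs(mean([3.1459, -0.4444, 0.00002]) - 0.900506666667) < 1e-9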
leiferikb/bitpop-private
build/android/pylib/perf_tests_helper.py
2
5576
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import re import android_commands import json import math # Valid values of result type. RESULT_TYPES = {'unimportant': 'RESULT ', 'default': '*RESULT ', 'informational': '', 'unimportant-histogram': 'HISTOGRAM ', 'histogram': '*HISTOGRAM '} def _EscapePerfResult(s): """Escapes |s| for use in a perf result.""" # Colons (:), equal signs (=) and slashes (/) are not allowed. return re.sub('[\:|=/]', '_', s) def GeomMeanAndStdDevFromHistogram(histogram_json): histogram = json.loads(histogram_json) count = 0 sum_of_logs = 0 for bucket in histogram['buckets']: if 'high' in bucket: bucket['mean'] = (bucket['low'] + bucket['high']) / 2.0 else: bucket['mean'] = bucket['low'] if bucket['mean'] > 0: sum_of_logs += math.log(bucket['mean']) * bucket['count'] count += bucket['count'] if count == 0: return 0.0, 0.0 sum_of_squares = 0 geom_mean = math.exp(sum_of_logs / count) for bucket in histogram['buckets']: if bucket['mean'] > 0: sum_of_squares += (bucket['mean'] - geom_mean) ** 2 * bucket['count'] return geom_mean, math.sqrt(sum_of_squares / count) def _MeanAndStdDevFromList(values): avg = None sd = None if len(values) > 1: try: value = '[%s]' % ','.join([str(v) for v in values]) avg = sum([float(v) for v in values]) / len(values) sqdiffs = [(float(v) - avg) ** 2 for v in values] variance = sum(sqdiffs) / (len(values) - 1) sd = math.sqrt(variance) except ValueError: value = ", ".join(values) else: value = values[0] return value, avg, sd def PrintPerfResult(measurement, trace, values, units, result_type='default', print_to_stdout=True): """Prints numerical data to stdout in the format required by perf tests. The string args may be empty but they must not contain any colons (:) or equals signs (=). Args: measurement: A description of the quantity being measured, e.g. "vm_peak". trace: A description of the particular data point, e.g. "reference". values: A list of numeric measured values. units: A description of the units of measure, e.g. "bytes". result_type: Accepts values of RESULT_TYPES. print_to_stdout: If True, prints the output in stdout instead of returning the output to caller. Returns: String of the formated perf result. """ assert result_type in RESULT_TYPES, 'result type: %s is invalid' % result_type trace_name = _EscapePerfResult(trace) if result_type in ['unimportant', 'default', 'informational']: assert isinstance(values, list) assert len(values) assert '/' not in measurement value, avg, sd = _MeanAndStdDevFromList(values) output = '%s%s: %s%s%s %s' % ( RESULT_TYPES[result_type], _EscapePerfResult(measurement), trace_name, # Do not show equal sign if the trace is empty. Usually it happens when # measurement is enough clear to describe the result. '= ' if trace_name else '', value, units) else: assert(result_type in ['histogram', 'unimportant-histogram']) assert isinstance(values, list) # The histograms can only be printed individually, there's no computation # across different histograms. assert len(values) == 1 value = values[0] measurement += '.' 
    measurement += trace_name
    output = '%s%s: %s= %s' % (
        RESULT_TYPES[result_type],
        _EscapePerfResult(measurement),
        _EscapePerfResult(measurement),
        value)
    avg, sd = GeomMeanAndStdDevFromHistogram(value)
    if avg:
      output += '\nAvg %s: %f%s' % (measurement, avg, units)
    if sd:
      output += '\nSd %s: %f%s' % (measurement, sd, units)
  if print_to_stdout:
    print output
  return output


class PerfTestSetup(object):
  """Provides methods for setting up a device for perf testing."""

  _DROP_CACHES = '/proc/sys/vm/drop_caches'
  _SCALING_GOVERNOR = '/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor'

  def __init__(self, adb):
    self._adb = adb
    num_cpus = self._adb.GetFileContents('/sys/devices/system/cpu/online',
                                         log_result=False)
    assert num_cpus, 'Unable to find /sys/devices/system/cpu/online'
    self._num_cpus = int(num_cpus[0].split('-')[-1])
    self._original_scaling_governor = None

  def DropRamCaches(self):
    """Drops the filesystem ram caches for performance testing."""
    if not self._adb.IsRootEnabled():
      self._adb.EnableAdbRoot()
    self._adb.RunShellCommand('sync')
    self._adb.RunShellCommand('echo 3 > ' + PerfTestSetup._DROP_CACHES)

  def SetUp(self):
    """Sets up performance tests."""
    if not self._original_scaling_governor:
      self._original_scaling_governor = self._adb.GetFileContents(
          PerfTestSetup._SCALING_GOVERNOR % 0,
          log_result=False)[0]
      self._SetScalingGovernorInternal('performance')
    self.DropRamCaches()

  def TearDown(self):
    """Tears down performance tests."""
    if self._original_scaling_governor:
      self._SetScalingGovernorInternal(self._original_scaling_governor)
    self._original_scaling_governor = None

  def _SetScalingGovernorInternal(self, value):
    for cpu in range(self._num_cpus):
      self._adb.RunShellCommand(
          ('echo %s > ' + PerfTestSetup._SCALING_GOVERNOR) % (value, cpu))
bsd-3-clause
-1,881,200,107,508,496,100
32.793939
80
0.637195
false
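GeomMeanAndStdDevFromHistogram above weights each bucket midpoint by its count before taking the log-average. A stand-alone check of the geometric-mean part with an assumed two-bucket histogram:

# Stand-alone check of the histogram geometric-mean math shown above.
# Bucket bounds and counts are made-up example values.
import json
import math

histogram = json.dumps({'buckets': [
    {'low': 1, 'high': 3, 'count': 2},   # midpoint 2.0
    {'low': 3, 'high': 5, 'count': 2},   # midpoint 4.0
]})

data = json.loads(histogram)
count = sum(b['count'] for b in data['buckets'])
sum_of_logs = sum(math.log((b['low'] + b['high']) / 2.0) * b['count']
                  for b in data['buckets'])
geom_mean = math.exp(sum_of_logs / count)   # sqrt(2 * 4)
print(round(geom_mean, 3))                  # 2.828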