repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
stringlengths 5–92 | stringlengths 4–232 | stringclasses 19 values | stringlengths 4–7 | stringlengths 721–1.04M | stringclasses 15 values | int64 -9,223,277,421,539,062,000 to 9,223,102,107B | float64 6.51–99.9 | int64 15–997 | float64 0.25–0.97 | bool 1 class
davy39/eric | Helpviewer/Bookmarks/BookmarksManager.py | 1 | 21594 | # -*- coding: utf-8 -*-
# Copyright (c) 2009 - 2014 Detlev Offenbach <[email protected]>
#
"""
Module implementing the bookmarks manager.
"""
from __future__ import unicode_literals
import os
from PyQt5.QtCore import pyqtSignal, Qt, QT_TRANSLATE_NOOP, QObject, QFile, \
QIODevice, QXmlStreamReader, QDate, QDateTime, QFileInfo, QUrl, \
QCoreApplication
from PyQt5.QtWidgets import QUndoStack, QUndoCommand, QDialog
from E5Gui import E5MessageBox, E5FileDialog
from .BookmarkNode import BookmarkNode
from Utilities.AutoSaver import AutoSaver
import Utilities
import Preferences
BOOKMARKBAR = QT_TRANSLATE_NOOP("BookmarksManager", "Bookmarks Bar")
BOOKMARKMENU = QT_TRANSLATE_NOOP("BookmarksManager", "Bookmarks Menu")
StartRoot = 0
StartMenu = 1
StartToolBar = 2
class BookmarksManager(QObject):
"""
Class implementing the bookmarks manager.
@signal entryAdded(BookmarkNode) emitted after a bookmark node has been
added
@signal entryRemoved(BookmarkNode, int, BookmarkNode) emitted after a
bookmark node has been removed
@signal entryChanged(BookmarkNode) emitted after a bookmark node has been
changed
@signal bookmarksSaved() emitted after the bookmarks were saved
@signal bookmarksReloaded() emitted after the bookmarks were reloaded
"""
entryAdded = pyqtSignal(BookmarkNode)
entryRemoved = pyqtSignal(BookmarkNode, int, BookmarkNode)
entryChanged = pyqtSignal(BookmarkNode)
bookmarksSaved = pyqtSignal()
bookmarksReloaded = pyqtSignal()
def __init__(self, parent=None):
"""
Constructor
@param parent reference to the parent object (QObject)
"""
super(BookmarksManager, self).__init__(parent)
self.__saveTimer = AutoSaver(self, self.save)
self.entryAdded.connect(self.__saveTimer.changeOccurred)
self.entryRemoved.connect(self.__saveTimer.changeOccurred)
self.entryChanged.connect(self.__saveTimer.changeOccurred)
self.__initialize()
def __initialize(self):
"""
Private method to initialize some data.
"""
self.__loaded = False
self.__bookmarkRootNode = None
self.__toolbar = None
self.__menu = None
self.__bookmarksModel = None
self.__commands = QUndoStack()
@classmethod
def getFileName(cls):
"""
Class method to get the file name of the bookmark file.
@return name of the bookmark file (string)
"""
return os.path.join(Utilities.getConfigDir(), "browser",
"bookmarks.xbel")
def close(self):
"""
Public method to close the bookmark manager.
"""
self.__saveTimer.saveIfNeccessary()
def undoRedoStack(self):
"""
Public method to get a reference to the undo stack.
@return reference to the undo stack (QUndoStack)
"""
return self.__commands
def changeExpanded(self):
"""
Public method to handle a change of the expanded state.
"""
self.__saveTimer.changeOccurred()
def reload(self):
"""
Public method used to initiate a reloading of the bookmarks.
"""
self.__initialize()
self.load()
self.bookmarksReloaded.emit()
def load(self):
"""
Public method to load the bookmarks.
@exception RuntimeError raised to indicate an error loading the
bookmarks
"""
if self.__loaded:
return
self.__loaded = True
bookmarkFile = self.getFileName()
if not QFile.exists(bookmarkFile):
from . import DefaultBookmarks_rc # __IGNORE_WARNING__
bookmarkFile = QFile(":/DefaultBookmarks.xbel")
bookmarkFile.open(QIODevice.ReadOnly)
from .XbelReader import XbelReader
reader = XbelReader()
self.__bookmarkRootNode = reader.read(bookmarkFile)
if reader.error() != QXmlStreamReader.NoError:
E5MessageBox.warning(
None,
self.tr("Loading Bookmarks"),
self.tr(
"""Error when loading bookmarks on line {0},"""
""" column {1}:\n {2}""")
.format(reader.lineNumber(),
reader.columnNumber(),
reader.errorString()))
others = []
for index in range(
len(self.__bookmarkRootNode.children()) - 1, -1, -1):
node = self.__bookmarkRootNode.children()[index]
if node.type() == BookmarkNode.Folder:
if (node.title == self.tr("Toolbar Bookmarks") or
node.title == BOOKMARKBAR) and \
self.__toolbar is None:
node.title = self.tr(BOOKMARKBAR)
self.__toolbar = node
if (node.title == self.tr("Menu") or
node.title == BOOKMARKMENU) and \
self.__menu is None:
node.title = self.tr(BOOKMARKMENU)
self.__menu = node
else:
others.append(node)
self.__bookmarkRootNode.remove(node)
if len(self.__bookmarkRootNode.children()) > 0:
raise RuntimeError("Error loading bookmarks.")
if self.__toolbar is None:
self.__toolbar = BookmarkNode(BookmarkNode.Folder,
self.__bookmarkRootNode)
self.__toolbar.title = self.tr(BOOKMARKBAR)
else:
self.__bookmarkRootNode.add(self.__toolbar)
if self.__menu is None:
self.__menu = BookmarkNode(BookmarkNode.Folder,
self.__bookmarkRootNode)
self.__menu.title = self.tr(BOOKMARKMENU)
else:
self.__bookmarkRootNode.add(self.__menu)
for node in others:
self.__menu.add(node)
self.__convertFromOldBookmarks()
def save(self):
"""
Public method to save the bookmarks.
"""
if not self.__loaded:
return
from .XbelWriter import XbelWriter
writer = XbelWriter()
bookmarkFile = self.getFileName()
# save root folder titles in English (i.e. not localized)
self.__menu.title = BOOKMARKMENU
self.__toolbar.title = BOOKMARKBAR
if not writer.write(bookmarkFile, self.__bookmarkRootNode):
E5MessageBox.warning(
None,
self.tr("Saving Bookmarks"),
self.tr("""Error saving bookmarks to <b>{0}</b>.""")
.format(bookmarkFile))
# restore localized titles
self.__menu.title = self.tr(BOOKMARKMENU)
self.__toolbar.title = self.tr(BOOKMARKBAR)
self.bookmarksSaved.emit()
def addBookmark(self, parent, node, row=-1):
"""
Public method to add a bookmark.
@param parent reference to the node to add to (BookmarkNode)
@param node reference to the node to add (BookmarkNode)
@param row row number (integer)
"""
if not self.__loaded:
return
self.setTimestamp(node, BookmarkNode.TsAdded,
QDateTime.currentDateTime())
command = InsertBookmarksCommand(self, parent, node, row)
self.__commands.push(command)
def removeBookmark(self, node):
"""
Public method to remove a bookmark.
@param node reference to the node to be removed (BookmarkNode)
"""
if not self.__loaded:
return
parent = node.parent()
row = parent.children().index(node)
command = RemoveBookmarksCommand(self, parent, row)
self.__commands.push(command)
def setTitle(self, node, newTitle):
"""
Public method to set the title of a bookmark.
@param node reference to the node to be changed (BookmarkNode)
@param newTitle title to be set (string)
"""
if not self.__loaded:
return
command = ChangeBookmarkCommand(self, node, newTitle, True)
self.__commands.push(command)
def setUrl(self, node, newUrl):
"""
Public method to set the URL of a bookmark.
@param node reference to the node to be changed (BookmarkNode)
@param newUrl URL to be set (string)
"""
if not self.__loaded:
return
command = ChangeBookmarkCommand(self, node, newUrl, False)
self.__commands.push(command)
def setNodeChanged(self, node):
"""
Public method to signal changes of bookmarks other than title, URL
or timestamp.
@param node reference to the bookmark (BookmarkNode)
"""
self.__saveTimer.changeOccurred()
def setTimestamp(self, node, timestampType, timestamp):
"""
        Public method to set a timestamp of a bookmark.
@param node reference to the node to be changed (BookmarkNode)
@param timestampType type of the timestamp to set
(BookmarkNode.TsAdded, BookmarkNode.TsModified,
BookmarkNode.TsVisited)
@param timestamp timestamp to set (QDateTime)
"""
if not self.__loaded:
return
assert timestampType in [BookmarkNode.TsAdded,
BookmarkNode.TsModified,
BookmarkNode.TsVisited]
if timestampType == BookmarkNode.TsAdded:
node.added = timestamp
elif timestampType == BookmarkNode.TsModified:
node.modified = timestamp
elif timestampType == BookmarkNode.TsVisited:
node.visited = timestamp
self.__saveTimer.changeOccurred()
def bookmarks(self):
"""
Public method to get a reference to the root bookmark node.
@return reference to the root bookmark node (BookmarkNode)
"""
if not self.__loaded:
self.load()
return self.__bookmarkRootNode
def menu(self):
"""
Public method to get a reference to the bookmarks menu node.
@return reference to the bookmarks menu node (BookmarkNode)
"""
if not self.__loaded:
self.load()
return self.__menu
def toolbar(self):
"""
Public method to get a reference to the bookmarks toolbar node.
@return reference to the bookmarks toolbar node (BookmarkNode)
"""
if not self.__loaded:
self.load()
return self.__toolbar
def bookmarksModel(self):
"""
Public method to get a reference to the bookmarks model.
@return reference to the bookmarks model (BookmarksModel)
"""
if self.__bookmarksModel is None:
from .BookmarksModel import BookmarksModel
self.__bookmarksModel = BookmarksModel(self, self)
return self.__bookmarksModel
def importBookmarks(self):
"""
Public method to import bookmarks.
"""
from .BookmarksImportDialog import BookmarksImportDialog
dlg = BookmarksImportDialog()
if dlg.exec_() == QDialog.Accepted:
importRootNode = dlg.getImportedBookmarks()
if importRootNode is not None:
self.addBookmark(self.menu(), importRootNode)
def exportBookmarks(self):
"""
Public method to export the bookmarks.
"""
fileName, selectedFilter = E5FileDialog.getSaveFileNameAndFilter(
None,
self.tr("Export Bookmarks"),
"eric6_bookmarks.xbel",
self.tr("XBEL bookmarks (*.xbel);;"
"XBEL bookmarks (*.xml);;"
"HTML Bookmarks (*.html)"))
if not fileName:
return
ext = QFileInfo(fileName).suffix()
if not ext:
ex = selectedFilter.split("(*")[1].split(")")[0]
if ex:
fileName += ex
ext = QFileInfo(fileName).suffix()
if ext == "html":
from .NsHtmlWriter import NsHtmlWriter
writer = NsHtmlWriter()
else:
from .XbelWriter import XbelWriter
writer = XbelWriter()
if not writer.write(fileName, self.__bookmarkRootNode):
E5MessageBox.critical(
None,
self.tr("Exporting Bookmarks"),
self.tr("""Error exporting bookmarks to <b>{0}</b>.""")
.format(fileName))
def __convertFromOldBookmarks(self):
"""
Private method to convert the old bookmarks into the new ones.
"""
bmNames = Preferences.Prefs.settings.value('Bookmarks/Names')
bmFiles = Preferences.Prefs.settings.value('Bookmarks/Files')
if bmNames is not None and bmFiles is not None:
if len(bmNames) == len(bmFiles):
convertedRootNode = BookmarkNode(BookmarkNode.Folder)
convertedRootNode.title = self.tr("Converted {0}")\
.format(QDate.currentDate().toString(
Qt.SystemLocaleShortDate))
for i in range(len(bmNames)):
node = BookmarkNode(BookmarkNode.Bookmark,
convertedRootNode)
node.title = bmNames[i]
url = QUrl(bmFiles[i])
if not url.scheme():
url.setScheme("file")
node.url = url.toString()
self.addBookmark(self.menu(), convertedRootNode)
Preferences.Prefs.settings.remove('Bookmarks')
def iconChanged(self, url):
"""
        Public slot to update the icon image for a URL.
@param url URL of the icon to update (QUrl or string)
"""
if isinstance(url, QUrl):
url = url.toString()
nodes = self.bookmarksForUrl(url)
for node in nodes:
self.bookmarksModel().entryChanged(node)
def bookmarkForUrl(self, url, start=StartRoot):
"""
Public method to get a bookmark node for a given URL.
@param url URL of the bookmark to search for (QUrl or string)
@keyparam start indicator for the start of the search
(StartRoot, StartMenu, StartToolBar)
@return bookmark node for the given url (BookmarkNode)
"""
if start == StartMenu:
startNode = self.__menu
elif start == StartToolBar:
startNode = self.__toolbar
else:
startNode = self.__bookmarkRootNode
if startNode is None:
return None
if isinstance(url, QUrl):
url = url.toString()
return self.__searchBookmark(url, startNode)
def __searchBookmark(self, url, startNode):
"""
        Private method to get a bookmark node for a given URL.
@param url URL of the bookmark to search for (string)
@param startNode reference to the node to start searching
(BookmarkNode)
@return bookmark node for the given url (BookmarkNode)
"""
bm = None
for node in startNode.children():
if node.type() == BookmarkNode.Folder:
bm = self.__searchBookmark(url, node)
elif node.type() == BookmarkNode.Bookmark:
if node.url == url:
bm = node
if bm is not None:
return bm
return None
def bookmarksForUrl(self, url, start=StartRoot):
"""
Public method to get a list of bookmark nodes for a given URL.
@param url URL of the bookmarks to search for (QUrl or string)
@keyparam start indicator for the start of the search
(StartRoot, StartMenu, StartToolBar)
@return list of bookmark nodes for the given url (list of BookmarkNode)
"""
if start == StartMenu:
startNode = self.__menu
elif start == StartToolBar:
startNode = self.__toolbar
else:
startNode = self.__bookmarkRootNode
if startNode is None:
return None
if isinstance(url, QUrl):
url = url.toString()
return self.__searchBookmarks(url, startNode)
def __searchBookmarks(self, url, startNode):
"""
        Private method to get a list of bookmark nodes for a given URL.
@param url URL of the bookmarks to search for (string)
@param startNode reference to the node to start searching
(BookmarkNode)
@return list of bookmark nodes for the given url (list of BookmarkNode)
"""
bm = []
for node in startNode.children():
if node.type() == BookmarkNode.Folder:
bm.extend(self.__searchBookmarks(url, node))
elif node.type() == BookmarkNode.Bookmark:
if node.url == url:
bm.append(node)
return bm
class RemoveBookmarksCommand(QUndoCommand):
"""
Class implementing the Remove undo command.
"""
def __init__(self, bookmarksManager, parent, row):
"""
Constructor
@param bookmarksManager reference to the bookmarks manager
(BookmarksManager)
@param parent reference to the parent node (BookmarkNode)
@param row row number of bookmark (integer)
"""
super(RemoveBookmarksCommand, self).__init__(
QCoreApplication.translate("BookmarksManager", "Remove Bookmark"))
self._row = row
self._bookmarksManager = bookmarksManager
try:
self._node = parent.children()[row]
except IndexError:
self._node = BookmarkNode()
self._parent = parent
def undo(self):
"""
Public slot to perform the undo action.
"""
self._parent.add(self._node, self._row)
self._bookmarksManager.entryAdded.emit(self._node)
def redo(self):
"""
Public slot to perform the redo action.
"""
self._parent.remove(self._node)
self._bookmarksManager.entryRemoved.emit(
self._parent, self._row, self._node)
class InsertBookmarksCommand(RemoveBookmarksCommand):
"""
Class implementing the Insert undo command.
"""
def __init__(self, bookmarksManager, parent, node, row):
"""
Constructor
@param bookmarksManager reference to the bookmarks manager
(BookmarksManager)
@param parent reference to the parent node (BookmarkNode)
@param node reference to the node to be inserted (BookmarkNode)
@param row row number of bookmark (integer)
"""
RemoveBookmarksCommand.__init__(self, bookmarksManager, parent, row)
self.setText(QCoreApplication.translate(
"BookmarksManager", "Insert Bookmark"))
self._node = node
def undo(self):
"""
Public slot to perform the undo action.
"""
RemoveBookmarksCommand.redo(self)
def redo(self):
"""
Public slot to perform the redo action.
"""
RemoveBookmarksCommand.undo(self)
class ChangeBookmarkCommand(QUndoCommand):
"""
    Class implementing the Change undo command.
"""
def __init__(self, bookmarksManager, node, newValue, title):
"""
Constructor
@param bookmarksManager reference to the bookmarks manager
(BookmarksManager)
@param node reference to the node to be changed (BookmarkNode)
@param newValue new value to be set (string)
@param title flag indicating a change of the title (True) or
the URL (False) (boolean)
"""
super(ChangeBookmarkCommand, self).__init__()
self._bookmarksManager = bookmarksManager
self._title = title
self._newValue = newValue
self._node = node
if self._title:
self._oldValue = self._node.title
self.setText(QCoreApplication.translate(
"BookmarksManager", "Name Change"))
else:
self._oldValue = self._node.url
self.setText(QCoreApplication.translate(
"BookmarksManager", "Address Change"))
def undo(self):
"""
Public slot to perform the undo action.
"""
if self._title:
self._node.title = self._oldValue
else:
self._node.url = self._oldValue
self._bookmarksManager.entryChanged.emit(self._node)
def redo(self):
"""
Public slot to perform the redo action.
"""
if self._title:
self._node.title = self._newValue
else:
self._node.url = self._newValue
self._bookmarksManager.entryChanged.emit(self._node)
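# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch of how the manager API above fits together; it assumes
# eric's Qt application context is already running, and the node values
# used here are hypothetical.
def _exampleUsage():
    """Sketch: add, retitle, undo and remove a bookmark."""
    manager = BookmarksManager()
    node = BookmarkNode(BookmarkNode.Bookmark)
    node.title = "Example"
    node.url = "http://example.org"
    manager.addBookmark(manager.menu(), node)  # pushes InsertBookmarksCommand
    manager.setTitle(node, "Renamed example")  # pushes ChangeBookmarkCommand
    manager.undoRedoStack().undo()             # reverts the title change
    manager.removeBookmark(node)               # pushes RemoveBookmarksCommand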
| gpl-3.0 | -6,105,752,820,382,670,000 | 32.635514 | 79 | 0.563119 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/subnet_shared_public_ip_address_configuration.py | 1 | 1061 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SubnetSharedPublicIpAddressConfiguration(Model):
"""Configuration for public IP address sharing.
:param allowed_ports: Backend ports that virtual machines on this subnet
are allowed to expose
:type allowed_ports: list[~azure.mgmt.devtestlabs.models.Port]
"""
_attribute_map = {
'allowed_ports': {'key': 'allowedPorts', 'type': '[Port]'},
}
def __init__(self, allowed_ports=None):
super(SubnetSharedPublicIpAddressConfiguration, self).__init__()
self.allowed_ports = allowed_ports
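# Illustrative construction sketch (not part of the generated file); it
# assumes the companion Port model from the same models package:
#
#     from azure.mgmt.devtestlabs.models import Port
#     config = SubnetSharedPublicIpAddressConfiguration(
#         allowed_ports=[Port(transport_protocol='Tcp', backend_port=3389)])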
| mit | 812,117,735,086,572,800 | 35.586207 | 76 | 0.613572 | false |
Gavitron/pipulator | tcp_fakeclient.py | 1 | 2704 | # Connect to a known game Server and spew out whatever it tells us, before the missing heartbeat causes us to disco
#
import socket
import struct
import sys
import json
# internet variables
game_address = ('127.0.0.1', 27001) # a hack so that I can use the tcpserver when testing.
######
# misc helper function declarations
######
# build a byte string for tx on the wire
def msg_builder(msg_type=0,contents=''):
return struct.pack('<LB', len(contents),msg_type)+contents
# generator f'n to take an arbitrary string and pump it through, one byte at a time
def byte_pump(byte_string):
for byte in byte_string:
yield byte
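# Wire format sketch: each frame is a little-endian 4-byte payload length,
# a 1-byte type code, then the payload itself. For example:
#   msg_builder(1, '{"lang": "en"}')
# yields the 19-byte string '\x0e\x00\x00\x00\x01{"lang": "en"}'.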
######
# Main block starts here
# the client connected, so make a connection to the server now
try:
print >>sys.stderr, 'CLIENT : connecting to %s port %s...' % game_address
game_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
game_socket.connect(game_address)
isRunning=True
while isRunning:
message = game_socket.recv(5)
if message:
(msg_len,code) = struct.unpack('<LB', message)
if msg_len > 0:
payload = game_socket.recv(msg_len)
print >>sys.stderr, 'CLIENT : recd %d bytes payload with code %r' % (msg_len, code)
else:
payload = False
if code==0:
#no-op for heartbeat
if payload:
                    print >>sys.stderr, 'WARNING, NONZERO PAYLOAD OF %d BYTES IN HEARTBEAT MESSAGE.\n ABORTING RUN AND DUMPING PAYLOAD:\n%r' % \
(msg_len, payload)
isRunning=False
break
elif code == 1:
data=json.loads(payload)
print >>sys.stderr, 'CLIENT : app version: %s lang: %s ' % (data['version'],data['lang'])
elif code == 3:
print >>sys.stderr, 'CLIENT : gamestate update, %d bytes' % len(payload)
elif code == 5:
data=json.loads(payload)
print >>sys.stderr, 'CLIENT : unknown JSON state message. Dumping:\n%s\n\n' % \
json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))
else:
print >>sys.stderr, 'CLIENT : unknown code "%d", payload of %d bytes ' % (code,len(payload))
# reply with an empty heartbeat
game_socket.sendall(msg_builder())
else:
print >>sys.stderr, 'CLIENT : error from socket'
isRunning = False
finally:
# close out the connections
print >>sys.stderr, 'CLIENT : closing socket'
game_socket.close()
| bsd-3-clause | -4,238,135,791,722,192,400 | 36.555556 | 144 | 0.565089 | false |
ebar0n/SD-Fumadores | agent.py | 1 | 3769 | import threading
import time
from random import choice
import socketserver
from storage import codes, packet_size, store, time_sleep, time_smoke
from utils import _print
smoke = False
smoke_code = None
class MyTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
class MyTCPServerHandler(socketserver.BaseRequestHandler):
bufer = ''
def process(self):
while True:
message = self.request.recv(packet_size).decode('UTF-8')
if message == 'need':
_print('{}: Necesito {}!'.format(
store.get(self.code)['name'],
store.get(self.code)['required']
))
if self.smoke_released:
self.smoke_released = False
global smoke
smoke = False
elif message == 'enable':
_print('{}: Termino de fumar!'.format(store.get(self.code)['name']))
self.smoke_released = True
elif message == 'ack':
time.sleep(time_smoke)
elif message == 'exit':
break
time.sleep(time_sleep)
def handle(self):
        # Handshake: identify the connecting smoker
# cur_thread = threading.current_thread()
self.code = self.request.recv(packet_size).decode('UTF-8')
self.rejected = False
self.smoke_released = False
_print('Conectando fumador...')
if store.get(self.code)['flag'] is False:
store.get(self.code)['request'] = self.request
store.get(self.code)['flag'] = True
_print('Fumador aceptado *{}*'.format(store.get(self.code)['name']))
self.request.send('accepte'.encode('UTF-8'))
self.process()
else:
self.rejected = True
_print('Fumador rechazado *{}*'.format(store.get(self.code)['name']))
self.request.send('rejected'.encode('UTF-8'))
def finish(self):
_print('Fumador desconectado *{}*'.format(store.get(self.code)['name']))
if self.rejected is False:
store.get(self.code)['flag'] = False
global smoke_code
if smoke_code == self.code:
global smoke
smoke = False
def handle_timeout(self):
print('tiempo de espera agotado')
def verify_smoking():
    # Check whether all the smokers are connected
while True:
active_smokers = True
for i in codes:
if store[i].get('flag') is False:
active_smokers = False
break
time.sleep(time_sleep)
if active_smokers and smoke is False:
break
else:
if active_smokers is False:
_print('Agente: Esperando todos los fumadores')
def init(port):
try:
server = MyTCPServer(('0.0.0.0', port), MyTCPServerHandler)
server.timeout = 10
server_thread = threading.Thread(target=server.serve_forever)
server_thread.timeout = 10
        # starting the agent
_print("Esperando fumadores...")
server_thread.daemon = True
server_thread.start()
while True:
verify_smoking()
global smoke_code
smoke_code = choice(codes)
_print('Agente: Tengo disponible {}!'.format(
store.get(smoke_code)['required']
))
global smoke
smoke = True
store.get(smoke_code)['request'].send('enable'.encode('UTF-8'))
_print('Agente: fumador {} servido!'.format(store.get(smoke_code)['name']))
except KeyboardInterrupt:
_print('Cerrando conexiones...')
server.shutdown()
server.server_close()
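# Illustrative entry point (not in the original module; the port number is
# an assumption for the sketch):
#
#     if __name__ == '__main__':
#         init(9999)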
| bsd-3-clause | 6,229,673,584,993,234,000 | 30.940678 | 87 | 0.557177 | false |
anselmobd/fo2 | src/lotes/views/ajax/estoque_depositos_modelo.py | 1 | 1486 | from pprint import pprint
from django.db.models import Exists, OuterRef
from django.http import JsonResponse
from fo2.connections import db_cursor_so
from systextil.queries.deposito.total_modelo import totais_modelos_depositos
import comercial.models
def estoque_depositos_modelo(request, modelo, filtra=''):
cursor = db_cursor_so(request)
data = {
'modelo': modelo,
}
try:
if filtra == 'm':
metas = comercial.models.MetaEstoque.objects
metas = metas.annotate(antiga=Exists(
comercial.models.MetaEstoque.objects.filter(
modelo=OuterRef('modelo'),
data__gt=OuterRef('data')
)
))
metas = metas.filter(antiga=False)
metas = metas.exclude(venda_mensal=0)
metas = metas.values('modelo')
modelos = [m['modelo'] for m in metas]
else:
modelos = None
totais = totais_modelos_depositos(
cursor, ('101', '102', '103', '122', '231'), modelos)
try:
total_est = totais[modelo]
except KeyError:
total_est = 0
data.update({
'result': 'OK',
'total_est': total_est,
})
    except Exception:
        data.update({
            'result': 'ERR',
            'descricao_erro': 'Erro ao buscar estoque nos depósitos',
        })
return JsonResponse(data, safe=False)
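# Illustrative URL wiring sketch (hypothetical route names; old-style Django
# url() patterns are equally possible in this project):
#
#     urlpatterns = [
#         path('estoque/<str:modelo>/', estoque_depositos_modelo),
#         path('estoque/<str:modelo>/<str:filtra>/', estoque_depositos_modelo),
#     ]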
| mit | 5,535,124,717,280,641,000 | 27.557692 | 76 | 0.556229 | false |
datasoftsrl/ssh-poweroff | sshpoff.py | 1 | 3502 | #!/usr/bin/env python3
# _*_ coding: utf-8 _*_
import sys, random
from os import path
import yaml
from flask import (
Flask,
request,
render_template
)
from pexpect.pxssh import (
pxssh,
ExceptionPxssh
)
import log as lg
VERSION = '1.3.1'
CONF_PATH = path.join(
path.dirname(__file__),
'config.yml'
)
app = Flask(__name__)
"""
Load config and throw an error if config is not good.
"""
try:
config = yaml.safe_load(open(CONF_PATH))
except:
print(
'[!] ops, I found an error reading {}, using default.'.format('config.yml'),
file=sys.stderr
)
config = {}
"""
Check if config key is present, otherwise use default.
"""
default = {
'title': 'SSH Poweroff',
'mail': '[email protected]',
'log-path': '/var/log/ssh-poweroff/sshpoff.log',
'devices': [],
'poweroff-all': 'Power off all devices!',
'success-msg': '{} successfully turned off.',
'unvalid-msg': '{} is not valid!',
'no-ssh-msg': 'Could not communicate with {}!'
}
default.update(config)
config = default
del default
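# Illustrative config.yml sketch (hypothetical values; the keys mirror the
# defaults above, and each device entry needs the fields used by /command):
#
#     title: SSH Poweroff
#     devices:
#       - name: nas
#         host: 192.168.1.10
#         port: 22
#         user: root
#         password: secret
#         command: poweroff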
"""
Cache a 'devices' dict for ease of use.
"""
if len(config['devices']) > 0:
devices = {x['name']: x for x in config['devices']}
else:
devices = {}
"""
Instatiate logging facility.
"""
log = lg.Log(config['title'], config['log-path'])
def _random_colors():
"""
    Returns a generator that yields names of random colors, cycling forever.
"""
colors = [
'red',
'pink',
'deep-purple',
'indigo',
'blue',
'light-blue',
'cyan',
'teal',
'green',
'light-green',
'orange',
'deep-orange',
'brown',
'blue-grey'
]
random.shuffle(colors)
count = 0
length = len(colors)
while True:
if count == length:
count = 0
yield colors[count]
count += 1
@app.route('/')
def home():
"""
    Shows the power-off buttons for all configured devices.
    The actual power-off action is handled by the /command endpoint.
"""
global config
log.info('%s connected.', request.remote_addr)
return render_template('index.html',
title = config['title'],
version = VERSION,
mail = config['mail'],
dev_names = list(devices.keys()),
col_dev = zip(_random_colors(), config['devices']),
all_button = config['poweroff-all']
)
@app.route('/command', methods=['POST'])
def command():
"""
Executes a command to a given device (form field 'id') when triggered.
"""
global config, devices
if request.method == 'POST':
name = request.form['id']
try:
properties = devices[name]
ssh = pxssh()
ssh.force_password = True
ssh.options['StrictHostKeyChecking'] = 'no'
ssh.login(
server = properties['host'],
username = properties['user'],
password = properties['password'],
port = properties['port']
)
ssh.sendline(properties['command'])
ssh.logout()
# log
log.info('%s -> %s: OK.', request.remote_addr, properties['host'])
return config['success-msg'].format(name)
except (KeyError, ExceptionPxssh) as e:
if isinstance(e, KeyError):
# user tried to launch a command on an unexistent device
                log.error('%s -> %s: invalid device.', request.remote_addr, name)
return config['unvalid-msg'].format(name)
elif isinstance(e, ExceptionPxssh):
# ssh connection failed
log.error('%s -> %s: KO.', request.remote_addr, properties['host'])
return config['no-ssh-msg'].format(name)
if __name__ == '__main__':
app.run()
| mit | -8,402,887,354,948,703,000 | 21.025157 | 80 | 0.605368 | false |
sistason/pa3 | src/pa3_frontend/pa3_django/pa3/statistics_handling.py | 1 | 4335 | import logging
import time
from django.utils import timezone
from django.db.models import Sum
from django.http import HttpResponse
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from pa3.models import WaitingNumberBatch, WaitingNumber, NewestNumberBatch, StatisticalData
def get_src_statistic(_src):
try:
return StatisticalData.objects.get(src=_src)
except MultipleObjectsReturned:
for stats_ in StatisticalData.objects.filter(src=_src):
stats_.delete()
except ObjectDoesNotExist:
pass
except Exception as e:
logging.exception('Exception while updating Stats: {}'.format(e))
def create_statistic(_src, date_):
real_data_begin = timezone.datetime(2013, 9, 1)
stat_ = StatisticalData(src=_src, date=date_)
stat_qs = WaitingNumber.objects.filter(src=_src).filter(date__gt=real_data_begin).filter(
date_delta__lt=60 * 60 * 3).filter(date_delta__gt=1)
stat_.avg_len = stat_qs.count()
stat_.avg_sum = stat_qs.aggregate(s=Sum('date_delta'))['s']
if stat_.avg_sum is None:
stat_.avg_sum = 0
stat_.avg = 0
else:
stat_.avg = 1.0 * stat_.avg_sum / stat_.avg_len
stat_.avg_last_two_weeks = stat_.avg
stat_.avg_last_same_day = stat_.avg
stat_.avg_whole = (stat_.avg + stat_.avg_last_two_weeks + stat_.avg_last_same_day) / 3
stat_.avg_proc_delay_sum = stat_qs.aggregate(s=Sum('proc_delay'))['s']
if stat_.avg_proc_delay_sum is None:
stat_.avg_proc_delay_sum = 0
stat_.avg_proc_delay_whole = 0
else:
stat_.avg_proc_delay_whole = 1.0 * stat_.avg_proc_delay_sum / stat_.avg_len
stat_.save()
return stat_
def update_statistic(_src, dd, new_batch, date_):
stat_ = StatisticalData.objects.get(src=_src)
stat_.avg_sum += dd
stat_.avg_len += 1
# sum/len = avg | sum=avg*len | new_avg = sum+dd/len+1
stat_.avg = 1.0 * stat_.avg_sum / stat_.avg_len
stat_.avg_whole = (stat_.avg + stat_.avg_last_two_weeks + stat_.avg_last_same_day) / 3
if new_batch.proc_delay is not None and new_batch.proc_delay > 0:
stat_.avg_proc_delay_sum += new_batch.proc_delay
stat_.avg_proc_delay_len += 1
stat_.avg_proc_delay_whole = 1.0 * stat_.avg_proc_delay_sum / stat_.avg_proc_delay_len
stat_.date = date_
stat_.save()
def recompute_stats(request):
# Recomputes the last_two_weeks average and the last_day average
# Requires calls, e.g. CRON
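    # Illustrative scheduling sketch (hypothetical URL): a crontab entry could
    # hit this view periodically, e.g.
    #     */15 * * * * curl -s http://localhost:8000/stats/recompute/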
real_data_begin = timezone.datetime(2013, 9, 1)
for stat_data in StatisticalData.objects.all():
# Get average over the last two weeks
last_two_weeks_qs = WaitingNumber.objects.filter(
src=stat_data.src).filter(
date__gt=real_data_begin).filter(
date_delta__lt=60*60*3).filter(
date_delta__gt=1).filter(
date__gt=int(time.time())-(60*60*24*14))
last_two_weeks_len = last_two_weeks_qs.count()
stat_data.avg_last_two_weeks = last_two_weeks_qs.aggregate(
s=Sum('date_delta'))['s'] / last_two_weeks_len if last_two_weeks_len else 0
# Get average from weekday last week (Tuesday last week)
last_sameday_qs = WaitingNumber.objects.filter(
src=stat_data.src).filter(
date__gt=real_data_begin).filter(
date_delta__lt=60*60*3).filter(
date_delta__gt=1).filter(
            date__lt=timezone.now() + timedelta(days=1) - timedelta(weeks=1),
            date__gt=timezone.now() - timedelta(weeks=1))
last_sameday_len = last_sameday_qs.count()
stat_data.avg_last_same_day = last_sameday_qs.aggregate(
s=Sum('date_delta'))['s'] / last_sameday_len if last_sameday_len else 0
# Weights of whole, last two weeks and last weekday are equal
if last_two_weeks_len and last_sameday_len:
stat_data.avg_whole = (stat_data.avg + stat_data.avg_last_two_weeks + stat_data.avg_last_same_day) / 3.0
elif last_two_weeks_len:
stat_data.avg_whole = (stat_data.avg + stat_data.avg_last_two_weeks) / 2.0
elif last_sameday_len:
stat_data.avg_whole = (stat_data.avg + stat_data.avg_last_same_day) / 2.0
else:
stat_data.avg_whole = stat_data.avg
stat_data.save()
return HttpResponse(status=200) | gpl-3.0 | -7,438,429,729,537,712,000 | 39.148148 | 116 | 0.62722 | false |
ElettraSciComp/STP-Core | STP-Core/preview_phaseretrieval.py | 1 | 5490 | ###########################################################################
# (C) 2016 Elettra - Sincrotrone Trieste S.C.p.A.. All rights reserved. #
# #
# #
# This file is part of STP-Core, the Python core of SYRMEP Tomo Project, #
# a software tool for the reconstruction of experimental CT datasets. #
# #
# STP-Core is free software: you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# STP-Core is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License #
# for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with STP-Core. If not, see <http://www.gnu.org/licenses/>. #
# #
###########################################################################
#
# Author: Francesco Brun
# Last modified: July, 8th 2016
#
from sys import argv, exit
from os import remove, sep, linesep
from os.path import exists
from numpy import float32, double, nanmin, nanmax, finfo, ndarray
from time import time
from multiprocessing import Process, Lock
from pyfftw.interfaces.cache import enable as pyfftw_cache_enable, disable as pyfftw_cache_disable
from pyfftw.interfaces.cache import set_keepalive_time as pyfftw_set_keepalive_time
from phaseretrieval.tiehom import tiehom, tiehom_plan
from phaseretrieval.tiehom2020 import tiehom2020, tiehom_plan2020
from phaseretrieval.phrt import phrt, phrt_plan
from h5py import File as getHDF5
from utils.caching import cache2plan, plan2cache
from preprocess.extract_flatdark import extract_flatdark
import stpio.tdf as tdf
def main(argv):
"""To do...
"""
lock = Lock()
skip_flat = True
first_done = False
pyfftw_cache_disable()
pyfftw_cache_enable()
pyfftw_set_keepalive_time(1800)
# Get the from and to number of files to process:
idx = int(argv[0])
# Get full paths of input TDF and output TDF:
infile = argv[1]
outfile = argv[2]
# Get the phase retrieval parameters:
method = int(argv[3])
param1 = double(argv[4]) # param1( e.g. regParam, or beta)
param2 = double(argv[5]) # param2( e.g. thresh or delta)
energy = double(argv[6])
distance = double(argv[7])
    pixsize = double(argv[8]) / 1000.0 # pixsize from micron to mm
pad = True if argv[9] == "True" else False
# Tmp path and log file:
tmppath = argv[10]
if not tmppath.endswith(sep): tmppath += sep
logfilename = argv[11]
# Open the HDF5 file and check it contains flat files:
skipflat = False
f_in = getHDF5(infile, 'r')
if "/tomo" in f_in:
dset = f_in['tomo']
if not "/flat" in f_in:
skipflat = True
else:
dset = f_in['exchange/data']
if not "/exchange/data_white" in f_in:
skipflat = True
num_proj = tdf.get_nr_projs(dset)
num_sinos = tdf.get_nr_sinos(dset)
# Check if the HDF5 makes sense:
if (num_proj == 0):
log = open(logfilename,"a")
log.write(linesep + "\tNo projections found. Process will end.")
log.close()
exit()
# Get flats and darks from cache or from file:
if not skipflat:
try:
corrplan = cache2plan(infile, tmppath)
except Exception as e:
#print "Error(s) when reading from cache"
corrplan = extract_flatdark(f_in, True, logfilename)
remove(logfilename)
plan2cache(corrplan, infile, tmppath)
# Read projection:
im = tdf.read_tomo(dset,idx).astype(float32)
f_in.close()
# Apply simple flat fielding (if applicable):
if not skipflat:
if (isinstance(corrplan['im_flat_after'], ndarray) and isinstance(corrplan['im_flat'], ndarray) and
isinstance(corrplan['im_dark'], ndarray) and isinstance(corrplan['im_dark_after'], ndarray)) :
if (idx < num_proj/2):
im = (im - corrplan['im_dark']) / (abs(corrplan['im_flat'] - corrplan['im_dark']) + finfo(float32).eps)
else:
im = (im - corrplan['im_dark_after']) / (abs(corrplan['im_flat_after'] - corrplan['im_dark_after'])
+ finfo(float32).eps)
# Prepare plan:
im = im.astype(float32)
if (method == 0):
# Paganin 2002:
plan = tiehom_plan (im, param1, param2, energy, distance, pixsize, pad)
im = tiehom(im, plan).astype(float32)
elif (method == 1):
# Paganin 2020:
plan = tiehom_plan2020 (im, param1, param2, energy, distance, pixsize, pad)
im = tiehom2020(im, plan).astype(float32)
else:
plan = phrt_plan (im, energy, distance, pixsize, param2, param1, method, pad)
im = phrt(im, plan, method).astype(float32)
# Write down reconstructed preview file (file name modified with metadata):
im = im.astype(float32)
outfile = outfile + '_' + str(im.shape[1]) + 'x' + str(im.shape[0]) + '_' + str( nanmin(im)) + '$' + str( nanmax(im) )
im.tofile(outfile)
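# Illustrative invocation sketch (argument values are hypothetical; the
# positional order matches the argv parsing at the top of main):
#   python preview_phaseretrieval.py 0 in.tdf out.raw 0 1.0 1000.0 \
#       20.0 150.0 4.5 True /tmp/stp log.txt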
if __name__ == "__main__":
main(argv[1:]) | gpl-3.0 | 5,234,524,101,130,132,000 | 35.838926 | 120 | 0.598579 | false |
williamyangcn/iBlah_py | ui/ui_profile_dialog.py | 1 | 4945 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/Users/lee/backups/code/iblah_py/ui/ui_profile_dialog.ui'
#
# Created: Fri May 6 21:47:58 2011
# by: PyQt4 UI code generator 4.8.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_ProfileDialog(object):
def setupUi(self, ProfileDialog):
ProfileDialog.setObjectName(_fromUtf8("ProfileDialog"))
ProfileDialog.setEnabled(True)
ProfileDialog.resize(470, 300)
self.save_btn = QtGui.QPushButton(ProfileDialog)
self.save_btn.setEnabled(True)
self.save_btn.setGeometry(QtCore.QRect(330, 240, 114, 32))
self.save_btn.setObjectName(_fromUtf8("save_btn"))
self.avatar_label = QtGui.QLabel(ProfileDialog)
self.avatar_label.setGeometry(QtCore.QRect(310, 20, 130, 130))
self.avatar_label.setStyleSheet(_fromUtf8("border: 2px solid #ccc;"))
self.avatar_label.setObjectName(_fromUtf8("avatar_label"))
self.label_2 = QtGui.QLabel(ProfileDialog)
self.label_2.setGeometry(QtCore.QRect(21, 117, 26, 16))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.impresa_text_edit = QtGui.QTextEdit(ProfileDialog)
self.impresa_text_edit.setGeometry(QtCore.QRect(80, 170, 361, 51))
self.impresa_text_edit.setObjectName(_fromUtf8("impresa_text_edit"))
self.fullname_line_edit = QtGui.QLineEdit(ProfileDialog)
self.fullname_line_edit.setGeometry(QtCore.QRect(81, 117, 201, 22))
self.fullname_line_edit.setObjectName(_fromUtf8("fullname_line_edit"))
self.label_3 = QtGui.QLabel(ProfileDialog)
self.label_3.setGeometry(QtCore.QRect(21, 21, 39, 16))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.label_4 = QtGui.QLabel(ProfileDialog)
self.label_4.setGeometry(QtCore.QRect(21, 53, 39, 16))
self.label_4.setObjectName(_fromUtf8("label_4"))
self.cellphone_no_line_edit = QtGui.QLineEdit(ProfileDialog)
self.cellphone_no_line_edit.setEnabled(True)
self.cellphone_no_line_edit.setGeometry(QtCore.QRect(81, 53, 201, 22))
self.cellphone_no_line_edit.setText(_fromUtf8(""))
self.cellphone_no_line_edit.setReadOnly(True)
self.cellphone_no_line_edit.setObjectName(_fromUtf8("cellphone_no_line_edit"))
self.fetion_no_line_edit = QtGui.QLineEdit(ProfileDialog)
self.fetion_no_line_edit.setEnabled(True)
self.fetion_no_line_edit.setGeometry(QtCore.QRect(81, 21, 201, 22))
self.fetion_no_line_edit.setText(_fromUtf8(""))
self.fetion_no_line_edit.setReadOnly(True)
self.fetion_no_line_edit.setObjectName(_fromUtf8("fetion_no_line_edit"))
self.label_5 = QtGui.QLabel(ProfileDialog)
self.label_5.setGeometry(QtCore.QRect(21, 85, 33, 16))
self.label_5.setObjectName(_fromUtf8("label_5"))
self.email_line_edit = QtGui.QLineEdit(ProfileDialog)
self.email_line_edit.setEnabled(True)
self.email_line_edit.setGeometry(QtCore.QRect(81, 85, 201, 22))
self.email_line_edit.setText(_fromUtf8(""))
self.email_line_edit.setReadOnly(True)
self.email_line_edit.setObjectName(_fromUtf8("email_line_edit"))
self.label_6 = QtGui.QLabel(ProfileDialog)
self.label_6.setGeometry(QtCore.QRect(21, 170, 52, 16))
self.label_6.setObjectName(_fromUtf8("label_6"))
self.retranslateUi(ProfileDialog)
QtCore.QObject.connect(self.save_btn, QtCore.SIGNAL(_fromUtf8("clicked()")), ProfileDialog.accept)
QtCore.QMetaObject.connectSlotsByName(ProfileDialog)
def retranslateUi(self, ProfileDialog):
ProfileDialog.setWindowTitle(QtGui.QApplication.translate("ProfileDialog", "Profile", None, QtGui.QApplication.UnicodeUTF8))
self.save_btn.setText(QtGui.QApplication.translate("ProfileDialog", "关闭 (&C)", None, QtGui.QApplication.UnicodeUTF8))
self.save_btn.setShortcut(QtGui.QApplication.translate("ProfileDialog", "Return", None, QtGui.QApplication.UnicodeUTF8))
self.avatar_label.setText(QtGui.QApplication.translate("ProfileDialog", "avatar", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("ProfileDialog", "姓名", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("ProfileDialog", "飞信号", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(QtGui.QApplication.translate("ProfileDialog", "手机号", None, QtGui.QApplication.UnicodeUTF8))
self.label_5.setText(QtGui.QApplication.translate("ProfileDialog", "EMail", None, QtGui.QApplication.UnicodeUTF8))
self.label_6.setText(QtGui.QApplication.translate("ProfileDialog", "心情短语", None, QtGui.QApplication.UnicodeUTF8))
| bsd-3-clause | -4,948,456,608,463,711,000 | 57.535714 | 132 | 0.702054 | false |
thoreg/satchmo | store/localsite/templatetags/email_munge.py | 1 | 1857 | """
Stolen from: http://tomcoote.co.uk/code-bank/django-email-munger/
"""
from django import template
from django.template.defaultfilters import stringfilter
from django.utils.safestring import mark_safe
from django.utils.html import conditional_escape
import re
register = template.Library()
@register.filter
@stringfilter
def mungify(email, text=None, autoescape=None):
'''
    Template filter to hide an email address from email-harvesting web
    scrapers and so cut down on spam.
The filter should be applied on a string which represents an email address. You
can optionally give the filter a parameter which will represent the name of the
resulting email href link. If no extra parameter is given the email address will
be used as the href text.
{{ email|mungify:"contact me" }}
or
{{ email|mungify }}
The output is javascript which will write out the email href link in a way so
as to not actually show the email address in the source code as plain text.
'''
text = text or email
if autoescape:
email = conditional_escape(email)
text = conditional_escape(text)
emailArrayContent = ''
textArrayContent = ''
r = lambda c: '"' + str(ord(c)) + '",'
for c in email: emailArrayContent += r(c)
for c in text: textArrayContent += r(c)
result = """<script>
var _tyjsdf = [%s], _qplmks = [%s];
document.write('<a href="mailto:');
for(_i=0;_i<_tyjsdf.length;_i++){document.write('&#'+_tyjsdf[_i]+';');}
document.write('">');
for(_i=0;_i<_qplmks.length;_i++){document.write('&#'+_qplmks[_i]+';');}
document.write('</a>');
</script>""" % (re.sub(r',$', '', emailArrayContent), re.sub(r',$', '', textArrayContent))
return mark_safe(result)
mungify.needs_autoescape = True
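# Illustrative template usage sketch (the load name follows this file's
# module name):
#
#     {% load email_munge %}
#     {{ "[email protected]"|mungify:"contact me" }}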
| bsd-3-clause | -8,291,347,823,628,032,000 | 32.160714 | 98 | 0.655358 | false |
vicente-gonzalez-ruiz/QSVC | trunk/src/old_py/info_mc_j2k.py | 1 | 7723 | #!/usr/bin/python
# -*- coding: iso-8859-15 -*-
## @file info_mc_j2k.py
# The size in bytes, and a codestream Kbps, even detailed subband
# level and neglecting headers, from a MCJ2K codestream.
#
# @authors Jose Carmelo Maturana-Espinosa\n Vicente Gonzalez-Ruiz.
# @date Last modification: 2015, January 7.
#
## @package info_mc_j2k
# The size in bytes, and a codestream Kbps, even detailed subband
# level and neglecting headers, from a MCJ2K codestream.
import sys
import os
import re
import math
import os.path
from GOP import GOP
import subprocess as sub
from subprocess import check_call
from subprocess import CalledProcessError
from MCTF_parser import MCTF_parser
## Refers to high frequency subbands.
HIGH = "high_"
## Refers to low frequency subbands.
LOW = "low_"
## Refers to fields of motion.
MOTION = "motion_residue_"
## Indicates whether a log is recorded in a file.
print_file = False
## Number of Group Of Pictures to process.
GOPs = 1
## Number of Temporal Resolution Levels.
TRLs = 5
## Frames per second.
FPS = 30 # 30 # 50
## The parser module provides an interface to Python's internal parser
## and byte-code compiler.
parser = MCTF_parser(description="Information of codestream.")
parser.GOPs(GOPs)
parser.FPS(FPS)
## A script may only parse a few of the command-line arguments,
## passing the remaining arguments on to another script or program.
args = parser.parse_known_args()[0]
if args.GOPs :
GOPs = int(args.GOPs)
if args.TRLs:
TRLs = int(args.TRLs)
if args.FPS :
FPS = int(args.FPS)
## Initializes the class GOP (Group Of Pictures).
gop=GOP()
## Extract the value of the size of a GOP, that is, the number of images.
GOP_size = gop.get_size(TRLs)
## Calculate the total number of video images.
pictures = GOPs * GOP_size + 1
## Duration of the sequence.
duration = pictures / (FPS * 1.0)
## Number of bytes of an entire directory. The size in bytes, and a
## codestream Kbps, even detailed subband level and neglecting headers
## is performed in info.py.
# @param the_path Directory path.
# @param key If you want to have only a certain type of files in the directory.
# @return Files size.
def get_size (the_path, key) :
path_size = 0
for path, dirs, files in os.walk(the_path) :
for fil in files :
if re.search(key, fil) :
path_size += os.path.getsize(the_path + "/" + fil)
return path_size
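# Illustrative call sketch (hypothetical directory): the total bytes of all
# motion files of subband 3 would be get_size('/tmp/extract', MOTION + '3').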
#-----------------------------------------------
#-----------------------------------------------
#- MAIN ----------------------------------------
#-----------------------------------------------
#-----------------------------------------------
# info = [[kbps GOP1, kbps GOP2, kbps GOPn], kbps GOPs, rmse1D]
## Current path.
p = sub.Popen("echo $PWD", shell=True, stdout=sub.PIPE, stderr=sub.PIPE)
out, err = p.communicate()
## Reconstruction path.
path_tmp = out[:-1]
########
# RMSE #
########
# The reconstruction exists, so its distortion is computed.
if os.path.exists(path_tmp + "/low_0") :
##########
# SNR 1D #
##########
    # BRC and UnaSubParaTodas (one subband for all)
p = sub.Popen("snr --file_A=low_0 --file_B=../low_0 2> /dev/null | grep RMSE | cut -f 3",
shell=True, stdout=sub.PIPE, stderr=sub.PIPE)
    # subIndependientes (independent subbands)
#p = sub.Popen("snr --file_A=high_4 --file_B=../high_4 2> /dev/null | grep RMSE | cut -f 3",
# shell=True, stdout=sub.PIPE, stderr=sub.PIPE)
out, err = p.communicate()
#errcode = p.returncode
if out == "" : #if err in locals() :
check_call("echo SNR sin salida.", shell=True)
exit (0)
rmse1D = float(out)
##########
# SNR 2D #
##########
#rmse2D=`snr2D --block_size=$block_size_snr --dim_X=$RES_X --dim_Y=$RES_Y --file_A=$DATA/$VIDEO.yuv --file_B=$data_dir/tmp/low_0_UP --FFT 2> /dev/null | grep RMSE | cut -f 3` # FFT en 3D
##########
# SNR 3D #
##########
#rmse3D=`snr3D --block_size=$block_size_snr --dim_X=$RES_X --dim_Y=$RES_Y --dim_Z=5 --file_A=$DATA/$VIDEO.yuv --file_B=$data_dir/tmp/low_0_UP --FFT 2> /dev/null | grep RMSE | cut -f 3` # FFT en 3D
####################
# export variables #
####################
globals()["info_rmse1D"] = rmse1D #p = sub.Popen("export info_mc_j2k_rmse1D=" + rmse1D, shell=True, stdout=sub.PIPE, stderr=sub.PIPE)
info[2] = rmse1D
# info = [[kbps GOP1, kbps GOP2, kbps GOPn], kbps GOPs, rmse1D]
if not 'info' in globals() :
globals()["info"] = [[]]
###################
# Print a fichero #
###################
#########################
# Media Pts de cada GOP #
#########################
for par in range (0, len(info[0])) :
info[1] = Pts_GOPs[par]
check_call("echo \"" + Pts_GOPs + "\" >> ../info_PtsGOPs", shell=True) #BASH: check_call("echo \"" + ${PtsGOPs[@]} + "\" >> ../info_PtsGOPs", shell=True)
check_call("echo \"" + average_Pts_GOPs + "\" >> ../info_average_PtsGOPs", shell=True)
########
# KBPS #
########
# No reconstruction exists, so the kbps of the still-compressed codestream are computed.
else :
TO_KBPS = 8.0 / duration / 1000
############
# KBPS GOP #
############
    _kbps_M = []  # per-GOP kbps of the motion fields
    nGOP = 1
while nGOP <= GOPs :
# H's
subband = TRLs - 1
nImage = 0
pictures_sub = GOP_size
while subband > 0 :
pictures_sub = ( pictures_sub + 1 ) / 2
# SIZES MOTION un GOP #
_kbps_M.append( get_size(path_tmp, MOTION + str(subband) + "_*_[" + str('%04d'%(nImage*1)) + "-" + str('%04d'%pictures_sub) + "].j2c") * TO_KBPS )
# SIZES H's un GOP #
# SIZES L un GOP #
subband -= 1
nImage = pictures_sub
# L
        # SUM #
        print ("sum of the sizes of this GOP; record it.")
nGOP += 1
##########################################
# M
kbps_M = get_size(path_tmp, MOTION) * TO_KBPS
    # T first L (outside the GOP)
kbps_T_first_L = [ get_size(path_tmp, LOW + str(TRLs-1) + "_[YUV]_0000.j2c") * TO_KBPS ]
    # T L (the one inside the GOP)
_kbps_T = [ get_size(path_tmp, LOW + str(TRLs-1) + "_[YUV]_000?.j2c") * TO_KBPS ]
    # = [ get_size(path_extract, LOW) ] (the images of the 2 L's)
# T (Hs)
for i in range (1, TRLs) :
_kbps_T.append( get_size(path_tmp, HIGH + str(TRLs - i)) * TO_KBPS )
    # T of the GOP (2nd L + Hs)
_kbps_T.append( _kbps_T[0] + (get_size(path_tmp, HIGH) * TO_KBPS) )
# kbps_GOP (M + T)
kbps_GOP = kbps_M + _kbps_T[TRLs]
    # kbps_ALL (M + T), where T = first L + types
bytes_mj2k = get_size(path_tmp, "")
kbps_ALL = bytes_mj2k * TO_KBPS
####################
# export variables #
####################
globals()["info_kbps_M"] = kbps_M
globals()["info_kbps_T"] = _kbps_T
globals()["info_kbps_GOP"] = kbps_GOP
globals()["info_kbps_ALL"] = kbps_ALL
''' ##############
CODE NOTES       #
''' ##############
'''
#VIA BASH
#p = sub.Popen("export info_mc_j2k_kbps_M=" + kbps_M + "; "
# "export info_mc_j2k_kbps_T=" + _kbps_T + "; "
# "export info_mc_j2k_kbps_GOP=" + kbps_GOP + "; "
# "export info_mc_j2k_kbps_ALL=" + kbps_ALL
# , shell=True, stdout=sub.PIPE, stderr=sub.PIPE)
#out, err = p.communicate()
##errcode = p.returncode
#VIA PYTHON
'''
'''
import re
import os
path_size = 0
for path, dirs, files in os.walk("/home/cmaturana") :
for fil in files :
if re.search("aaa0[2-3]", fil) :
path_size += os.path.getsize("/home/cmaturana" + "/" + fil)
path_size
http://www.tutorialspoint.com/python/python_reg_expressions.htm
'''
| gpl-2.0 | 6,323,669,541,710,254,000 | 25.179661 | 200 | 0.545125 | false |
numerical-mathematics/extrapolation | ex_parallel_original.py | 1 | 28458 | from __future__ import division
import numpy as np
import multiprocessing as mp
import math
NUM_WORKERS = None
def set_NUM_WORKERS(nworkers):
global NUM_WORKERS
if nworkers == None:
try:
NUM_WORKERS = mp.cpu_count()
except NotImplementedError:
NUM_WORKERS = 4
else:
NUM_WORKERS = max(nworkers, 1)
def error_norm(y1, y2, atol, rtol):
tol = atol + np.maximum(np.abs(y1),np.abs(y2))*rtol
return np.linalg.norm((y1-y2)/tol)/(len(y1)**0.5)
def adapt_step(method, func, tn_1, yn_1, args, y, y_hat, h, p, atol, rtol, pool,
seq=(lambda t: 2*t), dense=False):
'''
Only called when adaptive == 'step'; i.e., for fixed order.
Checks if the step size is accepted. If not, computes a new step size
and checks again. Repeats until step size is accepted
**Inputs**:
- method: -- the method on which the extrapolation is based
- func -- the right hand side function of the IVP.
Must output a non-scalar numpy.ndarray
- tn_1, yn_1 -- y(tn_1) = yn_1 is the last accepted value of the
computed solution
- args -- Extra arguments to pass to function.
- y, y_hat -- the computed values of y(tn_1 + h) of order p and
(p-1), respectively
- h -- the step size taken and to be tested
- p -- the order of the higher extrapolation method
Assumed to be greater than 1.
- atol, rtol -- the absolute and relative tolerance of the local
error.
- seq -- the step-number sequence. optional; defaults to the
harmonic sequence given by (lambda t: 2*t)
**Outputs**:
- y, y_hat -- the computed solution of orders p and (p-1) at the
accepted step size
- h -- the accepted step taken to compute y and y_hat
- h_new -- the proposed next step size
- (fe_seq, fe_tot) -- the number of sequential f evaluations, and
the total number of f evaluations
'''
facmax = 5
facmin = 0.2
fac = 0.8
err = error_norm(y, y_hat, atol, rtol)
h_new = h*min(facmax, max(facmin, fac*((1/err)**(1/p))))
fe_seq = 0
fe_tot = 0
while err > 1:
h = h_new
if dense:
y, y_hat, (fe_seq_, fe_tot_), poly = method(func, tn_1, yn_1, args,
h, p, pool, seq=seq, dense=dense)
else:
y, y_hat, (fe_seq_, fe_tot_) = method(func, tn_1, yn_1, args, h, p,
pool, seq=seq, dense=dense)
fe_seq += fe_seq_
fe_tot += fe_tot_
err = error_norm(y, y_hat, atol, rtol)
h_new = h*min(facmax, max(facmin, fac*((1/err)**(1/p))))
if dense:
return (y, y_hat, h, h_new, (fe_seq, fe_tot), poly)
else:
return (y, y_hat, h, h_new, (fe_seq, fe_tot))
def extrapolation_parallel (method, func, y0, t, args=(), full_output=False,
rtol=1.0e-8, atol=1.0e-8, h0=0.5, mxstep=10e4, adaptive="order", p=4,
seq=(lambda t: 2*t), nworkers=None):
'''
Solves the system of IVPs dy/dt = func(y, t0, ...) with parallel extrapolation.
**Parameters**
- method: callable()
The method on which the extrapolation is based
- func: callable(y, t0, ...)
Computes the derivative of y at t0 (i.e. the right hand side of the
IVP). Must output a non-scalar numpy.ndarray
- y0 : numpy.ndarray
Initial condition on y (can be a vector). Must be a non-scalar
numpy.ndarray
- t : array
A sequence of time points for which to solve for y. The initial
value point should be the first element of this sequence.
- args : tuple, optional
Extra arguments to pass to function.
- full_output : bool, optional
True if to return a dictionary of optional outputs as the second
output. Defaults to False
**Returns**
- ys : numpy.ndarray, shape (len(t), len(y0))
Array containing the value of y for each desired time in t, with
the initial value y0 in the first row.
- infodict : dict, only returned if full_output == True
Dictionary containing additional output information
KEY MEANING
'fe_seq' cumulative number of sequential derivative evaluations
'fe_tot' cumulative number of total derivative evaluations
'nstp' cumulative number of successful time steps
'h_avg' average step size if adaptive == "order" (None otherwise)
'k_avg' average extrapolation order if adaptive == "order"
... (None otherwise)
**Other Parameters**
- rtol, atol : float, optional
The input parameters rtol and atol determine the error control
performed by the solver. The solver will control the vector,
e = y2 - y1, of estimated local errors in y, according to an
inequality of the form l2-norm of (e / (ewt * len(e))) <= 1,
where ewt is a vector of positive error weights computed as
ewt = atol + max(y1, y2) * rtol. rtol and atol can be either vectors
the same length as y0 or scalars. Both default to 1.0e-8.
- h0 : float, optional
The step size to be attempted on the first step. Defaults to 0.5
- mxstep : int, optional
Maximum number of (internally defined) steps allowed for each
integration point in t. Defaults to 10e4
- adaptive: string, optional
Specifies the strategy of integration. Can take three values:
-- "fixed" = use fixed step size and order strategy.
-- "step" = use adaptive step size but fixed order strategy.
-- "order" = use adaptive step size and adaptive order strategy.
Defaults to "order"
- p: int, optional
The order of extrapolation if adaptive is not "order", and the
starting order otherwise. Defaults to 4
- seq: callable(k) (k: positive int), optional
The step-number sequence. Defaults to the harmonic sequence given
by (lambda t: 2*t)
- nworkers: int, optional
The number of workers working in parallel. If nworkers==None, then
the the number of workers is set to the number of CPUs on the the
running machine. Defaults to None.
'''
set_NUM_WORKERS(nworkers)
pool = mp.Pool(NUM_WORKERS)
assert len(t) > 1, ("the array t must be of length at least 2, " +
"and the initial value point should be the first element of t")
dense = True if len(t) > 2 else False
ys = np.zeros((len(t), len(y0)), dtype=(type(y0[0])))
ys[0] = y0
t0 = t[0]
fe_seq = 0
fe_tot = 0
nstp = 0
cur_stp = 0
if adaptive == "fixed":
# Doesn't work correctly with dense output
ts, h = np.linspace(t0, t[-1], (t[-1]-t0)/h0 + 1, retstep=True)
y = 1*y0
for i in range(len(ts) - 1):
if dense:
y, _, (fe_seq_, fe_tot_), poly = method(func, ts[i], y, args, h,
p, pool, seq=seq, dense=dense)
else:
y, _, (fe_seq_, fe_tot_) = method(func, ts[i], y, args, h, p,
pool, seq=seq, dense=dense)
fe_seq += fe_seq_
fe_tot += fe_tot_
nstp += 1
cur_stp += 1
if cur_stp > mxstep:
                raise Exception('Reached Max Number of Steps. Current t = '
                    + str(ts[i]))
ys[1] = 1*y
elif adaptive == "step":
assert p > 1, "order of method must be greater than 1 if adaptive=step"
t_max = t[-1]
t_index = 1
y, t_curr = 1*y0, t0
h = min(h0, t_max-t0)
while t_curr < t_max:
if dense:
y_, y_hat, (fe_seq_, fe_tot_), poly = method(func, t_curr, y,
args, h, p, pool, seq=seq, dense=dense)
else:
y_, y_hat, (fe_seq_, fe_tot_) = method(func, t_curr, y, args,
h, p, pool, seq=seq, dense=dense)
fe_seq += fe_seq_
fe_tot += fe_tot_
if dense:
reject_inter = True
while reject_inter:
y_temp, _, h, h_new, (fe_seq_, fe_tot_), poly = adapt_step(
method, func, t_curr, y, args, y_, y_hat, h, p, atol,
rtol, pool, seq=seq, dense=dense)
reject_inter = False
while t_index < len(t) and t[t_index] <= t_curr + h:
y_poly, errint, h_int = poly((t[t_index] - t_curr)/h)
if errint <= 10:
ys[t_index] = 1*y_poly
cur_stp = 0
t_index += 1
reject_inter = False
else:
h = h_int
fe_seq += fe_seq_
fe_tot += fe_tot_
reject_inter = True
break
if not reject_inter:
y = 1*y_temp
else:
y, _, h, h_new, (fe_seq_, fe_tot_) = adapt_step(method, func,
t_curr, y, args, y_, y_hat, h, p, atol, rtol, pool, seq=seq,
dense=dense)
t_curr += h
fe_seq += fe_seq_
fe_tot += fe_tot_
nstp += 1
cur_stp += 1
if cur_stp > mxstep:
raise Exception('Reached Max Number of Steps. Current t = '
+ str(t_curr))
h = min(h_new, t_max - t_curr)
if not dense:
ys[-1] = 1*y
elif adaptive == "order":
t_max = t[-1]
t_index = 1
y, t_curr, k = 1*y0, t0, p
h = min(h0, t_max-t0)
sum_ks, sum_hs = 0, 0
while t_curr < t_max:
if dense:
reject_inter = True
while reject_inter:
y_temp, h, k, h_new, k_new, (fe_seq_, fe_tot_), poly = method(
func, t_curr, y, args, h, k, atol, rtol, pool, seq=seq,
dense=dense)
reject_inter = False
old_index = t_index
while t_index < len(t) and t[t_index] <= t_curr + h:
y_poly, errint, h_int = poly((t[t_index] - t_curr)/h)
if errint <= 10:
ys[t_index] = 1*y_poly
cur_stp = 0
t_index += 1
reject_inter = False
else:
h = h_int
fe_seq += fe_seq_
fe_tot += fe_tot_
reject_inter = True
t_index = old_index
break
if not reject_inter:
y = 1*y_temp
else:
y, h, k, h_new, k_new, (fe_seq_, fe_tot_) = method(func, t_curr,
y, args, h, k, atol, rtol, pool, seq=seq, dense=dense)
t_curr += h
fe_seq += fe_seq_
fe_tot += fe_tot_
sum_ks += k
sum_hs += h
nstp += 1
cur_stp += 1
if cur_stp > mxstep:
raise Exception('Reached Max Number of Steps. Current t = '
+ str(t_curr))
h = min(h_new, t_max - t_curr)
k = k_new
if not dense:
ys[-1] = 1*y
pool.close()
if full_output:
infodict = {'fe_seq': fe_seq, 'fe_tot': fe_tot, 'nstp': nstp, 'nje': 0,
'h_avg': sum_hs/nstp, 'k_avg': sum_ks/nstp}
return (ys, infodict)
else:
return ys
else:
raise Exception("\'" + str(adaptive) +
"\' is not a valid value for the argument \'adaptive\'")
pool.close()
if full_output:
infodict = {'fe_seq': fe_seq, 'fe_tot': fe_tot, 'nstp': nstp,
'h_avg': None, 'k_avg': None}
return (ys, infodict)
else:
return ys
def compute_stages_dense((func, tn, yn, args, h, k_nj_lst)):
res = []
for (k, nj) in k_nj_lst:
f_tot = 0
nj = int(nj)
Y = np.zeros((nj+1, len(yn)), dtype=(type(yn[0])))
f_yj = np.zeros((nj+1, len(yn)), dtype=(type(yn[0])))
Y[0] = yn
f_yj[0] = func(*(Y[0], tn) + args)
f_tot += 1
Y[1] = Y[0] + h/nj*f_yj[0]
for j in range(2,nj+1):
if j == nj/2 + 1:
y_half = Y[j-1]
f_yj[j-1] = func(*(Y[j-1], tn + (j-1)*(h/nj)) + args)
f_tot += 1
Y[j] = Y[j-2] + (2*h/nj)*f_yj[j-1]
f_yj[nj] = func(*(Y[nj], tn + h) + args)
f_tot += 1
res += [(k, nj, Y[nj], y_half, f_yj, f_tot)]
return res
def compute_stages((func, tn, yn, args, h, k_nj_lst)):
res = []
for (k, nj) in k_nj_lst:
nj = int(nj)
Y = np.zeros((nj+1, len(yn)), dtype=(type(yn[0])))
Y[0] = yn
Y[1] = Y[0] + h/nj*func(*(Y[0], tn) + args)
for j in range(2,nj+1):
Y[j] = Y[j-2] + (2*h/nj)*func(*(Y[j-1], tn + (j-1)*(h/nj)) + args)
res += [(k, nj, Y[nj])]
return res
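# Both stage routines above implement Gragg's modified midpoint scheme:
# one explicit Euler starting step, Y[1] = Y[0] + (h/nj)*f(Y[0], tn),
# followed by leapfrog steps Y[j] = Y[j-2] + (2*h/nj)*f(Y[j-1], .). Its
# asymptotic error expansion contains only even powers of h, which is why
# the extrapolation table below gains two orders per column (note the **2
# in its recurrence).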
def balance_load(k, seq=(lambda t: 2*t)):
if k <= NUM_WORKERS:
k_nj_lst = [[(i, seq(i))] for i in range(k, 0, -1)]
else:
k_nj_lst = [[] for i in range(NUM_WORKERS)]
index = range(NUM_WORKERS)
i = k
while 1:
if i >= NUM_WORKERS:
for j in index:
k_nj_lst[j] += [(i, seq(i))]
i -= 1
else:
for j in index:
if i == 0:
break
k_nj_lst[j] += [(i, seq(i))]
i -= 1
break
index = index[::-1]
fe_tot = 0
for i in range(len(k_nj_lst)):
fe_tot += sum([pair[1] for pair in k_nj_lst[i]])
fe_seq = sum([pair[1] for pair in k_nj_lst[0]])
return (k_nj_lst, fe_seq, fe_tot)
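# Worked example (illustrative): with NUM_WORKERS = 2, k = 4 and the default
# harmonic sequence seq = (lambda t: 2*t), the loop above deals the stages
# out as k_nj_lst = [[(4, 8), (1, 2)], [(3, 6), (2, 4)]], giving
# fe_tot = 8 + 2 + 6 + 4 = 20 and fe_seq = 8 + 2 = 10 (the first worker's
# share, which bounds the sequential cost).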
def compute_ex_table(func, tn, yn, args, h, k, pool, seq=(lambda t: 2*t),
dense=False):
"""
**Inputs**:
- func: RHS of ODE
- tn, yn: time and solution values from previous step
- args: any extra args to func
- h: proposed step size
- k: proposed # of extrapolation iterations
- pool: parallel worker pool
- seq: extrapolation step number sequence
- dense: whether to provide dense output
"""
T = np.zeros((k+1,k+1, len(yn)), dtype=(type(yn[0])))
k_nj_lst, fe_seq, fe_tot = balance_load(k, seq=seq)
jobs = [(func, tn, yn, args, h, k_nj) for k_nj in k_nj_lst]
if dense:
results = pool.map(compute_stages_dense, jobs, chunksize=1)
else:
results = pool.map(compute_stages, jobs, chunksize=1)
# process the returned results from the pool
if dense:
fe_tot = 0
y_half = (k+1)*[None]
f_yj = (k+1)*[None]
hs = (k+1)*[None]
for res in results:
for (k_, nj_, Tk_, y_half_, f_yj_, fe_tot_) in res:
T[k_, 1] = Tk_
y_half[k_] = y_half_
f_yj[k_] = f_yj_
hs[k_] = h/nj_
fe_tot += fe_tot_
else:
for res in results:
for (k_, nj_, Tk_) in res:
T[k_, 1] = Tk_
# compute extrapolation table
# only correct for midpoint method
for i in range(2, k+1):
for j in range(i, k+1):
T[j,i] = T[j,i-1] + (T[j,i-1] - T[j-1,i-1])/((seq(j)/(seq(j-i+1)))**2 - 1)
if dense:
Tkk = T[k,k]
f_Tkk = func(*(Tkk, tn+h) + args)
fe_seq += 1
fe_tot += 1
return (T, fe_seq, fe_tot, yn, Tkk, f_Tkk, y_half, f_yj, hs)
else:
return (T, fe_seq, fe_tot)
def finite_diff(j, f_yj, hj):
# Called by interpolate
max_order = 2*j
nj = len(f_yj) - 1
coeff = [1, 1]
dj = (max_order+1)*[None]
dj[1] = 1*f_yj[nj/2]
dj[2] = (f_yj[nj/2+1] - f_yj[nj/2-1])/(2*hj)
for order in range(2,max_order):
# use a fresh loop name so the comprehension does not clobber the
# parameter j (Python 2 comprehensions leak their loop variable)
coeff = [1] + [coeff[c] + coeff[c+1] for c in range(len(coeff)-1)] + [1]
index = [nj/2 + order - 2*i for i in range(order+1)]
sum_ = 0
for i in range(order+1):
sum_ += ((-1)**i)*coeff[i]*f_yj[index[i]]
dj[order+1] = sum_ / (2*hj)**order
return dj
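# finite_diff computes scaled central differences of f about the midpoint
# of the step: dj[1] is f at the midpoint, dj[2] the classic first central
# difference (f_yj[nj/2+1] - f_yj[nj/2-1])/(2*hj), and higher orders combine
# binomial coefficients (built up in coeff) divided by (2*hj)**order.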
def compute_ds(y_half, f_yj, hs, k, seq=(lambda t: 4*t-2)):
# Called by interpolate
dj_kappa = np.zeros((2*k+1, k+1), dtype=(type(y_half[1])))
ds = np.zeros((2*k+1), dtype=(type(y_half[1])))
for j in range(1,k+1):
dj_kappa[0,j] = 1*y_half[j]
nj = len(f_yj[j])-1
dj_ = finite_diff(j,f_yj[j], hs[j])
for kappa in range(1,2*j+1):
dj_kappa[kappa,j] = 1*dj_[kappa]
skip = 0
for kappa in range(2*k+1):
T = np.zeros((k+1-int(skip/2), k+1 - int(skip/2)), dtype=(type(y_half[1])))
T[:,1] = 1*dj_kappa[kappa, int(skip/2):]
# print("T1"+str(T[:,1]))
for i in range(2, k+1-int(skip/2)):
for j in range(i, k+1-int(skip/2)):
T[j,i] = T[j,i-1] + (T[j,i-1] - T[j-1,i-1])/((seq(j)/(seq(j-i+1)))**2 - 1)
ds[kappa] = 1*T[k-int(skip/2),k-int(skip/2)]
if kappa != 0:
skip +=1
return ds
def interpolate(y0, Tkk, f_Tkk, y_half, f_yj, hs, H, k, atol, rtol,
seq=(lambda t: 4*t-2)):
u = 2*k-3
u_1 = u - 1
ds = compute_ds(y_half, f_yj, hs, k, seq=seq)
print "ds->" + str(ds)
a_u = (u+5)*[None]
a_u_1 = (u_1+5)*[None]
for i in range(u+1):
a_u[i] = (H**i)*ds[i]/math.factorial(i)
for i in range(u_1 + 1):
a_u_1[i] = (H**i)*ds[i]/math.factorial(i)
A_inv_u = (2**(u-2))*np.matrix(
[[(-2*(3 + u))*(-1)**u, -(-1)**u, 2*(3 + u), -1],
[(4*(4 + u))*(-1)**u, 2*(-1)**u, 4*(4 + u), -2],
[(8*(1 + u))*(-1)**u, 4*(-1)**u, -8*(1 + u), 4],
[(-16*(2 + u))*(-1)**u, -8*(-1)**u, -16*(2 + u), 8]]
)
A_inv_u_1 = (2**(u_1-2))*np.matrix(
[[(-2*(3 + u_1))*(-1)**u_1, -(-1)**u_1, 2*(3 + u_1), -1],
[(4*(4 + u_1))*(-1)**u_1, 2*(-1)**u_1, 4*(4 + u_1), -2],
[(8*(1 + u_1))*(-1)**u_1, 4*(-1)**u_1, -8*(1 + u_1), 4],
[(-16*(2 + u_1))*(-1)**u_1, -8*(-1)**u_1, -16*(2 + u_1), 8]]
)
b1_u = 1*y0
for i in range(u+1):
b1_u -= a_u[i]/(-2)**i
b1_u_1 = 1*y0
for i in range(u_1+1):
b1_u_1 -= a_u_1[i]/(-2)**i
b2_u = H*f_yj[1][0]
for i in range(1, u+1):
b2_u -= i*a_u[i]/(-2)**(i-1)
b2_u_1 = H*f_yj[1][0]
for i in range(1, u_1+1):
b2_u_1 -= i*a_u_1[i]/(-2)**(i-1)
b3_u = 1*Tkk
for i in range(u+1):
b3_u -= a_u[i]/(2**i)
b3_u_1 = 1*Tkk
for i in range(u_1+1):
b3_u_1 -= a_u_1[i]/(2**i)
b4_u = H*f_Tkk
for i in range(1, u+1):
b4_u -= i*a_u[i]/(2**(i-1))
b4_u_1 = H*f_Tkk
for i in range(1, u_1+1):
b4_u_1 -= i*a_u_1[i]/(2**(i-1))
b_u = np.array([b1_u,b2_u,b3_u,b4_u])
b_u_1 = np.array([b1_u_1,b2_u_1,b3_u_1,b4_u_1])
x = A_inv_u*b_u
x = np.array(x)
x_1 = A_inv_u_1*b_u_1
x_1 = np.array(x_1)
a_u[u+1] = x[0]
a_u[u+2] = x[1]
a_u[u+3] = x[2]
a_u[u+4] = x[3]
a_u_1[u_1+1] = x_1[0]
a_u_1[u_1+2] = x_1[1]
a_u_1[u_1+3] = x_1[2]
a_u_1[u_1+4] = x_1[3]
# polynomial of degree u+4 defined on [0,1] and centered about 1/2
# also returns the interpolation error (errint). If errint > 10, then reject
# step
def poly(t):
res = 1*a_u[0]
for i in range(1, len(a_u)):
res += a_u[i]*((t-0.5)**i)
res_u_1 = 1*a_u_1[0]
for i in range(1, len(a_u_1)):
res_u_1 += a_u_1[i]*((t-0.5)**i)
errint = error_norm(res, res_u_1, atol, rtol)
h_int = H*((1/errint)**(1/(u+4)))
return (res, errint, h_int)
return poly
def midpoint_fixed_step(func, tn, yn, args, h, p, pool, seq=(lambda t: 2*t),
dense=False, atol=1.0e-8, rtol=1.0e-8):
# note: atol/rtol default here (an assumed fallback) because the
# fixed-step callers do not pass tolerances, yet the dense-output
# interpolation below needs them
k = int(round(p/2))
if dense:
T, fe_seq, fe_tot, y0, Tkk, f_Tkk, y_half, f_yj, hs = compute_ex_table(
func, tn, yn, args, h, k, pool, seq=seq, dense=dense)
poly = interpolate(y0, Tkk, f_Tkk, y_half, f_yj, hs, h, k, atol, rtol,
seq=seq)
return (T[k,k], T[k-1,k-1], (fe_seq, fe_tot), poly)
else:
T, fe_seq, fe_tot = compute_ex_table(func, tn, yn, args, h, k, pool,
seq=seq, dense=dense)
return (T[k,k], T[k-1,k-1], (fe_seq, fe_tot))
def midpoint_adapt_order(func, tn, yn, args, h, k, atol, rtol, pool,
seq=(lambda t: 2*t), dense=False):
k_max = 10
k_min = 3
k = min(k_max, max(k_min, k))
def A_k(k):
"""
Expected time to compute k lines of the extrapolation table,
in units of RHS evaluations.
"""
sum_ = 0
for i in range(k):
sum_ += seq(i+1)
return max(seq(k), sum_/NUM_WORKERS) # The second value is only an estimate
H_k = lambda h, k, err_k: h*0.94*(0.65/err_k)**(1/(2*k-1))
W_k = lambda Ak, Hk: Ak/Hk
if dense:
T, fe_seq, fe_tot, y0, Tkk, f_Tkk, y_half, f_yj, hs = compute_ex_table(
func, tn, yn, args, h, k, pool, seq=seq, dense=dense)
else:
T, fe_seq, fe_tot = compute_ex_table(func, tn, yn, args, h, k, pool,
seq=seq, dense=dense)
# compute the error and work function for the stages k-2 and k
err_k_2 = error_norm(T[k-2,k-3], T[k-2,k-2], atol, rtol)
err_k_1 = error_norm(T[k-1,k-2], T[k-1,k-1], atol, rtol)
err_k = error_norm(T[k,k-1], T[k,k], atol, rtol)
h_k_2 = H_k(h, k-2, err_k_2)
h_k_1 = H_k(h, k-1, err_k_1)
h_k = H_k(h, k, err_k)
w_k_2 = W_k(A_k(k-2), h_k_2)
w_k_1 = W_k(A_k(k-1), h_k_1)
w_k = W_k(A_k(k), h_k)
if err_k_1 <= 1:
# convergence in line k-1
if err_k <= 1:
y = T[k,k]
else:
y = T[k-1,k-1]
k_new = k if w_k_1 < 0.9*w_k_2 else k-1
h_new = h_k_1 if k_new <= k-1 else h_k_1*A_k(k)/A_k(k-1)
if dense:
poly = interpolate(y0, Tkk, f_Tkk, y_half, f_yj, hs, h, k, atol,
rtol, seq=seq)
elif err_k <= 1:
# convergence in line k
y = T[k,k]
k_new = k-1 if w_k_1 < 0.9*w_k else (
k+1 if w_k < 0.9*w_k_1 else k)
h_new = h_k_1 if k_new == k-1 else (
h_k if k_new == k else h_k*A_k(k+1)/A_k(k))
if dense:
poly = interpolate(y0, Tkk, f_Tkk, y_half, f_yj, hs, h, k, atol,
rtol, seq=seq)
else:
# no convergence
# reject (h, k) and restart with new values accordingly
k_new = k-1 if w_k_1 < 0.9*w_k else k
h_new = min(h_k_1 if k_new == k-1 else h_k, h)
if dense:
y, h, k, h_new, k_new, (fe_seq_, fe_tot_), poly = midpoint_adapt_order(
func, tn, yn, args, h_new, k_new, atol, rtol, pool, seq=seq,
dense=dense)
else:
y, h, k, h_new, k_new, (fe_seq_, fe_tot_) = midpoint_adapt_order(
func, tn, yn, args, h_new, k_new, atol, rtol, pool, seq=seq,
dense=dense)
fe_seq += fe_seq_
fe_tot += fe_tot_
if dense:
return (y, h, k, h_new, k_new, (fe_seq, fe_tot), poly)
else:
return (y, h, k, h_new, k_new, (fe_seq, fe_tot))
def ex_midpoint_parallel(func, y0, t, args=(), full_output=0, rtol=1.0e-8,
atol=1.0e-8, h0=0.5, mxstep=10e4, adaptive="order", p=4, nworkers=None):
'''
(An instantiation of extrapolation_parallel() function with the midpoint
method.)
Solves the system of IVPs dy/dt = func(y, t0, ...) with parallel extrapolation.
**Parameters**
- func: callable(y, t0, ...)
Computes the derivative of y at t0 (i.e. the right hand side of the
IVP). Must output a non-scalar numpy.ndarray
- y0 : numpy.ndarray
Initial condition on y (can be a vector). Must be a non-scalar
numpy.ndarray
- t : array
A sequence of time points for which to solve for y. The initial
value point should be the first element of this sequence.
- args : tuple, optional
Extra arguments to pass to function.
- full_output : bool, optional
True if to return a dictionary of optional outputs as the second
output. Defaults to False
**Returns**
- ys : numpy.ndarray, shape (len(t), len(y0))
Array containing the value of y for each desired time in t, with
the initial value y0 in the first row.
- infodict : dict, only returned if full_output == True
Dictionary containing additional output information
KEY MEANING
'fe_seq' cumulative number of sequential derivative evaluations
'fe_tot' cumulative number of total derivative evaluations
'nstp' cumulative number of successful time steps
'h_avg' average step size if adaptive == "order" (None otherwise)
'k_avg' average extrapolation order if adaptive == "order"
... (None otherwise)
**Other Parameters**
- rtol, atol : float, optional
The input parameters rtol and atol determine the error control
performed by the solver. The solver will control the vector,
e = y2 - y1, of estimated local errors in y, according to an
inequality of the form l2-norm of (e / (ewt * len(e))) <= 1,
where ewt is a vector of positive error weights computed as
ewt = atol + max(y1, y2) * rtol. rtol and atol can be either vectors
the same length as y0 or scalars. Both default to 1.0e-8.
- h0 : float, optional
The step size to be attempted on the first step. Defaults to 0.5
- mxstep : int, optional
Maximum number of (internally defined) steps allowed for each
integration point in t. Defaults to 10e4
- adaptive: string, optional
Specifies the strategy of integration. Can take three values:
-- "fixed" = use fixed step size and order strategy.
-- "step" = use adaptive step size but fixed order strategy.
-- "order" = use adaptive step size and adaptive order strategy.
Defaults to "order"
- p: int, optional
The order of extrapolation if adaptive is not "order", and the
starting order otherwise. Defaults to 4
- nworkers: int, optional
The number of workers working in parallel. If nworkers==None, then
the number of workers is set to the number of CPUs on the
running machine. Defaults to None.
'''
if len(t) > 2:
seq = lambda t: 4*t - 2 # {2,6,10,14,...} sequence for dense output
else:
seq = lambda t: 2*t # harmonic sequence for midpoint method
method = midpoint_adapt_order if adaptive == "order" else midpoint_fixed_step
return extrapolation_parallel(method, func, y0, t, args=args,
full_output=full_output, rtol=rtol, atol=atol, h0=h0, mxstep=mxstep,
adaptive=adaptive, p=p, seq=seq, nworkers=nworkers)
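# Usage sketch (illustrative only, not part of the module):
#
# import numpy as np
# f = lambda y, t: -y # dy/dt = -y with y(0) = 1
# t = np.linspace(0., 1., 11)
# ys, info = ex_midpoint_parallel(f, np.array([1.]), t, full_output=True)
# # ys[-1] should approximate np.exp(-1.); info['fe_tot'] counts RHS calls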
| mit | 6,856,311,973,943,909,000 | 35.4379 | 90 | 0.476667 | false |
sirpercival/kivy | kivy/uix/textinput.py | 1 | 92223 | # -*- encoding: utf8 -*-
'''
Text Input
==========
.. versionadded:: 1.0.4
.. image:: images/textinput-mono.jpg
.. image:: images/textinput-multi.jpg
The :class:`TextInput` widget provides a box of editable plain text.
Unicode, multiline, cursor navigation, selection and clipboard features
are supported.
.. note::
Two different coordinate systems are used with TextInput:
- (x, y) - coordinates in pixels, mostly used for rendering on screen.
- (col, row) - cursor position in characters / lines, used for selection
and cursor movement.
Usage example
-------------
To create a multiline textinput ('enter' key adds a new line)::
from kivy.uix.textinput import TextInput
textinput = TextInput(text='Hello world')
To create a singleline textinput, set the multiline property to False ('enter'
key will defocus the textinput and emit on_text_validate event)::
def on_enter(instance, value):
print('User pressed enter in', instance)
textinput = TextInput(text='Hello world', multiline=False)
textinput.bind(on_text_validate=on_enter)
The textinput's text is stored on its :attr:`TextInput.text` property. To run a
callback when the text changes::
def on_text(instance, value):
print('The widget', instance, 'has:', value)
textinput = TextInput()
textinput.bind(text=on_text)
You can 'focus' a textinput, meaning that the input box will be highlighted
and keyboard focus will be requested::
textinput = TextInput(focus=True)
The textinput is defocused if the 'escape' key is pressed, or if another
widget requests the keyboard. You can bind a callback to the focus property to
get notified of focus changes::
def on_focus(instance, value):
if value:
print('User focused', instance)
else:
print('User defocused', instance)
textinput = TextInput()
textinput.bind(focus=on_focus)
Selection
---------
The selection is automatically updated when the cursor position changes.
You can get the currently selected text from the
:attr:`TextInput.selection_text` property.
Filtering
---------
You can control which text can be added to the :class:`TextInput` by
overwriting :meth:`TextInput.insert_text`. Every string that is typed, pasted
or inserted by any other means to the :class:`TextInput` is passed through
this function. By overwriting it you can reject or change unwanted characters.
For example, to write only in capitalized characters::
class CapitalInput(TextInput):
def insert_text(self, substring, from_undo=False):
s = substring.upper()
return super(CapitalInput, self).insert_text(s,\
from_undo=from_undo)
Or to only allow floats (0 - 9 and a single period)::
class FloatInput(TextInput):
pat = re.compile('[^0-9]')
def insert_text(self, substring, from_undo=False):
pat = self.pat
if '.' in self.text:
s = re.sub(pat, '', substring)
else:
s = '.'.join([re.sub(pat, '', s) for s in\
substring.split('.', 1)])
return super(FloatInput, self).insert_text(s, from_undo=from_undo)
Default shortcuts
-----------------
=============== ========================================================
Shortcuts Description
--------------- --------------------------------------------------------
Left Move the cursor one character to the left
Right Move the cursor one character to the right
Up Move the cursor one line up
Down Move the cursor one line down
Home Move the cursor to the beginning of the line
End Move the cursor to the end of the line
PageUp Move the cursor 3 lines up
PageDown Move the cursor 3 lines down
Backspace Delete the selection or the character before the cursor
Del Delete the selection or the character after the cursor
Shift + <dir> Start a text selection. Dir can be Up, Down, Left, Right
Control + c Copy selection
Control + x Cut selection
Control + v Paste selection
Control + a Select all the content
Control + z undo
Control + r redo
=============== ========================================================
'''
__all__ = ('TextInput', )
import re
import sys
from functools import partial
from os import environ
from weakref import ref
from kivy.animation import Animation
from kivy.base import EventLoop
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.config import Config
from kivy.compat import PY2
from kivy.logger import Logger
from kivy.metrics import inch
from kivy.utils import boundary, platform
from kivy.core.text import Label
from kivy.graphics import Color, Rectangle
from kivy.graphics.texture import Texture
from kivy.uix.widget import Widget
from kivy.uix.bubble import Bubble
from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.image import Image
from kivy.properties import StringProperty, NumericProperty, \
BooleanProperty, AliasProperty, \
ListProperty, ObjectProperty, VariableListProperty, OptionProperty
Cache_register = Cache.register
Cache_append = Cache.append
Cache_get = Cache.get
Cache_remove = Cache.remove
Cache_register('textinput.label', timeout=60.)
Cache_register('textinput.width', timeout=60.)
FL_IS_NEWLINE = 0x01
# late binding
Clipboard = None
_platform = platform
# for reloading, we need to keep a list of textinput to retrigger the rendering
_textinput_list = []
# cache the result
_is_osx = sys.platform == 'darwin'
# When we are generating documentation, Config doesn't exist
_is_desktop = False
if Config:
_is_desktop = Config.getboolean('kivy', 'desktop')
# register an observer to clear the textinput cache when OpenGL will reload
if 'KIVY_DOC' not in environ:
def _textinput_clear_cache(*l):
Cache_remove('textinput.label')
Cache_remove('textinput.width')
for wr in _textinput_list[:]:
textinput = wr()
if textinput is None:
_textinput_list.remove(wr)
else:
textinput._trigger_refresh_text()
from kivy.graphics.context import get_context
get_context().add_reload_observer(_textinput_clear_cache, True)
class Selector(ButtonBehavior, Image):
# Internal class for managing the selection Handles.
def on_touch_down(self, touch):
self._touch_diff = self.top - touch.y
return super(Selector, self).on_touch_down(touch)
class TextInputCutCopyPaste(Bubble):
# Internal class used for showing the little bubble popup when
# copy/cut/paste happen.
textinput = ObjectProperty(None)
''' Holds a reference to the TextInput this Bubble belongs to.
'''
but_cut = ObjectProperty(None)
but_copy = ObjectProperty(None)
but_paste = ObjectProperty(None)
but_selectall = ObjectProperty(None)
def __init__(self, **kwargs):
self.mode = 'normal'
super(TextInputCutCopyPaste, self).__init__(**kwargs)
Clock.schedule_interval(self._check_parent, .5)
def on_textinput(self, instance, value):
if value and not Clipboard and _platform == 'android':
value._ensure_clipboard()
def _check_parent(self, dt):
# this prevents the Bubble from staying on the screen if the
# attached textinput is no longer on the screen.
parent = self.textinput
while parent is not None:
if parent == parent.parent:
break
parent = parent.parent
if parent is None:
Clock.unschedule(self._check_parent)
if self.textinput:
self.textinput._hide_cut_copy_paste()
def on_parent(self, instance, value):
parent = self.textinput
mode = self.mode
if parent:
self.clear_widgets()
if mode == 'paste':
# show only paste on long touch
self.but_selectall.opacity = 1
widget_list = [self.but_selectall, ]
if not parent.readonly:
widget_list.append(self.but_paste)
elif parent.readonly:
# show only copy for read only text input
widget_list = (self.but_copy, )
else:
# normal mode
widget_list = (self.but_cut, self.but_copy, self.but_paste)
for widget in widget_list:
self.add_widget(widget)
def do(self, action):
textinput = self.textinput
if action == 'cut':
textinput._cut(textinput.selection_text)
elif action == 'copy':
textinput._copy(textinput.selection_text)
elif action == 'paste':
textinput._paste()
elif action == 'selectall':
textinput.select_all()
self.mode = ''
anim = Animation(opacity=0, d=.333)
anim.bind(on_complete=lambda *args:
self.on_parent(self, self.parent))
anim.start(self.but_selectall)
class TextInput(Widget):
'''TextInput class. See module documentation for more information.
:Events:
`on_text_validate`
Fired only in multiline=False mode when the user hits 'enter'.
This will also unfocus the textinput.
`on_double_tap`
Fired when a double tap happens in the text input. The default
behavior selects the text around the cursor position. More info at
:meth:`on_double_tap`.
`on_triple_tap`
Fired when a triple tap happens in the text input. The default
behavior selects the line around the cursor position. More info at
:meth:`on_triple_tap`.
`on_quad_touch`
Fired when four fingers are touching the text input. The default
behavior selects the whole text. More info at
:meth:`on_quad_touch`.
.. versionchanged:: 1.7.0
`on_double_tap`, `on_triple_tap` and `on_quad_touch` events added.
'''
__events__ = ('on_text_validate', 'on_double_tap', 'on_triple_tap',
'on_quad_touch')
def __init__(self, **kwargs):
self._win = None
self._cursor_blink_time = Clock.get_time()
self._cursor = [0, 0]
self._selection = False
self._selection_finished = True
self._selection_touch = None
self.selection_text = u''
self._selection_from = None
self._selection_to = None
self._handle_left = None
self._handle_right = None
self._handle_middle = None
self._bubble = None
self._lines_flags = []
self._lines_labels = []
self._lines_rects = []
self._hint_text_flags = []
self._hint_text_labels = []
self._hint_text_rects = []
self._label_cached = None
self._line_options = None
self._keyboard = None
self._keyboard_mode = Config.get('kivy', 'keyboard_mode')
self._command_mode = False
self._command = ''
self.reset_undo()
self._touch_count = 0
self.interesting_keys = {
8: 'backspace',
13: 'enter',
127: 'del',
271: 'enter',
273: 'cursor_up',
274: 'cursor_down',
275: 'cursor_right',
276: 'cursor_left',
278: 'cursor_home',
279: 'cursor_end',
280: 'cursor_pgup',
281: 'cursor_pgdown',
303: 'shift_L',
304: 'shift_R'}
super(TextInput, self).__init__(**kwargs)
self.bind(font_size=self._trigger_refresh_line_options,
font_name=self._trigger_refresh_line_options)
self.bind(padding=self._update_text_options,
tab_width=self._update_text_options,
font_size=self._update_text_options,
font_name=self._update_text_options,
size=self._update_text_options,
password=self._update_text_options)
self.bind(pos=self._trigger_update_graphics)
self._trigger_position_handles = Clock.create_trigger(
self._position_handles)
self._trigger_show_handles = Clock.create_trigger(
self._show_handles, .05)
self._trigger_refresh_line_options()
self._trigger_refresh_text()
self.bind(pos=self._trigger_position_handles,
size=self._trigger_position_handles)
# when the gl context is reloaded, trigger the text rendering again.
_textinput_list.append(ref(self, TextInput._reload_remove_observer))
def on_disabled(self, instance, value):
if value:
self.focus = False
def on_text_validate(self):
pass
def cursor_index(self, cursor=None):
'''Return the cursor index in the text/value.
'''
if not cursor:
cursor = self.cursor
try:
l = self._lines
if len(l) == 0:
return 0
lf = self._lines_flags
index, cr = cursor
for row in range(cr):
if row >= len(l):
continue
index += len(l[row])
if lf[row] & FL_IS_NEWLINE:
index += 1
if lf[cr] & FL_IS_NEWLINE:
index += 1
return index
except IndexError:
return 0
def cursor_offset(self):
'''Get the cursor x offset on the current line.
'''
offset = 0
row = self.cursor_row
col = self.cursor_col
_lines = self._lines
if col and row < len(_lines):
offset = self._get_text_width(
_lines[row][:col], self.tab_width,
self._label_cached)
return offset
def get_cursor_from_index(self, index):
'''Return the (col, row) of the cursor for a given text index.
'''
index = boundary(index, 0, len(self.text))
if index <= 0:
return 0, 0
lf = self._lines_flags
l = self._lines
i = 0
for row in range(len(l)):
ni = i + len(l[row])
if lf[row] & FL_IS_NEWLINE:
ni += 1
i += 1
if ni >= index:
return index - i, row
i = ni
return index, row
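# Example: with text 'abc\nd', get_cursor_from_index(2) returns (2, 0),
# i.e. column 2 on row 0; note the column-first ordering.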
def select_text(self, start, end):
''' Select a portion of text displayed in this TextInput.
.. versionadded:: 1.4.0
:Parameters:
`start`
Index of textinput.text from where to start selection
`end`
Index of textinput.text till which the selection should be
displayed
'''
if end < start:
raise Exception('end must be greater than or equal to start')
m = len(self.text)
self._selection_from = boundary(start, 0, m)
self._selection_to = boundary(end, 0, m)
self._selection_finished = True
self._update_selection(True)
self._update_graphics_selection()
def select_all(self):
''' Select all of the text displayed in this TextInput.
.. versionadded:: 1.4.0
'''
self.select_text(0, len(self.text))
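# Example (hypothetical widget instance): given
# ti = TextInput(text='Hello world'), ti.select_text(0, 5) selects
# 'Hello' and ti.select_all() selects the entire text.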
re_indent = re.compile('^(\s*|)')
def _auto_indent(self, substring):
index = self.cursor_index()
_text = self._get_text(encode=False)
if index > 0:
line_start = _text.rfind('\n', 0, index)
if line_start > -1:
line = _text[line_start + 1:index]
indent = self.re_indent.match(line).group()
substring += indent
return substring
def insert_text(self, substring, from_undo=False):
'''Insert new text at the current cursor position. Override this
function in order to pre-process text for input validation.
'''
if self.readonly or not substring:
return
self._hide_handles(self._win)
# check for command modes
if ord(substring[0]) == 1:
self._command_mode = True
self._command = ''
if ord(substring[0]) == 2:
self._command_mode = False
self._command = self._command[1:]
if self._command_mode:
self._command += substring
return
_command = self._command
if _command and ord(substring[0]) == 2:
from_undo = True
_command, data = _command.split(':')
self._command = ''
if _command == 'DEL':
count = int(data)
end = self.cursor_index()
self._selection_from = max(end - count, 0)
self._selection_to = end
self._selection = True
self.delete_selection(from_undo=True)
return
elif _command == 'INSERT':
substring = data
elif _command == 'INSERTN':
from_undo = False
substring = data
if not from_undo and self.multiline and self.auto_indent \
and substring == u'\n':
substring = self._auto_indent(substring)
cc, cr = self.cursor
sci = self.cursor_index
ci = sci()
text = self._lines[cr]
len_str = len(substring)
new_text = text[:cc] + substring + text[cc:]
self._set_line_text(cr, new_text)
wrap = (self._get_text_width(
new_text,
self.tab_width,
self._label_cached) > self.width)
if len_str > 1 or substring == u'\n' or wrap:
# Avoid refreshing text on every keystroke.
# Allows for faster typing of text when the amount of text in
# TextInput gets large.
start, finish, lines,\
lineflags, len_lines = self._get_line_from_cursor(cr, new_text)
# calling trigger here could lead to wrong cursor positioning
# and repeated text when keys are added rapidly in an automated
# fashion, from an Android keyboard for example.
self._refresh_text_from_property('insert', start, finish, lines,
lineflags, len_lines)
self.cursor = self.get_cursor_from_index(ci + len_str)
# handle undo and redo
self._set_unredo_insert(ci, ci + len_str, substring, from_undo)
def _get_line_from_cursor(self, start, new_text):
# get current paragraph from cursor position
finish = start
lines = self._lines
linesflags = self._lines_flags
if start and not linesflags[start]:
start -= 1
new_text = u''.join((lines[start], new_text))
try:
while not linesflags[finish + 1]:
new_text = u''.join((new_text, lines[finish + 1]))
finish += 1
except IndexError:
pass
lines, lineflags = self._split_smart(new_text)
len_lines = max(1, len(lines))
return start, finish, lines, lineflags, len_lines
def _set_unredo_insert(self, ci, sci, substring, from_undo):
# handle undo and redo
if from_undo:
return
self._undo.append({'undo_command': ('insert', ci, sci),
'redo_command': (ci, substring)})
# reset redo when undo is appended to
self._redo = []
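# Example: typing 'x' at text index 3 pushes
# {'undo_command': ('insert', 3, 4), 'redo_command': (3, 'x')}
# onto self._undo, so do_undo() can delete the range [3, 4) and
# do_redo() can re-insert 'x' at index 3.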
def reset_undo(self):
'''Reset undo and redo lists from memory.
.. versionadded:: 1.3.0
'''
self._redo = self._undo = []
def do_redo(self):
'''Do redo operation.
.. versionadded:: 1.3.0
This action re-does any command that has been un-done by
do_undo/ctrl+z. This function is automatically called when
`ctrl+r` keys are pressed.
'''
try:
x_item = self._redo.pop()
undo_type = x_item['undo_command'][0]
_get_cursor_from_index = self.get_cursor_from_index
if undo_type == 'insert':
ci, substring = x_item['redo_command']
self.cursor = _get_cursor_from_index(ci)
self.insert_text(substring, True)
elif undo_type == 'bkspc':
self.cursor = _get_cursor_from_index(x_item['redo_command'])
self.do_backspace(from_undo=True)
else:
# delsel
ci, sci = x_item['redo_command']
self._selection_from = ci
self._selection_to = sci
self._selection = True
self.delete_selection(True)
self.cursor = _get_cursor_from_index(ci)
self._undo.append(x_item)
except IndexError:
# the redo list is empty
pass
def do_undo(self):
'''Do undo operation.
.. versionadded:: 1.3.0
This action un-does any edits that have been made since the last
call to reset_undo().
This function is automatically called when `ctrl+z` keys are pressed.
'''
try:
x_item = self._undo.pop()
undo_type = x_item['undo_command'][0]
self.cursor = self.get_cursor_from_index(x_item['undo_command'][1])
if undo_type == 'insert':
ci, sci = x_item['undo_command'][1:]
self._selection_from = ci
self._selection_to = sci
self._selection = True
self.delete_selection(True)
elif undo_type == 'bkspc':
substring = x_item['undo_command'][2:][0]
self.insert_text(substring, True)
else:
# delsel
substring = x_item['undo_command'][2:][0]
self.insert_text(substring, True)
self._redo.append(x_item)
except IndexError:
# the undo list is empty
pass
def do_backspace(self, from_undo=False, mode='bkspc'):
'''Do backspace operation from the current cursor position.
This action might do several things:
- remove the current selection, if available.
- remove the previous char and move the cursor back.
- do nothing, if we are at the start.
'''
if self.readonly:
return
cc, cr = self.cursor
_lines = self._lines
text = _lines[cr]
cursor_index = self.cursor_index()
text_last_line = _lines[cr - 1]
if cc == 0 and cr == 0:
return
_lines_flags = self._lines_flags
start = cr
if cc == 0:
substring = u'\n' if _lines_flags[cr] else u' '
new_text = text_last_line + text
self._set_line_text(cr - 1, new_text)
self._delete_line(cr)
start = cr - 1
else:
#ch = text[cc-1]
substring = text[cc - 1]
new_text = text[:cc - 1] + text[cc:]
self._set_line_text(cr, new_text)
# refresh just the current line instead of the whole text
start, finish, lines, lineflags, len_lines =\
self._get_line_from_cursor(start, new_text)
# avoid trigger refresh, leads to issue with
# keys/text send rapidly through code.
self._refresh_text_from_property('del', start, finish, lines,
lineflags, len_lines)
self.cursor = self.get_cursor_from_index(cursor_index - 1)
# handle undo and redo
self._set_undo_redo_bkspc(
cursor_index,
cursor_index - 1,
substring, from_undo)
def _set_undo_redo_bkspc(self, ol_index, new_index, substring, from_undo):
# handle undo and redo for backspace
if from_undo:
return
self._undo.append({
'undo_command': ('bkspc', new_index, substring),
'redo_command': ol_index})
# reset redo when undo is appended to
self._redo = []
def do_cursor_movement(self, action):
'''Move the cursor relative to its current position.
Action can be one of:
- cursor_left: move the cursor one character to the left
- cursor_right: move the cursor one character to the right
- cursor_up: move the cursor to the previous line
- cursor_down: move the cursor to the next line
- cursor_home: move the cursor to the start of the current line
- cursor_end: move the cursor to the end of the current line
- cursor_pgup: move one "page" up
- cursor_pgdown: move one "page" down
.. warning::
The current page is defined as three lines before/after the cursor.
'''
pgmove_speed = 3
col, row = self.cursor
if action == 'cursor_up':
row = max(row - 1, 0)
col = min(len(self._lines[row]), col)
elif action == 'cursor_down':
row = min(row + 1, len(self._lines) - 1)
col = min(len(self._lines[row]), col)
elif action == 'cursor_left':
if col == 0:
if row:
row -= 1
col = len(self._lines[row])
else:
col, row = col - 1, row
elif action == 'cursor_right':
if col == len(self._lines[row]):
if row < len(self._lines) - 1:
col = 0
row += 1
else:
col, row = col + 1, row
elif action == 'cursor_home':
col = 0
elif action == 'cursor_end':
col = len(self._lines[row])
elif action == 'cursor_pgup':
row /= pgmove_speed
col = min(len(self._lines[row]), col)
elif action == 'cursor_pgdown':
row = min((row + 1) * pgmove_speed,
len(self._lines) - 1)
col = min(len(self._lines[row]), col)
self.cursor = (col, row)
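# For example, textinput.do_cursor_movement('cursor_end') moves the
# cursor to the end of the current line (see the action list in the
# docstring above).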
def get_cursor_from_xy(self, x, y):
'''Return the (col, row) of the cursor for an (x, y) position.
'''
padding_left = self.padding[0]
padding_top = self.padding[1]
l = self._lines
dy = self.line_height + self.line_spacing
cx = x - self.x
scrl_y = self.scroll_y
scrl_x = self.scroll_x
scrl_y = scrl_y / dy if scrl_y > 0 else 0
cy = (self.top - padding_top + scrl_y * dy) - y
cy = int(boundary(round(cy / dy - 0.5), 0, len(l) - 1))
dcx = 0
_get_text_width = self._get_text_width
_tab_width = self.tab_width
_label_cached = self._label_cached
for i in range(1, len(l[cy]) + 1):
if _get_text_width(l[cy][:i],
_tab_width,
_label_cached) + padding_left >= cx + scrl_x:
break
dcx = i
cx = dcx
return cx, cy
#
# Selection control
#
def cancel_selection(self):
'''Cancel current selection (if any).
'''
self._selection_from = self._selection_to = self.cursor_index()
self._selection = False
self._selection_finished = True
self._selection_touch = None
self._trigger_update_graphics()
def delete_selection(self, from_undo=False):
'''Delete the current text selection (if any).
'''
if self.readonly:
return
self._hide_handles(self._win)
scrl_x = self.scroll_x
scrl_y = self.scroll_y
cc, cr = self.cursor
if not self._selection:
return
v = self._get_text(encode=False)
a, b = self._selection_from, self._selection_to
if a > b:
a, b = b, a
self.cursor = cursor = self.get_cursor_from_index(a)
start = cursor
finish = self.get_cursor_from_index(b)
cur_line = self._lines[start[1]][:start[0]] +\
self._lines[finish[1]][finish[0]:]
lines, lineflags = self._split_smart(cur_line)
len_lines = len(lines)
if start[1] == finish[1]:
self._set_line_text(start[1], cur_line)
else:
self._refresh_text_from_property('del', start[1], finish[1], lines,
lineflags, len_lines)
self.scroll_x = scrl_x
self.scroll_y = scrl_y
# handle undo and redo for delete selection
self._set_unredo_delsel(a, b, v[a:b], from_undo)
self.cancel_selection()
def _set_unredo_delsel(self, a, b, substring, from_undo):
# handle undo and redo for delete selection
if from_undo:
return
self._undo.append({
'undo_command': ('delsel', a, substring),
'redo_command': (a, b)})
# reset redo when undo is appended to
self._redo = []
def _update_selection(self, finished=False):
'''Update selection text and order of from/to if finished is True.
Can be called multiple times until finished is True.
'''
a, b = self._selection_from, self._selection_to
if a > b:
a, b = b, a
self._selection_finished = finished
_selection_text = self._get_text(encode=False)[a:b]
self.selection_text = ("" if not self.allow_copy else
(('*' * (b - a)) if self.password else
_selection_text))
if not finished:
self._selection = True
else:
self._selection = bool(len(_selection_text))
self._selection_touch = None
if a == 0:
# update graphics only on new line
# allows smoother scrolling, noticeably
# faster when dealing with large text.
self._update_graphics_selection()
#self._trigger_update_graphics()
#
# Touch control
#
def long_touch(self, dt):
if self._selection_to == self._selection_from:
self._show_cut_copy_paste(self._long_touch_pos,
self._win,
mode='paste')
def on_double_tap(self):
'''This event is dispatched when a double tap happens
inside TextInput. The default behavior is to select the
word around the current cursor position. Override this to provide
different behavior. Alternatively, you can bind to this
event to provide additional functionality.
'''
ci = self.cursor_index()
cc = self.cursor_col
line = self._lines[self.cursor_row]
len_line = len(line)
start = max(0, len(line[:cc]) - line[:cc].rfind(u' ') - 1)
end = line[cc:].find(u' ')
end = end if end > -1 else (len_line - cc)
Clock.schedule_once(lambda dt: self.select_text(ci - start, ci + end))
def on_triple_tap(self):
'''This event is dispatched when a triple tap happens
inside TextInput. The default behavior is to select the
line around current cursor position. Override this to provide
different behavior. Alternatively, you can bind to this
event to provide additional functionality.
'''
ci = self.cursor_index()
cc = self.cursor_col
line = self._lines[self.cursor_row]
len_line = len(line)
Clock.schedule_once(lambda dt:
self.select_text(ci - cc, ci + (len_line - cc)))
def on_quad_touch(self):
'''This event is dispatched when four fingers are touching
inside TextInput. The default behavior is to select all text.
Override this to provide different behavior. Alternatively,
you can bind to this event to provide additional functionality.
'''
Clock.schedule_once(lambda dt: self.select_all())
def on_touch_down(self, touch):
if self.disabled:
return
touch_pos = touch.pos
if not self.collide_point(*touch_pos):
if self._keyboard_mode == 'multi':
if self.readonly:
self.focus = False
else:
self.focus = False
return False
if not self.focus:
self.focus = True
touch.grab(self)
self._touch_count += 1
if touch.is_double_tap:
self.dispatch('on_double_tap')
if touch.is_triple_tap:
self.dispatch('on_triple_tap')
if self._touch_count == 4:
self.dispatch('on_quad_touch')
win = self._win
if not win:
self._win = win = EventLoop.window
if not win:
Logger.warning('Textinput: '
'Cannot show bubble, unable to get root window')
return True
self._hide_cut_copy_paste(self._win)
# schedule long touch for paste
self._long_touch_pos = touch.pos
Clock.schedule_once(self.long_touch, .5)
self.cursor = self.get_cursor_from_xy(*touch_pos)
if not self._selection_touch:
self.cancel_selection()
self._selection_touch = touch
self._selection_from = self._selection_to = self.cursor_index()
self._update_selection()
return False
def on_touch_move(self, touch):
if touch.grab_current is not self:
return
if not self.focus:
touch.ungrab(self)
if self._selection_touch is touch:
self._selection_touch = None
return False
if self._selection_touch is touch:
self.cursor = self.get_cursor_from_xy(touch.x, touch.y)
self._selection_to = self.cursor_index()
self._update_selection()
return True
def on_touch_up(self, touch):
if touch.grab_current is not self:
return
touch.ungrab(self)
self._touch_count -= 1
# cancel the long touch scheduled for paste
Clock.unschedule(self.long_touch)
if not self.focus:
return False
if self._selection_touch is touch:
self._selection_to = self.cursor_index()
self._update_selection(True)
# show Bubble
win = self._win
if self._selection_to != self._selection_from:
self._show_cut_copy_paste(touch.pos, win)
elif self.use_handles:
self._hide_handles()
handle_middle = self._handle_middle
if handle_middle is None:
self._handle_middle = handle_middle = Selector(
source=self.handle_image_middle,
size_hint=(None, None),
size=('45dp', '45dp'))
handle_middle.bind(on_press=self._handle_pressed,
on_touch_move=self._handle_move,
on_release=self._handle_released)
if not self._handle_middle.parent and self.text:
self._win.add_widget(handle_middle)
self._position_handles(mode='middle')
return True
def _handle_pressed(self, instance):
self._hide_cut_copy_paste()
sf, st = self._selection_from, self.selection_to
if sf > st:
self._selection_from, self._selection_to = st, sf
def _handle_released(self, instance):
sf, st = self._selection_from, self.selection_to
if sf == st:
return
self._update_selection()
self._show_cut_copy_paste(
(instance.x + ((1 if instance is self._handle_left else - 1)
* self._bubble.width / 2) if self._bubble else 0,
instance.y + self.line_height), self._win)
def _handle_move(self, instance, touch):
if touch.grab_current != instance:
return
get_cursor = self.get_cursor_from_xy
handle_right = self._handle_right
handle_left = self._handle_left
handle_middle = self._handle_middle
x, y = self.to_widget(*touch.pos)
cursor = get_cursor(
x,
y + instance._touch_diff + (self.line_height / 2))
if instance != touch.grab_current:
return
if instance == handle_middle:
self.cursor = cursor
self._position_handles(mode='middle')
return
ci = self.cursor_index(cursor=cursor)
sf, st = self._selection_from, self.selection_to
if instance == handle_left:
self._selection_from = ci
elif instance == handle_right:
self._selection_to = ci
self._trigger_update_graphics()
self._trigger_position_handles()
def _position_handles(self, *args, **kwargs):
if not self.text:
return
mode = kwargs.get('mode', 'both')
lh = self.line_height
to_win = self.to_window
handle_middle = self._handle_middle
if handle_middle:
hp_mid = self.cursor_pos
pos = to_win(*hp_mid)
handle_middle.x = pos[0] - handle_middle.width / 2
handle_middle.top = pos[1] - lh
if mode[0] == 'm':
return
group = self.canvas.get_group('selection')
if not group:
return
self._win.remove_widget(self._handle_middle)
handle_left = self._handle_left
if not handle_left:
return
hp_left = group[2].pos
handle_left.pos = to_win(*hp_left)
handle_left.x -= handle_left.width
handle_left.y -= handle_left.height
handle_right = self._handle_right
last_rect = group[-1]
hp_right = last_rect.pos[0], last_rect.pos[1]
x, y = to_win(*hp_right)
handle_right.x = x + last_rect.size[0]
handle_right.y = y - handle_right.height
def _hide_handles(self, win=None):
win = win or self._win
if win is None:
return
self._win.remove_widget(self._handle_right)
self._win.remove_widget(self._handle_left)
self._win.remove_widget(self._handle_middle)
def _hide_cut_copy_paste(self, win=None):
win = win or self._win
if win is None:
return
bubble = self._bubble
if bubble is not None:
anim = Animation(opacity=0, d=.225)
anim.bind(on_complete=lambda *args: win.remove_widget(bubble))
anim.start(bubble)
def _show_handles(self, dt):
if not self.use_handles or not self.text:
return
win = self._win
if not win:
self._set_window()
win = self._win
handle_right = self._handle_right
handle_left = self._handle_left
if self._handle_left is None:
self._handle_left = handle_left = Selector(
source=self.handle_image_left,
size_hint=(None, None),
size=('45dp', '45dp'))
handle_left.bind(on_press=self._handle_pressed,
on_touch_move=self._handle_move,
on_release=self._handle_released)
self._handle_right = handle_right = Selector(
source=self.handle_image_right,
size_hint=(None, None),
size=('45dp', '45dp'))
handle_right.bind(on_press=self._handle_pressed,
on_touch_move=self._handle_move,
on_release=self._handle_released)
else:
if self._handle_left.parent:
self._position_handles()
return
if not self.parent:
return
self._trigger_position_handles()
if self.selection_from != self.selection_to:
self._handle_left.opacity = self._handle_right.opacity = 0
win.add_widget(self._handle_left)
win.add_widget(self._handle_right)
anim = Animation(opacity=1, d=.4)
anim.start(self._handle_right)
anim.start(self._handle_left)
def _show_cut_copy_paste(self, pos, win, parent_changed=False,
mode='', *l):
# Show a bubble with cut copy and paste buttons
if not self.use_bubble:
return
bubble = self._bubble
if bubble is None:
self._bubble = bubble = TextInputCutCopyPaste(textinput=self)
self.bind(parent=partial(self._show_cut_copy_paste,
pos, win, True))
else:
win.remove_widget(bubble)
if not self.parent:
return
if parent_changed:
return
# Search the position from the touch to the window
lh, ls = self.line_height, self.line_spacing
x, y = pos
t_pos = self.to_window(x, y)
bubble_size = bubble.size
win_size = win.size
bubble.pos = (t_pos[0] - bubble_size[0] / 2., t_pos[1] + inch(.25))
bubble_pos = bubble.pos
if bubble_pos[0] < 0:
# bubble beyond left of window
if bubble.pos[1] > (win_size[1] - bubble_size[1]):
# bubble above window height
bubble.pos = (0, (t_pos[1]) - (bubble_size[1] + lh + ls))
bubble.arrow_pos = 'top_left'
else:
bubble.pos = (0, bubble_pos[1])
bubble.arrow_pos = 'bottom_left'
elif bubble.right > win_size[0]:
# bubble beyond right of window
if bubble_pos[1] > (win_size[1] - bubble_size[1]):
# bubble above window height
bubble.pos = (win_size[0] - bubble_size[0],
(t_pos[1]) - (bubble_size[1] + lh + ls))
bubble.arrow_pos = 'top_right'
else:
bubble.right = win_size[0]
bubble.arrow_pos = 'bottom_right'
else:
if bubble_pos[1] > (win_size[1] - bubble_size[1]):
# bubble above window height
bubble.pos = (bubble_pos[0],
(t_pos[1]) - (bubble_size[1] + lh + ls))
bubble.arrow_pos = 'top_mid'
else:
bubble.arrow_pos = 'bottom_mid'
bubble.mode = mode
Animation.cancel_all(bubble)
bubble.opacity = 0
win.add_widget(bubble)
Animation(opacity=1, d=.225).start(bubble)
#
# Private
#
@staticmethod
def _reload_remove_observer(wr):
# called when the textinput is deleted
if wr in _textinput_list:
_textinput_list.remove(wr)
def _set_window(self, *largs):
win = self._win
if not win:
self._win = win = EventLoop.window
if not win:
# we got an argument; it could be the previously scheduled
# call, so cancel the focus.
if len(largs):
Logger.warning('Textinput: '
'Cannot focus the element, unable to get '
'root window')
return
else:
# XXX where does `value` come from?
Clock.schedule_once(partial(self.on_focus, self, largs), 0)
return
def on_focus(self, instance, value, *largs):
self._set_window(*largs)
if value:
if self.keyboard_mode != 'managed':
self._bind_keyboard()
else:
if self.keyboard_mode != 'managed':
self._unbind_keyboard()
def _unbind_keyboard(self):
self._set_window()
win = self._win
if self._keyboard:
keyboard = self._keyboard
keyboard.unbind(
on_key_down=self._keyboard_on_key_down,
on_key_up=self._keyboard_on_key_up)
keyboard.release()
self._keyboard = None
self.cancel_selection()
Clock.unschedule(self._do_blink_cursor)
self._hide_cut_copy_paste(win)
self._hide_handles(win)
self._win = None
def _bind_keyboard(self):
self._set_window()
win = self._win
self._editable = editable = (not (self.readonly or self.disabled) or
_is_desktop and
self._keyboard_mode == 'system')
if not _is_desktop and not editable:
return
keyboard = win.request_keyboard(
self._keyboard_released, self, input_type=self.input_type)
self._keyboard = keyboard
if editable:
keyboard.bind(
on_key_down=self._keyboard_on_key_down,
on_key_up=self._keyboard_on_key_up)
Clock.schedule_interval(self._do_blink_cursor, 1 / 2.)
else:
# in non-editable mode, we still want shortcut (as copy)
keyboard.bind(
on_key_down=self._keyboard_on_key_down)
def on_readonly(self, instance, value):
if not value:
self.focus = False
def _ensure_clipboard(self):
global Clipboard
if hasattr(self, '_clip_mime_type'):
return
if Clipboard is None:
from kivy.core.clipboard import Clipboard # NOQA
if _platform == 'win':
self._clip_mime_type = 'text/plain;charset=utf-8'
# windows clipboard uses a utf-16 encoding
self._encoding = 'utf-16'
elif _platform == 'linux':
self._clip_mime_type = 'UTF8_STRING'
self._encoding = 'utf-8'
else:
self._clip_mime_type = 'text/plain'
self._encoding = 'utf-8'
def cut(self):
''' Copy current selection to clipboard then delete it from TextInput.
.. versionadded:: 1.8.0
'''
self._cut(self.selection_text)
def _cut(self, data):
self._copy(data)
self.delete_selection()
def copy(self, data=''):
''' Copy the value provided in argument `data` into current clipboard.
If data is not of type string it will be converted to string.
If no data is provided then current selection if present is copied.
.. versionadded:: 1.8.0
'''
if data:
self._copy(data)
return
if self.selection_text:
self._copy(self.selection_text)
def _copy(self, data):
# explicitly terminate strings with a null character
# so as to avoid putting spurious data after the end.
# MS windows issue.
self._ensure_clipboard()
data = data.encode(self._encoding) + b'\x00'
Clipboard.put(data, self._clip_mime_type)
def paste(self):
''' Insert text from system :class:`~kivy.core.clipboard.Clipboard`
into the :class:`~kivy.uix.textinput.TextInput` at current cursor
position.
.. versionadded:: 1.8.0
'''
self._paste()
def _paste(self):
self._ensure_clipboard()
_clip_types = Clipboard.get_types()
mime_type = self._clip_mime_type
if mime_type not in _clip_types:
mime_type = 'text/plain'
data = Clipboard.get(mime_type)
if data is not None:
# decode only if we don't have unicode
# we would still need to decode from utf-16 (windows)
# data is of type bytes in PY3
data = data.decode(self._encoding, 'ignore')
# remove null characters; mostly a Windows issue
data = data.replace(u'\x00', u'')
self.delete_selection()
self.insert_text(data)
data = None
def _keyboard_released(self):
# Callback called by the window when the real keyboard is taken by
# someone else.
# FIXME: handle virtual keyboard.
self.focus = False
def _get_text_width(self, text, tab_width, _label_cached):
# Return the width of a text, according to the current line options
kw = self._get_line_options()
try:
cid = u'{}\0{}'.format(text, kw)
except UnicodeDecodeError:
cid = '{}\0{}'.format(text, kw)
width = Cache_get('textinput.width', cid)
if width:
return width
if not _label_cached:
_label_cached = self._label_cached
text = text.replace('\t', ' ' * tab_width)
if not self.password:
width = _label_cached.get_extents(text)[0]
else:
width = _label_cached.get_extents('*' * len(text))[0]
Cache_append('textinput.width', cid, width)
return width
def _do_blink_cursor(self, dt):
# Callback called by the timer to blink the cursor, according to the
# last activity in the widget
b = (Clock.get_time() - self._cursor_blink_time)
self.cursor_blink = int(b * 2) % 2
def on_cursor(self, instance, value):
# When the cursor is moved, reset the activity timer, and update all
# the graphics.
self._cursor_blink_time = Clock.get_time()
self._trigger_update_graphics()
def _delete_line(self, idx):
# Delete current line, and fix cursor position
assert idx < len(self._lines)
self._lines_flags.pop(idx)
self._lines_labels.pop(idx)
self._lines.pop(idx)
self.cursor = self.cursor
def _set_line_text(self, line_num, text):
# Set current line with other text than the default one.
self._lines_labels[line_num] = self._create_line_label(text)
self._lines[line_num] = text
def _trigger_refresh_line_options(self, *largs):
Clock.unschedule(self._refresh_line_options)
Clock.schedule_once(self._refresh_line_options, 0)
def _refresh_line_options(self, *largs):
self._line_options = None
self._get_line_options()
self._refresh_text_from_property()
self._refresh_hint_text()
self.cursor = self.get_cursor_from_index(len(self.text))
def _trigger_refresh_text(self, *largs):
if len(largs) and largs[0] == self:
largs = ()
Clock.unschedule(lambda dt: self._refresh_text_from_property(*largs))
Clock.schedule_once(lambda dt:
self._refresh_text_from_property(*largs))
def _update_text_options(self, *largs):
Cache_remove('textinput.width')
self._trigger_refresh_text()
def _refresh_text_from_trigger(self, dt, *largs):
self._refresh_text_from_property(*largs)
def _refresh_text_from_property(self, *largs):
self._refresh_text(self._get_text(encode=False), *largs)
def _refresh_text(self, text, *largs):
# Refresh all the lines from a new text.
# By using cache in internal functions, this method should be fast.
mode = 'all'
if len(largs) > 1:
mode, start, finish, _lines, _lines_flags, len_lines = largs
#start = max(0, start)
else:
_lines, self._lines_flags = self._split_smart(text)
_lines_labels = []
_line_rects = []
_create_label = self._create_line_label
for x in _lines:
lbl = _create_label(x)
_lines_labels.append(lbl)
_line_rects.append(
Rectangle(size=(lbl.size if lbl else (0, 0))))
lbl = None
if mode == 'all':
self._lines_labels = _lines_labels
self._lines_rects = _line_rects
self._lines = _lines
elif mode == 'del':
if finish > start:
self._insert_lines(start,
finish if start == finish else (finish + 1),
len_lines, _lines_flags,
_lines, _lines_labels, _line_rects)
elif mode == 'insert':
self._insert_lines(
start,
finish if (start == finish and not len_lines)
else (finish + 1),
len_lines, _lines_flags, _lines, _lines_labels,
_line_rects)
line_label = _lines_labels[0]
min_line_ht = self._label_cached.get_extents('_')[1]
if line_label is None:
self.line_height = max(1, min_line_ht)
else:
# with markup texture can be of height `1`
self.line_height = max(line_label.height, min_line_ht)
#self.line_spacing = 2
# if the text changed, the cursor may no longer be at the same place
# as before, so try to restore it to the right position
row = self.cursor_row
self.cursor = self.get_cursor_from_index(self.cursor_index())
# if the cursor moved to another line, reset the horizontal scroll;
# otherwise the effect is ugly
if self.cursor_row != row:
self.scroll_x = 0
# with the new text don't forget to update graphics again
self._trigger_update_graphics()
def _insert_lines(self, start, finish, len_lines, _lines_flags,
_lines, _lines_labels, _line_rects):
self_lines_flags = self._lines_flags
_lins_flags = []
_lins_flags.extend(self_lines_flags[:start])
if len_lines:
# if not inserting at the first line then
if start:
# make sure the line flags are restored for the first line;
# _split_smart assumes the first line is not a new line
_lines_flags[0] = self_lines_flags[start]
_lins_flags.extend(_lines_flags)
_lins_flags.extend(self_lines_flags[finish:])
self._lines_flags = _lins_flags
_lins_lbls = []
_lins_lbls.extend(self._lines_labels[:start])
if len_lines:
_lins_lbls.extend(_lines_labels)
_lins_lbls.extend(self._lines_labels[finish:])
self._lines_labels = _lins_lbls
_lins_rcts = []
_lins_rcts.extend(self._lines_rects[:start])
if len_lines:
_lins_rcts.extend(_line_rects)
_lins_rcts.extend(self._lines_rects[finish:])
self._lines_rects = _lins_rcts
_lins = []
_lins.extend(self._lines[:start])
if len_lines:
_lins.extend(_lines)
_lins.extend(self._lines[finish:])
self._lines = _lins
def _trigger_update_graphics(self, *largs):
Clock.unschedule(self._update_graphics)
Clock.schedule_once(self._update_graphics, -1)
def _update_graphics(self, *largs):
# Update all the graphics according to the current internal values.
#
# This is a little bit complex, because we have to:
# - handle scroll_x
# - handle padding
# - create rectangle for the lines matching the viewport
# - crop the texture coordinates to match the viewport
#
# This is the first step of graphics, the second is the selection.
self.canvas.clear()
add = self.canvas.add
lh = self.line_height
dy = lh + self.line_spacing
# adjust view if the cursor is going outside the bounds
sx = self.scroll_x
sy = self.scroll_y
# draw labels
if not self.focus and (not self._lines or (
not self._lines[0] and len(self._lines) == 1)):
rects = self._hint_text_rects
labels = self._hint_text_labels
lines = self._hint_text_lines
else:
rects = self._lines_rects
labels = self._lines_labels
lines = self._lines
padding_left, padding_top, padding_right, padding_bottom = self.padding
x = self.x + padding_left
y = self.top - padding_top + sy
miny = self.y + padding_bottom
maxy = self.top - padding_top
for line_num, value in enumerate(lines):
if miny <= y <= maxy + dy:
texture = labels[line_num]
if not texture:
y -= dy
continue
size = list(texture.size)
texc = texture.tex_coords[:]
                # calculate texture coordinates
viewport_pos = sx, 0
vw = self.width - padding_left - padding_right
vh = self.height - padding_top - padding_bottom
tw, th = list(map(float, size))
oh, ow = tch, tcw = texc[1:3]
tcx, tcy = 0, 0
# adjust size/texcoord according to viewport
if vw < tw:
tcw = (vw / tw) * tcw
size[0] = vw
if vh < th:
tch = (vh / th) * tch
size[1] = vh
if viewport_pos:
tcx, tcy = viewport_pos
tcx = tcx / tw * (ow)
tcy = tcy / th * oh
# cropping
mlh = lh
if y > maxy:
vh = (maxy - y + lh)
tch = (vh / float(lh)) * oh
tcy = oh - tch
size[1] = vh
if y - lh < miny:
diff = miny - (y - lh)
y += diff
vh = lh - diff
tch = (vh / float(lh)) * oh
size[1] = vh
texc = (
tcx,
tcy + tch,
tcx + tcw,
tcy + tch,
tcx + tcw,
tcy,
tcx,
tcy)
# add rectangle.
r = rects[line_num]
r.pos = int(x), int(y - mlh)
r.size = size
r.texture = texture
r.tex_coords = texc
add(r)
y -= dy
self._update_graphics_selection()
def _update_graphics_selection(self):
if not self._selection:
return
self.canvas.remove_group('selection')
dy = self.line_height + self.line_spacing
rects = self._lines_rects
padding_top = self.padding[1]
padding_bottom = self.padding[3]
_top = self.top
y = _top - padding_top + self.scroll_y
miny = self.y + padding_bottom
maxy = _top - padding_top
draw_selection = self._draw_selection
a, b = self._selection_from, self._selection_to
if a > b:
a, b = b, a
get_cursor_from_index = self.get_cursor_from_index
s1c, s1r = get_cursor_from_index(a)
s2c, s2r = get_cursor_from_index(b)
s2r += 1
        # pass only the lines inside the selection;
        # passing all the lines can get slow when dealing with a lot of text
y -= s1r * dy
_lines = self._lines
_get_text_width = self._get_text_width
tab_width = self.tab_width
_label_cached = self._label_cached
width = self.width
padding_left = self.padding[0]
padding_right = self.padding[2]
x = self.x
canvas_add = self.canvas.add
selection_color = self.selection_color
for line_num, value in enumerate(_lines[s1r:s2r], start=s1r):
if miny <= y <= maxy + dy:
r = rects[line_num]
draw_selection(r.pos, r.size, line_num, (s1c, s1r),
(s2c, s2r - 1), _lines, _get_text_width,
tab_width, _label_cached, width,
padding_left, padding_right, x,
canvas_add, selection_color)
y -= dy
self._position_handles('both')
def _draw_selection(self, *largs):
pos, size, line_num, (s1c, s1r), (s2c, s2r),\
_lines, _get_text_width, tab_width, _label_cached, width,\
padding_left, padding_right, x, canvas_add, selection_color = largs
# Draw the current selection on the widget.
if line_num < s1r or line_num > s2r:
return
x, y = pos
w, h = size
x1 = x
x2 = x + w
if line_num == s1r:
lines = _lines[line_num]
x1 -= self.scroll_x
x1 += _get_text_width(lines[:s1c], tab_width, _label_cached)
if line_num == s2r:
lines = _lines[line_num]
x2 = (x - self.scroll_x) + _get_text_width(lines[:s2c],
tab_width,
_label_cached)
width_minus_padding = width - (padding_right + padding_left)
maxx = x + width_minus_padding
if x1 > maxx:
return
x1 = max(x1, x)
x2 = min(x2, x + width_minus_padding)
canvas_add(Color(*selection_color, group='selection'))
canvas_add(Rectangle(
pos=(x1, pos[1]), size=(x2 - x1, size[1]), group='selection'))
def on_size(self, instance, value):
        # if the size changes, the current scrolling and text split may become
        # invalid; the text may need to be re-split after size_hint has been
        # resolved.
self._trigger_refresh_text()
self._refresh_hint_text()
self.scroll_x = self.scroll_y = 0
def _get_cursor_pos(self):
# return the current cursor x/y from the row/col
dy = self.line_height + self.line_spacing
padding_left = self.padding[0]
padding_top = self.padding[1]
left = self.x + padding_left
top = self.top - padding_top
y = top + self.scroll_y
y -= self.cursor_row * dy
x, y = left + self.cursor_offset() - self.scroll_x, y
if x < left:
self.scroll_x = 0
x = left
if y > top:
y = top
self.scroll_y = 0
return x, y
def _get_line_options(self):
# Get or create line options, to be used for Label creation
if self._line_options is None:
self._line_options = kw = {
'font_size': self.font_size,
'font_name': self.font_name,
'anchor_x': 'left',
'anchor_y': 'top',
'padding_x': 0,
'padding_y': 0,
'padding': (0, 0)}
self._label_cached = Label(**kw)
return self._line_options
def _create_line_label(self, text, hint=False):
# Create a label from a text, using line options
ntext = text.replace(u'\n', u'').replace(u'\t', u' ' * self.tab_width)
if self.password and not hint: # Don't replace hint_text with *
ntext = u'*' * len(ntext)
kw = self._get_line_options()
cid = '%s\0%s' % (ntext, str(kw))
texture = Cache_get('textinput.label', cid)
if not texture:
            # FIXME right now, we can't render a very long line...
            # if we move to a "VBO" version as fallback, we won't need to
            # do this. Try to find the maximum text length we can handle.
label = None
label_len = len(ntext)
ld = None
# check for blank line
if not ntext:
texture = Texture.create(size=(1, 1))
Cache_append('textinput.label', cid, texture)
return texture
while True:
try:
label = Label(text=ntext[:label_len], **kw)
label.refresh()
if ld is not None and ld > 2:
ld = int(ld / 2)
label_len += ld
else:
break
            except Exception:
                # rendering can fail when the text is too long for a single
                # texture; reduce the length and retry
if ld is None:
ld = len(ntext)
ld = int(ld / 2)
if ld < 2 and label_len:
label_len -= 1
label_len -= ld
continue
# ok, we found it.
texture = label.texture
Cache_append('textinput.label', cid, texture)
return texture
def _tokenize(self, text):
# Tokenize a text string from some delimiters
if text is None:
return
delimiters = u' ,\'".;:\n\r\t'
oldindex = 0
for index, char in enumerate(text):
if char not in delimiters:
continue
if oldindex != index:
yield text[oldindex:index]
yield text[index:index + 1]
oldindex = index + 1
yield text[oldindex:]
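        # Illustrative behaviour (example input assumed, not from the
        # original source): list(self._tokenize(u'ab cd\n')) yields
        # [u'ab', u' ', u'cd', u'\n', u''] -- every delimiter comes through
        # as its own token, and the remainder after the last delimiter
        # (possibly empty) is always yielded.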
def _split_smart(self, text):
        # Do a "smart" split. For a single-line widget, we only split on
        # explicit line breaks. Otherwise, we word-wrap: a new visual line is
        # started as soon as the current one would overflow the widget, to
        # prevent overflow.
        # depending on the options, split the text on lines or on words
if not self.multiline:
lines = text.split(u'\n')
lines_flags = [0] + [FL_IS_NEWLINE] * (len(lines) - 1)
return lines, lines_flags
# no autosize, do wordwrap.
x = flags = 0
line = []
lines = []
lines_flags = []
_join = u''.join
lines_append, lines_flags_append = lines.append, lines_flags.append
padding_left = self.padding[0]
padding_right = self.padding[2]
width = self.width - padding_left - padding_right
text_width = self._get_text_width
_tab_width, _label_cached = self.tab_width, self._label_cached
# try to add each word on current line.
for word in self._tokenize(text):
is_newline = (word == u'\n')
w = text_width(word, _tab_width, _label_cached)
# if we have more than the width, or if it's a newline,
# push the current line, and create a new one
if (x + w > width and line) or is_newline:
lines_append(_join(line))
lines_flags_append(flags)
flags = 0
line = []
x = 0
if is_newline:
flags |= FL_IS_NEWLINE
else:
x += w
line.append(word)
if line or flags & FL_IS_NEWLINE:
lines_append(_join(line))
lines_flags_append(flags)
return lines, lines_flags
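        # Illustrative behaviour for a wide multiline widget (example input
        # assumed): _split_smart(u'ab\ncd') returns ([u'ab', u'cd'],
        # [0, FL_IS_NEWLINE]) -- one entry per visual line, with a flag
        # recording whether that line came from an explicit newline or from
        # word wrapping.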
def _key_down(self, key, repeat=False):
displayed_str, internal_str, internal_action, scale = key
if internal_action is None:
if self._selection:
self.delete_selection()
self.insert_text(displayed_str)
elif internal_action in ('shift', 'shift_L', 'shift_R'):
if not self._selection:
self._selection_from = self._selection_to = self.cursor_index()
self._selection = True
self._selection_finished = False
elif internal_action.startswith('cursor_'):
cc, cr = self.cursor
self.do_cursor_movement(internal_action)
if self._selection and not self._selection_finished:
self._selection_to = self.cursor_index()
self._update_selection()
else:
self.cancel_selection()
elif self._selection and internal_action in ('del', 'backspace'):
self.delete_selection()
elif internal_action == 'del':
# Move cursor one char to the right. If that was successful,
# do a backspace (effectively deleting char right of cursor)
cursor = self.cursor
self.do_cursor_movement('cursor_right')
if cursor != self.cursor:
self.do_backspace(mode='del')
elif internal_action == 'backspace':
self.do_backspace()
elif internal_action == 'enter':
if self.multiline:
self.insert_text(u'\n')
else:
self.dispatch('on_text_validate')
self.focus = False
elif internal_action == 'escape':
self.focus = False
if internal_action != 'escape':
#self._recalc_size()
pass
def _key_up(self, key, repeat=False):
displayed_str, internal_str, internal_action, scale = key
if internal_action in ('shift', 'shift_L', 'shift_R'):
if self._selection:
self._update_selection(True)
def _keyboard_on_key_down(self, window, keycode, text, modifiers):
# Keycodes on OSX:
ctrl, cmd = 64, 1024
key, key_str = keycode
# This allows *either* ctrl *or* cmd, but not both.
is_shortcut = (modifiers == ['ctrl'] or (
_is_osx and modifiers == ['meta']))
is_interesting_key = key in (list(self.interesting_keys.keys()) + [27])
if not self._editable:
# duplicated but faster testing for non-editable keys
if text and not is_interesting_key:
if is_shortcut and key == ord('c'):
self._copy(self.selection_text)
elif key == 27:
self.focus = False
return True
if text and not is_interesting_key:
self._hide_handles(self._win)
self._hide_cut_copy_paste()
self._win.remove_widget(self._handle_middle)
if is_shortcut:
if key == ord('x'): # cut selection
self._cut(self.selection_text)
elif key == ord('c'): # copy selection
self._copy(self.selection_text)
elif key == ord('v'): # paste selection
self._paste()
elif key == ord('a'): # select all
self.select_all()
elif key == ord('z'): # undo
self.do_undo()
elif key == ord('r'): # redo
self.do_redo()
else:
if self._selection:
self.delete_selection()
self.insert_text(text)
#self._recalc_size()
return
if key == 27: # escape
self.focus = False
return True
elif key == 9: # tab
self.insert_text(u'\t')
return True
k = self.interesting_keys.get(key)
if k:
key = (None, None, k, 1)
self._key_down(key)
def _keyboard_on_key_up(self, window, keycode):
key, key_str = keycode
k = self.interesting_keys.get(key)
if k:
key = (None, None, k, 1)
self._key_up(key)
def on_hint_text(self, instance, value):
self._refresh_hint_text()
def _refresh_hint_text(self):
_lines, self._hint_text_flags = self._split_smart(self.hint_text)
_hint_text_labels = []
_hint_text_rects = []
_create_label = self._create_line_label
for x in _lines:
lbl = _create_label(x, hint=True)
_hint_text_labels.append(lbl)
_hint_text_rects.append(
Rectangle(size=(lbl.size if lbl else (0, 0))))
lbl = None
self._hint_text_lines = _lines
self._hint_text_labels = _hint_text_labels
self._hint_text_rects = _hint_text_rects
# Remember to update graphics
self._trigger_update_graphics()
#
# Properties
#
_lines = ListProperty([])
_hint_text_lines = ListProperty([])
_editable = BooleanProperty(True)
readonly = BooleanProperty(False)
'''If True, the user will not be able to change the content of a textinput.
.. versionadded:: 1.3.0
:attr:`readonly` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
multiline = BooleanProperty(True)
    '''If True, the widget will be able to show multiple lines of text. If False,
the "enter" keypress will defocus the textinput instead of adding a new
line.
:attr:`multiline` is a :class:`~kivy.properties.BooleanProperty` and
defaults to True.
'''
password = BooleanProperty(False)
'''If True, the widget will display its characters as the character '*'.
.. versionadded:: 1.2.0
:attr:`password` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
keyboard_suggestions = BooleanProperty(True)
    '''If True, provides auto suggestions on top of the keyboard.
This will only work if :attr:`input_type` is set to `text`.
.. versionadded:: 1.8.0
:attr:`keyboard_suggestions` is a
:class:`~kivy.properties.BooleanProperty` defaults to True.
'''
cursor_blink = BooleanProperty(False)
'''This property is used to blink the cursor graphic. The value of
:attr:`cursor_blink` is automatically computed. Setting a value on it will
have no impact.
:attr:`cursor_blink` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
def _get_cursor(self):
return self._cursor
def _set_cursor(self, pos):
if not self._lines:
self._trigger_refresh_text()
return
l = self._lines
cr = boundary(pos[1], 0, len(l) - 1)
cc = boundary(pos[0], 0, len(l[cr]))
cursor = cc, cr
if self._cursor == cursor:
return
self._cursor = cursor
        # adjust the scrolling to ensure that the cursor will always be inside
        # our viewport.
padding_left = self.padding[0]
padding_right = self.padding[2]
viewport_width = self.width - padding_left - padding_right
sx = self.scroll_x
offset = self.cursor_offset()
        # if the offset is outside the current bounds, readjust
if offset > viewport_width + sx:
self.scroll_x = offset - viewport_width
if offset < sx:
self.scroll_x = offset
# do the same for Y
        # this algorithm tries to center the cursor as much as possible
dy = self.line_height + self.line_spacing
offsety = cr * dy
sy = self.scroll_y
padding_top = self.padding[1]
padding_bottom = self.padding[3]
viewport_height = self.height - padding_top - padding_bottom - dy
if offsety > viewport_height + sy:
sy = offsety - viewport_height
if offsety < sy:
sy = offsety
self.scroll_y = sy
return True
cursor = AliasProperty(_get_cursor, _set_cursor)
    '''Tuple of (col, row) values indicating the current cursor position.
    You can set a new (col, row) if you want to move the cursor. The scrolling
area will be automatically updated to ensure that the cursor is
visible inside the viewport.
:attr:`cursor` is an :class:`~kivy.properties.AliasProperty`.
'''
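    # Usage sketch (instance name assumed for illustration):
    #     ti = TextInput(text=u'hello\nworld')
    #     ti.cursor = (0, 1)  # (col, row): jump to the start of the second
    #                         # line; scrolling is adjusted automatically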
def _get_cursor_col(self):
return self._cursor[0]
cursor_col = AliasProperty(_get_cursor_col, None, bind=('cursor', ))
'''Current column of the cursor.
:attr:`cursor_col` is an :class:`~kivy.properties.AliasProperty` to
cursor[0], read-only.
'''
def _get_cursor_row(self):
return self._cursor[1]
cursor_row = AliasProperty(_get_cursor_row, None, bind=('cursor', ))
'''Current row of the cursor.
:attr:`cursor_row` is an :class:`~kivy.properties.AliasProperty` to
cursor[1], read-only.
'''
cursor_pos = AliasProperty(_get_cursor_pos, None, bind=(
'cursor', 'padding', 'pos', 'size', 'focus',
'scroll_x', 'scroll_y'))
'''Current position of the cursor, in (x, y).
:attr:`cursor_pos` is an :class:`~kivy.properties.AliasProperty`,
read-only.
'''
cursor_color = ListProperty([1, 0, 0, 1])
'''Current color of the cursor, in (r, g, b, a) format.
.. versionadded:: 1.8.1
:attr:`cursor_color` is a :class:`~kivy.properties.ListProperty` and
defaults to [1, 0, 0, 1].
'''
line_height = NumericProperty(1)
'''Height of a line. This property is automatically computed from the
:attr:`font_name`, :attr:`font_size`. Changing the line_height will have
no impact.
.. note::
:attr:`line_height` is the height of a single line of text.
Use :attr:`minimum_height`, which also includes padding, to
get the height required to display the text properly.
:attr:`line_height` is a :class:`~kivy.properties.NumericProperty`,
read-only.
'''
tab_width = NumericProperty(4)
'''By default, each tab will be replaced by four spaces on the text
input widget. You can set a lower or higher value.
:attr:`tab_width` is a :class:`~kivy.properties.NumericProperty` and
defaults to 4.
'''
padding_x = VariableListProperty([0, 0], length=2)
'''Horizontal padding of the text: [padding_left, padding_right].
padding_x also accepts a one argument form [padding_horizontal].
:attr:`padding_x` is a :class:`~kivy.properties.VariableListProperty` and
defaults to [0, 0]. This might be changed by the current theme.
.. deprecated:: 1.7.0
Use :attr:`padding` instead.
'''
def on_padding_x(self, instance, value):
self.padding[0] = value[0]
self.padding[2] = value[1]
padding_y = VariableListProperty([0, 0], length=2)
'''Vertical padding of the text: [padding_top, padding_bottom].
padding_y also accepts a one argument form [padding_vertical].
:attr:`padding_y` is a :class:`~kivy.properties.VariableListProperty` and
defaults to [0, 0]. This might be changed by the current theme.
.. deprecated:: 1.7.0
Use :attr:`padding` instead.
'''
def on_padding_y(self, instance, value):
self.padding[1] = value[0]
self.padding[3] = value[1]
padding = VariableListProperty([6, 6, 6, 6])
'''Padding of the text: [padding_left, padding_top, padding_right,
padding_bottom].
padding also accepts a two argument form [padding_horizontal,
padding_vertical] and a one argument form [padding].
.. versionchanged:: 1.7.0
Replaced AliasProperty with VariableListProperty.
:attr:`padding` is a :class:`~kivy.properties.VariableListProperty` and
defaults to [6, 6, 6, 6].
'''
scroll_x = NumericProperty(0)
'''X scrolling value of the viewport. The scrolling is automatically
updated when the cursor is moved or text changed. If there is no
user input, the scroll_x and scroll_y properties may be changed.
:attr:`scroll_x` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.
'''
scroll_y = NumericProperty(0)
'''Y scrolling value of the viewport. See :attr:`scroll_x` for more
information.
:attr:`scroll_y` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.
'''
selection_color = ListProperty([0.1843, 0.6549, 0.8313, .5])
'''Current color of the selection, in (r, g, b, a) format.
.. warning::
The color should always have an "alpha" component less than 1
since the selection is drawn after the text.
:attr:`selection_color` is a :class:`~kivy.properties.ListProperty` and
defaults to [0.1843, 0.6549, 0.8313, .5].
'''
border = ListProperty([16, 16, 16, 16])
'''Border used for :class:`~kivy.graphics.vertex_instructions.BorderImage`
graphics instruction. Used with :attr:`background_normal` and
:attr:`background_active`. Can be used for a custom background.
.. versionadded:: 1.4.1
It must be a list of four values: (top, right, bottom, left). Read the
BorderImage instruction for more information about how to use it.
:attr:`border` is a :class:`~kivy.properties.ListProperty` and defaults
to (16, 16, 16, 16).
'''
background_normal = StringProperty(
'atlas://data/images/defaulttheme/textinput')
'''Background image of the TextInput when it's not in focus.
.. versionadded:: 1.4.1
:attr:`background_normal` is a :class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/textinput'.
'''
background_disabled_normal = StringProperty(
'atlas://data/images/defaulttheme/textinput_disabled')
'''Background image of the TextInput when disabled.
.. versionadded:: 1.8.0
:attr:`background_disabled_normal` is a
:class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/textinput_disabled'.
'''
background_active = StringProperty(
'atlas://data/images/defaulttheme/textinput_active')
'''Background image of the TextInput when it's in focus.
.. versionadded:: 1.4.1
:attr:`background_active` is a
:class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/textinput_active'.
'''
background_disabled_active = StringProperty(
'atlas://data/images/defaulttheme/textinput_disabled_active')
'''Background image of the TextInput when it's in focus and disabled.
.. versionadded:: 1.8.0
:attr:`background_disabled_active` is a
:class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/textinput_disabled_active'.
'''
background_color = ListProperty([1, 1, 1, 1])
'''Current color of the background, in (r, g, b, a) format.
.. versionadded:: 1.2.0
:attr:`background_color` is a :class:`~kivy.properties.ListProperty`
and defaults to [1, 1, 1, 1] (white).
'''
foreground_color = ListProperty([0, 0, 0, 1])
'''Current color of the foreground, in (r, g, b, a) format.
.. versionadded:: 1.2.0
:attr:`foreground_color` is a :class:`~kivy.properties.ListProperty`
and defaults to [0, 0, 0, 1] (black).
'''
disabled_foreground_color = ListProperty([0, 0, 0, .5])
'''Current color of the foreground when disabled, in (r, g, b, a) format.
.. versionadded:: 1.8.0
:attr:`disabled_foreground_color` is a
:class:`~kivy.properties.ListProperty` and
    defaults to [0, 0, 0, .5] (50% transparent black).
'''
use_bubble = BooleanProperty(not _is_desktop)
'''Indicates whether the cut/copy/paste bubble is used.
.. versionadded:: 1.7.0
:attr:`use_bubble` is a :class:`~kivy.properties.BooleanProperty`
and defaults to True on mobile OS's, False on desktop OS's.
'''
use_handles = BooleanProperty(not _is_desktop)
'''Indicates whether the selection handles are displayed.
.. versionadded:: 1.8.0
:attr:`use_handles` is a :class:`~kivy.properties.BooleanProperty`
and defaults to True on mobile OS's, False on desktop OS's.
'''
def get_sel_from(self):
return self._selection_from
selection_from = AliasProperty(get_sel_from, None)
'''If a selection is in progress or complete, this property will represent
the cursor index where the selection started.
.. versionchanged:: 1.4.0
:attr:`selection_from` is an :class:`~kivy.properties.AliasProperty` and
defaults to None, readonly.
'''
def get_sel_to(self):
return self._selection_to
selection_to = AliasProperty(get_sel_to, None)
'''If a selection is in progress or complete, this property will represent
    the cursor index where the selection ends.
.. versionchanged:: 1.4.0
:attr:`selection_to` is an :class:`~kivy.properties.AliasProperty` and
defaults to None, readonly.
'''
selection_text = StringProperty(u'')
'''Current content selection.
:attr:`selection_text` is a :class:`~kivy.properties.StringProperty`
and defaults to '', readonly.
'''
def on_selection_text(self, instance, value):
if value and self.use_handles:
self._trigger_show_handles()
focus = BooleanProperty(False)
'''If focus is True, the keyboard will be requested and you can start
entering text into the textinput.
:attr:`focus` is a :class:`~kivy.properties.BooleanProperty` and defaults
to False.
.. Note::
Selection is cancelled when TextInput is focused. If you need to
show selection when TextInput is focused, you should delay
(use Clock.schedule) the call to the functions for selecting
text (select_all, select_text).
'''
def _get_text(self, encode=True):
lf = self._lines_flags
l = self._lines
len_l = len(l)
if len(lf) < len_l:
lf.append(1)
text = u''.join([(u'\n' if (lf[i] & FL_IS_NEWLINE) else u'') + l[i]
for i in range(len_l)])
if PY2 and encode and type(text) is not str:
text = text.encode('utf-8')
return text
def _set_text(self, text):
if PY2 and type(text) is str:
text = text.decode('utf-8')
if self._get_text(encode=False) == text:
return
self._refresh_text(text)
self.cursor = self.get_cursor_from_index(len(text))
text = AliasProperty(_get_text, _set_text, bind=('_lines', ))
'''Text of the widget.
Creation of a simple hello world::
widget = TextInput(text='Hello world')
    If you want to create the widget with a unicode string, use::
widget = TextInput(text=u'My unicode string')
    :attr:`text` is an :class:`~kivy.properties.AliasProperty`.
'''
font_name = StringProperty('DroidSans')
'''Filename of the font to use. The path can be absolute or relative.
Relative paths are resolved by the :func:`~kivy.resources.resource_find`
function.
.. warning::
Depending on your text provider, the font file may be ignored. However,
you can mostly use this without problems.
If the font used lacks the glyphs for the particular language/symbols
you are using, you will see '[]' blank box characters instead of the
actual glyphs. The solution is to use a font that has the glyphs you
need to display. For example, to display |unicodechar|, use a font like
freesans.ttf that has the glyph.
.. |unicodechar| image:: images/unicode-char.png
:attr:`font_name` is a :class:`~kivy.properties.StringProperty` and
defaults to 'DroidSans'.
'''
font_size = NumericProperty('15sp')
'''Font size of the text in pixels.
:attr:`font_size` is a :class:`~kivy.properties.NumericProperty` and
    defaults to '15sp'.
'''
hint_text = StringProperty('')
'''Hint text of the widget.
Shown if text is '' and focus is False.
.. versionadded:: 1.6.0
:attr:`hint_text` a :class:`~kivy.properties.StringProperty` and defaults
to ''.
'''
hint_text_color = ListProperty([0.5, 0.5, 0.5, 1.0])
'''Current color of the hint_text text, in (r, g, b, a) format.
.. versionadded:: 1.6.0
:attr:`hint_text_color` is a :class:`~kivy.properties.ListProperty` and
defaults to [0.5, 0.5, 0.5, 1.0] (grey).
'''
auto_indent = BooleanProperty(False)
'''Automatically indent multiline text.
.. versionadded:: 1.7.0
:attr:`auto_indent` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
allow_copy = BooleanProperty(True)
'''Decides whether to allow copying the text.
.. versionadded:: 1.8.0
:attr:`allow_copy` is a :class:`~kivy.properties.BooleanProperty` and
defaults to True.
'''
def _get_min_height(self):
        return (len(self._lines) * (self.line_height + self.line_spacing)
                + self.padding[1] + self.padding[3])  # top + bottom padding
minimum_height = AliasProperty(_get_min_height, None,
bind=('_lines', 'line_spacing', 'padding',
'font_size', 'font_name', 'password',
'hint_text'))
'''Minimum height of the content inside the TextInput.
.. versionadded:: 1.8.0
:attr:`minimum_height` is a readonly
:class:`~kivy.properties.AliasProperty`.
'''
line_spacing = NumericProperty(0)
'''Space taken up between the lines.
.. versionadded:: 1.8.0
:attr:`line_spacing` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.
'''
input_type = OptionProperty('text', options=('text', 'number', 'url',
'mail', 'datetime', 'tel',
'address'))
'''The kind of input, keyboard to request
.. versionadded:: 1.8.0
    :attr:`input_type` is an :class:`~kivy.properties.OptionProperty` and
defaults to 'text'. Can be one of 'text', 'number', 'url', 'mail',
'datetime', 'tel', 'address'.
'''
handle_image_middle = StringProperty(
'atlas://data/images/defaulttheme/selector_middle')
'''Image used to display the middle handle on the TextInput for cursor
positioning.
.. versionadded:: 1.8.0
:attr:`handle_image_middle` is a :class:`~kivy.properties.StringProperty`
and defaults to 'atlas://data/images/defaulttheme/selector_middle'.
'''
def on_handle_image_middle(self, instance, value):
if self._handle_middle:
self._handle_middle.source = value
handle_image_left = StringProperty(
'atlas://data/images/defaulttheme/selector_left')
'''Image used to display the Left handle on the TextInput for selection.
.. versionadded:: 1.8.0
:attr:`handle_image_left` is a :class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/selector_left'.
'''
def on_handle_image_left(self, instance, value):
if self._handle_left:
self._handle_left.source = value
handle_image_right = StringProperty(
'atlas://data/images/defaulttheme/selector_right')
'''Image used to display the Right handle on the TextInput for selection.
.. versionadded:: 1.8.0
:attr:`handle_image_right` is a
:class:`~kivy.properties.StringProperty` and defaults to
'atlas://data/images/defaulttheme/selector_right'.
'''
def on_handle_image_right(self, instance, value):
if self._handle_right:
self._handle_right.source = value
keyboard_mode = OptionProperty('auto', options=('auto', 'managed'))
'''How the keyboard visibility should be managed (auto will have standard
behaviour to show/hide on focus, managed requires setting keyboard_visible
manually, or calling the helper functions ``show_keyboard()``
    and ``hide_keyboard()``).
.. versionadded:: 1.8.0
    :attr:`keyboard_mode` is an :class:`~kivy.properties.OptionProperty` and
defaults to 'auto'. Can be one of 'auto' or 'managed'.
'''
def show_keyboard(self):
"""
Convenience function to show the keyboard in managed mode
"""
if self.keyboard_mode == "managed":
self._bind_keyboard()
def hide_keyboard(self):
"""
Convenience function to hide the keyboard in managed mode
"""
if self.keyboard_mode == "managed":
self._unbind_keyboard()
if __name__ == '__main__':
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
class TextInputApp(App):
def build(self):
root = BoxLayout(orientation='vertical')
textinput = TextInput(multiline=True, use_bubble=True,
use_handles=True)
textinput.text = __doc__
root.add_widget(textinput)
textinput2 = TextInput(multiline=False, text='monoline textinput',
size_hint=(1, None), height=30)
root.add_widget(textinput2)
return root
TextInputApp().run()
| mit | -7,725,666,435,607,477,000 | 33.735593 | 80 | 0.553452 | false |
ninepints/hootvetica | food/management/commands/__init__.py | 1 | 2086 | import uuid
from django.db.models import Q
from food.models import Category, Item, WeeklyClosure, OneTimeClosure
def get_closures(current_date):
weekly_closures = set(
vals['location_id'] for vals in WeeklyClosure.objects.filter(
weekday=current_date.weekday())
.values('location_id'))
onetime_closures = set(
vals['location_id'] for vals in OneTimeClosure.objects.filter(
start_date__lte=current_date,
end_date__gte=current_date)
.values('location_id'))
return (weekly_closures, onetime_closures)
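# Usage sketch (the date value is assumed for illustration):
#     weekly_ids, onetime_ids = get_closures(datetime.date(2015, 3, 2))
# Each returned value is a set of location IDs closed on that date.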
def open_locations(locations, current_time):
count = locations.update(open=True, last_modified=current_time)
Item.objects.filter(parent__parent__in=locations).update(
status='AVA', last_modified=current_time)
do_hardcoded_menu_insertions(locations, current_time)
return count
def close_locations(locations, current_time):
count = locations.update(open=False, message='', last_modified=current_time)
do_hardcoded_menu_deletions(locations)
return count
def do_hardcoded_menu_insertions(locations, current_time):
if current_time.weekday() == 6:
Item.objects.filter(parent__name='Chicken').update(status='OUT')
Item.objects.bulk_create(
[Item(
uid=uuid.uuid4().hex,
parent=cat,
name='HBCB',
status='AVA',
last_modified=current_time)
for cat in Category.objects.filter(
name='Chicken',
parent__in=locations)])
Item.objects.bulk_create(
[Item(
uid=uuid.uuid4().hex,
parent=cat,
name='Specialty',
status='AVA',
last_modified=current_time)
for cat in Category.objects.filter(
name='Pizza',
parent__in=locations)])
def do_hardcoded_menu_deletions(locations):
Item.objects.filter(parent__parent__in=locations).filter(
Q(name='HBCB') | Q(name='Specialty')).delete()
| mit | 2,817,346,170,367,118,300 | 36.927273 | 80 | 0.610738 | false |
openstack/python-openstackclient | openstackclient/common/sdk_utils.py | 2 | 2358 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def get_osc_show_columns_for_sdk_resource(
sdk_resource,
osc_column_map,
invisible_columns=None
):
"""Get and filter the display and attribute columns for an SDK resource.
Common utility function for preparing the output of an OSC show command.
Some of the columns may need to get renamed, others made invisible.
:param sdk_resource: An SDK resource
:param osc_column_map: A hash of mappings for display column names
:param invisible_columns: A list of invisible column names
:returns: Two tuples containing the names of the display and attribute
columns
"""
if getattr(sdk_resource, 'allow_get', None) is not None:
resource_dict = sdk_resource.to_dict(
body=True, headers=False, ignore_none=False)
else:
resource_dict = sdk_resource
# Build the OSC column names to display for the SDK resource.
attr_map = {}
display_columns = list(resource_dict.keys())
invisible_columns = [] if invisible_columns is None else invisible_columns
for col_name in invisible_columns:
if col_name in display_columns:
display_columns.remove(col_name)
for sdk_attr, osc_attr in osc_column_map.items():
if sdk_attr in display_columns:
attr_map[osc_attr] = sdk_attr
display_columns.remove(sdk_attr)
if osc_attr not in display_columns:
display_columns.append(osc_attr)
sorted_display_columns = sorted(display_columns)
# Build the SDK attribute names for the OSC column names.
attr_columns = []
for column in sorted_display_columns:
new_column = attr_map[column] if column in attr_map else column
attr_columns.append(new_column)
return tuple(sorted_display_columns), tuple(attr_columns)
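# Illustrative usage (hypothetical resource dict and column map, not part of
# this module):
#
#     columns, attrs = get_osc_show_columns_for_sdk_resource(
#         {'id': '123', 'is_public': True, 'links': []},
#         osc_column_map={'is_public': 'public'},
#         invisible_columns=['links'],
#     )
#     # columns == ('id', 'public') and attrs == ('id', 'is_public')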
| apache-2.0 | -8,352,966,954,439,167,000 | 39.655172 | 78 | 0.692536 | false |
mishbahr/django-connected | connected_accounts/providers/instagram.py | 1 | 1535 | from django.utils.translation import ugettext_lazy as _
from connected_accounts.conf import settings
from connected_accounts.provider_pool import providers
from .base import OAuth2Provider, ProviderAccount
class InstagramAccount(ProviderAccount):
PROFILE_URL = 'http://instagram.com/'
def get_profile_url(self):
return self.PROFILE_URL + self.account.extra_data.get('username', '')
def get_avatar_url(self):
return self.account.extra_data.get('profile_picture')
def to_str(self):
default = super(InstagramAccount, self).to_str()
return self.account.extra_data.get('username', default)
def extract_common_fields(self):
data = self.account.extra_data
return dict(username=data.get('username'),
name=data.get('full_name'))
class InstagramProvider(OAuth2Provider):
id = 'instagram'
name = _('Instagram')
account_class = InstagramAccount
access_token_url = 'https://api.instagram.com/oauth/access_token'
authorization_url = 'https://api.instagram.com/oauth/authorize'
profile_url = 'https://api.instagram.com/v1/users/self'
consumer_key = settings.CONNECTED_ACCOUNTS_INSTAGRAM_CONSUMER_KEY
consumer_secret = settings.CONNECTED_ACCOUNTS_INSTAGRAM_CONSUMER_SECRET
scope = settings.CONNECTED_ACCOUNTS_INSTAGRAM_SCOPE
def extract_uid(self, data):
return str(data['data']['id'])
def extract_extra_data(self, data):
return data.get('data', {})
providers.register(InstagramProvider)
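# Sketch of what this provider extracts (response shape assumed from the
# Instagram v1 API): given data = {'data': {'id': 42, 'username': 'alice'}},
# extract_uid(data) returns '42' and extract_extra_data(data) returns
# {'id': 42, 'username': 'alice'}.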
| bsd-3-clause | -4,676,531,674,048,964,000 | 30.979167 | 77 | 0.700326 | false |
sunset1995/py360convert | py360convert/c2e.py | 1 | 1865 | import numpy as np
from . import utils
def c2e(cubemap, h, w, mode='bilinear', cube_format='dice'):
if mode == 'bilinear':
order = 1
elif mode == 'nearest':
order = 0
else:
raise NotImplementedError('unknown mode')
if cube_format == 'horizon':
pass
elif cube_format == 'list':
cubemap = utils.cube_list2h(cubemap)
elif cube_format == 'dict':
cubemap = utils.cube_dict2h(cubemap)
elif cube_format == 'dice':
cubemap = utils.cube_dice2h(cubemap)
else:
raise NotImplementedError('unknown cube_format')
assert len(cubemap.shape) == 3
assert cubemap.shape[0] * 6 == cubemap.shape[1]
assert w % 8 == 0
face_w = cubemap.shape[0]
uv = utils.equirect_uvgrid(h, w)
u, v = np.split(uv, 2, axis=-1)
u = u[..., 0]
v = v[..., 0]
cube_faces = np.stack(np.split(cubemap, 6, 1), 0)
# Get face id to each pixel: 0F 1R 2B 3L 4U 5D
tp = utils.equirect_facetype(h, w)
coor_x = np.zeros((h, w))
coor_y = np.zeros((h, w))
for i in range(4):
mask = (tp == i)
coor_x[mask] = 0.5 * np.tan(u[mask] - np.pi * i / 2)
coor_y[mask] = -0.5 * np.tan(v[mask]) / np.cos(u[mask] - np.pi * i / 2)
mask = (tp == 4)
c = 0.5 * np.tan(np.pi / 2 - v[mask])
coor_x[mask] = c * np.sin(u[mask])
coor_y[mask] = c * np.cos(u[mask])
mask = (tp == 5)
c = 0.5 * np.tan(np.pi / 2 - np.abs(v[mask]))
coor_x[mask] = c * np.sin(u[mask])
coor_y[mask] = -c * np.cos(u[mask])
# Final renormalize
coor_x = (np.clip(coor_x, -0.5, 0.5) + 0.5) * face_w
coor_y = (np.clip(coor_y, -0.5, 0.5) + 0.5) * face_w
equirec = np.stack([
utils.sample_cubefaces(cube_faces[..., i], tp, coor_y, coor_x, order=order)
for i in range(cube_faces.shape[3])
], axis=-1)
return equirec
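# Minimal usage sketch (sizes are illustrative, not from the original
# project): the cubemap must be six face_w x face_w faces side by side in
# 'horizon' format, and the output width must be a multiple of 8.
#
#     face_w = 64
#     cube_h = np.zeros((face_w, face_w * 6, 3), np.uint8)
#     equirec = c2e(cube_h, h=128, w=256, cube_format='horizon')
#     # equirec.shape == (128, 256, 3)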
| mit | -4,961,470,892,452,482,000 | 28.140625 | 83 | 0.538338 | false |
mad-lab/transit | src/pytransit/draw_trash.py | 1 | 12680 | # Copyright 2015.
# Michael A. DeJesus, Chaitra Ambadipudi, and Thomas R. Ioerger.
#
#
# This file is part of TRANSIT.
#
# TRANSIT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License.
#
#
# TRANSIT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with TRANSIT. If not, see <http://www.gnu.org/licenses/>.
import pytransit.view_trash as view_trash
from math import *
import os
import platform
import numpy
from PIL import Image, ImageDraw, ImageFont
def normalize(X, old_min, old_max, new_min, new_max):
old_range = (old_max - old_min)
new_range = (new_max - new_min)
if old_range == 0:
return new_min
else:
return (((X - old_min) * new_range) / old_range) + new_min
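# e.g. normalize(5, 0, 10, 0, 100) == 50: linearly rescale X from
# [old_min, old_max] onto [new_min, new_max]; a collapsed input range maps
# everything to new_min.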
linuxFonts = []
linuxFonts.append("/usr/share/fonts/truetype/ttf-dejavu/DejaVuSans-Bold.ttf")
linuxFonts.append("/usr/share/fonts/dejavu-lgc/DejaVuLGCSerifCondensed-Bold.ttf")
linuxFonts.append("/usr/share/fonts/dejavu-lgc/DejaVuLGCSansCondensed-Bold.ttf")
winFonts = []
winFonts.append("consolab.ttf")
winFonts.append("courb.ttf")
winFonts.append("arial.ttf")
fontsize = 16
font = ImageFont.load_default()
if platform.system() == "Linux":
for fontpath in linuxFonts:
if os.path.isfile(fontpath):
font = ImageFont.truetype(fontpath, fontsize)
break
elif platform.system() == "Windows":
for fontpath in winFonts:
try:
font = ImageFont.truetype(fontpath, fontsize)
break
except:
pass
def draw_reads(draw, reads, ta_sites, start_x=0, start_y=0, width=400, height=100, start=0, end=500, min_read=0, max_read=500, lwd=2):
TRUNC_READS = [min(rd, max_read) for rd in reads]
NORM_READS = [normalize(rd, 0, max_read, 0, max_read) for rd in TRUNC_READS]
new_min_w = start_x
new_max_w = start_x + width #- self.padding_r
new_min_h = start_y
new_max_h = start_y + height
for i,TA in enumerate(ta_sites):
TApos = normalize(TA, start, end, new_min_w, new_max_w)
if NORM_READS[i] == 0: continue
read_h = normalize(NORM_READS[i], 0, max_read, new_min_h, new_max_h) # height of read line
Y1 = start_y
Y2 = start_y - (read_h-start_y)
draw.line([(TApos, Y1), (TApos, Y2)], width=lwd, fill=(255,0,0))
def draw_ta_sites(draw, ta_sites, start_x=0, start_y=0, width=200, height=0, start=0, end=500, lwd=2):
new_min_w = start_x
new_max_w = start_x + width #- self.padding_r
for i,TA in enumerate(ta_sites):
TApos = normalize(TA, start, end, new_min_w, new_max_w)
draw.line([(TApos, start_y+0), (TApos, start_y + height)], width=lwd, fill="black")
def draw_scale(draw, start_x, start_y, height, max_read):
#print("scale", start_x, start_y, height)
MIDREAD = int(max_read/2.0)
top_text_w, top_text_h = draw.textsize(str(max_read), font=font)
draw.text((start_x, start_y), str(max_read), font=font, fill="black")
draw.text((start_x, start_y + height/2.0), str(MIDREAD), font=font, fill="black")
bottom_text_w, bottom_text_h = draw.textsize(str(MIDREAD), font=font)
draw.text((start_x+bottom_text_w-(top_text_w/2.0), start_y+height), "0", font=font, fill="black")
def draw_features(draw, GENES, orf2data, start, end, start_x, start_y, width, height):
padding_h = 3
text_w, text_h = draw.textsize("RV0001", font=font)
gene_h = height - text_h
triangle_size = 10
for gene in GENES:
if gene not in orf2data: continue
gene_start = orf2data[gene][2]
gene_end = orf2data[gene][3]
strand = orf2data[gene][4]
name = orf2data[gene][0]
new_min = start_x
new_max = start_x + width
norm_start = normalize(max(gene_start, start), start, end, new_min, new_max)
norm_end = normalize(min(gene_end, end), start, end, new_min, new_max)
color = "gray"
if gene.startswith("ES-"):
color = "red"
elif gene.startswith("GD-"):
color = "yellow"
elif gene.startswith("NE-"):
color = "blue"
elif gene.startswith("GA-"):
color = "green"
if strand == "-":
if gene_start >= start:
draw.rectangle(((norm_start, start_y+5),(norm_end,start_y+gene_h-5)), fill=color)
else:
draw.rectangle(((norm_start, start_y+5),(norm_end,start_y+gene_h-5)), fill=color)
else:
if gene_end <= end:
draw.rectangle(((norm_start, start_y+5),(norm_end, start_y+gene_h-5)), fill=color)
else:
draw.rectangle(((norm_start, start_y+5),(norm_end, start_y+gene_h-5)), fill=color)
if name == "-": name = gene
if not name.startswith("non-coding"):
name_text_w, name_text_h = draw.textsize(name, font=font)
if abs(norm_start-norm_end) >= name_text_w:
draw.text(( norm_start + (abs(norm_start-norm_end) - name_text_w)/2.0 , start_y+gene_h+text_h), name, font=font, fill="black")
def draw_genes(draw, GENES, orf2data, start, end, start_x, start_y, width, height, doTriangle=True):
padding_h = 3
text_w, text_h = draw.textsize("RV0001", font=font)
gene_h = height - text_h
triangle_size = 10
if not doTriangle:
triangle_size = 0
for gene in GENES:
if gene not in orf2data: continue
gene_start = orf2data[gene][2]
gene_end = orf2data[gene][3]
strand = orf2data[gene][4]
name = orf2data[gene][0]
new_min = start_x
new_max = start_x + width
norm_start = normalize(max(gene_start, start), start, end, new_min, new_max)
norm_end = normalize(min(gene_end, end), start, end, new_min, new_max)
if strand == "-":
if gene_start >= start:
draw.rectangle(((norm_start+triangle_size, start_y+5),(norm_end,start_y+gene_h-5)), fill="blue")
if doTriangle:
draw.polygon([(norm_start+triangle_size, start_y),(norm_start+triangle_size,start_y+gene_h), (norm_start,start_y+gene_h/2.0)], fill="blue" )
else:
draw.rectangle(((norm_start, start_y+5),(norm_end,start_y+gene_h-5)), fill="blue")
else:
if gene_end <= end:
draw.rectangle(((norm_start, start_y+5),(norm_end-triangle_size, start_y+gene_h-5)), fill="blue")
if doTriangle:
draw.polygon([(norm_end-triangle_size, start_y),(norm_end-triangle_size,start_y+gene_h), (norm_end,start_y+gene_h/2.0)], fill="blue" )
else:
draw.rectangle(((norm_start, start_y+5),(norm_end, start_y+gene_h-5)), fill="blue")
if name == "-": name = gene
if not name.startswith("non-coding"):
name_text_w, name_text_h = draw.textsize(name, font=font)
if abs(norm_start-norm_end) >= name_text_w:
draw.text(( norm_start + (abs(norm_start-norm_end) - name_text_w)/2.0 , start_y+gene_h+text_h), name, font=font, fill="black")
def get_dynamic_height(N):
#Set rest of heights and widths
read_h = 100
gene_h = 50
ta_h = 20
padding_h = 3
canvas_h = read_h*N + ta_h + gene_h + padding_h + padding_h + 80
return (canvas_h)
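# e.g. get_dynamic_height(2) == 356: two read tracks of 100 px each, plus the
# TA-site row (20), gene row (50), two 3 px paddings and an 80 px margin.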
def draw_canvas(fulldata, position, hash, orf2data, feature_hashes, feature_data, labels=[], min_read=0, scale=[500], globalScale = False, start=1, end=500, canvas_h=-1, canvas_w=1000):
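    # Render one canvas image: a stack of read-count tracks (one per dataset
    # in `fulldata`), a TA-site row, a gene track and optional feature
    # tracks, restricted to the [start, end] genome window.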
temp_image = Image.new("RGB",(200, 200),"white")
temp_draw = ImageDraw.Draw(temp_image)
#Set main draw object
N = len(fulldata)
Nfeat = len(feature_hashes)
#Set Labels
if not labels:
labels= ["Read Counts"]*N
GENES = []
FEATURES = [[] for j in range(len(feature_hashes))]
TA_SITES = []
READS = []
nc_count = 1
for j,data in enumerate(fulldata):
#print(j)
temp = []
for i,read in enumerate(data):
pos = position[i]
if start <= pos <= end:
gene = hash.get(pos,["non-coding"])[0]
if gene == "non-coding" and len(GENES) > 0 and not GENES[-1].startswith("non-coding"):
gene+="_%d" % nc_count
nc_count +=1
if j ==0:
if gene not in GENES: GENES.append(gene)
TA_SITES.append(pos)
for f,f_hash in enumerate(feature_hashes):
feat = f_hash.get(pos,["non-coding"])[0]
if feat not in FEATURES[f]: FEATURES[f].append(feat)
temp.append(read)
READS.append(temp)
max_reads = []
if globalScale:
max_reads = [int(numpy.max(READS))] * len(READS)
else:
for j,s in enumerate(scale):
#print(j,s)
if s < 0:
max_reads.append(int(numpy.max(READS[j])))
else:
max_reads.append(s)
#Get dynamic text widths
#print("Labels:")
max_label_w = 0
for L in labels:
label_text_w, label_text_h = temp_draw.textsize(L, font=font)
max_label_w = max(label_text_w, max_label_w)
#print(L)
scale_text_w, scale_text_h = temp_draw.textsize(str(max(max_reads)), font=font)
#Set rest of heights and widths
read_h = 100
gene_h = 50
ta_h = 20
padding_w = 3
padding_h = 3
read_w = canvas_w - (max_label_w + scale_text_w + padding_w + padding_w + 30)
if canvas_h == -1:
canvas_h = read_h*N + ta_h + gene_h + padding_h + padding_h + 80 + (gene_h+padding_h+50)*(Nfeat)
image = Image.new("RGB",(canvas_w, canvas_h),"white")
draw = ImageDraw.Draw(image)
lwd = 2
#print(READS)
#print("start", start)
#print("end", end)
#print(len(READS), len(TA_SITES))
#print("")
#for rd in READS:
# print(rd)
#print("")
start_x = max_label_w + padding_w + 21
draw.line([(start_x, 0), (start_x, canvas_h)], width=lwd, fill="black")
start_y = 0
half = 100*0.5
start_x += 5
for j in range(len(fulldata)):
temp_label_text_w, temp_label_text_h = temp_draw.textsize(labels[j], font=font)
label_text_x = (start_x/2.0) - (temp_label_text_w/2.0)
start_y+=read_h+padding_h
#draw.text((10, start_y - half), labels[j], font=font, fill="black")
draw.text((label_text_x, start_y - half), labels[j], font=font, fill="black")
draw_reads(draw, READS[j], TA_SITES, start_x, start_y, read_w, read_h, start, end, min_read, max_reads[j])
draw_scale(draw, start_x+read_w+padding_w+2, start_y-100+10, 70, max_reads[j])
start_y+=10
#start_x+=5
#TA sites
temp_label_text_w, temp_label_text_h = temp_draw.textsize('TA Sites', font=font)
label_text_x = (start_x/2.0) - (temp_label_text_w/2.0)
#draw.text((30, start_y),'TA Sites', font=font, fill="black")
draw.text((label_text_x, start_y),'TA Sites', font=font, fill="black")
draw_ta_sites(draw, TA_SITES, start_x, start_y, read_w, ta_h, start, end)
#Genes
temp_label_text_w, temp_label_text_h = temp_draw.textsize('Genes', font=font)
label_text_x = (start_x/2.0) - (temp_label_text_w/2.0)
start_y += 50
#draw.text((30, start_y+10),'Genes', font=font, fill="black")
draw.text((label_text_x, start_y+10),'Genes', font=font, fill="black")
width = read_w
draw_genes(draw, GENES, orf2data, start, end, start_x, start_y, width, gene_h)
start_y += gene_h -20#+ padding_h
#Features:
for f in range(len(FEATURES)):
start_y += gene_h + padding_h + 25
temp_label_text_w, temp_label_text_h = temp_draw.textsize('Feature-%d' % (f+1), font=font)
label_text_x = (start_x/2.0) - (temp_label_text_w/2.0)
draw.text((label_text_x, start_y+10),'Feature-%d' % (f+1), font=font, fill="black")
width = read_w
#print(FEATURES[f])
#draw_genes(draw, FEATURES[f], feature_data[f], start, end, start_x, start_y, width, gene_h))
draw_features(draw, FEATURES[f], feature_data[f], start, end, start_x, start_y, width, gene_h)
start_y +=10
return(image)
| gpl-3.0 | 3,052,072,579,063,662,000 | 31.429668 | 185 | 0.579101 | false |
mitsuhiko/fungiform | setup.py | 1 | 1459 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Fungiform
~~~~~~~~~
A form handling system that previously was used for Pocoo's Zine
and Plurk's Solace software. Unbundled into a separate library that
is framework independent.
This is still a preview release. Check the source for more information.
:copyright: (c) 2010 by Armin Ronacher, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name = 'Fungiform',
version = '0.2',
url = 'http://github.com/mitsuhiko/fungiform',
license = 'BSD License',
author = 'Armin Ronacher',
author_email = '[email protected]',
description = 'form library',
long_description = __doc__,
keywords = 'form library',
packages = ['fungiform', 'fungiform.tests'],
platforms = 'any',
zip_safe = False,
test_suite = 'fungiform.tests.suite',
include_package_data = True,
classifiers = [
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
'Development Status :: 4 - Beta'
],
)
| bsd-3-clause | 3,290,813,701,191,853,600 | 29.395833 | 76 | 0.632625 | false |
googleapis/googleapis-gen | google/devtools/cloudtrace/v1/devtools-cloudtrace-v1-py/google/cloud/trace_v1/types/trace.py | 1 | 12909 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.devtools.cloudtrace.v1',
manifest={
'Trace',
'Traces',
'TraceSpan',
'ListTracesRequest',
'ListTracesResponse',
'GetTraceRequest',
'PatchTracesRequest',
},
)
class Trace(proto.Message):
r"""A trace describes how long it takes for an application to
perform an operation. It consists of a set of spans, each of
which represent a single timed event within the operation.
Attributes:
project_id (str):
Project ID of the Cloud project where the
trace data is stored.
trace_id (str):
Globally unique identifier for the trace. This identifier is
a 128-bit numeric value formatted as a 32-byte hex string.
For example, ``382d4f4c6b7bb2f4a972559d9085001d``.
spans (Sequence[google.cloud.trace_v1.types.TraceSpan]):
Collection of spans in the trace.
"""
project_id = proto.Field(
proto.STRING,
number=1,
)
trace_id = proto.Field(
proto.STRING,
number=2,
)
spans = proto.RepeatedField(
proto.MESSAGE,
number=3,
message='TraceSpan',
)
class Traces(proto.Message):
r"""List of new or updated traces.
Attributes:
traces (Sequence[google.cloud.trace_v1.types.Trace]):
List of traces.
"""
traces = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='Trace',
)
class TraceSpan(proto.Message):
r"""A span represents a single timed event within a trace. Spans
can be nested and form a trace tree. Often, a trace contains a
root span that describes the end-to-end latency of an operation
and, optionally, one or more subspans for its suboperations.
Spans do not need to be contiguous. There may be gaps between
spans in a trace.
Attributes:
span_id (int):
Identifier for the span. Must be a 64-bit integer other than
0 and unique within a trace. For example,
``2205310701640571284``.
kind (google.cloud.trace_v1.types.TraceSpan.SpanKind):
Distinguishes between spans generated in a particular
context. For example, two spans with the same name may be
distinguished using ``RPC_CLIENT`` and ``RPC_SERVER`` to
identify queueing latency associated with the span.
name (str):
Name of the span. Must be less than 128
bytes. The span name is sanitized and displayed
in the Stackdriver Trace tool in the Google
Cloud Platform Console.
The name may be a method name or some other per-
call site name. For the same executable and the
same call point, a best practice is to use a
consistent name, which makes it easier to
correlate cross-trace spans.
start_time (google.protobuf.timestamp_pb2.Timestamp):
Start time of the span in nanoseconds from
the UNIX epoch.
end_time (google.protobuf.timestamp_pb2.Timestamp):
End time of the span in nanoseconds from the
UNIX epoch.
parent_span_id (int):
Optional. ID of the parent span, if any.
labels (Sequence[google.cloud.trace_v1.types.TraceSpan.LabelsEntry]):
Collection of labels associated with the span. Label keys
must be less than 128 bytes. Label values must be less than
16 kilobytes (10MB for ``/stacktrace`` values).
Some predefined label keys exist, or you may create your
own. When creating your own, we recommend the following
formats:
- ``/category/product/key`` for agents of well-known
products (e.g. ``/db/mongodb/read_size``).
- ``short_host/path/key`` for domain-specific keys (e.g.
``foo.com/myproduct/bar``)
Predefined labels include:
- ``/agent``
- ``/component``
- ``/error/message``
- ``/error/name``
- ``/http/client_city``
- ``/http/client_country``
- ``/http/client_protocol``
- ``/http/client_region``
- ``/http/host``
- ``/http/method``
- ``/http/path``
- ``/http/redirected_url``
- ``/http/request/size``
- ``/http/response/size``
- ``/http/route``
- ``/http/status_code``
- ``/http/url``
- ``/http/user_agent``
- ``/pid``
- ``/stacktrace``
- ``/tid``
"""
class SpanKind(proto.Enum):
r"""Type of span. Can be used to specify additional relationships
between spans in addition to a parent/child relationship.
"""
SPAN_KIND_UNSPECIFIED = 0
RPC_SERVER = 1
RPC_CLIENT = 2
span_id = proto.Field(
proto.FIXED64,
number=1,
)
kind = proto.Field(
proto.ENUM,
number=2,
enum=SpanKind,
)
name = proto.Field(
proto.STRING,
number=3,
)
start_time = proto.Field(
proto.MESSAGE,
number=4,
message=timestamp_pb2.Timestamp,
)
end_time = proto.Field(
proto.MESSAGE,
number=5,
message=timestamp_pb2.Timestamp,
)
parent_span_id = proto.Field(
proto.FIXED64,
number=6,
)
labels = proto.MapField(
proto.STRING,
proto.STRING,
number=7,
)
class ListTracesRequest(proto.Message):
r"""The request message for the ``ListTraces`` method. All fields are
required unless specified.
Attributes:
project_id (str):
Required. ID of the Cloud project where the
trace data is stored.
view (google.cloud.trace_v1.types.ListTracesRequest.ViewType):
Optional. Type of data returned for traces in the list.
Default is ``MINIMAL``.
page_size (int):
Optional. Maximum number of traces to return.
If not specified or <= 0, the implementation
selects a reasonable value. The implementation
may return fewer traces than the requested page
size.
page_token (str):
Token identifying the page of results to return. If
provided, use the value of the ``next_page_token`` field
from a previous request.
start_time (google.protobuf.timestamp_pb2.Timestamp):
Start of the time interval (inclusive) during
which the trace data was collected from the
application.
end_time (google.protobuf.timestamp_pb2.Timestamp):
End of the time interval (inclusive) during
which the trace data was collected from the
application.
filter (str):
Optional. A filter against labels for the request.
By default, searches use prefix matching. To specify exact
match, prepend a plus symbol (``+``) to the search term.
Multiple terms are ANDed. Syntax:
- ``root:NAME_PREFIX`` or ``NAME_PREFIX``: Return traces
where any root span starts with ``NAME_PREFIX``.
- ``+root:NAME`` or ``+NAME``: Return traces where any root
span's name is exactly ``NAME``.
- ``span:NAME_PREFIX``: Return traces where any span starts
with ``NAME_PREFIX``.
- ``+span:NAME``: Return traces where any span's name is
exactly ``NAME``.
- ``latency:DURATION``: Return traces whose overall latency
is greater or equal to than ``DURATION``. Accepted units
are nanoseconds (``ns``), milliseconds (``ms``), and
seconds (``s``). Default is ``ms``. For example,
``latency:24ms`` returns traces whose overall latency is
greater than or equal to 24 milliseconds.
- ``label:LABEL_KEY``: Return all traces containing the
specified label key (exact match, case-sensitive)
regardless of the key:value pair's value (including empty
values).
- ``LABEL_KEY:VALUE_PREFIX``: Return all traces containing
the specified label key (exact match, case-sensitive)
whose value starts with ``VALUE_PREFIX``. Both a key and
a value must be specified.
- ``+LABEL_KEY:VALUE``: Return all traces containing a
key:value pair exactly matching the specified text. Both
a key and a value must be specified.
- ``method:VALUE``: Equivalent to ``/http/method:VALUE``.
- ``url:VALUE``: Equivalent to ``/http/url:VALUE``.
order_by (str):
Optional. Field used to sort the returned traces. Can be one
of the following:
- ``trace_id``
- ``name`` (``name`` field of root span in the trace)
- ``duration`` (difference between ``end_time`` and
``start_time`` fields of the root span)
- ``start`` (``start_time`` field of the root span)
Descending order can be specified by appending ``desc`` to
the sort field (for example, ``name desc``).
Only one sort field is permitted.
"""
class ViewType(proto.Enum):
r"""Type of data returned for traces in the list."""
VIEW_TYPE_UNSPECIFIED = 0
MINIMAL = 1
ROOTSPAN = 2
COMPLETE = 3
project_id = proto.Field(
proto.STRING,
number=1,
)
view = proto.Field(
proto.ENUM,
number=2,
enum=ViewType,
)
page_size = proto.Field(
proto.INT32,
number=3,
)
page_token = proto.Field(
proto.STRING,
number=4,
)
start_time = proto.Field(
proto.MESSAGE,
number=5,
message=timestamp_pb2.Timestamp,
)
end_time = proto.Field(
proto.MESSAGE,
number=6,
message=timestamp_pb2.Timestamp,
)
filter = proto.Field(
proto.STRING,
number=7,
)
order_by = proto.Field(
proto.STRING,
number=8,
)
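    # Illustrative construction (project ID and filter values are made up):
    #
    #     request = ListTracesRequest(
    #         project_id='my-project',
    #         view=ListTracesRequest.ViewType.ROOTSPAN,
    #         filter='+root:/cart/checkout latency:250ms',
    #         order_by='duration desc',
    #     )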
class ListTracesResponse(proto.Message):
r"""The response message for the ``ListTraces`` method.
Attributes:
traces (Sequence[google.cloud.trace_v1.types.Trace]):
List of trace records as specified by the
view parameter.
next_page_token (str):
If defined, indicates that there are more
traces that match the request and that this
value should be passed to the next request to
continue retrieving additional traces.
"""
@property
def raw_page(self):
return self
traces = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='Trace',
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
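# Pagination sketch: feed next_page_token back as page_token until it comes
# back empty (the client object here is hypothetical).
#
#   while True:
#       response = client.list_traces(request=request)
#       for trace in response.traces:
#           ...  # process each trace
#       if not response.next_page_token:
#           break
#       request.page_token = response.next_page_token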
class GetTraceRequest(proto.Message):
r"""The request message for the ``GetTrace`` method.
Attributes:
project_id (str):
Required. ID of the Cloud project where the
trace data is stored.
trace_id (str):
Required. ID of the trace to return.
"""
project_id = proto.Field(
proto.STRING,
number=1,
)
trace_id = proto.Field(
proto.STRING,
number=2,
)
class PatchTracesRequest(proto.Message):
r"""The request message for the ``PatchTraces`` method.
Attributes:
project_id (str):
Required. ID of the Cloud project where the
trace data is stored.
traces (google.cloud.trace_v1.types.Traces):
Required. The body of the message.
"""
project_id = proto.Field(
proto.STRING,
number=1,
)
traces = proto.Field(
proto.MESSAGE,
number=2,
message='Traces',
)
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | 8,235,140,834,633,207,000 | 32.270619 | 77 | 0.577736 | false |
googleapis/googleapis-gen | google/cloud/bigquery/storage/v1beta1/bigquery-storage-v1beta1-py/google/cloud/bigquery/storage_v1beta1/services/big_query_storage/transports/base.py | 1 | 10654 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.bigquery.storage_v1beta1.types import storage
from google.protobuf import empty_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
'google-cloud-bigquery-storage',
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
# google.auth.__version__ was added in 1.26.0
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try: # try pkg_resources if it is available
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GOOGLE_AUTH_VERSION = None
class BigQueryStorageTransport(abc.ABC):
"""Abstract transport class for BigQueryStorage."""
AUTH_SCOPES = (
'https://www.googleapis.com/auth/bigquery',
'https://www.googleapis.com/auth/bigquery.readonly',
'https://www.googleapis.com/auth/cloud-platform',
)
DEFAULT_HOST: str = 'bigquerystorage.googleapis.com'
def __init__(
self, *,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ':' not in host:
host += ':443'
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file,
**scopes_kwargs,
quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
# If the credentials is service account credentials, then always try to use self signed JWT.
if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
# TODO(busunkim): This method is in the base transport
# to avoid duplicating code across the transport classes. These functions
# should be deleted once the minimum required versions of google-auth is increased.
# TODO: Remove this function once google-auth >= 1.25.0 is required
@classmethod
def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
"""Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.create_read_session: gapic_v1.method.wrap_method(
self.create_read_session,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.read_rows: gapic_v1.method.wrap_method(
self.read_rows,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.ServiceUnavailable,
),
deadline=86400.0,
),
default_timeout=86400.0,
client_info=client_info,
),
self.batch_create_read_session_streams: gapic_v1.method.wrap_method(
self.batch_create_read_session_streams,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.finalize_stream: gapic_v1.method.wrap_method(
self.finalize_stream,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.split_read_stream: gapic_v1.method.wrap_method(
self.split_read_stream,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
}
@property
def create_read_session(self) -> Callable[
[storage.CreateReadSessionRequest],
Union[
storage.ReadSession,
Awaitable[storage.ReadSession]
]]:
raise NotImplementedError()
@property
def read_rows(self) -> Callable[
[storage.ReadRowsRequest],
Union[
storage.ReadRowsResponse,
Awaitable[storage.ReadRowsResponse]
]]:
raise NotImplementedError()
@property
def batch_create_read_session_streams(self) -> Callable[
[storage.BatchCreateReadSessionStreamsRequest],
Union[
storage.BatchCreateReadSessionStreamsResponse,
Awaitable[storage.BatchCreateReadSessionStreamsResponse]
]]:
raise NotImplementedError()
@property
def finalize_stream(self) -> Callable[
[storage.FinalizeStreamRequest],
Union[
empty_pb2.Empty,
Awaitable[empty_pb2.Empty]
]]:
raise NotImplementedError()
@property
def split_read_stream(self) -> Callable[
[storage.SplitReadStreamRequest],
Union[
storage.SplitReadStreamResponse,
Awaitable[storage.SplitReadStreamResponse]
]]:
raise NotImplementedError()
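# A concrete transport subclasses BigQueryStorageTransport and returns real
# callables from each property instead of raising. A minimal, hypothetical
# sketch (the stub map is an assumption, not part of this module):
#
#   class MyGrpcTransport(BigQueryStorageTransport):
#       @property
#       def create_read_session(self):
#           return self._stubs['create_read_session']  # assumed stub map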
__all__ = (
'BigQueryStorageTransport',
)
| apache-2.0 | -6,510,122,743,847,912,000 | 39.819923 | 161 | 0.598085 | false |
freeflightsim/fg-flying-club | flying-club.appspot.com/app/AuthHandler.py | 1 | 4541 | # -*- coding: utf-8 -*-
import os
import uuid
import datetime
from google.appengine.ext import webapp
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.api import mail
from google.appengine.ext.webapp import template
from django.utils import simplejson as json
from google.appengine.api import urlfetch
import urllib
import conf
import app.FlyingClub
import app.CoreHandler
from app.models import Comment, Crew
class AuthHandler(webapp.RequestHandler):
###################################################################################################
## Get Actions
###################################################################################################
def get(self, section=None, page=None):
#sessID = self.do_cookie_check()
section = 'auth'
template_vars = {}
App = app.FlyingClub.FlyingClub(section, page)
template_vars['app'] = App
#tvars['appo'] = Appo
#tvars['conf'] = conf
#tvars['user'] = None
#template_vars['crewID'] = crewID
        #if 'sessIdent' in self.request.cookies:
        #    sessIdent = self.request.cookies['sessIdent']
        #else:
        #    sessIdent = None
## Setup Section and Page
#if section == None:
#section = "index"
#template_vars['section'] = section
#template_vars['page'] = page
## Get Comments
q = db.GqlQuery("SELECT * FROM Comment " +
"WHERE section = :1 " +
"ORDER BY dated DESC",
section)
results = q.fetch(50)
#template_vars['comments'] = results
## Application Object
#template_vars['page_title'] = Appo.title("/%s/" % section)
## Setup User + Aauth
#user = users.get_current_user()
#if not user:
# template_vars['user'] = None
# template_vars['login_url'] = users.create_login_url("/set_session/")
#else:
# template_vars['user'] = user
# template_vars['logout_url'] = users.create_logout_url("/subscribe/")
## Sign In Section
#if section == 'ssignin' :
# if sessID:
# self.redirect("/profile/")
# return
#template_vars['page_title'] = 'Sign In with OpenId'
#if section == 'sdo_logout':
# cook_str = 'sessID=%s; expires=Fri, 31-Dec-1980 23:59:59 GMT; Path=/;' % ''
# self.response.headers.add_header( 'Set-Cookie',
# cook_str
# )
# self.redirect("/")
# return
#if section == 'sprofile':
# if not sessID:
# self.redirect("/signin/")
# return
#template_vars['welcome'] = True if self.request.get("welcome") == '1' else False
#template_vars['page_title'] = 'My Profile'
main_template = '%s.html' % (section)
path = '/%s/' % (section)
#template_vars['path'] = path
template_path = os.path.join(os.path.dirname(__file__), '../templates/pages/%s' % main_template)
self.response.out.write(template.render(template_path, template_vars))
###################################################################################################
## Post Actions
###################################################################################################
def post(self, page=None):
if page == 'rpx':
token = self.request.get('token')
url = 'https://rpxnow.com/api/v2/auth_info'
args = {
'format': 'json',
'apiKey': conf.RPX_API_KEY,
'token': token
}
r = urlfetch.fetch( url=url,
payload=urllib.urlencode(args),
method=urlfetch.POST,
headers={'Content-Type':'application/x-www-form-urlencoded'}
)
data = json.loads(r.content)
if data['stat'] == 'ok':
welcome = 0
unique_identifier = data['profile']['identifier']
q = db.GqlQuery("select * from Crew where ident= :1", unique_identifier)
crew = q.get()
if not crew:
crew = Crew(ident=unique_identifier)
crew.name = data['profile']['preferredUsername']
if data['profile'].has_key('email'):
crew.email = data['profile']['email']
crew.put()
welcome = 1
subject = "New Login: %s" % crew.name
body = "New login on schedule"
else:
subject = "Return Login: %s" % crew.name
body = "New login on schedule"
sessID = str(crew.key())
cook_str = 'crewID=%s; expires=Fri, 31-Dec-2020 23:59:59 GMT; Path=/;' % crew.id()
self.response.headers.add_header( 'Set-Cookie',
cook_str
)
mail.send_mail( sender = conf.EMAIL,
to = "Dev <[email protected]>",
subject = subject,
body = body
)
self.redirect("/profile/?welcome=%s" % welcome)
return
else:
            print page
#self.redirect("/")
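# For reference, the handler above only touches these fields of the rpxnow
# auth_info response (the values shown are illustrative, not real data):
#
#   {
#       "stat": "ok",
#       "profile": {
#           "identifier": "https://example.org/openid/user42",
#           "preferredUsername": "pilot42",
#           "email": "[email protected]"
#       }
#   }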
| gpl-2.0 | -6,413,474,163,439,239,000 | 24.227778 | 100 | 0.566175 | false |
amitdhiman000/dais | politics/migrations/0001_initial.py | 1 | 4585 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-18 20:58
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('user', '0002_auto_20161215_0806'),
]
operations = [
migrations.CreateModel(
name='Leader',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='*****', max_length=50)),
],
options={
'verbose_name_plural': 'Leaders',
'verbose_name': 'Leader',
},
),
migrations.CreateModel(
name='LegislativeConstituency',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='*****', max_length=50)),
],
options={
'verbose_name_plural': 'LegislativeConstituencies',
'verbose_name': 'LegislativeConstituency',
},
),
migrations.CreateModel(
name='MemberLegislative',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('constituency', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='politics.LegislativeConstituency')),
('leader', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='politics.Leader')),
],
options={
'verbose_name_plural': 'MemberLegislatives',
'verbose_name': 'MemberLegislative',
},
),
migrations.CreateModel(
name='MemberParliament',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'verbose_name_plural': 'MemberParliaments',
'verbose_name': 'MemberParliament',
},
),
migrations.CreateModel(
name='ParliamentConstituency',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='*****', max_length=50)),
('lc', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='politics.LegislativeConstituency')),
('state', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.State')),
],
options={
'verbose_name_plural': 'ParliamentConstituencies',
'verbose_name': 'ParliamentConstituency',
},
),
migrations.CreateModel(
name='Party',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('short_name', models.CharField(default='***', max_length=10)),
('full_name', models.CharField(default='*****', max_length=50)),
],
options={
'verbose_name_plural': 'Parties',
'verbose_name': 'Party',
},
),
migrations.AddField(
model_name='memberparliament',
name='constituency',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='politics.ParliamentConstituency'),
),
migrations.AddField(
model_name='memberparliament',
name='leader',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='politics.Leader'),
),
migrations.AddField(
model_name='legislativeconstituency',
name='pc',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='politics.ParliamentConstituency'),
),
migrations.AddField(
model_name='legislativeconstituency',
name='state',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.State'),
),
migrations.AddField(
model_name='leader',
name='party',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='politics.Party'),
),
]
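# This migration is applied like any other Django migration, e.g.:
#
#   python manage.py migrate politics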
| apache-2.0 | -7,740,220,461,772,860,000 | 39.9375 | 136 | 0.551145 | false |
gandelman-a/neutron-lbaas | neutron_lbaas/tests/tempest/lib/common/utils/linux/remote_client.py | 2 | 6497 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import re
import time
import six
from neutron_lbaas.tests.tempest.lib import config
from neutron_lbaas.tests.tempest.lib import exceptions
from neutron_lbaas.tests.tempest.lib.common import ssh
CONF = config.CONF
class RemoteClient(object):
# NOTE(afazekas): It should always get an address instead of server
def __init__(self, server, username, password=None, pkey=None):
ssh_timeout = CONF.compute.ssh_timeout
network = CONF.compute.network_for_ssh
ip_version = CONF.compute.ip_version_for_ssh
ssh_channel_timeout = CONF.compute.ssh_channel_timeout
if isinstance(server, six.string_types):
ip_address = server
else:
addresses = server['addresses'][network]
for address in addresses:
if address['version'] == ip_version:
ip_address = address['addr']
break
else:
raise exceptions.ServerUnreachable()
self.ssh_client = ssh.Client(ip_address, username, password,
ssh_timeout, pkey=pkey,
channel_timeout=ssh_channel_timeout)
def exec_command(self, cmd):
# Shell options below add more clearness on failures,
# path is extended for some non-cirros guest oses (centos7)
cmd = "set -eu -o pipefail; PATH=$PATH:/sbin; " + cmd
return self.ssh_client.exec_command(cmd)
def validate_authentication(self):
"""Validate ssh connection and authentication
This method raises an Exception when the validation fails.
"""
self.ssh_client.test_connection_auth()
def hostname_equals_servername(self, expected_hostname):
# Get host name using command "hostname"
actual_hostname = self.exec_command("hostname").rstrip()
return expected_hostname == actual_hostname
def get_ram_size_in_mb(self):
output = self.exec_command('free -m | grep Mem')
if output:
return output.split()[1]
def get_number_of_vcpus(self):
command = 'cat /proc/cpuinfo | grep processor | wc -l'
output = self.exec_command(command)
return int(output)
def get_partitions(self):
# Return the contents of /proc/partitions
command = 'cat /proc/partitions'
output = self.exec_command(command)
return output
def get_boot_time(self):
cmd = 'cut -f1 -d. /proc/uptime'
boot_secs = self.exec_command(cmd)
boot_time = time.time() - int(boot_secs)
return time.localtime(boot_time)
def write_to_console(self, message):
message = re.sub("([$\\`])", "\\\\\\\\\\1", message)
# usually to /dev/ttyS0
cmd = 'sudo sh -c "echo \\"%s\\" >/dev/console"' % message
return self.exec_command(cmd)
def ping_host(self, host, count=CONF.compute.ping_count,
size=CONF.compute.ping_size):
addr = netaddr.IPAddress(host)
cmd = 'ping6' if addr.version == 6 else 'ping'
cmd += ' -c{0} -w{0} -s{1} {2}'.format(count, size, host)
return self.exec_command(cmd)
def get_mac_address(self):
cmd = "ip addr | awk '/ether/ {print $2}'"
return self.exec_command(cmd)
def get_nic_name(self, address):
cmd = "ip -o addr | awk '/%s/ {print $2}'" % address
return self.exec_command(cmd)
def get_ip_list(self):
cmd = "ip address"
return self.exec_command(cmd)
def assign_static_ip(self, nic, addr):
cmd = "sudo ip addr add {ip}/{mask} dev {nic}".format(
ip=addr, mask=CONF.network.tenant_network_mask_bits,
nic=nic
)
return self.exec_command(cmd)
def turn_nic_on(self, nic):
cmd = "sudo ip link set {nic} up".format(nic=nic)
return self.exec_command(cmd)
def get_pids(self, pr_name):
# Get pid(s) of a process/program
cmd = "ps -ef | grep %s | grep -v 'grep' | awk {'print $1'}" % pr_name
return self.exec_command(cmd).split('\n')
def get_dns_servers(self):
cmd = 'cat /etc/resolv.conf'
resolve_file = self.exec_command(cmd).strip().split('\n')
entries = (l.split() for l in resolve_file)
dns_servers = [l[1] for l in entries
if len(l) and l[0] == 'nameserver']
return dns_servers
def send_signal(self, pid, signum):
cmd = 'sudo /bin/kill -{sig} {pid}'.format(pid=pid, sig=signum)
return self.exec_command(cmd)
def _renew_lease_udhcpc(self, fixed_ip=None):
"""Renews DHCP lease via udhcpc client. """
file_path = '/var/run/udhcpc.'
nic_name = self.get_nic_name(fixed_ip)
nic_name = nic_name.strip().lower()
pid = self.exec_command('cat {path}{nic}.pid'.
format(path=file_path, nic=nic_name))
pid = pid.strip()
self.send_signal(pid, 'USR1')
def _renew_lease_dhclient(self, fixed_ip=None):
"""Renews DHCP lease via dhclient client. """
cmd = "sudo /sbin/dhclient -r && sudo /sbin/dhclient"
self.exec_command(cmd)
def renew_lease(self, fixed_ip=None):
"""Wrapper method for renewing DHCP lease via given client
Supporting:
* udhcpc
* dhclient
"""
# TODO(yfried): add support for dhcpcd
supported_clients = ['udhcpc', 'dhclient']
dhcp_client = CONF.scenario.dhcp_client
if dhcp_client not in supported_clients:
raise exceptions.InvalidConfiguration('%s DHCP client unsupported'
% dhcp_client)
if dhcp_client == 'udhcpc' and not fixed_ip:
raise ValueError("need to set 'fixed_ip' for udhcpc client")
return getattr(self, '_renew_lease_' + dhcp_client)(fixed_ip=fixed_ip)
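# Usage sketch (address and credentials are hypothetical):
#
#   client = RemoteClient('192.0.2.10', 'cirros', password='secret')
#   client.validate_authentication()
#   print(client.ping_host('192.0.2.1'))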
| apache-2.0 | -1,462,144,387,955,705,300 | 37.217647 | 78 | 0.59843 | false |
m-wichmann/dist_brake | drm/data.py | 1 | 5599 | import os
import uuid
import tempfile
temp_dir = tempfile.TemporaryDirectory()
class Job(object):
def __init__(self, disc, rip_config, hb_config, fixes):
if not isinstance(disc, Disc):
raise ValueError()
if not isinstance(rip_config, RipConfig):
raise ValueError()
if not isinstance(hb_config, HandbrakeConfig):
raise ValueError()
self.disc = disc
self.rip_config = rip_config
self.hb_config = hb_config
self.fixes = fixes
self.name = str(uuid.uuid4())
self.temp_path = os.path.join(temp_dir.name, self.name)
self.files = []
os.mkdir(self.temp_path)
def __str__(self):
return self.name
class Fix(object):
allowed_fixes = {
'remove_duplicate_tracks': 'Tries to remove duplicate tracks, if there are the same length and directly after one another.',
'reencode_audio': 'Reencode audio to mp3. Otherwise audio will be copied.',
'split_every_chapters': 'Splits every title depending on the chapters. int for equal sized chunks, list of ints for different chunk lengths.',
'use_libdvdread': 'Use libdvdread instead of libdvdnav.'
}
def __init__(self, name, value):
if name not in Fix.allowed_fixes:
raise KeyError()
self.name = name
self.value = value
def __eq__(self, other):
if isinstance(other, Fix):
return (self.name == other.name) and (self.value == other.value)
elif isinstance(other, str):
return self.name == other
else:
return False
def __str__(self):
return self.name
def dump_data(self):
return {'name': self.name, 'value': self.value}
@classmethod
def parse_data(cls, data):
return cls(data['name'], data['value'])
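# Round-trip sketch for Fix serialization, using one of the allowed fixes:
#
#   fix = Fix('reencode_audio', True)
#   assert Fix.parse_data(fix.dump_data()) == fix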
class Chapter(object):
def __init__(self, no, length):
"""
Initializes a new Chapter object and specifies its number inside the title and its length in seconds.
:param no: number of chapter in title
:param length: length of chapter in seconds
"""
self.no = no
self.length = length
def __eq__(self, other):
if other is None:
return False
return (self.no == other.no) and (self.length == other.length)
class Track(object):
def __init__(self, index, lang):
self.index = index
self.lang = lang
def __eq__(self, other):
if other is None:
return False
return (self.index == other.index) and (self.lang == other.lang)
def __str__(self):
return self.lang
def __repr__(self):
return self.__str__()
class Title(object):
def __init__(self, index):
self.index = index
self.duration = ''
self.a_tracks = []
self.s_tracks = []
self.chapters = []
def __eq__(self, other):
if other is None:
return False
return (self.duration == other.duration) and (self.a_tracks == other.a_tracks) and (self.s_tracks == other.s_tracks) and (self.chapters == other.chapters)
def __str__(self):
ret = 'Title: {num} - {duration} - A: {a_tracks} S: {s_tracks} - {chapter} chapters'
return ret.format(num=self.index, duration=self.duration, a_tracks=self.a_tracks,
s_tracks=self.s_tracks, chapter=len(self.chapters))
class Disc(object):
def __init__(self, local_path):
self.titles = []
self.local_path = local_path
def __str__(self):
        ret = self.local_path
if (len(self.titles) > 0):
ret += ' ('
ret += ''.join([str(t) for t in self.titles])
ret += ')'
return ret
def __repr__(self):
return self.__str__()
class HandbrakeConfig(object):
def __init__(self, preset=None, quality=20, h264_preset='medium', h264_profile='high',
h264_level='4.1'):
if h264_preset not in ['ultrafast', 'superfast', 'veryfast', 'faster', 'fast', 'medium', 'slow', 'slower', 'veryslow', 'placebo']:
raise Exception('Preset invalid')
if h264_profile not in ['baseline', 'main', 'high', 'high10', 'high422', 'high444']:
raise Exception('Profile invalid')
if h264_level not in ['1.0', '1b', '1.1', '1.2', '1.3', '2.0', '2.1', '2.2', '3.0', '3.1', '3.2', '4.0', '4.1', '4.2', '5.0', '5.1', '5.2']:
raise Exception('Level invalid')
self.preset = preset
self.quality = quality
self.h264_preset = h264_preset
self.h264_profile = h264_profile
self.h264_level = h264_level
def dump_data(self):
return {'preset': self.preset, 'quality': self.quality, 'h264_preset': self.h264_preset, 'h264_profile': self.h264_profile, 'h264_level': self.h264_level}
@classmethod
def parse_data(cls, data):
return cls(data['preset'], data['quality'], data['h264_preset'], data['h264_profile'], data['h264_level'])
class RipConfig(object):
def __init__(self, a_lang=None, s_lang=None, len_range=(15, 50)):
if a_lang is None:
a_lang = ['eng', 'deu']
if s_lang is None:
s_lang = ['eng', 'deu']
self.a_lang = a_lang
self.s_lang = s_lang
self.len_range = len_range
def dump_data(self):
return {'a_lang': self.a_lang, 's_lang': self.s_lang, 'len_range': self.len_range}
@classmethod
def parse_data(cls, data):
return cls(data['a_lang'], data['s_lang'], data['len_range'])
| mit | -1,933,623,782,987,842,600 | 30.455056 | 162 | 0.568137 | false |
zxjsdp/NodeFinderGUI | setup.py | 1 | 1280 | from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='NodeFinderGUI',
version='0.5.0',
description=('GUI Tool for node related operations in '
'phylogenetic analyses.'),
author='Haofei Jin',
author_email='[email protected]',
url='https://github.com/zxjsdp/NodeFinderGUI',
license='Apache',
keywords='node phylogenetic tools calibration clade',
packages=['nodefinder_gui'],
install_requires=[],
# $ pip install -e .[dev,test]
extras_require={
'dev': ['pytest', 'tox', 'sphinx'],
'test': ['pytest'],
},
long_description=long_description,
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
| apache-2.0 | -9,152,846,158,618,841,000 | 31.820513 | 63 | 0.609375 | false |
gutomaia/nesasm_py | nesasm/tests/__init__.py | 1 | 4632 | # -*- coding: utf-8 -*-
from unittest import TestCase
from nesasm.compiler import compile
import os
import re
from nesasm.compiler import lexical, syntax, semantic
class MetaInstructionCase(type):
def __new__(cls, name, bases, args):
def gen_lex():
def test(self):
tokens = list(lexical(self.asm))
self.assertEquals(len(tokens), len(self.lex))
for i, l in enumerate(self.lex):
self.assertEquals(l[0], tokens[i]['type'])
self.assertEquals(l[1], tokens[i]['value'])
return test
def gen_syn():
def test(self):
tokens = [
{'type': l[0], 'value': l[1]}
for l in self.lex
]
ast = syntax(tokens)
self.assertEquals(len(ast), len(self.syn))
for i, a in enumerate(self.syn):
self.assertEquals(ast[i]['type'], self.syn[i])
return test
def gen_sem():
def test(self):
ast = []
token_counter = 0
for i, a in enumerate(self.syn):
tokens = []
for j, l in enumerate(self.lex[token_counter:]):
if l[0] == 'T_ENDLINE':
                            token_counter += j + 1
break
tokens.append({'type': l[0], 'value': l[1]})
ast.append({'type': self.syn[i], 'children': tokens})
# print ast
compiled = semantic(ast)
self.assertEquals(compiled, self.code)
return test
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
s2 = re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
s3 = re.sub('_test$', '', s2)
args['test_%s_lexical' % s3] = gen_lex()
args['test_%s_syntax' % s3] = gen_syn()
args['test_%s_semantic' % s3] = gen_sem()
return type.__new__(cls, name, bases, args)
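# Usage sketch: a concrete test case only declares data and the metaclass
# derives the test_*_lexical/_syntax/_semantic methods from the class name
# (Python 3 syntax; the token stream shown is hypothetical):
#
#   class LdaImmTest(TestCase, metaclass=MetaInstructionCase):
#       asm = 'LDA #$10'
#       lex = [('T_INSTRUCTION', 'LDA'), ('T_HEX_NUMBER', '#$10')]
#       syn = ['S_IMMEDIATE']
#       code = [0xa9, 0x10]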
class FileTestCase(TestCase):
def assertFileExists(self, filename):
try:
self.assertTrue(os.path.exists(filename))
except AssertionError:
raise AssertionError('File %s should exist' % filename)
def assertFileNotExists(self, filename):
try:
self.assertFalse(os.path.exists(filename))
except AssertionError:
raise AssertionError('File %s should not exist' % filename)
class HexTestCase(TestCase):
def assertHexEquals(self, expected, actual):
OKGREEN = '\033[92m'
FAIL = '\033[91m'
ENDC = '\033[0m'
try:
self.assertEquals(expected, actual)
except AssertionError:
line = 0
cursor = 0
lines = []
out = ''
while cursor < len(expected) or cursor < len(actual):
for a in range(16):
if cursor < len(expected) and cursor < len(actual):
if expected[cursor] != actual[cursor] and line not in \
lines:
lines.append(line)
cursor += 1
                line += 1
exp = ''
act = ''
for line in lines:
exp = 'Expected: %04x: ' % (line)
act = 'Actual : %04x: ' % (line)
for a in range(16):
cursor = (line * 16) + a
if cursor < len(expected) and cursor < len(actual):
if expected[cursor] != actual[cursor]:
exp += '%s%02x%s' % (
OKGREEN, ord(expected[cursor]), ENDC)
act += '%s%02x%s' % (
FAIL, ord(actual[cursor]), ENDC)
else:
exp += '%02x' % ord(expected[cursor])
act += '%02x' % ord(actual[cursor])
if ((a + 1) % 2) == 0:
exp += ' '
act += ' '
out += '%s- %d \n' % (exp, line + 1)
out += '%s- %d \n' % (act, line + 1)
print(out)
raise AssertionError('Hex are not equal')
class HexFileTestCase(HexTestCase):
def assertHexFileEquals(self, expected, actual):
with open(expected, 'rb') as f:
expectedf = f.read()
with open(actual, 'rb') as f:
actualf = f.read()
self.assertHexEquals(expectedf, actualf)
| bsd-3-clause | 4,055,735,136,519,646,000 | 33.567164 | 79 | 0.444948 | false |
AndiDog/git-cola | cola/widgets/merge.py | 1 | 8995 | from __future__ import division, absolute_import, unicode_literals
from qtpy import QtWidgets
from qtpy.QtCore import Qt
from ..i18n import N_
from ..interaction import Interaction
from ..qtutils import get
from .. import cmds
from .. import icons
from .. import qtutils
from . import completion
from . import standard
from . import defs
def local_merge(context):
"""Provides a dialog for merging branches"""
view = Merge(context, qtutils.active_window())
view.show()
view.raise_()
return view
class Merge(standard.Dialog):
"""Provides a dialog for merging branches."""
def __init__(self, context, parent=None, ref=None):
standard.Dialog.__init__(self, parent=parent)
self.context = context
self.cfg = cfg = context.cfg
self.model = model = context.model
if parent is not None:
self.setWindowModality(Qt.WindowModal)
# Widgets
self.title_label = QtWidgets.QLabel()
self.revision_label = QtWidgets.QLabel()
self.revision_label.setText(N_('Revision to Merge'))
self.revision = completion.GitRefLineEdit(context)
self.revision.setToolTip(N_('Revision to Merge'))
if ref:
self.revision.set_value(ref)
self.radio_local = qtutils.radio(text=N_('Local Branch'), checked=True)
self.radio_remote = qtutils.radio(text=N_('Tracking Branch'))
self.radio_tag = qtutils.radio(text=N_('Tag'))
self.revisions = QtWidgets.QListWidget()
self.revisions.setAlternatingRowColors(True)
self.button_viz = qtutils.create_button(text=N_('Visualize'),
icon=icons.visualize())
tooltip = N_('Squash the merged commits into a single commit')
self.checkbox_squash = qtutils.checkbox(text=N_('Squash'),
tooltip=tooltip)
tooltip = N_('Always create a merge commit when enabled, '
'even when the merge is a fast-forward update')
self.checkbox_noff = qtutils.checkbox(text=N_('No fast forward'),
tooltip=tooltip, checked=False)
self.checkbox_noff_state = False
tooltip = N_('Commit the merge if there are no conflicts. '
'Uncheck to leave the merge uncommitted')
self.checkbox_commit = qtutils.checkbox(text=N_('Commit'),
tooltip=tooltip, checked=True)
self.checkbox_commit_state = True
text = N_('Create Signed Commit')
checked = cfg.get('cola.signcommits', False)
tooltip = N_('GPG-sign the merge commit')
self.checkbox_sign = qtutils.checkbox(text=text, checked=checked,
tooltip=tooltip)
self.button_close = qtutils.close_button()
icon = icons.merge()
self.button_merge = qtutils.create_button(text=N_('Merge'), icon=icon,
default=True)
# Layouts
self.revlayt = qtutils.hbox(defs.no_margin, defs.spacing,
self.revision_label, self.revision,
qtutils.STRETCH, self.title_label)
self.radiolayt = qtutils.hbox(defs.no_margin, defs.spacing,
self.radio_local, self.radio_remote,
self.radio_tag)
self.buttonlayt = qtutils.hbox(defs.no_margin, defs.button_spacing,
self.button_close, qtutils.STRETCH,
self.checkbox_squash, self.checkbox_noff,
self.checkbox_commit, self.checkbox_sign,
self.button_viz, self.button_merge)
self.mainlayt = qtutils.vbox(defs.margin, defs.spacing,
self.radiolayt, self.revisions,
self.revlayt, self.buttonlayt)
self.setLayout(self.mainlayt)
# Signal/slot connections
# pylint: disable=no-member
self.revision.textChanged.connect(self.update_title)
self.revision.enter.connect(self.merge_revision)
self.revisions.itemSelectionChanged.connect(self.revision_selected)
qtutils.connect_released(self.radio_local, self.update_revisions)
qtutils.connect_released(self.radio_remote, self.update_revisions)
qtutils.connect_released(self.radio_tag, self.update_revisions)
qtutils.connect_button(self.button_merge, self.merge_revision)
qtutils.connect_button(self.checkbox_squash, self.toggle_squash)
qtutils.connect_button(self.button_viz, self.viz_revision)
qtutils.connect_button(self.button_close, self.reject)
# Observer messages
model.add_observer(model.message_updated, self.update_all)
self.update_all()
self.init_size(parent=parent)
self.revision.setFocus()
def update_all(self):
"""Set the branch name for the window title and label."""
self.update_title()
self.update_revisions()
def update_title(self, _txt=None):
branch = self.model.currentbranch
revision = self.revision.text()
if revision:
txt = (N_('Merge "%(revision)s" into "%(branch)s"') %
dict(revision=revision, branch=branch))
else:
txt = N_('Merge into "%s"') % branch
self.button_merge.setEnabled(bool(revision))
self.title_label.setText(txt)
self.setWindowTitle(txt)
def toggle_squash(self):
"""Toggles the commit checkbox based on the squash checkbox."""
if get(self.checkbox_squash):
self.checkbox_commit_state = self.checkbox_commit.checkState()
self.checkbox_commit.setCheckState(Qt.Unchecked)
self.checkbox_commit.setDisabled(True)
self.checkbox_noff_state = self.checkbox_noff.checkState()
self.checkbox_noff.setCheckState(Qt.Unchecked)
self.checkbox_noff.setDisabled(True)
else:
self.checkbox_noff.setDisabled(False)
oldstateff = self.checkbox_noff_state
self.checkbox_noff.setCheckState(oldstateff)
self.checkbox_commit.setDisabled(False)
oldstate = self.checkbox_commit_state
self.checkbox_commit.setCheckState(oldstate)
def update_revisions(self):
"""Update the revision list whenever a radio button is clicked"""
self.revisions.clear()
self.revisions.addItems(self.current_revisions())
def revision_selected(self):
"""Update the revision field when a list item is selected"""
revlist = self.current_revisions()
widget = self.revisions
revision = qtutils.selected_item(widget, revlist)
if revision is not None:
self.revision.setText(revision)
def current_revisions(self):
"""Retrieve candidate items to merge"""
if get(self.radio_local):
return self.model.local_branches
elif get(self.radio_remote):
return self.model.remote_branches
elif get(self.radio_tag):
return self.model.tags
return []
def viz_revision(self):
"""Launch a gitk-like viewer on the selection revision"""
revision = self.revision.text()
if not revision:
Interaction.information(
N_('No Revision Specified'),
N_('You must specify a revision to view.'))
return
cmds.do(cmds.VisualizeRevision, self.context, revision)
def merge_revision(self):
"""Merge the selected revision/branch"""
revision = self.revision.text()
if not revision:
Interaction.information(
N_('No Revision Specified'),
N_('You must specify a revision to merge.'))
return
noff = get(self.checkbox_noff)
no_commit = not get(self.checkbox_commit)
squash = get(self.checkbox_squash)
sign = get(self.checkbox_sign)
context = self.context
cmds.do(cmds.Merge, context, revision, no_commit, squash, noff, sign)
self.accept()
def export_state(self):
"""Export persistent settings"""
state = super(Merge, self).export_state()
state['no-ff'] = get(self.checkbox_noff)
state['sign'] = get(self.checkbox_sign)
state['commit'] = get(self.checkbox_commit)
return state
def apply_state(self, state):
"""Apply persistent settings"""
result = super(Merge, self).apply_state(state)
self.checkbox_noff.setChecked(state.get('no-ff', False))
self.checkbox_sign.setChecked(state.get('sign', False))
self.checkbox_commit.setChecked(state.get('commit', True))
return result
| gpl-2.0 | 5,567,192,275,756,753,000 | 39.518018 | 80 | 0.597332 | false |
tyiannak/pySLRF | slrf.py | 1 | 4205 | import numpy
import scipy.signal
import scipy.interpolate
from matplotlib import pyplot as plt
from breezylidar import URG04LX
def flags2segs(Flags, window):
'''
ARGUMENTS:
- Flags: a sequence of class flags (per time window)
- window: window duration (in seconds)
RETURNS:
     - segs: a sequence of segment limits: segs[i,0] and segs[i,1] are the start and end points of segment i
- classes: a sequence of class flags: class[i] is the class ID of the i-th segment
'''
preFlag = 0
curFlag = 0
numOfSegments = 0
curVal = Flags[curFlag]
segsList = []
classes = []
while (curFlag<len(Flags)-1):
stop = 0
preFlag = curFlag
preVal = curVal
while (stop==0):
curFlag = curFlag + 1
tempVal = Flags[curFlag]
if ((tempVal != curVal) | (curFlag==len(Flags)-1)): # stop
numOfSegments = numOfSegments + 1
stop = 1
curSegment = curVal
curVal = Flags[curFlag]
segsList.append((curFlag*window))
classes.append(preVal)
segs = numpy.zeros ((len(segsList),2))
for i in range(len(segsList)):
if i>0:
segs[i, 0] = segsList[i-1]
segs[i, 1] = segsList[i]
return (segs, classes)
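# Worked example: flags [0, 0, 1, 1, 0] with window=1.0 yield two segments,
# [0, 2) of class 0 and [2, 4) of class 1 (the last boundary lands on the
# final flag index):
#
#   segs, classes = flags2segs([0, 0, 1, 1, 0], 1.0)
#   # segs    -> [[0., 2.], [2., 4.]]
#   # classes -> [0, 1]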
def preProcess(angleRange, Scan):
Scan = numpy.array(Scan)
Scan = scipy.signal.medfilt(Scan, 3)
Scan = scipy.signal.medfilt(Scan, 5)
#f = scipy.interpolate.interp1d(angleRange, Scan, kind='cubic')
I = Scan==0
segs, classes = flags2segs(I, 1)
Scan2 = numpy.copy(Scan)
for i in range(1, segs.shape[0]-1):
if classes[i]:
a1 = angleRange[segs[i-1,0]:segs[i-1,1]]
a2 = angleRange[segs[i+1,0]:segs[i+1,1]]
a1 = a1[-1::]
a2 = a2[0:1]
A = numpy.concatenate((a1, a2))
b1 = Scan[segs[i-1,0]:segs[i-1,1]]
b2 = Scan[segs[i+1,0]:segs[i+1,1]]
b1 = b1[-1::]
b2 = b2[0:1]
B = numpy.concatenate((b1, b2))
#f = scipy.interpolate.interp1d(A, B, kind='cubic')
f = scipy.interpolate.interp1d(A, B)
Scan2[segs[i,0]: segs[i,1]] = f(angleRange[segs[i,0]: segs[i,1]])
Scan2[Scan2<0] = 0
Scan2 = scipy.signal.medfilt(Scan2, 3)
Scan2 = scipy.signal.medfilt(Scan2, 5)
return Scan, Scan2
laser = URG04LX('/dev/ttyACM0')
count = 0
angleRange = numpy.arange(-120, 120, 0.352)
print angleRange.shape
plt.figure(figsize=(6*3.13,4*3.13))
while True:
count += 1
Scan = laser.getScan()
Scan, Scan2 = preProcess(angleRange, Scan)
if count==1:
diffScan = numpy.zeros(Scan.shape)
diffScan2 = numpy.zeros(Scan2.shape)
else:
diffScan = numpy.abs(Scan - ScanPrev)
diffScan2 = numpy.abs(Scan2 - ScanPrev2)
diffScan = scipy.signal.medfilt(diffScan, 3)
diffScan = scipy.signal.medfilt(diffScan, 15)
diffScan2 = scipy.signal.medfilt(diffScan2, 3)
diffScan2 = scipy.signal.medfilt(diffScan2, 15)
X = numpy.cos(numpy.deg2rad(angleRange)) * Scan
Y = numpy.sin(numpy.deg2rad(angleRange)) * Scan
X2 = numpy.cos(numpy.deg2rad(angleRange)) * Scan2
Y2 = numpy.sin(numpy.deg2rad(angleRange)) * Scan2
plt.clf()
ax = plt.subplot(2,3,1)
plt.plot(angleRange, Scan)
plt.plot(angleRange, Scan2, 'r')
plt.title(count)
    plt.xlim([-120, 120])
plt.ylim([0, 6000])
ax.set_ylim([0, 6000])
ax = plt.subplot(2,3,2, aspect='equal')
plt.plot(X, Y, '*')
ax.set_xlim([-3000, 3000])
ax.set_ylim([-3000, 3000])
ax = plt.subplot(2,3,3, aspect='equal')
plt.plot(X2, Y2, '*')
ax.set_xlim([-3000, 3000])
ax.set_ylim([-3000, 3000])
ax = plt.subplot(2,3,4)
plt.plot(angleRange, diffScan)
plt.plot(angleRange, diffScan2, 'r')
plt.title(count)
    plt.xlim([-120, 120])
plt.ylim([0, 6000])
ax.set_ylim([0, 6000])
plt.draw()
plt.show(block=False)
ScanPrev = Scan
ScanPrev2 = Scan2
| apache-2.0 | 1,125,670,961,743,430,300 | 28.612676 | 117 | 0.557669 | false |
cbetheridge/simpleclassroom | views/views.py | 1 | 4252 | """
Contains all of the page view handlers for the app.
These handlers should be GET handlers indended for the serving of HTTP. AJAX or
otherwise action-based handlers should be stored in another module.
"""
import json
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import redirect
from django.template import RequestContext
from django.template.loader import get_template
from django.utils import html
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.http import require_GET
from models.classroom import Classroom
from models.classroom import Student
def _make_class_anchor_html(class_data):
"""Creates an escaped html anchor tag for a link.
Args:
    class_data: A dict with keys [class_id, class_name]
Returns:
A string html anchor tag, rendered safe from html injection attacks.
"""
classroom_url = reverse(display_students)
anchor_text = '<a href={}?Id={}>{}</a>'.format(
classroom_url, class_data['class_id'],
html.escape(class_data['class_name']))
return anchor_text
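# For class_data {'class_id': 3, 'class_name': 'Algebra & Geometry'} and a
# student-list view mounted at /students/ (the URL is hypothetical), this
# produces:
#
#   <a href=/students/?Id=3>Algebra &amp; Geometry</a>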
@ensure_csrf_cookie
@require_GET
def display_classrooms(request):
"""Displays a list of Classrooms.
Args:
request: django.http.HttpRequest object.
Returns:
An django.http.HttpResponse object.
"""
template = get_template('classrooms.html')
db_classes = Classroom.objects.all()
classes_repr = [c.get_jsonable_repr() for c in db_classes]
db_students = Student.objects.all()
students_list = []
for student in db_students:
students_list.append({'id': student.pk, 'name': student.full_name})
context = RequestContext(request, {
'stored_classes': json.dumps(classes_repr),
'stored_students': json.dumps(students_list)})
return HttpResponse(template.render(context))
@ensure_csrf_cookie
@require_GET
def display_students(request):
"""Displays a list of Students.
GET data params:
id: (Optional) A classroom ID number. If 'id' is not set or if 'id' is set
to the string 'all', an HTTPResponse of all students will be returned.
Args:
request: django.http.HttpRequest object
Returns:
An django.http.HttpResponse object.
"""
template = get_template('student_list.html')
params = request.GET if request.GET else None
s_query = Student.objects.all()
if (not params or
'Id' not in params or
str(params['Id']).lower() == 'all'):
class_name = 'All Classes'
class_desc = 'All Classes'
else:
# TODO([email protected]): Better handling of one vs many ID params.
class_ids = [params['Id']]
s_query = s_query.filter(membership__classroom__pk__in=class_ids)
class_objs = Classroom.objects.filter(pk__in=class_ids)
if len(class_ids) > 1:
class_a_names = []
for classroom in class_objs:
class_data = classroom.get_jsonable_repr()
class_a_names.append(_make_class_anchor_html(class_data))
class_name = ', '.join(class_a_names)
class_desc = 'Multiple'
else:
class_name = class_objs[0].name
class_desc = class_objs[0].desc
s_query = s_query.order_by('pk')
s_query = s_query.distinct()
students = [s.get_jsonable_repr() for s in list(s_query)]
context = RequestContext(request, {
'class_name': class_name, 'class_desc': class_desc,
'class_student_data': json.dumps(students)})
return HttpResponse(template.render(context))
@ensure_csrf_cookie
@require_GET
def display_student_details(request):
"""Displays detailed information about a student.
GET data params:
Id: A student ID number.
Args:
request: django.http.HttpRequest object
"""
template = get_template('student_details.html')
params = request.GET if request.GET else None
# TODO([email protected]): standardize IDs to lower case.
if (not params or
'Id' not in params or
not params['Id']):
return redirect('student list')
student_obj = Student.objects.get(pk=params['Id'])
student_data = student_obj.get_jsonable_repr()
classes = [_make_class_anchor_html(c) for c in student_obj.class_list]
student_data['class_list'] = classes
context = RequestContext(request, student_data)
return HttpResponse(template.render(context))
| mit | -7,785,976,437,739,663,000 | 28.123288 | 79 | 0.699671 | false |
ARM-software/armnn | python/pyarmnn/examples/example_utils.py | 1 | 6828 | # Copyright 2020 NXP
# SPDX-License-Identifier: MIT
from urllib.parse import urlparse
import os
from PIL import Image
import pyarmnn as ann
import numpy as np
import requests
import argparse
import warnings
def parse_command_line(desc: str = ""):
"""Adds arguments to the script.
Args:
desc(str): Script description.
Returns:
Namespace: Arguments to the script command.
"""
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("-v", "--verbose", help="Increase output verbosity",
action="store_true")
return parser.parse_args()
def __create_network(model_file: str, backends: list, parser=None):
"""Creates a network based on a file and parser type.
Args:
model_file (str): Path of the model file.
backends (list): List of backends to use when running inference.
        parser: Parser instance (pyarmnn.ITfLiteParser / pyarmnn.IOnnxParser / ...).
Returns:
int: Network ID.
        IParser: Parser instance.
IRuntime: Runtime object instance.
"""
args = parse_command_line()
options = ann.CreationOptions()
runtime = ann.IRuntime(options)
if parser is None:
# try to determine what parser to create based on model extension
_, ext = os.path.splitext(model_file)
if ext == ".onnx":
parser = ann.IOnnxParser()
elif ext == ".tflite":
parser = ann.ITfLiteParser()
assert (parser is not None)
network = parser.CreateNetworkFromBinaryFile(model_file)
preferred_backends = []
for b in backends:
preferred_backends.append(ann.BackendId(b))
opt_network, messages = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(),
ann.OptimizerOptions())
if args.verbose:
for m in messages:
warnings.warn(m)
net_id, w = runtime.LoadNetwork(opt_network)
if args.verbose and w:
warnings.warn(w)
return net_id, parser, runtime
def create_tflite_network(model_file: str, backends: list = ['CpuAcc', 'CpuRef']):
"""Creates a network from a tflite model file.
Args:
model_file (str): Path of the model file.
backends (list): List of backends to use when running inference.
Returns:
int: Network ID.
int: Graph ID.
ITFliteParser: TF Lite parser instance.
IRuntime: Runtime object instance.
"""
net_id, parser, runtime = __create_network(model_file, backends, ann.ITfLiteParser())
graph_id = parser.GetSubgraphCount() - 1
return net_id, graph_id, parser, runtime
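# e.g. (the model path is hypothetical):
#
#   net_id, graph_id, parser, runtime = create_tflite_network('model.tflite')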
def create_onnx_network(model_file: str, backends: list = ['CpuAcc', 'CpuRef']):
"""Creates a network from an onnx model file.
Args:
model_file (str): Path of the model file.
backends (list): List of backends to use when running inference.
Returns:
int: Network ID.
IOnnxParser: ONNX parser instance.
IRuntime: Runtime object instance.
"""
return __create_network(model_file, backends, ann.IOnnxParser())
def preprocess_default(img: Image, width: int, height: int, data_type, scale: float, mean: list,
stddev: list):
"""Default preprocessing image function.
Args:
img (PIL.Image): PIL.Image object instance.
width (int): Width to resize to.
height (int): Height to resize to.
data_type: Data Type to cast the image to.
scale (float): Scaling value.
mean (list): RGB mean offset.
stddev (list): RGB standard deviation.
Returns:
np.array: Resized and preprocessed image.
"""
img = img.resize((width, height), Image.BILINEAR)
img = img.convert('RGB')
img = np.array(img)
img = np.reshape(img, (-1, 3)) # reshape to [RGB][RGB]...
img = ((img / scale) - mean) / stddev
img = img.flatten().astype(data_type)
return img
def load_images(image_files: list, input_width: int, input_height: int, data_type=np.uint8,
scale: float = 1., mean: list = [0., 0., 0.], stddev: list = [1., 1., 1.],
preprocess_fn=preprocess_default):
"""Loads images, resizes and performs any additional preprocessing to run inference.
Args:
        image_files (list): List of image file paths.
input_width (int): Width to resize to.
input_height (int): Height to resize to.
data_type: Data Type to cast the image to.
scale (float): Scaling value.
mean (list): RGB mean offset.
stddev (list): RGB standard deviation.
preprocess_fn: Preprocessing function.
Returns:
np.array: Resized and preprocessed images.
"""
images = []
for i in image_files:
img = Image.open(i)
img = preprocess_fn(img, input_width, input_height, data_type, scale, mean, stddev)
images.append(img)
return images
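# Typical call for a 224x224 uint8 quantized model (the file name is
# hypothetical):
#
#   images = load_images(['cat.jpg'], 224, 224, np.uint8)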
def load_labels(label_file: str):
"""Loads a labels file containing a label per line.
Args:
label_file (str): Labels file path.
Returns:
list: List of labels read from a file.
"""
with open(label_file, 'r') as f:
        labels = [l.rstrip() for l in f]
    return labels
def print_top_n(N: int, results: list, labels: list, prob: list):
"""Prints TOP-N results
Args:
N (int): Result count to print.
results (list): Top prediction indices.
labels (list): A list of labels for every class.
prob (list): A list of probabilities for every class.
Returns:
None
"""
assert (len(results) >= 1 and len(results) == len(labels) == len(prob))
for i in range(min(len(results), N)):
print("class={0} ; value={1}".format(labels[results[i]], prob[results[i]]))
def download_file(url: str, force: bool = False, filename: str = None, dest: str = "tmp"):
"""Downloads a file.
Args:
url (str): File url.
force (bool): Forces to download the file even if it exists.
        filename (str): Renames the file when set.
        dest (str): Directory to store the downloaded file in.
Returns:
str: Path to the downloaded file.
"""
if filename is None: # extract filename from url when None
filename = urlparse(url)
filename = os.path.basename(filename.path)
    if dest is not None:
if not os.path.exists(dest):
os.makedirs(dest)
filename = os.path.join(dest, filename)
print("Downloading '{0}' from '{1}' ...".format(filename, url))
if not os.path.exists(filename) or force is True:
r = requests.get(url)
with open(filename, 'wb') as f:
f.write(r.content)
print("Finished.")
else:
print("File already exists.")
return filename
| mit | -5,464,050,138,903,942,000 | 29.895928 | 96 | 0.616139 | false |
symbooglix/boogie-runner | prepare-smoke-tests.py | 1 | 6375 | #!/usr/bin/env python
# vim: set sw=2 ts=2 softtabstop=2 expandtab:
"""
Script to run a Symbooglix's AxiomAndEntryRequiresCheckTransformPass
pass on a set of boogie programs (from a program List) in preparation
for running a smoke test to check that all the assumptions leading to
an entry point are satisfiable.
"""
import argparse
import logging
import multiprocessing
import os
import pprint
from BoogieRunner import ProgramListLoader
from BoogieRunner import EntryPointFinder
import traceback
import yaml
import signal
import sys
import subprocess
_logger = None
futureToRunner = None
def handleInterrupt(signum, frame):
logging.info('Received signal {}'.format(signum))
if futureToRunner != None:
cancel(futureToRunner)
def entryPoint(args):
global _logger, futureToRunner
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-l","--log-level",type=str, default="info", dest="log_level", choices=['debug','info','warning','error'])
parser.add_argument("--rprefix", default=os.getcwd(), help="Prefix for relative paths for program_list")
parser.add_argument("-j", "--jobs", default=multiprocessing.cpu_count(), type=int, help="jobs to run in parallel")
parser.add_argument("input_program_list", help="File containing list of Boogie programs")
parser.add_argument("output_dir", help="Directory to create working transformed programs in")
parser.add_argument("output_program_list")
parser.add_argument("--spr-path", dest='spr_path', required=True, help="Path to Symbooglix pass runner tool (spr)")
# Options to set the entry point
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--entry-point", dest='entry_point', default=None, help="Entry point name")
group.add_argument("--entry-point-from-bool-attribute", dest='entry_point_from_bool_attribute',
default=None, help="Get entry point from bool attribute on procedure e.g. {:entry_point}")
pargs = parser.parse_args()
if pargs.jobs < 1:
    logging.error('Jobs must be >= 1')
return 1
logLevel = getattr(logging, pargs.log_level.upper(),None)
if logLevel == logging.DEBUG:
logFormat = '%(levelname)s:%(threadName)s: %(filename)s:%(lineno)d %(funcName)s() : %(message)s'
else:
logFormat = '%(levelname)s:%(threadName)s: %(message)s'
logging.basicConfig(level=logLevel, format=logFormat)
_logger = logging.getLogger(__name__)
# Check paths that must exist
for pathToCheck in [ pargs.input_program_list, pargs.spr_path]:
if not os.path.exists(pathToCheck):
_logger.error('"{}" does not exist'.format(pathToCheck))
return 1
# Check paths that must not already exist
for pathToCheck in [ pargs.output_program_list, pargs.output_dir]:
if os.path.exists(pathToCheck):
_logger.error('Refusing to overwrite "{}"'.format(pathToCheck))
return 1
# Load list of programs
programList = None
try:
_logger.debug('Loading program_list from "{}"'.format(pargs.input_program_list))
programList = ProgramListLoader.load(pargs.input_program_list, pargs.rprefix)
except (ProgramListLoader.ProgramListLoaderException) as e:
_logger.error(e)
_logger.debug(traceback.format_exc())
return 1
# Compute list index to entry point name mapping
entryPoints = [ ]
_logger.info('Getting program entry points...')
for programPath in programList:
if pargs.entry_point != None:
entryPoints.append(pargs.entry_point)
else:
assert pargs.entry_point_from_bool_attribute != None
entryPointName = EntryPointFinder.findEntryPointWithBooleanAttribute(pargs.entry_point_from_bool_attribute, programPath)
assert entryPointName != None
entryPoints.append(entryPointName)
# Generate new programs
_logger.info('Generating new programs')
tasks = [ ]
os.mkdir(pargs.output_dir)
for index, (programPath, entryPoint) in enumerate(zip(programList, entryPoints)):
outputPath = os.path.join(pargs.output_dir, 'program-{}.bpl'.format(index))
tasks.append( ProgramGenTask(programPath, entryPoint, outputPath, pargs.spr_path) )
# Run
if pargs.jobs == 1:
for t in tasks:
t.run()
else:
signal.signal(signal.SIGINT, handleInterrupt)
signal.signal(signal.SIGTERM, handleInterrupt)
import concurrent.futures
with concurrent.futures.ThreadPoolExecutor(max_workers=pargs.jobs) as executor:
futureToRunner = { executor.submit(t.run) : t for t in tasks }
for future in concurrent.futures.as_completed(futureToRunner):
r = futureToRunner[future]
_logger.info('{} runner finished'.format(r.outputPath))
if future.exception():
e = future.exception()
cancel(futureToRunner)
_logger.error(e)
return 1
if r.exitCode != 0:
_logger.error('Tool failed')
cancel(futureToRunner)
return 1
# Write program list
with open(pargs.output_program_list, 'w') as f:
for t in tasks:
if t.exitCode is not None:
f.write('# Generated from {}\n'.format(t.programPath))
f.write('{}\n\n'.format(t.outputPath))
else:
f.write('# Skipping program "{}" due to failure\n'.format(t.programPath))
_logger.info('Finished')
return 0
def cancel(futureToRunner):
_logger.warning('Cancelling futures')
for future in futureToRunner.keys():
future.cancel()
class ProgramGenTask:
def __init__(self, programPath, entryPoint, outputPath, sprPath):
assert isinstance(programPath, str)
assert isinstance(entryPoint, str)
assert isinstance(outputPath, str)
assert isinstance(sprPath, str)
assert os.path.exists(programPath)
self.programPath = programPath
self.entryPoint = entryPoint
self.outputPath = outputPath
self.sprPath = sprPath
self.exitCode = None
def run(self):
cmdLine = [ self.sprPath,
'-e', self.entryPoint,
'-p', 'Transform.AxiomAndEntryRequiresCheckTransformPass',
'-o', self.outputPath,
self.programPath
]
_logger.debug('Running:\n{}'.format(pprint.pformat(cmdLine)))
exitCode = subprocess.call(cmdLine)
_logger.debug('Finished')
self.exitCode = exitCode
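# Illustrative sketch (not part of the original script): building and running
# a single task by hand. All paths below are hypothetical.
#
# task = ProgramGenTask(programPath='examples/foo.bpl',
# entryPoint='main',
# outputPath='output/program-0.bpl',
# sprPath='/usr/local/bin/spr')
# task.run()
# assert task.exitCode == 0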
if __name__ == '__main__':
try:
sys.exit(entryPoint(sys.argv[1:]))
except KeyboardInterrupt:
sys.exit(2)
| bsd-3-clause | -8,607,544,325,035,027,000 | 34.614525 | 128 | 0.694431 | false |
datawire/mdk | functionaltests/webservers/djangoserver/settings.py | 1 | 3262 | """
Django settings for djangoserver project.
Generated by 'django-admin startproject' using Django 1.9.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'vo%5*42w1e@@eh%7)oss9t#fh9q8@a3(h2#$qg+63@0w#5wy-s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'djangoserver.apps.MyMDKAppConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'mdk.django.MDKSessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangoserver.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djangoserver.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| apache-2.0 | 610,415,732,512,119,600 | 25.737705 | 91 | 0.692213 | false |
erikdejonge/newsrivr | daemons/hn.py | 1 | 17357 | """
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import open
from builtins import int
from future import standard_library
standard_library.install_aliases()
from builtins import chr
from builtins import str
from builtins import range
from past.utils import old_div
from xml.sax.saxutils import escape
import urllib.request, urllib.parse, urllib.error, re, os, urllib.parse
import html.parser, feedparser
from BeautifulSoup import BeautifulSoup, Comment
from pprint import pprint
import codecs
import sys
import html.entities
streamWriter = codecs.lookup("utf-8")[-1]
sys.stdout = streamWriter(sys.stdout)
HN_RSS_FEED = "http://news.ycombinator.com/rss"
negative_str = "([A-Z,a-z,0-9,-,_ ]*comments[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*comment[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*bcomments[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*meta[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*footer[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*footnote[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*foot[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*bottom[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*klasbox[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*side[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*inner[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*sidebar[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*hide[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*component[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*reactie[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*ad[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*ads[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*transcript[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*react[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*transcript[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*transcriptText[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*error[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*related[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*also[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*share[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*sideblock[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*policy[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*related[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*social[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*reflist[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*postmetadata[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*references[A-Z,a-z,0-9,-,_ ]*)|"
negative_str += "([A-Z,a-z,0-9,-,_ ]*promo[A-Z,a-z,0-9,-,_ ]*)"
NEGATIVE = re.compile(negative_str)
super_negative_str = "([A-Z,a-z,0-9,-,_ ]*comment[A-Z,a-z,0-9,-,_ ]*)|"
super_negative_str += "([A-Z,a-z,0-9,-,_ ]*voting[A-Z,a-z,0-9,-,_ ]*)|"
super_negative_str += "([A-Z,a-z,0-9,-,_ ]*reactie[A-Z,a-z,0-9,-,_ ]*)|"
super_negative_str += "([A-Z,a-z,0-9,-,_ ]*reaction[A-Z,a-z,0-9,-,_ ]*)|"
super_negative_str += "([A-Z,a-z,0-9,-,_ ]*idgedragregelsusercontent[A-Z,a-z,0-9,-,_ ]*)|"
super_negative_str += "([A-Z,a-z,0-9,-,_ ]*vote[A-Z,a-z,0-9,-,_ ]*)"
SUPERNEGATIVE = re.compile(super_negative_str)
positive_str = "([A-Z,a-z,0-9,-,_ ]*summary[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*post[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*hentry[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*entry[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*content[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*text[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*tekst[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*venue[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*venueInfo[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*venueDetails[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*body[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*bodycontent[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*content permalink[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*wrapper[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*article[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*articleblock[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*text[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*tekst[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*lead[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*leadarticle[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*story[A-Z,a-z,0-9,-,_ ]*)|"
positive_str += "([A-Z,a-z,0-9,-,_ ]*permalink[A-Z,a-z,0-9,-,_ ]*)"
POSITIVE = re.compile(positive_str)
PUNCTUATION = re.compile("""[!"#$%&\"()*+,-./:;<=>?@[\\]^_`{|}~]""")
MAXLINKS = 50
def latin1_to_ascii(unicrap):
xlate={0xc0:'A', 0xc1:'A', 0xc2:'A', 0xc3:'A', 0xc4:'A', 0xc5:'A',
0xc6:'Ae', 0xc7:'C',
0xc8:'E', 0xc9:'E', 0xca:'E', 0xcb:'E',
0xcc:'I', 0xcd:'I', 0xce:'I', 0xcf:'I',
0xd0:'Th', 0xd1:'N',
0xd2:'O', 0xd3:'O', 0xd4:'O', 0xd5:'O', 0xd6:'O', 0xd8:'O',
0xd9:'U', 0xda:'U', 0xdb:'U', 0xdc:'U',
0xdd:'Y', 0xde:'th', 0xdf:'ss',
0xe0:'a', 0xe1:'a', 0xe2:'a', 0xe3:'a', 0xe4:'a', 0xe5:'a',
0xe6:'ae', 0xe7:'c',
0xe8:'e', 0xe9:'e', 0xea:'e', 0xeb:'e',
0xec:'i', 0xed:'i', 0xee:'i', 0xef:'i',
0xf0:'th', 0xf1:'n',
0xf2:'o', 0xf3:'o', 0xf4:'o', 0xf5:'o', 0xf6:'o', 0xf8:'o',
0xf9:'u', 0xfa:'u', 0xfb:'u', 0xfc:'u',
0xfd:'y', 0xfe:'th', 0xff:'y',
0xa1:'!', 0xa2:'{cent}', 0xa3:'{pound}', 0xa4:'{currency}',
0xa5:'{yen}', 0xa6:'|', 0xa7:'{section}', 0xa8:'{umlaut}',
0xa9:'{C}', 0xaa:'{^a}', 0xab:'<<', 0xac:'{not}',
0xad:'-', 0xae:'{R}', 0xaf:'_', 0xb0:'{degrees}',
0xb1:'{+/-}', 0xb2:'{^2}', 0xb3:'{^3}', 0xb4:"'",
0xb5:'{micro}', 0xb6:'{paragraph}', 0xb7:'*', 0xb8:'{cedilla}',
0xb9:'{^1}', 0xba:'{^o}', 0xbb:'>>',
0xbc:'{1/4}', 0xbd:'{1/2}', 0xbe:'{3/4}', 0xbf:'?',
0xd7:'*', 0xf7:'/'
}
r = ''
for i in unicrap:
if ord(i) in xlate:
r += xlate[ord(i)]
elif ord(i) >= 0x80:
pass
else:
r += str(i)
return r
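# Worked example (illustrative): accented Latin-1 characters map to ASCII
# lookalikes, e.g. latin1_to_ascii(u'caf\xe9') returns 'cafe' because 0xe9
# ('e' acute) translates to 'e' in the xlate table above.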
def toUTF8(data):
try:
data = data.encode("utf-8")
except:
data = latin1_to_ascii(data)
return data
def text2simpleHtml(data):
data = data.replace("<h1"," <b").replace("</h1>","</b><br><br>")
data = data.replace("<h2"," <b").replace("</h2>","</b><br>")
data = data.replace("<h3>","").replace("</h3>","<br>")
VALID_TAGS = ["strong", "b", "i", "table", "th", "tr", "td", "a", "code", "em", "p", "ul", "li", "br"]
soup = BeautifulSoup(data)
for tag in soup.findAll(True):
if tag.name not in VALID_TAGS:
tag.hidden = True
return soup.renderContents()
def _text(node):
return " ".join(node.findAll(text=True))
def get_link_density(elem):
link_length = len("".join([i.text or "" for i in elem.findAll("a")]))
text_length = len(_text(elem))
return old_div(float(link_length), max(text_length, 1))
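# Illustrative example: for an element whose anchor text totals 40 of its 100
# characters, get_link_density returns 0.4; grabContent later drops containers
# whose density exceeds roughly 0.3-0.5.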
def removeFrontBreaks(s):
try:
soup = BeautifulSoup(s)
whitespace = True
# Strip only the leading <br>/<p> tags; stop at the first real content tag.
for tag in soup.findAll(True):
tagname = str(tag.name)
if tagname not in ("br", "p"):
whitespace = False
if whitespace and tagname in ("br", "p"):
tag.extract()
return str(soup).strip()
except Exception as e:
clog(e)
return s
def convertentity(m):
"""Convert a HTML entity into normal string (ISO-8859-1)"""
if m.group(1)=='#':
try:
return chr(int(m.group(2)))
except ValueError:
return '&#%s;' % m.group(2)
try:
return html.entities.entitydefs[m.group(2)]
except KeyError:
return '&%s;' % m.group(2)
def unquotehtml(s):
"""Convert a HTML quoted string into normal string (ISO-8859-1).
Works with &#XX; and with > etc."""
return re.sub(r'&(#?)(.+?);',convertentity,s)
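# Illustrative example: unquotehtml('a &gt; b &amp; c') returns 'a > b & c',
# and numeric references such as '&#65;' decode to 'A'.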
def getNumLinks(s):
try:
cnt = 0
soup = BeautifulSoup(s)
for a in soup.findAll("a"):
if "href" in a:
#print a
cnt += 1
return cnt
except:
return 0
def removeEmptyParas(html):
foundempty = False
soup = BeautifulSoup(html)
for p in soup.findAll("p"):
if "id" in p:
if "error_" in p["id"]:
p.extract()
if 0==len(p.text.strip().replace("\n", "")):
if foundempty:
p.extract()
foundempty = True
else:
foundempty = False
return soup.renderContents()
def removeEmptyLis(html):
soup = BeautifulSoup(html)
for li in soup.findAll("li"):
for a in li.findAll("a"):
if len(a.contents)>0:
if len(a.contents[0])<5:
a.extract()
if len(li.renderContents().strip())==0:
li.extract()
else:
for x in li.findAll():
if len(x.renderContents().strip())==0:
li.extract()
for ul in soup.findAll("ul"):
if 0==len(ul.findAll("li")):
ul.extract()
return soup.renderContents()
def removeExtraBreaks(s):
try:
l = []
brcnt = 0
soup = BeautifulSoup(s)
for tag in soup.findAll():
if tag.name=="p":
if len(tag.text.strip().replace("\n", ""))<1:
tag.extract()
brcnt += 1
if tag.name=="br":
brcnt += 1
if brcnt>1:
tag.extract()
else:
brcnt = 0
return str(soup)
except Exception as e:
clog(e)
return s
def grabContent(link, page):
# The parameter was originally named "html", which shadowed the imported
# html.parser module and broke the except clauses below; renamed to "page".
if ">" in page:
page = unquotehtml(page)
page = "<!DOCTYPE html><html><head><meta charset=\"utf-8\"></head><body>"+page+"</body></html>"
#open("usedforscoring.html", "w").write(page)
#exit(1)
replaceBrs = re.compile("<br */? *>[ \r\n]*<br */? *>")
page = re.sub(replaceBrs, "</p><p>", page)
try:
soup = BeautifulSoup(page)
except html.parser.HTMLParseError as e:
try:
soup = BeautifulSoup(text2simpleHtml(page))
except html.parser.HTMLParseError:
return ""
#print str(soup)
# REMOVE SCRIPTS
for s in soup.findAll("div"):
if get_link_density(s)>0.5 and len(s.renderContents())>1000:
s.extract()
if "id" in s:
if SUPERNEGATIVE.match(str(s["id"]).lower()):
s.extract()
if "class" in s:
if SUPERNEGATIVE.match(str(s["class"]).lower()):
s.extract()
for s in soup.findAll("script"):
s.extract()
for a in soup.findAll("a"):
if "href" in a:
if "javascript:" in a["href"]:
a.extract()
if "onclick" in a:
if "return " in a["onclick"]:
a.extract()
allParagraphs = soup.findAll("p")
topParent = None
parents = []
for paragraph in allParagraphs:
parent = paragraph.parent
if (parent not in parents):
parents.append(parent)
parent.score = 0
if ("class" in parent):
if (NEGATIVE.match(parent["class"].lower())):
#print parent["class"]
if len(parent.findAll('a'))>MAXLINKS:
parent.score -= 500
parent.score -= 50
if (POSITIVE.match(parent["class"].lower())):
if len(parent.findAll('a'))<MAXLINKS:
parent.score += 25
else:
parent.score -= 150
parent.score += 50
if ("id" in parent):
if (NEGATIVE.match(parent["id"].lower())):
#print parent["id"]
if len(parent.findAll('a'))>MAXLINKS:
parent.score -= 500
parent.score -= 50
if (POSITIVE.match(parent["id"].lower())):
if len(parent.findAll('a'))<MAXLINKS:
parent.score += 25
else:
parent.score -= 150
parent.score += 50
if (parent.score == None):
parent.score = 0
innerText = paragraph.renderContents() #"".join(paragraph.findAll(text=True))
if (len(innerText) > 10):
parent.score += 1
if (len(innerText) > 300):
parent.score += 2
parent.score += innerText.count(",")*3
parent.score += innerText.count(".")*3
for parent in parents:
#print parent.score
#print str(parent )
#print "-------------"
if ((not topParent) or (parent.score > topParent.score)):
topParent = parent
if (not topParent):
return ""
# REMOVE LINK"D STYLES
styleLinks = soup.findAll("link", attrs={"type" : "text/css"})
for s in styleLinks:
s.extract()
# REMOVE ON PAGE STYLES
for s in soup.findAll("style"):
s.extract()
# CLEAN STYLES FROM ELEMENTS IN TOP PARENT
for ele in topParent.findAll(True):
del(ele["style"])
del(ele["class"])
#print str(ele)
#print "-----"
killDivs(topParent)
clean(topParent, "form")
clean(topParent, "object")
clean(topParent, "iframe")
fixLinks(topParent, link)
for s in topParent.findAll("ul"):
if get_link_density(s)>0.3:
s.extract()
lis = topParent.findAll("li")
if len(lis)>50:
for li in lis:
li.extract()
for li in lis:
if len(li)>1:
contents = str(li.contents[1]).replace("\n", "").replace(" ", "").replace("<br>", "").replace("<br/>", "").replace("<br />", "").replace("<p></p>", "")
#print "c", contents
if len(contents)==0:
li.extract()
comments = topParent.findAll(text=lambda text:isinstance(text, Comment))
[comment.extract() for comment in comments]
html2 = topParent.renderContents()
html2 = removeFrontBreaks(html2)
html2 = html2.replace("\n", " ")
for i in range(0, 10):
html2 = html2.replace(" ", " ")
html2 = html2.replace("<div></div>", "")
html2 = html2.replace("<p>\xc2\xa0</p>", "")
html2 = html2.replace("<p></p>", "<br/>")
html2 = html2.replace("<p><br /></p>", "")
#html2 = html2.replace("\xc2\xa9", "")#
html2 = re.sub(r'© (\w+.\w+)', "", html2)
html2 = re.sub(r'© (\w+)', "", html2)
html2 = re.sub(r'\xc2\xa9 (\w+.\w+)', "", html2)
html2 = re.sub(r'\xc2\xa9 (\w+)', "", html2)
#if getNumLinks(html2)>25:
# html2 = "html ignored, more then 25 links"
#print get_link_density(BeautifulSoup(html2))
html2 = removeEmptyLis(html2)
html2 = toUTF8(text2simpleHtml(html2)).replace("a href", "a target='blank' href")
html2 = removeEmptyParas(html2)
html2 = removeExtraBreaks(html2)
html2 = html2.replace("</strong>", "</strong><br/>")
html2 = html2.replace("</b>", "</b><br/>")
#detect
return html2
def fixLinks(parent, link):
tags = parent.findAll(True)
for t in tags:
if ("href" in t):
t["href"] = urllib.parse.urljoin(link, t["href"])
if ("src" in t):
t["src"] = urllib.parse.urljoin(link, t["src"])
def clean(top, tag, minWords=10000):
tags = top.findAll(tag)
for t in tags:
if (t.renderContents().count(" ") < minWords):
t.extract()
def killDivs(parent):
divs = parent.findAll("div")
for d in divs:
p = len(d.findAll("p"))
img = len(d.findAll("img"))
li = len(d.findAll("li"))
a = len(d.findAll("a"))
embed = len(d.findAll("embed"))
pre = len(d.findAll("pre"))
#code = len(d.findAll("code"))
if (d.renderContents().count(",") < 10):
if (pre == 0):# and (code == 0)):
if ((img > p ) or (li > p) or (a > p) or (p == 0) or (embed > 0)):
d.extract()
def upgradeLink(link):
link = link.encode("utf-8")
if (not (link.startswith("http://news.ycombinator.com") or link.endswith(".pdf"))):
linkFile = "upgraded/" + re.sub(PUNCTUATION, "_", link)
if (os.path.exists(linkFile)):
return open(linkFile).read()
else:
content = ""
try:
html = urllib.request.urlopen(link).read()
content = grabContent(link, html)
filp = open(linkFile, "w")
filp.write(content)
filp.close()
except IOError:
pass
return content
else:
return ""
def upgradeFeed(feedUrl):
feedData = urllib.request.urlopen(feedUrl).read()
upgradedLinks = []
parsedFeed = feedparser.parse(feedData)
for entry in parsedFeed.entries:
upgradedLinks.append((entry, upgradeLink(entry.link)))
rss = """<rss version="2.0">
<channel>
<title>Hacker News</title>
<link>http://news.ycombinator.com/</link>
<description>Links for the intellectually curious, ranked by readers.</description>
"""
for entry, content in upgradedLinks:
rss += u"""
<item>
<title>%s</title>
<link>%s</link>
<comments>%s</comments>
<description>
<![CDATA[<a href="%s">Comments</a><br/>%s<br/><a href="%s">Comments</a>]]>
</description>
</item>
""" % (entry.title, escape(entry.link), escape(entry.comments), entry.comments, content.decode("utf-8"), entry.comments)
rss += """
</channel>
</rss>"""
return rss
def clog(s):
from time import gmtime, strftime
s= str(s)
print('\033[93m'+strftime("%Y-%m-%d %H:%M:%S", gmtime())+": "+s+'\033[0m')
if __name__ == "__main__":
c = open("usedforscoring.html", "r").read()
soup = BeautifulSoup(grabContent('x', c))
clog(soup.prettify())
| gpl-2.0 | -2,107,189,339,879,482,400 | 30.965009 | 159 | 0.567667 | false |
johnoleary/Farkel | game.py | 1 | 4955 | ####################
### Farkel Game ####
####################
import dice
import player
dice_list = [dice.Dice() for i in range(6)]
### Set up players ###
number_of_players = int(raw_input("How many players? "))
player_list = []
for i in range(number_of_players):
player_list.append(player.Player(raw_input("What is player "+str(i+1)+"'s name?\n")))
print player_list[0].name
### Set up variables for game play ###
game_over = False
##########
## GAME ##
##########
def check_validity_of_selection(choosen_dice):
if len(choosen_dice) == 0:
return False
for i in range(len(choosen_dice)):
if choosen_dice[i] > 6 or choosen_dice[i] < 1:
return False
amounts_of_dice = [[0]*6,[0]*6] # First list is values of dice_list. Second is from chosen dice.
for i in range(len(dice_list)):
if not dice_list[i].set_aside:
amounts_of_dice[0][dice_list[i].current_side-1] += 1
for i in range(len(choosen_dice)):
amounts_of_dice[1][choosen_dice[i]-1] += 1
for i in range(6):
if amounts_of_dice[0][i] < amounts_of_dice[1][i]:
return False
return True
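# Illustrative example: with unrolled dice showing [1, 1, 5], choosing [1, 5]
# is valid, while [1, 1, 1] is rejected because only two 1s are available.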
## This needs to be seriously fleshed out
def score_dice(choosen_dice):
running_score = 0
amounts_of_dice = [0]*6
## Stores amount of each side show.
for i in range(len(choosen_dice)):
amounts_of_dice[choosen_dice[i]-1] += 1
## Check for special patterns
groupsOfThree = 0;
groupsOfTwo = 0;
straight = 0;
for i in range(6):
if amounts_of_dice[i] == 3:
groupsOfThree += 1
if amounts_of_dice[i] == 2:
groupsOfTwo += 1
if amounts_of_dice[i] == 1:
straight += 1
# Add this in later
if groupsOfThree == 2:
pass
elif groupsOfTwo == 3:
pass
elif straight == 6:
pass
for i in range(6):
if amounts_of_dice[i] == 6:
if i == 0:
running_score += 1000*2
else:
running_score += (i+1)*100*2
amounts_of_dice[i] = amounts_of_dice[i] - 6
if amounts_of_dice[i] >= 3:
if i == 0:
running_score += 1000
else:
running_score += (i+1)*100
amounts_of_dice[i] = amounts_of_dice[i] - 3
running_score += 100*amounts_of_dice[0]
running_score += 50*amounts_of_dice[4]
return running_score
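# Illustrative example: score_dice([1, 1, 1, 5]) returns 1050, since the three
# 1s score 1000 and the lone 5 scores 50; a lone 1 would add 100 instead.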
def choose_dice(dice_list): # Sets aside any dice that are pulled out.
valid_entry = False
while not valid_entry:
tempList = raw_input("Which dice would you like to pull out? ")
dice_to_keep = map(int, tempList.split())
valid_entry = check_validity_of_selection(dice_to_keep)
if not valid_entry:
print "I'm sorry, that is not a valid entry. Try again."
# Going to need a way to check that the chosen dice can be legally chosen... #
for i in range(len(dice_to_keep)):
for j in range(len(dice_list)):
if dice_list[j].current_side == dice_to_keep[i] and not dice_list[j].set_aside:
dice_list[j].set_aside = True
break
return dice_to_keep
def run_turn(player, starting_score):
this_rounds_score = starting_score
print player_list[player].name+"'s turn."
print "\n=== Roll ==="
dice_aside = 0
for i in range(6):
if not dice_list[i].set_aside:
print dice_list[i].roll()
else:
dice_aside += 1
if dice_aside > 0:
print "--- Set Aside ---"
for i in range(6):
if dice_list[i].set_aside:
print dice_list[i].current_side
temp_score = score_dice(choose_dice(dice_list))
this_rounds_score += temp_score
if temp_score > 0:
dice_remaining = 0
for i in range(6):
if not dice_list[i].set_aside:
dice_remaining += 1
if dice_remaining == 0:
print "You must roll again for removing all dice. Good luck!"
for i in range(6):
dice_list[i].set_aside = False
run_turn(player, this_rounds_score)
else:
if 0 == int(raw_input("Type 1 to continue rolling, type 0 to stop. ")):
player_list[player].current_score += this_rounds_score
else:
run_turn(player, this_rounds_score)
else:
print "That's unfortunate."
current_player = 0
final_round = False
final_round_countdown = number_of_players # Used to allow each player to get one more chance to play after someone gets 5000
###### Main Loop ######
while not game_over:
## Loop for each turn
run_turn(current_player, 0)
print player_list[current_player].name+" has a total of "+str(player_list[current_player].current_score)+" points."
## Check if a player has enough points to win
if player_list[current_player].current_score >= 5000:
final_round = True
## Prepare everything for the next iteration
if final_round:
final_round_countdown -= 1
if final_round_countdown <= 0:
game_over = True
current_player += 1
current_player = current_player % number_of_players ## To cycle through the players
for i in range(6):
dice_list[i].set_aside = False
### Finish the Game ###
winning_player_idx = -1
top_score = 0
for i in range(number_of_players):
if player_list[i].current_score > top_score:
winning_player_idx = i
top_score = player_list[i].current_score
# Need to figure out how to handle a tie
print player_list[winning_player_idx].name+" wins with "+str(top_score)+" points!"
| mit | 8,157,945,392,773,263,000 | 24.152284 | 124 | 0.655298 | false |
chrisdickinson/multipart | setup.py | 1 | 1572 | from distutils.core import setup
import os
# Stolen from django-registration
# Compile the list of packages available, because distutils doesn't have
# an easy way to do this.
packages, data_files = [], []
root_dir = os.path.dirname(__file__)
if root_dir:
os.chdir(root_dir)
for dirpath, dirnames, filenames in os.walk('multipart'):
# Ignore dirnames that start with '.'
for i, dirname in enumerate(dirnames):
if dirname.startswith('.'): del dirnames[i]
if '__init__.py' in filenames:
pkg = dirpath.replace(os.path.sep, '.')
if os.path.altsep:
pkg = pkg.replace(os.path.altsep, '.')
packages.append(pkg)
elif filenames:
prefix = dirpath[10:] # Strip "multipart/" or "multipart\"
for f in filenames:
data_files.append(os.path.join(prefix, f))
setup(
name='multipart',
version='0.1',
description='Two helper functions for creating multipart encoded bodies for httplib2',
author='Chris Dickinson',
author_email='[email protected]',
url='http://github.com/chrisdickinson/multipart/',
packages=packages,
package_data={'multipart': data_files},
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
],
)
| bsd-3-clause | -832,142,934,570,517,000 | 32.446809 | 90 | 0.637405 | false |
xkmato/tracpro | tracpro/contacts/tests/test_views.py | 1 | 8868 | from __future__ import unicode_literals
import datetime
import json
import pytz
from temba_client.types import Contact as TembaContact, Run
from django.core.urlresolvers import reverse
from django.utils import timezone
from tracpro.polls.models import Response
from tracpro.test import factories
from tracpro.test.cases import TracProDataTest
from ..models import Contact
class ContactCRUDLTest(TracProDataTest):
def test_create(self):
url = reverse('contacts.contact_create')
# log in as an org administrator
self.login(self.admin)
response = self.url_get('unicef', url)
self.assertEqual(response.status_code, 200)
# submit with no fields entered
response = self.url_post('unicef', url, dict())
self.assertEqual(response.status_code, 200)
form = response.context['form']
self.assertEqual(len(form.errors), 4, form.errors)
self.assertFormError(response, 'form', 'name', 'This field is required.')
self.assertFormError(response, 'form', 'urn', 'This field is required.')
self.assertFormError(response, 'form', 'region', 'This field is required.')
self.assertFormError(response, 'form', 'group', 'This field is required.')
# submit again with all fields
temba_contact = TembaContact()
temba_contact.uuid = "uuid"
self.mock_temba_client.create_contact.return_value = temba_contact
response = self.url_post('unicef', url, {
'name': "Mo Polls",
'urn_0': "tel",
'urn_1': "5678",
'region': self.region1.pk,
'group': self.group1.pk,
'language': 'eng',
})
self.assertEqual(response.status_code, 302)
self.assertEqual(self.mock_temba_client.create_contact.call_count, 1)
# check new contact and profile
contact = Contact.objects.get(urn='tel:5678')
self.assertEqual(contact.name, "Mo Polls")
self.assertEqual(contact.region, self.region1)
self.assertEqual(contact.group, self.group1)
self.assertEqual(contact.language, 'eng')
# log in as a user
self.login(self.user1)
# try to create contact in region we don't have access to
response = self.url_post('unicef', url, {
'name': "Mo Polls II",
'urn_0': "tel",
'urn_1': "5678",
'region': self.region3.pk,
'group': self.group1.pk,
})
self.assertFormError(response, 'form', 'region',
"Select a valid choice. That choice is not one "
"of the available choices.")
# try again but this time in a region we do have access to
response = self.url_post('unicef', url, {
'name': "Mo Polls II",
'urn_0': "tel",
'urn_1': "5678",
'region': self.region1.pk,
'group': self.group1.pk,
})
self.assertEqual(response.status_code, 302)
# test ajax querying for languages
response = self.url_get('unicef', '%s?initial=' % url)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content), dict(results=[]))
response = self.url_get('unicef', '%s?initial=eng' % url)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content),
dict(results=[dict(id='eng', text='English')]))
response = self.url_get('unicef', '%s?search=' % url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(json.loads(response.content)['results']), 10)
response = self.url_get('unicef', '%s?search=Kin' % url)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content),
dict(results=[dict(id='kin', text='Kinyarwanda')]))
def test_update(self):
# log in as a user
self.login(self.user1)
url = reverse('contacts.contact_update', args=[self.contact1.pk])
response = self.url_get('unicef', url)
self.assertEqual(response.status_code, 200)
response = self.url_post('unicef', url, {
'name': "Morris",
'urn_0': "tel",
'urn_1': "6789",
'region': self.region1.pk,
'group': self.group2.pk,
'language': 'kin',
})
self.assertEqual(response.status_code, 302)
# check updated contact and profile
contact = Contact.objects.get(pk=self.contact1.pk)
self.assertEqual(contact.name, "Morris")
self.assertEqual(contact.urn, 'tel:6789')
self.assertEqual(contact.region, self.region1)
self.assertEqual(contact.group, self.group2)
self.assertEqual(contact.language, 'kin')
# try to update contact in a region we don't have access to
response = self.url_get(
'unicef', reverse('contacts.contact_read', args=[self.contact5.pk]))
self.assertEqual(response.status_code, 404)
# try to update contact from other org
response = self.url_get(
'unicef', reverse('contacts.contact_read', args=[self.contact6.pk]))
self.assertEqual(response.status_code, 404)
def test_read(self):
# log in as a user
self.login(self.user1)
# view contact in a region we have access to
response = self.url_get(
'unicef', reverse('contacts.contact_read', args=[self.contact3.pk]))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Phone")
# try to view contact in a region we don't have access to
response = self.url_get(
'unicef', reverse('contacts.contact_read', args=[self.contact5.pk]))
self.assertEqual(response.status_code, 404)
# try to view contact from other org
response = self.url_get(
'unicef', reverse('contacts.contact_read', args=[self.contact6.pk]))
self.assertEqual(response.status_code, 404)
def test_list(self):
pollrun1 = factories.UniversalPollRun(
poll=self.poll1, conducted_on=datetime.datetime(2014, 12, 1, tzinfo=pytz.UTC))
Response.create_empty(
self.unicef, pollrun1,
Run.create(id=123, contact='C-001', created_on=timezone.now()))
self.login(self.admin)
response = self.url_get('unicef', reverse('contacts.contact_list'))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['object_list']), 5)
# no poll pollruns shown in "All Regions" view
self.assertNotContains(response, "Farm Poll")
url = '{}?search=an'.format(reverse('contacts.contact_list'))
response = self.url_get('unicef', url)
self.assertEqual(len(response.context['object_list']), 2)
self.assertContains(response, "Ann")
self.assertContains(response, "Dan")
self.login(self.user1)
response = self.url_get('unicef', reverse('contacts.contact_list'))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['object_list']), 3)
self.assertContains(response, "Farm Poll")
def test_delete(self):
# log in as an org administrator
self.login(self.admin)
# delete contact
response = self.url_post(
'unicef', reverse('contacts.contact_delete', args=[self.contact1.pk]))
self.assertRedirects(
response, 'http://unicef.testserver/contact/', fetch_redirect_response=False)
self.assertFalse(Contact.objects.get(pk=self.contact1.pk).is_active)
# try to delete contact from other org
response = self.url_post(
'unicef', reverse('contacts.contact_delete', args=[self.contact6.pk]))
self.assertEqual(response.status_code, 404)
self.assertTrue(Contact.objects.get(pk=self.contact6.pk).is_active)
# log in as user
self.login(self.user1)
# delete contact from region we have access to
response = self.url_post(
'unicef', reverse('contacts.contact_delete', args=[self.contact3.pk]))
self.assertRedirects(
response, 'http://unicef.testserver/contact/', fetch_redirect_response=False)
contact = Contact.objects.get(pk=self.contact3.pk)
self.assertFalse(contact.is_active)
self.assertEqual(contact.modified_by, self.user1)
# try to delete contact from region we don't have access to
response = self.url_post(
'unicef', reverse('contacts.contact_delete', args=[self.contact5.pk]))
self.assertEqual(response.status_code, 404)
self.assertTrue(Contact.objects.get(pk=self.contact5.pk).is_active)
| bsd-3-clause | -397,108,231,638,319,000 | 39.309091 | 90 | 0.61705 | false |
agepoly/mezzanine | mezzanine/twitter/models.py | 1 | 6766 | from __future__ import unicode_literals
from future.builtins import str
from datetime import datetime, timedelta
import re
from time import timezone
try:
from urllib.parse import quote
except ImportError:
# Python 2
from urllib import quote
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.html import urlize
from django.utils.timezone import get_default_timezone, make_aware
from django.utils.translation import ugettext_lazy as _
from requests_oauthlib import OAuth1
import requests
from mezzanine.conf import settings
from mezzanine.twitter import QUERY_TYPE_CHOICES, QUERY_TYPE_USER, \
QUERY_TYPE_LIST, QUERY_TYPE_SEARCH
from mezzanine.twitter import get_auth_settings
from mezzanine.twitter.managers import TweetManager
re_usernames = re.compile("@([0-9a-zA-Z+_]+)", re.IGNORECASE)
re_hashtags = re.compile("#([0-9a-zA-Z+_]+)", re.IGNORECASE)
replace_hashtags = "<a href=\"http://twitter.com/search?q=%23\\1\">#\\1</a>"
replace_usernames = "<a href=\"http://twitter.com/\\1\">@\\1</a>"
class TwitterQueryException(Exception):
pass
@python_2_unicode_compatible
class Query(models.Model):
type = models.CharField(_("Type"), choices=QUERY_TYPE_CHOICES,
max_length=10)
value = models.CharField(_("Value"), max_length=140)
interested = models.BooleanField("Interested", default=True)
class Meta:
verbose_name = _("Twitter query")
verbose_name_plural = _("Twitter queries")
ordering = ("-id",)
def __str__(self):
return "%s: %s" % (self.get_type_display(), self.value)
def run(self):
"""
Request new tweets from the Twitter API.
"""
try:
value = quote(self.value)
except KeyError:
value = self.value
urls = {
QUERY_TYPE_USER: ("https://api.twitter.com/1.1/statuses/"
"user_timeline.json?screen_name=%s"
"&include_rts=true" % value.lstrip("@")),
QUERY_TYPE_LIST: ("https://api.twitter.com/1.1/lists/statuses.json"
"?list_id=%s&include_rts=true" % value),
QUERY_TYPE_SEARCH: "https://api.twitter.com/1.1/search/tweets.json"
"?q=%s" % value,
}
try:
url = urls[self.type]
except KeyError:
raise TwitterQueryException("Invalid query type: %s" % self.type)
settings.use_editable()
auth_settings = get_auth_settings()
if not auth_settings:
from mezzanine.conf import registry
if self.value == registry["TWITTER_DEFAULT_QUERY"]["default"]:
# These are some read-only keys and secrets we use
# for the default query (eg nothing has been configured)
auth_settings = (
"KxZTRD3OBft4PP0iQW0aNQ",
"sXpQRSDUVJ2AVPZTfh6MrJjHfOGcdK4wRb1WTGQ",
"1368725588-ldWCsd54AJpG2xcB5nyTHyCeIC3RJcNVUAkB1OI",
"r9u7qS18t8ad4Hu9XVqmCGxlIpzoCN3e1vx6LOSVgyw3R",
)
else:
raise TwitterQueryException("Twitter OAuth settings missing")
try:
tweets = requests.get(url, auth=OAuth1(*auth_settings)).json()
except Exception as e:
raise TwitterQueryException("Error retrieving: %s" % e)
try:
raise TwitterQueryException(tweets["errors"][0]["message"])
except (IndexError, KeyError, TypeError):
pass
if self.type == "search":
tweets = tweets["statuses"]
for tweet_json in tweets:
remote_id = str(tweet_json["id"])
tweet, created = self.tweets.get_or_create(remote_id=remote_id)
if not created:
continue
if "retweeted_status" in tweet_json:
user = tweet_json['user']
tweet.retweeter_user_name = user["screen_name"]
tweet.retweeter_full_name = user["name"]
tweet.retweeter_profile_image_url = user["profile_image_url"]
tweet_json = tweet_json["retweeted_status"]
if self.type == QUERY_TYPE_SEARCH:
tweet.user_name = tweet_json['user']['screen_name']
tweet.full_name = tweet_json['user']['name']
tweet.profile_image_url = \
tweet_json['user']["profile_image_url"]
date_format = "%a %b %d %H:%M:%S +0000 %Y"
else:
user = tweet_json["user"]
tweet.user_name = user["screen_name"]
tweet.full_name = user["name"]
tweet.profile_image_url = user["profile_image_url"]
date_format = "%a %b %d %H:%M:%S +0000 %Y"
tweet.text = urlize(tweet_json["text"])
tweet.text = re_usernames.sub(replace_usernames, tweet.text)
tweet.text = re_hashtags.sub(replace_hashtags, tweet.text)
if getattr(settings, 'TWITTER_STRIP_HIGH_MULTIBYTE', False):
chars = [ch for ch in tweet.text if ord(ch) < 0x800]
tweet.text = ''.join(chars)
d = datetime.strptime(tweet_json["created_at"], date_format)
d -= timedelta(seconds=timezone)
tweet.created_at = make_aware(d, get_default_timezone())
try:
tweet.save()
except Warning:
pass
tweet.save()
self.interested = False
self.save()
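# Illustrative usage sketch (not part of the original module; assumes valid
# Twitter OAuth settings are configured):
# q = Query.objects.create(type=QUERY_TYPE_SEARCH, value="#django")
# q.run() # fetches matching tweets and stores them via q.tweets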
class Tweet(models.Model):
remote_id = models.CharField(_("Twitter ID"), max_length=50)
created_at = models.DateTimeField(_("Date/time"), null=True)
text = models.TextField(_("Message"), null=True)
profile_image_url = models.URLField(_("Profile image URL"), null=True)
user_name = models.CharField(_("User name"), max_length=100, null=True)
full_name = models.CharField(_("Full name"), max_length=100, null=True)
retweeter_profile_image_url = models.URLField(
_("Profile image URL (Retweeted by)"), null=True)
retweeter_user_name = models.CharField(
_("User name (Retweeted by)"), max_length=100, null=True)
retweeter_full_name = models.CharField(
_("Full name (Retweeted by)"), max_length=100, null=True)
query = models.ForeignKey("Query", related_name="tweets")
objects = TweetManager()
class Meta:
verbose_name = _("Tweet")
verbose_name_plural = _("Tweets")
ordering = ("-created_at",)
def __str__(self):
return "%s: %s" % (self.user_name, self.text)
def is_retweet(self):
return self.retweeter_user_name is not None
| bsd-2-clause | 3,241,061,509,415,992,300 | 39.035503 | 79 | 0.585427 | false |
arcolife/scholarec | corpus/dumps/data_handler.py | 1 | 2830 | #!/usr/bin/python
import os
import sys
import json
from subprocess import call
# path of directory containing all .json files
PATH_SOURCE = './data_arxiv_json/'
PATH_DEST = './sharded/'
def __write_json_files(path_source, path_dest, keyword):
'''
Create json chunks from a previous db dump (.json)
'''
# load dump
data = json.loads(open(path_source+'query_results'+keyword+'.json','rb').read())
for key in data.keys():
temp = data[key]
temp['ID'] = key.split('/')[-1]
temp['keyword'] = keyword
#jEncoder = json.JSONEncoder()
f = open(path_dest +temp['ID']+'.json','wb')
json.dump(temp, f)
f.close()
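# Illustrative example: given data_arxiv_json/query_results<kw>.json containing
# a key like "http://arxiv.org/abs/1234.5678", this writes sharded/1234.5678.json
# with 'ID' and 'keyword' fields added to the record.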
def __write_es_upload_script(path_dest):
'''
write content into bash script that
is supposed to upload chunks to ElasticSearch instance
'''
#list of all json filenames
filenames = os.listdir(path_dest)
FILE = open('es_upload', 'wb')
# write shell commands
FILE.write('#!/bin/bash\n')
FILE.write('cd ' + path_dest + '\n')
for filename in filenames:
# develop a command to upload files
CMD = ['curl','-XPOST',"'http://localhost:9200/arxiv/docs/" \
#+ filename.strip('.json') \
+ "'",'-d ','@'+filename]
FILE.write(' '.join(CMD) +"\n")
#call(CMD)
#print CMD
FILE.close()
def __write_mongo_upload_script(path_dest):
'''
write content into bash script that
is supposed to upload chunks to mongodb instance
'''
#list of all json filenames
filenames = os.listdir(path_dest)
FILE = open('mongo_upload', 'wb')
# write shell commands
FILE.write('#!/bin/bash\n')
FILE.write('cd ' + path_dest + ' \n')
passw = os.environ.get('mongo_scholarec_p')
for filename in filenames:
# develop a command to upload files
FILE.write('mongoimport --db scholarec -u arco -p ' + passw + ' --collection docs --file '+ \
filename + "\n")
#+ filename.strip('.json') \
FILE.close()
if __name__=='__main__':
'''
try:
# creat directory to dump individual json files
os.mkdir(PATH_DEST)
file_ = open('searchWords.lst', 'rb')
import ast
keywords = ast.literal_eval(file_.read())
file_.close()
for word in keywords:
__write_json_files(PATH_SOURCE, PATH_DEST, word)
except OSError:
print "Error: ", sys.exc_info()[1][1]
__write_es_upload_script(PATH_DEST)
'''
__write_mongo_upload_script(PATH_DEST)
'''
# set executable permission to shell script: ./_User_sharded/post
set_perm = ['chmod', '+x', 'es_upload']
call(set_perm)
# execute the script and upload json fiels to ES instance
call_post = ['./es_upload']
call(call_post)
'''
| gpl-3.0 | -9,152,104,286,490,961,000 | 30.098901 | 101 | 0.579152 | false |
caspahouzer/TiShineLabel | build.py | 1 | 8791 | #!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string, optparse, subprocess
import zipfile
from datetime import date
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
required_module_keys = ['name','version','moduleid','description','copyright','license','copyright','platform','minsdk']
module_defaults = {
'description':'My module',
'author': 'Your Name',
'license' : 'Specify your license',
'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
sdk = config['TITANIUM_SDK']
return os.path.expandvars(os.path.expanduser(sdk))
def replace_vars(config,token):
idx = token.find('$(')
while idx != -1:
idx2 = token.find(')',idx+2)
if idx2 == -1: break
key = token[idx+2:idx2]
if not config.has_key(key): break
token = token.replace('$(%s)' % key, config[key])
idx = token.find('$(')
return token
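# Illustrative example: replace_vars({'TITANIUM_SDK': '/opt/sdk'},
# '$(TITANIUM_SDK)/iphone') returns '/opt/sdk/iphone'; tokens whose keys are
# missing from config are left untouched.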
def read_ti_xcconfig():
contents = open(os.path.join(cwd,'titanium.xcconfig')).read()
config = {}
for line in contents.splitlines(False):
line = line.strip()
if line[0:2]=='//': continue
idx = line.find('=')
if idx > 0:
key = line[0:idx].strip()
value = line[idx+1:].strip()
config[key] = replace_vars(config,value)
return config
def generate_doc(config):
docdir = os.path.join(cwd,'documentation')
if not os.path.exists(docdir):
warn("Couldn't find documentation file at: %s" % docdir)
return None
try:
import markdown2 as markdown
except ImportError:
import markdown
documentation = []
for file in os.listdir(docdir):
if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
continue
md = open(os.path.join(docdir,file)).read()
html = markdown.markdown(md)
documentation.append({file:html});
return documentation
def compile_js(manifest,config):
js_file = os.path.join(cwd,'assets','com.lightapps.TiShineLabel.js')
if not os.path.exists(js_file): return
from compiler import Compiler
try:
import json
except:
import simplejson as json
compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
root_asset, module_assets = compiler.compile_module()
root_asset_content = """
%s
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset
module_asset_content = """
%s
NSNumber *index = [map objectForKey:path];
if (index == nil) {
return nil;
}
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets
from tools import splice_code
assets_router = os.path.join(cwd,'Classes','ComLightappsTiShineLabelModuleAssets.m')
splice_code(assets_router, 'asset', root_asset_content)
splice_code(assets_router, 'resolve_asset', module_asset_content)
# Generate the exports after crawling all of the available JS source
exports = open('metadata.json','w')
json.dump({'exports':compiler.exports }, exports)
exports.close()
def die(msg):
print msg
sys.exit(1)
def info(msg):
print "[INFO] %s" % msg
def warn(msg):
print "[WARN] %s" % msg
def validate_license():
c = open(os.path.join(cwd,'LICENSE')).read()
if c.find(module_license_default)!=-1:
warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
path = os.path.join(cwd,'manifest')
f = open(path)
if not os.path.exists(path): die("missing %s" % path)
manifest = {}
for line in f.readlines():
line = line.strip()
if line[0:1]=='#': continue
if line.find(':') < 0: continue
key,value = line.split(':')
manifest[key.strip()]=value.strip()
for key in required_module_keys:
if not manifest.has_key(key): die("missing required manifest key '%s'" % key)
if module_defaults.has_key(key):
defvalue = module_defaults[key]
curvalue = manifest[key]
if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
return manifest,path
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignoreExt=[]):
if not os.path.exists(dir): return
for root, dirs, files in os.walk(dir):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for file in files:
if file in ignoreFiles: continue
e = os.path.splitext(file)
if len(e) == 2 and e[1] in ignoreExt: continue
from_ = os.path.join(root, file)
to_ = from_.replace(dir, '%s/%s'%(basepath,dir), 1)
zf.write(from_, to_)
def glob_libfiles():
files = []
for libfile in glob.glob('build/**/*.a'):
if libfile.find('Release-')!=-1:
files.append(libfile)
return files
def build_module(manifest,config):
from tools import ensure_dev_path
ensure_dev_path()
rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
if rc != 0:
die("xcodebuild failed")
rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
if rc != 0:
die("xcodebuild failed")
# build the merged library using lipo
moduleid = manifest['moduleid']
libpaths = ''
for libfile in glob_libfiles():
libpaths+='%s ' % libfile
os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def generate_apidoc(apidoc_build_path):
global options
if options.skip_docs:
info("Skipping documentation generation.")
return False
else:
info("Module apidoc generation can be skipped using --skip-docs")
apidoc_path = os.path.join(cwd, "apidoc")
if not os.path.exists(apidoc_path):
warn("Skipping apidoc generation. No apidoc folder found at: %s" % apidoc_path)
return False
if not os.path.exists(apidoc_build_path):
os.makedirs(apidoc_build_path)
ti_root = string.strip(subprocess.check_output(["echo $TI_ROOT"], shell=True))
if not len(ti_root) > 0:
warn("Not generating documentation from the apidoc folder. The titanium_mobile repo could not be found.")
warn("Set the TI_ROOT environment variable to the parent folder where the titanium_mobile repo resides (eg.'export TI_ROOT=/Path').")
return False
docgen = os.path.join(ti_root, "titanium_mobile", "apidoc", "docgen.py")
if not os.path.exists(docgen):
warn("Not generating documentation from the apidoc folder. Couldn't find docgen.py at: %s" % docgen)
return False
info("Generating documentation from the apidoc folder.")
rc = os.system("\"%s\" --format=jsca,modulehtml --css=styles.css -o \"%s\" -e \"%s\"" % (docgen, apidoc_build_path, apidoc_path))
if rc != 0:
die("docgen failed")
return True
def package_module(manifest,mf,config):
name = manifest['name'].lower()
moduleid = manifest['moduleid'].lower()
version = manifest['version']
modulezip = '%s-iphone-%s.zip' % (moduleid,version)
if os.path.exists(modulezip): os.remove(modulezip)
zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
zf.write(mf,'%s/manifest' % modulepath)
libname = 'lib%s.a' % moduleid
zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
docs = generate_doc(config)
if docs!=None:
for doc in docs:
for file, html in doc.iteritems():
filename = string.replace(file,'.md','.html')
zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
apidoc_build_path = os.path.join(cwd, "build", "apidoc")
if generate_apidoc(apidoc_build_path):
for file in os.listdir(apidoc_build_path):
if file in ignoreFiles or os.path.isdir(os.path.join(apidoc_build_path, file)):
continue
zf.write(os.path.join(apidoc_build_path, file), '%s/documentation/apidoc/%s' % (modulepath, file))
zip_dir(zf,'assets',modulepath,['.pyc','.js'])
zip_dir(zf,'example',modulepath,['.pyc'])
zip_dir(zf,'platform',modulepath,['.pyc','.js'])
zf.write('LICENSE','%s/LICENSE' % modulepath)
zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
exports_file = 'metadata.json'
if os.path.exists(exports_file):
zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
zf.close()
if __name__ == '__main__':
global options
parser = optparse.OptionParser()
parser.add_option("-s", "--skip-docs",
dest="skip_docs",
action="store_true",
help="Will skip building documentation in apidoc folder",
default=False)
(options, args) = parser.parse_args()
manifest,mf = validate_manifest()
validate_license()
config = read_ti_xcconfig()
sdk = find_sdk(config)
sys.path.insert(0,os.path.join(sdk,'iphone'))
sys.path.append(os.path.join(sdk, "common"))
compile_js(manifest,config)
build_module(manifest,config)
package_module(manifest,mf,config)
sys.exit(0)
| mit | 4,717,175,354,227,685,000 | 30.967273 | 135 | 0.694574 | false |
hall1467/wikidata_usage_tracking | python_analysis_scripts/longitudinal_misalignment/alignment_and_misalignment_table_pre_processor.py | 1 | 2373 | """
Preprocess alignment and misalignment data so that it can be imported into
Postgres
Usage:
alignment_and_misalignment_table_pre_processor (-h|--help)
alignment_and_misalignment_table_pre_processor <output> <input_alignment_data>...
[--debug]
[--verbose]
Options:
-h, --help This help message is printed
<input_alignment_data> Path to file to process.
<output> Where output will be written
--debug Print debug logging to stderr
--verbose Print dots and stuff to stderr
"""
import docopt
import logging
import re
import sys
import mysqltsv
logger = logging.getLogger(__name__)
MISALIGNMENT_FILE_RE =\
re.compile(r'.*\/(\d\d\d\d\d\d)_misaligned\.tsv')
ALIGNMENT_FILE_RE =\
re.compile(r'.*\/(\d\d\d\d\d\d)_aligned\.tsv')
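# Illustrative example: a file named "data/201601_misaligned.tsv" matches
# MISALIGNMENT_FILE_RE with group(1) == "201601", which run() splits into
# year 2016 and month 1.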
def main(argv=None):
args = docopt.docopt(__doc__)
logging.basicConfig(
level=logging.INFO if not args['--debug'] else logging.DEBUG,
format='%(asctime)s %(levelname)s:%(name)s -- %(message)s'
)
input_alignment_data = args['<input_alignment_data>']
output_file = mysqltsv.Writer(open(args['<output>'], "w"))
verbose = args['--verbose']
run(input_alignment_data, output_file, verbose)
def run(input_alignment_data, output_file, verbose):
for f in input_alignment_data:
if verbose:
sys.stderr.write("Processing: {0}\n".format(f))
sys.stderr.flush()
if MISALIGNMENT_FILE_RE.match(f):
date = MISALIGNMENT_FILE_RE.match(f).group(1)
file_type = "ma"
elif ALIGNMENT_FILE_RE.match(f):
date = ALIGNMENT_FILE_RE.match(f).group(1)
file_type = "a"
else:
raise RuntimeError("Incorrect filename: {0}".format(f))
for i, line in enumerate(mysqltsv.Reader(open(f, "r"), headers=False,
types=[str, str, str])):
output_file.write([line[0], int(date[0:4]), int(date[4:6]), line[2],
line[1]])
if verbose and i % 10000 == 0 and i != 0:
sys.stderr.write("\tEntities processed: {0}\n".format(i))
sys.stderr.flush()
if __name__ == "__main__":
main()
| mit | -126,529,642,722,674,400 | 26.275862 | 85 | 0.585335 | false |
NeCTAR-RC/cinder | cinder/backup/api.py | 1 | 11074 | # Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all requests relating to the volume backups service.
"""
from eventlet import greenthread
from oslo.config import cfg
from cinder.backup import rpcapi as backup_rpcapi
from cinder import context
from cinder.db import base
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import utils
import cinder.policy
import cinder.volume
from cinder.volume import utils as volume_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def check_policy(context, action):
target = {
'project_id': context.project_id,
'user_id': context.user_id,
}
_action = 'backup:%s' % action
cinder.policy.enforce(context, _action, target)
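# Illustrative note: check_policy(ctx, 'create') enforces the policy rule
# named 'backup:create' against the caller's project and user IDs.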
class API(base.Base):
"""API for interacting with the volume backup manager."""
def __init__(self, db_driver=None):
self.backup_rpcapi = backup_rpcapi.BackupAPI()
self.volume_api = cinder.volume.API()
super(API, self).__init__(db_driver)
def get(self, context, backup_id):
check_policy(context, 'get')
rv = self.db.backup_get(context, backup_id)
return dict(rv.iteritems())
def delete(self, context, backup_id):
"""Make the RPC call to delete a volume backup."""
check_policy(context, 'delete')
backup = self.get(context, backup_id)
if backup['status'] not in ['available', 'error']:
msg = _('Backup status must be available or error')
raise exception.InvalidBackup(reason=msg)
self.db.backup_update(context, backup_id, {'status': 'deleting'})
self.backup_rpcapi.delete_backup(context,
backup['host'],
backup['id'])
# TODO(moorehef): Add support for search_opts, discarded atm
def get_all(self, context, search_opts=None):
if search_opts is None:
search_opts = {}
check_policy(context, 'get_all')
if context.is_admin:
backups = self.db.backup_get_all(context)
else:
backups = self.db.backup_get_all_by_project(context,
context.project_id)
return backups
def _is_backup_service_enabled(self, volume, volume_host):
"""Check if there is a backup service available."""
topic = CONF.backup_topic
ctxt = context.get_admin_context()
services = self.db.service_get_all_by_topic(ctxt, topic)
for srv in services:
if (srv['availability_zone'] == volume['availability_zone'] and
srv['host'] == volume_host and not srv['disabled'] and
utils.service_is_up(srv)):
return True
return False
def _list_backup_services(self):
"""List all enabled backup services.
:returns: list -- hosts for services that are enabled for backup.
"""
topic = CONF.backup_topic
ctxt = context.get_admin_context()
services = self.db.service_get_all_by_topic(ctxt, topic)
return [srv['host'] for srv in services if not srv['disabled']]
def create(self, context, name, description, volume_id,
container, availability_zone=None):
"""Make the RPC call to create a volume backup."""
check_policy(context, 'create')
volume = self.volume_api.get(context, volume_id)
if volume['status'] != "available":
msg = _('Volume to be backed up must be available')
raise exception.InvalidVolume(reason=msg)
volume_host = volume_utils.extract_host(volume['host'], 'host')
if not self._is_backup_service_enabled(volume, volume_host):
raise exception.ServiceNotFound(service_id='cinder-backup')
self.db.volume_update(context, volume_id, {'status': 'backing-up'})
options = {'user_id': context.user_id,
'project_id': context.project_id,
'display_name': name,
'display_description': description,
'volume_id': volume_id,
'status': 'creating',
'container': container,
'size': volume['size'],
'host': volume_host, }
backup = self.db.backup_create(context, options)
#TODO(DuncanT): In future, when we have a generic local attach,
# this can go via the scheduler, which enables
# better load balancing and isolation of services
self.backup_rpcapi.create_backup(context,
backup['host'],
backup['id'],
volume_id)
return backup
def restore(self, context, backup_id, volume_id=None):
"""Make the RPC call to restore a volume backup."""
check_policy(context, 'restore')
backup = self.get(context, backup_id)
if backup['status'] != 'available':
msg = _('Backup status must be available')
raise exception.InvalidBackup(reason=msg)
size = backup['size']
if size is None:
msg = _('Backup to be restored has invalid size')
raise exception.InvalidBackup(reason=msg)
# Create a volume if none specified. If a volume is specified check
# it is large enough for the backup
if volume_id is None:
name = 'restore_backup_%s' % backup_id
description = 'auto-created_from_restore_from_backup'
LOG.audit(_("Creating volume of %(size)s GB for restore of "
"backup %(backup_id)s"),
{'size': size, 'backup_id': backup_id},
context=context)
volume = self.volume_api.create(context, size, name, description)
volume_id = volume['id']
while True:
volume = self.volume_api.get(context, volume_id)
if volume['status'] != 'creating':
break
greenthread.sleep(1)
else:
volume = self.volume_api.get(context, volume_id)
if volume['status'] != "available":
msg = _('Volume to be restored to must be available')
raise exception.InvalidVolume(reason=msg)
LOG.debug('Checking backup size %s against volume size %s',
size, volume['size'])
if size > volume['size']:
msg = (_('volume size %(volume_size)d is too small to restore '
'backup of size %(size)d.') %
{'volume_size': volume['size'], 'size': size})
raise exception.InvalidVolume(reason=msg)
LOG.audit(_("Overwriting volume %(volume_id)s with restore of "
"backup %(backup_id)s"),
{'volume_id': volume_id, 'backup_id': backup_id},
context=context)
# Setting the status here rather than setting at start and unrolling
# for each error condition, it should be a very small window
self.db.backup_update(context, backup_id, {'status': 'restoring'})
self.db.volume_update(context, volume_id, {'status':
'restoring-backup'})
self.backup_rpcapi.restore_backup(context,
backup['host'],
backup['id'],
volume_id)
d = {'backup_id': backup_id,
'volume_id': volume_id, }
return d
def export_record(self, context, backup_id):
"""Make the RPC call to export a volume backup.
Call backup manager to execute backup export.
:param context: running context
:param backup_id: backup id to export
:returns: dictionary -- a description of how to import the backup
:returns: contains 'backup_url' and 'backup_service'
:raises: InvalidBackup
"""
check_policy(context, 'backup-export')
backup = self.get(context, backup_id)
if backup['status'] != 'available':
msg = (_('Backup status must be available and not %s.') %
backup['status'])
raise exception.InvalidBackup(reason=msg)
LOG.debug("Calling RPCAPI with context: "
"%(ctx)s, host: %(host)s, backup: %(id)s.",
{'ctx': context,
'host': backup['host'],
'id': backup['id']})
export_data = self.backup_rpcapi.export_record(context,
backup['host'],
backup['id'])
return export_data
def import_record(self, context, backup_service, backup_url):
"""Make the RPC call to import a volume backup.
:param context: running context
:param backup_service: backup service name
:param backup_url: backup description to be used by the backup driver
:raises: InvalidBackup
:raises: ServiceNotFound
"""
check_policy(context, 'backup-import')
# NOTE(ronenkat): since we don't have a backup-scheduler
# we need to find a host that support the backup service
# that was used to create the backup.
# We send it to the first backup service host, and the backup manager
# on that host will forward it to other hosts on the hosts list if it
# cannot support correct service itself.
hosts = self._list_backup_services()
if len(hosts) == 0:
raise exception.ServiceNotFound(service_id=backup_service)
options = {'user_id': context.user_id,
'project_id': context.project_id,
'volume_id': '0000-0000-0000-0000',
'status': 'creating', }
backup = self.db.backup_create(context, options)
first_host = hosts.pop()
self.backup_rpcapi.import_record(context,
first_host,
backup['id'],
backup_service,
backup_url,
hosts)
return backup
| apache-2.0 | -4,439,977,846,825,401,300 | 39.123188 | 78 | 0.559689 | false |
zestedesavoir/Python-ZMarkdown | zmarkdown/extensions/urlize.py | 1 | 2230 | # Inspired by https://github.com/r0wb0t/markdown-urlize/blob/master/urlize.py
from __future__ import unicode_literals
from zmarkdown.inlinepatterns import Pattern as InlinePattern, sanitize_url, MAIL_RE
from zmarkdown import Extension, util
try: # pragma: no cover
from urllib.parse import urlparse
except ImportError: # pragma: no cover
from urlparse import urlparse
import re
# Global Vars. Do not catch ending dot
URLIZE_RE = r'(^|(?<=\s))({0})(?=\.?(\s|$))'.format("|".join((
# mail adress (two lines):
MAIL_RE,
# Anything with protocol between < >
r"<(?:f|ht)tps?://[^>]*>",
# with protocol : any valid domain match.
r"((?:f|ht)tps?://)([\da-z\.-]+)\.([a-z\.]{1,5}[a-z])([/\w\.$%&_?#=()'-]*[/\w$%&_?#=()'-])?\/?",
# without protocol, only somes specified protocols match
r"((?:f|ht)tps?://)?([\da-z\.-]+)\.(?:com|net|org|fr)([/\w\.$%&_?#=()'-]*[/\w$%&_?#=()'-])?\/?")))
class UrlizePattern(InlinePattern):
""" Return a link Element given an autolink (`http://example/com`). """
def __init__(self, *args, **kwargs):
kwargs["not_in"] = ('link',)
InlinePattern.__init__(self, *args, **kwargs)
def handleMatch(self, m):
url = m.group(3)
if url.startswith('<'):
url = url[1:-1]
text = url
is_url = re.match(MAIL_RE, url)
if not is_url:
url = sanitize_url(url)
parts = urlparse(url)
# If no protocol (and not explicit relative link), add one
if parts[0] == "":
if is_url:
url = 'mailto:' + url
elif not url.startswith("#") and not url.startswith("/"):
url = 'http://' + url
el = util.etree.Element("a")
el.set('href', url)
el.text = util.AtomicString(text)
return el
class UrlizeExtension(Extension):
""" Urlize Extension for Python-Markdown. """
def extendZMarkdown(self, md, md_globals):
""" Replace autolink with UrlizePattern """
md.inlinePatterns['autolink'] = UrlizePattern(URLIZE_RE, md)
def makeExtension(*args, **kwargs):
return UrlizeExtension(*args, **kwargs)
| bsd-3-clause | -4,029,473,678,313,161,700 | 30.794118 | 102 | 0.54574 | false |
MariaKrusteva/MALL | create_mall_database.py | 1 | 1640 | import establishment_sql_manager
import store_sql_manager
def main():
establishment_sql_manager.create_table()
store_sql_manager.create_table()
store_sql_manager.create_staff_table()
stores = [("your_shoes", "snikers", "purple", 48, 52.3, 4),
("your_shoes", "high heels", "12cm", 65, 70, 5),
("your_shoes", "sandals", "brown", 23, 26, 5),
("mimis_grocery", "cheese", "white", 10, 12, 4),
("mimis_grocery", "bread", "700gr", 0.6, 1.2, 12),
("mimis_grocery", "sugar", "1kg", 1.2, 2, 13),
("pythons_clothes", "jeans", "blue", 35, 40.5, 7),
("pythons_clothes", "T-shirt", "green", 15, 20.3, 7),
("pythons_clothes", "skirt", "pleated", 23, 26, 7),
]
establishments = [("anakonda", "restaurant", "sweet potatoes",
"potatoes herbs", 3.2, 350),
("anakonda", "restaurant", "pizza", "cheese, tomatoes",
7.2, 400),
("anakonda", "restaurant", "salad", "cucumber, tomato",
3.5, 350),
("ubuntu", "cafe", "tea", "teabag, honey", 1.2, 250),
("ubuntu", "cafe", "latte", " espresso, steamed milk",
2.5, 150),
("ubuntu", "cafe", "latte", " espresso, steamed milk",
2.5, 150)]
for store in stores:
store_sql_manager.add_item(*store)
for establishment in establishments:
establishment_sql_manager.add_item(*establishment)
if __name__ == '__main__':
main()
| gpl-3.0 | 8,095,784,587,101,703,000 | 41.051282 | 77 | 0.483537 | false |
paritoshsingh/konehack | read_and_send_msg.py | 1 | 2879 | #!/usr/bin/python
import smbus
import math
import paho.mqtt.client as mqtt
# Power management registers
power_mgmt_1 = 0x6b
power_mgmt_2 = 0x6c
def read_byte(adr):
return bus.read_byte_data(address, adr)
def read_word(adr):
high = bus.read_byte_data(address, adr)
low = bus.read_byte_data(address, adr+1)
val = (high << 8) + low
return val
def read_word_2c(adr):
val = read_word(adr)
if (val >= 0x8000):
return -((65535 - val) + 1)
else:
return val
def dist(a,b):
return math.sqrt((a*a)+(b*b))
def get_y_rotation(x,y,z):
radians = math.atan2(x, dist(y,z))
return -math.degrees(radians)
def get_x_rotation(x,y,z):
radians = math.atan2(y, dist(x,z))
return math.degrees(radians)
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
print("Connected with result code "+str(rc))
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
print(msg.topic+" "+str(msg.payload))
bus = smbus.SMBus(1) # or bus = smbus.SMBus(1) for Revision 2 boards
address = 0x68 # This is the address value read via the i2cdetect command
# Now wake the 6050 up as it starts in sleep mode
bus.write_byte_data(address, power_mgmt_1, 0)
while 1:
# print "gyro data"
# print "---------"
gyro_xout = read_word_2c(0x43)
gyro_yout = read_word_2c(0x45)
gyro_zout = read_word_2c(0x47)
print "gyro_xout: ", gyro_xout, " scaled: ", (gyro_xout / 131)
print "gyro_yout: ", gyro_yout, " scaled: ", (gyro_yout / 131)
print "gyro_zout: ", gyro_zout, " scaled: ", (gyro_zout / 131)
# print
# print "accelerometer data"
# print "------------------"
accel_xout = read_word_2c(0x3b)
accel_yout = read_word_2c(0x3d)
accel_zout = read_word_2c(0x3f)
accel_xout_scaled = accel_xout / 16384.0
accel_yout_scaled = accel_yout / 16384.0
accel_zout_scaled = accel_zout / 16384.0
# print "accel_xout: ", accel_xout, " scaled: ", accel_xout_scaled
# print "accel_yout: ", accel_yout, " scaled: ", accel_yout_scaled
# print "accel_zout: ", accel_zout, " scaled: ", accel_zout_scaled
# print "x rotation: " , get_x_rotation(accel_xout_scaled, accel_yout_scaled, accel_zout_scaled)
# print "y rotation: " , get_y_rotation(accel_xout_scaled, accel_yout_scaled, accel_zout_scaled)
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe("$SYS/#")
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect("iot.eclipse.org", 1883, 60)
# Blocking call that processes network traffic, dispatches callbacks and
# handles reconnecting.
# Other loop*() functions are available that give a threaded interface and a
# manual interface.
client.loop_forever()
| mit | 6,383,770,058,486,674,000 | 27.22549 | 97 | 0.674192 | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_4_0_0/models/researchsubject_tests.py | 1 | 1832 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b on 2019-05-07.
# 2019, SMART Health IT.
import os
import io
import unittest
import json
from . import researchsubject
from .fhirdate import FHIRDate
class ResearchSubjectTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("ResearchSubject", js["resourceType"])
return researchsubject.ResearchSubject(js)
def testResearchSubject1(self):
inst = self.instantiate_from("researchsubject-example.json")
self.assertIsNotNone(inst, "Must have instantiated a ResearchSubject instance")
self.implResearchSubject1(inst)
js = inst.as_json()
self.assertEqual("ResearchSubject", js["resourceType"])
inst2 = researchsubject.ResearchSubject(js)
self.implResearchSubject1(inst2)
def implResearchSubject1(self, inst):
self.assertEqual(inst.id, "example")
self.assertEqual(inst.identifier[0].system, "http://example.org/studysubjectids")
self.assertEqual(inst.identifier[0].type.text, "Subject id")
self.assertEqual(inst.identifier[0].value, "123")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.status, "candidate")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">[Put rendering here]</div>")
self.assertEqual(inst.text.status, "generated")
| bsd-3-clause | -114,741,716,307,559,120 | 39.711111 | 113 | 0.679585 | false |
ActiveState/code | recipes/Python/440657_Determine_functiexecutitime_Pythonic/recipe-440657.py | 1 | 2051 | """
Determine function execution time.
>>> def f():
... return sum(range(10))
...
>>> pytime(f)
(Time to execute function f, including function call overhead).
>>> 1.0/pytime(f)
(Function calls/sec, including function call overhead).
>>> 1.0/pytime_statement('sum(range(10))')
(Statements/sec, does not include any function call overhead).
"""
import sys
# Source code is public domain.
if sys.platform == "win32":
from time import clock
else:
from time import time as clock
def pytime(f, args=(), kwargs={}, Tmax=2.0):
"""
Calls f many times to determine the average time to execute f.
Tmax is the maximum time to spend in pytime(), in seconds.
"""
count = 1
while True:
start = clock()
if args == () and kwargs == {}:
for i in xrange(count):
f()
elif kwargs == {}:
for i in xrange(count):
f(*args)
else:
for i in xrange(count):
f(*args, **kwargs)
T = clock() - start
if T >= Tmax/4.0: break
count *= 2
return T / count
def pytime_statement(stmt, global_dict=None, Tmax=2.0,
repeat_count=128):
"""
Determine time to execute statement (or block) of Python code.
Here global_dict is the globals dict used for exec, Tmax is the max
time to spend in pytime_statement(), in sec, and repeat_count is the
number of times to paste stmt into the inner timing loop (this is
automatically set to 1 if stmt takes too long).
"""
if global_dict is None:
global_dict = globals()
ns = {}
code = 'def timed_func():' + ('\n' +
'\n'.join([' '+x for x in stmt.split('\n')]))
exec code in global_dict, ns
start = clock()
ns['timed_func']()
T = clock() - start
if T >= Tmax/4.0:
return T
elif T >= Tmax/4.0/repeat_count:
return pytime(ns['timed_func'], (), {}, Tmax-T)
else:
code = 'def timed_func():' + ('\n' +
'\n'.join([' '+x for x in stmt.split('\n')]))*repeat_count
exec code in global_dict, ns
return pytime(ns['timed_func'], (), {}, Tmax-T) / repeat_count
| mit | -7,438,097,726,899,165,000 | 25.294872 | 70 | 0.601658 | false |
jucacrispim/mongomotor | mongomotor/connection.py | 1 | 3842 | # -*- coding: utf-8 -*-
# Copyright 2016 Juca Crispim <[email protected]>
# This file is part of mongomotor.
# mongomotor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# mongomotor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with mongomotor. If not, see <http://www.gnu.org/licenses/>.
from mongoengine import connection
from mongoengine.connection import (connect as me_connect,
DEFAULT_CONNECTION_NAME,
disconnect as me_disconnect,
register_connection,
get_connection)
from mongomotor import utils
from mongomotor.clients import (MongoMotorAsyncIOClient,
MongoMotorTornadoClient)
from mongomotor.monkey import MonkeyPatcher
CLIENTS = {'asyncio': (MongoMotorAsyncIOClient,),
'tornado': (MongoMotorTornadoClient,)}
_db_version = {}
def get_mongodb_version(alias=DEFAULT_CONNECTION_NAME):
"""Return the version of the connected mongoDB (first 2 digits)
:param alias: The alias identifying the connection
:return: tuple(int, int)
"""
# e.g: (3, 2)
version_list = get_connection(alias).server_info()["versionArray"][:2]
return tuple(version_list)
def get_db_version(alias=DEFAULT_CONNECTION_NAME):
"""Returns the version of the database for a given alias. This
will patch the original mongoengine's get_mongodb_version.
:param alias: The alias identifying the connection.
"""
return _db_version[alias]
def connect(db=None, async_framework='asyncio',
alias=DEFAULT_CONNECTION_NAME, **kwargs):
"""Connect to the database specified by the 'db' argument.
Connection settings may be provided here as well if the database is not
running on the default port on localhost. If authentication is needed,
provide username and password arguments as well.
Multiple databases are supported by using aliases. Provide a separate
`alias` to connect to a different instance of :program:`mongod`.
Parameters are the same as for :func:`mongoengine.connection.connect`
plus one:
:param async_framework: Which asynchronous framework should be used.
It can be `tornado` or `asyncio`. Defaults to `asyncio`.
"""
clients = CLIENTS[async_framework]
with MonkeyPatcher() as patcher:
patcher.patch_db_clients(*clients)
patcher.patch_sync_connections()
ret = me_connect(db=db, alias=alias, **kwargs)
# here we register a connection that will use the original pymongo
# client and if used will block the process.
# We need to patch here otherwise we will get the async connection
# beeing reused instead of a sync one.
with MonkeyPatcher() as patcher:
patcher.patch_item(connection, '_find_existing_connection',
lambda *a, **kw: None)
kwargs.pop('io_loop', None)
sync_alias = utils.get_sync_alias(alias)
register_connection(sync_alias, db, **kwargs)
_db_version[alias] = get_mongodb_version(sync_alias)
return ret
def disconnect(alias=DEFAULT_CONNECTION_NAME):
"""Disconnects from the database indentified by ``alias``.
"""
me_disconnect(alias=alias)
# disconneting sync connection
sync_alias = utils.get_sync_alias(alias)
me_disconnect(alias=sync_alias)
| gpl-3.0 | 4,309,757,562,831,834,600 | 34.906542 | 75 | 0.682457 | false |
thinkxl/mentions | mentions/mentions.py | 1 | 3175 | # -*- coding: utf-8 -*-
"""
This module contains the primary objects that power Mention.
"""
import json
import requests
from bs4 import BeautifulSoup
headers = {'User-Agent': 'Karma v0.1.0', 'From': '@thinkxl'}
# Facebook
def get_facebook_data(method, url):
try:
facebook_url = 'https://api.facebook.com/method/fql.query?query=select \
' + method + ' from link_stat where url="' + url + '"&format=json'
r = requests.get(facebook_url, headers=headers)
json_data = json.loads(r.text)
return json_data[0][method]
except:
return 0
def facebook_total_count(url):
return get_facebook_data('total_count', url)
def facebook_like_count(url):
return get_facebook_data('like_count', url)
def facebook_comment_count(url):
return get_facebook_data('comment_count', url)
def facebook_share_count(url):
return get_facebook_data('share_count', url)
# Twitter
def tweets(url):
"""tweets count"""
try:
twitter_url = 'http://urls.api.twitter.com/1/urls/count.json?url=' + url
r = requests.get(twitter_url, headers=headers)
json_data = json.loads(r.text)
return json_data['count']
except:
return 0
# Google+
def google_plus_one(url):
"""+1's count"""
try:
google_url = 'https://plusone.google.com/_/+1/fastbutton?url=' + url
soup = BeautifulSoup(requests.get(google_url, headers=headers).text)
tag = soup.find_all(id="aggregateCount")[0]
count = tag.string.extract()
return count
except:
return 0
def linkedin_mentions(url):
"""mentions count"""
try:
linkedin_url = 'http://www.linkedin.com/countserv/count/share?url=' \
+ url + '&format=json'
json_data = json.loads(requests.get(linkedin_url, headers=headers).text)
return json_data['count']
except:
return 0
def pinterest_shares(url):
"""pinterest share count"""
try:
pinterest_url = 'http://api.pinterest.com/v1/urls/count.json?url=' \
+ url
response = requests.get(pinterest_url).text\
.replace('receiveCount(', '')\
.replace(')', '')
json_data = json.loads(response)
return json_data['count']
except:
return 0
def stumbleupon_views(url):
"""views count"""
try:
stumbleupon_url = 'http://www.stumbleupon.com/services/1.01/badge.getinfo?\
url=' + url + '&format=jsonp'
json_data = json.loads(requests.get(stumbleupon_url).text)
return json_data['result']['views']
except:
return 0
# def delicious_count(url):
# """bookmarked count"""
# delicious_url = 'http://feeds.delicious.com/v2/json/urlinfo/data?url='\
# + url
# return requests.get(delicious_url).response
def reddit_mentions(url):
"""mentions count"""
try:
reddit_url = 'http://www.reddit.com/api/info.json?url=' + url
json_data = json.loads(requests.get(reddit_url, headers=headers).text)
return len(json_data['data']['children'])
except:
return 0
| mit | 7,374,464,425,110,252,000 | 29.238095 | 83 | 0.596535 | false |
kpolimis/kpolimis.github.io-src | pelicanconf.py | 1 | 3979 | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
import os
#from utils import filters
AUTHOR = u'Kivan Polimis'
SITENAME = u'Kivan Polimis'
# SITESUBTITLE = u'my personal musings and attempts to apply \
# and share some programming tips'
INDEX_SAVE_AS = 'pages/home.html'
PATH = 'content'
PAGE_ORDER_BY = 'sortorder'
# Times and dates
TIMEZONE = 'US/Pacific'
DEFAULT_LANG = u'en'
SUMMARY_MAX_LENGTH = '50'
GOOGLE_ANALYTICS = 'UA-104881568-1'
# Set the article URL
#ARTICLE_URL = 'blog/{date:%Y}/{date:%m}/{date:%d}/{slug}/'
#ARTICLE_SAVE_AS = 'blog/{date:%Y}/{date:%m}/{date:%d}/{slug}/index.html'
# Theme and plugins
# JINJA_FILTERS = {'sidebar': filters.sidebar}
THEME = "pelican-themes/nest"
# Minified CSS
NEST_CSS_MINIFY = True
# Add canonical link element to top page header and all article/author/category/tag page header
NEST_REL_CANONICAL_LINK = True
NEST_HEADER_IMAGES = ''
NEST_HEADER_LOGO = '/images/jade_mountain.png'
DISPLAY_PAGES_ON_MENU = False
# MENUITEMS = [('Home', '/index.html'), ('Articles', '/categories.html')]
MENUITEMS = [('Home', '/index.html'),('Articles','/categories.html'),
('Vita','/pages/vita.html'), ('Software','/pages/software.html'),
('Projects','/pages/projects.html')]
# categories.html
NEST_CATEGORIES_HEAD_TITLE = u'Articles'
NEST_CATEGORIES_HEAD_DESCRIPTION = u'Articles listed by category'
NEST_CATEGORIES_HEADER_TITLE = u'Articles'
NEST_CATEGORIES_HEADER_SUBTITLE = u'Articles listed by category'
# software.html
NEST_SOFTWARE_HEAD_TITLE = u'Software'
NEST_SOFTWARE_HEAD_DESCRIPTION = u'Software'
NEST_SOFTWARE_HEADER_TITLE = u'Software'
NEST_SOFTWARE_HEADER_SUBTITLE = u'Articles listed by category'
# Footer
NEST_SITEMAP_COLUMN_TITLE = u'Sitemap'
NEST_SITEMAP_MENU = [('Home', '/index.html'),('Articles','/categories.html'),
('Vita','/pages/vita.html'), ('Software','/pages/software.html'),
('Projects','/pages/projects.html')]
NEST_SITEMAP_ATOM_LINK = u'Atom Feed'
NEST_SITEMAP_RSS_LINK = u'RSS Feed'
NEST_SOCIAL_COLUMN_TITLE = u'Social'
NEST_LINKS_COLUMN_TITLE = u'Links'
NEST_COPYRIGHT = u'© Kivan Polimis 2021'
# pagination.html
NEST_PAGINATION_PREVIOUS = u'Previous'
NEST_PAGINATION_NEXT = u'Next'
# Footer optional
NEST_FOOTER_HTML = ''
# Static files
STATIC_PATHS = ['images', 'favicon.ico']
CODE_DIR = 'downloads/code'
NOTEBOOK_DIR = 'downloads/notebooks'
READERS = {'html': None}
PLUGIN_PATHS = ['pelican-plugins']
PLUGINS = ['liquid_tags.notebook', # for embedding notebooks
'summary', # auto-summarizing articles
'feed_summary', # use summaries for RSS, not full articles
'render_math'
]
MD = ['codehilite(css_class=highlight)','extra', 'mathjax']
# Only use LaTeX for selected articles
LATEX = 'article'
# SUMMARY_USE_FIRST_PARAGRAPH = 'True'
TWITTER_USERNAME = 'kpolimis'
GITHUB_USERNAME = 'kpolimis'
AUTHOR_CV = "http://kivanpolimis.com/docs/Kivan_Polimis_Curriculum_Vitae.pdf"
SHOW_ARCHIVES = True
IGNORE_FILES = ['.ipynb_checkpoints']
if not os.path.exists('_nb_header.html'):
import warnings
warnings.warn("_nb_header.html not found. "
"Rerun make html to finalize build.")
else:
EXTRA_HEADER = open('_nb_header.html').read()
# RMD_READER_KNITR_OPTS_CHUNK = {'fig.path': '../../../figure/'}
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Title menu options
DEFAULT_PAGINATION = 10
# Uncomment following line if you want document-relative URLs when developing
RELATIVE_URLS = True
# Blogroll
#LINKS = (('Pelican', 'http://getpelican.com/'),
# ('Python.org', 'http://python.org/'),
# ('Jinja2', 'http://jinja.pocoo.org/'),
# ('You can modify those links in your config file', '#'),)
# Social widget
#SOCIAL = (('You can add links in your config file', '#'),
# ('Another social link', '#'),)
| gpl-3.0 | 2,233,683,985,221,732,400 | 28.474074 | 95 | 0.688615 | false |
libvirt/libvirt-test-API | libvirttestapi/repos/storage/dir_vol_upload.py | 1 | 5210 | # Copyright (C) 2010-2012 Red Hat, Inc.
# This work is licensed under the GNU GPLv2 or later.
# dir storage volume upload testing, only raw format volume is
# supported, other format might fail. offset and length can
# only be chosen in 0 and 1048576.
import os
import string
import sys
from xml.dom import minidom
from libvirt import libvirtError
from libvirttestapi.src import sharedmod
from libvirttestapi.utils import utils
required_params = ('poolname', 'volname', 'volformat', 'capacity',
'offset', 'length',)
optional_params = {'xml': 'xmls/dir_volume.xml',
}
def get_pool_path(poolobj):
""" get pool xml description
"""
poolxml = poolobj.XMLDesc(0)
logger.debug("the xml description of pool is %s" % poolxml)
doc = minidom.parseString(poolxml)
path_element = doc.getElementsByTagName('path')[0]
textnode = path_element.childNodes[0]
path_value = textnode.data
return path_value
def write_file(path):
"""write 1M test data to file
"""
logger.info("write data into file %s" % path)
f = open(path, 'w')
if sys.version_info[0] < 3:
datastr = ''.join(string.lowercase + string.uppercase +
string.digits + '.' + '\n')
else:
datastr = ''.join(string.ascii_lowercase + string.ascii_uppercase +
string.digits + '.' + '\n')
data = ''.join(16384 * datastr)
f.write(data)
f.close()
def handler(stream, data, file_):
return file_.read(data)
def dir_vol_upload(params):
"""test volume download and check"""
global logger
logger = params['logger']
poolname = params['poolname']
volname = params['volname']
volformat = params['volformat']
offset = int(params['offset'])
length = int(params['length'])
capacity = params['capacity']
xmlstr = params['xml']
logger.info("the poolname is %s, volname is %s, volformat is %s" %
(poolname, volname, volformat))
logger.info("upload offset is: %s" % offset)
logger.info("the data length to upload is: %s" % length)
conn = sharedmod.libvirtobj['conn']
try:
poolobj = conn.storagePoolLookupByName(poolname)
path_value = get_pool_path(poolobj)
volume_path = path_value + "/" + volname
xmlstr = xmlstr.replace('VOLPATH', volume_path)
xmlstr = xmlstr.replace('SUFFIX', capacity[-1])
xmlstr = xmlstr.replace('CAP', capacity[:-1])
logger.debug("volume xml:\n%s" % xmlstr)
logger.info("create %s %s volume" % (volname, volformat))
vol = poolobj.createXML(xmlstr, 0)
test_path = path_value + "/" + "vol_test"
write_file(test_path)
olddigest = utils.digest(test_path, 0, 0)
logger.debug("the old file digest is: %s" % olddigest)
if offset:
origdigestpre = utils.digest(volume_path, 0, offset)
else:
origdigestpre = ''
logger.debug("the original pre region digest is: %s" % origdigestpre)
origdigestpost = utils.digest(volume_path, offset + 1024 * 1024, 0)
logger.debug("the original post region digest is: %s" % origdigestpost)
st = conn.newStream(0)
if sys.version_info[0] < 3:
f = open(test_path, 'r')
else:
f = open(test_path, 'rb')
logger.info("start upload")
vol.upload(st, offset, length, 0)
logger.info("sent all data")
st.sendAll(handler, f)
logger.info("finished stream")
st.finish()
f.close()
newdigest = utils.digest(volume_path, offset, 1024 * 1024)
logger.debug("the new file digest is: %s" % olddigest)
if offset:
newdigestpre = utils.digest(volume_path, 0, offset)
else:
newdigestpre = ''
logger.debug("the new pre region digest is: %s" % origdigestpre)
newdigestpost = utils.digest(volume_path, offset + 1024 * 1024, 0)
logger.debug("the new post region digest is: %s" % origdigestpost)
if newdigestpre == origdigestpre:
logger.info("file pre region digests match")
else:
logger.error("file pre region digests not match")
return 1
if olddigest == newdigest:
logger.info("file digests match")
else:
logger.error("file digests not match")
return 1
if newdigestpost == origdigestpost:
logger.info("file post region digests match")
else:
logger.error("file post region digests not match")
return 1
except libvirtError as e:
logger.error("libvirt call failed: " + str(e))
return 1
return 0
def dir_vol_upload_clean(params):
"""clean testing environment"""
poolname = params['poolname']
volname = params['volname']
conn = sharedmod.libvirtobj['conn']
poolobj = conn.storagePoolLookupByName(poolname)
path_value = get_pool_path(poolobj)
test_path = path_value + "/" + "vol_test"
vol = poolobj.storageVolLookupByName(volname)
vol.delete(0)
if os.path.exists(test_path):
os.unlink(test_path)
return 0
| gpl-2.0 | 4,381,108,374,907,106,000 | 29.647059 | 79 | 0.605758 | false |
olingrobin/test | day1/user.py | 1 | 1087 |
error_name = open("namefile.txt","a")
error_name.close()
list = {"jin":"123","tom":"456","jak":"789","aimi":"012"}
count = 0
status = False
while True:
user_name = input("请输入用户名:")
error_name = open("namefile.txt","r")
for name in error_name:
if user_name == name.strip():
status = True
break
if status == True:
error_name.close()
print("用户已被禁止登陆")
break
user_password = input("请输入密码:")
for k,v in list.items():
if user_name in k and user_password in v:
print("欢迎用户:",k)
status = True
break
else:
continue
if status == True:
break
count += 1
if count < 3:
print("请检查用户名或密码:")
continue
if count >= 3:
print("输入错误三次,用户",user_name,"已进入黑名单")
error_user = open("namefile.txt","a")
error_user.write(user_name)
error_user.write("\n")
error_user.close()
break
| gpl-3.0 | -9,159,499,533,204,680,000 | 22.642857 | 57 | 0.512588 | false |
DeveloperJose/Vision-Rat-Brain | feature_matching_v3/util_sift.py | 1 | 1540 | # Author: Jose G Perez
# Version 1.0
# Last Modified: January 31, 2018
import numpy as np
import cv2
import os
SIFT = cv2.xfeatures2d.SIFT_create(contrastThreshold=0.05, edgeThreshold=100, sigma=2)
def kp_to_array(kp):
array = np.zeros((len(kp), 7), dtype=np.float32)
for idx in range(array.shape[0]):
k = kp[idx]
array[idx] = np.array([k.pt[0], k.pt[1], k.size,k.angle,k.response,k.octave,k.class_id])
return array
def array_to_kp(array):
kp = []
for idx in range(array.shape[0]):
k = array[idx]
kp.append(cv2.KeyPoint(k[0],k[1],k[2],k[3],k[4],k[5],k[6]))
return kp
def __precompute_atlas(name):
if not os.path.isfile(name + '_SIFT.npz'):
print('Precomputing SIFT for ', name)
atlas_data = np.load(name + ".npz")
atlas_im = atlas_data['images']
atlas_labels = atlas_data['labels']
atlas_kp = []
atlas_des = []
for i in range(0, atlas_im.shape[0]):
kp, des = SIFT.detectAndCompute(atlas_im[i], None)
kp = kp_to_array(kp)
atlas_kp.append(kp)
atlas_des.append(des)
atlas_kp = np.asarray(atlas_kp)
atlas_des = np.asarray(atlas_des)
np.savez_compressed(name + '_SIFT', images=atlas_im, labels=atlas_labels, kp=atlas_kp, des=atlas_des)
def precompute_sift(S_NAME, PW_NAME):
__precompute_atlas(S_NAME)
__precompute_atlas(PW_NAME)
def load_sift(path):
data = np.load(path)
return data['images'], data['labels'], data['kp'], data['des'] | mit | -6,412,502,763,211,562,000 | 30.44898 | 109 | 0.597403 | false |
wd8rde/genesis_g59_py | setup.py | 1 | 1416 | #!/usr/bin/env python
#The MIT License (MIT)
#
#Copyright (c) 2015 Robert Anthony Bouterse, WD8RDE
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
from distutils.core import setup
setup(name='genesis_g59',
version='0.1',
description='Genesis G59 USB Control Utilities',
author='WD8RDE',
author_email='[email protected]',
url='',
packages=['genesis_g59','genesis_g59.g59_si570'],
)
| mit | -5,464,127,228,988,461,000 | 43.25 | 79 | 0.75565 | false |
Dangetsu/vnr | Frameworks/Sakura/py/libs/vnragent/engine.py | 1 | 2147 | # coding: utf8
# engine.py
# 5/3/2014 jichi
# The logic in this file must be consistent with that in vnragent.dll.
if __name__ == '__main__': # DEBUG
import sys
sys.path.append("..")
import os
from glob import glob
from sakurakit.skdebug import dprint
from sakurakit.skfileio import escapeglob
class Engine:
def __init__(self, name='', regionLocked=False, vnrboot=False, **kwargs):
self.name = name # str
self.regionLocked = regionLocked # bool
self.vnrboot = vnrboot # bool
# Not used
#def encoding(self): return 'utf-16' if self.wideChar else 'shift-jis'
class EngineFinder:
def __init__(self, pid=0, exepath='', dirpath=''):
"""
@param* pid long process id
@param* exepath unicode executable path
@param* dirpath unicode process directory path
"""
if not exepath and pid:
exepath = skwin.get_process_path(pid)
if not dirpath and exepath:
dirpath = os.path.dirname(exepath)
self.pid = pid # long
self.exepath = exepath # unicode
self.dirpath = dirpath # unicode
#self.processName = skwin.get_process_name(pid)
def eval(self, e):
"""
@param e list or str
@return bool
"""
if not e:
return False
if isinstance(e, list):
for it in e:
if not self.eval(it):
return False
return True
# e is str or unicode
elif '|' in e:
for it in e.split('|'):
if self.eval(it):
return True
return False
elif e[0] == '!' and len(e) > 1:
return not self.eval(e[1:])
elif '*' in e:
return self.globs(e)
else:
return self.exists(e)
def globs(self, relpath):
"""
@param relpath unicode
@return bool
"""
return bool(self.dirpath and glob(os.path.join(escapeglob(self.dirpath), relpath)))
def exists(self, relpath):
"""
@param relpath unicode
@return bool
"""
return bool(self.dirpath) and os.path.exists(os.path.join(self.dirpath, relpath))
def getAbsPath(self, relpath):
"""
@param relpath unicode
@return unicode
"""
return os.path.join(self.dirpath, relpath)
# EOF
| gpl-3.0 | -3,036,005,952,833,670,000 | 23.965116 | 87 | 0.615277 | false |
gmimano/commcaretest | corehq/apps/reports/filters/select.py | 1 | 5848 | import datetime
import calendar
from django.conf import settings
from django.utils.translation import ugettext_noop
from django.utils.translation import ugettext as _
from casexml.apps.case.models import CommCareCase, CommCareCaseGroup
from corehq.apps.app_manager.models import Application
from corehq.apps.domain.models import Domain, LICENSES
from corehq.apps.groups.models import Group
from corehq.apps.orgs.models import Organization
from corehq.apps.reports.filters.base import BaseSingleOptionFilter, BaseMultipleOptionFilter
class SelectRegionFilter(BaseSingleOptionFilter):
slug = "region"
label = ugettext_noop("Region")
default_text = ugettext_noop("All Regions")
@property
def options(self):
if hasattr(Domain, 'regions'):
available_regions = [(d.replace(' ', '+'), d) for d in Domain.regions()]
else:
available_regions = []
return available_regions
class SelectLicenseFilter(BaseSingleOptionFilter):
slug = "license"
label = ugettext_noop("License")
default_text = ugettext_noop("All Licenses")
@property
def options(self):
return [(code, license_name) for code, license_name in LICENSES.items()]
class SelectCategoryFilter(BaseSingleOptionFilter):
slug = "category"
label = ugettext_noop("Category")
default_text = ugettext_noop("All Categories")
@property
def options(self):
if hasattr(Domain, 'categories'):
available_categories = [(d.replace(' ', '+'), d) for d in Domain.categories()]
else:
available_categories = []
return available_categories
class SelectOrganizationFilter(BaseSingleOptionFilter):
slug = "org"
label = ugettext_noop("Organization")
default_text = ugettext_noop("All Organizations")
@property
def options(self):
return [(o.name, o.title) for o in Organization.get_all()]
class GroupFilterMixin(object):
slug = "group"
label = ugettext_noop("Group")
default_text = ugettext_noop("Everybody")
@property
def options(self):
return [(group.get_id, group.name) for group in Group.get_reporting_groups(self.domain)]
class GroupFilter(GroupFilterMixin, BaseSingleOptionFilter):
placeholder = ugettext_noop('Click to select a group')
class MultiGroupFilter(GroupFilterMixin, BaseMultipleOptionFilter):
placeholder = ugettext_noop('Click to select groups')
class YearFilter(BaseSingleOptionFilter):
slug = "year"
label = ugettext_noop("Year")
default_text = None
@property
def options(self):
start_year = getattr(settings, 'START_YEAR', 2008)
years = [(unicode(y), y) for y in range(start_year, datetime.datetime.utcnow().year + 1)]
years.reverse()
return years
class MonthFilter(BaseSingleOptionFilter):
slug = "month"
label = ugettext_noop("Month")
default_text = None
@property
def options(self):
return [("%02d" % m, calendar.month_name[m]) for m in range(1, 13)]
class CaseTypeFilter(BaseSingleOptionFilter):
slug = "case_type"
label = ugettext_noop("Case Type")
default_text = ugettext_noop("All Case Types")
@property
def options(self):
case_types = self.get_case_types(self.domain)
return [(case, "%s" % case) for case in case_types]
@classmethod
def get_case_types(cls, domain):
key = [domain]
for r in CommCareCase.get_db().view(
'hqcase/all_cases',
startkey=key,
endkey=key + [{}],
group_level=2
).all():
_, case_type = r['key']
if case_type:
yield case_type
@classmethod
def get_case_counts(cls, domain, case_type=None, user_ids=None):
"""
Returns open count, all count
"""
user_ids = user_ids or [{}]
for view_name in ('hqcase/open_cases', 'hqcase/all_cases'):
def individual_counts():
for user_id in user_ids:
key = [domain, case_type or {}, user_id]
try:
yield CommCareCase.get_db().view(
view_name,
startkey=key,
endkey=key + [{}],
group_level=0
).one()['value']
except TypeError:
yield 0
yield sum(individual_counts())
class SelectOpenCloseFilter(BaseSingleOptionFilter):
slug = "is_open"
label = ugettext_noop("Opened / Closed")
default_text = ugettext_noop("Show All")
@property
def options(self):
return [
('open', _("Only Open")),
('closed', _("Only Closed")),
]
class SelectApplicationFilter(BaseSingleOptionFilter):
slug = "app"
label = ugettext_noop("Application")
default_text = ugettext_noop("Select Application [Latest Build Version]")
@property
def options(self):
apps_for_domain = Application.get_db().view(
"app_manager/applications_brief",
startkey=[self.domain],
endkey=[self.domain, {}],
include_docs=True
).all()
return [(app['value']['_id'], _("%(name)s [up to build %(version)s]") % {
'name': app['value']['name'],
'version': app['value']['version']}) for app in apps_for_domain]
class MultiCaseGroupFilter(BaseMultipleOptionFilter):
slug = "case_group"
label = ugettext_noop("Case Group")
default_text = ugettext_noop("All Case Groups")
placeholder = ugettext_noop('Click to select case groups')
@property
def options(self):
return [(g["id"], g["key"][1]) for g in CommCareCaseGroup.get_all(self.domain, include_docs=False)]
| bsd-3-clause | 2,471,994,198,075,688,000 | 31.131868 | 107 | 0.612859 | false |
jasonabele/gnuradio | gr-audio-jack/src/qa_jack.py | 1 | 1284 | #!/usr/bin/env python
#
# Copyright 2005,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import audio_jack
class test_audio_jack (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_000_nop (self):
"""Just see if we can import the module...
They may not have JACK library, etc. Don't try to run anything"""
pass
if __name__ == '__main__':
gr_unittest.run(test_audio_jack, "test_audio_jack.xml")
| gpl-3.0 | -2,296,726,324,861,246,500 | 31.1 | 74 | 0.696262 | false |
UnderXirox/Python-3_Des-fichiers-complementaires | Guide/21_Chaines/21__02_caractéres.py | 1 | 1574 | caracteres = input("Saississez des caractères: ")
# Exemple avec aA019éà@#`{
for caractere in caracteres:
print("Le caractère {} a pour ordinal {}".format(caractere, ord(caractere)))
# Le caractère a a pour ordinal 97
# Le caractère A a pour ordinal 65
# Le caractère 0 a pour ordinal 48
# Le caractère 1 a pour ordinal 49
# Le caractère 9 a pour ordinal 57
# Le caractère é a pour ordinal 233
# Le caractère à a pour ordinal 224
# Le caractère @ a pour ordinal 64
# Le caractère # a pour ordinal 35
# Le caractère ` a pour ordinal 96
# Le caractère { a pour ordinal 123
nombres = input("saisissez des nombres, séparés par un espace: ")
# Exemple avec 123 233 42 420 4200 4242 42000 424242
for nombre in nombres.split(" "):
try:
print("Le caractère d'ordinal {} est {}".format(nombre,
chr(int(nombre))))
except:
print("Le nombre {} n'est pas un ordinal valide".format(nombre))
# Le caractère d'ordinal 123 est {
# Le caractère d'ordinal 233 est é
# Le caractère d'ordinal 42 est *
# Le caractère d'ordinal 420 est Ƥ
# Le caractère d'ordinal 4200 est ၨ
# Le caractère d'ordinal 4242 est ႒
# Le caractère d'ordinal 42000 est ꐐ
# Le caractère d'ordinal 424242 est
print("Et maintenant, voici quelques caractères non usuels:")
print(chr(0x2318),
chr(0x2704),
chr(0x2764),
chr(0x265b),
chr(0x2620),
chr(0x2622),
chr(0x1f053),
chr(0x1f084),
chr(0x1f0d1))
# ⌘ ✄ ❤ ♛ ☠ ☢ 🁓 🂄 🃑
| gpl-3.0 | -8,252,992,701,762,116,000 | 28.096154 | 80 | 0.653007 | false |
django-erp/django-erp | djangoerp/menus/migrations/0002_initial_fixture.py | 1 | 3421 | from django.db import models, migrations
from django.utils.translation import ugettext_noop as _
from django.urls import reverse
from ..utils import create_detail_actions, create_detail_navigation
def install(apps, schema_editor):
# Models.
User = apps.get_model('core.User')
Group = apps.get_model('core.Group')
Permission = apps.get_model('core.Permission')
Menu = apps.get_model('menus.Menu')
Link = apps.get_model('menus.Link')
# Instances.
users_group, is_new = Group.objects.get_or_create(name="users")
add_bookmark, is_new = Permission.objects.get_or_create_by_natural_key("add_link", "menus", "Link")
edit_user, is_new = Permission.objects.get_or_create_by_natural_key("change_user", "core", "User")
delete_user, is_new = Permission.objects.get_or_create_by_natural_key("delete_user", "core", "User")
# Menus.
main_menu, is_new = Menu.objects.get_or_create(
slug="main",
description=_("Main menu")
)
user_area_not_logged_menu, is_new = Menu.objects.get_or_create(
slug="user_area_not_logged",
description=_("User area for anonymous users")
)
user_area_logged_menu, is_new = Menu.objects.get_or_create(
slug="user_area_logged",
description=_("User area for logged users")
)
user_detail_actions, is_new = create_detail_actions(User)
user_detail_navigation, is_new = create_detail_navigation(User)
# Links.
my_dashboard_link, is_new = Link.objects.get_or_create(
menu_id=main_menu.pk,
title=_("My Dashboard"),
slug="my-dashboard",
description=_("Go back to your dashboard"),
url="/"
)
login_link, is_new = Link.objects.get_or_create(
title=_("Login"),
slug="login",
description=_("Login"),
url=reverse("user_login"),
only_authenticated=False,
menu_id=user_area_not_logged_menu.pk
)
administration_link, is_new = Link.objects.get_or_create(
title=_("Administration"),
slug="administration",
description=_("Administration panel"),
url="/admin/",
only_staff=True,
menu_id=user_area_logged_menu.pk
)
logout_link, is_new = Link.objects.get_or_create(
title=_("Logout"),
slug="logout",
description=_("Logout"),
url=reverse("user_logout"),
menu_id=user_area_logged_menu.pk
)
user_edit_link, is_new = Link.objects.get_or_create(
title=_("Edit"),
slug="user-edit",
description=_("Edit"),
url="user_edit",
context='{"pk": "object.pk"}',
menu_id=user_detail_actions.pk
)
user_edit_link.only_with_perms.set([edit_user])
user_delete_link, is_new = Link.objects.get_or_create(
title=_("Delete"),
slug="user-delete",
description=_("Delete"),
url="user_delete",
context='{"pk": "object.pk"}',
menu_id=user_detail_actions.pk
)
user_delete_link.only_with_perms.set([delete_user])
# Permissions.
users_group.permissions.add(add_bookmark)
class Migration(migrations.Migration):
dependencies = [
('menus', '0001_initial'),
('contenttypes', '0002_remove_content_type_name'),
('core', '0002_initial_fixture'),
]
operations = [
migrations.RunPython(install),
]
| mit | -8,335,009,606,325,166,000 | 30.1 | 104 | 0.602455 | false |
google/makani | gs/monitor2/apps/plugins/indicators/tests/control_test.py | 1 | 21368 | # Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for web monitor indicators."""
import ctypes
import time
import unittest
from makani.avionics.common import pack_avionics_messages
from makani.avionics.linux.swig import aio_helper
from makani.avionics.linux.swig import aio_util
from makani.avionics.network import aio_labels
from makani.avionics.network import aio_node
from makani.avionics.network import message_type as aio_message_type
from makani.control import control_types
from makani.control import system_params
from makani.gs.monitor import monitor_params
from makani.gs.monitor2.apps.layout import stoplights
from makani.gs.monitor2.apps.plugins import common
from makani.gs.monitor2.apps.plugins.indicators import control
from makani.gs.monitor2.apps.receiver import test_util
from makani.gs.monitor2.high_frequency_filters import filter_handlers
from makani.lib.python import c_helpers
from makani.lib.python import struct_tree
import mock
AIO_NODE_HELPER = c_helpers.EnumHelper('AioNode', aio_node)
MESSAGE_TYPE_HELPER = c_helpers.EnumHelper('MessageType', aio_message_type)
MONITOR_PARAMS = monitor_params.GetMonitorParams().contents
class TestIndicators(unittest.TestCase):
@classmethod
def setUp(cls):
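    # Reset the high-frequency filter state so each test starts clean.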
aio_util.InitFilters()
def _SynthesizeControlTelemetry(self, sequence=0):
return test_util.SynthesizeMessages(
['ControlTelemetry', 'ControlSlowTelemetry'], sequence)
def _MockMessage(self, message, seq_num, timestamp, source_name,
message_type, pack_func, packed_size,
validate_callback=None):
"""Mock a message in the CVT.
Args:
message: The message to insert into the CVT.
seq_num: Desired sequence number of the message.
timestamp: Desired timestamp of the message.
source_name: Name of the aio node, e.g. 'kAioNodeControllerA'.
      message_type: Name of the message type,
          e.g. 'kMessageTypeControlTelemetry'.
pack_func: Function to pack the message. Something like
PackControlTelemetry or PackGroundStationWeatherMessage.
packed_size: Number of bytes of the packed message.
validate_callback: Optional function that takes two messages, the input
and the reconstructed message, and evaluates to True if they are
satisfactorily equal.
E.g. lambda msg1, msg2: msg1.x == msg2.x and msg1.y == msg2.y.
"""
source = AIO_NODE_HELPER.Value(source_name)
message_enum = MESSAGE_TYPE_HELPER.Value(message_type)
# Pack the message, get its raw bytestring, and use CvtPut to insert the
# message into the CVT.
packed_msg_ptr = (ctypes.c_ubyte * packed_size)()
pack_func(ctypes.byref(message), 1, packed_msg_ptr)
raw_message = str(buffer(packed_msg_ptr, 0, packed_size))
aio_util.CvtPut(source, message_enum, seq_num, timestamp, raw_message)
if validate_callback is not None:
# Retrieve the message and validate it using the given validate callback.
result = aio_util.CvtPeek(source, message_enum)
assert len(result) == 3
buf = result[0]
reconstructed_message = aio_helper.UnpackMessage(buf, message_type)
assert validate_callback(message, reconstructed_message)
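  # A minimal usage sketch of _MockMessage (values are illustrative, mirroring
  # testWindGustFilteredData below):
  #   gs_obj = pack_avionics_messages.GroundStationWeatherMessage()
  #   self._MockMessage(
  #       gs_obj, 0, 12345, 'kAioNodePlatformSensorsA',
  #       'kMessageTypeGroundStationWeather',
  #       pack_avionics_messages.PackGroundStationWeatherMessage,
  #       pack_avionics_messages.PACK_GROUNDSTATIONWEATHERMESSAGE_SIZE,
  #       validate_callback=lambda a, b: list(a.wind.wind_velocity) ==
  #       list(b.wind.wind_velocity))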
def testWingPosChart(self):
self._messages = self._SynthesizeControlTelemetry()
    # Get the message data as nested dicts, keeping ctypes structs so that
    # fields can be mutated in place.
data = self._messages.Data(convert_to_basic_types=False)
control_telemetry = data['ControlTelemetry']['ControllerA']
control_telemetry.flight_mode = control_types.kFlightModePerched
indicator = control.WingPosChart(common.FULL_COMMS_MODE)
with mock.patch('makani.control.common.IsControlSystemRunning',
lambda x: True):
xg = control_telemetry.state_est.Xg
xg.x, xg.y, xg.z = 1.5, 2.5, 0.0
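      # With the wing near the ground-frame origin in perched mode, the
      # position chart is expected to report a normal stoplight.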
_, _, stoplight, _ = indicator.Filter(self._messages)
self.assertEqual(stoplight, stoplights.STOPLIGHT_NORMAL)
def testFilterFrequency(self):
"""Test that the frequency in filtered_data is accurate."""
window = filter_handlers.FILTER_FREQUENCY_WINDOW_SIZE
# Mock some interval times and check that the average frequency is accurate.
# The times are alternating .001 and .003.
    times = [.001 + (i % 2) * .002 for i in xrange(window)]
# We expect an average interval of .002, and 1.0/.002 is 500 Hz. We let the
# error margin be large enough to distinguish between 1000 and 333 Hz.
expected = 500
error_margin = 250
aio_util.RunAllFilters()
for i in xrange(window):
time.sleep(times[i])
aio_util.RunAllFilters()
filtered_data = aio_helper.GetFilteredData()
freq = filtered_data.filter_frequency.frequency
self.assertGreaterEqual(freq, expected - error_margin)
self.assertLessEqual(freq, expected + error_margin)
self.assertTrue(filtered_data.filter_frequency.valid)
def testWindGustFilteredData(self):
"""Test that the wind gust filter behaves as expected.
    Validate that the filter samples values at only 20 Hz, and that the
    TKE values returned are correct. This test runs in two phases.
"""
message_type = 'kMessageTypeGroundStationWeather'
message_size = pack_avionics_messages.PACK_GROUNDSTATIONWEATHERMESSAGE_SIZE
source_name = 'kAioNodePlatformSensorsA'
messages = {}
# Allow a 1.5 Hz margin of error.
error_margin = 1.5
first_test_iterations = 50
    # Only half of the samples will be taken because we insert values into
    # the CVT and call RunAllFilters() at twice the rate at which the filter
    # handler samples.
first_test_valid_iterations = first_test_iterations / 2
second_test_iterations = 50
gs_obj = pack_avionics_messages.GroundStationWeatherMessage()
# Call aio_util.RunAllFilters() at a very fast frequency, and check that
# the filter is sampling at ~20Hz. Also begin mocking velocities.
message_frequency = 40
delay = 1.0 / message_frequency
desired_frequency = 20
# Set up arbitrary velocity values to feed into the CVT.
first_u_velocities = [10.0 + i % 2 for i in xrange(first_test_iterations)]
first_v_velocities = [4.0 + 3 * (i % 2)
for i in xrange(first_test_iterations)]
# The filter handler only sees every other sample.
seen_u_velocities = [first_u_velocities[i]
for i in xrange(first_test_iterations) if i % 2 == 1]
seen_v_velocities = [first_v_velocities[i]
for i in xrange(first_test_iterations) if i % 2 == 1]
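    # Keeping the odd-indexed values assumes the 20 Hz sampler's phase lines
    # up so that it sees every second 40 Hz message.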
aio_util.RunAllFilters()
for i in xrange(first_test_iterations):
gs_obj.wind.wind_velocity[0] = first_u_velocities[i]
gs_obj.wind.wind_velocity[1] = first_v_velocities[i]
self._MockMessage(gs_obj, i, 12345, source_name, message_type,
pack_avionics_messages.PackGroundStationWeatherMessage,
message_size)
time.sleep(delay)
aio_util.RunAllFilters()
# Get filtered data and assert the sampling frequency was correct.
filtered_data = aio_helper.GetFilteredData()
frequency = filtered_data.wind_gust.frequency
self.assertGreaterEqual(frequency, desired_frequency - error_margin)
self.assertLessEqual(frequency, desired_frequency + error_margin)
# Call aio_util.RunAllFilters() at a slower frequency, and check that
# the frequency matches that slower frequency. Also mock some velocities
# and verify that the tke is computed correctly for those velocities.
message_frequency = 16
delay = 1.0 / message_frequency
desired_frequency = 16
second_u_velocities = [10.0 + i % 2 for i in xrange(second_test_iterations)]
second_v_velocities = [4.0 + 3 * (i % 2)
for i in xrange(second_test_iterations)]
aio_util.RunAllFilters()
for i in xrange(second_test_iterations):
gs_obj.wind.wind_velocity[0] = second_u_velocities[i]
gs_obj.wind.wind_velocity[1] = second_v_velocities[i]
self._MockMessage(gs_obj, first_test_iterations + i, 12345, source_name,
message_type,
pack_avionics_messages.PackGroundStationWeatherMessage,
message_size)
time.sleep(delay)
aio_util.RunAllFilters()
filtered_data = aio_helper.GetFilteredData()
messages['filtered'] = filtered_data
frequency = filtered_data.wind_gust.frequency
self.assertGreaterEqual(frequency, desired_frequency - error_margin)
self.assertLessEqual(frequency, desired_frequency + error_margin)
    # Check that the TKE is correct. This accounts for the wind gust samples
    # that were skipped in the first phase.
tke = filtered_data.wind_gust.tke
seen_iterations = first_test_valid_iterations + second_test_iterations
u_velocities = seen_u_velocities + second_u_velocities
v_velocities = seen_v_velocities + second_v_velocities
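    # The filter estimates turbulent kinetic energy from the horizontal wind
    # components as TKE = 0.5 * (Var(u) + Var(v)), with
    # Var(x) = E[x^2] - E[x]^2; the expected value below computes the same
    # quantity directly from the mocked samples.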
u_avg = sum(u_velocities) / seen_iterations
v_avg = sum(v_velocities) / seen_iterations
u_squared_avg = sum([x ** 2 for x in u_velocities]) / seen_iterations
v_squared_avg = sum([x ** 2 for x in v_velocities]) / seen_iterations
expected_tke = .5 * ((u_squared_avg - u_avg ** 2)
+ (v_squared_avg - v_avg ** 2))
self.assertTrue(filtered_data.wind_gust.valid)
self.assertAlmostEqual(tke, expected_tke)
def testFlapsIndicatorSparseCommsCoverage(self):
filtered = aio_helper.GetFilteredData()
filtered.merge_tether_down.valid = True
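    # Populate fresh servo statuses (no_update_count of zero) with distinct
    # angles so the indicator has valid data for every servo.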
for i in range(aio_labels.kNumServos):
state = filtered.merge_tether_down.tether_down.servo_statuses[i]
state.no_update_count = 0
state.angle = float(i)
indicator = control.FlapsIndicator(common.SPARSE_COMMS_MODE)
messages = struct_tree.StructTree({'filtered': filtered})
self.assertEqual(stoplights.STOPLIGHT_NORMAL,
indicator.Filter(messages)[1])
def _GetSynthesizedControlTelemetry(self):
messages = self._SynthesizeControlTelemetry()
data = messages.Data(convert_to_basic_types=False)
control_telemetry = data['ControlTelemetry']['ControllerA']
filtered = aio_helper.GetFilteredData()
filtered.merge_tether_down.valid = True
tether_control_telemetry = (
filtered.merge_tether_down.tether_down.control_telemetry)
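    # A no_update_count of zero marks the merged tether telemetry as freshly
    # updated, so sparse-comms indicators treat it as valid.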
tether_control_telemetry.no_update_count = 0
data['filtered'] = filtered
messages = struct_tree.StructTree(data, readonly=False)
return control_telemetry, tether_control_telemetry, messages
def testTensionChart(self):
def SetTension(value, tether_control_telemetry, control_telemetry):
control_telemetry.state_est.tether_force_b.sph.tension = value
tether_control_telemetry.tension = value
def TestResult(tension, tether_control_telemetry, control_telemetry,
messages, expected_stoplight, expected_warning):
SetTension(tension, tether_control_telemetry, control_telemetry)
for mode in [common.SPARSE_COMMS_MODE, common.FULL_COMMS_MODE]:
indicator = control.TensionChart(mode)
_, tensions, stoplight, warning = indicator.Filter(messages)
self.assertEqual(stoplight, expected_stoplight)
self.assertEqual(warning, expected_warning)
self.assertAlmostEqual(tensions[-1], tension * 0.001, places=3)
control_telemetry, tether_control_telemetry, messages = (
self._GetSynthesizedControlTelemetry())
control_telemetry.state_est.tether_force_b.valid = True
with mock.patch('makani.control.common.IsControlSystemRunning',
lambda x: True):
with mock.patch('makani.control.common.AnyHoverFlightMode',
lambda x: False):
with mock.patch('makani.control.common.AnyCrosswindFlightMode',
lambda x: False):
TestResult(MONITOR_PARAMS.tether.tension.high,
tether_control_telemetry, control_telemetry,
messages, stoplights.STOPLIGHT_NORMAL, '')
TestResult(MONITOR_PARAMS.tether.tension.very_high,
tether_control_telemetry, control_telemetry,
messages, stoplights.STOPLIGHT_WARNING, '')
TestResult(MONITOR_PARAMS.tether.tension.very_high + 1.0,
tether_control_telemetry, control_telemetry,
messages, stoplights.STOPLIGHT_ERROR, '')
with mock.patch('makani.control.common.AnyCrosswindFlightMode',
lambda x: True):
TestResult(MONITOR_PARAMS.tether.tension_crosswind.low,
tether_control_telemetry, control_telemetry,
messages, stoplights.STOPLIGHT_NORMAL, '')
TestResult(MONITOR_PARAMS.tether.tension_crosswind.high,
tether_control_telemetry, control_telemetry,
messages, stoplights.STOPLIGHT_NORMAL, '')
TestResult(MONITOR_PARAMS.tether.tension_crosswind.very_low,
tether_control_telemetry, control_telemetry,
messages, stoplights.STOPLIGHT_WARNING, '')
TestResult(MONITOR_PARAMS.tether.tension_crosswind.very_high,
tether_control_telemetry, control_telemetry,
messages, stoplights.STOPLIGHT_WARNING, '')
TestResult(MONITOR_PARAMS.tether.tension_crosswind.very_low - 1.0,
tether_control_telemetry, control_telemetry,
messages, stoplights.STOPLIGHT_ERROR, '')
TestResult(MONITOR_PARAMS.tether.tension_crosswind.very_high + 1.0,
tether_control_telemetry, control_telemetry,
messages, stoplights.STOPLIGHT_ERROR, '')
def testTetherReleaseReadinessIndicator(self):
"""Test entering the 'continue' and 'any_warning_or_error' branch."""
loadcells = ['LoadcellPortA', 'LoadcellPortB']
indicator = control.TetherReleaseReadinessIndicator(loadcells)
    # Create a message snapshot that fails silently, to gracefully handle
    # missing messages.
messages = test_util.SynthesizeMessages(
['ControlTelemetry', 'ControlSlowTelemetry', 'Loadcell'], 0,
fail_silently=True)
# Set up the port message so that it contains an empty StructTree in one
# port and an error in the other port.
read_message = messages.Data(convert_to_basic_types=False)
error_code = pack_avionics_messages.kLoadcellErrorBatteryDisconnected
del read_message['Loadcell']['LoadcellPortA']
read_message['Loadcell']['LoadcellPortB'].status.warning = 0
read_message['Loadcell']['LoadcellPortB'].status.error = error_code
read_message['ControlSlowTelemetry']['ControllerA'].flight_plan = (
system_params.kFlightPlanTurnKey)
    # Call Filter, check that the stoplight value is ERROR, and check that
    # the string values report a BatteryDisconnected error on PortB.
result = indicator.Filter(messages)
result_string, result_stoplight = result
result_lines = result_string.split('\n')
self.assertEqual(stoplights.STOPLIGHT_ERROR, result_stoplight)
self.assertEqual('PortB:', result_lines[0])
self.assertEqual('[ERROR]', result_lines[1])
self.assertEqual('BatteryDisconnected', result_lines[2])
# Change flight plan to a low altitude plan.
read_message['ControlSlowTelemetry']['ControllerA'].flight_plan = (
system_params.kFlightPlanLaunchPerch)
result_string, result_stoplight = indicator.Filter(messages)
self.assertEqual(result_string, 'No Key')
self.assertEqual(result_stoplight, stoplights.STOPLIGHT_NORMAL)
    # Disable the BatteryDisconnected error (key is plugged in).
read_message['Loadcell']['LoadcellPortB'].status.error = 0
result_string, result_stoplight = indicator.Filter(messages)
self.assertEqual(result_string, 'Keyed: PortB')
self.assertEqual(result_stoplight, stoplights.STOPLIGHT_ERROR)
def testTetherReleaseIndicator_Invalid_And_Disarmed(self):
indicator = control.TetherReleaseIndicator()
messages = self._SynthesizeControlTelemetry()
read_message = messages.Data(convert_to_basic_types=False)
read_message['ControlSlowTelemetry']['ControllerA'].flight_plan = (
system_params.kFlightPlanTurnKey)
attributes = indicator._GetAllAttributes(messages)
skeleton = attributes[0]
# Test invalid input.
# Set up all messages for invalid input.
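    # (A no_update_count of 33 exceeds MAX_NO_UPDATE_COUNT_TETHER_RELEASE,
    # which a later test pins at 32, so every status reads as stale.)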
for s in skeleton:
s.no_update_count = 33
# Call Filter, check stoplight value, and check the string value
# to make sure the invalid input string was returned.
result = indicator.Filter(messages)
result_string, result_stoplight = result
self.assertEqual(stoplights.STOPLIGHT_UNAVAILABLE, result_stoplight)
self.assertEqual('--\n\n\n', result_string)
# Test that all statuses are disarmed.
# Set up all messages for disarmed state.
for s in skeleton:
s.no_update_count = 1
# Call Filter, check stoplight value, and check the string values
# to make sure each node state is "Disarmed".
result_string, result_stoplight = indicator.Filter(messages)
result_array = result_string.split('\n')
self.assertEqual(stoplights.STOPLIGHT_WARNING, result_stoplight)
self.assertEqual('PortA: Disarmed, safety on', result_array[0])
self.assertEqual('PortB: Disarmed, safety on', result_array[1])
self.assertEqual('StarboardA: Disarmed, safety on', result_array[2])
self.assertEqual('StarboardB: Disarmed, safety on', result_array[3])
def testTetherReleaseIndicator_Invalid_Released_Error_ArmedNoInterlock(self):
"""Test Invalid, Released, Error, and Armed No Interlock."""
indicator = control.TetherReleaseIndicator()
messages = self._SynthesizeControlTelemetry()
read_message = messages.Data(convert_to_basic_types=False)
read_message['ControlSlowTelemetry']['ControllerA'].flight_plan = (
system_params.kFlightPlanTurnKey)
attributes = indicator._GetAllAttributes(messages)
skeleton = attributes[0]
# Set PortA to greater than MAX_NO_UPDATE_COUNT_TETHER_RELEASE.
skeleton[0].no_update_count = 33
# Set PortB to released.
skeleton[1].released = 1
skeleton[1].no_update_count = 1
# Set StarboardA state to "ERROR".
skeleton[2].state = pack_avionics_messages.kActuatorStateError
skeleton[2].no_update_count = 1
# Set StarboardB state to "ARMED" and interlock off.
skeleton[3].state = pack_avionics_messages.kActuatorStateArmed
skeleton[3].interlock_switched = 0
skeleton[3].no_update_count = 1
result_string, result_stoplight = indicator.Filter(messages)
result_array = result_string.split('\n')
# We expect STOPLIGHT_ERROR because at least one of the releases
# is in state RELEASED.
self.assertEqual(stoplights.STOPLIGHT_ERROR, result_stoplight)
self.assertEqual('PortA: --', result_array[0])
self.assertEqual('PortB: Released', result_array[1])
self.assertEqual('StarboardA: Error', result_array[2])
self.assertEqual('StarboardB: Armed, safety on', result_array[3])
def testTetherReleaseIndicator_Invalid_Error_NotArmedInterLock_ArmedRdy(self):
"""Test Invalid, Error, Not armed interlock, and Armed."""
indicator = control.TetherReleaseIndicator()
messages = self._SynthesizeControlTelemetry()
read_message = messages.Data(convert_to_basic_types=False)
read_message['ControlSlowTelemetry']['ControllerA'].flight_plan = (
system_params.kFlightPlanTurnKey)
attributes = indicator._GetAllAttributes(messages)
# Set node_status flags to cause "Error" for PortB
node_status = attributes[2]
node_status[aio_node.kAioNodeLoadcellPortB].flags = (
pack_avionics_messages.kTetherNodeFlagAnyError)
skeleton = attributes[0]
# Set PortA equal to MAX_NO_UPDATE_COUNT_TETHER_RELEASE.
skeleton[0].no_update_count = 32
skeleton[1].no_update_count = 1
# Set StarboardA state to init and interlock_switched on.
skeleton[2].state = pack_avionics_messages.kActuatorStateInit
skeleton[2].interlock_switched = 1
skeleton[2].no_update_count = 1
    # Set StarboardB interlock_switched on and state to armed.
skeleton[3].interlock_switched = 1
skeleton[3].state = pack_avionics_messages.kActuatorStateArmed
skeleton[3].no_update_count = 1
result_string, result_stoplight = indicator.Filter(messages)
result_array = result_string.split('\n')
self.assertEqual(stoplights.STOPLIGHT_ERROR, result_stoplight)
self.assertEqual('PortA: --', result_array[0])
self.assertEqual('PortB: Error', result_array[1])
self.assertEqual('StarboardA: Disarmed, safety off', result_array[2])
self.assertEqual('StarboardB: Armed, safety off', result_array[3])
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -3,215,857,769,434,260,000 | 46.169978 | 80 | 0.700768 | false |
OpenDataAlex/etlTest | etltest/utilities/settings.py | 1 | 2135 | """
This file reads the etltest-settings.yml file and loads application settings.
Having a settings file in the user's home directory overrides the one in the application directory.
"""
__author__ = 'coty'
import logging
import pprint
import os
import yaml
from .settings_manager import SettingsManager
# Begin CustomLogging
# this import adds the additional log levels I created
# create console handler and set level to debug
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
# set formatter
console.setFormatter(formatter)
# End CustomLogging
etltest_config = {}
settings_log = logging.getLogger(name="settings")
settings_log.setLevel(logging.DEBUG)
settings_log.addHandler(console)
pp = pprint.PrettyPrinter(indent=4)
settings_filename = os.path.join(SettingsManager.get_file_location(), '.etltest-settings.yml')
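# Candidate paths for the settings file (with and without a leading slash).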
settings_fs_locs = ["{}/{}".format('', settings_filename), "".join(settings_filename)]
settings_loaded = False
for the_path in settings_fs_locs:
settings_log.debug("Attempting to load {}".format(the_path))
try:
with open(the_path, 'r') as f:
prop_list = yaml.load(f.read())
for key, value in list(prop_list.items()):
etltest_config[key] = value
settings_log.debug("Settings loaded from {0}, {1}".format(the_path, etltest_config))
settings_loaded = True
break
except (OSError, IOError) as e:
settings_log.warn("{} {}".format(e.strerror, the_path))
if not settings_loaded:
settings_log.warn("Could not find settings file in {}. Using defaults where present.".format(','.join(settings_fs_locs)))
# For now there is nothing in the settings file that is required for operation. Removing the exit call.
# exit()
# INFO = 20
# TESTING = 21
# DEBUG = 10
# TRACE = 5
try:
console.level = etltest_config['logging_level']
except KeyError:
etltest_config['logging_level'] = 20 # set default level to info
console.level = etltest_config['logging_level']
| gpl-3.0 | 1,179,593,800,444,254,500 | 27.851351 | 125 | 0.693208 | false |
DazWorrall/ansible | contrib/inventory/azure_rm.py | 1 | 32749 | #!/usr/bin/env python
#
# Copyright (c) 2016 Matt Davis, <[email protected]>
# Chris Houseknecht, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
'''
Azure External Inventory Script
===============================
Generates dynamic inventory by making API requests to the Azure Resource
Manager using the Azure Python SDK. For instructions on installing the
Azure Python SDK see http://azure-sdk-for-python.readthedocs.org/
Authentication
--------------
The order of precedence is command line arguments, environment variables,
and finally the [default] profile found in ~/.azure/credentials.
If using a credentials file, it should be an ini formatted file with one or
more sections, which we refer to as profiles. The script looks for a
[default] section, if a profile is not specified either on the command line
or with an environment variable. The keys in a profile will match the
list of command line arguments below.
For command line arguments and environment variables specify a profile found
in your ~/.azure/credentials file, or a service principal or Active Directory
user.
Command line arguments:
- profile
- client_id
- secret
- subscription_id
- tenant
- ad_user
- password
Environment variables:
- AZURE_PROFILE
- AZURE_CLIENT_ID
- AZURE_SECRET
- AZURE_SUBSCRIPTION_ID
- AZURE_TENANT
- AZURE_AD_USER
- AZURE_PASSWORD
Run for Specific Host
-----------------------
When run for a specific host using the --host option, a resource group is
required. For a specific host, this script returns the following variables:
{
"ansible_host": "XXX.XXX.XXX.XXX",
"computer_name": "computer_name2",
"fqdn": null,
"id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Compute/virtualMachines/object-name",
"image": {
"offer": "CentOS",
"publisher": "OpenLogic",
"sku": "7.1",
"version": "latest"
},
"location": "westus",
"mac_address": "00-00-5E-00-53-FE",
"name": "object-name",
"network_interface": "interface-name",
"network_interface_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkInterfaces/object-name1",
"network_security_group": null,
"network_security_group_id": null,
"os_disk": {
"name": "object-name",
"operating_system_type": "Linux"
},
"plan": null,
"powerstate": "running",
"private_ip": "172.26.3.6",
"private_ip_alloc_method": "Static",
"provisioning_state": "Succeeded",
"public_ip": "XXX.XXX.XXX.XXX",
"public_ip_alloc_method": "Static",
"public_ip_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/publicIPAddresses/object-name",
"public_ip_name": "object-name",
"resource_group": "galaxy-production",
"security_group": "object-name",
"security_group_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkSecurityGroups/object-name",
"tags": {
"db": "database"
},
"type": "Microsoft.Compute/virtualMachines",
"virtual_machine_size": "Standard_DS4"
}
Groups
------
When run in --list mode, instances are grouped by the following categories:
- azure
- location
- resource_group
- security_group
- tag key
- tag key_value
Control groups using azure_rm.ini or set environment variables:
AZURE_GROUP_BY_RESOURCE_GROUP=yes
AZURE_GROUP_BY_LOCATION=yes
AZURE_GROUP_BY_SECURITY_GROUP=yes
AZURE_GROUP_BY_TAG=yes
Select hosts within specific resource groups by assigning a comma separated list to:
AZURE_RESOURCE_GROUPS=resource_group_a,resource_group_b
Select hosts for specific tag keys by assigning a comma separated list of tag keys to:
AZURE_TAGS=key1,key2,key3
Select hosts for specific locations:
AZURE_LOCATIONS=eastus,westus,eastus2
Or, select hosts for specific tag key:value pairs by assigning a comma separated list of key:value pairs to:
AZURE_TAGS=key1:value1,key2:value2
If you don't need the powerstate, you can improve performance by turning off powerstate fetching:
AZURE_INCLUDE_POWERSTATE=no
azure_rm.ini
------------
As mentioned above, you can control execution using environment variables or a .ini file. A sample
azure_rm.ini is included. The name of the .ini file is the basename of the inventory script (in this case
'azure_rm') with a .ini extension. It also assumes the .ini file is alongside the script. To specify
a different path for the .ini file, define the AZURE_INI_PATH environment variable:
export AZURE_INI_PATH=/path/to/custom.ini
Powerstate:
-----------
The powerstate attribute indicates whether or not a host is running. If the value is 'running', the machine is
up. If the value is anything other than 'running', the machine is down, and will be unreachable.
Examples:
---------
Execute /bin/uname on all instances in the galaxy-qa resource group
$ ansible -i azure_rm.py galaxy-qa -m shell -a "/bin/uname -a"
Use the inventory script to print instance specific information
$ contrib/inventory/azure_rm.py --host my_instance_host_name --pretty
Use with a playbook
$ ansible-playbook -i contrib/inventory/azure_rm.py my_playbook.yml --limit galaxy-qa
Insecure Platform Warning
-------------------------
If you receive InsecurePlatformWarning from urllib3, install the
requests security packages:
pip install requests[security]
author:
- Chris Houseknecht (@chouseknecht)
- Matt Davis (@nitzmahone)
Company: Ansible by Red Hat
Version: 1.0.0
'''
import argparse
import ConfigParser
import json
import os
import re
import sys
from packaging.version import Version
from os.path import expanduser
HAS_AZURE = True
HAS_AZURE_EXC = None
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.compute import __version__ as azure_compute_version
from azure.common import AzureMissingResourceHttpError, AzureHttpError
from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials
from azure.mgmt.network.network_management_client import NetworkManagementClient
from azure.mgmt.resource.resources.resource_management_client import ResourceManagementClient
from azure.mgmt.compute.compute_management_client import ComputeManagementClient
except ImportError as exc:
HAS_AZURE_EXC = exc
HAS_AZURE = False
AZURE_CREDENTIAL_ENV_MAPPING = dict(
profile='AZURE_PROFILE',
subscription_id='AZURE_SUBSCRIPTION_ID',
client_id='AZURE_CLIENT_ID',
secret='AZURE_SECRET',
tenant='AZURE_TENANT',
ad_user='AZURE_AD_USER',
password='AZURE_PASSWORD'
)
AZURE_CONFIG_SETTINGS = dict(
resource_groups='AZURE_RESOURCE_GROUPS',
tags='AZURE_TAGS',
locations='AZURE_LOCATIONS',
include_powerstate='AZURE_INCLUDE_POWERSTATE',
group_by_resource_group='AZURE_GROUP_BY_RESOURCE_GROUP',
group_by_location='AZURE_GROUP_BY_LOCATION',
group_by_security_group='AZURE_GROUP_BY_SECURITY_GROUP',
group_by_tag='AZURE_GROUP_BY_TAG'
)
AZURE_MIN_VERSION = "0.30.0rc5"
def azure_id_to_dict(id):
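    # Split an Azure resource id ('/subscriptions/<id>/resourceGroups/<name>/...')
    # into a dict pairing each path segment with the value that follows it.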
pieces = re.sub(r'^\/', '', id).split('/')
result = {}
index = 0
while index < len(pieces) - 1:
result[pieces[index]] = pieces[index + 1]
index += 1
return result
class AzureRM(object):
def __init__(self, args):
self._args = args
self._compute_client = None
self._resource_client = None
self._network_client = None
self.debug = False
if args.debug:
self.debug = True
self.credentials = self._get_credentials(args)
if not self.credentials:
self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
"or define a profile in ~/.azure/credentials.")
if self.credentials.get('subscription_id', None) is None:
self.fail("Credentials did not include a subscription_id value.")
self.log("setting subscription_id")
self.subscription_id = self.credentials['subscription_id']
if self.credentials.get('client_id') is not None and \
self.credentials.get('secret') is not None and \
self.credentials.get('tenant') is not None:
self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'],
secret=self.credentials['secret'],
tenant=self.credentials['tenant'])
elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None:
self.azure_credentials = UserPassCredentials(self.credentials['ad_user'], self.credentials['password'])
else:
self.fail("Failed to authenticate with provided credentials. Some attributes were missing. "
"Credentials must include client_id, secret and tenant or ad_user and password.")
def log(self, msg):
if self.debug:
print(msg + u'\n')
def fail(self, msg):
raise Exception(msg)
def _get_profile(self, profile="default"):
path = expanduser("~")
path += "/.azure/credentials"
try:
config = ConfigParser.ConfigParser()
config.read(path)
except Exception as exc:
self.fail("Failed to access {0}. Check that the file exists and you have read "
"access. {1}".format(path, str(exc)))
credentials = dict()
for key in AZURE_CREDENTIAL_ENV_MAPPING:
try:
credentials[key] = config.get(profile, key, raw=True)
except:
pass
if credentials.get('client_id') is not None or credentials.get('ad_user') is not None:
return credentials
return None
def _get_env_credentials(self):
env_credentials = dict()
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
env_credentials[attribute] = os.environ.get(env_variable, None)
if env_credentials['profile'] is not None:
credentials = self._get_profile(env_credentials['profile'])
return credentials
if env_credentials['client_id'] is not None or env_credentials['ad_user'] is not None:
return env_credentials
return None
def _get_credentials(self, params):
# Get authentication credentials.
# Precedence: cmd line parameters-> environment variables-> default profile in ~/.azure/credentials.
self.log('Getting credentials')
arg_credentials = dict()
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
arg_credentials[attribute] = getattr(params, attribute)
# try module params
if arg_credentials['profile'] is not None:
self.log('Retrieving credentials with profile parameter.')
credentials = self._get_profile(arg_credentials['profile'])
return credentials
if arg_credentials['client_id'] is not None:
self.log('Received credentials from parameters.')
return arg_credentials
# try environment
env_credentials = self._get_env_credentials()
if env_credentials:
self.log('Received credentials from env.')
return env_credentials
# try default profile from ~./azure/credentials
default_credentials = self._get_profile()
if default_credentials:
self.log('Retrieved default profile credentials from ~/.azure/credentials.')
return default_credentials
return None
def _register(self, key):
try:
# We have to perform the one-time registration here. Otherwise, we receive an error the first
# time we attempt to use the requested client.
resource_client = self.rm_client
resource_client.providers.register(key)
except Exception as exc:
self.log("One-time registration of {0} failed - {1}".format(key, str(exc)))
self.log("You might need to register {0} using an admin account".format(key))
self.log(("To register a provider using the Python CLI: "
"https://docs.microsoft.com/azure/azure-resource-manager/"
"resource-manager-common-deployment-errors#noregisteredproviderfound"))
@property
def network_client(self):
self.log('Getting network client')
if not self._network_client:
self._network_client = NetworkManagementClient(self.azure_credentials, self.subscription_id)
self._register('Microsoft.Network')
return self._network_client
@property
def rm_client(self):
self.log('Getting resource manager client')
if not self._resource_client:
self._resource_client = ResourceManagementClient(self.azure_credentials, self.subscription_id)
return self._resource_client
@property
def compute_client(self):
self.log('Getting compute client')
if not self._compute_client:
self._compute_client = ComputeManagementClient(self.azure_credentials, self.subscription_id)
self._register('Microsoft.Compute')
return self._compute_client
class AzureInventory(object):
def __init__(self):
self._args = self._parse_cli_args()
try:
rm = AzureRM(self._args)
except Exception as e:
sys.exit("{0}".format(str(e)))
self._compute_client = rm.compute_client
self._network_client = rm.network_client
self._resource_client = rm.rm_client
self._security_groups = None
self.resource_groups = []
self.tags = None
self.locations = None
self.replace_dash_in_groups = False
self.group_by_resource_group = True
self.group_by_location = True
self.group_by_security_group = True
self.group_by_tag = True
self.include_powerstate = True
self._inventory = dict(
_meta=dict(
hostvars=dict()
),
azure=[]
)
self._get_settings()
if self._args.resource_groups:
self.resource_groups = self._args.resource_groups.split(',')
if self._args.tags:
self.tags = self._args.tags.split(',')
if self._args.locations:
self.locations = self._args.locations.split(',')
if self._args.no_powerstate:
self.include_powerstate = False
self.get_inventory()
print(self._json_format_dict(pretty=self._args.pretty))
sys.exit(0)
def _parse_cli_args(self):
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Produce an Ansible Inventory file for an Azure subscription')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--debug', action='store_true', default=False,
help='Send debug messages to STDOUT')
parser.add_argument('--host', action='store',
help='Get all information about an instance')
parser.add_argument('--pretty', action='store_true', default=False,
help='Pretty print JSON output(default: False)')
parser.add_argument('--profile', action='store',
help='Azure profile contained in ~/.azure/credentials')
parser.add_argument('--subscription_id', action='store',
help='Azure Subscription Id')
parser.add_argument('--client_id', action='store',
help='Azure Client Id ')
parser.add_argument('--secret', action='store',
help='Azure Client Secret')
parser.add_argument('--tenant', action='store',
help='Azure Tenant Id')
parser.add_argument('--ad-user', action='store',
help='Active Directory User')
parser.add_argument('--password', action='store',
help='password')
parser.add_argument('--resource-groups', action='store',
help='Return inventory for comma separated list of resource group names')
parser.add_argument('--tags', action='store',
help='Return inventory for comma separated list of tag key:value pairs')
parser.add_argument('--locations', action='store',
help='Return inventory for comma separated list of locations')
parser.add_argument('--no-powerstate', action='store_true', default=False,
help='Do not include the power state of each virtual host')
return parser.parse_args()
def get_inventory(self):
if len(self.resource_groups) > 0:
# get VMs for requested resource groups
for resource_group in self.resource_groups:
try:
virtual_machines = self._compute_client.virtual_machines.list(resource_group)
except Exception as exc:
sys.exit("Error: fetching virtual machines for resource group {0} - {1}".format(resource_group,
str(exc)))
if self._args.host or self.tags:
selected_machines = self._selected_machines(virtual_machines)
self._load_machines(selected_machines)
else:
self._load_machines(virtual_machines)
else:
# get all VMs within the subscription
try:
virtual_machines = self._compute_client.virtual_machines.list_all()
except Exception as exc:
sys.exit("Error: fetching virtual machines - {0}".format(str(exc)))
if self._args.host or self.tags or self.locations:
selected_machines = self._selected_machines(virtual_machines)
self._load_machines(selected_machines)
else:
self._load_machines(virtual_machines)
def _load_machines(self, machines):
for machine in machines:
id_dict = azure_id_to_dict(machine.id)
# TODO - The API is returning an ID value containing resource group name in ALL CAPS. If/when it gets
# fixed, we should remove the .lower(). Opened Issue
# #574: https://github.com/Azure/azure-sdk-for-python/issues/574
resource_group = id_dict['resourceGroups'].lower()
if self.group_by_security_group:
self._get_security_groups(resource_group)
host_vars = dict(
ansible_host=None,
private_ip=None,
private_ip_alloc_method=None,
public_ip=None,
public_ip_name=None,
public_ip_id=None,
public_ip_alloc_method=None,
fqdn=None,
location=machine.location,
name=machine.name,
type=machine.type,
id=machine.id,
tags=machine.tags,
network_interface_id=None,
network_interface=None,
resource_group=resource_group,
mac_address=None,
plan=(machine.plan.name if machine.plan else None),
virtual_machine_size=machine.hardware_profile.vm_size,
computer_name=(machine.os_profile.computer_name if machine.os_profile else None),
provisioning_state=machine.provisioning_state,
)
host_vars['os_disk'] = dict(
name=machine.storage_profile.os_disk.name,
operating_system_type=machine.storage_profile.os_disk.os_type.value
)
if self.include_powerstate:
host_vars['powerstate'] = self._get_powerstate(resource_group, machine.name)
if machine.storage_profile.image_reference:
host_vars['image'] = dict(
offer=machine.storage_profile.image_reference.offer,
publisher=machine.storage_profile.image_reference.publisher,
sku=machine.storage_profile.image_reference.sku,
version=machine.storage_profile.image_reference.version
)
# Add windows details
if machine.os_profile is not None and machine.os_profile.windows_configuration is not None:
host_vars['windows_auto_updates_enabled'] = \
machine.os_profile.windows_configuration.enable_automatic_updates
host_vars['windows_timezone'] = machine.os_profile.windows_configuration.time_zone
host_vars['windows_rm'] = None
if machine.os_profile.windows_configuration.win_rm is not None:
host_vars['windows_rm'] = dict(listeners=None)
if machine.os_profile.windows_configuration.win_rm.listeners is not None:
host_vars['windows_rm']['listeners'] = []
for listener in machine.os_profile.windows_configuration.win_rm.listeners:
host_vars['windows_rm']['listeners'].append(dict(protocol=listener.protocol,
certificate_url=listener.certificate_url))
for interface in machine.network_profile.network_interfaces:
interface_reference = self._parse_ref_id(interface.id)
network_interface = self._network_client.network_interfaces.get(
interface_reference['resourceGroups'],
interface_reference['networkInterfaces'])
if network_interface.primary:
if self.group_by_security_group and \
self._security_groups[resource_group].get(network_interface.id, None):
host_vars['security_group'] = \
self._security_groups[resource_group][network_interface.id]['name']
host_vars['security_group_id'] = \
self._security_groups[resource_group][network_interface.id]['id']
host_vars['network_interface'] = network_interface.name
host_vars['network_interface_id'] = network_interface.id
host_vars['mac_address'] = network_interface.mac_address
for ip_config in network_interface.ip_configurations:
host_vars['private_ip'] = ip_config.private_ip_address
host_vars['private_ip_alloc_method'] = ip_config.private_ip_allocation_method
if ip_config.public_ip_address:
public_ip_reference = self._parse_ref_id(ip_config.public_ip_address.id)
public_ip_address = self._network_client.public_ip_addresses.get(
public_ip_reference['resourceGroups'],
public_ip_reference['publicIPAddresses'])
host_vars['ansible_host'] = public_ip_address.ip_address
host_vars['public_ip'] = public_ip_address.ip_address
host_vars['public_ip_name'] = public_ip_address.name
host_vars['public_ip_alloc_method'] = public_ip_address.public_ip_allocation_method
host_vars['public_ip_id'] = public_ip_address.id
if public_ip_address.dns_settings:
host_vars['fqdn'] = public_ip_address.dns_settings.fqdn
self._add_host(host_vars)
def _selected_machines(self, virtual_machines):
selected_machines = []
for machine in virtual_machines:
if self._args.host and self._args.host == machine.name:
selected_machines.append(machine)
if self.tags and self._tags_match(machine.tags, self.tags):
selected_machines.append(machine)
if self.locations and machine.location in self.locations:
selected_machines.append(machine)
return selected_machines
def _get_security_groups(self, resource_group):
''' For a given resource_group build a mapping of network_interface.id to security_group name '''
if not self._security_groups:
self._security_groups = dict()
if not self._security_groups.get(resource_group):
self._security_groups[resource_group] = dict()
for group in self._network_client.network_security_groups.list(resource_group):
if group.network_interfaces:
for interface in group.network_interfaces:
self._security_groups[resource_group][interface.id] = dict(
name=group.name,
id=group.id
)
def _get_powerstate(self, resource_group, name):
try:
vm = self._compute_client.virtual_machines.get(resource_group,
name,
expand='instanceview')
except Exception as exc:
sys.exit("Error: fetching instanceview for host {0} - {1}".format(name, str(exc)))
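        # The instance view reports power state as e.g. 'PowerState/running';
        # strip the prefix and return the bare state.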
return next((s.code.replace('PowerState/', '')
for s in vm.instance_view.statuses if s.code.startswith('PowerState')), None)
def _add_host(self, vars):
host_name = self._to_safe(vars['name'])
resource_group = self._to_safe(vars['resource_group'])
security_group = None
if vars.get('security_group'):
security_group = self._to_safe(vars['security_group'])
if self.group_by_resource_group:
if not self._inventory.get(resource_group):
self._inventory[resource_group] = []
self._inventory[resource_group].append(host_name)
if self.group_by_location:
if not self._inventory.get(vars['location']):
self._inventory[vars['location']] = []
self._inventory[vars['location']].append(host_name)
if self.group_by_security_group and security_group:
if not self._inventory.get(security_group):
self._inventory[security_group] = []
self._inventory[security_group].append(host_name)
self._inventory['_meta']['hostvars'][host_name] = vars
self._inventory['azure'].append(host_name)
if self.group_by_tag and vars.get('tags'):
for key, value in vars['tags'].items():
safe_key = self._to_safe(key)
safe_value = safe_key + '_' + self._to_safe(value)
if not self._inventory.get(safe_key):
self._inventory[safe_key] = []
if not self._inventory.get(safe_value):
self._inventory[safe_value] = []
self._inventory[safe_key].append(host_name)
self._inventory[safe_value].append(host_name)
def _json_format_dict(self, pretty=False):
# convert inventory to json
if pretty:
return json.dumps(self._inventory, sort_keys=True, indent=2)
else:
return json.dumps(self._inventory)
def _get_settings(self):
# Load settings from the .ini, if it exists. Otherwise,
# look for environment values.
file_settings = self._load_settings()
if file_settings:
for key in AZURE_CONFIG_SETTINGS:
if key in ('resource_groups', 'tags', 'locations') and file_settings.get(key):
values = file_settings.get(key).split(',')
if len(values) > 0:
setattr(self, key, values)
elif file_settings.get(key):
val = self._to_boolean(file_settings[key])
setattr(self, key, val)
else:
env_settings = self._get_env_settings()
for key in AZURE_CONFIG_SETTINGS:
                if key in ('resource_groups', 'tags', 'locations') and env_settings.get(key):
values = env_settings.get(key).split(',')
if len(values) > 0:
setattr(self, key, values)
elif env_settings.get(key, None) is not None:
val = self._to_boolean(env_settings[key])
setattr(self, key, val)
def _parse_ref_id(self, reference):
response = {}
keys = reference.strip('/').split('/')
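        # Reference ids alternate between segment names and values; pair them up.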
for index in range(len(keys)):
if index < len(keys) - 1 and index % 2 == 0:
response[keys[index]] = keys[index + 1]
return response
def _to_boolean(self, value):
if value in ['Yes', 'yes', 1, 'True', 'true', True]:
result = True
elif value in ['No', 'no', 0, 'False', 'false', False]:
result = False
else:
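            # Any unrecognized value defaults to True.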
result = True
return result
def _get_env_settings(self):
env_settings = dict()
for attribute, env_variable in AZURE_CONFIG_SETTINGS.items():
env_settings[attribute] = os.environ.get(env_variable, None)
return env_settings
def _load_settings(self):
basename = os.path.splitext(os.path.basename(__file__))[0]
default_path = os.path.join(os.path.dirname(__file__), (basename + '.ini'))
path = os.path.expanduser(os.path.expandvars(os.environ.get('AZURE_INI_PATH', default_path)))
config = None
settings = None
try:
config = ConfigParser.ConfigParser()
config.read(path)
except:
pass
if config is not None:
settings = dict()
for key in AZURE_CONFIG_SETTINGS:
try:
settings[key] = config.get('azure', key, raw=True)
except:
pass
return settings
def _tags_match(self, tag_obj, tag_args):
'''
Return True if the tags object from a VM contains the requested tag values.
:param tag_obj: Dictionary of string:string pairs
:param tag_args: List of strings in the form key=value
:return: boolean
'''
if not tag_obj:
return False
matches = 0
for arg in tag_args:
arg_key = arg
arg_value = None
if re.search(r':', arg):
arg_key, arg_value = arg.split(':')
if arg_value and tag_obj.get(arg_key, None) == arg_value:
matches += 1
elif not arg_value and tag_obj.get(arg_key, None) is not None:
matches += 1
if matches == len(tag_args):
return True
return False
def _to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
        regex = r"[^A-Za-z0-9_"
        if not self.replace_dash_in_groups:
            regex += r"\-"
return re.sub(regex + "]", "_", word)
def main():
if not HAS_AZURE:
sys.exit("The Azure python sdk is not installed (try `pip install 'azure>=2.0.0rc5' --upgrade`) - {0}".format(HAS_AZURE_EXC))
if Version(azure_compute_version) < Version(AZURE_MIN_VERSION):
sys.exit("Expecting azure.mgmt.compute.__version__ to be {0}. Found version {1} "
"Do you have Azure >= 2.0.0rc5 installed? (try `pip install 'azure>=2.0.0rc5' --upgrade`)".format(AZURE_MIN_VERSION, azure_compute_version))
AzureInventory()
if __name__ == '__main__':
main()
| gpl-3.0 | 7,014,954,423,921,944,000 | 39.834165 | 157 | 0.602156 | false |
cria/microSICol | update_external_db.py | 1 | 6512 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author:Renato Arnellas Coelho renatoac at gmail dot com
#
# Script to update Sicol database
#
# Warning:
# 1 - Add the MySQL executable directory to the system's PATH environment variable
# 2 - This script _MUST_ be executed from the root directory
def updateDB(full_mode=True):
'''
  full_mode = whether to update only the DB structure (False) or all possible data (True)
  @return bool - True = OK, False = error found
'''
import getpass
import os
import platform
from sys import exit
print "Updating MySQL database..."
if platform.system() == "Windows" or platform.system() == "Microsoft":
import winsound
####################
# User data
####################
dados = {}
dados['mysql_login'] = raw_input("MySQL administrator login: ")
dados['mysql_pwd'] = getpass.getpass("MySQL administrator password: ")
dados['mysql_bd'] = raw_input("MySQL Database (e.g. 'sicol_v123'): ")
dados['mysql_user'] = raw_input("Sicol login to MySQL (e.g. 'sicol'): ")
####################
# Internal data
####################
sicol_path = os.getcwd()+os.sep+'db'+os.sep+'scripts'+os.sep
if platform.system() == "Windows" or platform.system() == "Microsoft":
mysql_path = [x for x in os.environ['PATH'].split(";") if x.lower().find('mysql') != -1]
else: #UNIX
pipe = os.popen("which mysql") #grab where MySQL is installed
mysql_path = pipe.read().strip()
host = "localhost"
user = dados['mysql_login']
pwd = dados['mysql_pwd']
####################
# DB update script
####################
if mysql_path == '' or mysql_path == []:
print "*********** ERROR ***********"
print "Please insert path to executable directory (mysql.exe) in OS 'PATH' variable."
raw_input() #Wait for user input...
return False
else:
if platform.system() == "Windows" or platform.system() == "Microsoft":
#Ignore whether PATH ends with '\' or not
mysql_path = mysql_path[0]
if mysql_path[-1] != '\\': mysql_path += '\\'
mysql_path = '"' + mysql_path + 'mysql.exe"'
try:
bd_version = dados['mysql_bd'].split("_")[1]
except Exception,e:
print "*********** ERROR ***********"
print "Please type \"sicol_v###\" where ### = version number."
raw_input() #Wait for user input...
return False
path_to_db = sicol_path + bd_version + os.sep
# Load mysql_script_empty.sql
dump_file = 'mysql_script_empty.sql'
print "Loading database structure..."
try:
os.system("%s -h%s -u%s -p%s < %s" % (mysql_path,host,user,pwd,path_to_db+dump_file) )
except Exception,e:
print "*********** ERROR ***********"
print str(e)
raw_input() #Wait for user input...
#return False
# Load mysql_start_dump.sql
dump_file = "dump"+os.sep+"mysql_start_dump.sql"
print "Loading initial dump to database..."
try:
os.system("%s -h%s -u%s -p%s < %s" % (mysql_path,host,user,pwd,path_to_db+dump_file) )
except Exception,e:
print "*********** ERROR ***********"
print str(e)
raw_input() #Wait for user input...
return False
######################
# Load additional data
######################
if full_mode:
if platform.system() == "Windows" or platform.system() == "Microsoft":
winsound.MessageBeep(winsound.MB_ICONASTERISK)
opt = raw_input("Do you want to load test data? (y/n)\n")[0].lower()
if opt == 'y':
# Load mysql_testdata_dump.sql
dump_file = "dump"+os.sep+"mysql_testdata_dump.sql"
print "Loading test data to database..."
try:
os.system("%s -h%s -u%s -p%s < %s" % (mysql_path,host,user,pwd,path_to_db+dump_file) )
except Exception,e:
print "*********** ERROR ***********"
print str(e)
raw_input() #Wait for user input...
        return False
if platform.system() == "Windows" or platform.system() == "Microsoft":
winsound.MessageBeep(winsound.MB_ICONASTERISK)
opt = raw_input("Do you want to load all Brazilian cities name to database? (y/n)\n")[0].lower()
if opt == 'y':
# Load mysql_cities_dump.sql
dump_file = "dump"+os.sep+"mysql_cities_dump.sql"
print "Loading Brazilian cities name to database..."
try:
os.system("%s -h%s -u%s -p%s < %s" % (mysql_path,host,user,pwd,path_to_db+dump_file) )
except Exception,e:
print "*********** ERROR ***********"
print str(e)
raw_input() #Wait for user input...
        return False
if platform.system() == "Windows" or platform.system() == "Microsoft":
winsound.MessageBeep(winsound.MB_ICONASTERISK)
opt = raw_input("Do you want to load debug data? (y/n)\n")[0].lower()
if opt == 'y':
# Load mysql_cities_dump.sql
dump_file = "dump"+os.sep+"mysql_debug_dump.sql"
print "Loading debug data to database..."
try:
os.system("%s -h%s -u%s -p%s < %s" % (mysql_path,host,user,pwd,path_to_db+dump_file) )
except Exception,e:
print "*********** ERROR ***********"
print str(e)
raw_input() #Wait for user input...
        return False
########################
# End of additional data
########################
# Give database permissions to "sicol" user
print "Transfering access permission to user \"%s\"..." % dados['mysql_user']
try:
#Create temp file in order to change user permissions
f = open('temp_user_access_bd.sql','w')
f.write("GRANT ALL PRIVILEGES ON `%s`.* TO '%s'@localhost IDENTIFIED BY '%s';FLUSH PRIVILEGES;" % (dados['mysql_bd'].replace("_","\\_"),dados['mysql_user'],dados['mysql_user']))
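    # NOTE: the new account's password is set to the same string as its login name.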
f.close()
os.system("%s -h%s -u%s -p%s < %s" % (mysql_path,host,user,pwd,os.getcwd()+os.sep+'temp_user_access_bd.sql') )
os.unlink('temp_user_access_bd.sql')
except Exception,e:
print "*********** ERROR ***********"
print str(e)
raw_input() #Wait for user input...
    return False
####################
# End of update
####################
if platform.system() == "Windows" or platform.system() == "Microsoft":
winsound.MessageBeep(winsound.MB_ICONEXCLAMATION)
print "****************************"
raw_input("Update finished. Press [ENTER] to continue.")
#If this script is called locally...
if __name__ == "__main__":
print "*** Update SICol Database ***"
updateDB()
print "*** Update Finished ***"
| gpl-2.0 | 2,520,255,957,991,356,000 | 38.447205 | 181 | 0.555129 | false |
trolldbois/python-haystack-reverse | haystack/reverse/cli.py | 1 | 4337 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Entry points related to reverse."""
from __future__ import print_function
import os
import sys
from haystack import argparse_utils
from haystack import cli
from haystack.reverse import api
# the description of the function
REVERSE_DESC = 'Reverse the data structure from the process memory'
REVERSE_SHOW_DESC = 'Show the record at a specific address'
REVERSE_PARENT_DESC = 'List the predecessors pointing to the record at this address'
REVERSE_HEX_DESC = 'Show the Hex values for the record at that address.'
def show_hex(args):
""" Show the Hex values for the record at that address. """
memory_handler = cli.make_memory_handler(args)
process_context = memory_handler.get_reverse_context()
ctx = process_context.get_context_for_address(args.address)
try:
st = ctx.get_record_at_address(args.address)
print(repr(st.bytes))
except ValueError as e:
print(None)
return
def show_predecessors_cmdline(args):
"""
Show the predecessors that point to a record at a particular address.
:param args: cmdline args
:return:
"""
memory_handler = cli.make_memory_handler(args)
process_context = memory_handler.get_reverse_context()
ctx = process_context.get_context_for_address(args.address)
try:
child_record = ctx.get_record_at_address(args.address)
except ValueError as e:
print(None)
return
records = api.get_record_predecessors(memory_handler, child_record)
if len(records) == 0:
print(None)
else:
for p_record in records:
print('#0x%x\n%s\n' % (p_record.address, p_record.to_string()))
return
def reverse_show_cmdline(args):
""" Show the record at a specific address. """
memory_handler = cli.make_memory_handler(args)
process_context = memory_handler.get_reverse_context()
ctx = process_context.get_context_for_address(args.address)
try:
st = ctx.get_record_at_address(args.address)
print(st.to_string())
except ValueError:
print(None)
return
def reverse_cmdline(args):
""" Reverse """
from haystack.reverse import api as rapi
# get the memory handler adequate for the type requested
memory_handler = cli.make_memory_handler(args)
# do the search
rapi.reverse_instances(memory_handler)
return
def reverse():
argv = sys.argv[1:]
desc = REVERSE_DESC
rootparser = cli.base_argparser(program_name=os.path.basename(sys.argv[0]), description=desc)
rootparser.set_defaults(func=reverse_cmdline)
opts = rootparser.parse_args(argv)
# apply verbosity
cli.set_logging_level(opts)
# execute function
opts.func(opts)
return
def reverse_show():
argv = sys.argv[1:]
desc = REVERSE_SHOW_DESC
rootparser = cli.base_argparser(program_name=os.path.basename(sys.argv[0]), description=desc)
rootparser.add_argument('address', type=argparse_utils.int16, help='Record memory address in hex')
rootparser.set_defaults(func=reverse_show_cmdline)
opts = rootparser.parse_args(argv)
# apply verbosity
cli.set_logging_level(opts)
# execute function
opts.func(opts)
return
def reverse_parents():
argv = sys.argv[1:]
desc = REVERSE_PARENT_DESC
rootparser = cli.base_argparser(program_name=os.path.basename(sys.argv[0]), description=desc)
rootparser.add_argument('address', type=argparse_utils.int16, action='store', default=None,
help='Hex address of the child structure')
rootparser.set_defaults(func=show_predecessors_cmdline)
opts = rootparser.parse_args(argv)
# apply verbosity
cli.set_logging_level(opts)
# execute function
opts.func(opts)
return
def reverse_hex():
argv = sys.argv[1:]
desc = REVERSE_HEX_DESC
rootparser = cli.base_argparser(program_name=os.path.basename(sys.argv[0]), description=desc)
rootparser.add_argument('address', type=argparse_utils.int16, action='store', default=None,
help='Specify the address of the record, or encompassed by the record')
rootparser.set_defaults(func=show_hex)
opts = rootparser.parse_args(argv)
# apply verbosity
cli.set_logging_level(opts)
# execute function
opts.func(opts)
return
| gpl-3.0 | 3,501,183,569,568,600,600 | 30.889706 | 102 | 0.682038 | false |
timbuchwaldt/bundlewrap | setup.py | 1 | 2063 | from sys import version_info
from setuptools import find_packages, setup
dependencies = [
"cryptography",
"Jinja2",
"Mako",
"passlib",
"pyyaml",
"requests >= 1.0.0",
"six",
]
if version_info < (3, 2, 0):
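    # 'futures' is the concurrent.futures backport for Pythons older than 3.2.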
dependencies.append("futures")
setup(
name="bundlewrap",
version="3.3.0",
description="Config management with Python",
long_description=(
"By allowing for easy and low-overhead config management, BundleWrap fills the gap between complex deployments using Chef or Puppet and old school system administration over SSH.\n"
"While most other config management systems rely on a client-server architecture, BundleWrap works off a repository cloned to your local machine. It then automates the process of SSHing into your servers and making sure everything is configured the way it's supposed to be. You won't have to install anything on managed servers."
),
author="Torsten Rehn",
author_email="[email protected]",
license="GPLv3",
url="http://bundlewrap.org",
packages=find_packages(),
entry_points={
'console_scripts': [
"bw=bundlewrap.cmdline:main",
],
},
keywords=["configuration", "config", "management"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Natural Language :: English",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: System :: Installation/Setup",
"Topic :: System :: Systems Administration",
],
install_requires=dependencies,
extras_require={ # used for wheels
':python_version=="2.7"': ["futures"],
},
zip_safe=False,
)
| gpl-3.0 | 6,583,598,873,751,733,000 | 35.192982 | 337 | 0.638875 | false |
torchingloom/edx-platform | lms/djangoapps/bulk_email/models.py | 1 | 11002 | """
Models for bulk email
WE'RE USING MIGRATIONS!
If you make changes to this model, be sure to create an appropriate migration
file and check it in at the same time as your model changes. To do that,
1. Go to the edx-platform dir
2. ./manage.py lms schemamigration bulk_email --auto description_of_your_change
3. Add the migration file created in edx-platform/lms/djangoapps/bulk_email/migrations/
"""
import logging
from django.db import models, transaction
from django.contrib.auth.models import User
from html_to_text import html_to_text
import hashlib
from django.conf import settings
from .fields import SeparatedValuesField
log = logging.getLogger(__name__)
# Bulk email to_options - the send to options that users can
# select from when they send email.
SEND_TO_MYSELF = 'myself'
SEND_TO_STAFF = 'staff'
SEND_TO_ALL = 'all'
SEND_TO_LIST = 'list'
SEND_TO_ALLALL = 'allall'
TO_OPTIONS = [SEND_TO_MYSELF, SEND_TO_STAFF, SEND_TO_LIST, SEND_TO_ALL, SEND_TO_ALLALL]
class Email(models.Model):
"""
Abstract base class for common information for an email.
"""
sender = models.ForeignKey(User, default=1, blank=True, null=True)
slug = models.CharField(max_length=128, db_index=True)
subject = models.CharField(max_length=128, blank=True)
html_message = models.TextField(null=True, blank=True)
text_message = models.TextField(null=True, blank=True)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta: # pylint: disable=C0111
abstract = True
class CourseEmail(Email):
"""
Stores information for an email to a course.
"""
# Three options for sending that we provide from the instructor dashboard:
# * Myself: This sends an email to the staff member that is composing the email.
#
# * Staff and instructors: This sends an email to anyone in the staff group and
# anyone in the instructor group
#
# * All: This sends an email to anyone enrolled in the course, with any role
# (student, staff, or instructor)
#
TO_OPTION_CHOICES = (
(SEND_TO_MYSELF, 'Myself'),
(SEND_TO_STAFF, 'Staff and instructors'),
(SEND_TO_LIST, 'To list'),
(SEND_TO_ALL, 'All'),
(SEND_TO_ALLALL, 'AllAll')
)
course_id = models.CharField(max_length=255, db_index=True)
location = models.CharField(max_length=255, db_index=True, null=True, blank=True)
to_option = models.CharField(max_length=64, choices=TO_OPTION_CHOICES, default=SEND_TO_MYSELF)
to_list = SeparatedValuesField(null=True)
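    # Explicit recipient list, used when to_option is SEND_TO_LIST.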
def __unicode__(self):
return self.subject
@classmethod
def create(cls, course_id, sender, to_option, subject, html_message, text_message=None, location=None, to_list=None):
"""
Create an instance of CourseEmail.
The CourseEmail.save_now method makes sure the CourseEmail entry is committed.
When called from any view that is wrapped by TransactionMiddleware,
and thus in a "commit-on-success" transaction, an autocommit buried within here
will cause any pending transaction to be committed by a successful
save here. Any future database operations will take place in a
separate transaction.
"""
# automatically generate the stripped version of the text from the HTML markup:
if text_message is None:
text_message = html_to_text(html_message)
# perform some validation here:
if to_option not in TO_OPTIONS:
fmt = 'Course email being sent to unrecognized to_option: "{to_option}" for "{course}", subject "{subject}"'
msg = fmt.format(to_option=to_option, course=course_id, subject=subject)
raise ValueError(msg)
# create the task, then save it immediately:
course_email = cls(
course_id=course_id,
sender=sender,
to_option=to_option,
subject=subject,
html_message=html_message,
text_message=text_message,
location=location,
to_list=to_list,
)
course_email.save_now()
return course_email
@transaction.autocommit
def save_now(self):
"""
Writes CourseEmail immediately, ensuring the transaction is committed.
Autocommit annotation makes sure the database entry is committed.
When called from any view that is wrapped by TransactionMiddleware,
and thus in a "commit-on-success" transaction, this autocommit here
will cause any pending transaction to be committed by a successful
save here. Any future database operations will take place in a
separate transaction.
"""
self.save()
def send(self):
from instructor_task.tasks import send_bulk_course_email
from instructor_task.api_helper import submit_task
from instructor.utils import DummyRequest
request = DummyRequest()
request.user = self.sender
email_obj = self
to_option = email_obj.to_option
task_type = 'bulk_course_email'
task_class = send_bulk_course_email
# Pass in the to_option as a separate argument, even though it's (currently)
# in the CourseEmail. That way it's visible in the progress status.
# (At some point in the future, we might take the recipient out of the CourseEmail,
# so that the same saved email can be sent to different recipients, as it is tested.)
task_input = {'email_id': self.id, 'to_option': to_option}
task_key_stub = "{email_id}_{to_option}".format(email_id=self.id, to_option=to_option)
# create the key value by using MD5 hash:
task_key = hashlib.md5(task_key_stub).hexdigest()
return submit_task(request, task_type, task_class, self.course_id, task_input, task_key)
class Optout(models.Model):
"""
Stores users that have opted out of receiving emails from a course.
"""
# Allowing null=True to support data migration from email->user.
# We need to first create the 'user' column with some sort of default in order to run the data migration,
# and given the unique index, 'null' is the best default value.
user = models.ForeignKey(User, db_index=True, null=True)
course_id = models.CharField(max_length=255, db_index=True)
class Meta: # pylint: disable=C0111
unique_together = ('user', 'course_id')
# Defines the tag that must appear in a template, to indicate
# the location where the email message body is to be inserted.
COURSE_EMAIL_MESSAGE_BODY_TAG = '{{message_body}}'
class CourseEmailTemplate(models.Model):
"""
Stores templates for all emails to a course to use.
This is expected to be a singleton, to be shared across all courses.
Initialization takes place in a migration that in turn loads a fixture.
The admin console interface disables add and delete operations.
Validation is handled in the CourseEmailTemplateForm class.
"""
html_template = models.TextField(null=True, blank=True)
plain_template = models.TextField(null=True, blank=True)
@staticmethod
def get_template():
"""
Fetch the current template
If one isn't stored, an exception is thrown.
"""
try:
return CourseEmailTemplate.objects.get()
except CourseEmailTemplate.DoesNotExist:
log.exception("Attempting to fetch a non-existent course email template")
raise
@staticmethod
def _render(format_string, message_body, context):
"""
Create a text message using a template, message body and context.
Convert message body (`message_body`) into an email message
using the provided template. The template is a format string,
which is rendered using format() with the provided `context` dict.
        This doesn't insert the user's text into the template, until such time
        as we can support proper error handling for errors in the message body
(e.g. due to the use of curly braces).
Instead, for now, we insert the message body *after* the substitutions
have been performed, so that anything in the message body that might
interfere will be innocently returned as-is.
Output is returned as a unicode string. It is not encoded as utf-8.
Such encoding is left to the email code, which will use the value
of settings.DEFAULT_CHARSET to encode the message.
"""
# If we wanted to support substitution, we'd call:
# format_string = format_string.replace(COURSE_EMAIL_MESSAGE_BODY_TAG, message_body)
result = format_string.format(**context)
# Note that the body tag in the template will now have been
# "formatted", so we need to do the same to the tag being
# searched for.
message_body_tag = COURSE_EMAIL_MESSAGE_BODY_TAG.format()
result = result.replace(message_body_tag, message_body, 1)
# finally, return the result, without converting to an encoded byte array.
return result
def render_plaintext(self, plaintext, context):
"""
Create plain text message.
Convert plain text body (`plaintext`) into plaintext email message using the
stored plain template and the provided `context` dict.
"""
return CourseEmailTemplate._render(self.plain_template, plaintext, context)
def render_htmltext(self, htmltext, context):
"""
Create HTML text message.
Convert HTML text body (`htmltext`) into HTML email message using the
stored HTML template and the provided `context` dict.
"""
return CourseEmailTemplate._render(self.html_template, htmltext, context)
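# Illustrative sketch (not part of the original module; names and values
# below are hypothetical) of how a stored template and a message body combine:
#   template = CourseEmailTemplate.get_template()
#   context = {'course_title': 'Demo Course'}
#   html = template.render_htmltext('<p>Hi {students}</p>', context)
# format() fills only the template's own placeholders (e.g. {course_title});
# the formatted '{{message_body}}' tag is then replaced verbatim with the
# body, so the stray '{students}' in the body is returned untouched.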
class CourseAuthorization(models.Model):
"""
Enable the course email feature on a course-by-course basis.
"""
# The course that these features are attached to.
course_id = models.CharField(max_length=255, db_index=True, unique=True)
# Whether or not to enable instructor email
email_enabled = models.BooleanField(default=False)
@classmethod
def instructor_email_enabled(cls, course_id):
"""
Returns whether or not email is enabled for the given course id.
If email has not been explicitly enabled, returns False.
"""
# If settings.FEATURES['REQUIRE_COURSE_EMAIL_AUTH'] is
# set to False, then we enable email for every course.
if not settings.FEATURES['REQUIRE_COURSE_EMAIL_AUTH']:
return True
try:
record = cls.objects.get(course_id=course_id)
return record.email_enabled
except cls.DoesNotExist:
return False
def __unicode__(self):
not_en = "Not "
if self.email_enabled:
not_en = ""
return u"Course '{}': Instructor Email {}Enabled".format(self.course_id, not_en)
| agpl-3.0 | 351,640,386,097,499,400 | 38.153025 | 121 | 0.668606 | false |
xsteadfastx/subsonic-xbmc-addon | plugin.audio.subsonic/addon.py | 1 | 11859 | from operator import itemgetter
import sys
import urllib
import urlparse
sys.path.append('./resources/lib')
import requests
def build_url(query):
return base_url + '?' + urllib.urlencode(dict([k.encode('utf-8'),unicode(v).encode('utf-8')] for k,v in query.items()))
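# Example (hypothetical values): with base_url = 'plugin://plugin.audio.subsonic/',
# build_url({'mode': 'artist_list'}) returns
# 'plugin://plugin.audio.subsonic/?mode=artist_list'; keys and values are
# UTF-8 encoded first so non-ASCII artist or album names survive urlencode.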
class Subsonic(object):
    """Thin wrapper around the Subsonic REST API (JSON flavor)."""
    def __init__(self, url, username, password):
self.url = url
self.username = username
self.password = password
    def api(self, method, parameters=None):
        # Avoid the mutable-default-argument pitfall; the dummy 'none'
        # parameter keeps the generated query string well-formed when no
        # parameters are supplied.
        if parameters is None:
            parameters = {'none': 'none'}
        return self.url + '/rest/' + method + '?u=%s&p=enc:%s&v=1.1.0&c=xbmc-subsonic&f=json&' % (
            self.username, self.password.encode('hex')) + urllib.urlencode(parameters)
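    # For illustration only (hypothetical server and credentials):
    #   api('ping.view') builds a URL shaped like
    #   http://server/rest/ping.view?u=alice&p=enc:70617373&v=1.1.0&c=xbmc-subsonic&f=json&none=none
    # i.e. the password travels hex-encoded via Subsonic's 'enc:' scheme.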
def artist_list(self):
api_url = self.api('getIndexes.view',
parameters={'musicFolderId': '0'})
r = requests.get(api_url)
artists = []
for index in r.json()['subsonic-response']['indexes']['index']:
for artist in index['artist']:
item = {}
item['name'] = artist['name'].encode('utf-8')
item['id'] = artist['id'].encode('utf-8')
artists.append(item)
return artists
def music_directory_list(self, id):
api_url = self.api('getMusicDirectory.view',
parameters={'id': id})
r = requests.get(api_url)
return r.json()['subsonic-response']['directory']['child']
def genre_list(self):
api_url = self.api('getGenres.view')
r = requests.get(api_url)
return sorted(r.json()['subsonic-response']['genres']['genre'],
key=itemgetter('value'))
def albums_by_genre_list(self, genre):
api_url = self.api('getAlbumList.view',
parameters={'type': 'byGenre',
'genre': genre,
'size': '500'})
r = requests.get(api_url)
return r.json()['subsonic-response']['albumList']['album']
def random_songs_by_genre(self, genre):
api_url = self.api('getRandomSongs.view',
parameters={'size': '500',
'genre': genre})
r = requests.get(api_url)
return r.json()['subsonic-response']['randomSongs']['song']
def random_songs_from_to_year(self, from_year, to_year):
api_url = self.api('getRandomSongs.view',
parameters={'size': '500',
'fromYear': from_year,
'toYear': to_year})
r = requests.get(api_url)
return r.json()['subsonic-response']['randomSongs']['song']
def cover_art(self, id):
return self.api('getCoverArt.view', parameters={'id': id})
def main_page():
menu = [{'mode': 'artist_list', 'foldername': 'Artists'},
{'mode': 'genre_list', 'foldername': 'Genres'},
{'mode': 'random_list', 'foldername': 'Random'}]
for entry in menu:
url = build_url(entry)
li = xbmcgui.ListItem(entry['foldername'],
iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(
handle=addon_handle,
url=url,
listitem=li,
isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
def genre_list():
subsonic = Subsonic(subsonic_url, username, password)
genres = subsonic.genre_list()
for genre in genres:
url = build_url({'mode': 'albums_by_genre_list',
'foldername': genre['value']})
li = xbmcgui.ListItem(genre['value'],
iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(
handle=addon_handle,
url=url,
listitem=li,
isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
def albums_by_genre_list():
genre = args.get('foldername', None)
subsonic = Subsonic(subsonic_url, username, password)
albums = subsonic.albums_by_genre_list(genre[0])
for album in albums:
url = build_url({'mode': 'track_list',
'foldername': unicode(album['title']).encode('utf-8'),
'album_id': unicode(album['id']).encode('utf-8')})
li = xbmcgui.ListItem(album['artist'] + ' - ' + album['title'])
li.setIconImage(subsonic.cover_art(album['id']))
li.setThumbnailImage(subsonic.cover_art(album['id']))
li.setProperty('fanart_image', subsonic.cover_art(album['id']))
xbmcplugin.addDirectoryItem(
handle=addon_handle,
url=url,
listitem=li,
isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
def artist_list():
subsonic = Subsonic(subsonic_url, username, password)
artists = subsonic.artist_list()
for artist in artists:
url = build_url({'mode': 'album_list',
'foldername': artist['name'],
'artist_id': artist['id']})
li = xbmcgui.ListItem(artist['name'])
li.setIconImage(subsonic.cover_art(artist['id']))
li.setThumbnailImage(subsonic.cover_art(artist['id']))
li.setProperty('fanart_image', subsonic.cover_art(artist['id']))
xbmcplugin.addDirectoryItem(
handle=addon_handle,
url=url,
listitem=li,
isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
def album_list():
artist_id = args.get('artist_id', None)
subsonic = Subsonic(subsonic_url, username, password)
albums = subsonic.music_directory_list(artist_id[0])
for album in albums:
url = build_url({'mode': 'track_list',
'foldername': unicode(album['title']).encode('utf-8'),
'album_id': unicode(album['id']).encode('utf-8')})
li = xbmcgui.ListItem(album['title'])
li.setIconImage(subsonic.cover_art(album['id']))
li.setThumbnailImage(subsonic.cover_art(album['id']))
li.setProperty('fanart_image', subsonic.cover_art(album['id']))
xbmcplugin.addDirectoryItem(
handle=addon_handle,
url=url,
listitem=li,
isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
def track_list():
album_id = args.get('album_id', None)
subsonic = Subsonic(subsonic_url, username, password)
tracks = subsonic.music_directory_list(album_id[0])
for track in tracks:
url = subsonic.api(
'stream.view',
parameters={'id': track['id'],
'maxBitRate': bitrate,
'format': trans_format})
li = xbmcgui.ListItem(track['title'])
li.setIconImage(subsonic.cover_art(track['id']))
li.setThumbnailImage(subsonic.cover_art(track['id']))
li.setProperty('fanart_image', subsonic.cover_art(track['id']))
li.setProperty('IsPlayable', 'true')
li.setInfo(
type='Music',
infoLabels={'Artist': track['artist'],
'Title': track['title']})
xbmcplugin.addDirectoryItem(
handle=addon_handle,
url=url,
listitem=li)
xbmcplugin.endOfDirectory(addon_handle)
def random_list():
menu = [{'mode': 'random_by_genre_list', 'foldername': 'by Genre'},
{'mode': 'random_from_to_year_list', 'foldername': 'from - to Year'}]
for entry in menu:
url = build_url(entry)
li = xbmcgui.ListItem(entry['foldername'],
iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(
handle=addon_handle,
url=url,
listitem=li,
isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
def random_by_genre_list():
subsonic = Subsonic(subsonic_url, username, password)
genres = subsonic.genre_list()
for genre in genres:
url = build_url({'mode': 'random_by_genre_track_list',
'foldername': genre['value']})
li = xbmcgui.ListItem(genre['value'],
iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(
handle=addon_handle,
url=url,
listitem=li,
isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
def random_by_genre_track_list():
genre = args.get('foldername', None)[0]
subsonic = Subsonic(subsonic_url, username, password)
tracks = subsonic.random_songs_by_genre(genre)
for track in tracks:
url = subsonic.api(
'stream.view',
parameters={'id': track['id'],
'maxBitRate': bitrate,
'format': trans_format})
li = xbmcgui.ListItem(track['artist'] + ' - ' + track['title'])
li.setIconImage(subsonic.cover_art(track['id']))
li.setThumbnailImage(subsonic.cover_art(track['id']))
li.setProperty('fanart_image', subsonic.cover_art(track['id']))
li.setProperty('IsPlayable', 'true')
li.setInfo(
type='Music',
infoLabels={'Artist': track['artist'],
'Title': track['title']})
xbmcplugin.addDirectoryItem(
handle=addon_handle,
url=url,
listitem=li)
xbmcplugin.endOfDirectory(addon_handle)
def random_from_to_year_list():
dialog = xbmcgui.Dialog()
from_year = dialog.input('From Year', type=xbmcgui.INPUT_NUMERIC)
dialog = xbmcgui.Dialog()
to_year = dialog.input('To Year', type=xbmcgui.INPUT_NUMERIC)
subsonic = Subsonic(subsonic_url, username, password)
tracks = subsonic.random_songs_from_to_year(from_year, to_year)
for track in tracks:
url = subsonic.api(
'stream.view',
parameters={'id': track['id'],
'maxBitRate': bitrate,
'format': trans_format})
li = xbmcgui.ListItem(track['artist'] + ' - ' + track['title'])
li.setIconImage(subsonic.cover_art(track['id']))
li.setThumbnailImage(subsonic.cover_art(track['id']))
li.setProperty('fanart_image', subsonic.cover_art(track['id']))
li.setProperty('IsPlayable', 'true')
li.setInfo(
type='Music',
infoLabels={'Artist': track['artist'],
'Title': track['title']})
xbmcplugin.addDirectoryItem(
handle=addon_handle,
url=url,
listitem=li)
xbmcplugin.endOfDirectory(addon_handle)
if __name__ == '__main__':
import xbmcaddon
import xbmcgui
import xbmcplugin
my_addon = xbmcaddon.Addon('plugin.audio.subsonic')
subsonic_url = my_addon.getSetting('subsonic_url')
username = my_addon.getSetting('username')
password = my_addon.getSetting('password')
trans_format = my_addon.getSetting('format')
bitrate = my_addon.getSetting('bitrate')
base_url = sys.argv[0]
addon_handle = int(sys.argv[1])
args = urlparse.parse_qs(sys.argv[2][1:])
xbmcplugin.setContent(addon_handle, 'songs')
mode = args.get('mode', None)
if mode is None:
main_page()
elif mode[0] == 'artist_list':
artist_list()
elif mode[0] == 'album_list':
album_list()
elif mode[0] == 'track_list':
track_list()
elif mode[0] == 'genre_list':
genre_list()
elif mode[0] == 'albums_by_genre_list':
albums_by_genre_list()
elif mode[0] == 'random_list':
random_list()
elif mode[0] == 'random_by_genre_list':
random_by_genre_list()
elif mode[0] == 'random_by_genre_track_list':
random_by_genre_track_list()
elif mode[0] == 'random_from_to_year_list':
random_from_to_year_list()
| mit | 1,125,825,423,453,675,600 | 34.71988 | 123 | 0.559912 | false |
CCallahanIV/PyChart | pychart/pychart_datarender/urls.py | 1 | 1210 | """Url patterns for data render app."""
from django.conf.urls import url
from pychart_datarender.views import (
GalleryView,
DataDetailView,
RenderDetailView,
DataLibraryView,
EditDataView,
EditRenderView,
AddDataView,
AddRenderView,
retrieve_data,
render_data,
save_render,
add_owner_view
)
urlpatterns = [
url(r'^gallery/$', GalleryView.as_view(), name='gallery'),
url(r'^(?P<pk>\d+)/$', DataDetailView.as_view(), name='data_detail'),
url(r'^render/(?P<pk>\d+)/$', RenderDetailView.as_view(), name='render_detail'),
url(r'^(?P<pk>\d+)/edit/$', EditDataView.as_view(), name='data_edit'),
url(r'^render/(?P<pk>\d+)/edit/$', EditRenderView.as_view(), name='render_edit'),
url(r'^render/add/$', AddRenderView.as_view(), name='render_add'),
url(r'^retrieve/(?P<pk>\d+)$', retrieve_data, name="get_data"),
url(r'^retrieve/render/$', render_data, name="get_render"),
url(r'^add/$', AddDataView.as_view(), name='data_add'),
url(r'^add/(?P<pk>\d+)$', add_owner_view, name='add_owner'),
url(r'^library/$', DataLibraryView.as_view(), name='data_library_view'),
url(r'^render/create/$', save_render, name="save_render")
]
| mit | -4,373,423,737,442,494,000 | 36.8125 | 85 | 0.628099 | false |
lemming52/white_pawn | leetcode/q015/solution.py | 1 | 1080 | """
Given an array nums of n integers, are there elements a, b, c in nums such that a + b + c = 0? Find all unique triplets in the array which gives the sum of zero.
Note:
The solution set must not contain duplicate triplets.
"""
from typing import List
class Solution:
    def threeSum(self, nums: List[int]) -> List[List[int]]:
        # Sort so triplets come out in non-decreasing order and the
        # two-pointer scan below is valid.
        nums = sorted(nums)
        found = set()
        results = []
        for i, a in enumerate(nums):
            j = i + 1
            k = len(nums) - 1
            if a > 0:
                # nums is sorted, so once the smallest element of a
                # candidate triplet is positive, no sum can reach zero.
                break
            while j < k:
                b = nums[j]
                c = nums[k]
                total = a + b + c
                if total == 0:
                    # Key on the tuple itself; string keys like f"{a}{b}{c}"
                    # can collide for distinct triplets.
                    if (a, b, c) not in found:
                        found.add((a, b, c))
                        results.append([a, b, c])
                    j += 1
                    k -= 1
                elif total > 0:
                    k -= 1
                else:
                    j += 1
        return results | mit | 6,733,637,827,880,332,000 | 27.243243 | 161 | 0.39537 | false |
jbassen/edx-platform | lms/djangoapps/courseware/tabs.py | 1 | 10813 | """
This module is essentially a broker to xmodule/tabs.py -- it was originally introduced to
perform some LMS-specific tab display gymnastics for the Entrance Exams feature
"""
from django.conf import settings
from django.utils.translation import ugettext as _, ugettext_noop
from courseware.access import has_access
from courseware.entrance_exams import user_must_complete_entrance_exam
from student.models import UserProfile
from openedx.core.lib.course_tabs import CourseTabPluginManager
from student.models import CourseEnrollment
from xmodule.tabs import CourseTab, CourseTabList, key_checker
from xmodule.tabs import StaticTab
class EnrolledTab(CourseTab):
"""
A base class for any view types that require a user to be enrolled.
"""
@classmethod
def is_enabled(cls, course, user=None):
if user is None:
return True
return bool(CourseEnrollment.is_enrolled(user, course.id) or has_access(user, 'staff', course, course.id))
class CoursewareTab(EnrolledTab):
"""
The main courseware view.
"""
type = 'courseware'
title = ugettext_noop('Courseware')
priority = 10
view_name = 'courseware'
is_movable = False
is_default = False
is_visible_to_sneak_peek = True
class CourseInfoTab(CourseTab):
"""
The course info view.
"""
type = 'course_info'
title = ugettext_noop('Course Info')
priority = 20
view_name = 'info'
tab_id = 'info'
is_movable = False
is_default = False
is_visible_to_sneak_peek = True
@classmethod
def is_enabled(cls, course, user=None):
return True
class SyllabusTab(EnrolledTab):
"""
A tab for the course syllabus.
"""
type = 'syllabus'
title = ugettext_noop('Syllabus')
priority = 30
view_name = 'syllabus'
allow_multiple = True
is_default = False
is_visible_to_sneak_peek = True
@classmethod
def is_enabled(cls, course, user=None):
if not super(SyllabusTab, cls).is_enabled(course, user=user):
return False
return getattr(course, 'syllabus_present', False)
class ProgressTab(EnrolledTab):
"""
The course progress view.
"""
type = 'progress'
title = ugettext_noop('Progress')
priority = 40
view_name = 'progress'
is_hideable = True
is_default = False
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
if not super(ProgressTab, cls).is_enabled(course, user=user):
return False
return not course.hide_progress_tab
class TextbookTabsBase(CourseTab):
"""
    Abstract class for textbook collection tab classes.
"""
# Translators: 'Textbooks' refers to the tab in the course that leads to the course' textbooks
title = ugettext_noop("Textbooks")
is_collection = True
is_default = False
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
return user is None or user.is_authenticated()
@classmethod
def items(cls, course):
"""
A generator for iterating through all the SingleTextbookTab book objects associated with this
collection of textbooks.
"""
raise NotImplementedError()
class TextbookTabs(TextbookTabsBase):
"""
A tab representing the collection of all textbook tabs.
"""
type = 'textbooks'
priority = None
view_name = 'book'
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
parent_is_enabled = super(TextbookTabs, cls).is_enabled(course, user)
return settings.FEATURES.get('ENABLE_TEXTBOOK') and parent_is_enabled
@classmethod
def items(cls, course):
for index, textbook in enumerate(course.textbooks):
yield SingleTextbookTab(
name=textbook.title,
tab_id='textbook/{0}'.format(index),
view_name=cls.view_name,
index=index
)
class PDFTextbookTabs(TextbookTabsBase):
"""
A tab representing the collection of all PDF textbook tabs.
"""
type = 'pdf_textbooks'
priority = None
view_name = 'pdf_book'
@classmethod
def items(cls, course):
for index, textbook in enumerate(course.pdf_textbooks):
yield SingleTextbookTab(
name=textbook['tab_title'],
tab_id='pdftextbook/{0}'.format(index),
view_name=cls.view_name,
index=index
)
class HtmlTextbookTabs(TextbookTabsBase):
"""
A tab representing the collection of all Html textbook tabs.
"""
type = 'html_textbooks'
priority = None
view_name = 'html_book'
@classmethod
def items(cls, course):
for index, textbook in enumerate(course.html_textbooks):
yield SingleTextbookTab(
name=textbook['tab_title'],
tab_id='htmltextbook/{0}'.format(index),
view_name=cls.view_name,
index=index
)
class LinkTab(CourseTab):
"""
Abstract class for tabs that contain external links.
"""
link_value = ''
    def __init__(self, tab_dict=None, name=None, link=None):
        if tab_dict is None:
            # Allow construction from keyword arguments alone; 'type' comes
            # from the concrete subclass.
            tab_dict = {'type': self.type, 'name': name, 'link': link}
        self.link_value = tab_dict.get('link', link)
        def link_value_func(_course, _reverse_func):
            """ Returns the link_value as the link. """
            return self.link_value
        self.type = tab_dict['type']
        tab_dict['link_func'] = link_value_func
        super(LinkTab, self).__init__(tab_dict)
def __getitem__(self, key):
if key == 'link':
return self.link_value
else:
return super(LinkTab, self).__getitem__(key)
def __setitem__(self, key, value):
if key == 'link':
self.link_value = value
else:
super(LinkTab, self).__setitem__(key, value)
def to_json(self):
to_json_val = super(LinkTab, self).to_json()
to_json_val.update({'link': self.link_value})
return to_json_val
def __eq__(self, other):
if not super(LinkTab, self).__eq__(other):
return False
return self.link_value == other.get('link')
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
return True
class ExternalDiscussionCourseTab(LinkTab):
"""
A course tab that links to an external discussion service.
"""
type = 'external_discussion'
# Translators: 'Discussion' refers to the tab in the courseware that leads to the discussion forums
title = ugettext_noop('Discussion')
priority = None
is_default = False
@classmethod
def validate(cls, tab_dict, raise_error=True):
""" Validate that the tab_dict for this course tab has the necessary information to render. """
return (super(ExternalDiscussionCourseTab, cls).validate(tab_dict, raise_error) and
key_checker(['link'])(tab_dict, raise_error))
@classmethod
def is_enabled(cls, course, user=None): # pylint: disable=unused-argument
if not super(ExternalDiscussionCourseTab, cls).is_enabled(course, user=user):
return False
return course.discussion_link
class ExternalLinkCourseTab(LinkTab):
"""
A course tab containing an external link.
"""
type = 'external_link'
priority = None
is_default = False # An external link tab is not added to a course by default
allow_multiple = True
@classmethod
def validate(cls, tab_dict, raise_error=True):
""" Validate that the tab_dict for this course tab has the necessary information to render. """
return (super(ExternalLinkCourseTab, cls).validate(tab_dict, raise_error) and
key_checker(['link', 'name'])(tab_dict, raise_error))
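# A minimal sketch of the dict such a tab is built from (values are
# hypothetical; 'type', 'name' and 'link' are the keys read above):
#   tab_dict = {'type': 'external_link', 'name': 'Course Wiki',
#               'link': 'http://example.com/wiki'}
#   tab = ExternalLinkCourseTab(tab_dict)
# __init__ copies 'link' into link_value and installs a link_func that just
# returns it, so to_json() round-trips the link.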
class SingleTextbookTab(CourseTab):
"""
A tab representing a single textbook. It is created temporarily when enumerating all textbooks within a
Textbook collection tab. It should not be serialized or persisted.
"""
type = 'single_textbook'
is_movable = False
is_collection_item = True
priority = None
def __init__(self, name, tab_id, view_name, index):
def link_func(course, reverse_func, index=index):
""" Constructs a link for textbooks from a view name, a course, and an index. """
return reverse_func(view_name, args=[unicode(course.id), index])
tab_dict = dict()
tab_dict['name'] = name
tab_dict['tab_id'] = tab_id
tab_dict['link_func'] = link_func
super(SingleTextbookTab, self).__init__(tab_dict)
def to_json(self):
raise NotImplementedError('SingleTextbookTab should not be serialized.')
def get_course_tab_list(request, course):
"""
Retrieves the course tab list from xmodule.tabs and manipulates the set as necessary
"""
user = request.user
is_user_enrolled = user.is_authenticated() and CourseEnrollment.is_enrolled(user, course.id)
xmodule_tab_list = CourseTabList.iterate_displayable(
course,
user=user,
settings=settings,
is_user_authenticated=user.is_authenticated(),
is_user_staff=has_access(user, 'staff', course, course.id),
is_user_enrolled=is_user_enrolled,
is_user_sneakpeek=not UserProfile.has_registered(user),
)
# Now that we've loaded the tabs for this course, perform the Entrance Exam work.
# If the user has to take an entrance exam, we'll need to hide away all but the
# "Courseware" tab. The tab is then renamed as "Entrance Exam".
course_tab_list = []
for tab in xmodule_tab_list:
if user_must_complete_entrance_exam(request, user, course):
# Hide all of the tabs except for 'Courseware'
# Rename 'Courseware' tab to 'Entrance Exam'
            if tab.type != 'courseware':
continue
tab.name = _("Entrance Exam")
course_tab_list.append(tab)
# Add in any dynamic tabs, i.e. those that are not persisted
course_tab_list += _get_dynamic_tabs(course, user)
return course_tab_list
def _get_dynamic_tabs(course, user):
"""
Returns the dynamic tab types for the current user.
Note: dynamic tabs are those that are not persisted in the course, but are
instead added dynamically based upon the user's role.
"""
dynamic_tabs = list()
for tab_type in CourseTabPluginManager.get_tab_types():
if getattr(tab_type, "is_dynamic", False):
tab = tab_type(dict())
if tab.is_enabled(course, user=user):
dynamic_tabs.append(tab)
dynamic_tabs.sort(key=lambda dynamic_tab: dynamic_tab.name)
return dynamic_tabs
| agpl-3.0 | 6,793,054,819,562,622,000 | 30.896755 | 114 | 0.638121 | false |
openpolis/op-verify | project/verify/admin.py | 1 | 2315 | from django.contrib import admin
from django.core.management import call_command, CommandError
from django.http import StreamingHttpResponse
from .models import Rule, Verification
__author__ = 'guglielmo'
def run_verification(request, id):
    """Stream the output of a rule verification back to the admin UI."""
    response = StreamingHttpResponse(stream_generator(request, id), content_type="text/html")
    return response
def stream_generator(request, id):
    """Yield progress chunks while the rule's verification task runs."""
    rule = Rule.objects.get(pk=id)
yield "Verifying rule: %s ... <br/>" % rule # Returns a chunk of the response to the browser
yield " " * 1000
try:
call_command(rule.task, rule_id=rule.pk, verbosity='2', username=request.user.username)
yield " Rule verification terminated. Status: {0}<br/>".format(rule.status)
yield ' Go back to <a href="/admin/verify/rule/{0}">rule page</a>.<br/>'.format(rule.id)
yield " " * 1000
except CommandError as e:
yield " ! %s<br/>" % e
yield " " * 1000
except Exception as e:
yield " ! Error in execution: %s<br/>" % e
yield " " * 1000
class VerificationInline(admin.TabularInline):
model = Verification
extra = 0
exclude = ('csv_report', )
list_display = readonly_fields = ('launch_ts', 'duration', 'outcome', 'user', 'csv_report_link', 'parameters')
def get_queryset(self, request):
return super(VerificationInline, self).get_queryset(request).order_by('-launch_ts')
class RuleAdmin(admin.ModelAdmin):
list_display = ['__unicode__', 'tags', 'status', 'last_launched_at', 'notes']
inlines = [VerificationInline,]
search_fields = ['title', 'tags']
buttons = [
{
'url': 'run_verification',
'textname': 'Run verification',
'func': run_verification,
},
]
def change_view(self, request, object_id, form_url='', extra_context={}):
extra_context['buttons'] = self.buttons
return super(RuleAdmin, self).change_view(request, object_id, form_url, extra_context=extra_context)
def get_urls(self):
        from django.conf.urls import url
        urls = super(RuleAdmin, self).get_urls()
        my_urls = [url(r'^(.+)/%(url)s/$' % b, self.admin_site.admin_view(b['func'])) for b in self.buttons]
return my_urls + urls
admin.site.register(Rule, RuleAdmin)
| bsd-3-clause | -7,814,162,677,733,859,000 | 35.746032 | 116 | 0.638013 | false |
googleads/google-ads-python | google/ads/googleads/v8/services/services/ad_schedule_view_service/client.py | 1 | 18238 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v8.resources.types import ad_schedule_view
from google.ads.googleads.v8.services.types import ad_schedule_view_service
from .transports.base import AdScheduleViewServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import AdScheduleViewServiceGrpcTransport
class AdScheduleViewServiceClientMeta(type):
"""Metaclass for the AdScheduleViewService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[AdScheduleViewServiceTransport]]
_transport_registry["grpc"] = AdScheduleViewServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[AdScheduleViewServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class AdScheduleViewServiceClient(metaclass=AdScheduleViewServiceClientMeta):
"""Service to fetch ad schedule views."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
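    # For example (illustrative only):
    #   "googleads.googleapis.com"    -> "googleads.mtls.googleapis.com"
    #   "foo.sandbox.googleapis.com"  -> "foo.mtls.sandbox.googleapis.com"
    #   "foo.mtls.googleapis.com"     -> returned unchanged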
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AdScheduleViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AdScheduleViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> AdScheduleViewServiceTransport:
"""Return the transport used by the client instance.
Returns:
AdScheduleViewServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def ad_schedule_view_path(
customer_id: str, campaign_id: str, criterion_id: str,
) -> str:
"""Return a fully-qualified ad_schedule_view string."""
return "customers/{customer_id}/adScheduleViews/{campaign_id}~{criterion_id}".format(
customer_id=customer_id,
campaign_id=campaign_id,
criterion_id=criterion_id,
)
@staticmethod
def parse_ad_schedule_view_path(path: str) -> Dict[str, str]:
"""Parse a ad_schedule_view path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/adScheduleViews/(?P<campaign_id>.+?)~(?P<criterion_id>.+?)$",
path,
)
return m.groupdict() if m else {}
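    # Round-trip example (IDs are hypothetical):
    #   ad_schedule_view_path("123", "456", "789")
    #     -> "customers/123/adScheduleViews/456~789"
    #   parse_ad_schedule_view_path(that string)
    #     -> {"customer_id": "123", "campaign_id": "456", "criterion_id": "789"}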
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, AdScheduleViewServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the ad schedule view service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.AdScheduleViewServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
)
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, AdScheduleViewServiceTransport):
# transport is a AdScheduleViewServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = AdScheduleViewServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_ad_schedule_view(
self,
request: ad_schedule_view_service.GetAdScheduleViewRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ad_schedule_view.AdScheduleView:
r"""Returns the requested ad schedule view in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (:class:`google.ads.googleads.v8.services.types.GetAdScheduleViewRequest`):
The request object. Request message for
[AdScheduleViewService.GetAdScheduleView][google.ads.googleads.v8.services.AdScheduleViewService.GetAdScheduleView].
resource_name (:class:`str`):
Required. The resource name of the ad
schedule view to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v8.resources.types.AdScheduleView:
An ad schedule view summarizes the
performance of campaigns by AdSchedule
criteria.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
        # in an ad_schedule_view_service.GetAdScheduleViewRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, ad_schedule_view_service.GetAdScheduleViewRequest
):
request = ad_schedule_view_service.GetAdScheduleViewRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_ad_schedule_view
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
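# Minimal usage sketch (the resource name is hypothetical; credentials are
# picked up from the environment unless passed explicitly):
#   client = AdScheduleViewServiceClient()
#   view = client.get_ad_schedule_view(
#       resource_name="customers/123/adScheduleViews/456~789")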
__all__ = ("AdScheduleViewServiceClient",)
| apache-2.0 | -5,497,888,377,128,880,000 | 40.45 | 132 | 0.621943 | false |
woodymit/millstone_accidental_source | genome_designer/variants/tests/test_variant_sets.py | 1 | 16435 | """
Tests for adding and removing variants from variant_sets.
"""
import os
import random
from django.conf import settings
from django.test import TestCase
import pyinter
from main.models import AlignmentGroup
from main.models import Chromosome
from main.models import Dataset
from main.models import ExperimentSample
from main.models import ExperimentSampleToAlignment
from main.models import Variant
from main.models import VariantAlternate
from main.models import VariantCallerCommonData
from main.models import VariantEvidence
from main.models import VariantSet
from main.models import VariantToVariantSet
from main.testing_util import create_common_entities
from utils.import_util import add_dataset_to_entity
from utils.import_util import copy_dataset_to_entity_data_dir
from variants.variant_sets import add_variants_to_set_from_bed
from variants.variant_sets import MODIFY_VARIANT_SET_MEMBERSHIP__ADD
from variants.variant_sets import MODIFY_VARIANT_SET_MEMBERSHIP__REMOVE
from variants.variant_sets import update_variant_in_set_memberships
from variants.variant_sets import update_variant_in_set_memberships__all_matching_filter
TEST_FASTA = os.path.join(settings.PWD, 'test_data', 'fake_genome_and_reads',
'test_genome.fa')
TEST_BED = os.path.join(settings.PWD, 'test_data', 'fake_genome_and_reads',
'bed_test.bed')
SAMPLE_1_LABEL = 'sample1'
VARIANTSET_1_LABEL = 'New Set A'
VARIANTSET_2_LABEL = 'New Set B'
class TestAddVariantsToSetFromBed(TestCase):
def test_add_variants_to_set_from_bed(self):
common_entities = create_common_entities()
project = common_entities['project']
self.ref_genome_1 = common_entities['reference_genome']
alignment_group = AlignmentGroup.objects.create(
label='Alignment 1',
reference_genome=self.ref_genome_1,
aligner=AlignmentGroup.ALIGNER.BWA)
(self.sample_1, created) = ExperimentSample.objects.get_or_create(
project=project,
label=SAMPLE_1_LABEL)
sample_alignment = ExperimentSampleToAlignment.objects.create(
alignment_group=alignment_group,
experiment_sample=self.sample_1)
# Create variants in the bed regions from best_test.bed
for var_poor_map in range(20):
variant = Variant.objects.create(
type=Variant.TYPE.TRANSITION,
reference_genome=self.ref_genome_1,
chromosome=Chromosome.objects.get(reference_genome=self.ref_genome_1),
position=random.randint(101,200),
ref_value='A')
vccd = VariantCallerCommonData.objects.create(
variant=variant,
source_dataset_id=1,
alignment_group=alignment_group,
data={}
)
for var_no_cov in range(20):
variant = Variant.objects.create(
type=Variant.TYPE.TRANSITION,
reference_genome=self.ref_genome_1,
chromosome=Chromosome.objects.get(reference_genome=self.ref_genome_1),
position=random.randint(301,400),
ref_value='A')
vccd = VariantCallerCommonData.objects.create(
variant=variant,
source_dataset_id=1,
alignment_group=alignment_group,
data={}
)
variant = Variant.objects.create(
type=Variant.TYPE.TRANSITION,
reference_genome=self.ref_genome_1,
chromosome=Chromosome.objects.get(reference_genome=self.ref_genome_1),
position=random.randint(501,600),
ref_value='A')
vccd = VariantCallerCommonData.objects.create(
variant=variant,
source_dataset_id=1,
alignment_group=alignment_group,
data={}
)
        new_bed_path = copy_dataset_to_entity_data_dir(
                entity=sample_alignment,
                original_source_location=TEST_BED)
        bed_dataset = add_dataset_to_entity(sample_alignment,
                dataset_label=Dataset.TYPE.BED_CALLABLE_LOCI,
                dataset_type=Dataset.TYPE.BED_CALLABLE_LOCI,
                filesystem_location=new_bed_path)
vs_to_v_map = add_variants_to_set_from_bed(
sample_alignment, bed_dataset)
variant_set_labels = set([vs.label for vs in vs_to_v_map.keys()])
self.assertEqual(set(['POOR_MAPPING_QUALITY', 'NO_COVERAGE']),
variant_set_labels)
for variant_set, variants in vs_to_v_map.items():
for v in variants:
# POOR MAPPING QUAL should be from 101 to 200
if variant_set.label == 'POOR_MAPPING_QUALITY':
self.assertTrue(v.position in pyinter.closedopen(
101, 200))
# NO COVERAGE should be from 301 to 400, 501 to 600
elif variant_set.label == 'NO_COVERAGE':
self.assertTrue(v.position in pyinter.IntervalSet([
pyinter.closedopen(301,400),
pyinter.closedopen(501,600)]))
else:
raise AssertionError(
'bad variant set %s made.' % variant_set.label)
class TestAddAndRemoveVariantsFromSet(TestCase):
def setUp(self):
common_entities = create_common_entities()
project = common_entities['project']
self.ref_genome_1 = common_entities['reference_genome']
self.chromosome = common_entities['chromosome']
self.sample_1 = ExperimentSample.objects.create(
project=project,
label=SAMPLE_1_LABEL)
# Create 100 variants with alts, positions 1 to 100.
# We add them to a fake VariantSet so that they show up in the
# materialized variant view table.
self.first_variant_position = 1
self.num_variants = 100
fake_variant_set = VariantSet.objects.create(
reference_genome=self.ref_genome_1,
label='fake')
alignment_group = AlignmentGroup.objects.create(
label='Alignment 1',
reference_genome=self.ref_genome_1,
aligner=AlignmentGroup.ALIGNER.BWA)
for position in xrange(self.first_variant_position,
self.first_variant_position + self.num_variants):
variant = Variant.objects.create(
type=Variant.TYPE.TRANSITION,
reference_genome=self.ref_genome_1,
chromosome=self.chromosome,
position=position,
ref_value='A')
variant.variantalternate_set.add(
VariantAlternate.objects.create(
variant=variant,
alt_value='G'))
vccd = VariantCallerCommonData.objects.create(
variant=variant,
source_dataset_id=1,
alignment_group=alignment_group,
data={})
VariantEvidence.objects.create(
experiment_sample=self.sample_1,
variant_caller_common_data=vccd,
data={})
VariantToVariantSet.objects.create(
variant=variant,
variant_set=fake_variant_set)
self.var_set1 = VariantSet.objects.create(
reference_genome=self.ref_genome_1,
label=VARIANTSET_1_LABEL)
self.var_set2 = VariantSet.objects.create(
reference_genome=self.ref_genome_1,
label=VARIANTSET_2_LABEL)
def test_add_and_remove(self):
"""Test add and remove without samples involved.
TODO: Make generating Variants deterministic. Right now we use
random positions.
"""
# No variants before adding.
self.assertEqual(0, self.var_set1.variants.all().count())
variant_obj_list = Variant.objects.filter(
reference_genome=self.ref_genome_1,
position__gt=25,
chromosome=self.chromosome)
variant_uid_sample_uid_pair_list = [obj.uid
for obj in variant_obj_list]
### Test add.
response = update_variant_in_set_memberships(
self.ref_genome_1,
variant_uid_sample_uid_pair_list,
MODIFY_VARIANT_SET_MEMBERSHIP__ADD,
self.var_set1.uid)
self.assertEqual(response['alert_type'], 'info', str(response))
self.assertEqual(len(variant_obj_list),
self.var_set1.variants.all().count())
### Test remove.
num_variants_in_set1_before_remove = (
self.var_set1.variants.all().count())
self.assertTrue(self.var_set1.variants.all().count() > 0)
variant_obj_list = Variant.objects.filter(
reference_genome=self.ref_genome_1,
position__gt=75,
chromosome=self.chromosome,
variantset__uid=self.var_set1.uid)
variant_uids_to_remove = [obj.uid for obj in variant_obj_list]
self.assertTrue(len(variant_uids_to_remove) > 0)
response = update_variant_in_set_memberships(
self.ref_genome_1,
variant_uids_to_remove,
MODIFY_VARIANT_SET_MEMBERSHIP__REMOVE,
self.var_set1.uid)
self.assertEqual(response['alert_type'], 'info', str(response))
# Check that we've reduced the number of variants.
self.assertEqual(
num_variants_in_set1_before_remove -
len(variant_uids_to_remove),
self.var_set1.variants.all().count())
def test_add__sample_association(self):
"""Tests adding a Variant to a VariantSet, with an association to a
particular ExperimentSample.
"""
# No variants before adding.
self.assertEqual(0, self.var_set1.variants.all().count())
# Add these variants without sample associations.
variants_to_add_with_no_sample_association = Variant.objects.filter(
reference_genome=self.ref_genome_1,
position__lt=25,
chromosome=self.chromosome)
variants_no_association_data_str_list = [obj.uid
for obj in variants_to_add_with_no_sample_association]
# Add these variants associated with sample 1.
variants_to_add_with_sample_association = Variant.objects.filter(
reference_genome=self.ref_genome_1,
position__gte=25,
chromosome=self.chromosome)
variants_with_association_data_str_list = [
obj.uid + ',' + self.sample_1.uid
for obj in variants_to_add_with_sample_association]
# Put these together and run it all the at the same time.
all_data_str_list = (variants_no_association_data_str_list +
variants_with_association_data_str_list)
response = update_variant_in_set_memberships(
self.ref_genome_1,
all_data_str_list,
MODIFY_VARIANT_SET_MEMBERSHIP__ADD,
self.var_set1.uid)
self.assertEqual(response['alert_type'], 'info', str(response))
# Make sure all the variants are there.
self.assertEqual(self.num_variants,
self.var_set1.variants.all().count())
all_vtvs = VariantToVariantSet.objects.filter(
variant_set=self.var_set1)
# Sanity check.
self.assertEqual(self.num_variants, all_vtvs.count())
# Check that the Variants we expected to have an association have it.
for vtvs in all_vtvs:
if vtvs.variant in variants_to_add_with_sample_association:
self.assertEqual(1, vtvs.sample_variant_set_association.count())
self.assertEqual(self.sample_1,
vtvs.sample_variant_set_association.all()[0])
else:
self.assertEqual(0, vtvs.sample_variant_set_association.count())
def test_all_matching_filter__all__cast(self):
"""Test adding all matching '' filter, cast.
"""
# No variants before adding.
self.assertEqual(0, self.var_set1.variants.all().count())
# Add all variants.
update_variant_in_set_memberships__all_matching_filter(
self.ref_genome_1,
MODIFY_VARIANT_SET_MEMBERSHIP__ADD,
self.var_set1.uid,
'',
False)
self.assertEqual(self.num_variants,
self.var_set1.variants.all().count())
# Make sure no ExperimentSample association.
for vtvs in VariantToVariantSet.objects.filter(
variant_set=self.var_set1):
self.assertEqual(0, vtvs.sample_variant_set_association.count())
def test_all_matching_filter__partial__cast(self):
"""Test adding all matching partial filter, cast.
"""
# No variants before adding.
self.assertEqual(0, self.var_set1.variants.all().count())
# Add all variants.
update_variant_in_set_memberships__all_matching_filter(
self.ref_genome_1,
MODIFY_VARIANT_SET_MEMBERSHIP__ADD,
self.var_set1.uid,
'position <= 50',
False)
self.assertEqual(50, self.var_set1.variants.all().count())
# Make sure no ExperimentSample association.
for vtvs in VariantToVariantSet.objects.filter(
variant_set=self.var_set1):
self.assertEqual(0, vtvs.sample_variant_set_association.count())
def test_all_matching_filter__all__melted(self):
"""Test adding all matching '' filter, melted.
In the melted case, we expect the variants to be associated with all
samples.
"""
# No variants before adding.
self.assertEqual(0, self.var_set1.variants.all().count())
# Add all variants.
update_variant_in_set_memberships__all_matching_filter(
self.ref_genome_1,
MODIFY_VARIANT_SET_MEMBERSHIP__ADD,
self.var_set1.uid,
'',
True)
self.assertEqual(self.num_variants,
self.var_set1.variants.all().count())
        # In the melted case, each variant should be associated with the sample.
all_vtvs = VariantToVariantSet.objects.filter(
variant_set=self.var_set1)
# Sanity check.
self.assertEqual(self.num_variants, all_vtvs.count())
# Check that the Variants we expected to have an association have it.
for vtvs in all_vtvs:
self.assertEqual(1, vtvs.sample_variant_set_association.count())
self.assertEqual(self.sample_1,
vtvs.sample_variant_set_association.all()[0])
def test_all_matching_filter__partial__melted(self):
"""Test adding all matching partial filter, melted.
In the melted case, we expect the variants to be associated with all
samples.
"""
# No variants before adding.
self.assertEqual(0, self.var_set1.variants.all().count())
EXPECTED_NUM_VARIANTS = 50
# Add all variants.
update_variant_in_set_memberships__all_matching_filter(
self.ref_genome_1,
MODIFY_VARIANT_SET_MEMBERSHIP__ADD,
self.var_set1.uid,
'position <= %d' % EXPECTED_NUM_VARIANTS,
True)
self.assertEqual(EXPECTED_NUM_VARIANTS,
self.var_set1.variants.all().count())
        # In the melted case, each variant should be associated with the sample.
all_vtvs = VariantToVariantSet.objects.filter(
variant_set=self.var_set1)
# Sanity check.
self.assertEqual(EXPECTED_NUM_VARIANTS, all_vtvs.count())
# Check that the Variants we expected to have an association have it.
for vtvs in all_vtvs:
self.assertEqual(1, vtvs.sample_variant_set_association.count())
self.assertEqual(self.sample_1,
vtvs.sample_variant_set_association.all()[0])
| mit | -2,458,550,373,646,282,000 | 38.698068 | 90 | 0.592029 | false |
jthomale/pycallnumber | tests/test_template.py | 1 | 69462 | from __future__ import unicode_literals
from builtins import str
import re
import pytest
from context import unit as u
from context import template as t
from context import exceptions as e
from helpers import make_obj_factory, generate_params
# SimpleTemplate ******************************************************
# Fixtures, factories, and test data
st_kwargs = ['min_length', 'max_length', 'base_pattern', 'pre_pattern',
'post_pattern']
make_simple_template = make_obj_factory(t.SimpleTemplate, kwarg_list=st_kwargs)
SIMPLE_TEMPLATE_DATA = {
# min, max, base_pattern, pre_pattern, post_pattern
(0, 1, r'(?:[A-Z][0-9])', '', ''): {
'valid': ['', 'A1', 'B2'],
'invalid': ['A', '0', '.', 'AA', '0A', 'AA0', 'A1B2'],
},
(1, 1, r'(?:[A-Z][0-9])', '', ''): {
'valid': ['A1', 'B2'],
'invalid': ['', 'A1B2'],
},
(2, 2, r'(?:[A-Z][0-9])', '', ''): {
'valid': ['A1B2'],
'invalid': ['', 'A1', 'A1B2C3'],
},
(1, 2, r'(?:[A-Z][0-9])', '', ''): {
'valid': ['A1', 'A1B2'],
'invalid': ['', 'A1B2C3'],
},
(1, None, r'(?:[A-Z][0-9])', '', ''): {
'valid': ['A1', 'A1B2', 'A1B2C3', 'A1B2C3D4'],
'invalid': [''],
},
(0, 2, r'(?:[A-Z][0-9])', r'[0-9]', r'\.'): {
'valid': ['0.', '0A0.', '0A1B2.'],
'invalid': ['A0', 'A0.', '0A0', '0A1B2C3.', '0A0.1B1.'],
},
}
SIMPLE_VALID_PARAMS = generate_params(SIMPLE_TEMPLATE_DATA, 'valid')
SIMPLE_INVALID_PARAMS = generate_params(SIMPLE_TEMPLATE_DATA, 'invalid')
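# Each SIMPLE_TEMPLATE_DATA entry maps a tuple of SimpleTemplate kwargs to
# strings that should (or should not) validate; generate_params is assumed
# to flatten that into (params, test_string) pairs for the
# pytest.mark.parametrize calls below.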
# Tests
def test_simple_template_validate_invalid_min_max():
"""Instantiating a SimpleTemplate should raise a SettingsError if
``min_length`` is greater than ``max_length``.
"""
with pytest.raises(e.SettingsError):
t.SimpleTemplate(min_length=1, max_length=0)
@pytest.mark.parametrize('params, tstr', SIMPLE_VALID_PARAMS)
def test_simple_template_validate_true(params, tstr):
"""The given test string, when passed to a SimpleTemplate object
created with the given params' ``validate`` method, should return
True.
"""
template = make_simple_template(*params)
assert template.validate(tstr) is True
@pytest.mark.parametrize('params, tstr', SIMPLE_INVALID_PARAMS)
def test_simple_template_validate_error(params, tstr):
"""The given test string, when passed to a SimpleTemplate object
created with the given params' ``validate`` method, should raise an
InvalidCallNumberStringError.
"""
template = make_simple_template(*params)
with pytest.raises(e.InvalidCallNumberStringError):
template.validate(tstr)
def test_simple_template_describe_short_provided():
"""Passing a ``short_description`` parameter to SimpleTemplate
should mean calls to ``describe_short`` use that exact string.
"""
short_desc = 'short'
template = t.SimpleTemplate(short_description=short_desc)
assert template.describe_short() == short_desc
def test_simple_template_describe_long_provided():
"""Passing a ``long_description`` parameter to SimpleTemplate
should mean calls to ``describe_long`` use that exact string.
"""
long_desc = 'long'
template = t.SimpleTemplate(long_description=long_desc)
assert template.describe_long() == long_desc
def test_simple_template_describe_short_generated_qualities():
"""A description generated by SimpleTemplate.describe_short should
start with 'a string' and not end with a period.
"""
short_desc = t.SimpleTemplate().describe_short()
assert short_desc.startswith('a string') and not short_desc.endswith('.')
def test_simple_template_describe_long_generated_qualities():
"""A description generated by SimpleTemplate.describe_long should
start with 'A SimpleTemplate' and end with a period. It should
contain the full text of SimpleTemplate.describe_short.
"""
long_desc = t.SimpleTemplate().describe_long()
short_desc = t.SimpleTemplate().describe_short()
assert (long_desc.startswith('A SimpleTemplate') and
long_desc.endswith('.') and
short_desc in long_desc)
@pytest.mark.parametrize('dtype', ['base', 'pre', 'post'])
def test_simple_template_describe_generated_base_pre_post(dtype):
"""Descriptions generated by the SimpleTemplate.describe methods
should include a provided ``base_description``. But, they should
only include a ``pre_description`` and ``post_description`` if the
``pre_pattern`` and `post_pattern`` are also provided.
"""
desc = 'test_{}'.format(dtype)
kwargs_no_pattern = {
'{}_description'.format(dtype): desc
}
kwargs_with_pattern = {
'{}_description'.format(dtype): desc,
'{}_pattern'.format(dtype): desc
}
template_no_pattern = t.SimpleTemplate(**kwargs_no_pattern)
template_with_pattern = t.SimpleTemplate(**kwargs_with_pattern)
if dtype == 'base':
assertion_no_pat = desc in template_no_pattern.describe_short()
else:
assertion_no_pat = desc not in template_no_pattern.describe_short()
assert desc in template_with_pattern.describe_short() and assertion_no_pat
@pytest.mark.parametrize('min_, max_, pattern', [
(0, 1, r'0 to 1'),
(1, 1, r'(?<!0 to )1'),
(2, 2, r'(?<!0 to )2'),
(1, 2, r'1 to 2'),
(0, None, r'0 or more')
])
def test_simple_template_describe_generated_base_min_max(min_, max_, pattern):
"""Descriptions generated by the SimpleTemplate.describe methods
should match the appropriate regular expression pattern based on
the min_length and max_length values used to initialize the
SimpleTemplate, plus a textual description provided for
``base_description``.
"""
base_description = 'base'
pattern = r'{} {}'.format(pattern, base_description)
template = t.SimpleTemplate(min_length=min_, max_length=max_,
base_description=base_description)
assert re.search(pattern, template.describe_short()) is not None
# Grouping ************************************************************
# Fixtures, factories, and test data
gr_kwargs = ['min', 'max', 'types', 'name', 'inner_sep_type',
'outer_sep_group', 'outer_sep_placement']
make_grouping = make_obj_factory(t.Grouping, kwarg_list=gr_kwargs)
def make_simple_unit_type(min_length, max_length, base_pattern, classname):
    """Derive a SimpleUnit subclass matching ``base_pattern`` with the
    given length bounds.
    """
    return u.SimpleUnit.derive(min_length=min_length, max_length=max_length,
                               base_pattern=base_pattern, classname=classname)
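# Concrete unit types used throughout the Grouping tests below; each is
# assumed to match its regex fragment between min_length and max_length
# times, per the SimpleUnit.derive factory above.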
alpha_utype = make_simple_unit_type(1, 1, r'[A-Za-z]', 'Alpha')
alpha_opt_utype = make_simple_unit_type(0, 1, r'[A-Za-z]', 'AlphaOpt')
olap_utype = make_simple_unit_type(1, 1, r'[T-Zt-z]', 'Overlap')
digit_utype = make_simple_unit_type(1, 1, r'[0-9]', 'Digit')
pipe_utype = make_simple_unit_type(1, 1, r'\|', 'Pipe')
dot_utype = make_simple_unit_type(1, 1, r'\.', 'Dot')
outer_sep_gr = make_grouping(1, 1, [pipe_utype], 'sep0', None, None, None)
outer_sep_gr_opt = make_grouping(0, 1, [pipe_utype], 'sep0', None, None, None)
GROUPING_DATA = {
# min, max, types, name, inner_sep_type, outer_sep_group,
# outer_sep_placement
('0 to 1, alpha_utype, no separators',
0, 1, (alpha_utype,), 'letter', None, None, None): {
'invalid': ['aa', '0', ' '],
'valid_parse': [
('a', ((alpha_utype, 'a'),)),
('', (None,))
]
},
('1 to 1, alpha_utype, no separators',
1, 1, (alpha_utype,), 'letter', None, None, None): {
'invalid': [''],
'valid_parse': []
},
('1 to 1, alpha_utype, inner separator (pipe)',
1, 1, (alpha_utype,), 'letter', pipe_utype, None, None): {
'invalid': [''],
'valid_parse': []
},
('2 to 2, alpha_utype, no separators',
2, 2, (alpha_utype,), 'letter', None, None, None): {
'invalid': ['a', 'abc'],
'valid_parse': [
('ab', ([(alpha_utype, 'a'), (alpha_utype, 'b')],)),
]
},
('1 to N, alpha_utype, no separators',
1, None, (alpha_utype,), 'letter', None, None, None): {
'invalid': [],
'valid_parse': [
('abc', ([(alpha_utype, 'a'), (alpha_utype, 'b'),
(alpha_utype, 'c')],)),
]
},
('1 to 3, alpha_utype, inner separator (pipe)',
1, 3, (alpha_utype,), 'letter', pipe_utype, None, None): {
'invalid': ['', '|', 'a|', '|a', 'a|b|', 'a|b|c|d', 'abc'],
'valid_parse': [
('a', ([(alpha_utype, 'a')],)),
('a|b', ([(alpha_utype, 'a'), (pipe_utype, '|'),
(alpha_utype, 'b')],)),
]
},
('1 to 3, alpha_opt_utype, inner separator (pipe)',
1, 3, (alpha_opt_utype,), 'letter', pipe_utype, None, None): {
'invalid': ['|', 'a|', '|a', 'a|b|', 'a|b|c|d', 'abc'],
'valid_parse': [
('a', ([(alpha_opt_utype, 'a')],)),
('a|b', ([(alpha_opt_utype, 'a'), (pipe_utype, '|'),
(alpha_opt_utype, 'b')],)),
]
},
('1 to 3, alpha_utype or digit_utype, inner separator (pipe)',
1, 3, (alpha_utype, digit_utype), 'chr', pipe_utype, None, None): {
'invalid': ['0a1', '0|a|1|2'],
'valid_parse': [
('0|1|a', ([(digit_utype, '0'), (pipe_utype, '|'),
(digit_utype, '1'), (pipe_utype, '|'),
(alpha_utype, 'a')],)),
]
},
('1 to 3, alpha_utype or overlap_utype, inner separator (pipe)',
1, 3, (alpha_utype, olap_utype), 'chr', pipe_utype, None, None): {
'invalid': [],
'valid_parse': [
('a|b', ([(alpha_utype, 'a'), (pipe_utype, '|'),
(alpha_utype, 'b')],)),
('a|t|z', ([(alpha_utype, 'a'), (pipe_utype, '|'),
(alpha_utype, 't'), (pipe_utype, '|'),
(alpha_utype, 'z')],)),
]
},
('1 to 3, overlap_utype or alpha_utype, inner separator (pipe)',
1, 3, (olap_utype, alpha_utype), 'chr', pipe_utype, None, None): {
'invalid': [],
'valid_parse': [
('a|b', ([(alpha_utype, 'a'), (pipe_utype, '|'),
(alpha_utype, 'b')],)),
('a|t|z', ([(alpha_utype, 'a'), (pipe_utype, '|'),
(olap_utype, 't'), (pipe_utype, '|'),
(olap_utype, 'z')],)),
]
},
('0 to 1, alpha_utype, required outer separator (pipe) before',
0, 1, (alpha_utype,), 'letter', None, outer_sep_gr, 'before'): {
'invalid': [],
'valid_parse': [
('|a', ((pipe_utype, '|'), (alpha_utype, 'a'))),
]
},
('0 to 1, alpha_utype, required outer separator (pipe) after',
0, 1, (alpha_utype,), 'letter', None, outer_sep_gr, 'after'): {
'invalid': [],
'valid_parse': [
('a|', ((alpha_utype, 'a'), (pipe_utype, '|'))),
]
},
('0 to 1, alpha_utype, optional outer separator (pipe) before',
0, 1, (alpha_utype,), 'letter', None, outer_sep_gr_opt, 'before'): {
'invalid': [],
'valid_parse': [
('', (None, None)),
('a', (None, (alpha_utype, 'a'))),
]
},
('0 to 1, alpha_utype, optional outer separator (pipe) after',
0, 1, (alpha_utype,), 'letter', None, outer_sep_gr_opt, 'after'): {
'invalid': [],
'valid_parse': [
('a', ((alpha_utype, 'a'), None)),
]
},
(('0 to 2, alpha_utype, inner separator (dot) and required outer '
'separator (pipe) before'),
0, 2, (alpha_utype,), 'letter', dot_utype, outer_sep_gr, 'before'): {
'invalid': [],
'valid_parse': [
('|a.a', ((pipe_utype, '|'), [(alpha_utype, 'a'), (dot_utype, '.'),
(alpha_utype, 'a')])),
]
},
}
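# Each GROUPING_DATA value maps 'invalid' to strings that must fail parsing
# and 'valid_parse' to (test string, expected unit split) pairs; both feed
# the parametrized tests below via generate_params.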
GROUPING_INVALID_PARAMS = generate_params(GROUPING_DATA, 'invalid')
GROUPING_VALID_PARSE_PARAMS = generate_params(GROUPING_DATA, 'valid_parse')
# Tests
def test_grouping_init_fails_without_types():
"""Trying to initialize a Grouping object without specifying the
``types`` kwarg raises a SettingsError.
"""
with pytest.raises(e.SettingsError):
t.Grouping(name='one')
def test_grouping_init_fails_without_name():
"""Trying to initialize a Grouping object without specifying the
``name`` kwarg raises a SettingsError.
"""
with pytest.raises(e.SettingsError):
t.Grouping(types=[alpha_utype])
def test_grouping_is_optional():
"""If the min value of the grouping is 0, then accessing the
``is_optional`` property should return True.
"""
grouping = t.Grouping(name='one', types=[alpha_utype], min=0)
assert grouping.is_optional
def test_grouping_is_not_optional():
"""If the min value of the grouping is >0, then accessing the
``is_optional`` property should return False.
"""
grouping = t.Grouping(name='one', types=[alpha_utype], min=1)
assert not grouping.is_optional
@pytest.mark.parametrize('params, tstr, expected', GROUPING_VALID_PARSE_PARAMS)
def test_grouping_cnstr_to_units(params, tstr, expected):
"""Calling the ``cnstr_to_units`` method of a Grouping created
using the given params on the provided test string (tstr) should
produce the list of expected results. Each value of ``expected``
should be a tuple, where the first value is a Unit type, and the
second value is the portion of the test string that goes with that
Unit type.
"""
grouping = make_grouping(*params[1:])
split = grouping.cnstr_to_units(tstr, {})
for i, test_part in enumerate(split):
expected_part = expected[i]
if test_part is None:
assert expected_part is None
elif isinstance(test_part, list):
assert isinstance(expected_part, list)
for j, test_sub_part in enumerate(test_part):
ex_type, ex_value = expected_part[j]
assert type(test_sub_part) == ex_type
assert str(test_sub_part) == ex_value
else:
ex_type, ex_value = expected_part
assert type(test_part) == ex_type
assert str(test_part) == ex_value
@pytest.mark.parametrize('params, tstr', GROUPING_INVALID_PARAMS)
def test_grouping_cnstr_to_units_error(params, tstr):
"""Calling the ``cnstr_to_units`` method of a Grouping created
using the given params on the provided test string (tstr) should
either raise an InvalidCallNumberStringError, assuming the test
string matches the pattern produced by the ``get_full_regex``
method.
"""
grouping = make_grouping(*params[1:])
p = r'^{}$'.format(grouping.get_full_regex().pattern)
if re.search(p, tstr):
with pytest.raises(e.InvalidCallNumberStringError):
grouping.cnstr_to_units(tstr, {})
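# Illustrative sketch of the parsing contract exercised above (mirrors the
# '1 to 3, alpha_utype, inner separator (pipe)' entry; not an additional
# test case):
#   grouping = make_grouping(1, 3, (alpha_utype,), 'letter', pipe_utype,
#                            None, None)
#   grouping.cnstr_to_units('a|b', {})
#   # -> one tuple holding a list of alternating value/separator units,
#   #    here (Alpha 'a', Pipe '|', Alpha 'b')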
# CompoundTemplate ****************************************************
# Fixtures, factories, and test data
#
# Unit Types
# (utypes matching exactly 1 character)
alpha_utype = make_simple_unit_type(1, 1, r'[A-Za-z]', 'Alpha')
digit_utype = make_simple_unit_type(1, 1, r'[0-9]', 'Digit')
pipe_utype = make_simple_unit_type(1, 1, r'\|', 'Pipe')
dot_utype = make_simple_unit_type(1, 1, r'\.', 'Dot')
# (opt utypes, matching 0 or 1 characters)
alpha_opt_utype = make_simple_unit_type(0, 1, r'[A-Za-z]', 'AlphaOpt')
digit_opt_utype = make_simple_unit_type(0, 1, r'[0-9]', 'DigitOpt')
pipe_opt_utype = make_simple_unit_type(0, 1, r'\|', 'PipeOpt')
dot_opt_utype = make_simple_unit_type(0, 1, r'\.', 'DotOpt')
# (multi utypes, matching 1 or 2 characters)
alpha_multi_utype = make_simple_unit_type(1, 2, r'[A-Za-z]', 'AlphaMulti')
digit_multi_utype = make_simple_unit_type(1, 2, r'[0-9]', 'DigitMulti')
pipe_multi_utype = make_simple_unit_type(1, 2, r'\|', 'PipeMulti')
dot_multi_utype = make_simple_unit_type(1, 2, r'\.', 'DotMulti')
# (max utypes, matching 1 or more characters)
alpha_max_utype = make_simple_unit_type(1, None, r'[A-Za-z]', 'AlphaMax')
# Grouping Parameters, or gp
# (gps matching exactly 1 grouping)
alpha_gp = (1, 1, 'letter1', (alpha_utype,))
beta_gp = (1, 1, 'letter2', (alpha_utype,))
digit_gp = (1, 1, 'digit', (digit_utype,))
sep_pipe_gp = (1, 1, 'sep_pipe', (pipe_utype,), True)
sep_dot_gp = (1, 1, 'sep_dot', (dot_utype,), True)
mixed_gp = (1, 1, 'mixed', (alpha_utype, digit_utype))
# (opt gps, matching 0 or 1 groupings)
alpha_gp_opt = (0, 1, 'letter1', (alpha_utype,))
alpha_opt_gp = (1, 1, 'letter1', (alpha_opt_utype,))
alpha_opt_gp_opt = (0, 1, 'letter1', (alpha_opt_utype,))
digit_gp_opt = (0, 1, 'digit', (digit_utype,))
digit_opt_gp = (1, 1, 'digit', (digit_opt_utype,))
digit_opt_gp_opt = (0, 1, 'digit', (digit_opt_utype,))
sep_pipe_gp_opt = (0, 1, 'sep_pipe', (pipe_utype,), True)
sep_pipe_opt_gp = (1, 1, 'sep_pipe', (pipe_opt_utype,), True)
sep_pipe_opt_gp_opt = (0, 1, 'sep_pipe', (pipe_opt_utype,), True)
mixed_gp_opt = (0, 1, 'mixed', (alpha_utype, digit_utype))
mixed_opt_gp = (1, 1, 'mixed', (alpha_opt_utype, digit_opt_utype))
mixed_mixed_opt_gp1 = (1, 1, 'mixed', (alpha_opt_utype, digit_utype))
mixed_mixed_opt_gp2 = (1, 1, 'mixed', (alpha_utype, digit_opt_utype))
mixed_opt_gp_opt = (0, 1, 'mixed', (alpha_opt_utype, digit_opt_utype))
mixed_mixed_opt_gp_opt1 = (0, 1, 'mixed', (alpha_opt_utype, digit_utype))
mixed_mixed_opt_gp_opt2 = (0, 1, 'mixed', (alpha_utype, digit_opt_utype))
# (multi gps, matching 1 or 2 groupings)
alpha_gp_multi = (1, 2, 'letter1', (alpha_utype,))
alpha_gp_multi_sep = (1, 2, 'letter1', (alpha_utype,), False, dot_utype)
alpha_gp_multi_sep_opt = (1, 2, 'letter1', (alpha_utype,), False,
dot_opt_utype)
alpha_gp_multi_sep_multi = (1, 2, 'letter1', (alpha_utype,), False,
dot_multi_utype)
alpha_multi_gp = (1, 1, 'letter1', (alpha_multi_utype,))
alpha_multi_gp_multi = (1, 2, 'letter1', (alpha_multi_utype,))
# (optmulti_gps, matching 0 or 2 groupings)
alpha_gp_optmulti_sep = (0, 2, 'letter1', (alpha_utype,), False, dot_utype)
alpha_gp_optmulti_sep_opt = (0, 2, 'letter1', (alpha_utype,), False,
dot_opt_utype)
# (max gps, matching 1 or more groupings)
alpha_gp_max = (1, None, 'letter1', (alpha_utype,))
alpha_gp_max_sep = (1, None, 'letter1', (alpha_utype,), False, dot_utype)
alpha_max_gp = (1, 1, 'letter1', (alpha_max_utype,))
alpha_max_gp_max = (1, None, 'letter1', (alpha_max_utype,))
alpha_max_gp_max_sep = (1, None, 'letter1', (alpha_max_utype,), False,
dot_utype)
alpha_max_gp_max_sep_opt = (1, None, 'letter1', (alpha_max_utype,), False,
dot_opt_utype)
def gp_tuple_to_dict(gp_tuple):
"""Convert a groupings parameters (gp) tuple into a dict suitable
to pass to the ``grouping_parameters`` CompoundTemplate.__init__
kwarg.
"""
params = [{'min': 1}, {'max': 1}, {'name': None}, {'possible_types': None},
{'is_separator': False}, {'inner_sep_type': None}]
d = {}
for i, param in enumerate(params):
if i < len(gp_tuple):
d[list(param.keys())[0]] = gp_tuple[i]
else:
d[list(param.keys())[0]] = list(param.values())[0]
return d
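# For example (hypothetical tuple), gp_tuple_to_dict((0, 2)) yields
# {'min': 0, 'max': 2, 'name': None, 'possible_types': None,
#  'is_separator': False, 'inner_sep_type': None}; omitted positions fall
# back to the defaults listed in ``params``.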
COMPOUND_TEMPLATE_DATA = {
# dict key tuple format is:
# (test description, separator_type, grouping_parameters)
#
# GENERAL tests with single groups that match one unit type
('No separators, exact character match groupings',
None, (alpha_gp, beta_gp)): {
'valid': ['aa', 'AA', 'aA', 'Aa', 'az', 'za'],
'invalid': ['', 'aaa', 'a', 'aa1', 'a1', '1a', 'a ', 'a a', ' a'],
'parts': [
('az', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'z')])
]
},
('Default separator, exact character match groupings',
pipe_utype, (alpha_gp, beta_gp)): {
'valid': ['a|a'],
'invalid': ['aa', 'a.a', 'a a', '|a', 'a|'],
'parts': [
('a|z', [(alpha_utype, 'letter1', 'a'), (pipe_utype, 0, '|'),
(alpha_utype, 'letter2', 'z')])
]
},
('Explicit separator, exact character match groupings',
None, (alpha_gp, sep_dot_gp, beta_gp)): {
'valid': ['a.a'],
'invalid': ['aa', 'a|a', 'a a'],
'parts': [
('a.z', [(alpha_utype, 'letter1', 'a'),
(dot_utype, 'sep_dot', '.'),
(alpha_utype, 'letter2', 'z')])
]
},
('Default and explicit separators, exact character match groupings',
pipe_utype, (alpha_gp, sep_dot_gp, beta_gp, digit_gp)): {
'valid': ['a.a|1'],
'invalid': ['aa1', 'a.a1', 'aa|1', 'a a 1', 'a.a.1', 'a|a|1', 'a|.a|1',
'a.|a|1'],
'parts': [
('a.z|1', [(alpha_utype, 'letter1', 'a'),
(dot_utype, 'sep_dot', '.'),
(alpha_utype, 'letter2', 'z'), (pipe_utype, 0, '|'),
(digit_utype, 'digit', '1')])
]
},
# OPTIONAL Groupings and Unit Types, no separator
('No separators, first grouping is optional',
None, (alpha_gp_opt, beta_gp)): {
'valid': ['a', 'aa'],
'invalid': ['', '1a', 'aaa'],
'parts': [
('a', [(alpha_utype, 'letter2', 'a')]),
('aa', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')])
]
},
('No separators, first grouping uses optional utype',
None, (alpha_opt_gp, beta_gp)): {
'valid': ['a', 'aa'],
'invalid': ['', 'aaa'],
'parts': [
('a', [(alpha_utype, 'letter2', 'a')]),
('aa', [(alpha_opt_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')])
]
},
('No separators, first grouping is optional and uses optional utype',
None, (alpha_opt_gp_opt, beta_gp)): {
'valid': ['a', 'aa'],
'invalid': ['', 'aaa'],
'parts': [
('a', [(alpha_utype, 'letter2', 'a')]),
('aa', [(alpha_opt_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')])
]
},
('No separators, last grouping is optional',
None, (beta_gp, alpha_gp_opt)): {
'valid': ['a', 'aa'],
'invalid': ['', 'aaa'],
'parts': [
('a', [(alpha_utype, 'letter2', 'a')]),
('aa', [(alpha_utype, 'letter2', 'a'),
(alpha_utype, 'letter1', 'a')])
]
},
('No separators, last grouping uses optional utype',
None, (beta_gp, alpha_opt_gp)): {
'valid': ['a', 'aa'],
'invalid': ['', 'aaa'],
'parts': [
('a', [(alpha_utype, 'letter2', 'a')]),
('aa', [(alpha_utype, 'letter2', 'a'),
(alpha_opt_utype, 'letter1', 'a')])
]
},
('No separators, last grouping is optional and uses optional utype',
None, (beta_gp, alpha_opt_gp_opt)): {
'valid': ['a', 'aa'],
'invalid': ['', 'aaa'],
'parts': [
('a', [(alpha_utype, 'letter2', 'a')]),
('aa', [(alpha_utype, 'letter2', 'a'),
(alpha_opt_utype, 'letter1', 'a')])
]
},
# OPTIONAL Groupings and Unit Types, explicit separator
('Explicit optional separator grouping',
None, (alpha_gp, sep_pipe_gp_opt, beta_gp)): {
'valid': ['aa', 'a|a'],
'invalid': ['', 'a', '|', 'aaa', 'a|', '|a'],
'parts': [
('aa', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('a|a', [(alpha_utype, 'letter1', 'a'),
(pipe_utype, 'sep_pipe', '|'),
(alpha_utype, 'letter2', 'a')])
]
},
('Explicit separator grouping that uses optional utype',
None, (alpha_gp, sep_pipe_opt_gp, beta_gp)): {
'valid': ['aa', 'a|a'],
'invalid': ['', 'a', '|', 'aaa', 'a|', '|a'],
'parts': [
('aa', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('a|a', [(alpha_utype, 'letter1', 'a'),
(pipe_opt_utype, 'sep_pipe', '|'),
(alpha_utype, 'letter2', 'a')])
]
},
('Explicit optional separator grouping that uses optional utype',
None, (alpha_gp, sep_pipe_opt_gp_opt, beta_gp)): {
'valid': ['aa', 'a|a'],
'invalid': ['', 'a', '|', 'aaa', 'a|', '|a'],
'parts': [
('aa', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('a|a', [(alpha_utype, 'letter1', 'a'),
(pipe_opt_utype, 'sep_pipe', '|'),
(alpha_utype, 'letter2', 'a')])
]
},
('Different explicit separators surrounding an optional grouping',
None, (beta_gp, sep_dot_gp, alpha_gp_opt, sep_pipe_gp, digit_gp)): {
'valid': ['a.a|1', 'a|1'],
'invalid': ['a.1', 'a.|1'],
'parts': [
('a.a|1', [(alpha_utype, 'letter2', 'a'),
(dot_utype, 'sep_dot', '.'),
(alpha_utype, 'letter1', 'a'),
(pipe_utype, 'sep_pipe', '|'),
(digit_utype, 'digit', '1')]),
('a|1', [(alpha_utype, 'letter2', 'a'),
(pipe_utype, 'sep_pipe', '|'),
(digit_utype, 'digit', '1')])
]
},
# OPTIONAL Groupings and Unit Types, default optional separator
('Default optional separator, exact character match groupings',
pipe_opt_utype, (alpha_gp, beta_gp)): {
'valid': ['aa', 'a|a'],
'invalid': ['', 'a', '|', 'a|', '|a'],
'parts': [
('aa', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('a|a', [(alpha_utype, 'letter1', 'a'),
(pipe_opt_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')])
]
},
('Default optional separator, first grouping is optional',
pipe_opt_utype, (alpha_gp_opt, beta_gp)): {
'valid': ['a', 'aa', 'a|a'],
'invalid': ['', '|', 'aaa', 'a|', '|a'],
'parts': [
('a', [(alpha_utype, 'letter2', 'a')]),
('aa', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('a|a', [(alpha_utype, 'letter1', 'a'),
(pipe_opt_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')])
]
},
('Default optional separator, first grouping uses optional utype',
pipe_opt_utype, (alpha_opt_gp, beta_gp)): {
'valid': ['a', 'aa', 'a|a'],
'invalid': ['', '|', 'aaa', 'a|', '|a'],
'parts': [
('a', [(alpha_utype, 'letter2', 'a')]),
('aa', [(alpha_opt_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('a|a', [(alpha_opt_utype, 'letter1', 'a'),
(pipe_opt_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')])
]
},
(('Default optional separator, first grouping is optional and uses '
'optional utype'),
# NOTE Here: '|a' is invalid because the first grouping is
# optional; it uses min=0. A blank value means there is no first
# grouping, and so the separator (pipe) is not allowed, as it
# makes no sense to have a separator with a single valid
# grouping.
pipe_opt_utype, (alpha_opt_gp_opt, beta_gp)): {
'valid': ['a', 'aa', 'a|a'],
'invalid': ['', '|', 'a|', '|a', 'aaa'],
'parts': [
('a', [(alpha_utype, 'letter2', 'a')]),
('aa', [(alpha_opt_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('a|a', [(alpha_opt_utype, 'letter1', 'a'),
(pipe_opt_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')])
]
},
('Default optional separator, last grouping is optional',
pipe_opt_utype, (beta_gp, alpha_gp_opt)): {
'valid': ['a', 'aa', 'a|a'],
'invalid': ['', '|', 'a|', '|a', 'aaa'],
'parts': [
('a', [(alpha_utype, 'letter2', 'a')]),
('aa', [(alpha_utype, 'letter2', 'a'),
(alpha_utype, 'letter1', 'a')]),
('a|a', [(alpha_utype, 'letter2', 'a'),
(pipe_opt_utype, 0, '|'),
(alpha_utype, 'letter1', 'a')])
]
},
('Default optional separator, last grouping uses optional utype',
pipe_opt_utype, (beta_gp, alpha_opt_gp)): {
'valid': ['a', 'aa', 'a|a'],
'invalid': ['', '|', '|a', 'a|', 'aaa'],
'parts': [
('a', [(alpha_utype, 'letter2', 'a')]),
('aa', [(alpha_utype, 'letter2', 'a'),
(alpha_opt_utype, 'letter1', 'a')]),
('a|a', [(alpha_utype, 'letter2', 'a'),
(pipe_opt_utype, 0, '|'),
(alpha_opt_utype, 'letter1', 'a')])
]
},
(('Default optional separator, last grouping is optional and uses '
'optional utype'),
pipe_opt_utype, (beta_gp, alpha_opt_gp_opt)): {
'valid': ['a', 'aa', 'a|a'],
'invalid': ['', '|', 'a|', '|a', 'aaa'],
'parts': [
('a', [(alpha_utype, 'letter2', 'a')]),
('aa', [(alpha_utype, 'letter2', 'a'),
(alpha_opt_utype, 'letter1', 'a')]),
('a|a', [(alpha_utype, 'letter2', 'a'),
(pipe_opt_utype, 0, '|'),
(alpha_opt_utype, 'letter1', 'a')])
]
},
(('Default optional separator, both groupings are optional and use '
'optional utype'),
pipe_opt_utype, (alpha_opt_gp_opt, digit_opt_gp_opt)): {
'valid': ['', 'a', '1', 'a1', 'a|1'],
'invalid': ['|', 'a|', '|1'],
'parts': [
('a', [(alpha_opt_utype, 'letter1', 'a')]),
('1', [(digit_opt_utype, 'digit', '1')]),
('a1', [(alpha_opt_utype, 'letter1', 'a'),
(digit_opt_utype, 'digit', '1')]),
('a|1', [(alpha_opt_utype, 'letter1', 'a'),
(pipe_opt_utype, 0, '|'),
(digit_opt_utype, 'digit', '1')])
]
},
# OPTIONAL Groupings and Unit Types, default required separator
('Default required separator, first grouping is optional',
pipe_utype, (alpha_gp_opt, beta_gp)): {
'valid': ['a', 'a|a'],
'invalid': ['', 'aa', 'aaa', '|', 'a|', '|a'],
'parts': [
('a', [(alpha_utype, 'letter2', 'a')]),
('a|a', [(alpha_utype, 'letter1', 'a'),
(pipe_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')])
]
},
('Default required separator, first grouping uses optional utype',
pipe_utype, (alpha_opt_gp, beta_gp)): {
'valid': ['a|a'],
'invalid': ['', 'a', 'aa', 'aaa', '|', 'a|', '|a'],
'parts': [
('a|a', [(alpha_opt_utype, 'letter1', 'a'),
(pipe_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')])
]
},
(('Default required separator, first grouping is optional and uses '
'optional utype'),
pipe_utype, (alpha_opt_gp_opt, beta_gp)): {
'valid': ['a', 'a|a'],
'invalid': ['', 'aa', '|', 'aaa', 'a|', '|a'],
'parts': [
('a', [(alpha_utype, 'letter2', 'a')]),
('a|a', [(alpha_opt_utype, 'letter1', 'a'),
(pipe_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')])
]
},
# MULTIPLE Instances of a Unit Type and/or Grouping, no separators
('No separators, first grouping allows multiple',
None, (alpha_gp_multi, beta_gp)): {
'valid': ['aa', 'aaa'],
'invalid': ['', 'a', 'aaaa'],
'parts': [
('aa', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('aaa', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')])
]
},
('No separators, first grouping uses multiple utype',
None, (alpha_multi_gp, beta_gp)): {
'valid': ['aa', 'aaa'],
'invalid': ['', 'a', 'aaaa'],
'parts': [
('aa', [(alpha_multi_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('aaa', [(alpha_multi_utype, 'letter1', 'aa'),
(alpha_utype, 'letter2', 'a')])
]
},
('No separators, first grouping allows multiple and uses multiple utype',
None, (alpha_multi_gp_multi, beta_gp)): {
'valid': ['aa', 'aaa', 'aaaaa'],
'invalid': ['', 'a', 'aaaaaa'],
'parts': [
('aa', [(alpha_multi_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('aaa', [(alpha_multi_utype, 'letter1', 'aa'),
(alpha_utype, 'letter2', 'a')]),
('aaaa', [(alpha_multi_utype, 'letter1', 'aa'),
(alpha_multi_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('aaaaa', [(alpha_multi_utype, 'letter1', 'aa'),
(alpha_multi_utype, 'letter1', 'aa'),
(alpha_utype, 'letter2', 'a')])
]
},
('No separators, last grouping allows multiple',
None, (beta_gp, alpha_gp_multi)): {
'valid': ['aa', 'aaa'],
'invalid': ['', 'a', 'aaaa'],
'parts': [
('aa', [(alpha_utype, 'letter2', 'a'),
(alpha_utype, 'letter1', 'a')]),
('aaa', [(alpha_utype, 'letter2', 'a'),
(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter1', 'a')])
]
},
('No separators, last grouping uses multiple utype',
None, (beta_gp, alpha_multi_gp)): {
'valid': ['aa', 'aaa'],
'invalid': ['', 'a', 'aaaa'],
'parts': [
('aa', [(alpha_utype, 'letter2', 'a'),
(alpha_multi_utype, 'letter1', 'a')]),
('aaa', [(alpha_utype, 'letter2', 'a'),
(alpha_multi_utype, 'letter1', 'aa')])
]
},
('No separators, last grouping allows multiple and uses multiple utype',
None, (beta_gp, alpha_multi_gp_multi)): {
'valid': ['aa', 'aaa', 'aaaaa'],
'invalid': ['', 'a', 'aaaaaa'],
'parts': [
('aa', [(alpha_utype, 'letter2', 'a'),
(alpha_multi_utype, 'letter1', 'a')]),
('aaa', [(alpha_utype, 'letter2', 'a'),
(alpha_multi_utype, 'letter1', 'aa')]),
('aaaa', [(alpha_utype, 'letter2', 'a'),
(alpha_multi_utype, 'letter1', 'aa'),
(alpha_multi_utype, 'letter1', 'a')]),
('aaaaa', [(alpha_utype, 'letter2', 'a'),
(alpha_multi_utype, 'letter1', 'aa'),
(alpha_multi_utype, 'letter1', 'aa')])
]
},
# MULTIPLE Unit Types/Groupings, inner separators only
('Inner required separator, first grouping allows multiple',
None, (alpha_gp_multi_sep, beta_gp)): {
'valid': ['aa', 'a.aa'],
'invalid': ['', 'a', 'aaa', 'aa.a', 'a.a', 'a.', 'a.a.', 'a.aa.',
'.a', '.a.a', 'a.a.aa'],
'parts': [
('aa', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('a.aa', [(alpha_utype, 'letter1', 'a'),
(dot_utype, '', '.'),
(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')])
]
},
('Inner optional separator, first grouping allows multiple',
None, (alpha_gp_multi_sep_opt, beta_gp)): {
'valid': ['aa', 'a.aa', 'aaa'],
'invalid': ['', 'a', 'aaaa', 'aa.a', 'a.a', 'a.', 'a.a.', 'a.aa.',
'.a', '.a.a', 'a.a.aa'],
'parts': [
('aa', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('a.aa', [(alpha_utype, 'letter1', 'a'),
(dot_opt_utype, '', '.'),
(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('aaa', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')])
]
},
('Inner multi-separator, first grouping allows multiple',
None, (alpha_gp_multi_sep_multi, beta_gp)): {
'valid': ['aa', 'a.aa', 'a..aa'],
'invalid': ['', 'a', 'aaa', 'aa.a', 'a.a', 'a.', 'a.a.', 'a.aa.',
'.a', '.a.a', 'a.a.aa', 'a...aa'],
'parts': [
('aa', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('a.aa', [(alpha_utype, 'letter1', 'a'),
(dot_multi_utype, '', '.'),
(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('a..aa', [(alpha_utype, 'letter1', 'a'),
(dot_multi_utype, '', '..'),
(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')])
]
},
('Inner required separator, last grouping allows multiple',
None, (beta_gp, alpha_gp_multi_sep)): {
'valid': ['aa', 'aa.a'],
'invalid': ['', 'a', 'aaa', 'a.aa', 'a.a', 'a.', 'a.a.', 'a.aa.',
'.a', '.a.a', 'aa.a.a'],
'parts': [
('aa', [(alpha_utype, 'letter2', 'a'),
(alpha_utype, 'letter1', 'a')]),
('aa.a', [(alpha_utype, 'letter2', 'a'),
(alpha_utype, 'letter1', 'a'),
(dot_utype, '', '.'),
(alpha_utype, 'letter1', 'a')])
]
},
# MULTIPLE Unit Types/Groupings, outer separators only
('Default required separator, first grouping allows multiple',
pipe_utype, (alpha_gp_multi, beta_gp)): {
'valid': ['a|a', 'aa|a'],
'invalid': ['', 'a', 'aaaa', 'a|', '|a', 'aaa|a', 'a|aa', 'a|a|a'],
'parts': [
('a|a', [(alpha_utype, 'letter1', 'a'),
(pipe_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')]),
('aa|a', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter1', 'a'),
(pipe_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')])
]
},
('Default required separator, first grouping uses multiple utype',
pipe_utype, (alpha_multi_gp, beta_gp)): {
'valid': ['a|a', 'aa|a'],
'invalid': ['', 'a', 'aaaa', 'a|', '|a', 'aaa|a', 'a|aa', 'a|a|a'],
'parts': [
('a|a', [(alpha_multi_utype, 'letter1', 'a'),
(pipe_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')]),
('aa|a', [(alpha_multi_utype, 'letter1', 'aa'),
(pipe_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')])
]
},
(('Default required separator, first grouping allows multiple and uses '
'multiple utype'),
pipe_utype, (alpha_multi_gp_multi, beta_gp)): {
'valid': ['a|a', 'aa|a', 'aaa|a', 'aaaa|a'],
'invalid': ['', 'a', 'aa', 'aaa', 'aaaa', 'aaaaa', 'aaaaaa', 'a|aa',
'aaaaa|a', 'a|a|a'],
'parts': [
('a|a', [(alpha_multi_utype, 'letter1', 'a'),
(pipe_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')]),
('aa|a', [(alpha_multi_utype, 'letter1', 'aa'),
(pipe_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')]),
('aaa|a', [(alpha_multi_utype, 'letter1', 'aa'),
(alpha_multi_utype, 'letter1', 'a'),
(pipe_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')]),
('aaaa|a', [(alpha_multi_utype, 'letter1', 'aa'),
(alpha_multi_utype, 'letter1', 'aa'),
(pipe_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')])
]
},
# MULTIPLE Unit Types/Groupings, outer+inner separators
(('Default required separator and inner required separator, first '
'grouping allows multiple'),
pipe_utype, (alpha_gp_multi_sep, beta_gp)): {
'valid': ['a|a', 'a.a|a'],
'invalid': ['', 'a', 'aa', 'a|', '|a', 'aa|a', 'a.|a', 'a.a.|a',
'a.a', 'a.a.a', 'aa.a', 'a.aa'],
'parts': [
('a|a', [(alpha_utype, 'letter1', 'a'),
(pipe_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')]),
('a.a|a', [(alpha_utype, 'letter1', 'a'),
(dot_utype, '', '.'),
(alpha_utype, 'letter1', 'a'),
(pipe_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')])
]
},
(('Default optional separator and inner required separator, first '
'grouping allows multiple'),
pipe_opt_utype, (alpha_gp_multi_sep, beta_gp)): {
'valid': ['aa', 'a|a', 'a.aa', 'a.a|a'],
'invalid': ['', 'a', 'a|', '|a', 'aa|a', 'a.|a', 'a.a.|a', 'a.a',
'a.a.a', 'aa.a'],
'parts': [
('aa', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('a|a', [(alpha_utype, 'letter1', 'a'),
(pipe_opt_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')]),
('a.aa', [(alpha_utype, 'letter1', 'a'),
(dot_utype, '', '.'),
(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('a.a|a', [(alpha_utype, 'letter1', 'a'),
(dot_utype, '', '.'),
(alpha_utype, 'letter1', 'a'),
(pipe_opt_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')])
]
},
(('Default required separator and inner optional separator, first '
'grouping allows multiple'),
pipe_utype, (alpha_gp_multi_sep_opt, beta_gp)): {
'valid': ['a|a', 'a.a|a', 'aa|a'],
'invalid': ['', 'a', 'aa', 'a|', '|a', 'a.|a', 'a.a.|a', 'a.a',
'a.a.a', 'aa.a', 'a.aa'],
'parts': [
('a|a', [(alpha_utype, 'letter1', 'a'),
(pipe_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')]),
('a.a|a', [(alpha_utype, 'letter1', 'a'),
(dot_opt_utype, '', '.'),
(alpha_utype, 'letter1', 'a'),
(pipe_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')]),
('aa|a', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter1', 'a'),
(pipe_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')])
]
},
(('Default optional separator and inner optional separator, first '
'grouping allows multiple'),
pipe_opt_utype, (alpha_gp_multi_sep_opt, beta_gp)): {
'valid': ['aa', 'aaa', 'a.aa', 'a|a', 'aa|a', 'a.a|a'],
'invalid': ['', 'a', 'aaaa', 'a|', '|a', 'a.|a', 'a.a.|a', 'a.a',
'a.a.a', 'aa.a'],
'parts': [
('aa', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('aaa', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('a.aa', [(alpha_utype, 'letter1', 'a'),
(dot_opt_utype, '', '.'),
(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('a|a', [(alpha_utype, 'letter1', 'a'),
(pipe_opt_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')]),
('aa|a', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter1', 'a'),
(pipe_opt_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')]),
('a.a|a', [(alpha_utype, 'letter1', 'a'),
(dot_opt_utype, '', '.'),
(alpha_utype, 'letter1', 'a'),
(pipe_opt_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')])
]
},
# OPTIONAL/MULTIPLE Unit Types/Groupings, inner separators only
(('Inner required separator, first grouping is optional and allows '
'multiple'),
None, (alpha_gp_optmulti_sep, beta_gp)): {
'valid': ['a', 'aa', 'a.aa'],
'invalid': ['', 'aaa', 'aa.a', 'a.a', 'a.', 'a.a.', 'a.aa.',
'.a', '.a.a', 'a.a.aa'],
'parts': [
('a', [(alpha_utype, 'letter2', 'a')]),
('aa', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('a.aa', [(alpha_utype, 'letter1', 'a'),
(dot_utype, '', '.'),
(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')])
]
},
(('Inner optional separator, first grouping is optional and allows '
'multiple'),
None, (alpha_gp_optmulti_sep_opt, beta_gp)): {
'valid': ['a', 'aa', 'a.aa', 'aaa'],
'invalid': ['', 'aa.a', 'a.a', 'a.', 'a.a.', 'a.aa.',
'.a', '.a.a', 'a.a.aa'],
'parts': [
('a', [(alpha_utype, 'letter2', 'a')]),
('aa', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('a.aa', [(alpha_utype, 'letter1', 'a'),
(dot_opt_utype, '', '.'),
(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('aaa', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')])
]
},
# OPTIONAL/MULTIPLE Unit Types/Groupings, outer+inner separators
(('Default required separator and inner required separator, first '
'grouping is optional and allows multiple'),
pipe_utype, (alpha_gp_optmulti_sep, beta_gp)): {
'valid': ['a', 'a|a', 'a.a|a'],
    'invalid': ['', '.', '.|', '.|a', 'aa', 'a|', '|a', 'aa|a', 'a.|a',
'a.a.|a', 'a.a', 'a.a.a', 'aa.a', 'a.aa'],
'parts': [
('a', [(alpha_utype, 'letter2', 'a')]),
('a|a', [(alpha_utype, 'letter1', 'a'),
(pipe_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')]),
('a.a|a', [(alpha_utype, 'letter1', 'a'),
(dot_utype, '', '.'),
(alpha_utype, 'letter1', 'a'),
(pipe_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')])
]
},
# OPTIONAL/MIXED Unit Types/Groupings
(('No separators; first grouping is optional and has mixed Unit types'),
None, (mixed_gp_opt, beta_gp)): {
'valid': ['a', 'aa', '1a'],
'invalid': ['', '1', 'a1'],
'parts': [
('a', [(alpha_utype, 'letter2', 'a')]),
('aa', [(alpha_utype, 'mixed', 'a'),
(alpha_utype, 'letter2', 'a')]),
('1a', [(digit_utype, 'mixed', '1'),
(alpha_utype, 'letter2', 'a')]),
]
},
(('No separators; first grouping has mixed Unit types, both types are '
'optional'),
None, (mixed_opt_gp, beta_gp)): {
'valid': ['a', 'aa', '1a'],
'invalid': ['', '1', 'a1'],
'parts': [
('a', [(alpha_utype, 'letter2', 'a')]),
('aa', [(alpha_opt_utype, 'mixed', 'a'),
(alpha_utype, 'letter2', 'a')]),
('1a', [(digit_opt_utype, 'mixed', '1'),
(alpha_utype, 'letter2', 'a')]),
]
},
(('No separators; first grouping has mixed Unit types, first type is '
'optional'),
None, (mixed_mixed_opt_gp1, beta_gp)): {
'valid': ['a', 'aa', '1a'],
'invalid': ['', '1', 'a1'],
'parts': [
('a', [(alpha_utype, 'letter2', 'a')]),
('aa', [(alpha_opt_utype, 'mixed', 'a'),
(alpha_utype, 'letter2', 'a')]),
('1a', [(digit_utype, 'mixed', '1'),
(alpha_utype, 'letter2', 'a')]),
]
},
(('No separators; first grouping has mixed Unit types, second type is '
'optional'),
None, (mixed_mixed_opt_gp2, beta_gp)): {
'valid': ['a', 'aa', '1a'],
'invalid': ['', '1', 'a1'],
'parts': [
('a', [(alpha_utype, 'letter2', 'a')]),
('aa', [(alpha_utype, 'mixed', 'a'),
(alpha_utype, 'letter2', 'a')]),
('1a', [(digit_opt_utype, 'mixed', '1'),
(alpha_utype, 'letter2', 'a')]),
]
},
(('No separators; first grouping is optional and has mixed Unit types, '
'both types are optional'),
None, (mixed_opt_gp_opt, beta_gp)): {
'valid': ['a', 'aa', '1a'],
'invalid': ['', '1', 'a1'],
'parts': [
('a', [(alpha_utype, 'letter2', 'a')]),
('aa', [(alpha_opt_utype, 'mixed', 'a'),
(alpha_utype, 'letter2', 'a')]),
('1a', [(digit_opt_utype, 'mixed', '1'),
(alpha_utype, 'letter2', 'a')]),
]
},
(('No separators; first grouping is optional and has mixed Unit types, '
'first type is optional'),
None, (mixed_mixed_opt_gp_opt1, beta_gp)): {
'valid': ['a', 'aa', '1a'],
'invalid': ['', '1', 'a1'],
'parts': [
('a', [(alpha_utype, 'letter2', 'a')]),
('aa', [(alpha_opt_utype, 'mixed', 'a'),
(alpha_utype, 'letter2', 'a')]),
('1a', [(digit_utype, 'mixed', '1'),
(alpha_utype, 'letter2', 'a')]),
]
},
(('No separators; first grouping is optional and has mixed Unit types, '
'second type is optional'),
None, (mixed_mixed_opt_gp_opt2, beta_gp)): {
'valid': ['a', 'aa', '1a'],
'invalid': ['', '1', 'a1'],
'parts': [
('a', [(alpha_utype, 'letter2', 'a')]),
('aa', [(alpha_utype, 'mixed', 'a'),
(alpha_utype, 'letter2', 'a')]),
('1a', [(digit_opt_utype, 'mixed', '1'),
(alpha_utype, 'letter2', 'a')]),
]
},
(('No separators; second grouping is optional and has mixed Unit types'),
None, (alpha_gp, mixed_gp_opt)): {
'valid': ['a', 'aa', 'a1'],
'invalid': ['', '1', '1a'],
'parts': [
('a', [(alpha_utype, 'letter1', 'a')]),
('aa', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'mixed', 'a')]),
('a1', [(alpha_utype, 'letter1', 'a'),
(digit_utype, 'mixed', '1')]),
]
},
(('No separators; second grouping has mixed Unit types, both types are '
'optional'),
None, (alpha_gp, mixed_opt_gp)): {
'valid': ['a', 'aa', 'a1'],
'invalid': ['', '1', '1a'],
'parts': [
('a', [(alpha_utype, 'letter1', 'a')]),
('aa', [(alpha_utype, 'letter1', 'a'),
(alpha_opt_utype, 'mixed', 'a')]),
('a1', [(alpha_utype, 'letter1', 'a'),
(digit_opt_utype, 'mixed', '1')]),
]
},
(('No separators; second grouping has mixed Unit types, first type is '
'optional'),
None, (alpha_gp, mixed_mixed_opt_gp1)): {
'valid': ['a', 'aa', 'a1'],
'invalid': ['', '1', '1a'],
'parts': [
('a', [(alpha_utype, 'letter1', 'a')]),
('aa', [(alpha_utype, 'letter1', 'a'),
(alpha_opt_utype, 'mixed', 'a')]),
('a1', [(alpha_utype, 'letter1', 'a'),
(digit_utype, 'mixed', '1')]),
]
},
(('No separators; second grouping has mixed Unit types, second type is '
'optional'),
None, (alpha_gp, mixed_mixed_opt_gp2)): {
'valid': ['a', 'aa', 'a1'],
'invalid': ['', '1', '1a'],
'parts': [
('a', [(alpha_utype, 'letter1', 'a')]),
('aa', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'mixed', 'a')]),
('a1', [(alpha_utype, 'letter1', 'a'),
(digit_opt_utype, 'mixed', '1')]),
]
},
(('No separators; second grouping is optional and has mixed Unit types, '
'both types are optional'),
None, (alpha_gp, mixed_opt_gp_opt)): {
'valid': ['a', 'aa', 'a1'],
'invalid': ['', '1', '1a'],
'parts': [
('a', [(alpha_utype, 'letter1', 'a')]),
('aa', [(alpha_utype, 'letter1', 'a'),
(alpha_opt_utype, 'mixed', 'a')]),
('a1', [(alpha_utype, 'letter1', 'a'),
(digit_opt_utype, 'mixed', '1')]),
]
},
(('No separators; second grouping is optional and has mixed Unit types, '
'first type is optional'),
None, (alpha_gp, mixed_mixed_opt_gp_opt1)): {
'valid': ['a', 'aa', 'a1'],
'invalid': ['', '1', '1a'],
'parts': [
('a', [(alpha_utype, 'letter1', 'a')]),
('aa', [(alpha_utype, 'letter1', 'a'),
(alpha_opt_utype, 'mixed', 'a')]),
('a1', [(alpha_utype, 'letter1', 'a'),
(digit_utype, 'mixed', '1')]),
]
},
(('No separators; second grouping is optional and has mixed Unit types, '
'second type is optional'),
None, (alpha_gp, mixed_mixed_opt_gp_opt2)): {
'valid': ['a', 'aa', 'a1'],
'invalid': ['', '1', '1a'],
'parts': [
('a', [(alpha_utype, 'letter1', 'a')]),
('aa', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'mixed', 'a')]),
('a1', [(alpha_utype, 'letter1', 'a'),
(digit_opt_utype, 'mixed', '1')]),
]
},
# MAX Unit Types/Groupings
('No separators, first grouping has no maximum limit',
None, (alpha_gp_max, beta_gp)): {
'valid': ['aa', 'aaa', 'aaaa'],
'invalid': ['', 'a'],
'parts': [
('aa', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('aaa', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('aaaa', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
]
},
('No separators, first grouping has utype with no maximum limit',
None, (alpha_max_gp, beta_gp)): {
'valid': ['aa', 'aaa', 'aaaa'],
'invalid': ['', 'a'],
'parts': [
('aa', [(alpha_max_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('aaa', [(alpha_max_utype, 'letter1', 'aa'),
(alpha_utype, 'letter2', 'a')]),
('aaaa', [(alpha_max_utype, 'letter1', 'aaa'),
(alpha_utype, 'letter2', 'a')]),
]
},
(('No separators, first grouping has no maximum limit and a utype with no '
'maximum limit'),
None, (alpha_max_gp_max, beta_gp)): {
'valid': ['aa', 'aaa', 'aaaa'],
'invalid': ['', 'a'],
'parts': [
('aa', [(alpha_max_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('aaa', [(alpha_max_utype, 'letter1', 'aa'),
(alpha_utype, 'letter2', 'a')]),
('aaaa', [(alpha_max_utype, 'letter1', 'aaa'),
(alpha_utype, 'letter2', 'a')]),
]
},
('No separators, last grouping has no maximum limit',
None, (beta_gp, alpha_gp_max)): {
'valid': ['aa', 'aaa', 'aaaa'],
'invalid': ['', 'a'],
'parts': [
('aa', [(alpha_utype, 'letter2', 'a'),
(alpha_utype, 'letter1', 'a')]),
('aaa', [(alpha_utype, 'letter2', 'a'),
(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter1', 'a')]),
('aaaa', [(alpha_utype, 'letter2', 'a'),
(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter1', 'a')]),
]
},
('Inner required separator, first grouping has no maximum limit',
None, (alpha_gp_max_sep, beta_gp)): {
'valid': ['aa', 'a.aa', 'a.a.aa'],
'invalid': ['', 'a', 'aaa', 'aa.aa'],
'parts': [
('aa', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('a.aa', [(alpha_utype, 'letter1', 'a'),
(dot_utype, '', '.'),
(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('a.a.aa', [(alpha_utype, 'letter1', 'a'),
(dot_utype, '', '.'),
(alpha_utype, 'letter1', 'a'),
(dot_utype, '', '.'),
(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
]
},
(('Inner required separator, first grouping has no maximum limit and a '
'utype with no maximum limit'),
None, (alpha_max_gp_max_sep, beta_gp)): {
'valid': ['aa', 'aaa', 'a.aa', 'aa.aa', 'aa.aaa', 'a.a.aa',
'aa.aaaa.aaa'],
'invalid': ['', 'a', 'a.a.a'],
'parts': [
('aa', [(alpha_max_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('aaa', [(alpha_max_utype, 'letter1', 'aa'),
(alpha_utype, 'letter2', 'a')]),
('a.aa', [(alpha_max_utype, 'letter1', 'a'),
(dot_utype, '', '.'),
(alpha_max_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('aa.aa', [(alpha_max_utype, 'letter1', 'aa'),
(dot_utype, '', '.'),
(alpha_max_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('aa.aaa', [(alpha_max_utype, 'letter1', 'aa'),
(dot_utype, '', '.'),
(alpha_max_utype, 'letter1', 'aa'),
(alpha_utype, 'letter2', 'a')]),
('a.a.aa', [(alpha_max_utype, 'letter1', 'a'),
(dot_utype, '', '.'),
(alpha_max_utype, 'letter1', 'a'),
(dot_utype, '', '.'),
(alpha_max_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('aa.aaaa.aa', [(alpha_max_utype, 'letter1', 'aa'),
(dot_utype, '', '.'),
(alpha_max_utype, 'letter1', 'aaaa'),
(dot_utype, '', '.'),
(alpha_max_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
]
},
(('Inner optional separator, first grouping has no maximum limit and a '
'utype with no maximum limit'),
None, (alpha_max_gp_max_sep_opt, beta_gp)): {
'valid': ['aa', 'aaa', 'a.aa', 'aa.aa', 'aa.aaa', 'a.a.aa',
'aa.aaaa.aaa'],
'invalid': ['', 'a', 'a.a.a'],
'parts': [
('aa', [(alpha_max_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('aaa', [(alpha_max_utype, 'letter1', 'aa'),
(alpha_utype, 'letter2', 'a')]),
('a.aa', [(alpha_max_utype, 'letter1', 'a'),
(dot_opt_utype, '', '.'),
(alpha_max_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('aa.aa', [(alpha_max_utype, 'letter1', 'aa'),
(dot_opt_utype, '', '.'),
(alpha_max_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('aa.aaa', [(alpha_max_utype, 'letter1', 'aa'),
(dot_opt_utype, '', '.'),
(alpha_max_utype, 'letter1', 'aa'),
(alpha_utype, 'letter2', 'a')]),
('a.a.aa', [(alpha_max_utype, 'letter1', 'a'),
(dot_opt_utype, '', '.'),
(alpha_max_utype, 'letter1', 'a'),
(dot_opt_utype, '', '.'),
(alpha_max_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('aa.aaaa.aa', [(alpha_max_utype, 'letter1', 'aa'),
(dot_opt_utype, '', '.'),
(alpha_max_utype, 'letter1', 'aaaa'),
(dot_opt_utype, '', '.'),
(alpha_max_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
]
},
# MAX Unit Types/Groupings, outer separators only
('Default required separator, first grouping has no maximum limit',
pipe_utype, (alpha_gp_max, beta_gp)): {
'valid': ['a|a', 'aa|a', 'aaa|a'],
'invalid': ['', 'a', '|', 'a|aa', 'a|a|a'],
'parts': [
('a|a', [(alpha_utype, 'letter1', 'a'),
(pipe_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')]),
('aa|a', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter1', 'a'),
(pipe_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')]),
('aaa|a', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter1', 'a'),
(pipe_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')]),
]
},
('Default optional separator, first grouping has no maximum limit',
pipe_opt_utype, (alpha_gp_max, beta_gp)): {
'valid': ['aa', 'aaa', 'aaaa', 'a|a', 'aa|a', 'aaa|a'],
'invalid': ['', 'a', '|', 'a|aa', 'a|a|a'],
'parts': [
('aa', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('aaa', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('aaaa', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter2', 'a')]),
('a|a', [(alpha_utype, 'letter1', 'a'),
(pipe_opt_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')]),
('aa|a', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter1', 'a'),
(pipe_opt_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')]),
('aaa|a', [(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter1', 'a'),
(alpha_utype, 'letter1', 'a'),
(pipe_opt_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')]),
]
},
# MAX Unit Types/Groupings, inner+outer separators
(('Default required separator and inner required separator, first '
'grouping has no maximum limit'),
pipe_utype, (alpha_gp_max_sep, beta_gp)): {
'valid': ['a|a', 'a.a|a', 'a.a.a|a'],
'invalid': ['', 'a', 'aa', 'aa|a', 'a|aa'],
'parts': [
('a|a', [(alpha_utype, 'letter1', 'a'),
(pipe_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')]),
('a.a|a', [(alpha_utype, 'letter1', 'a'),
(dot_utype, '', '.'),
(alpha_utype, 'letter1', 'a'),
(pipe_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')]),
('a.a.a|a', [(alpha_utype, 'letter1', 'a'),
(dot_utype, '', '.'),
(alpha_utype, 'letter1', 'a'),
(dot_utype, '', '.'),
(alpha_utype, 'letter1', 'a'),
(pipe_utype, 0, '|'),
(alpha_utype, 'letter2', 'a')]),
]
},
}
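# Each COMPOUND_TEMPLATE_DATA value maps 'valid'/'invalid' to strings that
# must pass/fail validation and 'parts' to (test string, expected triples)
# pairs, where each triple is (unit type, grouping name, matched value) and
# an int name stands in for a numbered default separator (see
# test_compound_template_parts below).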
COMPOUND_VALID_PARAMS = generate_params(COMPOUND_TEMPLATE_DATA, 'valid')
COMPOUND_INVALID_PARAMS = generate_params(COMPOUND_TEMPLATE_DATA, 'invalid')
COMPOUND_PARTS_PARAMS = generate_params(COMPOUND_TEMPLATE_DATA, 'parts')
# Tests
@pytest.mark.parametrize('separator_type, gr_params, fails', [
(None, [alpha_gp], False),
(pipe_utype, [alpha_gp], False),
(pipe_utype, [alpha_gp, beta_gp], False),
(pipe_utype, [alpha_gp, sep_pipe_gp, beta_gp], False),
(None, [alpha_gp, alpha_gp], '``name``.* must be unique'),
(pipe_utype, None, 'groups.* must be a list'),
('a', [alpha_gp], 'separator_type.* must be None or a Unit'),
(None, [sep_pipe_gp, alpha_gp], 'cannot begin or end with a separator'),
(None, [alpha_gp, sep_pipe_gp], 'cannot begin or end with a separator'),
(None, [alpha_gp, sep_pipe_gp, sep_dot_gp, beta_gp],
'cannot have multiple separator groupings in a row')
])
def test_compound_template_initialization(separator_type, gr_params, fails):
"""Passing the given ``separator_type`` and ``groupings`` as kwarg
parameters to initialize a CompoundTemplate object should cause it
to pass (if ``fails`` is False) or raise a SettingsError that
contains the regex given in ``fails``.
"""
if gr_params is not None:
gr_params = [gp_tuple_to_dict(gp_tuple) for gp_tuple in gr_params]
if fails:
with pytest.raises(e.SettingsError) as excinfo:
t.CompoundTemplate(separator_type=separator_type, groups=gr_params)
assert excinfo.match(fails)
else:
assert t.CompoundTemplate(separator_type=separator_type,
groups=gr_params)
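# Illustrative construction sketch (mirroring the parametrized cases above):
#   groups = [gp_tuple_to_dict(gp) for gp in (alpha_gp, sep_pipe_gp, beta_gp)]
#   template = t.CompoundTemplate(separator_type=pipe_utype, groups=groups)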
@pytest.mark.parametrize('params, tstr', COMPOUND_VALID_PARAMS)
def test_compound_template_validate_true(params, tstr):
"""Given a set of parameters for initializing a CompoundTemplate
and a test string, the test string should return True when passed
to the CompoundTemplate object's ``validate`` method.
"""
separator_type = params[1]
gr_params = [gp_tuple_to_dict(gp_tuple) for gp_tuple in params[2]]
template = t.CompoundTemplate(separator_type=separator_type,
groups=gr_params)
assert bool(template.validate(tstr)) is True
@pytest.mark.parametrize('params, tstr', COMPOUND_INVALID_PARAMS)
def test_compound_template_validate_error(params, tstr):
"""Given a set of paremeters for initializing a CompoundTemplate
and a test string, the test string should raise an
InvalidCallNumberStringError when passed to the CompoundTemplate
object's ``validate`` method.
"""
separator_type = params[1]
gr_params = [gp_tuple_to_dict(gp_tuple) for gp_tuple in params[2]]
template = t.CompoundTemplate(separator_type=separator_type,
groups=gr_params)
with pytest.raises(e.InvalidCallNumberStringError):
template.validate(tstr)
@pytest.mark.parametrize('params, tstr, expected_parts', COMPOUND_PARTS_PARAMS)
def test_compound_template_parts(params, tstr, expected_parts):
"""Given a set of parameters for initializing a CompoundTemplate,
a test string, and a list of expected parts, passing the test
string to the CompoundTemplate object's ``cnstr_to_parts`` method
should result in the expected parts list.
"""
separator_type = params[1]
gr_params = [gp_tuple_to_dict(gp_tuple) for gp_tuple in params[2]]
template = t.CompoundTemplate(separator_type=separator_type,
groups=gr_params)
results = template.cnstr_to_parts(tstr, {})
flattened_results = []
for result in results:
if isinstance(result, list):
flattened_results.extend(result)
elif result is not None:
flattened_results.append(result)
for i, expected in enumerate(expected_parts):
expected_type, expected_name, expected_value = expected
if isinstance(expected_name, int):
default_name = t.CompoundTemplate.default_separator_name
expected_name = '{}{}'.format(default_name, expected_name)
assert type(flattened_results[i]) == expected_type
assert flattened_results[i].name == expected_name
assert str(flattened_results[i]) == expected_value
| bsd-3-clause | -6,235,460,156,068,953,000 | 40.945652 | 79 | 0.463736 | false |
francoricci/sapspid | lib/response.py | 1 | 4278 | #import jsonlib2
import globalsObj
import logging
import traceback
import tornado.web
#import ujson
#import simplejson
import jsonpickle
import uuid
class Result(object):
def __init__(self, **kwargs):
#self.rootLogger = logging.getLogger('root')
        for name, value in kwargs.items():
            setattr(self, name, value)
def reload(self,**kwargs):
self.__init__(**kwargs)
class Error(object):
def __init__(self, **kwargs):
#self.rootLogger = logging.getLogger('root')
        for name, value in kwargs.items():
            setattr(self, name, value)
def setSection(self,section):
if globalsObj.errors_configuration.has_section(section):
errorsDict = dict(globalsObj.errors_configuration.items(section))
            for key, val in errorsDict.items():
                setattr(self, key, val)
#if self.code is not None:
# self.code = int(self.code)
return True
else:
logging.getLogger(__name__).error("Error section %s not present" % (section))
return False
def reload(self,**kwargs):
self.__init__(**kwargs)
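# Illustrative sketch (hypothetical ini section read by Error.setSection;
# each key/value pair becomes an attribute on the Error instance):
#   [3]
#   code = 3
#   message = generic error
# After err.setSection('3'), err.code and err.message are available.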
class ResponseObj(object):
def __init__(self, ID = None, **kwargs):
#self.rootLogger = logging.getLogger('root')
self.apiVersion = globalsObj.configuration.get('version','version')
self.error = None
self.result = None
self.setID(ID)
self.error = Error(**kwargs)
def setResult(self, **kwargs):
self.result = Result(**kwargs)
def setError(self, section=None):
if section is not None:
if self.error.setSection(section):
return True
else:
return False
def setID(self, ID):
if ID is None or ID == "":
self.id = str(uuid.uuid4())
else:
self.id = ID
def jsonWrite(self):
try:
#jsonOut = jsonlib2.write(self, default=lambda o: o.__dict__,sort_keys=False, indent=4,escape_slash=False)
jsonOut = jsonpickle.encode(self, unpicklable=False)
#jsonOut = ujson.dumps(self, ensure_ascii=False, indent=4)
#jsonOut2 = simplejson.dumps(pippo, ensure_ascii=False, indent=4)
return jsonOut
except BaseException as error:
logging.getLogger(__name__).error("Error on json encoding %s" % (error.message))
return False
class RequestHandler(tornado.web.RequestHandler):
@property
def executor(self):
return self.application.executor
def compute_etag(self):
return None
def write_error(self, status_code, errorcode = '3', **kwargs):
self.set_header('Content-Type', 'application/json; charset=UTF-8')
self.set_status(status_code)
# debug info
if self.settings.get("serve_traceback") and "exc_info" in kwargs:
debugTmp = ""
for line in traceback.format_exception(*kwargs["exc_info"]):
debugTmp += line
getResponse = ResponseObj(debugMessage=debugTmp,httpcode=status_code,devMessage=self._reason)
else:
getResponse = ResponseObj(httpcode=status_code,devMessage=self._reason)
getResponse.setError(errorcode)
getResponse.setResult()
self.write(getResponse.jsonWrite())
self.finish()
class StaticFileHandler(tornado.web.StaticFileHandler):
def compute_etag(self):
return None
def write_error(self, status_code, errorcode = '3', **kwargs):
self.set_header('Content-Type', 'application/json; charset=UTF-8')
self.set_status(status_code)
# debug info
if self.settings.get("serve_traceback") and "exc_info" in kwargs:
debugTmp = ""
for line in traceback.format_exception(*kwargs["exc_info"]):
debugTmp += line
getResponse = ResponseObj(debugMessage=debugTmp,httpcode=status_code,devMessage=self._reason)
else:
getResponse = ResponseObj(httpcode=status_code,devMessage=self._reason)
getResponse.setError(errorcode)
getResponse.setResult()
self.write(getResponse.jsonWrite())
self.finish()
| mit | -6,579,047,088,402,858,000 | 31.907692 | 119 | 0.605423 | false |
tatianass/goodreads2 | goodreads/request.py | 1 | 1066 | import requests
import xmltodict
import json
class GoodreadsRequestException(Exception):
def __init__(self, error_msg, url):
self.error_msg = error_msg
self.url = url
    def __str__(self):
        return '%s: %s' % (self.url, self.error_msg)
class GoodreadsRequest():
def __init__(self, client, path, query_dict, req_format='xml'):
"""Initialize request object."""
self.params = query_dict
self.params.update(client.query_dict)
self.host = client.base_url
self.path = path
self.req_format = req_format
def request(self):
resp = requests.get(self.host+self.path, params=self.params, timeout=60)
if resp.status_code != 200:
raise GoodreadsRequestException(resp.reason, self.path)
if self.req_format == 'xml':
data_dict = xmltodict.parse(resp.content)
return data_dict['GoodreadsResponse']
elif self.req_format == 'json':
return json.loads(resp.content)
else:
raise Exception("Invalid format")
| mit | -593,627,982,775,317,100 | 30.352941 | 80 | 0.61257 | false |
gem/sidd | ui/dlg_result.py | 1 | 7317 | # Copyright (c) 2011-2013, ImageCat Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
dialog for displaying result details
"""
from PyQt4.QtGui import QDialog, QCloseEvent, QAbstractItemView
from PyQt4.QtCore import Qt, pyqtSlot, QSettings, QVariant, QString, QAbstractTableModel
from operator import itemgetter
from sidd.constants import logAPICall, SIDD_COMPANY, SIDD_APP_NAME, SIDD_VERSION, CNT_FIELD_NAME
from ui.constants import logUICall, UI_PADDING
from ui.qt.dlg_res_detail_ui import Ui_tablePreviewDialog
class DialogResult(Ui_tablePreviewDialog, QDialog):
"""
dialog for visualize result details
"""
# CONSTANTS
#############################
UI_WINDOW_GEOM = 'dlg_result/geometry'
# constructor
###############################
def __init__(self):
super(DialogResult, self).__init__()
self.ui = Ui_tablePreviewDialog()
self.ui.setupUi(self)
self.ui.table_result.setSelectionMode(QAbstractItemView.SingleSelection)
self.ui.table_result.setSortingEnabled(True)
# connect slots (ui event)
self.ui.btn_ok.clicked.connect(self.accept)
self.settings = QSettings(SIDD_COMPANY, '%s %s' %(SIDD_APP_NAME, SIDD_VERSION));
self.restoreGeometry(self.settings.value(self.UI_WINDOW_GEOM).toByteArray());
# window event handler overrides
#############################
def resizeEvent(self, event):
""" handle window resize """
self.ui.table_result.resize(self.width()-2*UI_PADDING,
self.height() - self.ui.table_result.y()-self.ui.btn_ok.height()-2*UI_PADDING)
below_table = self.height() - self.ui.btn_ok.height() - UI_PADDING
self.ui.lb_bldgcount.move(UI_PADDING, below_table)
self.ui.txt_bldgcount.move(self.ui.lb_bldgcount.width()+(2*UI_PADDING), below_table)
self.ui.btn_ok.move(self.width()-UI_PADDING-self.ui.btn_ok.width(), below_table)
@pyqtSlot(QCloseEvent)
def closeEvent(self, event):
self.settings.setValue(self.UI_WINDOW_GEOM, self.saveGeometry());
super(DialogResult, self).closeEvent(event)
# public method
###############################
@logUICall
def showExposureData(self, header, selected):
"""
display selected rows with header
"""
fnames =[] # retrieve field name as table headers
cnt_sum = 0 # total number of buildings
# find index for building count field
cnt_idx = -1
for i, f in header.iteritems():
fnames.append(f.name())
if f.name() == CNT_FIELD_NAME:
cnt_idx = i
        if cnt_idx != -1: # building count index is found
# increment building count
for s in selected:
cnt_sum += s[cnt_idx].toDouble()[0]
# display result
self.resultDetailModel = ResultDetailTableModel(header.values(), selected)
self.ui.table_result.setModel(self.resultDetailModel)
self.ui.table_result.sortByColumn(3, Qt.AscendingOrder)
# display exposure specific ui elements
self.ui.txt_bldgcount.setVisible(True)
self.ui.lb_bldgcount.setVisible(True)
self.ui.txt_bldgcount.setText('%d'% round(cnt_sum))
self.ui.txt_bldgcount.setReadOnly(True)
@logUICall
def showInfoData(self, header, selected):
# sync UI
self.resultDetailModel = ResultDetailTableModel(header.values(), selected)
self.ui.table_result.setModel(self.resultDetailModel)
# hide exposure specific ui elements
self.ui.txt_bldgcount.setVisible(False)
self.ui.lb_bldgcount.setVisible(False)
class ResultDetailTableModel(QAbstractTableModel):
"""
table model supporting visualization of result detail
"""
# constructor
###############################
def __init__(self, fields, selected):
""" constructor """
QAbstractTableModel.__init__(self)
# table header
self.headers = fields
# create copy of values to be shown and modified
# this format makes it easier to sort
self.selected = []
for row in selected:
new_row = []
for i, v in enumerate(row.values()):
if self.headers[i].type() == QVariant.Int:
new_row.append(v.toInt()[0])
elif self.headers[i].type() == QVariant.Double:
new_row.append(v.toDouble()[0])
else:
new_row.append(str(v.toString()))
self.selected.append(new_row)
# override public method
###############################
@logAPICall
def columnCount(self, parent):
""" only two columns exist. always return 2 """
return len(self.headers)
@logAPICall
def rowCount(self, parent):
""" number of rows same as number of siblings """
return len(self.selected)
@logAPICall
def headerData(self, section, orientation, role):
""" return data to diaply for header row """
if role == Qt.DisplayRole:
if orientation == Qt.Horizontal:
return QString(self.headers[section].name())
else:
# no vertical header
return QVariant()
else:
return QVariant()
@logAPICall
def data(self, index, role):
""" return data to be displayed in a cell """
if role == Qt.DisplayRole:
logAPICall.log('row %s column %s ' %(index.row(), index.column()),
logAPICall.DEBUG_L2)
return QString("%s" % self.selected[index.row()][index.column()])
else:
return QVariant()
def sort(self, ncol, order):
""" sort table """
        if ncol < 0 or ncol >= len(self.headers):
return
self.layoutAboutToBeChanged.emit()
self.selected.sort(key=itemgetter(ncol), reverse=(order==Qt.DescendingOrder))
self.layoutChanged.emit()
def flags(self, index):
""" cell condition flag """
# NOTE:
# ItemIsEditable flag requires data() and setData() function
return Qt.ItemIsEnabled
| agpl-3.0 | 7,166,657,375,103,733,000 | 37.708108 | 138 | 0.569906 | false |
certik/sfepy | sfepy/mechanics/matcoefs.py | 1 | 3273 | from sfepy.base.base import *
##
# c: 22.07.2008
def youngpoisson_to_lame( young, poisson, plane = 'stress' ):
if plane == 'stress':
lam = young*poisson/(1.0 - poisson*poisson)
mu = young/(2.0*(1.0 + poisson))
elif plane == 'strain':
lam = young*poisson/((1.0 + poisson)*(1.0 - 2.0*poisson))
mu = young/(2.0*(1.0 + poisson))
return {'lambda' : lam, 'mu' : mu }
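# Example (hypothetical steel-like values, not from this module): for
# young = 210e9 and poisson = 0.3 the plane-stress branch gives
#   lambda = 210e9 * 0.3 / (1 - 0.3**2) ~= 69.2e9
#   mu     = 210e9 / (2 * (1 + 0.3))    ~= 80.8e9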
##
# c: 22.07.2008
def stiffness_tensor_lame( dim, lam, mu ):
sym = (dim + 1) * dim / 2
o = nm.array( [1.] * dim + [0.] * (sym - dim), dtype = nm.float64 )
oot = nm.outer( o, o )
return lam * oot + mu * nm.diag( o + 1.0 )
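# For dim = 3 the matrix above is the standard 6x6 isotropic stiffness:
# lam + 2 * mu on the diagonal of the upper-left 3x3 block, lam on its
# off-diagonal entries, and mu on the diagonal of the shear block.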
##
# c: 22.07.2008
def stiffness_tensor_youngpoisson( dim, young, poisson, plane = 'stress' ):
lame = youngpoisson_to_lame( young, poisson, plane )
    return stiffness_tensor_lame( dim, lame['lambda'], lame['mu'] )
class TransformToPlane( Struct ):
"""Transformmations of constitutive law coefficients of 3D problems to 2D."""
def __init__( self, iplane = None ):
"""`iplane` ... vector of indices denoting the plane, e.g.: [0, 1]"""
if iplane is None:
iplane = [0, 1]
# Choose the "master" variables and the "slave" ones
# ... for vectors
i_m = nm.sort( iplane )
i_s = nm.setdiff1d( nm.arange( 3 ), i_m )
# ... for second order tensors (symmetric storage)
i_ms = {(0, 1) : [0, 1, 3],
(0, 2) : [0, 2, 4],
(1, 2) : [1, 2, 5]}[tuple( i_m )]
i_ss = nm.setdiff1d( nm.arange( 6 ), i_ms )
Struct.__init__( self, iplane = iplane,
i_m = i_m, i_s = i_s,
i_ms = i_ms, i_ss = i_ss )
def tensor_plane_stress( self, c3 = None, d3 = None, b3 = None ):
"""Transforms all coefficients of the piezoelectric constitutive law
from 3D to plane stress problem in 2D: strain/stress ordering/ 11 22
33 12 13 23. If `d3` is None, uses only the stiffness tensor `c3`.
`c3` ... stiffness tensor
`d3` ... dielectric tensor
`b3` ... piezoelectric coupling tensor"""
mg = nm.meshgrid
cs = c3[mg(self.i_ss,self.i_ss)]
cm = c3[mg(self.i_ss,self.i_ms)].T
if d3 is None: # elasticity only.
A = cs
Feps = cm
Ainv = nm.linalg.inv( A )
c2 = c3[mg(self.i_ms,self.i_ms)] \
- nm.dot( Feps.T, nm.dot( Ainv, Feps ) )
return c2
else:
dm = d3[mg(self.i_s,self.i_m)].T
ds = d3[mg(self.i_s,self.i_s)]
ii = mg( self.i_s, self.i_ss )
A = nm.r_[nm.c_[cs, b3[ii]],
nm.c_[b3[ii].T, -ds]] #=> sym !!!
F = nm.r_[nm.c_[cm, b3[mg(self.i_m,self.i_ss)]],
nm.c_[b3[mg(self.i_s,self.i_ms)].T, -dm ]]
Feps = F[:,:3]
FE = F[:,3:]
Ainv = nm.linalg.inv( A )
c2 = c3[mg(self.i_ms,self.i_ms)] \
- nm.dot( Feps.T, nm.dot( Ainv, Feps ) )
d2 = d3[mg(self.i_m,self.i_m)] \
- nm.dot( FE.T, nm.dot( Ainv, FE ) )
b2 = b3[mg(self.i_m,self.i_ms)].T \
- nm.dot( FE.T, nm.dot( Ainv, Feps ) )
return c2, d2, b2
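# The reductions above are static condensation of the "slave" components:
# c2 = c_mm - Feps^T A^{-1} Feps (and analogously d2, b2 in the piezo
# case), where A holds the slave-slave blocks and Feps/FE the
# slave-master couplings.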
| bsd-3-clause | 2,795,579,541,998,849,000 | 32.060606 | 81 | 0.486709 | false |
oneconvergence/group-based-policy | gbpservice/neutron/db/grouppolicy/group_policy_mapping_db.py | 1 | 18294 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from sqlalchemy import orm
from neutron.common import log
from neutron.db import model_base
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from gbpservice.neutron.db import gbp_quota_db as gquota
from gbpservice.neutron.db.grouppolicy import group_policy_db as gpdb
LOG = logging.getLogger(__name__)
class PolicyTargetMapping(gpdb.PolicyTarget):
"""Mapping of PolicyTarget to Neutron Port."""
__table_args__ = {'extend_existing': True}
__mapper_args__ = {'polymorphic_identity': 'mapping'}
# REVISIT(ivar): Set null on delete is a temporary workaround until Nova
# bug 1158684 is fixed.
port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id',
ondelete='SET NULL'),
nullable=True, unique=True)
class PTGToSubnetAssociation(model_base.BASEV2):
"""Many to many relation between PolicyTargetGroup and Subnets."""
__tablename__ = 'gp_ptg_to_subnet_associations'
policy_target_group_id = sa.Column(
sa.String(36), sa.ForeignKey('gp_policy_target_groups.id'),
primary_key=True)
subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id'),
primary_key=True)
class PolicyTargetGroupMapping(gpdb.PolicyTargetGroup):
"""Mapping of PolicyTargetGroup to set of Neutron Subnets."""
__table_args__ = {'extend_existing': True}
__mapper_args__ = {'polymorphic_identity': 'mapping'}
subnets = orm.relationship(PTGToSubnetAssociation,
cascade='all', lazy="joined")
class L2PolicyMapping(gpdb.L2Policy):
"""Mapping of L2Policy to Neutron Network."""
__table_args__ = {'extend_existing': True}
__mapper_args__ = {'polymorphic_identity': 'mapping'}
network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id'),
nullable=True, unique=True)
class L3PolicyRouterAssociation(model_base.BASEV2):
"""Models the many to many relation between L3Policies and Routers."""
__tablename__ = 'gp_l3_policy_router_associations'
l3_policy_id = sa.Column(sa.String(36), sa.ForeignKey('gp_l3_policies.id'),
primary_key=True)
router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id'),
primary_key=True)
class L3PolicyMapping(gpdb.L3Policy):
"""Mapping of L3Policy to set of Neutron Routers."""
__table_args__ = {'extend_existing': True}
__mapper_args__ = {'polymorphic_identity': 'mapping'}
routers = orm.relationship(L3PolicyRouterAssociation,
cascade='all', lazy="joined")
class ExternalSegmentMapping(gpdb.ExternalSegment):
"""Mapping of L2Policy to Neutron Network."""
__table_args__ = {'extend_existing': True}
__mapper_args__ = {'polymorphic_identity': 'mapping'}
subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id'),
nullable=True, unique=True)
gquota.DB_CLASS_TO_RESOURCE_NAMES[L3PolicyMapping.__name__] = 'l3_policy'
gquota.DB_CLASS_TO_RESOURCE_NAMES[L2PolicyMapping.__name__] = 'l2_policy'
gquota.DB_CLASS_TO_RESOURCE_NAMES[PolicyTargetGroupMapping.__name__] = (
'policy_target_group')
gquota.DB_CLASS_TO_RESOURCE_NAMES[PolicyTargetMapping.__name__] = (
'policy_target')
gquota.DB_CLASS_TO_RESOURCE_NAMES[ExternalSegmentMapping.__name__] = (
'external_segment')
class GroupPolicyMappingDbPlugin(gpdb.GroupPolicyDbPlugin):
"""Group Policy Mapping interface implementation using SQLAlchemy models.
"""
def _make_policy_target_dict(self, pt, fields=None):
res = super(GroupPolicyMappingDbPlugin,
self)._make_policy_target_dict(pt)
res['port_id'] = pt.port_id
return self._fields(res, fields)
def _make_policy_target_group_dict(self, ptg, fields=None):
res = super(GroupPolicyMappingDbPlugin,
self)._make_policy_target_group_dict(ptg)
res['subnets'] = [subnet.subnet_id for subnet in ptg.subnets]
return self._fields(res, fields)
def _make_l2_policy_dict(self, l2p, fields=None):
res = super(GroupPolicyMappingDbPlugin,
self)._make_l2_policy_dict(l2p)
res['network_id'] = l2p.network_id
return self._fields(res, fields)
def _make_l3_policy_dict(self, l3p, fields=None):
res = super(GroupPolicyMappingDbPlugin,
self)._make_l3_policy_dict(l3p)
res['routers'] = [router.router_id for router in l3p.routers]
return self._fields(res, fields)
def _make_external_segment_dict(self, es, fields=None):
res = super(GroupPolicyMappingDbPlugin,
self)._make_external_segment_dict(es)
res['subnet_id'] = es.subnet_id
return self._fields(res, fields)
def _set_port_for_policy_target(self, context, pt_id, port_id):
with context.session.begin(subtransactions=True):
pt_db = self._get_policy_target(context, pt_id)
pt_db.port_id = port_id
def _add_subnet_to_policy_target_group(self, context, ptg_id, subnet_id):
with context.session.begin(subtransactions=True):
ptg_db = self._get_policy_target_group(context, ptg_id)
assoc = PTGToSubnetAssociation(policy_target_group_id=ptg_id,
subnet_id=subnet_id)
ptg_db.subnets.append(assoc)
return [subnet.subnet_id for subnet in ptg_db.subnets]
def _set_network_for_l2_policy(self, context, l2p_id, network_id):
with context.session.begin(subtransactions=True):
l2p_db = self._get_l2_policy(context, l2p_id)
l2p_db.network_id = network_id
def _add_router_to_l3_policy(self, context, l3p_id, router_id):
with context.session.begin(subtransactions=True):
l3p_db = self._get_l3_policy(context, l3p_id)
assoc = L3PolicyRouterAssociation(l3_policy_id=l3p_id,
router_id=router_id)
l3p_db.routers.append(assoc)
return [router.router_id for router in l3p_db.routers]
def _set_subnet_to_es(self, context, es_id, subnet_id):
with context.session.begin(subtransactions=True):
es_db = self._get_external_segment(context, es_id)
es_db.subnet_id = subnet_id
def _update_ess_for_l3p(self, context, l3p_id, ess):
with context.session.begin(subtransactions=True):
l3p_db = self._get_l3_policy(context, l3p_id)
self._set_ess_for_l3p(context, l3p_db, ess)
def _get_l3p_ptgs(self, context, l3p_id):
return super(GroupPolicyMappingDbPlugin, self)._get_l3p_ptgs(
context, l3p_id, l3p_klass=L3PolicyMapping,
ptg_klass=PolicyTargetGroupMapping, l2p_klass=L2PolicyMapping)
@log.log
def create_policy_target(self, context, policy_target):
pt = policy_target['policy_target']
tenant_id = self._get_tenant_id_for_create(context, pt)
with context.session.begin(subtransactions=True):
pt_db = PolicyTargetMapping(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=pt['name'],
description=pt['description'],
policy_target_group_id=
pt['policy_target_group_id'],
port_id=pt['port_id'])
context.session.add(pt_db)
return self._make_policy_target_dict(pt_db)
@log.log
def get_policy_targets(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'policy_target', limit,
marker)
return self._get_collection(context, PolicyTargetMapping,
self._make_policy_target_dict,
filters=filters, fields=fields,
sorts=sorts, limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
@log.log
def create_policy_target_group(self, context, policy_target_group):
ptg = policy_target_group['policy_target_group']
tenant_id = self._get_tenant_id_for_create(context, ptg)
with context.session.begin(subtransactions=True):
if ptg['service_management']:
self._validate_service_management_ptg(context, tenant_id)
ptg_db = PolicyTargetGroupMapping(
id=uuidutils.generate_uuid(), tenant_id=tenant_id,
name=ptg['name'], description=ptg['description'],
l2_policy_id=ptg['l2_policy_id'],
network_service_policy_id=ptg['network_service_policy_id'],
shared=ptg.get('shared', False),
service_management=ptg.get('service_management', False))
context.session.add(ptg_db)
if 'subnets' in ptg:
for subnet in ptg['subnets']:
assoc = PTGToSubnetAssociation(
policy_target_group_id=ptg_db.id,
subnet_id=subnet
)
ptg_db.subnets.append(assoc)
self._process_policy_rule_sets_for_ptg(context, ptg_db, ptg)
return self._make_policy_target_group_dict(ptg_db)
@log.log
def update_policy_target_group(self, context, policy_target_group_id,
policy_target_group):
ptg = policy_target_group['policy_target_group']
with context.session.begin(subtransactions=True):
ptg_db = self._get_policy_target_group(
context, policy_target_group_id)
self._process_policy_rule_sets_for_ptg(context, ptg_db, ptg)
if 'subnets' in ptg:
# Add/remove associations for changes in subnets.
new_subnets = set(ptg['subnets'])
old_subnets = set(subnet.subnet_id
for subnet in ptg_db.subnets)
for subnet in new_subnets - old_subnets:
assoc = PTGToSubnetAssociation(
policy_target_group_id=policy_target_group_id,
subnet_id=subnet)
ptg_db.subnets.append(assoc)
for subnet in old_subnets - new_subnets:
assoc = (
context.session.query(
PTGToSubnetAssociation).filter_by(
policy_target_group_id=policy_target_group_id,
subnet_id=subnet).one())
ptg_db.subnets.remove(assoc)
context.session.delete(assoc)
# Don't update ptg_db.subnets with subnet IDs.
del ptg['subnets']
ptg_db.update(ptg)
return self._make_policy_target_group_dict(ptg_db)
@log.log
def create_l2_policy(self, context, l2_policy):
l2p = l2_policy['l2_policy']
tenant_id = self._get_tenant_id_for_create(context, l2p)
with context.session.begin(subtransactions=True):
l2p_db = L2PolicyMapping(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=l2p['name'],
description=l2p['description'],
l3_policy_id=l2p['l3_policy_id'],
network_id=l2p['network_id'],
shared=l2p.get('shared', False))
context.session.add(l2p_db)
return self._make_l2_policy_dict(l2p_db)
@log.log
def get_l2_policies(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'l2_policy', limit,
marker)
return self._get_collection(context, L2PolicyMapping,
self._make_l2_policy_dict,
filters=filters, fields=fields,
sorts=sorts, limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
@log.log
def create_l3_policy(self, context, l3_policy):
l3p = l3_policy['l3_policy']
self.validate_ip_pool(l3p.get('ip_pool', None), l3p['ip_version'])
tenant_id = self._get_tenant_id_for_create(context, l3p)
self.validate_subnet_prefix_length(l3p['ip_version'],
l3p['subnet_prefix_length'],
l3p.get('ip_pool', None))
with context.session.begin(subtransactions=True):
l3p_db = L3PolicyMapping(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=l3p['name'],
ip_version=l3p['ip_version'],
ip_pool=l3p['ip_pool'],
subnet_prefix_length=
l3p['subnet_prefix_length'],
description=l3p['description'],
shared=l3p.get('shared', False))
if 'routers' in l3p:
for router in l3p['routers']:
assoc = L3PolicyRouterAssociation(
l3_policy_id=l3p_db.id,
router_id=router
)
l3p_db.routers.append(assoc)
if 'external_segments' in l3p:
self._set_ess_for_l3p(context, l3p_db,
l3p['external_segments'])
context.session.add(l3p_db)
return self._make_l3_policy_dict(l3p_db)
@log.log
def update_l3_policy(self, context, l3_policy_id, l3_policy):
l3p = l3_policy['l3_policy']
with context.session.begin(subtransactions=True):
l3p_db = self._get_l3_policy(context, l3_policy_id)
if 'subnet_prefix_length' in l3p:
self.validate_subnet_prefix_length(l3p_db.ip_version,
l3p['subnet_prefix_length'],
l3p_db.ip_pool)
if 'routers' in l3p:
# Add/remove associations for changes in routers.
new_routers = set(l3p['routers'])
old_routers = set(router.router_id
for router in l3p_db.routers)
for router in new_routers - old_routers:
assoc = L3PolicyRouterAssociation(
l3_policy_id=l3_policy_id, router_id=router)
l3p_db.routers.append(assoc)
for router in old_routers - new_routers:
assoc = (context.session.query(L3PolicyRouterAssociation).
filter_by(l3_policy_id=l3_policy_id,
router_id=router).
one())
l3p_db.routers.remove(assoc)
context.session.delete(assoc)
# Don't update l3p_db.routers with router IDs.
del l3p['routers']
if 'external_segments' in l3p:
self._set_ess_for_l3p(context, l3p_db,
l3p['external_segments'])
del l3p['external_segments']
l3p_db.update(l3p)
return self._make_l3_policy_dict(l3p_db)
@log.log
def create_external_segment(self, context, external_segment):
es = external_segment['external_segment']
tenant_id = self._get_tenant_id_for_create(context, es)
with context.session.begin(subtransactions=True):
es_db = ExternalSegmentMapping(
id=uuidutils.generate_uuid(), tenant_id=tenant_id,
name=es['name'], description=es['description'],
shared=es.get('shared', False), ip_version=es['ip_version'],
cidr=es['cidr'],
port_address_translation=es['port_address_translation'],
subnet_id=es['subnet_id'])
context.session.add(es_db)
if 'external_routes' in es:
self._process_segment_ers(context, es_db, es)
return self._make_external_segment_dict(es_db)
@log.log
def get_external_segments(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'external_segment', limit,
marker)
return self._get_collection(context, ExternalSegmentMapping,
self._make_external_segment_dict,
filters=filters, fields=fields,
sorts=sorts, limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
| apache-2.0 | 7,347,120,841,103,044,000 | 47.142105 | 79 | 0.552586 | false |
uwcirg/true_nth_usa_portal | portal/migrations/versions/4456ad5faf86_.py | 1 | 2041 | """empty message
Revision ID: 4456ad5faf86
Revises: 521aa70e0617
Create Date: 2015-09-03 19:40:53.744703
"""
# revision identifiers, used by Alembic.
revision = '4456ad5faf86'
down_revision = '521aa70e0617'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=50), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('user_roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
['role_id'], ['roles.id'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(
['user_id'], ['users.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id')
)
op.add_column(u'users', sa.Column(
'confirmed_at', sa.DateTime(), nullable=True))
op.add_column(u'users', sa.Column('is_active', sa.Boolean(),
server_default='1', nullable=False))
op.add_column(u'users', sa.Column(
'password', sa.String(length=255), nullable=True))
op.add_column(u'users', sa.Column('reset_password_token',
sa.String(length=100), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column(u'users', 'reset_password_token')
op.drop_column(u'users', 'password')
op.drop_column(u'users', 'is_active')
op.drop_column(u'users', 'confirmed_at')
op.drop_table('user_roles')
op.drop_table('roles')
### end Alembic commands ###
| bsd-3-clause | 5,831,843,187,538,724,000 | 36.796296 | 76 | 0.55757 | false |
Sult/evetool | populate/misc.py | 1 | 1848 | import eveapi
from apies.models import CallGroup, Call, Api
from characters.models import RefType
api = eveapi.EVEAPIConnection()
data = api.API.CallList()
def call_groups():
for group in data.callGroups:
try:
CallGroup.objects.create(
groupid=group.groupID,
name=group.name,
description=group.description,
)
        except Exception:
            print "You stupid"
def calls():
for call in data.calls:
if call.accessMask == 8388608:
            # No need for limited character info, full access, or none
continue
try:
Call.objects.create(
accessmask=call.accessMask,
accounttype=call.type,
name=call.name,
callgroup=CallGroup.objects.get(groupid=call.groupID),
description=call.description,
)
        except Exception:
            print "Some shit didnt work dude"
# extra = []
# for call in extra:
# Call.objects.create(
# accessmask=call.accessMask,
# accounttype=Api.CHARACTER,
# name=call.name,
# callgroup=CallGroup.objects.get(groupid=call.groupID),
# description=call.description,
# )
# Call.objects.create(
# accessmask=call.accessMask,
# accounttype=Api.CORPORATION,
# name=call.name,
# callgroup=CallGroup.objects.get(groupid=call.groupID),
# description=call.description,
# )
def reftypes():
for ref in api.eve.RefTypes().refTypes:
try:
RefType.objects.create(
reftypeid=ref.refTypeID,
reftypename=ref.refTypeName,
)
        except Exception:
            print "You fucked up mate"
call_groups()
calls()
reftypes()
| mit | 1,425,303,682,853,979,000 | 26.58209 | 70 | 0.551407 | false |
EJH2/ViralBot-Discord | bot/utils/over.py | 1 | 7167 | # coding=utf-8
"""Overrides for Discord.py classes"""
import contextlib
import inspect
import io
import itertools
import re
import discord
from discord.ext.commands import HelpFormatter as HelpF, Paginator, Command
from bot.utils import polr, privatebin
from bot.utils.args import ArgParseConverter as ArgPC
def create_help(cmd, parser):
"""Creates an updated usage for the help command"""
default = cmd.params['args'].default
    if cmd.signature.split("[")[-1] == (f"args={default}]" if default else "args]"):
sio = io.StringIO()
with contextlib.redirect_stdout(sio):
parser.print_help()
sio.seek(0)
s = sio.read()
# Strip the filename and trailing newline from help text
arg_part = s[(len(str(s[7:]).split()[0]) + 8):-1]
k = cmd.qualified_name
spt = len(k.split())
# Remove a duplicate command name + leading arguments
split_sig = cmd.signature.split()[spt:]
return "[".join((" ".join(split_sig)).split("[")[:-1]) + arg_part
return cmd.usage
class HelpFormatter(HelpF):
"""Custom override for the default help command"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._paginator = None
async def format(self):
"""Handles the actual behaviour involved with formatting.
To change the behaviour, this method should be overridden.
Returns
--------
list
A paginated output of the help command.
"""
self._paginator = Paginator()
# we need a padding of ~80 or so
description = self.command.description if not self.is_cog() else inspect.getdoc(self.command)
if description:
# <description> portion
self._paginator.add_line(description, empty=True)
if isinstance(self.command, Command):
# <signature portion>
if self.command.params.get("args", None) and type(self.command.params['args'].annotation) == ArgPC:
self.command.usage = create_help(self.command, self.command.params['args'].annotation.parser)
signature = self.get_command_signature()
self._paginator.add_line(signature, empty=True)
# <long doc> section
if self.command.help:
self._paginator.add_line(self.command.help, empty=True)
# end it here if it's just a regular command
if not self.has_subcommands():
self._paginator.close_page()
return self._paginator.pages
max_width = self.max_name_size
def category(tup):
"""Splits the help command into categories for easier readability"""
cog = tup[1].cog_name
# we insert the zero width space there to give it approximate
# last place sorting position.
return cog + ':' if cog is not None else '\u200bNo Category:'
filtered = await self.filter_command_list()
if self.is_bot():
data = sorted(filtered, key=category)
for category, commands in itertools.groupby(data, key=category):
# there simply is no prettier way of doing this.
commands = sorted(commands)
if len(commands) > 0:
self._paginator.add_line(category)
self._add_subcommands_to_page(max_width, commands)
else:
filtered = sorted(filtered)
if filtered:
self._paginator.add_line('Commands:')
self._add_subcommands_to_page(max_width, filtered)
# add the ending note
self._paginator.add_line()
ending_note = self.get_ending_note()
self._paginator.add_line(ending_note)
return self._paginator.pages
_mentions_transforms = {
'@everyone': '@\u200beveryone',
'@here': '@\u200bhere'
}
_mention_pattern = re.compile('|'.join(_mentions_transforms.keys()))
def _is_submodule(parent, child):
return parent == child or child.startswith(parent + ".")
async def _default_help_command(ctx, *commands: str):
"""Shows this message."""
bot = ctx.bot
destination = ctx.message.author if bot.pm_help else ctx.message.channel
def repl(obj):
return _mentions_transforms.get(obj.group(0), '')
# help by itself just lists our own commands.
if len(commands) == 0:
pages = await bot.formatter.format_help_for(ctx, bot)
elif len(commands) == 1:
# try to see if it is a cog name
name = _mention_pattern.sub(repl, commands[0])
command = None
if name in bot.cogs:
command = bot.cogs[name]
else:
command = bot.all_commands.get(name)
if command is None:
await destination.send(bot.command_not_found.format(name))
return
pages = await bot.formatter.format_help_for(ctx, command)
else:
name = _mention_pattern.sub(repl, commands[0])
command = bot.all_commands.get(name)
if command is None:
await destination.send(bot.command_not_found.format(name))
return
for key in commands[1:]:
try:
key = _mention_pattern.sub(repl, key)
command = command.all_commands.get(key)
if command is None:
await destination.send(bot.command_not_found.format(key))
return
except AttributeError:
await destination.send(bot.command_has_no_subcommands.format(command, key))
return
pages = await bot.formatter.format_help_for(ctx, command)
if bot.pm_help is None:
characters = sum(map(len, pages))
# modify destination based on length of pages.
if characters > 1000:
destination = ctx.message.author
for page in pages:
try:
await destination.send(page)
except discord.Forbidden:
destination = ctx.message.channel
await destination.send(page)
old_send = discord.abc.Messageable.send
async def send(self, content=None, **kwargs):
"""Overrides default send method in order to create a paste if the response is more than 2000 characters"""
if content is not None and any(x in str(content) for x in ["@everyone", "@here"]):
content = content.replace("@everyone", "@\u0435veryone").replace("@here", "@h\u0435re")
if content is not None and len(str(content)) > 2000:
if content.startswith("```py"):
content = "\n".join(content.split("\n")[1:-1])
paste = await privatebin.upload(content, expires="15min", server=self.bot.priv)
if self.bot.polr:
paste = await polr.shorten(paste, **self.bot.polr)
return await old_send(self, f"Hey, I couldn't handle all the text I was gonna send you, so I put it in a paste!"
f"\nThe link is **{paste}**, but it expires in 15 minutes, so get it quick!",
**kwargs)
else:
return await old_send(self, content, **kwargs)
| gpl-3.0 | 2,864,970,659,438,636,000 | 35.380711 | 120 | 0.596205 | false |
makelove/OpenCV-Python-Tutorial | ch05-视频/5.VideoPlay.py | 1 | 1329 | import numpy as np
import cv2
cap = cv2.VideoCapture('../data/vtest.avi')
# cap = cv2.VideoCapture('output.avi')
# cap = cv2.VideoCapture('Minions_banana.mp4')
# Frame rate
fps = cap.get(cv2.CAP_PROP_FPS) # 25.0
print("Frames per second using video.get(cv2.CAP_PROP_FPS) : {0}".format(fps))
# Total number of frames
num_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
print('Total number of frames:', num_frames)
#
frame_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
frame_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
print('Height:', frame_height, 'Width:', frame_width)
FRAME_NOW = cap.get(cv2.CAP_PROP_POS_FRAMES)  # frame 0
print('Current frame number:', FRAME_NOW)  # 0.0
# Read a specific frame; this only works for video files, not for cameras
frame_no = 121
cap.set(cv2.CAP_PROP_POS_FRAMES, frame_no)  # frame_no is the frame you want
ret, frame = cap.read() # Read the frame
cv2.imshow('frame_no'+str(frame_no), frame)
FRAME_NOW = cap.get(cv2.CAP_PROP_POS_FRAMES)
print('Current frame number:', FRAME_NOW)  # 122.0
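# Seeking can also be done by timestamp instead of frame index, e.g.
# cap.set(cv2.CAP_PROP_POS_MSEC, 5000) jumps to the 5-second mark.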
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:  # end of the stream, avoid passing None to cvtColor
        break
    FRAME_NOW = cap.get(cv2.CAP_PROP_POS_FRAMES)  # current frame number
    print('Current frame number:', FRAME_NOW)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
cv2.imshow('frame', gray)
key = cv2.waitKey(1)
if key == ord("q"):
break
cap.release()
cv2.destroyAllWindows()
| mit | 7,165,087,056,115,826,000 | 25.733333 | 78 | 0.666667 | false |
sein-tao/pyBioUtil | tests/test_fastq.py | 1 | 1800 | #!/usr/bin/env python3
import unittest
TestCase = unittest.TestCase
from BioUtil import fastqFile
import os
print("test fastqFile")
data_dir = os.path.join(os.path.dirname(__file__), "..", "data")
def readfile(file):
fh = open(file, 'r')
content = fh.readlines()
fh.close()
return content
class TestFastq(TestCase):
@classmethod
def setUpClass(self):
self.test_fa = os.path.join(data_dir, "test.fa")
self.test_fq = os.path.join(data_dir, "test.fq")
self.tmp_fa = os.path.join(data_dir, "tmp.fa")
self.tmp_fq = os.path.join(data_dir, "tmp.fq")
@classmethod
def tearDownClass(self):
os.remove(self.tmp_fa)
os.remove(self.tmp_fq)
def test_fasta(self):
with fastqFile(self.tmp_fa, 'w', linewidth=70) as out, fastqFile(self.test_fa) as input:
for rec in input:
# print(rec, file=out)
out.write(rec)
#gold_file = os.path.join(data_dir, "test.100.fa")
gold_file = self.test_fa
gold = readfile(gold_file)
result = readfile(self.tmp_fa)
self.assertEqual(gold, result)
def test_fastq(self):
with fastqFile(self.tmp_fq, 'w') as out, fastqFile(self.test_fq) as input:
for rec in input:
print(rec, file=out)
gold = readfile(self.test_fq)
result = readfile(self.tmp_fq)
self.assertEqual(gold, result)
def test_fastq_write(self):
with fastqFile(self.tmp_fq, 'w') as out, fastqFile(self.test_fq) as input:
for rec in input:
out.write(rec)
gold = readfile(self.test_fq)
result = readfile(self.tmp_fq)
self.assertEqual(gold, result)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 277,510,031,970,988,260 | 28.508197 | 96 | 0.583333 | false |
ziima/pyvmd | pyvmd/tests/test_analyzer.py | 1 | 2785 | """
Tests for trajectory analysis utilities.
"""
import VMD
from mock import sentinel
from pyvmd.analyzer import Analyzer
from pyvmd.molecules import Molecule
from .utils import data, PyvmdTestCase
class TestAnalyzer(PyvmdTestCase):
"""
Test `Analyzer` class.
"""
def setUp(self):
self.mol = Molecule.create()
self.mol.load(data('water.psf'))
# Storage for callback data
self.coords = []
self.frames = []
def test_analyze_callback_args(self):
# Test callback is called with extra arguments
steps = []
def callback(step, *args, **kwargs):
self.assertEqual(step.molecule, self.mol)
self.assertEqual(args, (sentinel.arg1, sentinel.arg2))
self.assertEqual(kwargs, {'key': sentinel.value, 'another': sentinel.junk})
steps.append(step.frame)
analyzer = Analyzer(self.mol, [data('water.1.dcd')])
analyzer.add_callback(callback, sentinel.arg1, sentinel.arg2, key=sentinel.value, another=sentinel.junk)
analyzer.analyze()
self.assertEqual(steps, range(12))
def _get_status(self, status):
# Callback to collect status data
self.frames.append(status.frame)
def _get_x(self, status):
# Callback to collect data
self.coords.append(VMD.atomsel.atomsel('index 0', molid=status.molecule.molid).get('x')[0])
def test_analyze(self):
# Test analyzer works correctly with default parameters
analyzer = Analyzer(self.mol, [data('water.1.dcd'), data('water.2.dcd')])
analyzer.add_callback(self._get_status)
analyzer.add_callback(self._get_x)
analyzer.analyze()
result = [-1.4911567, -1.4851371, -1.4858487, -1.4773947, -1.4746015, -1.4673382, -1.4535547, -1.4307435,
-1.4120502, -1.3853478, -1.3674825, -1.3421925, -1.3177859, -1.2816998, -1.2579591, -1.2262495,
-1.2036057, -1.1834533, -1.174916, -1.1693807, -1.1705244, -1.1722997, -1.1759951, -1.175245]
self.assertAlmostEqualSeqs(self.coords, result)
self.assertEqual(self.frames, range(24))
def test_analyze_params(self):
# Test load every other frame, all 12 at once
self.coords = []
self.frames = []
analyzer = Analyzer(self.mol, [data('water.1.dcd'), data('water.2.dcd')], step=2, chunk=12)
analyzer.add_callback(self._get_status)
analyzer.add_callback(self._get_x)
analyzer.analyze()
result = [-1.4911567, -1.4858487, -1.4746015, -1.4535547, -1.4120502, -1.3674825, -1.3177859, -1.2579591,
-1.2036057, -1.174916, -1.1705244, -1.1759951]
self.assertAlmostEqualSeqs(self.coords, result)
self.assertEqual(self.frames, range(12))
| gpl-3.0 | 6,988,987,339,418,595,000 | 38.225352 | 113 | 0.627289 | false |
mercycorps/tola | htdocs/silo/serializers.py | 1 | 1231 | from django.forms import widgets
from rest_framework import serializers
from silo.models import Silo, Read, ReadType, LabelValueStore, Tag
from django.contrib.auth.models import User
import json
class SiloSerializer(serializers.HyperlinkedModelSerializer):
data = serializers.SerializerMethodField()
class Meta:
model = Silo
fields = ('owner', 'name', 'reads', 'description', 'create_date', 'id', 'data','shared','tags','public')
depth =1
def get_data(self, obj):
link = "/api/silo/" + str(obj.id) + "/data/"
return (self.context['request'].build_absolute_uri(link))
class TagSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Tag
fields = ('name', 'owner')
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ('url', 'username', 'email', 'is_staff')
class ReadSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Read
fields = ('owner', 'type', 'read_name', 'read_url')
class ReadTypeSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = ReadType
fields = ( 'read_type', 'description') | gpl-2.0 | -2,326,723,832,163,240,000 | 29.04878 | 112 | 0.671812 | false |
eighilaza/bouraka | bouraka-django/bouraka/views.py | 1 | 1815 | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
from django.template import RequestContext, loader
from news.models import News
from slides.models import Slide
from django.http import HttpResponseRedirect
def home(request):
latest_news_list = News.objects.order_by('-publication_date')[:4]
try:
slide = Slide.objects.get(title='home')
except Slide.DoesNotExist:
slide = None
template = loader.get_template('bouraka/home.html')
context = {
'latest_news_list': latest_news_list,
'slide': slide,
}
return render(request, 'bouraka/home.html', context)
def gallery(request):
return render(request, 'bouraka/gallery.html')
def history(request):
return render(request, 'bouraka/history.html')
def team(request):
return render(request, 'bouraka/team.html')
def shell(request):
return render(request, 'bouraka/shell.html')
def educeco(request):
return render(request, 'bouraka/educeco.html')
def michelin(request):
return render(request, 'bouraka/michelin.html')
def futur(request):
return render(request, 'bouraka/futur.html')
def envol(request):
return render(request, 'bouraka/envol.html')
def epic(request):
return render(request, 'bouraka/epic.html')
def orca(request):
return render(request, 'bouraka/orca.html')
def elec(request):
return render(request, 'bouraka/elec.html')
def roues(request):
return render(request, 'bouraka/roues.html')
def moteur(request):
return render(request, 'bouraka/moteur.html')
def simulateur(request):
return render(request, 'bouraka/simulateur.html')
def accomplishments(request):
return render(request, 'bouraka/accomplishments.html')
def contacts(request):
return render(request, 'bouraka/contacts.html')
| lgpl-3.0 | -63,468,116,221,537,944 | 33.903846 | 69 | 0.722865 | false |
delfick/harpoon | tests/docker/test_docker_run.py | 1 | 14030 | # coding: spec
from harpoon.errors import FailedImage, BadImage, AlreadyBoundPorts, ProgrammerError
from harpoon.option_spec.harpoon_specs import HarpoonSpec
from harpoon.ship.runner import Runner
from tests.helpers import HarpoonCase
from delfick_project.option_merge import Converter, MergedOptions
from delfick_project.norms import sb, Meta
from contextlib import contextmanager
from unittest import mock
import logging
import socket
import codecs
import pytest
import os
import re
pytestmark = pytest.mark.integration
log = logging.getLogger("tests.docker.test_docker_run")
describe HarpoonCase, "Building docker images":
def make_image(self, options, harpoon_options=None, harpoon=None):
config_root = self.make_temp_dir()
if harpoon_options is None and harpoon is None:
harpoon_options = {}
if harpoon_options is not None:
harpoon_options["docker_context"] = self.docker_client
harpoon_options["docker_context_maker"] = self.new_docker_client
elif harpoon:
if harpoon.docker_context is sb.NotSpecified:
harpoon.docker_context = self.docker_client
if harpoon.docker_context_maker is sb.NotSpecified:
harpoon.docker_context_maker = self.new_docker_client
if harpoon_options and harpoon:
raise ProgrammerError("Please only specify one of harpoon_options and harpoon")
if harpoon is None:
harpoon = HarpoonSpec().harpoon_spec.normalise(Meta({}, []), harpoon_options)
if "harpoon" not in options:
options["harpoon"] = harpoon
everything = MergedOptions.using(
{"harpoon": harpoon, "_key_name_1": "awesome_image", "config_root": config_root}
)
harpoon_converter = Converter(convert=lambda *args: harpoon, convert_path=["harpoon"])
everything.add_converter(harpoon_converter)
everything.converters.activate()
if "configuration" not in options:
options["configuration"] = everything
return HarpoonSpec().image_spec.normalise(Meta(everything, []), options)
@contextmanager
def a_port(self, port):
s = None
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("localhost", port))
s.listen(1)
yield
finally:
try:
if s is not None:
s.close()
except Exception as error:
log.warning(error)
it "can complain if ports are already bound to something else":
if self.docker_api.base_url.startswith("http"):
pytest.skip("docker api is http based")
commands = ["FROM {0}".format(os.environ["BASE_IMAGE"]), "CMD exit 1"]
fake_sys_stdout = self.make_temp_file()
fake_sys_stderr = self.make_temp_file()
with self.a_port(9999):
with self.a_port(9998):
with self.fuzzyAssertRaisesError(AlreadyBoundPorts, ports=[9999, 9998]):
with self.a_built_image(
{
"context": False,
"commands": commands,
"ports": ["9999:9999", "9998:9998"],
},
{
"no_intervention": True,
"stdout": fake_sys_stdout,
"tty_stdout": fake_sys_stdout,
"tty_stderr": fake_sys_stderr,
},
) as (cached, conf):
Runner().run_container(conf, {conf.name: conf})
it "does not complain if nothing is using a port":
if self.docker_api.base_url.startswith("http"):
pytest.skip("docker api is http based")
commands = ["FROM {0}".format(os.environ["BASE_IMAGE"]), "CMD exit 0"]
fake_sys_stdout = self.make_temp_file()
fake_sys_stderr = self.make_temp_file()
# Make sure we can get 9999
with self.a_port(9999):
pass
with self.a_built_image(
{"context": False, "commands": commands, "ports": ["9999:9999"]},
{
"no_intervention": True,
"stdout": fake_sys_stdout,
"tty_stdout": fake_sys_stdout,
"tty_stderr": fake_sys_stderr,
},
) as (cached, conf):
Runner().run_container(conf, {conf.name: conf})
assert True
it "can has links":
commands1 = [
"FROM python:3",
"EXPOSE 8000",
"RUN echo hi1 > /one",
"CMD python -m http.server",
]
commands2 = [
"FROM python:3",
"EXPOSE 8000",
"RUN echo there2 > /two",
"CMD python -m http.server",
]
commands3 = [
"FROM python:3",
"CMD sleep 1 && curl http://one:8000/one && curl http://two:8000/two",
]
fake_sys_stdout = self.make_temp_file()
fake_sys_stderr = self.make_temp_file()
harpoon_options = {
"no_intervention": True,
"stdout": fake_sys_stdout,
"tty_stdout": fake_sys_stdout,
"tty_stderr": fake_sys_stderr,
}
harpoon = HarpoonSpec().harpoon_spec.normalise(Meta({}, []), harpoon_options)
with self.a_built_image(
{"name": "one", "context": False, "commands": commands1}, harpoon=harpoon
) as (_, conf1):
assert len(conf1.harpoon.docker_context.networks.list()) == 3
links = [[conf1, "one"]]
with self.a_built_image(
{"name": "two", "links": links, "context": False, "commands": commands2},
harpoon=harpoon,
) as (_, conf2):
links = [[conf1, "one"], [conf2, "two"]]
with self.a_built_image(
{"name": "three", "context": False, "commands": commands3, "links": links},
harpoon=harpoon,
) as (_, conf3):
Runner().run_container(
conf3, {conf1.name: conf1, conf2.name: conf2, conf3.name: conf3}
)
assert len(conf3.harpoon.docker_context.networks.list()) == 3
with open(fake_sys_stdout.name) as fle:
output = fle.read().strip()
if isinstance(output, bytes):
output = output.decode("utf-8")
output = [line.strip() for line in output.split("\n") if "lxc-start" not in line]
assert output[-2:] == ["hi1", "there2"]
it "can intervene a broken build":
called = []
original_commit_and_run = Runner.commit_and_run
def commit_and_run(*args, **kwargs):
kwargs["command"] = "echo 'intervention_goes_here'"
called.append("commit_and_run")
return original_commit_and_run(Runner(), *args, **kwargs)
fake_commit_and_run = mock.Mock(name="commit_and_run", side_effect=commit_and_run)
commands = ["FROM {0}".format(os.environ["BASE_IMAGE"]), "RUN exit 1"]
try:
fake_sys_stdout = self.make_temp_file()
fake_sys_stderr = self.make_temp_file()
with mock.patch("harpoon.ship.builder.Runner.commit_and_run", fake_commit_and_run):
with mock.patch.dict(__builtins__, input=lambda *args: "y\n"):
with self.a_built_image(
{"context": False, "commands": commands},
{
"stdout": fake_sys_stdout,
"tty_stdout": fake_sys_stdout,
"tty_stderr": fake_sys_stderr,
},
) as (cached, conf):
pass
except FailedImage as error:
expected = re.compile(
r"The command [\[']/bin/sh -c exit 1[\]'] returned a non-zero code: 1"
)
assert expected.match(str(error.kwargs["msg"])), "Expected {0} to match {1}".format(
str(error.kwargs["msg"]), expected.pattern
)
assert error.kwargs["image"] == "awesome_image"
assert called == ["commit_and_run"]
with open(fake_sys_stdout.name) as fle:
output = fle.read().strip()
if isinstance(output, bytes):
output = output.decode("utf-8")
output = "\n".join([line for line in output.split("\n") if "lxc-start" not in line])
expected = """
Step 1(/2)? : FROM busybox:buildroot-2014.02
---> [a-zA-Z0-9]{12}
Step 2(/2)? : RUN exit 1
---> Running in .+
!!!!
It would appear building the image failed
Do you want to run /bin/bash where the build to help debug why it failed?
intervention_goes_here
"""
self.assertReMatchLines(
expected,
output,
remove=[
re.compile("^Successfully tagged .+"),
re.compile("^Removing intermediate container .+"),
],
)
it "can intervene a broken container":
called = []
original_commit_and_run = Runner.commit_and_run
def commit_and_run(*args, **kwargs):
kwargs["command"] = "echo 'intervention_goes_here'"
called.append("commit_and_run")
return original_commit_and_run(Runner(), *args, **kwargs)
fake_commit_and_run = mock.Mock(name="commit_and_run", side_effect=commit_and_run)
commands = ["FROM {0}".format(os.environ["BASE_IMAGE"]), "CMD sh -c 'exit 1'"]
try:
fake_sys_stdout = self.make_temp_file()
fake_sys_stderr = self.make_temp_file()
with mock.patch("harpoon.ship.builder.Runner.commit_and_run", fake_commit_and_run):
with mock.patch.dict(__builtins__, input=lambda *args: "y\n"):
with self.a_built_image(
{"context": False, "commands": commands},
{
"stdout": fake_sys_stdout,
"tty_stdout": fake_sys_stdout,
"tty_stderr": fake_sys_stderr,
},
) as (cached, conf):
Runner().run_container(conf, {conf.name: conf})
except BadImage as error:
assert "Failed to run container" in str(error)
assert called == ["commit_and_run"]
with codecs.open(fake_sys_stdout.name) as fle:
output = fle.read().strip()
if isinstance(output, bytes):
output = output.decode("utf-8")
output = "\n".join([line for line in output.split("\n") if "lxc-start" not in line])
expected = """
Step 1(/2)? : FROM busybox:buildroot-2014.02
---> [a-zA-Z0-9]{12}
Step 2(/2)? : CMD ['sh', '-c', 'exit 1']
---> Running in .+
--->
Successfully built .+
!!!!
Failed to run the container!
Do you want commit the container in it's current state and /bin/bash into it to debug?
intervention_goes_here
"""
self.assertReMatchLines(
expected,
output,
remove=[
re.compile("^Successfully tagged .+"),
re.compile("^Removing intermediate container .+"),
],
)
it "can intervene a broken container with the tty starting":
called = []
original_commit_and_run = Runner.commit_and_run
def commit_and_run(*args, **kwargs):
kwargs["command"] = "echo 'intervention_goes_here'"
called.append("commit_and_run")
return original_commit_and_run(Runner(), *args, **kwargs)
fake_commit_and_run = mock.Mock(name="commit_and_run", side_effect=commit_and_run)
commands = [
"FROM {0}".format(os.environ["BASE_IMAGE"]),
"""CMD echo 'hi'; sleep 1; exit 1""",
]
try:
fake_sys_stdout = self.make_temp_file()
fake_sys_stderr = self.make_temp_file()
with mock.patch("harpoon.ship.builder.Runner.commit_and_run", fake_commit_and_run):
with mock.patch.dict(__builtins__, input=lambda *args: "y\n"):
with self.a_built_image(
{"context": False, "commands": commands},
{
"stdout": fake_sys_stdout,
"tty_stdout": fake_sys_stdout,
"tty_stderr": fake_sys_stderr,
},
) as (cached, conf):
Runner().run_container(conf, {conf.name: conf})
except BadImage as error:
print(error)
assert "Failed to run container" in str(error)
assert called == ["commit_and_run"]
with codecs.open(fake_sys_stdout.name) as fle:
output = fle.read().strip()
if isinstance(output, bytes):
output = output.decode("utf-8")
output = "\n".join([line for line in output.split("\n") if "lxc-start" not in line])
expected = """
Step 1(/2)? : FROM busybox:buildroot-2014.02
---> [a-zA-Z0-9]{12}
Step 2(/2)? : CMD echo 'hi'; sleep 1; exit 1
---> Running in .+
---> .+
Successfully built .+
hi
!!!!
Failed to run the container!
Do you want commit the container in it's current state and /bin/bash into it to debug?
intervention_goes_here
"""
self.assertReMatchLines(
expected,
output,
remove=[
re.compile("^Successfully tagged .+"),
re.compile("^Removing intermediate container .+"),
],
)
| mit | -8,677,944,220,587,888,000 | 36.01847 | 96 | 0.521098 | false |
beakman/caquitv | external_apps/djangoratings/models.py | 1 | 2158 | from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.contrib.auth.models import User
import datetime
from managers import VoteManager
class Vote(models.Model):
content_type = models.ForeignKey(ContentType, related_name="votes")
object_id = models.PositiveIntegerField()
key = models.CharField(max_length=32)
score = models.IntegerField()
user = models.ForeignKey(User, blank=True, null=True, related_name="votes")
ip_address = models.IPAddressField()
date_added = models.DateTimeField(default=datetime.datetime.now, editable=False)
date_changed = models.DateTimeField(default=datetime.datetime.now, editable=False)
objects = VoteManager()
content_object = generic.GenericForeignKey()
class Meta:
unique_together = (('content_type', 'object_id', 'key', 'user', 'ip_address'))
def __unicode__(self):
return "%s voted %s on %s" % (self.user_display, self.score, self.content_object)
def save(self, *args, **kwargs):
self.date_changed = datetime.datetime.now()
super(Vote, self).save(*args, **kwargs)
def user_display(self):
if self.user:
return "%s (%s)" % (self.user.username, self.ip_address)
return self.ip_address
user_display = property(user_display)
def partial_ip_address(self):
ip = self.ip_address.split('.')
ip[-1] = 'xxx'
return '.'.join(ip)
partial_ip_address = property(partial_ip_address)
class Score(models.Model):
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
key = models.CharField(max_length=32)
score = models.IntegerField()
votes = models.PositiveIntegerField()
content_object = generic.GenericForeignKey()
class Meta:
unique_together = (('content_type', 'object_id', 'key'),)
def __unicode__(self):
return "%s scored %s with %s votes" % (self.content_object, self.score, self.votes)
| agpl-3.0 | -277,736,686,817,290,900 | 35.576271 | 91 | 0.645042 | false |
nuxeo/FunkLoad | setup.py | 1 | 3757 | #! /usr/bin/env python
# (C) Copyright 2005-2011 Nuxeo SAS <http://nuxeo.com>
# Author: [email protected]
# Contributors: Tom Lazar, Ross Patterson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
# 02111-1307, USA.
#
#
"""FunkLoad package setup
"""
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages
__version__ = '1.17.2'
setup(
name="funkload",
version=__version__,
description="Functional and load web tester.",
long_description=''.join(open('README.txt').readlines()),
author="Benoit Delbosc",
author_email="[email protected]",
url="http://funkload.nuxeo.org/",
download_url="http://pypi.python.org/packages/source/f/funkload/funkload-%s.tar.gz" % __version__,
license='GPL',
keywords='testing benching load performance functional monitoring',
packages=find_packages('src'),
package_dir={'': 'src'},
scripts=['scripts/fl-monitor-ctl', 'scripts/fl-credential-ctl',
'scripts/fl-run-bench', 'scripts/fl-run-test',
'scripts/fl-build-report',
'scripts/fl-install-demo',
'scripts/fl-record'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Natural Language :: English',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Site Management',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Quality Assurance',
'Topic :: System :: Benchmark',
'Topic :: System :: Monitoring',
],
# setuptools specific keywords
install_requires = ['webunit >= 1.3.8',
'docutils >= 0.3.7',
'setuptools'],
zip_safe=True,
package_data={'funkload': ['data/*',
'demo/simple/*', 'demo/zope/*',
'demo/cmf/*', 'demo/xmlrpc/*', 'demo/cps/*',
'demo/seam-booking-1.1.5/*', 'demo/*.txt',
'tests/*', ]},
entry_points = {
'console_scripts': [
'fl-monitor-ctl = funkload.Monitor:main',
'fl-credential-ctl = funkload.CredentialFile:main',
'fl-run-bench = funkload.BenchRunner:main',
'fl-run-test = funkload.TestRunner:main',
'fl-build-report = funkload.ReportBuilder:main',
'fl-install-demo = funkload.DemoInstaller:main',
'fl-record = funkload.Recorder:main'],
'funkload.plugins.monitor': [
'CUs = funkload.MonitorPluginsDefault:MonitorCUs',
'MemFree = funkload.MonitorPluginsDefault:MonitorMemFree',
'CPU = funkload.MonitorPluginsDefault:MonitorCPU',
'Network = funkload.MonitorPluginsDefault:MonitorNetwork',
]
},
# this test suite works only on an installed version :(
# test_suite = "funkload.tests.test_Install.test_suite",
)
| gpl-2.0 | 1,994,461,639,555,682,000 | 40.285714 | 102 | 0.615651 | false |
project-icp/bee-pollinator-app | src/icp/pollinator/src/pollinator/crop_yield.py | 1 | 9716 | from __future__ import division
from __future__ import print_function
from scipy.ndimage.filters import generic_filter
from collections import defaultdict
from raster_ops import extract, reclassify_from_data, geometry_mask, \
write_tif
import numpy as np
import csv
import math
import os
# Enableing DEBUG will write all intermediate rasters to disk
DEBUG = False
CUR_PATH = os.path.dirname(__file__)
DEFAULT_DATA_PATH = os.path.join(CUR_PATH, 'data/cdl_data_grouped.csv')
RASTER_PATH = '/opt/icp-crop-data/cdl_reclass_lzw_5070.tif'
SETTINGS = {}
ABUNDANCE_IDX = 0.1 # A constant for managing wild bee yield
CELL_SIZE = 30
FORAGE_DIST = 670
AG_CLASSES = [35, 29, 51, 27, 52, 17, 50, 49, 18, 20, 28, 48]
COVER_CROPS = {35: 53, 29: 54, 51: 55, 27: 56, 52: 57, 17: 58, 50: 59}
def initialize():
"""
Determine model settings which do not change between requests
"""
# Nesting and Floral suitability values per CDL crop type
nesting_reclass, floral_reclass, yield_config = load_crop_data()
max_dist = FORAGE_DIST * 2
    # Width of the square focal window in cells, covering the maximum
    # distance a bee can travel in every direction
radius = int(round(max_dist/CELL_SIZE)) * 2 + 1
window = np.ones(shape=(radius, radius))
dist_matrix = np.empty(shape=(radius, radius), dtype=np.float32)
focal_center = int(round(radius/2))
# Set cell values to their distance to center of focal window
for (i, j), _ in np.ndenumerate(dist_matrix):
x, y = i+1, j+1
dist_matrix[i, j] = math.sqrt(
((x-0.5) * CELL_SIZE - (focal_center-0.5) * CELL_SIZE)**2 +
((y-0.5) * CELL_SIZE - (focal_center-0.5) * CELL_SIZE)**2)
distances = dist_matrix.ravel()
effective_dist = np.exp(-distances / FORAGE_DIST)
# Where the effective distance > max forage distance, set 0
effective_dist[np.where(distances > max_dist)] = 0
sum_dist = np.sum(effective_dist)
    # These settings are valid across all requests and only need to
    # be computed once.
SETTINGS['effective_dist'] = effective_dist
SETTINGS['sum_dist'] = sum_dist
SETTINGS['window'] = window
SETTINGS['floral_reclass'] = floral_reclass
SETTINGS['nesting_reclass'] = nesting_reclass
SETTINGS['yield'] = yield_config
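# Illustration of the decay weights built in initialize() (rounded): at
# d = 0 the weight is exp(0) = 1.0, at d = FORAGE_DIST (670 m) it is
# exp(-1) ~= 0.37, and beyond 2 * FORAGE_DIST it is forced to 0.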
def load_crop_data(data_src=DEFAULT_DATA_PATH):
"""
Load the reclassification values for both floral and nesting attributes
from the CDL CSV.
"""
with open(data_src, mode='r') as cdl_data:
reader = csv.reader(cdl_data)
nesting_reclass = []
floral_reclass = []
yield_config = defaultdict(dict)
hf_idx = 3
hn_idx = 4
density_idx = 5
demand_idx = 2
id_idx = 0
next(reader, None) # Skip headers
for row in reader:
id = int(row[id_idx])
nesting_reclass.append([id, float(row[hn_idx])])
floral_reclass.append([id, float(row[hf_idx])])
yield_config[id]['demand'] = float(row[demand_idx])
yield_config[id]['density'] = float(row[density_idx])
return nesting_reclass, floral_reclass, yield_config
def focal_op(x):
"""
    Distance-weighted mean of the focal window: each cell is weighted by
    exp(-d / FORAGE_DIST) and normalised by the window's total weight.
"""
return np.sum(x * SETTINGS['effective_dist']/SETTINGS['sum_dist'])
def calc_abundance(cdl, affine, window, meta):
"""
Calculate farm abundance based on nesting and floral coefficients for
various crop types.
"""
# Create floral and nesting rasters derived from the CDL
fl_out = np.zeros(shape=cdl.shape, dtype=np.float32)
n_out = np.zeros(shape=cdl.shape, dtype=np.float32)
floral = reclassify_from_data(cdl, SETTINGS['floral_reclass'], fl_out)
nesting = reclassify_from_data(cdl, SETTINGS['nesting_reclass'], n_out)
# Create an abundance index based on forage and nesting indexes
# over the area a bee may travel
forage = generic_filter(floral, footprint=SETTINGS['window'],
function=focal_op)
source = forage * nesting
area_abundance = generic_filter(source, footprint=SETTINGS['window'],
function=focal_op)
if DEBUG:
write_tif('cdl', cdl, affine, window, meta)
write_tif('floral', floral, affine, window, meta)
write_tif('nesting', nesting, affine, window, meta)
write_tif('forage', forage, affine, window, meta)
write_tif('source', source, affine, window, meta)
write_tif('abundance', area_abundance, affine, window, meta)
return area_abundance
def yield_calc(crop_id, abundance, managed_hives, config):
"""
Determines the yield change due to landscape factors related to forage
and nesting suitability for wild bees and managed honey bee hives.
Calculate the yield for a single cell position based on values from
    the abundance calculation and the crop data layer.
Args:
crop_id (int): The cell value from the CLD raster
abundance(float): The cell value of abundance at the same position
as crop_id
managed_hives (float): Number of managed hives per acre implemented
config (dict): Crop specific configuration detailing `demand` the crop
places on bee pollination and the recommended `density` of hives
for that crop type
    Returns:
yield (float): The predicted yield for this cell position
"""
if crop_id not in config:
return 0
demand = config[crop_id]['demand']
rec_hives = config[crop_id]['density']
# Avoid division by 0 for crops which don't have a recommended density
hives_ratio = 0 if rec_hives == 0 else managed_hives/rec_hives
# Calculate the yield for managed honeybee, keeping a ceiling such
# that if more hives are used than recommended, yield remains at 1
yield_hb = (1 - demand) + demand * min(1, hives_ratio)
    # Determine the remaining yield to be had from wild bee abundance
yield_wild = (1 - yield_hb) * (abundance / (ABUNDANCE_IDX + abundance))
    # Determine total yield from all sources of bee pollination
return yield_hb + yield_wild
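# Worked example with invented numbers (ABUNDANCE_IDX is this module's
# half-saturation constant; 0.1 is assumed here purely for illustration):
#   demand=0.65, rec_hives=2.0, managed_hives=1.0  ->  hives_ratio = 0.5
#   yield_hb   = 0.35 + 0.65 * 0.5                  = 0.675
#   yield_wild = (1 - 0.675) * 0.1 / (0.1 + 0.1)    = 0.1625
#   total      = 0.675 + 0.1625                     = 0.8375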
def aggregate_crops(yield_field, cdl_field, crops=AG_CLASSES,
paired_crops=COVER_CROPS):
"""
    Within the unmasked field portion of the provided yield_field, average
    the yield quantities per ag type, resulting in a total yield increase per
    relevant crop type on the field, and report the yield in terms of average
    crop yield on a scale of 0-100.
    Args:
        yield_field (masked ndarray): The bee shed area of computed yield with
            a mask of the field applied.
        cdl_field (masked ndarray): The raw crop data layer corresponding to
            the same area covered in `yield_field` with a mask of the field
            applied
        crops (list<int>): Optional. The CDL class types to aggregate on,
            defaults to system specified list
        paired_crops (dict<int,int>): Optional. The CDL class types that have
            a crop they should aggregate with. Keys are the class types in
            `crops`; values are class types the keys pair with, defaults to
            system specified list
    Returns:
        dict<cdl_id, yield_avg>: A mapping of bee pollinated agricultural
            CDL crop types with the avg of their yield across the field
            portion of the yield data, reported on a 0-100 scale
"""
crop_yields = {}
field_mask = yield_field.mask.copy()
    # Average the yield for each crop type cell, by crop
for crop in crops:
# Create a mask for values that are not this crop type, (or, if it
# has a paired crop, its pair), and include
# the mask which is already applied to non-field areas of AoI
crop_mask = cdl_field != crop
if crop in paired_crops:
crop_mask = crop_mask & (cdl_field != paired_crops[crop])
cdl_mask = np.ma.masked_where(crop_mask, cdl_field).mask
crop_mask = np.ma.mask_or(field_mask, cdl_mask)
# Average the yield from this one crop only over the field
yield_field.mask = crop_mask
crop_yield = np.ma.mean(yield_field).item() * 100 or 0
crop_yields[str(crop)] = crop_yield
# Restore the original mask of just the field
yield_field.mask = field_mask
return crop_yields
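# Minimal illustration of the aggregation (values invented; the CDL mask is
# left empty here for brevity):
#   yf  = np.ma.masked_array([0.5, 0.7, 0.9], mask=[False, False, True])
#   cdl = np.ma.masked_array([1, 1, 5])
#   aggregate_crops(yf, cdl, crops=[1], paired_crops={})  # -> {'1': 60.0}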
def calculate(bee_shed_geom, field_geom, modifications, managed_hives,
raster_path=RASTER_PATH):
"""
Calculate the change in specific crop yield due to bee abundance
"""
# Read in the crop raster clipped to the bee shed geometry
cdl, affine, win, meta = extract(bee_shed_geom, raster_path, modifications)
# Determine pollinator abundance across the entire area
area_abundance = calc_abundance(cdl, affine, win, meta)
# Vectorize the yield function to allow paired element position input
# from the CDL, area abundance raster, plus user input and system config
total_yield = np.vectorize(yield_calc, otypes=[np.float16],
excluded=['managed_hives', 'config'])
# Determine yield change due to abundance and managed hives
yield_area = total_yield(cdl, area_abundance,
managed_hives=managed_hives,
config=SETTINGS['yield'])
# Mask the bee shed into just the delineated field
yield_field = geometry_mask(field_geom, yield_area, affine)
cdl_field = geometry_mask(field_geom, cdl, affine)
# Aggregate yield by agricultural cdl type on the field mask
return aggregate_crops(yield_field, cdl_field)
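# Hypothetical invocation (geometries, the raster path and the returned
# values are illustrative only):
#   calculate(bee_shed_geom, field_geom, modifications=[],
#             managed_hives=0.5, raster_path='cdl.tif')
#   # -> {'68': 74.2, '1': 61.8, ...}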
# Determine settings when module is loaded
if __name__ != '__main__':
initialize()
| apache-2.0 | -6,386,693,959,368,743,000 | 36.513514 | 79 | 0.654385 | false |
alexbiehl/SublimeLinter-stack-ghc | linter.py | 1 | 1410 | #
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Jon Surrell
# Copyright (c) 2013 Jon Surrell
#
# License: MIT
#
"""This module exports the Stack Ghc plugin class."""
from SublimeLinter.lint import Linter, util
from os.path import basename
class StackGhc(Linter):
"""Provides an interface to stack ghc."""
syntax = ('haskell', 'haskell-sublimehaskell', 'literate haskell')
cmd = ('stack', 'ghc', '--', '-fno-code', '-Wall', '-Wwarn', '-fno-helpful-errors')
regex = (
r'^(?P<filename>.+):'
r'(?P<line>\d+):(?P<col>\d+):'
r'\s+(?P<warning>Warning:\s+)?(?P<message>.+)$'
)
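    # An output line this regex is meant to capture looks roughly like
    # (example invented for illustration):
    #   src/Main.hs:12:8: Warning: Defaulting the following constraints ...
    # yielding filename='src/Main.hs', line='12', col='8',
    # warning='Warning: ' and the remainder as message.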
multiline = True
# No stdin
tempfile_suffix = {
'haskell': 'hs',
'haskell-sublimehaskell': 'hs',
'literate haskell': 'lhs'
}
# ghc writes errors to STDERR
error_stream = util.STREAM_STDERR
def split_match(self, match):
"""Override to ignore errors reported in imported files."""
match, line, col, error, warning, message, near = (
super().split_match(match)
)
match_filename = basename(match.groupdict()['filename'])
linted_filename = basename(self.filename)
if match_filename != linted_filename:
return None, None, None, None, None, '', None
return match, line, col, error, warning, message, near
| mit | -9,104,684,087,554,022,000 | 26.115385 | 87 | 0.600709 | false |
walshjon/openmc | openmc/capi/error.py | 1 | 1950 | from ctypes import c_int, c_char
from warnings import warn
from . import _dll
class OpenMCError(Exception):
"""Root exception class for OpenMC."""
class GeometryError(OpenMCError):
"""Geometry-related error"""
class InvalidIDError(OpenMCError):
"""Use of an ID that is invalid."""
class AllocationError(OpenMCError):
"""Error related to memory allocation."""
class OutOfBoundsError(OpenMCError):
"""Index in array out of bounds."""
class DataError(OpenMCError):
"""Error relating to nuclear data."""
class PhysicsError(OpenMCError):
"""Error relating to performing physics."""
class InvalidArgumentError(OpenMCError):
"""Argument passed was invalid."""
class InvalidTypeError(OpenMCError):
"""Tried to perform an operation on the wrong type."""
def _error_handler(err, func, args):
"""Raise exception according to error code."""
# Get error code corresponding to global constant.
def errcode(s):
return c_int.in_dll(_dll, s).value
# Get error message set by OpenMC library
errmsg = (c_char*256).in_dll(_dll, 'openmc_err_msg')
msg = errmsg.value.decode()
# Raise exception type corresponding to error code
if err == errcode('e_allocate'):
raise AllocationError(msg)
elif err == errcode('e_out_of_bounds'):
raise OutOfBoundsError(msg)
elif err == errcode('e_invalid_argument'):
raise InvalidArgumentError(msg)
elif err == errcode('e_invalid_type'):
raise InvalidTypeError(msg)
    elif err == errcode('e_invalid_id'):
raise InvalidIDError(msg)
elif err == errcode('e_geometry'):
raise GeometryError(msg)
elif err == errcode('e_data'):
raise DataError(msg)
elif err == errcode('e_physics'):
raise PhysicsError(msg)
elif err == errcode('e_warning'):
warn(msg)
elif err < 0:
raise OpenMCError("Unknown error encountered (code {}).".format(err))
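# Sketch of how a handler like this is typically attached to a foreign
# function through ctypes' errcheck hook (the function name below is
# illustrative):
#   _dll.openmc_simulation_init.restype = c_int
#   _dll.openmc_simulation_init.errcheck = _error_handler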
| mit | 8,463,138,787,818,959,000 | 25.351351 | 77 | 0.663077 | false |
vivaxy/algorithms | python/problems/validate_stack_sequences.py | 1 | 1434 | """
https://leetcode.com/problems/validate-stack-sequences/
https://leetcode.com/submissions/detail/218117451/
"""
from typing import List
class Solution:
def validateStackSequences(self, pushed: List[int], popped: List[int]) -> bool:
        # A sequence that pops in exactly the pushed order is trivially valid.
        if pushed == popped:
            return True
        a = []  # simulated stack
        # Replay the pushes, greedily popping whenever the stack top matches
        # the next expected popped value.
        while len(pushed):
            if len(a) == 0:
                a.append(pushed.pop(0))
            if popped[0] != a[-1]:
                a.append(pushed.pop(0))
            else:
                popped.pop(0)
                a.pop()
        if len(a) != len(popped):
            return False
        # Whatever remains on the stack must come off in exactly the
        # remaining popped order.
        while len(a):
            if a.pop() != popped.pop(0):
                return False
        return True
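# Illustrative trace (values invented): pushed=[1, 2], popped=[2, 1]
#   push 1 -> a=[1]; the next expected pop is 2, so push 2 -> a=[1, 2];
#   pushed is now empty, and 2 then 1 come off the stack in exactly the
#   remaining popped order, so the sequence validates as True.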
import unittest
class Test(unittest.TestCase):
def test(self):
solution = Solution()
self.assertEqual(solution.validateStackSequences(
[1, 2, 3, 4, 5],
[4, 5, 3, 2, 1]), True)
self.assertEqual(solution.validateStackSequences(
[1, 2, 3, 4, 5],
[4, 3, 5, 1, 2]), False)
self.assertEqual(solution.validateStackSequences(
[],
[]), True)
self.assertEqual(solution.validateStackSequences(
[1, 0],
[1, 0]), True)
self.assertEqual(solution.validateStackSequences(
[0, 2, 1],
[0, 1, 2]), True)
if __name__ == '__main__':
unittest.main()
| mit | 2,056,916,776,801,924,600 | 25.072727 | 83 | 0.505579 | false |
ojousima/asylum | project/ndaparser/admin.py | 1 | 2753 | from django.contrib import admin
from django.core.exceptions import PermissionDenied, ImproperlyConfigured
from django.conf.urls import url
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from django.shortcuts import get_object_or_404, render
from django.utils.text import capfirst
from django.conf import settings
from .views import NordeaUploadView
from creditor.admin import TransactionAdmin
from creditor.handlers import AbstractTransaction
from asylum.utils import get_handler_instance
class NordeaUploadMixin(object):
nda_change_list_template = "ndaparser/admin/change_list.html"
view_class = NordeaUploadView
def get_urls(self):
"""Returns the additional urls used by the uploader."""
urls = super().get_urls()
admin_site = self.admin_site
opts = self.model._meta
info = opts.app_label, opts.model_name,
my_urls = [
url("^nordea/upload/$", admin_site.admin_view(self.upload_view), name='%s_%s_ndaupload' % info),
]
return my_urls + urls
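    # The named URL registered above can be reversed elsewhere; for the
    # Transaction model this admin is mixed into, the name would look like
    # the following (app/model labels shown are assumptions):
    #   reverse('admin:creditor_transaction_ndaupload')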
def upload_view(self, request, extra_context=None):
"""Displays a form that can upload transactions form a Nordea "NDA" transaction file."""
# The revisionform view will check for change permission (via changeform_view),
# but we also need to check for add permissions here.
if not self.has_add_permission(request): # pragma: no cover
raise PermissionDenied
model = self.model
opts = model._meta
try:
each_context = self.admin_site.each_context(request)
        except TypeError:  # Django <= 1.7  # pragma: no cover
each_context = self.admin_site.each_context()
# Get the rest of the context.
context = dict(
each_context,
opts = opts,
app_label = opts.app_label,
module_name = capfirst(opts.verbose_name),
title = _("Upload Nordea transactions"),
transactions_handler = get_handler_instance('TRANSACTION_CALLBACKS_HANDLER')
)
context.update(extra_context or {})
view = self.view_class.as_view()
return view(request, context=context)
def changelist_view(self, request, extra_context=None):
context = dict(
orig_template = str(getattr(super(), 'change_list_template')),
)
context.update(extra_context or {})
self.change_list_template = self.nda_change_list_template
return super().changelist_view(request, context)
if settings.NORDEA_UPLOAD_ENABLED:
# Dynamically inject the mixin to transactions admin
TransactionAdmin.__bases__ = (NordeaUploadMixin, ) + TransactionAdmin.__bases__
| mit | 8,746,984,828,914,234,000 | 40.712121 | 108 | 0.668362 | false |
ArkaneMoose/BotBot | botbot/main.py | 1 | 3722 | import sys
import re
import json
import argparse
import euphoria as eu
from .botbot import BotBot
from . import euphutils
from . import snapshot
room_name = 'testing'
password = None
nickname = 'BotBot'
help_text = '''\
@BotBot is a bot for Euphoria created by @myhandsaretypingwords that creates
other bots.
Usage
================================================================================
Create a bot with @BotName with some code.
!createbot @BotName CODE
Same as the previous but specify the room to put the bot in.
!createbot &room @BotName CODE
List all the bots that are currently running and have been created by @BotBot.
!list @BotBot
Send a bot with the name @BotName to the specified room.
!sendbot &room @BotName
Kill a bot with the name @BotName.
!kill @BotName
Pause a bot with the name @BotName.
!pause @BotName
Kill all the bots created by @BotBot.
!killall @BotName
Take a snapshot of the state of @BotBot.
!save @BotBot
Load the latest snapshot.
!load @BotBot latest
Load a snapshot with a specific file name.
!load @BotBot FILENAME
Restart @BotBot.
!restart @BotBot
More Info
================================================================================
View the @BotBot wiki at https://github.com/ArkaneMoose/BotBot/wiki for a
comprehensive guide on how to use @BotBot, including a guide on how to write
@BotBot code and a list of features and restrictions that bots created with
@BotBot have.
Good luck!
================================================================================
Good luck on your journey to becoming a bot programmer.
If you need help, you can ask @myhandsaretypingwords, @nihizg, or any of the
other awesome Euphorians in &programming for help with any bot-related questions.
Have fun, and please be respectful!
@BotBot is open-source! Feel free to view the code, contribute, and report
issues at https://github.com/ArkaneMoose/BotBot.
@BotBot complies with the Euphorian bot standards.\
'''
short_help_text = '''\
@BotBot is a bot for Euphoria created by @myhandsaretypingwords that creates
other bots. Type "!help @BotBot" to learn more.\
'''
def main():
botbot = BotBot(room_name, password, nickname, help_text, short_help_text)
eu.executable.start(botbot)
def get_args():
parser = argparse.ArgumentParser(prog='botbot', description='A meta-bot for Euphoria.', epilog='For details, read the README.md file at https://github.com/ArkaneMoose/BotBot/blob/master/README.md')
parser.add_argument('config-file', nargs='?', help='optional path to a JSON configuration file')
parser.add_argument('-r', '--room', help='room in Euphoria where @BotBot should reside')
parser.add_argument('-p', '--password', help='password for room if necessary')
parser.add_argument('-n', '--nickname', help='custom nickname for @BotBot')
parser.add_argument('-s', '--snapshot-dir', help='directory where snapshots will be read and written')
return parser.parse_args()
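# A JSON configuration file for the optional positional argument might look
# like this (keys inferred from the lookups below; values are illustrative):
#   {
#       "room": "testing",
#       "nickname": "BotBot",
#       "snapshotDirectory": "/var/lib/botbot/snapshots"
#   }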
if __name__ == '__main__':
args = vars(get_args())
if args.get('config-file'):
with open(args.get('config-file')) as f:
config = json.load(f)
else:
config = {}
room_name = args['room'] or config.get('room', room_name)
password = args['password'] or config.get('password', password)
nickname = args['nickname'] or config.get('nickname', nickname)
help_text = config.get('helpText', help_text.replace('@BotBot', euphutils.mention(nickname)))
short_help_text = config.get('shortHelpText', short_help_text.replace('@BotBot', euphutils.mention(nickname)))
snapshot.snapshot_dir = args['snapshot_dir'] or config.get('snapshotDirectory', snapshot.snapshot_dir)
main()
| mit | -5,159,026,592,452,285,000 | 36.979592 | 201 | 0.671145 | false |
sirca/bdkd_datastore | datastore/tests/unit/bdkd/datastore/util/test_copy_move.py | 1 | 3291 | # Copyright 2015 Nicta
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding=utf-8
import unittest
import argparse
import os
# Load a custom configuration for unit testing
os.environ['BDKD_DATASTORE_CONFIG'] = os.path.join(
os.path.dirname(__file__), '..', '..', '..', 'conf', 'test.conf')
from bdkd.datastore.util import ds_util
FIXTURES = os.path.join(os.path.dirname(__file__),
'..', '..', '..', '..', 'fixtures')
class CopyMoveUtilitiesTest(unittest.TestCase):
def setUp(self):
self.filepath = os.path.join(FIXTURES, 'FeatureCollections', 'Coastlines',
'Seton_etal_ESR2012_Coastlines_2012.1.gpmlz')
self.parser = argparse.ArgumentParser()
subparser = self.parser.add_subparsers(dest='subcmd')
ds_util._create_subparsers(subparser)
def test_copy_same_repository_arguments(self):
args_in = [ 'copy', 'test-repository', 'from_resource', 'to_resource' ]
args = self.parser.parse_args(args_in)
self.assertTrue(args)
self.assertEquals(args.from_repository.name, 'test-repository')
self.assertEquals(args.from_resource_name, 'from_resource')
self.assertEquals(args.to_repository, None)
self.assertEquals(args.to_resource_name, 'to_resource')
def test_copy_across_repositories_arguments(self):
args_in = [ 'copy', 'test-repository', 'from_resource', 'test-repository',
'to_resource' ]
args = self.parser.parse_args(args_in)
self.assertTrue(args)
self.assertEquals(args.from_repository.name, 'test-repository')
self.assertEquals(args.from_resource_name, 'from_resource')
self.assertEquals(args.to_repository.name, 'test-repository')
self.assertEquals(args.to_resource_name, 'to_resource')
def test_move_same_repository_arguments(self):
args_in = [ 'move', 'test-repository', 'from_resource', 'to_resource' ]
args = self.parser.parse_args(args_in)
self.assertTrue(args)
self.assertEquals(args.from_repository.name, 'test-repository')
self.assertEquals(args.from_resource_name, 'from_resource')
self.assertEquals(args.to_repository, None)
self.assertEquals(args.to_resource_name, 'to_resource')
def test_move_across_repositories_arguments(self):
args_in = [ 'move', 'test-repository', 'from_resource', 'test-repository',
'to_resource' ]
args = self.parser.parse_args(args_in)
self.assertTrue(args)
self.assertEquals(args.from_repository.name, 'test-repository')
self.assertEquals(args.from_resource_name, 'from_resource')
self.assertEquals(args.to_repository.name, 'test-repository')
self.assertEquals(args.to_resource_name, 'to_resource')
| apache-2.0 | -4,339,567,989,079,044,000 | 40.658228 | 83 | 0.675175 | false |
HIIT/mediacollection | sites/helsinginuutiset.py | 1 | 1658 | # -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
import processor
from datetime import datetime
def parse( url ):
r = requests.get( url )
if r.status_code == 404:
return processor.create_dictionary('', url, r.status_code, [u''], [u''], u'', u'', u'', u'', [u''], [u''])
r.encoding = 'UTF-8'
soup = BeautifulSoup( r.text, "html.parser" )
article = soup.find( 'article' )
if article == None:
return processor.create_dictionary('', url, r.status_code, [u''], [u''], u'', u'', u'', u'', [u''], [u''])
processor.decompose_all( article.find_all( 'script' ) )
departments = article.find( class_ = 'field-name-field-department-tref' )
categories = processor.collect_categories( departments.find_all( 'a' ) )
datetime_list = processor.collect_datetime( article.find( class_ = 'field-name-post-date' ) )
author = article.find( class_ = 'author' )
if author != None:
processor.decompose( author.find( class_ = 'img' ) )
author = processor.collect_text( author.find( 'h3' ) )
else:
author = u''
title = processor.collect_text( article.find( 'h1' ) )
text = processor.collect_text( article.find( class_ = 'field field-name-body' ) )
images = processor.collect_images_by_parent( article.find_all( class_ = 'img' ), '')
captions = processor.collect_image_captions( article.find_all( class_ = 'caption' ) )
return processor.create_dictionary('Helsingin uutiset', url, r.status_code, categories, datetime_list, author, title, u'', text, images, captions)
if __name__ == '__main__':
parse("http://www.helsinginuutiset.fi/artikkeli/433833-arvio-15-000-ihmista-saa-tana-vuonna-tyopaikan-kunnasta-tarvetta-etenkin")
| mit | 8,901,220,503,216,206,000 | 36.681818 | 147 | 0.670084 | false |
flrvm/cobratoolbox | .github/github_stats.py | 1 | 1459 | from github import Github
g = Github("cobrabot", "dd31ac21736aeeaeac764ce1192c17e370679a25")
cobratoolbox = g.get_user("opencobra").get_repo("cobratoolbox")
contributors = {}
for contributor in cobratoolbox.get_stats_contributors():
a = 0
d = 0
c = 0
for week in contributor.weeks:
a += week.a
d += week.d
c += week.c
contributors[contributor.author.login] = {
'additions': a, 'deletions': d, 'commits': c, 'avatar': contributor.author.avatar_url}
print "name: %20s, additions: %10d, deletions: %10d, commits: %10d" % (contributor.author.login, a, d, c)
sorted_by_commits = sorted(contributors.items(), key=lambda x: x[1]['commits'])
table = '\n.. raw:: html\n\n <table style="margin:0px auto" width="100%">'
for k in range(0, 5):
table += """\n
<tr>
<td width="46px"><img src="%s" width=46 height=46 alt=""></td><td><a href="https://github.com/%s">%s</a></td>
<td width="46px"><img src="%s" width=46 height=46 alt=""></td><td><a href="https://github.com/%s">%s</a></td>
</tr>""" % (sorted_by_commits[-(2 * k + 1)][1]['avatar'], sorted_by_commits[-(2 * k + 1)][0], sorted_by_commits[-(2 * k + 1)][0],
sorted_by_commits[-(2 * (k + 1))][1]['avatar'], sorted_by_commits[-(2 * (k + 1))][0], sorted_by_commits[-(2 * (k + 1))][0])
table += "\n </table>"
with open("docs/source/contributors.rst", "w") as readme:
readme.write(table)
| gpl-3.0 | 1,395,502,001,221,579,800 | 40.685714 | 137 | 0.58122 | false |
marios-zindilis/musicbrainz-django-models | _build/model.py | 1 | 3430 | #!/usr/bin/env python3
import sys
try:
MODEL_NAME = sys.argv[1]
except IndexError:
print('Model Name Not Provided')
exit(1)
MODEL_NAME_TITLE = MODEL_NAME.title().replace('_', ' ')
MODEL = 'musicbrainz_django_models/models/{}.py'.format(MODEL_NAME)
INIT = 'musicbrainz_django_models/models/__init__.py'
SQL = '_etc/CreateTables.sql'
SQL_EXISTS = False
SQL_TABLE = []
SQL_TABLE_INCLUDES_ID = False
SQL_TABLE_INCLUDES_GID = False
IMPORTS = [
'from django.db import models',
'from django.utils.encoding import python_2_unicode_compatible',
]
FIELDS = []
GID_DOC = ''
MODELS = []
MODEL_TEMPLATE = '''"""
.. module:: {MODEL_NAME}
The **{MODEL_NAME_TITLE}** Model.
PostgreSQL Definition
---------------------
The :code:`{MODEL_NAME}` table is defined in the MusicBrainz Server as:
.. code-block:: sql
{SQL_TABLE}
"""
{IMPORTS}
@python_2_unicode_compatible
class {MODEL_NAME}(models.Model):
"""
Not all parameters are listed here, only those that present some interest
in their Django implementation.
{GID_DOC}
"""
{FIELDS}
def __str__(self):
return self.name
class Meta:
db_table = '{MODEL_NAME}'
'''
with open(SQL, 'r') as sql:
for line in sql:
if (
line.startswith('CREATE TABLE {} '.format(MODEL_NAME)) or
line == 'CREATE TABLE {}\n'.format(MODEL_NAME)
):
SQL_EXISTS = True
break
if not SQL_EXISTS:
print('CREATE TABLE {} Not Found'.format(MODEL_NAME))
exit(1)
with open(SQL, 'r') as sql:
SQL_TABLE_CAPTURE = False
for line in sql:
if (
line.startswith('CREATE TABLE {} '.format(MODEL_NAME)) or
line == 'CREATE TABLE {}\n'.format(MODEL_NAME)
):
SQL_TABLE_CAPTURE = True
if SQL_TABLE_CAPTURE and line.startswith(');'):
SQL_TABLE.append(line)
SQL_TABLE_CAPTURE = False
break
if SQL_TABLE_CAPTURE:
if not SQL_TABLE_INCLUDES_ID:
SQL_TABLE_INCLUDES_ID = ' serial,' in line.lower()
if not SQL_TABLE_INCLUDES_GID:
SQL_TABLE_INCLUDES_GID = ' uuid ' in line.lower()
SQL_TABLE.append(line)
if SQL_TABLE_INCLUDES_ID:
FIELDS.append(' id = models.AutoField(primary_key=True)')
if SQL_TABLE_INCLUDES_GID:
IMPORTS.append('import uuid')
FIELDS.append(' gid = models.UUIDField(default=uuid.uuid4)')
GID_DOC = """:param gid: this is interesting because it cannot be NULL but a default is
not defined in SQL. The default `uuid.uuid4` in Django will generate a
UUID during the creation of an instance."""
with open(MODEL, 'w') as model:
model.write(MODEL_TEMPLATE.format(
MODEL_NAME=MODEL_NAME,
MODEL_NAME_TITLE=MODEL_NAME_TITLE,
SQL_TABLE=' '.join(SQL_TABLE),
IMPORTS='\n'.join(IMPORTS),
FIELDS='\n'.join(FIELDS),
GID_DOC=GID_DOC
))
with open(INIT, 'r') as init:
MODELS = [line.split()[-1] for line in init if line.startswith('from ')]
MODELS.append(MODEL_NAME)
with open(INIT, 'w') as init:
for mod in MODELS:
init.write('from .{mod} import {mod}\n'.format(mod=mod))
init.write('\n')
init.write('# __all__ silences PEP8 `module imported but unused`:\n')
init.write('__all__ = [\n')
for mod in MODELS:
init.write(' {mod},\n'.format(mod=mod))
init.write(']\n')
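# Typical invocation from the repository root (the model name is just an
# example): ./model.py artist_alias_type
# This writes musicbrainz_django_models/models/artist_alias_type.py from
# MODEL_TEMPLATE and re-registers every model in models/__init__.py.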
| gpl-2.0 | -6,831,984,093,304,596,000 | 26.66129 | 91 | 0.6 | false |