Dataset columns:

| Column | Type | Range / classes |
|---|---|---|
| repo_name | string | lengths 6–61 |
| path | string | lengths 4–230 |
| copies | string | lengths 1–3 |
| size | string | lengths 4–6 |
| text | string | lengths 1.01k–850k |
| license | string | 15 classes |
| hash | int64 | -9,220,477,234,079,998,000 to 9,219,060,020B |
| line_mean | float64 | 11.6–96.6 |
| line_max | int64 | 32–939 |
| alpha_frac | float64 | 0.26–0.9 |
| autogenerated | bool | 1 class |
| ratio | float64 | 1.62–6.1 |
| config_test | bool | 2 classes |
| has_no_keywords | bool | 2 classes |
| few_assignments | bool | 1 class |
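Each row below stores one source file in the `text` column; the remaining columns appear as a `|`-separated line after each file's text. As a rough sketch of how rows with this schema could be loaded and filtered (pandas and the file name `data.parquet` are assumptions, since the storage format is not stated here):

```python
import pandas as pd

# Hypothetical export of this dataset; substitute the real path/format.
df = pd.read_parquet("data.parquet")

# Example filter: permissively licensed, human-written files with moderate line lengths.
subset = df[
    df["license"].isin(["mit", "apache-2.0"])
    & ~df["autogenerated"]
    & (df["line_max"] < 200)
]
print(subset[["repo_name", "path", "size"]].head())
```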
19po/rtl102.5-playlist | rtl1025-playlist.py | 1 | 2354 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
# ------------------ #
# RTL 102.5 info #
# ------------------ #
import urllib
from xml.dom.minidom import parse
import re
import json
def uni(s):
"""
Decode text.
"""
ascii_char = re.findall(r'\[e\]\[c\](\d+)\[p\]', s)
other_char = re.findall(r'\[[a-z]\]+', s)
# replace each bracketed numeric code with its corresponding character
for char in ascii_char:
if char in s:
s = s.replace(char , unichr(int(char)))
# find and remove [*]
for char in other_char:
if char in s:
s = s.replace(char , '')
return s
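# Illustration only (the sample string is made up, not taken from the real RTL feed): the numeric
# code inside "[e][c]...[p]" is replaced by its character and the bracketed single-letter tags are
# stripped, so uni("L[e][c]39[p]Amore") returns "L'Amore" (39 is the code point of the apostrophe).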
def get_info():
"""
Get information.
"""
# check if VLC is turned on
try:
urllib.urlretrieve('http://127.0.0.1:8080/requests/status.xml', '/tmp/info.xml')
except IOError:
print 'VLC is closed.'
return
# replace the HTML-escaped entities (&lt;, &gt;) with real angle brackets so the data parses as XML
with open('/tmp/info.xml', 'r') as fr, open('/tmp/info2.xml', 'w') as fw:
z = ['&lt;', '&gt;']
x = ['<', '>']
for line in fr.readlines():
for i in range(len(z)):
if z[i] in line:
line = line.replace(z[i], x[i])
fw.write(line)
# open xml file, get information and make json file
with open('/tmp/info2.xml', 'r') as fr, open('rtl1025-playlist.json', 'w') as fw:
dom = parse(fr)
cnodes = dom.childNodes
info_dict = {"program_title":"", "speakers":"", "program_image":"",
"artist_name":"", "song_title":"", "song_cover":""}
try:
info_dict["program_title"] = uni(cnodes[0].\
getElementsByTagName('prg_title')[0].firstChild.data)
info_dict["speakers"] = uni(cnodes[0].\
getElementsByTagName('speakers')[0].firstChild.data)
info_dict["program_image"] = cnodes[0].\
getElementsByTagName('image400')[0].firstChild.data
info_dict["artist_name"] = uni(cnodes[0].\
getElementsByTagName('mus_art_name')[0].firstChild.data)
info_dict["song_title"] = uni(cnodes[0].\
getElementsByTagName('mus_sng_title')[0].firstChild.data)
info_dict["song_cover"] = cnodes[0].\
getElementsByTagName('mus_sng_itunescoverbig')[0].firstChild.data
except (IndexError, AttributeError):
pass
# write info_dict to the json file
fw.write(json.dumps(info_dict))
# display data
with open('rtl1025-playlist.json', 'r') as fw:
j = json.load(fw)
for k, v in j.iteritems():
print "{:15}{:2}{:1}".format(k, ":", v.encode('utf-8'))
if __name__ == '__main__':
get_info()
| mit | 7,107,818,560,704,207,000 | 24.586957 | 82 | 0.601529 | false | 2.805721 | false | false | false |
crossbario/crossbar-examples | django/realtimemonitor/django_project/settings.py | 1 | 2805 | """
Django settings for django_project project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '75dlm!mxn6a$_wa-1kti3_u(_97-hya!ov@8=rcdk364#cy^9g'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'),],
'APP_DIRS': True,
# 'TEMPLATE_DEBUG': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
}]
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_app',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'django_project.urls'
WSGI_APPLICATION = 'django_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
# STATICFILES_DIRS = (
# os.path.join(BASE_DIR, "static"),
# os.path.abspath(os.path.join(BASE_DIR, "../../_shared-web-resources/autobahn")),
# )
# STATIC_URL = '/static/'
# TEMPLATE_DIRS = ['/templates/',]
STATIC_URL = '/static/'
STATIC_ROOT = "/vagrant/static/"
| apache-2.0 | 6,791,827,221,270,810,000 | 25.971154 | 86 | 0.694118 | false | 3.32346 | false | false | false |
whtsky/WeRoBot | werobot/crypto/__init__.py | 1 | 4581 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import base64
import socket
import struct
import time
try:
from cryptography.hazmat.primitives.ciphers import (
Cipher, algorithms, modes
)
from cryptography.hazmat.backends import default_backend
except ImportError: # pragma: no cover
raise RuntimeError("You need to install Cryptography.") # pragma: no cover
from . import pkcs7
from .exceptions import (
UnvalidEncodingAESKey, AppIdValidationError, InvalidSignature
)
from werobot.utils import (
to_text, to_binary, generate_token, byte2int, get_signature
)
class PrpCrypto(object):
"""
Provides the encryption/decryption interface for messages received from and pushed to the WeChat official account platform.
"""
def __init__(self, key):
key = to_binary(key)
self.cipher = Cipher(
algorithms.AES(key),
modes.CBC(key[:16]),
backend=default_backend()
)
def get_random_string(self):
"""
:return: a random string of length 16
"""
return generate_token(16)
def encrypt(self, text, app_id):
"""
Encrypt the plaintext.
:param text: the plaintext to encrypt
:param app_id: the AppID of the WeChat official account platform
:return: the encrypted string
"""
text = b"".join(
[
to_binary(self.get_random_string()),
struct.pack(b"I", socket.htonl(len(to_binary(text)))),
to_binary(text),
to_binary(app_id)
]
)
text = pkcs7.encode(text)
encryptor = self.cipher.encryptor()
ciphertext = to_binary(encryptor.update(text) + encryptor.finalize())
return base64.b64encode(ciphertext)
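# The buffer assembled above (before PKCS#7 padding, AES-CBC encryption and base64 encoding)
# is laid out as:
#
#   16 random bytes | 4-byte network-order length of text | text | app_id
#
# This mirrors the WeChat platform's message-encryption scheme; the random prefix keeps
# identical plaintexts from producing identical ciphertexts.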
def decrypt(self, text, app_id):
"""
Decrypt the ciphertext.
:param text: the ciphertext to decrypt
:param app_id: the AppID of the WeChat official account platform
:return: the decrypted string
"""
text = to_binary(text)
decryptor = self.cipher.decryptor()
plain_text = decryptor.update(base64.b64decode(text)
) + decryptor.finalize()
padding = byte2int(plain_text, -1)
content = plain_text[16:-padding]
xml_len = socket.ntohl(struct.unpack("I", content[:4])[0])
xml_content = content[4:xml_len + 4]
from_appid = content[xml_len + 4:]
if to_text(from_appid) != app_id:
raise AppIdValidationError(text, app_id)
return xml_content
class MessageCrypt(object):
ENCRYPTED_MESSAGE_XML = """
<xml>
<Encrypt><![CDATA[{encrypt}]]></Encrypt>
<MsgSignature><![CDATA[{signature}]]></MsgSignature>
<TimeStamp>{timestamp}</TimeStamp>
<Nonce><![CDATA[{nonce}]]></Nonce>
</xml>
""".strip()
def __init__(self, token, encoding_aes_key, app_id):
key = base64.b64decode(to_binary(encoding_aes_key + '='))
if len(key) != 32:
raise UnvalidEncodingAESKey(encoding_aes_key)
self.prp_crypto = PrpCrypto(key)
self.token = token
self.app_id = app_id
def decrypt_message(self, timestamp, nonce, msg_signature, encrypt_msg):
"""
Decrypt a received WeChat message.
:param timestamp: the timestamp received in the request URL
:param nonce: the nonce received in the request URL
:param msg_signature: the msg_signature received in the request URL
:param encrypt_msg: the received encrypted text (the <Encrypt> part of the XML)
:return: the decrypted XML text
"""
signature = get_signature(self.token, timestamp, nonce, encrypt_msg)
if signature != msg_signature:
raise InvalidSignature(msg_signature)
return self.prp_crypto.decrypt(encrypt_msg, self.app_id)
def encrypt_message(self, reply, timestamp=None, nonce=None):
"""
Encrypt a WeChat reply.
:param reply: the reply to be encrypted
:type reply: WeChatReply or XML text
:return: the encrypted reply text
"""
if hasattr(reply, "render"):
reply = reply.render()
timestamp = timestamp or to_text(int(time.time()))
nonce = nonce or generate_token(5)
encrypt = to_text(self.prp_crypto.encrypt(reply, self.app_id))
signature = get_signature(self.token, timestamp, nonce, encrypt)
return to_text(
self.ENCRYPTED_MESSAGE_XML.format(
encrypt=encrypt,
signature=signature,
timestamp=timestamp,
nonce=nonce
)
)
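# Rough usage sketch (the placeholder values below are not part of this module):
#
#     crypt = MessageCrypt(token='TOKEN', encoding_aes_key='<43-char EncodingAESKey>', app_id='APPID')
#     xml = crypt.decrypt_message(timestamp, nonce, msg_signature, encrypt_msg)
#     reply_xml = crypt.encrypt_message(reply)
#
# The EncodingAESKey must be the 43-character key configured on the WeChat platform: the
# constructor appends '=' and base64-decodes it, rejecting anything that is not 32 bytes long.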
| mit | 7,696,962,301,549,961,000 | 28.701389 | 79 | 0.581015 | false | 3.264885 | false | false | false |
maestromusic/maestro | maestro/plugins/audiocd/gui.py | 1 | 21270 | # -*- coding: utf-8 -*-
# Maestro Music Manager - https://github.com/maestromusic/maestro
# Copyright (C) 2013-2015 Martin Altmayer, Michael Helmling
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import itertools
import os.path
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QDialogButtonBox
from maestro import config, utils
from maestro.core import levels, tags, domains, urls
from maestro.core.elements import ContainerType
from maestro.gui import actions, dialogs, delegates, mainwindow, tagwidgets, treeview
from maestro.gui.delegates.abstractdelegate import *
from maestro.models import leveltreemodel
from maestro.plugins.musicbrainz import plugin as mbplugin, xmlapi, elements
translate = QtCore.QCoreApplication.translate
class ImportAudioCDAction(actions.TreeAction):
label = translate('ImportAudioCDAction', 'Rip audio CD ...')
ripper = None
@staticmethod
def _getRelease(theDiscid):
releases = xmlapi.findReleasesForDiscid(theDiscid)
if len(releases) > 1:
dialog = ReleaseSelectionDialog(releases, theDiscid)
if dialog.exec_():
return dialog.selectedRelease
else:
return None
else:
return releases[0]
@staticmethod
def askForDiscId():
"""Asks the user for a CD-ROM device to use.
:returns: Three-tuple of the *device*, *disc id*, and number of tracks.
"""
import discid
device, ok = QtWidgets.QInputDialog.getText(
mainwindow.mainWindow,
translate('AudioCD Plugin', 'Select device'),
translate('AudioCD Plugin', 'CDROM device:'),
QtWidgets.QLineEdit.Normal,
discid.get_default_device())
if not ok:
return None
try:
with discid.read(device) as disc:
disc.read()
except discid.disc.DiscError as e:
dialogs.warning(translate("AudioCD Plugin", "CDROM drive is empty"), str(e))
return None
return device, disc.id, len(disc.tracks)
def doAction(self):
# device, theDiscid, trackCount = '/dev/sr0', 'qx_MV1nqkljh.L37bA_rgVoyAgU-', 3
ans = self.askForDiscId()
if ans is None:
return
device, theDiscid, trackCount = ans
from . import ripper
self.ripper = ripper.Ripper(device, theDiscid)
if config.options.audiocd.earlyrip:
self.ripper.start()
try:
release = self._getRelease(theDiscid)
if release is None:
return
progress = dialogs.WaitingDialog("Querying MusicBrainz", "please wait", False)
progress.open()
def callback(url):
progress.setText(self.tr("Fetching data from:\n{}").format(url))
QtWidgets.qApp.processEvents()
xmlapi.queryCallback = callback
xmlapi.fillReleaseForDisc(release, theDiscid)
progress.close()
xmlapi.queryCallback = None
QtWidgets.qApp.processEvents()
stack = self.level().stack.createSubstack(modalDialog=True)
level = levels.Level("audiocd", self.level(), stack=stack)
dialog = ImportAudioCDDialog(level, release)
if dialog.exec_():
model = self.parent().model()
model.insertElements(model.root, len(model.root.contents), [dialog.container])
if not config.options.audiocd.earlyrip:
self.ripper.start()
stack.close()
except xmlapi.UnknownDiscException:
dialog = SimpleRipDialog(theDiscid, trackCount, self.level())
if dialog.exec_():
if not config.options.audiocd.earlyrip:
self.ripper.start()
self.level().stack.beginMacro(self.tr("Load Audio CD"))
model = self.parent().model()
model.insertElements(model.root, len(model.root.contents), [dialog.container])
self.level().stack.endMacro()
except ConnectionError as e:
dialogs.warning(self.tr('Error communicating with MusicBrainz'), str(e))
if 'progress' in locals():
progress.close()
class ReleaseSelectionDialog(QtWidgets.QDialog):
def __init__(self, releases, discid):
super().__init__(mainwindow.mainWindow)
self.setModal(True)
lay = QtWidgets.QVBoxLayout()
lay.addWidget(QtWidgets.QLabel(self.tr('Select release:')))
for release in releases:
text = ""
if len(release.children) > 1:
pos, medium = release.mediumForDiscid(discid)
text = "[Disc {}: '{}' of {} in] ".format(pos, medium, len(release.children))
text += release.tags["title"][0] + "\nby {}".format(release.tags["artist"][0])
if "date" in release.tags:
text += "\nreleased {}".format(release.tags["date"][0])
if "country" in release.tags:
text += " ({})".format(release.tags["country"][0])
if "barcode" in release.tags:
text += ", barcode={}".format(release.tags["barcode"][0])
but = QtWidgets.QPushButton(text)
but.release = release
but.setStyleSheet("text-align: left")
but.clicked.connect(self._handleClick)
lay.addWidget(but)
btbx = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Cancel)
btbx.rejected.connect(self.reject)
lay.addWidget(btbx)
self.setLayout(lay)
def _handleClick(self):
self.selectedRelease = self.sender().release
self.accept()
class CDROMDelegate(delegates.StandardDelegate):
def __init__(self, view):
self.profile = delegates.profiles.DelegateProfile("cdrom")
self.profile.options['appendRemainingTags'] = True
self.profile.options['showPaths'] = True
self.profile.options['showType'] = True
super().__init__(view, self.profile)
def getUrlWarningItem(self, wrapper):
element = wrapper.element
from . import plugin
if element.isFile() and element.url.scheme == 'audiocd':
tracknr = plugin.parseNetloc(element.url)[1]
return TextItem(self.tr('[Track {:2d}]').format(tracknr),
DelegateStyle(bold=True, color=Qt.blue))
return super().getUrlWarningItem(wrapper)
class AliasComboDelegate(QtWidgets.QStyledItemDelegate):
def __init__(self, box, parent=None):
super().__init__(parent)
self.box = box
def paint(self, painter, option, index):
alias = self.box.entity.aliases[index.row()]
if alias.primary:
option.font.setBold(True)
super().paint(painter, option, index)
option.font.setBold(False)
class AliasComboBox(QtWidgets.QComboBox):
aliasChanged = QtCore.pyqtSignal(object)
def __init__(self, entity, sortNameItem):
super().__init__()
self.addItem(entity.aliases[0].name)
self.entity = entity
self.setEditable(True)
self.setEditText(entity.name)
self.sortNameItem = sortNameItem
self.setItemDelegate(AliasComboDelegate(self))
self.activated.connect(self._handleActivate)
self.editTextChanged.connect(self._handleEditTextChanged)
def showPopup(self):
if not self.entity.loaded:
self.entity.loadAliases()
for alias in self.entity.aliases[1:]:
self.addItem(alias.name)
if alias.locale:
self.setItemData(self.count() - 1,
("primary " if alias.primary else "") + \
"alias for locale {}".format(alias.locale),
Qt.ToolTipRole)
QtWidgets.qApp.processEvents()
return super().showPopup()
def _handleActivate(self, index):
alias = self.entity.aliases[index]
sortname = alias.sortName
self.sortNameItem.setText(sortname)
if self.currentText() != self.entity.name:
self.entity.selectAlias(index)
self.aliasChanged.emit(self.entity)
def _handleEditTextChanged(self, text):
self.entity.name = text
self.aliasChanged.emit(self.entity)
class AliasWidget(QtWidgets.QTableWidget):
"""
TODO: use sort names!
"""
aliasChanged = QtCore.pyqtSignal(object)
def __init__(self, entities):
super().__init__()
self.entities = sorted(entities, key=lambda ent: "".join(sorted(ent.asTag)))
self.columns = [self.tr("Roles"),
self.tr("WWW"),
self.tr("Name"),
self.tr("Sort-Name")]
self.setColumnCount(len(self.columns))
self.verticalHeader().hide()
self.setHorizontalHeaderLabels(self.columns)
self.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
self.horizontalHeader().setStretchLastSection(True)
self.setRowCount(len(self.entities))
self.cellChanged.connect(self._handleCellChanged)
for row, ent in enumerate(self.entities):
label = QtWidgets.QTableWidgetItem(", ".join(ent.asTag))
label.setFlags(Qt.ItemIsEnabled)
self.setItem(row, 0, label)
label = QtWidgets.QLabel('<a href="{}">{}</a>'.format(ent.url(), self.tr("lookup")))
label.setToolTip(ent.url())
label.setOpenExternalLinks(True)
self.setCellWidget(row, 1, label)
sortNameItem = QtWidgets.QTableWidgetItem(ent.sortName)
combo = AliasComboBox(ent, sortNameItem)
combo.aliasChanged.connect(self.aliasChanged)
self.setCellWidget(row, 2, combo)
self.setItem(row, 3, sortNameItem)
def activeEntities(self):
entities = []
for row, ent in enumerate(self.entities):
if self.cellWidget(row, 2).isEnabled():
entities.append(ent)
return entities
def updateDisabledTags(self, mapping):
for row, ent in enumerate(self.entities):
state = not all((val in mapping and mapping[val] is None) for val in ent.asTag)
for col in range(self.columnCount()):
item = self.item(row, col)
if item:
if state:
item.setFlags(item.flags() | Qt.ItemIsEnabled)
else:
item.setFlags(item.flags() ^ Qt.ItemIsEnabled)
else:
widget = self.cellWidget(row, col)
widget.setEnabled(state)
def _handleCellChanged(self, row, col):
if col != 3:
return
self.entities[row].sortName = self.item(row, col).text()
class TagMapWidget(QtWidgets.QTableWidget):
tagConfigChanged = QtCore.pyqtSignal(dict)
def __init__(self, newtags):
super().__init__()
self.columns = [self.tr("Import"), self.tr("MusicBrainz Name"), self.tr("Maestro Tag")]
self.setColumnCount(len(self.columns))
self.verticalHeader().hide()
self.setHorizontalHeaderLabels(self.columns)
self.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
self.setRowCount(len(newtags))
self.tagMapping = mbplugin.tagMap.copy()
from ...gui.tagwidgets import TagTypeBox
for row, tagname in enumerate(newtags):
if tagname in self.tagMapping:
tag = self.tagMapping[tagname]
else:
tag = tags.get(tagname)
checkbox = QtWidgets.QTableWidgetItem()
ttBox = TagTypeBox(tag, editable=True)
ttBox.tagChanged.connect(self._handleTagTypeChanged)
mbname = QtWidgets.QTableWidgetItem(tagname)
self.setCellWidget(row, 2, ttBox)
if tag is None:
checkbox.setCheckState(Qt.Unchecked)
ttBox.setEnabled(False)
mbname.setFlags(mbname.flags() ^ Qt.ItemIsEnabled)
else:
checkbox.setCheckState(Qt.Checked)
self.tagMapping[tagname] = tag
mbname.setFlags(Qt.ItemIsEnabled)
self.setItem(row, 0, checkbox)
self.setItem(row, 1, mbname)
self.cellChanged.connect(self._handleCellChange)
def _handleCellChange(self, row, col):
if col != 0:
return
state = self.item(row, 0).checkState() == Qt.Checked
item = self.item(row, 1)
if state:
item.setFlags(item.flags() | Qt.ItemIsEnabled)
self.tagMapping[item.text()] = self.cellWidget(row, 2).getTag()
else:
item.setFlags(item.flags() ^ Qt.ItemIsEnabled)
self.tagMapping[item.text()] = None
self.cellWidget(row, 2).setEnabled(state)
self.tagConfigChanged.emit(self.tagMapping)
def _handleTagTypeChanged(self, tag):
for row in range(self.rowCount()):
if self.cellWidget(row, 2) is self.sender():
break
self.tagMapping[self.item(row, 1).text()] = tag
self.tagConfigChanged.emit(self.tagMapping)
class ImportAudioCDDialog(QtWidgets.QDialog):
"""The main dialog of this plugin, which is used for adding audioCDs to the editor.
Shows the container structure obtained from musicbrainz and allows to configure alias handling
and some other options.
"""
def __init__(self, level, release):
super().__init__(mainwindow.mainWindow)
self.setModal(True)
self.level = level
self.mbNode = elements.MBNode(release)
self.release = release
self.maestroModel = leveltreemodel.LevelTreeModel(level)
self.maestroView = treeview.TreeView(level, affectGlobalSelection=False)
self.maestroView.setModel(self.maestroModel)
self.maestroView.setItemDelegate(CDROMDelegate(self.maestroView))
# collect alias entities in this release
entities = set()
for item in release.walk():
if not item.ignore:
entities.update(val for val in itertools.chain.from_iterable(item.tags.values())
if isinstance(val, xmlapi.AliasEntity))
self.aliasWidget = AliasWidget(entities)
self.aliasWidget.aliasChanged.connect(self.makeElements)
self.newTagWidget = TagMapWidget(release.collectExternalTags())
self.newTagWidget.tagConfigChanged.connect(self.aliasWidget.updateDisabledTags)
self.newTagWidget.tagConfigChanged.connect(self.makeElements)
configLayout = QtWidgets.QVBoxLayout()
self.searchReleaseBox = QtWidgets.QCheckBox(self.tr('search for existing release'))
self.searchReleaseBox.setChecked(True)
self.searchReleaseBox.stateChanged.connect(self.makeElements)
configLayout.addWidget(self.searchReleaseBox)
self.mediumContainerBox = QtWidgets.QCheckBox(self.tr('add containers for discs'))
self.mediumContainerBox.stateChanged.connect(self.makeElements)
self.forceBox = QtWidgets.QCheckBox(self.tr('...even without title'))
self.forceBox.stateChanged.connect(self.makeElements)
configLayout.addWidget(self.mediumContainerBox)
configLayout.addWidget(self.forceBox)
btbx = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
btbx.accepted.connect(self.finalize)
btbx.rejected.connect(self.reject)
lay = QtWidgets.QVBoxLayout()
topLayout = QtWidgets.QHBoxLayout()
topLayout.addLayout(configLayout)
topLayout.addWidget(self.maestroView)
lay.addLayout(topLayout, stretch=5)
lay.addWidget(QtWidgets.QLabel(self.tr("Alias handling:")))
lay.addWidget(self.aliasWidget, stretch=2)
lay.addWidget(QtWidgets.QLabel(self.tr("New tagtypes:")))
lay.addWidget(self.newTagWidget, stretch=1)
lay.addWidget(btbx, stretch=1)
self.setLayout(lay)
self.makeElements()
self.resize(mainwindow.mainWindow.size() * 0.9)
def makeElements(self, *args, **kwargs):
self.maestroModel.clear()
self.level.removeElements(list(self.level.elements.values()))
elemConfig = elements.ElementConfiguration(self.newTagWidget.tagMapping)
elemConfig.searchRelease = self.searchReleaseBox.isChecked()
elemConfig.mediumContainer = self.mediumContainerBox.isChecked()
elemConfig.forceMediumContainer = self.forceBox.isChecked()
self.container = self.release.makeElements(self.level, elemConfig)
self.maestroModel.insertElements(self.maestroModel.root, 0, [self.container])
def finalize(self):
mbplugin.updateDBAliases(self.aliasWidget.activeEntities())
for mbname, maestroTag in self.newTagWidget.tagMapping.items():
config.storage.musicbrainz.tagmap[mbname] = maestroTag.name if maestroTag else None
self.level.commit()
self.accept()
class SimpleRipDialog(QtWidgets.QDialog):
"""Dialog for ripping CDs that are not found in the MusicBrainz database. Allows to enter album
title, artist, date, and a title for each track,
"""
def __init__(self, discId, trackCount, level):
super().__init__(mainwindow.mainWindow)
self.setModal(True)
self.level = level
self.discid = discId
topLayout = QtWidgets.QHBoxLayout()
topLayout.addWidget(QtWidgets.QLabel(self.tr('Album title:')))
self.titleEdit = tagwidgets.TagValueEditor(tags.TITLE)
self.titleEdit.setValue('unknown album')
topLayout.addWidget(self.titleEdit)
midLayout = QtWidgets.QHBoxLayout()
midLayout.addWidget(QtWidgets.QLabel(self.tr('Artist:')))
self.artistEdit = tagwidgets.TagValueEditor(tags.get('artist'))
self.artistEdit.setValue('unknown artist')
midLayout.addWidget(self.artistEdit)
midLayout.addStretch()
midLayout.addWidget(QtWidgets.QLabel(self.tr('Date:')))
self.dateEdit = tagwidgets.TagValueEditor(tags.get('date'))
self.dateEdit.setValue(utils.FlexiDate(1900))
midLayout.addWidget(self.dateEdit)
layout = QtWidgets.QVBoxLayout()
description = QtWidgets.QLabel(self.tr('The MusicBrainz database does not contain a release '
'for this disc. Please fill the tags manually.'))
description.setWordWrap(True)
layout.addWidget(description)
layout.addLayout(topLayout)
layout.addLayout(midLayout)
tableLayout = QtWidgets.QGridLayout()
edits = []
for i in range(1, trackCount+1):
tableLayout.addWidget(QtWidgets.QLabel(self.tr('Track {:2d}:').format(i)), i-1, 0)
edits.append(tagwidgets.TagValueEditor(tags.TITLE))
edits[-1].setValue('unknown title')
tableLayout.addWidget(edits[-1], i-1, 1)
layout.addLayout(tableLayout)
box = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
box.accepted.connect(self.finish)
box.rejected.connect(self.reject)
layout.addWidget(box)
self.setLayout(layout)
self.edits = edits
def finish(self):
elems = []
for i, edit in enumerate(self.edits, start=1):
url = urls.URL("audiocd://{0}.{1}{2}/{0}/{1}.flac".format(
self.discid, i, os.path.abspath(config.options.audiocd.rippath)))
elem = self.level.collect(url)
elTags = tags.Storage()
elTags[tags.TITLE] = [edit.getValue()]
elTags[tags.ALBUM] = [self.titleEdit.getValue()]
elTags[tags.get('artist')] = [self.artistEdit.getValue()]
elTags[tags.get('date')] = [self.dateEdit.getValue()]
diff = tags.TagStorageDifference(None, elTags)
self.level.changeTags({elem: diff})
elems.append(elem)
contTags = tags.Storage()
contTags[tags.TITLE] = [self.titleEdit.getValue()]
contTags[tags.ALBUM] = [self.titleEdit.getValue()]
contTags[tags.get('date')] = [self.dateEdit.getValue()]
contTags[tags.get('artist')] = [self.artistEdit.getValue()]
cont = self.level.createContainer(contents=elems, type=ContainerType.Album,
domain=domains.default(), tags=contTags)
self.container = cont
self.accept()
| gpl-3.0 | 4,208,703,697,890,041,300 | 41.370518 | 101 | 0.625153 | false | 4.040653 | true | false | false |
GNOME/pygtkimageview | tests/test_module.py | 1 | 24286 | import gobject
import gtk
from gtk import gdk
import gtkimageview
import time
def test_version_string():
'''
The module has a __version__ attribute which is a string
containing three numbers separated by periods. The version string
is >= 1.0.0.
'''
major, minor, micro = gtkimageview.__version__.split('.')
major, minor, micro = int(major), int(minor), int(micro)
assert major >= 1
if major == 1:
assert minor >= 0
def test_default_tool():
'''
The default tool is ImageToolDragger.
'''
view = gtkimageview.ImageView()
assert isinstance(view.get_tool(), gtkimageview.ImageToolDragger)
def test_set_wrong_pixbuf_type():
'''
A TypeError is raised when set_pixbuf() is called with something
that is not a pixbuf.
'''
view = gtkimageview.ImageView()
try:
view.set_pixbuf('Hi mom!', True)
assert False
except TypeError:
assert True
def set_pixbuf_null():
view = gtkimageview.ImageView()
view.set_pixbuf(None, True)
assert not view.get_pixbuf()
def test_set_pixbuf_default():
'''
Make sure that set_pixbuf()'s second parameter has the default
value True.
'''
view = gtkimageview.ImageView()
view.set_fitting(False)
view.set_pixbuf(None)
assert view.get_fitting()
def check_class(parent, init_args):
class TheClass(parent):
__gsignals__ = {'my-signal' : (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(gobject.TYPE_INT,))}
def __init__(self):
parent.__init__(self, *init_args)
self.arg = 0
def do_my_signal(self, arg):
self.arg = arg
gobject.type_register(TheClass)
obj = TheClass()
obj.emit('my-signal', 20)
assert obj.arg == 20
def test_nav_subclass_with_signals():
'''
Ensure that a subclass of ImageNav which adds a signal to the
class works as expected.
'''
check_class(gtkimageview.ImageNav, [gtkimageview.ImageView()])
def test_view_subclass_with_signals():
'''
Ensure that a subclass of ImageView which adds a signal to the
class works as expected.
'''
check_class(gtkimageview.ImageView, [])
def test_selector_subclass_with_signals():
'''
Ensure that a subclass of ImageToolSelector which adds a signal to
the class works as expected.
'''
check_class(gtkimageview.ImageToolSelector, [gtkimageview.ImageView()])
def test_dragger_subclass_with_signals():
'''
Ensure that a subclass of ImageToolDragger which adds a signal to
the class works as expected.
'''
check_class(gtkimageview.ImageToolDragger, [gtkimageview.ImageView()])
def test_scroll_win_subclass_with_signals():
'''
Ensure that a subclass of ImageScrollWin which adds a signal to
the class works as expected.
'''
check_class(gtkimageview.ImageScrollWin, [gtkimageview.ImageView()])
def test_min_max_zoom_functions():
'''
Ensure that the gtkimageview.zooms_* functions are present and
work as expected.
'''
min_zoom = float(gtkimageview.zooms_get_min_zoom())
max_zoom = float(gtkimageview.zooms_get_max_zoom())
assert min_zoom < max_zoom
def test_get_viewport():
'''
Ensure that getting the viewport of the view works as expected.
'''
view = gtkimageview.ImageView()
assert not view.get_viewport()
view.size_allocate(gdk.Rectangle(0, 0, 100, 100))
pixbuf = gdk.Pixbuf(gdk.COLORSPACE_RGB, False, 8, 50, 50)
view.set_pixbuf(pixbuf)
rect = view.get_viewport()
assert rect.x == 0 and rect.y == 0
assert rect.width == 50 and rect.height == 50
def test_get_viewport_unallocated():
'''
If the view is not allocated, get_viewport returns a rectangle
with 0 width and height.
'''
pixbuf = gdk.Pixbuf(gdk.COLORSPACE_RGB, False, 8, 50, 50)
view = gtkimageview.ImageView()
view.set_pixbuf(pixbuf)
for ofs_x, ofs_y in [(30, 30), (10, 20), (5, 10)]:
view.set_offset(ofs_x, ofs_y)
rect = view.get_viewport()
assert rect.x == ofs_x
assert rect.y == ofs_y
assert rect.width == 0
assert rect.height == 0
def test_get_check_colors():
'''
Ensure that getting the view's check colors works as expected.
'''
view = gtkimageview.ImageView()
col1, col2 = view.get_check_colors()
assert int(col1)
assert int(col2)
def test_get_check_colors_many_args():
'''
Ensure that a correct error is thrown when get_check_colors() is
invoked with too many arguments.
'''
view = gtkimageview.ImageView()
try:
view.get_check_colors(1, 2, 3)
assert False
except TypeError:
assert True
def test_image_nav_wrong_nr_args():
'''
Ensure that TypeError is raised when ImageNav is instantiated with
the wrong number of arguments.
'''
try:
nav = gtkimageview.ImageNav()
assert False
except TypeError:
assert True
try:
nav = gtkimageview.ImageNav(gtkimageview.ImageView(), None, None)
assert False
except TypeError:
assert True
def test_get_draw_rect():
'''
Ensure that getting the draw rectangle works as expected.
'''
view = gtkimageview.ImageView()
assert not view.get_draw_rect()
view.size_allocate(gdk.Rectangle(0, 0, 100, 100))
pixbuf = gdk.Pixbuf(gdk.COLORSPACE_RGB, False, 8, 50, 50)
view.set_pixbuf(pixbuf)
rect = view.get_draw_rect()
assert rect.x == 25 and rect.y == 25
assert rect.width == 50 and rect.height == 50
def test_draw_rect_unallocated():
'''
Ensure that get_draw_rect() always returns a zero rectangle when
the view is not allocated.
'''
pixbuf = gdk.Pixbuf(gdk.COLORSPACE_RGB, False, 8, 50, 50)
view = gtkimageview.ImageView()
view.set_pixbuf(pixbuf)
for x_ofs, y_ofs in [(30, 30), (-10, 20), (0, 0), (5, 10)]:
view.set_offset(x_ofs, y_ofs)
rect = view.get_draw_rect()
assert rect.x == 0
assert rect.y == 0
assert rect.width == 0
assert rect.height == 0
def test_set_offset():
'''
Ensure that setting the offset works as expected.
'''
view = gtkimageview.ImageView()
view.size_allocate(gdk.Rectangle(0, 0, 100, 100))
pixbuf = gdk.Pixbuf(gdk.COLORSPACE_RGB, False, 8, 200, 200)
view.set_pixbuf(pixbuf)
view.set_zoom(1)
view.set_offset(0, 0)
rect = view.get_viewport()
assert rect.x == 0 and rect.y == 0
view.set_offset(100, 100, invalidate = True)
rect = view.get_viewport()
assert rect.x == 100 and rect.y == 100
def test_set_transp():
'''
Ensure that setting the view's transparency settings works as
expected.
'''
view = gtkimageview.ImageView()
view.set_transp(gtkimageview.TRANSP_COLOR, transp_color = 0xff0000)
col1, col2 = view.get_check_colors()
assert col1 == col2 == 0xff0000
view.set_transp(gtkimageview.TRANSP_GRID)
def test_presence_of_constants():
'''
Check that all enum constants exist in the module.
'''
assert hasattr(gtkimageview, 'TRANSP_BACKGROUND')
assert hasattr(gtkimageview, 'TRANSP_COLOR')
assert hasattr(gtkimageview, 'TRANSP_GRID')
assert hasattr(gtkimageview, 'DRAW_METHOD_CONTAINS')
assert hasattr(gtkimageview, 'DRAW_METHOD_SCALE')
assert hasattr(gtkimageview, 'DRAW_METHOD_SCROLL')
def test_incomplete_iimage_tool():
'''
Ensure that NotImplementedError is raised if an attempt is made to
instantiate an incomplete IImageTool.
'''
class Foo(gtkimageview.IImageTool):
pass
try:
Foo()
assert False
except NotImplementedError:
assert True
def test_pixbuf_draw_opts():
'''
Ensure that the PixbufDrawOpts class is present.
'''
assert hasattr(gtkimageview, 'PixbufDrawOpts')
def test_pixbuf_draw_cache():
'''
Ensure that the PixbufDrawCache class is present.
'''
assert hasattr(gtkimageview, 'PixbufDrawCache')
def test_pixbuf_draw_opts_attrs():
'''
Ensure that all required attributes are present on the
PixbufDrawOpts object.
'''
obj = gtkimageview.PixbufDrawOpts()
assert hasattr(obj, 'zoom')
assert hasattr(obj, 'zoom_rect')
assert hasattr(obj, 'widget_x')
assert hasattr(obj, 'widget_y')
assert hasattr(obj, 'interp')
assert hasattr(obj, 'pixbuf')
assert hasattr(obj, 'check_color1')
assert hasattr(obj, 'check_color2')
def test_pixbuf_draw_cache_attrs():
'''
Ensure that all required attributes are present on the
PixbufDrawCache object.
'''
obj = gtkimageview.PixbufDrawCache()
assert hasattr(obj, 'last_pixbuf')
assert hasattr(obj, 'old')
assert hasattr(obj, 'check_size')
assert callable(obj.draw)
def test_get_draw_method():
'''
Sanity test for the PixbufDrawCache.get_method() classmethod.
'''
obj = gtkimageview.PixbufDrawCache()
assert hasattr(obj, 'get_method')
opts = gtkimageview.PixbufDrawOpts()
gtkimageview.PixbufDrawCache.get_method(opts, opts)
def test_return_of_get_draw_method():
'''
Ensure that PixbufDrawCache.get_method() returns either
DRAW_METHOD_CONTAINS, DRAW_METHOD_SCALE or DRAW_METHOD_SCROLL.
'''
obj = gtkimageview.PixbufDrawCache()
opts = gtkimageview.PixbufDrawOpts()
ret = obj.get_method(opts, opts)
assert ret in (gtkimageview.DRAW_METHOD_CONTAINS,
gtkimageview.DRAW_METHOD_SCALE,
gtkimageview.DRAW_METHOD_SCROLL)
def test_type_error_for_draw_method():
'''
Ensure that TypeError is raised if PixbufDrawCache.get_method() is
called with an argument that is not a PixbufDrawOpts object.
'''
arg_pairs = [(None, None),
(gtkimageview.PixbufDrawOpts(), None),
(None, gtkimageview.PixbufDrawOpts()),
("Hello", "Foo")]
for last_opts, new_opts in arg_pairs:
try:
gtkimageview.PixbufDrawCache.get_method(last_opts, new_opts)
assert False
except TypeError:
assert True
def test_invalidate():
'''
Sanity test for the PixbufDrawCache.invalidate() method.
'''
cache = gtkimageview.PixbufDrawCache()
assert hasattr(cache, 'invalidate')
def test_library_verson():
'''
Ensure sanity of the library_version() function.
'''
version = gtkimageview.library_version()
maj, min, mic = version.split('.')
digits = int(maj), int(min), int(mic)
def test_tool_selector_get_selection():
'''
Ensure that the default selection rectangle is (0,0)-[0,0].
'''
view = gtkimageview.ImageView()
tool = gtkimageview.ImageToolSelector(view)
view.set_tool(tool)
sel = tool.get_selection()
assert sel.x == 0 and sel.y == 0
assert sel.width == 0 and sel.height == 0
def test_set_anim_none():
'''
gtkimageview.AnimView.set_anim can be called with None.
'''
view = gtkimageview.AnimView()
view.set_anim(None)
def test_damage_pixels():
'''
Ensure that gtkimageview.ImageView.damage_pixels can be called.
'''
view = gtkimageview.ImageView()
view.damage_pixels(gdk.Rectangle(0, 0, 100, 100))
view.damage_pixels(None)
view.damage_pixels()
view.damage_pixels(rect = gdk.Rectangle(0, 0, 100, 100))
def test_damage_pixels_badarg():
'''
Ensure that TypeError is raised if argument to
gtkimageview.ImageView.damage_pixels is not None or a
gdk.Rectangle.
'''
view = gtkimageview.ImageView()
try:
view.damage_pixels('hello')
assert False
except TypeError:
assert True
def test_widget_to_image_rect():
'''
Tests that gtkimageview.ImageView.widget_to_image_rect works.
'''
view = gtkimageview.ImageView()
rect = gdk.Rectangle(0, 0, 20, 20)
assert not view.widget_to_image_rect(rect)
view.size_allocate(gdk.Rectangle(0, 0, 100, 100))
assert not view.widget_to_image_rect(rect)
pixbuf = gdk.Pixbuf(gdk.COLORSPACE_RGB, False, 8, 50, 50)
view.set_pixbuf(pixbuf)
r = view.widget_to_image_rect(gdk.Rectangle(25, 25, 50, 50))
assert r.x == 0
assert r.y == 0
assert r.width == 50
assert r.height == 50
def test_image_to_widget_rect():
'''
Test that gtkimageview.ImageView.image_to_widget_rect works.
'''
view = gtkimageview.ImageView()
rect = gdk.Rectangle(0, 0, 50, 50)
assert not view.image_to_widget_rect(rect)
view.size_allocate(gdk.Rectangle(0, 0, 100, 100))
pixbuf = gdk.Pixbuf(gdk.COLORSPACE_RGB, False, 8, 50, 50)
view.set_pixbuf(pixbuf)
r = view.image_to_widget_rect(rect)
assert r.x == 25
assert r.y == 25
assert r.width == 50
assert r.height == 50
def test_image_to_widget_less_than_1_size():
'''
If the width or the height of the image space rectangle occupies
less than one widget space pixel, then it is rounded up to 1.
'''
pixbuf = gdk.Pixbuf(gdk.COLORSPACE_RGB, False, 8, 100, 100)
view = gtkimageview.ImageView()
view.size_allocate(gdk.Rectangle(0, 0, 100, 100))
view.set_pixbuf(pixbuf)
view.set_zoom(0.5)
rect = gdk.Rectangle(0, 0, 1, 1)
r = view.image_to_widget_rect(rect)
assert r.x == 25
assert r.y == 25
assert r.width == 1
assert r.height == 1
def test_big_image_small_allocation():
'''
This is a test for #23. If it eats up all memory it is a failure.
'''
pixbuf = gdk.Pixbuf(gdk.COLORSPACE_RGB, False, 8, 5000, 5000);
alloc = gdk.Rectangle(0, 0, 5, 5)
view = gtkimageview.ImageView()
view.set_pixbuf(pixbuf)
view.set_show_frame(False)
view.window = gdk.Window(None,
alloc.width, alloc.height,
gdk.WINDOW_TOPLEVEL,
0,
gdk.INPUT_OUTPUT)
view.size_allocate(alloc)
ev = gdk.Event(gdk.EXPOSE)
ev.area = alloc
view.emit('expose-event', ev)
def test_zoom_to_fit_keybinding():
'''
Ensure that the 'x' keybinding works as expected.
'''
view = gtkimageview.ImageView()
view.set_fitting(False)
gtk.bindings_activate(view, gtk.keysyms.x, 0)
assert view.get_fitting()
def test_step_on_non_anim():
'''
Ensure that calling ``gtkimageview.AnimView.step()`` works as
expected when the view shows a static image.
'''
anim = gdk.PixbufSimpleAnim(100, 100, 10)
anim.add_frame(gdk.Pixbuf(gdk.COLORSPACE_RGB, False, 8, 5000, 5000))
view = gtkimageview.AnimView()
view.set_anim(anim)
view.step()
def test_unload_animation():
'''
Ensure that a running animation can be unloaded. Tests #34.
'''
# Flush the event queue.
while gtk.events_pending():
gtk.main_iteration(True)
anim = gdk.PixbufSimpleAnim(100, 100, 10)
for x in range(100):
pb = gdk.Pixbuf(gdk.COLORSPACE_RGB, False, 8, 300, 300)
anim.add_frame(pb)
view = gtkimageview.AnimView()
view.set_anim(anim)
assert view.get_pixbuf()
for x in range(10):
gtk.main_iteration(True)
view.set_anim(None)
# No further events for 1 second and no pixbuf in the view.
start = time.time()
while time.time() < start + 1:
assert not gtk.events_pending()
assert not view.get_pixbuf()
gtk.main_iteration(False)
def test_step_on_last_frame_of_anim():
'''
Ensure that calling ``gtkimageview.AnimView.step()`` on the last
frame of the animation that the view shows works as expected.
'''
anim = gdk.PixbufSimpleAnim(100, 100, 10)
for x in range(2):
anim.add_frame(gdk.Pixbuf(gdk.COLORSPACE_RGB, False, 8, 5000, 5000))
view = gtkimageview.AnimView()
view.set_anim(anim)
for x in range(2):
view.step()
def test_step_on_fast_player():
'''
Ensure that calling step always advances the frame even if the
animation is one of those that plays too fast.
'''
# 50 fps = 20ms delay -> will be delayed
anim = gdk.PixbufSimpleAnim(100, 100, 50)
for x in range(10):
anim.add_frame(gdk.Pixbuf(gdk.COLORSPACE_RGB, False, 8, 400, 300))
view = gtkimageview.AnimView()
view.set_anim(anim)
for x in range(9):
pb_old = view.get_pixbuf()
view.step()
pb_new = view.get_pixbuf()
assert pb_old != pb_new
def test_zoom_in_destroyed_window():
'''
This test exposes a bug in GtkRange which causes a segfault when
the window containing the ``gtkimageview.ImageScrollWin`` widget is
destroyed. Unfortunately it will never be fixed, see #551317.
'''
# view = gtkimageview.ImageView()
# view.set_pixbuf(gdk.Pixbuf(gdk.COLORSPACE_RGB, False, 8, 500, 500))
# scroll = gtkimageview.ImageScrollWin(view)
# win = gtk.Window()
# win.add(scroll)
# win.set_default_size(100, 100)
# win.show_all()
# win.destroy()
#view.set_zoom(3.0)
def test_return_of_motion_notify():
'''
Ensure that every tool returns True if it handled a motion
notify event and False otherwise.
'''
# The pixbuf is larger than the view and should be draggable.
view = gtkimageview.ImageView()
view.size_allocate((0, 0, 50, 50))
pixbuf = gdk.Pixbuf(gdk.COLORSPACE_RGB, False, 8, 100, 100)
view.set_pixbuf(pixbuf)
view.set_zoom(1.0)
button_ev = gdk.Event(gdk.BUTTON_PRESS)
button_ev.x = 10.0
button_ev.y = 10.0
button_ev.button = 1
button_ev.window = window = gdk.Window(None,
100, 100,
gdk.WINDOW_TOPLEVEL,
0,
gdk.INPUT_OUTPUT,
x = 100, y = 100)
motion_ev = gdk.Event(gdk.MOTION_NOTIFY)
motion_ev.x = 20.0
motion_ev.y = 20.0
for tool_cls in (gtkimageview.ImageToolDragger,
gtkimageview.ImageToolSelector):
tool = tool_cls(view)
# Simulate a dragging motion. Left mouse button is pressed
# down at (10, 10) and then moved to (20, 20).
tool.do_button_press(tool, button_ev)
assert tool.do_motion_notify(tool, motion_ev)
def test_return_of_button_release():
'''
Ensure that every tool returns True if it released the grab in
response to a button release event.
'''
# The pixbuf is larger than the view and should be draggable.
view = gtkimageview.ImageView()
view.size_allocate((0, 0, 50, 50))
pixbuf = gdk.Pixbuf(gdk.COLORSPACE_RGB, False, 8, 100, 100)
view.set_pixbuf(pixbuf)
view.set_zoom(1.0)
press_ev = gdk.Event(gdk.BUTTON_PRESS)
press_ev.x = 10.0
press_ev.y = 10.0
press_ev.button = 1
press_ev.window = window = gdk.Window(None,
100, 100,
gdk.WINDOW_TOPLEVEL,
0,
gdk.INPUT_OUTPUT,
x = 100, y = 100)
rel_ev = gdk.Event(gdk.BUTTON_RELEASE)
rel_ev.button = 1
for tool_cls in (gtkimageview.ImageToolDragger,
gtkimageview.ImageToolSelector):
tool = tool_cls(view)
tool.do_button_press(tool, press_ev)
assert tool.do_button_release(tool, rel_ev)
class DummyTool(gobject.GObject, gtkimageview.IImageTool):
def __init__(self):
gobject.GObject.__init__(self)
self.zoom_rect = None
def do_button_press(self, ev_button):
pass
def do_button_release(self, ev_button):
pass
def do_motion_notify(self, ev_motion):
pass
def do_pixbuf_changed(self, reset_fit, rect):
pass
def do_paint_image(self, opts, drawable):
self.zoom_rect = opts.zoom_rect
def do_cursor_at_point(self, x, y):
pass
gobject.type_register(DummyTool)
def test_correct_repaint_offset():
'''
Ensure that there is no off by one error when repainting.
A 1600*1600 pixbuf viewed in a 700*700 image view widget at zoom
1.0 should be perfectly centered so that draw starts at
coordinates 450, 450. However, there may be a mishandled floating
point conversion which causes the draw to start at 449, 449. See
#31.
'''
pixbuf = gdk.Pixbuf(gdk.COLORSPACE_RGB, False, 8, 1600, 1600)
tool = DummyTool()
view = gtkimageview.ImageView()
view.set_tool(tool)
view.set_show_frame(False)
view.set_pixbuf(pixbuf)
view.size_allocate((0, 0, 700, 700))
view.window = gdk.Window(None,
700, 700,
gdk.WINDOW_TOPLEVEL,
0,
gdk.INPUT_OUTPUT)
view.set_zoom(1.0)
ev = gdk.Event(gdk.EXPOSE)
ev.area = view.allocation
view.do_expose_event(view, ev)
assert tool.zoom_rect.x == 450
assert tool.zoom_rect.y == 450
def test_scrolling_offbyone():
'''
Ensure that there is no off by one error when scrolling the view.
The view is scrolled two pixels in the vertical direction, the
result should be that the tool is asked to draw two new horizontal
lines of the pixbuf. This is the other problem in bug #31.
'''
pixbuf = gdk.Pixbuf(gdk.COLORSPACE_RGB, False, 8, 1600, 1600)
tool = DummyTool()
view = gtkimageview.ImageView()
view.set_tool(tool)
view.set_show_frame(False)
view.set_pixbuf(pixbuf)
view.size_allocate((0, 0, 700, 700))
view.window = gdk.Window(None,
700, 700,
gdk.WINDOW_TOPLEVEL,
0,
gdk.INPUT_OUTPUT)
view.set_zoom(1.0)
view.set_offset(450.0, 448.0)
assert tool.zoom_rect == gdk.Rectangle(450, 448, 700, 2)
view.set_offset(448.0, 450.0)
def test_scrolling_adjustments_offbyone():
'''
Ensure that there is no off by one error when scrolling the view
using the horizontal adjustment.
First the view is scrolled two small steps, then the same distance
in one go. In each case, the same number of pixels should be
painted by the tool. The steps include numbers with the fractional
part >= 0.5 to test the rounding.
'''
pixbuf = gdk.Pixbuf(gdk.COLORSPACE_RGB, False, 8, 1600, 1600)
hadj = gtk.Adjustment()
vadj = gtk.Adjustment()
tool = DummyTool()
view = gtkimageview.ImageView()
view.set_show_frame(False)
view.set_tool(tool)
view.set_scroll_adjustments(hadj, vadj)
view.set_pixbuf(pixbuf)
view.size_allocate((0, 0, 700, 700))
view.window = gdk.Window(None,
700, 700,
gdk.WINDOW_TOPLEVEL,
0,
gdk.INPUT_OUTPUT)
view.set_zoom(3.0)
pix_drawn = 0
hadj.value = 2050.0
hadj.value_changed()
for ofs in [2073.71, 2088.41]:
hadj.value = ofs
hadj.value_changed()
pix_drawn += tool.zoom_rect.width
hadj.value = 2050
hadj.value_changed()
hadj.value = 2088.41
hadj.value_changed()
assert tool.zoom_rect.width == pix_drawn
def test_setting_float_offsets_offbyone():
'''
Another test for #31.
'''
pixbuf = gdk.Pixbuf(gdk.COLORSPACE_RGB, False, 8, 1600, 1600)
hadj = gtk.Adjustment()
vadj = gtk.Adjustment()
tool = DummyTool()
view = gtkimageview.ImageView()
view.set_show_frame(False)
view.set_tool(tool)
view.set_scroll_adjustments(hadj, vadj)
view.set_pixbuf(pixbuf)
view.size_allocate((0, 0, 700, 700))
view.window = gdk.Window(None,
700, 700,
gdk.WINDOW_TOPLEVEL,
0,
gdk.INPUT_OUTPUT)
view.set_zoom(3.0)
view.set_offset(2050.0, 2050.0)
pix_drawn = 0
for ofs in [2073.71, 2088.41]:
view.set_offset(ofs, 2050.0)
pix_drawn += tool.zoom_rect.width
view.set_offset(2050.0, 2050.0)
view.set_offset(2088.41, 2050)
assert tool.zoom_rect.width == pix_drawn
| lgpl-2.1 | -5,955,185,135,464,154,000 | 29.898219 | 76 | 0.614881 | false | 3.466952 | true | false | false |
techtonik/pip | tests/functional/test_completion.py | 1 | 8458 | import os
import sys
import pytest
def test_completion_for_bash(script):
"""
Test getting completion for bash shell
"""
bash_completion = """\
_pip_completion()
{
COMPREPLY=( $( COMP_WORDS="${COMP_WORDS[*]}" \\
COMP_CWORD=$COMP_CWORD \\
PIP_AUTO_COMPLETE=1 $1 ) )
}
complete -o default -F _pip_completion pip"""
result = script.pip('completion', '--bash')
assert bash_completion in result.stdout, 'bash completion is wrong'
def test_completion_for_zsh(script):
"""
Test getting completion for zsh shell
"""
zsh_completion = """\
function _pip_completion {
local words cword
read -Ac words
read -cn cword
reply=( $( COMP_WORDS="$words[*]" \\
COMP_CWORD=$(( cword-1 )) \\
PIP_AUTO_COMPLETE=1 $words[1] ) )
}
compctl -K _pip_completion pip"""
result = script.pip('completion', '--zsh')
assert zsh_completion in result.stdout, 'zsh completion is wrong'
def test_completion_for_fish(script):
"""
Test getting completion for fish shell
"""
fish_completion = """\
function __fish_complete_pip
set -lx COMP_WORDS (commandline -o) ""
set -lx COMP_CWORD ( \\
math (contains -i -- (commandline -t) $COMP_WORDS)-1 \\
)
set -lx PIP_AUTO_COMPLETE 1
string split \\ -- (eval $COMP_WORDS[1])
end
complete -fa "(__fish_complete_pip)" -c pip"""
result = script.pip('completion', '--fish')
assert fish_completion in result.stdout, 'fish completion is wrong'
def test_completion_for_unknown_shell(script):
"""
Test getting completion for an unknown shell
"""
error_msg = 'no such option: --myfooshell'
result = script.pip('completion', '--myfooshell', expect_error=True)
assert error_msg in result.stderr, 'tests for an unknown shell failed'
def test_completion_alone(script):
"""
Test getting completion for none shell, just pip completion
"""
result = script.pip('completion', expect_error=True)
assert 'ERROR: You must pass --bash or --fish or --zsh' in result.stderr, \
'completion alone failed -- ' + result.stderr
def setup_completion(script, words, cword, cwd=None):
script.environ = os.environ.copy()
script.environ['PIP_AUTO_COMPLETE'] = '1'
script.environ['COMP_WORDS'] = words
script.environ['COMP_CWORD'] = cword
# expect_error is True because autocomplete exits with status code 1
result = script.run(
'python', '-c', 'import pip._internal;pip._internal.autocomplete()',
expect_error=True,
cwd=cwd,
)
return result, script
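# The same completion request can be reproduced by hand in a shell (illustrative only):
#
#     COMP_WORDS="pip un" COMP_CWORD=1 PIP_AUTO_COMPLETE=1 pip
#
# pip then prints the matching completions (here "uninstall") and exits with status code 1,
# which is why expect_error is set above.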
def test_completion_for_un_snippet(script):
"""
Test that getting completion for ``un`` returns ``uninstall``
"""
res, env = setup_completion(script, 'pip un', '1')
assert res.stdout.strip().split() == ['uninstall'], res.stdout
def test_completion_for_default_parameters(script):
"""
Test that getting completion for ``--`` contains ``--help``
"""
res, env = setup_completion(script, 'pip --', '1')
assert '--help' in res.stdout,\
"autocomplete function could not complete ``--``"
def test_completion_option_for_command(script):
"""
Test getting completion for ``--`` in command (e.g. ``pip search --``)
"""
res, env = setup_completion(script, 'pip search --', '2')
assert '--help' in res.stdout,\
"autocomplete function could not complete ``--``"
def test_completion_short_option(script):
"""
Test getting completion for short options after ``-`` (e.g. ``pip -``)
"""
res, env = setup_completion(script, 'pip -', '1')
assert '-h' in res.stdout.split(),\
"autocomplete function could not complete short options after ``-``"
def test_completion_short_option_for_command(script):
"""
Test getting completion for short options after ``-`` in command
(e.g. ``pip search -``)
"""
res, env = setup_completion(script, 'pip search -', '2')
assert '-h' in res.stdout.split(),\
"autocomplete function could not complete short options after ``-``"
def test_completion_files_after_option(script, data):
"""
Test getting completion for <file> or <dir> after options in command
(e.g. ``pip install -r``)
"""
res, env = setup_completion(
script=script,
words=('pip install -r r'),
cword='3',
cwd=data.completion_paths,
)
assert 'requirements.txt' in res.stdout, (
"autocomplete function could not complete <file> "
"after options in command"
)
assert os.path.join('resources', '') in res.stdout, (
"autocomplete function could not complete <dir> "
"after options in command"
)
assert not any(out in res.stdout for out in
(os.path.join('REPLAY', ''), 'README.txt')), (
"autocomplete function completed <file> or <dir> that "
"should not be completed"
)
if sys.platform != 'win32':
return
assert 'readme.txt' in res.stdout, (
"autocomplete function could not complete <file> "
"after options in command"
)
assert os.path.join('replay', '') in res.stdout, (
"autocomplete function could not complete <dir> "
"after options in command"
)
def test_completion_not_files_after_option(script, data):
"""
Test not getting completion files after options which not applicable
(e.g. ``pip install``)
"""
res, env = setup_completion(
script=script,
words=('pip install r'),
cword='2',
cwd=data.completion_paths,
)
assert not any(out in res.stdout for out in
('requirements.txt', 'readme.txt',)), (
"autocomplete function completed <file> when "
"it should not complete"
)
assert not any(os.path.join(out, '') in res.stdout
for out in ('replay', 'resources')), (
"autocomplete function completed <dir> when "
"it should not complete"
)
def test_completion_directories_after_option(script, data):
"""
Test getting completion <dir> after options in command
(e.g. ``pip --cache-dir``)
"""
res, env = setup_completion(
script=script,
words=('pip --cache-dir r'),
cword='2',
cwd=data.completion_paths,
)
assert os.path.join('resources', '') in res.stdout, (
"autocomplete function could not complete <dir> after options"
)
assert not any(out in res.stdout for out in (
'requirements.txt', 'README.txt', os.path.join('REPLAY', ''))), (
"autocomplete function completed <dir> when "
"it should not complete"
)
if sys.platform == 'win32':
assert os.path.join('replay', '') in res.stdout, (
"autocomplete function could not complete <dir> after options"
)
def test_completion_subdirectories_after_option(script, data):
"""
Test getting completion <dir> after options in command
given path of a directory
"""
res, env = setup_completion(
script=script,
words=('pip --cache-dir ' + os.path.join('resources', '')),
cword='2',
cwd=data.completion_paths,
)
assert os.path.join('resources',
os.path.join('images', '')) in res.stdout, (
"autocomplete function could not complete <dir> "
"given path of a directory after options"
)
def test_completion_path_after_option(script, data):
"""
Test getting completion <path> after options in command
given absolute path
"""
res, env = setup_completion(
script=script,
words=('pip install -e ' + os.path.join(data.completion_paths, 'R')),
cword='3',
)
assert all(os.path.normcase(os.path.join(data.completion_paths, out))
in res.stdout for out in (
'README.txt', os.path.join('REPLAY', ''))), (
"autocomplete function could not complete <path> "
"after options in command given absolute path"
)
@pytest.mark.parametrize('flag', ['--bash', '--zsh', '--fish'])
def test_completion_uses_same_executable_name(script, flag):
expect_stderr = sys.version_info[:2] == (3, 3)
executable_name = 'pip{}'.format(sys.version_info[0])
result = script.run(
executable_name, 'completion', flag, expect_stderr=expect_stderr
)
assert executable_name in result.stdout
| mit | 7,235,039,292,148,112,000 | 29.756364 | 79 | 0.608536 | false | 3.867398 | true | false | false |
CeON/avroknife | avroknife/test/example_data_stores.py | 1 | 3754 | # Copyright 2013-2015 University of Warsaw
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import avro.schema
from avro.datafile import DataFileWriter
from avro.io import DatumWriter
def create(standard_out_path, nested_out_path, binary_out_path):
"""Create example Avro data stores"""
__create_standard(standard_out_path)
__create_nested(nested_out_path)
__create_binary(binary_out_path)
def __create_standard(out_path):
os.makedirs(out_path)
schema_path = os.path.join(os.path.dirname(__file__), 'data/user.avsc')
schema = avro.schema.parse(open(schema_path).read())
with DataFileWriter(open(os.path.join(out_path, 'part-m-00000.avro'), 'w'),
DatumWriter(), schema) as writer:
writer.append({'position': 0, 'name': 'Alyssa', 'favorite_number': 256})
writer.append({'position': 1, 'name': 'Ben', 'favorite_number': 4, 'favorite_color': 'red'})
with DataFileWriter(open(os.path.join(out_path, 'part-m-00001.avro'), 'w'),
DatumWriter(), schema) as writer:
writer.append({'position': 2, 'name': 'Alyssa2', 'favorite_number': 512})
writer.append({'position': 3, 'name': 'Ben2', 'favorite_number': 8, 'favorite_color': 'blue', 'secret':b'0987654321'})
writer.append({'position': 4, 'name': 'Ben3', 'favorite_number': 2, 'favorite_color': 'green', 'secret':b'12345abcd'})
with DataFileWriter(open(os.path.join(out_path, 'part-m-00002.avro'), 'w'),
DatumWriter(), schema) as writer:
pass
with DataFileWriter(open(os.path.join(out_path, 'part-m-00003.avro'), 'w'),
DatumWriter(), schema) as writer:
writer.append({'position': 5, 'name': 'Alyssa3', 'favorite_number': 16})
writer.append({'position': 6, 'name': 'Mallet', 'favorite_color': 'blue', 'secret': b'asdfgf'})
writer.append({'position': 7, 'name': 'Mikel', 'favorite_color': ''})
def __create_nested(out_path):
os.makedirs(out_path)
schema_path = os.path.join(os.path.dirname(__file__), 'data/nested.avsc')
schema = avro.schema.parse(open(schema_path).read())
with DataFileWriter(open(os.path.join(out_path, 'part-m-00004.avro'), 'w'),
DatumWriter(), schema) as writer:
writer.append({'sup': 1, 'sub':{'level2':2}})
writer.append({'sup': 2, 'sub':{'level2':1}})
def __create_binary(out_path):
os.makedirs(out_path)
schema_path = os.path.join(os.path.dirname(__file__), 'data/binary.avsc')
schema = avro.schema.parse(open(schema_path).read())
with DataFileWriter(open(os.path.join(out_path, 'content.avro'), 'w'),
DatumWriter(), schema) as writer:
various_stuff_data = open(os.path.join(os.path.dirname(__file__),
'data/binary_stuff/various_stuff.tar.gz')).read()
writer.append({'description': 'various stuff',
'packed_files': various_stuff_data})
greetings_data = open(os.path.join(os.path.dirname(__file__),
'data/binary_stuff/greetings.tar.gz')).read()
writer.append({'description': 'greetings',
'packed_files': greetings_data})
| apache-2.0 | 484,122,910,533,058,300 | 49.053333 | 126 | 0.627064 | false | 3.437729 | false | false | false |
yasserglez/pytiger2c | packages/pytiger2c/types/arraytype.py | 1 | 1741 | # -*- coding: utf-8 -*-
"""
Clase de la jerarquía de tipos de Tiger representando el tipo array.
"""
from pytiger2c.types.tigertype import TigerType
class ArrayType(TigerType):
"""
Clase de la jerarquía de tipos de Tiger representando el tipo array.
"""
def _get_code_name(self):
"""
Método para obtener el valor de la propiedad C{code_name}.
"""
return self._code_name
def _set_code_name(self, value):
"""
Método para cambiar el valor de la propiedad C{code_name}.
"""
self._code_name = value
code_name = property(_get_code_name, _set_code_name)
def _get_fields_typenames(self):
"""
Método para obtener el valor de la propiedad C{fields_typenames}.
"""
return self._fields_typenames
fields_typenames = property(_get_fields_typenames)
def _get_fields_types(self):
"""
Método para obtener el valor de la propiedad C{fields_types}.
"""
return self._fields_types
def _set_fields_types(self, fields_types):
"""
Método para cambiar el valor de la propiedad C{fields_types}.
"""
self._fields_types = fields_types
fields_types = property(_get_fields_types, _set_fields_types)
def __init__(self, values_typename):
"""
Inicializa la clase representando el tipo array.
@type values_typename: C{str}
@param values_typename: Nombre del tipo que tendrán los valores del array.
"""
super(ArrayType, self).__init__()
self._fields_typenames = [values_typename]
self._fields_types = None
self._code_name = None
| mit | 6,491,093,563,361,914,000 | 27.409836 | 82 | 0.588575 | false | 3.486922 | false | false | false |
mpetyx/pychatbot | SemanticWebApproach/RoboWriter/allegrordf-1.0.1/franz/openrdf/vocabulary/xmlschema.py | 1 | 3739 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable-msg=C0103
##***** BEGIN LICENSE BLOCK *****
##Version: MPL 1.1
##
##The contents of this file are subject to the Mozilla Public License Version
##1.1 (the "License") you may not use this file except in compliance with
##the License. You may obtain a copy of the License at
##http:##www.mozilla.org/MPL/
##
##Software distributed under the License is distributed on an "AS IS" basis,
##WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
##for the specific language governing rights and limitations under the
##License.
##
##The Original Code is the AllegroGraph Java Client interface.
##
##The Original Code was written by Franz Inc.
##Copyright (C) 2006 Franz Inc. All Rights Reserved.
##
##***** END LICENSE BLOCK *****
from __future__ import absolute_import
from ..model.value import URI
NS = "http://www.w3.org/2001/XMLSchema#"
class XMLSchema:
"""
A 'static' class containing useful XMLSchema URIs.
"""
NAMESPACE = NS
DURATION = URI(namespace=NS, localname="duration")
DATETIME = URI(namespace=NS, localname="dateTime")
TIME = URI(namespace=NS, localname="time")
DATE = URI(namespace=NS, localname="date")
GYEARMONTH = URI(namespace=NS, localname="gYearMonth")
GYEAR = URI(namespace=NS, localname="gYear")
GMONTHDAY = URI(namespace=NS, localname="gMonthDay")
GDAY = URI(namespace=NS, localname="gDay")
GMONTH = URI(namespace=NS, localname="gMonth")
STRING = URI(namespace=NS, localname="string")
BOOLEAN = URI(namespace=NS, localname="boolean")
BASE64BINARY = URI(namespace=NS, localname="base64Binary")
HEXBINARY = URI(namespace=NS, localname="hexBinary")
FLOAT = URI(namespace=NS, localname="float")
DECIMAL = URI(namespace=NS, localname="decimal")
DOUBLE = URI(namespace=NS, localname="double")
ANYURI = URI(namespace=NS, localname="anyURI")
QNAME = URI(namespace=NS, localname="QName")
NOTATION = URI(namespace=NS, localname="NOTATION")
NORMALIZEDSTRING = URI(namespace=NS, localname="normalizedString")
TOKEN = URI(namespace=NS, localname="token")
LANGUAGE = URI(namespace=NS, localname="language")
NMTOKEN = URI(namespace=NS, localname="NMTOKEN")
NMTOKENS = URI(namespace=NS, localname="NMTOKENS")
NAME = URI(namespace=NS, localname="Name")
NCNAME = URI(namespace=NS, localname="NCName")
ID = URI(namespace=NS, localname="ID")
IDREF = URI(namespace=NS, localname="IDREF")
IDREFS = URI(namespace=NS, localname="IDREFS")
ENTITY = URI(namespace=NS, localname="ENTITY")
ENTITIES = URI(namespace=NS, localname="ENTITIES")
INTEGER = URI(namespace=NS, localname="integer")
LONG = URI(namespace=NS, localname="long")
INT = URI(namespace=NS, localname="int")
SHORT = URI(namespace=NS, localname="short")
NUMBER = URI(namespace=NS, localname="number")
BYTE = URI(namespace=NS, localname="byte")
NON_POSITIVE_INTEGER = URI(namespace=NS, localname="nonPositiveInteger")
NEGATIVE_INTEGER = URI(namespace=NS, localname="negativeInteger")
NON_NEGATIVE_INTEGER = URI(namespace=NS, localname="nonNegativeInteger")
POSITIVE_INTEGER = URI(namespace=NS, localname="positiveInteger")
UNSIGNED_LONG = URI(namespace=NS, localname="unsignedLong")
UNSIGNED_INT = URI(namespace=NS, localname="unsignedInt")
UNSIGNED_SHORT = URI(namespace=NS, localname="unsignedShort")
UNSIGNED_BYTE = URI(namespace=NS, localname="unsignedByte")
## map of uri strings to URI objects:
uristr2obj = {}
for name, uri in XMLSchema.__dict__.iteritems():
if name.upper() == name:
XMLSchema.uristr2obj[str(uri)] = uri
del XMLSchema.uristr2obj[NS]
| apache-2.0 | -5,239,778,649,766,541,000 | 40.544444 | 77 | 0.696175 | false | 3.452447 | false | false | false |
hguemar/cinder | cinder/tests/test_vmware_vmdk.py | 1 | 128129 | # Copyright (c) 2013 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suite for VMware VMDK driver.
"""
from distutils.version import LooseVersion
import os
import mock
import mox
from oslo.utils import units
from cinder import exception
from cinder.image import glance
from cinder import test
from cinder.volume import configuration
from cinder.volume.drivers.vmware import api
from cinder.volume.drivers.vmware import datastore as hub
from cinder.volume.drivers.vmware import error_util
from cinder.volume.drivers.vmware import vim
from cinder.volume.drivers.vmware import vim_util
from cinder.volume.drivers.vmware import vmdk
from cinder.volume.drivers.vmware import vmware_images
from cinder.volume.drivers.vmware import volumeops
class FakeVim(object):
@property
def service_content(self):
return mox.MockAnything()
@property
def client(self):
return mox.MockAnything()
def Login(self, session_manager, userName, password):
return mox.MockAnything()
def Logout(self, session_manager):
pass
def TerminateSession(self, session_manager, sessionId):
pass
def SessionIsActive(self, session_manager, sessionID, userName):
pass
class FakeTaskInfo(object):
def __init__(self, state, result=None):
self.state = state
self.result = result
class FakeError(object):
def __init__(self):
self.localizedMessage = None
self.error = FakeError()
class FakeMor(object):
def __init__(self, type, val):
self._type = type
self.value = val
class FakeObject(object):
def __init__(self):
self._fields = {}
def __setitem__(self, key, value):
self._fields[key] = value
def __getitem__(self, item):
return self._fields[item]
class FakeManagedObjectReference(object):
def __init__(self, lis=None):
self.ManagedObjectReference = lis or []
class FakeDatastoreSummary(object):
def __init__(self, freeSpace, capacity, datastore=None, name=None):
self.freeSpace = freeSpace
self.capacity = capacity
self.datastore = datastore
self.name = name
class FakeSnapshotTree(object):
def __init__(self, tree=None, name=None,
snapshot=None, childSnapshotList=None):
self.rootSnapshotList = tree
self.name = name
self.snapshot = snapshot
self.childSnapshotList = childSnapshotList
class FakeElem(object):
def __init__(self, prop_set=None):
self.propSet = prop_set
class FakeProp(object):
def __init__(self, name=None, val=None):
self.name = name
self.val = val
class FakeRetrieveResult(object):
def __init__(self, objects, token):
self.objects = objects
self.token = token
class FakeObj(object):
def __init__(self, obj=None):
self.obj = obj
# TODO(vbala) Split test methods handling multiple cases into multiple methods,
# each handling a specific case.
class VMwareEsxVmdkDriverTestCase(test.TestCase):
"""Test class for VMwareEsxVmdkDriver."""
IP = 'localhost'
USERNAME = 'username'
PASSWORD = 'password'
VOLUME_FOLDER = 'cinder-volumes'
API_RETRY_COUNT = 3
TASK_POLL_INTERVAL = 5.0
IMG_TX_TIMEOUT = 10
MAX_OBJECTS = 100
TMP_DIR = "/vmware-tmp"
VMDK_DRIVER = vmdk.VMwareEsxVmdkDriver
def setUp(self):
super(VMwareEsxVmdkDriverTestCase, self).setUp()
self._config = mox.MockObject(configuration.Configuration)
self._config.append_config_values(mox.IgnoreArg())
self._config.vmware_host_ip = self.IP
self._config.vmware_host_username = self.USERNAME
self._config.vmware_host_password = self.PASSWORD
self._config.vmware_wsdl_location = None
self._config.vmware_volume_folder = self.VOLUME_FOLDER
self._config.vmware_api_retry_count = self.API_RETRY_COUNT
self._config.vmware_task_poll_interval = self.TASK_POLL_INTERVAL
self._config.vmware_image_transfer_timeout_secs = self.IMG_TX_TIMEOUT
self._config.vmware_max_objects_retrieval = self.MAX_OBJECTS
self._config.vmware_tmp_dir = self.TMP_DIR
self._db = mock.Mock()
self._driver = vmdk.VMwareEsxVmdkDriver(configuration=self._config,
db=self._db)
        api_retry_count = self._config.vmware_api_retry_count
        task_poll_interval = self._config.vmware_task_poll_interval
self._session = api.VMwareAPISession(self.IP, self.USERNAME,
self.PASSWORD, api_retry_count,
task_poll_interval,
create_session=False)
self._volumeops = volumeops.VMwareVolumeOps(self._session,
self.MAX_OBJECTS)
self._vim = FakeVim()
def test_retry(self):
"""Test Retry."""
class TestClass(object):
def __init__(self):
self.counter1 = 0
self.counter2 = 0
@api.Retry(max_retry_count=2, inc_sleep_time=0.001,
exceptions=(Exception))
def fail(self):
self.counter1 += 1
raise exception.CinderException('Fail')
@api.Retry(max_retry_count=2)
def success(self):
self.counter2 += 1
return self.counter2
test_obj = TestClass()
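        # fail() is retried twice after the initial call, so counter1 ends at 3.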
self.assertRaises(exception.CinderException, test_obj.fail)
self.assertEqual(test_obj.counter1, 3)
ret = test_obj.success()
self.assertEqual(1, ret)
def test_create_session(self):
"""Test create_session."""
m = self.mox
m.StubOutWithMock(api.VMwareAPISession, 'vim')
self._session.vim = self._vim
m.ReplayAll()
self._session.create_session()
m.UnsetStubs()
m.VerifyAll()
def test_do_setup(self):
"""Test do_setup."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'session')
self._driver.session = self._session
m.ReplayAll()
self._driver.do_setup(mox.IgnoreArg())
m.UnsetStubs()
m.VerifyAll()
def test_check_for_setup_error(self):
"""Test check_for_setup_error."""
self._driver.check_for_setup_error()
def test_get_volume_stats(self):
"""Test get_volume_stats."""
stats = self._driver.get_volume_stats()
self.assertEqual(stats['vendor_name'], 'VMware')
self.assertEqual(stats['driver_version'], self._driver.VERSION)
self.assertEqual(stats['storage_protocol'], 'LSI Logic SCSI')
self.assertEqual(stats['reserved_percentage'], 0)
self.assertEqual(stats['total_capacity_gb'], 'unknown')
self.assertEqual(stats['free_capacity_gb'], 'unknown')
def test_create_volume(self):
"""Test create_volume."""
driver = self._driver
host = mock.sentinel.host
rp = mock.sentinel.resource_pool
folder = mock.sentinel.folder
summary = mock.sentinel.summary
driver._select_ds_for_volume = mock.MagicMock()
driver._select_ds_for_volume.return_value = (host, rp, folder,
summary)
# invoke the create_volume call
volume = {'name': 'fake_volume'}
driver.create_volume(volume)
# verify calls made
driver._select_ds_for_volume.assert_called_once_with(volume)
# test create_volume call when _select_ds_for_volume fails
driver._select_ds_for_volume.side_effect = error_util.VimException('')
self.assertRaises(error_util.VimFaultException, driver.create_volume,
volume)
# Clear side effects.
driver._select_ds_for_volume.side_effect = None
def test_success_wait_for_task(self):
"""Test successful wait_for_task."""
m = self.mox
m.StubOutWithMock(api.VMwareAPISession, 'vim')
self._session.vim = self._vim
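        # Simulate a task that completes successfully and returns a VM reference.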
result = FakeMor('VirtualMachine', 'my_vm')
success_task_info = FakeTaskInfo('success', result=result)
m.StubOutWithMock(vim_util, 'get_object_property')
vim_util.get_object_property(self._session.vim,
mox.IgnoreArg(),
'info').AndReturn(success_task_info)
m.ReplayAll()
ret = self._session.wait_for_task(mox.IgnoreArg())
self.assertEqual(ret.result, result)
m.UnsetStubs()
m.VerifyAll()
def test_failed_wait_for_task(self):
"""Test failed wait_for_task."""
m = self.mox
m.StubOutWithMock(api.VMwareAPISession, 'vim')
self._session.vim = self._vim
failed_task_info = FakeTaskInfo('failed')
m.StubOutWithMock(vim_util, 'get_object_property')
vim_util.get_object_property(self._session.vim,
mox.IgnoreArg(),
'info').AndReturn(failed_task_info)
m.ReplayAll()
self.assertRaises(error_util.VimFaultException,
self._session.wait_for_task,
mox.IgnoreArg())
m.UnsetStubs()
m.VerifyAll()
def test_delete_volume_without_backing(self):
"""Test delete_volume without backing."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
m.StubOutWithMock(self._volumeops, 'get_backing')
self._volumeops.get_backing('hello_world').AndReturn(None)
m.ReplayAll()
volume = FakeObject()
volume['name'] = 'hello_world'
self._driver.delete_volume(volume)
m.UnsetStubs()
m.VerifyAll()
def test_delete_volume_with_backing(self):
"""Test delete_volume with backing."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
backing = FakeMor('VirtualMachine', 'my_vm')
FakeMor('Task', 'my_task')
m.StubOutWithMock(self._volumeops, 'get_backing')
m.StubOutWithMock(self._volumeops, 'delete_backing')
self._volumeops.get_backing('hello_world').AndReturn(backing)
self._volumeops.delete_backing(backing)
m.ReplayAll()
volume = FakeObject()
volume['name'] = 'hello_world'
self._driver.delete_volume(volume)
m.UnsetStubs()
m.VerifyAll()
def test_create_export(self):
"""Test create_export."""
self._driver.create_export(mox.IgnoreArg(), mox.IgnoreArg())
def test_ensure_export(self):
"""Test ensure_export."""
self._driver.ensure_export(mox.IgnoreArg(), mox.IgnoreArg())
def test_remove_export(self):
"""Test remove_export."""
self._driver.remove_export(mox.IgnoreArg(), mox.IgnoreArg())
def test_terminate_connection(self):
"""Test terminate_connection."""
self._driver.terminate_connection(mox.IgnoreArg(), mox.IgnoreArg(),
force=mox.IgnoreArg())
def test_create_backing_in_inventory_multi_hosts(self):
"""Test _create_backing_in_inventory scanning multiple hosts."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
host1 = FakeObj(obj=FakeMor('HostSystem', 'my_host1'))
host2 = FakeObj(obj=FakeMor('HostSystem', 'my_host2'))
retrieve_result = FakeRetrieveResult([host1, host2], None)
m.StubOutWithMock(self._volumeops, 'get_hosts')
self._volumeops.get_hosts().AndReturn(retrieve_result)
m.StubOutWithMock(self._driver, '_create_backing')
volume = FakeObject()
volume['name'] = 'vol_name'
backing = FakeMor('VirtualMachine', 'my_back')
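        # Backing creation fails on the first host (maintenance mode) and
        # succeeds on the second, exercising the fallback to remaining hosts.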
mux = self._driver._create_backing(volume, host1.obj, {})
mux.AndRaise(error_util.VimException('Maintenance mode'))
mux = self._driver._create_backing(volume, host2.obj, {})
mux.AndReturn(backing)
m.StubOutWithMock(self._volumeops, 'cancel_retrieval')
self._volumeops.cancel_retrieval(retrieve_result)
m.StubOutWithMock(self._volumeops, 'continue_retrieval')
m.ReplayAll()
result = self._driver._create_backing_in_inventory(volume)
self.assertEqual(result, backing)
m.UnsetStubs()
m.VerifyAll()
def test_init_conn_with_instance_and_backing(self):
"""Test initialize_connection with instance and backing."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
m.StubOutWithMock(self._volumeops, 'get_backing')
volume = FakeObject()
volume['name'] = 'volume_name'
volume['id'] = 'volume_id'
volume['size'] = 1
connector = {'instance': 'my_instance'}
backing = FakeMor('VirtualMachine', 'my_back')
self._volumeops.get_backing(volume['name']).AndReturn(backing)
m.StubOutWithMock(self._volumeops, 'get_host')
host = FakeMor('HostSystem', 'my_host')
self._volumeops.get_host(mox.IgnoreArg()).AndReturn(host)
m.ReplayAll()
conn_info = self._driver.initialize_connection(volume, connector)
self.assertEqual(conn_info['driver_volume_type'], 'vmdk')
self.assertEqual(conn_info['data']['volume'], 'my_back')
self.assertEqual(conn_info['data']['volume_id'], 'volume_id')
m.UnsetStubs()
m.VerifyAll()
def test_get_volume_group_folder(self):
"""Test _get_volume_group_folder."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
datacenter = FakeMor('Datacenter', 'my_dc')
m.StubOutWithMock(self._volumeops, 'get_vmfolder')
self._volumeops.get_vmfolder(datacenter)
m.ReplayAll()
self._driver._get_volume_group_folder(datacenter)
m.UnsetStubs()
m.VerifyAll()
def test_select_datastore_summary(self):
"""Test _select_datastore_summary."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
datastore1 = FakeMor('Datastore', 'my_ds_1')
datastore2 = FakeMor('Datastore', 'my_ds_2')
datastore3 = FakeMor('Datastore', 'my_ds_3')
datastore4 = FakeMor('Datastore', 'my_ds_4')
datastores = [datastore1, datastore2, datastore3, datastore4]
m.StubOutWithMock(self._volumeops, 'get_summary')
summary1 = FakeDatastoreSummary(5, 100)
summary2 = FakeDatastoreSummary(25, 100)
summary3 = FakeDatastoreSummary(50, 100)
summary4 = FakeDatastoreSummary(75, 100)
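        # The datastores differ in free space (5, 25, 50 and 75 out of 100),
        # so the summary selected for a given requested size can be verified.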
self._volumeops.get_summary(
datastore1).MultipleTimes().AndReturn(summary1)
self._volumeops.get_summary(
datastore2).MultipleTimes().AndReturn(summary2)
self._volumeops.get_summary(
datastore3).MultipleTimes().AndReturn(summary3)
self._volumeops.get_summary(
datastore4).MultipleTimes().AndReturn(summary4)
m.StubOutWithMock(self._volumeops, 'get_connected_hosts')
host1 = FakeMor('HostSystem', 'my_host_1')
host2 = FakeMor('HostSystem', 'my_host_2')
host3 = FakeMor('HostSystem', 'my_host_3')
host4 = FakeMor('HostSystem', 'my_host_4')
self._volumeops.get_connected_hosts(
datastore1).MultipleTimes().AndReturn([host1, host2, host3, host4])
self._volumeops.get_connected_hosts(
datastore2).MultipleTimes().AndReturn([host1, host2, host3])
self._volumeops.get_connected_hosts(
datastore3).MultipleTimes().AndReturn([host1, host2])
self._volumeops.get_connected_hosts(
datastore4).MultipleTimes().AndReturn([host1, host2])
m.ReplayAll()
summary = self._driver._select_datastore_summary(1, datastores)
self.assertEqual(summary, summary1)
summary = self._driver._select_datastore_summary(10, datastores)
self.assertEqual(summary, summary2)
summary = self._driver._select_datastore_summary(40, datastores)
self.assertEqual(summary, summary4)
self.assertRaises(error_util.VimException,
self._driver._select_datastore_summary,
100, datastores)
m.UnsetStubs()
m.VerifyAll()
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver.'
'session', new_callable=mock.PropertyMock)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_get_folder_ds_summary(self, volumeops, session):
"""Test _get_folder_ds_summary."""
volumeops = volumeops.return_value
driver = self._driver
volume = {'size': 10, 'volume_type_id': 'fake_type'}
rp = mock.sentinel.resource_pool
dss = mock.sentinel.datastores
# patch method calls from _get_folder_ds_summary
volumeops.get_dc.return_value = mock.sentinel.dc
volumeops.get_vmfolder.return_value = mock.sentinel.folder
driver._get_storage_profile = mock.MagicMock()
driver._select_datastore_summary = mock.MagicMock()
driver._select_datastore_summary.return_value = mock.sentinel.summary
# call _get_folder_ds_summary
(folder, datastore_summary) = driver._get_folder_ds_summary(volume,
rp, dss)
# verify returned values and calls made
self.assertEqual(mock.sentinel.folder, folder,
"Folder returned is wrong.")
self.assertEqual(mock.sentinel.summary, datastore_summary,
"Datastore summary returned is wrong.")
volumeops.get_dc.assert_called_once_with(rp)
volumeops.get_vmfolder.assert_called_once_with(mock.sentinel.dc)
driver._get_storage_profile.assert_called_once_with(volume)
size = volume['size'] * units.Gi
driver._select_datastore_summary.assert_called_once_with(size, dss)
@mock.patch('cinder.volume.volume_types.get_volume_type_extra_specs')
def test_get_disk_type(self, get_volume_type_extra_specs):
"""Test _get_disk_type."""
# Test with no volume type.
volume = {'volume_type_id': None}
self.assertEqual(vmdk.THIN_VMDK_TYPE,
vmdk.VMwareEsxVmdkDriver._get_disk_type(volume))
# Test with valid vmdk_type.
volume_type_id = mock.sentinel.volume_type_id
volume = {'volume_type_id': volume_type_id}
get_volume_type_extra_specs.return_value = vmdk.THICK_VMDK_TYPE
self.assertEqual(vmdk.THICK_VMDK_TYPE,
vmdk.VMwareEsxVmdkDriver._get_disk_type(volume))
get_volume_type_extra_specs.assert_called_once_with(volume_type_id,
'vmware:vmdk_type')
# Test with invalid vmdk_type.
get_volume_type_extra_specs.return_value = 'sparse'
self.assertRaises(error_util.InvalidDiskTypeException,
vmdk.VMwareEsxVmdkDriver._get_disk_type,
volume)
def test_init_conn_with_instance_no_backing(self):
"""Test initialize_connection with instance and without backing."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
m.StubOutWithMock(self._volumeops, 'get_backing')
volume = FakeObject()
volume['name'] = 'volume_name'
volume['id'] = 'volume_id'
volume['size'] = 1
volume['volume_type_id'] = None
connector = {'instance': 'my_instance'}
self._volumeops.get_backing(volume['name'])
m.StubOutWithMock(self._volumeops, 'get_host')
host = FakeMor('HostSystem', 'my_host')
self._volumeops.get_host(mox.IgnoreArg()).AndReturn(host)
m.StubOutWithMock(self._volumeops, 'get_dss_rp')
resource_pool = FakeMor('ResourcePool', 'my_rp')
datastores = [FakeMor('Datastore', 'my_ds')]
self._volumeops.get_dss_rp(host).AndReturn((datastores, resource_pool))
m.StubOutWithMock(self._driver, '_get_folder_ds_summary')
folder = FakeMor('Folder', 'my_fol')
summary = FakeDatastoreSummary(1, 1)
self._driver._get_folder_ds_summary(volume, resource_pool,
datastores).AndReturn((folder,
summary))
backing = FakeMor('VirtualMachine', 'my_back')
m.StubOutWithMock(self._volumeops, 'create_backing')
self._volumeops.create_backing(volume['name'],
volume['size'] * units.Mi,
mox.IgnoreArg(), folder,
resource_pool, host,
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(backing)
m.ReplayAll()
conn_info = self._driver.initialize_connection(volume, connector)
self.assertEqual(conn_info['driver_volume_type'], 'vmdk')
self.assertEqual(conn_info['data']['volume'], 'my_back')
self.assertEqual(conn_info['data']['volume_id'], 'volume_id')
m.UnsetStubs()
m.VerifyAll()
def test_init_conn_without_instance(self):
"""Test initialize_connection without instance and a backing."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
m.StubOutWithMock(self._volumeops, 'get_backing')
backing = FakeMor('VirtualMachine', 'my_back')
volume = FakeObject()
volume['name'] = 'volume_name'
volume['id'] = 'volume_id'
connector = {}
self._volumeops.get_backing(volume['name']).AndReturn(backing)
m.ReplayAll()
conn_info = self._driver.initialize_connection(volume, connector)
self.assertEqual(conn_info['driver_volume_type'], 'vmdk')
self.assertEqual(conn_info['data']['volume'], 'my_back')
self.assertEqual(conn_info['data']['volume_id'], 'volume_id')
m.UnsetStubs()
m.VerifyAll()
def test_create_snapshot_without_backing(self):
"""Test vmdk.create_snapshot without backing."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
m.StubOutWithMock(self._volumeops, 'get_backing')
snapshot = FakeObject()
snapshot['volume_name'] = 'volume_name'
snapshot['name'] = 'snap_name'
snapshot['volume'] = FakeObject()
snapshot['volume']['status'] = 'available'
self._volumeops.get_backing(snapshot['volume_name'])
m.ReplayAll()
self._driver.create_snapshot(snapshot)
m.UnsetStubs()
m.VerifyAll()
def test_create_snapshot_with_backing(self):
"""Test vmdk.create_snapshot with backing."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
m.StubOutWithMock(self._volumeops, 'get_backing')
snapshot = FakeObject()
snapshot['volume_name'] = 'volume_name'
snapshot['name'] = 'snapshot_name'
snapshot['display_description'] = 'snapshot_desc'
snapshot['volume'] = FakeObject()
snapshot['volume']['status'] = 'available'
backing = FakeMor('VirtualMachine', 'my_back')
self._volumeops.get_backing(snapshot['volume_name']).AndReturn(backing)
m.StubOutWithMock(self._volumeops, 'create_snapshot')
self._volumeops.create_snapshot(backing, snapshot['name'],
snapshot['display_description'])
m.ReplayAll()
self._driver.create_snapshot(snapshot)
m.UnsetStubs()
m.VerifyAll()
def test_create_snapshot_when_attached(self):
"""Test vmdk.create_snapshot when volume is attached."""
snapshot = FakeObject()
snapshot['volume'] = FakeObject()
snapshot['volume']['status'] = 'in-use'
self.assertRaises(exception.InvalidVolume,
self._driver.create_snapshot, snapshot)
def test_delete_snapshot_without_backing(self):
"""Test delete_snapshot without backing."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
m.StubOutWithMock(self._volumeops, 'get_backing')
snapshot = FakeObject()
snapshot['volume_name'] = 'volume_name'
snapshot['name'] = 'snap_name'
snapshot['volume'] = FakeObject()
snapshot['volume']['status'] = 'available'
self._volumeops.get_backing(snapshot['volume_name'])
m.ReplayAll()
self._driver.delete_snapshot(snapshot)
m.UnsetStubs()
m.VerifyAll()
def test_delete_snapshot_with_backing(self):
"""Test delete_snapshot with backing."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
m.StubOutWithMock(self._volumeops, 'get_backing')
snapshot = FakeObject()
snapshot['name'] = 'snapshot_name'
snapshot['volume_name'] = 'volume_name'
snapshot['name'] = 'snap_name'
snapshot['volume'] = FakeObject()
snapshot['volume']['status'] = 'available'
backing = FakeMor('VirtualMachine', 'my_back')
self._volumeops.get_backing(snapshot['volume_name']).AndReturn(backing)
m.StubOutWithMock(self._volumeops, 'delete_snapshot')
self._volumeops.delete_snapshot(backing,
snapshot['name'])
m.ReplayAll()
self._driver.delete_snapshot(snapshot)
m.UnsetStubs()
m.VerifyAll()
def test_delete_snapshot_when_attached(self):
"""Test delete_snapshot when volume is attached."""
snapshot = FakeObject()
snapshot['volume'] = FakeObject()
snapshot['volume']['status'] = 'in-use'
self.assertRaises(exception.InvalidVolume,
self._driver.delete_snapshot, snapshot)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_create_cloned_volume_without_backing(self, mock_vops):
"""Test create_cloned_volume without a backing."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'name': 'mock_vol'}
src_vref = {'name': 'src_snapshot_name'}
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = None
# invoke the create_volume_from_snapshot api
driver.create_cloned_volume(volume, src_vref)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('src_snapshot_name')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_create_cloned_volume_with_backing(self, mock_vops):
"""Test create_cloned_volume with a backing."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = mock.sentinel.volume
fake_size = 1
src_vref = {'name': 'src_snapshot_name', 'size': fake_size}
backing = mock.sentinel.backing
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = backing
src_vmdk = "[datastore] src_vm/src_vm.vmdk"
mock_vops.get_vmdk_path.return_value = src_vmdk
driver._create_backing_by_copying = mock.MagicMock()
# invoke the create_volume_from_snapshot api
driver.create_cloned_volume(volume, src_vref)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('src_snapshot_name')
mock_vops.get_vmdk_path.assert_called_once_with(backing)
driver._create_backing_by_copying.assert_called_once_with(volume,
src_vmdk,
fake_size)
@mock.patch.object(VMDK_DRIVER, '_extend_volumeops_virtual_disk')
@mock.patch.object(VMDK_DRIVER, '_create_backing_in_inventory')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_create_backing_by_copying(self, volumeops, create_backing,
_extend_virtual_disk):
self._test_create_backing_by_copying(volumeops, create_backing,
_extend_virtual_disk)
def _test_create_backing_by_copying(self, volumeops, create_backing,
_extend_virtual_disk):
"""Test _create_backing_by_copying."""
fake_volume = {'size': 2, 'name': 'fake_volume-0000000000001'}
fake_size = 1
fake_src_vmdk_path = "[datastore] src_vm/src_vm.vmdk"
fake_backing = mock.sentinel.backing
        # e.g. "[datastore] dest_vm/dest_vm.vmdk"
        fake_vmdk_path = mock.sentinel.path
fake_dc = mock.sentinel.datacenter
create_backing.return_value = fake_backing
volumeops.get_vmdk_path.return_value = fake_vmdk_path
volumeops.get_dc.return_value = fake_dc
# Test with fake_volume['size'] greater than fake_size
self._driver._create_backing_by_copying(fake_volume,
fake_src_vmdk_path,
fake_size)
create_backing.assert_called_once_with(fake_volume)
volumeops.get_vmdk_path.assert_called_once_with(fake_backing)
volumeops.get_dc.assert_called_once_with(fake_backing)
volumeops.delete_vmdk_file.assert_called_once_with(fake_vmdk_path,
fake_dc)
volumeops.copy_vmdk_file.assert_called_once_with(fake_dc,
fake_src_vmdk_path,
fake_vmdk_path)
_extend_virtual_disk.assert_called_once_with(fake_volume['size'],
fake_vmdk_path,
fake_dc)
# Reset all the mocks and test with fake_volume['size']
# not greater than fake_size
_extend_virtual_disk.reset_mock()
fake_size = 2
self._driver._create_backing_by_copying(fake_volume,
fake_src_vmdk_path,
fake_size)
self.assertFalse(_extend_virtual_disk.called)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_create_volume_from_snapshot_without_backing(self, mock_vops):
"""Test create_volume_from_snapshot without a backing."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'name': 'mock_vol'}
snapshot = {'volume_name': 'mock_vol', 'name': 'mock_snap'}
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = None
# invoke the create_volume_from_snapshot api
driver.create_volume_from_snapshot(volume, snapshot)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('mock_vol')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_create_volume_from_snap_without_backing_snap(self, mock_vops):
"""Test create_volume_from_snapshot without a backing snapshot."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'volume_type_id': None, 'name': 'mock_vol'}
snapshot = {'volume_name': 'mock_vol', 'name': 'mock_snap'}
backing = mock.sentinel.backing
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = backing
mock_vops.get_snapshot.return_value = None
# invoke the create_volume_from_snapshot api
driver.create_volume_from_snapshot(volume, snapshot)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('mock_vol')
mock_vops.get_snapshot.assert_called_once_with(backing,
'mock_snap')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_create_volume_from_snapshot(self, mock_vops):
"""Test create_volume_from_snapshot."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'volume_type_id': None, 'name': 'mock_vol'}
snapshot = {'volume_name': 'mock_vol', 'name': 'mock_snap',
'volume_size': 1}
fake_size = snapshot['volume_size']
backing = mock.sentinel.backing
snap_moref = mock.sentinel.snap_moref
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = backing
mock_vops.get_snapshot.return_value = snap_moref
src_vmdk = "[datastore] src_vm/src_vm-001.vmdk"
mock_vops.get_vmdk_path.return_value = src_vmdk
driver._create_backing_by_copying = mock.MagicMock()
# invoke the create_volume_from_snapshot api
driver.create_volume_from_snapshot(volume, snapshot)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('mock_vol')
mock_vops.get_snapshot.assert_called_once_with(backing,
'mock_snap')
mock_vops.get_vmdk_path.assert_called_once_with(snap_moref)
driver._create_backing_by_copying.assert_called_once_with(volume,
src_vmdk,
fake_size)
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, '_extend_vmdk_virtual_disk')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_extend_volume(self, volume_ops, _extend_virtual_disk,
_select_ds_for_volume):
"""Test extend_volume."""
self._test_extend_volume(volume_ops, _extend_virtual_disk,
_select_ds_for_volume)
def _test_extend_volume(self, volume_ops, _extend_virtual_disk,
_select_ds_for_volume):
fake_name = u'volume-00000001'
new_size = '21'
fake_size = '20'
fake_vol = {'project_id': 'testprjid', 'name': fake_name,
'size': fake_size,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
fake_host = mock.sentinel.host
fake_rp = mock.sentinel.rp
fake_folder = mock.sentinel.folder
fake_summary = mock.Mock(spec=object)
fake_summary.datastore = mock.sentinel.datastore
fake_summary.name = 'fake_name'
fake_backing = mock.sentinel.backing
volume_ops.get_backing.return_value = fake_backing
        # If there is enough space in the datastore where the volume is
        # located, the rest of this method will not be called.
self._driver.extend_volume(fake_vol, new_size)
_extend_virtual_disk.assert_called_with(fake_name, new_size)
self.assertFalse(_select_ds_for_volume.called)
self.assertFalse(volume_ops.get_backing.called)
self.assertFalse(volume_ops.relocate_backing.called)
self.assertFalse(volume_ops.move_backing_to_folder.called)
        # If there is not enough space in the datastore where the volume is
        # located, the rest of this method will be called. The first time
        # _extend_virtual_disk is called, VimFaultException is raised. The
        # second time it is called, there is no exception.
_extend_virtual_disk.reset_mock()
_extend_virtual_disk.side_effect = [error_util.
VimFaultException(mock.Mock(),
'Error'), None]
# When _select_ds_for_volume raises no exception.
_select_ds_for_volume.return_value = (fake_host, fake_rp,
fake_folder, fake_summary)
self._driver.extend_volume(fake_vol, new_size)
_select_ds_for_volume.assert_called_with(new_size)
volume_ops.get_backing.assert_called_with(fake_name)
volume_ops.relocate_backing.assert_called_with(fake_backing,
fake_summary.datastore,
fake_rp,
fake_host)
_extend_virtual_disk.assert_called_with(fake_name, new_size)
volume_ops.move_backing_to_folder.assert_called_with(fake_backing,
fake_folder)
# If get_backing raises error_util.VimException,
# this exception will be caught for volume extend.
_extend_virtual_disk.reset_mock()
_extend_virtual_disk.side_effect = [error_util.
VimFaultException(mock.Mock(),
'Error'), None]
volume_ops.get_backing.side_effect = error_util.VimException('Error')
self.assertRaises(error_util.VimException, self._driver.extend_volume,
fake_vol, new_size)
# If _select_ds_for_volume raised an exception, the rest code will
# not be called.
_extend_virtual_disk.reset_mock()
volume_ops.get_backing.reset_mock()
volume_ops.relocate_backing.reset_mock()
volume_ops.move_backing_to_folder.reset_mock()
_extend_virtual_disk.side_effect = [error_util.
VimFaultException(mock.Mock(),
'Error'), None]
_select_ds_for_volume.side_effect = error_util.VimException('Error')
self.assertRaises(error_util.VimException, self._driver.extend_volume,
fake_vol, new_size)
_extend_virtual_disk.assert_called_once_with(fake_name, new_size)
self.assertFalse(volume_ops.get_backing.called)
self.assertFalse(volume_ops.relocate_backing.called)
self.assertFalse(volume_ops.move_backing_to_folder.called)
def test_copy_image_to_volume_non_vmdk(self):
"""Test copy_image_to_volume for a non-vmdk disk format."""
fake_context = mock.sentinel.context
fake_image_id = 'image-123456789'
fake_image_meta = {'disk_format': 'novmdk'}
image_service = mock.Mock()
image_service.show.return_value = fake_image_meta
fake_volume = {'name': 'fake_name', 'size': 1}
self.assertRaises(exception.ImageUnacceptable,
self._driver.copy_image_to_volume,
fake_context, fake_volume,
image_service, fake_image_id)
@mock.patch.object(VMDK_DRIVER, '_extend_vmdk_virtual_disk')
@mock.patch('cinder.openstack.common.uuidutils.generate_uuid')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER,
'_create_virtual_disk_from_preallocated_image')
@mock.patch.object(VMDK_DRIVER, '_create_virtual_disk_from_sparse_image')
@mock.patch(
'cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver._get_disk_type')
@mock.patch.object(VMDK_DRIVER, '_get_ds_name_folder_path')
@mock.patch.object(VMDK_DRIVER, '_create_backing_in_inventory')
def test_copy_image_to_volume_non_stream_optimized(
self, create_backing, get_ds_name_folder_path, get_disk_type,
create_disk_from_sparse_image, create_disk_from_preallocated_image,
vops, select_ds_for_volume, generate_uuid, extend_disk):
self._test_copy_image_to_volume_non_stream_optimized(
create_backing,
get_ds_name_folder_path,
get_disk_type,
create_disk_from_sparse_image,
create_disk_from_preallocated_image,
vops,
select_ds_for_volume,
generate_uuid,
extend_disk)
def _test_copy_image_to_volume_non_stream_optimized(
self, create_backing, get_ds_name_folder_path, get_disk_type,
create_disk_from_sparse_image, create_disk_from_preallocated_image,
vops, select_ds_for_volume, generate_uuid, extend_disk):
image_size_in_bytes = 2 * units.Gi
adapter_type = 'lsiLogic'
image_meta = {'disk_format': 'vmdk',
'size': image_size_in_bytes,
'properties': {'vmware_disktype': 'sparse',
                                     'vmware_adaptertype': adapter_type}}
image_service = mock.Mock(glance.GlanceImageService)
image_service.show.return_value = image_meta
backing = mock.Mock()
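        # The backing must be created disk-less; the image is copied in and
        # attached as a virtual disk afterwards.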
def create_backing_mock(volume, create_params):
self.assertTrue(create_params[vmdk.CREATE_PARAM_DISK_LESS])
return backing
create_backing.side_effect = create_backing_mock
ds_name = mock.Mock()
folder_path = mock.Mock()
get_ds_name_folder_path.return_value = (ds_name, folder_path)
summary = mock.Mock()
select_ds_for_volume.return_value = (mock.sentinel.host,
mock.sentinel.rp,
mock.sentinel.folder,
summary)
uuid = "6b77b25a-9136-470e-899e-3c930e570d8e"
generate_uuid.return_value = uuid
host = mock.Mock()
dc_ref = mock.Mock()
vops.get_host.return_value = host
vops.get_dc.return_value = dc_ref
disk_type = vmdk.EAGER_ZEROED_THICK_VMDK_TYPE
get_disk_type.return_value = disk_type
path = mock.Mock()
create_disk_from_sparse_image.return_value = path
create_disk_from_preallocated_image.return_value = path
volume_size = 2
vops.get_disk_size.return_value = volume_size * units.Gi
context = mock.Mock()
volume = {'name': 'volume_name',
'id': 'volume_id',
'size': volume_size}
image_id = mock.Mock()
self._driver.copy_image_to_volume(
context, volume, image_service, image_id)
create_params = {vmdk.CREATE_PARAM_DISK_LESS: True,
vmdk.CREATE_PARAM_BACKING_NAME: uuid}
create_backing.assert_called_once_with(volume, create_params)
create_disk_from_sparse_image.assert_called_once_with(
context, image_service, image_id, image_size_in_bytes,
dc_ref, ds_name, folder_path, uuid)
vops.attach_disk_to_backing.assert_called_once_with(
backing, image_size_in_bytes / units.Ki, disk_type,
adapter_type, path.get_descriptor_ds_file_path())
select_ds_for_volume.assert_called_once_with(volume)
vops.clone_backing.assert_called_once_with(
volume['name'], backing, None, volumeops.FULL_CLONE_TYPE,
summary.datastore, disk_type, mock.sentinel.host)
vops.delete_backing.assert_called_once_with(backing)
self.assertFalse(extend_disk.called)
vops.get_disk_size.return_value = 1 * units.Gi
create_backing.reset_mock()
vops.attach_disk_to_backing.reset_mock()
vops.delete_backing.reset_mock()
image_meta['properties']['vmware_disktype'] = 'preallocated'
self._driver.copy_image_to_volume(
context, volume, image_service, image_id)
del create_params[vmdk.CREATE_PARAM_BACKING_NAME]
create_backing.assert_called_once_with(volume, create_params)
create_disk_from_preallocated_image.assert_called_once_with(
context, image_service, image_id, image_size_in_bytes,
dc_ref, ds_name, folder_path, volume['name'], adapter_type)
vops.attach_disk_to_backing.assert_called_once_with(
backing, image_size_in_bytes / units.Ki, disk_type,
adapter_type, path.get_descriptor_ds_file_path())
extend_disk.assert_called_once_with(volume['name'], volume['size'])
extend_disk.reset_mock()
create_disk_from_preallocated_image.side_effect = (
error_util.VimException("Error"))
self.assertRaises(error_util.VimException,
self._driver.copy_image_to_volume,
context, volume, image_service, image_id)
vops.delete_backing.assert_called_once_with(backing)
self.assertFalse(extend_disk.called)
@mock.patch(
'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath')
@mock.patch.object(VMDK_DRIVER, '_copy_image')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_create_virtual_disk_from_preallocated_image(
self, vops, copy_image, flat_extent_path):
self._test_create_virtual_disk_from_preallocated_image(
vops, copy_image, flat_extent_path)
def _test_create_virtual_disk_from_preallocated_image(
self, vops, copy_image, flat_extent_path):
context = mock.Mock()
image_service = mock.Mock()
image_id = mock.Mock()
image_size_in_bytes = 2 * units.Gi
dc_ref = mock.Mock()
ds_name = "nfs"
folder_path = "A/B/"
disk_name = "disk-1"
adapter_type = "ide"
src_path = mock.Mock()
flat_extent_path.return_value = src_path
ret = self._driver._create_virtual_disk_from_preallocated_image(
context, image_service, image_id, image_size_in_bytes, dc_ref,
ds_name, folder_path, disk_name, adapter_type)
create_descriptor = vops.create_flat_extent_virtual_disk_descriptor
create_descriptor.assert_called_once_with(
dc_ref, src_path, image_size_in_bytes / units.Ki, adapter_type,
vmdk.EAGER_ZEROED_THICK_VMDK_TYPE)
copy_image.assert_called_once_with(
context, dc_ref, image_service, image_id, image_size_in_bytes,
ds_name, src_path.get_flat_extent_file_path())
self.assertEqual(src_path, ret)
create_descriptor.reset_mock()
copy_image.reset_mock()
copy_image.side_effect = error_util.VimException("error")
self.assertRaises(
error_util.VimException,
self._driver._create_virtual_disk_from_preallocated_image,
context, image_service, image_id, image_size_in_bytes, dc_ref,
ds_name, folder_path, disk_name, adapter_type)
vops.delete_file.assert_called_once_with(
src_path.get_descriptor_ds_file_path(), dc_ref)
@mock.patch(
'cinder.volume.drivers.vmware.volumeops.'
'MonolithicSparseVirtualDiskPath')
@mock.patch(
'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath')
@mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk')
@mock.patch.object(VMDK_DRIVER, '_copy_image')
def test_create_virtual_disk_from_sparse_image(
self, copy_image, copy_temp_virtual_disk, flat_extent_path,
sparse_path):
self._test_create_virtual_disk_from_sparse_image(
copy_image, copy_temp_virtual_disk, flat_extent_path, sparse_path)
def _test_create_virtual_disk_from_sparse_image(
self, copy_image, copy_temp_virtual_disk, flat_extent_path,
sparse_path):
context = mock.Mock()
image_service = mock.Mock()
image_id = mock.Mock()
image_size_in_bytes = 2 * units.Gi
dc_ref = mock.Mock()
ds_name = "nfs"
folder_path = "A/B/"
disk_name = "disk-1"
src_path = mock.Mock()
sparse_path.return_value = src_path
dest_path = mock.Mock()
flat_extent_path.return_value = dest_path
ret = self._driver._create_virtual_disk_from_sparse_image(
context, image_service, image_id, image_size_in_bytes, dc_ref,
ds_name, folder_path, disk_name)
copy_image.assert_called_once_with(
context, dc_ref, image_service, image_id, image_size_in_bytes,
ds_name, src_path.get_descriptor_file_path())
copy_temp_virtual_disk.assert_called_once_with(
dc_ref, src_path, dest_path)
self.assertEqual(dest_path, ret)
@mock.patch.object(vmware_images, 'fetch_stream_optimized_image')
@mock.patch.object(VMDK_DRIVER, '_extend_vmdk_virtual_disk')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id')
@mock.patch.object(VMDK_DRIVER, 'session')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_copy_image_to_volume_stream_optimized(self,
volumeops,
session,
get_profile_id,
_select_ds_for_volume,
_extend_virtual_disk,
fetch_optimized_image):
"""Test copy_image_to_volume.
Test with an acceptable vmdk disk format and streamOptimized disk type.
"""
self._test_copy_image_to_volume_stream_optimized(volumeops,
session,
get_profile_id,
_select_ds_for_volume,
_extend_virtual_disk,
fetch_optimized_image)
def _test_copy_image_to_volume_stream_optimized(self, volumeops,
session,
get_profile_id,
_select_ds_for_volume,
_extend_virtual_disk,
fetch_optimized_image):
fake_context = mock.Mock()
fake_backing = mock.sentinel.backing
fake_image_id = 'image-id'
size = 5 * units.Gi
size_gb = float(size) / units.Gi
fake_volume_size = 1 + size_gb
adapter_type = 'ide'
fake_image_meta = {'disk_format': 'vmdk', 'size': size,
'properties': {'vmware_disktype': 'streamOptimized',
'vmware_adaptertype': adapter_type}}
image_service = mock.Mock(glance.GlanceImageService)
fake_host = mock.sentinel.host
fake_rp = mock.sentinel.rp
fake_folder = mock.sentinel.folder
fake_summary = mock.sentinel.summary
fake_summary.name = "datastore-1"
fake_vm_create_spec = mock.sentinel.spec
fake_disk_type = 'thin'
vol_name = 'fake_volume name'
vol_id = '12345'
fake_volume = {'name': vol_name,
'id': vol_id,
'size': fake_volume_size,
'volume_type_id': None}
cf = session.vim.client.factory
vm_import_spec = cf.create('ns0:VirtualMachineImportSpec')
vm_import_spec.configSpec = fake_vm_create_spec
timeout = self._config.vmware_image_transfer_timeout_secs
image_service.show.return_value = fake_image_meta
volumeops.get_create_spec.return_value = fake_vm_create_spec
volumeops.get_backing.return_value = fake_backing
# If _select_ds_for_volume raises an exception, get_create_spec
# will not be called.
_select_ds_for_volume.side_effect = error_util.VimException('Error')
self.assertRaises(exception.VolumeBackendAPIException,
self._driver.copy_image_to_volume,
fake_context, fake_volume,
image_service, fake_image_id)
self.assertFalse(volumeops.get_create_spec.called)
        # If the volume size is greater than the backing's disk size,
# _extend_vmdk_virtual_disk will be called.
_select_ds_for_volume.side_effect = None
_select_ds_for_volume.return_value = (fake_host, fake_rp,
fake_folder, fake_summary)
profile_id = 'profile-1'
get_profile_id.return_value = profile_id
volumeops.get_disk_size.return_value = size
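        # The backing disk (5 GB) is smaller than the requested volume size
        # (6 GB), so the virtual disk is expected to be extended.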
self._driver.copy_image_to_volume(fake_context, fake_volume,
image_service, fake_image_id)
image_service.show.assert_called_with(fake_context, fake_image_id)
_select_ds_for_volume.assert_called_with(fake_volume)
get_profile_id.assert_called_once_with(fake_volume)
volumeops.get_create_spec.assert_called_with(fake_volume['name'],
0,
fake_disk_type,
fake_summary.name,
profile_id,
adapter_type)
self.assertTrue(fetch_optimized_image.called)
fetch_optimized_image.assert_called_with(fake_context, timeout,
image_service,
fake_image_id,
session=session,
host=self.IP,
resource_pool=fake_rp,
vm_folder=fake_folder,
vm_create_spec=
vm_import_spec,
image_size=size)
_extend_virtual_disk.assert_called_once_with(fake_volume['name'],
fake_volume_size)
        # If the volume size is not greater than the backing's disk size,
# _extend_vmdk_virtual_disk will not be called.
volumeops.get_disk_size.return_value = fake_volume_size * units.Gi
_extend_virtual_disk.reset_mock()
self._driver.copy_image_to_volume(fake_context, fake_volume,
image_service, fake_image_id)
self.assertFalse(_extend_virtual_disk.called)
# If fetch_stream_optimized_image raises an exception,
# get_backing and delete_backing will be called.
fetch_optimized_image.side_effect = exception.CinderException
self.assertRaises(exception.CinderException,
self._driver.copy_image_to_volume,
fake_context, fake_volume,
image_service, fake_image_id)
volumeops.get_backing.assert_called_with(fake_volume['name'])
volumeops.delete_backing.assert_called_with(fake_backing)
self.assertFalse(_extend_virtual_disk.called)
def test_copy_volume_to_image_non_vmdk(self):
"""Test copy_volume_to_image for a non-vmdk disk format."""
m = self.mox
image_meta = FakeObject()
image_meta['disk_format'] = 'novmdk'
volume = FakeObject()
volume['name'] = 'vol-name'
volume['instance_uuid'] = None
volume['attached_host'] = None
m.ReplayAll()
self.assertRaises(exception.ImageUnacceptable,
self._driver.copy_volume_to_image,
mox.IgnoreArg(), volume,
mox.IgnoreArg(), image_meta)
m.UnsetStubs()
m.VerifyAll()
def test_copy_volume_to_image_when_attached(self):
"""Test copy_volume_to_image when volume is attached."""
m = self.mox
volume = FakeObject()
volume['instance_uuid'] = 'my_uuid'
m.ReplayAll()
self.assertRaises(exception.InvalidVolume,
self._driver.copy_volume_to_image,
mox.IgnoreArg(), volume,
mox.IgnoreArg(), mox.IgnoreArg())
m.UnsetStubs()
m.VerifyAll()
def test_copy_volume_to_image_vmdk(self):
"""Test copy_volume_to_image for a valid vmdk disk format."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'session')
self._driver.session = self._session
m.StubOutWithMock(api.VMwareAPISession, 'vim')
self._session.vim = self._vim
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
image_id = 'image-id-1'
image_meta = FakeObject()
image_meta['disk_format'] = 'vmdk'
image_meta['id'] = image_id
image_meta['name'] = image_id
image_service = FakeObject()
vol_name = 'volume-123456789'
project_id = 'project-owner-id-123'
volume = FakeObject()
volume['name'] = vol_name
size_gb = 5
size = size_gb * units.Gi
volume['size'] = size_gb
volume['project_id'] = project_id
volume['instance_uuid'] = None
volume['attached_host'] = None
# volumeops.get_backing
backing = FakeMor("VirtualMachine", "my_vm")
m.StubOutWithMock(self._volumeops, 'get_backing')
self._volumeops.get_backing(vol_name).AndReturn(backing)
# volumeops.get_vmdk_path
datastore_name = 'datastore1'
file_path = 'my_folder/my_nested_folder/my_vm.vmdk'
vmdk_file_path = '[%s] %s' % (datastore_name, file_path)
m.StubOutWithMock(self._volumeops, 'get_vmdk_path')
self._volumeops.get_vmdk_path(backing).AndReturn(vmdk_file_path)
# vmware_images.upload_image
timeout = self._config.vmware_image_transfer_timeout_secs
host_ip = self.IP
m.StubOutWithMock(vmware_images, 'upload_image')
vmware_images.upload_image(mox.IgnoreArg(), timeout, image_service,
image_id, project_id, session=self._session,
host=host_ip, vm=backing,
vmdk_file_path=vmdk_file_path,
vmdk_size=size,
image_name=image_id,
image_version=1)
m.ReplayAll()
self._driver.copy_volume_to_image(mox.IgnoreArg(), volume,
image_service, image_meta)
m.UnsetStubs()
m.VerifyAll()
def test_retrieve_properties_ex_fault_checker(self):
"""Test retrieve_properties_ex_fault_checker is called."""
m = self.mox
class FakeVim(vim.Vim):
def __init__(self):
pass
@property
def client(self):
class FakeRetrv(object):
def RetrievePropertiesEx(self, collector):
pass
def __getattr__(self, name):
if name == 'service':
return FakeRetrv()
return FakeRetrv()
def RetrieveServiceContent(self, type='ServiceInstance'):
return mox.MockAnything()
_vim = FakeVim()
m.ReplayAll()
# retrieve_properties_ex_fault_checker throws authentication error
self.assertRaises(error_util.VimFaultException,
_vim.RetrievePropertiesEx, mox.IgnoreArg())
m.UnsetStubs()
m.VerifyAll()
@mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
@mock.patch('cinder.openstack.common.uuidutils.generate_uuid')
@mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
@mock.patch('cinder.volume.volume_types.get_volume_type_extra_specs')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, 'ds_sel')
def test_retype(self, ds_sel, vops, get_volume_type_extra_specs,
get_volume_group_folder, generate_uuid,
delete_temp_backing):
self._test_retype(ds_sel, vops, get_volume_type_extra_specs,
get_volume_group_folder, generate_uuid,
delete_temp_backing)
    def _test_retype(self, ds_sel, vops, get_volume_type_extra_specs,
                     get_volume_group_folder, generate_uuid,
                     delete_temp_backing):
self._driver._storage_policy_enabled = True
context = mock.sentinel.context
diff = mock.sentinel.diff
host = mock.sentinel.host
new_type = {'id': 'abc'}
# Test with in-use volume.
vol = {'size': 1, 'status': 'retyping', 'name': 'vol-1',
'volume_type_id': 'def', 'instance_uuid': '583a8dbb'}
self.assertFalse(self._driver.retype(context, vol, new_type, diff,
host))
# Test with no backing.
vops.get_backing.return_value = None
vol['instance_uuid'] = None
self.assertTrue(self._driver.retype(context, vol, new_type, diff,
host))
# Test with no disk type conversion, no profile change and
# compliant datastore.
ds_value = mock.sentinel.datastore_value
datastore = mock.Mock(value=ds_value)
vops.get_datastore.return_value = datastore
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
get_volume_type_extra_specs.side_effect = [vmdk.THIN_VMDK_TYPE,
vmdk.THIN_VMDK_TYPE,
None,
None]
ds_sel.is_datastore_compliant.return_value = True
self.assertTrue(self._driver.retype(context, vol, new_type, diff,
host))
# Test with no disk type conversion, profile change and
# compliant datastore.
new_profile = mock.sentinel.new_profile
get_volume_type_extra_specs.side_effect = [vmdk.THIN_VMDK_TYPE,
vmdk.THIN_VMDK_TYPE,
'gold-1',
new_profile]
ds_sel.is_datastore_compliant.return_value = True
profile_id = mock.sentinel.profile_id
ds_sel.get_profile_id.return_value = profile_id
self.assertTrue(self._driver.retype(context, vol, new_type, diff,
host))
vops.change_backing_profile.assert_called_once_with(backing,
profile_id)
# Test with disk type conversion, profile change and a backing with
# snapshots. Also test the no candidate datastore case.
get_volume_type_extra_specs.side_effect = [vmdk.THICK_VMDK_TYPE,
vmdk.THIN_VMDK_TYPE,
'gold-1',
new_profile]
vops.snapshot_exists.return_value = True
ds_sel.select_datastore.return_value = ()
self.assertFalse(self._driver.retype(context, vol, new_type, diff,
host))
exp_req = {hub.DatastoreSelector.HARD_ANTI_AFFINITY_DS: [ds_value],
hub.DatastoreSelector.PROFILE_NAME: new_profile,
hub.DatastoreSelector.SIZE_BYTES: units.Gi}
ds_sel.select_datastore.assert_called_once_with(exp_req)
# Modify the previous case with a candidate datastore which is
# different than the backing's current datastore.
get_volume_type_extra_specs.side_effect = [vmdk.THICK_VMDK_TYPE,
vmdk.THIN_VMDK_TYPE,
'gold-1',
new_profile]
vops.snapshot_exists.return_value = True
host = mock.sentinel.host
rp = mock.sentinel.rp
candidate_ds = mock.Mock(value=mock.sentinel.candidate_ds_value)
summary = mock.Mock(datastore=candidate_ds)
ds_sel.select_datastore.return_value = (host, rp, summary)
folder = mock.sentinel.folder
get_volume_group_folder.return_value = folder
vops.change_backing_profile.reset_mock()
self.assertTrue(self._driver.retype(context, vol, new_type, diff,
host))
vops.relocate_backing.assert_called_once_with(
backing, candidate_ds, rp, host, vmdk.THIN_VMDK_TYPE)
vops.move_backing_to_folder.assert_called_once_with(backing, folder)
vops.change_backing_profile.assert_called_once_with(backing,
profile_id)
# Modify the previous case with no profile change.
get_volume_type_extra_specs.side_effect = [vmdk.THICK_VMDK_TYPE,
vmdk.THIN_VMDK_TYPE,
'gold-1',
'gold-1']
ds_sel.select_datastore.reset_mock()
vops.relocate_backing.reset_mock()
vops.move_backing_to_folder.reset_mock()
vops.change_backing_profile.reset_mock()
self.assertTrue(self._driver.retype(context, vol, new_type, diff,
host))
exp_req = {hub.DatastoreSelector.HARD_ANTI_AFFINITY_DS: [ds_value],
hub.DatastoreSelector.PROFILE_NAME: 'gold-1',
hub.DatastoreSelector.SIZE_BYTES: units.Gi}
ds_sel.select_datastore.assert_called_once_with(exp_req)
vops.relocate_backing.assert_called_once_with(
backing, candidate_ds, rp, host, vmdk.THIN_VMDK_TYPE)
vops.move_backing_to_folder.assert_called_once_with(backing, folder)
self.assertFalse(vops.change_backing_profile.called)
# Test with disk type conversion, profile change, backing with
# no snapshots and candidate datastore which is same as the backing
# datastore.
get_volume_type_extra_specs.side_effect = [vmdk.THICK_VMDK_TYPE,
vmdk.THIN_VMDK_TYPE,
'gold-1',
new_profile]
vops.snapshot_exists.return_value = False
summary.datastore = datastore
uuid = '025b654b-d4ed-47f9-8014-b71a7744eafc'
        generate_uuid.return_value = uuid
clone = mock.sentinel.clone
vops.clone_backing.return_value = clone
vops.change_backing_profile.reset_mock()
self.assertTrue(self._driver.retype(context, vol, new_type, diff,
host))
vops.rename_backing.assert_called_once_with(backing, uuid)
vops.clone_backing.assert_called_once_with(
vol['name'], backing, None, volumeops.FULL_CLONE_TYPE,
datastore, vmdk.THIN_VMDK_TYPE, host)
delete_temp_backing.assert_called_once_with(backing)
vops.change_backing_profile.assert_called_once_with(clone,
profile_id)
# Modify the previous case with exception during clone.
get_volume_type_extra_specs.side_effect = [vmdk.THICK_VMDK_TYPE,
vmdk.THIN_VMDK_TYPE,
'gold-1',
new_profile]
vops.clone_backing.side_effect = error_util.VimException('error')
vops.rename_backing.reset_mock()
vops.change_backing_profile.reset_mock()
self.assertRaises(
error_util.VimException, self._driver.retype, context, vol,
new_type, diff, host)
exp_rename_calls = [mock.call(backing, uuid),
mock.call(backing, vol['name'])]
self.assertEqual(exp_rename_calls, vops.rename_backing.call_args_list)
self.assertFalse(vops.change_backing_profile.called)
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_extend_vmdk_virtual_disk(self, volume_ops):
"""Test vmdk._extend_vmdk_virtual_disk."""
self._test_extend_vmdk_virtual_disk(volume_ops)
def _test_extend_vmdk_virtual_disk(self, volume_ops):
fake_backing = mock.sentinel.backing
fake_vmdk_path = "[datastore] dest_vm/dest_vm.vmdk"
fake_dc = mock.sentinel.datacenter
fake_name = 'fake_name'
fake_size = 7
# If the backing is None, get_vmdk_path and get_dc
# will not be called
volume_ops.get_backing.return_value = None
volume_ops.get_vmdk_path.return_value = fake_vmdk_path
volume_ops.get_dc.return_value = fake_dc
self._driver._extend_vmdk_virtual_disk(fake_name, fake_size)
volume_ops.get_backing.assert_called_once_with(fake_name)
self.assertFalse(volume_ops.get_vmdk_path.called)
self.assertFalse(volume_ops.get_dc.called)
self.assertFalse(volume_ops.extend_virtual_disk.called)
# Reset the mock and set the backing with a fake,
# all the mocks should be called.
volume_ops.get_backing.reset_mock()
volume_ops.get_backing.return_value = fake_backing
self._driver._extend_vmdk_virtual_disk(fake_name, fake_size)
volume_ops.get_vmdk_path.assert_called_once_with(fake_backing)
volume_ops.get_dc.assert_called_once_with(fake_backing)
volume_ops.extend_virtual_disk.assert_called_once_with(fake_size,
fake_vmdk_path,
fake_dc)
# Test the exceptional case for extend_virtual_disk
volume_ops.extend_virtual_disk.side_effect = error_util.VimException(
'VimException raised.')
self.assertRaises(error_util.VimException,
self._driver._extend_vmdk_virtual_disk,
fake_name, fake_size)
@mock.patch.object(vmware_images, 'download_stream_optimized_disk')
@mock.patch('cinder.openstack.common.fileutils.file_open')
@mock.patch.object(VMDK_DRIVER, '_temporary_file')
@mock.patch('cinder.openstack.common.uuidutils.generate_uuid')
@mock.patch.object(VMDK_DRIVER, '_create_backing_in_inventory')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, 'session')
def test_backup_volume(self, session, vops, create_backing, generate_uuid,
temporary_file, file_open, download_disk):
self._test_backup_volume(session, vops, create_backing, generate_uuid,
temporary_file, file_open, download_disk)
def _test_backup_volume(self, session, vops, create_backing, generate_uuid,
temporary_file, file_open, download_disk):
volume = {'name': 'vol-1', 'id': 1, 'size': 1}
self._db.volume_get.return_value = volume
vops.get_backing.return_value = None
backing = mock.sentinel.backing
create_backing.return_value = backing
uuid = "c1037b23-c5e9-4446-815f-3e097cbf5bb0"
generate_uuid.return_value = uuid
tmp_file_path = mock.sentinel.tmp_file_path
temporary_file_ret = mock.Mock()
temporary_file.return_value = temporary_file_ret
temporary_file_ret.__enter__ = mock.Mock(return_value=tmp_file_path)
temporary_file_ret.__exit__ = mock.Mock(return_value=None)
vmdk_path = mock.sentinel.vmdk_path
vops.get_vmdk_path.return_value = vmdk_path
tmp_file = mock.sentinel.tmp_file
file_open_ret = mock.Mock()
file_open.return_value = file_open_ret
file_open_ret.__enter__ = mock.Mock(return_value=tmp_file)
file_open_ret.__exit__ = mock.Mock(return_value=None)
context = mock.sentinel.context
backup = {'id': 2, 'volume_id': 1}
backup_service = mock.Mock()
self._driver.backup_volume(context, backup, backup_service)
create_backing.assert_called_once_with(volume)
temporary_file.assert_called_once_with(suffix=".vmdk", prefix=uuid)
self.assertEqual(mock.call(tmp_file_path, "wb"),
file_open.call_args_list[0])
download_disk.assert_called_once_with(
context, self.IMG_TX_TIMEOUT, tmp_file, session=session,
host=self.IP, vm=backing, vmdk_file_path=vmdk_path,
vmdk_size=volume['size'] * units.Gi)
self.assertEqual(mock.call(tmp_file_path, "rb"),
file_open.call_args_list[1])
backup_service.backup.assert_called_once_with(backup, tmp_file)
@mock.patch.object(VMDK_DRIVER, 'extend_volume')
@mock.patch.object(VMDK_DRIVER, '_restore_backing')
@mock.patch('cinder.openstack.common.fileutils.file_open')
@mock.patch.object(VMDK_DRIVER, '_temporary_file')
@mock.patch('cinder.openstack.common.uuidutils.generate_uuid')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_restore_backup(self, vops, generate_uuid, temporary_file,
file_open, restore_backing, extend_volume):
self._test_restore_backup(vops, generate_uuid, temporary_file,
file_open, restore_backing, extend_volume)
def _test_restore_backup(
self, vops, generate_uuid, temporary_file, file_open,
restore_backing, extend_volume):
volume = {'name': 'vol-1', 'id': 1, 'size': 1}
backup = {'id': 2, 'size': 1}
context = mock.sentinel.context
backup_service = mock.Mock()
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
vops.snapshot_exists.return_value = True
self.assertRaises(
exception.InvalidVolume, self._driver.restore_backup, context,
backup, volume, backup_service)
uuid = "c1037b23-c5e9-4446-815f-3e097cbf5bb0"
generate_uuid.return_value = uuid
tmp_file_path = mock.sentinel.tmp_file_path
temporary_file_ret = mock.Mock()
temporary_file.return_value = temporary_file_ret
temporary_file_ret.__enter__ = mock.Mock(return_value=tmp_file_path)
temporary_file_ret.__exit__ = mock.Mock(return_value=None)
tmp_file = mock.sentinel.tmp_file
file_open_ret = mock.Mock()
file_open.return_value = file_open_ret
file_open_ret.__enter__ = mock.Mock(return_value=tmp_file)
file_open_ret.__exit__ = mock.Mock(return_value=None)
vops.snapshot_exists.return_value = False
self._driver.restore_backup(context, backup, volume, backup_service)
temporary_file.assert_called_once_with(suffix=".vmdk", prefix=uuid)
file_open.assert_called_once_with(tmp_file_path, "wb")
backup_service.restore.assert_called_once_with(
backup, volume['id'], tmp_file)
restore_backing.assert_called_once_with(
context, volume, backing, tmp_file_path, backup['size'] * units.Gi)
self.assertFalse(extend_volume.called)
temporary_file.reset_mock()
file_open.reset_mock()
backup_service.reset_mock()
restore_backing.reset_mock()
volume = {'name': 'vol-1', 'id': 1, 'size': 2}
self._driver.restore_backup(context, backup, volume, backup_service)
temporary_file.assert_called_once_with(suffix=".vmdk", prefix=uuid)
file_open.assert_called_once_with(tmp_file_path, "wb")
backup_service.restore.assert_called_once_with(
backup, volume['id'], tmp_file)
restore_backing.assert_called_once_with(
context, volume, backing, tmp_file_path, backup['size'] * units.Gi)
extend_volume.assert_called_once_with(volume, volume['size'])
@mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch(
'cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver._get_disk_type')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER,
'_create_backing_from_stream_optimized_file')
@mock.patch('cinder.openstack.common.uuidutils.generate_uuid')
def test_restore_backing(
self, generate_uuid, create_backing, select_ds, get_disk_type,
vops, delete_temp_backing):
self._test_restore_backing(
generate_uuid, create_backing, select_ds, get_disk_type, vops,
delete_temp_backing)
def _test_restore_backing(
self, generate_uuid, create_backing, select_ds, get_disk_type,
vops, delete_temp_backing):
src_uuid = "c1037b23-c5e9-4446-815f-3e097cbf5bb0"
generate_uuid.return_value = src_uuid
src = mock.sentinel.src
create_backing.return_value = src
summary = mock.Mock()
summary.datastore = mock.sentinel.datastore
select_ds.return_value = (mock.sentinel.host, mock.ANY, mock.ANY,
summary)
disk_type = vmdk.THIN_VMDK_TYPE
get_disk_type.return_value = disk_type
context = mock.sentinel.context
volume = {'name': 'vol-1', 'id': 1, 'size': 1}
backing = None
tmp_file_path = mock.sentinel.tmp_file_path
backup_size = units.Gi
self._driver._restore_backing(
context, volume, backing, tmp_file_path, backup_size)
create_backing.assert_called_once_with(
context, src_uuid, volume, tmp_file_path, backup_size)
vops.clone_backing.assert_called_once_with(
volume['name'], src, None, volumeops.FULL_CLONE_TYPE,
summary.datastore, disk_type, mock.sentinel.host)
delete_temp_backing.assert_called_once_with(src)
create_backing.reset_mock()
vops.clone_backing.reset_mock()
delete_temp_backing.reset_mock()
dest_uuid = "de4b0708-f947-4abe-98f8-75e52ce03b7b"
tmp_uuid = "82c2a4f0-9064-4d95-bd88-6567a36018fa"
generate_uuid.side_effect = [src_uuid, dest_uuid, tmp_uuid]
dest = mock.sentinel.dest
vops.clone_backing.return_value = dest
backing = mock.sentinel.backing
self._driver._restore_backing(
context, volume, backing, tmp_file_path, backup_size)
create_backing.assert_called_once_with(
context, src_uuid, volume, tmp_file_path, backup_size)
vops.clone_backing.assert_called_once_with(
dest_uuid, src, None, volumeops.FULL_CLONE_TYPE,
summary.datastore, disk_type, mock.sentinel.host)
exp_rename_calls = [mock.call(backing, tmp_uuid),
mock.call(dest, volume['name'])]
self.assertEqual(exp_rename_calls, vops.rename_backing.call_args_list)
exp_delete_temp_backing_calls = [mock.call(backing), mock.call(src)]
self.assertEqual(exp_delete_temp_backing_calls,
delete_temp_backing.call_args_list)
delete_temp_backing.reset_mock()
vops.rename_backing.reset_mock()
def vops_rename(backing, new_name):
if backing == dest and new_name == volume['name']:
raise error_util.VimException("error")
vops.rename_backing.side_effect = vops_rename
generate_uuid.side_effect = [src_uuid, dest_uuid, tmp_uuid]
self.assertRaises(
error_util.VimException, self._driver._restore_backing, context,
volume, backing, tmp_file_path, backup_size)
exp_rename_calls = [mock.call(backing, tmp_uuid),
mock.call(dest, volume['name']),
mock.call(backing, volume['name'])]
self.assertEqual(exp_rename_calls, vops.rename_backing.call_args_list)
exp_delete_temp_backing_calls = [mock.call(dest), mock.call(src)]
self.assertEqual(exp_delete_temp_backing_calls,
delete_temp_backing.call_args_list)
@mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
@mock.patch.object(vmware_images, 'upload_stream_optimized_disk')
@mock.patch('cinder.openstack.common.fileutils.file_open')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch(
'cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver._get_disk_type')
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id')
@mock.patch.object(VMDK_DRIVER, 'session')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
def test_create_backing_from_stream_optimized_file(
self, select_ds, session, get_storage_profile_id, get_disk_type,
vops, file_open, upload_disk, delete_temp_backing):
self._test_create_backing_from_stream_optimized_file(
select_ds, session, get_storage_profile_id, get_disk_type, vops,
file_open, upload_disk, delete_temp_backing)
def _test_create_backing_from_stream_optimized_file(
self, select_ds, session, get_storage_profile_id, get_disk_type,
vops, file_open, upload_disk, delete_temp_backing):
rp = mock.sentinel.rp
folder = mock.sentinel.folder
summary = mock.Mock()
summary.name = mock.sentinel.name
select_ds.return_value = (mock.ANY, rp, folder, summary)
import_spec = mock.Mock()
session.vim.client.factory.create.return_value = import_spec
profile_id = 'profile-1'
get_storage_profile_id.return_value = profile_id
disk_type = vmdk.THIN_VMDK_TYPE
get_disk_type.return_value = disk_type
create_spec = mock.Mock()
vops.get_create_spec.return_value = create_spec
tmp_file = mock.sentinel.tmp_file
file_open_ret = mock.Mock()
file_open.return_value = file_open_ret
file_open_ret.__enter__ = mock.Mock(return_value=tmp_file)
file_open_ret.__exit__ = mock.Mock(return_value=None)
vm_ref = mock.sentinel.vm_ref
upload_disk.return_value = vm_ref
context = mock.sentinel.context
name = 'vm-1'
volume = {'name': 'vol-1', 'id': 1, 'size': 1}
tmp_file_path = mock.sentinel.tmp_file_path
file_size_bytes = units.Gi
ret = self._driver._create_backing_from_stream_optimized_file(
context, name, volume, tmp_file_path, file_size_bytes)
self.assertEqual(vm_ref, ret)
vops.get_create_spec.assert_called_once_with(
name, 0, disk_type, summary.name, profile_id)
file_open.assert_called_once_with(tmp_file_path, "rb")
upload_disk.assert_called_once_with(
context, self.IMG_TX_TIMEOUT, tmp_file, session=session,
host=self.IP, resource_pool=rp, vm_folder=folder,
vm_create_spec=import_spec, vmdk_size=file_size_bytes)
upload_disk.side_effect = error_util.VimException("error")
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
self.assertRaises(
error_util.VimException,
self._driver._create_backing_from_stream_optimized_file,
context, name, volume, tmp_file_path, file_size_bytes)
delete_temp_backing.assert_called_once_with(backing)
class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase):
"""Test class for VMwareVcVmdkDriver."""
VMDK_DRIVER = vmdk.VMwareVcVmdkDriver
DEFAULT_VC_VERSION = '5.5'
def setUp(self):
super(VMwareVcVmdkDriverTestCase, self).setUp()
self._config.vmware_host_version = self.DEFAULT_VC_VERSION
self._driver = vmdk.VMwareVcVmdkDriver(configuration=self._config,
db=self._db)
def test_get_pbm_wsdl_location(self):
# no version returns None
wsdl = self._driver._get_pbm_wsdl_location(None)
self.assertIsNone(wsdl)
def expected_wsdl(version):
driver_dir = os.path.join(os.path.dirname(__file__), '..',
'volume', 'drivers', 'vmware')
driver_abs_dir = os.path.abspath(driver_dir)
return 'file://' + os.path.join(driver_abs_dir, 'wsdl', version,
'pbmService.wsdl')
# verify wsdl path for different version strings
with mock.patch('os.path.exists') as path_exists:
path_exists.return_value = True
wsdl = self._driver._get_pbm_wsdl_location(LooseVersion('5'))
self.assertEqual(expected_wsdl('5'), wsdl)
wsdl = self._driver._get_pbm_wsdl_location(LooseVersion('5.5'))
self.assertEqual(expected_wsdl('5.5'), wsdl)
wsdl = self._driver._get_pbm_wsdl_location(LooseVersion('5.5.1'))
self.assertEqual(expected_wsdl('5.5'), wsdl)
# if wsdl path does not exist, then it returns None
path_exists.return_value = False
wsdl = self._driver._get_pbm_wsdl_location(LooseVersion('5.5'))
self.assertIsNone(wsdl)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'session', new_callable=mock.PropertyMock)
def test_get_vc_version(self, session):
# test config overrides fetching from VC server
version = self._driver._get_vc_version()
self.assertEqual(self.DEFAULT_VC_VERSION, version)
# explicitly remove config entry
self._driver.configuration.vmware_host_version = None
session.return_value.vim.service_content.about.version = '6.0.1'
version = self._driver._get_vc_version()
self.assertEqual(LooseVersion('6.0.1'), version)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_vc_version')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'session', new_callable=mock.PropertyMock)
def test_do_setup_with_pbm_disabled(self, session, get_vc_version):
session_obj = mock.Mock(name='session')
session.return_value = session_obj
get_vc_version.return_value = LooseVersion('5.0')
self._driver.do_setup(mock.ANY)
self.assertFalse(self._driver._storage_policy_enabled)
get_vc_version.assert_called_once_with()
self.assertEqual(session_obj, self._driver.volumeops._session)
self.assertEqual(session_obj, self._driver.ds_sel._session)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_pbm_wsdl_location')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_vc_version')
def test_do_setup_with_invalid_pbm_wsdl(self, get_vc_version,
get_pbm_wsdl_location):
vc_version = LooseVersion('5.5')
get_vc_version.return_value = vc_version
get_pbm_wsdl_location.return_value = None
self.assertRaises(error_util.VMwareDriverException,
self._driver.do_setup,
mock.ANY)
self.assertFalse(self._driver._storage_policy_enabled)
get_vc_version.assert_called_once_with()
get_pbm_wsdl_location.assert_called_once_with(vc_version)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_pbm_wsdl_location')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_vc_version')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'session', new_callable=mock.PropertyMock)
def test_do_setup(self, session, get_vc_version, get_pbm_wsdl_location):
session_obj = mock.Mock(name='session')
session.return_value = session_obj
vc_version = LooseVersion('5.5')
get_vc_version.return_value = vc_version
get_pbm_wsdl_location.return_value = 'file:///pbm.wsdl'
self._driver.do_setup(mock.ANY)
self.assertTrue(self._driver._storage_policy_enabled)
get_vc_version.assert_called_once_with()
get_pbm_wsdl_location.assert_called_once_with(vc_version)
self.assertEqual(session_obj, self._driver.volumeops._session)
self.assertEqual(session_obj, self._driver.ds_sel._session)
@mock.patch.object(VMDK_DRIVER, '_extend_volumeops_virtual_disk')
@mock.patch.object(VMDK_DRIVER, '_create_backing_in_inventory')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_create_backing_by_copying(self, volumeops, create_backing,
extend_virtual_disk):
self._test_create_backing_by_copying(volumeops, create_backing,
extend_virtual_disk)
def test_init_conn_with_instance_and_backing(self):
"""Test initialize_connection with instance and backing."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
m.StubOutWithMock(self._volumeops, 'get_backing')
volume = FakeObject()
volume['name'] = 'volume_name'
volume['id'] = 'volume_id'
volume['size'] = 1
connector = {'instance': 'my_instance'}
backing = FakeMor('VirtualMachine', 'my_back')
self._volumeops.get_backing(volume['name']).AndReturn(backing)
m.StubOutWithMock(self._volumeops, 'get_host')
host = FakeMor('HostSystem', 'my_host')
self._volumeops.get_host(mox.IgnoreArg()).AndReturn(host)
datastore = FakeMor('Datastore', 'my_ds')
resource_pool = FakeMor('ResourcePool', 'my_rp')
m.StubOutWithMock(self._volumeops, 'get_dss_rp')
self._volumeops.get_dss_rp(host).AndReturn(([datastore],
resource_pool))
m.StubOutWithMock(self._volumeops, 'get_datastore')
self._volumeops.get_datastore(backing).AndReturn(datastore)
m.ReplayAll()
conn_info = self._driver.initialize_connection(volume, connector)
self.assertEqual(conn_info['driver_volume_type'], 'vmdk')
self.assertEqual(conn_info['data']['volume'], 'my_back')
self.assertEqual(conn_info['data']['volume_id'], 'volume_id')
m.UnsetStubs()
m.VerifyAll()
def test_get_volume_group_folder(self):
"""Test _get_volume_group_folder."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
datacenter = FakeMor('Datacenter', 'my_dc')
m.StubOutWithMock(self._volumeops, 'get_vmfolder')
self._volumeops.get_vmfolder(datacenter)
m.StubOutWithMock(self._volumeops, 'create_folder')
self._volumeops.create_folder(mox.IgnoreArg(),
self._config.vmware_volume_folder)
m.ReplayAll()
self._driver._get_volume_group_folder(datacenter)
m.UnsetStubs()
m.VerifyAll()
def test_init_conn_with_instance_and_backing_and_relocation(self):
"""Test initialize_connection with backing being relocated."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
m.StubOutWithMock(self._volumeops, 'get_backing')
volume = FakeObject()
volume['name'] = 'volume_name'
volume['id'] = 'volume_id'
volume['size'] = 1
connector = {'instance': 'my_instance'}
backing = FakeMor('VirtualMachine', 'my_back')
self._volumeops.get_backing(volume['name']).AndReturn(backing)
m.StubOutWithMock(self._volumeops, 'get_host')
host = FakeMor('HostSystem', 'my_host')
self._volumeops.get_host(mox.IgnoreArg()).AndReturn(host)
datastore1 = FakeMor('Datastore', 'my_ds_1')
datastore2 = FakeMor('Datastore', 'my_ds_2')
resource_pool = FakeMor('ResourcePool', 'my_rp')
m.StubOutWithMock(self._volumeops, 'get_dss_rp')
self._volumeops.get_dss_rp(host).AndReturn(([datastore1],
resource_pool))
m.StubOutWithMock(self._volumeops, 'get_datastore')
self._volumeops.get_datastore(backing).AndReturn(datastore2)
m.StubOutWithMock(self._driver, '_get_folder_ds_summary')
folder = FakeMor('Folder', 'my_fol')
summary = FakeDatastoreSummary(1, 1, datastore1)
self._driver._get_folder_ds_summary(volume, resource_pool,
[datastore1]).AndReturn((folder,
summary))
m.StubOutWithMock(self._volumeops, 'relocate_backing')
self._volumeops.relocate_backing(backing, datastore1,
resource_pool, host)
m.StubOutWithMock(self._volumeops, 'move_backing_to_folder')
self._volumeops.move_backing_to_folder(backing, folder)
m.ReplayAll()
conn_info = self._driver.initialize_connection(volume, connector)
self.assertEqual(conn_info['driver_volume_type'], 'vmdk')
self.assertEqual(conn_info['data']['volume'], 'my_back')
self.assertEqual(conn_info['data']['volume_id'], 'volume_id')
m.UnsetStubs()
m.VerifyAll()
@mock.patch.object(VMDK_DRIVER, '_extend_vmdk_virtual_disk')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_clone_backing_linked(self, volume_ops, _extend_vmdk_virtual_disk):
"""Test _clone_backing with clone type - linked."""
fake_size = 3
fake_volume = {'volume_type_id': None, 'name': 'fake_name',
'size': fake_size}
fake_snapshot = {'volume_name': 'volume_name',
'name': 'snapshot_name',
'volume_size': 2}
fake_type = volumeops.LINKED_CLONE_TYPE
fake_backing = mock.sentinel.backing
self._driver._clone_backing(fake_volume, fake_backing, fake_snapshot,
volumeops.LINKED_CLONE_TYPE,
fake_snapshot['volume_size'])
volume_ops.clone_backing.assert_called_with(fake_volume['name'],
fake_backing,
fake_snapshot,
fake_type,
None,
host=None)
# If the volume size is greater than the original snapshot size,
# _extend_vmdk_virtual_disk will be called.
_extend_vmdk_virtual_disk.assert_called_with(fake_volume['name'],
fake_volume['size'])
# If the volume size is not greater than the original snapshot size,
# _extend_vmdk_virtual_disk will not be called.
fake_size = 2
fake_volume['size'] = fake_size
_extend_vmdk_virtual_disk.reset_mock()
self._driver._clone_backing(fake_volume, fake_backing, fake_snapshot,
volumeops.LINKED_CLONE_TYPE,
fake_snapshot['volume_size'])
self.assertFalse(_extend_vmdk_virtual_disk.called)
@mock.patch.object(VMDK_DRIVER, '_extend_vmdk_virtual_disk')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_clone_backing_full(self, volume_ops, _select_ds_for_volume,
_extend_vmdk_virtual_disk):
"""Test _clone_backing with clone type - full."""
fake_host = mock.sentinel.host
fake_backing = mock.sentinel.backing
fake_folder = mock.sentinel.folder
fake_datastore = mock.sentinel.datastore
fake_resource_pool = mock.sentinel.resourcePool
fake_summary = mock.Mock(spec=object)
fake_summary.datastore = fake_datastore
fake_size = 3
fake_volume = {'volume_type_id': None, 'name': 'fake_name',
'size': fake_size}
fake_snapshot = {'volume_name': 'volume_name', 'name': 'snapshot_name',
'volume_size': 2}
_select_ds_for_volume.return_value = (fake_host,
fake_resource_pool,
fake_folder, fake_summary)
self._driver._clone_backing(fake_volume, fake_backing, fake_snapshot,
volumeops.FULL_CLONE_TYPE,
fake_snapshot['volume_size'])
_select_ds_for_volume.assert_called_with(fake_volume)
volume_ops.clone_backing.assert_called_with(fake_volume['name'],
fake_backing,
fake_snapshot,
volumeops.FULL_CLONE_TYPE,
fake_datastore,
host=fake_host)
# If the volume size is greater than the original snapshot size,
# _extend_vmdk_virtual_disk will be called.
_extend_vmdk_virtual_disk.assert_called_with(fake_volume['name'],
fake_volume['size'])
# If the volume size is not greater than the original snapshot size,
# _extend_vmdk_virtual_disk will not be called.
fake_size = 2
fake_volume['size'] = fake_size
_extend_vmdk_virtual_disk.reset_mock()
self._driver._clone_backing(fake_volume, fake_backing, fake_snapshot,
volumeops.FULL_CLONE_TYPE,
fake_snapshot['volume_size'])
self.assertFalse(_extend_vmdk_virtual_disk.called)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_create_volume_from_snapshot_without_backing(self, mock_vops):
"""Test create_volume_from_snapshot without a backing."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'name': 'mock_vol'}
snapshot = {'volume_name': 'mock_vol', 'name': 'mock_snap'}
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = None
# invoke the create_volume_from_snapshot api
driver.create_volume_from_snapshot(volume, snapshot)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('mock_vol')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_create_volume_from_snap_without_backing_snap(self, mock_vops):
"""Test create_volume_from_snapshot without a backing snapshot."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'volume_type_id': None, 'name': 'mock_vol'}
snapshot = {'volume_name': 'mock_vol', 'name': 'mock_snap'}
backing = mock.sentinel.backing
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = backing
mock_vops.get_snapshot.return_value = None
# invoke the create_volume_from_snapshot api
driver.create_volume_from_snapshot(volume, snapshot)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('mock_vol')
mock_vops.get_snapshot.assert_called_once_with(backing,
'mock_snap')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_create_volume_from_snapshot(self, mock_vops):
"""Test create_volume_from_snapshot."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'volume_type_id': None, 'name': 'mock_vol'}
snapshot = {'volume_name': 'mock_vol', 'name': 'mock_snap',
'volume_size': 2}
backing = mock.sentinel.backing
snap_moref = mock.sentinel.snap_moref
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = backing
mock_vops.get_snapshot.return_value = snap_moref
driver._clone_backing = mock.MagicMock()
# invoke the create_volume_from_snapshot api
driver.create_volume_from_snapshot(volume, snapshot)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('mock_vol')
mock_vops.get_snapshot.assert_called_once_with(backing,
'mock_snap')
default_clone_type = volumeops.FULL_CLONE_TYPE
driver._clone_backing.assert_called_once_with(volume,
backing,
snap_moref,
default_clone_type,
snapshot['volume_size'])
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_create_cloned_volume_without_backing(self, mock_vops):
"""Test create_cloned_volume without a backing."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'name': 'mock_vol'}
src_vref = {'name': 'src_snapshot_name'}
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = None
        # invoke the create_cloned_volume api
driver.create_cloned_volume(volume, src_vref)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_create_cloned_volume_with_backing(self, mock_vops):
"""Test create_cloned_volume with clone type - full."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'volume_type_id': None, 'name': 'mock_vol'}
src_vref = {'name': 'src_snapshot_name', 'size': 1}
backing = mock.sentinel.backing
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = backing
default_clone_type = volumeops.FULL_CLONE_TYPE
driver._clone_backing = mock.MagicMock()
        # invoke the create_cloned_volume api
driver.create_cloned_volume(volume, src_vref)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('src_snapshot_name')
driver._clone_backing.assert_called_once_with(volume,
backing,
None,
default_clone_type,
src_vref['size'])
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_clone_type')
def test_create_linked_cloned_volume_with_backing(self, get_clone_type,
mock_vops):
"""Test create_cloned_volume with clone type - linked."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'volume_type_id': None, 'name': 'mock_vol', 'id': 'mock_id'}
src_vref = {'name': 'src_snapshot_name', 'status': 'available',
'size': 1}
backing = mock.sentinel.backing
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = backing
linked_clone = volumeops.LINKED_CLONE_TYPE
get_clone_type.return_value = linked_clone
driver._clone_backing = mock.MagicMock()
mock_vops.create_snapshot = mock.MagicMock()
mock_vops.create_snapshot.return_value = mock.sentinel.snapshot
        # invoke the create_cloned_volume api
driver.create_cloned_volume(volume, src_vref)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('src_snapshot_name')
get_clone_type.assert_called_once_with(volume)
name = 'snapshot-%s' % volume['id']
mock_vops.create_snapshot.assert_called_once_with(backing, name, None)
driver._clone_backing.assert_called_once_with(volume,
backing,
mock.sentinel.snapshot,
linked_clone,
src_vref['size'])
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_clone_type')
def test_create_linked_cloned_volume_when_attached(self, get_clone_type,
mock_vops):
"""Test create_cloned_volume linked clone when volume is attached."""
mock_vops = mock_vops.return_value
driver = self._driver
volume = {'volume_type_id': None, 'name': 'mock_vol', 'id': 'mock_id'}
src_vref = {'name': 'src_snapshot_name', 'status': 'in-use'}
backing = mock.sentinel.backing
driver._verify_volume_creation = mock.MagicMock()
mock_vops.get_backing.return_value = backing
linked_clone = volumeops.LINKED_CLONE_TYPE
get_clone_type.return_value = linked_clone
        # invoke the create_cloned_volume api
self.assertRaises(exception.InvalidVolume,
driver.create_cloned_volume,
volume,
src_vref)
# verify calls
driver._verify_volume_creation.assert_called_once_with(volume)
mock_vops.get_backing.assert_called_once_with('src_snapshot_name')
get_clone_type.assert_called_once_with(volume)
@mock.patch('cinder.volume.volume_types.get_volume_type_extra_specs')
def test_get_storage_profile(self, get_volume_type_extra_specs):
"""Test vmdk _get_storage_profile."""
# volume with no type id returns None
volume = FakeObject()
volume['volume_type_id'] = None
sp = self._driver._get_storage_profile(volume)
self.assertEqual(None, sp, "Without a volume_type_id no storage "
"profile should be returned.")
# profile associated with the volume type should be returned
fake_id = 'fake_volume_id'
volume['volume_type_id'] = fake_id
get_volume_type_extra_specs.return_value = 'fake_profile'
profile = self._driver._get_storage_profile(volume)
self.assertEqual('fake_profile', profile)
spec_key = 'vmware:storage_profile'
get_volume_type_extra_specs.assert_called_once_with(fake_id, spec_key)
# None should be returned when no storage profile is
# associated with the volume type
get_volume_type_extra_specs.return_value = False
profile = self._driver._get_storage_profile(volume)
self.assertIsNone(profile)
@mock.patch('cinder.volume.drivers.vmware.vim_util.'
'convert_datastores_to_hubs')
@mock.patch('cinder.volume.drivers.vmware.vim_util.'
'convert_hubs_to_datastores')
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'session', new_callable=mock.PropertyMock)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_filter_ds_by_profile(self, volumeops, session, hubs_to_ds,
ds_to_hubs):
"""Test vmdk _filter_ds_by_profile() method."""
volumeops = volumeops.return_value
session = session.return_value
# Test with no profile id
datastores = [mock.sentinel.ds1, mock.sentinel.ds2]
profile = 'fake_profile'
volumeops.retrieve_profile_id.return_value = None
self.assertRaises(error_util.VimException,
self._driver._filter_ds_by_profile,
datastores, profile)
volumeops.retrieve_profile_id.assert_called_once_with(profile)
# Test with a fake profile id
profileId = 'fake_profile_id'
filtered_dss = [mock.sentinel.ds1]
# patch method calls from _filter_ds_by_profile
volumeops.retrieve_profile_id.return_value = profileId
pbm_cf = mock.sentinel.pbm_cf
session.pbm.client.factory = pbm_cf
hubs = [mock.sentinel.hub1, mock.sentinel.hub2]
ds_to_hubs.return_value = hubs
volumeops.filter_matching_hubs.return_value = mock.sentinel.hubs
hubs_to_ds.return_value = filtered_dss
# call _filter_ds_by_profile with a fake profile
actual_dss = self._driver._filter_ds_by_profile(datastores, profile)
# verify return value and called methods
self.assertEqual(filtered_dss, actual_dss,
"Wrong filtered datastores returned.")
ds_to_hubs.assert_called_once_with(pbm_cf, datastores)
volumeops.filter_matching_hubs.assert_called_once_with(hubs,
profileId)
hubs_to_ds.assert_called_once_with(mock.sentinel.hubs, datastores)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'session', new_callable=mock.PropertyMock)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'volumeops', new_callable=mock.PropertyMock)
def test_get_folder_ds_summary(self, volumeops, session):
"""Test _get_folder_ds_summary."""
volumeops = volumeops.return_value
driver = self._driver
driver._storage_policy_enabled = True
volume = {'size': 10, 'volume_type_id': 'fake_type'}
rp = mock.sentinel.resource_pool
dss = [mock.sentinel.datastore1, mock.sentinel.datastore2]
filtered_dss = [mock.sentinel.datastore1]
profile = mock.sentinel.profile
def filter_ds(datastores, storage_profile):
return filtered_dss
# patch method calls from _get_folder_ds_summary
volumeops.get_dc.return_value = mock.sentinel.dc
volumeops.get_vmfolder.return_value = mock.sentinel.vmfolder
volumeops.create_folder.return_value = mock.sentinel.folder
driver._get_storage_profile = mock.MagicMock()
driver._get_storage_profile.return_value = profile
driver._filter_ds_by_profile = mock.MagicMock(side_effect=filter_ds)
driver._select_datastore_summary = mock.MagicMock()
driver._select_datastore_summary.return_value = mock.sentinel.summary
# call _get_folder_ds_summary
(folder, datastore_summary) = driver._get_folder_ds_summary(volume,
rp, dss)
# verify returned values and calls made
self.assertEqual(mock.sentinel.folder, folder,
"Folder returned is wrong.")
self.assertEqual(mock.sentinel.summary, datastore_summary,
"Datastore summary returned is wrong.")
volumeops.get_dc.assert_called_once_with(rp)
volumeops.get_vmfolder.assert_called_once_with(mock.sentinel.dc)
volumeops.create_folder.assert_called_once_with(mock.sentinel.vmfolder,
self.VOLUME_FOLDER)
driver._get_storage_profile.assert_called_once_with(volume)
driver._filter_ds_by_profile.assert_called_once_with(dss, profile)
size = volume['size'] * units.Gi
driver._select_datastore_summary.assert_called_once_with(size,
filtered_dss)
# Clear side effects.
driver._filter_ds_by_profile.side_effect = None
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_extend_vmdk_virtual_disk(self, volume_ops):
"""Test vmdk._extend_vmdk_virtual_disk."""
self._test_extend_vmdk_virtual_disk(volume_ops)
@mock.patch.object(VMDK_DRIVER, '_extend_vmdk_virtual_disk')
@mock.patch('cinder.openstack.common.uuidutils.generate_uuid')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER,
'_create_virtual_disk_from_preallocated_image')
@mock.patch.object(VMDK_DRIVER, '_create_virtual_disk_from_sparse_image')
@mock.patch(
'cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver._get_disk_type')
@mock.patch.object(VMDK_DRIVER, '_get_ds_name_folder_path')
@mock.patch.object(VMDK_DRIVER, '_create_backing_in_inventory')
def test_copy_image_to_volume_non_stream_optimized(
self, create_backing, get_ds_name_folder_path, get_disk_type,
create_disk_from_sparse_image, create_disk_from_preallocated_image,
vops, select_ds_for_volume, generate_uuid, extend_disk):
self._test_copy_image_to_volume_non_stream_optimized(
create_backing,
get_ds_name_folder_path,
get_disk_type,
create_disk_from_sparse_image,
create_disk_from_preallocated_image,
vops,
select_ds_for_volume,
generate_uuid,
extend_disk)
@mock.patch(
'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath')
@mock.patch.object(VMDK_DRIVER, '_copy_image')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_create_virtual_disk_from_preallocated_image(
self, vops, copy_image, flat_extent_path):
self._test_create_virtual_disk_from_preallocated_image(
vops, copy_image, flat_extent_path)
@mock.patch(
'cinder.volume.drivers.vmware.volumeops.'
'MonolithicSparseVirtualDiskPath')
@mock.patch(
'cinder.volume.drivers.vmware.volumeops.FlatExtentVirtualDiskPath')
@mock.patch.object(VMDK_DRIVER, '_copy_temp_virtual_disk')
@mock.patch.object(VMDK_DRIVER, '_copy_image')
def test_create_virtual_disk_from_sparse_image(
self, copy_image, copy_temp_virtual_disk, flat_extent_path,
sparse_path):
self._test_create_virtual_disk_from_sparse_image(
copy_image, copy_temp_virtual_disk, flat_extent_path, sparse_path)
@mock.patch.object(vmware_images, 'fetch_stream_optimized_image')
@mock.patch.object(VMDK_DRIVER, '_extend_vmdk_virtual_disk')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id')
@mock.patch.object(VMDK_DRIVER, 'session')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_copy_image_to_volume_stream_optimized(self, volumeops,
session,
get_profile_id,
_select_ds_for_volume,
_extend_virtual_disk,
fetch_optimized_image):
"""Test copy_image_to_volume.
Test with an acceptable vmdk disk format and streamOptimized disk type.
"""
self._test_copy_image_to_volume_stream_optimized(volumeops,
session,
get_profile_id,
_select_ds_for_volume,
_extend_virtual_disk,
fetch_optimized_image)
@mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
@mock.patch('cinder.openstack.common.uuidutils.generate_uuid')
@mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
@mock.patch('cinder.volume.volume_types.get_volume_type_extra_specs')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, 'ds_sel')
def test_retype(self, ds_sel, vops, get_volume_type_extra_specs,
get_volume_group_folder, generate_uuid,
delete_temp_backing):
self._test_retype(ds_sel, vops, get_volume_type_extra_specs,
get_volume_group_folder, generate_uuid,
delete_temp_backing)
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, '_extend_vmdk_virtual_disk')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_extend_volume(self, volume_ops, _extend_virtual_disk,
_select_ds_for_volume):
"""Test extend_volume."""
self._test_extend_volume(volume_ops, _extend_virtual_disk,
_select_ds_for_volume)
@mock.patch.object(vmware_images, 'download_stream_optimized_disk')
@mock.patch('cinder.openstack.common.fileutils.file_open')
@mock.patch.object(VMDK_DRIVER, '_temporary_file')
@mock.patch('cinder.openstack.common.uuidutils.generate_uuid')
@mock.patch.object(VMDK_DRIVER, '_create_backing_in_inventory')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, 'session')
def test_backup_volume(self, session, vops, create_backing, generate_uuid,
temporary_file, file_open, download_disk):
self._test_backup_volume(session, vops, create_backing, generate_uuid,
temporary_file, file_open, download_disk)
@mock.patch.object(VMDK_DRIVER, 'extend_volume')
@mock.patch.object(VMDK_DRIVER, '_restore_backing')
@mock.patch('cinder.openstack.common.fileutils.file_open')
@mock.patch.object(VMDK_DRIVER, '_temporary_file')
@mock.patch('cinder.openstack.common.uuidutils.generate_uuid')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_restore_backup(self, vops, generate_uuid, temporary_file,
file_open, restore_backing, extend_volume):
self._test_restore_backup(vops, generate_uuid, temporary_file,
file_open, restore_backing, extend_volume)
@mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch(
'cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver._get_disk_type')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER,
'_create_backing_from_stream_optimized_file')
@mock.patch('cinder.openstack.common.uuidutils.generate_uuid')
def test_restore_backing(
self, generate_uuid, create_backing, select_ds, get_disk_type,
vops, delete_temp_backing):
self._test_restore_backing(
generate_uuid, create_backing, select_ds, get_disk_type, vops,
delete_temp_backing)
@mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
@mock.patch.object(vmware_images, 'upload_stream_optimized_disk')
@mock.patch('cinder.openstack.common.fileutils.file_open')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch(
'cinder.volume.drivers.vmware.vmdk.VMwareEsxVmdkDriver._get_disk_type')
@mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id')
@mock.patch.object(VMDK_DRIVER, 'session')
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
def test_create_backing_from_stream_optimized_file(
self, select_ds, session, get_storage_profile_id, get_disk_type,
vops, file_open, upload_disk, delete_temp_backing):
self._test_create_backing_from_stream_optimized_file(
select_ds, session, get_storage_profile_id, get_disk_type, vops,
file_open, upload_disk, delete_temp_backing)
@mock.patch.object(VMDK_DRIVER, '_get_folder_ds_summary')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_create_backing_with_params(self, vops, get_folder_ds_summary):
resource_pool = mock.sentinel.resource_pool
vops.get_dss_rp.return_value = (mock.Mock(), resource_pool)
folder = mock.sentinel.folder
summary = mock.sentinel.summary
get_folder_ds_summary.return_value = (folder, summary)
volume = {'name': 'vol-1', 'volume_type_id': None, 'size': 1}
host = mock.Mock()
create_params = {vmdk.CREATE_PARAM_DISK_LESS: True}
self._driver._create_backing(volume, host, create_params)
vops.create_backing_disk_less.assert_called_once_with('vol-1',
folder,
resource_pool,
host,
summary.name,
None)
create_params = {vmdk.CREATE_PARAM_ADAPTER_TYPE: 'ide'}
self._driver._create_backing(volume, host, create_params)
vops.create_backing.assert_called_once_with('vol-1',
units.Mi,
vmdk.THIN_VMDK_TYPE,
folder,
resource_pool,
host,
summary.name,
None,
'ide')
vops.create_backing.reset_mock()
backing_name = "temp-vol"
create_params = {vmdk.CREATE_PARAM_BACKING_NAME: backing_name}
self._driver._create_backing(volume, host, create_params)
vops.create_backing.assert_called_once_with(backing_name,
units.Mi,
vmdk.THIN_VMDK_TYPE,
folder,
resource_pool,
host,
summary.name,
None,
'lsiLogic')
@mock.patch('cinder.openstack.common.fileutils.ensure_tree')
@mock.patch('cinder.openstack.common.fileutils.delete_if_exists')
@mock.patch('tempfile.mkstemp')
@mock.patch('os.close')
def test_temporary_file(
self, close, mkstemp, delete_if_exists, ensure_tree):
fd = mock.sentinel.fd
tmp = mock.sentinel.tmp
mkstemp.return_value = (fd, tmp)
prefix = ".vmdk"
suffix = "test"
with self._driver._temporary_file(prefix=prefix,
suffix=suffix) as tmp_file:
self.assertEqual(tmp, tmp_file)
ensure_tree.assert_called_once_with(self.TMP_DIR)
mkstemp.assert_called_once_with(dir=self.TMP_DIR,
prefix=prefix,
suffix=suffix)
close.assert_called_once_with(fd)
delete_if_exists.assert_called_once_with(tmp)
class ImageDiskTypeTest(test.TestCase):
"""Unit tests for ImageDiskType."""
def test_is_valid(self):
self.assertTrue(vmdk.ImageDiskType.is_valid("thin"))
self.assertTrue(vmdk.ImageDiskType.is_valid("preallocated"))
self.assertTrue(vmdk.ImageDiskType.is_valid("streamOptimized"))
self.assertTrue(vmdk.ImageDiskType.is_valid("sparse"))
self.assertFalse(vmdk.ImageDiskType.is_valid("thick"))
def test_validate(self):
vmdk.ImageDiskType.validate("thin")
vmdk.ImageDiskType.validate("preallocated")
vmdk.ImageDiskType.validate("streamOptimized")
vmdk.ImageDiskType.validate("sparse")
self.assertRaises(exception.ImageUnacceptable,
vmdk.ImageDiskType.validate,
"thick")
| apache-2.0 | 8,798,432,270,065,571,000 | 45.507804 | 79 | 0.584107 | false | 3.939522 | true | false | false |
joeywen/nlpy | nlpy/optimize/solvers/ldfp.py | 3 | 3868 | """
A limited-memory DFP method for unconstrained minimization. A symmetric and
positive definite approximation of the Hessian matrix is built and updated at
each iteration following the Davidon-Fletcher-Powell formula. For efficiency,
only the recent observed curvature is incorporated into the approximation,
resulting in a *limited-memory* scheme.
The main idea of this method is that the DFP formula is dual to the BFGS
formula. Therefore, by swapping s and y in the (s,y) pairs, the InverseLBFGS
class updates a limited-memory DFP approximation to the Hessian, rather than
a limited-memory BFGS approximation to its inverse.
"""
from nlpy.model.amplpy import AmplModel
from nlpy.optimize.solvers.lbfgs import InverseLBFGS
from nlpy.optimize.solvers.trunk import TrunkFramework
import numpy as np
__docformat__ = 'restructuredtext'
# Subclass InverseLBFGS to update a LDFP approximation to the Hessian
# (as opposed to a LBFGS approximation to its inverse).
class LDFP(InverseLBFGS):
"""
A limited-memory DFP framework for quasi-Newton methods. See the
documentation of `InverseLBFGS`.
"""
def __init__(self, n, npairs=5, **kwargs):
InverseLBFGS.__init__(self, n, npairs, **kwargs)
def store(self, new_s, new_y):
# Simply swap s and y.
InverseLBFGS.store(self, new_y, new_s)
class StructuredLDFP(InverseLBFGS):
"""
A limited-memory DFP framework for quasi-Newton methods that only
memorizes updates corresponding to certain variables. This is useful
when approximating the Hessian of a constraint with a sparse Jacobian.
"""
def __init__(self, n, npairs=5, **kwargs):
"""
See the documentation of `InverseLBFGS` for complete information.
:keywords:
:vars: List of variables participating in the quasi-Newton
update. If `None`, all variables participate.
"""
self.on = n # Original value of n.
self.vars = kwargs.get('vars', None) # None means all variables.
if self.vars is None:
nvars = n
else:
nvars = len(self.vars)
# This next initialization will set self.n to nvars.
# The original value of n was saved in self.on.
InverseLBFGS.__init__(self, nvars, npairs, **kwargs)
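    # Usage sketch (hypothetical values): restrict the quasi-Newton update to
    # a subset of the variables, e.g.
    #   H = StructuredLDFP(n=1000, npairs=5, vars=[0, 3, 7])
    # so stored pairs and matvec() operate only on those three components.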
def store(self, new_s, new_y):
"""
Store a new (s,y) pair. This method takes "small" vectors as
input, i.e., corresponding to the variables participating in
the quasi-Newton update.
"""
InverseLBFGS.store(self, new_y, new_s)
def matvec(self, v):
"""
Take a small vector and return a small vector giving the
contribution of the Hessian approximation to the
matrix-vector product.
"""
return InverseLBFGS.matvec(self, v)
# Subclass solver TRUNK to maintain an LDFP approximation to the Hessian and
# perform the LDFP matrix update at the end of each iteration.
class LDFPTrunkFramework(TrunkFramework):
def __init__(self, nlp, TR, TrSolver, **kwargs):
TrunkFramework.__init__(self, nlp, TR, TrSolver, **kwargs)
self.ldfp = LDFP(self.nlp.n, **kwargs)
self.save_g = True
def hprod(self, v, **kwargs):
"""
Compute the matrix-vector product between the limited-memory DFP
approximation kept in storage and the vector `v`.
"""
return self.ldfp.matvec(v)
def PostIteration(self, **kwargs):
"""
This method updates the limited-memory DFP approximation by appending
the most recent (s,y) pair to it and possibly discarding the oldest one
if all the memory has been used.
"""
if self.status != 'Rej':
s = self.alpha * self.solver.step
y = self.g - self.g_old
self.ldfp.store(s, y)
return None
| gpl-3.0 | 7,327,404,677,815,587,000 | 35.149533 | 79 | 0.660031 | false | 3.788443 | false | false | false |
qyzxg/myblog | app/api_0_1/comments.py | 1 | 1674 | #!/usr/bin/python
# -*- coding:utf-8 -*-
from flask import jsonify, request, url_for
from ..models import Post, Comment
from . import api
from ..shares import do_pagination
@api.route('/comments/')
def get_comments():
query = Comment.query.order_by(Comment.created.desc())
page = request.args.get('page', 1, type=int)
pagination, comments = do_pagination(query)
prev = None
if pagination.has_prev:
prev = url_for('api.get_comments', page=page - 1, _external=True)
next_ = None
if pagination.has_next:
next_ = url_for('api.get_comments', page=page + 1, _external=True)
return jsonify({
'posts': [comment.to_json() for comment in comments],
'prev': prev,
'next': next_,
'count': pagination.total
})
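# Example response shape for GET /comments/ (illustrative only; the entries in
# "posts" depend on Comment.to_json()):
# {
#   "posts": [{...}, {...}],
#   "prev": null,
#   "next": "http://host/api/comments/?page=2",
#   "count": 42
# }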
@api.route('/comments/<int:id_>/')
def get_comment(id_):
comment = Comment.query.get_or_404(id_)
return jsonify(comment.to_json())
@api.route('/posts/<int:id_>/comments/')
def get_post_comments(id_):
post = Post.query.get_or_404(id_)
page = request.args.get('page', 1, type=int)
query = Comment.query.filter_by(post_id=post.id).order_by(Comment.created.desc())
pagination, comments = do_pagination(query)
prev = None
if pagination.has_prev:
        prev = url_for('api.get_post_comments', id_=id_,
                       page=page - 1, _external=True)
next_ = None
if pagination.has_next:
        next_ = url_for('api.get_post_comments', id_=id_,
                        page=page + 1, _external=True)
return jsonify({
'posts': [comment.to_json() for comment in comments],
'prev': prev,
'next': next_,
'count': pagination.total
})
| gpl-3.0 | -2,697,877,208,270,583,000 | 30.823529 | 85 | 0.603943 | false | 3.402439 | false | false | false |
Kobzol/debug-visualizer | debugger/debugger_api.py | 1 | 10713 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2016 Jakub Beranek
#
# This file is part of Devi.
#
# Devi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License, or
# (at your option) any later version.
#
# Devi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Devi. If not, see <http://www.gnu.org/licenses/>.
#
import time
import util
from enums import DebuggerState, ProcessState
class ProcessExitedEventData(object):
def __init__(self, return_code):
self.return_code = return_code
class ProcessStoppedEventData(object):
def __init__(self, stop_reason):
self.stop_reason = stop_reason
class StartupInfo(object):
def __init__(self, cmd_arguments="", working_directory="", env_vars=None):
"""
@type cmd_arguments: str
@type working_directory: str
@type env_vars: list of tuple of (str, str)
"""
self.cmd_arguments = cmd_arguments
self.working_directory = working_directory
self.env_vars = env_vars if env_vars is not None else []
def copy(self):
return StartupInfo(self.cmd_arguments,
self.working_directory,
list(self.env_vars))
def __repr__(self):
return "StartupInfo: [{}, {}, {}]".format(
self.cmd_arguments, self.working_directory, self.env_vars
)
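# Example (sketch, assuming a concrete Debugger implementation is available):
#
#   info = StartupInfo(cmd_arguments="--verbose",
#                      working_directory="/tmp",
#                      env_vars=[("LC_ALL", "C")])
#   debugger.launch(info)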
class HeapManager(object):
def __init__(self, debugger):
"""
@type debugger: debugger.Debugger
"""
self.debugger = debugger
self.on_heap_change = util.EventBroadcaster()
self.on_free_error = util.EventBroadcaster()
def watch(self):
"""
@rtype: str
"""
raise NotImplementedError()
def stop(self):
raise NotImplementedError()
def find_block_by_address(self, addr):
"""
@type addr: str
@rtype: HeapBlock | None
"""
raise NotImplementedError()
def get_total_allocations(self):
"""
@rtype: int
"""
raise NotImplementedError()
def get_total_deallocations(self):
"""
@rtype: int
"""
raise NotImplementedError()
class IOManager(object):
def __init__(self):
self.stdin = None
self.stdout = None
self.stderr = None
def handle_io(self):
raise NotImplementedError()
def stop_io(self):
raise NotImplementedError()
class BreakpointManager(object):
def __init__(self, debugger):
"""
@type debugger: Debugger
"""
self.debugger = debugger
self.on_breakpoint_changed = util.EventBroadcaster()
def add_breakpoint(self, location, line):
"""
Adds a breakpoint, if there is not a breakpoint with the same
location and line already.
@type location: str
@type line: int
@rtype: boolean
"""
raise NotImplementedError()
def toggle_breakpoint(self, location, line):
"""
Toggles a breakpoint on the given location and line.
@type location: str
@type line: int
@rtype: boolean
"""
raise NotImplementedError()
def get_breakpoints(self):
"""
@rtype: list of debugger.Breakpoint
"""
raise NotImplementedError()
def find_breakpoint(self, location, line):
"""
@type location: str
@type line: int
@rtype: debugger.Breakpoint | None
"""
raise NotImplementedError()
def remove_breakpoint(self, location, line):
"""
@type location: str
@type line: int
@rtype: boolean
"""
raise NotImplementedError()
class FileManager(object):
def __init__(self, debugger):
"""
@type debugger: Debugger
"""
self.debugger = debugger
def get_main_source_file(self):
raise NotImplementedError()
def get_current_location(self):
"""
Returns the current file and line of the debugged process.
@rtype: tuple of basestring, int | None
"""
raise NotImplementedError()
def get_line_address(self, filename, line):
"""
Returns the starting address and ending address in hexadecimal
format of code at the specified line in the given file.
Returns None if no code is at the given location.
@type filename: str
@type line: int
@rtype: tuple of int | None
"""
raise NotImplementedError()
def disassemble(self, filename, line):
"""
Returns disassembled code for the given location.
        Returns None if no code was found.
@type filename: str
@type line: int
@rtype: str | None
"""
raise NotImplementedError()
def disassemble_raw(self, filename, line):
"""
Disassembles the given line in a raw form (returns a string with the
line and all assembly instructions for it).
@type filename: str
@type line: int
@rtype: str | None
"""
raise NotImplementedError()
class ThreadManager(object):
def __init__(self, debugger):
"""
@type debugger: Debugger
"""
self.debugger = debugger
def get_current_thread(self):
"""
@rtype: debugee.Thread
"""
raise NotImplementedError()
def get_thread_info(self):
"""
Returns (active_thread_id, all_threads).
@rtype: debugee.ThreadInfo | None
"""
raise NotImplementedError()
def set_thread_by_index(self, thread_id):
"""
@type thread_id: int
@rtype: bool
"""
raise NotImplementedError()
def get_current_frame(self, with_variables=False):
"""
@type with_variables: bool
@rtype: debugee.Frame | None
"""
raise NotImplementedError()
def get_frames(self):
"""
@rtype: list of debugee.Frame
"""
raise NotImplementedError()
def get_frames_with_variables(self):
"""
@rtype: list of debugee.Frame
"""
raise NotImplementedError()
def change_frame(self, frame_index):
"""
@type frame_index: int
@rtype: bool
"""
raise NotImplementedError()
class VariableManager(object):
"""
Handles retrieval and updating of variables and raw memory of the
debugged process.
"""
def __init__(self, debugger):
"""
@type debugger: Debugger
"""
self.debugger = debugger
def get_type(self, expression, level=0):
"""
Returns type for the given expression.
@type expression: str
@type level: int
@rtype: debugee.Type
"""
raise NotImplementedError()
def get_variable(self, expression, level=0):
"""
Returns a variable for the given expression-
@type expression: str
@type level: int
@rtype: debugee.Variable
"""
raise NotImplementedError()
def update_variable(self, variable):
"""
Updates the variable's value in the debugged process.
@type variable: debugee.Variable
"""
raise NotImplementedError()
def get_memory(self, address, count):
"""
Returns count bytes from the given address.
@type address: str
@type count: int
@rtype: list of int
"""
raise NotImplementedError()
def get_registers(self):
"""
Returns the register values as a list of tuples with name and
value of the given register.
@rtype: list of register.Register
"""
raise NotImplementedError()
def get_vector_items(self, vector):
"""
@type vector: debugger.debugee.VectorVariable
@rtype: list of debugger.debugee.Variable
"""
raise NotImplementedError()
class Debugger(object):
def __init__(self):
self.state = util.Flags(DebuggerState, DebuggerState.Started)
self.process_state = ProcessState.Invalid
self.io_manager = IOManager()
self.breakpoint_manager = BreakpointManager(self)
self.file_manager = FileManager(self)
self.thread_manager = ThreadManager(self)
self.variable_manager = VariableManager(self)
self.heap_manager = HeapManager(self)
self.on_process_state_changed = util.EventBroadcaster()
self.on_debugger_state_changed = util.EventBroadcaster()
self.state.on_value_changed.redirect(self.on_debugger_state_changed)
self.on_process_state_changed = util.EventBroadcaster()
self.on_frame_changed = util.EventBroadcaster()
self.on_thread_changed = util.EventBroadcaster()
def require_state(self, required_state):
if not self.get_state().is_set(required_state):
raise util.BadStateError(required_state, self.state)
def get_state(self):
return self.state
def get_process_state(self):
return self.process_state
def load_binary(self, binary_path):
raise NotImplementedError()
def launch(self, startup_info=None):
"""
Launches the program with the given startup info.
@type startup_info: StartupInfo | None
@rtype: bool
"""
raise NotImplementedError()
def exec_continue(self):
raise NotImplementedError()
def exec_pause(self):
raise NotImplementedError()
def exec_step_over(self):
raise NotImplementedError()
def exec_step_in(self):
raise NotImplementedError()
def exec_step_out(self):
raise NotImplementedError()
def quit_program(self, return_code=1):
raise NotImplementedError()
def terminate(self):
raise NotImplementedError()
def wait_for_stop(self):
while self.process_state not in (ProcessState.Stopped,
ProcessState.Exited):
time.sleep(0.1)
return self.process_state
def wait_for_exit(self):
while self.process_state != ProcessState.Exited:
time.sleep(0.1)
return self.process_state
| gpl-3.0 | 7,285,009,671,657,888,000 | 25.917085 | 78 | 0.592084 | false | 4.401397 | false | false | false |
AhmadHamzeei/Amir-Accounting | amir/calverter.py | 1 | 11050 | #!/usr/bin/env python
## calverter.py (2008/08/16)
##
## Copyright (C) 2008 Mehdi Bayazee ([email protected])
##
## Iranian (Jalali) calendar:
## http://en.wikipedia.org/wiki/Iranian_calendar
## Islamic (Hijri) calendar:
## http://en.wikipedia.org/wiki/Islamic_calendar
## Gregorian calendar:
## http://en.wikipedia.org/wiki/Gregorian_calendar
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
__author__ = "Mehdi Bayazee"
__copyright__ = "Copyright (C) 2008 Mehdi Bayazee"
__revision__ = "$Id$"
__version__ = "0.1.5"
import math
## \defgroup Utility
## @{
class calverter:
def __init__(self):
self.J0000 = 1721424.5 # Julian date of Gregorian epoch: 0000-01-01
self.J1970 = 2440587.5 # Julian date at Unix epoch: 1970-01-01
self.JMJD = 2400000.5 # Epoch of Modified Julian Date system
self.J1900 = 2415020.5 # Epoch (day 1) of Excel 1900 date system (PC)
self.J1904 = 2416480.5 # Epoch (day 0) of Excel 1904 date system (Mac)
self.NormLeap = ("Normal year", "Leap year")
self.GREGORIAN_EPOCH = 1721425.5
self.GREGORIAN_WEEKDAYS = ("Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday")
self.ISLAMIC_EPOCH = 1948439.5;
self.ISLAMIC_WEEKDAYS = ("al-ahad", "al-'ithnayn", "ath-thalatha'", "al-arbia`aa'", "al-khamis", "al-jumu`a", "as-sabt")
self.JALALI_EPOCH = 1948320.5;
self.JALALI_WEEKDAYS = ("Yekshanbeh", "Doshanbeh", "Seshhanbeh", "Chaharshanbeh", "Panjshanbeh", "Jomeh", "Shanbeh")
def jwday(self, j):
"JWDAY: Calculate day of week from Julian day"
return int(math.floor((j + 1.5))) % 7
def weekday_before(self, weekday, jd):
"""
WEEKDAY_BEFORE: Return Julian date of given weekday (0 = Sunday)
in the seven days ending on jd.
"""
return jd - self.jwday(jd - weekday)
def search_weekday(self, weekday, jd, direction, offset):
"""
SEARCH_WEEKDAY: Determine the Julian date for:
weekday Day of week desired, 0 = Sunday
jd Julian date to begin search
direction 1 = next weekday, -1 = last weekday
offset Offset from jd to begin search
"""
return self.weekday_before(weekday, jd + (direction * offset))
# Utility weekday functions, just wrappers for search_weekday
def nearest_weekday(self, weekday, jd):
return self.search_weekday(weekday, jd, 1, 3)
def next_weekday(self, weekday, jd):
return self.search_weekday(weekday, jd, 1, 7)
def next_or_current_weekday(self, weekday, jd):
return self.search_weekday(weekday, jd, 1, 6)
def previous_weekday(self, weekday, jd):
return self.search_weekday(weekday, jd, -1, 1)
def previous_or_current_weekday(self, weekday, jd):
return self.search_weekday(weekday, jd, 1, 0)
def leap_gregorian(self, year):
"LEAP_GREGORIAN: Is a given year in the Gregorian calendar a leap year ?"
return ((year % 4) == 0) and (not(((year % 100) == 0) and ((year % 400) != 0)))
def gregorian_to_jd(self, year, month, day):
"GREGORIAN_TO_JD: Determine Julian day number from Gregorian calendar date"
# Python <= 2.5
if month <= 2 :
tm = 0
elif self.leap_gregorian(year):
tm = -1
else:
tm = -2
# Python 2.5
#tm = 0 if month <= 2 else (-1 if self.leap_gregorian(year) else -2)
return (self.GREGORIAN_EPOCH - 1) + (365 * (year - 1)) + math.floor((year - 1) / 4) + (-math.floor((year - 1) / 100)) + \
math.floor((year - 1) / 400) + math.floor((((367 * month) - 362) / 12) + tm + day)
def jd_to_gregorian(self, jd) :
"JD_TO_GREGORIAN: Calculate Gregorian calendar date from Julian day"
wjd = math.floor(jd - 0.5) + 0.5
depoch = wjd - self.GREGORIAN_EPOCH
quadricent = math.floor(depoch / 146097)
dqc = depoch % 146097
cent = math.floor(dqc / 36524)
dcent = dqc % 36524
quad = math.floor(dcent / 1461)
dquad = dcent % 1461
yindex = math.floor(dquad / 365)
year = int((quadricent * 400) + (cent * 100) + (quad * 4) + yindex)
if not((cent == 4) or (yindex == 4)) :
year += 1
yearday = wjd - self.gregorian_to_jd(year, 1, 1)
# Python <= 2.5
if wjd < self.gregorian_to_jd(year, 3, 1):
leapadj = 0
elif self.leap_gregorian(year):
leapadj = 1
else:
leapadj = 2
# Python 2.5
#leapadj = 0 if wjd < self.gregorian_to_jd(year, 3, 1) else (1 if self.leap_gregorian(year) else 2)
month = int(math.floor((((yearday + leapadj) * 12) + 373) / 367))
day = int(wjd - self.gregorian_to_jd(year, month, 1)) + 1
return year, month, day
def n_weeks(self, weekday, jd, nthweek):
j = 7 * nthweek
if nthweek > 0 :
j += self.previous_weekday(weekday, jd)
else :
j += next_weekday(weekday, jd)
return j
def iso_to_julian(self, year, week, day):
"ISO_TO_JULIAN: Return Julian day of given ISO year, week, and day"
return day + self.n_weeks(0, self.gregorian_to_jd(year - 1, 12, 28), week)
def jd_to_iso(self, jd):
"JD_TO_ISO: Return array of ISO (year, week, day) for Julian day"
year = self.jd_to_gregorian(jd - 3)[0]
if jd >= self.iso_to_julian(year + 1, 1, 1) :
year += 1
week = int(math.floor((jd - self.iso_to_julian(year, 1, 1)) / 7) + 1)
day = self.jwday(jd)
if day == 0 :
day = 7
return year, week, day
def iso_day_to_julian(self, year, day):
"ISO_DAY_TO_JULIAN: Return Julian day of given ISO year, and day of year"
return (day - 1) + self.gregorian_to_jd(year, 1, 1)
def jd_to_iso_day(self, jd):
"JD_TO_ISO_DAY: Return array of ISO (year, day_of_year) for Julian day"
year = self.jd_to_gregorian(jd)[0]
day = int(math.floor(jd - self.gregorian_to_jd(year, 1, 1))) + 1
return year, day
def pad(self, Str, howlong, padwith) :
"PAD: Pad a string to a given length with a given fill character. "
s = str(Str)
while s.length < howlong :
s = padwith + s
return s
def leap_islamic(self, year):
"LEAP_ISLAMIC: Is a given year a leap year in the Islamic calendar ?"
return (((year * 11) + 14) % 30) < 11
def islamic_to_jd(self, year, month, day):
"ISLAMIC_TO_JD: Determine Julian day from Islamic date"
return (day + math.ceil(29.5 * (month - 1)) + \
(year - 1) * 354 + \
math.floor((3 + (11 * year)) / 30) + \
self.ISLAMIC_EPOCH) - 1
def jd_to_islamic(self, jd):
"JD_TO_ISLAMIC: Calculate Islamic date from Julian day"
jd = math.floor(jd) + 0.5
year = int(math.floor(((30 * (jd - self.ISLAMIC_EPOCH)) + 10646) / 10631))
month = int(min(12, math.ceil((jd - (29 + self.islamic_to_jd(year, 1, 1))) / 29.5) + 1))
day = int(jd - self.islamic_to_jd(year, month, 1)) + 1;
return year, month, day
def leap_jalali(self, year):
"LEAP_jalali: Is a given year a leap year in the Jalali calendar ?"
# Python <= 2.5
if year > 0:
rm = 474
else:
rm = 473
# Python 2.5
#return ((((((year - 474 if year > 0 else 473 ) % 2820) + 474) + 38) * 682) % 2816) < 682
return ((((((year - rm) % 2820) + 474) + 38) * 682) % 2816) < 682
def jalali_to_jd(self, year, month, day):
"JALALI_TO_JD: Determine Julian day from Jalali date"
# Python <= 2.5
if year >=0 :
rm = 474
else:
rm = 473
epbase = year - (rm)
# Python 2.5
#epbase = year - 474 if year>=0 else 473
epyear = 474 + (epbase % 2820)
if month <= 7 :
mm = (month - 1) * 31
else:
mm = ((month - 1) * 30) + 6
return day + mm + \
math.floor(((epyear * 682) - 110) / 2816) + \
(epyear - 1) * 365 + \
math.floor(epbase / 2820) * 1029983 + \
(self.JALALI_EPOCH - 1)
def jd_to_jalali(self, jd):
"JD_TO_JALALI: Calculate Jalali date from Julian day"
jd = math.floor(jd) + 0.5
depoch = jd - self.jalali_to_jd(475, 1, 1)
cycle = math.floor(depoch / 1029983)
cyear = depoch % 1029983
if cyear == 1029982 :
ycycle = 2820
else :
aux1 = math.floor(cyear / 366)
aux2 = cyear % 366
ycycle = math.floor(((2134 * aux1) + (2816 * aux2) + 2815) / 1028522) + aux1 + 1
year = int(ycycle + (2820 * cycle) + 474)
if year <= 0 :
year -= 1
yday = (jd - self.jalali_to_jd(year, 1, 1)) + 1
if yday <= 186:
month = int(math.ceil(yday / 31))
else:
month = int(math.ceil((yday - 6) / 30))
day = int(jd - self.jalali_to_jd(year, month, 1)) + 1
return year, month, day
## @}
| gpl-3.0 | 5,848,496,295,162,893,000 | 35.205387 | 132 | 0.486425 | false | 3.322309 | false | false | false |
ZimboPy/zimfarm | farm_project/accounts/migrations/0001_initial.py | 1 | 1858 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-25 00:59
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserAddress',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('address', models.CharField(max_length=120)),
('address2', models.CharField(blank=True, max_length=120, null=True)),
('city', models.CharField(max_length=120)),
('province', models.CharField(max_length=120, null=True)),
('phone', models.CharField(max_length=16)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['updated', '-timestamp'],
},
),
migrations.CreateModel(
name='UserDefaultAddress',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('address', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='user_address_default', to='accounts.UserAddress')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| apache-2.0 | 4,105,231,202,751,320,000 | 41.227273 | 179 | 0.597417 | false | 4.222727 | false | false | false |
Whatang/DrumBurp | src/GUI/DBColourPicker.py | 1 | 14438 | # Copyright 2016 Michael Thomas
#
# See www.whatang.org for more information.
#
# This file is part of DrumBurp.
#
# DrumBurp is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DrumBurp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DrumBurp. If not, see <http://www.gnu.org/licenses/>
'''
Created on Feb 22, 2015
@author: mike_000
'''
import copy
from PyQt4.QtGui import (QDialog, QColor, QLabel, QPushButton,
QComboBox, QColorDialog, QPen)
from PyQt4 import QtCore
from GUI.ui_dbColours import Ui_ColourPicker
STYLE_MAP = {"None": QtCore.Qt.NoPen,
"Solid": QtCore.Qt.SolidLine,
"Dashed": QtCore.Qt.DashLine}
STYLES = ["None", "Dashed", "Solid"]
REVERSE_STYLE_MAP = dict((x, y) for (y, x) in STYLE_MAP.iteritems())
class ColouredItem(object):
def __init__(self, backgroundColour, borderStyle, borderColour):
self._borderStyle = None
self.backgroundColour = backgroundColour
self.borderStyle = borderStyle
self.borderColour = borderColour
@property
def borderStyle(self):
return self._borderStyle
@borderStyle.setter
def borderStyle(self, value):
if not isinstance(value, QtCore.Qt.PenStyle):
value = STYLE_MAP.get(value, QtCore.Qt.NoPen)
self._borderStyle = value
@staticmethod
def _colourToString(name, colour):
return name + ":%02x,%02x,%02x,%02x" % colour.getRgb()
@staticmethod
def _colourFromString(colString):
rgba = [int(x, 16) for x in colString.split(",")]
return QColor.fromRgb(*rgba)
@staticmethod
def _lineToString(name, line):
return "%s:%s" % (name, REVERSE_STYLE_MAP[line])
@staticmethod
def _lineFromString(lineString):
return STYLE_MAP[lineString]
def toString(self):
answer = "/".join([self._colourToString("backgroundColour",
self.backgroundColour),
self._lineToString("borderStyle",
self.borderStyle),
self._colourToString("borderColour",
self.borderColour)])
return answer
def fromString(self, colString):
for item in str(colString).split("/"):
if ":" not in item:
continue
name, detail = item.split(":")
if name.endswith("Colour"):
setattr(self, name, self._colourFromString(detail))
elif name.endswith("Style"):
setattr(self, name, self._lineFromString(detail))
class TextColouredItem(ColouredItem):
def __init__(self, textColour):
super(TextColouredItem, self).__init__(QColor(QtCore.Qt.transparent),
"None",
textColour)
class BorderColouredItem(ColouredItem):
def __init__(self, borderStyle, borderColour):
super(BorderColouredItem, self).__init__(QColor(QtCore.Qt.transparent),
borderStyle,
borderColour)
class ColAttrs(object):
KNOWN_COLOURS = []
def __init__(self, longName, attrName, default, background=True, border=True, text=False):
self.longName = longName
self.attrName = attrName
self.default = default
self.background = background
self.border = border
self.text = text
self.KNOWN_COLOURS.append(self)
def makeInstance(self, scheme):
inst = ColourInstance(self)
setattr(scheme, self.attrName, inst)
def setPainter(self, painter, colour):
raise NotImplementedError()
def getInstance(self, scheme):
return getattr(scheme, self.attrName)
class TextColAttrs(ColAttrs):
def __init__(self, longName, attrName, default):
super(TextColAttrs, self).__init__(longName, attrName, default,
False, False, True)
def setPainter(self, painter, colour):
pen = QPen()
pen.setColor(colour.borderColour)
painter.setPen(pen)
class SolidBoxAttrs(ColAttrs):
def __init__(self, longName, attrName, default):
super(SolidBoxAttrs, self).__init__(longName, attrName, default,
True, True, False)
def setPainter(self, painter, colour):
pen = QPen(colour.borderStyle)
pen.setColor(colour.borderColour)
painter.setPen(pen)
painter.setBrush(colour.backgroundColour)
class BorderAttrs(ColAttrs):
def __init__(self, longName, attrName, default):
super(BorderAttrs, self).__init__(longName, attrName, default,
False, True, False)
def setPainter(self, painter, colour):
pen = QPen(colour.borderStyle)
pen.setColor(colour.borderColour)
painter.setPen(pen)
painter.setBrush(QColor(QtCore.Qt.transparent))
_TEXT_ATTRS = TextColAttrs("Text", "text",
TextColouredItem(QColor(QtCore.Qt.black)))
_POTENTIAL_ATTRS = TextColAttrs("New notes", "potential",
TextColouredItem(QColor(QtCore.Qt.blue)))
_DELETE_ATTRS = TextColAttrs("Notes to delete", "delete",
TextColouredItem(QColor(QtCore.Qt.red)))
_NOTE_HIGHLIGHT_ATTRS = SolidBoxAttrs("Note Highlight", "noteHighlight",
ColouredItem(QColor(QtCore.Qt.yellow).lighter(),
"None",
QColor(QtCore.Qt.black)))
_TIME_HIGHLIGHT_ATTRS = BorderAttrs("Time Highlight", "timeHighlight",
BorderColouredItem("Dashed",
QColor(QtCore.Qt.blue).lighter()))
_SEL_MEASURE_ATTRS = SolidBoxAttrs("Selected Measure", "selectedMeasure",
ColouredItem(QColor(QtCore.Qt.gray).lighter(),
"Solid",
QColor(QtCore.Qt.gray).lighter()))
_PLAY_HL_ATTRS = BorderAttrs("Playing Highlight", "playingHighlight",
BorderColouredItem("Solid",
QColor(QtCore.Qt.blue).lighter()))
_NEXT_PLAY_HL_ATTRS = BorderAttrs("Next Playing Highlight", "nextPlayingHighlight",
BorderColouredItem("Dashed",
QColor(QtCore.Qt.blue).lighter()))
_STICKING_ATTRS = SolidBoxAttrs("Sticking Display", "sticking",
ColouredItem(QColor(QtCore.Qt.white),
"Dashed",
QColor(QtCore.Qt.gray)))
class ColourInstance(object):
def __init__(self, colourAttrs):
self.colour = copy.deepcopy(colourAttrs.default)
self.colourAttrs = colourAttrs
def setPainter(self, painter):
self.colourAttrs.setPainter(painter, self)
@property
def borderStyle(self):
return self.colour.borderStyle
@borderStyle.setter
def borderStyle(self, value):
self.colour.borderStyle = value
@property
def borderColour(self):
return self.colour.borderColour
@borderColour.setter
def borderColour(self, value):
self.colour.borderColour = value
@property
def backgroundColour(self):
return self.colour.backgroundColour
@backgroundColour.setter
def backgroundColour(self, value):
self.colour.backgroundColour = value
def toString(self):
return self.colour.toString()
def fromString(self, colString):
return self.colour.fromString(colString)
class ColourScheme(object):
def __init__(self):
for colAttr in ColAttrs.KNOWN_COLOURS:
colAttr.makeInstance(self)
def iterColours(self):
for colour in ColAttrs.KNOWN_COLOURS:
yield getattr(self, colour.attrName)
def iterTextColours(self):
for colour in ColAttrs.KNOWN_COLOURS:
if colour.text:
yield getattr(self, colour.attrName)
def iterAreaColours(self):
for colour in ColAttrs.KNOWN_COLOURS:
if not colour.text:
yield getattr(self, colour.attrName)
class DBColourPicker(QDialog, Ui_ColourPicker):
def __init__(self, colour_scheme, parent=None):
super(DBColourPicker, self).__init__(parent)
self.setupUi(self)
self._originalScheme = copy.deepcopy(colour_scheme)
self._currentScheme = copy.deepcopy(colour_scheme)
reset = self.buttonBox.button(self.buttonBox.Reset)
reset.clicked.connect(self.reset)
restore = self.buttonBox.button(self.buttonBox.RestoreDefaults)
restore.clicked.connect(self.restoreDefaults)
self._colourSelectors = []
self._lineSelectors = []
for row, colour in enumerate(self._currentScheme.iterTextColours()):
colourAttr = colour.colourAttrs
label = QLabel(self.frame)
label.setText(colourAttr.longName)
label.setAlignment(QtCore.Qt.AlignRight)
self.textGrid.addWidget(label, row + 1, 0, 1, 1)
textButton = self._makeLineButton(colourAttr)
self.textGrid.addWidget(textButton, row + 1, 1, 1, 1)
for row, colour in enumerate(self._currentScheme.iterAreaColours()):
colourAttr = colour.colourAttrs
label = QLabel(self.frame_2)
label.setText(colourAttr.longName)
label.setAlignment(QtCore.Qt.AlignRight)
self.areaGrid.addWidget(label, row + 1, 0, 1, 1)
if colourAttr.background:
backgroundButton = self._makeBackgroundButton(colourAttr)
self.areaGrid.addWidget(backgroundButton, row + 1, 1, 1, 1)
if colourAttr.border:
combo = self._makeLineCombo(colourAttr)
self.areaGrid.addWidget(combo, row + 1, 2, 1, 1)
lineButton = self._makeLineButton(colourAttr)
self.areaGrid.addWidget(lineButton, row + 1, 3, 1, 1)
self._setColourValues()
@staticmethod
def _styleButton(button, colour):
button.setText("")
button.setAutoFillBackground(True)
ss = """QPushButton {
background: rgba(%d, %d, %d, %d);
border-color: black;
border-width:1px;
color: black;
border-style: ridge;
}
QPushButton:hover {
border-width:2px;
border-color: red;
}"""
ss %= colour.getRgb()
if colour.getRgb()[3] == 0:
button.setText("Transparent")
button.setStyleSheet(ss)
def _makeColourSelector(self, button, colourAttr, colourType):
def selectColour():
colour = colourAttr.getInstance(self._currentScheme)
currentColour = getattr(colour, colourType)
colourDialog = QColorDialog(currentColour, self)
if colourDialog.exec_():
selected = colourDialog.selectedColor()
if selected != currentColour:
self._styleButton(button, selected)
setattr(colour, colourType, selected)
button.clicked.connect(selectColour)
self._colourSelectors.append((button, colourAttr,
colourType))
def _makeBackgroundButton(self, colourAttr):
backgroundButton = QPushButton(self)
backgroundButton.setObjectName(colourAttr.attrName + "background_col")
self._makeColourSelector(
backgroundButton, colourAttr, "backgroundColour")
return backgroundButton
def _makeLineCombo(self, colourAttr):
combo = QComboBox(self)
combo.setObjectName(colourAttr.attrName + "border_style")
for lineStyle in STYLES:
combo.addItem(lineStyle)
def setLineStyle(newIndex):
colour = colourAttr.getInstance(self._currentScheme)
colour.borderStyle = STYLES[newIndex]
combo.currentIndexChanged.connect(setLineStyle)
self._lineSelectors.append((combo, colourAttr))
return combo
def _makeLineButton(self, colourAttr):
lineButton = QPushButton(self)
lineButton.setObjectName(colourAttr.attrName + "border_col")
self._makeColourSelector(lineButton, colourAttr, "borderColour")
return lineButton
def getColourScheme(self):
return self._currentScheme
def _setColourValues(self):
for button, colourAttr, colourType in self._colourSelectors:
colour = colourAttr.getInstance(self._currentScheme)
colourVal = getattr(colour, colourType)
self._styleButton(button, colourVal)
for combo, colourAttr in self._lineSelectors:
colour = colourAttr.getInstance(self._currentScheme)
currentStyle = colour.borderStyle
for selected, lineStyle in enumerate(STYLES):
if STYLE_MAP[lineStyle] == currentStyle:
combo.setCurrentIndex(selected)
def reset(self):
self._currentScheme = copy.deepcopy(self._originalScheme)
self._setColourValues()
def restoreDefaults(self):
self._currentScheme = copy.deepcopy(ColourScheme())
self._setColourValues()
def main():
from PyQt4.QtGui import QApplication
import sys
app = QApplication(sys.argv)
scheme = ColourScheme()
dialog = DBColourPicker(scheme)
dialog.show()
app.exec_()
if dialog.result():
scheme = dialog.getColourScheme()
for col in scheme.iterColours():
print col.colourAttrs.longName, col.toString()
if __name__ == "__main__":
main()
| gpl-3.0 | 4,722,268,532,639,419,000 | 36.21134 | 94 | 0.599113 | false | 4.279194 | false | false | false |
itsmeolivia/interview | simple-database.py | 1 | 1853 | #!/usr/bin/env python
var_map = {}
val_map = {}
undo = []
rolling_back = False
def in_transaction_block():
return len(undo) != 0 and not rolling_back
def set_var(name, value):
if name in var_map:
if in_transaction_block():
undo.append((set_var, name, var_map[name]))
val_map[var_map[name]] -= 1
elif in_transaction_block():
undo.append((unset, name))
if value in val_map:
val_map[value] += 1
else:
val_map[value] = 1
var_map[name] = value
def get_val(name):
if name in var_map:
print var_map[name]
else:
print "NULL"
def unset(name):
if in_transaction_block():
undo.append((set_var, name, var_map[name]))
val_map[var_map[name]] -= 1
del var_map[name]
def num_equal_to(value):
if value in val_map:
print val_map[value]
else:
print "0"
def commit():
if not in_transaction_block():
print "NO TRANSACTION"
return
global undo
undo = []
def rollback():
if not in_transaction_block():
print "NO TRANSACTION"
return
global rolling_back
rolling_back = True
rolling = None
while rolling != "begin":
rolling = undo.pop()
if rolling != "begin":
rolling[0](*rolling[1:])
rolling_back = False
def begin():
undo.append("begin")
def main():
dispatch_table = {
"GET": get_val,
"SET": set_var,
"UNSET": unset,
"NUMEQUALTO": num_equal_to,
"END": exit,
"BEGIN": begin,
"COMMIT": commit,
"ROLLBACK": rollback,
}
while True:
try:
line = raw_input()
except EOFError:
exit()
command = line.split()
dispatch_table[command[0]](*command[1:])
if __name__ == "__main__":
main()
| mit | -1,301,698,002,153,495,600 | 17.908163 | 55 | 0.533189 | false | 3.412523 | false | false | false |
CloverHealth/pycon2017 | etl_nested/tests/conftest.py | 1 | 2618 | import os
from collections import namedtuple
import pytest
import sqlalchemy as sa
import sqlalchemy.engine as sa_engine
import sqlalchemy.orm as sa_orm
import testing.postgresql
from app import models
# re-useable test database subdirectory
KEEPDB_PATH = '.test_db'
# Test database options
DatabaseConfig = namedtuple(
'DatabaseConfig',
['keepdb_active', 'keepdb_path']
)
@pytest.fixture(scope='session')
def db_options(request, root_path:str) -> DatabaseConfig:
"""
Fixture of test database options for the entire pytest session
:param request: pytest fixture request (FixtureRequest)
"""
keepdb_active = request.config.getoption('--keepdb')
if keepdb_active:
keepdb_path = os.path.join(root_path, KEEPDB_PATH)
else:
keepdb_path = None
return DatabaseConfig(keepdb_active, keepdb_path)
@pytest.fixture(scope='session')
def db_url(db_options: DatabaseConfig):
"""
Postgres conninfo URL for the test database.
This URL is usually a transient database managed by the 'testing.postgres' library.
If the '--keepdb' option is specified, it will force it to be persistent at a known local path.
:param db_options: test database options
"""
testdb_kwargs = {}
if db_options.keepdb_path:
testdb_kwargs['base_dir'] = db_options.keepdb_path
with testing.postgresql.Postgresql(**testdb_kwargs) as postgresql:
yield postgresql.url()
@pytest.fixture(scope='session')
def db_engine(db_options:DatabaseConfig, db_url: str):
"""
Fixture providing SQLAlchemy test database connectivity
:param db_options: database options
:param db_url: test database conninfo URL
"""
db_engine = sa.create_engine(db_url)
# speed up tests by only installing schema if there was no prior database created with --keepdb
if not db_options.keepdb_active or os.path.exists(db_options.keepdb_path):
models.init_database(db_engine)
yield db_engine
db_engine.dispose()
@pytest.fixture
def session(db_engine: sa_engine.Engine):
"""
Fixture providing SQLAlchemy session for operations on ORM-mapped objects
:param db_engine: test database connectivity instance
"""
sessionmaker = sa_orm.sessionmaker(db_engine)
# session is automatically rolled back regardless of test result
# if an uncaught exception occurred, ensure it is still propagated to pytest with the original traceback
session = None
try:
session = sessionmaker()
yield session
except:
raise
finally:
session.rollback()
| bsd-3-clause | -166,317,570,797,609,380 | 26.851064 | 108 | 0.700153 | false | 4.009188 | true | false | false |
akretion/rma | __unported__/crm_claim_rma/tests/test_lp_1282584.py | 5 | 3990 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Vaucher
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tests import common
class test_lp_1282584(common.TransactionCase):
""" Test wizard open the right type of view
The wizard can generate picking.in and picking.out
Let's ensure it open the right view for each picking type
"""
def setUp(self):
super(test_lp_1282584, self).setUp()
cr, uid = self.cr, self.uid
self.WizardMakePicking = self.registry('claim_make_picking.wizard')
ClaimLine = self.registry('claim.line')
Claim = self.registry('crm.claim')
self.product_id = self.ref('product.product_product_4')
self.partner_id = self.ref('base.res_partner_12')
# Create the claim with a claim line
self.claim_id = Claim.create(
cr, uid,
{
'name': 'TEST CLAIM',
'number': 'TEST CLAIM',
'claim_type': 'customer',
'delivery_address_id': self.partner_id,
})
claim = Claim.browse(cr, uid, self.claim_id)
self.warehouse_id = claim.warehouse_id.id
self.claim_line_id = ClaimLine.create(
cr, uid,
{
'name': 'TEST CLAIM LINE',
'claim_origine': 'none',
'product_id': self.product_id,
'claim_id': self.claim_id,
'location_dest_id': claim.warehouse_id.lot_stock_id.id
})
def test_00(self):
"""Test wizard opened view model for a new product return
"""
cr, uid = self.cr, self.uid
wiz_context = {
'active_id': self.claim_id,
'partner_id': self.partner_id,
'warehouse_id': self.warehouse_id,
'picking_type': 'in',
}
wizard_id = self.WizardMakePicking.create(cr, uid, {
}, context=wiz_context)
res = self.WizardMakePicking.action_create_picking(
cr, uid, [wizard_id], context=wiz_context)
self.assertEquals(res.get('res_model'), 'stock.picking.in', "Wrong model defined")
def test_01(self):
"""Test wizard opened view model for a new delivery
"""
cr, uid = self.cr, self.uid
WizardChangeProductQty = self.registry('stock.change.product.qty')
wiz_context = {'active_id': self.product_id}
wizard_chg_qty_id = WizardChangeProductQty.create(cr, uid, {
'product_id': self.product_id,
'new_quantity': 12})
WizardChangeProductQty.change_product_qty(cr, uid, [wizard_chg_qty_id], context=wiz_context)
wiz_context = {
'active_id': self.claim_id,
'partner_id': self.partner_id,
'warehouse_id': self.warehouse_id,
'picking_type': 'out',
}
wizard_id = self.WizardMakePicking.create(cr, uid, {
}, context=wiz_context)
res = self.WizardMakePicking.action_create_picking(
cr, uid, [wizard_id], context=wiz_context)
self.assertEquals(res.get('res_model'), 'stock.picking.out', "Wrong model defined")
| agpl-3.0 | -6,670,841,462,261,813,000 | 35.944444 | 100 | 0.573684 | false | 3.900293 | true | false | false |
nave91/teak-nbtree | src/table.py | 1 | 1430 | from globfile import *
from lib import *
def tableprint(z): #prints table with the summary
print rowprint(colname[z]),'%10s' % 'notes'
print rowprint(expected(colname[z],z)), '%10s' % 'expected'
temp = [ c for c in range(len(colname[z]))]
for c in colname[z]:
if c in nump[z]:
temp[colname[z].index(c)] = str('%0.2f' % round(sd[z][c],2))
else:
temp[colname[z].index(c)] = str('%0.2f' % round(float(most[z][c])/float(n[z][c]),2))
print rowprint(temp),'%10s' % 'certainity'
for row in data[z]:
print rowprint(row)
def tableprint1(z):
print rowprint(colname[z])
for row in data[z]:
print rowprint(row)
def klass1(data, z):
for k in klass[z]:
return data[colname[z].index(k)]
def klassAt(z):
for k in klass[z]:
return colname[z].index(k)
def fromHell(row,z,more,less):
m = 0
out = 0
aLittle = 0.001
if z in more:
for c in more[z]:
ind = colname[z].index(c)
if row[ind] != '?':
m+=1
print ind,z
out += ((row[ind] - hi[z][c]) / (hi[z][c] - lo[z][c] + aLittle))**2
if z in less:
for c in less[z]:
ind = colname[z].index(c)
if row[ind] != '?':
m+=1
out += ((row[ind] - hi[z][c])/ (hi[z][c] - lo[z][c] + aLittle))**2
return out**0.5/m**5 if m == 1 else 1
| gpl-2.0 | 2,276,006,926,522,703,400 | 30.086957 | 96 | 0.502098 | false | 2.831683 | false | false | false |
rht/zulip | zerver/lib/streams.py | 1 | 13621 | from typing import Any, Iterable, List, Mapping, Set, Tuple, Optional, Union
from django.utils.translation import ugettext as _
from zerver.lib.actions import check_stream_name, create_streams_if_needed
from zerver.lib.request import JsonableError
from zerver.models import UserProfile, Stream, Subscription, \
Realm, Recipient, get_stream, \
bulk_get_streams, get_realm_stream, DefaultStreamGroup, get_stream_by_id_in_realm
from django.db.models.query import QuerySet
def check_for_exactly_one_stream_arg(stream_id: Optional[int], stream: Optional[str]) -> None:
if stream_id is None and stream is None:
raise JsonableError(_("Please supply 'stream'."))
if stream_id is not None and stream is not None:
raise JsonableError(_("Please choose one: 'stream' or 'stream_id'."))
def access_stream_for_delete_or_update(user_profile: UserProfile, stream_id: int) -> Stream:
# We should only ever use this for realm admins, who are allowed
# to delete or update all streams on their realm, even private streams
# to which they are not subscribed. We do an assert here, because
# all callers should have the require_realm_admin decorator.
assert(user_profile.is_realm_admin)
error = _("Invalid stream id")
try:
stream = Stream.objects.get(id=stream_id)
except Stream.DoesNotExist:
raise JsonableError(error)
if stream.realm_id != user_profile.realm_id:
raise JsonableError(error)
return stream
# Only set allow_realm_admin flag to True when you want to allow realm admin to
# access unsubscribed private stream content.
def access_stream_common(user_profile: UserProfile, stream: Stream,
error: str,
require_active: bool=True,
allow_realm_admin: bool=False) -> Tuple[Recipient, Optional[Subscription]]:
"""Common function for backend code where the target use attempts to
access the target stream, returning all the data fetched along the
way. If that user does not have permission to access that stream,
we throw an exception. A design goal is that the error message is
the same for streams you can't access and streams that don't exist."""
# First, we don't allow any access to streams in other realms.
if stream.realm_id != user_profile.realm_id:
raise JsonableError(error)
recipient = stream.recipient
try:
sub = Subscription.objects.get(user_profile=user_profile,
recipient=recipient,
active=require_active)
except Subscription.DoesNotExist:
sub = None
# If the stream is in your realm and public, you can access it.
if stream.is_public() and not user_profile.is_guest:
return (recipient, sub)
# Or if you are subscribed to the stream, you can access it.
if sub is not None:
return (recipient, sub)
# For some specific callers (e.g. getting list of subscribers,
# removing other users from a stream, and updating stream name and
# description), we allow realm admins to access stream even if
# they are not subscribed to a private stream.
if user_profile.is_realm_admin and allow_realm_admin:
return (recipient, sub)
# Otherwise it is a private stream and you're not on it, so throw
# an error.
raise JsonableError(error)
def access_stream_by_id(user_profile: UserProfile,
stream_id: int,
require_active: bool=True,
allow_realm_admin: bool=False) -> Tuple[Stream, Recipient, Optional[Subscription]]:
stream = get_stream_by_id(stream_id)
error = _("Invalid stream id")
(recipient, sub) = access_stream_common(user_profile, stream, error,
require_active=require_active,
allow_realm_admin=allow_realm_admin)
return (stream, recipient, sub)
def get_public_streams_queryset(realm: Realm) -> 'QuerySet[Stream]':
return Stream.objects.filter(realm=realm, invite_only=False,
history_public_to_subscribers=True)
def get_stream_by_id(stream_id: int) -> Stream:
error = _("Invalid stream id")
try:
stream = Stream.objects.get(id=stream_id)
except Stream.DoesNotExist:
raise JsonableError(error)
return stream
def check_stream_name_available(realm: Realm, name: str) -> None:
check_stream_name(name)
try:
get_stream(name, realm)
raise JsonableError(_("Stream name '%s' is already taken.") % (name,))
except Stream.DoesNotExist:
pass
def access_stream_by_name(user_profile: UserProfile,
stream_name: str,
allow_realm_admin: bool=False) -> Tuple[Stream, Recipient, Optional[Subscription]]:
error = _("Invalid stream name '%s'") % (stream_name,)
try:
stream = get_realm_stream(stream_name, user_profile.realm_id)
except Stream.DoesNotExist:
raise JsonableError(error)
(recipient, sub) = access_stream_common(user_profile, stream, error,
allow_realm_admin=allow_realm_admin)
return (stream, recipient, sub)
def access_stream_for_unmute_topic_by_name(user_profile: UserProfile,
stream_name: str,
error: str) -> Stream:
"""
It may seem a little silly to have this helper function for unmuting
topics, but it gets around a linter warning, and it helps to be able
to review all security-related stuff in one place.
Our policy for accessing streams when you unmute a topic is that you
don't necessarily need to have an active subscription or even "legal"
access to the stream. Instead, we just verify the stream_id has been
muted in the past (not here, but in the caller).
Long term, we'll probably have folks just pass us in the id of the
MutedTopic row to unmute topics.
"""
try:
stream = get_stream(stream_name, user_profile.realm)
except Stream.DoesNotExist:
raise JsonableError(error)
return stream
def access_stream_for_unmute_topic_by_id(user_profile: UserProfile,
stream_id: int,
error: str) -> Stream:
try:
stream = Stream.objects.get(id=stream_id, realm_id=user_profile.realm_id)
except Stream.DoesNotExist:
raise JsonableError(error)
return stream
def can_access_stream_history(user_profile: UserProfile, stream: Stream) -> bool:
"""Determine whether the provided user is allowed to access the
history of the target stream. The stream is specified by name.
This is used by the caller to determine whether this user can get
historical messages before they joined for a narrowing search.
Because of the way our search is currently structured,
we may be passed an invalid stream here. We return
False in that situation, and subsequent code will do
validation and raise the appropriate JsonableError.
Note that this function should only be used in contexts where
access_stream is being called elsewhere to confirm that the user
can actually see this stream.
"""
if stream.is_history_realm_public() and not user_profile.is_guest:
return True
if stream.is_history_public_to_subscribers():
# In this case, we check if the user is subscribed.
error = _("Invalid stream name '%s'") % (stream.name,)
try:
(recipient, sub) = access_stream_common(user_profile, stream, error)
except JsonableError:
return False
return True
return False
def can_access_stream_history_by_name(user_profile: UserProfile, stream_name: str) -> bool:
try:
stream = get_stream(stream_name, user_profile.realm)
except Stream.DoesNotExist:
return False
return can_access_stream_history(user_profile, stream)
def can_access_stream_history_by_id(user_profile: UserProfile, stream_id: int) -> bool:
try:
stream = get_stream_by_id_in_realm(stream_id, user_profile.realm)
except Stream.DoesNotExist:
return False
return can_access_stream_history(user_profile, stream)
def filter_stream_authorization(user_profile: UserProfile,
streams: Iterable[Stream]) -> Tuple[List[Stream], List[Stream]]:
streams_subscribed = set() # type: Set[int]
recipient_ids = [stream.recipient_id for stream in streams]
subs = Subscription.objects.filter(user_profile=user_profile,
recipient_id__in=recipient_ids,
active=True)
for sub in subs:
streams_subscribed.add(sub.recipient.type_id)
unauthorized_streams = [] # type: List[Stream]
for stream in streams:
# The user is authorized for their own streams
if stream.id in streams_subscribed:
continue
# Users are not authorized for invite_only streams, and guest
# users are not authorized for any streams
if stream.invite_only or user_profile.is_guest:
unauthorized_streams.append(stream)
authorized_streams = [stream for stream in streams if
stream.id not in set(stream.id for stream in unauthorized_streams)]
return authorized_streams, unauthorized_streams
def list_to_streams(streams_raw: Iterable[Mapping[str, Any]],
user_profile: UserProfile,
autocreate: bool=False) -> Tuple[List[Stream], List[Stream]]:
"""Converts list of dicts to a list of Streams, validating input in the process
For each stream name, we validate it to ensure it meets our
requirements for a proper stream name using check_stream_name.
This function in autocreate mode should be atomic: either an exception will be raised
during a precheck, or all the streams specified will have been created if applicable.
@param streams_raw The list of stream dictionaries to process;
names should already be stripped of whitespace by the caller.
@param user_profile The user for whom we are retreiving the streams
@param autocreate Whether we should create streams if they don't already exist
"""
# Validate all streams, getting extant ones, then get-or-creating the rest.
stream_set = set(stream_dict["name"] for stream_dict in streams_raw)
for stream_name in stream_set:
# Stream names should already have been stripped by the
# caller, but it makes sense to verify anyway.
assert stream_name == stream_name.strip()
check_stream_name(stream_name)
existing_streams = [] # type: List[Stream]
missing_stream_dicts = [] # type: List[Mapping[str, Any]]
existing_stream_map = bulk_get_streams(user_profile.realm, stream_set)
member_creating_announcement_only_stream = False
for stream_dict in streams_raw:
stream_name = stream_dict["name"]
stream = existing_stream_map.get(stream_name.lower())
if stream is None:
if stream_dict.get("is_announcement_only", False) and not user_profile.is_realm_admin:
member_creating_announcement_only_stream = True
missing_stream_dicts.append(stream_dict)
else:
existing_streams.append(stream)
if len(missing_stream_dicts) == 0:
# This is the happy path for callers who expected all of these
# streams to exist already.
created_streams = [] # type: List[Stream]
else:
# autocreate=True path starts here
if not user_profile.can_create_streams():
raise JsonableError(_('User cannot create streams.'))
elif not autocreate:
raise JsonableError(_("Stream(s) (%s) do not exist") % ", ".join(
stream_dict["name"] for stream_dict in missing_stream_dicts))
elif member_creating_announcement_only_stream:
raise JsonableError(_('User cannot create a stream with these settings.'))
# We already filtered out existing streams, so dup_streams
# will normally be an empty list below, but we protect against somebody
# else racing to create the same stream. (This is not an entirely
# paranoid approach, since often on Zulip two people will discuss
# creating a new stream, and both people eagerly do it.)
created_streams, dup_streams = create_streams_if_needed(realm=user_profile.realm,
stream_dicts=missing_stream_dicts)
existing_streams += dup_streams
return existing_streams, created_streams
def access_default_stream_group_by_id(realm: Realm, group_id: int) -> DefaultStreamGroup:
try:
return DefaultStreamGroup.objects.get(realm=realm, id=group_id)
except DefaultStreamGroup.DoesNotExist:
raise JsonableError(_("Default stream group with id '%s' does not exist.") % (group_id,))
def get_stream_by_narrow_operand_access_unchecked(operand: Union[str, int], realm: Realm) -> Stream:
"""This is required over access_stream_* in certain cases where
we need the stream data only to prepare a response that user can access
and not send it out to unauthorized recipients.
"""
if isinstance(operand, str):
return get_stream(operand, realm)
return get_stream_by_id_in_realm(operand, realm)
| apache-2.0 | -5,349,482,631,936,922,000 | 43.224026 | 109 | 0.65722 | false | 4.234069 | false | false | false |
markstory/lint-review | lintreview/web.py | 1 | 2433 | import logging
import pkg_resources
from flask import Flask, request, Response
from lintreview.config import load_config
from lintreview.github import get_repository, get_lintrc
from lintreview.tasks import process_pull_request
config = load_config()
app = Flask("lintreview")
app.config.update(config)
log = logging.getLogger(__name__)
version = pkg_resources.get_distribution('lintreview').version
@app.route("/ping")
def ping():
return "lint-review: %s pong\n" % (version,)
@app.route("/review/start", methods=["POST"])
def start_review():
event = request.headers.get('X-Github-Event')
if event == 'ping':
return Response(status=200)
try:
payload = request.get_json()
action = payload["action"]
pull_request = payload["pull_request"]
number = pull_request["number"]
base_repo_url = pull_request["base"]["repo"]["git_url"]
head_repo_url = pull_request["head"]["repo"]["git_url"]
head_repo_ref = pull_request["head"]["ref"]
user = pull_request["base"]["repo"]["owner"]["login"]
head_user = pull_request["head"]["repo"]["owner"]["login"]
repo = pull_request["base"]["repo"]["name"]
head_repo = pull_request["head"]["repo"]["name"]
except Exception as e:
log.error("Got an invalid JSON body. '%s'", e)
return Response(status=403,
response="You must provide a valid JSON body\n")
log.info("Received GitHub pull request notification for "
"%s %s, (%s) from: %s",
base_repo_url, number, action, head_repo_url)
if action not in ("opened", "synchronize", "reopened"):
log.info("Ignored '%s' action." % action)
return Response(status=204)
gh = get_repository(app.config, head_user, head_repo)
try:
lintrc = get_lintrc(gh, head_repo_ref)
log.debug("lintrc file contents '%s'", lintrc)
except Exception as e:
log.warn("Cannot download .lintrc file for '%s', "
"skipping lint checks.", base_repo_url)
log.warn(e)
return Response(status=204)
try:
log.info("Scheduling pull request for %s/%s %s", user, repo, number)
process_pull_request.delay(user, repo, number, lintrc)
except Exception:
log.error('Could not publish job to celery. Make sure its running.')
return Response(status=500)
return Response(status=204)
| mit | -8,445,128,408,838,647,000 | 34.779412 | 76 | 0.622688 | false | 3.760433 | true | false | false |
matheuscas/pyfuzzy_toolbox | tests/test_features.py | 1 | 20690 | from pyfuzzy_toolbox import transformation as trans
from pyfuzzy_toolbox import preprocessing as pre
import pyfuzzy_toolbox.features.count as count_features
import pyfuzzy_toolbox.features.max as max_features
import pyfuzzy_toolbox.features.sum as sum_features
import test_preprocessing as tpre
import nose
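# Module-level fixtures: each test text from test_preprocessing is pushed once
# through the preprocessing and transformation pipeline; the resulting
# bag-of-words sentences are shared by all tests in this module.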
print 'Loading test text 1'
bow_sentences_1 = pre.start(tpre.text_1)
bow_sentences_1 = trans.start(bow_sentences_1)
print 'Loading test text 1a'
bow_sentences_1a = pre.start(tpre.text_1a)
bow_sentences_1a = trans.start(bow_sentences_1a)
print 'Loading test text 2a'
bow_sentences_2a = pre.start(tpre.text_2a)
bow_sentences_2a = trans.start(bow_sentences_2a)
""" ----------------------------- SUM FEATURES ----------------------------- """
"""UNIGRAMS"""
def test_sum_of_positive_adjectives_scores():
expected_sum = 0.0855961827957
sum_of_positive_adjectives = sum_features.sum_of_unigrams_scores(bow_sentences_1)
nose.tools.assert_almost_equal(expected_sum, sum_of_positive_adjectives)
def test_sum_of_positive_adverbs_scores():
expected_sum = 0.0
sum_of_positive_adverbs = sum_features.sum_of_unigrams_scores(
bow_sentences_1, unigram=count_features.ADVS)
nose.tools.assert_almost_equal(expected_sum, sum_of_positive_adverbs)
def test_sum_of_positive_verbs_scores():
expected_sum = 0.02447258064516129
sum_of_positive_verbs = sum_features.sum_of_unigrams_scores(
bow_sentences_1, unigram=count_features.VERBS)
nose.tools.assert_almost_equal(expected_sum, sum_of_positive_verbs)
def test_sum_of_negative_adjectives_scores():
expected_sum = -0.06547738317757008
sum_of_negative_adjectives = sum_features.sum_of_unigrams_scores(
bow_sentences_1a, positive=False)
nose.tools.assert_almost_equal(expected_sum, sum_of_negative_adjectives)
def test_sum_of_negative_adverbs_scores():
expected_sum = -0.00891862928349
sum_of_negative_adverbs = sum_features.sum_of_unigrams_scores(
bow_sentences_1a, unigram=count_features.ADVS, positive=False)
nose.tools.assert_almost_equal(expected_sum, sum_of_negative_adverbs)
def test_sum_of_negative_verbs_scores():
expected_sum = 0.0
sum_of_negative_verbs = sum_features.sum_of_unigrams_scores(
bow_sentences_1a, unigram=count_features.VERBS, positive=False)
nose.tools.assert_almost_equal(expected_sum, sum_of_negative_verbs)
def test_sum_ratio_of_positive_adjectives_scores():
expected_sum = 0.0004601945311596716
sum_of_positive_adjectives = sum_features.sum_of_unigrams_scores(
bow_sentences_1, ratio=True)
nose.tools.assert_almost_equal(expected_sum, sum_of_positive_adjectives)
def test_sum_ratio_of_positive_adverbs_scores():
expected_sum = 0.0
sum_of_positive_adverbs = sum_features.sum_of_unigrams_scores(
bow_sentences_1, unigram=count_features.ADVS, ratio=True)
nose.tools.assert_almost_equal(expected_sum, sum_of_positive_adverbs)
def test_sum_ratio_of_positive_verbs_scores():
expected_sum = 0.00013157301422129724
sum_of_positive_verbs = sum_features.sum_of_unigrams_scores(
bow_sentences_1, unigram=count_features.VERBS, ratio=True, positive=True)
nose.tools.assert_almost_equal(expected_sum, sum_of_positive_verbs)
def test_sum_ratio_of_negative_adjectives_scores():
expected_sum = -0.0008910665972944851
sum_of_negative_adjectives = sum_features.sum_of_unigrams_scores(
bow_sentences_1, ratio=True, positive=False)
nose.tools.assert_almost_equal(expected_sum, sum_of_negative_adjectives)
def test_sum_ratio_of_negative_adverbs_scores():
expected_sum = -2.7783891848875693e-05
sum_of_negative_adverbs = sum_features.sum_of_unigrams_scores(
bow_sentences_1a, unigram=count_features.ADVS, ratio=True, positive=False)
nose.tools.assert_almost_equal(expected_sum, sum_of_negative_adverbs)
def test_sum_ratio_of_negative_verbs_scores():
expected_sum = -0.000179220719158
sum_of_negative_verbs = sum_features.sum_of_unigrams_scores(
bow_sentences_1, unigram=count_features.VERBS, ratio=True, positive=False)
nose.tools.assert_almost_equal(expected_sum, sum_of_negative_verbs)
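# The positive-to-negative ratio sum is expected to equal the positive and
# negative sums for the given unigram class added together.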
def test_positive_to_negative_ratio_sum_scores_adjectives():
expected_ratio_sum = (0.0855961827957 + (-0.165738387097))
    positive_to_negative_ratio_sum = sum_features.positive_to_negative_ratio_sum_unigrams_scores(
        bow_sentences_1)
    nose.tools.assert_almost_equal(
        expected_ratio_sum, positive_to_negative_ratio_sum)
def test_positive_to_negative_ratio_sum_scores_adverbs():
expected_ratio_sum = (0.0105152647975 + (-0.00891862928349))
    positive_to_negative_ratio_sum = sum_features.positive_to_negative_ratio_sum_unigrams_scores(
        bow_sentences_1a, unigram=count_features.ADVS)
    nose.tools.assert_almost_equal(
        expected_ratio_sum, positive_to_negative_ratio_sum)
def test_positive_to_negative_ratio_sum_scores_verbs():
expected_ratio_sum = (0.0223977570093 + (0.0))
    positive_to_negative_ratio_sum = sum_features.positive_to_negative_ratio_sum_unigrams_scores(
        bow_sentences_1a, unigram=count_features.VERBS)
    nose.tools.assert_almost_equal(
        expected_ratio_sum, positive_to_negative_ratio_sum)
"""BIGRAMS"""
def test_sum_of_positive_adjectives_scores_and_bigrams_with_adjectives():
expected_sum = 0.0855961827957
sum_of_positive_adjectives_and_bigrams_with_adjectives = sum_features.sum_of_unigrams_and_bigrams_scores(
bow_sentences_1)
nose.tools.assert_almost_equal(
expected_sum, sum_of_positive_adjectives_and_bigrams_with_adjectives)
def test_sum_of_negative_adjectives_scores_and_bigrams_with_adjectives():
expected_sum = -2.2411307476635516
sum_of_negative_adjectives_and_bigrams_with_adjectives = sum_features.sum_of_unigrams_and_bigrams_scores(
bow_sentences_1a, positive=False)
nose.tools.assert_almost_equal(
expected_sum, sum_of_negative_adjectives_and_bigrams_with_adjectives)
def test_sum_of_positive_adverbs_scores_and_bigrams_with_adverbs():
expected_sum = 0.0
sum_of_positive_adverbs_and_bigrams_with_adverbs = sum_features.sum_of_unigrams_and_bigrams_scores(
bow_sentences_1, unigram=count_features.ADVS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.ADVS)
nose.tools.assert_almost_equal(
expected_sum, sum_of_positive_adverbs_and_bigrams_with_adverbs)
def test_sum_of_negative_adverbs_scores_and_bigrams_with_adverbs():
expected_sum = -0.00891862928349
sum_of_negative_adverbs_and_bigrams_with_adverbs = sum_features.sum_of_unigrams_and_bigrams_scores(
bow_sentences_1a, unigram=count_features.ADVS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.ADVS, positive=False)
nose.tools.assert_almost_equal(
expected_sum, sum_of_negative_adverbs_and_bigrams_with_adverbs)
def test_sum_of_positive_verbs_scores_and_bigrams_with_verbs():
expected_sum = 0.7079659139784946
sum_of_positive_verbs_and_bigrams_with_verbs = sum_features.sum_of_unigrams_and_bigrams_scores(
bow_sentences_1, unigram=count_features.VERBS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.VERBS)
nose.tools.assert_almost_equal(
expected_sum, sum_of_positive_verbs_and_bigrams_with_verbs)
def test_sum_of_negative_verbs_scores_and_bigrams_with_verbs():
expected_sum = -0.0333350537634
sum_of_negative_verbs_and_bigrams_with_verbs = sum_features.sum_of_unigrams_and_bigrams_scores(
bow_sentences_1, unigram=count_features.VERBS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.VERBS, positive=False)
nose.tools.assert_almost_equal(
expected_sum, sum_of_negative_verbs_and_bigrams_with_verbs)
def test_sum_ratio_of_positive_adjectives_scores_and_bigrams_with_adjectives():
expected_sum = 0.0855961827957 / 186
sum_of_positive_adjectives_and_bigrams_with_adjectives = sum_features.sum_of_unigrams_and_bigrams_scores(
bow_sentences_1, ratio=True)
nose.tools.assert_almost_equal(
expected_sum, sum_of_positive_adjectives_and_bigrams_with_adjectives)
def test_sum_ratio_of_negative_adjectives_scores_and_bigrams_with_adjectives():
expected_sum = -0.006981715724808572
sum_of_negative_adjectives_and_bigrams_with_adjectives = sum_features.sum_of_unigrams_and_bigrams_scores(
bow_sentences_1a, positive=False, ratio=True)
nose.tools.assert_almost_equal(
expected_sum, sum_of_negative_adjectives_and_bigrams_with_adjectives)
def test_sum_ratio_of_positive_adverbs_scores_and_bigrams_with_adverbs():
expected_sum = 0.0
sum_of_positive_adverbs_and_bigrams_with_adverbs = sum_features.sum_of_unigrams_and_bigrams_scores(
bow_sentences_1, unigram=count_features.ADVS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.ADVS, ratio=True)
nose.tools.assert_almost_equal(
expected_sum, sum_of_positive_adverbs_and_bigrams_with_adverbs)
def test_sum_ratio_of_negative_adverbs_scores_and_bigrams_with_adverbs():
expected_sum = -0.00891862928349 / 321
sum_of_negative_adverbs_and_bigrams_with_adverbs = sum_features.sum_of_unigrams_and_bigrams_scores(
bow_sentences_1a, unigram=count_features.ADVS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.ADVS, positive=False, ratio=True)
nose.tools.assert_almost_equal(
expected_sum, sum_of_negative_adverbs_and_bigrams_with_adverbs)
def test_sum_ratio_of_positive_verbs_scores_and_bigrams_with_verbs():
expected_sum = 0.003806268354723089
sum_of_positive_verbs_and_bigrams_with_verbs = sum_features.sum_of_unigrams_and_bigrams_scores(
bow_sentences_1, unigram=count_features.VERBS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.VERBS, ratio=True)
nose.tools.assert_almost_equal(
expected_sum, sum_of_positive_verbs_and_bigrams_with_verbs)
def test_sum_ratio_of_negative_verbs_scores_and_bigrams_with_verbs():
expected_sum = -0.0333350537634 / 186
sum_of_negative_verbs_and_bigrams_with_verbs = sum_features.sum_of_unigrams_and_bigrams_scores(
bow_sentences_1, unigram=count_features.VERBS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.VERBS, positive=False, ratio=True)
nose.tools.assert_almost_equal(
expected_sum, sum_of_negative_verbs_and_bigrams_with_verbs)
def test_positive_to_negative_ratio_sum_scores_adjectives_and_bigrams_with_adjectives():
expected_ratio_sum = 0.0855961827957 - 0.165738387097
positive_to_negative_ratio_sum = sum_features.positive_to_negative_ratio_sum_unigrams_and_bigrams_scores(
bow_sentences_1)
nose.tools.assert_almost_equal(
expected_ratio_sum, positive_to_negative_ratio_sum)
def test_positive_to_negative_ratio_sum_scores_adverbs_and_bigrams_with_adverbs():
expected_ratio_sum = 0.0
positive_to_negative_ratio_sum = sum_features.positive_to_negative_ratio_sum_unigrams_and_bigrams_scores(
bow_sentences_1, unigram=count_features.ADVS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.ADVS)
nose.tools.assert_almost_equal(
expected_ratio_sum, positive_to_negative_ratio_sum)
def test_positive_to_negative_ratio_sum_scores_verbs_and_bigrams_with_verbs():
expected_ratio_sum = 0.6746308602150538
positive_to_negative_ratio_sum = sum_features.positive_to_negative_ratio_sum_unigrams_and_bigrams_scores(
bow_sentences_1, unigram=count_features.VERBS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.VERBS)
nose.tools.assert_almost_equal(
expected_ratio_sum, positive_to_negative_ratio_sum)
""" ----------------------------- COUNT FEATURES ----------------------------- """
"""UNIGRAMS"""
def test_positive_scores_adjectives_count():
expected_count = count_features.count_of_unigrams_scores(
bow_sentences_1a, unigram=count_features.ADJS, positive=True)
assert expected_count == 16
def test_negative_scores_adjectives_count():
expected_count = count_features.count_of_unigrams_scores(
bow_sentences_1a, unigram=count_features.ADJS, positive=False)
assert expected_count == 4
def test_positive_scores_adverbs_count():
expected_count = count_features.count_of_unigrams_scores(
bow_sentences_1a, unigram=count_features.ADVS, positive=True)
assert expected_count == 1
def test_negative_scores_adverbs_count():
expected_count = count_features.count_of_unigrams_scores(
bow_sentences_1a, unigram=count_features.ADVS, positive=False)
assert expected_count == 2
def test_positive_scores_verbs_count():
expected_count = count_features.count_of_unigrams_scores(
bow_sentences_1a, unigram=count_features.VERBS, positive=True)
assert expected_count == 5
def test_negative_scores_verbs_count():
expected_count = count_features.count_of_unigrams_scores(
bow_sentences_1a, unigram=count_features.VERBS, positive=False)
assert expected_count == 0
def test_positive_to_negative_scores_ratio_of_adjectives_count():
expected_count = count_features.positive_to_negative_ratio_count_unigrams_scores(
bow_sentences_1a, unigram=count_features.ADJS)
assert expected_count == (16 - 4)
def test_positive_to_negative_scores_ratio_of_adverbs_count():
expected_count = count_features.positive_to_negative_ratio_count_unigrams_scores(
bow_sentences_1a, unigram=count_features.ADVS)
assert expected_count == (1 - 2)
def test_positive_to_negative_scores_ratio_of_verbs_count():
expected_count = count_features.positive_to_negative_ratio_count_unigrams_scores(
bow_sentences_1a, unigram=count_features.VERBS)
assert expected_count == (5 - 0)
"""BIGRAMS"""
def test_positive_scores_adjectives_count_and_bigrams_with_adjectives():
expected_count = count_features.count_of_unigrams_and_bigrams_scores(
bow_sentences_1a, unigram=count_features.ADJS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.ADJS, positive=True)
assert expected_count == (16 + 1)
def test_negative_scores_adjectives_count_and_bigrams_with_adjectives():
expected_count = count_features.count_of_unigrams_and_bigrams_scores(
bow_sentences_1a, unigram=count_features.ADJS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.ADJS, positive=False)
assert expected_count == (4 + 3)
def test_positive_scores_adverbs_count_and_bigrams_with_adverbs():
expected_count = count_features.count_of_unigrams_and_bigrams_scores(
bow_sentences_1a, unigram=count_features.ADVS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.ADVS, positive=True)
assert expected_count == (1 + 0)
def test_negative_scores_adverbs_count_and_bigrams_with_adverbs():
expected_count = count_features.count_of_unigrams_and_bigrams_scores(
bow_sentences_1a, unigram=count_features.ADVS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.ADVS, positive=False)
assert expected_count == (2 + 0)
def test_positive_scores_verbs_count_and_bigrams_with_verbs():
expected_count = count_features.count_of_unigrams_and_bigrams_scores(
bow_sentences_1a, unigram=count_features.VERBS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.VERBS, positive=True)
assert expected_count == (5 + 1)
def test_negative_scores_verbs_count_and_bigrams_with_verbs():
expected_count = count_features.count_of_unigrams_and_bigrams_scores(
bow_sentences_1a, unigram=count_features.VERBS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.VERBS, positive=False)
assert expected_count == (0 + 0)
def test_positive_to_negative_scores_ratio_of_adjectives_count_and_bigrams_with_adjectives():
expected_count = count_features.positive_to_negative_ratio_count_unigrams_and_bigrams_scores(
bow_sentences_1a, unigram=count_features.ADJS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.ADJS)
assert expected_count == (16 + 1) - (4 + 3)
def test_positive_to_negative_scores_ratio_of_adverbs_count_and_bigrams_with_adverbs():
expected_count = count_features.positive_to_negative_ratio_count_unigrams_and_bigrams_scores(
bow_sentences_1a, unigram=count_features.ADVS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.ADVS)
assert expected_count == (1 + 0) - (2 + 0)
def test_positive_to_negative_scores_ratio_of_verbs_count_and_bigrams_with_verbs():
expected_count = count_features.positive_to_negative_ratio_count_unigrams_and_bigrams_scores(
bow_sentences_1a, unigram=count_features.VERBS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.VERBS)
assert expected_count == (5 + 1) - (0 + 0)
def test_count_selected_ngrams():
assert count_features.count_selected_ngrams(bow_sentences_1) == 17
assert count_features.count_selected_ngrams(bow_sentences_1a) == 33
assert count_features.count_selected_ngrams(bow_sentences_2a) == 13
""" ----------------------------- MAX FEATURES ----------------------------- """
"""UNIGRAMS"""
def test_max_rule_score_for_adjective():
assert max_features.max_rule_score_for_unigrams(
bow_sentences_1a, unigram=count_features.ADJS)['sign'] == 0
def test_max_rule_score_for_adverbs():
assert max_features.max_rule_score_for_unigrams(
bow_sentences_1a, unigram=count_features.ADVS)['sign'] == 1
def test_max_rule_score_for_verbs():
assert max_features.max_rule_score_for_unigrams(
bow_sentences_1a, unigram=count_features.VERBS)['sign'] == 1
"""BIGRAMS"""
def test_max_rule_score_for_adjective_and_bigrams_with_adjectives():
assert max_features.max_rule_score_for_unigrams_and_bigrams(bow_sentences_1a, unigram=count_features.ADJS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.ADJS) == 0
def test_max_rule_score_for_adverbs_and_bigrams_with_adverbs():
assert max_features.max_rule_score_for_unigrams_and_bigrams(bow_sentences_1a, unigram=count_features.ADVS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.ADVS) == 1
def test_max_rule_score_for_verbs_and_bigrams_with_verbs():
assert max_features.max_rule_score_for_unigrams_and_bigrams(bow_sentences_1a, unigram=count_features.VERBS, bigram_word_1=count_features.ADVS, bigram_word_2=count_features.VERBS) == 1
""" ----------------------------- PERCENTAGE FEATURES ----------------------------- """
def test_percentage_of_negated_ngrams_by_document_size():
nose.tools.assert_almost_equal(0.00537634408602, count_features.percentage_of_negated_ngrams_by_document_size(bow_sentences_1)['value'])
nose.tools.assert_almost_equal(0.0155763239875, count_features.percentage_of_negated_ngrams_by_document_size(bow_sentences_1a)['value'])
nose.tools.assert_almost_equal(0.0127388535032, count_features.percentage_of_negated_ngrams_by_document_size(bow_sentences_2a)['value'])
""" ----------------------------- MODULE TESTS ----------------------------- """
def test_all_count_features():
features_list = count_features.all(bow_sentences_1)
attributes = []
data = []
for fl in features_list:
attributes.append((fl['name'], 'REAL'))
data.append(fl['value'])
bow_sentences_1_dict = {'attributes': attributes, 'data': data}
# print bow_sentences_1_dict['attributes']
# print '------------------------------------------'
# print bow_sentences_1_dict['data']
assert len(bow_sentences_1_dict['attributes']) == len(bow_sentences_1_dict['data']) == 27
def test_all_sum_features():
features_list = sum_features.all(bow_sentences_1)
attributes = []
data = []
for fl in features_list:
attributes.append((fl['name'], 'REAL'))
data.append(fl['value'])
bow_sentences_1_dict = {'attributes': attributes, 'data': data}
# print bow_sentences_1_dict['attributes']
# print '------------------------------------------'
# print bow_sentences_1_dict['data']
assert len(bow_sentences_1_dict['attributes']) == len(bow_sentences_1_dict['data']) == 40
def test_all_max_features():
features_list = max_features.all(bow_sentences_1)
attributes = []
data = []
for fl in features_list:
attributes.append((fl['name'], 'REAL'))
data.append(fl['value'])
bow_sentences_1_dict = {'attributes': attributes, 'data': data}
# print bow_sentences_1_dict['attributes']
# print '------------------------------------------'
# print bow_sentences_1_dict['data']
assert len(bow_sentences_1_dict['attributes']) == len(bow_sentences_1_dict['data']) == 8
| bsd-3-clause | -8,388,361,246,177,500,000 | 42.375262 | 187 | 0.7174 | false | 2.884428 | true | false | false |
mjsauvinen/P4UL | pyLib/plotTools.py | 1 | 32566 | #!/usr/bin/env python3
import sys
import glob
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from utilities import dataFromDict
from matplotlib.ticker import FormatStrFormatter
# 08.04.2016: Mona added an option for colorbar bounds to addImagePlot
plt.rc('xtick', labelsize=24); #plt.rc('ytick.major', size=10)
plt.rc('ytick', labelsize=24); #plt.rc('ytick.minor', size=6)
plt.rcParams["font.family"] = "serif"
plt.rcParams["legend.fontsize"] = "large"
#plt.rcParams["font.serif"] = "Utopia"
#plt.rcParams["font.family"] = "monospace"
#plt.rcParams["font.monospace"] = "Courier"
#plt.rcParams["legend.labelspacing"] = 1.
iCg = 0 # Global integer for color
iMg = 0 # Global integer for markers
iLg = 0 # Global integer for line styles
gxI = -1 # Global x-index for csv animations
gyLst = [] # Global y-value list for csv animations
# The available color maps:
cmaps = { 1:'rainbow', 2:'jet', 3:'hot', 4:'gist_earth', 5:'nipy_spectral',\
6:'coolwarm', 7:'gist_rainbow', 8:'Spectral', 9:'CMRmap', 10:'cubehelix',\
11:'seismic', 12:'bwr', 13:'terrain', 14:'gist_ncar', 15:'gnuplot2', \
16:'BuPu', 17:'GnBu', 18:'RdPu', 19:'YlGnBu', 20:'YlOrRd',\
21:'Oranges', 22:'Reds', 23:'Purples', 24:'Blues'}
# NOTE! Some good ones: 2, 5, 12, 14
# The available color maps in the new version of matplotlib:
cmaps_new = { 1:'viridis', 2:'inferno', 3:'plasma', 4:'magma', 5:'Blues',
6:'BuGn', 7:'BuPu', 8:'GnBu', 9:'Greens', 10:'Greys',
11:'Oranges', 12:'OrRd', 13:'PuBu', 14:'PuBuGn', 15:'PuRd',
16:'Purples', 17:'RdPu', 18:'afmhot', 19:'autumn',
20:'bone', 22:'cool', 23:'copper', 24:'gist_heat',
25:'gray', 26:'hot', 27:'pink', 28:'spring', 29:'summer',
30:'winter', 31:'Reds', 32:'YlGn', 33:'YlGnBu', 34:'YlOrBr',
35:'YlOrRd', 36:'BrBG', 37:'bwr', 38:'coolwarm', 39:'PiYG',
40:'PRGn', 41:'PuOr', 42:'RdBu', 43:'RdGy', 44:'RdYlBu',
45:'RdYlGn', 46:'Spectral', 47:'seismic', 48:'Accent', 49:'Dark2',
50:'Paired', 51:'Pastel1', 52:'Pastel2', 53:'Set1', 54:'Set2',
55:'Set3', 56:'gist_earth',57:'terrain', 58:'ocean', 59:'gist_stern',
60:'brg', 61:'CMRmap', 62:'cubehelix', 63:'gnuplot', 64:'gnuplot2',
65:'gist_ncar',66:'nipy_spectral', 67:'jet', 68:'rainbow', 69:'gist_rainbow',
70:'hsv', 71:'flag', 72:'prism'}
# =*=*=*=* FUNCTION DEFINITIONS *=*=*=*=*=*=*=*=*=*=*=*
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def addFigAxes( fig ):
if( len(fig.get_axes()) == 0 ):
ax = fig.add_axes( [0.115, 0.09 , 0.85 , 0.81] ) #[left, up, width, height]
else:
ax = fig.get_axes()[0]
return ax
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def printDict( D , ncols=3 ):
i = 0; pStr = str()
for k, v in D.items():
i+=1
# use at least 13 chars to make columns line up
pStr += ' {}: {:13s} \t'.format(k,v)
if( i%ncols == 0 ):
print(pStr); pStr = str()
# print whatever is left at the end
print(pStr+'\n'); pStr = None; i = None
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def setColormap( img ):
global cmaps
# Select the desired colormap
try:
printDict( cmaps_new, 3 )
icmap = int(input(' Enter integer key for the colormap = '))
try: nc = int(input(' Number of discrete colors in colormap = '))
except: nc = None
cm = plt.get_cmap( cmaps_new[icmap], nc )
img.set_cmap(cm)
except:
print(' Using default colormap.')
pass
return img
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def setColorbarLims( img, lMax=None, lMin=None ):
# Specify the bounds in the colorbar
if( (lMax is None) or (lMin is None) ):
try:
mm = input(' Enter limits for colorbar: <min> <max> =')
lMin,lMax = list( map(float, mm.split()) )
img.set_clim([lMin,lMax])
except:
pass
else:
try:
lMin = float(lMin); lMax = float(lMax)
img.set_clim([lMin,lMax])
except:
pass
return img
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def userColormapSettings( fig, im, Rmax=None, Rmin=None ):
uticks =None # User-defined ticks. <None> leads to default setting.
eformat=None
im = setColorbarLims( im )
im = setColormap( im )
try:
uticks=list( map(float, input(' Enter ticks separated by comma (empty=default):').split(',')) )
except:
uticks=None
if(Rmax is not None):
if(Rmax<1.e-3):
eformat='%.2e'
cb = fig.colorbar(im, ticks=uticks, format=eformat)
return cb
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def random_marker():
markerList = ['x','s','p','h','d','*','o','+']
nm = len(markerList)
  im = np.random.randint(nm)
mrk = markerList[im]
return mrk
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def marker_stack():
global iMg
markerList = ['+','s','D','o','h','p','*','x']
mrk = markerList[ iMg ]
iMg = min( ( iMg + 1 ), ( len(markerList)-1 ) )
return mrk
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def linestyle_stack(lm=1, il=None):
global iLg
# '-' : solid line style, '--': dashed line style
# '-.' : dash-dot line style, ':' : dotted line style
if( lm == 1 ):
lstyleList = ['-','--','-.',':']
else:
lstyleList = ['-','--'] # ['x','+'] # ['-','--'] #
nlinestyles = len(lstyleList)
if( il is not None and np.isscalar(il) ):
iLg = min( int(il) , (nlinestyles-1) )
lstyle = lstyleList[iLg]
iLg += 1
if( iLg > (nlinestyles-1) ):
iLg = 0
return lstyle
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def color_stack(lm=1, ic=None):
global iCg
'''
Brown '#A52A2A',
DeepPink '#FF1493',
BlueViolet '#8A2BE2',
DarkCyan '#008B8B',
DarkOrange '#FF8C00',
DarkMagenta '#8B008B',
GoldenRod '#DAA520',
SeaGreen '#2E8B57',
OrangeRed '#FF4500',
SlateBlue '#6A5ACD'
'''
if( lm == 1 ):
colorList = ['b','r','c','k','#FF8C00','g','#8B008B',\
'#FF1493','#8A2BE2','#008B8B','m',\
'#2E8B57','#FF4500','#6A5ACD',\
'#A52A2A','#DAA520']
else:
colorList = ['b','b','r','r','c','c','k','k','#FF8C00','#FF8C00','g','g','#8B008B','#8B008B',\
'#FF1493','#FF1493','#8A2BE2','#8A2BE2','#008B8B','#008B8B','m','m',\
'#2E8B57','#2E8B57','#FF4500','#FF4500','#6A5ACD','#6A5ACD',\
'#A52A2A','#A52A2A','#DAA520','#DAA520']
ncolors = len(colorList)
if( ic is not None and np.isscalar(ic) ):
iCg = min( int(ic) , ( ncolors-1 ) )
clr = colorList[iCg]
iCg += 1
if( iCg > (ncolors-1) ):
iCg = 0
return clr
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def plotBar(fig, xb, yb, labelStr, plotStr=["","",""], wb=0.6, errb=0):
ax = addFigAxes( fig )
bars=ax.bar(xb,yb,width=wb, label=labelStr, yerr=errb, ecolor='r')
ax.set_title( plotStr[0], fontsize=22)
ax.set_xlabel(plotStr[1], fontsize=22)
ax.set_ylabel(plotStr[2], fontsize=22); ax.grid(True)
return fig
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def addImagePlot( fig, R, titleStr, gridOn=False, limsOn=False, plotNests=False):
global cmaps
ax = addFigAxes( fig )
im = ax.imshow(np.real(R), aspect='auto')
while (plotNests):
try:
nestParams=list( map(float, input(' Please enter nest location (top left x, top left y, width, height).\n'
' Leave empty to continue plotting.\n').split(',')) )
annotation=str(input(' Please enter annotation for nest.\n'))
except:
break
try:
nesti = patches.Rectangle((nestParams[0],nestParams[1]),nestParams[2],nestParams[3], linewidth=1, edgecolor='r',
facecolor='none')
ax.add_patch(nesti)
ax.annotate(annotation,(nestParams[0],nestParams[1]),textcoords='offset pixels',xytext=(4,-18),color='r',size='medium')
except:
print(' Nest drawing failed.')
ax.set_title(titleStr)
ax.grid(gridOn)
if(limsOn):
cbar = userColormapSettings( fig, im, np.nanmax(R) )
else:
minval = np.nanmin(R); maxval = np.nanmax(R)
minSign = np.sign( minval )
maxSign = np.sign( maxval )
vmin = min( np.abs(minval), np.abs(maxval) )
vmax = max( np.abs(minval), np.abs(maxval) )
if( vmax/(vmin+1.E-5) < 1.5 ):
vmax *= maxSign; vmin = minSign * vmax
else:
vmax *= maxSign; vmin *= minSign
im = setColorbarLims( im, vmax, vmin )
cbar = fig.colorbar(im)
return fig
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def addImagePlotDict(fig, RDict ):
global cmaps
R = dataFromDict('R', RDict, allowNone=False)
ex = dataFromDict('extent', RDict, allowNone=True)
ttl = dataFromDict('title', RDict, allowNone=True)
xlbl = dataFromDict('xlabel', RDict, allowNone=True)
ylbl = dataFromDict('ylabel', RDict, allowNone=True)
gOn = dataFromDict('gridOn', RDict, allowNone=False)
lOn = dataFromDict('limsOn', RDict, allowNone=False)
cm = dataFromDict('cmap', RDict, allowNone=True)
orig = dataFromDict('origin', RDict, allowNone=True)
ax = addFigAxes( fig )
im = ax.imshow(np.real(R), origin=orig, extent=ex, aspect='auto', cmap=cm)
ax.set_title(ttl); ax.set_xlabel(xlbl); ax.set_ylabel(ylbl)
ax.grid(gOn)
if(lOn):
cbar = userColormapSettings( fig, im, np.nanmax(R), np.nanmin(R) )
else:
cbar = fig.colorbar(im)
return fig
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def addToPlot(fig, x,y,labelStr, plotStr=["","",""], logOn=False):
'''
Add variables x,y to a given plot.
Test whether y has multiple columns --> Require different treatment.
'''
ax = addFigAxes( fig )
d = np.size(np.shape(y)) # Test if y has multiple columns
for i in range(d):
if(d==1):
yt = y
else:
yt = y[:,i]; labelStr+='['+str(i)+']'
if(logOn):
lines=ax.semilogy(x,yt,linestyle_stack(1, None), linewidth=2.0, label=labelStr)
#lines=ax.loglog(x,yt,'-', linewidth=1.3, label=labelStr)
else:
lines=ax.plot(x,yt,linestyle_stack(1, None), linewidth=2.0, label=labelStr)
ax.set_title( plotStr[0], fontsize=22)
ax.set_xlabel(plotStr[1], fontsize=28)
ax.set_ylabel(plotStr[2], fontsize=28); ax.grid(True)
return fig
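# Example usage of addToPlot (illustrative sketch only; the file name
# 'profile.dat' and its column layout are assumptions, not part of this module):
#
#   fig = plt.figure()
#   z, u = np.loadtxt('profile.dat', usecols=(0, 1), unpack=True)
#   fig = addToPlot(fig, z, u, 'U profile', ['Velocity profile', 'z [m]', 'U [m/s]'])
#   plt.legend(); plt.show()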
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def plotXX( fig, pDict, ax=None ):
global iCg, iMg, iLg
fileStr = dataFromDict('filename', pDict, allowNone=False)
logOn = dataFromDict('logOn', pDict, allowNone=False)
llogOn = dataFromDict('llogOn', pDict, allowNone=False)
Cx = dataFromDict('Cx', pDict, allowNone=False)
Cy = dataFromDict('Cy', pDict, allowNone=False)
revAxes = dataFromDict('revAxes', pDict, allowNone=False)
linemode = dataFromDict('lm', pDict, allowNone=False)
linewidth= dataFromDict('lw', pDict, allowNone=False)
ylims = dataFromDict('ylims', pDict, allowNone=True)
xlims = dataFromDict('xlims', pDict, allowNone=True)
reset = dataFromDict('reset', pDict, allowNone=True)
try: x = np.loadtxt(fileStr)
except: x = np.loadtxt(fileStr,delimiter=',')
if( ax is None ):
ax = addFigAxes( fig )
# Reset global integer for color, marker and linestyle.
if( reset ):
iCg = 0; iMg = 0; iLg = 0
labelStr = labelString( fileStr )
#lStr = fileStr.rsplit(".", 1)[0] # Remove the ".dat"
#rStr = lStr.rsplit("_")[-1]
#tStr = lStr.split("/", 2)
#if( tStr[0] is "." ):
# lStr = tStr[1]
#else:
# lStr = tStr[0]
#labelStr = lStr+"_"+rStr
# Print each column separately
amax = 0.
Ny = (x.shape[1]-1)
for i in range(Ny):
if( Ny == 1 ):
labelXX = labelStr
else:
labelXX = labelStr+'['+str(i)+']'
if( revAxes ):
yp = Cy*x[:,0]; xp = Cx*x[:,i+1]; dp = xp
else:
xp = Cx*x[:,0]; yp = Cy*x[:,i+1]; dp = yp
if( logOn or llogOn ):
if( revAxes ):
xp = np.abs( xp )
plotf = ax.semilogx
else:
yp = np.abs( yp )
plotf = ax.semilogy
if( llogOn ):
plotf = ax.loglog
else:
plotf = ax.plot
lines = plotf( xp, yp, \
linestyle_stack(lm=linemode), linewidth=linewidth, \
label=labelXX, color=color_stack(lm=linemode))
lmax = np.abs(np.max(dp)) # Local maximum
if( lmax > amax ): amax = lmax
#if( amax <5.e-4 and revAxes):
# if( revAxes ): ax.xaxis.set_major_formatter(FormatStrFormatter('%.2e'))
# else: ax.yaxis.set_major_formatter(FormatStrFormatter('%.2e'))
ax.set_ybound(lower=ylims[0], upper=ylims[1] )
ax.set_xbound(lower=xlims[0], upper=xlims[1] )
ax.set_xlabel(" ")
ax.set_ylabel(" ")
return fig
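# Example of the pDict expected by plotXX (a sketch: the key names come from the
# dataFromDict calls above, while 'data.dat' and the chosen values are assumptions):
#
#   pDict = dict(filename='data.dat', logOn=False, llogOn=False, Cx=1.0, Cy=1.0,
#                revAxes=False, lm=1, lw=2.0, ylims=[None, None], xlims=[None, None],
#                reset=True)
#   fig = plotXX(plt.figure(), pDict)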
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def ciDataFromFile( filename ):
try: x = np.loadtxt(filename)
except: x = np.loadtxt(filename,delimiter=',')
nrows, ncols = x.shape
#print(' nrows, ncols = {}, {}'.format(nrows,ncols))
if( ncols > 3 ):
# Copy values and clear memory
d = x[:,0]; v = x[:,1]; vl = x[:,2]; vu = x[:,3]
elif( ncols == 2 ):
d = x[:,0]; v = x[:,1]; vl = x[:,1]; vu = x[:,1]
else:
msg = '''
Error! ncols has a strange value {}.
The data must be in [x, v, v_lower, v_upper, (possibly something else)] format.
Or alternatively [x,v] format in which case no confidence intervals will be present.
Exiting...'''.format( ncols )
sys.exit(msg)
# clear memory
x = None
return d, v, vl, vu
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def ciScaleVals( d, v, vl, vu, Cx, Cy, revAxes ):
if( revAxes ):
xx = Cx*v
vl *= Cx; vu *= Cx
d *= Cy
yy = d
else:
yy = Cy*v
vl *= Cy; vu *= Cy
d *= Cx
xx = d
return d, xx, yy, vl, vu
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def ciDiffVals( x1, y1, v1l, v1u, x2, y2, v2l, v2u, revAxes ):
# Because it is possible that all v2u > v1u (or v2u < v1u) we have to prepare for that.
id2 = (v2u>v1u)
id1 = ~id2
id2On=False; id1On=False
if( np.count_nonzero(id2) > 0 ):
id2On = True
v1mu = np.abs( np.mean(v1u[id2]) )
if( np.count_nonzero(id1) > 0 ):
id1On = True
v1ml = np.abs( np.mean(v1l[id1]) )
if( revAxes ):
#if( id2On ): x1[id2] =np.maximum( ((v2l[id2]-v1u[id2])/v1mu)*100., 0.) # If diff is pos, there's overlap
#if( id1On ): x1[id1] =np.minimum( ((v2u[id1]-v1l[id1])/v1ml)*100., 0.) # If diff is neg, -- " --
if( id2On ): x1[id2] =np.maximum( (v2l[id2]-v1u[id2]), 0.) # If diff is pos, there's overlap
if( id1On ): x1[id1] =np.minimum( (v2u[id1]-v1l[id1]), 0.) # If diff is neg, -- " --
y1 = 0.5*( y1 + y2 )
dm = np.mean( np.abs(x1) )
else:
#if( id2On ): y1[id2] =np.maximum( ((v2l[id2]-v1u[id2])/v1mu)*100., 0.) # If diff is pos, there's overlap
#if( id1On ): y1[id1] =np.minimum( ((v2u[id1]-v1l[id1])/v1ml)*100., 0.) # If diff is neg, -- " --
if( id2On ): y1[id2] =np.maximum( (v2l[id2]-v1u[id2]), 0.) # If diff is pos, there's overlap
if( id1On ): y1[id1] =np.minimum( (v2u[id1]-v1l[id1]), 0.) # If diff is neg, -- " --
x1 = 0.5*( x1 + x2 )
dm = np.mean( np.abs(y1) )
return x1, y1, dm
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def labelString( fname ):
ls = fname
if( "." in ls ):
ls = ls.rsplit(".", 1)[0]
if( "/" in ls ):
sL = ls.split('/')
if( len(sL) > 1 ):
lL = list(map( len, sL ))
if( (lL[0] > 1) and ("." not in sL[0]) ):
ls = sL[0]
elif((lL[1] > 1) and ("." not in sL[1]) ):
ls = sL[1]
else:
ls = sL[-1]
return ls
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def plotCiXY( fig, pDict ):
fn = dataFromDict('filename', pDict, allowNone=False)
Cx = dataFromDict('Cx', pDict, allowNone=True)
Cy = dataFromDict('Cy', pDict, allowNone=True)
linemode= dataFromDict('lm', pDict, allowNone=False)
logOn = dataFromDict('logOn', pDict, allowNone=True)
revAxes = dataFromDict('revAxes', pDict, allowNone=True)
ylims = dataFromDict('ylims', pDict, allowNone=True)
xlims = dataFromDict('xlims', pDict, allowNone=True)
labelStr = labelString( fn )
if( Cx is None ): Cx = 1.
if( Cy is None ): Cy = 1.
d, v, v_l, v_u = ciDataFromFile( fn )
ax = addFigAxes( fig )
d, xp, yp, v_l, v_u = ciScaleVals( d, v, v_l, v_u, Cx, Cy, revAxes )
if( revAxes ): xlb = 'V(d)'; ylb = 'd'
else: ylb = 'V(d)'; xlb = 'd'
if( logOn ):
if( revAxes ):
plotf = ax.semilogx
fillbf = ax.fill_betweenx
else:
plotf = ax.semilogy
fillbf= ax.fill_between
else:
plotf = ax.plot
if( revAxes ):
fillbf = ax.fill_betweenx
else:
fillbf = ax.fill_between
lines = plotf( xp, yp, linestyle_stack(lm=linemode), lw=3., \
label=labelStr, color=color_stack(lm=linemode))
linef = fillbf( d, v_u, v_l, facecolor='white', edgecolor='white', alpha=0.25)
ax.set_ybound(lower=ylims[0], upper=ylims[1] )
ax.set_xbound(lower=xlims[0], upper=xlims[1] )
ax.set_xlabel(xlb)
ax.set_ylabel(ylb)
return fig
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def plotCiDiffXY( fig, pDict ):
f1 = dataFromDict('fileref', pDict, allowNone=False)
fn = dataFromDict('filename', pDict, allowNone=False)
Cx = dataFromDict('Cx', pDict, allowNone=True)
Cy = dataFromDict('Cy', pDict, allowNone=True)
linemode= dataFromDict('lm', pDict, allowNone=False)
logOn = dataFromDict('logOn', pDict, allowNone=True)
revAxes = dataFromDict('revAxes', pDict, allowNone=True)
ylims = dataFromDict('ylims', pDict, allowNone=True)
xlims = dataFromDict('xlims', pDict, allowNone=True)
labelStr = labelString( fn )
if( Cx is None ): Cx = 1.
if( Cy is None ): Cy = 1.
d1, v1, v1_l, v1_u = ciDataFromFile( f1 )
d2, v2, v2_l, v2_u = ciDataFromFile( fn )
if( d2[-1] != d1[-1] ):
if( d2[-1] > d1[-1] ): # Quick and dirty handling for cases when d2[-1] > d1[-1]
idx = ( d2 <= d1[-1] ) # Take the terms where values match
d2 = d2[idx]; v2 = v2[idx]; v2_l = v2_l[idx]; v2_u = v2_u[idx] # Shorten
# Compute the ratio to match the resolutions (roughly)
r = np.round( (d2[1]-d2[0])/(d1[1]-d1[0]) ).astype(int)
# Use the matching indecies only
idm = ( np.mod((np.arange(len(d1))+1) , r) == 0 )
d1 = d1[idm]; v1 = v1[idm]; v1_l = v1_l[idm]; v1_u = v1_u[idm]
Lm = min( len(v2), len(v1) )
d2 = d2[:Lm]; v2 = v2[:Lm]; v2_l = v2_l[:Lm]; v2_u = v2_u[:Lm]
d1 = d1[:Lm]; v1 = v1[:Lm]; v1_l = v1_l[:Lm]; v1_u = v1_u[:Lm]
d1, x1, y1, v1_l, v1_u = ciScaleVals( d1, v1, v1_l, v1_u, Cx, Cy, revAxes )
d2, x2, y2, v2_l, v2_u = ciScaleVals( d2, v2, v2_l, v2_u, Cx, Cy, revAxes )
xp, yp, dm = ciDiffVals( x1, y1, v1_l, v1_u, x2, y2, v2_l, v2_u, revAxes )
if( revAxes ): xlb = 'D(d)'; ylb = 'd'
else: ylb = 'D(d)'; xlb = 'd'
ax = addFigAxes( fig )
if( logOn ):
if( revAxes ):
plotf = ax.semilogx
fillbf = ax.fill_betweenx
else:
plotf = ax.semilogy
fillbf= ax.fill_between
else:
plotf = ax.plot
if( revAxes ):
fillbf = ax.fill_betweenx
else:
fillbf = ax.fill_between
lines = plotf( xp, yp, linestyle_stack(lm=linemode), lw=3., \
label=labelStr+r': $\left< | \Delta | \right>$={:.2g}'.format(dm) , color=color_stack(lm=linemode))
#label=r': $\left< | \Delta | \right>$={:.2f}'.format(dm) , color=color_stack(lm=linemode))
#linef = fillbf( d, v_u, v_l, facecolor='gray', alpha=0.25)
ax.set_ybound(lower=ylims[0], upper=ylims[1] )
ax.set_xbound(lower=xlims[0], upper=xlims[1] )
ax.set_xlabel(xlb)
ax.set_ylabel(ylb)
return fig
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def plotDY( fig, fileStr, dim=3, revAxes=False ):
dim = min( dim, 3 ); dim=max(dim , 1)
x = np.loadtxt(fileStr)
r = np.zeros( len(x[:,0]), float )
for i in range(dim):
x0 = np.min( x[:,i] )
r += (x[:,i]-x0)**2
d = np.sqrt(r)
ax = addFigAxes( fig )
labelStr = labelString( fileStr )
# Print each column separately
for i in range((x.shape[1]-dim)):
if( revAxes ):
lines=ax.plot(x[:,i+dim],d[:],marker=marker_stack(),
color=color_stack(), fillstyle='none', ls='None' , label=labelStr+'['+str(i)+']' )
else:
lines=ax.plot(d[:],x[:,i+dim],marker=marker_stack(), mew=1.7,
color=color_stack(), fillstyle='none', ls='None', label=labelStr+'['+str(i)+']')
if( revAxes ):
ax.set_ylabel(" D(X,Y,Z) "); ax.set_xlabel(" F(D) ")
else:
ax.set_xlabel(" D(X,Y,Z) "); ax.set_ylabel(" F(D) ")
return fig
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def plotYX( fig, fileStr, logOn ):
x = np.loadtxt(fileStr)
y = x[:,1]
ax = addFigAxes( fig )
# Print each column separately
for i in range((x.shape[1]-3)):
if( logOn ):
lines=ax.semilogy(np.abs(x[:,i+3]), y[:] , linewidth=1.1 , label=fileStr+'_'+str(i))
else:
lines=ax.plot(x[:,i+3], y[:], linewidth=1.1, label=fileStr+'_'+str(i) )
ax.set_xlabel(" F(Y) ")
ax.set_ylabel(" Y ")
return fig
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def fullPlotXY(fig,fileStr,figStr,xlabelStr,ylabelStr,lwidth=1.2,fsize=16,logOn=False):
x = np.loadtxt(fileStr)
y = x[:,1]
ax = addFigAxes( fig )
# Print each column separately
for i in range((x.shape[1]-3)):
if( logOn ):
      lines=ax.semilogy(np.abs(x[:,i+3]), y[:] , linewidth=lwidth , label=figStr+'_'+str(i))
else:
lines=ax.plot(x[:,i+3], y[:], linewidth=lwidth, label=figStr+'_'+str(i) )
ax.set_xlabel(xlabelStr, fontsize=fsize)
ax.set_ylabel(ylabelStr, fontsize=fsize)
return fig
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def plotCSV( fig, fileStr, revAxes=False, magY=False, globalValues=False ):
global gxI
global gyLst
fl = open( fileStr, 'r' )
line = fl.readline() # Read first line which contains all variable names as str.
fl.close()
varList = line.split(',')
for i in range(len(varList)):
varList[i]=varList[i].strip("\"")
x = np.loadtxt(fileStr, delimiter=',', skiprows=1)
if( not globalValues or (globalValues and gxI == -1) ):
n = 0
for v in varList:
print(" => ["+str(n)+"]: "+ v)
n+=1
try:
xI = int(input(" X [index] = "))
except:
sys.exit(' No selection. Exiting program. ')
e = input(" Y [List] = ")
if( e == ''):
select=input(" Select All? [1-9]=> Yes, [Empty]=> No: ")
if( select == ''):
sys.exit(' Exiting program.')
else:
        yLst = list(range(len(varList)))
else:
try: yLst = list( map( int, e.split(',') ) )
except: sys.exit(' Bad entry. Exiting program.')
if( globalValues and gxI == -1 ):
gxI = xI # Store the global values
gyLst = yLst
else: # (globalValues and gxI /= -1)
#print ' Utilizing global values '
xI = gxI # Extract the global values
yLst = gyLst
labelStr = fileStr.split(".")[0]
ax = addFigAxes( fig )
if( not magY ):
yLbl = "" # Start with empty label
for yJ in yLst:
yLbl = yLbl+varList[yJ]+"; " # Compile label
if( revAxes ):
lines=ax.plot(x[:,yJ],x[:,xI],'-', markersize=6, linewidth=1.5, label=labelStr+": "+varList[yJ])
else:
lines=ax.plot(x[:,xI],x[:,yJ],'o-', markersize=6, linewidth=1.5, label=labelStr+": "+varList[yJ])
#writeXY( x[:,xI],x[:,yJ], 'out.dat' )
else:
yt = np.zeros(len(x[:,0]))
yLbl = " Mag(y[:]) " # Set fixed label
for yJ in yLst:
yt += x[:,yJ]**2
if( revAxes ):
lines=ax.plot(np.sqrt(yt),x[:,xI],'-', markersize=6, linewidth=1.5, label=labelStr)
else:
lines=ax.plot(x[:,xI],np.sqrt(yt),'o-', markersize=6, linewidth=1.5, label=labelStr)
if( revAxes ):
ax.set_ylabel(varList[xI]); ax.set_xlabel(yLbl)
else:
ax.set_xlabel(varList[xI]); ax.set_ylabel(yLbl)
return fig
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def extractFromCSV( csvFile, varNames ):
fl = open( csvFile, 'r' )
line = fl.readline() # Read first line which contains all variable names as str.
fl.close()
varList = line.split(',')
for i in range(len(varList)):
varList[i]=varList[i].strip("\"")
varList[i]=varList[i].strip("\""+"\n") # This is in case the line contain '\n'
Ix = []
for varStr in varNames:
try: Ix.append( varList.index(varStr) )#; print "Index List= {}".format(Ix)
except: None
if (len(Ix) == 0):
print("None of the variables in {0} were found in {1}".format(varNames,varList))
print("Exiting program. ")
sys.exit(1)
x = np.loadtxt(csvFile, delimiter=',', skiprows=1)
data = []
for jcol in Ix:
data.append( np.array(x[:,jcol]) )
return np.array(data)
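# Example usage of extractFromCSV (sketch; 'results.csv' and the header names
# 'Time'/'Pressure' are assumptions about the input file, not fixed names):
#
#   t, p = extractFromCSV('results.csv', ['Time', 'Pressure'])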
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def extractFromRAW( rawFile, varNames ):
fl = open( rawFile, 'r' )
# Read (first or second) line which contains all var names as str.
while 1:
line = fl.readline()
if('#' and 'x' in line):
break
fl.close()
varList = line.split(); varList.remove('#')
#print varList
Ix = []
for varStr in varNames:
try: Ix.append( varList.index(varStr) )#; print "Index List= {}".format(Ix)
except: None
#print Ix
if (len(Ix) == 0):
print("None of the variables in {0} were found in {1}".format(varNames,varList))
print("Exiting program. ")
sys.exit(1)
x = np.loadtxt(rawFile)
data = []
for jcol in Ix:
data.append(x[:,jcol])
return data
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def addQuiver( X, Y, Ux, Uy , fc, labelStr, titleStr=" " ):
plt.figure()
Q = plt.quiver(X[::fc, ::fc],Y[::fc, ::fc],Ux[::fc, ::fc],Uy[::fc, ::fc],\
pivot='tail', color='b', units='xy', scale=1.5 )
#qk = plt.quiverkey(Q, 0.9, 1.05, 1, labelStr, labelpos='E',fontproperties={'weight': 'bold'})
plt.title(titleStr)
return Q
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def addContourf( X, Y, Q, CfDict=None ):
Xdims = np.array(X.shape)
figDims = 12.*(Xdims[::-1].astype(float)/np.max(Xdims))
#figDims = (11,11)
#figDims = (9,11)
fig = plt.figure(figsize=figDims)
#fig, ax = plt.subplots()
ax = addFigAxes( fig )
# Default values
labelStr = ' Q(X,Y) '
titleStr = ' Title: Q(X,Y) '
cm = None
vx = None
vn = None
levels = None
N = 12
if( CfDict is not None ):
titleStr = dataFromDict('title', CfDict, allowNone=False)
labelStr = dataFromDict('label', CfDict, allowNone=False)
cm = dataFromDict('cmap', CfDict, allowNone=True )
N = dataFromDict('N', CfDict, allowNone=True )
vn = dataFromDict('vmin', CfDict, allowNone=True )
vx = dataFromDict('vmax', CfDict, allowNone=True )
levels = dataFromDict('levels', CfDict, allowNone=True )
if( N is None ): N = 12
#print(' vmax = {}, vmin = {} '.format(vx,vn))
#levels = [-1e-6, -1e-7, 0, 1e-7, 1e-6]
#CO = plt.contourf(X,Y,Q, levels )
if( levels is not None ): CO = ax.contourf(X,Y,Q, levels, cmap=cm, vmin=vn, vmax=vx )
else: CO = ax.contourf(X,Y,Q, N , cmap=cm, vmin=vn, vmax=vx )
ax.set_title( titleStr )
cbar = fig.colorbar(CO)
if( vx is not None ): cbar.vmax = vx
if( vn is not None ): cbar.vmin = vn
cbar.ax.set_ylabel(labelStr, fontsize=20, fontstyle='normal', fontweight='book', fontname='serif')
return CO
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def addScatterPlot(fig, X, Y, C, fc=4 ):
ax = addFigAxes( fig )
dims = np.array(np.shape(X))//fc # NOTE: integer division necessary
N = np.prod(dims)
ax.scatter(X[::fc,::fc].reshape(N), Y[::fc,::fc].reshape(N), s=10, \
c=C[::fc,::fc].reshape(N), marker=',', cmap=plt.cm.rainbow)
return fig
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def arrow2DPlot( fig, fileStr , scale=1.0, ic=0, fillOn=True ):
d = np.loadtxt(fileStr)
labelStr = fileStr.split(".")[0]
try:
x = d[:,0]; y =d[:,1]; dx = d[:,2]; dy =d[:,3]
except:
print(' The file must contain (at least) 4 columns: x, y, dx, dy ')
sys.exit(1)
ax = addFigAxes( fig )
lx = max(scale, 0.825 )*0.0008
lx = min( lx, 0.0016 )
for i in range( len(x) ):
ax.arrow( x[i], y[i], scale*dx[i], scale*dy[i], color=color_stack(ic) , width=lx, \
head_width=5.85*lx, head_length=2.85*lx, overhang=0.25, fill=fillOn )
return fig
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def writeXY( x , y , fileName ):
f = open( fileName ,'w') #'w' = for writing
for i in range(len(x)):
f.write("%13.7e \t %13.7e \n" %(x[i], y[i]) )
print('Writing file '+fileName)
f.close()
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def wavePlot( fig, fileStr, logOn ):
x = np.loadtxt(fileStr)
ax = addFigAxes( fig )
labelStr = fileStr.split(".")[0]
# Print each column separately
Ny = (x.shape[1]-1)
for i in range(Ny):
if( Ny == 1 ):
labelXX = labelStr
else:
labelXX = labelStr+'['+str(i)+']'
if( logOn ):
#lines=ax.loglog(x[:,0],np.abs(x[:,i+1]),'o-', linewidth=1.3 , label=labelXX)
lines=ax.semilogy(x[:,0],np.abs(x[:,i+1]),'-', linewidth=1.1 , label=labelXX)
else:
lines=ax.plot(x[:,0],x[:,i+1],'o', linewidth=1.1, label=labelXX)
ax.set_xlabel(" X ")
ax.set_ylabel(" Y ")
return fig
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def userLabels( pFig ):
#print('A) pFig.get_axes()[] ')
ax=pFig.get_axes()[0] # Get a handle on the first axes
#print('B) pFig.get_axes()[] ')
#pl.rc('text', usetex=True )
#pl.rc('xtick', labelsize=24)
#pl.rc('ytick', labelsize=24)
titleStr = strEntry( " Plot Title = " , " " )
yLbl = strEntry( " Y Label = " , " Y " )
xLbl = strEntry( " X Label = " , " X " )
"""
fontname: [ FONTNAME | 'serif' | 'sans-serif' | 'cursive' | 'fantasy' | 'monospace' ]
fontsize: [ size in points ]
fontweight: [ a numeric value in range 0-1000 | 'ultralight' | 'light' | 'normal' | 'regular' | 'book' | 'medium' | 'roman' | 'semibold' |
'demibold' | 'demi' | 'bold' | 'heavy' | 'extra bold' | 'black' ]
fontstyle: [ 'normal' | 'italic' | 'oblique']
"""
ax.set_title(titleStr, fontsize=20, fontstyle='normal', fontweight='demibold', fontname='serif')
ax.set_ylabel(yLbl, fontsize=20, fontstyle='normal', fontweight='book', fontname='serif')
ax.set_xlabel(xLbl, fontsize=20, fontstyle='normal', fontweight='book', fontname='serif')
return pFig
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def strEntry( questionStr , defaultStr ):
try:
oStr = input(str(questionStr))
except:
oStr = str(defaultStr)
return oStr
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def numEntry( questionStr , defaultValue ):
try:
value = input(str(questionStr))
except:
value = float(defaultValue)
return value
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def maxValues( fileStr ):
x = np.loadtxt(fileStr)
mv = []
for i in range(x.shape[1]):
mv.append(np.max(x[:,i]))
return mv
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# ADDED MY MONA KURPPA, 2016:
# =*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
def addToPlot_marker(fig, x,y,labelStr, plotStr=["","",""], logOn=False, marker='-'):
'''
Add variables x,y to a given plot.
Test whether y has multiple columns --> Require different treatment.
e.g. marker = '-' or '--' or 'v-'
'''
ax = addFigAxes( fig )
d = np.size(np.shape(y)) # Test if y has multiple columns
for i in range(d):
if(d==1):
yt = y
else:
yt = y[:,i]; labelStr+='['+str(i)+']'
if(logOn):
lines=ax.loglog(x,yt,marker, linewidth=1.3, label=labelStr)
else:
lines=ax.plot(x,yt,marker, linewidth=1.6, label=labelStr)
ax.set_title( plotStr[0], fontsize=22)
ax.set_xlabel(plotStr[1], fontsize=22)
ax.set_ylabel(plotStr[2], fontsize=22); ax.grid(True)
return fig
| mit | -5,993,872,331,603,090,000 | 29.751653 | 140 | 0.53347 | false | 2.426134 | false | false | false |
giefferre/gestpypay | gestpypay/gestpypay.py | 1 | 11978 | # coding=utf-8
'''
GestPYPay 1.0.0
(C) 2012 Gianfranco Reppucci <[email protected]>
https://github.com/giefferre/gestpypay
GestPYPay is an implementation in Python of GestPayCrypt and
GestPayCryptHS italian bank Banca Sella Java classes. It allows to
connect to online credit card payment GestPay.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public License
version 2.1 as published by the Free Software Foundation.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details at
http://www.gnu.org/copyleft/lgpl.html
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
'''
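# Minimal usage sketch (illustrative only; the shop login, amount and currency
# code 242/EUR below are placeholder assumptions, not real credentials):
#
#   gp = GestPayCrypt()
#   gp.SetShopLogin("MYSHOPLOGIN")
#   gp.SetCurrency("242")
#   gp.SetAmount("100.00")
#   gp.SetShopTransactionID("order-0001")
#   if gp.Encrypt():
#       encrypted = gp.GetEncryptedString()
#   else:
#       print gp.GetErrorCode(), gp.GetErrorDescription()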
import urllib
import re
import requests
def empty(variable):
if not variable:
return True
return False
class GestPayCrypt:
# attributes
ShopLogin = None # Shop Login (e.g. Codice Esercente)
Currency = None # Currency code (242 = EUR)
Amount = None # Transaction Amount (e.g. 100.00)
ShopTransactionID = None # Merchant transaction id
CardNumber = None # Credit Card Number
ExpMonth = None # Credit Card Expiration Month
ExpYear = None # Credit Card Expiration Year
BuyerName = None # Client Name and Surname
BuyerEmail = None # Client Email Address
Language = None # Language
CustomInfo = None # Additional Informations
AuthorizationCode = None # Transaction Authorization Code
ErrorCode = None # Error code
ErrorDescription = None # Error description
BankTransactionID = None # GestPay transaction id
AlertCode = None # Alert code
AlertDescription = None # Alert description
EncryptedString = None # Crypted string
ToBeEncript = None # String to be encrypted
Decrypted = None # Decrypted string
TransactionResult = None # Transaction result
ProtocolAuthServer = None # 'http' or 'https'
DomainName = None # GetPay Domain
separator = None # Separator string for crypted string
Version = None
Min = None
CVV = None
country = None
vbvrisp = None
vbv = None
# constructor
def __init__(self, *args, **kwargs):
self.ShopLogin = ""
self.Currency = ""
self.Amount = ""
self.ShopTransactionID = ""
self.CardNumber = ""
self.ExpMonth = ""
self.ExpYear = ""
self.BuyerName = ""
self.BuyerEmail = ""
self.Language = ""
self.CustomInfo = ""
self.AuthorizationCode = ""
self.ErrorCode = "0"
self.ErrorDescription = ""
self.BankTransactionID = ""
self.AlertCode = ""
self.AlertDescription = ""
self.EncryptedString = ""
self.ToBeEncrypt = ""
self.Decrypted = ""
self.ProtocolAuthServer = "http"
self.DomainName = "ecomm.sella.it"
self.ScriptEnCrypt = "/CryptHTTP/Encrypt.asp"
self.ScriptDecrypt = "/CryptHTTP/Decrypt.asp"
self.separator = "*P1*"
self.Version = "1.0"
self.Min = ""
self.CVV = ""
self.country = ""
self.vbvrisp = ""
self.vbv = ""
self.debug = False
# write methods
def SetShopLogin(self, val):
self.ShopLogin = val
def SetCurrency(self, val):
self.Currency = val
def SetAmount(self, val):
self.Amount = val
def SetShopTransactionID(self, val):
self.ShopTransactionID = urllib.quote_plus(val.strip())
def SetCardNumber(self, val):
self.CardNumber = val
def SetExpMonth(self, val):
self.ExpMonth = val
def SetExpYear(self, val):
self.ExpYear = val
def SetMIN(self, val):
self.Min = val
def SetCVV(self, val):
self.CVV = val
def SetBuyerName(self, val):
self.BuyerName = urllib.quote_plus(val.strip())
def SetBuyerEmail(self, val):
self.BuyerEmail = val.strip()
def SetLanguage(self, val):
self.Language = val.strip()
def SetCustomInfo(self, val):
self.CustomInfo = urllib.quote_plus(val.strip())
def SetEncryptedString(self, val):
self.EncryptedString = val
# read only methods
def GetShopLogin(self):
return self.ShopLogin
def GetCurrency(self):
return self.Currency
def GetAmount(self):
return self.Amount
def GetCountry(self):
return self.country
def GetVBV(self):
return self.vbv
def GetVBVrisp(self):
return self.vbvrisp
def GetShopTransactionID(self):
return urllib.unquote_plus(self.ShopTransactionID)
def GetBuyerName(self):
return urllib.unquote_plus(self.BuyerName)
def GetBuyerEmail(self):
return self.BuyerEmail
def GetCustomInfo(self):
return urllib.unquote_plus(self.CustomInfo)
def GetAuthorizationCode(self):
return self.AuthorizationCode
def GetErrorCode(self):
return self.ErrorCode
def GetErrorDescription(self):
return self.ErrorDescription
def GetBankTransactionID(self):
return self.BankTransactionID
def GetTransactionResult(self):
return self.TransactionResult
def GetAlertCode(self):
return self.AlertCode
def GetAlertDescription(self):
return self.AlertDescription
def GetEncryptedString(self):
return self.EncryptedString
# encryption / decryption
def Encrypt(self):
err = ""
self.ErrorCode = "0"
self.ErrorDescription = ""
self.ToBeEncrypt = ""
if empty(self.ShopLogin):
self.ErrorCode = "546"
self.ErrorDescription = "IDshop not valid"
return False
if empty(self.Currency):
self.ErrorCode = "552"
self.ErrorDescription = "Currency not valid"
return False
if empty(self.Amount):
self.ErrorCode = "553"
self.ErrorDescription = "Amount not valid"
return False
if empty(self.ShopTransactionID):
self.ErrorCode = "551"
self.ErrorDescription = "Shop Transaction ID not valid"
return False
self.ToEncrypt(self.CVV, "PAY1_CVV")
self.ToEncrypt(self.Min, "PAY1_MIN")
self.ToEncrypt(self.Currency, "PAY1_UICCODE")
self.ToEncrypt(self.Amount, "PAY1_AMOUNT")
self.ToEncrypt(self.ShopTransactionID, "PAY1_SHOPTRANSACTIONID")
self.ToEncrypt(self.CardNumber, "PAY1_CARDNUMBER")
self.ToEncrypt(self.ExpMonth, "PAY1_EXPMONTH")
self.ToEncrypt(self.ExpYear, "PAY1_EXPYEAR")
self.ToEncrypt(self.BuyerName, "PAY1_CHNAME")
self.ToEncrypt(self.BuyerEmail, "PAY1_CHEMAIL")
self.ToEncrypt(self.Language, "PAY1_IDLANGUAGE")
self.ToEncrypt(self.CustomInfo, "")
self.ToBeEncrypt = self.ToBeEncrypt.replace(" ", "+")
uri = self.ScriptEnCrypt + "?a=" + self.ShopLogin + "&b=" + self.ToBeEncrypt[len(self.separator):]
full_url = self.ProtocolAuthServer + "://" + self.DomainName + uri
if self.debug:
print "URL richiesta: " + full_url + "\n"
self.EncryptedString = self.HttpGetResponse(full_url, True)
if self.EncryptedString == -1:
return False
if self.debug:
print "Stringa criptata: " + self.EncryptedString + "\n"
return True
def Decrypt(self):
err = ""
self.ErrorCode = "0"
self.ErrorDescription = ""
if empty(self.ShopLogin):
self.ErrorCode = "546"
self.ErrorDescription = "IDshop not valid"
return False
if empty(self.EncryptedString):
self.ErrorCode = "1009"
self.ErrorDescription = "String to Decrypt not valid"
return False
uri = self.ScriptDecrypt + "?a=" + self.ShopLogin + "&b=" + self.EncryptedString
full_url = self.ProtocolAuthServer + "://" + self.DomainName + uri
if self.debug:
print "URL richiesta: " + full_url + "\n"
self.Decrypted = self.HttpGetResponse(full_url, False)
if self.Decrypted == -1:
return False
elif empty(self.Decrypted):
self.ErrorCode = "9999"
self.ErrorDescription = "Empty decrypted string"
return False
self.Decrypted = self.Decrypted.replace("+", " ")
if self.debug:
print "Stringa decriptata: " + self.Decrypted + "\n"
self.Parsing()
return True
# helpers
def ToEncrypt(self, value, tagvalue):
equal = "=" if tagvalue else ""
if not empty(value):
self.ToBeEncrypt += "%s%s%s%s" % (self.separator, tagvalue, equal, value)
def HttpGetResponse(self, url, crypt):
response = ""
req = "crypt" if crypt else "decrypt"
line = self.HttpGetLine(url)
if line == -1:
return -1
if self.debug:
print line
reg = re.compile("#" + req + "string#([\w\W]*)#\/" + req + "string#").findall(line)
err = re.compile("#error#([\w\W]*)#\/error#").findall(line)
if self.debug:
print url
print req
print line
print reg
print err
if len(reg) > 0:
response = reg[0].strip()
elif len(err) > 0:
err = err[0].split('-')
if empty(err[0]) and empty(err[1]):
self.ErrorCode = "9999"
self.ErrorDescription = "Unknown error"
else:
self.ErrorCode = err[0].strip()
self.ErrorDescription = err[1].strip()
return -1
else:
self.ErrorCode = "9999"
self.ErrorDescription = "Response from server not valid"
return -1
return response
def HttpGetLine(self, url):
try:
r = requests.get(url)
except Exception, e:
print e
self.ErrorCode = "9999"
self.ErrorDescription = "Impossible to connect to host: " + host
return -1
output = ""
for line in r.iter_lines():
output = line
break
return output
def Parsing(self):
keyval = self.Decrypted.split(self.separator)
for tagPAY1 in keyval:
tagPAY1val = tagPAY1.split("=")
if re.search("^PAY1_UICCODE", tagPAY1):
self.Currency = tagPAY1val[1]
elif re.search("^PAY1_AMOUNT", tagPAY1):
self.Amount = tagPAY1val[1]
elif re.search("^PAY1_SHOPTRANSACTIONID", tagPAY1):
self.ShopTransactionID = tagPAY1val[1]
elif re.search("^PAY1_CHNAME", tagPAY1):
self.BuyerName = tagPAY1val[1]
elif re.search("^PAY1_CHEMAIL", tagPAY1):
self.BuyerEmail = tagPAY1val[1]
elif re.search("^PAY1_AUTHORIZATIONCODE", tagPAY1):
self.AuthorizationCode = tagPAY1val[1]
elif re.search("^PAY1_ERRORCODE", tagPAY1):
self.ErrorCode = tagPAY1val[1]
elif re.search("^PAY1_ERRORDESCRIPTION", tagPAY1):
self.ErrorDescription = tagPAY1val[1]
elif re.search("^PAY1_BANKTRANSACTIONID", tagPAY1):
self.BankTransactionID = tagPAY1val[1]
elif re.search("^PAY1_ALERTCODE", tagPAY1):
self.AlertCode = tagPAY1val[1]
elif re.search("^PAY1_ALERTDESCRIPTION", tagPAY1):
self.AlertDescription = tagPAY1val[1]
elif re.search("^PAY1_CARDNUMBER", tagPAY1):
self.CardNumber = tagPAY1val[1]
elif re.search("^PAY1_EXPMONTH", tagPAY1):
self.ExpMonth = tagPAY1val[1]
elif re.search("^PAY1_EXPYEAR", tagPAY1):
self.ExpYear = tagPAY1val[1]
elif re.search("^PAY1_COUNTRY", tagPAY1):
self.ExpYear = tagPAY1val[1]
elif re.search("^PAY1_VBVRISP", tagPAY1):
self.ExpYear = tagPAY1val[1]
elif re.search("^PAY1_VBV", tagPAY1):
self.ExpYear = tagPAY1val[1]
elif re.search("^PAY1_IDLANGUAGE", tagPAY1):
self.Language = tagPAY1val[1]
elif re.search("^PAY1_TRANSACTIONRESULT", tagPAY1):
self.TransactionResult = tagPAY1val[1]
else:
self.CustomInfo += tagPAY1 + self.separator
self.CustomInfo = self.CustomInfo[:-len(self.separator)]
class GestPayCryptHS(GestPayCrypt):
# constructor
def __init__(self, *args, **kwargs):
self.ShopLogin = ""
self.Currency = ""
self.Amount = ""
self.ShopTransactionID = ""
self.CardNumber = ""
self.ExpMonth = ""
self.ExpYear = ""
self.BuyerName = ""
self.BuyerEmail = ""
self.Language = ""
self.CustomInfo = ""
self.AuthorizationCode = ""
self.ErrorCode = "0"
self.ErrorDescription = ""
self.BankTransactionID = ""
self.AlertCode = ""
self.AlertDescription = ""
self.EncryptedString = ""
self.ToBeEncrypt = ""
self.Decrypted = ""
self.ProtocolAuthServer = "https"
self.DomainName = "ecomm.sella.it"
self.ScriptEnCrypt = "/CryptHTTPS/Encrypt.asp"
self.ScriptDecrypt = "/CryptHTTPS/Decrypt.asp"
self.separator = "*P1*"
self.Version = "1.0"
self.Min = ""
self.CVV = ""
self.country = ""
self.vbvrisp = ""
self.vbv = ""
self.debug = False | lgpl-3.0 | 4,937,968,520,034,323,000 | 23.446939 | 100 | 0.688178 | false | 2.942275 | false | false | false |
macarthur-lab/xbrowse | xbrowse_server/base/management/commands/add_project.py | 1 | 1500 | from django.core.management.base import BaseCommand
from xbrowse_server.base.model_utils import create_xbrowse_model
from xbrowse_server.base.models import Project
import sys
from django.utils import timezone
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument("project_id")
parser.add_argument("project_name", nargs="?")
def handle(self, *args, **options):
if 'project_id' not in options:
print '\n'
print 'Creates a project in Seqr.\n'
print 'Please provide a project ID as an argument. Optionally, provide a more human-readable project name as a second argument. '
print 'Example: python manage.py add_project 1kg\n'
sys.exit()
project_id = options['project_id']
if "." in project_id:
sys.exit("ERROR: A '.' in the project ID is not supported")
if Project.objects.filter(project_id=project_id).exists():
print '\nSorry, I am unable to create that project since it exists already\n'
sys.exit()
project_name = options.get('project_name') or project_id
print('Creating project with id "%(project_id)s" and name "%(project_name)s"' % locals())
try:
create_xbrowse_model(Project, project_id=project_id, project_name=project_name, created_date=timezone.now())
except Exception as e:
print('\nError creating project:', e, '\n')
sys.exit()
| agpl-3.0 | -3,050,226,718,671,357,000 | 39.540541 | 141 | 0.636 | false | 4.076087 | false | false | false |
synappio/chapman | chapman/model/m_task.py | 1 | 2068 | import logging
from random import getrandbits
from ming import Field
from ming.declarative import Document
from ming import schema as S
from .m_base import doc_session, dumps, pickle_property, Resource
log = logging.getLogger(__name__)
class TaskState(Document):
class __mongometa__:
name = 'chapman.task'
session = doc_session
indexes = [
[('parent_id', 1), ('data.composite_position', 1)],
]
_id = Field(int, if_missing=lambda: getrandbits(63))
type = Field(str)
parent_id = Field(int, if_missing=None)
status = Field(str, if_missing='pending')
_result = Field('result', S.Binary)
data = Field({str: None})
options = Field(dict(
queue=S.String(if_missing='chapman'),
priority=S.Int(if_missing=10),
immutable=S.Bool(if_missing=False),
ignore_result=S.Bool(if_missing=False),
path=S.String(if_missing=None),
semaphores = [str],
))
on_complete = Field(int, if_missing=None)
active = Field([int]) # just one message active
queued = Field([int]) # any number queued
result = pickle_property('_result')
@classmethod
def set_result(cls, id, result):
cls.m.update_partial(
{'_id': id},
{'$set': {
'result': dumps(result),
'status': result.status}})
def __repr__(self):
parts = [self.type, self._id]
if self.options['path']:
parts.append(self.options['path'])
return '<{}>'.format(
' '.join(map(str, parts)))
class TaskStateResource(Resource):
cls=TaskState
def __init__(self, id):
self.id = id
def __repr__(self):
obj = TaskState.m.get(_id=self.id)
return '<TaskStateResource({}:{}): {} / {}>'.format(
obj.type, obj._id, obj.active, obj.queued)
def acquire(self, msg_id):
return super(TaskStateResource, self).acquire(msg_id, 1)
def release(self, msg_id):
return super(TaskStateResource, self).release(msg_id, 1)
| mit | 6,082,227,630,684,573,000 | 27.722222 | 65 | 0.583172 | false | 3.609075 | false | false | false |
mahangu/ocon | scraper/scrape.py | 1 | 2935 | # The Open Corpus of Newswriting (OCON)
# Copyright (C) 2014 Mahangu Weerasinghe ([email protected])
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import urlparse
import os
import justext
import urllib2
import re #regex stuff
from ftfy import fix_text #unicode cleanup
INPUT_DIR="input/"
OUTPUT_DIR="output/"
def get_article_list(filename):
lines = tuple(open(filename, "r"))
lines_seen = set() # holds lines already seen
for line in open(filename, "r"):
if line not in lines_seen: # not a duplicate
lines_seen.add(line)
return tuple(lines_seen)
def grab_article(url):
article = ""
import requests
import justext
#url = "http://archives.dailynews.lk/2008/01/11/news38.asp"
url = url.strip("\n")
url = url.strip("\r")
url = url.strip(" ")
print url
response = requests.get(url)
print response
paragraphs = justext.justext(response.content, justext.get_stoplist("English"))
for paragraph in paragraphs:
if not paragraph.is_boilerplate:
#print paragraph.text
article = article + unicode(paragraph.text) + "\n\n"
	if article:
		return unicode(article)
	else:
		return None
for file in os.listdir(INPUT_DIR):
if file.endswith(".txt"):
split_filename = re.findall(r"[^\W_]+", file) #splitting up the filename so we can get newspaper name and date from it
NEWSPAPER = split_filename[0]
DATE = split_filename[1]
article_url_list = ""
article_url_list = get_article_list(INPUT_DIR + file)
print article_url_list
for article_url in article_url_list:
scheme = urlparse.urlparse(article_url).scheme
if article_url!="\n" and scheme=="http": #checking for newlines and mailto: links
hostname = urlparse.urlparse(article_url).hostname
path = urlparse.urlparse(article_url).path #grab the part after the .TLD
path = path.replace("/", "") #remove forward slashes
				raw_text = grab_article(article_url)
				if raw_text is not None:
text = fix_text(raw_text)
text = text + "\n\n\n\n"
split_path = re.findall(r"[^\W_]+", path) #sanitising the path so it doesn't end up crazy long
short_path = split_path[0]
print short_path
text_file = open(OUTPUT_DIR + NEWSPAPER + "_" + DATE + "_" + hostname + "_" + short_path + ".txt", "a+")
text_file.write(text.encode('utf8'))
text_file.close()
| gpl-3.0 | 4,495,185,156,545,178,000 | 30.902174 | 120 | 0.689949 | false | 3.25388 | false | false | false |
LeResKP/pyramid_auth | pyramid_auth/forms.py | 1 | 1566 | import tw2.forms as twf
import tw2.core as twc
class UserExists(twc.Validator):
"""Validate the user exists in the DB. It's used when we want to
authentificate it.
"""
__unpackargs__ = ('login', 'password', 'validate_func', 'request')
msgs = {
'mismatch': ('Login failed. Please check your '
'credentials and try again.'),
}
def _validate_python(self, value, state):
super(UserExists, self)._validate_python(value, state)
login = value[self.login]
password = value[self.password]
for v in [login, password]:
try:
if issubclass(v, twc.validation.Invalid):
# No need to validate the password of the user, the login
# or password are invalid
return
except TypeError:
pass
res = self.validate_func(self.request, login, password)
if not res:
raise twc.ValidationError('mismatch', self)
if res is not True:
value['user'] = res
def create_login_form(request, validate_func):
class LoginForm(twf.TableForm):
login = twf.TextField(validator=twc.Validator(required=True))
password = twf.PasswordField(validator=twc.Validator(required=True))
submit = twf.SubmitButton(id='submit', value='Login')
validator = UserExists(
login='login',
password='password',
validate_func=validate_func,
request=request,
)
return LoginForm
| mit | 2,895,962,187,333,698,000 | 32.319149 | 77 | 0.581098 | false | 4.302198 | false | false | false |
qurben/mopidy-jukebox | backend/mopidy_jukebox/web.py | 1 | 9095 | """
RequestHandlers for the Jukebox application
IndexHandler - Show version
TracklistHandler - Show current tracklist
SongHandler - Show track information
VoteHandler - Add and remove votes
SkipHandler - Add and remove skips
SearchHandler - Search the library
"""
from __future__ import absolute_import, unicode_literals
import json
import uuid
from datetime import datetime
from functools import wraps
from mopidy.models import ModelJSONEncoder
from tornado import web, escape, gen, auth
from .library import Tracklist
from .models import Vote, User, Session
from .util import track_json
def authenticate(f):
"""
Decorator for checking if a user is authenticated
"""
@wraps(f)
def wrapper(self):
"""
:type self: RequestHandler
"""
try:
self.request.session = Session.get(Session.secret == self.get_cookie('session'))
f(self)
except Session.DoesNotExist:
self.set_status(403)
return wrapper
class LoginHandler(web.RequestHandler):
def get(self):
cookie = self.get_cookie('session')
if cookie:
try:
session = Session.get(Session.secret == cookie)
self.set_status(200)
self.write("Successfully logged in")
except Session.DoesNotExist:
self.redirect('/jukebox-api/auth/google')
else:
self.redirect('/jukebox-api/auth/google')
class LogoutHandler(web.RequestHandler):
@authenticate
def get(self):
self.request.session.delete()
self.clear_cookie('session')
self.set_status(200)
self.write("Successfully logged out")
class GoogleOAuth2LoginHandler(web.RequestHandler,
auth.GoogleOAuth2Mixin):
def initialize(self, google_oauth, google_oauth_secret):
self.settings[self._OAUTH_SETTINGS_KEY] = {
'key': google_oauth,
'secret': google_oauth_secret,
}
@gen.coroutine
def get(self):
# own url without GET variables
redirect_uri = self.request.protocol + "://" + self.request.host + self.request.uri.split('?')[0]
if self.get_argument('code', False):
try:
access = yield self.get_authenticated_user(
redirect_uri=redirect_uri,
code=self.get_argument('code'))
google_user = yield self.oauth2_request(
"https://www.googleapis.com/oauth2/v1/userinfo",
access_token=access["access_token"])
try:
user = User.get(uid=google_user['id'])
except User.DoesNotExist:
user = User.create(uid=google_user['id'], name=google_user['name'], email=google_user['email'],
picture=google_user['picture'])
user.save()
# a user can have 1 session
Session.delete().where(Session.user == user).execute()
session = Session(user=user, secret=uuid.uuid1())
session.save()
self.set_cookie('session', str(session.secret))
self.set_status(200)
self.write("Successfully logged in")
except auth.AuthError:
self.set_status(400, "Bad Request")
self.write("400: Bad Request")
else:
yield self.authorize_redirect(
redirect_uri=redirect_uri,
client_id=self.settings[self._OAUTH_SETTINGS_KEY]['key'],
scope=['profile', 'email'],
response_type='code',
extra_params={'approval_prompt': 'auto'})
class IndexHandler(web.RequestHandler):
def initialize(self, version, core):
self.core = core
self.version = version
def get(self):
self.write({'message': 'Welcome to the Jukebox API', 'version': self.version})
self.set_header("Content-Type", "application/json")
class TracklistHandler(web.RequestHandler):
def initialize(self, core):
self.core = core
@authenticate
def get(self):
tracklist = self.core.tracklist.get_tl_tracks().get()
self.write({
'tracklist': [{'id': id, 'track': track_json(track)} for (id, track) in tracklist]
})
self.set_header("Content-Type", "application/json")
class TrackHandler(web.RequestHandler):
def initialize(self, core):
self.core = core
def post(self):
"""
Get information for a specific track
:return:
"""
try:
track_uri = self.get_body_argument('track', '')
track = self.core.library.lookup(track_uri).get()[0]
self.write(track_json(track))
except web.MissingArgumentError:
self.write({"error": "'track' key not found"})
self.set_status(400)
class UserHandler(web.RequestHandler):
def initialize(self, core):
self.core = core
@authenticate
def get(self):
"""
Get information about the active user
:return:
"""
user = self.request.session.user
self.set_header("Content-Type", "application/json")
self.write({
'name': user.name,
'picture': user.picture,
'email': user.email,
'uid': user.uid,
})
class VoteHandler(web.RequestHandler):
def initialize(self, core):
self.core = core
@authenticate
def post(self):
"""
Get the vote for a specific track
:return:
"""
user = self.request.session.user
try:
track_uri = self.get_body_argument('track')
vote = Vote.get(Vote.track_uri == track_uri)
track = self.core.library.lookup(track_uri).get()[0]
self.write({'track': track_json(track),
'user': user.name,
'timestamp': vote.timestamp.isoformat()})
self.set_header("Content-Type", "application/json")
except web.MissingArgumentError:
self.set_status(400)
self.write({"error": "'track' key not found"})
@authenticate
def put(self):
"""
Vote for a specific track
:return:
"""
try:
track_uri = self.get_body_argument('track')
active_user = self.request.session.user
if Vote.select().where(Vote.track_uri == track_uri, Vote.user == active_user):
return self.set_status(409, 'Vote already exists')
my_vote = Vote(track_uri=track_uri, user=active_user, timestamp=datetime.now())
if my_vote.save() is 1:
# Add this track to now playing TODO: remove
Tracklist.update_tracklist(self.core.tracklist)
self.set_status(201)
else:
self.set_status(500)
except web.MissingArgumentError:
self.set_status(400)
self.write({"error": "'track' key not found"})
@authenticate
def delete(self):
"""
Delete the vote for a specific track
:return:
"""
try:
track_uri = self.get_body_argument('track')
if not track_uri:
self.write({"error": "'track' key not found"})
return self.set_status(400)
active_user = self.request.session.user
q = Vote.delete().where(Vote.track_uri == track_uri and Vote.user == active_user)
if q.execute() is 0:
self.set_status(404, "No vote deleted")
else:
Tracklist.update_tracklist(self.core.tracklist)
self.set_status(204, "Vote deleted")
except web.MissingArgumentError:
self.set_status(400)
self.write({"error":"'track' key not found"})
class SkipHandler(web.RequestHandler):
def initialize(self, core):
self.core = core
class SearchHandler(web.RequestHandler):
def initialize(self, core):
self.core = core
def error(self, code, message):
self.write({
'error': code,
'message': message
})
self.set_status(code, message)
def post(self):
field = self.get_body_argument('field', '')
values = self.get_body_argument('values', '')
if not field:
return self.error(400, 'Please provide a field')
search = {field: [values]}
search_result = self.core.library.search(search).get()[0]
self.set_header("Content-Type", "application/json")
self.write("""{
"uri": "%s",
"albums": %s,
"artists": %s,
"tracks": %s
}""" % (search_result.uri,
json.dumps(search_result.albums, cls=ModelJSONEncoder),
json.dumps(search_result.artists, cls=ModelJSONEncoder),
json.dumps(search_result.tracks, cls=ModelJSONEncoder)))
| mit | -5,404,643,345,612,076,000 | 29.830508 | 115 | 0.561407 | false | 4.2087 | false | false | false |
citrix-openstack-build/ironic-lib | ironic_lib/disk_utils.py | 1 | 20287 | # Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import gzip
import logging
import math
import os
import re
import requests
import shutil
import six
import stat
import tempfile
import time
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import excutils
from oslo_utils import units
from ironic_lib.openstack.common._i18n import _
from ironic_lib.openstack.common._i18n import _LE
from ironic_lib.openstack.common._i18n import _LW
from ironic_lib.openstack.common import imageutils
from ironic_lib import disk_partitioner
from ironic_lib import exception
from ironic_lib import utils
opts = [
cfg.IntOpt('efi_system_partition_size',
default=200,
help='Size of EFI system partition in MiB when configuring '
'UEFI systems for local boot.',
deprecated_group='deploy'),
cfg.StrOpt('dd_block_size',
default='1M',
help='Block size to use when writing to the nodes disk.',
deprecated_group='deploy'),
cfg.IntOpt('iscsi_verify_attempts',
default=3,
help='Maximum attempts to verify an iSCSI connection is '
'active, sleeping 1 second between attempts.',
deprecated_group='deploy'),
]
CONF = cfg.CONF
CONF.register_opts(opts, group='disk_utils')
LOG = logging.getLogger(__name__)
_PARTED_PRINT_RE = re.compile(r"^(\d+):([\d\.]+)MiB:"
"([\d\.]+)MiB:([\d\.]+)MiB:(\w*)::(\w*)")
_ISCSI_RE = re.compile(r"^ip-[\d+.]*:\w+-iscsi-[\w+.]*-lun-\d+")
def list_partitions(device):
"""Get partitions information from given device.
:param device: The device path.
:returns: list of dictionaries (one per partition) with keys:
number, start, end, size (in MiB), filesystem, flags
"""
output = utils.execute(
'parted', '-s', '-m', device, 'unit', 'MiB', 'print',
use_standard_locale=True, run_as_root=True)[0]
if isinstance(output, bytes):
output = output.decode("utf-8")
lines = [line for line in output.split('\n') if line.strip()][2:]
# Example of line: 1:1.00MiB:501MiB:500MiB:ext4::boot
fields = ('number', 'start', 'end', 'size', 'filesystem', 'flags')
result = []
for line in lines:
match = _PARTED_PRINT_RE.match(line)
if match is None:
LOG.warn(_LW("Partition information from parted for device "
"%(device)s does not match "
"expected format: %(line)s"),
dict(device=device, line=line))
continue
# Cast int fields to ints (some are floats and we round them down)
groups = [int(float(x)) if i < 4 else x
for i, x in enumerate(match.groups())]
result.append(dict(zip(fields, groups)))
return result
def is_iscsi_device(dev):
"""check whether the device path belongs to an iscsi device. """
basename = os.path.basename(dev)
return bool(_ISCSI_RE.match(basename))
def get_disk_identifier(dev):
"""Get the disk identifier from the disk being exposed by the ramdisk.
This disk identifier is appended to the pxe config which will then be
used by chain.c32 to detect the correct disk to chainload. This is helpful
in deployments to nodes with multiple disks.
http://www.syslinux.org/wiki/index.php/Comboot/chain.c32#mbr:
:param dev: Path for the already populated disk device.
:returns The Disk Identifier.
"""
disk_identifier = utils.execute('hexdump', '-s', '440', '-n', '4',
'-e', '''\"0x%08x\"''',
dev,
run_as_root=True,
check_exit_code=[0],
attempts=5,
delay_on_retry=True)
return disk_identifier[0]
def make_partitions(dev, root_mb, swap_mb, ephemeral_mb,
configdrive_mb, commit=True, boot_option="netboot",
boot_mode="bios"):
"""Partition the disk device.
Create partitions for root, swap, ephemeral and configdrive on a
disk device.
:param root_mb: Size of the root partition in mebibytes (MiB).
:param swap_mb: Size of the swap partition in mebibytes (MiB). If 0,
no partition will be created.
:param ephemeral_mb: Size of the ephemeral partition in mebibytes (MiB).
If 0, no partition will be created.
:param configdrive_mb: Size of the configdrive partition in
mebibytes (MiB). If 0, no partition will be created.
:param commit: True/False. Default for this setting is True. If False
partitions will not be written to disk.
:param boot_option: Can be "local" or "netboot". "netboot" by default.
:param boot_mode: Can be "bios" or "uefi". "bios" by default.
:returns: A dictionary containing the partition type as Key and partition
path as Value for the partitions created by this method.
"""
LOG.debug("Starting to partition the disk device: %(dev)s",
{'dev': dev})
if is_iscsi_device(dev):
part_template = dev + '-part%d'
else:
part_template = dev + '%d'
part_dict = {}
# For uefi localboot, switch partition table to gpt and create the efi
# system partition as the first partition.
if boot_mode == "uefi" and boot_option == "local":
dp = disk_partitioner.DiskPartitioner(dev, disk_label="gpt")
part_num = dp.add_partition(CONF.disk_utils.efi_system_partition_size,
fs_type='fat32',
bootable=True)
part_dict['efi system partition'] = part_template % part_num
else:
dp = disk_partitioner.DiskPartitioner(dev)
if ephemeral_mb:
LOG.debug("Add ephemeral partition (%(size)d MB) to device: %(dev)s",
{'dev': dev, 'size': ephemeral_mb})
part_num = dp.add_partition(ephemeral_mb)
part_dict['ephemeral'] = part_template % part_num
if swap_mb:
LOG.debug("Add Swap partition (%(size)d MB) to device: %(dev)s",
{'dev': dev, 'size': swap_mb})
part_num = dp.add_partition(swap_mb, fs_type='linux-swap')
part_dict['swap'] = part_template % part_num
if configdrive_mb:
LOG.debug("Add config drive partition (%(size)d MB) to device: "
"%(dev)s", {'dev': dev, 'size': configdrive_mb})
part_num = dp.add_partition(configdrive_mb)
part_dict['configdrive'] = part_template % part_num
# NOTE(lucasagomes): Make the root partition the last partition. This
# enables tools like cloud-init's growroot utility to expand the root
# partition until the end of the disk.
LOG.debug("Add root partition (%(size)d MB) to device: %(dev)s",
{'dev': dev, 'size': root_mb})
part_num = dp.add_partition(root_mb, bootable=(boot_option == "local" and
boot_mode == "bios"))
part_dict['root'] = part_template % part_num
if commit:
# write to the disk
dp.commit()
return part_dict
def is_block_device(dev):
"""Check whether a device is block or not."""
attempts = CONF.disk_utils.iscsi_verify_attempts
for attempt in range(attempts):
try:
s = os.stat(dev)
except OSError as e:
LOG.debug("Unable to stat device %(dev)s. Attempt %(attempt)d "
"out of %(total)d. Error: %(err)s",
{"dev": dev, "attempt": attempt + 1,
"total": attempts, "err": e})
time.sleep(1)
else:
return stat.S_ISBLK(s.st_mode)
msg = _("Unable to stat device %(dev)s after attempting to verify "
"%(attempts)d times.") % {'dev': dev, 'attempts': attempts}
LOG.error(msg)
raise exception.InstanceDeployFailure(msg)
def dd(src, dst):
"""Execute dd from src to dst."""
utils.dd(src, dst, 'bs=%s' % CONF.disk_utils.dd_block_size, 'oflag=direct')
def qemu_img_info(path):
"""Return an object containing the parsed output from qemu-img info."""
if not os.path.exists(path):
return imageutils.QemuImgInfo()
out, err = utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path)
return imageutils.QemuImgInfo(out)
def convert_image(source, dest, out_format, run_as_root=False):
"""Convert image to other format."""
cmd = ('qemu-img', 'convert', '-O', out_format, source, dest)
utils.execute(*cmd, run_as_root=run_as_root)
def populate_image(src, dst):
data = qemu_img_info(src)
if data.file_format == 'raw':
dd(src, dst)
else:
convert_image(src, dst, 'raw', True)
# TODO(rameshg87): Remove this one-line method and use utils.mkfs
# directly.
def mkfs(fs, dev, label=None):
"""Execute mkfs on a device."""
utils.mkfs(fs, dev, label)
def block_uuid(dev):
"""Get UUID of a block device."""
out, _err = utils.execute('blkid', '-s', 'UUID', '-o', 'value', dev,
run_as_root=True,
check_exit_code=[0])
return out.strip()
def get_image_mb(image_path, virtual_size=True):
"""Get size of an image in Megabyte."""
mb = 1024 * 1024
if not virtual_size:
image_byte = os.path.getsize(image_path)
else:
data = qemu_img_info(image_path)
image_byte = data.virtual_size
# round up size to MB
image_mb = int((image_byte + mb - 1) / mb)
return image_mb
def get_dev_block_size(dev):
"""Get the device size in 512 byte sectors."""
block_sz, cmderr = utils.execute('blockdev', '--getsz', dev,
run_as_root=True, check_exit_code=[0])
return int(block_sz)
def destroy_disk_metadata(dev, node_uuid):
"""Destroy metadata structures on node's disk.
Ensure that node's disk appears to be blank without zeroing the entire
drive. To do this we will zero the first 18KiB to clear MBR / GPT data
and the last 18KiB to clear GPT and other metadata like LVM, veritas,
MDADM, DMRAID, etc.
"""
# NOTE(NobodyCam): This is needed to work around bug:
# https://bugs.launchpad.net/ironic/+bug/1317647
LOG.debug("Start destroy disk metadata for node %(node)s.",
{'node': node_uuid})
try:
utils.execute('dd', 'if=/dev/zero', 'of=%s' % dev,
'bs=512', 'count=36', run_as_root=True,
check_exit_code=[0])
except processutils.ProcessExecutionError as err:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to erase beginning of disk for node "
"%(node)s. Command: %(command)s. Error: %(error)s."),
{'node': node_uuid,
'command': err.cmd,
'error': err.stderr})
# now wipe the end of the disk.
# get end of disk seek value
try:
block_sz = get_dev_block_size(dev)
except processutils.ProcessExecutionError as err:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to get disk block count for node %(node)s. "
"Command: %(command)s. Error: %(error)s."),
{'node': node_uuid,
'command': err.cmd,
'error': err.stderr})
else:
seek_value = block_sz - 36
try:
utils.execute('dd', 'if=/dev/zero', 'of=%s' % dev,
'bs=512', 'count=36', 'seek=%d' % seek_value,
run_as_root=True, check_exit_code=[0])
except processutils.ProcessExecutionError as err:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to erase the end of the disk on node "
"%(node)s. Command: %(command)s. "
"Error: %(error)s."),
{'node': node_uuid,
'command': err.cmd,
'error': err.stderr})
def _get_configdrive(configdrive, node_uuid):
"""Get the information about size and location of the configdrive.
:param configdrive: Base64 encoded Gzipped configdrive content or
configdrive HTTP URL.
:param node_uuid: Node's uuid. Used for logging.
:raises: InstanceDeployFailure if it can't download or decode the
config drive.
:returns: A tuple with the size in MiB and path to the uncompressed
configdrive file.
"""
# Check if the configdrive option is a HTTP URL or the content directly
is_url = utils.is_http_url(configdrive)
if is_url:
try:
data = requests.get(configdrive).content
except requests.exceptions.RequestException as e:
raise exception.InstanceDeployFailure(
_("Can't download the configdrive content for node %(node)s "
"from '%(url)s'. Reason: %(reason)s") %
{'node': node_uuid, 'url': configdrive, 'reason': e})
else:
data = configdrive
try:
data = six.BytesIO(base64.b64decode(data))
except TypeError:
error_msg = (_('Config drive for node %s is not base64 encoded '
'or the content is malformed.') % node_uuid)
if is_url:
error_msg += _(' Downloaded from "%s".') % configdrive
raise exception.InstanceDeployFailure(error_msg)
configdrive_file = tempfile.NamedTemporaryFile(delete=False,
prefix='configdrive',
dir=CONF.ironic_lib.tempdir)
configdrive_mb = 0
with gzip.GzipFile('configdrive', 'rb', fileobj=data) as gunzipped:
try:
shutil.copyfileobj(gunzipped, configdrive_file)
except EnvironmentError as e:
# Delete the created file
utils.unlink_without_raise(configdrive_file.name)
raise exception.InstanceDeployFailure(
_('Encountered error while decompressing and writing '
'config drive for node %(node)s. Error: %(exc)s') %
{'node': node_uuid, 'exc': e})
else:
# Get the file size and convert to MiB
configdrive_file.seek(0, os.SEEK_END)
bytes_ = configdrive_file.tell()
configdrive_mb = int(math.ceil(float(bytes_) / units.Mi))
finally:
configdrive_file.close()
return (configdrive_mb, configdrive_file.name)
def work_on_disk(dev, root_mb, swap_mb, ephemeral_mb, ephemeral_format,
image_path, node_uuid, preserve_ephemeral=False,
configdrive=None, boot_option="netboot",
boot_mode="bios"):
"""Create partitions and copy an image to the root partition.
:param dev: Path for the device to work on.
:param root_mb: Size of the root partition in megabytes.
:param swap_mb: Size of the swap partition in megabytes.
:param ephemeral_mb: Size of the ephemeral partition in megabytes. If 0,
no ephemeral partition will be created.
:param ephemeral_format: The type of file system to format the ephemeral
partition.
:param image_path: Path for the instance's disk image.
:param node_uuid: node's uuid. Used for logging.
:param preserve_ephemeral: If True, no filesystem is written to the
ephemeral block device, preserving whatever content it had (if the
partition table has not changed).
:param configdrive: Optional. Base64 encoded Gzipped configdrive content
or configdrive HTTP URL.
:param boot_option: Can be "local" or "netboot". "netboot" by default.
:param boot_mode: Can be "bios" or "uefi". "bios" by default.
:returns: a dictionary containing the following keys:
'root uuid': UUID of root partition
'efi system partition uuid': UUID of the uefi system partition
(if boot mode is uefi).
NOTE: If key exists but value is None, it means partition doesn't
exist.
"""
# the only way for preserve_ephemeral to be set to true is if we are
# rebuilding an instance with --preserve_ephemeral.
commit = not preserve_ephemeral
# now if we are committing the changes to disk clean first.
if commit:
destroy_disk_metadata(dev, node_uuid)
try:
# If requested, get the configdrive file and determine the size
# of the configdrive partition
configdrive_mb = 0
configdrive_file = None
if configdrive:
configdrive_mb, configdrive_file = _get_configdrive(configdrive,
node_uuid)
part_dict = make_partitions(dev, root_mb, swap_mb, ephemeral_mb,
configdrive_mb, commit=commit,
boot_option=boot_option,
boot_mode=boot_mode)
ephemeral_part = part_dict.get('ephemeral')
swap_part = part_dict.get('swap')
configdrive_part = part_dict.get('configdrive')
root_part = part_dict.get('root')
if not is_block_device(root_part):
raise exception.InstanceDeployFailure(
_("Root device '%s' not found") % root_part)
for part in ('swap', 'ephemeral', 'configdrive',
'efi system partition'):
part_device = part_dict.get(part)
LOG.debug("Checking for %(part)s device (%(dev)s) on node "
"%(node)s.", {'part': part, 'dev': part_device,
'node': node_uuid})
if part_device and not is_block_device(part_device):
raise exception.InstanceDeployFailure(
_("'%(partition)s' device '%(part_device)s' not found") %
{'partition': part, 'part_device': part_device})
# If it's a uefi localboot, then we have created the efi system
# partition. Create a fat filesystem on it.
if boot_mode == "uefi" and boot_option == "local":
efi_system_part = part_dict.get('efi system partition')
mkfs(dev=efi_system_part, fs='vfat', label='efi-part')
if configdrive_part:
# Copy the configdrive content to the configdrive partition
dd(configdrive_file, configdrive_part)
finally:
# If the configdrive was requested make sure we delete the file
# after copying the content to the partition
if configdrive_file:
utils.unlink_without_raise(configdrive_file)
populate_image(image_path, root_part)
if swap_part:
mkfs(dev=swap_part, fs='swap', label='swap1')
if ephemeral_part and not preserve_ephemeral:
mkfs(dev=ephemeral_part, fs=ephemeral_format, label="ephemeral0")
uuids_to_return = {
'root uuid': root_part,
'efi system partition uuid': part_dict.get('efi system partition')
}
try:
for part, part_dev in six.iteritems(uuids_to_return):
if part_dev:
uuids_to_return[part] = block_uuid(part_dev)
except processutils.ProcessExecutionError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to detect %s"), part)
return uuids_to_return
| apache-2.0 | -111,548,274,905,857,920 | 38.856582 | 79 | 0.587618 | false | 3.959212 | true | false | false |
bgris/ODL_bgris | examples/tomo/filtered_backprojection_cone_2d_partial_scan.py | 1 | 2456 | """
Example using a filtered back-projection (FBP) in fan beam using `fbp_op`.
Note that the FBP is only approximate in this geometry, but still gives a
decent reconstruction that can be used as an initial guess in more complicated
methods.
Here we look at a partial scan, where the angular interval is not 2 * pi.
This caues issues for the regular FBP reconstruction, but can be improved
via a Parker weighting.
"""
import numpy as np
import odl
# --- Set-up geometry of the problem --- #
# Discrete reconstruction space: discretized functions on the cube
# [-20, 20]^2 with 300 samples per dimension.
reco_space = odl.uniform_discr(
min_pt=[-20, -20], max_pt=[20, 20], shape=[300, 300],
dtype='float32')
# Make a circular cone beam geometry with flat detector
# Angles: uniformly spaced, n = 360, min = 0, max = pi + fan angle
angle_partition = odl.uniform_partition(0, np.pi + 0.7, 360)
# Detector: uniformly sampled, n = 558, min = -40, max = 40
detector_partition = odl.uniform_partition(-40, 40, 558)
# Geometry with large fan angle
geometry = odl.tomo.FanFlatGeometry(
angle_partition, detector_partition, src_radius=80, det_radius=40)
# --- Create Filtered Back-Projection (FBP) operator --- #
# Ray transform (= forward projection). We use the ASTRA CUDA backend.
ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl='astra_cuda')
# Create FBP operator using utility function
# We select a Hann filter, and only use the lowest 80% of frequencies to avoid
# high frequency noise.
fbp = odl.tomo.fbp_op(ray_trafo, filter_type='Hann', frequency_scaling=0.8)
# Apply parker weighting in order to improve reconstruction
parker_weighting = odl.tomo.parker_weighting(ray_trafo)
parker_weighting.show()
parker_weighted_fbp = fbp * parker_weighting
# --- Show some examples --- #
# Create a discrete Shepp-Logan phantom (modified version)
phantom = odl.phantom.shepp_logan(reco_space, modified=True)
# Create projection data by calling the ray transform on the phantom
proj_data = ray_trafo(phantom)
# Calculate filtered back-projection of data
fbp_reconstruction = fbp(proj_data)
pw_fbp_reconstruction = parker_weighted_fbp(proj_data)
# Shows a slice of the phantom, projections, and reconstruction
phantom.show(title='Phantom')
proj_data.show(title='Projection data (sinogram)')
fbp_reconstruction.show(title='Filtered back-projection')
pw_fbp_reconstruction.show(title='Parker weighted filtered back-projection')
| gpl-3.0 | -4,292,229,415,815,887,000 | 34.594203 | 78 | 0.747557 | false | 3.206266 | false | false | false |
elshaka/Romano | romano/ui/add_carrier.py | 1 | 3446 | # -*- coding: utf-8 -*-
from PySide import QtGui, QtCore
from .ui_add_carrier import Ui_AddCarrier
from mango.models.carrier import Carrier
from .error_message_box import ErrorMessageBox
class AddCarrier(QtGui.QDialog):
def __init__(self, parent):
super(AddCarrier, self).__init__(parent)
self.api = parent.api
self.ui = Ui_AddCarrier()
self.ui.setupUi(self)
self.api.get_carriers()
self.ui.frequentWidget.setEnabled(False)
self.carriersTableModel = CarriersTableModel([], self)
self.filterCarriersProxyModel = QtGui.QSortFilterProxyModel()
self.filterCarriersProxyModel.setSourceModel(self.carriersTableModel)
self.filterCarriersProxyModel.setFilterKeyColumn(-1)
self.filterCarriersProxyModel.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)
self.ui.carriersTableView.setModel(self.filterCarriersProxyModel)
self.ui.filterLineEdit.textChanged.connect(self.filterCarriersProxyModel.setFilterRegExp)
self.api.getCarriersFinished.connect(self.carriersTableModel.refreshCarriers)
self.ui.newButton.clicked.connect(self.enableCarrierType)
self.ui.frequentButton.clicked.connect(self.enableCarrierType)
self.ui.carriersTableView.doubleClicked.connect(self.addCarrier)
self.ui.addButton.clicked.connect(self.addCarrier)
self.ui.cancelButton.clicked.connect(self.reject)
def addCarrier(self):
if self.ui.newButton.isChecked():
errors = []
name = self.ui.nameLineEdit.text()
if name == "":
errors.append("Debe indicar un nombre")
if not errors:
self.new = True
self.carrier = Carrier(name, None)
if self.ui.saveAsFrequentBox.isChecked():
self.carrier.frequent = True
self.accept()
else:
ErrorMessageBox(errors).exec_()
else:
errors = []
carrierFilteredIndex = self.ui.carriersTableView.currentIndex()
if carrierFilteredIndex.row() == -1:
errors.append("Debe seleccionar una transportista")
if not errors:
self.new = False
carrierIndex = self.filterCarriersProxyModel.mapToSource(carrierFilteredIndex)
self.carrier = self.carriersTableModel.getCarrier(carrierIndex.row())
self.accept()
else:
ErrorMessageBox(errors).exec_()
def enableCarrierType(self):
if self.ui.newButton.isChecked():
self.ui.newWidget.setEnabled(True)
self.ui.frequentWidget.setEnabled(False)
else:
self.ui.newWidget.setEnabled(False)
self.ui.frequentWidget.setEnabled(True)
class CarriersTableModel(QtCore.QAbstractTableModel):
def __init__(self, carriers, parent):
super(CarriersTableModel, self).__init__(parent)
self._carriers = carriers
self._headers = ['Nombre']
def getCarrier(self, row):
return self._carriers[row]
def refreshCarriers(self, carriers):
self.beginResetModel()
self._carriers = carriers
self.endResetModel()
def headerData(self, section, orientation, role):
if role == QtCore.Qt.DisplayRole:
if orientation == QtCore.Qt.Horizontal:
return self._headers[section]
def rowCount(self, parent):
return len(self._carriers)
def columnCount(self, parent):
return len(self._headers)
def data(self, index, role):
row = index.row()
column = index.column()
if role == QtCore.Qt.DisplayRole:
if column == 0:
return self._carriers[row].name
| gpl-3.0 | 5,968,440,507,558,739,000 | 33.808081 | 93 | 0.700232 | false | 3.516327 | false | false | false |
a0x77n/chucky-ng | chucky/joernInterface/nodes/Node.py | 2 | 1093 | from joernInterface.JoernInterface import jutils
class Node(object):
def __init__(self, node_id, properties = None):
self.node_id = node_id
self.properties = properties
self.node_selection = 'g.v("{}")'.format(self.node_id)
def __str__(self):
return str(self.node_id)
def __repr__(self):
return str(self)
def __eq__(self, other):
return self.node_id == other.node_id
def __ne__(self, other):
return self.node_id != other.node_id
def __hash__(self):
return self.node_id
def load_properties(self):
_, node = jutils.raw_lookup(self.node_selection)[0]
self.properties = node.get_properties()
def get_property(self, label):
if not self.properties:
self.load_properties()
if label in self.properties:
return self.properties[label]
else:
return None
def getId(self):
return self.node_id
@property
def node_type(self):
return self.get_property('type')
| gpl-3.0 | -5,819,698,073,411,503,000 | 23.840909 | 62 | 0.561757 | false | 3.903571 | false | false | false |
robobario/repl | server.py | 1 | 2679 | from __future__ import print_function
from tornado.gen import Task, Return, coroutine
import tornado.process
import tornado.web
from tornado.ioloop import IOLoop
import re
from repl import Repl
import os.path
import time
import json
import subprocess
repls = {}
pattern = re.compile(r"/(\d+)")
safe_repls = ["prolog","scala","python","haskell","ruby","clojure","erlang","kotlin","nodejs"]
def create_repl(ioloop,repl_type):
global repls
repl = Repl(ioloop, repl_type)
repls[repl.identity] = repl
return repl.identity
def clean_idle_repls():
global repls
try:
to_del = []
for key, repl in repls.iteritems():
if repl.is_expired():
to_del.append(key)
repl.close()
for key in to_del:
del repls[key]
ioloop = tornado.ioloop.IOLoop.current()
finally:
ioloop.call_later(2, clean_idle_repls)
class KillReplHandler(tornado.web.RequestHandler):
def get(self, path):
num = int(path)
if num in repls:
repls[num].close()
del repls[num]
self.set_status(200)
self.finish()
else:
self.clear()
self.set_status(404)
self.finish("<html><body>non existant repl type</body></html>")
class NewReplHandler(tornado.web.RequestHandler):
def get(self, repl_type):
if repl_type in safe_repls:
repl_id = create_repl(ioloop, repl_type)
self.write(json.dumps(repl_id))
else:
self.clear()
self.set_status(404)
self.finish("<html><body>non existant repl type</body></html>")
@tornado.web.stream_request_body
class MainHandler(tornado.web.RequestHandler):
def get(self, path):
num = int(path)
if num not in repls:
self.set_status(404)
else:
repls[num].drain_to_handler(self)
def post(self, path):
self.write("")
def data_received(self, chunk):
num = int(pattern.match(self.request.path).group(1))
if num not in repls:
self.set_status(404)
else:
repls[num].write_async(chunk)
settings = {
"static_path": os.path.join(os.path.dirname(__file__), "static")
}
class RootHandler(tornado.web.RequestHandler):
def get(self):
self.render("static/index.html")
application = tornado.web.Application([
(r"/", RootHandler),
(r"/kill/(\d+)", KillReplHandler),
(r"/(\d+)", MainHandler),
(r"/new/([a-zA-Z0-9\-]+)", NewReplHandler),
], **settings)
if __name__ == "__main__":
application.listen(8888)
ioloop = tornado.ioloop.IOLoop.current()
ioloop.call_later(5, clean_idle_repls)
ioloop.start()
| mit | 764,434,140,892,750,500 | 26.060606 | 94 | 0.610302 | false | 3.382576 | false | false | false |
brutalhonesty/pineapple-eyefi | main.py | 1 | 8225 | #!/usr/bin/env python
from flask import Flask, request, redirect, url_for, abort, render_template, make_response, after_this_request
import flickrapi
from werkzeug import secure_filename
import xml.etree.cElementTree as ET
import xml.etree.ElementTree as xml
import os
import md5
import tarfile
import hashlib
import binascii
import random
import string
import webbrowser
# Eye-Fi Port
PORT = 59278
# KEY for Eye-Fi Mobi Cards
KEY = u'00000000000000000000000000000000'
# Server nonce
SERVER_CRED = ''
# Client nonce
SESSION = ''
FILE_ID = 1
# UPLOAD_FOLDER = '/sd/uploads'
UPLOAD_FOLDER = '/tmp'
FLICKR_API_KEY = u'034800f3a9eb9d88d054c9d00a67d82e'
FLICKR_API_SECRET = u'fa6a19f351f9aced'
# Create application.
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# Env vars.
# app.config.from_envvar('FLASKR_SETTINGS', silent=False)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/api/soap/eyefilm/v1', methods=['POST'])
def start_session():
if 'Soapaction' not in request.headers:
abort(400)
header_value = request.headers['Soapaction']
if header_value == '"urn:StartSession"':
app.logger.info('Running Start session..')
root = ET.fromstring(request.data)
for child in root:
for step_child in child:
for step_step_child in step_child:
if step_step_child.tag == 'macaddress':
macaddress = step_step_child.text
elif step_step_child.tag == 'cnonce':
cnonce = step_step_child.text
elif step_step_child.tag == 'transfermode':
transfermode = step_step_child.text
elif step_step_child.tag == 'transfermodetimestamp':
transfermode_timestamp = step_step_child.text
credential = _get_credential(macaddress, cnonce, KEY)
_set_cnonce(credential)
new_snonce = _get_new_snonce()
_set_snonce(new_snonce)
resp = make_response(render_template(
'start_session_response.xml',
credential=credential,
snonce=SERVER_CRED,
transfermode=transfermode,
transfermode_timestamp=transfermode_timestamp))
resp.headers['Content-Type'] = 'text/xml; charset="utf-8"'
resp.headers['Connection'] = 'keep-alive'
return resp
elif header_value == '"urn:GetPhotoStatus"':
app.logger.info('Running Get Photo Status..')
root = ET.fromstring(request.data)
for child in root:
for step_child in child:
for step_step_child in step_child:
if step_step_child.tag == 'credential':
credential = step_step_child.text
elif step_step_child.tag == 'macaddress':
macaddress = step_step_child.text
elif step_step_child.tag == 'filename':
file_name = step_step_child.text
elif step_step_child.tag == 'filesize':
file_size = step_step_child.text
elif step_step_child.tag == 'filesignature':
file_sig = step_step_child.text
elif step_step_child.tag == 'flags':
flags = step_step_child.text
old_credential = _get_credential(macaddress, KEY, SERVER_CRED)
if old_credential == credential:
@after_this_request
def set_file_id(resp):
global FILE_ID
FILE_ID += 1
return resp
resp = make_response(render_template(
'get_photo_status_response.xml',
file_id=FILE_ID,
offset=0))
resp.headers['Content-Type'] = 'text/xml; charset="utf-8"'
resp.headers['Connection'] = 'keep-alive'
return resp
else:
abort(400)
else:
abort(400)
@app.route('/api/soap/eyefilm/v1/upload', methods=['POST'])
def capture_upload():
app.logger.info('Running file upload...')
app.logger.info(request.headers)
app.logger.info(request.form)
app.logger.info(request.files)
# We ignore this for now..
integrity_digest = request.form['INTEGRITYDIGEST']
app.logger.info('integrity_digest')
app.logger.info(integrity_digest)
upload_data = request.form['SOAPENVELOPE']
app.logger.info('upload_data')
app.logger.info(upload_data)
# Image object
image_tar = request.files['FILENAME']
app.logger.info('image_tar')
app.logger.info(image_tar)
# Get file from req
tar_filename = secure_filename(image_tar.filename)
image_filename = tar_filename.rsplit('.', 1)[0]
app.logger.info('image_filename')
app.logger.info(image_filename)
# Save file to upload dir
tar_file_path = os.path.join(app.config['UPLOAD_FOLDER'], tar_filename)
app.logger.info('tar_file_path')
app.logger.info(tar_file_path)
image_tar.save(tar_file_path)
image_file_path = os.path.join(app.config['UPLOAD_FOLDER'], image_filename)
app.logger.info('image_file_path')
app.logger.info(image_file_path)
ar = tarfile.open(tar_file_path, mode='r')
ar.extractall(path=app.config['UPLOAD_FOLDER'])
ar.close()
root = ET.fromstring(upload_data)
for child in root:
for step_child in child:
for step_step_child in step_child:
if step_step_child.tag == 'fileid':
file_id = step_step_child.text
elif step_step_child.tag == 'macaddress':
macaddress = step_step_child.text
elif step_step_child.tag == 'filename':
pass
elif step_step_child.tag == 'filesize':
filesize = step_step_child.text
elif step_step_child.tag == 'filesignature':
file_sig = step_step_child.text
elif step_step_child.tag == 'encryption':
encryption = step_step_child.text
elif step_step_child.tag == 'flags':
flags = step_step_child.text
@after_this_request
def flickr(resp):
_flickr_upload_photo(image_filename, image_file_path)
return resp
return render_template('upload_photo_response.xml')
def _flickr_upload_photo(file_name, file_path):
flickr = flickrapi.FlickrAPI(FLICKR_API_KEY, FLICKR_API_SECRET)
if not flickr.token_valid(perms=u'write'):
# Get a request token
flickr.get_request_token(oauth_callback='oob')
# Open a browser at the authentication URL. Do this however
# you want, as long as the user visits that URL.
authorize_url = flickr.auth_url(perms=u'write')
webbrowser.open_new_tab(authorize_url)
# Get the verifier code from the user. Do this however you
# want, as long as the user gives the application the code.
verifier = unicode(raw_input('Verifier code: '))
# Trade the request token for an access token
flickr.get_access_token(verifier)
return flickr.upload(
is_public=1,
fileobj=open(file_path, 'rb'),
filename=file_name,
content_type=1,
format='rest')
else:
return flickr.upload(
is_public=1,
fileobj=open(file_path, 'rb'),
filename=file_name,
content_type=1,
format='rest')
def _get_new_snonce():
m = md5.new()
random_word = '.'.join(random.choice(string.lowercase) for i in range(40))
m.update(random_word)
return m.hexdigest()
def _set_cnonce(cnonce):
global SESSION
SESSION = cnonce
def _set_snonce(credential):
global SERVER_CRED
SERVER_CRED = credential
def _get_credential(mac, cnonce, key):
cred_str = mac + cnonce + key
bin_cred_str = binascii.unhexlify(cred_str)
m = hashlib.md5()
m.update(bin_cred_str)
return m.hexdigest()
if __name__ == '__main__':
app.debug = True
app.run(port=PORT, host='0.0.0.0')
| mit | -2,527,520,502,396,802,600 | 35.074561 | 110 | 0.591489 | false | 3.723404 | false | false | false |
breandan/java-algebra-system | examples/prime-decomp_algeb_trans.py | 3 | 2178 | #
# jython examples for jas.
# $Id$
#
import sys
from java.lang import System
from jas import PolyRing, QQ, AN, RF
from jas import terminate, startLog
# polynomial examples: prime/primary decomposition in Q(sqrt(2))(x)(sqrt(x))[y,z]
Q = PolyRing(QQ(),"w2",PolyRing.lex);
print "Q = " + str(Q);
[e,a] = Q.gens();
#print "e = " + str(e);
print "a = " + str(a);
root = a**2 - 2;
print "root = " + str(root);
Q2 = AN(root,field=True);
print "Q2 = " + str(Q2.factory());
[one,w2] = Q2.gens();
#print "one = " + str(one);
#print "w2 = " + str(w2);
print;
Qp = PolyRing(Q2,"x",PolyRing.lex);
print "Qp = " + str(Qp);
[ep,wp,ap] = Qp.gens();
#print "ep = " + str(ep);
#print "wp = " + str(wp);
#print "ap = " + str(ap);
print;
Qr = RF(Qp);
print "Qr = " + str(Qr.factory());
[er,wr,ar] = Qr.gens();
#print "er = " + str(er);
#print "wr = " + str(wr);
#print "ar = " + str(ar);
print;
Qwx = PolyRing(Qr,"wx",PolyRing.lex);
print "Qwx = " + str(Qwx);
[ewx,wwx,ax,wx] = Qwx.gens();
#print "ewx = " + str(ewx);
print "ax = " + str(ax);
#print "wwx = " + str(wwx);
print "wx = " + str(wx);
print;
rootx = wx**2 - ax;
print "rootx = " + str(rootx);
Q2x = AN(rootx,field=True);
print "Q2x = " + str(Q2x.factory());
[ex2,w2x2,ax2,wx] = Q2x.gens();
#print "ex2 = " + str(ex2);
#print "w2x2 = " + str(w2x2);
#print "ax2 = " + str(ax2);
#print "wx = " + str(wx);
print;
Yr = PolyRing(Q2x,"y,z",PolyRing.lex)
print "Yr = " + str(Yr);
[e,w2,x,wx,y,z] = Yr.gens();
print "e = " + str(e);
print "w2 = " + str(w2);
print "x = " + str(x);
print "wx = " + str(wx);
print "y = " + str(y);
print "z = " + str(z);
print;
f1 = ( y**2 - x ) * ( y**2 - 2 );
#f1 = ( y**2 - x )**3 * ( y**2 - 2 )**2;
f2 = ( z**2 - y**2 );
print "f1 = ", f1;
print "f2 = ", f2;
print;
F = Yr.ideal( list=[f1,f2] );
print "F = ", F;
print;
#sys.exit();
startLog();
t = System.currentTimeMillis();
P = F.primeDecomp();
#P = F.primaryDecomp();
t1 = System.currentTimeMillis() - t;
print "P = ", P;
print;
print "prime/primary decomp time =", t1, "milliseconds";
print;
print "F = ", F;
print;
#startLog();
terminate();
| gpl-2.0 | 6,919,120,197,560,014,000 | 19.166667 | 81 | 0.522498 | false | 2.145813 | false | false | false |
darktears/chromium-crosswalk | tools/profile_chrome/systrace_controller.py | 7 | 3589 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import threading
import zlib
from profile_chrome import controllers
from profile_chrome import util
from pylib.constants import host_paths
with host_paths.SysPath(host_paths.DEVIL_PATH):
from devil.utils import cmd_helper
_SYSTRACE_OPTIONS = [
# Compress the trace before sending it over USB.
'-z',
# Use a large trace buffer to increase the polling interval.
'-b', '16384'
]
# Interval in seconds for sampling systrace data.
_SYSTRACE_INTERVAL = 15
_TRACING_ON_PATH = '/sys/kernel/debug/tracing/tracing_on'
class SystraceController(controllers.BaseController):
def __init__(self, device, categories, ring_buffer):
controllers.BaseController.__init__(self)
self._device = device
self._categories = categories
self._ring_buffer = ring_buffer
self._done = threading.Event()
self._thread = None
self._trace_data = None
def __repr__(self):
return 'systrace'
@staticmethod
def GetCategories(device):
return device.RunShellCommand('atrace --list_categories')
def StartTracing(self, _):
self._thread = threading.Thread(target=self._CollectData)
self._thread.start()
def StopTracing(self):
self._done.set()
def PullTrace(self):
self._thread.join()
self._thread = None
if self._trace_data:
output_name = 'systrace-%s' % util.GetTraceTimestamp()
with open(output_name, 'w') as out:
out.write(self._trace_data)
return output_name
def IsTracingOn(self):
result = self._RunAdbShellCommand(['cat', _TRACING_ON_PATH])
return result.strip() == '1'
def _RunAdbShellCommand(self, command):
# We use a separate interface to adb because the one from AndroidCommands
# isn't re-entrant.
# TODO(jbudorick) Look at providing a way to unhandroll this once the
# adb rewrite has fully landed.
device_param = (['-s', str(self._device)] if str(self._device) else [])
cmd = ['adb'] + device_param + ['shell'] + command
return cmd_helper.GetCmdOutput(cmd)
def _RunATraceCommand(self, command):
cmd = ['atrace', '--%s' % command] + _SYSTRACE_OPTIONS + self._categories
return self._RunAdbShellCommand(cmd)
def _ForceStopAtrace(self):
# atrace on pre-M Android devices cannot be stopped asynchronously
# correctly. Use synchronous mode to force stop.
cmd = ['atrace', '-t', '0']
return self._RunAdbShellCommand(cmd)
def _CollectData(self):
trace_data = []
self._RunATraceCommand('async_start')
try:
while not self._done.is_set():
self._done.wait(_SYSTRACE_INTERVAL)
if not self._ring_buffer or self._done.is_set():
trace_data.append(
self._DecodeTraceData(self._RunATraceCommand('async_dump')))
finally:
trace_data.append(
self._DecodeTraceData(self._RunATraceCommand('async_stop')))
if self.IsTracingOn():
self._ForceStopAtrace()
self._trace_data = ''.join([zlib.decompress(d) for d in trace_data])
@staticmethod
def _DecodeTraceData(trace_data):
try:
trace_start = trace_data.index('TRACE:')
except ValueError:
raise RuntimeError('Systrace start marker not found')
trace_data = trace_data[trace_start + 6:]
# Collapse CRLFs that are added by adb shell.
if trace_data.startswith('\r\n'):
trace_data = trace_data.replace('\r\n', '\n')
# Skip the initial newline.
return trace_data[1:]
| bsd-3-clause | 8,110,999,072,824,533,000 | 30.208696 | 77 | 0.669546 | false | 3.610664 | false | false | false |
sorenh/cc | vendor/boto/boto/ec2/autoscale/activity.py | 11 | 2215 | # Copyright (c) 2009 Reza Lotun http://reza.lotun.name/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Activity(object):
def __init__(self, connection=None):
self.connection = connection
self.start_time = None
self.activity_id = None
self.progress = None
self.status_code = None
self.cause = None
self.description = None
def __repr__(self):
return 'Activity:%s status:%s progress:%s' % (self.description,
self.status_code,
self.progress)
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'ActivityId':
self.activity_id = value
elif name == 'StartTime':
self.start_time = value
elif name == 'Progress':
self.progress = value
elif name == 'Cause':
self.cause = value
elif name == 'Description':
self.description = value
elif name == 'StatusCode':
self.status_code = value
else:
setattr(self, name, value)
| apache-2.0 | -4,411,640,662,643,931,000 | 39.272727 | 74 | 0.637923 | false | 4.56701 | false | false | false |
mahak/neutron | neutron/ipam/drivers/neutrondb_ipam/db_models.py | 8 | 3087 | # Copyright 2015 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.db import model_base
import sqlalchemy as sa
from sqlalchemy import orm as sa_orm
# Database models used by the neutron DB IPAM driver
# NOTE(salv-orlando): The following data model creates redundancy with
# models_v2.IPAllocationPool. This level of data redundancy could be tolerated
# considering that the following model is specific to the IPAM driver logic.
# It therefore represents an internal representation of a subnet allocation
# pool and can therefore change in the future, where as
# models_v2.IPAllocationPool is the representation of IP allocation pools in
# the management layer and therefore its evolution is subject to APIs backward
# compatibility policies
class IpamAllocationPool(model_base.BASEV2, model_base.HasId):
"""Representation of an allocation pool in a Neutron subnet."""
ipam_subnet_id = sa.Column(sa.String(36),
sa.ForeignKey('ipamsubnets.id',
ondelete="CASCADE"),
nullable=False)
first_ip = sa.Column(sa.String(64), nullable=False)
last_ip = sa.Column(sa.String(64), nullable=False)
def __repr__(self):
return "%s - %s" % (self.first_ip, self.last_ip)
class IpamSubnet(model_base.BASEV2, model_base.HasId):
"""Association between IPAM entities and neutron subnets.
For subnet data persistency - such as cidr and gateway IP, the IPAM
driver relies on Neutron's subnet model as source of truth to limit
data redundancy.
"""
neutron_subnet_id = sa.Column(sa.String(36),
nullable=True)
allocation_pools = sa_orm.relationship(IpamAllocationPool,
backref='subnet',
lazy="joined",
cascade='delete')
class IpamAllocation(model_base.BASEV2):
"""Model class for IP Allocation requests. """
ip_address = sa.Column(sa.String(64), nullable=False, primary_key=True)
status = sa.Column(sa.String(36))
# The subnet identifier is redundant but come handy for looking up
# IP addresses to remove.
ipam_subnet_id = sa.Column(sa.String(36),
sa.ForeignKey('ipamsubnets.id',
ondelete="CASCADE"),
primary_key=True,
nullable=False)
| apache-2.0 | -3,437,296,657,181,676,000 | 42.478873 | 78 | 0.641075 | false | 4.35402 | false | false | false |
csirtgadgets/csirtg-mail-py | csirtg_mail/client.py | 1 | 2098 | import sys
import logging
import textwrap
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
from pprint import pprint
import json
from csirtg_mail import parse_email_from_string
LOG_FORMAT = '%(asctime)s - %(levelname)s - %(name)s[%(lineno)s] - %(message)s'
logger = logging.getLogger(__name__)
def main():
p = ArgumentParser(
description=textwrap.dedent('''\
csirtg-mail is a CLI tool for debugging, it allows you to easily input a email message and print out the
py-cgmail data structure.
example usage:
$ cat test.eml | csirtg-mail
$ csirtg-mail --file test.eml
'''),
formatter_class=RawDescriptionHelpFormatter,
prog='csirtg-mail'
)
p.add_argument("-f", "--file", dest="file", help="specify email file")
p.add_argument("-d", "--debug", help="enable debugging",
action="store_true")
p.add_argument("-s", "--sanitize",
help="strip parameters (...?foo=bar) from parsed URLs", action="store_true")
p.add_argument("--urls", help="print URLS to stdout", action="store_true")
args = p.parse_args()
loglevel = logging.INFO
if args.debug:
loglevel = logging.DEBUG
console = logging.StreamHandler()
logging.getLogger('').setLevel(loglevel)
console.setFormatter(logging.Formatter(LOG_FORMAT))
logging.getLogger('').addHandler(console)
options = vars(args)
# get email from file or stdin
if options.get("file"):
with open(options["file"], errors='ignore') as f:
email = f.read()
else:
email = sys.stdin.read()
# parse email message
results = parse_email_from_string(
email, sanitize_urls=options.get("sanitize"))
if args.urls:
for e in results:
for u in e['urls']:
print(u)
raise SystemExit
if args.debug:
results = json.dumps(results, indent=4)
else:
results = json.dumps(results)
print(results)
if __name__ == "__main__":
main()
| lgpl-3.0 | -4,362,267,693,185,706,500 | 25.556962 | 112 | 0.613918 | false | 3.93621 | false | false | false |
zougloub/libchardet | python/setup.py | 1 | 1385 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension
from Cython.Build import cythonize
extensions = [
Extension("libchardet._libchardet", ["libchardet/_libchardet.pyx"],
include_dirs = ["../src"],
libraries = ["chardet"],
library_dirs = ["../build/src"],
),
]
setup(
name = 'libchardet',
author = 'Jérôme Carretero (zougloub)',
author_email = '[email protected]',
url = r"https://github.com/zougloub/libchardet",
description = "Character Encoding Detector",
long_description= "libchardet detects the most probable character" \
" encodings in a string.\n" \
"It doesn't give the best results but the goal is for the library to be" \
" expandable and clear.\n",
version = '0.1',
license = 'MPL 2.0',
classifiers = [
# http://pypi.python.org/pypi?:action=list_classifiers
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MPL License',
'Programming Language :: Cython',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
keywords = [
'cython',
'libchardet', 'chardet', 'encoding',
],
packages = ['libchardet'],
ext_modules = cythonize(extensions),
)
| mpl-2.0 | 550,494,486,899,818,400 | 26.8125 | 76 | 0.653651 | false | 3.501266 | false | false | false |
infinisql/infinisql | manager/infinisqlmgr/management/health.py | 1 | 8863 | __author__ = 'Christopher Nelson'
import os
import time
import psutil
from infinisqlmgr.management.metric import Metric
memory = ["total", "available", "percent", "used", "free", "active", "inactive", "buffers", "cached"]
swap = ["total", "used", "free", "percent", "sin", "sout"]
cpu = ["user", "nice", "system", "idle", "iowait", "irq", "softirq", "steal", "guest", "guest_nice"]
disk_space = ["total", "used", "free", "percent"]
disk_io = ["read_count", "write_count", "read_bytes", "write_bytes", "read_time", "write_time"]
net_io = ["bytes_sent", "bytes_recv", "packets_sent", "packets_recv", "errin", "errout", "dropin", "dropout"]
class Health(object):
def __init__(self, node_id, config):
"""
        Creates a new health object for the given node. Databases for health statistics will be created under
        the metrics data directory named by 'config'. The databases use the "whisper" database format from
        graphite, so they automatically handle long-term storage with decreasing resolution. Once the databases
        are created they never grow or shrink, regardless of the amount of data stored.

        :param node_id: The node to create this health object for.
        :param config: Configuration object providing the metrics data directory ('data_dir' in the 'metrics' section).
"""
self.path = os.path.join(config.get("metrics", "data_dir"), "health", node_id[0], str(node_id[1]))
self.node_id = node_id
self.memory_alert = False
self.swap_alert = False
self.cpu_load = Metric(self.path, "cpu.load")
self.mem = [Metric(self.path, "mem.%s" % item) for item in memory]
self.swp = [Metric(self.path, "swp.%s" % item) for item in swap]
self.cpu = [Metric(self.path, "cpu.%s" % item) for item in cpu]
self.dsk_sp = {}
self.dsk_io = {}
self.net = {}
def get_metric_names(self):
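        """
        Return the sorted list of dotted metric names (for example 'cpu.load' or
        'mem.percent') discovered under this node's metric directory.
        """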
metrics = []
for root, dirnames, filenames in os.walk(self.path):
for filename in filenames:
if filename.endswith(".dp"):
metrics.append(os.path.join(root, filename).replace(self.path, "").replace("/", ".")[1:-3])
return sorted(metrics)
def capture(self):
"""
Captures stats of the local system and writes them into the series database.
:return: None
"""
self.cpu_load.update(psutil.cpu_percent(interval=None))
for i, value in enumerate(psutil.cpu_times()):
self.cpu[i].update(value)
for i,value in enumerate(psutil.virtual_memory()):
self.mem[i].update(value)
for i,value in enumerate(psutil.swap_memory()):
self.swp[i].update(value)
net_io_data = psutil.net_io_counters(pernic=True)
for name in net_io_data:
if name not in self.net:
self.net[name] = [Metric(self.path, "net.io.%s.%s" % (name,item)) for item in net_io]
net = self.net[name]
for i,value in enumerate(net_io_data[name]):
net[i].update(value)
dsk_io_data = psutil.disk_io_counters(perdisk=True)
for name in dsk_io_data:
if name not in self.dsk_io:
self.dsk_io[name] = [Metric(self.path, "dsk.io.%s.%s" % (name,item)) for item in disk_io]
dsk_io = self.dsk_io[name]
for i,value in enumerate(dsk_io_data[name]):
dsk_io[i].update(value)
self.disk_partitions = psutil.disk_partitions()
for disks in self.disk_partitions:
device = disks[0].replace("/dev/", "")
name = "-".join([el for el in device.split("/") if el])
# Create an new set of data points if we find a new disk.
if name not in self.dsk_sp:
self.dsk_sp[name] = [Metric(self.path, "dsk.space.%s.%s" % (name,item)) for item in disk_space]
# Find the disk we are storing data for
dsk = self.dsk_sp[name]
# Update the disk stats
for i, value in enumerate(psutil.disk_usage(disks[1])):
dsk[i].update(value)
def lookup(self, name):
"""
Lookup a metric name and resolve it to a metric database.
:param name: The metric name to resolve.
:return: A data point if it was resolvable, or None
"""
parts = name.split(".")
if parts[0] == "cpu":
if parts[1] == "load":
return self.cpu_load
return self.cpu[cpu.index(parts[1])]
elif parts[0] == "mem":
return self.mem[memory.index(parts[1])]
elif parts[0] == "dsk":
if parts[1] == "space":
return self.dsk_sp[parts[2]][disk_space.index(parts[3])]
elif parts[1] == "io":
return self.dsk_io[parts[2]][disk_io.index(parts[3])]
elif parts[0] == "net":
if parts[1] == "io":
return self.net_io[parts[2]][net_io.index(parts[3])]
return None
def min(self, dp, from_time, until_time=None):
"""
Request the minimum value from the given metric.
:param dp: The metric to check for minimum value.
:param from_time: The earliest time in the series.
:param until_time: The latest time in the series (optional). If omitted this defaults to now.
:return: The minimum value from the series requested.
"""
if type(dp) == type(str()):
dp = self.lookup(dp)
return min([x for x in dp.fetch(from_time, until_time)[1] if x is not None])
def max(self, dp, from_time, until_time=None):
"""
Request the maximum value from the given metric.
:param dp: The metric to check for maximum value.
:param from_time: The earliest time in the series.
:param until_time: The latest time in the series (optional). If omitted this defaults to now.
:return: The maximum value from the series requested.
"""
if type(dp) == type(str()):
dp = self.lookup(dp)
return max([x for x in dp.fetch(from_time, until_time)[1] if x is not None])
def avg(self, dp, from_time, until_time=None):
"""
Request the average value for the given metric.
:param dp: The metric to use to compute the average value.
:param from_time: The earliest time in the series.
:param until_time: The latest time in the series (optional). If omitted this defaults to now.
:return: The average value from the series requested.
"""
if type(dp) == type(str()):
dp = self.lookup(dp)
values = [x for x in dp.fetch(from_time, until_time)[1] if x is not None]
return sum(values) / len(values)
def is_healthy(self, dp, seconds, has_alert, low_water, high_water):
"""
Checks to see if the given metric has been healthy over the last 'seconds' seconds. If 'has_alert' is true then
the metric must be lower than 'low_water', otherwise it must be lower than 'high_water'. Returns True if it's
healthy, false if it's not.
:param dp: The metric to check.
:param seconds: The number of seconds of history to evaluate.
:param has_alert: True if the metric was previously in an unhealthy state.
:param low_water: The low water mark if has_alert is True.
:param high_water: The high water mark.
:return: True if the metric is healthy, False otherwise.
"""
percent_used = self.avg(dp, time.time() - seconds)
if has_alert:
return percent_used < low_water
return percent_used < high_water
def is_memory_healthy(self, seconds, low_water, high_water):
"""
Checks to see if memory is in a healthy state. This is a convenience for is_healthy("mem.percent")
:param seconds: The number of seconds of history to check for health.
:param low_water: The low water level in memory percent used.
:param high_water: The high water level in memory percent used.
:return: True if memory is healthy, False otherwise.
"""
self.memory_alert = not self.is_healthy("mem.percent", seconds, self.memory_alert, low_water, high_water)
return not self.memory_alert
def is_swap_healthy(self, seconds, low_water, high_water):
"""
Checks to see if swap is in a healthy state. This is a convenience for is_healthy("swp.percent")
:param seconds: The number of seconds of history to check for health.
:param low_water: The low water level in swap percent used.
:param high_water: The high water level in swap percent used.
:return: True if swap is healthy, False otherwise.
"""
self.swap_alert = not self.is_healthy("swp.percent", seconds, self.swap_alert, low_water, high_water)
return not self.swap_alert
| gpl-3.0 | 4,261,215,599,936,240,000 | 43.762626 | 119 | 0.597089 | false | 3.711474 | false | false | false |
axanthos/orange3-textable | _textable/widgets/OWTextableIntersect.py | 1 | 18790 | """
Class OWTextableIntersect
Copyright 2012-2019 LangTech Sarl ([email protected])
-----------------------------------------------------------------------------
This file is part of the Orange3-Textable package.
Orange3-Textable is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Orange3-Textable is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Orange3-Textable. If not, see <http://www.gnu.org/licenses/>.
"""
__version__ = '0.15.2'
import LTTL.Segmenter as Segmenter
from LTTL.Segmentation import Segmentation
from .TextableUtils import (
OWTextableBaseWidget, InfoBox, SendButton, AdvancedSettings,
pluralize, updateMultipleInputs, SegmentationListContextHandler,
SegmentationsInputList, ProgressBar
)
from Orange.widgets import widget, gui, settings
class OWTextableIntersect(OWTextableBaseWidget):
"""Orange widget for segment in-/exclusion based on other segmentation"""
name = "Intersect"
description = "In-/exclude segments based on another segmentation"
icon = "icons/Intersect.png"
priority = 4004
# Input and output channels...
inputs = [('Segmentation', Segmentation, "inputData", widget.Multiple)]
outputs = [
('Selected data', Segmentation, widget.Default),
('Discarded data', Segmentation)
]
settingsHandler = SegmentationListContextHandler(
version=__version__.rsplit(".", 1)[0]
)
segmentations = SegmentationsInputList() # type: list
# Settings...
copyAnnotations = settings.Setting(True)
mode = settings.Setting(u'Include')
autoNumber = settings.Setting(False)
autoNumberKey = settings.Setting('num')
displayAdvancedSettings = settings.Setting(False)
source = settings.ContextSetting(0)
filtering = settings.ContextSetting(0)
sourceAnnotationKey = settings.ContextSetting(u'(none)')
filteringAnnotationKey = settings.ContextSetting(u'(none)')
want_main_area = False
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.infoBox = InfoBox(widget=self.controlArea)
self.sendButton = SendButton(
widget=self.controlArea,
master=self,
callback=self.sendData,
infoBoxAttribute='infoBox',
sendIfPreCallback=self.updateGUI,
)
self.advancedSettings = AdvancedSettings(
widget=self.controlArea,
master=self,
callback=self.sendButton.settingsChanged,
)
# GUI...
# TODO: update docs to match removal of source annotation from basic
self.advancedSettings.draw()
# Intersect box
self.intersectBox = gui.widgetBox(
widget=self.controlArea,
box=u'Intersect',
orientation='vertical',
addSpace=False,
)
self.modeCombo = gui.comboBox(
widget=self.intersectBox,
master=self,
value='mode',
sendSelectedValue=True,
items=[u'Include', u'Exclude'],
orientation='horizontal',
label=u'Mode:',
labelWidth=180,
callback=self.sendButton.settingsChanged,
tooltip=(
u"Specify whether source segments whose type is\n"
u"present in the filter segmentation should be\n"
u"included in or excluded from the output\n"
u"segmentation."
),
)
self.modeCombo.setMinimumWidth(140)
gui.separator(widget=self.intersectBox, height=3)
self.sourceCombo = gui.comboBox(
widget=self.intersectBox,
master=self,
value='source',
orientation='horizontal',
label=u'Source segmentation:',
labelWidth=180,
callback=self.sendButton.settingsChanged,
tooltip=(
u"The segmentation from which a subset of segments\n"
u"will be selected to build the output segmentation."
),
)
gui.separator(widget=self.intersectBox, height=3)
self.sourceAnnotationCombo = gui.comboBox(
widget=self.intersectBox,
master=self,
value='sourceAnnotationKey',
sendSelectedValue=True,
emptyString=u'(none)',
orientation='horizontal',
label=u'Source annotation key:',
labelWidth=180,
callback=self.sendButton.settingsChanged,
tooltip=(
u"Indicate whether source segments will be selected\n"
u"based on annotation values corresponding to a\n"
u"specific annotation key or rather on their content\n"
u"(value 'none')."
),
)
gui.separator(widget=self.intersectBox, height=3)
self.filteringCombo = gui.comboBox(
widget=self.intersectBox,
master=self,
value='filtering',
orientation='horizontal',
label=u'Filter segmentation:',
labelWidth=180,
callback=self.sendButton.settingsChanged,
tooltip=(
u"The segmentation whose types will be used to\n"
u"include source segments in (or exclude them from)\n"
u"the output segmentation."
),
)
gui.separator(widget=self.intersectBox, height=3)
self.filteringAnnotationCombo = gui.comboBox(
widget=self.intersectBox,
master=self,
value='filteringAnnotationKey',
sendSelectedValue=True,
emptyString=u'(none)',
orientation='horizontal',
label=u'Filter annotation key:',
labelWidth=180,
callback=self.sendButton.settingsChanged,
tooltip=(
u"Indicate whether filter segment types are based\n"
u"on annotation values corresponding to a specific\n"
u"annotation key or rather on segment content\n"
u"(value 'none')."
),
)
gui.separator(widget=self.intersectBox, height=3)
self.advancedSettings.advancedWidgets.append(self.intersectBox)
self.advancedSettings.advancedWidgetsAppendSeparator()
# Options box...
optionsBox = gui.widgetBox(
widget=self.controlArea,
box=u'Options',
orientation='vertical',
addSpace=False
)
optionsBoxLine2 = gui.widgetBox(
widget=optionsBox,
box=False,
orientation='horizontal',
addSpace=True,
)
gui.checkBox(
widget=optionsBoxLine2,
master=self,
value='autoNumber',
label=u'Auto-number with key:',
labelWidth=180,
callback=self.sendButton.settingsChanged,
tooltip=(
u"Annotate output segments with increasing numeric\n"
u"indices."
),
)
self.autoNumberKeyLineEdit = gui.lineEdit(
widget=optionsBoxLine2,
master=self,
value='autoNumberKey',
orientation='horizontal',
callback=self.sendButton.settingsChanged,
tooltip=(
u"Annotation key for output segment auto-numbering."
),
)
gui.checkBox(
widget=optionsBox,
master=self,
value='copyAnnotations',
label=u'Copy annotations',
callback=self.sendButton.settingsChanged,
tooltip=(
u"Copy all annotations from input to output segments."
),
)
gui.separator(widget=optionsBox, height=2)
self.advancedSettings.advancedWidgets.append(optionsBox)
self.advancedSettings.advancedWidgetsAppendSeparator()
# Basic intersect box
self.basicIntersectBox = gui.widgetBox(
widget=self.controlArea,
box=u'Intersect',
orientation='vertical',
)
self.basicModeCombo = gui.comboBox(
widget=self.basicIntersectBox,
master=self,
value='mode',
sendSelectedValue=True,
items=[u'Include', u'Exclude'],
orientation='horizontal',
label=u'Mode:',
labelWidth=180,
callback=self.sendButton.settingsChanged,
tooltip=(
u"Specify whether source segments whose type is\n"
u"present in the filter segmentation should be\n"
u"included in or excluded from the output\n"
u"segmentation."
),
)
self.basicModeCombo.setMinimumWidth(140)
gui.separator(widget=self.basicIntersectBox, height=3)
self.basicSourceCombo = gui.comboBox(
widget=self.basicIntersectBox,
master=self,
value='source',
orientation='horizontal',
label=u'Source segmentation:',
labelWidth=180,
callback=self.sendButton.settingsChanged,
tooltip=(
u"The segmentation from which a subset of segments\n"
u"will be selected to build the output segmentation."
),
)
gui.separator(widget=self.basicIntersectBox, height=3)
self.basicFilteringCombo = gui.comboBox(
widget=self.basicIntersectBox,
master=self,
value='filtering',
orientation='horizontal',
label=u'Filter segmentation:',
labelWidth=180,
callback=self.sendButton.settingsChanged,
tooltip=(
u"The segmentation whose types will be used to\n"
u"include source segments in (or exclude them from)\n"
u"the output segmentation."
),
)
gui.separator(widget=self.basicIntersectBox, height=3)
self.advancedSettings.basicWidgets.append(self.basicIntersectBox)
self.advancedSettings.basicWidgetsAppendSeparator()
gui.rubber(self.controlArea)
# Send button...
self.sendButton.draw()
# Info box...
self.infoBox.draw()
self.sendButton.sendIf()
self.adjustSizeWithTimer()
def sendData(self):
"""(Have LTTL.Segmenter) perform the actual filtering"""
# Check that there's something on input...
if len(self.segmentations) == 0:
self.infoBox.setText(u'Widget needs input.', 'warning')
self.send('Selected data', None, self)
self.send('Discarded data', None, self)
return
assert self.source >= 0
assert self.filtering >= 0
# TODO: remove message 'No label was provided.' from docs
# Source and filtering parameter...
source = self.segmentations[self.source][1]
filtering = self.segmentations[self.filtering][1]
if self.displayAdvancedSettings:
source_annotation_key = self.sourceAnnotationKey or None
if self.sourceAnnotationKey == u'(none)':
source_annotation_key = None
filtering_annotation_key = self.filteringAnnotationKey or None
if filtering_annotation_key == u'(none)':
filtering_annotation_key = None
else:
source_annotation_key = None
filtering_annotation_key = None
# Check that autoNumberKey is not empty (if necessary)...
if self.displayAdvancedSettings and self.autoNumber:
if self.autoNumberKey:
autoNumberKey = self.autoNumberKey
num_iterations = 2 * len(source['segmentation'])
else:
self.infoBox.setText(
u'Please enter an annotation key for auto-numbering.',
'warning'
)
self.send('Selected data', None, self)
self.send('Discarded data', None, self)
return
else:
autoNumberKey = None
num_iterations = len(source)
# Basic settings...
if self.displayAdvancedSettings:
copyAnnotations = self.copyAnnotations
else:
copyAnnotations = True
# Perform filtering...
self.infoBox.setText(u"Processing, please wait...", "warning")
self.controlArea.setDisabled(True)
progressBar = ProgressBar(
self,
iterations=num_iterations
)
(filtered_data, discarded_data) = Segmenter.intersect(
source=source,
source_annotation_key=source_annotation_key,
filtering=filtering,
filtering_annotation_key=filtering_annotation_key,
mode=self.mode.lower(),
label=self.captionTitle,
copy_annotations=self.copyAnnotations,
auto_number_as=autoNumberKey,
progress_callback=progressBar.advance,
)
progressBar.finish()
self.controlArea.setDisabled(False)
message = u'%i segment@p sent to output.' % len(filtered_data)
message = pluralize(message, len(filtered_data))
self.infoBox.setText(message)
self.send('Selected data', filtered_data, self)
self.send('Discarded data', discarded_data, self)
self.sendButton.resetSettingsChangedFlag()
def inputData(self, newItem, newId=None):
"""Process incoming data."""
self.closeContext()
updateMultipleInputs(
self.segmentations,
newItem,
newId,
self.onInputRemoval
)
self.infoBox.inputChanged()
self.updateGUI()
def onInputRemoval(self, index):
"""Handle removal of input with given index"""
if index < self.source:
self.source -= 1
elif index == self.source \
and self.source == len(self.segmentations) - 1:
self.source -= 1
if index < self.filtering:
self.filtering -= 1
elif index == self.filtering \
and self.filtering == len(self.segmentations) - 1:
self.filtering -= 1
def updateGUI(self):
"""Update GUI state"""
if self.displayAdvancedSettings:
sourceCombo = self.sourceCombo
filteringCombo = self.filteringCombo
intersectBox = self.intersectBox
else:
sourceCombo = self.basicSourceCombo
filteringCombo = self.basicFilteringCombo
intersectBox = self.basicIntersectBox
sourceCombo.clear()
self.sourceAnnotationCombo.clear()
self.sourceAnnotationCombo.addItem(u'(none)')
self.advancedSettings.setVisible(self.displayAdvancedSettings)
if len(self.segmentations) == 0:
self.source = -1
self.sourceAnnotationKey = u''
intersectBox.setDisabled(True)
return
else:
if len(self.segmentations) == 1:
self.source = 0
for segmentation in self.segmentations:
sourceCombo.addItem(segmentation[1].label)
self.source = max(self.source, 0)
sourceAnnotationKeys \
= self.segmentations[self.source][1].get_annotation_keys()
for k in sourceAnnotationKeys:
self.sourceAnnotationCombo.addItem(k)
if self.sourceAnnotationKey not in sourceAnnotationKeys:
self.sourceAnnotationKey = u'(none)'
self.sourceAnnotationKey = self.sourceAnnotationKey
intersectBox.setDisabled(False)
self.autoNumberKeyLineEdit.setDisabled(not self.autoNumber)
filteringCombo.clear()
for index in range(len(self.segmentations)):
filteringCombo.addItem(self.segmentations[index][1].label)
self.filtering = max(self.filtering, 0)
segmentation = self.segmentations[self.filtering]
if self.displayAdvancedSettings:
self.filteringAnnotationCombo.clear()
self.filteringAnnotationCombo.addItem(u'(none)')
filteringAnnotationKeys = segmentation[1].get_annotation_keys()
for key in filteringAnnotationKeys:
self.filteringAnnotationCombo.addItem(key)
if self.filteringAnnotationKey not in filteringAnnotationKeys:
self.filteringAnnotationKey = u'(none)'
self.filteringAnnotationKey = self.filteringAnnotationKey
def setCaption(self, title):
if 'captionTitle' in dir(self):
changed = title != self.captionTitle
super().setCaption(title)
if changed:
self.sendButton.settingsChanged()
else:
super().setCaption(title)
def handleNewSignals(self):
"""Overridden: called after multiple signals have been added"""
self.openContext(self.uuid, self.segmentations)
self.updateGUI()
self.sendButton.sendIf()
if __name__ == '__main__':
import sys
import re
from PyQt5.QtWidgets import QApplication
from LTTL.Input import Input
appl = QApplication(sys.argv)
ow = OWTextableIntersect()
seg1 = Input(u'hello world', 'text')
seg2 = Segmenter.tokenize(
seg1,
[
(re.compile(r'hello'), u'tokenize', {'tag': 'interj'}),
(re.compile(r'world'), u'tokenize', {'tag': 'noun'}),
],
label='words',
)
seg3 = Segmenter.tokenize(
seg2,
[(re.compile(r'[aeiou]'), u'tokenize')],
label='V'
)
seg4 = Segmenter.tokenize(
seg2,
[(re.compile(r'[hlwrdc]'), u'tokenize')],
label='C'
)
seg5 = Segmenter.tokenize(
seg2,
[(re.compile(r' '), u'tokenize')],
label='S'
)
seg6 = Segmenter.concatenate(
[seg3, seg4, seg5],
import_labels_as='category',
label='chars',
sort=True,
merge_duplicates=True,
)
seg7 = Segmenter.tokenize(
seg6,
[(re.compile(r'l'), u'tokenize')],
label='pivot'
)
ow.inputData(seg2, 1)
ow.inputData(seg6, 2)
ow.inputData(seg7, 3)
ow.show()
appl.exec_()
ow.saveSettings()
| gpl-3.0 | 1,970,531,423,506,143,500 | 34.790476 | 77 | 0.589835 | false | 4.546334 | false | false | false |
ucloud/uai-sdk | uaitrain/arch/tensorflow/uflag.py | 1 | 3122 | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
'''
UCloud Train basic flags
'''
import tensorflow as tf
flags = tf.app.flags
# =======================================================================
# Constant variables
# --work_dir=/data
# --data_dir=/data/data
# --output_dir=/data/output
#
# Note: Use this params as contant values
# Do not set this params !!!
# =======================================================================
'''
Default work dir. The working dir for the traing job, it will contains:
/data/data --data_dir
/data/output --output_dir
Note: DO NOT CHANGE THIS VALUE
UCloud Train Job Executor Will Set it Automatically
'''
flags.DEFINE_string("work_dir", "/data", "Default work path")
'''
Default data path used in Training, all data will be downloaded into this path
Please use data in this path as input for Training
Note: DO NOT CHANGE THIS VALUE
UCloud Train Job Executor Will Set it Automatically
'''
flags.DEFINE_string("data_dir", "/data/data", "Default data path")
'''
Default output path used in Training, files in this path will be uploaded to UFile
after training finished.
You can also assume your checkpoint files inside output_path (If you provided
in the UCloud console), files will also be downloaded into this path befor
Training start
Note: DO NOT CHANGE THIS VALUE
UCloud Train Job Executor Will Set it Automatically
'''
flags.DEFINE_string("output_dir", "/data/output", "Default output path")
'''
Default tensorboard output path used in Training, iles in this path will be uploaded to UFile
after training finished.
This dir is same as output_dir
Note: DO NOT CHANGE THIS VALUE
UCloud Train Job Executor Will Set it Automatically
'''
flags.DEFINE_string("log_dir", "/data/output", "Default log path")
'''
Define num_gpus for training
Note: DO NOT CHANGE THIS VALUE
UCloud Train Job Executor Will Set it Automatically
'''
flags.DEFINE_integer("num_gpus", 0, "Num of avaliable gpus")
# =======================================================================
# Usable variables
# --max_step=<int>
#
# Note: You can SET and USE these params
# UCloud may use these params as guidance for training projects
# =======================================================================
'''
You can use this param to transfer the max_step value
Note: You can use it as your wish
'''
flags.DEFINE_integer("max_step", 0, "Max Step")
| apache-2.0 | 3,565,311,756,186,062,000 | 31.863158 | 95 | 0.633568 | false | 4.107895 | false | false | false |
campagnola/neuroanalysis | tools/import_spike_detection.py | 1 | 3323 | """Script used to generate evoked spike test data
Usage: python -i import_spike_detection.py expt_id cell_id
This will load all spikes evoked in the specified cell one at a time.
For each one you can select whether to write the data out to a new test file.
Note that files are saved without results; to generate these, you must run
unit tests with --audit.
"""
import pickle, sys
import numpy as np
from scipy.optimize import curve_fit
from neuroanalysis.spike_detection import detect_evoked_spikes, SpikeDetectTestCase
from neuroanalysis.ui.spike_detection import SpikeDetectUI
from neuroanalysis.data import TSeries, TSeriesList, PatchClampRecording
from multipatch_analysis.database import default_db as db
from multipatch_analysis.data import Analyzer, PulseStimAnalyzer, MultiPatchProbe
import pyqtgraph as pg
pg.dbg() # for inspecting exception stack
expt_id = float(sys.argv[1])
cell_id = int(sys.argv[2])
ui = SpikeDetectUI()
skip_btn = pg.QtGui.QPushButton('skip')
ui.widget.addWidget(skip_btn)
save_btn = pg.QtGui.QPushButton('save')
ui.widget.addWidget(save_btn)
session = db.session()
def iter_pulses():
"""Generator that yields all selected pulses one at a time.
"""
# look up experiment from database and load the NWB file
expt = db.experiment_from_timestamp(expt_id)
cell = expt.cells[cell_id]
channel = cell.electrode.device_id
sweeps = expt.data.contents
for sweep in sweeps:
# Ignore sweep if it doesn't have the requested channel, or the correct stimulus
try:
pre_rec = sweep[channel]
except KeyError:
continue
if not isinstance(pre_rec, MultiPatchProbe):
continue
print("sweep: %d channel: %d" % (sweep.key, channel))
# Get chunks for each stim pulse
pulse_stim = PulseStimAnalyzer.get(pre_rec)
chunks = pulse_stim.pulse_chunks()
for chunk in chunks:
yield (expt_id, cell_id, sweep, channel, chunk)
all_pulses = iter_pulses()
last_result = None
def load_next():
global all_pulses, ui, last_result
try:
(expt_id, cell_id, sweep, channel, chunk) = next(all_pulses)
except StopIteration:
ui.widget.hide()
return
# run spike detection on each chunk
pulse_edges = chunk.meta['pulse_edges']
spikes = detect_evoked_spikes(chunk, pulse_edges, ui=ui)
ui.show_result(spikes)
# copy just the necessary parts of recording data for export to file
export_chunk = PatchClampRecording(channels={k:TSeries(chunk[k].data, t0=chunk[k].t0, sample_rate=chunk[k].sample_rate) for k in chunk.channels})
export_chunk.meta.update(chunk.meta)
# construct test case
tc = SpikeDetectTestCase()
tc._meta = {
'expt_id': expt_id,
'cell_id': cell_id,
'device_id': channel,
'sweep_id': sweep.key,
}
tc._input_args = {
'data': export_chunk,
'pulse_edges': chunk.meta['pulse_edges'],
}
last_result = tc
def save_and_load_next():
global last_result
# write results out to test file
test_file = 'test_data/evoked_spikes/%s.pkl' % (last_result.name)
last_result.save_file(test_file)
load_next()
skip_btn.clicked.connect(load_next)
save_btn.clicked.connect(save_and_load_next)
load_next()
| mit | -9,213,058,678,356,565,000 | 29.209091 | 149 | 0.681011 | false | 3.38736 | true | false | false |
jeffh/describe | describe/mock/stub.py | 1 | 5002 | from describe.mock.utils import TWO_OPS_FULL, ONE_OPS, NIL
from describe.mock.expectations import Invoke, ExpectationList, ExpectationSet, Expectation
from describe.mock.mock import Mock
from describe.utils import Replace
def stub(*args, **attributes):
if args:
stub = Stub(args[0])
else:
stub = Stub()
for key, value in attributes.items():
setattr(stub, key, value)
return stub
def stub_attr(obj, key, value=NIL):
if value is NIL:
value = stub()
return Replace(obj, key, value)
class StubErrorDelegate(object):
def __init__(self):
self.instance = None
self.items = {}
def _new_stub(self, attrname):
new_stub = self.instance.__class__()
setattr(self.instance, attrname, new_stub)
return new_stub
def no_expectations(self, expectations, sender, attrname, args, kwargs):
recent_history = reversed(expectations.history)
for expect in recent_history:
try:
return expectations.validate_expectation(expect, sender, attrname, args, kwargs)
except (ExpectationList.FailedToSatisfyArgumentsError, ExpectationList.FailedToSatisfyAttrnameError):
pass
return self._new_stub(attrname)
def fails_to_satisfy_attrname(self, expectations, sender, attrname, args, kwargs, expectation):
return self._new_stub(attrname)
def fails_to_satisfy_arguments(self, expectations, sender, attrname, args, kwargs, expectation):
return self._new_stub(attrname)
class Stub(Mock):
def __init__(self, name='Stub'):
delegate = StubErrorDelegate()
super(self.__class__, self).__init__(name=name, error_delegate=delegate)
delegate.instance = self
# saved for reference
if 0:
IGNORE_LIST = set((
'_Stub__attributes', '_Stub__magic', '_Stub__items', '__class__', '_create_magic_method',
# 'expects'
))
def process(dictionary, name, cls):
if name not in dictionary:
dictionary[name] = cls()
if isinstance(dictionary[name], Invoke):
return dictionary[name]()
return dictionary[name]
class Stub(object):
"""Stubs are objects that can stand-in for any other object. It simply returns more stubs when
accessed or invoked.
This is used for testing functionality that doesn't particularly care about the objects they
are manipulating, (ie - a function that splits an array in half doesn't care about what kinds
of elements are in there)
"""
def __init__(self, **attributes):
self.__attributes = attributes
self.__items = {}
self.__magic = {}
@classmethod
def attr(cls, obj, name, value=NIL):
return StubAttr(obj, name, getattr(obj, name, NIL), value).replace()
# @property
# def expects(self):
# raise TypeError('reserved for API')
def __getattribute__(self, name):
if name in IGNORE_LIST:
return super(Stub, self).__getattribute__(name)
return process(self.__attributes, name, self.__class__)
def __setattr__(self, name, value):
if name in IGNORE_LIST:
return super(Stub, self).__setattr__(name, value)
self.__attributes[name] = value
def __getitem__(self, name):
return self.__items.get(name, None)
def __setitem__(self, name, value):
self.__items[name] = value
def __call__(self, *args, **kwargs):
full_name = '__call__'
return process(self.__magic, full_name, self.__class__)
def _create_magic_method(name):
full_name = '__%s__' % name
def getter(self):
return process(self.__magic, full_name, self.__class__)
getter.__name__ = full_name
return property(getter)
for op in TWO_OPS_FULL + ONE_OPS:
exec('__%s__ = _create_magic_method(%r)' % (op, op))
class StubAttr(object):
"Manages the lifetime of a stub on an attribute."
def __init__(self, obj, name, orig_value, new_value):
self.obj, self.name, self.orig_value, self.new_value = obj, name, orig_value, new_value
@property
def stub(self):
return self.new_value
def replace(self):
if self.new_value is NIL:
self.new_value = Stub()
setattr(self.obj, self.name, self.new_value)
return self
def restore(self):
if self.orig_value is NIL:
delattr(self.obj, self.name)
else:
setattr(self.obj, self.name, self.orig_value)
return self
def __enter__(self):
return self.replace().stub
def __exit__(self, type, info, tb):
self.restore()
def __del__(self):
self.restore()
| mit | 5,373,466,267,676,072,000 | 32.346667 | 113 | 0.579568 | false | 4.182274 | false | false | false |
fingeronthebutton/Selenium2Library | src/Selenium2Library/locators/windowmanager.py | 7 | 5724 | from types import *
from robot import utils
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.common.exceptions import NoSuchWindowException
class WindowManager(object):
def __init__(self):
self._strategies = {
'title': self._select_by_title,
'name': self._select_by_name,
'url': self._select_by_url,
None: self._select_by_default
}
def get_window_ids(self, browser):
return [ window_info[1] for window_info in self._get_window_infos(browser) ]
def get_window_names(self, browser):
return [ window_info[2] for window_info in self._get_window_infos(browser) ]
def get_window_titles(self, browser):
return [ window_info[3] for window_info in self._get_window_infos(browser) ]
def select(self, browser, locator):
assert browser is not None
if locator is not None:
if isinstance(locator, list):
self._select_by_excludes(browser, locator)
return
if locator.lower() == "self" or locator.lower() == "current":
return
if locator.lower() == "new" or locator.lower() == "popup":
self._select_by_last_index(browser)
return
(prefix, criteria) = self._parse_locator(locator)
strategy = self._strategies.get(prefix)
if strategy is None:
raise ValueError("Window locator with prefix '" + prefix + "' is not supported")
return strategy(browser, criteria)
# Strategy routines, private
def _select_by_title(self, browser, criteria):
self._select_matching(
browser,
lambda window_info: window_info[3].strip().lower() == criteria.lower(),
"Unable to locate window with title '" + criteria + "'")
def _select_by_name(self, browser, criteria):
self._select_matching(
browser,
lambda window_info: window_info[2].strip().lower() == criteria.lower(),
"Unable to locate window with name '" + criteria + "'")
def _select_by_url(self, browser, criteria):
self._select_matching(
browser,
lambda window_info: window_info[4].strip().lower() == criteria.lower(),
"Unable to locate window with URL '" + criteria + "'")
def _select_by_default(self, browser, criteria):
if criteria is None or len(criteria) == 0 or criteria.lower() == "null":
handles = browser.get_window_handles()
browser.switch_to_window(handles[0])
return
try:
starting_handle = browser.get_current_window_handle()
except NoSuchWindowException:
starting_handle = None
for handle in browser.get_window_handles():
browser.switch_to_window(handle)
if criteria == handle:
return
for item in browser.get_current_window_info()[2:4]:
if item.strip().lower() == criteria.lower():
return
if starting_handle:
browser.switch_to_window(starting_handle)
raise ValueError("Unable to locate window with handle or name or title or URL '" + criteria + "'")
def _select_by_last_index(self, browser):
handles = browser.get_window_handles()
try:
if handles[-1] == browser.get_current_window_handle():
raise AssertionError("No new window at last index. Please use '@{ex}= | List Windows' + new window trigger + 'Select Window | ${ex}' to find it.")
except IndexError:
raise AssertionError("No window found")
except NoSuchWindowException:
raise AssertionError("Currently no focus window. where are you making a popup window?")
browser.switch_to_window(handles[-1])
def _select_by_excludes(self, browser, excludes):
for handle in browser.get_window_handles():
if handle not in excludes:
browser.switch_to_window(handle)
return
raise ValueError("Unable to locate new window")
# Private
def _parse_locator(self, locator):
prefix = None
criteria = locator
if locator is not None and len(locator) > 0:
locator_parts = locator.partition('=')
if len(locator_parts[1]) > 0:
prefix = locator_parts[0].strip().lower()
criteria = locator_parts[2].strip()
if prefix is None or prefix == 'name':
if criteria is None or criteria.lower() == 'main':
criteria = ''
return (prefix, criteria)
def _get_window_infos(self, browser):
window_infos = []
try:
starting_handle = browser.get_current_window_handle()
except NoSuchWindowException:
starting_handle = None
try:
for handle in browser.get_window_handles():
browser.switch_to_window(handle)
window_infos.append(browser.get_current_window_info())
finally:
if starting_handle:
browser.switch_to_window(starting_handle)
return window_infos
def _select_matching(self, browser, matcher, error):
try:
starting_handle = browser.get_current_window_handle()
except NoSuchWindowException:
starting_handle = None
for handle in browser.get_window_handles():
browser.switch_to_window(handle)
if matcher(browser.get_current_window_info()):
return
if starting_handle:
browser.switch_to_window(starting_handle)
raise ValueError(error)
| apache-2.0 | 8,729,557,066,157,296,000 | 39.595745 | 162 | 0.587352 | false | 4.430341 | false | false | false |
EnstaBretagneClubRobo/enstaB-ros | ai_mapping_robot/scripts/waiter_subscriber.py | 1 | 3838 | #!/usr/bin/env python
import rospy
import smach
import smach_ros
import time
from std_msgs.msg import Empty,String,Int8
from gps_handler.srv import *
from ai_mapping_robot.msg import ErrorMessage
from ai_mapping_robot.msg import InitData
import tf.transformations as trans
from math import *
from pwm_serial_py.srv import Over_int
############### wait Init Data ##############################
def initDataCallback(msg):
global waitInitDataMsg
global initDataMsg
initDataMsg = msg
waitInitDataMsg = 0
def waitForInitData(time):
global waitInitDataMsg,initDataMsg
start = rospy.get_time()
waitInitDataMsg = 1
rospy.loginfo("wait InitData ...")
s = rospy.Subscriber('init_data',InitData,initDataCallback)
while waitInitDataMsg and rospy.get_time()-start < time:
rospy.sleep(1.0/20.0)
s.unregister()
if not waitInitDataMsg:
return initDataMsg
else:
return 'Error'
#################Wait GPS List of waypoints ###################
def casesCallback(msg):
global waitGPSData
global lastGPS,t1
t1 = msg.data.split('\n')
lastGPS = t1[:-1].split(";")
waitGPSData = 0
def waitForGPSData(AI,time):
global waitGPSData,lastGPS,t1
start = rospy.get_time()
waitGPSData = 1
s = rospy.Subscriber('gps_string',String,casesCallback)
rospy.loginfo("wait GPSData ...")
while waitGPSData and rospy.get_time()-start < time:
if AI.preempt_requested():
rospy.loginfo("Go building GPS is being preempted")
AI.service_preempt()
return 'preempted'
rospy.sleep(1.0/20.0)
s.unregister()
if not waitGPSData:
return (lastGPS,t1)
else:
return 'Error'
################ Entry Init ######################################
def findHeading(listener,cmdPublisher,heading):
(r,p,yaw)=(0,0,0)
try:
(trans1,rot1) = listener.lookupTransform("local_origin", "fcu", rospy.Time(0))
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
(r,p,yaw) = trans.euler_from_quaternion(rot1)
e = heading-yaw;
#insert lidar data
while abs(e)>0.1:
try:
(trans1,rot1) = listener.lookupTransform("local_origin", "fcu", rospy.Time(0))
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
rospy.loginfo("echec local_origin fcu")
(r,p,yaw) = trans.euler_from_quaternion(rot1)
err= heading-yaw
u=20*(2/PI)*atan(tan(err/2));#atan for modulo 2*pi*/
sendCommand(1500,1500+u);
sendCommand(1500,1500);
def sendCommand(channelSpeed,channelYaw):
try:
send_pwm = rospy.ServiceProxy('/pwm_serial_send',Over_int)
resp1 = send_pwm([channelSpeed,0,channelYaw,0,0,0,0,0])
return resp1.result
except rospy.ServiceException, e:
print "Service call failed : %s"%e
def dataCallback(msg):
global waitDataMsg
waitDataMsg = 0
def waitForRemote(time):
global waitDataMsg
start = rospy.get_time()
waitDataMsg = 1
rospy.loginfo("wait For Remote ...")
s = rospy.Subscriber('/restart_msg',Int8,dataCallback)
while waitDataMsg and rospy.get_time()-start < time:
rospy.sleep(1.0/20.0)
s.unregister()
return not waitDataMsg
############ Restart #########################
def remoteGoCallback(msg):
global waitRestartMsg,message
waitRestartMsg = 0
message = msg
def waitForRemoteGo(time):
global waitRestartMsg,message
message = Int8(0)
start = rospy.get_time()
waitRestartMsg = 1
rospy.loginfo("wait RemoteGo ...")
s = rospy.Subscriber('/restart_msg',Int8,remoteGoCallback)
while waitRestartMsg and rospy.get_time()-start < time:
rospy.sleep(1.0/20.0)
s.unregister()
return message
| gpl-3.0 | -3,359,531,243,991,592,000 | 28.523077 | 89 | 0.634706 | false | 3.460775 | false | false | false |
puentesarrin/pymongolab | setup.py | 1 | 1854 | # -*- coding: utf-8 *-*
import os
import sys
import subprocess
try:
from setuptools import setup
except ImportError:
from distribute_setup import use_setuptools
use_setuptools()
from setuptools import setup
from distutils.cmd import Command
version = "1.2.+"
class doc(Command):
description = "generate documentation"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
path = "doc/build/%s" % version
try:
os.makedirs(path)
except:
pass
status = subprocess.call(["sphinx-build", "-E", "-b", "html",
"doc", path])
if status:
raise RuntimeError("documentation step '%s' failed" % ("html",))
sys.stdout.write("\nDocumentation step '%s' performed, results here:\n"
" %s/\n" % ("html", path))
f = open("README.rst")
try:
try:
readme_content = f.read()
except:
readme_content = ""
finally:
f.close()
setup(
name="pymongolab",
version=version,
description="PyMongoLab is a client library for MongoLab REST API.",
long_description=readme_content,
author=u"Jorge Puente Sarrín",
author_email="[email protected]",
url="http://pymongolab.puentesarr.in",
packages=['mongolabclient', 'pymongolab'],
keywords=["mongolab", "pymongolab", "mongolabclient", "mongo", "mongodb"],
install_requires=["pymongo", "requests"],
license="Apache License, Version 2.0",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Database"],
cmdclass={"doc": doc},
)
| apache-2.0 | -2,752,534,590,338,345,000 | 25.471429 | 79 | 0.606044 | false | 3.884696 | false | false | false |
cloudtools/troposphere | troposphere/iotsitewise.py | 1 | 5261 | # Copyright (c) 2012-2021, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
# Resource specification version: 31.0.0
from troposphere import Tags
from . import AWSObject, AWSProperty
class IamRole(AWSProperty):
props = {
"arn": (str, False),
}
class IamUser(AWSProperty):
props = {
"arn": (str, False),
}
class User(AWSProperty):
props = {
"id": (str, False),
}
class AccessPolicyIdentity(AWSProperty):
props = {
"IamRole": (IamRole, False),
"IamUser": (IamUser, False),
"User": (User, False),
}
class PortalProperty(AWSProperty):
props = {
"id": (str, False),
}
class Project(AWSProperty):
props = {
"id": (str, False),
}
class AccessPolicyResource(AWSProperty):
props = {
"Portal": (PortalProperty, False),
"Project": (Project, False),
}
class AccessPolicy(AWSObject):
resource_type = "AWS::IoTSiteWise::AccessPolicy"
props = {
"AccessPolicyIdentity": (AccessPolicyIdentity, True),
"AccessPolicyPermission": (str, True),
"AccessPolicyResource": (AccessPolicyResource, True),
}
class AssetHierarchy(AWSProperty):
props = {
"ChildAssetId": (str, True),
"LogicalId": (str, True),
}
class AssetProperty(AWSProperty):
props = {
"Alias": (str, False),
"LogicalId": (str, True),
"NotificationState": (str, False),
}
class Asset(AWSObject):
resource_type = "AWS::IoTSiteWise::Asset"
props = {
"AssetHierarchies": ([AssetHierarchy], False),
"AssetModelId": (str, True),
"AssetName": (str, True),
"AssetProperties": ([AssetProperty], False),
"Tags": (Tags, False),
}
class Attribute(AWSProperty):
props = {
"DefaultValue": (str, False),
}
class VariableValue(AWSProperty):
props = {
"HierarchyLogicalId": (str, False),
"PropertyLogicalId": (str, True),
}
class ExpressionVariable(AWSProperty):
props = {
"Name": (str, True),
"Value": (VariableValue, True),
}
class TumblingWindow(AWSProperty):
props = {
"Interval": (str, True),
}
class MetricWindow(AWSProperty):
props = {
"Tumbling": (TumblingWindow, False),
}
class Metric(AWSProperty):
props = {
"Expression": (str, True),
"Variables": ([ExpressionVariable], True),
"Window": (MetricWindow, True),
}
class Transform(AWSProperty):
props = {
"Expression": (str, True),
"Variables": ([ExpressionVariable], True),
}
class PropertyType(AWSProperty):
props = {
"Attribute": (Attribute, False),
"Metric": (Metric, False),
"Transform": (Transform, False),
"TypeName": (str, True),
}
class AssetModelProperty(AWSProperty):
props = {
"DataType": (str, True),
"DataTypeSpec": (str, False),
"LogicalId": (str, True),
"Name": (str, True),
"Type": (PropertyType, True),
"Unit": (str, False),
}
class AssetModelCompositeModel(AWSProperty):
props = {
"CompositeModelProperties": ([AssetModelProperty], False),
"Description": (str, False),
"Name": (str, True),
"Type": (str, True),
}
class AssetModelHierarchy(AWSProperty):
props = {
"ChildAssetModelId": (str, True),
"LogicalId": (str, True),
"Name": (str, True),
}
class AssetModel(AWSObject):
resource_type = "AWS::IoTSiteWise::AssetModel"
props = {
"AssetModelCompositeModels": ([AssetModelCompositeModel], False),
"AssetModelDescription": (str, False),
"AssetModelHierarchies": ([AssetModelHierarchy], False),
"AssetModelName": (str, True),
"AssetModelProperties": ([AssetModelProperty], False),
"Tags": (Tags, False),
}
class Dashboard(AWSObject):
resource_type = "AWS::IoTSiteWise::Dashboard"
props = {
"DashboardDefinition": (str, True),
"DashboardDescription": (str, True),
"DashboardName": (str, True),
"ProjectId": (str, False),
"Tags": (Tags, False),
}
class GatewayCapabilitySummary(AWSProperty):
props = {
"CapabilityConfiguration": (str, False),
"CapabilityNamespace": (str, True),
}
class Greengrass(AWSProperty):
props = {
"GroupArn": (str, True),
}
class GatewayPlatform(AWSProperty):
props = {
"Greengrass": (Greengrass, True),
}
class Gateway(AWSObject):
resource_type = "AWS::IoTSiteWise::Gateway"
props = {
"GatewayCapabilitySummaries": ([GatewayCapabilitySummary], False),
"GatewayName": (str, True),
"GatewayPlatform": (GatewayPlatform, True),
"Tags": (Tags, False),
}
class Portal(AWSObject):
resource_type = "AWS::IoTSiteWise::Portal"
props = {
"PortalAuthMode": (str, False),
"PortalContactEmail": (str, True),
"PortalDescription": (str, False),
"PortalName": (str, True),
"RoleArn": (str, True),
"Tags": (Tags, False),
}
| bsd-2-clause | 3,051,025,688,730,493,000 | 20.386179 | 74 | 0.578977 | false | 3.712773 | false | false | false |
ohmu/pglookout | test/conftest.py | 1 | 4623 | """
pglookout - test configuration
Copyright (c) 2016 Ohmu Ltd
See LICENSE for details
"""
from pglookout import logutil, pgutil
from pglookout.pglookout import PgLookout
from py import path as py_path # pylint: disable=no-name-in-module
from unittest.mock import Mock
import os
import pytest
import signal
import subprocess
import tempfile
import time
PG_VERSIONS = ["13", "12", "11", "10", "9.6", "9.5", "9.4", "9.3", "9.2"]
logutil.configure_logging()
@pytest.yield_fixture
def pgl():
pgl_ = PgLookout("pglookout.json")
pgl_.config["remote_conns"] = {}
pgl_.check_for_maintenance_mode_file = Mock()
pgl_.check_for_maintenance_mode_file.return_value = False
pgl_.cluster_monitor._connect_to_db = Mock() # pylint: disable=protected-access
pgl_.create_alert_file = Mock()
pgl_.execute_external_command = Mock()
try:
yield pgl_
finally:
pgl_.quit()
class TestPG:
def __init__(self, pgdata):
self.pgbin = self.find_pgbin()
self.pgdata = pgdata
self.pg = None
@staticmethod
def find_pgbin(versions=None):
pathformats = ["/usr/pgsql-{ver}/bin", "/usr/lib/postgresql/{ver}/bin"]
for ver in versions or PG_VERSIONS:
for pathfmt in pathformats:
pgbin = pathfmt.format(ver=ver)
if os.path.exists(pgbin):
return pgbin
return "/usr/bin"
@property
def pgver(self):
with open(os.path.join(self.pgdata, "PG_VERSION"), "r") as fp:
return fp.read().strip()
def connection_string(self, user="testuser", dbname="postgres"):
return pgutil.create_connection_string({
"dbname": dbname,
"host": self.pgdata,
"port": 5432,
"user": user,
})
def createuser(self, user="testuser"):
self.run_cmd("createuser", "-h", self.pgdata, "-p", "5432", "-s", user)
def run_cmd(self, cmd, *args):
argv = [os.path.join(self.pgbin, cmd)]
argv.extend(args)
subprocess.check_call(argv)
def run_pg(self):
self.pg = subprocess.Popen([
os.path.join(self.pgbin, "postgres"),
"-D", self.pgdata, "-k", self.pgdata,
"-p", "5432", "-c", "listen_addresses=",
])
time.sleep(1.0) # let pg start
def kill(self, force=True, immediate=True):
if self.pg is None:
return
if force:
os.kill(self.pg.pid, signal.SIGKILL)
elif immediate:
os.kill(self.pg.pid, signal.SIGQUIT)
else:
os.kill(self.pg.pid, signal.SIGTERM)
timeout = time.monotonic() + 10
while (self.pg.poll() is None) and (time.monotonic() < timeout):
time.sleep(0.1)
if not force and self.pg.poll() is None:
raise Exception("PG pid {} not dead".format(self.pg.pid))
# NOTE: cannot use 'tmpdir' fixture here, it only works in 'function' scope
@pytest.yield_fixture(scope="session")
def db():
tmpdir_obj = py_path.local(tempfile.mkdtemp(prefix="pglookout_dbtest_"))
tmpdir = str(tmpdir_obj)
# try to find the binaries for these versions in some path
pgdata = os.path.join(tmpdir, "pgdata")
db = TestPG(pgdata) # pylint: disable=redefined-outer-name
db.run_cmd("initdb", "-D", pgdata, "--encoding", "utf-8")
# NOTE: point $HOME to tmpdir - $HOME shouldn't affect most tests, but
# psql triest to find .pgpass file from there as do our functions that
# manipulate pgpass. By pointing $HOME there we make sure we're not
# making persistent changes to the environment.
os.environ["HOME"] = tmpdir
# allow replication connections
with open(os.path.join(pgdata, "pg_hba.conf"), "w") as fp:
fp.write(
"local all all trust\n"
"local replication all trust\n"
)
with open(os.path.join(pgdata, "postgresql.conf"), "a") as fp:
fp.write(
"max_wal_senders = 2\n"
"wal_level = archive\n"
# disable fsync and synchronous_commit to speed up the tests a bit
"fsync = off\n"
"synchronous_commit = off\n"
# don't need to wait for autovacuum workers when shutting down
"autovacuum = off\n"
)
if db.pgver < "13":
fp.write("wal_keep_segments = 100\n")
db.run_pg()
try:
db.createuser()
db.createuser("otheruser")
yield db
finally:
db.kill()
try:
tmpdir_obj.remove(rec=1)
except: # pylint: disable=bare-except
pass
| apache-2.0 | -888,965,786,231,963,300 | 31.328671 | 84 | 0.587497 | false | 3.462921 | true | false | false |
rombie/contrail-controller | src/vnsw/opencontrail-vrouter-netns/opencontrail_vrouter_netns/cert_mgr/tls.py | 3 | 1366 | import os
from OpenSSL import crypto
class TLS:
def __init__(self, id=None, certificate=None, private_key=None,
passphrase=None, intermediates=None, primary_cn=None):
self.id = id
self.certificate = certificate
self.private_key = private_key
self.passphrase = passphrase
self.intermediates = intermediates
self.primary_cn = primary_cn
def build_pem(self):
pem = ()
if self.intermediates:
for c in self.intermediates:
pem = pem + (c,)
if self.certificate:
pem = pem + (self.certificate,)
if self.private_key:
pem = pem + (self.private_key,)
pem = "\n".join(pem)
return pem
@staticmethod
def get_primary_cn(certificate):
cert = crypto.load_certificate(crypto.FILETYPE_PEM, certificate)
subject = cert.get_subject()
issued_to = subject.CN
issuer = cert.get_issuer()
issued_by = issuer.CN
return issued_to
def create_pem_file(self, dest_dir):
if self is None:
return None
pem = self.build_pem()
pem_file_name = dest_dir + '/'+ self.primary_cn + '.pem'
f = open(pem_file_name, 'w')
f.write(pem)
f.close()
os.chmod(pem_file_name, 0600)
return pem_file_name
| apache-2.0 | -7,272,465,427,452,684,000 | 28.695652 | 72 | 0.564422 | false | 3.672043 | false | false | false |
baroquebobcat/pants | src/python/pants/backend/codegen/thrift/lib/apache_thrift_gen_base.py | 2 | 5609 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
import shutil
from twitter.common.collections import OrderedSet
from pants.backend.codegen.thrift.lib.thrift import Thrift
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel
from pants.option.custom_types import target_option
from pants.task.simple_codegen_task import SimpleCodegenTask
from pants.util.memo import memoized_property
from pants.util.process_handler import subprocess
class ApacheThriftGenBase(SimpleCodegenTask):
# The name of the thrift generator to use. Subclasses must set.
# E.g., java, py (see `thrift -help` for all available generators).
thrift_generator = None
# Subclasses may set their own default generator options.
default_gen_options_map = None
@classmethod
def register_options(cls, register):
super(ApacheThriftGenBase, cls).register_options(register)
# NB: As of thrift 0.9.2 there is 1 warning that -strict promotes to an error - missing a
# struct field id. If an artifact was cached with strict off, we must re-gen with strict on
# since this case may be present and need to generate a thrift compile error.
register('--strict', default=True, fingerprint=True, type=bool,
help='Run thrift compiler with strict warnings.')
# The old --gen-options was string-typed, so we keep it that way for backwards compatibility,
# and reluctantly use the clunky name --gen-options-map for the new, map-typed options.
# TODO: Do a deprecation cycle to restore the old name.
register('--gen-options-map', type=dict, advanced=True, fingerprint=True,
default=cls.default_gen_options_map,
help='Use these options for the {} generator.'.format(cls.thrift_generator))
register('--deps', advanced=True, type=list, member_type=target_option,
help='A list of specs pointing to dependencies of thrift generated code.')
register('--service-deps', advanced=True, type=list, member_type=target_option,
help='A list of specs pointing to dependencies of thrift generated service '
'code. If not supplied, then --deps will be used for service deps.')
@classmethod
def subsystem_dependencies(cls):
return super(ApacheThriftGenBase, cls).subsystem_dependencies() + (Thrift.scoped(cls),)
def synthetic_target_extra_dependencies(self, target, target_workdir):
for source in target.sources_relative_to_buildroot():
if self._declares_service(os.path.join(get_buildroot(), source)):
return self._service_deps
return self._deps
def execute_codegen(self, target, target_workdir):
target_cmd = self._thrift_cmd[:]
bases = OrderedSet(tgt.target_base for tgt in target.closure() if self.is_gentarget(tgt))
for base in bases:
target_cmd.extend(('-I', base))
if hasattr(target, 'compiler_args'):
target_cmd.extend(list(target.compiler_args or []))
target_cmd.extend(('-o', target_workdir))
for source in target.sources_relative_to_buildroot():
cmd = target_cmd[:]
cmd.append(os.path.join(get_buildroot(), source))
with self.context.new_workunit(name=source,
labels=[WorkUnitLabel.TOOL],
cmd=' '.join(cmd)) as workunit:
result = subprocess.call(cmd,
stdout=workunit.output('stdout'),
stderr=workunit.output('stderr'))
if result != 0:
raise TaskError('{} ... exited non-zero ({})'.format(self._thrift_binary, result))
# The thrift compiler generates sources to a gen-[lang] subdir of the `-o` argument. We
# relocate the generated sources to the root of the `target_workdir` so that our base class
# maps them properly.
gen_dir = os.path.join(target_workdir, 'gen-{}'.format(self.thrift_generator))
for path in os.listdir(gen_dir):
shutil.move(os.path.join(gen_dir, path), target_workdir)
os.rmdir(gen_dir)
@memoized_property
def _thrift_binary(self):
return Thrift.scoped_instance(self).select(context=self.context)
@memoized_property
def _deps(self):
deps = self.get_options().deps
return list(self.resolve_deps(deps))
@memoized_property
def _service_deps(self):
service_deps = self.get_options().service_deps
return list(self.resolve_deps(service_deps)) if service_deps else self._deps
SERVICE_PARSER = re.compile(r'^\s*service\s+(?:[^\s{]+)')
def _declares_service(self, source):
with open(source) as thrift:
return any(line for line in thrift if self.SERVICE_PARSER.search(line))
@memoized_property
def _thrift_cmd(self):
cmd = [self._thrift_binary]
def opt_str(item):
return item[0] if not item[1] else '{}={}'.format(*item)
gen_opts_map = self.get_options().gen_options_map or {}
gen_opts = [opt_str(item) for item in gen_opts_map.items()]
generator_spec = ('{}:{}'.format(self.thrift_generator, ','.join(gen_opts)) if gen_opts
else self.thrift_generator)
cmd.extend(('--gen', generator_spec))
if self.get_options().strict:
cmd.append('-strict')
if self.get_options().level == 'debug':
cmd.append('-verbose')
return cmd
| apache-2.0 | 6,981,577,605,815,294,000 | 41.172932 | 97 | 0.679444 | false | 3.873619 | false | false | false |
edx/edxanalytics | src/edxanalytics/edxanalytics/mitx_settings.py | 1 | 2896 | import sys
import os
from path import path
XQUEUE_INTERFACE = {
"url": "http://127.0.0.1:3032",
"django_auth": {
"username": "lms",
"password": "abcd"
},
"basic_auth": ('anant', 'agarwal'),
}
MITX_FEATURES = {
'SAMPLE': False,
'USE_DJANGO_PIPELINE': True,
'DISPLAY_HISTOGRAMS_TO_STAFF': True,
'REROUTE_ACTIVATION_EMAIL': False, # nonempty string = address for all activation emails
'DEBUG_LEVEL': 0, # 0 = lowest level, least verbose, 255 = max level, most verbose
## DO NOT SET TO True IN THIS FILE
## Doing so will cause all courses to be released on production
'DISABLE_START_DATES': False, # When True, all courses will be active, regardless of start date
# When True, will only publicly list courses by the subdomain. Expects you
# to define COURSE_LISTINGS, a dictionary mapping subdomains to lists of
# course_ids (see dev_int.py for an example)
'SUBDOMAIN_COURSE_LISTINGS': False,
# When True, will override certain branding with university specific values
# Expects a SUBDOMAIN_BRANDING dictionary that maps the subdomain to the
# university to use for branding purposes
'SUBDOMAIN_BRANDING': False,
'FORCE_UNIVERSITY_DOMAIN': False, # set this to the university domain to use, as an override to HTTP_HOST
# set to None to do no university selection
'ENABLE_TEXTBOOK': True,
'ENABLE_DISCUSSION_SERVICE': True,
'ENABLE_PSYCHOMETRICS': False, # real-time psychometrics (eg item response theory analysis in instructor dashboard)
'ENABLE_SQL_TRACKING_LOGS': False,
'ENABLE_LMS_MIGRATION': False,
'ENABLE_MANUAL_GIT_RELOAD': False,
'DISABLE_LOGIN_BUTTON': False, # used in systems where login is automatic, eg MIT SSL
'STUB_VIDEO_FOR_TESTING': False, # do not display video when running automated acceptance tests
    # external access methods
'ACCESS_REQUIRE_STAFF_FOR_COURSE': False,
'AUTH_USE_OPENID': False,
'AUTH_USE_MIT_CERTIFICATES': False,
'AUTH_USE_OPENID_PROVIDER': False,
# analytics experiments
'ENABLE_INSTRUCTOR_ANALYTICS': False,
# Flip to True when the YouTube iframe API breaks (again)
'USE_YOUTUBE_OBJECT_API': False,
# Give a UI to show a student's submission history in a problem by the
# Staff Debug tool.
'ENABLE_STUDENT_HISTORY_VIEW': True
}
############################# SET PATH INFORMATION #############################
ENV_ROOT = os.path.abspath(os.path.join(__file__, "..", "..", "..", "..", ".."))
COURSES_ROOT = "{0}/{1}".format(ENV_ROOT,"data")
DATA_DIR = COURSES_ROOT
MODULESTORE = {
'default': {
'ENGINE': 'xmodule.modulestore.xml.XMLModuleStore',
'OPTIONS': {
'data_dir': DATA_DIR,
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
}
}
}
GENERATE_PROFILE_SCORES = False
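# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original settings): when
# SUBDOMAIN_COURSE_LISTINGS is enabled above, a COURSE_LISTINGS dictionary
# mapping subdomains to lists of course_ids is expected; the values below are
# made-up examples.
#
#   COURSE_LISTINGS = {
#       'default': ['MITx/6.002x/2012_Fall'],
#       'berkeley': ['BerkeleyX/CS169.1x/2012_Fall'],
#   }
# ---------------------------------------------------------------------------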
| agpl-3.0 | -8,380,245,411,244,201,000 | 32.287356 | 121 | 0.651243 | false | 3.544676 | false | false | false |
ampammam34/ActroidKinematics | ActroidKinematics.py | 1 | 8277 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- Python -*-
"""
@file ActroidKinematics.py
@brief ModuleDescription
@date $Date$
"""
import sys
import time
import numpy as np
import scipy as sp
import math
import traceback
sys.path.append(".")
# Import RTM module
import RTC
import OpenRTM_aist
# Import Service implementation class
# <rtc-template block="service_impl">
# </rtc-template>
# Import Service stub modules
# <rtc-template block="consumer_import">
# </rtc-template>
# This module's specification
# <rtc-template block="module_spec">
actroidkinematics_spec = ["implementation_id", "ActroidKinematics",
"type_name", "ActroidKinematics",
"description", "ModuleDescription",
"version", "1.0.0",
"vendor", "VenderName",
"category", "Category",
"activity_type", "STATIC",
"max_instance", "1",
"language", "Python",
"lang_type", "SCRIPT",
""]
# </rtc-template>
##
# @class ActroidKinematics
# @brief ModuleDescription
#
#
class ActroidKinematics(OpenRTM_aist.DataFlowComponentBase):
##
# @brief constructor
    # @param manager Manager Object
#
def __init__(self, manager):
OpenRTM_aist.DataFlowComponentBase.__init__(self, manager)
self._d_posein = RTC.TimedDoubleSeq(RTC.Time(0,0),[])
"""
"""
self._poseinIn = OpenRTM_aist.InPort("posein", self._d_posein)
self._d_poseout = RTC.TimedPose3D(RTC.Time(0,0),0)
"""
"""
self._poseoutOut = OpenRTM_aist.OutPort("poseout", self._d_poseout)
# initialize of configuration-data.
# <rtc-template block="init_conf_param">
# </rtc-template>
##
#
# The initialize action (on CREATED->ALIVE transition)
    # former rtc_init_entry()
#
# @return RTC::ReturnCode_t
#
#
def onInitialize(self):
# Bind variables and configuration variable
# Set InPort buffers
self.addInPort("posein",self._poseinIn)
# Set OutPort buffers
self.addOutPort("poseout",self._poseoutOut)
# Set service provider to Ports
# Set service consumers to Ports
# Set CORBA Service Ports
return RTC.RTC_OK
# ##
# #
# # The finalize action (on ALIVE->END transition)
    # # former rtc_exiting_entry()
# #
# # @return RTC::ReturnCode_t
#
# #
#def onFinalize(self, ec_id):
#
# return RTC.RTC_OK
# ##
# #
# # The startup action when ExecutionContext startup
# # former rtc_starting_entry()
# #
# # @param ec_id target ExecutionContext Id
# #
# # @return RTC::ReturnCode_t
# #
# #
#def onStartup(self, ec_id):
#
# return RTC.RTC_OK
# ##
# #
# # The shutdown action when ExecutionContext stop
# # former rtc_stopping_entry()
# #
# # @param ec_id target ExecutionContext Id
# #
# # @return RTC::ReturnCode_t
# #
# #
#def onShutdown(self, ec_id):
#
# return RTC.RTC_OK
# ##
# #
# # The activated action (Active state entry action)
# # former rtc_active_entry()
# #
# # @param ec_id target ExecutionContext Id
# #
# # @return RTC::ReturnCode_t
# #
# #
#def onActivated(self, ec_id):
#
# return RTC.RTC_OK
# ##
# #
# # The deactivated action (Active state exit action)
# # former rtc_active_exit()
# #
# # @param ec_id target ExecutionContext Id
# #
# # @return RTC::ReturnCode_t
# #
# #
#def onDeactivated(self, ec_id):
#
# return RTC.RTC_OK
##
#
# The execution action that is invoked periodically
# former rtc_active_do()
#
# @param ec_id target ExecutionContext Id
#
# @return RTC::ReturnCode_t
#
#
def onExecute(self, ec_id):
th = []
try:
def rotationXandOffset(x, y, z, th):
s = math.sin(th)
c = math.cos(th)
P = np.array([[1,0,0,x],[0,c,s,y],[0,-s,c,z],[0,0,0,1]])
return P
def rotationYandOffset(x, y, z, th):
s = math.sin(th)
c = math.cos(th)
P = np.array([[c,0,s,x],[0,1,0,y],[-s,0,c,z],[0,0,0,1]])
return P
def rotationZandOffset(x, y, z, th):
s = math.sin(th)
c = math.cos(th)
P = np.array([[c,s,0,x],[-s,c,0,y],[0,0,1,z],[0,0,0,1]])
return P
#if __name__ == '__main__':
if self._poseinIn.isNew():
data = self._poseinIn.read()
for num in range(8, 15):
value = data.data[num]
th.append(value)
l1 = 10
l2 = 12
l3 = 15
T = [0]*7
T1 = rotationYandOffset(0, 0, 0, th[0])
T2 = rotationXandOffset(0, 0, 0, th[1])
T3 = rotationZandOffset(0, 0, l1, th[2])
T4 = rotationYandOffset(0, 0, 0, th[3])
T5 = rotationZandOffset(0, 0, l2, th[4])
T6 = rotationYandOffset(0, 0, 0, th[5])
T7 = rotationXandOffset(l3, 0, 0, th[6])
Hand = np.array([[0],[0],[0],[1]])
T = [T1,T2,T3,T4,T5,T6,T7]
target_T = sp.dot(T1,sp.dot(T2,sp.dot(T3,sp.dot(T4,sp.dot(T5,sp.dot(T6,sp.dot(T7,Hand)))))))
print 'Hand Positoin is ', target_T
                # Only the result computed from the first received data set is printed.
                #raw_input(); # block here until the Return key is pressed
return RTC.RTC_OK
except Exception, e:
print 'Exception : ', e
traceback.print_exc()
            # This is shorthand for print_exception(sys.exc_type, sys.exc_value, sys.exc_traceback, limit, file)
pass
return RTC.RTC_OK
# ##
# #
# # The aborting action when main logic error occurred.
# # former rtc_aborting_entry()
# #
# # @param ec_id target ExecutionContext Id
# #
# # @return RTC::ReturnCode_t
# #
# #
#def onAborting(self, ec_id):
#
# return RTC.RTC_OK
# ##
# #
# # The error action in ERROR state
# # former rtc_error_do()
# #
# # @param ec_id target ExecutionContext Id
# #
# # @return RTC::ReturnCode_t
# #
# #
#def onError(self, ec_id):
#
# return RTC.RTC_OK
# ##
# #
# # The reset action that is invoked resetting
# # This is same but different the former rtc_init_entry()
# #
# # @param ec_id target ExecutionContext Id
# #
# # @return RTC::ReturnCode_t
# #
# #
#def onReset(self, ec_id):
#
# return RTC.RTC_OK
# ##
# #
# # The state update action that is invoked after onExecute() action
# # no corresponding operation exists in OpenRTm-aist-0.2.0
# #
# # @param ec_id target ExecutionContext Id
# #
# # @return RTC::ReturnCode_t
# #
# #
#def onStateUpdate(self, ec_id):
#
# return RTC.RTC_OK
# ##
# #
# # The action that is invoked when execution context's rate is changed
# # no corresponding operation exists in OpenRTm-aist-0.2.0
# #
# # @param ec_id target ExecutionContext Id
# #
# # @return RTC::ReturnCode_t
# #
# #
#def onRateChanged(self, ec_id):
#
# return RTC.RTC_OK
def ActroidKinematicsInit(manager):
profile = OpenRTM_aist.Properties(defaults_str=actroidkinematics_spec)
manager.registerFactory(profile,
ActroidKinematics,
OpenRTM_aist.Delete)
def MyModuleInit(manager):
ActroidKinematicsInit(manager)
# Create a component
comp = manager.createComponent("ActroidKinematics")
def main():
mgr = OpenRTM_aist.Manager.init(sys.argv)
mgr.setModuleInitProc(MyModuleInit)
mgr.activateManager()
mgr.runManager()
if __name__ == "__main__":
main()
| gpl-3.0 | -1,119,475,930,547,662,300 | 22.747826 | 124 | 0.519224 | false | 2.926071 | false | false | false |
mohabusama/gevent-guacamole | guacg/app.py | 1 | 2391 | import gevent
from geventwebsocket import WebSocketApplication
from guacamole.client import GuacamoleClient, PROTOCOL_NAME
try:
# Add local_settings.py with RDP connection variables
from local_settings import (
PROTOCOL, USERNAME, PASSWORD, HOST, PORT, DOMAIN, APP, SEC)
except ImportError:
    PROTOCOL = 'rdp'  # assumed default; PROTOCOL is not defined by this fallback in the original
    USERNAME = ''
PASSWORD = ''
HOST = ''
PORT = 3389
DOMAIN = ''
APP = ''
SEC = ''
class GuacamoleApp(WebSocketApplication):
def __init__(self, ws):
self.client = None
self._listener = None
super(GuacamoleApp, self).__init__(ws)
@classmethod
def protocol_name(cls):
"""
Return our protocol.
"""
return PROTOCOL_NAME
def on_open(self, *args, **kwargs):
"""
New Web socket connection opened.
"""
if self.client:
# we have a running client?!
self.client.close()
# @TODO: get guacd host and port!
self.client = GuacamoleClient('localhost', 4822)
# @TODO: get Remote server connection properties
self.client.handshake(protocol=PROTOCOL, hostname=HOST,
port=PORT, username=USERNAME,
password=PASSWORD, domain=DOMAIN,
security=SEC, remote_app=APP)
self._start_listener()
def on_message(self, message):
"""
New message received on the websocket.
"""
# send message to guacd server
self.client.send(message)
def on_close(self, reason):
"""
Websocket closed.
"""
# @todo: consider reconnect from client. (network glitch?!)
self._stop_listener()
self.client.close()
self.client = None
def _start_listener(self):
if self._listener:
self._stop_listener()
self._listener = gevent.spawn(self.guacd_listener)
self._listener.start()
def _stop_listener(self):
if self._listener:
self._listener.kill()
self._listener = None
def guacd_listener(self):
"""
A listener that would handle any messages sent from Guacamole server
and push directly to browser client (over websocket).
"""
while True:
instruction = self.client.receive()
self.ws.send(instruction)
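# ---------------------------------------------------------------------------
# Illustrative wiring sketch (not part of the original module): GuacamoleApp
# is a geventwebsocket WebSocketApplication, so it would typically be mounted
# behind a gevent-websocket server. The path and port below are assumptions.
#
#   from geventwebsocket import WebSocketServer, Resource
#
#   server = WebSocketServer(
#       ('0.0.0.0', 8080),
#       Resource({'/websocket-tunnel': GuacamoleApp}))
#   server.serve_forever()
# ---------------------------------------------------------------------------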
| mit | -463,516,985,167,099,800 | 25.865169 | 76 | 0.570054 | false | 4.30036 | false | false | false |
elhoyos/colombiatransparente | settings.py | 1 | 2081 | # Settings para ColombiaTransparente
#
# You must use local_settings.py to initialize installation-specific variables.
# See the end of this file.
import os.path
PROJECT_DIR = os.path.dirname(__file__)
# These users are emailed
# when exceptions occur
ADMINS = (
('Julian Pulgarin', '[email protected]'),
)
SEND_BROKEN_LINK_EMAILS = True
MANAGERS = ADMINS
TIME_ZONE = 'America/Bogota'
LANGUAGE_CODE = 'es-co'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
MEDIA_ROOT = os.path.join(PROJECT_DIR, 'media/')
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'urls'
FIXTURE_DIRS = (
os.path.join(PROJECT_DIR, 'fixtures'),
)
TEMPLATE_DIRS = (
os.path.join(PROJECT_DIR, 'templates'),
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.core.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.contrib.messages.context_processors.messages"
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.admindocs',
'transparencia',
'sorl.thumbnail',
'django_bcrypt',
'django.contrib.markup',
)
# local_settings.py initializes settings specific to this CT installation.
try:
    from local_settings import * # in the same directory as this file
except ImportError:
import sys
sys.stderr.write("Error: Must use a local_settings.py file to set specific settings for this CT installation.")
sys.exit(1)
| gpl-3.0 | 7,379,405,612,311,434,000 | 24.378049 | 115 | 0.719846 | false | 3.256651 | false | false | false |
pacificclimate/pycds | tests/basics/conftest.py | 1 | 2154 | from pytest import fixture
from sqlalchemy.orm import sessionmaker
from pycds import Contact, Network, Station, History, Variable
@fixture
def empty_sesh(base_engine, set_search_path):
"""Test-function scoped database session, with no schema or content.
All session actions are rolled back on teardown.
"""
sesh = sessionmaker(bind=base_engine)()
set_search_path(sesh)
yield sesh
sesh.rollback()
sesh.close()
@fixture
def pycds_sesh_with_small_data(pycds_sesh):
# TODO: Use add_then_delete_objs (which ought to be renamed) here so that objects
# are removed after test
moti = Network(name="MoTIe")
ec = Network(name="EC")
wmb = Network(name="FLNROW-WMB")
pycds_sesh.add_all([moti, ec, wmb])
simon = Contact(name="Simon", networks=[moti])
eric = Contact(name="Eric", networks=[wmb])
pat = Contact(name="Pat", networks=[ec])
pycds_sesh.add_all([simon, eric, pat])
stations = [
Station(
native_id="11091",
network=moti,
histories=[
History(
station_name="Brandywine",
the_geom="SRID=4326;POINT(-123.11806 50.05417)",
)
],
),
Station(
native_id="1029",
network=wmb,
histories=[
History(
station_name="FIVE MILE",
the_geom="SRID=4326;POINT(-122.68889 50.91089)",
)
],
),
Station(
native_id="2100160",
network=ec,
histories=[
History(
station_name="Beaver Creek Airport",
the_geom="SRID=4326;POINT(-140.866667 62.416667)",
)
],
),
]
pycds_sesh.add_all(stations)
variables = [
Variable(name="CURRENT_AIR_TEMPERATURE1", unit="celsius", network=moti),
Variable(name="precipitation", unit="mm", network=ec),
Variable(name="relative_humidity", unit="percent", network=wmb),
]
pycds_sesh.add_all(variables)
yield pycds_sesh
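# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original conftest): a test module in
# this package could consume the fixture above roughly like this; the test
# name is an assumption.
#
#   def test_small_data_stations(pycds_sesh_with_small_data):
#       stations = pycds_sesh_with_small_data.query(Station).all()
#       assert len(stations) == 3
# ---------------------------------------------------------------------------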
| gpl-3.0 | 3,304,209,749,878,142,000 | 28.506849 | 85 | 0.548282 | false | 3.663265 | false | false | false |
Jeff-Tian/mybnb | Python27/Lib/test/test_sysconfig.py | 2 | 13330 | """Tests for sysconfig."""
import unittest
import sys
import os
import shutil
import subprocess
from copy import copy, deepcopy
from test.test_support import run_unittest, TESTFN, unlink, get_attribute
import sysconfig
from sysconfig import (get_paths, get_platform, get_config_vars,
get_path, get_path_names, _INSTALL_SCHEMES,
_get_default_scheme, _expand_vars,
get_scheme_names, get_config_var)
import _osx_support
class TestSysConfig(unittest.TestCase):
def setUp(self):
"""Make a copy of sys.path"""
super(TestSysConfig, self).setUp()
self.sys_path = sys.path[:]
self.makefile = None
# patching os.uname
if hasattr(os, 'uname'):
self.uname = os.uname
self._uname = os.uname()
else:
self.uname = None
self._uname = None
os.uname = self._get_uname
# saving the environment
self.name = os.name
self.platform = sys.platform
self.version = sys.version
self.sep = os.sep
self.join = os.path.join
self.isabs = os.path.isabs
self.splitdrive = os.path.splitdrive
self._config_vars = copy(sysconfig._CONFIG_VARS)
self.old_environ = deepcopy(os.environ)
def tearDown(self):
"""Restore sys.path"""
sys.path[:] = self.sys_path
if self.makefile is not None:
os.unlink(self.makefile)
self._cleanup_testfn()
if self.uname is not None:
os.uname = self.uname
else:
del os.uname
os.name = self.name
sys.platform = self.platform
sys.version = self.version
os.sep = self.sep
os.path.join = self.join
os.path.isabs = self.isabs
os.path.splitdrive = self.splitdrive
sysconfig._CONFIG_VARS = copy(self._config_vars)
for key, value in self.old_environ.items():
if os.environ.get(key) != value:
os.environ[key] = value
for key in os.environ.keys():
if key not in self.old_environ:
del os.environ[key]
super(TestSysConfig, self).tearDown()
def _set_uname(self, uname):
self._uname = uname
def _get_uname(self):
return self._uname
def _cleanup_testfn(self):
path = TESTFN
if os.path.isfile(path):
os.remove(path)
elif os.path.isdir(path):
shutil.rmtree(path)
def test_get_path_names(self):
self.assertEqual(get_path_names(), sysconfig._SCHEME_KEYS)
def test_get_paths(self):
scheme = get_paths()
default_scheme = _get_default_scheme()
wanted = _expand_vars(default_scheme, None)
wanted = wanted.items()
wanted.sort()
scheme = scheme.items()
scheme.sort()
self.assertEqual(scheme, wanted)
def test_get_path(self):
# xxx make real tests here
for scheme in _INSTALL_SCHEMES:
for name in _INSTALL_SCHEMES[scheme]:
res = get_path(name, scheme)
def test_get_config_vars(self):
cvars = get_config_vars()
self.assertIsInstance(cvars, dict)
self.assertTrue(cvars)
def test_get_platform(self):
# windows XP, 32bits
os.name = 'nt'
sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
'[MSC v.1310 32 bit (Intel)]')
sys.platform = 'win32'
self.assertEqual(get_platform(), 'win32')
# windows XP, amd64
os.name = 'nt'
sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
'[MSC v.1310 32 bit (Amd64)]')
sys.platform = 'win32'
self.assertEqual(get_platform(), 'win-amd64')
# windows XP, itanium
os.name = 'nt'
sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
'[MSC v.1310 32 bit (Itanium)]')
sys.platform = 'win32'
self.assertEqual(get_platform(), 'win-ia64')
# macbook
os.name = 'posix'
sys.version = ('2.5 (r25:51918, Sep 19 2006, 08:49:13) '
'\n[GCC 4.0.1 (Apple Computer, Inc. build 5341)]')
sys.platform = 'darwin'
self._set_uname(('Darwin', 'macziade', '8.11.1',
('Darwin Kernel Version 8.11.1: '
'Wed Oct 10 18:23:28 PDT 2007; '
'root:xnu-792.25.20~1/RELEASE_I386'), 'PowerPC'))
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3'
get_config_vars()['CFLAGS'] = ('-fno-strict-aliasing -DNDEBUG -g '
'-fwrapv -O3 -Wall -Wstrict-prototypes')
maxint = sys.maxint
try:
sys.maxint = 2147483647
self.assertEqual(get_platform(), 'macosx-10.3-ppc')
sys.maxint = 9223372036854775807
self.assertEqual(get_platform(), 'macosx-10.3-ppc64')
finally:
sys.maxint = maxint
self._set_uname(('Darwin', 'macziade', '8.11.1',
('Darwin Kernel Version 8.11.1: '
'Wed Oct 10 18:23:28 PDT 2007; '
'root:xnu-792.25.20~1/RELEASE_I386'), 'i386'))
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3'
get_config_vars()['CFLAGS'] = ('-fno-strict-aliasing -DNDEBUG -g '
'-fwrapv -O3 -Wall -Wstrict-prototypes')
maxint = sys.maxint
try:
sys.maxint = 2147483647
self.assertEqual(get_platform(), 'macosx-10.3-i386')
sys.maxint = 9223372036854775807
self.assertEqual(get_platform(), 'macosx-10.3-x86_64')
finally:
sys.maxint = maxint
# macbook with fat binaries (fat, universal or fat64)
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.4'
get_config_vars()['CFLAGS'] = ('-arch ppc -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-fat')
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-intel')
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-fat3')
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch ppc64 -arch x86_64 -arch ppc -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-universal')
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc64 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-fat64')
for arch in ('ppc', 'i386', 'x86_64', 'ppc64'):
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch %s -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3'%(arch,))
self.assertEqual(get_platform(), 'macosx-10.4-%s'%(arch,))
# linux debian sarge
os.name = 'posix'
sys.version = ('2.3.5 (#1, Jul 4 2007, 17:28:59) '
'\n[GCC 4.1.2 20061115 (prerelease) (Debian 4.1.1-21)]')
sys.platform = 'linux2'
self._set_uname(('Linux', 'aglae', '2.6.21.1dedibox-r7',
'#1 Mon Apr 30 17:25:38 CEST 2007', 'i686'))
self.assertEqual(get_platform(), 'linux-i686')
# XXX more platforms to tests here
def test_get_config_h_filename(self):
config_h = sysconfig.get_config_h_filename()
self.assertTrue(os.path.isfile(config_h), config_h)
def test_get_scheme_names(self):
wanted = ('nt', 'nt_user', 'os2', 'os2_home', 'osx_framework_user',
'posix_home', 'posix_prefix', 'posix_user')
self.assertEqual(get_scheme_names(), wanted)
@unittest.skipIf(sys.platform.startswith('win'),
'Test is not Windows compatible')
def test_get_makefile_filename(self):
makefile = sysconfig.get_makefile_filename()
self.assertTrue(os.path.isfile(makefile), makefile)
# Issue 22199
self.assertEqual(sysconfig._get_makefile_filename(), makefile)
def test_symlink(self):
# Issue 7880
symlink = get_attribute(os, "symlink")
def get(python):
cmd = [python, '-c',
'import sysconfig; print sysconfig.get_platform()']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
return p.communicate()
real = os.path.realpath(sys.executable)
link = os.path.abspath(TESTFN)
symlink(real, link)
try:
self.assertEqual(get(real), get(link))
finally:
unlink(link)
def test_user_similar(self):
# Issue #8759: make sure the posix scheme for the users
# is similar to the global posix_prefix one
base = get_config_var('base')
user = get_config_var('userbase')
# the global scheme mirrors the distinction between prefix and
# exec-prefix but not the user scheme, so we have to adapt the paths
# before comparing (issue #9100)
adapt = sys.prefix != sys.exec_prefix
for name in ('stdlib', 'platstdlib', 'purelib', 'platlib'):
global_path = get_path(name, 'posix_prefix')
if adapt:
global_path = global_path.replace(sys.exec_prefix, sys.prefix)
base = base.replace(sys.exec_prefix, sys.prefix)
user_path = get_path(name, 'posix_user')
self.assertEqual(user_path, global_path.replace(base, user, 1))
@unittest.skipUnless(sys.platform == "darwin", "test only relevant on MacOSX")
def test_platform_in_subprocess(self):
my_platform = sysconfig.get_platform()
# Test without MACOSX_DEPLOYMENT_TARGET in the environment
env = os.environ.copy()
if 'MACOSX_DEPLOYMENT_TARGET' in env:
del env['MACOSX_DEPLOYMENT_TARGET']
with open('/dev/null', 'w') as devnull_fp:
p = subprocess.Popen([
sys.executable, '-c',
'import sysconfig; print(sysconfig.get_platform())',
],
stdout=subprocess.PIPE,
stderr=devnull_fp,
env=env)
test_platform = p.communicate()[0].strip()
test_platform = test_platform.decode('utf-8')
status = p.wait()
self.assertEqual(status, 0)
self.assertEqual(my_platform, test_platform)
# Test with MACOSX_DEPLOYMENT_TARGET in the environment, and
# using a value that is unlikely to be the default one.
env = os.environ.copy()
env['MACOSX_DEPLOYMENT_TARGET'] = '10.1'
p = subprocess.Popen([
sys.executable, '-c',
'import sysconfig; print(sysconfig.get_platform())',
],
stdout=subprocess.PIPE,
stderr=open('/dev/null'),
env=env)
test_platform = p.communicate()[0].strip()
test_platform = test_platform.decode('utf-8')
status = p.wait()
self.assertEqual(status, 0)
self.assertEqual(my_platform, test_platform)
def test_main():
run_unittest(TestSysConfig)
if __name__ == "__main__":
test_main()
| apache-2.0 | -7,487,843,109,434,952,000 | 37.672619 | 97 | 0.525056 | false | 3.843714 | true | false | false |
phracek/devassistant | test/integration/test_help.py | 2 | 4964 | from __future__ import unicode_literals
import sys
import pytest
from test.integration.misc import populate_dapath, run_da
class TestHelp(object):
top_level_help = '\n'.join([
'You can either run assistants with:',
'\033[1mda [--debug] {create,tweak,prepare,extras} [ASSISTANT [ARGUMENTS]] ...\033[0m',
'',
'Where:',
'\033[1mcreate \033[0mused for creating new projects',
'\033[1mtweak \033[0mused for working with existing projects',
'\033[1mprepare \033[0mused for preparing environment for upstream projects',
'\033[1mextras \033[0mused for performing custom tasks not related to a specific project',
'You can shorten "create" to "crt", "tweak" to "twk" and "extras" to "extra".',
'',
'Or you can run a custom action:',
'\033[1mda [--debug] [ACTION] [ARGUMENTS]\033[0m',
'',
'Available actions:',
'\033[1mdoc \033[0mDisplay documentation for a DAP package.',
'\033[1mhelp \033[0mPrint detailed help.',
'\033[1mpkg \033[0mLets you interact with online DAPI service and your local DAP packages.',
'\033[1mversion \033[0mPrint version',
''])
no_assistant_help_newlines = '\n'.join([
'No subassistants available.',
'',
'To search DevAssistant Package Index (DAPI) for new assistants,',
'you can either browse https://dapi.devassistant.org/ or run',
'',
'"da pkg search <term>".',
'',
'Then you can run',
'',
'"da pkg install <DAP-name>"',
'',
'to install the desired DevAssistant package (DAP).'
])
no_assistants_help_singleline = '\n'.join([
' No subassistants available. To search DevAssistant Package Index (DAPI)',
' for new assistants, you can either browse https://dapi.devassistant.org/',
' or run "da pkg search <term>". Then you can run "da pkg install <DAP-',
' name>" to install the desired DevAssistant package (DAP).'
])
def test_top_level_help(self):
res = run_da('-h')
# use repr because of bash formatting chars
assert repr(res.stdout) == repr(self.top_level_help)
def test_top_level_without_arguments(self):
res = run_da('', expect_error=True)
msg = 'Couldn\'t parse input, displaying help ...\n\n'
# use repr because of bash formatting chars
assert repr(res.stdout) == repr(msg + self.top_level_help)
@pytest.mark.parametrize('alias', [
# test both assistant primary name and an alias
'crt',
'create',
])
def test_category_with_no_assistants_without_arguments(self, alias):
res = run_da(alias, expect_error=True, expect_stderr=True)
assert self.no_assistant_help_newlines in res.stderr
@pytest.mark.parametrize('alias', [
# test both assistant primary name and an alias
'crt',
'create',
])
def test_category_with_no_assistants_help(self, alias):
res = run_da(alias + ' -h')
assert self.no_assistants_help_singleline in res.stdout
def test_didnt_choose_subassistant(self):
env = populate_dapath({'assistants': {'crt': ['a.yaml', {'a': ['b.yaml']}]}})
res = env.run_da('create a', expect_error=True, expect_stderr=True)
assert 'You have to select a subassistant' in res.stderr
def test_subassistants_help(self):
env = populate_dapath({'assistants': {'crt': ['a.yaml', {'a': ['b.yaml']}]}})
res = env.run_da('create a -h')
assert res.stdout == '\n'.join([
'usage: create a [-h] {b} ...',
'',
'optional arguments:',
' -h, --help show this help message and exit',
'',
'subassistants:',
' Following subassistants will help you with setting up your project.',
'',
' {b}',
''])
def test_didnt_choose_subaction(self):
res = run_da('pkg', expect_error=True, expect_stderr=True)
assert 'You have to select a subaction' in res.stderr
def test_subactions_help(self):
res = run_da('pkg -h')
# TODO: seems that subparsers order cannot be influenced in 2.6
# investigate and possibly improve this test
if sys.version_info[:2] == (2, 6):
return
assert res.stdout == '\n'.join([
'usage: pkg [-h] {info,install,lint,list,remove,search,uninstall,update} ...',
'',
'Lets you interact with online DAPI service and your local DAP packages.',
'',
'optional arguments:',
' -h, --help show this help message and exit',
'',
'subactions:',
' This action has following subactions.',
'',
' {info,install,lint,list,remove,search,uninstall,update}',
''])
| gpl-2.0 | -4,774,708,795,361,658,000 | 39.357724 | 105 | 0.574738 | false | 3.726727 | true | false | false |
open-craft/opencraft | instance/migrations/0039_auto_20160416_1729.py | 1 | 2134 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('instance', '0038_merge'),
]
operations = [
migrations.RenameField(
model_name='openstackserver',
old_name='progress',
new_name='_progress',
),
migrations.RenameField(
model_name='openstackserver',
old_name='status',
new_name='_status',
),
migrations.AlterField(
model_name='openstackserver',
name='_progress',
field=models.CharField(choices=[('failed', 'type'), ('running', 'type'), ('success', 'type')], default='running', db_column='progress', max_length=7),
),
migrations.AlterField(
model_name='openstackserver',
name='_status',
field=models.CharField(choices=[('active', 'type'), ('booted', 'type'), ('new', 'type'), ('provisioning', 'type'), ('ready', 'type'), ('rebooting', 'type'), ('started', 'type'), ('terminated', 'type')], default='new', db_column='status', max_length=20, db_index=True),
),
migrations.AlterField(
model_name='generallogentry',
name='level',
field=models.CharField(choices=[('DEBUG', 'Debug'), ('INFO', 'Info'), ('WARNING', 'Warning'), ('ERROR', 'Error'), ('CRITICAL', 'Critical')], default='INFO', max_length=9, db_index=True),
),
migrations.AlterField(
model_name='instancelogentry',
name='level',
field=models.CharField(choices=[('DEBUG', 'Debug'), ('INFO', 'Info'), ('WARNING', 'Warning'), ('ERROR', 'Error'), ('CRITICAL', 'Critical')], default='INFO', max_length=9, db_index=True),
),
migrations.AlterField(
model_name='serverlogentry',
name='level',
field=models.CharField(choices=[('DEBUG', 'Debug'), ('INFO', 'Info'), ('WARNING', 'Warning'), ('ERROR', 'Error'), ('CRITICAL', 'Critical')], default='INFO', max_length=9, db_index=True),
),
]
| agpl-3.0 | 211,208,470,338,546,900 | 42.55102 | 280 | 0.550609 | false | 4.217391 | false | false | false |
rbianchi66/survey | src/qquestion.py | 1 | 1232 | from PyQt4 import QtGui, Qt
class Question(QtGui.QFrame):
def __init__(self, qnumber, question, card, *args):
super(QtGui.QFrame, self).__init__(*args)
print "qnumber:", qnumber
print "question:", question
self.card = card
self.main_layout = QtGui.QGridLayout()
self.answers_layout = QtGui.QVBoxLayout()
#row = cur.execute("select valore from domande where id=%d" % qid).fetchone()
title = QtGui.QLabel("Domanda %d" % qnumber)
title.setFont(QtGui.QFont("Arial",9,75))
self.main_layout.addWidget(title, 0, 0)
self.question = QtGui.QLabel(question["name"])
self.main_layout.addWidget(self.question, 1, 0)
self.setFixedHeight(200)
rows = [(qnumber,i,a) for i,a in enumerate(question["answers"])]
# cur = self.conn.cursor()
# rows = cur.execute("select id_domanda,id,valore from risposte where id_domanda=%d" % qid).fetchall()
# cur.close()
self.showButtons(rows)
self.setLayout(self.main_layout)
self.main_layout.addLayout(self.answers_layout, 2, 0)
def updateValue(self):
pass
def showButtons(self, rows):
pass | gpl-2.0 | 7,829,553,505,050,234,000 | 39.133333 | 110 | 0.602273 | false | 3.560694 | false | false | false |
thautwarm/JavaProj | sqlclass.py | 1 | 1087 | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 26 13:20:51 2017
@author: Thautwarm
"""
def EntityWrapper(func):
def _func(**kw2):
attrs=kw2['attrs']
types=kw2['types']
return func(attrs,types)
return _func
def InitWithTypeMapSet(Name,attrs,types):
Unit = lambda x,y:\
"""
TypeMap.put("%s","%s");"""%(x,y)
body=\
"""
public static Map<String,String> %s(){
Map<String,String> TypeMap=new HashMap<String,String>();
%s
return TypeMap;
}
"""%("getTypeMap",'\n'.join(Unit(attr_i,type_i) for attr_i,type_i in zip(attrs,types) ))
return body
@EntityWrapper
def toSQLValuesSet(attrs,types):
tosql=[attr_i if type_i!="Date" else '(new Timestamp (%s.getTime()))'%attr_i for attr_i,type_i in zip(attrs,types)]
body=\
"""
public String toSQLValues(){
return %s;
}
"""%("""+","+""".join(tosql))
return body
@EntityWrapper
def toSQLColumnsSet(attrs,types):
body=\
"""
public static String toSQLColumns(){
return "%s";
}
"""%(','.join(attrs))
return body
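# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module); the attribute
# names and Java types below are made up. Each helper returns a fragment of
# Java source code as a string:
#
#   attrs = ['id', 'name', 'created']
#   types = ['int', 'String', 'Date']
#   print InitWithTypeMapSet('User', attrs, types)    # static getTypeMap() method
#   print toSQLValuesSet(attrs=attrs, types=types)    # toSQLValues() instance method
#   print toSQLColumnsSet(attrs=attrs, types=types)   # static toSQLColumns() method
# ---------------------------------------------------------------------------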
| apache-2.0 | -2,187,566,233,595,417,900 | 20.76 | 120 | 0.577737 | false | 3.114613 | false | false | false |
wgm2111/wgm-my-data-tools | examples/view_amazon_meta.py | 1 | 1163 |
#
# me: Will Martin
# data: 3.12.2015
# license: BSD
#
"""
Run this program to look at the amazon-meta data interactively.
"""
# future
from __future__ import print_function, division
# standard
import os
# works on unix machines
FILENAME = os.path.abspath(r"../amazon-meta/amazon-meta.txt")
# Parameters for the interactive prompt
PROMPT = "[amazon-meta] : "
# data printing routine
def print_till_blank(amfile):
"""
Amazon data entry printer.
in: amazon-meta-file
out: None
"""
# print lines until a blank is found.
while True:
line = amfile.readline().strip()
print(line)
if line == "":
break
return None
# interactive data viewer
print("Running the interactive data viewer.\n", end='\n')
# open the amazon database
with open(FILENAME, 'r') as amfile:
# Print the header
print_till_blank(amfile)
print("\nKeep pressing enter to view data:")
# start the propt
while True:
# Run the prompt
print(PROMPT, end="")
ui = raw_input()
if ui == "":
print_till_blank(amfile)
else:
break
| bsd-3-clause | -1,376,832,607,013,369,900 | 17.460317 | 63 | 0.601892 | false | 3.645768 | false | false | false |
USGSDenverPychron/pychron | pychron/experiment/health/analysis_health.py | 1 | 1125 | # ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from traits.api import Str
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.loggable import Loggable
class AnalysisHealth(Loggable):
analysis_type = Str
# ============= EOF =============================================
| apache-2.0 | -4,794,001,600,473,407,000 | 39.178571 | 81 | 0.534222 | false | 5.136986 | false | false | false |
bencord0/ListeningSocketHandler | ListeningSocketHandler/handlers.py | 1 | 2200 | #!/usr/bin/env python
"""
ListeningSocketHandler
======================
A python logging handler.
logging.handlers.SocketHandler is a TCP Socket client that sends log records to a tcp server.
This class is the opposite.
When a TCP client connects (e.g. telnet or netcat), log records are streamed through the connection.
"""
import logging
import sys
import socket
import threading
# Workaround for http://bugs.python.org/issue14308
# http://stackoverflow.com/questions/13193278/understand-python-threading-bug
threading._DummyThread._Thread__stop = lambda x: 42
class ListeningSocketHandler(logging.Handler):
def __init__(self, port=0, ipv6=False):
super(ListeningSocketHandler, self).__init__()
self.port = port
self.ipv6 = ipv6
self.clients = set()
if self.ipv6:
self.socket = socket.socket(socket.AF_INET6)
self.socket.bind(("::", self.port))
else:
self.socket = socket.socket(socket.AF_INET)
self.socket.bind(("0.0.0.0", self.port))
self.socket.listen(5)
print ("ListeningSocketHandler on port: {}".format(self.socket.getsockname()[1]))
        def start_accepting(self):
            while True:
                conn, addr = self.socket.accept()
                self.clients.add(conn)
        self._accept_thread = threading.Thread(target=start_accepting, args=(self,))
        self._accept_thread.daemon = True
        self._accept_thread.start()
def emit(self, record):
closed_clients = set()
for client in self.clients:
try:
try:
# Python3
message = bytes(record.getMessage() + "\r\n", 'UTF-8')
except TypeError:
# Python2
message = bytes(record.getMessage() + "\r\n").encode('UTF-8')
client.sendall(message)
except socket.error:
closed_clients.add(client)
for client in closed_clients:
client.close() # just to be sure
self.clients.remove(client)
def getsockname(self):
return self.socket.getsockname()
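# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module); the port number and
# log message are arbitrary. While this runs, connecting with e.g.
# "telnet localhost 9020" or "nc localhost 9020" should stream the records.
if __name__ == "__main__":
    import time
    demo_logger = logging.getLogger("ListeningSocketHandler.demo")
    demo_logger.setLevel(logging.INFO)
    demo_logger.addHandler(ListeningSocketHandler(port=9020))
    while True:
        demo_logger.info("tick")
        time.sleep(1.0)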
| apache-2.0 | -2,963,136,701,782,625,000 | 33.920635 | 104 | 0.587727 | false | 4.104478 | false | false | false |
kefin/django-garage | garage/html_utils.py | 1 | 5787 | # -*- coding: utf-8 -*-
"""
garage.html_utils
Utility functions to handle html-text conversions.
* FIXME: need to reorganize, refactor and delete unnecessary code.
* created: 2008-06-22 kevin chan <[email protected]>
* updated: 2014-11-21 kchan
"""
from __future__ import (absolute_import, unicode_literals)
import six
import re
from htmlentitydefs import codepoint2name, name2codepoint
from markdown import markdown
from textile import textile
# functions to escape html special characters
def html_escape(text):
"""
Escape reserved html characters within text.
"""
htmlchars = {
"&": "&",
'"': """,
"'": "'",
">": ">",
"<": "<",
}
if isinstance(text, six.text_type):
text = ''.join([htmlchars.get(c, c) for c in text])
return text
def html_entities(u):
"""
Convert non-ascii characters to old-school html entities.
"""
result = []
for c in u:
if ord(c) < 128:
result.append(c)
else:
try:
result.append('&%s;' % codepoint2name[ord(c)])
except KeyError:
result.append("&#%s;" % ord(c))
return ''.join(result)
def escape(txt):
"""
Escapes html reserved characters (<>'"&) and convert non-ascii
text to html entities.
* To escape only html reserved characters (<>'"&), use
`html_escape`.
"""
return html_escape(html_entities(txt))
def unescape(text):
"""
Removes HTML or XML character references and entities from a text string.
* Note: does not strip html tags (use `strip_tags` instead for that).
:Info: http://effbot.org/zone/re-sub.htm#unescape-html
:param text: The HTML (or XML) source text.
:return: The plain text, as a Unicode string, if necessary.
"""
def fixup(m):
text = m.group(0)
if text[:2] == "&#":
# character reference
try:
if text[:3] == "&#x":
return unichr(int(text[3:-1], 16))
else:
return unichr(int(text[2:-1]))
except ValueError:
pass
else:
# named entity
try:
text = unichr(name2codepoint[text[1:-1]])
except KeyError:
pass
return text # leave as is
return re.sub("&#?\w+;", fixup, text)
def strip_tags(html_txt):
"""
Strip tags from html text (uses strip_tags from django.utils.html).
* also unescapes html entities
* fall back on using `re.sub` if django's `strip_tags` is not
importable for some reason.
"""
try:
from django.utils.html import strip_tags as _strip_tags
except ImportError:
stripped = re.sub(r'<[^>]*?>', '', html_txt)
else:
stripped = _strip_tags(html_txt)
return unescape(stripped)
# functions for converting plain text content to html
# * available conversion methods:
# * no conversion
# * markdown
# * textile
# * simple conversion of line breaks
# * visual editor (using wysiwyg editor like TinyMCE)
NO_CONVERSION = 1
MARKDOWN_CONVERSION = 2
TEXTILE_CONVERSION = 3
SIMPLE_CONVERSION = 4
VISUAL_EDITOR = 5
CONVERSION_CHOICES = (
(NO_CONVERSION, 'None'),
(MARKDOWN_CONVERSION, 'Markdown'),
(TEXTILE_CONVERSION, 'Textile'),
(SIMPLE_CONVERSION, 'Simple (Convert Line Breaks)'),
(VISUAL_EDITOR, 'Visual (WYSIWYG) Editor'),
)
CONVERSION_METHODS = (
(NO_CONVERSION, 'none'),
(MARKDOWN_CONVERSION, 'markdown'),
(TEXTILE_CONVERSION, 'textile'),
(SIMPLE_CONVERSION, 'markdown'),
(VISUAL_EDITOR, 'visual')
)
def txt2html(txt, method):
try:
assert txt is not None and len(txt) > 0
if method == MARKDOWN_CONVERSION:
txt = markdown(txt)
elif method == TEXTILE_CONVERSION:
txt = textile(txt)
elif method == SIMPLE_CONVERSION:
txt = markdown(txt)
else:
# NO_CONVERSION
pass
except (TypeError, AssertionError):
pass
return txt
def get_cvt_method(name):
"""
Get conversion method "code" corresponding to name
"""
c = {
'none': NO_CONVERSION,
'markdown': MARKDOWN_CONVERSION,
'textile': TEXTILE_CONVERSION
}
try:
method = c.get(name.lower(), NO_CONVERSION)
except (TypeError, AttributeError):
method = NO_CONVERSION
return method
def get_cvt_method_name(code):
"""
Get conversion method name corresponding to "code"
"""
if code > 0:
code -= 1
try:
codenum, name = CONVERSION_METHODS[code]
except:
codenum, name = CONVERSION_METHODS[NO_CONVERSION]
return name
def to_html(txt, cvt_method='markdown'):
"""
Convert text block to html
* cvt_method is name of method (markdown, textile, or none)
* cf. txt2html where method is the conversion "code" (number)
"""
return txt2html(txt, get_cvt_method(cvt_method))
# html-to-text utility function
def html_to_text(html):
"""
Utility function to convert html content to plain text.
* uses Django `strip_tags` function to convert html to text;
* multiple blank lines are reduced to 2;
* strips beginning and ending white space.
* does not perform any kind of formatting or structuring to the
plain text result.
:param html: html content
:returns: plain text content after conversion
"""
from garage.text_utils import tidy_txt
txt = tidy_txt(strip_tags(html))
lines = []
for line in txt.splitlines():
s = line.strip()
if len(s) > 0:
lines.append(s)
txt = '\n\n'.join(lines)
txt = '%s\n' % txt
return txt
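# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module); the inputs are
# made up and the outputs shown are approximate:
#
#   to_html('*hello*', 'markdown')            # -> '<p><em>hello</em></p>'
#   escape(u'<b>5 > 3</b>')                   # -> '&lt;b&gt;5 &gt; 3&lt;/b&gt;'
#   html_to_text('<h1>Title</h1>\n<p>Body text.</p>')
#                                             # -> 'Title\n\nBody text.\n'
# ---------------------------------------------------------------------------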
| bsd-3-clause | -2,367,341,503,561,398,000 | 25.067568 | 77 | 0.593745 | false | 3.731141 | false | false | false |
freedomboxtwh/Plinth | plinth/modules/radicale/forms.py | 9 | 1466 | #
# This file is part of Plinth.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Forms for radicale module.
"""
from django import forms
from django.utils.translation import ugettext_lazy as _
from plinth.forms import ServiceForm
CHOICES = [
('owner_only', _('Only the owner of a calendar/addressbook can view or '
'make changes.')),
('owner_write', _('Any user can view any calendar/addressbook, but only '
'the owner can make changes.')),
('authenticated', _('Any user can view or make changes to any '
'calendar/addressbook.')),
]
class RadicaleForm(ServiceForm):
"""Specialized configuration form for radicale service."""
access_rights = forms.ChoiceField(choices=CHOICES, required=True,
widget=forms.RadioSelect())
| agpl-3.0 | 5,371,999,269,105,826,000 | 35.65 | 77 | 0.686221 | false | 4.274052 | false | false | false |
DeBortoliWines/Bika-LIMS | bika/lims/browser/__init__.py | 1 | 5104 | """Bika's browser views are based on this one, for a nice set of utilities.
"""
from Products.CMFCore.utils import getToolByName
from AccessControl import ClassSecurityInfo
from Products.CMFPlone.i18nl10n import ulocalized_time
from Products.Five.browser import BrowserView
from bika.lims import logger
from zope.cachedescriptors.property import Lazy as lazy_property
from zope.i18n import translate
import plone, json
class BrowserView(BrowserView):
security = ClassSecurityInfo()
logger = logger
def __init__(self, context, request):
super(BrowserView, self).__init__(context, request)
security.declarePublic('ulocalized_time')
def ulocalized_time(self, time, long_format=None, time_only=None):
if time:
# no printing times if they were not specified in inputs
if time.second() + time.minute() + time.hour() == 0:
long_format = False
time_str = ulocalized_time(time, long_format, time_only, self.context,
'bika', self.request)
return time_str
@lazy_property
def portal(self):
return getToolByName(self.context, 'portal_url').getPortalObject()
@lazy_property
def portal_url(self):
return self.portal.absolute_url().split("?")[0]
@lazy_property
def portal_catalog(self):
return getToolByName(self.context, 'portal_catalog')
@lazy_property
def reference_catalog(self):
return getToolByName(self.context, 'reference_catalog')
@lazy_property
def bika_analysis_catalog(self):
return getToolByName(self.context, 'bika_analysis_catalog')
@lazy_property
def bika_setup_catalog(self):
return getToolByName(self.context, 'bika_setup_catalog')
@lazy_property
def bika_catalog(self):
return getToolByName(self.context, 'bika_catalog')
@lazy_property
def portal_membership(self):
return getToolByName(self.context, 'portal_membership')
@lazy_property
def portal_groups(self):
return getToolByName(self.context, 'portal_groups')
@lazy_property
def portal_workflow(self):
return getToolByName(self.context, 'portal_workflow')
@lazy_property
def checkPermission(self, perm, obj):
return self.portal_membership.checkPermission(perm, obj)
def user_fullname(self, userid):
member = self.portal_membership.getMemberById(userid)
if member is None:
return userid
member_fullname = member.getProperty('fullname')
c = self.portal_catalog(portal_type = 'Contact', getUsername = userid)
contact_fullname = c[0].getObject().getFullname() if c else None
return contact_fullname or member_fullname or userid
def user_email(self, userid):
member = self.portal_membership.getMemberById(userid)
if member is None:
return userid
member_email = member.getProperty('email')
c = self.portal_catalog(portal_type = 'Contact', getUsername = userid)
contact_email = c[0].getObject().getEmailAddress() if c else None
return contact_email or member_email or ''
def python_date_format(self, long_format=None, time_only=False):
"""This convert bika domain date format msgstrs to Python
strftime format strings, by the same rules as ulocalized_time.
XXX i18nl10n.py may change, and that is where this code is taken from.
"""
# get msgid
msgid = long_format and 'date_format_long' or 'date_format_short'
if time_only:
msgid = 'time_format'
# get the formatstring
formatstring = translate(msgid, domain='bika', mapping={}, context=self.request)
if formatstring is None or formatstring.startswith('date_') or formatstring.startswith('time_'):
self.logger.error("bika/%s/%s could not be translated" %
(self.request.get('LANGUAGE'), msgid))
# msg catalog was not able to translate this msgids
# use default setting
properties = getToolByName(self.context, 'portal_properties').site_properties
if long_format:
format = properties.localLongTimeFormat
else:
if time_only:
format = properties.localTimeOnlyFormat
else:
format = properties.localTimeFormat
return format
return formatstring.replace(r"${", '%').replace('}', '')
@lazy_property
def date_format_long(self):
fmt = self.python_date_format(long_format=1)
if fmt == "date_format_long":
fmt = "%Y-%m-%d %I:%M %p"
return fmt
@lazy_property
def date_format_short(self):
fmt = self.python_date_format()
if fmt == "date_format_short":
fmt = "%Y-%m-%d"
return fmt
@lazy_property
def time_format(self):
fmt = self.python_date_format(time_only=True)
if fmt == "time_format":
fmt = "%I:%M %p"
return fmt
| agpl-3.0 | 4,701,973,804,261,306,000 | 35.457143 | 104 | 0.633033 | false | 4.089744 | false | false | false |
flyte/zmq-io-modules | zmq_io/out_pifacedigitalio.py | 1 | 1283 | import argparse
import json
import pifacedigitalio as pfdio
import zmq
PINS = (
0b00000001,
0b00000010,
0b00000100,
0b00001000,
0b00010000,
0b00100000,
0b01000000,
0b10000000
)
def parse_args():
"""
Specify and parse command line arguments.
"""
p = argparse.ArgumentParser()
p.add_argument("pub_uri")
p.add_argument("--prefix", default="INPUT")
return p.parse_args()
def set_up_pub_socket(uri):
"""
Create ZeroMQ PUB socket and bind it to the specified uri.
"""
context = zmq.Context()
socket = context.socket(zmq.PUB)
socket.bind(uri)
return socket
def input_changed(event):
"""
Handler for input changes. Forms a dictionary containing event information and PUBlishes it
using the global ZeroMQ PUB socket.
"""
input_port = event.chip.input_port.value
data = {
"state": {i: bool(input_port & PINS[i]) for i, _ in enumerate(PINS)}
}
socket.send("%s%s" % (args.prefix, json.dumps(data)))
if __name__ == "__main__":
args = parse_args()
socket = set_up_pub_socket(args.pub_uri)
listener = pfdio.InputEventListener()
for i, _ in enumerate(PINS):
listener.register(i, pfdio.IODIR_BOTH, input_changed)
listener.activate()
| unlicense | 6,201,230,335,833,343,000 | 21.12069 | 95 | 0.636009 | false | 3.39418 | false | false | false |
flipmarley/encrypt-and-wrap | enc.py | 1 | 1985 | #!/usr/bin/python
"""
Generate encrypted messages wrapped in a self-decrypting python script
usage: python enc.py password > out.py
where password is the encryption password and out.py is the message/script file
to decrypt use: python out.py password
this will print the message to stdout.
"""
import sys, random
def encrypt(key, msg):
encrypted = []
for i, c in enumerate(msg):
key_c = ord(key[i % len(key)])-32
msg_c = ord(c)-32
encrypted.append(chr(((msg_c + key_c) % 95)+32))
return ''.join(encrypted)
def decrypt(key, enc):
msg=[]
for i, c in enumerate(enc):
key_c = ord(key[i % len(key)])-32
enc_c = ord(c)-32
msg.append(chr(((enc_c - key_c) % 95)+32))
return ''.join(msg)
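# Illustrative note (not part of the original script): encrypt()/decrypt() form a
# Vigenere-style shift cipher over the 95 printable ASCII characters, so decryption
# inverts encryption for any printable-ASCII key, e.g.:
#
#   decrypt("some key", encrypt("some key", "hello world")) == "hello world"   # True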
def make_randstr(msg_len):
sl = []
r = random.SystemRandom()
for i in range(msg_len):
sl.append(chr(r.randint(32,126)))
return ''.join(sl)
if __name__ == '__main__':
msg = sys.stdin.read().replace("\n","\\n").replace("\t","\\t")
randstr = make_randstr(len(msg))
key = encrypt(sys.argv[1], randstr)
encrypted = encrypt(key, msg)
decrypted = decrypt(key, encrypted)
if not msg == decrypted:
raise Exception("Encryption Fail")
print """
#!/usr/bin/python
import sys
def encrypt(key, msg):
encrypted = []
for i, c in enumerate(msg):
key_c = ord(key[i % len(key)])-32
msg_c = ord(c)-32
encrypted.append(chr(((msg_c + key_c) % 95)+32))
return ''.join(encrypted)
def decrypt(key, enc):
msg=[]
for i, c in enumerate(enc):
key_c = ord(key[i % len(key)])-32
enc_c = ord(c)-32
msg.append(chr(((enc_c - key_c) % 95)+32))
return ''.join(msg)
if __name__ == '__main__':"""
print "\trandstr = ", repr(randstr)
print "\tenc = ", repr(encrypted)
print "\tkey = encrypt(sys.argv[1], randstr)"
print "\tdecrypted = decrypt(key, enc).replace(\"\\\\n\",\"\\n\").replace(\"\\\\t\",\"\\t\")"
print "\tprint decrypted"
| mit | 1,148,674,825,854,159,400 | 26.957746 | 97 | 0.582872 | false | 3.063272 | false | false | false |
jrichte43/ProjectEuler | Problem-0358/solutions.py | 1 | 1809 |
__problem_title__ = "Cyclic numbers"
__problem_url___ = "https://projecteuler.net/problem=358"
__problem_description__ = "A with digits has a very interesting property: When it is multiplied " \
"by 1, 2, 3, 4, ... , all the products have exactly the same digits, " \
"in the same order, but rotated in a circular fashion! The smallest " \
"cyclic number is the 6-digit number 142857 : 142857 × 1 = 142857 " \
"142857 × 2 = 285714 142857 × 3 = 428571 142857 × 4 = 571428 142857 × " \
"5 = 714285 142857 × 6 = 857142 The next cyclic number is " \
"0588235294117647 with 16 digits : 0588235294117647 × 1 = " \
"0588235294117647 0588235294117647 × 2 = 1176470588235294 " \
"0588235294117647 × 3 = 1764705882352941 ... 0588235294117647 × 16 = " \
"9411764705882352 Note that for cyclic numbers, leading zeros are " \
"important. There is only one cyclic number for which, the eleven " \
"leftmost digits are 00000000137 and the five rightmost digits are " \
"56789 (i.e., it has the form 00000000137...56789 with an unknown " \
"number of digits in the middle). Find the sum of all its digits."
import timeit
class Solution():
@staticmethod
def solution1():
pass
@staticmethod
def time_solutions():
setup = 'from __main__ import Solution'
print('Solution 1:', timeit.timeit('Solution.solution1()', setup=setup, number=1))
if __name__ == '__main__':
s = Solution()
print(s.solution1())
s.time_solutions()
| gpl-3.0 | -6,630,862,513,542,238,000 | 46.342105 | 99 | 0.550862 | false | 4.024609 | false | false | false |
ryfeus/lambda-packs | Tensorflow_OpenCV_Nightly/source/tensorflow/contrib/seq2seq/ops/gen_beam_search_ops.py | 3 | 2306 | """Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
"""
import collections as _collections
from google.protobuf import text_format as _text_format
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
def gather_tree(step_ids, parent_ids, sequence_length, name=None):
r"""Calculates the full beams from the per-step ids and parent beam ids.
This op implements the following mathematical equations:
```python
TODO(ebrevdo): fill in
```
Args:
step_ids: A `Tensor`. Must be one of the following types: `int32`.
`[max_time, batch_size, beam_width]`.
parent_ids: A `Tensor`. Must have the same type as `step_ids`.
`[max_time, batch_size, beam_width]`.
sequence_length: A `Tensor`. Must have the same type as `step_ids`.
`[batch_size, beam_width]`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `step_ids`.
`[max_time, batch_size, beam_width]`.
"""
result = _op_def_lib.apply_op("GatherTree", step_ids=step_ids,
parent_ids=parent_ids,
sequence_length=sequence_length, name=name)
return result
_ops.RegisterShape("GatherTree")(None)
def _InitOpDefLibrary():
op_list = _op_def_pb2.OpList()
_text_format.Merge(_InitOpDefLibrary.op_list_ascii, op_list)
_op_def_registry.register_op_list(op_list)
op_def_lib = _op_def_library.OpDefLibrary()
op_def_lib.add_op_list(op_list)
return op_def_lib
_InitOpDefLibrary.op_list_ascii = """op {
name: "GatherTree"
input_arg {
name: "step_ids"
type_attr: "T"
}
input_arg {
name: "parent_ids"
type_attr: "T"
}
input_arg {
name: "sequence_length"
type_attr: "T"
}
output_arg {
name: "beams"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_INT32
}
}
}
}
"""
_op_def_lib = _InitOpDefLibrary()
| mit | 8,465,168,776,607,803,000 | 25.204545 | 75 | 0.655247 | false | 3.22067 | false | false | false |
iivvoo/resturo | resturo/serializers.py | 1 | 2545 | from django.contrib.auth.models import User
from rest_framework import serializers
from rest_framework_jwt.settings import api_settings
from .models import EmailVerification, modelresolver
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'first_name', 'last_name', 'email',
'verified', 'is_staff', 'is_superuser', 'is_active',
'date_joined')
read_only_fields = ('is_staff', 'is_superuser',
'is_active', 'date_joined',)
verified = serializers.SerializerMethodField()
def get_verified(self, obj):
try:
return obj.verification.verified
except EmailVerification.DoesNotExist:
return True
class UserCreateSerializer(serializers.ModelSerializer):
jwt_token = serializers.CharField(read_only=True)
class Meta:
model = User
fields = ('id', 'username', 'first_name', 'last_name',
'email', 'password', 'jwt_token')
extra_kwargs = {'password': {'write_only': True}}
def create(self, validated_data):
user = self.Meta.model(
email=validated_data['email'],
username=validated_data['username'],
first_name=validated_data['first_name'],
last_name=validated_data['last_name']
)
user.set_password(validated_data['password'])
user.save()
# XXX should be jwt / token agnostic!
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
payload = jwt_payload_handler(user)
token = jwt_encode_handler(payload)
user.jwt_token = token
return user
class OrganizationSerializer(serializers.ModelSerializer):
class Meta:
model = modelresolver('Organization')
fields = ("id", "name")
class PasswordResetSerializer(serializers.Serializer):
class Meta:
model = modelresolver('Organization')
token = serializers.CharField()
password = serializers.CharField()
class InviteSerializer(serializers.Serializer):
handle = serializers.CharField()
strict = serializers.BooleanField()
role = serializers.IntegerField()
class JoinSerializer(serializers.Serializer):
JOIN_ACCEPT = 1
JOIN_REJECT = 2
token = serializers.CharField()
action = serializers.ChoiceField(choices=(JOIN_ACCEPT, JOIN_REJECT),
default=JOIN_ACCEPT)
| isc | -2,698,504,682,134,451,000 | 28.941176 | 72 | 0.634578 | false | 4.365352 | false | false | false |
toshka/torrt | torrt/notifiers/mail.py | 1 | 2079 | import logging
import socket
from email.mime.text import MIMEText
from smtplib import SMTP, SMTPAuthenticationError
from torrt.base_notifier import BaseNotifier
from torrt.utils import NotifierClassesRegistry
LOGGER = logging.getLogger(__name__)
class EmailNotifier(BaseNotifier):
alias = 'email'
def __init__(self, email, host='localhost', port=25, user=None, password=None, use_tls=False, sender=None):
self.email = email
self.sender = sender
self.host = host
self.port = int(port)
self.user = user
self.password = password
self.use_tls = str(use_tls) == 'True'
self.connection = self.get_connection()
def get_connection(self):
try:
connection = SMTP(self.host, self.port)
connection.ehlo()
except socket.error as e:
LOGGER.error('Could not connect to SMTP server: %s' % e)
return
if self.use_tls:
try:
connection.starttls()
connection.ehlo()
except Exception as e:
LOGGER.error(e)
return
if self.user and self.password:
try:
connection.login(self.user, self.password)
except SMTPAuthenticationError as e:
LOGGER.error(e)
return
return connection
def send_message(self, msg):
self.connection.sendmail(self.sender, [self.email], msg)
def test_configuration(self):
return bool(self.connection)
def make_message(self, torrent_data):
text = '''The following torrents were updated:\n%s\n\nBest regards,\ntorrt.''' \
% '\n'.join(map(lambda t: t['name'], torrent_data.values()))
msg = MIMEText(text)
msg['Subject'] = 'New torrents were added to download queue.'
msg['From'] = self.sender
msg['To'] = self.email
LOGGER.info('Notification message was sent to user %s' % self.email)
return msg.as_string()
NotifierClassesRegistry.add(EmailNotifier)
| bsd-3-clause | -2,388,599,502,392,679,400 | 29.573529 | 111 | 0.599327 | false | 4.158 | false | false | false |
nagyv/account-financial-tools | account_netting/wizard/account_move_make_netting.py | 14 | 4813 | # -*- coding: utf-8 -*-
# (c) 2015 Pedro M. Baeza
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import models, fields, api, exceptions, _
class AccountMoveMakeNetting(models.TransientModel):
_name = "account.move.make.netting"
journal = fields.Many2one(
comodel_name="account.journal", required=True,
domain="[('type', '=', 'general')]")
move_lines = fields.Many2many(comodel_name="account.move.line")
balance = fields.Float(readonly=True)
balance_type = fields.Selection(
selection=[('pay', 'To pay'), ('receive', 'To receive')],
readonly=True)
@api.model
def default_get(self, fields):
if len(self.env.context.get('active_ids', [])) < 2:
raise exceptions.ValidationError(
_("You should compensate at least 2 journal entries."))
move_lines = self.env['account.move.line'].browse(
self.env.context['active_ids'])
if (any(x not in ('payable', 'receivable') for
x in move_lines.mapped('account_id.type'))):
raise exceptions.ValidationError(
_("All entries must have a receivable or payable account"))
if any(move_lines.mapped('reconcile_id')):
raise exceptions.ValidationError(
_("All entries mustn't been reconciled"))
partner_id = None
for move in move_lines:
if (not move.partner_id or (
move.partner_id != partner_id and partner_id is not None)):
raise exceptions.ValidationError(
_("All entries should have a partner and the partner must "
"be the same for all."))
partner_id = move.partner_id
res = super(AccountMoveMakeNetting, self).default_get(fields)
res['move_lines'] = [(6, 0, move_lines.ids)]
balance = (sum(move_lines.mapped('debit')) -
sum(move_lines.mapped('credit')))
res['balance'] = abs(balance)
res['balance_type'] = 'pay' if balance < 0 else 'receive'
return res
@api.multi
def button_compensate(self):
self.ensure_one()
# Create account move
move = self.env['account.move'].create(
{
'ref': _('AR/AP netting'),
'journal_id': self.journal.id,
})
# Group amounts by account
account_groups = self.move_lines.read_group(
[('id', 'in', self.move_lines.ids)],
['account_id', 'debit', 'credit'], ['account_id'])
debtors = []
creditors = []
total_debtors = 0
total_creditors = 0
for account_group in account_groups:
balance = account_group['debit'] - account_group['credit']
group_vals = {
'account_id': account_group['account_id'][0],
'balance': abs(balance),
}
if balance > 0:
debtors.append(group_vals)
total_debtors += balance
else:
creditors.append(group_vals)
total_creditors += abs(balance)
# Create move lines
move_line_model = self.env['account.move.line']
netting_amount = min(total_creditors, total_debtors)
field_map = {1: 'debit', 0: 'credit'}
for i, group in enumerate([debtors, creditors]):
available_amount = netting_amount
for account_group in group:
if account_group['balance'] > available_amount:
amount = available_amount
else:
amount = account_group['balance']
move_line_vals = {
field_map[i]: amount,
'move_id': move.id,
'partner_id': self.move_lines[0].partner_id.id,
'date': move.date,
'period_id': move.period_id.id,
'journal_id': move.journal_id.id,
'name': move.ref,
'account_id': account_group['account_id'],
}
move_line_model.create(move_line_vals)
available_amount -= account_group['balance']
if available_amount <= 0:
break
# Make reconciliation
for move_line in move.line_id:
to_reconcile = move_line + self.move_lines.filtered(
lambda x: x.account_id == move_line.account_id)
to_reconcile.reconcile_partial()
# Open created move
action = self.env.ref('account.action_move_journal_line').read()[0]
action['view_mode'] = 'form'
del action['views']
del action['view_id']
action['res_id'] = move.id
return action
| agpl-3.0 | -521,005,906,590,457,400 | 40.852174 | 79 | 0.534178 | false | 4.09617 | false | false | false |
boraklavun/python- | Kullanıcı Veritabanı.py | 1 | 1722 |
babaListe=list()
Ad_Soyad = input('Ad_Soyad:')
babaListe.insert(1,Ad_Soyad)
Yas = input('Yas:')
babaListe.insert(2,Yas)
Boy = input('Boy:')
babaListe.insert(3,Boy)
print(babaListe)
print('======================')
anneListe=list()
Ad_Soyad = input('Ad_Soyad:')
anneListe.insert(1,Ad_Soyad)
Yas = input('Yas:')
anneListe.insert(2,Yas)
Boy = input('Boy:')
anneListe.insert(3,Boy)
print(anneListe)
print('======================')
dedeListe=list()
Ad_Soyad = input('Ad_Soyad:')
dedeListe.insert(1,Ad_Soyad)
Yas = input('Yas:')
dedeListe.insert(2,Yas)
Boy = input('Boy:')
dedeListe.insert(3,Boy)
print(dedeListe)
print('======================')
nineListe=list()
Ad_Soyad = input('Ad_Soyad:')
nineListe.insert(1,Ad_Soyad)
Yas = input('Yas:')
nineListe.insert(2,Yas)
Boy = input('Boy:')
nineListe.insert(3,Boy)
print(nineListe)
print('=======================')
babaListe=list()
Ad_Soyad = input('Ad_Soyad:')
babaListe.insert(1,Ad_Soyad)
Yas = input('Yas:')
babaListe.insert(2,Yas)
Boy = input('Boy:')
babaListe.insert(3,Boy)
print(babaListe)
print('======================')
anneListe=list()
Ad_Soyad = input('Ad_Soyad:')
anneListe.insert(1,Ad_Soyad)
Yas = input('Yas:')
anneListe.insert(2,Yas)
Boy = input('Boy:')
anneListe.insert(3,Boy)
print(anneListe)
print('======================')
dedeListe=list()
Ad_Soyad = input('Ad_Soyad:')
dedeListe.insert(1,Ad_Soyad)
Yas = input('Yas:')
dedeListe.insert(2,Yas)
Boy = input('Boy:')
dedeListe.insert(3,Boy)
print(dedeListe)
print('======================')
nineListe=list()
Ad_Soyad = input('Ad_Soyad:')
nineListe.insert(1,Ad_Soyad)
Yas = input('Yas:')
nineListe.insert(2,Yas)
Boy = input('Boy:')
nineListe.insert(3,Boy)
print(nineListe)
print('=======================')
| gpl-3.0 | 6,551,307,017,379,564,000 | 18.568182 | 32 | 0.622532 | false | 2.202046 | false | true | false |
levilucio/SyVOLT | UMLRT2Kiltera_MM/Properties/unit_contracts/HUnitR03b_ConnectedLHS.py | 1 | 2112 | from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HUnitR03b_ConnectedLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HUnitR03b_ConnectedLHS
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HUnitR03b_ConnectedLHS, self).__init__(name='HUnitR03b_ConnectedLHS', num_nodes=0, edges=[])
# Add the edges
self.add_edges([])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """return True"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'HUnitR03b_ConnectedLHS')
self["equations"] = []
# Set the node attributes
# match class State(3.1.m.0State) node
self.add_node()
self.vs[0]["MT_pre__attr1"] = """return True"""
self.vs[0]["MT_label__"] = """1"""
self.vs[0]["mm__"] = """MT_pre__State"""
self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'3.1.m.0State')
# match class State(3.1.m.1State) node
self.add_node()
self.vs[1]["MT_pre__attr1"] = """return True"""
self.vs[1]["MT_label__"] = """2"""
self.vs[1]["mm__"] = """MT_pre__State"""
self.vs[1]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'3.1.m.1State')
# match association State--states-->Statenode
self.add_node()
self.vs[2]["MT_pre__attr1"] = """return attr_value == "states" """
self.vs[2]["MT_label__"] = """3"""
self.vs[2]["mm__"] = """MT_pre__directLink_S"""
self.vs[2]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'3.1.m.0Stateassoc23.1.m.1State')
# Add the edges
self.add_edges([
(0,2), # match class State(3.1.m.0State) -> association states
(2,1), # association State -> match class State(3.1.m.1State)
])
# define evaluation methods for each match class.
def eval_attr11(self, attr_value, this):
return True
def eval_attr12(self, attr_value, this):
return True
# define evaluation methods for each match association.
def eval_attr13(self, attr_value, this):
return attr_value == "states"
def constraint(self, PreNode, graph):
return True
| mit | 7,741,502,801,073,365,000 | 29.608696 | 100 | 0.645833 | false | 2.721649 | false | false | false |
desbma/glances | glances/exports/glances_export.py | 1 | 5467 | # -*- coding: utf-8 -*-
#
# This file is part of Glances.
#
# Copyright (C) 2015 Nicolargo <[email protected]>
#
# Glances is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Glances is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
I am your father...
...for all Glances exports IF.
"""
# Import system libs
# None...
# Import Glances lib
from glances.core.glances_logging import logger
class GlancesExport(object):
"""Main class for Glances export IF."""
def __init__(self, config=None, args=None):
"""Init the export class."""
# Export name (= module name without glances_)
self.export_name = self.__class__.__module__[len('glances_'):]
logger.debug("Init export interface %s" % self.export_name)
# Init the config & args
self.config = config
self.args = args
# By default export is disable
# Had to be set to True in the __init__ class of child
self.export_enable = False
def exit(self):
"""Close the export module."""
logger.debug("Finalise export interface %s" % self.export_name)
def plugins_to_export(self):
"""Return the list of plugins to export."""
return ['cpu',
'percpu',
'load',
'mem',
'memswap',
'network',
'diskio',
'fs',
'processcount',
'ip',
'system',
'uptime',
'sensors',
'docker']
def get_item_key(self, item):
"""Return the value of the item 'key'."""
try:
ret = item[item['key']]
        except KeyError:
            logger.error("No 'key' available in {0}".format(item))
            # without a valid key there is nothing to look up
            return None
if isinstance(ret, list):
return ret[0]
else:
return ret
def parse_tags(self):
""" Parses some tags into a dict"""
if self.tags:
try:
self.tags = dict([x.split(':') for x in self.tags.split(',')])
except ValueError:
# one of the keyvalue pairs was missing
logger.info('invalid tags passed: %s', self.tags)
self.tags = {}
else:
self.tags = {}
def update(self, stats):
"""Update stats to a server.
The method builds two lists: names and values
and calls the export method to export the stats.
Be aware that CSV export overwrite this class and use a specific one.
"""
if not self.export_enable:
return False
# Get all the stats & limits
all_stats = stats.getAllExports()
all_limits = stats.getAllLimits()
# Get the plugins list
plugins = stats.getAllPlugins()
# Loop over available plugins
for i, plugin in enumerate(plugins):
if plugin in self.plugins_to_export():
if isinstance(all_stats[i], dict):
all_stats[i].update(all_limits[i])
elif isinstance(all_stats[i], list):
all_stats[i] += all_limits[i]
else:
continue
export_names, export_values = self.__build_export(all_stats[i])
self.export(plugin, export_names, export_values)
return True
def __build_export(self, stats):
"""Build the export lists."""
export_names = []
export_values = []
if isinstance(stats, dict):
# Stats is a dict
# Is there a key ?
if 'key' in list(stats.keys()):
pre_key = '{0}.'.format(stats[stats['key']])
else:
pre_key = ''
# Walk through the dict
try:
iteritems = stats.iteritems()
except AttributeError:
iteritems = stats.items()
for key, value in iteritems:
if isinstance(value, list):
try:
value = value[0]
except IndexError:
value = ''
if isinstance(value, dict):
item_names, item_values = self.__build_export(value)
item_names = [pre_key + key.lower() + str(i) for i in item_names]
export_names += item_names
export_values += item_values
else:
export_names.append(pre_key + key.lower())
export_values.append(value)
elif isinstance(stats, list):
# Stats is a list (of dict)
# Recursive loop through the list
for item in stats:
item_names, item_values = self.__build_export(item)
export_names += item_names
export_values += item_values
return export_names, export_values
| lgpl-3.0 | 1,691,273,024,867,034,400 | 32.335366 | 85 | 0.532468 | false | 4.441105 | false | false | false |
rpwagner/tiled-display | flWii/moteCursor.py | 1 | 3801 | import time, sys, os
from mote import detectAllPossible
from moteCache import MoteCache
from mote import Mote
import traceback
from connectToMote import connectToMote
from moteX11 import MoteX11, Rect
class MoteMouseProcessor:
def __init__(self, connectedMote):
self.mote = connectedMote
def processMouse(self):
# print "processAndUpdateMouse", self.mote, self.mote.connected, self.mote.irMode
        if self.mote is None or self.mote.irMode is None:
return None, None
# Use standard bar with 2 dots
# Must receive at least 2 dots to be valid (and change mouse pos)
#
if self.mote.isIrModeFull():
print "Unimplemented"
elif self.mote.isIrModeExt():
print "Unimplemented"
elif self.mote.isIrModeBasic():
# the wiimote can report up to 4 points
# we'll to convert the two brightest into "base" position so
# we can generate a mouse x,y from them
pointList = self.mote.extractNormalizedPoints()
if len(pointList) > 0:
# print "points:", pointList
#pointSet = pointList[-1]
for pointSet in pointList:
# self.updateMinMaxEdge(pointSet)
if len(pointSet) > 1: # just use the frst two points (we're assuming they're the brightest)
# We're going to require at least two valid led coordinates (i.e. not 0)
if not (pointSet[0][0] == 0.0 or pointSet[1][0] == 0.0 or pointSet[0][1] == 0.0 or pointSet[1][1] == 0.0
or pointSet[0][0] == 1.0 or pointSet[1][0] == 1.0 or pointSet[0][1] == 1.0 or pointSet[1][1] == 1.0):
midpoint = ( (pointSet[0][0] + pointSet[1][0]) / 2. ,
(pointSet[0][1] + pointSet[1][1]) / 2. )
scale = 1.4
scaledMidpoint = ( ((midpoint[0]-.5) * scale) + 0.5,
((midpoint[1]-.5) * scale) + 0.5)
# print "Setting mouse pos:", scaledMidpoint
#self.moteX11.setMousePosNormalized(1.0 - scaledMidpoint[0], scaledMidpoint[1])
return (1.0-scaledMidpoint[0], scaledMidpoint[1])
"""
pt = self.mote.extractLastNormalizedPoint()
if pt != None:
scale = 1.4
scaledPoint = ( ((pt[0]-.5) * scale) + 0.5,
((pt[1]-.5) * scale) + 0.5)
self.moteX11.setMousePosNormalized(1.0 - scaledPoint[0], scaledPoint[1])
"""
else: # basic
print "Unhandled ir mode:", self.mote.irMode
print "DEBUG:", self.mote, self.mote.connected
raise Exception("Unhandled ir mode")
return None, None
class MoteMouse(MoteMouseProcessor):
def __init__(self, connectedMote, moteX11):
MoteMouseProcessor.__init__(self, connectedMote)
# self.mote = connectedMote # done in parent class
self.moteX11 = moteX11
def processAndUpdateMouse(self):
x, y = MoteMouseProcessor.processMouse(self)
if x != None:
self.moteX11.setMousePosNormalized(x, y)
if __name__ == "__main__":
mote = connectToMote()
x = MoteX11()
try:
x.connectToX()
moteMouse = MoteMouse(mote,x)
while 1:
moteMouse.processAndUpdateMouse()
time.sleep(0.0001)
except:
traceback.print_exc()
finally:
mote.disconnect()
if mote.readThread != None:
print "Exiting, joining thread"
mote.readThread.join()
| apache-2.0 | -2,776,247,221,308,069,400 | 37.01 | 130 | 0.534333 | false | 3.816265 | false | false | false |
oldm/OldMan | oldman/iri.py | 1 | 3706 | from uuid import uuid1
from .exception import OMDataStoreError, OMRequiredHashlessIRIError
class IriGenerator(object):
"""An :class:`~oldman.iri.IriGenerator` object generates
the IRIs of some new :class:`~oldman.resource.Resource` objects.
"""
def __init__(self):
pass
def generate(self, **kwargs):
"""Generates an IRI.
:return: Unique IRI (unicode string).
"""
raise NotImplementedError()
class PrefixedUUIDIriGenerator(IriGenerator):
"""Uses a prefix, a fragment and a unique UUID1 number to generate IRIs.
Recommended generator because UUID1 is robust and fast (no DB access).
:param prefix: IRI prefix.
:param fragment: IRI fragment to append to the hash-less IRI. Defaults to `None`.
"""
def __init__(self, prefix, fragment=None):
self._prefix = prefix
self._fragment = fragment
def generate(self, **kwargs):
"""See :func:`oldman.iri.IriGenerator.generate`."""
partial_iri = _skolemize(prefix=self._prefix)
if self._fragment is not None:
return u"%s#%s" % (partial_iri, self._fragment)
return partial_iri
class BlankNodeIriGenerator(PrefixedUUIDIriGenerator):
"""Generates skolem IRIs that denote blank nodes.
:param hostname: Defaults to `"localhost"`.
"""
def __init__(self, hostname=u"localhost"):
prefix = u"http://%s/.well-known/genid/" % hostname
PrefixedUUIDIriGenerator.__init__(self, prefix=prefix)
class IncrementalIriGenerator(IriGenerator):
"""Generates IRIs with short numbers.
Beautiful but **slow** in concurrent settings. The number generation implies a critical section
and a sequence of two SPARQL requests, which represents a significant bottleneck.
:param prefix: IRI prefix.
    :param data_store: data store object that keeps the per-class instance counter.
:param class_iri: IRI of the RDFS class of which new :class:`~oldman.resource.Resource` objects are instance of.
Usually corresponds to the class IRI of the :class:`~oldman.model.Model` object that
owns this generator.
:param fragment: IRI fragment to append to the hash-less IRI. Defaults to `None`.
"""
def __init__(self, prefix, data_store, class_iri, fragment=None):
self._prefix = prefix
self._data_store = data_store
self._class_iri = class_iri
self._fragment = fragment
self._data_store.check_and_repair_counter(class_iri)
def generate(self, **kwargs):
"""See :func:`oldman.iri.IriGenerator.generate`."""
number = self._data_store.generate_instance_number(self._class_iri)
partial_iri = u"%s%d" % (self._prefix, number)
if self._fragment is not None:
return u"%s#%s" % (partial_iri, self._fragment)
return partial_iri
def reset_counter(self):
"""
For test purposes only
"""
self._data_store.reset_instance_counter(self._class_iri)
class UUIDFragmentIriGenerator(IriGenerator):
"""Generates an hashed IRI from a hash-less IRI.
Its fragment is a unique UUID1 number.
"""
def generate(self, hashless_iri, **kwargs):
"""See :func:`oldman.iri.IriGenerator.generate`."""
if hashless_iri is None:
raise OMRequiredHashlessIRIError(u"Hash-less IRI is required to generate an IRI")
if '#' in hashless_iri:
raise OMRequiredHashlessIRIError(u"%s is not a valid hash-less IRI" % hashless_iri)
return u"%s#%s" % (hashless_iri, uuid1().hex)
def _skolemize(prefix=u"http://localhost/.well-known/genid/"):
return u"%s%s" % (prefix, uuid1().hex)
| bsd-3-clause | 6,700,871,481,564,555,000 | 33.635514 | 116 | 0.650027 | false | 3.797131 | false | false | false |
abhilashnta/edx-platform | openedx/core/djangoapps/profile_images/views.py | 23 | 6206 | """
This module implements the upload and remove endpoints of the profile image api.
"""
from contextlib import closing
import datetime
import logging
from django.utils.translation import ugettext as _
from django.utils.timezone import utc
from rest_framework import permissions, status
from rest_framework.parsers import MultiPartParser, FormParser
from rest_framework.response import Response
from rest_framework.views import APIView
from openedx.core.djangoapps.user_api.errors import UserNotFound
from openedx.core.lib.api.authentication import (
OAuth2AuthenticationAllowInactiveUser,
SessionAuthenticationAllowInactiveUser,
)
from openedx.core.lib.api.permissions import IsUserInUrl, IsUserInUrlOrStaff
from openedx.core.djangoapps.user_api.accounts.image_helpers import get_profile_image_names, set_has_profile_image
from .images import validate_uploaded_image, create_profile_images, remove_profile_images, ImageValidationError
log = logging.getLogger(__name__)
LOG_MESSAGE_CREATE = 'Generated and uploaded images %(image_names)s for user %(user_id)s'
LOG_MESSAGE_DELETE = 'Deleted images %(image_names)s for user %(user_id)s'
def _make_upload_dt():
"""
Generate a server-side timestamp for the upload. This is in a separate
function so its behavior can be overridden in tests.
"""
return datetime.datetime.utcnow().replace(tzinfo=utc)
class ProfileImageUploadView(APIView):
"""
**Use Cases**
Upload an image to be used for the user's profile.
The requesting user must be signed in. The signed in user can only
upload his or her own profile image.
**Example Requests**
POST /api/profile_images/v1/{username}/upload
**Response for POST**
If the requesting user tries to upload the image for a different user:
* If the requesting user has staff access, the request returns a 403
error.
* If the requesting user does not have staff access, the request returns
a 404 error.
If no user matches the "username" parameter, the request returns a 404
error.
        If the upload could not be performed, the request returns a 400 error
        with details.
If the upload is successful, the request returns a 204 status with no
additional content.
"""
parser_classes = (MultiPartParser, FormParser,)
authentication_classes = (OAuth2AuthenticationAllowInactiveUser, SessionAuthenticationAllowInactiveUser)
permission_classes = (permissions.IsAuthenticated, IsUserInUrl)
def post(self, request, username):
"""
POST /api/profile_images/v1/{username}/upload
"""
# validate request:
# verify that the user's
# ensure any file was sent
if 'file' not in request.FILES:
return Response(
{
"developer_message": u"No file provided for profile image",
"user_message": _(u"No file provided for profile image"),
},
status=status.HTTP_400_BAD_REQUEST
)
# process the upload.
uploaded_file = request.FILES['file']
# no matter what happens, delete the temporary file when we're done
with closing(uploaded_file):
# image file validation.
try:
validate_uploaded_image(uploaded_file)
except ImageValidationError as error:
return Response(
{"developer_message": error.message, "user_message": error.user_message},
status=status.HTTP_400_BAD_REQUEST,
)
# generate profile pic and thumbnails and store them
profile_image_names = get_profile_image_names(username)
create_profile_images(uploaded_file, profile_image_names)
# update the user account to reflect that a profile image is available.
set_has_profile_image(username, True, _make_upload_dt())
log.info(
LOG_MESSAGE_CREATE,
{'image_names': profile_image_names.values(), 'user_id': request.user.id}
)
# send client response.
return Response(status=status.HTTP_204_NO_CONTENT)
class ProfileImageRemoveView(APIView):
"""
**Use Cases**
Remove all of the profile images associated with the user's account.
The requesting user must be signed in.
Users with staff access can remove profile images for other user
accounts.
Users without staff access can only remove their own profile images.
**Example Requests**
POST /api/profile_images/v1/{username}/remove
**Response for POST**
Requesting users who do not have staff access and try to remove another
user's profile image receive a 404 error.
If no user matches the "username" parameter, the request returns a 404
error.
If the request could not remove the image, the request returns a 400
error with details.
If the request successfully removes the image, the request returns a 204
status with no additional content.
"""
authentication_classes = (OAuth2AuthenticationAllowInactiveUser, SessionAuthenticationAllowInactiveUser)
permission_classes = (permissions.IsAuthenticated, IsUserInUrlOrStaff)
def post(self, request, username): # pylint: disable=unused-argument
"""
POST /api/profile_images/v1/{username}/remove
"""
try:
# update the user account to reflect that the images were removed.
set_has_profile_image(username, False)
# remove physical files from storage.
profile_image_names = get_profile_image_names(username)
remove_profile_images(profile_image_names)
log.info(
LOG_MESSAGE_DELETE,
{'image_names': profile_image_names.values(), 'user_id': request.user.id}
)
except UserNotFound:
return Response(status=status.HTTP_404_NOT_FOUND)
# send client response.
return Response(status=status.HTTP_204_NO_CONTENT)
| agpl-3.0 | 1,304,677,595,282,145,000 | 33.670391 | 114 | 0.662101 | false | 4.600445 | false | false | false |
samrose3/eventhunt | src/eventhunt/settings/base.py | 1 | 2084 | """Django settings for eventhunt project."""
import datetime
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) # remove /sswmain/settings to get base folder
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ajsdgas7&*kosdsa21[]jaksdhlka-;kmcv8l$#diepsm8&ah^'
# Eventbrite OAUTH_TOKEN
EVENTBRITE_OAUTH_TOKEN = os.environ.get('EVENTBRITE_OAUTH_TOKEN')
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['']
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.staticfiles',
'rest_framework',
'django_extensions',
'api',
'base'
)
MIDDLEWARE_CLASSES = (
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.common.CommonMiddleware'
)
ROOT_URLCONF = 'eventhunt.urls'
WSGI_APPLICATION = 'eventhunt.wsgi.application'
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
ACCOUNT_ACTIVATION_DAYS = 7 # days
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static_root')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static_dist'),
)
# store static files locally and serve with whitenoise
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# ############# REST FRAMEWORK ###################
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 20,
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser',
'rest_framework.parsers.FormParser',
'rest_framework.parsers.MultiPartParser',
),
}
| mit | -7,201,158,964,377,637,000 | 24.108434 | 117 | 0.706334 | false | 3.383117 | false | false | false |
rmg/Skoarcery | Skoarcery/factoary/Code_Parser_Py.py | 1 | 4401 | import unittest
from Skoarcery import langoids, terminals, nonterminals, dragonsets, parsetable, emissions
from Skoarcery.langoids import Terminal, Nonterminal
class Code_Parser_Py(unittest.TestCase):
def setUp(self):
terminals.init()
nonterminals.init()
langoids.init()
dragonsets.init()
parsetable.init()
emissions.init()
def test_pyrdpp(self):
from Skoarcery.dragonsets import FIRST, FOLLOW
from Skoarcery.terminals import Empty
fd = open("../pymp/rdpp.py", "w")
PY = emissions.PY
PY.fd = fd
# Header
# Imports
# class SkoarParseException
# class SkoarParser:
# __init__
# fail
self.code_start()
PY.tab += 1
N = nonterminals.nonterminals.values()
# write each nonterminal as a function
for A in N:
R = A.production_rules
#PY.cmt(str(A))
PY.stmt("def " + A.name + "(self, parent):")
PY.tab += 1
PY.stmt("self.tab += 1")
if A.intermediate:
PY.stmt("noad = parent")
else:
PY.stmt("noad = SkoarNoad('" + A.name + "', None, parent)")
PY.nl()
#PY.code_line("print('" + A.name + "')")
for P in R:
if P.derives_empty:
continue
# A -> alpha
alpha = P.production
desires = FIRST(alpha)
if Empty in desires:
desires.discard(Empty)
desires.update(FOLLOW(A))
PY.cmt(str(P))
i = 0
n = len(desires)
PY.stmt("desires = [", end="")
for toke in desires:
PY.raw(toke.toker_name)
i += 1
if i != n:
if i % 5 == 0:
PY.raw(",\n")
PY.stmt(" ", end="")
else:
PY.raw(", ")
else:
PY.raw("]\n")
PY.if_("self.toker.sees(desires)")
#PY.print(str(P))
for x in alpha:
if isinstance(x, Terminal):
PY.stmt("noad.add_toke('" + x.toker_name + "', self.toker.burn(" + x.toker_name + "))")
#PY.print("burning: " + x.name)
else:
if x.intermediate:
PY.stmt("self." + x.name + "(noad)")
else:
PY.stmt("noad.add_noad(self." + x.name + "(noad))")
else:
PY.return_("noad")
PY.tab -= 1
PY.nl()
if A.derives_empty:
PY.cmt("<e>")
#PY.print("burning empty")
PY.return_("noad")
else:
PY.cmt("Error State")
PY.stmt("self.fail()")
PY.tab -= 1
PY.nl()
PY.tab -= 1
fd.close()
def code_start(self):
from Skoarcery.terminals import Empty
PY = emissions.PY
PY.file_header("rdpp.py", "PyRDPP - Create Recursive Descent Predictive Parser")
s = "from Skoarcery.pymp.apparatus import SkoarNoad\n"\
"from Skoarcery.pymp.lex import "
T = terminals.tokens.values()
n = len(T)
i = 0
for t in T:
if t == Empty:
n -= 1
continue
s += t.toker_name
i += 1
if i < n:
if i % 5 == 0:
s += ", \\\n "
else:
s += ", "
PY.raw(s + """
class SkoarParseException(Exception):
pass
class SkoarParser:
def __init__(self, runtime):
self.runtime = runtime
self.toker = runtime.toker
self.tab = 0
def fail(self):
self.toker.dump()
raise SkoarParseException
@property
def tabby(self):
if self.tab == 0:
return ""
return ("{:>" + str(self.tab * 2) + "}").format(" ")
def print(self, line, end):
print(self.tabby + line, end=end)
""")
| artistic-2.0 | -8,356,453,499,325,439,000 | 24.293103 | 111 | 0.416951 | false | 3.954178 | false | false | false |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/theano/gpuarray/tests/rnn_support.py | 1 | 8280 | from __future__ import absolute_import, print_function, division
import theano
import theano.tensor as T
import numpy as np
class Model(object):
def __init__(self, name=""):
self.name = name
self.layers = []
self.params = []
self.other_updates = {}
def add_layer(self, layer):
self.layers.append(layer)
for p in layer.params:
self.params.append(p)
if hasattr(layer, 'other_updates'):
for y in layer.other_updates:
self.other_updates[y[0]] = y[1]
def get_params(self):
return self.params
def uniform(stdev, size):
"""uniform distribution with the given stdev and size"""
return np.random.uniform(
low=-stdev * np.sqrt(3),
high=stdev * np.sqrt(3),
size=size
).astype(theano.config.floatX)
def linear_transform_weights(input_dim, output_dim,
param_list=None, name=""):
"theano shared variable given input and output dimension"
weight_inialization = uniform(np.sqrt(2.0 / input_dim),
(input_dim, output_dim))
W = theano.shared(weight_inialization, name=name)
assert(param_list is not None)
param_list.append(W)
return W
def bias_weights(length, param_list=None, name=""):
"theano shared variable for bias unit, given length"
bias_initialization = np.zeros(length).astype(theano.config.floatX)
bias = theano.shared(
bias_initialization,
name=name
)
if param_list is not None:
param_list.append(bias)
return bias
class Layer(object):
'''Generic Layer Template which all layers should inherit'''
def __init__(self, name=""):
self.name = name
self.params = []
def get_params(self):
return self.params
class GRU(Layer):
def __init__(self, input_dim, output_dim, input_layer, s0=None, name=""):
'''Layers information'''
self.name = name
self.input_dim = input_dim
self.hidden_dim = output_dim
self.output_dim = output_dim
self.input_layer = input_layer
self.X = input_layer.output()
self.s0 = s0
self.params = []
'''Layers weights'''
'''self.params is passed so that any paramters could be appended to it'''
self.W_r = linear_transform_weights(input_dim, output_dim, param_list=self.params, name=name + ".W_r")
self.b_wr = bias_weights((output_dim,), param_list=self.params, name=name + ".b_wr")
self.W_i = linear_transform_weights(input_dim, output_dim, param_list=self.params, name=name + ".W_i")
self.b_wi = bias_weights((output_dim,), param_list=self.params, name=name + ".b_wi")
self.W_h = linear_transform_weights(input_dim, output_dim, param_list=self.params, name=name + ".W_h")
self.b_wh = bias_weights((output_dim,), param_list=self.params, name=name + ".b_wh")
self.R_r = linear_transform_weights(output_dim, output_dim, param_list=self.params, name=name + ".R_r")
self.b_rr = bias_weights((output_dim,), param_list=self.params, name=name + ".b_rr")
self.R_i = linear_transform_weights(output_dim, output_dim, param_list=self.params, name=name + ".R_i")
self.b_ru = bias_weights((output_dim,), param_list=self.params, name=name + ".b_ru")
self.R_h = linear_transform_weights(output_dim, output_dim, param_list=self.params, name=name + ".R_h")
self.b_rh = bias_weights((output_dim,), param_list=self.params, name=name + ".b_rh")
'''step through processed input to create output'''
def step(inp, s_prev):
i_t = T.nnet.sigmoid(
T.dot(inp, self.W_i) + T.dot(s_prev, self.R_i) + self.b_wi + self.b_ru)
r_t = T.nnet.sigmoid(
T.dot(inp, self.W_r) + T.dot(s_prev, self.R_r) + self.b_wr + self.b_rr)
h_hat_t = T.tanh(
T.dot(inp, self.W_h) + (r_t * (T.dot(s_prev, self.R_h) + self.b_rh)) + self.b_wh)
s_curr = ((1.0 - i_t) * h_hat_t) + (i_t * s_prev)
return s_curr
outputs_info = self.s0
states, updates = theano.scan(
fn=step,
sequences=[self.X],
outputs_info=outputs_info
)
self.Y = states
def output(self):
return self.Y
class LSTM(Layer):
def __init__(self, input_dim, output_dim, input_layer, s0=None, c0=None,
name=""):
'''Layers information'''
self.name = name
self.input_dim = input_dim
self.hidden_dim = output_dim
self.output_dim = output_dim
self.input_layer = input_layer
self.X = input_layer.output()
self.s0 = s0
self.c0 = c0
self.params = []
'''Layers weights'''
'''self.params is passed so that any paramters could be appended to it'''
self.W_i = linear_transform_weights(input_dim, output_dim, param_list=self.params, name=name + ".W_i")
self.b_wi = bias_weights((output_dim,), param_list=self.params, name=name + ".b_wi")
self.W_f = linear_transform_weights(input_dim, output_dim, param_list=self.params, name=name + ".W_f")
self.b_wf = bias_weights((output_dim,), param_list=self.params, name=name + ".b_wf")
self.W_c = linear_transform_weights(input_dim, output_dim, param_list=self.params, name=name + ".W_c")
self.b_wc = bias_weights((output_dim,), param_list=self.params, name=name + ".b_wc")
self.W_o = linear_transform_weights(input_dim, output_dim, param_list=self.params, name=name + ".W_o")
self.b_wo = bias_weights((output_dim,), param_list=self.params, name=name + ".b_wo")
self.R_i = linear_transform_weights(output_dim, output_dim, param_list=self.params, name=name + ".R_i")
self.b_ri = bias_weights((output_dim,), param_list=self.params, name=name + ".b_ri")
self.R_f = linear_transform_weights(output_dim, output_dim, param_list=self.params, name=name + ".R_f")
self.b_rf = bias_weights((output_dim,), param_list=self.params, name=name + ".b_rf")
self.R_c = linear_transform_weights(output_dim, output_dim, param_list=self.params, name=name + ".R_c")
self.b_rc = bias_weights((output_dim,), param_list=self.params, name=name + ".b_rc")
self.R_o = linear_transform_weights(output_dim, output_dim, param_list=self.params, name=name + ".R_o")
self.b_ro = bias_weights((output_dim,), param_list=self.params, name=name + ".b_ro")
'''step through processed input to create output'''
def step(x_t, h_tm1, c_tm1):
i_t = T.nnet.sigmoid(
T.dot(x_t, self.W_i) + T.dot(h_tm1, self.R_i) + self.b_wi + self.b_ri)
f_t = T.nnet.sigmoid(
T.dot(x_t, self.W_f) + T.dot(h_tm1, self.R_f) + self.b_wf + self.b_rf)
o_t = T.nnet.sigmoid(
T.dot(x_t, self.W_o) + T.dot(h_tm1, self.R_o) + self.b_ro + self.b_wo)
c_hat_t = T.tanh(
T.dot(x_t, self.W_c) + T.dot(h_tm1, self.R_c) + self.b_wc + self.b_rc)
c_t = f_t * c_tm1 + i_t * c_hat_t
h_t = o_t * T.tanh(c_t)
return h_t, c_t
outputs_info = [self.s0, self.c0]
states, updates = theano.scan(
fn=step,
sequences=[self.X],
outputs_info=outputs_info
)
self.Y = states[0]
self.C = states[1]
def output(self):
return self.Y
class FC(Layer):
def __init__(self, input_dim, output_dim, input_layer, name=""):
self.input_layer = input_layer
self.name = name
self.params = []
self.input_dim = input_dim
self.output_dim = output_dim
self.X = self.input_layer.output()
self.W = linear_transform_weights(input_dim, output_dim, param_list=self.params, name=name + ".W")
self.b = bias_weights((output_dim,), param_list=self.params, name=name + ".b")
def output(self):
return T.dot(self.X, self.W) + self.b
class WrapperLayer(Layer):
def __init__(self, X, name=""):
self.params = []
self.name = name
self.X = X
def output(self):
return self.X
| agpl-3.0 | 5,858,367,406,402,465,000 | 34.844156 | 111 | 0.575242 | false | 3.179724 | false | false | false |
google-research/google-research | ebp/ebp/common/generator.py | 1 | 17042 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import tensorflow.compat.v1 as tf
import sonnet as snt
import numpy as np
from ebp.common.tf_utils import MLP
from ebp.common.flow_family import iCondResFlow
from ebp.common.flow_family import HyperNet, NormFlow
class Generator(snt.AbstractModule):
def __init__(self,
pc_dim=(2048, 3),
fc_dims=(64, 128, 512, 1024),
act=tf.nn.relu,
entropy_reg=True,
batch_norm=False,
name='gen'):
super(Generator, self).__init__(name=name)
self.pc_dim = pc_dim
self.act = act
self.batch_norm = batch_norm
self.entropy_reg = entropy_reg
self.fc_body = []
self.fc_sigma_body = []
self.bn_body = []
self.bn_sigma_body = []
with self._enter_variable_scope():
for i, fc_dim in enumerate(fc_dims):
fc = snt.Linear(fc_dim, name='fc_%d' % i)
self.fc_body.append(fc)
self.bn_body.append(
snt.BatchNorm(offset=True, scale=True, name='bn_%d' % i))
self.fc_final = snt.Linear(np.prod(pc_dim), name='fc_final')
for i, fc_dim in enumerate(fc_dims):
fc = snt.Linear(fc_dim, name='fc_sigma_%d' % i)
self.fc_sigma_body.append(fc)
self.bn_sigma_body.append(
snt.BatchNorm(offset=True, scale=True, name='bn_sigma_%d' % i))
self.fc_sigma_final = snt.Linear(np.prod(pc_dim), name='fc_sigma_final')
def _build(self, z, is_training=True):
x = self.fc_body[0](z)
if self.batch_norm:
x = self.bn_body[0](x, is_training)
for i in range(1, len(self.fc_body)):
x = self.act(x)
x = self.fc_body[i](x)
if self.batch_norm:
x = self.bn_body[i](x, is_training)
x = self.act(x)
x = self.fc_final(x)
logprob = None
if self.entropy_reg:
sigma = self.fc_sigma_body[0](z)
for fc in self.fc_sigma_body[1:]:
sigma = self.act(sigma)
sigma = fc(sigma)
sigma = self.act(sigma)
sigma = self.fc_sigma_final(sigma)
sigma = tf.sigmoid(sigma)
#sigma = tf.abs(1e-3 * tf.sigmoid(sigma))
logprob = tf.reduce_sum(-tf.log(sigma + 1e-6), axis=1)
x = x + sigma * tf.random_normal(tf.shape(sigma))
x = tf.reshape(x, (-1,) + self.pc_dim)
#with tf.control_dependencies([tf.print('ent', tf.reduce_mean(logprob))]):
return x, tf.identity(logprob)
def generate_noise(self, num_samples, z_dim=128, mu=0, sigma=0.2):
return np.random.normal(mu, sigma, (num_samples, *z_dim))
class LVMBlock(snt.AbstractModule):
def __init__(self,
gauss_dim,
depth=3,
act_hidden=tf.nn.relu,
name='lvm_block'):
super(LVMBlock, self).__init__(name=name)
hidden_dims = [min(gauss_dim, 256)] * depth
with self._enter_variable_scope():
self.mlp = snt.nets.MLP(
output_sizes=hidden_dims, activation=act_hidden, activate_final=True)
self.w_mu = tf.get_variable('w_mu', shape=[hidden_dims[-1], gauss_dim])
self.b_mu = tf.get_variable('b_mu', shape=[1, gauss_dim])
self.w_logsig = tf.get_variable(
'w_logsig', shape=[hidden_dims[-1], gauss_dim])
self.b_logsig = tf.get_variable('b_logsig', shape=[1, gauss_dim])
def _build(self, inputs):
z = self.mlp(inputs)
mu = tf.matmul(z, self.w_mu) + self.b_mu
logsig = tf.matmul(z, self.w_logsig) + self.b_logsig
sigma = tf.sigmoid(logsig)
sigma = tf.exp(logsig)
eps = tf.random.normal(
shape=tf.shape(mu), mean=0, stddev=1, dtype=tf.float32)
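    # reparameterisation: sample x ~ N(mu, sigma^2) in a way that keeps
    # gradients flowing through mu and sigma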
x = mu + sigma * eps
ent = tf.reduce_sum(-tf.log(sigma + 1e-6), axis=-1)
return x, mu, logsig, ent
class RNNGenerator(snt.AbstractModule):
def __init__(self,
block_size,
rnn_input_dim=128,
state_dim=128,
pc_dim=(2048, 3),
cell_type='lstm',
act_hidden=tf.nn.relu,
gen_depth=3,
name='rnn_generator'):
"""Args:
state_dim: dimensionality of hidden states of the RNN cell
block_size: number of points to generate at once
pc_dim: a single point cloud's dimension
cell_type: one of [lstm, gru].
"""
assert (pc_dim[0] % block_size == 0)
super(RNNGenerator, self).__init__(name=name)
self.rnn_input_dim = rnn_input_dim
self.pc_dim = pc_dim
self.gauss_dim = block_size * pc_dim[-1]
self.block_size = block_size
self.num_blocks = pc_dim[0] // block_size
self.state_dim = state_dim
self.cell_type = cell_type
with self._enter_variable_scope():
self.input_proj = snt.nets.MLP(
output_sizes=[rnn_input_dim * 2, rnn_input_dim],
activation=act_hidden,
activate_final=True)
if cell_type == 'lstm':
self.rnn_cell = tf.nn.rnn_cell.BasicLSTMCell(state_dim)
elif cell_type == 'gru':
self.rnn_cell = tf.nn.rnn_cell.GRUCell(state_dim)
else:
raise ValueError('cell_type {} not implemented'.format(cell_type))
self.output_lvm = LVMBlock(
self.gauss_dim, act_hidden=act_hidden, depth=gen_depth)
def _build(self, z):
x, mu, logsig, ent = self.output_lvm(z)
state_input = self.input_proj(tf.concat([x, mu, logsig], axis=-1))
sample_output = tf.expand_dims(x, 0)
ent_output = tf.expand_dims(ent, 0)
if self.cell_type == 'lstm':
init_state = tf.nn.rnn_cell.LSTMStateTuple(z, z)
else:
init_state = z
def loop_body(prev_state, state_input, sample_output, ent_output):
state_output, next_state = self.rnn_cell(state_input, prev_state)
x, mu, logsig, ent = self.output_lvm(state_output)
sample_output = tf.concat([sample_output, tf.expand_dims(x, 0)], axis=0)
ent_output = tf.concat([ent_output, tf.expand_dims(ent, 0)], axis=0)
# prep for next iteration
state_input = self.input_proj(tf.concat([x, mu, logsig], axis=-1))
return next_state, state_input, sample_output, ent_output
def loop_cond(prev_state, state_input, sample_output, ent_output):
return tf.shape(ent_output)[0] < self.num_blocks
if self.cell_type == 'lstm':
shape_invariant = tf.nn.rnn_cell.LSTMStateTuple(
tf.TensorShape((None, self.state_dim)),
tf.TensorShape((None, self.state_dim)))
else:
shape_invariant = tf.TensorShape((None, self.state_dim))
_, _, sample_output, ent_output = tf.while_loop(
loop_cond,
loop_body, [init_state, state_input, sample_output, ent_output],
shape_invariants=[
shape_invariant,
tf.TensorShape((None, self.rnn_input_dim)),
tf.TensorShape((None, None, self.gauss_dim)),
tf.TensorShape((None, None))
])
sample_output = tf.reshape(
tf.transpose(sample_output, [1, 0, 2]), (-1,) + self.pc_dim)
ent_output = tf.reduce_sum(ent_output, axis=0)
return sample_output, ent_output
class GPRNN(snt.AbstractModule):
def __init__(self,
block_size,
act_hidden=tf.nn.relu,
pc_dim=(2048, 3),
init_z_dim=128,
name='rnn_generator'):
super(GPRNN, self).__init__(name=name)
self.pc_dim = pc_dim
gauss_dim = block_size * pc_dim[-1]
assert (pc_dim[0] % block_size == 0)
self.num_blocks = pc_dim[0] // block_size - 1
with self._enter_variable_scope():
self.first_block = LVMBlock(
init_z_dim, gauss_dim, act_hidden=self.act_hidden)
if self.num_blocks > 0:
self.lvm_block = LVMBlock(
gauss_dim * pc_dim[-1], gauss_dim, act_hidden=self.act_hidden)
def _build(self, z):
list_x = []
list_ent = []
x, mu, logsig, ent = self.first_block(z)
list_x.append(x)
list_ent.append(ent)
for _ in range(self.num_blocks):
x, mu, logsig, ent = self.lvm_block(tf.concat([x, mu, logsig], axis=-1))
list_x.append(x)
list_ent.append(ent)
x = tf.reshape(tf.concat(list_x, axis=-1), (-1,) + self.pc_dim)
ent = tf.reduce_sum(list_ent, axis=0)
return x, tf.identity(ent)
class DeterministicEncoder(snt.AbstractModule):
"""The Encoder."""
def __init__(self, output_sizes):
super(DeterministicEncoder, self).__init__(name='DeterministicEncoder')
"""CNP encoder.
Args:
output_sizes: An iterable containing the output sizes of the encoding MLP.
"""
self._output_sizes = output_sizes
def _build(self, context_x, context_y, num_context_points):
"""Encodes the inputs into one representation.
Args:
context_x: Tensor of size bs x observations x m_ch. For this 1D regression
task this corresponds to the x-values.
context_y: Tensor of size bs x observations x d_ch. For this 1D regression
task this corresponds to the y-values.
num_context_points: A tensor containing a single scalar that indicates the
number of context_points provided in this iteration.
Returns:
representation: The encoded representation averaged over all context
points.
"""
# Concatenate x and y along the filter axes
encoder_input = tf.concat([context_x, context_y], axis=-1)
# Get the shapes of the input and reshape to parallelise across observations
batch_size, _, filter_size = encoder_input.shape.as_list()
hidden = tf.reshape(encoder_input, (batch_size * num_context_points, -1))
hidden.set_shape((None, filter_size))
# Pass through MLP
with tf.variable_scope('encoder', reuse=tf.AUTO_REUSE):
for i, size in enumerate(self._output_sizes[:-1]):
hidden = tf.nn.relu(
tf.layers.dense(hidden, size, name='Encoder_layer_{}'.format(i)))
# Last layer without a ReLu
hidden = tf.layers.dense(
hidden, self._output_sizes[-1], name='Encoder_layer_{}'.format(i + 1))
# Bring back into original shape
hidden = tf.reshape(hidden, (batch_size, num_context_points, size))
# Aggregator: take the mean over all points
representation = tf.reduce_mean(hidden, axis=1)
return representation
class DeterministicDecoder(snt.AbstractModule):
"""The Decoder."""
def __init__(self, output_sizes):
"""CNP decoder.
Args:
output_sizes: An iterable containing the output sizes of the decoder MLP
as defined in `basic.Linear`.
"""
super(DeterministicDecoder, self).__init__(name='DeterministicDecoder')
self._output_sizes = output_sizes
def _build(self, representation, target_x, num_total_points):
"""Decodes the individual targets.
Args:
representation: The encoded representation of the context
target_x: The x locations for the target query
num_total_points: The number of target points.
Returns:
dist: A multivariate Gaussian over the target points.
mu: The mean of the multivariate Gaussian.
sigma: The standard deviation of the multivariate Gaussian.
"""
# Concatenate the representation and the target_x
representation = tf.tile(
tf.expand_dims(representation, axis=1), [1, num_total_points, 1])
input = tf.concat([representation, target_x], axis=-1)
# Get the shapes of the input and reshape to parallelise across observations
batch_size, _, filter_size = input.shape.as_list()
hidden = tf.reshape(input, (batch_size * num_total_points, -1))
hidden.set_shape((None, filter_size))
# Pass through MLP
with tf.variable_scope('decoder', reuse=tf.AUTO_REUSE):
for i, size in enumerate(self._output_sizes[:-1]):
hidden = tf.nn.relu(
tf.layers.dense(hidden, size, name='Decoder_layer_{}'.format(i)))
# Last layer without a ReLu
hidden = tf.layers.dense(
hidden, self._output_sizes[-1], name='Decoder_layer_{}'.format(i + 1))
# Bring back into original shape
hidden = tf.reshape(hidden, (batch_size, num_total_points, -1))
# Get the mean an the variance
mu, log_sigma = tf.split(hidden, 2, axis=-1)
# Bound the variance
sigma = 0.1 + 0.9 * tf.nn.softplus(log_sigma)
# Get the distribution
dist = tf.contrib.distributions.MultivariateNormalDiag(
loc=mu, scale_diag=sigma)
return dist, mu, sigma
class MLPGen(snt.AbstractModule):
def __init__(self,
dim,
hidden_dim,
depth,
output_dim,
act_hidden=tf.nn.relu,
sp_iters=0,
mlp=None,
name='mlp_gauss'):
super(MLPGen, self).__init__(name=name)
self.dim = dim
with self._enter_variable_scope():
if mlp is None:
self.mlp = MLP(self.dim + 31, hidden_dim, depth, 1, act_hidden,
sp_iters)
else:
self.mlp = mlp
def _build(self, raw_x):
z = tf.random.normal(
shape=[tf.shape(raw_x)[0], tf.shape(raw_x)[1], 32],
mean=0,
stddev=1,
dtype=tf.float32)
x = tf.concat([raw_x, z], -1)
y = self.mlp(x)
y = tf.reshape(y, [-1, tf.shape(raw_x)[1], 1])
return tf.concat([raw_x, y], -1)
class iCondGen(snt.AbstractModule):
def __init__(self,
dim,
cond_dim,
num_layers,
act_hidden='tanh',
sp_iters=1,
name='icondres_flow'):
super(iCondGen, self).__init__(name=name)
self.dim = dim
self.cond_dim = cond_dim
self.i_cond_flow = iCondResFlow(dim, cond_dim, num_layers, act_hidden,
sp_iters)
def _build(self, raw_x):
x = tf.reshape(raw_x, [-1, self.dim])
z = tf.random.normal(
shape=[tf.shape(x)[0], self.cond_dim],
mean=0,
stddev=1,
dtype=tf.float32)
y, logp = self.i_cond_flow(z, x, 0)
y = tf.reshape(y, [-1, tf.shape(raw_x)[1], 1])
logp = tf.reshape(logp, [-1, 1])
return tf.concat([raw_x, y], -1), logp
class iDoubleCondGen(snt.AbstractModule):
def __init__(self,
dim,
condx_dim,
condz_dim,
num_layers,
act_hidden='tanh',
sp_iters=1,
name='icondres_flow'):
super(iDoubleCondGen, self).__init__(name=name)
self.dim = dim
self.condx_dim = condx_dim
self.condz_dim = condz_dim
with self._enter_variable_scope():
self.i_cond_flow = iCondResFlow(dim, condz_dim + condz_dim, num_layers,
act_hidden, sp_iters)
self.fc = snt.Linear(condz_dim)
self.mlp = MLP(condz_dim, condz_dim, 2, condz_dim, tf.nn.relu)
def _build(self, raw_x, z_cond):
x = tf.reshape(raw_x, [-1, self.dim])
z_cond = tf.tile(z_cond, [1, tf.shape(raw_x)[1]])
z_cond = tf.reshape(z_cond, [-1, self.condz_dim])
z_cond = self.mlp(z_cond)
z = tf.random.normal(
shape=[tf.shape(x)[0], self.condx_dim],
mean=0,
stddev=1,
dtype=tf.float32)
x = self.fc(x)
ctx = tf.concat([x, z_cond], axis=-1)
y, logp = self.i_cond_flow(z, ctx, 0)
y = tf.reshape(y, [-1, tf.shape(raw_x)[1], 1])
logp = tf.reshape(logp, [-1, tf.shape(raw_x)[1], 1])
logp = tf.reduce_sum(logp, axis=1, keepdims=False)
return tf.concat([raw_x, y], -1), logp
class HyperGen(snt.AbstractModule):
def __init__(self, dim, condx_dim, condz_dim, num_layers, name='HyperGen'):
super(HyperGen, self).__init__(name=name)
self.dim = dim
self.condx_dim = condx_dim
self.condz_dim = condz_dim
with self._enter_variable_scope():
self.fc = snt.Linear(condz_dim)
self.norm_flow = NormFlow(self.dim, num_layers, 'planar')
self.hnet = HyperNet(
2 * condz_dim, 256, self.norm_flow.num_params, depth=2)
def _build(self, raw_x, z_cond):
x = tf.reshape(raw_x, [-1, self.dim])
z_cond = tf.tile(z_cond, [1, tf.shape(raw_x)[1]])
z_cond = tf.reshape(z_cond, [-1, self.condz_dim])
z = tf.random.normal(
shape=[tf.shape(x)[0], 1, self.dim], mean=0, stddev=1, dtype=tf.float32)
x = self.fc(x)
ctx = tf.concat([x, z_cond], axis=-1)
params = self.hnet(ctx)
y, logp = self.norm_flow(z, 0, params)
y = tf.reshape(y, [-1, tf.shape(raw_x)[1], 1])
logp = tf.reshape(logp, [-1, tf.shape(raw_x)[1], 1])
logp = tf.reduce_sum(logp, axis=1, keepdims=False)
return tf.concat([raw_x, y], -1), logp
| apache-2.0 | -4,928,008,984,375,269,000 | 33.084 | 80 | 0.600047 | false | 3.191984 | false | false | false |
GeoMop/GeoMop | src/gm_base/model_data/validation/validator.py | 1 | 7817 | """Validator for Flow123D data structure
.. codeauthor:: Tomas Krizek <[email protected]>
"""
from ..notifications import Notification
from gm_base.geomop_util import TextValue, Span, Parameter
from . import checks
from ..data_node import DataNode
from ..format import is_scalar, is_param
class Validator:
"""Handles data structure validation."""
def __init__(self, notification_handler):
"""Initializes the validator with a NotificationHandler."""
self.notification_handler = notification_handler
self.valid = True
self.params = []
def validate(self, node, input_type):
"""
Performs data validation of node with the specified input_type.
Validation is performed recursively on all children nodes as well.
Options are added to nodes where applicable (record keys, selection, ...).
Returns True when all data was correctly validated, False otherwise.
Attribute errors contains a list of occurred errors.
"""
self.valid = True
self.params = []
self._validate_node(node, input_type)
return self.valid
def _validate_node(self, node, input_type):
"""
Determines if node contains correct value.
Method verifies node recursively. All descendant nodes are checked.
"""
if node is None:
raise Notification.from_name('ValidationError', 'Invalid node (None)')
# parameters
# TODO: enable parameters in unknown IST?
if hasattr(node, 'value'):
match = is_param(node.value)
if match:
# extract parameters
new_param = Parameter(match.group(1))
exists = False
for param in self.params:
if param.name == new_param.name:
exists = True
break
if not exists:
self.params.append(new_param)
node.input_type = input_type
# assume parameters are correct, do not validate further
return
if input_type['base_type'] != 'Abstract' and hasattr(node, 'type') \
and node.type is not None and 'implemented_abstract_record' not in input_type:
notification = Notification.from_name('UselessTag', node.type.value)
notification.span = node.type.span
self.notification_handler.report(notification)
node.input_type = input_type
if is_scalar(input_type):
self._validate_scalar(node, input_type)
elif input_type['base_type'] == 'Record':
self._validate_record(node, input_type)
elif input_type['base_type'] == 'Abstract':
self._validate_abstract(node, input_type)
elif input_type['base_type'] == 'Array':
self._validate_array(node, input_type)
else:
notification = Notification.from_name('InputTypeNotSupported',
input_type['base_type'])
self._report_notification(notification)
def _validate_scalar(self, node, input_type):
"""Validates a Scalar node."""
if input_type['base_type'] == 'Selection':
node.options = input_type['values']
try:
checks.check_scalar(node, input_type)
except Notification as notification:
if notification.name in ['InvalidSelectionOption', 'ValueTooBig', 'ValueTooSmall',
'ValidationTypeError']:
notification.span = node.span
else:
notification.span = get_node_key(node).notification_span
self._report_notification(notification)
def _validate_record(self, node, input_type):
"""Validates a Record node."""
if not node.implementation == DataNode.Implementation.mapping:
notification = Notification.from_name('ValidationTypeError', 'Record')
notification.span = get_node_key(node).notification_span
self._report_notification(notification)
return
keys = node.children_keys
node.options = input_type['keys'].keys()
keys.extend(input_type['keys'].keys())
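        # check the union of keys present in the data and keys allowed by the
        # input type, so unknown keys and missing keys can both be reported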
for key in set(keys):
if node.origin == DataNode.Origin.error:
continue
child = node.get_child(key)
if child is not None and \
child.origin==DataNode.Origin.duplicit:
notification = Notification.from_name('DuplicateRecord')
notification.span = child.key.span
self._report_notification(notification)
continue
if child is not None and \
child.origin==DataNode.Origin.redefination:
notification = Notification.from_name('RedefinateRecord')
notification.span = child.key.span
self._report_notification(notification)
continue
try:
checks.check_record_key(node.children_keys, key, input_type)
except Notification as notification:
if notification.name == 'UnknownRecordKey':
notification.span = child.notification_span
else:
notification.span = get_node_key(node).notification_span
self._report_notification(notification)
else:
if child is not None:
child_input_type = input_type['keys'][key]['type']
self._validate_node(child, child_input_type)
def _validate_abstract(self, node, input_type):
"""Validates an AbtractRecord node."""
try:
concrete_type = checks.get_abstractrecord_type(node, input_type)
except Notification as notification:
if notification.name == 'InvalidAbstractType':
notification.span = node.type.span
else:
notification.span = get_node_key(node).notification_span
self._report_notification(notification)
else:
if node.type is None:
# if default_descendant defines the Abstract type, add it to data structure
node.type = TextValue()
node.type.value = concrete_type.get('name')
node.type.span = Span(node.span.start, node.span.start)
concrete_type['implemented_abstract_record'] = input_type
node.input_type = concrete_type
self._validate_record(node, concrete_type)
def _validate_array(self, node, input_type):
"""Validates an Array node."""
if not node.implementation == DataNode.Implementation.sequence:
notification = Notification.from_name('ValidationTypeError', 'Array')
notification.span = get_node_key(node).notification_span
self._report_notification(notification)
return
try:
checks.check_array(node.children, input_type)
except Notification as notification:
notification.span = get_node_key(node).notification_span
self._report_notification(notification)
for child in node.children:
self._validate_node(child, input_type['subtype'])
def _report_notification(self, notification):
"""Reports a notification."""
if notification.severity.value >= Notification.Severity.error.value:
self.valid = False
self.notification_handler.report(notification)
def get_node_key(node):
"""Return node that has originated from the text structure (not autoconversion)."""
while node.origin != DataNode.Origin.structure:
node = node.parent
return node
| gpl-3.0 | 4,545,214,501,728,727,600 | 41.950549 | 94 | 0.594346 | false | 4.740449 | false | false | false |
aevri/phabricator-tools | py/phl/phlgit_fetch__t.py | 4 | 3223 | """Test suite for phlgit_fetch."""
# =============================================================================
# TEST PLAN
# -----------------------------------------------------------------------------
# Here we detail the things we are concerned to test and specify which tests
# cover those concerns.
#
# Concerns:
# TODO
# -----------------------------------------------------------------------------
# Tests:
# TODO
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import unittest
import phlgit_fetch
import phlgitu_fixture
class Test(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testBreathing(self):
# pychecker won't recognise the attributes on 'f' if we create it in
# the closing parameter list and use 'as', at least not if we create an
# alias to the CentralisedWithTwoWorkers class
#
f = phlgitu_fixture.CentralisedWithTwoWorkers()
with contextlib.closing(f):
phlgit_fetch.prune_safe(f.w0.repo, 'origin')
def _setupBranchBomb(self, fixture):
"""Setup a situation where fetching on w0 will fail.
:fixture: a phlgitu_fixture.CentralisedWithTwoWorkers
:returns: None
"""
fixture.w1.repo('push', 'origin', 'HEAD:refs/heads/mybranch')
fixture.w0.repo('fetch', '--prune')
fixture.w1.repo('push', 'origin', ':refs/heads/mybranch')
fixture.w1.repo('push', 'origin', 'HEAD:refs/heads/mybranch/bomb')
def testBranchBomb(self):
f = phlgitu_fixture.CentralisedWithTwoWorkers()
with contextlib.closing(f):
self._setupBranchBomb(f)
phlgit_fetch.prune_safe(f.w0.repo, 'origin')
f.w0.repo('fetch', '--prune')
phlgit_fetch.all_prune(f.w0.repo)
def testFetchSpec(self):
fetchspec = ["+refs/heads/*:refs/remotes/origin/*"]
fetchspec_nonexistant = ["+refs/nope/*:refs/heads/__private_nope/*"]
f = phlgitu_fixture.CentralisedWithTwoWorkers()
with contextlib.closing(f):
phlgit_fetch.prune_safe(f.w0.repo, 'origin', [])
phlgit_fetch.prune_safe(f.w0.repo, 'origin', fetchspec)
phlgit_fetch.prune_safe(f.w0.repo, 'origin', fetchspec_nonexistant)
# -----------------------------------------------------------------------------
# Copyright (C) 2014-2015 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| apache-2.0 | -1,273,190,873,056,103,700 | 35.625 | 79 | 0.56345 | false | 4.013699 | true | false | false |
seasonstar/bibi | application/services/jobs/image.py | 1 | 2773 | from PIL import Image, ImageOps
import urllib.request
from io import BytesIO
import boto
from boto.s3.key import Key
from configs import settings
from application.cel import celery
@celery.task
def upload(space, path, image=None, url=None, async=True, make_thumbnails=True):
conn = boto.connect_s3(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
bucket_name = space
bucket = conn.get_bucket(bucket_name)
k = Key(bucket)
def make_thumb(image):
im = Image.open(image)
for size in [(400, 400), (150, 150)]:
            output = BytesIO()
im2 = ImageOps.fit(im, size, Image.ANTIALIAS)
im2.save(output, "JPEG")
k.key = "thumbnails/%sx%s/%s"%(size[0], size[1], path)
k.set_contents_from_string(output.getvalue())
k.make_public()
output.close()
# save original img
    if image is None and url:
        fd = urllib.request.urlopen(url)
        image = BytesIO(fd.read())
    else:
        image = BytesIO(image)
k.key = path
k.set_contents_from_file(image)
k.make_public()
    # make thumbnails (rewind first: set_contents_from_file left the file pointer at EOF)
    if make_thumbnails:
        image.seek(0)
        make_thumb(image)
image.close()
orig_url = "http://assets.maybi.cn/%s"%path
return orig_url
@celery.task
def make_thumbnails(space, path, url, async=True):
conn = boto.connect_s3(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
bucket_name = space
bucket = conn.get_bucket(bucket_name)
k = Key(bucket)
# save original img
fd = urllib.request.urlopen(url)
    image = BytesIO(fd.read())
im = Image.open(image)
for size in [(480, 480), (180, 180)]:
        output = BytesIO()
im2 = ImageOps.fit(im, size, Image.ANTIALIAS)
im2.save(output, "JPEG")
k.key = "post_thumbs/%sx%s/%s"%(size[0], size[1], path)
k.set_contents_from_string(output.getvalue())
k.make_public()
output.close()
@celery.task
def save_avatar(space, path, url, save_original=False, async=True):
conn = boto.connect_s3(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
bucket_name = space
bucket = conn.get_bucket(bucket_name)
k = Key(bucket)
fd = urllib.request.urlopen(url)
    image = BytesIO(fd.read())
# save original img
if save_original:
k.key = path
k.set_contents_from_file(image)
        k.make_public()
        image.seek(0)  # rewind so the image can be re-read for the thumbnails below
    im = Image.open(image)
for size in [(200, 200), (80, 80)]:
        output = BytesIO()
im2 = ImageOps.fit(im, size, Image.ANTIALIAS)
im2.save(output, "JPEG")
k.key = "avatar_thumbs/%sx%s/%s"%(size[0], size[1], path)
k.set_contents_from_string(output.getvalue())
k.make_public()
output.close()
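# ---------------------------------------------------------------------------
# Illustrative usage sketch (added comment, not part of the original module):
# how these celery tasks might be queued from application code. The bucket
# name, object paths and image file below are made-up placeholder values.
#
#   from application.services.jobs import image as image_jobs
#
#   with open('photo.jpg', 'rb') as f:
#       image_jobs.upload.delay('my-bucket', 'uploads/photo.jpg', image=f.read())
#
#   image_jobs.save_avatar.delay('my-bucket', 'avatars/user-1.jpg',
#                                url='http://example.com/avatar.jpg')
# ---------------------------------------------------------------------------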
| apache-2.0 | -7,345,736,648,261,493,000 | 26.186275 | 86 | 0.612333 | false | 3.305125 | false | false | false |
BrainTech/openbci | obci/interfaces/bci/p300_fda_lines/signalParser.py | 2 | 12389 | from xml.dom import minidom
import numpy as np
import os.path as osp
# A map from xml`s sample_type string to numpy.fromfile function argument.
# Numpy treats 'float' as float64 (=double) and 'float32' as float32
NP_TYPES = {'double': 'float',
'float': 'float32'}
class signalParser(object):
"""This class can extract some information from signal and it's xml descriptors"""
def __init__(self, file_prefix):
"""Check for file and it's descriptors
This function initializes class and checks for files:
.raw - contains the signal
.xml - contains signal description
.tag - contains experiment tags
"""
file_prefix = osp.expanduser(file_prefix)
if osp.exists(file_prefix+'.raw'):
self.raw_file = file_prefix + '.raw'
else:
raise IOError(file_prefix+'.raw doest exist!')
if osp.exists(file_prefix+'.xml'):
self.xml_file = file_prefix + '.xml'
else:
raise IOError(file_prefix+'.xml does not exist!')
if osp.exists(file_prefix+'.tag'):
self.tag_file = file_prefix + '.tag'
else:
print "Warning: "+file_prefix+".tag does not exist!"
self.montage = 0
self.channel_count, self.sample_count, self.sampling_frequency = self.__get_xml_elems()
self.channel_list = self.__get_channel_list()
def extract_channel(self,channel_list, filt = None):
"""This extracts channels from .raw file
The filt parameter should be a function of len(channel_list) parameters.
if filt is None than raw signals are returned.
If not, the output of filt function is returned"""
return self.__get_filtered_channels(self.__channels_no(channel_list), filt)
def __get_xml_elems(self):
"""Returns number of channels"""
fxml = minidom.parse(self.xml_file)
return int(fxml.getElementsByTagName('rs:channelCount')[0].firstChild.data), \
int(fxml.getElementsByTagName('rs:sampleCount')[0].firstChild.data), \
float(fxml.getElementsByTagName('rs:samplingFrequency')[0].firstChild.data)
def getSamplingFrequency(self):
return self.sampling_frequency
def __get_channel_list(self):
"""Returns list of channels from .xml file"""
fxml = minidom.parse(self.xml_file)
return [x.firstChild.data for x in fxml.getElementsByTagName('rs:label')]
def getChannelList(self):
return self.__get_channel_list()
def __get_filtered_channels(self, channel_list, filt):
"""Returns channels filtered wit filt function"""
fxml = minidom.parse(self.xml_file)
sample_type = fxml.getElementsByTagName('rs:sampleType')[0].firstChild.data
ch_no = self.channel_count
sig = np.fromfile(self.raw_file, NP_TYPES[sample_type.lower()])
signal = np.zeros([len(channel_list), self.sample_count])
print ("DEBUG GET FILTERED: "+str(sample_type)+ " / "+str(ch_no)+" / "+str(sig.shape)+" / "+str(signal.shape)+" / "+str(channel_list))
for i,v in enumerate(channel_list):
signal[i] = sig[v::ch_no][0:self.sample_count]
if filt != None:
return filt(signal)
else: return signal
def __channels_no(self, ch_list):
"""If in ch_list is string describing a channel, it is converted to channel no using .xml file"""
ch_str_list = self.channel_list
real_ch_list = []
for i in ch_list:
if isinstance(i, int):
real_ch_list.append(i)
elif isinstance(i, str) or isinstance(i, unicode):
try:
real_ch_list.append(ch_str_list.index(i))
except ValueError:
print "Wrong channel name "+i
raise
else:
raise ValueError("Channel name must be a string or integer")
return real_ch_list
def get_channel(self, channel):
"""Returns number of channel (if channel is a string) or channel name (if channel is an integer)"""
ch_str_list = self.channel_list
if isinstance(channel, int):
return ch_str_list[channel]
elif isinstance(channel, str):
try:
return ch_str_list.index(channel)
except ValueError:
print "Can not find this channel"
raise
else:
raise ValueError("Channel must be a string or an integer")
def setMontage(self, montage):
self.montage = self.extract_channel(montage).mean(axis=0)
def getData(self, channels):
s = self.extract_channel(channels)
return s - self.montage
def getAllTags(self,inSamples=True):
ftags = minidom.parse(self.tag_file)
tagArray = []
for tag in ftags.getElementsByTagName('tag'):
            tagTime = float(tag.attributes['position'].value)
            tagArray.append(tagTime)
if inSamples:
return np.array(tagArray)*self.sampling_frequency
else:
return np.array(tagArray)
def getTrialTags(self, approxTimeDiff=2, inSamples=True):
ftags = minidom.parse(self.tag_file)
tagArray = []
t = 0
for tag in ftags.getElementsByTagName('tag'):
tagTime = float(tag.attributes['position'].value)
if tagTime - t > approxTimeDiff:
tagArray.append(tagTime)
t = tagTime
if inSamples:
return np.array(tagArray)*self.sampling_frequency
else:
return np.array(tagArray)
def get_train_tags(self, trial_separator_name='trial', screen = False, tag_filter = None, ccof = False ):
"""Extracts positions an stimulation frequencies from .tag file
Parameters:
===========
screen [= False] : bool
if True a 'freq' tag will be considered when choosing stimulation frequency
tag_filter [= None] : tuple
a tuple of strings. First element is a name of a tag, second is value of the tag.
This will limit the function to consider only tags specified in the tuple.
        ccof [= False] : bool
if True a concentrating_on_field tag will be considered when choosing stimulation frequency
Returns:
========
tags : list
a list of tuples. First element is time (in seconds) denoting start of the stimulation.
Second element is frequency of the stimulation
"""
ftags = minidom.parse(self.tag_file)
exp_update_list_all = [e for e in ftags.getElementsByTagName('tag')\
if e.attributes['name'].value == trial_separator_name]# \
#or e.attributes['name'].value == 'experiment__screen_break']
if tag_filter is None:
exp_update_list = exp_update_list_all
else:
exp_update_list = [e for e in exp_update_list_all \
if e.getElementsByTagName(tag_filter[0])[0].firstChild.data == tag_filter[1]]
tag_list = []
for i,exp in enumerate(exp_update_list):
position = float(exp.attributes['position'].value)
if screen:
#cor_tab = [36, 38, 40, 42]
scr = exp.getElementsByTagName('freq')[0].firstChild.data
#scr_idx = int(scr)#int(scr.split('_')[-1])
#frq = cor_tab[scr_idx - 1]
#frq = np.array(eval(exp.getElementsByTagName('freqs')[0].firstChild.data))[scr_idx - 1]
frq = int(scr)
elif ccof:
scr = exp.getElementsByTagName('concentrating_on_field')[0].firstChild.data
frq = np.array(eval(exp.getElementsByTagName('freqs')[0].firstChild.data))[int(scr)]
else:
f1 = exp.getElementsByTagName('freqs')[0].firstChild.data
frq = eval(f1)[1]
#frq = frq_list[int(exp.getElementsByTagName('concentrating_on_field')[0].firstChild.data)]
#tag = screen_tag[0].firstChild.data
tag_list.append((position, frq))
return tag_list
#######################################################
def get_all_tags(self, idx=1, samples = True, Fs = None):
ftag = minidom.parse(self.tag_file)
tag_list = [e for e in ftag.getElementsByTagName('tag') \
if e.attributes['name'].value == 'blink']
exp_list = {}
fsp = self.sampling_frequency
if(samples):
if Fs != None:
fsp = Fs
else: fsp = 1.0
for e in tag_list:
index = e.getElementsByTagName('index')[0].firstChild.data
timestamp = float(e.attributes['position'].value)
exp_list[timestamp*fsp] = int(index)
return exp_list
def get_not_p300_tags(self, idx=1, samples = True, Fs = None):
"""Returns tags with words from different groups
Parameters:
-----------
idx [= 1]: int
defines which tags to return
samples : bool
if true, positions will be returned as samples not in seconds
Fs : float or None
the sampling frequency used to convert positions to samples
Returns:
--------
exp_list : list
a list of positions of target
"""
ftag = minidom.parse(self.tag_file)
tag_list = [e for e in ftag.getElementsByTagName('tag') \
if e.attributes['name'].value == 'blink']
exp_list = []
fsp = self.sampling_frequency
if(samples):
if Fs != None:
fsp = Fs
else: fsp = 1.0
# If int passed as target -> change it into list
if isinstance(idx, int): idx = [idx]
for e in tag_list:
index = e.getElementsByTagName('index')[0].firstChild.data
if int(index) not in idx:
exp_list.append(float(e.attributes['position'].value))
return np.array(exp_list) * fsp
def get_p300_tags(self, idx=1, samples = True, Fs = None):
"""Returns tags with words from different groups
Parameters:
-----------
idx [= 1]: int
defines which tags to return
samples : bool
if true, positions will be returned as samples not in seconds
Fs : float or None
the sampling frequency used to convert positions to samples
Returns:
--------
exp_list : list
a list of positions of target
"""
ftag = minidom.parse(self.tag_file)
tag_list = [e for e in ftag.getElementsByTagName('tag') \
if e.attributes['name'].value == 'blink']
exp_list = []
fsp = self.sampling_frequency
if(samples):
if Fs != None:
fsp = Fs
else: fsp = 1.0
        # If int passed as target -> change it into list
        if isinstance(idx, int): idx = [idx]
for e in tag_list:
index = e.getElementsByTagName('index')[0].firstChild.data
if int(index) in idx:
exp_list.append(float(e.attributes['position'].value))
return np.array(exp_list) * fsp
def getTargetNontarget(self, signal, trgTags, ntrgTags):
self.chL = signal.shape[0]
        self.Fs = int(self.getSamplingFrequency())  # cast to int: used as an array dimension and slice length below
print "self.Fs: ", self.Fs
## Get target data and stuck it into numpy arrays
target = np.zeros((len(trgTags), self.chL, self.Fs))
nontarget = np.zeros((len(ntrgTags), self.chL, self.Fs))
# Target trials
for idx, tag in enumerate(trgTags):
index = int(tag)
target[idx] = signal[:,index:index+self.Fs]
# Nontarget trials
for idx, tag in enumerate(ntrgTags):
index = int(tag)
nontarget[idx] = signal[:, index:index+self.Fs]
return target, nontarget
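# ---------------------------------------------------------------------------
# Illustrative usage sketch (added comment, not part of the original module).
# The file prefix and channel names below are made-up placeholders for a
# recording that provides the .raw/.xml/.tag files expected above.
#
#   sp = signalParser('~/data/p300_session')
#   eeg = sp.extract_channel(['Cz', 'Pz'])        # raw signal, shape (2, N)
#   trg = sp.get_p300_tags(idx=1)                 # target positions (samples)
#   ntrg = sp.get_not_p300_tags(idx=1)            # non-target positions (samples)
#   target, nontarget = sp.getTargetNontarget(eeg, trg, ntrg)
# ---------------------------------------------------------------------------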
| gpl-3.0 | 2,992,136,461,262,662,000 | 36.429003 | 142 | 0.563161 | false | 4.150419 | false | false | false |
beckdaniel/GPyOpt | GPyOpt/core/bo.py | 2 | 13393 | # Copyright (c) 2015, Javier Gonzalez
# Copyright (c) 2015, the GPy Authors (see GPy AUTHORS.txt)
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
import time
from ..util.general import best_value, reshape, spawn
from ..core.optimization import lp_batch_optimization, random_batch_optimization, predictive_batch_optimization
try:
from ..plotting.plots_bo import plot_acquisition, plot_convergence
except:
pass
class BO(object):
def __init__(self, acquisition_func):
self.acquisition_func = acquisition_func
def _init_model(self):
pass
def run_optimization(self, max_iter = None, n_inbatch=1, acqu_optimize_method='fast_random', acqu_optimize_restarts=200, batch_method='predictive',
eps = 1e-8, n_procs=1, true_gradients = True, verbose=True):
"""
Runs Bayesian Optimization for a number 'max_iter' of iterations (after the initial exploration data)
        :param max_iter: exploration horizon, or number of acquisitions. If nothing is provided, only the current acquisition is optimized.
        :param n_inbatch: number of samples collected every time *f* is evaluated (one by default).
:param acqu_optimize_method: method to optimize the acquisition function
-'DIRECT': uses the DIRECT algorithm of Jones and Stuckmann.
-'CMA': uses the Covariance Matrix Adaptation Algorithm.
-'brute': Run local optimizers in a grid of points.
-'random': Run local optimizers started at random locations.
-'fast_brute': the same as brute but runs only one optimizer in the best location. It is used by default.
-'fast_random': the same as random but runs only one optimizer in the best location.
:param acqu_optimize_restarts: numbers of random restarts in the optimization of the acquisition function, default = 20.
:param batch_method: method to collect samples in batches
-'predictive': uses the predicted mean in the selected sample to update the acquisition function.
            -'lp': uses a penalization of the acquisition function based on exclusion zones.
-'random': collects the element of the batch randomly
:param eps: minimum distance between two consecutive x's to keep running the model
:param n_procs: The number of processes used for evaluating the given function *f* (ideally nprocs=n_inbatch).
        :param true_gradients: whether the true gradients of the acquisition (can be slow) or an approximation are used (True, default).
"""
# --- Load the parameters of the function into the object.
if max_iter == None:
self.max_iter = 10*self.input_dim
else:
self.max_iter = max_iter
self.num_acquisitions = 0
self.n_inbatch=n_inbatch
self.batch_method = batch_method
if batch_method=='lp':
from .acquisition import AcquisitionMP
if not isinstance(self.acquisition_func, AcquisitionMP):
self.acquisition_func = AcquisitionMP(self.acquisition_func, self.acquisition_func.acquisition_par)
self.eps = eps
self.acqu_optimize_method = acqu_optimize_method
self.acqu_optimize_restarts = acqu_optimize_restarts
self.acquisition_func.set_model(self.model)
self.n_procs = n_procs
        # --- Decide whether we use the true gradients to optimize the acquisition function
if true_gradients !=True:
self.true_gradients = False
self.acquisition_func.d_acquisition_function = None
else:
self.true_gradients = true_gradients
# --- Get starting of running time
self.time = time.time()
        # --- If this is the first time the optimization is run - update the model and normalize if needed
if self.first_time_optimization:
self._update_model()
prediction = self.model.predict(self.X)
self.s_in_min = np.sqrt(abs(prediction[1]))
self.first_time_optimization = False
# --- Initialization of stop conditions.
k=0
distance_lastX = np.sqrt(sum((self.X[self.X.shape[0]-1,:]-self.X[self.X.shape[0]-2,:])**2))
# --- BO loop: this loop does the hard work.
while k<self.max_iter and distance_lastX > self.eps:
# --- Augment X
self.X = np.vstack((self.X,self.suggested_sample))
# --- Evaluate *f* in X and augment Y
if self.n_procs==1:
self.Y = np.vstack((self.Y,self.f(np.array(self.suggested_sample))))
else:
try:
# --- Parallel evaluation of *f* if several cores are available
from multiprocessing import Process, Pipe
from itertools import izip
divided_samples = [self.suggested_sample[i::self.n_procs] for i in xrange(self.n_procs)]
pipe=[Pipe() for i in xrange(self.n_procs)]
proc=[Process(target=spawn(self.f),args=(c,x)) for x,(p,c) in izip(divided_samples,pipe)]
[p.start() for p in proc]
[p.join() for p in proc]
rs = [p.recv() for (p,c) in pipe]
self.Y = np.vstack([self.Y]+rs)
except:
if not hasattr(self, 'parallel_error'):
print 'Error in parallel computation. Fall back to single process!'
self.parallel_error = True
self.Y = np.vstack((self.Y,self.f(np.array(self.suggested_sample))))
# --- Update internal elements (needed for plotting)
self.num_acquisitions += 1
pred_min = self.model.predict(reshape(self.suggested_sample,self.input_dim))
self.s_in_min = np.vstack((self.s_in_min,np.sqrt(abs(pred_min[1]))))
# --- Update model
try:
self._update_model()
except np.linalg.linalg.LinAlgError:
break
# --- Update stop conditions
k +=1
distance_lastX = np.sqrt(sum((self.X[self.X.shape[0]-1,:]-self.X[self.X.shape[0]-2,:])**2))
# --- Stop messages and execution time
self.Y_best = best_value(self.Y)
self.x_opt = self.X[np.argmin(self.Y),:]
self.fx_opt = min(self.Y)
self.time = time.time() - self.time
# --- Print stopping reason
if verbose: print '*Optimization completed:'
if k==self.max_iter and distance_lastX > self.eps:
if verbose: print ' -Maximum number of iterations reached.'
return 1
else:
if verbose: print ' -Method converged.'
return 0
def change_to_sparseGP(self, num_inducing):
"""
Changes standard GP estimation to sparse GP estimation
:param num_inducing: number of inducing points for sparse-GP modeling
"""
        if self.sparse == True:
            raise Exception('Sparse GP is already in use')
else:
self.num_inducing = num_inducing
self.sparse = True
self._init_model(self.X,self.Y)
def change_to_standardGP(self):
"""
Changes sparse GP estimation to standard GP estimation
"""
        if self.sparse == False:
            raise Exception('Standard GP is already in use')
else:
self.sparse = False
self._init_model(self.X,self.Y)
def _optimize_acquisition(self):
"""
Optimizes the acquisition function. This function selects the type of batch method and passes the arguments for the rest of the optimization.
"""
# ------ Elements of the acquisition function
acqu_name = self.acqu_name
acquisition = self.acquisition_func.acquisition_function
d_acquisition = self.acquisition_func.d_acquisition_function
acquisition_par = self.acquisition_par
model = self.model
# ------ Parameters to optimize the acquisition
acqu_optimize_restarts = self.acqu_optimize_restarts
acqu_optimize_method = self.acqu_optimize_method
n_inbatch = self.n_inbatch
bounds = self.bounds
        # ------ Selection of the batch method (if any, predictive used when n_inbatch=1)
if self.batch_method == 'predictive':
X_batch = predictive_batch_optimization(acqu_name, acquisition_par, acquisition, d_acquisition, bounds, acqu_optimize_restarts, acqu_optimize_method, model, n_inbatch)
elif self.batch_method == 'lp':
X_batch = lp_batch_optimization(self.acquisition_func, bounds, acqu_optimize_restarts, acqu_optimize_method, model, n_inbatch)
elif self.batch_method == 'random':
X_batch = random_batch_optimization(acquisition, d_acquisition, bounds, acqu_optimize_restarts,acqu_optimize_method, model, n_inbatch)
return X_batch
def _update_model(self):
"""
Updates X and Y in the model and re-optimizes the parameters of the new model
"""
# ------- Normalize acquisition function (if needed)
if self.normalize:
self.model.set_XY(self.X,(self.Y-self.Y.mean())/(self.Y.std()))
else:
self.model.set_XY(self.X,self.Y)
# ------- Optimize model when required
if (self.num_acquisitions%self.model_optimize_interval)==0:
self.model.optimization_runs = [] # clear previous optimization runs so they don't get used.
self.model.optimize_restarts(num_restarts=self.model_optimize_restarts, verbose=self.verbosity)
# ------- Optimize acquisition function
self.suggested_sample = self._optimize_acquisition()
def plot_acquisition(self,filename=None):
"""
Plots the model and the acquisition function.
if self.input_dim = 1: Plots data, mean and variance in one plot and the acquisition function in another plot
if self.input_dim = 2: as before but it separates the mean and variance of the model in two different plots
:param filename: name of the file where the plot is saved
"""
return plot_acquisition(self.bounds,self.input_dim,self.model,self.model.X,self.model.Y,self.acquisition_func.acquisition_function,self.suggested_sample,filename)
def plot_convergence(self,filename=None):
"""
Makes three plots to evaluate the convergence of the model
plot 1: Iterations vs. distance between consecutive selected x's
plot 2: Iterations vs. the mean of the current model in the selected sample.
plot 3: Iterations vs. the variance of the current model in the selected sample.
:param filename: name of the file where the plot is saved
"""
return plot_convergence(self.X,self.Y_best,self.s_in_min,filename)
def get_evaluations(self):
return self.X.copy(), self.Y.copy()
    def save_report(self, report_file='GPyOpt-results.txt'):
"""
Save a report with the results of the optimization. A file is produced every
:param report_file: name of the file in which the results of the optimization are saved.
"""
with open(report_file,'w') as file:
file.write('---------------------------------' + ' Results file ' + '--------------------------------------\n')
file.write('GPyOpt Version 1.0.0 \n')
file.write('Date and time: ' + time.strftime("%c")+'\n')
if self.num_acquisitions==self.max_iter:
file.write('Optimization completed: ' +'YES, ' + str(self.X.shape[0]).strip('[]') + ' samples collected.\n')
else:
file.write('Optimization completed: ' +'NO,' + str(self.X.shape[0]).strip('[]') + ' samples collected.\n')
file.write('Optimization time: ' + str(self.time).strip('[]') +' seconds.\n')
file.write('---------------------------------' + ' Problem set up ' + '------------------------------------\n')
file.write('Problem Dimension: ' + str(self.input_dim).strip('[]') +'\n')
file.write('Problem bounds: ' + str(self.bounds).strip('[]') +'\n')
file.write('Batch size: ' + str(self.n_inbatch).strip('[]') +'\n')
file.write('Acquisition: ' + self.acqu_name + '\n')
file.write('Acquisition optimizer: ' + self.acqu_optimize_method+ '\n')
file.write('Sparse GP: ' + str(self.sparseGP).strip('[]') + '\n')
file.write('---------------------------------' + ' Summary ' + '------------------------------------------\n')
file.write('Best found minimum: ' + str(min(self.Y)).strip('[]') +'\n')
file.write('Minumum location: ' + str(self.X[np.argmin(self.Y),:]).strip('[]') +'\n')
file.close()
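# ---------------------------------------------------------------------------
# Illustrative usage sketch (added comment, not part of this module). BO is a
# base class that is normally driven through a concrete wrapper such as
# GPyOpt.methods.BayesianOptimization; the toy objective and bounds below are
# made-up placeholder values.
#
#   import numpy as np
#   import GPyOpt
#
#   f = lambda x: (6 * x - 2) ** 2 * np.sin(12 * x - 4)   # 1-D test function
#   bo = GPyOpt.methods.BayesianOptimization(f=f, bounds=[(0, 1)])
#   bo.run_optimization(max_iter=15, acqu_optimize_method='fast_random')
#   bo.plot_convergence()
# ---------------------------------------------------------------------------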
| bsd-3-clause | -6,851,183,451,001,491,000 | 47.701818 | 191 | 0.583962 | false | 4.034036 | false | false | false |
bcaine/maddux | maddux/objects/obstacle.py | 1 | 3774 | """
A stationary rectangular solid that something may collide with
"""
import numpy as np
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from static import StaticObject
class Obstacle(StaticObject):
def __init__(self, pt1, pt2, color='r'):
"""Create a 3D Rectangle from 2 points
:param pt1: The first point (x, y, z) defining the rect
:type pt1: numpy.ndarray
:param pt2: The second point (x, y, z) defining the rect
:type pt2: numpy.ndarray
:param color: color of the obstacle
:type color: str
:rtype: None
"""
self.pt1 = pt1
self.pt2 = pt2
self.color = color
# TODO: Make this use numpy arrays instead of lists
def get_paths(self):
"""Returns the paths for each of the surfaces of the
rectangle for plotting.
:returns (bottom, top, front, back, left, right)
:rtype: list of 6 4x3 numpy.ndarrays
"""
[x1, y1, z1] = self.pt1
[x2, y2, z2] = self.pt2
pt1 = [x1, y1, z1]
pt2 = [x1, y1, z2]
pt3 = [x1, y2, z1]
pt4 = [x1, y2, z2]
pt5 = [x2, y1, z1]
pt6 = [x2, y1, z2]
pt7 = [x2, y2, z1]
pt8 = [x2, y2, z2]
bottom = [pt1, pt3, pt7, pt5]
top = [pt2, pt4, pt8, pt6]
front = [pt1, pt2, pt6, pt5]
back = [pt3, pt4, pt8, pt7]
left = [pt1, pt2, pt4, pt3]
right = [pt5, pt6, pt8, pt7]
paths = [bottom, top, front, back, left, right]
return paths
def is_hit(self, position):
"""Checks if the rectangle is hit by a point or path
:param position: An objects position (x, y, z) or positions if
it is a path([x1, x2, ..], [y1, y2, ..], [z1, z2, ..]
:type position: numpy.ndarray or numpy.matrix
:returns: Whether the obstacle was hit by a point or path
:rtype: bool
"""
is_point = len(position.shape) == 1
if is_point:
x, y, z = position
else:
assert position.shape[1] == 3
x = position[:, 0]
y = position[:, 1]
z = position[:, 2]
[x1, y1, z1] = self.pt1
[x2, y2, z2] = self.pt2
x_hit = (x >= x1) & (x <= x2)
y_hit = (y >= y1) & (y <= y2)
z_hit = (z >= z1) & (z <= z2)
all_hit = x_hit & y_hit & z_hit
if is_point:
return (x_hit and y_hit and z_hit)
else:
return np.any(all_hit)
def is_hit_by_sphere(self, center, radius):
"""Checks if the rectangle is hit by a sphere
:param center: Sphere's center (x, y, z)
:type center: numpy.ndarray
:param radius: The sphere's radius
:type radius: int
:returns: Whether obstacle was hit by a sphere
:rtype: bool
"""
[x1, y1, z1] = self.pt1
[x2, y2, z2] = self.pt2
x, y, z = center
x_hit = (x + radius >= x1) & (x - radius <= x2)
y_hit = (y + radius >= y1) & (y - radius <= y2)
z_hit = (z + radius >= z1) & (z - radius <= z2)
return x_hit and y_hit and z_hit
def display(self):
"""Display obstacle properties
:rtype: None
"""
print "Center: {}".format(self.center)
print "Width: {}".format(self.width)
print "Height: {}".format(self.height)
print "Depth: {}".format(self.depth)
def plot(self, ax):
"""Plots the obstacle at its location
:param ax: Figure to plot on
:type ax: matplotlib.axes
        :rtype: None
"""
paths = self.get_paths()
rectangle = Poly3DCollection(paths, facecolors=self.color)
ax.add_collection3d(rectangle)
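# ---------------------------------------------------------------------------
# Illustrative usage sketch (added comment, not part of the original module);
# the coordinates below are arbitrary placeholder values.
#
#   import numpy as np
#
#   box = Obstacle(np.array([0.0, 0.0, 0.0]), np.array([1.0, 2.0, 0.5]))
#   box.is_hit(np.array([0.5, 1.0, 0.25]))                 # True: point inside
#   box.is_hit_by_sphere(np.array([1.5, 1.0, 0.25]), 0.6)  # True: sphere overlaps
# ---------------------------------------------------------------------------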
| mit | 888,665,443,313,857,300 | 27.164179 | 78 | 0.515633 | false | 3.203735 | false | false | false |
UU-Hydro/PCR-GLOBWB_model | modflow/scripts/groundwater_MODFLOW.py | 1 | 104750 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# PCR-GLOBWB (PCRaster Global Water Balance) Global Hydrological Model
#
# Copyright (C) 2016, Ludovicus P. H. (Rens) van Beek, Edwin H. Sutanudjaja, Yoshihide Wada,
# Joyce H. C. Bosmans, Niels Drost, Inge E. M. de Graaf, Kor de Jong, Patricia Lopez Lopez,
# Stefanie Pessenteiner, Oliver Schmitz, Menno W. Straatsma, Niko Wanders, Dominik Wisser,
# and Marc F. P. Bierkens,
# Faculty of Geosciences, Utrecht University, Utrecht, The Netherlands
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import subprocess
import os
import types
from pcraster.framework import *
import pcraster as pcr
import logging
logger = logging.getLogger(__name__)
import waterBodies_for_modflow as waterBodies
import virtualOS as vos
from ncConverter import *
class GroundwaterModflow(object):
def getState(self):
result = {}
# groundwater head (unit: m) for all layers
for i in range(1, self.number_of_layers+1):
var_name = 'groundwaterHeadLayer'+str(i)
result[var_name] = vars(self)[var_name]
return result
def getGroundwaterDepth(self):
result = {}
# groundwater head (unit: m) for all layers
for i in range(1, self.number_of_layers+1):
var_name = 'groundwaterDepthLayer'+str(i)
headname = 'groundwaterHeadLayer' +str(i)
result[var_name] = self.dem_average - vars(self)[headname]
return result
def getVariableValuesForPCRGLOBWB(self):
result = {}
result['relativeGroundwaterHead'] = pcr.ifthen(self.landmask, self.relativeGroundwaterHead)
result['baseflow'] = pcr.ifthen(self.landmask, self.baseflow)
result['storGroundwater'] = pcr.ifthen(self.landmask, self.storGroundwater)
return result
def __init__(self, iniItems, landmask):
object.__init__(self)
# cloneMap, temporary directory for the resample process, temporary directory for the modflow process, absolute path for input directory, landmask
self.cloneMap = iniItems.cloneMap
self.tmpDir = iniItems.tmpDir
self.tmp_modflow_dir = iniItems.tmp_modflow_dir
self.inputDir = iniItems.globalOptions['inputDir']
self.landmask = landmask
# configuration from the ini file
self.iniItems = iniItems
# number of modflow layers:
self.number_of_layers = int(iniItems.modflowParameterOptions['number_of_layers'])
# topography properties: read several variables from the netcdf file
for var in ['dem_minimum','dem_maximum','dem_average','dem_standard_deviation',\
'slopeLength','orographyBeta','tanslope',\
'dzRel0000','dzRel0001','dzRel0005',\
'dzRel0010','dzRel0020','dzRel0030','dzRel0040','dzRel0050',\
'dzRel0060','dzRel0070','dzRel0080','dzRel0090','dzRel0100']:
vars(self)[var] = vos.netcdf2PCRobjCloneWithoutTime(self.iniItems.modflowParameterOptions['topographyNC'], \
var, self.cloneMap)
vars(self)[var] = pcr.cover(vars(self)[var], 0.0)
# channel properties: read several variables from the netcdf file
for var in ['lddMap','cellAreaMap','gradient','bankfull_width',
'bankfull_depth','dem_floodplain','dem_riverbed']:
vars(self)[var] = vos.netcdf2PCRobjCloneWithoutTime(self.iniItems.modflowParameterOptions['channelNC'], \
var, self.cloneMap)
vars(self)[var] = pcr.cover(vars(self)[var], 0.0)
# minimum channel width
minimum_channel_width = 5.0 # TODO: Define this one in the configuration file
self.bankfull_width = pcr.max(minimum_channel_width, self.bankfull_width)
#~ # cell fraction if channel water reaching the flood plain # NOT USED YET
#~ self.flood_plain_fraction = self.return_innundation_fraction(pcr.max(0.0, self.dem_floodplain - self.dem_minimum))
# coefficient of Manning
self.manningsN = vos.readPCRmapClone(self.iniItems.modflowParameterOptions['manningsN'],\
self.cloneMap,self.tmpDir,self.inputDir)
# minimum channel gradient
minGradient = 0.00005 # TODO: Define this one in the configuration file
minGradient = 0.000005 # 24 March 2016: I lower this so that we don't have too deep water table. # TODO: Define this one in the configuration file
self.gradient = pcr.max(minGradient, pcr.cover(self.gradient, minGradient))
# correcting lddMap
self.lddMap = pcr.ifthen(pcr.scalar(self.lddMap) > 0.0, self.lddMap)
self.lddMap = pcr.lddrepair(pcr.ldd(self.lddMap))
# channelLength = approximation of channel length (unit: m) # This is approximated by cell diagonal.
cellSizeInArcMin = np.round(pcr.clone().cellSize()*60.) # FIXME: This one will not work if you use the resolution: 0.5, 1.5, 2.5 arc-min
verticalSizeInMeter = cellSizeInArcMin*1852.
horizontalSizeInMeter = self.cellAreaMap/verticalSizeInMeter
self.channelLength = ((horizontalSizeInMeter)**(2)+\
(verticalSizeInMeter)**(2))**(0.5)
# option for lakes and reservoir
self.onlyNaturalWaterBodies = False
if self.iniItems.modflowParameterOptions['onlyNaturalWaterBodies'] == "True": self.onlyNaturalWaterBodies = True
        ####### options for sensitivity analysis on river depth and conductivities : Inge jan 2019 ##########
self.factor_cond = vos.readPCRmapClone(self.iniItems.modflowParameterOptions['factorcond'],\
self.cloneMap,self.tmpDir,self.inputDir)
self.factor_riverdepth = vos.readPCRmapClone(self.iniItems.routingOptions['depthFactor'],\
self.cloneMap,self.tmpDir,self.inputDir)
self.dem_riverbed= pcr.cover(self.dem_riverbed*self.factor_riverdepth)
######################################################################################
# a netcdf file containing the groundwater properties
if iniItems.groundwaterOptions['groundwaterPropertiesNC'] != "None":
groundwaterPropertiesNC = vos.getFullPath(\
iniItems.groundwaterOptions[\
'groundwaterPropertiesNC'],self.inputDir)
######################################################################################
#####################################################################################################################################################
# assign aquifer specific yield (dimensionless)
if iniItems.groundwaterOptions['groundwaterPropertiesNC'] == "None" or 'specificYield' in iniItems.groundwaterOptions.keys():
self.specificYield = vos.readPCRmapClone(\
iniItems.groundwaterOptions['specificYield'],self.cloneMap,self.tmpDir,self.inputDir)
else:
self.specificYield = vos.netcdf2PCRobjCloneWithoutTime(\
groundwaterPropertiesNC,'specificYield',self.cloneMap)
self.specificYield = pcr.cover(self.specificYield,0.0)
self.specificYield = pcr.max(0.010,self.specificYield) # TODO: Set the minimum values of specific yield.
self.specificYield = pcr.min(1.000,self.specificYield)
#####################################################################################################################################################
#####################################################################################################################################################
# assign aquifer hydraulic conductivity (unit: m/day)
if iniItems.groundwaterOptions['groundwaterPropertiesNC'] == "None" or 'kSatAquifer' in iniItems.groundwaterOptions.keys():
self.kSatAquifer = vos.readPCRmapClone(\
iniItems.groundwaterOptions['kSatAquifer'],self.cloneMap,self.tmpDir,self.inputDir)
else:
self.kSatAquifer = vos.netcdf2PCRobjCloneWithoutTime(\
groundwaterPropertiesNC,'kSatAquifer',self.cloneMap)
self.kSatAquifer = pcr.cover(self.kSatAquifer,0.0)
self.kSatAquifer = pcr.max(0.010,self.kSatAquifer)
#####################################################################################################################################################
#####################################################################################################################################################
        # try to assign the recession coefficient (unit: day-1) from the netcdf file of groundwaterPropertiesNC
try:
self.recessionCoeff = vos.netcdf2PCRobjCloneWithoutTime(\
groundwaterPropertiesNC,'recessionCoeff',\
cloneMapFileName = self.cloneMap)
except:
self.recessionCoeff = None
msg = "The 'recessionCoeff' cannot be read from the file: "+groundwaterPropertiesNC
logger.warning(msg)
        # assign the recession coefficient based on the given pcraster file
if 'recessionCoeff' in iniItems.groundwaterOptions.keys():
if iniItems.groundwaterOptions['recessionCoeff'] != "None":\
self.recessionCoeff = vos.readPCRmapClone(iniItems.groundwaterOptions['recessionCoeff'],self.cloneMap,self.tmpDir,self.inputDir)
        # calculate the recession coefficient based on the given parameters
if isinstance(self.recessionCoeff,types.NoneType) and\
'recessionCoeff' not in iniItems.groundwaterOptions.keys():
msg = "Calculating the groundwater linear reccesion coefficient based on the given parameters."
logger.info(msg)
# reading the 'aquiferWidth' value from the landSurfaceOptions (slopeLength)
if iniItems.landSurfaceOptions['topographyNC'] == None:
aquiferWidth = vos.readPCRmapClone(iniItems.landSurfaceOptions['slopeLength'],self.cloneMap,self.tmpDir,self.inputDir)
else:
topoPropertiesNC = vos.getFullPath(iniItems.landSurfaceOptions['topographyNC'],self.inputDir)
aquiferWidth = vos.netcdf2PCRobjCloneWithoutTime(topoPropertiesNC,'slopeLength',self.cloneMap)
# covering aquiferWidth with its maximum value
aquiferWidth = pcr.ifthen(self.landmask, pcr.cover(aquiferWidth, pcr.mapmaximum(aquiferWidth)))
# aquifer thickness (unit: m) for recession coefficient
aquiferThicknessForRecessionCoeff = vos.readPCRmapClone(iniItems.groundwaterOptions['aquiferThicknessForRecessionCoeff'],\
self.cloneMap,self.tmpDir,self.inputDir)
# calculate recessionCoeff (unit; day-1)
self.recessionCoeff = (math.pi**2.) * aquiferThicknessForRecessionCoeff / \
(4.*self.specificYield*(aquiferWidth**2.))
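            # Illustrative magnitude check (added comment only, with made-up values):
            # for an aquifer thickness D = 50 m, specific yield Sy = 0.05 and an
            # aquifer width L = 1000 m, the formula above gives
            # alpha = pi**2 * 50 / (4 * 0.05 * 1000**2) ~= 2.5e-3 day-1,
            # i.e. a characteristic drainage time scale of roughly 400 days.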
# minimum and maximum values for groundwater recession coefficient (day-1)
self.recessionCoeff = pcr.cover(self.recessionCoeff,0.00)
self.recessionCoeff = pcr.min(0.9999,self.recessionCoeff)
if 'minRecessionCoeff' in iniItems.groundwaterOptions.keys():
minRecessionCoeff = float(iniItems.groundwaterOptions['minRecessionCoeff'])
else:
minRecessionCoeff = 1.0e-4 # This is the minimum value used in Van Beek et al. (2011).
self.recessionCoeff = pcr.max(minRecessionCoeff,self.recessionCoeff)
#####################################################################################################################################################
#####################################################################################################################################################
# assign the river/stream/surface water bed conductivity
# - the default value is equal to kSatAquifer
self.riverBedConductivity = self.kSatAquifer
# - assign riverBedConductivity coefficient based on the given pcraster file
if 'riverBedConductivity' in iniItems.groundwaterOptions.keys():
if iniItems.groundwaterOptions['riverBedConductivity'] != "None":\
self.riverBedConductivity = vos.readPCRmapClone(iniItems.groundwaterOptions['riverBedConductivity'],self.cloneMap,self.tmpDir,self.inputDir)
#
# surface water bed thickness (unit: m)
bed_thickness = 0.1
# surface water bed resistance (unit: day)
bed_resistance = bed_thickness / (self.riverBedConductivity)
minimum_bed_resistance = 1.0
self.bed_resistance = pcr.max(minimum_bed_resistance,\
bed_resistance,)
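        # Illustrative numbers (added comment only): a bed conductivity of
        # 0.05 m/day gives 0.1 / 0.05 = 2 days of resistance, while 1.0 m/day
        # would give 0.1 day and is therefore raised to the 1-day minimum above.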
##############################################################################################################################################
#####################################################################################################################################################
# total groundwater thickness (unit: m)
# - For PCR-GLOBWB, the estimate of total groundwater thickness is needed to estimate for the following purpose:
# - productive aquifer areas (where capillary rise can occur and groundwater depletion can occur)
# - and also to estimate fossil groundwater capacity (the latter is needed only for run without MODFLOW)
totalGroundwaterThickness = None
if 'estimateOfTotalGroundwaterThickness' in iniItems.groundwaterOptions.keys():
totalGroundwaterThickness = vos.readPCRmapClone(iniItems.groundwaterOptions['estimateOfTotalGroundwaterThickness'],
self.cloneMap, self.tmpDir, self.inputDir)
# extrapolation of totalGroundwaterThickness
# - TODO: Make a general extrapolation option as a function in the virtualOS.py
totalGroundwaterThickness = pcr.cover(totalGroundwaterThickness,
pcr.windowaverage(totalGroundwaterThickness, 0.75))
totalGroundwaterThickness = pcr.cover(totalGroundwaterThickness,
pcr.windowaverage(totalGroundwaterThickness, 0.75))
totalGroundwaterThickness = pcr.cover(totalGroundwaterThickness,
pcr.windowaverage(totalGroundwaterThickness, 0.75))
totalGroundwaterThickness = pcr.cover(totalGroundwaterThickness,
pcr.windowaverage(totalGroundwaterThickness, 1.00))
totalGroundwaterThickness = pcr.cover(totalGroundwaterThickness, 0.0)
# set minimum thickness
if 'minimumTotalGroundwaterThickness' in iniItems.groundwaterOptions.keys():
minimumThickness = pcr.scalar(float(\
iniItems.groundwaterOptions['minimumTotalGroundwaterThickness']))
totalGroundwaterThickness = pcr.max(minimumThickness, totalGroundwaterThickness)
# set maximum thickness
if 'maximumTotalGroundwaterThickness' in iniItems.groundwaterOptions.keys():
maximumThickness = float(self.iniItems.groundwaterOptions['maximumTotalGroundwaterThickness'])
totalGroundwaterThickness = pcr.min(maximumThickness, totalGroundwaterThickness)
# estimate of total groundwater thickness (unit: m)
self.totalGroundwaterThickness = totalGroundwaterThickness
#####################################################################################################################################################
##############################################################################################################################################
# confining layer thickness (for more than one layer)
self.usePreDefinedConfiningLayer = False
if self.number_of_layers > 1 and self.iniItems.modflowParameterOptions['usePreDefinedConfiningLayer'] == "True":
self.usePreDefinedConfiningLayer = True
# confining layer thickness (unit: m)
self.confiningLayerThickness = pcr.cover(\
vos.readPCRmapClone(self.iniItems.modflowParameterOptions['confiningLayerThickness'],\
self.cloneMap, self.tmpDir, self.inputDir), 0.0)
# maximum confining layer vertical conductivity (unit: m/day)
self.maximumConfiningLayerVerticalConductivity = pcr.cover(\
vos.readPCRmapClone(self.iniItems.modflowParameterOptions['maximumConfiningLayerVerticalConductivity'],\
self.cloneMap, self.tmpDir, self.inputDir), 0.0)
# confining layer resistance (unit: day)
self.maximumConfiningLayerResistance = pcr.cover(\
vos.readPCRmapClone(self.iniItems.modflowParameterOptions['maximumConfiningLayerResistance'],\
self.cloneMap, self.tmpDir, self.inputDir), 0.0)
#confining layer location #*
self.estimateConfinedLayers = pcr.cover(\
vos.readPCRmapClone(self.iniItems.modflowParameterOptions['estimateConfinedLayers'],\
self.cloneMap, self.tmpDir, self.inputDir), 0.0)
##############################################################################################################################################
#####################################################################################################################################################
# extent of the productive aquifer (a boolean map)
# - Principle: In non-productive aquifer areas, no capillary rise and groundwater abstraction should not exceed recharge
#
self.productive_aquifer = pcr.ifthen(self.landmask, pcr.boolean(1.0))
excludeUnproductiveAquifer = True
if excludeUnproductiveAquifer:
if 'minimumTransmissivityForProductiveAquifer' in iniItems.groundwaterOptions.keys() and\
(iniItems.groundwaterOptions['minimumTransmissivityForProductiveAquifer'] != "None" or\
iniItems.groundwaterOptions['minimumTransmissivityForProductiveAquifer'] != "False"):
minimumTransmissivityForProductiveAquifer = \
vos.readPCRmapClone(iniItems.groundwaterOptions['minimumTransmissivityForProductiveAquifer'],\
self.cloneMap, self.tmpDir, self.inputDir)
self.productive_aquifer = pcr.cover(\
pcr.ifthen(self.kSatAquifer * totalGroundwaterThickness > minimumTransmissivityForProductiveAquifer, pcr.boolean(1.0)), pcr.boolean(0.0))
# - TODO: Check and re-calculate the GLHYMPS map to confirm the kSatAquifer value in groundwaterPropertiesNC (e.g. we miss some parts of HPA).
#####################################################################################################################################################
#####################################################################################################################################################
# option to ignore capillary rise
self.ignoreCapRise = False
if 'ignoreCapRise' in self.iniItems.modflowParameterOptions.keys() and \
self.iniItems.modflowParameterOptions['ignoreCapRise'] == "True": self.ignoreCapRise = True
#####################################################################################################################################################
#####################################################################################################################################################
# assumption for the thickness (m) of accessible groundwater (needed for coupling to PCR-GLOBWB)
        # - Note that this assumption value does not affect the modflow calculation. The value is needed merely for reporting "accesibleGroundwaterVolume".
accesibleDepth = 1000.0
if 'accesibleDepth' in self.iniItems.modflowParameterOptions.keys():
if self.iniItems.modflowParameterOptions['accesibleDepth'] != "None":
accesibleDepth = float(self.iniItems.modflowParameterOptions['accesibleDepth'])
self.max_accesible_elevation = self.dem_average - accesibleDepth
# list of the convergence criteria for HCLOSE (unit: m)
# - Deltares default's value is 0.001 m # check this value with Jarno
#~ self.criteria_HCLOSE = [0.001, 0.005, 0.01, 0.02, 0.05, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
#~ self.criteria_HCLOSE = [0.001, 0.01, 0.1, 0.5, 1.0]
#~ self.criteria_HCLOSE = [0.001, 0.01, 0.1, 0.15, 0.2, 0.5, 1.0]
#~ self.criteria_HCLOSE = [0.001, 0.005, 0.01, 0.1, 0.15, 0.2, 0.5, 1.0]
#~ self.criteria_HCLOSE = [0.001, 0.005, 0.01, 0.1, 0.2, 0.5, 1.0]
self.criteria_HCLOSE = [0.001, 0.005, 0.01, 0.1, 0.2, 0.3, 0.5, 0.75, 1.0]
#self.criteria_HCLOSE = [0.01, 0.1, 0.15, 0.2, 0.5, 1.0]
#self.criteria_HCLOSE = [0.5, 1.0]
#self.criteria_HCLOSE = [1.0, 1.0]
self.criteria_HCLOSE = sorted(self.criteria_HCLOSE)
# list of the convergence criteria for RCLOSE (unit: m3)
# - Deltares default's value for their 25 and 250 m resolution models is 10 m3 # check this value with Jarno
cell_area_assumption = verticalSizeInMeter * float(pcr.cellvalue(pcr.mapmaximum(horizontalSizeInMeter),1)[0])
#~ self.criteria_RCLOSE = [10., 100., 10.* cell_area_assumption/(250.*250.), 10.* cell_area_assumption/(25.*25.), 100.* cell_area_assumption/(25.*25.)]
#~ self.criteria_RCLOSE = [10.* cell_area_assumption/(250.*250.), 10.* cell_area_assumption/(25.*25.), 100.* cell_area_assumption/(25.*25.)]
#~ self.criteria_RCLOSE = [10.* cell_area_assumption/(25.*25.), 100.* cell_area_assumption/(25.*25.)]
self.criteria_RCLOSE = [10.* cell_area_assumption/(25.*25.), 100.* cell_area_assumption/(25.*25.), 10000.* cell_area_assumption/(25.*25.)]
#~ self.criteria_RCLOSE = [10.* cell_area_assumption/(25.*25.), 10000.* cell_area_assumption/(25.*25.)]
self.criteria_RCLOSE = sorted(self.criteria_RCLOSE)
        # initiate some variables/objects/classes to None
        # - lakes and reservoir objects (they will be constant for the entire year, only change at the beginning of the year)
self.WaterBodies = None
# - surface water bed conductance (also only change at the beginning of the year)
self.bed_conductance = None
# initiate pcraster modflow object to None
self.pcr_modflow = None
        # the following condition is needed if the recharge and abstraction input values are given as monthly totals
self.valuesRechargeAndAbstractionInMonthlyTotal = False
if 'valuesRechargeAndAbstractionInMonthlyTotal' in self.iniItems.modflowTransientInputOptions.keys():
if self.iniItems.modflowTransientInputOptions['valuesRechargeAndAbstractionInMonthlyTotal'] == "True":\
self.valuesRechargeAndAbstractionInMonthlyTotal = True
# minimum and maximum transmissivity values (unit: m2/day)
self.minimumTransmissivity = 10.0 # assumption used by Deltares
        self.maximumTransmissivity = 100000.0 # ridiculously high (for 20 m/day with the thickness = 5 km)
if 'minimumTransmissivity' in self.iniItems.modflowParameterOptions.keys():
self.minimumTransmissivity = float(self.iniItems.modflowParameterOptions['minimumTransmissivity'])
if 'maximumTransmissivity' in self.iniItems.modflowParameterOptions.keys():
self.maximumTransmissivity = float(self.iniItems.modflowParameterOptions['maximumTransmissivity'])
# option for online coupling purpose, we also need to know the location of pcrglobwb output
self.online_coupling = self.iniItems.online_coupling_between_pcrglobwb_and_modflow
# initiate old style reporting (this is usually used for debugging process)
self.initiate_old_style_reporting(iniItems)
def initiate_modflow(self):
logger.info("Initializing pcraster modflow.")
# TODO: removing all previous pcraster modflow files:
# initialise pcraster modflow
self.pcr_modflow = pcr.initialise(pcr.clone())
# setup the DIS package specifying the grids/layers used for the groundwater model
# - Note the layer specification must start with the bottom layer (layer 1 is the lowermost layer)
if self.number_of_layers == 1: self.set_grid_for_one_layer_model()
if self.number_of_layers == 2: self.set_grid_for_two_layer_model()
# specification for the boundary condition (ibound)
# - active cells only in landmask
# - constant head for outside the landmask
ibound = pcr.ifthen(self.landmask, pcr.nominal(1))
ibound = pcr.cover(ibound, pcr.nominal(-1))
self.ibound = ibound
for i in range(1, self.number_of_layers+1): self.pcr_modflow.setBoundary(self.ibound, i)
# setup the BCF package
if self.number_of_layers == 1: self.set_bcf_for_one_layer_model()
if self.number_of_layers == 2: self.set_bcf_for_two_layer_model()
        # TODO: defining/incorporating anisotropy values
def set_grid_for_one_layer_model(self):
# grid specification - one layer model
top = self.dem_average
bottom = top - self.totalGroundwaterThickness
self.pcr_modflow.createBottomLayer(bottom, top)
# make the following value(s) available for the other modules/methods:
self.thickness_of_layer_1 = top - bottom
self.total_thickness = self.thickness_of_layer_1
self.bottom_layer_1 = bottom
def set_grid_for_two_layer_model(self):
# grid specification - two layer model
# - top upper layer is elevation
top_layer_2 = self.dem_average
# - thickness of layer 2 is at least 10% of totalGroundwaterThickness
bottom_layer_2 = self.dem_average - 0.10 * self.totalGroundwaterThickness
# - thickness of layer 2 should be until 5 m below the river bed
bottom_layer_2 = pcr.min(self.dem_riverbed - 5.0, bottom_layer_2)
# - make sure that the minimum thickness of layer 2 is at least 0.1 m
thickness_of_layer_2 = pcr.max(0.1, top_layer_2 - bottom_layer_2)
bottom_layer_2 = top_layer_2 - thickness_of_layer_2
# - thickness of layer 1 is at least 5.0 m
thickness_of_layer_1 = pcr.max(5.0, self.totalGroundwaterThickness - thickness_of_layer_2)
bottom_layer_1 = bottom_layer_2 - thickness_of_layer_1
if self.usePreDefinedConfiningLayer:
# make sure that totalGroundwaterThickness is at least 50 m thicker than confiningLayerThickness
total_thickness = pcr.max(self.totalGroundwaterThickness, self.confiningLayerThickness + 50.0)
# - top upper layer is elevation
top_layer_2 = self.dem_average
# - thickness of layer 2 is based on the predefined confiningLayerThickness
bottom_layer_2 = self.dem_average - self.confiningLayerThickness
# - thickness of layer 2 should be until 5 m below the river bed elevation
bottom_layer_2 = pcr.min(self.dem_riverbed - 5.0, bottom_layer_2)
# - make sure that the minimum thickness of layer 2 is at least 0.1 m
thickness_of_layer_2 = pcr.max(0.1, top_layer_2 - bottom_layer_2)
bottom_layer_2 = top_layer_2 - thickness_of_layer_2
# - thickness of layer 1 is at least 5.0 m
thickness_of_layer_1 = pcr.max(5.0, total_thickness - thickness_of_layer_2)
bottom_layer_1 = bottom_layer_2 - thickness_of_layer_1
# set grid in modflow
self.pcr_modflow.createBottomLayer(bottom_layer_1, bottom_layer_2)
self.pcr_modflow.addLayer(top_layer_2)
# make the following value(s) available for the other modules/methods:
self.thickness_of_layer_1 = thickness_of_layer_1
self.thickness_of_layer_2 = thickness_of_layer_2
self.total_thickness = self.thickness_of_layer_1 + self.thickness_of_layer_2
self.bottom_layer_1 = bottom_layer_1
self.bottom_layer_2 = bottom_layer_2
self.top_layer_2 = top_layer_2
#~ # report elevation in pcraster map
#~ pcr.report(pcr.ifthen(self.landmask, self.top_layer_2), "top_uppermost_layer.map")
#~ pcr.report(pcr.ifthen(self.landmask, self.bottom_layer_2), "bottom_uppermost_layer.map")
#~ pcr.report(pcr.ifthen(self.landmask, self.bottom_layer_1), "bottom_lowermost_layer.map")
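        # Illustrative example of the resulting geometry (added comment only,
        # with made-up values): for dem_average = 100 m, dem_riverbed = 98 m and
        # totalGroundwaterThickness = 200 m (no predefined confining layer), the
        # upper layer spans 100 m down to 80 m (thickness 20 m, i.e. 10%) and the
        # lower layer spans 80 m down to -100 m (thickness 180 m).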
def set_bcf_for_one_layer_model(self):
# specification for storage coefficient (BCF package)
# - correction due to the usage of lat/lon coordinates
primary = pcr.cover(self.specificYield * self.cellAreaMap/(pcr.clone().cellSize()*pcr.clone().cellSize()), 0.0)
primary = pcr.max(1e-10, primary)
secondary = primary # dummy values as we used the layer type 00
self.pcr_modflow.setStorage(primary, secondary, 1)
# specification for horizontal conductivities (BCF package)
horizontal_conductivity = self.kSatAquifer # unit: m/day
# set the minimum value for transmissivity
horizontal_conductivity = pcr.max(self.minimumTransmissivity, \
horizontal_conductivity * self.total_thickness) / self.total_thickness
# set the maximum value for transmissivity
horizontal_conductivity = pcr.min(self.maximumTransmissivity, \
horizontal_conductivity * self.total_thickness) / self.total_thickness
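# i.e. the transmissivity T = K * b is clamped to [minimumTransmissivity, maximumTransmissivity]
# and then converted back to an equivalent conductivity by dividing by the total thickness b.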
# specification for vertical conductivities (BCF package)
vertical_conductivity = horizontal_conductivity # dummy values, as one layer model is used
#~ # for areas with ibound <= 0, we set very high horizontal conductivity values: # TODO: Check this, shall we implement this?
#~ horizontal_conductivity = pcr.ifthenelse(self.ibound > 0, horizontal_conductivity, \
#~ pcr.mapmaximum(horizontal_conductivity))
# set BCF package
self.pcr_modflow.setConductivity(00, horizontal_conductivity, \
vertical_conductivity, 1)
# make the following value(s) available for the other modules/methods:
self.specific_yield_1 = self.specificYield
def set_bcf_for_two_layer_model(self):
# specification for storage coefficient (BCF package)
# - correction due to the usage of lat/lon coordinates
primary = pcr.cover(self.specificYield * self.cellAreaMap/(pcr.clone().cellSize()*pcr.clone().cellSize()), 0.0)
primary = pcr.max(1e-20, primary)
#~ secondary = pcr.max(1e-5, primary * 0.001) # dummy values if we use layer type 00
secondary = pcr.cover(pcr.min(0.005, self.specificYield) * self.cellAreaMap/(pcr.clone().cellSize()*pcr.clone().cellSize()), 0.0) # dummy values if we use layer type 00
secondary = pcr.max(1e-20, secondary)
self.pcr_modflow.setStorage(primary, secondary, 1)
self.pcr_modflow.setStorage(primary, secondary, 2)
# specification for conductivities (BCF package)
horizontal_conductivity = self.kSatAquifer # unit: m/day
# layer 2 (upper layer) - horizontal conductivity
horizontal_conductivity_layer_2 = pcr.max(self.minimumTransmissivity, \
horizontal_conductivity * self.thickness_of_layer_2) / self.thickness_of_layer_2
horizontal_conductivity_layer_2 = pcr.min(self.maximumTransmissivity, \
horizontal_conductivity * self.thickness_of_layer_2) / self.thickness_of_layer_2
# layer 2 (upper layer) - vertical conductivity
# INGE: kh:kv = 1:0.1
vertical_conductivity_layer_2 = (self.kSatAquifer*0.1) * self.cellAreaMap/\
(pcr.clone().cellSize()*pcr.clone().cellSize())
if self.usePreDefinedConfiningLayer:
# vertical conductivity values are limited by the predefined maximumConfiningLayerVerticalConductivity and maximumConfiningLayerResistance
vertical_conductivity_layer_2 = pcr.min(self.kSatAquifer, self.maximumConfiningLayerVerticalConductivity)
vertical_conductivity_layer_2 = pcr.ifthenelse(self.confiningLayerThickness > 0.0, vertical_conductivity_layer_2, self.kSatAquifer)
vertical_conductivity_layer_2 = pcr.max(self.thickness_of_layer_2/self.maximumConfiningLayerResistance, \
vertical_conductivity_layer_2)
# minimum resistance is one day
vertical_conductivity_layer_2 = pcr.min(self.thickness_of_layer_2/1.0,\
vertical_conductivity_layer_2)
# correcting vertical conductivity
vertical_conductivity_layer_2 *= self.cellAreaMap/(pcr.clone().cellSize()*pcr.clone().cellSize())
# layer 1 (lower layer)
horizontal_conductivity_layer_1 = pcr.max(self.minimumTransmissivity, \
horizontal_conductivity * self.thickness_of_layer_1) / self.thickness_of_layer_1
horizontal_conductivity_layer_1 = pcr.min(self.maximumTransmissivity, \
horizontal_conductivity * self.thickness_of_layer_1) / self.thickness_of_layer_1
# ignoring the vertical conductivity in the lower layer
# such that the values of resistance (1/vcont) depend only on vertical_conductivity_layer_2
vertical_conductivity_layer_1 = pcr.spatial(pcr.scalar(1e99)) * self.cellAreaMap/\
(pcr.clone().cellSize()*pcr.clone().cellSize())
vertical_conductivity_layer_2 *= 0.5
# see: http://inside.mines.edu/~epoeter/583/08/discussion/vcont/modflow_vcont.htm
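# Note: assuming pcraster-modflow uses the standard BCF half-cell formulation, the interlayer leakance is
#   VCONT = 1 / ( 0.5*dz2/Kv2 + 0.5*dz1/Kv1 )
# and Kv1 is set to a huge value above, so the resistance is controlled by the upper layer only;
# halving Kv2 makes that resistance equal to dz2/Kv2, i.e. the full thickness of layer 2 acts as the
# confining resistance (this is the assumption behind the factor 0.5, cf. the link above).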
#~ # for areas with ibound <= 0, we set very high horizontal conductivity values: # TODO: Check this, shall we implement this?
#~ horizontal_conductivity_layer_2 = pcr.ifthenelse(self.ibound > 0, horizontal_conductivity_layer_2, \
#~ pcr.mapmaximum(horizontal_conductivity_layer_2))
#~ horizontal_conductivity_layer_1 = pcr.ifthenelse(self.ibound > 0, horizontal_conductivity_layer_1, \
#~ pcr.mapmaximum(horizontal_conductivity_layer_1))
# set conductivity values to MODFLOW
self.pcr_modflow.setConductivity(00, horizontal_conductivity_layer_2, \
vertical_conductivity_layer_2, 2)
self.pcr_modflow.setConductivity(00, horizontal_conductivity_layer_1, \
vertical_conductivity_layer_1, 1)
#~ self.pcr_modflow.setConductivity(02, horizontal_conductivity_layer_1, \
#~ vertical_conductivity_layer_1, 1)
# make the following value(s) available for the other modules/methods:
#self.specific_yield_1 = self.specificYield
#self.specific_yield_2 = self.specificYield
# INGE : increase specific yields of aquifers (not confining layers)
self.specific_yield_2 = pcr.cover(pcr.ifthenelse(self.estimateConfinedLayers >0.0, 0.11, self.specificYield), self.specificYield)
self.specific_yield_1 = pcr.cover(pcr.ifthenelse(self.estimateConfinedLayers >0.0, self.specificYield * 3, self.specificYield), self.specificYield)
def get_initial_heads(self):
if self.iniItems.modflowTransientInputOptions['usingPredefinedInitialHead'] == "True":
msg = "Using pre-defined groundwater head(s) given in the ini/configuration file."
logger.info(msg)
# using pre-defined groundwater head(s) described in the ini/configuration file
for i in range(1, self.number_of_layers+1):
var_name = 'groundwaterHeadLayer'+str(i)
vars(self)[var_name] = vos.readPCRmapClone(self.iniItems.modflowTransientInputOptions[var_name+'Ini'],\
self.cloneMap, self.tmpDir, self.inputDir)
vars(self)[var_name] = pcr.cover(vars(self)[var_name], 0.0)
else:
msg = "Estimating initial conditions based on the steady state simulation using the input as defined in the ini/configuration file."
logger.info(msg)
# using the digital elevation model as the initial heads
for i in range(1, self.number_of_layers+1):
var_name = 'groundwaterHeadLayer'+str(i)
vars(self)[var_name] = self.dem_average
# using initial head estimate given in the configuration file
if 'usingInitialHeadEstimate' in self.iniItems.modflowSteadyStateInputOptions.keys() and\
self.iniItems.modflowSteadyStateInputOptions['usingInitialHeadEstimate'] == "True":
for i in range(1, self.number_of_layers+1):
var_name = 'groundwaterHeadLayer'+str(i)
vars(self)[var_name] = vos.readPCRmapClone(self.iniItems.modflowSteadyStateInputOptions[var_name+'Estimate'],\
self.cloneMap, self.tmpDir, self.inputDir)
vars(self)[var_name] = pcr.cover(vars(self)[var_name], 0.0)
# calculate/simulate a steady state condition (until the modflow converges)
# get the current state(s) of groundwater head and put them in a dictionary
groundwaterHead = self.getState()
self.modflow_simulation("steady-state", groundwaterHead, None, 1, 1)
# An extra steady state simulation using transient simulation with constant input
self.transient_simulation_with_constant_input()
# extrapolating the calculated heads for areas/cells outside the landmask (to remove isolated cells)
#
# - the calculate groundwater head within the landmask region
for i in range(1, self.number_of_layers+1):
var_name = 'groundwaterHeadLayer'+str(i)
vars(self)[var_name] = pcr.ifthen(self.landmask, vars(self)[var_name])
# keep the ocean values (dem <= 0.0) - this is in order to maintain the 'behaviors' of sub marine groundwater discharge
vars(self)[var_name] = pcr.cover(vars(self)[var_name], pcr.ifthen(self.dem_average <= 0.0, self.dem_average))
# extrapolation
vars(self)[var_name] = pcr.cover(vars(self)[var_name], pcr.windowaverage(vars(self)[var_name], 3.*pcr.clone().cellSize()))
vars(self)[var_name] = pcr.cover(vars(self)[var_name], pcr.windowaverage(vars(self)[var_name], 5.*pcr.clone().cellSize()))
vars(self)[var_name] = pcr.cover(vars(self)[var_name], pcr.windowaverage(vars(self)[var_name], 7.*pcr.clone().cellSize()))
vars(self)[var_name] = pcr.cover(vars(self)[var_name], self.dem_average)
# TODO: Define the window sizes as part of the configuration file. Also consider to use the inverse distance method.
# TODO: Also please consider to use Deltares's trick to remove isolated cells.
def transient_simulation_with_constant_input(self):
time_step_length = 30 # unit: days
number_of_extra_years = 10
if "extraSpinUpYears" in self.iniItems.modflowSteadyStateInputOptions.keys() and\
self.iniItems.modflowSteadyStateInputOptions['extraSpinUpYears'] != "None":
number_of_extra_years = int(\
self.iniItems.modflowSteadyStateInputOptions['extraSpinUpYears'])
number_of_extra_months = 12 * number_of_extra_years
# maximum number of months = 999
if number_of_extra_months > 999:
msg = "To avoid a very long spin up, we limit the number of extra months to 999 months."
logger.info(msg)
number_of_extra_months = min(999, number_of_extra_months)
if number_of_extra_months > 0:
# preparing extra spin up folder/directory:
extra_spin_up_directory = self.iniItems.endStateDir + "/extra_spin_up/"
if os.path.exists(extra_spin_up_directory): shutil.rmtree(extra_spin_up_directory)
os.makedirs(extra_spin_up_directory)
for i_month in range(1, number_of_extra_months + 1):
msg = "\n"
msg += "\n"
msg += "Extra steady state simulation (transient simulation with constant input and monthly stress period): " + str(i_month) + " from " + str(number_of_extra_months)
msg += "\n"
msg += "\n"
logger.info(msg)
groundwaterHead = self.getState()
self.modflow_simulation("steady-state-extra", groundwaterHead, None, time_step_length, time_step_length)
# reporting the calculated head to pcraster files
# - extension for output file:
extension = "00" + str(i_month)
if i_month > 9: extension = "0" + str(i_month)
if i_month > 99: extension = str(i_month)
for i in range(1, self.number_of_layers+1):
var_name = 'groundwaterHeadLayer' + str(i)
file_name = extra_spin_up_directory + "/gwhead" + str(i) + "_." + extension
pcr.report(groundwaterHead[var_name], file_name)
def estimate_bottom_of_bank_storage(self):
# influence zone depth (m) # TODO: Define this one as part of the configuration/ini file
influence_zone_depth = 5.0
# bottom_elevation = flood_plain elevation - influence zone
bottom_of_bank_storage = self.dem_floodplain - influence_zone_depth
# reducing noise (so we will not introduce unrealistic sinks) # TODO: Define the window size as part of the configuration/ini file
bottom_of_bank_storage = pcr.max(bottom_of_bank_storage,\
pcr.windowaverage(bottom_of_bank_storage, 3.0 * pcr.clone().cellSize()))
# bottom_elevation > river bed
bottom_of_bank_storage = pcr.max(self.dem_riverbed, bottom_of_bank_storage)
# reducing noise by comparing to its downstream value (so we will not introduce unrealistic sinks)
bottom_of_bank_storage = pcr.max(bottom_of_bank_storage, \
(bottom_of_bank_storage +
pcr.cover(pcr.downstream(self.lddMap, bottom_of_bank_storage), bottom_of_bank_storage))/2.)
# bottom_elevation >= 0.0 (must be higher than sea level)
bottom_of_bank_storage = pcr.max(0.0, bottom_of_bank_storage)
# bottom_elevation <= dem_average (this is to drain overland flow)
bottom_of_bank_storage = pcr.min(bottom_of_bank_storage, self.dem_average)
bottom_of_bank_storage = pcr.cover(bottom_of_bank_storage, self.dem_average)
# for the mountainous region, the bottom of bank storage equal to its lowest point
# - extent of mountainous region
mountainous_extent = pcr.ifthen((self.dem_average - self.dem_floodplain) > 50.0, pcr.boolean(1.0))
# - sub_catchment classes
sub_catchment_class = pcr.ifthen(mountainous_extent, \
pcr.subcatchment(self.lddMap, pcr.nominal(pcr.uniqueid(mountainous_extent))))
# - bottom of bank storage
bottom_of_bank_storage = pcr.cover(pcr.areaminimum(bottom_of_bank_storage, sub_catchment_class), \
bottom_of_bank_storage)
# rounding down
bottom_of_bank_storage = pcr.rounddown(bottom_of_bank_storage * 1000.)/1000.
# TODO: We may want to improve this concept - by incorporating the following:
# - smooth bottom_elevation
# - upstream areas in the mountainous regions and above perennial stream starting points may also be drained (otherwise water will be accumulated and trapped there)
# - bottom_elevation > minimum elevation that is estimated from the maximum of S3 from the PCR-GLOBWB simulation
return bottom_of_bank_storage
def initiate_old_style_reporting(self,iniItems):
self.report = True
try:
self.outDailyTotNC = iniItems.oldReportingOptions['outDailyTotNC'].split(",")
self.outMonthTotNC = iniItems.oldReportingOptions['outMonthTotNC'].split(",")
self.outMonthAvgNC = iniItems.oldReportingOptions['outMonthAvgNC'].split(",")
self.outMonthEndNC = iniItems.oldReportingOptions['outMonthEndNC'].split(",")
self.outAnnuaTotNC = iniItems.oldReportingOptions['outAnnuaTotNC'].split(",")
self.outAnnuaAvgNC = iniItems.oldReportingOptions['outAnnuaAvgNC'].split(",")
self.outAnnuaEndNC = iniItems.oldReportingOptions['outAnnuaEndNC'].split(",")
except:
self.report = False
if self.report == True:
self.outNCDir = iniItems.outNCDir
self.netcdfObj = PCR2netCDF(iniItems)
#
# daily output in netCDF files:
if self.outDailyTotNC[0] != "None":
for var in self.outDailyTotNC:
# creating the netCDF files:
self.netcdfObj.createNetCDF(str(self.outNCDir)+"/"+ \
str(var)+"_dailyTot.nc",\
var,"undefined")
# MONTHly output in netCDF files:
# - cumulative
if self.outMonthTotNC[0] != "None":
for var in self.outMonthTotNC:
# initiating monthlyVarTot (accumulator variable):
vars(self)[var+'MonthTot'] = None
# creating the netCDF files:
self.netcdfObj.createNetCDF(str(self.outNCDir)+"/"+ \
str(var)+"_monthTot.nc",\
var,"undefined")
# - average
if self.outMonthAvgNC[0] != "None":
for var in self.outMonthAvgNC:
# initiating monthlyTotAvg (accumulator variable)
vars(self)[var+'MonthTot'] = None
# initiating monthlyVarAvg:
vars(self)[var+'MonthAvg'] = None
# creating the netCDF files:
self.netcdfObj.createNetCDF(str(self.outNCDir)+"/"+ \
str(var)+"_monthAvg.nc",\
var,"undefined")
# - last day of the month
if self.outMonthEndNC[0] != "None":
for var in self.outMonthEndNC:
# creating the netCDF files:
self.netcdfObj.createNetCDF(str(self.outNCDir)+"/"+ \
str(var)+"_monthEnd.nc",\
var,"undefined")
# YEARly output in netCDF files:
# - cumulative
if self.outAnnuaTotNC[0] != "None":
for var in self.outAnnuaTotNC:
# initiating yearly accumulator variable:
vars(self)[var+'AnnuaTot'] = None
# creating the netCDF files:
self.netcdfObj.createNetCDF(str(self.outNCDir)+"/"+ \
str(var)+"_annuaTot.nc",\
var,"undefined")
# - average
if self.outAnnuaAvgNC[0] != "None":
for var in self.outAnnuaAvgNC:
# initiating annualVarAvg:
vars(self)[var+'AnnuaAvg'] = None
# initiating annualTotAvg (accumulator variable)
vars(self)[var+'AnnuaTot'] = None
# creating the netCDF files:
self.netcdfObj.createNetCDF(str(self.outNCDir)+"/"+ \
str(var)+"_annuaAvg.nc",\
var,"undefined")
# - last day of the year
if self.outAnnuaEndNC[0] != "None":
for var in self.outAnnuaEndNC:
# creating the netCDF files:
self.netcdfObj.createNetCDF(str(self.outNCDir)+"/"+ \
str(var)+"_annuaEnd.nc",\
var,"undefined")
def update(self,currTimeStep):
# at the end of the month, calculate/simulate a steady state condition and obtain its calculated head values
if currTimeStep.isLastDayOfMonth():
# get the previous state
groundwaterHead = self.getState()
# length of a stress period
PERLEN = currTimeStep.day
if currTimeStep.startTime.day != 1 and currTimeStep.monthIdx == 1:
PERLEN = currTimeStep.day - currTimeStep.startTime.day + 1
# number of time step within a stress period
NSTP = PERLEN * 3
self.PERLEN = PERLEN # number of days within a stress period
self.NSTP = NSTP # number of time steps within a stress period
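# with PERLEN in days and NSTP = 3 * PERLEN (TSMULT = 1.0), each MODFLOW time step is about 8 hours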
self.modflow_simulation("transient", groundwaterHead,
currTimeStep,
PERLEN,
NSTP)
# old-style reporting (this is usually used for debugging process)
self.old_style_reporting(currTimeStep)
def modflow_simulation(self,\
simulation_type,\
initialGroundwaterHeadInADictionary,\
currTimeStep = None,\
PERLEN = 1.0,
NSTP = 1, \
MXITER = 1500,\
ITERI = 1250,\
NPCOND = 1,\
RELAX = 0.98,\
NBPOL = 2,\
DAMP = 1,\
ITMUNI = 4, LENUNI = 2, TSMULT = 1.0):
# initiate pcraster modflow object including its grid/layer/elevation:
# - constant for the entire simulation
if self.pcr_modflow == None: self.initiate_modflow()
if simulation_type == "transient":
logger.info("Preparing MODFLOW input for a transient simulation.")
SSTR = 0
if simulation_type == "steady-state":
logger.info("Preparing MODFLOW input for a steady-state simulation.")
SSTR = 1
if simulation_type == "steady-state-extra":
msg = "Preparing MODFLOW input for an 'extra' steady-state simulation: "
msg += "a transient simulation with constant input for 30 day (monthly) stress period with daily time step."
logger.info(msg)
SSTR = 0
# extract and set initial head for modflow simulation
groundwaterHead = initialGroundwaterHeadInADictionary
for i in range(1, self.number_of_layers+1):
var_name = 'groundwaterHeadLayer'+str(i)
initial_head = pcr.scalar(groundwaterHead[var_name])
self.pcr_modflow.setInitialHead(initial_head, i)
# read input files (for the steady-state condition, we use pcraster maps):
if simulation_type == "steady-state" or simulation_type == "steady-state-extra":
# - discharge (m3/s) from PCR-GLOBWB
discharge = vos.readPCRmapClone(self.iniItems.modflowSteadyStateInputOptions['avgDischargeInputMap'],\
self.cloneMap, self.tmpDir, self.inputDir)
# - recharge/capillary rise (unit: m/day) from PCR-GLOBWB
gwRecharge = vos.readPCRmapClone(self.iniItems.modflowSteadyStateInputOptions['avgGroundwaterRechargeInputMap'],\
self.cloneMap, self.tmpDir, self.inputDir)
#
# - groundwater abstraction (unit: m/day) from PCR-GLOBWB
gwAbstraction = pcr.spatial(pcr.scalar(0.0))
gwAbstraction = vos.readPCRmapClone(self.iniItems.modflowSteadyStateInputOptions['avgGroundwaterAbstractionInputMap'],\
self.cloneMap, self.tmpDir, self.inputDir)
# - average channel storage (unit: m3) from PCR-GLOBWB
channelStorage = None
if 'avgChannelStorageInputMap' in self.iniItems.modflowSteadyStateInputOptions.keys() and\
self.iniItems.modflowSteadyStateInputOptions['avgChannelStorageInputMap'][-4:] != "None":
channelStorage = pcr.cover(\
vos.readPCRmapClone(self.iniItems.modflowSteadyStateInputOptions['avgChannelStorageInputMap'],\
self.cloneMap, self.tmpDir, self.inputDir), 0.0)
# read input files
if simulation_type == "transient":
if self.online_coupling:
# for online coupling, we will read files from pcraster maps
directory = self.iniItems.main_output_directory + "/global/maps/"
# - discharge (m3/s) from PCR-GLOBWB
discharge_file_name = directory + "monthly_discharge_cubic_meter_per_second_" + str(currTimeStep.fulldate) + ".map"
discharge = pcr.cover(vos.readPCRmapClone(discharge_file_name, self.cloneMap, self.tmpDir), 0.0)
# - recharge/capillary rise (unit: m/day) from PCR-GLOBWB
gwRecharge_file_name = directory + "groundwater_recharge_meter_per_day_" + str(currTimeStep.fulldate) + ".map"
gwRecharge = pcr.cover(vos.readPCRmapClone(gwRecharge_file_name, self.cloneMap, self.tmpDir), 0.0)
# - groundwater abstraction (unit: m/day) from PCR-GLOBWB
gwAbstraction_file_name = directory + "groundwater_abstraction_meter_per_day_" + str(currTimeStep.fulldate) + ".map"
gwAbstraction = pcr.cover(vos.readPCRmapClone(gwAbstraction_file_name, self.cloneMap, self.tmpDir), 0.0)
# - channel storage (unit: m3)
channel_storage_file_name = directory + "channel_storage_cubic_meter_" + str(currTimeStep.fulldate) + ".map"
channelStorage = pcr.cover(vos.readPCRmapClone(channel_storage_file_name, self.cloneMap, self.tmpDir), 0.0)
# TODO: Try to read from netcdf files, avoid reading from pcraster maps (avoid resampling using gdal)
else:
# for offline coupling, we will read files from netcdf files
# - discharge (m3/s) from PCR-GLOBWB
discharge = vos.netcdf2PCRobjClone(self.iniItems.modflowTransientInputOptions['dischargeInputNC'],
"discharge", str(currTimeStep.fulldate), None, self.cloneMap)
# - recharge/capillary rise (unit: m/day) from PCR-GLOBWB
gwRecharge = vos.netcdf2PCRobjClone(self.iniItems.modflowTransientInputOptions['groundwaterRechargeInputNC'],\
"groundwater_recharge", str(currTimeStep.fulldate), None, self.cloneMap)
# - groundwater abstraction (unit: m/day) from PCR-GLOBWB
gwAbstraction = pcr.spatial(pcr.scalar(0.0))
if self.iniItems.modflowTransientInputOptions['groundwaterAbstractionInputNC'][-4:] != "None":
gwAbstraction = vos.netcdf2PCRobjClone(self.iniItems.modflowTransientInputOptions['groundwaterAbstractionInputNC'],\
"total_groundwater_abstraction", str(currTimeStep.fulldate), None, self.cloneMap)
# - for offline coupling, the provision of channel storage (unit: m3) is only optional
channelStorage = None
if 'channelStorageInputNC' in self.iniItems.modflowTransientInputOptions.keys() and\
self.iniItems.modflowTransientInputOptions['channelStorageInputNC'][-4:] != "None":
channelStorage = vos.netcdf2PCRobjClone(self.iniItems.modflowTransientInputOptions['channelStorageInputNC'],\
"channel_storage", str(currTimeStep.fulldate), None, self.cloneMap)
#####################################################################################################################################################
# for a steady-state simulation, the capillary rise is usually ignored:
if (simulation_type == "steady-state" or\
simulation_type == "steady-state-extra"):
self.ignoreCapRise = True
if 'ignoreCapRiseSteadyState' in self.iniItems.modflowSteadyStateInputOptions.keys() and\
self.iniItems.modflowSteadyStateInputOptions['ignoreCapRiseSteadyState'] == "False": self.ignoreCapRise = False
#####################################################################################################################################################
# ignore capillary rise if needed:
if self.ignoreCapRise: gwRecharge = pcr.max(0.0, gwRecharge)
# convert the values of abstraction and recharge to daily average
if self.valuesRechargeAndAbstractionInMonthlyTotal:
gwAbstraction = gwAbstraction/currTimeStep.day
gwRecharge = gwRecharge/currTimeStep.day
# set recharge, river, well and drain packages
self.set_drain_and_river_package(discharge, channelStorage, currTimeStep, simulation_type)
self.set_recharge_package(gwRecharge)
self.set_well_package(gwAbstraction)
# set parameter values for the DIS package
self.pcr_modflow.setDISParameter(ITMUNI, LENUNI, PERLEN, NSTP, TSMULT, SSTR)
#
# Some notes about the values
#
# ITMUNI = 4 # indicates the time unit (0: undefined, 1: seconds, 2: minutes, 3: hours, 4: days, 5: years)
# LENUNI = 2 # indicates the length unit (0: undefined, 1: feet, 2: meters, 3: centimeters)
# PERLEN = 1.0 # duration of a stress period
# NSTP = 1 # number of time steps in a stress period
# TSMULT = 1.0 # multiplier for the length of successive time steps
# SSTR = 1 # 0 - transient, 1 - steady state
# initiate the indices for HCLOSE and RCLOSE for the iteration until modflow_converged
self.iteration_HCLOSE = 0
self.iteration_RCLOSE = 0
self.modflow_converged = False
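# The loop below sweeps the convergence criteria from strict to loose: for every HCLOSE value all
# RCLOSE values are tried in turn; only when all combinations fail does the code either accept the
# last calculated heads (steady-state) or append a doubled HCLOSE value and keep trying (other cases).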
# execute MODFLOW
while self.modflow_converged == False:
# convergence criteria
HCLOSE = self.criteria_HCLOSE[self.iteration_HCLOSE]
RCLOSE = self.criteria_RCLOSE[self.iteration_RCLOSE]
# set PCG solver
self.pcr_modflow.setPCG(MXITER, ITERI, NPCOND, HCLOSE, RCLOSE, RELAX, NBPOL, DAMP)
# some notes for PCG solver values
#
# MXITER = 50 # maximum number of outer iterations # Deltares use 50
# ITERI = 30 # number of inner iterations # Deltares use 30
# NPCOND = 1 # 1 - Modified Incomplete Cholesky, 2 - Polynomial matrix conditioning method;
# HCLOSE = 0.01 # HCLOSE (unit: m)
# RCLOSE = 10.* 400.*400. # RCLOSE (unit: m3)
# RELAX = 1.00 # relaxation parameter used with NPCOND = 1
# NBPOL = 2 # indicates whether the estimate of the upper bound on the maximum eigenvalue is 2.0 (but we do not use it, since NPCOND = 1)
# DAMP = 1 # no damping (DAMP introduced in MODFLOW 2000)
msg = "Executing MODFLOW with HCLOSE = "+str(HCLOSE)+" and RCLOSE = "+str(RCLOSE)+" and MXITER = "+str(MXITER)+" and ITERI = "+str(ITERI)+" and PERLEN = "+str(PERLEN)+" and NSTP = "+str(NSTP)
logger.info(msg)
try:
self.pcr_modflow.run()
self.modflow_converged = self.pcr_modflow.converged() # TODO: Ask Oliver to fix the non-convergence issue that can appear before reaching the end of stress period.
#~ self.modflow_converged = self.old_check_modflow_convergence()
except:
self.modflow_converged = False
print self.modflow_converged
if self.modflow_converged == False:
logger.info('')
msg = "MODFLOW FAILED TO CONVERGE with HCLOSE = "+str(HCLOSE)+" and RCLOSE = "+str(RCLOSE)
logger.info(msg)
logger.info('')
####################################################################################################################################### OPTIONAL ######
# for the steady state simulation, we still save the calculated head(s)
# so that we can use them as the initial estimate for the next iteration (by doing this, it may ease the convergence - TODO: check this)
# NOTE: We must NOT extract the calculated heads of a transient simulation result that does not converge.
if simulation_type == "steady-state":
msg = "Set the result from the uncoverged modflow simulation as the initial new estimate (for a steady-state simulation only)."
logger.info(msg)
# obtain the result from the unconverged modflow simulation
for i in range(1, self.number_of_layers+1):
var_name = 'groundwaterHeadLayer'+str(i)
vars(self)[var_name] = None
vars(self)[var_name] = self.pcr_modflow.getHeads(i)
# set the result from the unconverged modflow simulation as the initial new estimate
for i in range(1, self.number_of_layers+1):
var_name = 'groundwaterHeadLayer'+str(i)
initial_head = pcr.scalar(vars(self)[var_name])
self.pcr_modflow.setInitialHead(initial_head, i)
####################################################################################################################################### OPTIONAL ######
# set a new iteration index for the RCLOSE
self.iteration_RCLOSE += 1
# reset if the index has reached the length of available criteria
if self.iteration_RCLOSE > (len(self.criteria_RCLOSE)-1): self.iteration_RCLOSE = 0
# set a new iteration index for the HCLOSE
if self.iteration_RCLOSE == 0: self.iteration_HCLOSE += 1
# if we already using all available HCLOSE
if self.iteration_RCLOSE == 0 and self.iteration_HCLOSE == len(self.criteria_HCLOSE):
msg = "\n\n\n"
msg += "NOT GOOD!!! MODFLOW STILL FAILED TO CONVERGE with HCLOSE = "+str(HCLOSE)+" and RCLOSE = "+str(RCLOSE)
msg += "\n\n"
# for a steady-state simulation, we give up
if simulation_type == "steady-state":
msg += "But, we give up and we can only decide/suggest to use the last calculated groundwater heads."
msg += "\n\n"
logger.warning(msg)
# force MODFLOW to converge
self.modflow_converged = True
else:
additional_HCLOSE = HCLOSE * 2.0
msg += "We will try again using the HCLOSE: " + str(additional_HCLOSE)
msg += "\n\n"
logger.warning(msg)
self.criteria_HCLOSE.append(additional_HCLOSE)
self.criteria_HCLOSE = sorted(self.criteria_HCLOSE)
# TODO: Shall we also increase RCLOSE ??
else:
msg = "\n\n\n"
msg += "HURRAY!!! MODFLOW CONVERGED with HCLOSE = "+str(HCLOSE)+" and RCLOSE = "+str(RCLOSE)
msg += "\n\n"
logger.info(msg)
# obtaining the results from modflow simulation
if self.modflow_converged: self.get_all_modflow_results(simulation_type)
# clear modflow object
self.pcr_modflow = None
# calculate some variables that will be accessed from PCR-GLOBWB (for online coupling purpose)
self.calculate_values_for_pcrglobwb()
def calculate_values_for_pcrglobwb(self):
logger.info("Calculate some variables for PCR-GLOBWB (needed for online coupling purpose: 'relativeGroundwaterHead', 'baseflow', and 'storGroundwater'")
# relative uppermost groundwater head (unit: m) above the minimum elevation within grid
uppermost_head = vars(self)['groundwaterHeadLayer'+str(self.number_of_layers)]
self.relativeGroundwaterHead = uppermost_head - self.dem_minimum
# baseflow (unit: m/day)
# - initiate the (accumulated) volume rate (m3/day) (for accumulating the fluxes from all layers)
totalBaseflowVolumeRate = pcr.scalar(0.0)
# - accumulating fluxes from all layers
for i in range(1, self.number_of_layers+1):
# from the river leakage
var_name = 'riverLeakageLayer'+str(i)
totalBaseflowVolumeRate += pcr.cover(vars(self)[var_name], 0.0)
# from the drain package
var_name = 'drainLayer'+str(i)
totalBaseflowVolumeRate += pcr.cover(vars(self)[var_name], 0.0)
# use only in the landmask region
if i == self.number_of_layers: totalBaseflowVolumeRate = pcr.ifthen(self.landmask, totalBaseflowVolumeRate)
# - convert the unit to m/day and convert the flow direction
# for this variable, positive values indicate flow leaving the aquifer (following the PCR-GLOBWB assumption, opposite direction from MODFLOW)
self.baseflow = pcr.scalar(-1.0) * (totalBaseflowVolumeRate/self.cellAreaMap)
# storGroundwater (unit: m)
# - from the lowermost layer
accesibleGroundwaterThickness = pcr.ifthen(self.landmask, \
self.specific_yield_1 * \
pcr.max(0.0, self.groundwaterHeadLayer1 - pcr.max(self.max_accesible_elevation, \
self.bottom_layer_1)))
# - from the uppermost layer
if self.number_of_layers == 2:\
accesibleGroundwaterThickness += pcr.ifthen(self.landmask, \
self.specific_yield_2 * \
pcr.max(0.0, self.groundwaterHeadLayer2 - pcr.max(self.max_accesible_elevation, \
self.bottom_layer_2)))
# - TODO: Make this flexible for a model that has more than two layers.
# - storGroundwater (unit: m) that can be accessed for abstraction
self.storGroundwater = accesibleGroundwaterThickness
def get_all_modflow_results(self, simulation_type):
logger.info("Get all modflow results.")
# obtaining the results from modflow simulation
for i in range(1, self.number_of_layers+1):
# groundwater head (unit: m)
var_name = 'groundwaterHeadLayer'+str(i)
vars(self)[var_name] = None
vars(self)[var_name] = self.pcr_modflow.getHeads(i)
# river leakage (unit: m3/day)
var_name = 'riverLeakageLayer'+str(i)
vars(self)[var_name] = None
vars(self)[var_name] = self.pcr_modflow.getRiverLeakage(i)
# drain (unit: m3/day)
var_name = 'drainLayer'+str(i)
vars(self)[var_name] = None
vars(self)[var_name] = self.pcr_modflow.getDrain(i)
# bdgfrf - cell-by-cell flows right (m3/day)
var_name = 'flowRightFaceLayer'+str(i)
vars(self)[var_name] = None
vars(self)[var_name] = self.pcr_modflow.getRightFace(i)
# bdgfff - cell-by-cell flows front (m3/day)
var_name = 'flowFrontFaceLayer'+str(i)
vars(self)[var_name] = None
vars(self)[var_name] = self.pcr_modflow.getFrontFace(i)
# bdgflf - cell-by-cell flows lower (m3/day)
# Note: No flow through the lower face of the bottom layer
if i > 1:
var_name = 'flowLowerFaceLayer'+str(i)
vars(self)[var_name] = None
vars(self)[var_name] = self.pcr_modflow.getLowerFace(i)
# flow to/from constant head cells (unit: m3/day)
var_name = 'flowConstantHeadLayer'+str(i)
vars(self)[var_name] = None
vars(self)[var_name] = self.pcr_modflow.getConstantHead(i)
# cell-by-cell storage flow term (unit: m3)
if simulation_type == "transient":
var_name = 'flowStorageLayer'+str(i)
vars(self)[var_name] = None
vars(self)[var_name] = self.pcr_modflow.getStorage(i)
#~ # for debuging only
#~ pcr.report(self.groundwaterHeadLayer1 , "gw_head_layer_1.map")
#~ pcr.report(self.groundwaterDepthLayer1, "gw_depth_layer_1.map")
def old_check_modflow_convergence(self, file_name = "pcrmf.lst"):
# open and read the lst file
file_name = self.tmp_modflow_dir + "/" + file_name
f = open(file_name) ; all_lines = f.read() ; f.close()
# split the content of the file into several lines
all_lines = all_lines.replace("\r","")
all_lines = all_lines.split("\n")
# scan the last 200 lines and check if the model
modflow_converged = True
for i in range(0,200):
if 'FAILED TO CONVERGE' in all_lines[-i]: modflow_converged = False
print modflow_converged
return modflow_converged
def set_drain_and_river_package(self, discharge, channel_storage, currTimeStep, simulation_type):
logger.info("Set the river package.")
# set WaterBodies class to define the extent of lakes and reservoirs (constant for the entire year, annual resolution)
# and also set drain package (constant for the entire year, unless there are changes in the WaterBodies class)
if simulation_type == "steady-state" or simulation_type == "steady-state-extra":
self.WaterBodies = waterBodies.WaterBodies(self.iniItems,\
self.landmask,\
self.onlyNaturalWaterBodies)
self.WaterBodies.getParameterFiles(date_given = self.iniItems.globalOptions['startTime'],\
cellArea = self.cellAreaMap, \
ldd = self.lddMap)
if simulation_type == "transient":
if self.WaterBodies == None:
self.WaterBodies = waterBodies.WaterBodies(self.iniItems,\
self.landmask,\
self.onlyNaturalWaterBodies)
self.WaterBodies.getParameterFiles(date_given = str(currTimeStep.fulldate),\
cellArea = self.cellAreaMap, \
ldd = self.lddMap)
if currTimeStep.month == 1:
self.WaterBodies.getParameterFiles(date_given = str(currTimeStep.fulldate),\
cellArea = self.cellAreaMap, \
ldd = self.lddMap)
# reset bed conductance at the first month (due to possibility of new inclusion of lakes/reservoirs)
if currTimeStep == None or currTimeStep.month == 1: self.bed_conductance = None
if isinstance(self.bed_conductance, types.NoneType):
logger.info("Estimating surface water bed elevation.")
#~ # - for lakes and reservoirs, alternative 1: make the bottom elevation deep --- Shall we do this? NOTE: This will provide unrealistic groundwater depth. Need further investigations (consider to use US).
#~ additional_depth = 1500.
#~ surface_water_bed_elevation = pcr.ifthen(pcr.scalar(self.WaterBodies.waterBodyIds) > 0.0, \
#~ self.dem_riverbed - additional_depth)
#
#~ # - for lakes and reservoirs, alternative 2: estimate bed elevation from dem and bankfull depth
#~ surface_water_bed_elevation = pcr.ifthen(pcr.scalar(self.WaterBodies.waterBodyIds) > 0.0, self.dem_average)
#~ surface_water_bed_elevation = pcr.areaaverage(surface_water_bed_elevation, self.WaterBodies.waterBodyIds)
#~ surface_water_bed_elevation -= pcr.areamaximum(self.bankfull_depth, self.WaterBodies.waterBodyIds)
# - for lakes and reservoirs, alternative 3: estimate bed elevation from DEM only
# This is to avoid groundwater heads falling too far below the DEM
# This will also smooth groundwater heads.
surface_water_bed_elevation = pcr.ifthen(pcr.scalar(self.WaterBodies.waterBodyIds) > 0.0, self.dem_average)
# surface water bed elevation for rivers, lakes and reservoirs
surface_water_bed_elevation = pcr.cover(surface_water_bed_elevation, self.dem_riverbed)
#~ surface_water_bed_elevation = self.dem_riverbed # This is an alternative, if we do not want to introduce very deep bottom elevations of lakes and/or reservoirs.
# rounding values for surface_water_bed_elevation
self.surface_water_bed_elevation = pcr.rounddown(surface_water_bed_elevation * 100.)/100.
logger.info("Estimating surface water bed conductance.")
############################################################################################################################################
# lake and reservoir fraction (dimensionless)
lake_and_reservoir_fraction = pcr.cover(\
pcr.ifthen(pcr.scalar(self.WaterBodies.waterBodyIds) > 0.0, \
self.WaterBodies.fracWat), 0.0)
# river fraction (dimensionless)
river_fraction = (1.0 - lake_and_reservoir_fraction) * (self.bankfull_width * self.channelLength)/self.cellAreaMap
# lake and reservoir resistance (day)
lake_and_reservoir_resistance = self.bed_resistance
# - assuming a minimum resistance (due to the sedimentation, conductivity: 0.001 m/day and thickness 0.50 m)
lake_and_reservoir_resistance = pcr.max(0.50 / 0.001, self.bed_resistance)
#~ # to further decrease bed conductance in lakes and reservoir, we limit the lake and reservoir fraction as follows:
#~ lake_and_reservoir_fraction = pcr.cover(\
#~ pcr.min(lake_and_reservoir_fraction,\
#~ pcr.ifthen(pcr.scalar(self.WaterBodies.waterBodyIds) > 0.0, \
#~ pcr.areaaverage(self.bankfull_width * self.channelLength, self.WaterBodies.waterBodyIds))), 0.0)
#~ # make the lake and reservoir resistance even higher (to avoid too high seepage) # TODO: Investigate this !!!!
#~ lake_and_reservoir_resistance *= 10.
# lake and reservoir conductance (m2/day)
lake_and_reservoir_conductance = (1.0/lake_and_reservoir_resistance) * lake_and_reservoir_fraction * \
self.cellAreaMap
# river conductance (m2/day)
river_conductance = (1.0/self.bed_resistance) * river_fraction *\
self.cellAreaMap
# surface water bed conductance (unit: m2/day)
bed_conductance = lake_and_reservoir_conductance + river_conductance
self.bed_conductance = pcr.cover(bed_conductance, 0.0)
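# In summary, the RIV-package conductance per cell is C = wetted_area / bed_resistance (m2/day):
# lakes/reservoirs use fracWat * cellArea with a resistance of at least 0.50 m / 0.001 m/day = 500 days,
# rivers use bankfull_width * channelLength (scaled by the non-lake fraction of the cell) with the default bed_resistance.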
############################################################################################################################################
# set minimum conductance values (to remove water above surface level)
# - assume all cells have minimum river width
minimum_width = 2.0 # Sutanudjaja et al. (2011)
minimum_conductance = (1.0/self.bed_resistance) * \
pcr.max(minimum_width, self.bankfull_width) * self.channelLength/self.cellAreaMap
self.bed_conductance = pcr.max(minimum_conductance, self.bed_conductance)
logger.info("Estimating outlet widths of lakes and/or reservoirs.")
# - 'channel width' for lakes and reservoirs
channel_width = pcr.areamaximum(self.bankfull_width, self.WaterBodies.waterBodyIds)
self.channel_width = pcr.cover(channel_width, self.bankfull_width)
logger.info("Estimating surface water elevation.")
# - convert discharge value to surface water elevation (m)
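# Manning's equation for a wide rectangular channel, Q = (1/n) * w * h^(5/3) * sqrt(S), solved for the
# water depth gives h = (n*Q / (w*sqrt(S)))^(3/5), i.e. the exponents -3/5, 3/5, -3/10 and 3/5 used
# below (the hydraulic radius is approximated by the water depth h).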
river_water_height = (self.channel_width**(-3.0/5.0)) * (discharge**(3.0/5.0)) * ((self.gradient)**(-3.0/10.0)) * (self.manningsN**(3.0/5.0))
surface_water_elevation = self.dem_riverbed + \
river_water_height
#
# - calculating water level (unit: m) above the flood plain # TODO: Improve this concept (using Rens's latest inundation scheme)
#----------------------------------------------------------
water_above_fpl = pcr.max(0.0, surface_water_elevation - self.dem_floodplain) # unit: m, water level above the floodplain (not distributed)
water_above_fpl *= self.bankfull_depth * self.channel_width / self.cellAreaMap # unit: m, water level above the floodplain (distributed within the cell)
# TODO: Improve this concept using Rens's latest scheme
#
# - corrected surface water elevation
surface_water_elevation = pcr.ifthenelse(surface_water_elevation > self.dem_floodplain, \
self.dem_floodplain + water_above_fpl, \
surface_water_elevation)
# - surface water elevation for lakes and reservoirs:
lake_reservoir_water_elevation = pcr.ifthen(self.WaterBodies.waterBodyOut, pcr.min(surface_water_elevation, self.dem_floodplain))
lake_reservoir_water_elevation = pcr.areamaximum(lake_reservoir_water_elevation, self.WaterBodies.waterBodyIds)
lake_reservoir_water_elevation = pcr.cover(lake_reservoir_water_elevation, \
pcr.areaaverage(surface_water_elevation, self.WaterBodies.waterBodyIds))
# - maximum and minimum values for lake_reservoir_water_elevation
lake_reservoir_water_elevation = pcr.min(self.dem_floodplain, lake_reservoir_water_elevation)
lake_reservoir_water_elevation = pcr.max(self.surface_water_bed_elevation, lake_reservoir_water_elevation)
# - smoothing
lake_reservoir_water_elevation = pcr.areaaverage(surface_water_elevation, self.WaterBodies.waterBodyIds)
lake_reservoir_water_elevation = pcr.ifthen(pcr.scalar(self.WaterBodies.waterBodyIds) > 0.0, lake_reservoir_water_elevation)
#
# - to minimize negative channel storage, ignore river infiltration in smaller rivers ; no infiltration if HRIV = RBOT (and h < RBOT)
minimum_channel_width = 5.0
surface_water_elevation = pcr.ifthenelse(self.channel_width > minimum_channel_width, surface_water_elevation, \
self.surface_water_bed_elevation)
#
# - merge lake and reservoir water elevation
surface_water_elevation = pcr.cover(lake_reservoir_water_elevation, surface_water_elevation)
#
# - covering missing values and rounding
surface_water_elevation = pcr.cover(surface_water_elevation, self.dem_average)
surface_water_elevation = pcr.rounddown(surface_water_elevation * 1000.)/1000.
#
# - make sure that HRIV >= RBOT ; no infiltration if HRIV = RBOT (and h < RBOT)
surface_water_elevation = pcr.max(surface_water_elevation, self.surface_water_bed_elevation)
# - to minimize negative channel storage, ignore river infiltration with low surface_water_elevation
minimum_water_height = 0.50
surface_water_elevation = pcr.ifthenelse((surface_water_elevation - self.surface_water_bed_elevation) > minimum_water_height, surface_water_elevation, \
self.surface_water_bed_elevation)
# - to minimize negative channel storage, ignore river infiltration with low channel storage
if not isinstance(channel_storage, types.NoneType):
minimum_channel_storage = pcr.max(0.0, 0.10 * self.bankfull_depth * self.bankfull_width * self.channelLength) # unit: m3
surface_water_elevation = pcr.ifthenelse(channel_storage > minimum_channel_storage, surface_water_elevation, self.surface_water_bed_elevation)
# - also ignore river infiltration in the mountainous region
mountainous_extent = pcr.cover(\
pcr.ifthen((self.dem_average - self.dem_floodplain) > 50.0, pcr.boolean(1.0)), pcr.boolean(0.0))
surface_water_elevation = pcr.ifthenelse(mountainous_extent, self.surface_water_bed_elevation, surface_water_elevation)
# make sure that HRIV >= RBOT ; no infiltration if HRIV = RBOT (and h < RBOT)
surface_water_elevation = pcr.rounddown(surface_water_elevation * 1000.)/1000.
surface_water_elevation = pcr.max(surface_water_elevation, self.surface_water_bed_elevation)
# reducing the size of table by ignoring cells outside the landmask region
bed_conductance_used = pcr.ifthen(self.landmask, self.bed_conductance)
bed_conductance_used = pcr.cover(bed_conductance_used, 0.0)
#~ # for the case HRIV == RBOT, we can use drain package --------- NOT NEEDED
#~ additional_drain_elevation = pcr.cover(\
#~ pcr.ifthen(surface_water_elevation <= self.surface_water_bed_elevation, self.surface_water_bed_elevation), 0.0)
#~ additional_drain_conductance = pcr.cover(\
#~ pcr.ifthen(surface_water_elevation <= self.surface_water_bed_elevation, bed_conductance_used), 0.0)
#~ bed_conductance_used = \
#~ pcr.ifthenelse(surface_water_elevation <= self.surface_water_bed_elevation, 0.0, bed_conductance_used)
#~ #
#~ # set the DRN package only to the uppermost layer
#~ self.pcr_modflow.setDrain(additional_drain_elevation, \
#~ additional_drain_conductance, self.number_of_layers)
# set the RIV package only to the uppermost layer
self.pcr_modflow.setRiver(surface_water_elevation, self.surface_water_bed_elevation, bed_conductance_used, self.number_of_layers)
# TODO: Improve the concept of RIV package, particularly while calculating surface water elevation in lakes and reservoirs
# set drain package
self.set_drain_package()
def set_recharge_package(self, \
gwRecharge, gwAbstraction = 0.0,
gwAbstractionReturnFlow = 0.0): # Note: We ignored the latter as MODFLOW should capture this part as well.
# We also moved the abstraction to the WELL package
logger.info("Set the recharge package.")
# specify the recharge package
# + recharge/capillary rise (unit: m/day) from PCR-GLOBWB
# - groundwater abstraction (unit: m/day) from PCR-GLOBWB
# + return flow of groundwater abstraction (unit: m/day) from PCR-GLOBWB
net_recharge = gwRecharge - gwAbstraction + \
gwAbstractionReturnFlow
# - correcting values (considering MODFLOW lat/lon cell properties)
# and pass them to the RCH package
net_RCH = pcr.cover(net_recharge * self.cellAreaMap/(pcr.clone().cellSize()*pcr.clone().cellSize()), 0.0)
net_RCH = pcr.cover(pcr.ifthenelse(pcr.abs(net_RCH) < 1e-20, 0.0, net_RCH), 0.0)
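# (same lat/lon cell-area rescaling as used for the storage coefficients; the abs() < 1e-20 test
# just zeroes out numerical noise before the values are passed to the RCH package)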
# put the recharge to the top grid/layer
self.pcr_modflow.setRecharge(net_RCH, 1)
#~ # if we want to put RCH in the lower layer
#~ self.pcr_modflow.setIndicatedRecharge(net_RCH, pcr.spatial(pcr.nominal(1)))
def set_well_package(self, gwAbstraction):
logger.info("Set the well package.")
if self.number_of_layers == 1: self.set_well_package_for_one_layer_model(gwAbstraction)
if self.number_of_layers == 2: self.set_well_package_for_two_layer_model(gwAbstraction)
def set_well_package_for_one_layer_model(self, gwAbstraction):
gwAbstraction = pcr.cover(gwAbstraction, 0.0)
gwAbstraction = pcr.max(gwAbstraction, 0.0)
# abstraction volume (negative value, unit: m3/day)
abstraction = pcr.cover(gwAbstraction, 0.0) * self.cellAreaMap * pcr.scalar(-1.0)
# set the well package
self.pcr_modflow.setWell(abstraction, 1)
def set_well_package_for_two_layer_model(self, gwAbstraction):
gwAbstraction = pcr.cover(gwAbstraction, 0.0)
gwAbstraction = pcr.max(gwAbstraction, 0.0)
# abstraction for the layer 1 (lower layer) is limited only in productive aquifer
abstraction_layer_1 = pcr.cover(pcr.ifthen(self.productive_aquifer, gwAbstraction), 0.0)
# abstraction for the layer 2 (upper layer)
abstraction_layer_2 = pcr.max(0.0, gwAbstraction - abstraction_layer_1)
# abstraction volume (negative value, unit: m3/day)
abstraction_layer_1 = abstraction_layer_1 * self.cellAreaMap * pcr.scalar(-1.0)
abstraction_layer_2 = abstraction_layer_2 * self.cellAreaMap * pcr.scalar(-1.0)
# set the well package
self.pcr_modflow.setWell(abstraction_layer_1, 1)
self.pcr_modflow.setWell(abstraction_layer_2, 2)
def set_well_package_OLD(self, gwAbstraction):
logger.info("Set the well package.")
# reducing the size of table by ignoring cells with zero abstraction
gwAbstraction = pcr.ifthen(gwAbstraction > 0.0, gwAbstraction)
# abstraction only in productive aquifer
gwAbstraction = pcr.ifthen(self.productive_aquifer, gwAbstraction)
# abstraction volume (negative value, unit: m3/day)
abstraction = gwAbstraction * self.cellAreaMap * pcr.scalar(-1.0)
# FIXME: The following cover operations should not be necessary (Oliver should fix this).
abstraction = pcr.cover(abstraction, 0.0)
# set the well based on number of layers
if self.number_of_layers == 1: self.pcr_modflow.setWell(abstraction, 1)
if self.number_of_layers == 2: self.pcr_modflow.setWell(abstraction, 1) # at the bottom layer
#~ print('test')
def set_drain_package(self):
logger.info("Set the drain package (for the release of over bank storage).")
# specify the drain package; the drain package is used to simulate the drainage of bank storage
# - estimate bottom of bank storage for flood plain areas
drain_elevation = self.estimate_bottom_of_bank_storage() # unit: m
# - for lakes and/or reservoirs, ignore the drainage
drain_conductance = pcr.ifthen(pcr.scalar(self.WaterBodies.waterBodyIds) > 0.0, pcr.scalar(0.0))
# - drainage conductance is a linear reservoir coefficient
drain_conductance = pcr.cover(drain_conductance, \
self.recessionCoeff * self.specificYield * self.cellAreaMap) # unit: m2/day
#~ drain_conductance = pcr.ifthenelse(drain_conductance < 1e-20, 0.0, \
#~ drain_conductance)
#~ drain_conductance = pcr.rounddown(drain_conductance*10000.)/10000. # It is not a good idea to round the values down (water can be trapped).
# reducing the size of table by ignoring cells outside landmask region
drain_conductance = pcr.ifthen(self.landmask, drain_conductance)
drain_conductance = pcr.cover(drain_conductance, 0.0)
#~ # set the DRN package only to the uppermost layer
#~ self.pcr_modflow.setDrain(drain_elevation, drain_conductance, self.number_of_layers)
# set the DRN package to both layers ------ 4 January 2016: I think that we should use this as we want all recharge to be released as baseflow.
self.pcr_modflow.setDrain(drain_elevation, drain_conductance, 1)
self.pcr_modflow.setDrain(drain_elevation, drain_conductance, 2)
#~ # set the DRN package only to the lowermost layer
#~ self.pcr_modflow.setDrain(drain_elevation, drain_conductance, 1)
#~ self.pcr_modflow.setDrain(pcr.spatial(pcr.scalar(0.0)),pcr.spatial(pcr.scalar(0.0)), 2)
def return_innundation_fraction(self,relative_water_height):
# - fraction of flooded area (0-1) based on the relative_water_height (above the minimum dem)
DZRIV = relative_water_height
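# dzRel00XX is (presumably) the relative elevation below which XX percent of the sub-grid cell area
# lies; the expressions below linearly interpolate the flooded fraction between these percentiles.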
CRFRAC_RIV = pcr.min(1.0,1.00-(self.dzRel0100-DZRIV)*0.10/pcr.max(1e-3,self.dzRel0100-self.dzRel0090) )
CRFRAC_RIV = pcr.ifthenelse(DZRIV<self.dzRel0090,0.90-(self.dzRel0090-DZRIV)*0.10/pcr.max(1e-3,self.dzRel0090-self.dzRel0080),CRFRAC_RIV )
CRFRAC_RIV = pcr.ifthenelse(DZRIV<self.dzRel0080,0.80-(self.dzRel0080-DZRIV)*0.10/pcr.max(1e-3,self.dzRel0080-self.dzRel0070),CRFRAC_RIV )
CRFRAC_RIV = pcr.ifthenelse(DZRIV<self.dzRel0070,0.70-(self.dzRel0070-DZRIV)*0.10/pcr.max(1e-3,self.dzRel0070-self.dzRel0060),CRFRAC_RIV )
CRFRAC_RIV = pcr.ifthenelse(DZRIV<self.dzRel0060,0.60-(self.dzRel0060-DZRIV)*0.10/pcr.max(1e-3,self.dzRel0060-self.dzRel0050),CRFRAC_RIV )
CRFRAC_RIV = pcr.ifthenelse(DZRIV<self.dzRel0050,0.50-(self.dzRel0050-DZRIV)*0.10/pcr.max(1e-3,self.dzRel0050-self.dzRel0040),CRFRAC_RIV )
CRFRAC_RIV = pcr.ifthenelse(DZRIV<self.dzRel0040,0.40-(self.dzRel0040-DZRIV)*0.10/pcr.max(1e-3,self.dzRel0040-self.dzRel0030),CRFRAC_RIV )
CRFRAC_RIV = pcr.ifthenelse(DZRIV<self.dzRel0030,0.30-(self.dzRel0030-DZRIV)*0.10/pcr.max(1e-3,self.dzRel0030-self.dzRel0020),CRFRAC_RIV )
CRFRAC_RIV = pcr.ifthenelse(DZRIV<self.dzRel0020,0.20-(self.dzRel0020-DZRIV)*0.10/pcr.max(1e-3,self.dzRel0020-self.dzRel0010),CRFRAC_RIV )
CRFRAC_RIV = pcr.ifthenelse(DZRIV<self.dzRel0010,0.10-(self.dzRel0010-DZRIV)*0.05/pcr.max(1e-3,self.dzRel0010-self.dzRel0005),CRFRAC_RIV )
CRFRAC_RIV = pcr.ifthenelse(DZRIV<self.dzRel0005,0.05-(self.dzRel0005-DZRIV)*0.04/pcr.max(1e-3,self.dzRel0005-self.dzRel0001),CRFRAC_RIV )
CRFRAC_RIV = pcr.ifthenelse(DZRIV<self.dzRel0001,0.01-(self.dzRel0001-DZRIV)*0.01/pcr.max(1e-3,self.dzRel0001) ,CRFRAC_RIV )
CRFRAC_RIV = pcr.ifthenelse(DZRIV<=0,0, CRFRAC_RIV)
# - minimum value of inundation fraction is the river/channel area
CRFRAC_RIV = pcr.cover(pcr.max(0.0, pcr.min(1.0, pcr.max(CRFRAC_RIV, (self.bankfull_depth*self.bankfull_width/self.cellAreaMap)))), pcr.scalar(0))
# TODO: Improve this concept using Rens's latest scheme
return CRFRAC_RIV
def old_style_reporting(self,currTimeStep):
if self.report == True:
timeStamp = datetime.datetime(currTimeStep.year,\
currTimeStep.month,\
currTimeStep.day,\
0)
# writing daily output to netcdf files
timestepPCR = currTimeStep.timeStepPCR
if self.outDailyTotNC[0] != "None":
for var in self.outDailyTotNC:
self.netcdfObj.data2NetCDF(str(self.outNCDir)+"/"+ \
str(var)+"_dailyTot.nc",\
var,\
pcr2numpy(self.__getattribute__(var),vos.MV),\
timeStamp,timestepPCR-1)
# writing monthly output to netcdf files
# -cummulative
if self.outMonthTotNC[0] != "None":
for var in self.outMonthTotNC:
# introduce variables at the beginning of simulation or
# reset variables at the beginning of the month
if currTimeStep.timeStepPCR == 1 or \
currTimeStep.day == 1:\
vars(self)[var+'MonthTot'] = pcr.scalar(0.0)
# accumulating
vars(self)[var+'MonthTot'] += vars(self)[var]
# reporting at the end of the month:
if currTimeStep.endMonth == True:
self.netcdfObj.data2NetCDF(str(self.outNCDir)+"/"+ \
str(var)+"_monthTot.nc",\
var,\
pcr2numpy(self.__getattribute__(var+'MonthTot'),\
vos.MV),timeStamp,currTimeStep.monthIdx-1)
# -average
if self.outMonthAvgNC[0] != "None":
for var in self.outMonthAvgNC:
# only if an accumulator variable has not been defined:
if var not in self.outMonthTotNC:
# introduce accumulator at the beginning of simulation or
# reset accumulator at the beginning of the month
if currTimeStep.timeStepPCR == 1 or \
currTimeStep.day == 1:\
vars(self)[var+'MonthTot'] = pcr.scalar(0.0)
# accumulating
vars(self)[var+'MonthTot'] += vars(self)[var]
# calculating average & reporting at the end of the month:
if currTimeStep.endMonth == True:
vars(self)[var+'MonthAvg'] = vars(self)[var+'MonthTot']/\
currTimeStep.day
self.netcdfObj.data2NetCDF(str(self.outNCDir)+"/"+ \
str(var)+"_monthAvg.nc",\
var,\
pcr2numpy(self.__getattribute__(var+'MonthAvg'),\
vos.MV),timeStamp,currTimeStep.monthIdx-1)
#
# -last day of the month
if self.outMonthEndNC[0] != "None":
for var in self.outMonthEndNC:
# reporting at the end of the month:
if currTimeStep.endMonth == True:
self.netcdfObj.data2NetCDF(str(self.outNCDir)+"/"+ \
str(var)+"_monthEnd.nc",\
var,\
pcr2numpy(self.__getattribute__(var),vos.MV),\
timeStamp,currTimeStep.monthIdx-1)
# writing yearly output to netcdf files
# -cummulative
if self.outAnnuaTotNC[0] != "None":
for var in self.outAnnuaTotNC:
# introduce variables at the beginning of simulation or
# reset variables at the beginning of the year
if currTimeStep.timeStepPCR == 1 or \
currTimeStep.doy == 1:\
vars(self)[var+'AnnuaTot'] = pcr.scalar(0.0)
# accumulating
vars(self)[var+'AnnuaTot'] += vars(self)[var]
# reporting at the end of the year:
if currTimeStep.endYear == True:
self.netcdfObj.data2NetCDF(str(self.outNCDir)+"/"+ \
str(var)+"_annuaTot.nc",\
var,\
pcr2numpy(self.__getattribute__(var+'AnnuaTot'),\
vos.MV),timeStamp,currTimeStep.annuaIdx-1)
# -average
if self.outAnnuaAvgNC[0] != "None":
for var in self.outAnnuaAvgNC:
# only if an accumulator variable has not been defined:
if var not in self.outAnnuaTotNC:
# introduce accumulator at the beginning of simulation or
# reset accumulator at the beginning of the year
if currTimeStep.timeStepPCR == 1 or \
currTimeStep.doy == 1:\
vars(self)[var+'AnnuaTot'] = pcr.scalar(0.0)
# accumulating
vars(self)[var+'AnnuaTot'] += vars(self)[var]
#
# calculating average & reporting at the end of the year:
if currTimeStep.endYear == True:
vars(self)[var+'AnnuaAvg'] = vars(self)[var+'AnnuaTot']/\
currTimeStep.doy
self.netcdfObj.data2NetCDF(str(self.outNCDir)+"/"+ \
str(var)+"_annuaAvg.nc",\
var,\
pcr2numpy(self.__getattribute__(var+'AnnuaAvg'),\
vos.MV),timeStamp,currTimeStep.annuaIdx-1)
#
# -last day of the year
if self.outAnnuaEndNC[0] != "None":
for var in self.outAnnuaEndNC:
# reporting at the end of the year:
if currTimeStep.endYear == True:
self.netcdfObj.data2NetCDF(str(self.outNCDir)+"/"+ \
str(var)+"_annuaEnd.nc",\
var,\
pcr2numpy(self.__getattribute__(var),vos.MV),\
timeStamp,currTimeStep.annuaIdx-1)
| gpl-3.0 | 8,623,587,767,162,142,000 | 59.795125 | 217 | 0.561718 | false | 4.021576 | false | false | false |
rosswhitfield/mantid | Framework/PythonInterface/test/python/mantid/kernel/ArrayOrderedPairsValidatorTest.py | 3 | 2171 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
import testhelpers
from mantid.kernel import IntArrayOrderedPairsValidator, FloatArrayOrderedPairsValidator, \
IntArrayProperty, FloatArrayProperty
from mantid.api import PythonAlgorithm
class ArrayOrderedPairsValidatorTest(unittest.TestCase):
def test_fail_odd_entries(self):
alg = self._create_alg()
int_vals = [5,7,13]
float_vals = [2.1]
self.assertRaises(ValueError, alg.setProperty, "IntInput", int_vals)
self.assertRaises(ValueError, alg.setProperty, "FloatInput", float_vals)
def test_fail_unordered_pairs(self):
alg = self._create_alg()
int_vals = [5, 18, 4, 2]
float_vals = [2.1, 5.7, 4.3, 1.5]
self.assertRaises(ValueError, alg.setProperty, "IntInput", int_vals)
self.assertRaises(ValueError, alg.setProperty, "FloatInput", float_vals)
def test_pass_ordered_pairs(self):
alg = self._create_alg()
int_vals = [5, 18, 4, 9]
float_vals = [2.1, 5.7, 4.3, 6.7]
testhelpers.assertRaisesNothing(self, alg.setProperty, "IntInput", int_vals)
testhelpers.assertRaisesNothing(self, alg.setProperty, "FloatInput", float_vals)
def _create_alg(self):
"""
Creates a test algorithm with a ordered pairs validator
"""
class TestAlgorithm(PythonAlgorithm):
def PyInit(self):
int_validator = IntArrayOrderedPairsValidator()
self.declareProperty(IntArrayProperty("IntInput", int_validator))
float_validator = FloatArrayOrderedPairsValidator()
self.declareProperty(FloatArrayProperty("FloatInput", float_validator))
def PyExec(self):
pass
alg = TestAlgorithm()
alg.initialize()
return alg
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -2,631,381,919,034,988,500 | 36.431034 | 91 | 0.655919 | false | 3.788831 | true | false | false |
pra85/calibre | src/calibre/ebooks/oeb/transforms/jacket.py | 4 | 8883 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import sys
from xml.sax.saxutils import escape
from lxml import etree
from calibre import guess_type, strftime
from calibre.ebooks.BeautifulSoup import BeautifulSoup
from calibre.ebooks.oeb.base import XPath, XHTML_NS, XHTML, xml2text, urldefrag
from calibre.library.comments import comments_to_html
from calibre.utils.date import is_date_undefined
from calibre.ebooks.chardet import strip_encoding_declarations
JACKET_XPATH = '//h:meta[@name="calibre-content" and @content="jacket"]'
class Jacket(object):
'''
Book jacket manipulation. Remove first image and insert comments at start of
book.
'''
def remove_images(self, item, limit=1):
path = XPath('//h:img[@src]')
removed = 0
for img in path(item.data):
if removed >= limit:
break
href = item.abshref(img.get('src'))
image = self.oeb.manifest.hrefs.get(href, None)
if image is not None:
self.oeb.manifest.remove(image)
img.getparent().remove(img)
removed += 1
return removed
def remove_first_image(self):
deleted_item = None
for item in self.oeb.spine:
removed = self.remove_images(item)
if removed > 0:
self.log('Removed first image')
body = XPath('//h:body')(item.data)
if body:
raw = xml2text(body[0]).strip()
imgs = XPath('//h:img|//svg:svg')(item.data)
if not raw and not imgs:
self.log('Removing %s as it has no content'%item.href)
self.oeb.manifest.remove(item)
deleted_item = item
break
if deleted_item is not None:
for item in list(self.oeb.toc):
href = urldefrag(item.href)[0]
if href == deleted_item.href:
self.oeb.toc.remove(item)
def insert_metadata(self, mi):
self.log('Inserting metadata into book...')
try:
tags = map(unicode, self.oeb.metadata.subject)
except:
tags = []
try:
comments = unicode(self.oeb.metadata.description[0])
except:
comments = ''
try:
title = unicode(self.oeb.metadata.title[0])
except:
title = _('Unknown')
root = render_jacket(mi, self.opts.output_profile,
alt_title=title, alt_tags=tags,
alt_comments=comments)
id, href = self.oeb.manifest.generate('calibre_jacket', 'jacket.xhtml')
item = self.oeb.manifest.add(id, href, guess_type(href)[0], data=root)
self.oeb.spine.insert(0, item, True)
self.oeb.inserted_metadata_jacket = item
def remove_existing_jacket(self):
for x in self.oeb.spine[:4]:
if XPath(JACKET_XPATH)(x.data):
self.remove_images(x, limit=sys.maxint)
self.oeb.manifest.remove(x)
self.log('Removed existing jacket')
break
def __call__(self, oeb, opts, metadata):
'''
Add metadata in jacket.xhtml if specified in opts
If not specified, remove previous jacket instance
'''
self.oeb, self.opts, self.log = oeb, opts, oeb.log
self.remove_existing_jacket()
if opts.remove_first_image:
self.remove_first_image()
if opts.insert_metadata:
self.insert_metadata(metadata)
# Render Jacket {{{
def get_rating(rating, rchar, e_rchar):
ans = ''
try:
num = float(rating)/2
except:
return ans
num = max(0, num)
num = min(num, 5)
if num < 1:
return ans
ans = ("%s%s") % (rchar * int(num), e_rchar * (5 - int(num)))
return ans
def render_jacket(mi, output_profile,
alt_title=_('Unknown'), alt_tags=[], alt_comments='',
alt_publisher=('')):
css = P('jacket/stylesheet.css', data=True).decode('utf-8')
try:
title_str = mi.title if mi.title else alt_title
except:
title_str = _('Unknown')
title = '<span class="title">%s</span>' % (escape(title_str))
series = escape(mi.series if mi.series else '')
if mi.series and mi.series_index is not None:
series += escape(' [%s]'%mi.format_series_index())
if not mi.series:
series = ''
try:
publisher = mi.publisher if mi.publisher else alt_publisher
except:
publisher = ''
try:
if is_date_undefined(mi.pubdate):
pubdate = ''
else:
pubdate = strftime(u'%Y', mi.pubdate.timetuple())
except:
pubdate = ''
rating = get_rating(mi.rating, output_profile.ratings_char, output_profile.empty_ratings_char)
tags = mi.tags if mi.tags else alt_tags
if tags:
tags = output_profile.tags_to_string(tags)
else:
tags = ''
comments = mi.comments if mi.comments else alt_comments
comments = comments.strip()
orig_comments = comments
if comments:
comments = comments_to_html(comments)
try:
author = mi.format_authors()
except:
author = ''
def generate_html(comments):
args = dict(xmlns=XHTML_NS,
title_str=title_str,
css=css,
title=title,
author=author,
publisher=publisher,
pubdate_label=_('Published'), pubdate=pubdate,
series_label=_('Series'), series=series,
rating_label=_('Rating'), rating=rating,
tags_label=_('Tags'), tags=tags,
comments=comments,
footer=''
)
for key in mi.custom_field_keys():
try:
display_name, val = mi.format_field_extended(key)[:2]
key = key.replace('#', '_')
args[key] = escape(val)
args[key+'_label'] = escape(display_name)
except:
# if the val (custom column contents) is None, don't add to args
pass
if False:
print("Custom column values available in jacket template:")
for key in args.keys():
if key.startswith('_') and not key.endswith('_label'):
print(" %s: %s" % ('#' + key[1:], args[key]))
# Used in the comment describing use of custom columns in templates
# Don't change this unless you also change it in template.xhtml
args['_genre_label'] = args.get('_genre_label', '{_genre_label}')
args['_genre'] = args.get('_genre', '{_genre}')
generated_html = P('jacket/template.xhtml',
data=True).decode('utf-8').format(**args)
# Post-process the generated html to strip out empty header items
soup = BeautifulSoup(generated_html)
if not series:
series_tag = soup.find(attrs={'class':'cbj_series'})
if series_tag is not None:
series_tag.extract()
if not rating:
rating_tag = soup.find(attrs={'class':'cbj_rating'})
if rating_tag is not None:
rating_tag.extract()
if not tags:
tags_tag = soup.find(attrs={'class':'cbj_tags'})
if tags_tag is not None:
tags_tag.extract()
if not pubdate:
pubdate_tag = soup.find(attrs={'class':'cbj_pubdata'})
if pubdate_tag is not None:
pubdate_tag.extract()
if output_profile.short_name != 'kindle':
hr_tag = soup.find('hr', attrs={'class':'cbj_kindle_banner_hr'})
if hr_tag is not None:
hr_tag.extract()
return strip_encoding_declarations(
soup.renderContents('utf-8').decode('utf-8'))
from calibre.ebooks.oeb.base import RECOVER_PARSER
try:
root = etree.fromstring(generate_html(comments), parser=RECOVER_PARSER)
except:
try:
root = etree.fromstring(generate_html(escape(orig_comments)),
parser=RECOVER_PARSER)
except:
root = etree.fromstring(generate_html(''),
parser=RECOVER_PARSER)
return root
# }}}
def linearize_jacket(oeb):
for x in oeb.spine[:4]:
if XPath(JACKET_XPATH)(x.data):
for e in XPath('//h:table|//h:tr|//h:th')(x.data):
e.tag = XHTML('div')
for e in XPath('//h:td')(x.data):
e.tag = XHTML('span')
break
| gpl-3.0 | 1,269,841,315,977,730,300 | 32.647727 | 98 | 0.545874 | false | 3.880734 | false | false | false |
Scienziatopazzo/machine-learning | logistic_regression_test.py | 1 | 2071 | from logistic_regression import LogRegression
import random
def random_point():
'''
Returns a random 2-dimensional vector of floats between -1 and +1
'''
return [random.uniform(-1., 1.), random.uniform(-1., 1.)]
def generate_line():
'''
Randomly generates a line from 2 random points in [-1,1]x[-1,1]
and returns the tuple (m, q, inv) for y = mx + q with inv a boolean which decides what side of the line maps to +1
(ignores vertical lines)
'''
while (True):
pointA = random_point()
pointB = random_point()
if ((pointB[0] - pointA[0]) != 0):
break
m = (pointB[1] - pointA[1]) / (pointB[0] - pointA[0])
q = pointA[1] - m*pointA[0]
inv = bool(random.getrandbits(1))
return (m, q, inv)
def compute_y(line, point):
'''
Takes an (m, q, inv) tuple representing a line and takes a point, computes y
Returns 1 if the point is over the line, returns -1 if it's under it
'''
if (point[1] >= (line[0]*point[0] + line[1])):
if (line[2]):
return 1
else:
return -1
else:
if (line[2]):
return -1
else:
return 1
def generate_dataset(line, n):
'''
Takes an (m, q, inv) tuple representing a line and n=total number of datapoints to generate
Returns a length n list of tuples (x, y) with x a random vector and y=f(x)
'''
data = []
for c in range(n):
x = random_point()
y = compute_y(line, x)
data.append((x, y))
return data
def experiment(n):
l = LogRegression(2, 0.01)
total_Eout = 0.0
total_epochs = 0
for run in range(100):
line = generate_line()
data = generate_dataset(line, n)
l.reset(data)
l.gradient_descent(0.01)
total_epochs += l.epochs
new_data = generate_dataset(line, n*10)
total_Eout += l.cross_entropy_error(new_data)
avg_Eout = total_Eout / 100
avg_epochs = total_epochs / 100
return (avg_Eout, avg_epochs)
print(experiment(100))
| mit | -4,998,428,522,859,220,000 | 26.613333 | 119 | 0.571222 | false | 3.340323 | false | false | false |
ojimac/RenoirKensaku-by-Titanium | scripts/crowl.py | 1 | 2707 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib
import chardet
from pyquery import PyQuery as pq
#urls = [
# 'http://standard.navitime.biz/renoir/AroundMapSearch.act?acode=13&count=127',
# 'http://standard.navitime.biz/renoir/AroundMapSearch.act?page=1&count=127&acode=13&cond=0.0.0.0.0.0&isDefaultSearch=false',
# 'http://standard.navitime.biz/renoir/AroundMapSearch.act?page=2&count=127&acode=13&cond=0.0.0.0.0.0&isDefaultSearch=false',
# 'http://standard.navitime.biz/renoir/AroundMapSearch.act?page=3&count=127&acode=13&cond=0.0.0.0.0.0&isDefaultSearch=false',
# 'http://standard.navitime.biz/renoir/AroundMapSearch.act?page=4&count=127&acode=13&cond=0.0.0.0.0.0&isDefaultSearch=false',
# 'http://standard.navitime.biz/renoir/AroundMapSearch.act?page=5&count=127&acode=13&cond=0.0.0.0.0.0&isDefaultSearch=false',
# 'http://standard.navitime.biz/renoir/AroundMapSearch.act?page=6&count=127&acode=13&cond=0.0.0.0.0.0&isDefaultSearch=false',
# 'http://standard.navitime.biz/renoir/AroundMapSearch.act?page=7&count=127&acode=13&cond=0.0.0.0.0.0&isDefaultSearch=false',
# 'http://standard.navitime.biz/renoir/AroundMapSearch.act?page=8&count=127&acode=13&cond=0.0.0.0.0.0&isDefaultSearch=false',
# 'http://standard.navitime.biz/renoir/AroundMapSearch.act?page=9&count=127&acode=13&cond=0.0.0.0.0.0&isDefaultSearch=false',
# 'http://standard.navitime.biz/renoir/AroundMapSearch.act?page=10&count=127&acode=13&cond=0.0.0.0.0.0&isDefaultSearch=false',
# 'http://standard.navitime.biz/renoir/AroundMapSearch.act?page=11&count=127&acode=13&cond=0.0.0.0.0.0&isDefaultSearch=false',
# 'http://standard.navitime.biz/renoir/AroundMapSearch.act?page=12&count=127&acode=13&cond=0.0.0.0.0.0&isDefaultSearch=false',
# 'http://standard.navitime.biz/renoir/AroundMapSearch.act?page=13&count=127&acode=13&cond=0.0.0.0.0.0&isDefaultSearch=false',
#]
urls = ['http://standard.navitime.biz/renoir/AroundMapSearch.act?acode=13&count=127']
if __name__ == '__main__':
detected = []
for url in urls:
data = ''.join(urllib.urlopen(url).readlines())
guess = chardet.detect(data)
result = dict(url = url, data = data, **guess)
detected.append(result)
result = []
for p in detected:
unicoded = p['data'].decode(p['encoding'])
d = pq(unicoded)
for item in d.find('.item'):
shop_name = pq(item).find('.spotName a').text();
shop_detail_url = pq(item).find('.spotName a').eq(1).attr.href
address = pq(item).find('.f12s').eq(0).text()
tel = pq(item).find('.f12s').eq(1).text()
print shop_name
print 'http://standard.navitime.biz/renoir/' + shop_detail_url
print address
print tel
print '----'
| apache-2.0 | -4,669,841,154,816,645,000 | 55.395833 | 127 | 0.700776 | false | 2.511132 | false | false | false |
csdms-contrib/gc2d | python/src/models/gc2d.py | 1 | 76335 | #!/usr/bin/env python
import sys
import getopt
import numpy
import time
import scipy
import logging
from scipy import interpolate
from scipy import signal
from scipy.io.numpyio import fwrite
# Available Mass Balance
class MassBalance:
( BAD_VAL ,
ZERO_BALANCE ,
CONSTANT_ELA ,
ELA_LOWERING ,
ELA_TIME_SERIES ,
EXTERNAL_FUNC ,
ELA_LOWERING2 ,
BALANCE_FILE ,
     D18O_TIME_SERIES ) = range( 9 )
class BoundaryCond:
( BAD_VAL ,
ICE_FREE_BOUND ,
ZERO_FLUX_BOUND ,
CONST_FLUX_BOUND ,
SURF_ELEV_BOUND ,
SURF_SLOPE_BOUND ) = range( 6 )
class Parameters:
   g            = numpy.longdouble(9.81)                 # gravitational acceleration
rhoI = numpy.longdouble(917) # density of ice
rhoW = numpy.longdouble(1000) # density of water
glensA = numpy.longdouble( (6.8e-15)*3.15e7/(1e9) ) # Patterson, 1994; MacGregor, 2000
day = numpy.longdouble(0.00274) # length of a day in years
# Time
t = numpy.longdouble(0) # set time to zero
tMax = numpy.longdouble(100000) # maximum simulation time in years
dtMax = numpy.longdouble(0.4 * 365*day) # maximum timestep in years
dtDefault = numpy.longdouble(0.4 * 365*day) # timestep if VARIABLE_DT_TOGGLE==0
# Attractor Sliding -- only if ICESLIDE_TOGGLE==1 (generally used)
UsChar = numpy.longdouble(10)
taubChar = numpy.longdouble(100000)
# Glacier Properties
MinGlacThick = numpy.longdouble(1)
WEST_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND
EAST_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND
NORTH_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND
SOUTH_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND
   MASS_BALANCE_TOGGLE = MassBalance.ELA_LOWERING    # select climate scenario   (off|on|select)
initELA = numpy.longdouble(3000)
gradBz = numpy.longdouble(0.01)
maxBz = numpy.longdouble(2)
ELAStepSize = numpy.longdouble(-50)
ELAStepInterval = numpy.longdouble(500)
tmin = numpy.longdouble(200) # Years, spin-up time
# Standard Sliding -- used if ICESLIDE_TOGGLE==2 (generally not used)
B = numpy.longdouble(0.0012) # m/(Pa*yr) -- MacGregor, 2000
DepthToWaterTable = numpy.longdouble(20) # distance from ice surface to water table
MaxFloatFraction = numpy.longdouble(80) # limits water level in ice
Hpeff = numpy.longdouble(20) # effective pressure (meters of water)
# Avalanching
angleOfRepose = numpy.longdouble(30)
avalanchFreq = numpy.longdouble(3) # average number per year
# Calving
seaLevel = numpy.longdouble(-100) # meters
calvingCoef = numpy.longdouble(2) # year^-1
# Thermal
c = numpy.longdouble(2060) # specific heat capacity (J/(kg*K))
Qg = numpy.longdouble(0.05*3.15e7) # Geothermal heat flux (W/m^2)*seconds/year = (J/year)/(m^2)
gradTz = numpy.longdouble(-0.0255) # Geothermal Gradient
# Available Boundary Conditions
ICE_FREE_BOUND = 1 # Ice Free Boundary
ZERO_FLUX_BOUND = 2 # Zero Ice Flux
CONST_FLUX_BOUND = 3 # Constant Ice Flux
SURF_ELEV_BOUND = 4 # Constant Surface Elevation
SURF_SLOPE_BOUND = 5 # Continuous Ice Surface Slope
#g = numpy.longdouble(9.81) # gravitional acceleration
#rhoI = numpy.longdouble(917) # density of ice
#glensA = numpy.longdouble( (6.8e-15)*3.15e7/(1e9) ) # Patterson, 1994; MacGregor, 2000
# Attractor Sliding -- only if ICESLIDE_TOGGLE==1 (generally used)
#UsChar = numpy.longdouble(10)
#taubChar = numpy.longdouble(100000)
# Glacier Properties
#MinGlacThick = numpy.longdouble(1)
#WEST_BC_TOGGLE = ICE_FREE_BOUND
#EAST_BC_TOGGLE = ICE_FREE_BOUND
#NORTH_BC_TOGGLE = ICE_FREE_BOUND
#SOUTH_BC_TOGGLE = ICE_FREE_BOUND
# Available Mass Balance
ZERO_BALANCE = 1 # Constant Ice Flux
CONSTANT_ELA = 2 # Ice Free Boundary
ELA_LOWERING = 3 # Zero Ice Flux
ELA_TIME_SERIES = 4 # Continuous Ice Surface Slope
EXTERNAL_FUNC = 5 # Constant Surface Elevation
ELA_LOWERING2 = 6 # Zero Ice Flux
BALANCE_FILE = 7 # Zero Ice Flux
D18O_TIME_SERIES = 8 # Load d18O record and convert to ELA history
#MASS_BALANCE_TOGGLE = ELA_LOWERING # select climate scenerio (off|on|select)
#initELA = numpy.longdouble(3000)
#gradBz = numpy.longdouble(0.01)
#maxBz = numpy.longdouble(2)
#ELAStepSize = numpy.longdouble(-50)
#ELAStepInterval = numpy.longdouble(500)
#tmin = numpy.longdouble(200) # Years, spin-up time
def compress_grid( H , Zb , COMPRESS_TOGGLE=False , RESTART_TOGGLE=0 ):
# COMPRESS - ONLY SIMULATE SUB-RECTANGLE THAT CONTAINS ICE
if COMPRESS_TOGGLE and H.max() > 1 and RESTART_TOGGLE != 2:
H_FullSpace = H.copy()
Zb_FullSpace = Zb.copy()
if THERMAL_TOGGLE:
Ts_FullSpace = Ts.copy()
Tb_FullSpace = Tb.copy()
Tm_FullSpace = Tm.copy()
#[indrw,indcl] = find(H ~= 0);
indrw,indcl = numpy.where( H!=0 )
mxrw,mxcl = Zb.shape
mnrw = max( 0 , min(indrw) - 2 )
mxrw = min( mxrw , max(indrw) + 2 )
mncl = max( 0 , min(indcl) - 2 )
mxcl = min( mxcl , max(indcl) + 2 )
H = H [ mnrw:mxrw , mncl:mxcl ]
Zb = Zb[ mnrw:mxrw , mncl:mxcl ]
Zi = Zb + numpy.choose( H<0 , (H,0) )
#Zi = Zb + numpy.choose( numpy.less(H,0) , (H,0) )
#Zi = Zb + max( H, 0 ) ;
rws,cls = H.shape
if THERMAL_TOGGLE:
Ts = Ts[ mnrw:mxrw , mncl:mxcl ]
Tb = Tb[ mnrw:mxrw , mncl:mxcl ]
Tm = Tm[ mnrw:mxrw , mncl:mxcl ]
mxrws,mxcls = Zb_FullSpace.shape
rws,cls = Zb.shape
compression_ratio = (mxcls*mxrws)/(cls*rws)
COMPRESSED_FLAG = 1
else:
#Zi = Zb + max( H, 0 ) # included for restarts
Zi = Zb + numpy.choose( H<0 , (H,0) )
compression_ratio = 1.
COMPRESSED_FLAG = 0
return ( Zi , compression_ratio , COMPRESSED_FLAG )
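# Illustrative sketch (added for clarity; not part of the original gc2d
# interface and not called by the model): the cropping idea used above keeps
# only the bounding box of glacierized cells plus a two-cell pad.  Assumes H
# contains at least one non-zero cell.
def _example_ice_bounding_box( H ):
   rows , cols = numpy.where( H != 0 )
   r0 , r1 = max( 0 , rows.min()-2 ) , min( H.shape[0] , rows.max()+2 )
   c0 , c1 = max( 0 , cols.min()-2 ) , min( H.shape[1] , cols.max()+2 )
   return H[r0:r1,c0:c1]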
def add_halo( x ):
x_ext = numpy.concatenate( ( x[:,0,numpy.newaxis] , x , x[:,-1,numpy.newaxis] ) , axis=1 )
x_ext = numpy.concatenate( ( [x_ext[0,:]] , x_ext , [x_ext[-1,:]] ) )
return x_ext
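# Minimal usage sketch for add_halo (illustrative only; nothing in the model
# calls this helper): the outermost rows/columns are replicated outward, so a
# 2x2 grid becomes a 4x4 grid whose border repeats the nearest original values.
def _example_add_halo():
   a     = numpy.array( [[1.,2.],[3.,4.]] )
   a_ext = add_halo( a )
   assert a_ext.shape == (4,4)
   assert a_ext[0,0] == 1. and a_ext[-1,-1] == 4.
   return a_ext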
def set_bc( H , Zb , Zi , THERMAL_TOGGLE=False , WEST_BC_TOGGLE=ICE_FREE_BOUND , EAST_BC_TOGGLE=ICE_FREE_BOUND , SOUTH_BC_TOGGLE=ICE_FREE_BOUND , NORTH_BC_TOGGLE=ICE_FREE_BOUND ):
######
### MODIFY BOUNDARY CELLS TO ENFORCE BOUNDARY CONDITIONS
# DEFAULT BOUNDARY CONDITION IS ZERO FLUX
H_ext = add_halo( H )
Zb_ext = add_halo( Zb )
Zi_ext = add_halo( Zi )
if THERMAL_TOGGLE:
Ts_ext = add_halo( Ts )
Tb_ext = add_halo( Tb )
Tm_ext = add_halo( Tm )
# WESTERN BOUNDARY CONDTION
if WEST_BC_TOGGLE == SURF_ELEV_BOUND: # Constant Ice Surface Height
      ZiBound    = numpy.mean(Zb[:,0]) + Hbound
H_ext[:,0] = ZiBound - Zb_ext[:,0]
elif WEST_BC_TOGGLE == CONST_FLUX_BOUND: # Constant Ice Flux B.C.
pass
elif WEST_BC_TOGGLE == SURF_SLOPE_BOUND: # Constant Ice Surface Slope
Zi_ext[:,0] = 2*Zi_ext[:,1] - Zi_ext[:,2]
H_ext [:,0] = Zi_ext[:,0] - Zb_ext[:,0]
H_ext [:,0] = numpy.choose( H_ext[:,0]<0 , (H_ext[:,0],0) )
elif WEST_BC_TOGGLE == ICE_FREE_BOUND: # Ice Free Boundary
H_ext[:,0] = 0
# EASTERN BOUNDARY CONDTION
if EAST_BC_TOGGLE == SURF_ELEV_BOUND: # Constant Ice Surface Height
      ZiBound    = numpy.mean(Zb[:,-1]) + Hbound
H_ext[:,-1] = ZiBound - Zb_ext[:,-1]
elif EAST_BC_TOGGLE == CONST_FLUX_BOUND: # Constant Ice Flux B.C.
pass
elif EAST_BC_TOGGLE == SURF_SLOPE_BOUND: # Constant Ice Surface Slope
Zi_ext[:,-1] = 2*Zi_ext[:,-2] - Zi_ext[:,-3]
H_ext [:,-1] = Zi_ext[:,-1] - Zb_ext[:,-1]
H_ext [:,-1] = numpy.choose( H_ext[:,-1]<0 , (H_ext[:,-1],0) )
elif EAST_BC_TOGGLE == ICE_FREE_BOUND: # Ice Free Boundary
H_ext[:,-1] = 0
# SOUTHERN BOUNDARY CONDTION
if SOUTH_BC_TOGGLE == SURF_ELEV_BOUND: # Constant Ice Surface Height
      ZiBound    = numpy.mean(Zb[0,:]) + Hbound
H_ext[0,:] = ZiBound - Zb_ext[0,:]
elif SOUTH_BC_TOGGLE == CONST_FLUX_BOUND: # Constant Ice Flux B.C.
pass
elif SOUTH_BC_TOGGLE == SURF_SLOPE_BOUND: # Constant Ice Surface Slope
Zi_ext[0,:] = 2*Zi_ext[1,:] - Zi_ext[2,:]
H_ext [0,:] = Zi_ext[0,:] - Zb_ext[0,:]
H_ext [0,:] = numpy.choose( H_ext[0,:]<0 , (H_ext[0,:],0) )
elif SOUTH_BC_TOGGLE == ICE_FREE_BOUND: # Ice Free Boundary
H_ext[0,:] = 0
# NORTHERN BOUNDARY CONDTION
if NORTH_BC_TOGGLE == SURF_ELEV_BOUND: # Constant Ice Surface Height
      ZiBound    = numpy.mean(Zb[-1,:]) + Hbound
H_ext[-1,:] = ZiBound - Zb_ext[-1,:]
elif NORTH_BC_TOGGLE == CONST_FLUX_BOUND: # Constant Ice Flux B.C.
pass
elif NORTH_BC_TOGGLE == SURF_SLOPE_BOUND: # Constant Ice Surface Slope
Zi_ext[-1,:] = 2*Zi_ext[-2,:] - Zi_ext[-3,:]
H_ext [-1,:] = Zi_ext[-1,:] - Zb_ext[-1,:]
H_ext [-1,:] = numpy.choose( H_ext[-1,:]<0 , (H_ext[-1,:],0) )
elif NORTH_BC_TOGGLE == ICE_FREE_BOUND: # Ice Free Boundary
H_ext[-1,:] = 0
Zi_ext = Zb_ext + H_ext
return ( H_ext , Zb_ext , Zi_ext )
def difference_grid( A , dx , dy ):
dAdx_ext = ( A[:,1:] - A[:,:-1] ) / dx
dAdy_ext = ( A[1:,:] - A[:-1,:] ) / dy
dAdx = dAdx_ext[1:-1,:]
dAdy = dAdy_ext[:,1:-1]
return ( dAdx , dAdy )
def basal_shear_stress( H_ext , Zi_ext , dx=1. , dy=1. , g=Parameters.g , rhoI=Parameters.rhoI ):
######
### CALCULATE THE BASAL SHEAR STRESS
# forward differences
dZidxX_ext = ( Zi_ext[:,1:] - Zi_ext[:,:-1] ) / dx
dZidyY_ext = ( Zi_ext[1:,:] - Zi_ext[:-1,:] ) / dy
dZidxX = dZidxX_ext[1:-1,:]
dZidyY = dZidyY_ext[:,1:-1]
HX_ext = ( H_ext[:,1:] + H_ext[:,:-1] ) / 2.
HY_ext = ( H_ext[1:,:] + H_ext[:-1,:] ) / 2.
HX = HX_ext[1:-1,:]
HY = HY_ext[:,1:-1]
taubxX_ext = -rhoI * g * HX_ext * dZidxX_ext
taubyY_ext = -rhoI * g * HY_ext * dZidyY_ext
taubxX = taubxX_ext[1:-1,:]
taubyY = taubyY_ext[:,1:-1]
taubxY = ( taubxX_ext[:-1,:-1] + taubxX_ext[:-1,1:] +
taubxX_ext[1: ,:-1] + taubxX_ext[1: ,1:] ) / 4.
taubyX = ( taubyY_ext[:-1,:-1] + taubyY_ext[:-1,1:] +
taubyY_ext[1: ,:-1] + taubyY_ext[1: ,1:] ) / 4.
taubX = numpy.sqrt( taubxX**2 + taubyX**2 )
taubY = numpy.sqrt( taubxY**2 + taubyY**2 )
taubX = numpy.choose( HX>0 , (0,taubX) )
taubY = numpy.choose( HY>0 , (0,taubY) )
   # unit direction components of the driving stress; where the stress is
   # (near) zero the component is set to zero to avoid division by zero
xcmpnt = numpy.choose( numpy.abs(taubX)<1e-5 , ( taubxX / taubX , 0. ) )
ycmpnt = numpy.choose( numpy.abs(taubY)<1e-5 , ( taubyY / taubY , 0. ) )
return ( ( xcmpnt , ycmpnt ) , ( taubX , taubY ) , ( HX , HY ) )
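# Back-of-the-envelope check of the driving stress computed above
# (illustrative only; the scalar arguments are assumed values, not model
# state): tau_b = rhoI * g * H * |dZi/dx|, so a 100 m thick column on a 10%
# surface slope gives roughly 9e4 Pa, the same order as taubChar.
def _example_driving_stress( H=100. , slope=0.1 ):
   return Parameters.rhoI * Parameters.g * H * slope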
def iceflow( taubX , taubY , HX , HY , xcmpnt , ycmpnt , THERMAL_TOGGLE=False , MinGlacThick=1. , glensA=Parameters.glensA ):
######
### CALCULATE ICE VELOCITY DUE TO DEFORMATION
if THERMAL_TOGGLE:
A_ext = numpy.zeros(H_ext.shape , dtype=numpy.longdouble )
ind = nonzero( ravel(H_ext) >= MinGlacThick )
Ts_ext = To + lapseRate*( Zi_ext - Elev0 )
#A_ext(ind) = interp3( eHs, eTs, eTm, eA, H_ext(ind), Ts_ext(ind), Tm_ext(ind) ) ;
try:
put( A_ext , ind , interpolate.interp3d( eHs , eTs , eTm )( take(H_ext,ind) , take(Ts_ext,ind) , take(Tm_ext,ind) ) )
except:
logging.error( "NaN in A, likely H_node exceeds H_glens limits" )
return -1
AX = ( A_ext[1:-1, :-1] + A_ext[1:-1,1: ] ) / 2.
AY = ( A_ext[ :-1,1:-1] + A_ext[1: ,1:-1] ) / 2.
else:
AX = glensA
AY = glensA
# here's the guts of calculating the depth averaged velocity
UdxX = numpy.abs( .4 * AX * taubX*taubX*taubX * HX ) * xcmpnt
UdyY = numpy.abs( .4 * AY * taubY*taubY*taubY * HY ) * ycmpnt
#UdxX = numpy.fix(UdxX*1e6)*1e-6
#UdyY = numpy.fix(UdyY*1e6)*1e-6
return ( UdxX , UdyY )
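# Scalar sketch of the depth-averaged deformation velocity used above
# (illustrative only): with Glen's n=3, Ud = 0.4 * A * tau_b^3 * H, which for
# tau_b = 1e5 Pa and H = 100 m is on the order of 10 m/yr.
def _example_deformation_velocity( taub=1.e5 , H=100. ):
   return 0.4 * Parameters.glensA * taub**3 * H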
def ice_sliding( taubX , taubY , xcmpnt , ycmpnt , THERMAL_TOGGLE=False , FREEZEON_TOGGLE=0 , UsChar=Parameters.UsChar , taubChar=Parameters.taubChar ):
######
### CALCULATE SLIDING VELOCITY
# here's the guts of calculating the sliding velocity
UsxX = numpy.choose( numpy.abs(taubX)<1e-5 , ( UsChar * numpy.exp(1 - taubChar / taubX) * xcmpnt ,
UsChar * numpy.exp(1 - taubChar ) * xcmpnt ) )
UsyY = numpy.choose( numpy.abs(taubY)<1e-5 , ( UsChar * numpy.exp(1 - taubChar / taubY) * ycmpnt ,
UsChar * numpy.exp(1 - taubChar ) * ycmpnt ) )
if THERMAL_TOGGLE and FREEZEON_TOGGLE:
      notFrozen  = numpy.logical_or( Tb_ext > -.5 , Zb_ext < seaLevel )
notFrozenX = ( notFrozen[1:-1, :-1] + notFrozen[1:-1,1: ] ) / 2.
notFrozenY = ( notFrozen[ :-1,1:-1] + notFrozen[1: ,1:-1] ) / 2.
UsxX *= notFrozenX
UsyY *= notFrozenY
return ( UsxX , UsyY )
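# Scalar sketch of the attractor sliding law used above (illustrative only):
# Us = UsChar * exp(1 - taubChar/tau_b) equals UsChar when tau_b == taubChar
# and tends to UsChar*e as tau_b grows large.
def _example_sliding_velocity( taub=1.e5 ):
   return Parameters.UsChar * numpy.exp( 1. - Parameters.taubChar / taub )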
def sum_ice_motion( UdxX , UdyY , UsxX , UsyY ):
UxX = UdxX + UsxX
UyY = UdyY + UsyY
return ( UxX , UyY )
def mass_conservation( H_ext , UxX , UyY , HX , HY , dZidxX , dZidyY , dx=1. , dy=1. , MinGlacThick=1. , WEST_BC_TOGGLE=ICE_FREE_BOUND , EAST_BC_TOGGLE=ICE_FREE_BOUND , SOUTH_BC_TOGGLE=ICE_FREE_BOUND , NORTH_BC_TOGGLE=ICE_FREE_BOUND ):
######
### MASS CONSERVATION -- CONTINUITY
# ensure that no ice is drawn from the rock
#CLASS = H_ext >= MinGlacThick
CLASS = numpy.choose( H_ext>=MinGlacThick , (0.,1.) )
DCLASSx = ( CLASS[1:-1,1: ] - CLASS[1:-1, :-1] ) * numpy.sign( dZidxX )
DCLASSy = ( CLASS[1: ,1:-1] - CLASS[ :-1,1:-1] ) * numpy.sign( dZidyY )
UxX = numpy.choose( numpy.abs(DCLASSx+1)<1e-5 , (UxX,0.) )
UyY = numpy.choose( numpy.abs(DCLASSy+1)<1e-5 , (UyY,0.) )
# calculate both components of the ice flux
qxX = UxX * HX
qyY = UyY * HY
if WEST_BC_TOGGLE == CONST_FLUX_BOUND: qxX[: , 0] = BoundaryFlux
if EAST_BC_TOGGLE == CONST_FLUX_BOUND: qxX[: ,-1] = BoundaryFlux
if SOUTH_BC_TOGGLE == CONST_FLUX_BOUND: qyY[0 , :] = BoundaryFlux
if NORTH_BC_TOGGLE == CONST_FLUX_BOUND: qyY[-1, :] = BoundaryFlux
# here's the guts of the continuity equation
dqdxX = ( qxX[ :,1:] - qxX[: ,:-1] ) / dx
dqdyY = ( qyY[1:, :] - qyY[:-1,: ] ) / dy
dHdt = -dqdxX - dqdyY
return ( dHdt , ( qxX , qyY ) )
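# Minimal 1-D sketch of the continuity step above (illustrative only; the
# fluxes are assumed numbers): the thickening rate is minus the flux
# divergence, dH/dt = -(q_out - q_in)/dx, so more inflow than outflow
# thickens the cell.
def _example_continuity( q_in=5. , q_out=3. , dx=200. ):
   return -( q_out - q_in ) / dx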
def mass_balance( Zi , t , MASS_BALANCE_TOGGLE=MassBalance.ELA_LOWERING , initELA=Parameters.initELA , tmin=Parameters.tmin , ELAStepSize=Parameters.ELAStepSize , ELAStepInterval=Parameters.ELAStepInterval , gradBz=Parameters.gradBz , maxBz=Parameters.maxBz ):
######
### CALCULATE MASS BALANCE
# the imposed mass balance is the imposed climate
# there are many possibilities, here are only a few
# all must populate the 2D matrix Bxy of size = size(Zb)
# with values of net precip/melt rate in m/yr
# define the scalar, ELA (m), for plotting
if MASS_BALANCE_TOGGLE == CONSTANT_ELA:
# Simple ELA, maxBz, gradBz
ELA = initELA
#Bxy = min( maxBz , gradBz * ( Zi - ELA ) )
Bxy = gradBz * ( Zi - ELA )
Bxy = numpy.choose( Bxy>maxBz , (Bxy,maxBz) )
elif MASS_BALANCE_TOGGLE == ELA_LOWERING:
# ELA changing with time experiment
# ELAStepSize = -10 ; # positive/negative values raise/lower ELA
# ELAStepInterval = 500 ;
ELA = initELA + ELAStepSize * max( 0 , numpy.floor( (t-tmin)/ELAStepInterval ) )
Bxy = gradBz * ( Zi - ELA )
Bxy = numpy.choose( Bxy>maxBz , (Bxy,maxBz) )
elif MASS_BALANCE_TOGGLE == ELA_LOWERING2:
# ELA changing with time experiment
tau = numpy.longdouble(25) # intrinsic timescale of ice dynamics
tmin = numpy.longdouble(0) # time to begin ELA modification
initELA = numpy.longdouble(4200) # initial ELA
stepSize = numpy.longdouble(-10) # positive/negative values raise/lower ELA
dELAdt = numpy.longdouble(-0.1)
ELA = initELA + stepSize * max( 0, numpy.floor( (t-tmin) / (8*tau) ) )
Bxy = gradBz * ( Zi - ELA )
Bxy = numpy.choose( Bxy>maxBz , (Bxy,maxBz) )
elif MASS_BALANCE_TOGGLE == EXTERNAL_FUNC:
# external mass balance function
try: Bxy
except NameError:
# Mass Balance 2D Must Return Bxy (2d Matrix)
Bxy = mass_balance_gc2d( t , cellsize , Zi )
nextGetBxy = t + getBxyInterval
else:
if t >= nextGetBxy:
Bxy = mass_balance_gc2d( t , cellsize , Zi )
nextGetBxy = t + getBxyInterval
elif MASS_BALANCE_TOGGLE == ELA_TIME_SERIES or MASS_BALANCE_TOGGLE == D18O_TIME_SERIES:
# ELA time series
ELA = interpolate.interp1d( trecord , ELArecord )( t )
Bxy = gradBz * ( Zi - ELA )
Bxy = numpy.choose( Bxy>maxBz , (Bxy,maxBz) )
elif MASS_BALANCE_TOGGLE == BALANCE_FILE:
# external mass balance file
Bxy = load_dem_var( filenameDEM, 'Bxy' )
ind = nonzero( ravel(abs(Bxy)==min(abs(Bxy))) )
ELA = mean( take( ravel(Zi) , ind ) )
elif MASS_BALANCE_TOGGLE == ZERO_BALANCE:
ELA = 0
Bxy = numpy.zeros( Zb.shape , dtype=numpy.longdouble )
else:
logging.error( "Unrecognized Mass Balance" )
return -1
return ( Bxy , ELA )
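# Scalar sketch of the ELA_LOWERING scenario handled above (illustrative
# only): after the spin-up time the ELA drops by ELAStepSize every
# ELAStepInterval years, and the local balance is gradBz*(Zi - ELA), capped
# at maxBz.
def _example_ela_lowering_balance( Zi=3500. , t=1200. ):
   ELA = Parameters.initELA + Parameters.ELAStepSize * max( 0 , numpy.floor( (t-Parameters.tmin) / Parameters.ELAStepInterval ) )
   Bxy = min( Parameters.maxBz , Parameters.gradBz * ( Zi - ELA ) )
   return ( Bxy , ELA )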
def get_timestep( H , Zi_ext , Zi , dHdt , Bxy , dtMax=None , dtDefault=None ):
#######
### CALCULATE TIMESTEP
# now that we know the rate of change in ice surface heights due to
# ice motion and due to precipitation or melt we need to know over
# what period of time we can project forward with these rates and
# maintain stability of the ice surface. The basic idea here is that
   # we don't want to take a timestep any longer than it would take to
# reverse the ice surface slope between two cells, such that ice
# should be flowing in the other direction. In fact, let's make our
   # timestep much less than that.
# this calculation sets the timestep such that the change
# in ice surface elevation nowhere exceeds a set fraction
# of the local standard deviation in ice surface elevations
# include ice changes by precip and melt
dHdtTot = dHdt + Bxy
adHdt = numpy.abs(dHdtTot)
# something like standard deviation of 3x3 cell areas around each cell
filt = numpy.ones( (3,3) , dtype=numpy.longdouble ) / 9.
ZiMean = filter2d( filt , Zi_ext , 'valid' )
dHmax = numpy.sqrt( filter2d( filt, (ZiMean - Zi)**2 ) )
# only consider cells with ice thickness > 10 m
isGlac = H>10.
# find limiting timestep for each considered cell
ind = ( numpy.logical_and( numpy.logical_and( adHdt!=0 , dHmax!=0 ) , isGlac!=0 ) ).flatten().nonzero()
if ind[0].size>0:
dtLimits = dHmax.flatten()[ ind ] / adHdt.flatten()[ ind ]
dt = dtLimits.min()
idt = ( dtLimits==dt ).nonzero()
#ind = find( adHdt~=0 & dHmax~=0 & isGlac~=0 ) ;
#dtLimits = dHmax(ind)./adHdt(ind) ;
#[dt, idt] = min( dtLimits ) ;
# locate the x and y position of limiting cell for plotting
#[rwDT,clDT] = ind2sub( size(adHdt), ind(idt) ) ;
# limit timestep to dtMax or some fraction of the calculated timestep
if dtMax is not None :
dt = min( dtMax, dt/2. )
else:
# catch an error, (e.g. if H<10 in all cells )
#if dt.size==0:
dt = dtDefault
#dt = numpy.fix(dt*1e6)*1e-6
return dt
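# Scalar sketch of the stability limit above (illustrative only; dHmax and
# dHdt are assumed values): each glacierized cell limits the step to
# dHmax/|dH/dt|; the code then halves the smallest such limit and caps it
# at dtMax.
def _example_timestep_limit( dHmax=2. , dHdt=5. , dtMax=Parameters.dtMax ):
   return min( dtMax , ( dHmax / abs(dHdt) ) / 2. )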
def update_vars( H , Zb , Zi , Bxy , qxX , qyY , dHdt , t , dt , conserveIce , dx=1. , dy=1. ):
t = t + dt
# numTimeSteps = numTimeSteps + 1 ;
# timeSteps(numTimeSteps) = dt ;
# increase in ice thicknesses due to precip
Bxy_pos = numpy.choose( Bxy>0 , (0,Bxy) )
H += Bxy_pos * dt
# change ice thicknesses due to ice motion
H += dHdt*dt
# decrease in ice thicknesses due to melt
Bxy_neg = numpy.choose( Bxy<0 , (0,Bxy) )
Bxy_neg = - numpy.choose( H<-Bxy_neg , (-Bxy_neg,H) )
H += Bxy_neg * dt
# record ice addition or removal by climate
snowFall = ( Bxy_neg + Bxy_pos ) * dt
conserveIce = conserveIce + snowFall.sum(axis=0).sum()
# record ice flux through boundaries
qbound = qyY[0,:].sum(axis=0).sum() - qyY[-1,:].sum(axis=0).sum() + qxX[:,0].sum(axis=0).sum() - qxX[:,-1].sum(axis=0).sum()
conserveIce = conserveIce + dt * qbound / dx
Zi = Zb + numpy.choose( H<0 , (H,0) )
if numpy.isnan(Zi).any():
#save workspacedump
logging.error( "NaN in ice thickness" )
return -1
return ( t , H , Zi , conserveIce )
def avalanche( H , Zb , Zi , dx , angleOfRepose=30. ):
######
### AVALANCHE SNOW OFF OF STEEP SURFACES
# move ice downslope until the ice surface is everywhere
   # less than or near the angle of repose
rws,cls = Zb.shape
dHRepose = dx*numpy.tan(angleOfRepose*numpy.pi/180.)
Ho = numpy.choose( H<0 , (H,0) )
while True:
dZidx_down = numpy.zeros( (rws,cls) , dtype=numpy.longdouble )
dZidx_up = numpy.zeros( (rws,cls) , dtype=numpy.longdouble )
dZidx_down[:,1:] = numpy.choose( Zi[:,1:] < Zi[:,:-1] , ( Zi[:,1:] - Zi[:,:-1] , 0 ) )
dZidx_up [:,:-1] = numpy.choose( Zi[:,:-1] < Zi[:,1:] , ( Zi[:,:-1] - Zi[:,1:] , 0 ) )
dZidx = numpy.choose( dZidx_up > dZidx_down , ( dZidx_down , dZidx_up ) )
dZidy_left = numpy.zeros( (rws,cls) , dtype=numpy.longdouble )
dZidy_right = numpy.zeros( (rws,cls) , dtype=numpy.longdouble )
dZidy_left [1:,:] = numpy.choose( Zi[1:,:] < Zi[:-1,:] , ( Zi[1:,:] - Zi[:-1,:] , 0 ) )
dZidy_right[:-1,:] = numpy.choose( Zi[:-1,:] < Zi[1:,:] , ( Zi[:-1,:] - Zi[1:,:] , 0 ) )
dZidy = numpy.choose( dZidy_left > dZidy_right , ( dZidy_right , dZidy_left ) )
grad = numpy.sqrt( dZidx**2 + dZidy**2 )
gradT = dZidy_left + dZidy_right + dZidx_down + dZidx_up
gradT = numpy.choose( gradT==0 , (gradT,1) )
grad = numpy.choose( Ho <0.1 , (grad ,0) )
mxGrad = grad.max()
if mxGrad <= 1.1*dHRepose:
break
delH = numpy.choose( grad<dHRepose , ( ( grad-dHRepose)/3. , 0 ) )
Htmp = Ho.copy()
Ho = numpy.choose( Htmp<delH , ( Htmp-delH , 0 ) )
delH = Htmp - Ho
delHdn = numpy.zeros( (rws,cls) , dtype=numpy.longdouble )
delHup = numpy.zeros( (rws,cls) , dtype=numpy.longdouble )
delHlt = numpy.zeros( (rws,cls) , dtype=numpy.longdouble )
delHrt = numpy.zeros( (rws,cls) , dtype=numpy.longdouble )
delHup[:,1: ] = delH[:, :-1] * dZidx_up [:, :-1] / gradT[:, :-1]
delHdn[:, :-1] = delH[:,1: ] * dZidx_down[:,1: ] / gradT[:,1: ]
delHrt[1: ,:] = delH[ :-1,:] * dZidy_right[ :-1,:] / gradT[ :-1,:]
delHlt[ :-1,:] = delH[1: ,:] * dZidy_left [1: ,:] / gradT[1: ,:]
Ho = Ho + delHdn + delHup + delHlt + delHrt
Ho = numpy.choose( Ho<0 , (Ho,0) )
Zi = Zb + Ho
#H = Ho + (H<0).*H ;
H = Ho + numpy.choose( H<0 , (0,H) )
return H
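# Scalar sketch of the avalanching threshold used above (illustrative only):
# ice is pushed downslope until no cell-to-cell surface drop much exceeds
# dHRepose = dx*tan(angleOfRepose); for dx = 200 m and 30 degrees that is
# about 115 m of relief per cell.
def _example_repose_threshold( dx=200. , angleOfRepose=30. ):
   return dx * numpy.tan( angleOfRepose * numpy.pi / 180. )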
def calve( H , dt , CALVING_TOGGLE=True ):
######
### CALVING GLACIER FRONT
if CALVING_TOGGLE:
# one reason this is difficult is that the height of ice in the cell
# is really just recording the volume of ice, the position of the
      # margin in the cell, not the actual ice height.  Here flotation
# height is assumed (or higher if necessary to account for ice volume)
Hold = H.copy()
calvedIce = 0
      # count time backwards with a shortened timestep until the whole
      # timestep used during this iteration has been simulated
dtTot = dt
while dtTot > 0:
# find the calving front, aka the wet glacier margin
G = H > 1
W = numpy.logical_and( G==0 , Zb <= seaLevel )
filt = numpy.array( [[0,1,0],[1,1,1],[0,1,0]] , dtype=numpy.longdouble )
Wfilt = filter2d( filt , W )
Wfilt[:,(0,-1)] = Wfilt[:,(2,-3)]
Wfilt[(0,-1),:] = Wfilt[(2,-3),:]
         wetGmargin = G * Wfilt > 0
         indWGM = wetGmargin.ravel().nonzero()[0]
# if calving front exists, find water depth, ensure it's positive
if indWGM.size>0:
WDmarg = seaLevel - Zb.flatten()[indWGM]
WDmarg = numpy.choose( WDmarg<0 , (WDmarg,0) )
            ind    = (WDmarg!=0).nonzero()[0]
            indWGM = numpy.take( indWGM , ind )
            WDmarg = numpy.take( WDmarg , ind )
#WDmarg = max( 0, seaLevel - Zb(indWGM) ) ;
#ind = find( WDmarg == 0 ) ;
#indWGM(ind) = [] ;
#WDmarg(ind) = [] ;
# if calving front exists, remove some ice
if indWGM.size>0:
# ice thickness in calving cells
Hmarg = H.flatten()[indWGM]
Hmarg = numpy.choose( Hmarg<WDmarg/0.917 , (Hmarg,WDmarg/0.917) )
# a new timestep is calculated such that the calving rate times the
# timesstep does not exceed the total contents of any calving cell.
dLinCalvdt = calvingCoef * WDmarg # front migration rate
dVolCalvdt = dx * dLinCalvdt * Hmarg # rate of volume calved
volAvailMarg = dx * dx * H.flatten()[indWGM] # ice volume available
calvDt = min( dtTot, ( volAvailMarg / dVolCalvdt ).min() ) # calving timestep
# remove this calving timestep from total time to calve
dtTot = dtTot - calvDt
# convert the volume calved to ice thickness and remove
calve = dVolCalvdt * calvDt / ( dx * dx )
H[indWGM] = H[indWGM] - calve
# record total volume calved for posterity
            calvedIce = calvedIce + calve.sum(axis=0).sum() * dx * dx
else:
dtTot = 0
# record ice removal by calving for conservation test
conserveIce = conserveIce + ( H - Hold ).sum(axis=0).sum()
def print_watch_point( fd , x ):
y = numpy.double( x )
fwrite( fd , y.size , y )
fd.flush()
def filter2d( b , x , shape='same' ):
y = scipy.signal.convolve( b ,x , mode=shape )
return y
def load_dem( file ):
vars = scipy.io.loadmat( file )
cellsize = numpy.longdouble(vars['cellsize'])
easting = numpy.longdouble(vars['easting'])
northing = numpy.longdouble(vars['northing'])
topo = numpy.longdouble(vars['topo'])
n_rows , n_cols = topo.shape
logging.info( 'Shape of topo is %d by %d' , n_rows , n_cols )
logging.info( 'Shape of easting is %d' , easting.size )
logging.info( 'Shape of northing is %d' , northing.size )
if easting.size != n_cols:
sys.exit( 'Easting does not match dimension of topo (%d != %d)' % (easting.size,n_cols) )
if northing.size != n_rows:
sys.exit( 'Northing does not match dimension of topo (%d != %d)' % (northing.size,n_rows) )
return ( topo , easting , northing , cellsize )
def load_dem_var( file , val_s ):
vars = scipy.io.loadmat( file )
if vars.has_key( val_s ):
var = vars[val_s]
else:
var = None
return var
def load_input_args( ):
CLEAR_FIGURE = 1
CONTOUR_INTERVAL = 50.
DEBUG_TOGGLE = 0
DT_LIMIT = 0
ELA_CONTOUR = 1.
ICE_CONTOUR = 1.
NEW_FIGURE = 0
QUIVER_VECS = 0
RECONSTRUCT = 0
SUBFIGURE = 0
THERMAL_CONTOUR = 0
return 1
class Usage( Exception ):
def __init__( self , msg ):
self.msg = msg
def old_gc2d( argv=None , inputFile='Animas_200.mat' ):
if argv is None:
argv = sys.argv
try:
try:
opts, args = getopt.getopt( argv[1:] , "h" , ["help"] )
except getopt.error, msg:
raise Usage(msg)
except Usage, err:
print >> sys.stderr, err.msg
print >> sys.stderr, "for help use --help"
return 2
RESTART_TOGGLE = 0
######
### Load a saved state
if RESTART_TOGGLE == 0 or RESTART_TOGGLE == 3: # LOAD A SAVED STATE
# CODE BEHAVIOR TOGGLES
# toggles turn on/off segments of the code or select
# between multiple possibilities for a given process
# values can be reset in INIT_COND segment
GUISTART_TOGGLE = 0 # started simulation with the gui (off|on)
SAVE_TOGGLE = 1 # saving (off|on)
PLOT_TOGGLE = 1 # plotting (off|on)
REPORT_TOGGLE = 1 # reporting (off|on)
COMPRESS_TOGGLE = 0 # only simulate area with ice (off|on)
VARIABLE_DT_TOGGLE = 1 # state dependent time step (off|on)
INIT_COND_TOGGLE = 1 # load DEM and climate (synth|valley|sheet)
GENERIC_ICE_TOGGLE = 0 # start with generic ice surface (off|on)
ICEFLOW_TOGGLE = 1 # ice motion by deformation (off|on)
ICESLIDE_TOGGLE = 0 # ice motion by sliding (off|on|select)
      THERMAL_TOGGLE          = 0           # temp dependence of flow     (off|on)
FREEZEON_TOGGLE = 0 # basal ice freeze to bed (off|on)
      AVALANCH_TOGGLE         = 0           # avalanche off steep surfaces (off|on)
ERODE_TOGGLE = 0 # erode the bed (off|on|select)
CALVING_TOGGLE = 0 # calving front (off|on)
CRN_TOGGLE = 0 # CRN accumulation (off|on)
# Available Mass Balance
ZERO_BALANCE = 1 # Constant Ice Flux
CONSTANT_ELA = 2 # Ice Free Boundary
ELA_LOWERING = 3 # Zero Ice Flux
ELA_TIME_SERIES = 4 # Continuous Ice Surface Slope
EXTERNAL_FUNC = 5 # Constant Surface Elevation
ELA_LOWERING2 = 6 # Zero Ice Flux
BALANCE_FILE = 7 # Zero Ice Flux
D18O_TIME_SERIES = 8 # Load d18O record and convert to ELA history
      MASS_BALANCE_TOGGLE = ELA_LOWERING    # select climate scenario   (off|on|select)
# Available Boundary Conditions
ICE_FREE_BOUND = 1 # Ice Free Boundary
ZERO_FLUX_BOUND = 2 # Zero Ice Flux
CONST_FLUX_BOUND = 3 # Constant Ice Flux
SURF_ELEV_BOUND = 4 # Constant Surface Elevation
SURF_SLOPE_BOUND = 5 # Continuous Ice Surface Slope
WEST_BC_TOGGLE = ICE_FREE_BOUND # boundary condition (no ice|reflect|no flow)
EAST_BC_TOGGLE = ICE_FREE_BOUND # boundary condition (no ice|reflect|no flow)
SOUTH_BC_TOGGLE = ICE_FREE_BOUND # boundary condition (no ice|reflect|no flow)
NORTH_BC_TOGGLE = ICE_FREE_BOUND # boundary condition (no ice|reflect|no flow)
# OUTPUT BEHAVIOR
plotInterval = 60 * 120 # seconds
saveInterval = 100 # whole years
reportInterval = 30 # seconds
nextPlot = 0 # initialize to plot on first timestep
nextSave = 0 # initialize to save on first timestep
nextReport = 0 # initialize to report on first timestep
outputFile = 'savetmp'
######
### Set numerical and physical constants
# Constants
      g         = numpy.longdouble(9.81)                    # gravitational acceleration
rhoI = numpy.longdouble(917) # density of ice
rhoW = numpy.longdouble(1000) # density of water
day = numpy.longdouble(0.00274) # length of a day in years
# Time
t = numpy.longdouble(0) # set time to zero
tMax = numpy.longdouble(100000) # maximum simulation time in years
dtMax = numpy.longdouble(0.4 * 365*day) # maximum timestep in years
dtDefault = numpy.longdouble(0.4 * 365*day) # timestep if VARIABLE_DT_TOGGLE==0
# Glacier Properties
MinGlacThick = numpy.longdouble(1)
# Ice Deformation
glensA = numpy.longdouble((6.8e-15)*3.15e7/(1e9)) # Patterson, 1994; MacGregor, 2000
# Attractor Sliding -- only if ICESLIDE_TOGGLE==1 (generally used)
UsChar = numpy.longdouble(10)
taubChar = numpy.longdouble(100000)
# Standard Sliding -- used if ICESLIDE_TOGGLE==2 (generally not used)
B = numpy.longdouble(0.0012) # m/(Pa*yr) -- MacGregor, 2000
DepthToWaterTable = numpy.longdouble(20) # distance from ice surface to water table
MaxFloatFraction = numpy.longdouble(80) # limits water level in ice
Hpeff = numpy.longdouble(20) # effective pressure (meters of water)
# Avalanching
angleOfRepose = numpy.longdouble(30)
avalanchFreq = numpy.longdouble(3) # average number per year
# Calving
seaLevel = numpy.longdouble(-100) # meters
calvingCoef = numpy.longdouble(2) # year^-1
# Thermal
c = numpy.longdouble(2060) # specific heat capacity (J/(kg*K))
Qg = numpy.longdouble(0.05*3.15e7) # Geothermal heat flux (W/m^2)*seconds/year = (J/year)/(m^2)
gradTz = numpy.longdouble(-0.0255) # Geothermal Gradient
# Mass Balance
initELA = numpy.longdouble(4500)
initELA = numpy.longdouble(3000)
gradBz = numpy.longdouble(0.01)
maxBz = numpy.longdouble(2)
ELAStepSize = numpy.longdouble(-50)
ELAStepInterval = numpy.longdouble(500)
tmin = numpy.longdouble(200) # Years, spin-up time
######
### RELOAD INPUT ARGUMENTS
#load inputArgs
inputArgs = load_input_args
#if ( GUISTART_TOGGLE & exist('guiSimParams.mat','file') )
# load guiSimParams
# delete guiSimParams.mat
# clear newInitFile
#elseif ( ~GUISTART_TOGGLE & exist( './guiPlotParams.mat', 'file' ) )
# delete guiPlotParams.mat
#end
######
### INITIALIZE COUNTERS
# numTimeSteps = 0 ;
# timeSteps = zeros(1000000,1) ;
######
### INITIALIZE BED and ICE TOPOGRAPHY, and CLIMATE VARIABLES
# Must define topo, cellsize, dx, and dy
if INIT_COND_TOGGLE:
### .mat file contains: 'topo' = matrix of bed elevations and 'cellsize',
### both in meters. 'easting' and 'northing' are included for plotting
if INIT_COND_TOGGLE == 1: # Valley glaciers
# filenameDEM = 'Yosemite200_rot35_400x650' ;
# filenameDEM = 'Nederland100' ;
# filenameDEM = 'KingsCanyon200Rot256x256shift' ;
# filenameDEM = 'sample200' ;
# filenameDEM = 'animas_200' ;
# filenameDEM = '4J_newDEM_200' ;
# filenameDEM = 'reproj4j_200' ;
filenameDEM = inputFile
            #filenameDEM = 'Animas_200.mat'
#load( filenameDEM ) ;
( topo , easting , northing , cellsize ) = load_dem( filenameDEM )
dx = numpy.longdouble(200) # set a new dx
dy = numpy.longdouble(dx)
# AAR and eroded volume watershed mask
mask_file = 'watershed_mask'
try:
#load( mask_file );
watershed_mask = load_mask( mask_file )
except:
watershed_mask = numpy.ones( topo.shape , dtype=numpy.longdouble ) # Use the whole grid if no watershed mask is available
logging.warning( 'No watershed mask found; using the whole grid for AAR and eroded flux calculations.' )
# Mass Balance
try: initELA
except NameError:
initELA = numpy.longdouble(3350)
maxBz = numpy.longdouble(2)
gradBz = numpy.longdouble(1./100.)
else:
if INIT_COND_TOGGLE == 2: # Ice sheets
filenameDEM = 'Baffin200d'
filenameDEM = 'ValleyNonFjordTopo'
#load( filenameDEM ) ;
( topo , easting , northing ) = load_dem( filenameDEM )
dx = numpy.longdouble(2000) # set a new dx
dy = dx
UsChar = numpy.longdouble(100)
taubChar = numpy.longdouble(50000)
#load( filenameDEM, 'Bxy' ) ;
Bxy = load_dem_var( filenameDEM , 'Bxy' )
# Mass Balance
initELA = numpy.longdouble(3500)
maxBz = numpy.longdouble(0)
gradBz = numpy.longdouble(1./100)
Hbound = numpy.longdouble(2000)
Elev0 = numpy.longdouble(0) # reference elevation
To = numpy.longdouble(-30) # temperature at Elev0
lapseRate = numpy.longdouble(-0.0065) # degrees per meter
COMPRESS_TOGGLE = 0
GENERIC_ICE_TOGGLE = 0
MASS_BALANCE_TOGGLE = ELA_TIME_SERIES
CALVING_TOGGLE = 1
ERODE_TOGGLE = 0
THERMAL_TOGGLE = 0
FREEZEON_TOGGLE = 0
HORZTL_ADVECT_TOGGLE = 0
GEOTHERMAL_HEAT_TOGGLE = 0
STRAIN_HEAT_TOGGLE = 0
SLIDING_HEAT_TOGGLE = 0
SURFACE_HEAT_FLUX_TOGGLE= 0
THERMAL_3D_TOGGLE = 0
WEST_BC_TOGGLE = ZERO_FLUX_BOUND
EAST_BC_TOGGLE = ZERO_FLUX_BOUND
SOUTH_BC_TOGGLE = ZERO_FLUX_BOUND
NORTH_BC_TOGGLE = ZERO_FLUX_BOUND
elif INIT_COND_TOGGLE == 3: # gui_start
#load( filenameDEM ) ;
( topo , easting , northing ) = load_dem( filenameDEM )
dy = dx
rws,cls = topo.shape
#if !exist('easting') : easting = numpy.arange( cls )
#if !exist('northing'): northing = numpy.arange( rws )
try: easting
except NameError: easting = numpy.arange( cls )
try: northing
except NameError: northing = numpy.arange( rws )
# resample DEM at new node spacing
if cellsize != dx:
rws,cls = topo.shape
xOld = numpy.arange(cls-1)*cellsize
yOld = numpy.arange(rws-1)*cellsize
#xOld = (0:cls-1)*cellsize ;
#yOld = (0:rws-1)*cellsize ;
XOld,YOld = numpy.meshgrid( xOld , yOld )
#if rem(max(xOld),dx) == 0 and rem(max(yOld),dy) == 0:
if max(xOld) % dx == 0 and max(yOld) % dy == 0:
clsNew = max(xOld)/dx + 1
rwsNew = max(yOld)/dy + 1
else:
clsNew = numpy.ceil( xOld[-1] / dx )
rwsNew = numpy.ceil( yOld[-1] / dy )
x = numpy.arange(clsNew)*dx
y = numpy.arange(rwsNew)*dy
X,Y = numpy.meshgrid( x , y )
topo = interpolate.interp2d( XOld , YOld , topo , kind='linear' )( X , Y )
#topo = interpolate.interp2d( XOld , YOld , topo, X, Y ) ;
easting = interpolate.interp1d( xOld , easting , kind='linear' )( x )
northing = interpolate.interp1d( yOld , northing , kind='linear' )( y )
cellsize = dx
# Set the bed elevation to 'topo'
Zb = topo.copy()
initZb = Zb.copy()
#if !exist('H'): H = numpy.zeros(Zb.shape)
try: H
except NameError: H = numpy.zeros( Zb.shape , dtype=numpy.longdouble )
Zi = H + Zb
#clear topo
rws,cls = Zb.shape
x = numpy.arange( cls )*dx
y = numpy.arange( rws )*dy
X,Y = numpy.meshgrid( x , y )
# Create a generic ice surface
if GENERIC_ICE_TOGGLE:
# This code segment rotates the topo such that the
# ice boundary is on the left side of the simulation
# need to check code; better to rotate DEM prior to use
ZiBound = numpy.mean(Zb[:,0]) + Hbound
taub = 200000
H = numpy.zeros(Zb.shape, dtype=numpy.longdouble )
rws,cls = Zb.shape
beta = taub/(rhoI*g)
jtermlast = cls-2
icefree = 0
# for each row, find the cell for which the ice surface
# height at the left boundary would be ZiBound if the
         # terminus starts in that cell
#for i =1:rws
for i in range(rws):
mZb = Zb[i,:]
slope = -numpy.diff(mZb)/dx
# search starts in front of the terminus
# of the adjacent row that was just found
jterm = min( jtermlast+1, cls-2 )
while jterm > 0:
# backwater calculation
mH = numpy.zeros(mZb.shape, dtype=numpy.longdouble )
for j in range(jterm-1,-1,-1):
term1 = ( -slope[j]/2. - (mH[j+1]/dx) )**2
term2 = -(2./dx) * ( slope[j] * mH[j+1] - beta )
deltaH = -slope[j]*dx/2. - mH[j+1] + dx * numpy.sqrt(term1+term2)
mH[j] = mH[j+1] + deltaH
# the following ensures that the search for
# the terminus was started beyond the terminus
mZi = mZb + mH
if mZi[0] > ZiBound:
icefree = 1
elif icefree and mZi[0] < ZiBound:
H[i,:] = mH
jtermlast = jterm
icefree = 0
break
else:
jterm = jterm + 2
if jterm >= cls-1:
logging.error( "Generic ice overruns boundary" )
return -1
jterm = jterm - 1
Zi = Zb + H
rws,cls = Zb.shape
filt = numpy.ones( (3,3) , dtype=numpy.longdouble ) / 9
ZiBig = numpy.zeros( (rws+2,cls+2) , dtype=numpy.longdouble )
ZiBig[1:-1,1:-1] = Zi
for i in range(10):
ZiBig[(0,-1),:] = ZiBig[(1,-2),:]
ZiBig[:,(0,-1)] = ZiBig[:,(1,-2)]
ZiBig = filter2d( filt , ZiBig )
         Zi = ZiBig[1:-1,1:-1]
ind = H == 0
Zi[ind] = Zb[ind]
conserveIce = H.sum(axis=0).sum()
iceVolumeLast = conserveIce*dx*dy
else: # SYNTHETIC BEDROCK TOPOGRAPHY
logging.error( "Must code synthetic initial condition" )
return -1
### INIT_COND_TOGGLE
######
### Load a saved state
######
# Initialize matrices
#n_rows = 100
#n_cols = 200
#H = numpy.ones( ( n_rows , n_cols ) )*100
#Zb = numpy.ones( ( n_rows , n_cols ) )
#Tb = numpy.ones( ( n_rows , n_cols ) )
#Tm = numpy.ones( ( n_rows , n_cols ) )
#Ts = numpy.ones( ( n_rows , n_cols ) )
#
#COMPRESS_TOGGLE = True
#THERMAL_TOGGLE = True
#
#RESTART_TOGGLE = 1
# Start the time loop
fd_watch = {}
fd_watch['thick'] = open( 'thickness_py.bin' , 'wb' )
fd_watch['taubxx'] = open( 'taubxX_py.bin' , 'wb' )
fd_watch['taubyy'] = open( 'taubyY_py.bin' , 'wb' )
fd_watch['taubx'] = open( 'taubX_py.bin' , 'wb' )
fd_watch['tauby'] = open( 'taubY_py.bin' , 'wb' )
fd_watch['xcmpnt'] = open( 'xcmpnt_py.bin' , 'wb' )
fd_watch['ycmpnt'] = open( 'ycmpnt_py.bin' , 'wb' )
fd_watch['udxx'] = open( 'UdxX_py.bin' , 'wb' )
fd_watch['udyy'] = open( 'UdyY_py.bin' , 'wb' )
fd_watch['usxx'] = open( 'UsxX_py.bin' , 'wb' )
fd_watch['usyy'] = open( 'UsyY_py.bin' , 'wb' )
fd_csv = open( 'dt.csv' , 'w' )
( H , Zb , dx , dy ) = load_state( inputFile )
run_for( t , tMax , H , Zb , dx , dy )
return
counter = 0
tic = time.time()
while t<tMax or RESTART_TOGGLE==2:
# COMPRESS - ONLY SIMULATE SUB-RECTANGLE THAT CONTAINS ICE
( Zi , compression_ratio , COMPRESSED_FLAG ) = compress_grid( H , Zb , COMPRESS_TOGGLE=False )
######
### MODIFY BOUNDARY CELLS TO ENFORCE BOUNDARY CONDITIONS
( H_ext , Zb_ext , Zi_ext ) = set_bc( H , Zb , Zi )
######
### CALCULATE THE BASAL SHEAR STRESS
# forward differences
#dZidxX_ext = ( Zi_ext[:,1:] - Zi_ext[:,:-1] ) / dx
#dZidyY_ext = ( Zi_ext[1:,:] - Zi_ext[:-1,:] ) / dy
#dZidxX = dZidxX_ext[1:-1,:]
#dZidyY = dZidyY_ext[:,1:-1]
( dZidxX , dZidyY ) = difference_grid( Zi_ext , dx , dy )
( ( xcmpnt , ycmpnt ) , ( taubX , taubY ) , ( HX , HY ) ) = basal_shear_stress( H_ext , Zi_ext , dx=dx , dy=dy )
######
### CALCULATE ICE VELOCITY DUE TO DEFORMATION
if ICEFLOW_TOGGLE:
( UdxX , UdyY ) = iceflow( taubX , taubY , HX , HY , xcmpnt , ycmpnt )
else:
UdxX = numpy.zeros( xcmpnt.shape , dtype=numpy.longdouble )
UdyY = numpy.zeros( ycmpnt.shape , dtype=numpy.longdouble )
######
### CALCULATE SLIDING VELOCITY
if ICESLIDE_TOGGLE:
( UsxX , UsyY ) = ice_sliding( taubX , taubY , xcmpnt , ycmpnt )
else:
UsxX = numpy.zeros( xcmpnt.shape , dtype=numpy.longdouble )
UsyY = numpy.zeros( ycmpnt.shape , dtype=numpy.longdouble )
# sum all contributions to ice motion
( UxX , UyY ) = sum_ice_motion( UdxX , UdyY , UsxX , UsyY )
######
### MASS CONSERVATION -- CONTINUITY
( dHdt , ( qxX , qyY ) ) = mass_conservation( H_ext , UxX , UyY , HX , HY , dZidxX , dZidyY , dx=dx , dy=dy );
######
### CALCULATE MASS BALANCE
( Bxy , ELA ) = mass_balance( Zi , t )
#######
### CALCULATE TIMESTEP
if VARIABLE_DT_TOGGLE:
dt = get_timestep( H , Zi_ext , Zi , dHdt , Bxy , dtMax=dtMax , dtDefault=dtDefault )
else:
dt = dtDefault
######
### UPDATE the TIME and ICE THICKNESS
( t , H , Zi , conserveIce ) = update_vars( H , Zb , Zi , Bxy , qxX , qyY , dHdt , t , dt , conserveIce , dx=dx , dy=dy )
fd_csv.write( '%f\n' % t )
fd_csv.flush()
# Calculate AAR
# AccumGrid = (Zi > ELA) .* (H > 0);
IndGlacier = numpy.choose( H >0 , (0,watershed_mask) )
AccumGrid = numpy.choose( Bxy>0 , (0,IndGlacier ) )
AccumArea = AccumGrid.sum(axis=0).sum()
TotArea = IndGlacier.sum(axis=0).sum()
AAR = AccumArea / TotArea
######
### CALCULATION OF ICE TEMPERATURES
if THERMAL_TOGGLE == 0:
pass
elif THERMAL_TOGGLE == 1:
Ts = To + lapseRate*( Zi - Elev0 )
Tb = Ts - gradTz * H
Tb = numpy.choose( Tb>0 , (Tb,0) )
Tm = Ts.copy()
Htemp = Ts / gradTz
ind = nonzero( H.flatten() <= Htemp )
put( Tm , ind , ( Ts.flatten()[ind] + Tb.flatten()[ind] ) * .5 )
ind = nonzero( H.flatten() > Htemp )
put( Tm , ind , Ts.flatten()[ind] * (1. - Htemp.flatten()[ind] / ( 2.*H.flatten()[ind] ) ) )
elif THERMAL_TOGGLE == 2:
thermal_gc2d
######
### COMPRESS - ONLY SIMULATE SUB-RECTANGLE THAT CONTAINS ICE
#if COMPRESS_TOGGLE and H.max() > 1 and RESTART_TOGGLE != 2:
# disp( 'Error!!!' )
# H_FullSpace = H.copy()
# Zb_FullSpace = Zb.copy()
# if THERMAL_TOGGLE:
# Ts_FullSpace = Ts.copy()
# Tb_FullSpace = Tb.copy()
# Tm_FullSpace = Tm.copy()
# indrw,indcl = (H!=0).nonzero()
# mxrw ,mxcl = Zb.shape
# mnrw = max( 0 , indrw.min() - 2 )
# mxrw = min( mxrw , indrw.max() + 2 )
# mncl = max( 0 , indcl.min() - 2 )
# mxcl = min( mxcl , indcl.max() + 2 )
# H = H [mnrw:mxrw,mncl:mxcl]
# Zb = Zb[mnrw:mxrw,mncl:mxcl]
# Zi = Zb + numpy.choose( H<0 , (H,0) )
# rws,cls = H.shape
# if THERMAL_TOGGLE:
# Ts = Ts[mnrw:mxrw,mncl:mxcl]
# Tb = Tb[mnrw:mxrw,mncl:mxcl]
# Tm = Tm[mnrw:mxrw,mncl:mxcl]
# mxrws,mxcls = Zb_FullSpace.shape
# rws ,cls = Zb.shape
# compression_ratio = (mxcls*mxrws)/(cls*rws)
# COMPRESSED_FLAG = 1
#else:
# Zi = Zb + numpy.choose( H<0 , (H,0) ) # included for restarts
# compression_ratio = 1
# COMPRESSED_FLAG = 0
# THIS IS THE END OF THE CONTINUUM CALCULATIONS
# NOW SIMULATE PROCESSES FOR WHICH WE HAVE NO EQUATIONS
######
### AVALANCHE SNOW OFF OF STEEP SURFACES
if AVALANCH_TOGGLE and ( numpy.random.uniform() < dt*avalanchFreq ):
         H = avalanche( H , Zb , Zi , dx )
######
### CALVING GLACIER FRONT
if CALVING_TOGGLE:
calve( H , dt )
if counter%1==0:
print_watch_point( fd_watch['thick'] , H )
#print_watch_point( fd_watch['taubxx'] , taubxX[:,1:] )
#print_watch_point( fd_watch['taubyy'] , taubyY[1:,:] )
#print_watch_point( fd_watch['taubx'] , taubX [:,1:] )
#print_watch_point( fd_watch['tauby'] , taubY [1:,:] )
#print_watch_point( fd_watch['xcmpnt'] , xcmpnt[:,1:] )
#print_watch_point( fd_watch['ycmpnt'] , ycmpnt[1:,:] )
#print_watch_point( fd_watch['udxx'] , UdxX [:,1:] )
#print_watch_point( fd_watch['udyy'] , UdyY [1:,:] )
#print_watch_point( fd_watch['usxx'] , UsxX [:,1:] )
#print_watch_point( fd_watch['usyy'] , UsyY [1:,:] )
counter += 1
if counter > 3000: return
######
### ERODE THE BED and TRACK CRN INVENTORY
if CRN_TOGGLE:
CRN_gc2d # Call the CRN module
######
### ERODE THE BED - now handled in CRN module
#
# if ERODE_TOGGLE:
# erode_gc2d
#
######
### REPORT SOME STUFF
toc = time.time()
if REPORT_TOGGLE and toc >= nextReport:
logging.info( 'elapsed time: %1.2f seconds' , (toc-tic) )
logging.info( 'simulation time: %1.2f yr' , t )
logging.info( 'timestep: %1.2e yr' , dt )
logging.info( 'ELA: %1.0f m' , ELA )
logging.info( 'AAR: %1.2f' , AAR )
# print 'Erosion mass flux: %1.1e kg/yr' % eroded_mass_flux
# fractional ice conservation
iceVolume = numpy.choose( H<0 , (H,0) ).sum(axis=0).sum()*dx*dy
logging.info( 'total ice: %1.2e km^3' , (iceVolume*1e-9) )
logging.info( 'excess ice: %1.2f m^3' , (iceVolume - conserveIce*dx*dy) )
logging.info( 'ice change: %f m^3' , (iceVolume - iceVolumeLast) )
logging.info( 'max ice thickness: %1.2e km' , (H.max()/1000.) )
if iceVolume != 0:
logging.info( 'ice conservation (%%): %1.15f' , (100 - 100*( iceVolume - conserveIce*dx*dy ) / iceVolume) )
iceVolumeLast = iceVolume
if CALVING_TOGGLE:
logging.info( 'calved ice volume: %1.2e m^3' , calvedIce )
if COMPRESS_TOGGLE:
logging.info( 'compression ratio = %f' , compression_ratio )
nextReport = toc + reportInterval
   for fd in fd_watch.values():
      fd.close()
logging.info( "Finished!" )
return 0
def run_for( t , t_max , H , Zb , dx , dy , ICEFLOW_TOGGLE=True , ICESLIDE_TOGGLE=False , VARIABLE_DT_TOGGLE=True , dtDefault=Parameters.dtDefault , dtMax=Parameters.dtMax):
fd_watch = {}
fd_watch['thick'] = open( 'thickness_py.bin' , 'wb' )
conserveIce = numpy.longdouble(0.)
counter = 0
tic = time.time()
while t<t_max:
# COMPRESS - ONLY SIMULATE SUB-RECTANGLE THAT CONTAINS ICE
( Zi , compression_ratio , COMPRESSED_FLAG ) = compress_grid( H , Zb , COMPRESS_TOGGLE=False )
######
### MODIFY BOUNDARY CELLS TO ENFORCE BOUNDARY CONDITIONS
( H_ext , Zb_ext , Zi_ext ) = set_bc( H , Zb , Zi )
( dZidxX , dZidyY ) = difference_grid( Zi_ext , dx , dy )
######
### CALCULATE THE BASAL SHEAR STRESS
( ( xcmpnt , ycmpnt ) , ( taubX , taubY ) , ( HX , HY ) ) = basal_shear_stress( H_ext , Zi_ext , dx=dx , dy=dy )
######
### CALCULATE ICE VELOCITY DUE TO DEFORMATION
if ICEFLOW_TOGGLE:
( UdxX , UdyY ) = iceflow( taubX , taubY , HX , HY , xcmpnt , ycmpnt )
else:
UdxX = numpy.zeros( xcmpnt.shape , dtype=numpy.longdouble )
UdyY = numpy.zeros( ycmpnt.shape , dtype=numpy.longdouble )
######
### CALCULATE SLIDING VELOCITY
if ICESLIDE_TOGGLE:
( UsxX , UsyY ) = ice_sliding( taubX , taubY , xcmpnt , ycmpnt )
else:
UsxX = numpy.zeros( xcmpnt.shape , dtype=numpy.longdouble )
UsyY = numpy.zeros( ycmpnt.shape , dtype=numpy.longdouble )
# sum all contributions to ice motion
( UxX , UyY ) = sum_ice_motion( UdxX , UdyY , UsxX , UsyY )
######
### MASS CONSERVATION -- CONTINUITY
( dHdt , ( qxX , qyY ) ) = mass_conservation( H_ext , UxX , UyY , HX , HY , dZidxX , dZidyY , dx=dx , dy=dy );
######
### CALCULATE MASS BALANCE
( Bxy , ELA ) = mass_balance( Zi , t )
#######
### CALCULATE TIMESTEP
if VARIABLE_DT_TOGGLE:
dt = get_timestep( H , Zi_ext , Zi , dHdt , Bxy , dtMax=dtMax , dtDefault=dtDefault )
else:
dt = dtDefault
######
### UPDATE the TIME and ICE THICKNESS
( t , H , Zi , conserveIce ) = update_vars( H , Zb , Zi , Bxy , qxX , qyY , dHdt , t , dt , conserveIce , dx=dx , dy=dy )
if counter%1==0:
print_watch_point( fd_watch['thick'] , H )
counter = counter + 1
class Toggles:
# CODE BEHAVIOR TOGGLES
# toggles turn on/off segments of the code or select
# between multiple possibilities for a given process
# values can be reset in INIT_COND segment
GUISTART_TOGGLE = 0 # started simulation with the gui (off|on)
SAVE_TOGGLE = 1 # saving (off|on)
PLOT_TOGGLE = 1 # plotting (off|on)
REPORT_TOGGLE = 1 # reporting (off|on)
COMPRESS_TOGGLE = 0 # only simulate area with ice (off|on)
VARIABLE_DT_TOGGLE = 1 # state dependent time step (off|on)
INIT_COND_TOGGLE = 1 # load DEM and climate (synth|valley|sheet)
GENERIC_ICE_TOGGLE = 0 # start with generic ice surface (off|on)
ICEFLOW_TOGGLE = 1 # ice motion by deformation (off|on)
ICESLIDE_TOGGLE = 0 # ice motion by sliding (off|on|select)
   THERMAL_TOGGLE      = 0               # temp dependence of flow     (off|on)
FREEZEON_TOGGLE = 0 # basal ice freeze to bed (off|on)
   AVALANCH_TOGGLE     = 0               # avalanche off steep surfaces (off|on)
ERODE_TOGGLE = 0 # erode the bed (off|on|select)
CALVING_TOGGLE = 0 # calving front (off|on)
CRN_TOGGLE = 0 # CRN accumulation (off|on)
   MASS_BALANCE_TOGGLE = MassBalance.ELA_LOWERING    # select climate scenario   (off|on|select)
WEST_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND # boundary condition (no ice|reflect|no flow)
EAST_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND # boundary condition (no ice|reflect|no flow)
SOUTH_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND # boundary condition (no ice|reflect|no flow)
NORTH_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND # boundary condition (no ice|reflect|no flow)
def init_valley_glacier( file='Animas_200.mat' ):
# filenameDEM = 'Yosemite200_rot35_400x650' ;
# filenameDEM = 'Nederland100' ;
# filenameDEM = 'KingsCanyon200Rot256x256shift' ;
# filenameDEM = 'sample200' ;
# filenameDEM = 'animas_200' ;
# filenameDEM = '4J_newDEM_200' ;
# filenameDEM = 'reproj4j_200' ;
( topo , easting , northing , cellsize ) = load_dem( file )
dx = numpy.longdouble(200) # set a new dx
dy = numpy.longdouble(dx)
# AAR and eroded volume watershed mask
mask_file = 'watershed_mask'
try:
#load( mask_file );
watershed_mask = load_mask( mask_file )
except:
watershed_mask = numpy.ones( topo.shape , dtype=numpy.longdouble ) # Use the whole grid if no watershed mask is available
logging.warning( 'No watershed mask found; using the whole grid for AAR and eroded flux calculations.' )
# Mass Balance
try: initELA
except NameError:
initELA = numpy.longdouble(3350)
maxBz = numpy.longdouble(2)
gradBz = numpy.longdouble(1./100.)
return ( topo , easting , northing , cellsize )
def init_ice_sheet( file ):
file = 'Baffin200d'
file = 'ValleyNonFjordTopo'
#load( filenameDEM ) ;
( topo , easting , northing ) = load_dem( file )
dx = numpy.longdouble(2000) # set a new dx
dy = dx
UsChar = numpy.longdouble(100)
taubChar = numpy.longdouble(50000)
#load( filenameDEM, 'Bxy' ) ;
Bxy = load_dem_var( filenameDEM , 'Bxy' )
# Mass Balance
initELA = numpy.longdouble(3500)
maxBz = numpy.longdouble(0)
gradBz = numpy.longdouble(1./100)
Hbound = numpy.longdouble(2000)
Elev0 = numpy.longdouble(0) # reference elevation
To = numpy.longdouble(-30) # temperature at Elev0
lapseRate = numpy.longdouble(-0.0065) # degrees per meter
COMPRESS_TOGGLE = 0
GENERIC_ICE_TOGGLE = 0
MASS_BALANCE_TOGGLE = ELA_TIME_SERIES
CALVING_TOGGLE = 1
ERODE_TOGGLE = 0
THERMAL_TOGGLE = 0
FREEZEON_TOGGLE = 0
HORZTL_ADVECT_TOGGLE = 0
GEOTHERMAL_HEAT_TOGGLE = 0
STRAIN_HEAT_TOGGLE = 0
SLIDING_HEAT_TOGGLE = 0
SURFACE_HEAT_FLUX_TOGGLE= 0
THERMAL_3D_TOGGLE = 0
WEST_BC_TOGGLE = ZERO_FLUX_BOUND
EAST_BC_TOGGLE = ZERO_FLUX_BOUND
SOUTH_BC_TOGGLE = ZERO_FLUX_BOUND
NORTH_BC_TOGGLE = ZERO_FLUX_BOUND
return ( topo , easting , northing )
def load_state( file , RESTART_TOGGLE=0 , INIT_COND_TOGGLE=True , GENERIC_ICE_TOGGLE=False ):
######
### Load a saved state
if RESTART_TOGGLE == 0 or RESTART_TOGGLE == 3: # LOAD A SAVED STATE
# CODE BEHAVIOR TOGGLES
# toggles turn on/off segments of the code or select
# between multiple possibilities for a given process
# values can be reset in INIT_COND segment
toggles = Toggles
# OUTPUT BEHAVIOR
plotInterval = 60 * 120 # seconds
saveInterval = 100 # whole years
reportInterval = 30 # seconds
nextPlot = 0 # initialize to plot on first timestep
nextSave = 0 # initialize to save on first timestep
nextReport = 0 # initialize to report on first timestep
outputFile = 'savetmp'
######
### Set numerical and physical constants
params = Parameters
# Constants
g = numpy.longdouble(9.81) # gravitational acceleration
rhoI = numpy.longdouble(917) # density of ice
rhoW = numpy.longdouble(1000) # density of water
day = numpy.longdouble(0.00274) # length of a day in years
# Time
t = numpy.longdouble(0) # set time to zero
tMax = numpy.longdouble(100000) # maximum simulation time in years
dtMax = numpy.longdouble(0.4 * 365*day) # maximum timestep in years
dtDefault = numpy.longdouble(0.4 * 365*day) # timestep if VARIABLE_DT_TOGGLE==0
# Glacier Properties
MinGlacThick = numpy.longdouble(1)
# Ice Deformation
glensA = numpy.longdouble((6.8e-15)*3.15e7/(1e9)) # Patterson, 1994; MacGregor, 2000
# Attractor Sliding -- only if ICESLIDE_TOGGLE==1 (generally used)
UsChar = numpy.longdouble(10)
taubChar = numpy.longdouble(100000)
# Standard Sliding -- used if ICESLIDE_TOGGLE==2 (generally not used)
B = numpy.longdouble(0.0012) # m/(Pa*yr) -- MacGregor, 2000
DepthToWaterTable = numpy.longdouble(20) # distance from ice surface to water table
MaxFloatFraction = numpy.longdouble(80) # limits water level in ice
Hpeff = numpy.longdouble(20) # effective pressure (meters of water)
# Avalanching
angleOfRepose = numpy.longdouble(30)
avalanchFreq = numpy.longdouble(3) # average number per year
# Calving
seaLevel = numpy.longdouble(-100) # meters
calvingCoef = numpy.longdouble(2) # year^-1
# Thermal
c = numpy.longdouble(2060) # specific heat capacity (J/(kg*K))
Qg = numpy.longdouble(0.05*3.15e7) # Geothermal heat flux (W/m^2)*seconds/year = (J/year)/(m^2)
gradTz = numpy.longdouble(-0.0255) # Geothermal Gradient
# Mass Balance
initELA = numpy.longdouble(4500)
initELA = numpy.longdouble(3000)
gradBz = numpy.longdouble(0.01)
maxBz = numpy.longdouble(2)
ELAStepSize = numpy.longdouble(-50)
ELAStepInterval = numpy.longdouble(500)
tmin = numpy.longdouble(200) # Years, spin-up time
######
### RELOAD INPUT ARGUMENTS
#load inputArgs
inputArgs = load_input_args
#if ( GUISTART_TOGGLE & exist('guiSimParams.mat','file') )
# load guiSimParams
# delete guiSimParams.mat
# clear newInitFile
#elseif ( ~GUISTART_TOGGLE & exist( './guiPlotParams.mat', 'file' ) )
# delete guiPlotParams.mat
#end
######
### INITIALIZE COUNTERS
# numTimeSteps = 0 ;
# timeSteps = zeros(1000000,1) ;
######
### INITIALIZE BED and ICE TOPOGRAPHY, and CLIMATE VARIABLES
# Must define topo, cellsize, dx, and dy
if INIT_COND_TOGGLE:
### .mat file contains: 'topo' = matrix of bed elevations and 'cellsize',
### both in meters. 'easting' and 'northing' are included for plotting
if INIT_COND_TOGGLE == 1: # Valley glaciers
# filenameDEM = 'Yosemite200_rot35_400x650' ;
# filenameDEM = 'Nederland100' ;
# filenameDEM = 'KingsCanyon200Rot256x256shift' ;
# filenameDEM = 'sample200' ;
# filenameDEM = 'animas_200' ;
# filenameDEM = '4J_newDEM_200' ;
# filenameDEM = 'reproj4j_200' ;
filenameDEM = file
filenameDEM = 'Animas_200.mat'
#load( filenameDEM ) ;
( topo , easting , northing , cellsize ) = load_dem( filenameDEM )
dx = numpy.longdouble(200) # set a new dx
dy = numpy.longdouble(dx)
# AAR and eroded volume watershed mask
mask_file = 'watershed_mask'
try:
#load( mask_file );
watershed_mask = load_mask( mask_file )
except:
watershed_mask = numpy.ones( topo.shape , dtype=numpy.longdouble ) # Use the whole grid if no watershed mask is available
logging.warning( 'No watershed mask found; using the whole grid for AAR and eroded flux calculations.' )
# Mass Balance
try: initELA
except NameError:
initELA = numpy.longdouble(3350)
maxBz = numpy.longdouble(2)
gradBz = numpy.longdouble(1./100.)
elif INIT_COND_TOGGLE==2: # Ice sheets
filenameDEM = 'Baffin200d'
filenameDEM = 'ValleyNonFjordTopo'
#load( filenameDEM ) ;
( topo , easting , northing ) = load_dem( filenameDEM )
dx = numpy.longdouble(2000) # set a new dx
dy = dx
UsChar = numpy.longdouble(100)
taubChar = numpy.longdouble(50000)
#load( filenameDEM, 'Bxy' ) ;
Bxy = load_dem_var( filenameDEM , 'Bxy' )
# Mass Balance
initELA = numpy.longdouble(3500)
maxBz = numpy.longdouble(0)
gradBz = numpy.longdouble(1./100)
Hbound = numpy.longdouble(2000)
Elev0 = numpy.longdouble(0) # reference elevation
To = numpy.longdouble(-30) # temperature at Elev0
lapseRate = numpy.longdouble(-0.0065) # degrees per meter
COMPRESS_TOGGLE = 0
GENERIC_ICE_TOGGLE = 0
MASS_BALANCE_TOGGLE = ELA_TIME_SERIES
CALVING_TOGGLE = 1
ERODE_TOGGLE = 0
THERMAL_TOGGLE = 0
FREEZEON_TOGGLE = 0
HORZTL_ADVECT_TOGGLE = 0
GEOTHERMAL_HEAT_TOGGLE = 0
STRAIN_HEAT_TOGGLE = 0
SLIDING_HEAT_TOGGLE = 0
SURFACE_HEAT_FLUX_TOGGLE= 0
THERMAL_3D_TOGGLE = 0
WEST_BC_TOGGLE = ZERO_FLUX_BOUND
EAST_BC_TOGGLE = ZERO_FLUX_BOUND
SOUTH_BC_TOGGLE = ZERO_FLUX_BOUND
NORTH_BC_TOGGLE = ZERO_FLUX_BOUND
elif INIT_COND_TOGGLE == 3: # gui_start
#load( filenameDEM ) ;
( topo , easting , northing ) = load_dem( filenameDEM )
dy = dx
rws,cls = topo.shape
#if !exist('easting') : easting = numpy.arange( cls )
#if !exist('northing'): northing = numpy.arange( rws )
try: easting
except NameError: easting = numpy.arange( cls )
try: northing
except NameError: northing = numpy.arange( rws )
# resample DEM at new node spacing
if cellsize != dx:
rws,cls = topo.shape
xOld = numpy.arange(cls-1)*cellsize
yOld = numpy.arange(rws-1)*cellsize
#xOld = (0:cls-1)*cellsize ;
#yOld = (0:rws-1)*cellsize ;
XOld,YOld = numpy.meshgrid( xOld , yOld )
#if rem(max(xOld),dx) == 0 and rem(max(yOld),dy) == 0:
if max(xOld) % dx == 0 and max(yOld) % dy == 0:
clsNew = max(xOld)/dx + 1
rwsNew = max(yOld)/dy + 1
else:
clsNew = numpy.ceil( xOld[-1] / dx )
rwsNew = numpy.ceil( yOld[-1] / dy )
x = numpy.arange(clsNew)*dx
y = numpy.arange(rwsNew)*dy
X,Y = numpy.meshgrid( x , y )
topo = interpolate.interp2d( XOld , YOld , topo , kind='linear' )( X , Y )
#topo = interpolate.interp2d( XOld , YOld , topo, X, Y ) ;
easting = interpolate.interp1d( xOld , easting , kind='linear' )( x )
northing = interpolate.interp1d( yOld , northing , kind='linear' )( y )
cellsize = dx
# Set the bed elevation to 'topo'
Zb = topo.copy()
initZb = Zb.copy()
#if !exist('H'): H = numpy.zeros(Zb.shape)
try: H
except NameError: H = numpy.zeros( Zb.shape , dtype=numpy.longdouble )
Zi = H + Zb
#clear topo
rws,cls = Zb.shape
x = numpy.arange( cls )*dx
y = numpy.arange( rws )*dy
X,Y = numpy.meshgrid( x , y )
# Create a generic ice surface
if GENERIC_ICE_TOGGLE:
# This code segment rotates the topo such that the
# ice boundary is on the left side of the simulation
# need to check code; better to rotate DEM prior to use
ZiBound = numpy.mean(Zb[:,0]) + Hbound
taub = 200000
H = numpy.zeros(Zb.shape, dtype=numpy.longdouble )
rws,cls = Zb.shape
beta = taub/(rhoI*g)
jtermlast = cls-2
icefree = 0
# for each row, find the cell for which the ice surface
# height at the left boundary would be ZiBound if the
# terminus starts in that cell
#for i =1:rws
for i in range(rws):
mZb = Zb[i,:]
slope = -numpy.diff(mZb)/dx
# search starts in front of the terminus
# of the adjacent row that was just found
jterm = min( jtermlast+1, cls-2 )
while jterm > 0:
# backwater calculation
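# (added explanatory note, hedged): this inner loop appears to integrate a plastic-ice
# thickness profile upstream from the trial terminus cell. Assuming a constant basal
# yield stress taub, rhoI*g*H*dZi/dx = taub gives H*dZi/dx = beta, and writing
# Zi = Zb + H across one cell of width dx turns that into a quadratic in the thickness
# increment deltaH = mH[j] - mH[j+1]; term1 and term2 below are the pieces under the
# square root of its positive root.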
mH = numpy.zeros(mZb.shape, dtype=numpy.longdouble )
for j in range(jterm-1,-1,-1):
term1 = ( -slope[j]/2. - (mH[j+1]/dx) )**2
term2 = -(2./dx) * ( slope[j] * mH[j+1] - beta )
deltaH = -slope[j]*dx/2. - mH[j+1] + dx * numpy.sqrt(term1+term2)
mH[j] = mH[j+1] + deltaH
# the following ensures that the search for
# the terminus was started beyond the terminus
mZi = mZb + mH
if mZi[0] > ZiBound:
icefree = 1
elif icefree and mZi[0] < ZiBound:
H[i,:] = mH
jtermlast = jterm
icefree = 0
break
else:
jterm = jterm + 2
if jterm >= cls-1:
logging.error( "Generic ice overruns boundary" )
return -1
jterm = jterm - 1
Zi = Zb + H
rws,cls = Zb.shape
filt = numpy.ones( (3,3) , dtype=numpy.longdouble ) / 9
ZiBig = numpy.zeros( (rws+2,cls+2) , dtype=numpy.longdouble )
ZiBig[1:-1,1:-1] = Zi
for i in range(10):
ZiBig[(0,-1),:] = ZiBig[(1,-2),:]
ZiBig[:,(0,-1)] = ZiBig[:,(1,-2)]
ZiBig = filter2d( filt , ZiBig )
Zi = ZiBig[1:-1,1:-1] # extract the interior again (must mirror the [1:-1,1:-1] assignment above, otherwise the shape no longer matches H and Zb)
ind = H == 0
Zi[ind] = Zb[ind]
conserveIce = H.sum(axis=0).sum()
iceVolumeLast = conserveIce*dx*dy
else: # SYNTHETIC BEDROCK TOPOGRAPHY
logging.error( "Must code synthetic initial condition" )
return -1
### INIT_COND_TOGGLE
######
### Load a saved state
######
return ( H , Zb , dx , dy )
def gc2d( argv=None , inputFile='Animas_200.mat' ):
# if argv is None:
# argv = sys.argv
# try:
# try:
# opts, args = getopt.getopt( argv[1:] , "h" , ["help"] )
# except getopt.error, msg:
# raise Usage(msg)
# except Usage, err:
# print >> sys.stderr, err.msg
# print >> sys.stderr, "for help use --help"
# return 2
( H , Zb , dx , dy ) = load_state( inputFile )
run_for( Parameters.t , Parameters.tMax , H , Zb , dx , dy )
return 0
from csdms import Component
class gc2d( Component ):
def __init__( self ):
self._name = 'GC2D'
self._vars = {}
self.set_var( 'H' , None )
self.set_var( 'Zb' , None )
self.set_var( 'dx' , None )
self.set_var( 'dy' , None )
def setup( self , file=None ):
Component( self._name ).setup()
( H , Zb , dx , dy ) = load_state( file )
self.set_var( 'H' , H )
self.set_var( 'Zb' , Zb )
self.set_var( 'dx' , dx )
self.set_var( 'dy' , dy )
def run_for( self , duration , start=0. ):
#if type( duration ) == unum.Unum:
# duration_in_y = duration.convert( YR ).asNumber()
#else:
duration_in_y = duration
#if type( start ) == unum.Unum:
# start_in_y = start.convert( YR ).asNumber()
#else:
start_in_y = start
Component( self._name ).run()
H = self.get_var( 'H' )
Zb = self.get_var( 'Zb' )
dx = self.get_var( 'dx' )
dy = self.get_var( 'dy' )
run_for( start_in_y , start_in_y+duration_in_y , H , Zb , dx , dy )
def teardown( self ):
Component( self._name ).teardown()
if __name__ == "__main__":
logging.basicConfig( level=logging.INFO ,
format='%(asctime)s %(levelname)-8s %(message)s' ,
datefmt='%a, %d %b %Y %H:%M:%S' ,
filename='gc2d.log' ,
filemode='w' )
sys.exit( gc2d() )
| apache-2.0 | -1,780,222,918,545,055,700 | 36.272949 | 260 | 0.514888 | false | 3.08499 | false | false | false |
cindy-zimmerman/vmb-mrest | flask_mrest/models.py | 1 | 12211 |
import base64
import json
from sqlalchemy.exc import IntegrityError, InvalidRequestError
import sqlalchemy as sa
from alchemyjsonschema import BaseModelWalker, SingleModelWalker, SchemaFactory
from errorhandlers import page_not_found
from flask import current_app, make_response, request
from jsonschema import validate, ValidationError
from sqlalchemy.ext.declarative import declarative_base
from mrest_client.auth import decode_auth_data
from flask_mrest.auth import mrest_authenticate, mrest_404
from hashlib import sha256
SABase = declarative_base()
def dictify_item(item, model):
columns = [c.name for c in model.__table__.columns]
columnitems = dict([(c, getattr(item, c)) for c in columns])
return columnitems
def query_to_json(query, model):
if isinstance(query, SABase):
return json.dumps(dictify_item(query, model))
else:
items = []
for item in query:
items.append(dictify_item(item, model))
return json.dumps(items)
class BaseModel(object):
"""
An MREST model with no route handlers. A good base to use for private models which will require custom routes.
See child SuperModel class for route handler examples.
"""
def __init__(self, name, model_name, plain_methods=None, id_methods=None, sa_model=None, excludes=None,
walker=None):
"""
:param str name: The display name of the model (typically capitalized)
:param str model_name: The model name (lower case, for routing, tables, etc)
:param list plain_methods: Methods to use for plain route
:param list id_methods: Methods to use for id route
:param SABase sa_model: The SQLAlchemy model
:param list excludes: a list of excludes to pass to the walker
:param BaseModelWalker walker:
"""
if not excludes:
excludes = []
if not id_methods:
id_methods = []
if not plain_methods:
plain_methods = []
self.name = name
self.model_name = model_name
self.plain_methods = plain_methods
self.id_methods = id_methods
self._private_routes = None
self._public_routes = None
if sa_model is not None:
self._sa_model = sa_model
else:
self._sa_model = None
self.excludes = excludes
if isinstance(walker, BaseModelWalker):
self.walker = walker
else:
self.walker = SingleModelWalker
self._json_schema = None
@property
def sa_model(self):
"""
Provide the SQLAlchemy model as a separate object, so that it isn't cluttered with unnecessary attributes.
:return: The SQLAlchemy model to use for this super model
"""
if self._sa_model is None:
self._sa_model = SABase # This default is meaningless. Create your own class to inherit from Base.
return self._sa_model
@property
def json_schema(self):
"""
Lazily build (and cache) the JSON schema for this model from its SQLAlchemy model.
:return: The json schema for this model
"""
if self._json_schema is None:
factory = SchemaFactory(self.walker)
self._json_schema = factory.__call__(self.sa_model, excludes=self.excludes)
# TODO change to custom route with valid json-reference as per
# http://tools.ietf.org/html/draft-zyp-json-schema-04#section-6.2
self._json_schema['$schema'] = "http://json-schema.org/draft-04/schema#"
self._json_schema['private_routes'] = self.private_routes
self._json_schema['public_routes'] = self.public_routes
print self._json_schema
return self._json_schema
@property
def private_routes(self):
if self._private_routes is None:
self._private_routes = {"/": [], "/:id": []}
for method in ('get', 'post', 'put', 'delete'):
name = getattr(self, method).__name__
if name == 'authenticated_handler':
self._private_routes['/'].append(method.upper())
for method in ('get', 'post', 'put', 'delete'):
name = getattr(self, method + "_one").__name__
if name == 'authenticated_handler':
self._private_routes['/:id'].append(method.upper())
return self._private_routes
@property
def public_routes(self):
if self._public_routes is None:
self._public_routes = {"/": [], "/:id": []}
for method in ('get', 'post', 'put', 'delete'):
name = getattr(self, method).__name__
if name != 'authenticated_handler' and name != 'pnf_handler':
self._public_routes['/'].append(method.upper())
for method in ('get', 'post', 'put', 'delete'):
name = getattr(self, method + "_one").__name__
if name != 'authenticated_handler' and name != 'pnf_handler':
self._public_routes['/:id'].append(method.upper())
return self._public_routes
def route_plain(self):
"""
Handler for /<model> routes.
"""
if request.method == 'GET':
return self.get()
elif request.method == 'POST':
return self.post()
elif request.method == 'PUT':
return self.put()
elif request.method == 'DELETE':
return self.delete()
else:
return "this server does not support %s requests" % request.method
def route_id(self, itemid):
"""
Handler for /<model>/<itemid> routes.
"""
if request.method == 'GET':
return self.get_one(itemid)
elif request.method == 'POST':
return self.post_one(itemid)
elif request.method == 'PUT':
return self.put_one(itemid)
elif request.method == 'DELETE':
return self.delete_one(itemid)
else:
return "this server does not support %s requests" % request.method
@mrest_404
def get(self):
"""
Handler for GET /<model>
"""
pass
@mrest_404
def post(self):
"""
Handler for POST /<model>
"""
pass
@mrest_404
def put(self):
"""
Handler for PUT /<model>
"""
pass
@mrest_404
def delete(self):
"""
Handler for DELETE /<model>
"""
pass
@mrest_404
def get_one(self, itemid):
"""
Handler for GET /<model>/<id>
"""
pass
@mrest_404
def post_one(self, itemid):
"""
Handler for POST /<model>/<id>
"""
pass
@mrest_404
def put_one(self, itemid):
"""
Handler for PUT /<model>/<id>
"""
pass
@mrest_404
def delete_one(self, itemid):
"""
Handler for DELETE /<model>/<id>
"""
pass
class SuperModel(BaseModel):
"""
An MREST model with all route handlers defined with default behavior.
A good base to use for completely public models which need the generic REST functionality.
"""
def __init__(self, name, model_name, **kwargs):
if 'plain_methods' in kwargs:
del kwargs['plain_methods']
if 'id_methods' in kwargs:
del kwargs['id_methods']
super(SuperModel, self).__init__(name, model_name, plain_methods=['GET', 'POST'],
id_methods=['GET', 'PUT', 'DELETE'], **kwargs)
def get(self):
items = current_app.sa['session'].query(self.sa_model).all()
current_app.sa['session'].commit()
return make_response(query_to_json(items, self.sa_model), 200)
@mrest_authenticate
def post(self):
args = decode_auth_data(request.data)
try:
validate(args, current_app.json_schemas[self.model_name])
except ValidationError:
return page_not_found()
item = self.sa_model(**args)
current_app.sa['session'].add(item)
current_app.sa['session'].commit()
return make_response(query_to_json(item, self.sa_model), 200)
@mrest_authenticate
def get_one(self, itemid):
try:
item = current_app.sa['session'].query(self.sa_model).filter(self.sa_model.id == itemid).one()
except Exception as e:
return page_not_found()
return make_response(query_to_json(item, self.sa_model), 200)
@mrest_authenticate
def put_one(self, itemid):
try:
item = current_app.sa['session'].query(self.sa_model).filter(self.sa_model.id == itemid).one()
except Exception as e:
return page_not_found()
args = decode_auth_data(request.get_data())
# delete unsafe values
if 'id' in args:
del args['id']
# override existing values
dictitem = dictify_item(item, self.sa_model)
for arg in args:
if arg in dictitem:
dictitem[arg] = args[arg]
try:
validate(dictitem, current_app.json_schemas[self.model_name])
except ValidationError as ve:
current_app.logger.info("ValidationError received %s" % ve)
return page_not_found()
cid = dictitem['id']
del dictitem['id']
try:
current_app.sa['session'].query(self.sa_model).filter(self.sa_model.id == cid).update(dictitem)
except Exception as e:
return page_not_found()
current_app.sa['session'].commit()
return make_response(query_to_json(item, self.sa_model), 201)
@mrest_authenticate
def delete_one(self, itemid):
try:
item = current_app.sa['session'].query(self.sa_model).filter(self.sa_model.id == itemid).one()
except Exception as e:
return page_not_found()
current_app.sa['session'].delete(item)
current_app.sa['session'].commit()
return make_response("", 204)
class UserSA(SABase):
"""model for an api user or item user"""
__tablename__ = "user"
id = sa.Column(sa.String(120), primary_key=True, nullable=False, doc="primary key")
pubpem = sa.Column(sa.String(255), nullable=False)
def __repr__(self):
return "<User(id='%s')>" % self.id
class UserModel(BaseModel):
"""
The ECC auth user object. Override with your user model, if you wish.
"""
def __init__(self):
super(UserModel, self).__init__('User', 'user', plain_methods=['POST'],
id_methods=['GET'], sa_model=UserSA)
def post(self):
args = decode_auth_data(request.data)
try:
validate(args, current_app.json_schemas[self.model_name])
except ValidationError:
return page_not_found()
pubpem = base64.b64decode(args['pubpem'])
pubhash = sha256(pubpem).hexdigest()
item = self.sa_model(id=pubhash, pubpem=pubpem)
current_app.sa['session'].add(item)
try:
current_app.sa['session'].commit()
except IntegrityError as ie:
current_app.logger.info("user already existed %r" % pubhash)
current_app.sa['session'].rollback()
return make_response(query_to_json(item, self.sa_model), 200)
except InvalidRequestError as ire:
current_app.logger.info("user already existed %r" % pubhash)
current_app.sa['session'].rollback()
return make_response(query_to_json(item, self.sa_model), 200)
current_app.logger.info("created user %r" % item)
return make_response(query_to_json(item, self.sa_model), 200)
@mrest_authenticate
def get_one(self, itemid):
"""
Handler for /user/<itemid> routes.
"""
try:
item = current_app.sa['session'].query(self.sa_model).filter(self.sa_model.id == itemid).one()
except Exception as e:
return page_not_found()
return make_response(query_to_json(item, self.sa_model), 200)
| mit | 398,567,916,959,636,160 | 34.291908 | 114 | 0.577266 | false | 3.994439 | false | false | false |