# rcatwood/Savu | savu/test/jenkins/plugin_tests/loader_tests/i18_xrd_loader_test.py | gpl-3.0
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: i18_xrd_loader_test
:platform: Unix
:synopsis: testing the i18 xrd loader
.. moduleauthor:: Aaron Parsons <[email protected]>
"""
import unittest
from savu.test import test_utils as tu
from savu.test.travis.framework_tests.plugin_runner_test import \
run_protected_plugin_runner
class NxXrdLoaderTest(unittest.TestCase):
# @unittest.skip("the test data isn't ready yet. Adp")
def test_nx_xrd(self):
data_file = '/dls/i18/data/2016/sp12601-1/processing/Savu_Test_Data/70214_Cat2_RT_1.nxs'
process_file = tu.get_test_process_path('basic_xrd_process_i18.nxs')
run_protected_plugin_runner(tu.set_options(data_file,
process_file=process_file))
if __name__ == "__main__":
unittest.main()
# aglitke/vdsm | vdsm/storage/image.py | gpl-2.0
#
# Copyright 2009-2012 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
import os
import logging
import threading
import uuid
from contextlib import contextmanager
import volume
from vdsm import qemuImg
from sdc import sdCache
import sd
import misc
import fileUtils
import imageSharing
from vdsm.config import config
from vdsm.utils import ActionStopped
import storage_exception as se
import task
from threadLocal import vars
import resourceFactories
import resourceManager as rm
log = logging.getLogger('Storage.Image')
rmanager = rm.ResourceManager.getInstance()
# Disk type
UNKNOWN_DISK_TYPE = 0
SYSTEM_DISK_TYPE = 1
DATA_DISK_TYPE = 2
SHARED_DISK_TYPE = 3
SWAP_DISK_TYPE = 4
TEMP_DISK_TYPE = 5
DISK_TYPES = {UNKNOWN_DISK_TYPE: 'UNKNOWN', SYSTEM_DISK_TYPE: 'SYSTEM',
DATA_DISK_TYPE: 'DATA', SHARED_DISK_TYPE: 'SHARED',
SWAP_DISK_TYPE: 'SWAP', TEMP_DISK_TYPE: 'TEMP'}
# What volumes to synchronize
SYNC_VOLUMES_ALL = 'ALL'
SYNC_VOLUMES_INTERNAL = 'INTERNAL'
SYNC_VOLUMES_LEAF = 'LEAF'
# Image Operations
UNKNOWN_OP = 0
COPY_OP = 1
MOVE_OP = 2
OP_TYPES = {UNKNOWN_OP: 'UNKNOWN', COPY_OP: 'COPY', MOVE_OP: 'MOVE'}
RENAME_RANDOM_STRING_LEN = 8
# Temporary size of a volume when we optimize out the prezeroing
TEMPORARY_VOLUME_SIZE = 20480 # in sectors (10M)
def _deleteImage(dom, imgUUID, postZero):
"""This ancillary function will be removed.
Replaces Image.delete() in Image.[copyCollapsed(), move(), multimove()].
"""
allVols = dom.getAllVolumes()
imgVols = sd.getVolsOfImage(allVols, imgUUID)
if not imgVols:
log.warning("No volumes found for image %s. %s", imgUUID, allVols)
return
elif postZero:
dom.zeroImage(dom.sdUUID, imgUUID, imgVols)
else:
dom.deleteImage(dom.sdUUID, imgUUID, imgVols)
class Image:
""" Actually represents a whole virtual disk.
Consists of a chain of volumes.
"""
log = logging.getLogger('Storage.Image')
_fakeTemplateLock = threading.Lock()
@classmethod
def createImageRollback(cls, taskObj, imageDir):
"""
Remove empty image folder
"""
cls.log.info("createImageRollback: imageDir=%s" % (imageDir))
if os.path.exists(imageDir):
if not len(os.listdir(imageDir)):
fileUtils.cleanupdir(imageDir)
else:
cls.log.error("createImageRollback: Cannot remove dirty image "
"folder %s" % (imageDir))
def __init__(self, repoPath):
self.repoPath = repoPath
self.storage_repository = config.get('irs', 'repository')
def create(self, sdUUID, imgUUID):
"""Create placeholder for image's volumes
'sdUUID' - storage domain UUID
'imgUUID' - image UUID
"""
imageDir = os.path.join(self.repoPath, sdUUID, sd.DOMAIN_IMAGES,
imgUUID)
if not os.path.isdir(imageDir):
self.log.info("Create placeholder %s for image's volumes",
imageDir)
taskName = "create image rollback: " + imgUUID
vars.task.pushRecovery(task.Recovery(taskName, "image", "Image",
"createImageRollback",
[imageDir]))
os.mkdir(imageDir)
return imageDir
def getImageDir(self, sdUUID, imgUUID):
"""
Return image directory
"""
return os.path.join(self.repoPath, sdUUID, sd.DOMAIN_IMAGES, imgUUID)
def deletedVolumeName(self, uuid):
"""
Create REMOVED_IMAGE_PREFIX + <random> + uuid string.
"""
randomStr = misc.randomStr(RENAME_RANDOM_STRING_LEN)
return "%s%s_%s" % (sd.REMOVED_IMAGE_PREFIX, randomStr, uuid)
def __chainSizeCalc(self, sdUUID, imgUUID, volUUID, size):
"""
Compute an estimate of the whole chain size
using the sum of the actual size of the chain's volumes
"""
chain = self.getChain(sdUUID, imgUUID, volUUID)
newsize = 0
template = chain[0].getParentVolume()
if template:
newsize = template.getVolumeSize()
for vol in chain:
newsize += vol.getVolumeSize()
if newsize > size:
newsize = size
newsize = int(newsize * 1.1) # allocate 10% more for cow metadata
return newsize
def getChain(self, sdUUID, imgUUID, volUUID=None):
"""
Return the chain of volumes of image as a sorted list
(not including a shared base (template) if any)
"""
chain = []
volclass = sdCache.produce(sdUUID).getVolumeClass()
# Use volUUID when provided
if volUUID:
srcVol = volclass(self.repoPath, sdUUID, imgUUID, volUUID)
# For template images include only one volume (the template itself)
# NOTE: this relies on the fact that in a template there is only
# one volume
if srcVol.isShared():
return [srcVol]
# Find all the volumes when volUUID is not provided
else:
# Find all volumes of image
uuidlist = volclass.getImageVolumes(self.repoPath, sdUUID, imgUUID)
if not uuidlist:
raise se.ImageDoesNotExistInSD(imgUUID, sdUUID)
srcVol = volclass(self.repoPath, sdUUID, imgUUID, uuidlist[0])
# For template images include only one volume (the template itself)
if len(uuidlist) == 1 and srcVol.isShared():
return [srcVol]
# Searching for the leaf
for vol in uuidlist:
srcVol = volclass(self.repoPath, sdUUID, imgUUID, vol)
if srcVol.isLeaf():
break
srcVol = None
if not srcVol:
self.log.error("There is no leaf in the image %s", imgUUID)
raise se.ImageIsNotLegalChain(imgUUID)
# Build up the sorted parent -> child chain
while not srcVol.isShared():
chain.insert(0, srcVol)
if srcVol.getParent() == volume.BLANK_UUID:
break
srcVol = srcVol.getParentVolume()
self.log.info("sdUUID=%s imgUUID=%s chain=%s ", sdUUID, imgUUID, chain)
return chain
def getTemplate(self, sdUUID, imgUUID):
"""
Return template of the image
"""
tmpl = None
# Find all volumes of image (excluding template)
chain = self.getChain(sdUUID, imgUUID)
# check if the chain is built on top of a template or is standalone
pvol = chain[0].getParentVolume()
if pvol:
tmpl = pvol
elif chain[0].isShared():
tmpl = chain[0]
return tmpl
def createFakeTemplate(self, sdUUID, volParams):
"""
Create fake template (relevant for Backup domain only)
"""
with self._fakeTemplateLock:
try:
destDom = sdCache.produce(sdUUID)
volclass = destDom.getVolumeClass()
# Validate that the destination template exists and is accessible
volclass(self.repoPath, sdUUID, volParams['imgUUID'],
volParams['volUUID'])
except (se.VolumeDoesNotExist, se.ImagePathError):
try:
# Create fake parent volume
destDom.createVolume(
imgUUID=volParams['imgUUID'], size=volParams['size'],
volFormat=volume.COW_FORMAT,
preallocate=volume.SPARSE_VOL,
diskType=volParams['disktype'],
volUUID=volParams['volUUID'], desc="Fake volume",
srcImgUUID=volume.BLANK_UUID,
srcVolUUID=volume.BLANK_UUID)
vol = destDom.produceVolume(imgUUID=volParams['imgUUID'],
volUUID=volParams['volUUID'])
# Mark fake volume as "FAKE"
vol.setLegality(volume.FAKE_VOL)
# Mark fake volume as shared
vol.setShared()
# Now we should re-link all hardlinks of this template in
# all VMs based on it
destDom.templateRelink(volParams['imgUUID'],
volParams['volUUID'])
self.log.debug("Succeeded to create fake image %s in "
"domain %s", volParams['imgUUID'],
destDom.sdUUID)
except Exception:
self.log.error("Failure to create fake image %s in domain "
"%s", volParams['imgUUID'], destDom.sdUUID,
exc_info=True)
def isLegal(self, sdUUID, imgUUID):
"""
Check correctness of the whole chain (excluding template)
"""
try:
legal = True
volclass = sdCache.produce(sdUUID).getVolumeClass()
vollist = volclass.getImageVolumes(self.repoPath, sdUUID, imgUUID)
self.log.info("image %s in domain %s has vollist %s", imgUUID,
sdUUID, str(vollist))
for v in vollist:
vol = volclass(self.repoPath, sdUUID, imgUUID, v)
if not vol.isLegal() or vol.isFake():
legal = False
break
except:
legal = False
return legal
def __cleanupMove(self, srcVol, dstVol):
"""
Cleanup environments after move operation
"""
try:
if srcVol:
srcVol.teardown(sdUUID=srcVol.sdUUID, volUUID=srcVol.volUUID)
if dstVol:
dstVol.teardown(sdUUID=dstVol.sdUUID, volUUID=dstVol.volUUID)
except Exception:
self.log.error("Unexpected error", exc_info=True)
def _createTargetImage(self, destDom, srcSdUUID, imgUUID):
# Before the actual data copy we need to perform several operations,
# such as: create all volumes, create a fake template if needed, ...
try:
# Find all volumes of source image
srcChain = self.getChain(srcSdUUID, imgUUID)
except se.StorageException:
self.log.error("Unexpected error", exc_info=True)
raise
except Exception as e:
self.log.error("Unexpected error", exc_info=True)
raise se.SourceImageActionError(imgUUID, srcSdUUID, str(e))
fakeTemplate = False
pimg = volume.BLANK_UUID # standalone chain
# check if the chain is built on top of a template or is standalone
pvol = srcChain[0].getParentVolume()
if pvol:
# find out parent volume parameters
volParams = pvol.getVolumeParams()
pimg = volParams['imgUUID'] # pimg == template image
if destDom.isBackup():
# FIXME: This workaround helps us copy a VM to the backup domain
# without its template. We will create a fake template
# for future VM creation and mark it as a FAKE volume.
# This situation is relevant for the backup domain only.
fakeTemplate = True
@contextmanager
def justLogIt(img):
self.log.debug("You don't really need lock parent of image %s",
img)
yield
dstImageResourcesNamespace = sd.getNamespace(
destDom.sdUUID, resourceFactories.IMAGE_NAMESPACE)
# In the destination domain we need to lock the image's template, if it exists
with rmanager.acquireResource(dstImageResourcesNamespace, pimg,
rm.LockType.shared) \
if pimg != volume.BLANK_UUID else justLogIt(imgUUID):
if fakeTemplate:
self.createFakeTemplate(destDom.sdUUID, volParams)
dstChain = []
for srcVol in srcChain:
# Create the dst volume
try:
# find out src volume parameters
volParams = srcVol.getVolumeParams(bs=1)
# To avoid prezeroing preallocated volumes on NFS domains
# we create the target as a sparse volume (since it will be
# soon filled with the data coming from the copy) and then
# we change its metadata back to the original value.
if (volParams['prealloc'] == volume.PREALLOCATED_VOL
and destDom.supportsSparseness):
tmpVolPreallocation = volume.SPARSE_VOL
else:
tmpVolPreallocation = volParams['prealloc']
destDom.createVolume(imgUUID=imgUUID,
size=volParams['size'],
volFormat=volParams['volFormat'],
preallocate=tmpVolPreallocation,
diskType=volParams['disktype'],
volUUID=srcVol.volUUID,
desc=volParams['descr'],
srcImgUUID=pimg,
srcVolUUID=volParams['parent'])
dstVol = destDom.produceVolume(imgUUID=imgUUID,
volUUID=srcVol.volUUID)
# Extend volume (for LV only) size to the actual size
dstVol.extend((volParams['apparentsize'] + 511) / 512)
# Change destination volume metadata back to the original
# type.
if tmpVolPreallocation != volParams['prealloc']:
dstVol.setType(volParams['prealloc'])
dstChain.append(dstVol)
except se.StorageException:
self.log.error("Unexpected error", exc_info=True)
raise
except Exception as e:
self.log.error("Unexpected error", exc_info=True)
raise se.DestImageActionError(imgUUID, destDom.sdUUID,
str(e))
# only base may have a different parent image
pimg = imgUUID
return {'srcChain': srcChain, 'dstChain': dstChain}
def _interImagesCopy(self, destDom, srcSdUUID, imgUUID, chains):
srcLeafVol = chains['srcChain'][-1]
dstLeafVol = chains['dstChain'][-1]
try:
# Prepare the whole chains before the copy
srcLeafVol.prepare(rw=False)
dstLeafVol.prepare(rw=True, chainrw=True, setrw=True)
except Exception:
self.log.error("Unexpected error", exc_info=True)
# teardown volumes
self.__cleanupMove(srcLeafVol, dstLeafVol)
raise
try:
for srcVol in chains['srcChain']:
# Do the actual copy
try:
dstVol = destDom.produceVolume(imgUUID=imgUUID,
volUUID=srcVol.volUUID)
srcFmt = srcVol.getFormat()
if srcFmt == volume.RAW_FORMAT:
srcFmtStr = volume.fmt2str(srcFmt)
dstFmtStr = volume.fmt2str(dstVol.getFormat())
self.log.debug("start qemu convert")
qemuImg.convert(srcVol.getVolumePath(),
dstVol.getVolumePath(),
vars.task.aborting,
srcFmtStr, dstFmtStr)
else:
srcSize = srcVol.getVolumeSize(bs=1)
misc.ddWatchCopy(srcVol.getVolumePath(),
dstVol.getVolumePath(),
vars.task.aborting,
size=srcSize)
except ActionStopped:
raise
except se.StorageException:
self.log.error("Unexpected error", exc_info=True)
raise
except Exception:
self.log.error("Copy image error: image=%s, src domain=%s,"
" dst domain=%s", imgUUID, srcSdUUID,
destDom.sdUUID, exc_info=True)
raise se.CopyImageError()
finally:
# teardown volumes
self.__cleanupMove(srcLeafVol, dstLeafVol)
def _finalizeDestinationImage(self, destDom, imgUUID, chains, force):
for srcVol in chains['srcChain']:
try:
dstVol = destDom.produceVolume(imgUUID=imgUUID,
volUUID=srcVol.volUUID)
# When copying a template, we should set the destination
# volume as SHARED (after the copy, because otherwise preparing
# it as RW would fail)
if srcVol.isShared():
dstVol.setShared()
elif srcVol.isInternal():
dstVol.setInternal()
except se.StorageException:
self.log.error("Unexpected error", exc_info=True)
raise
except Exception as e:
self.log.error("Unexpected error", exc_info=True)
raise se.DestImageActionError(imgUUID, destDom.sdUUID, str(e))
def move(self, srcSdUUID, dstSdUUID, imgUUID, vmUUID, op, postZero, force):
"""
Move/Copy image between storage domains within same storage pool
"""
self.log.info("srcSdUUID=%s dstSdUUID=%s imgUUID=%s vmUUID=%s op=%s "
"force=%s postZero=%s", srcSdUUID, dstSdUUID, imgUUID,
vmUUID, OP_TYPES[op], str(force), str(postZero))
destDom = sdCache.produce(dstSdUUID)
# If the image already exists, check whether it is illegal/fake and overwrite it
if not self.isLegal(destDom.sdUUID, imgUUID):
force = True
# If we got the overwrite command, we must first remove the previous
# instance of the image (if it exists) in the destination domain
if force:
self.log.info("delete image %s on domain %s before overwriting",
imgUUID, destDom.sdUUID)
_deleteImage(destDom, imgUUID, postZero)
chains = self._createTargetImage(destDom, srcSdUUID, imgUUID)
self._interImagesCopy(destDom, srcSdUUID, imgUUID, chains)
self._finalizeDestinationImage(destDom, imgUUID, chains, force)
if force:
leafVol = chains['dstChain'][-1]
# Now we should re-link all deleted hardlinks, if any exist
destDom.templateRelink(imgUUID, leafVol.volUUID)
# At this point we successfully finished the 'copy' part of the
# operation and we can clear all recoveries.
vars.task.clearRecoveries()
# If it's 'move' operation, we should delete src image after copying
if op == MOVE_OP:
# TODO: Should raise here.
try:
dom = sdCache.produce(srcSdUUID)
_deleteImage(dom, imgUUID, postZero)
except se.StorageException:
self.log.warning("Failed to remove img: %s from srcDom %s: "
"after it was copied to: %s", imgUUID,
srcSdUUID, dstSdUUID)
self.log.info("%s task on image %s was successfully finished",
OP_TYPES[op], imgUUID)
return True
def cloneStructure(self, sdUUID, imgUUID, dstSdUUID):
self._createTargetImage(sdCache.produce(dstSdUUID), sdUUID, imgUUID)
def syncData(self, sdUUID, imgUUID, dstSdUUID, syncType):
srcChain = self.getChain(sdUUID, imgUUID)
dstChain = self.getChain(dstSdUUID, imgUUID)
if syncType == SYNC_VOLUMES_INTERNAL:
try:
# Removing the leaf volumes
del srcChain[-1], dstChain[-1]
except IndexError:
raise se.ImageIsNotLegalChain()
elif syncType == SYNC_VOLUMES_LEAF:
try:
# Removing all the internal volumes
del srcChain[:-1], dstChain[:-1]
except IndexError:
raise se.ImageIsNotLegalChain()
elif syncType != SYNC_VOLUMES_ALL:
raise se.NotImplementedException()
if len(srcChain) != len(dstChain):
raise se.DestImageActionError(imgUUID, dstSdUUID)
# Checking the volume uuids (after removing the leaves to allow
# different uuids for the current top layer, see previous check).
for i, v in enumerate(srcChain):
if v.volUUID != dstChain[i].volUUID:
raise se.DestImageActionError(imgUUID, dstSdUUID)
dstDom = sdCache.produce(dstSdUUID)
self._interImagesCopy(dstDom, sdUUID, imgUUID,
{'srcChain': srcChain, 'dstChain': dstChain})
self._finalizeDestinationImage(dstDom, imgUUID,
{'srcChain': srcChain,
'dstChain': dstChain}, False)
def __cleanupMultimove(self, sdUUID, imgList, postZero=False):
"""
Cleanup environments after multiple-move operation
"""
for imgUUID in imgList:
try:
dom = sdCache.produce(sdUUID)
_deleteImage(dom, imgUUID, postZero)
except se.StorageException:
self.log.warning("Delete image failed for image: %s in SD: %s",
imgUUID, sdUUID, exc_info=True)
def multiMove(self, srcSdUUID, dstSdUUID, imgDict, vmUUID, force):
"""
Move multiple images between storage domains within same storage pool
"""
self.log.info("srcSdUUID=%s dstSdUUID=%s imgDict=%s vmUUID=%s "
"force=%s", srcSdUUID, dstSdUUID, str(imgDict), vmUUID,
str(force))
cleanup_candidates = []
# First, copy all images to the destination domain
for (imgUUID, postZero) in imgDict.iteritems():
self.log.info("srcSdUUID=%s dstSdUUID=%s imgUUID=%s postZero=%s",
srcSdUUID, dstSdUUID, imgUUID, postZero)
try:
self.move(srcSdUUID, dstSdUUID, imgUUID, vmUUID, COPY_OP,
postZero, force)
except se.StorageException:
self.__cleanupMultimove(sdUUID=dstSdUUID,
imgList=cleanup_candidates,
postZero=postZero)
raise
except Exception as e:
self.__cleanupMultimove(sdUUID=dstSdUUID,
imgList=cleanup_candidates,
postZero=postZero)
self.log.error(e, exc_info=True)
raise se.CopyImageError("image=%s, src domain=%s, dst "
"domain=%s: msg %s" %
(imgUUID, srcSdUUID, dstSdUUID,
str(e)))
cleanup_candidates.append(imgUUID)
# Remove images from source domain only after successful copying of
# all images to the destination domain
for (imgUUID, postZero) in imgDict.iteritems():
try:
dom = sdCache.produce(srcSdUUID)
_deleteImage(dom, imgUUID, postZero)
except se.StorageException:
self.log.warning("Delete image failed for image %s in SD: %s",
imgUUID, dom.sdUUID, exc_info=True)
def __cleanupCopy(self, srcVol, dstVol):
"""
Cleanup environments after copy operation
"""
try:
if srcVol:
srcVol.teardown(sdUUID=srcVol.sdUUID, volUUID=srcVol.volUUID)
if dstVol:
dstVol.teardown(sdUUID=dstVol.sdUUID, volUUID=dstVol.volUUID)
except Exception:
self.log.error("Unexpected error", exc_info=True)
def validateVolumeChain(self, sdUUID, imgUUID):
"""
Check correctness of the whole chain (including template if exists)
"""
if not self.isLegal(sdUUID, imgUUID):
raise se.ImageIsNotLegalChain(imgUUID)
chain = self.getChain(sdUUID, imgUUID)
# check if the chain is built on top of a template or is standalone
pvol = chain[0].getParentVolume()
if pvol:
if not pvol.isLegal() or pvol.isFake():
raise se.ImageIsNotLegalChain(imgUUID)
def copyCollapsed(self, sdUUID, vmUUID, srcImgUUID, srcVolUUID, dstImgUUID,
dstVolUUID, descr, dstSdUUID, volType, volFormat,
preallocate, postZero, force):
"""
Create new template/volume from VM.
Do it by collapse and copy the whole chain (baseVolUUID->srcVolUUID)
"""
self.log.info("sdUUID=%s vmUUID=%s srcImgUUID=%s srcVolUUID=%s "
"dstImgUUID=%s dstVolUUID=%s dstSdUUID=%s volType=%s "
"volFormat=%s preallocate=%s force=%s postZero=%s",
sdUUID, vmUUID, srcImgUUID, srcVolUUID, dstImgUUID,
dstVolUUID, dstSdUUID, volType,
volume.type2name(volFormat),
volume.type2name(preallocate), str(force), str(postZero))
try:
srcVol = dstVol = None
# Find out dest sdUUID
if dstSdUUID == sd.BLANK_UUID:
dstSdUUID = sdUUID
volclass = sdCache.produce(sdUUID).getVolumeClass()
destDom = sdCache.produce(dstSdUUID)
# find src volume
try:
srcVol = volclass(self.repoPath, sdUUID, srcImgUUID,
srcVolUUID)
except se.StorageException:
raise
except Exception as e:
self.log.error(e, exc_info=True)
raise se.SourceImageActionError(srcImgUUID, sdUUID, str(e))
# Create dst volume
try:
# find out src volume parameters
volParams = srcVol.getVolumeParams()
if volParams['parent'] and \
volParams['parent'] != volume.BLANK_UUID:
# The volume has a parent and is therefore part of a chain.
# In that case we cannot know the exact size of the
# sparse target file (chain ==> cow ==> sparse).
# Therefore compute an estimate of the target volume size
# using the sum of the actual sizes of the chain's volumes
if volParams['volFormat'] != volume.COW_FORMAT or \
volParams['prealloc'] != volume.SPARSE_VOL:
raise se.IncorrectFormat(self)
volParams['apparentsize'] = self.__chainSizeCalc(
sdUUID, srcImgUUID, srcVolUUID, volParams['size'])
# Find out dest volume parameters
if preallocate in [volume.PREALLOCATED_VOL, volume.SPARSE_VOL]:
volParams['prealloc'] = preallocate
if volFormat in [volume.COW_FORMAT, volume.RAW_FORMAT]:
dstVolFormat = volFormat
else:
dstVolFormat = volParams['volFormat']
self.log.info("copy source %s:%s:%s vol size %s destination "
"%s:%s:%s apparentsize %s" %
(sdUUID, srcImgUUID, srcVolUUID,
volParams['size'], dstSdUUID, dstImgUUID,
dstVolUUID, volParams['apparentsize']))
# If the image already exists, check whether it is illegal/fake
# and overwrite it
if not self.isLegal(dstSdUUID, dstImgUUID):
force = True
# If we got the overwrite command, we must first remove the
# previous instance of the image (if it exists) in the
# destination domain
if force:
self.log.info("delete image %s on domain %s before "
"overwriting", dstImgUUID, dstSdUUID)
_deleteImage(destDom, dstImgUUID, postZero)
# To avoid 'prezeroing' preallocated volume on NFS domain,
# we create the target volume with minimal size and after that
# we'll change its metadata back to the original size.
tmpSize = TEMPORARY_VOLUME_SIZE # in sectors (10M)
destDom.createVolume(
imgUUID=dstImgUUID, size=tmpSize, volFormat=dstVolFormat,
preallocate=volParams['prealloc'],
diskType=volParams['disktype'], volUUID=dstVolUUID,
desc=descr, srcImgUUID=volume.BLANK_UUID,
srcVolUUID=volume.BLANK_UUID)
dstVol = sdCache.produce(dstSdUUID).produceVolume(
imgUUID=dstImgUUID, volUUID=dstVolUUID)
# For a conversion to 'raw' we need to use the virtual disk size
# instead of the apparent size
if dstVolFormat == volume.RAW_FORMAT:
newsize = volParams['size']
else:
newsize = volParams['apparentsize']
dstVol.extend(newsize)
dstPath = dstVol.getVolumePath()
# Change destination volume metadata back to the original size.
dstVol.setSize(volParams['size'])
except se.StorageException:
self.log.error("Unexpected error", exc_info=True)
raise
except Exception as e:
self.log.error("Unexpected error", exc_info=True)
raise se.CopyImageError("Destination volume %s error: %s" %
(dstVolUUID, str(e)))
try:
# Start the actual copy image procedure
srcVol.prepare(rw=False)
dstVol.prepare(rw=True, setrw=True)
try:
(rc, out, err) = volume.qemuConvert(
volParams['path'], dstPath, volParams['volFormat'],
dstVolFormat, vars.task.aborting,
size=srcVol.getVolumeSize(bs=1),
dstvolType=dstVol.getType())
if rc:
raise se.StorageException("rc: %s, err: %s" %
(rc, err))
except ActionStopped:
raise
except se.StorageException as e:
raise se.CopyImageError(str(e))
# Mark volume as SHARED
if volType == volume.SHARED_VOL:
dstVol.setShared()
dstVol.setLegality(volume.LEGAL_VOL)
if force:
# Now we should re-link all deleted hardlinks, if any exist
destDom.templateRelink(dstImgUUID, dstVolUUID)
except se.StorageException:
self.log.error("Unexpected error", exc_info=True)
raise
except Exception as e:
self.log.error("Unexpected error", exc_info=True)
raise se.CopyImageError("src image=%s, dst image=%s: msg=%s" %
(srcImgUUID, dstImgUUID, str(e)))
self.log.info("Finished copying %s:%s -> %s:%s", sdUUID,
srcVolUUID, dstSdUUID, dstVolUUID)
#TODO: handle return status
return dstVolUUID
finally:
self.__cleanupCopy(srcVol=srcVol, dstVol=dstVol)
def markIllegalSubChain(self, sdDom, imgUUID, chain):
"""
Mark all volumes in the sub-chain as illegal
"""
if not chain:
raise se.InvalidParameterException("chain", str(chain))
volclass = sdDom.getVolumeClass()
ancestor = chain[0]
successor = chain[-1]
tmpVol = volclass(self.repoPath, sdDom.sdUUID, imgUUID, successor)
dstParent = volclass(self.repoPath, sdDom.sdUUID, imgUUID,
ancestor).getParent()
# Mark all volumes as illegal
while tmpVol and dstParent != tmpVol.volUUID:
vol = tmpVol.getParentVolume()
tmpVol.setLegality(volume.ILLEGAL_VOL)
tmpVol = vol
def __teardownSubChain(self, sdUUID, imgUUID, chain):
"""
Teardown all volumes in the sub-chain
"""
if not chain:
raise se.InvalidParameterException("chain", str(chain))
# Teardown the subchain ('ancestor' -> ... -> 'successor') volumes
# before they are deleted.
# This subchain includes volumes that were merged (rebased)
# into 'successor' and now should be deleted.
# We prepared all these volumes as part of preparing the whole
# chain before the rebase, but during the rebase we detached all of
# them from the chain and couldn't tear them down properly.
# So now we must tear them down to release their resources.
volclass = sdCache.produce(sdUUID).getVolumeClass()
ancestor = chain[0]
successor = chain[-1]
srcVol = volclass(self.repoPath, sdUUID, imgUUID, successor)
dstParent = volclass(self.repoPath, sdUUID, imgUUID,
ancestor).getParent()
while srcVol and dstParent != srcVol.volUUID:
try:
self.log.info("Teardown volume %s from image %s",
srcVol.volUUID, imgUUID)
vol = srcVol.getParentVolume()
srcVol.teardown(sdUUID=srcVol.sdUUID, volUUID=srcVol.volUUID,
justme=True)
srcVol = vol
except Exception:
self.log.info("Failure to teardown volume %s in subchain %s "
"-> %s", srcVol.volUUID, ancestor, successor,
exc_info=True)
def removeSubChain(self, sdDom, imgUUID, chain, postZero):
"""
Remove all volumes in the sub-chain
"""
if not chain:
raise se.InvalidParameterException("chain", str(chain))
volclass = sdDom.getVolumeClass()
ancestor = chain[0]
successor = chain[-1]
srcVol = volclass(self.repoPath, sdDom.sdUUID, imgUUID, successor)
dstParent = volclass(self.repoPath, sdDom.sdUUID, imgUUID,
ancestor).getParent()
while srcVol and dstParent != srcVol.volUUID:
self.log.info("Remove volume %s from image %s", srcVol.volUUID,
imgUUID)
vol = srcVol.getParentVolume()
srcVol.delete(postZero=postZero, force=True)
chain.remove(srcVol.volUUID)
srcVol = vol
def _internalVolumeMerge(self, sdDom, srcVolParams, volParams, newSize,
chain):
"""
Merge internal volume
"""
srcVol = sdDom.produceVolume(imgUUID=srcVolParams['imgUUID'],
volUUID=srcVolParams['volUUID'])
# Extend successor volume to new accumulated subchain size
srcVol.extend(newSize)
srcVol.prepare(rw=True, chainrw=True, setrw=True)
try:
backingVolPath = os.path.join('..', srcVolParams['imgUUID'],
volParams['volUUID'])
srcVol.rebase(volParams['volUUID'], backingVolPath,
volParams['volFormat'], unsafe=False, rollback=True)
finally:
srcVol.teardown(sdUUID=srcVol.sdUUID, volUUID=srcVol.volUUID)
# Prepare chain for future erase
chain.remove(srcVolParams['volUUID'])
self.__teardownSubChain(sdDom.sdUUID, srcVolParams['imgUUID'], chain)
return chain
def _baseCowVolumeMerge(self, sdDom, srcVolParams, volParams, newSize,
chain):
"""
Merge snapshot with base COW volume
"""
# FIXME!!! In this case we need a workaround to rebase the successor
# and transform it into a base volume (without pointing to any backing
# volume). Actually this case should be handled by 'qemu-img rebase'
# (RFE to kvm). At this point we can achieve this result with a 4-step
# procedure:
# Step 1: create temporary empty volume similar to ancestor volume
# Step 2: Rebase (safely) successor volume on top of this temporary
# volume
# Step 3: Rebase (unsafely) successor volume on top of "" (empty
# string)
# Step 4: Delete temporary volume
srcVol = sdDom.produceVolume(imgUUID=srcVolParams['imgUUID'],
volUUID=srcVolParams['volUUID'])
# Extend successor volume to new accumulated subchain size
srcVol.extend(newSize)
# Step 1: Create temporary volume with destination volume's parent
# parameters
newUUID = str(uuid.uuid4())
sdDom.createVolume(
imgUUID=srcVolParams['imgUUID'], size=volParams['size'],
volFormat=volParams['volFormat'], preallocate=volume.SPARSE_VOL,
diskType=volParams['disktype'], volUUID=newUUID,
desc="New base volume", srcImgUUID=volume.BLANK_UUID,
srcVolUUID=volume.BLANK_UUID)
tmpVol = sdDom.produceVolume(imgUUID=srcVolParams['imgUUID'],
volUUID=newUUID)
tmpVol.prepare(rw=True, justme=True, setrw=True)
# We should prepare/teardown the volume for every single rebase.
# The reason is recheckIfLeaf at the end of the rebase, which changes
# volume permissions to RO for internal volumes.
srcVol.prepare(rw=True, chainrw=True, setrw=True)
try:
# Step 2: Rebase successor on top of tmpVol
# qemu-img rebase -b tmpBackingFile -F backingFormat -f srcFormat
# src
backingVolPath = os.path.join('..', srcVolParams['imgUUID'],
newUUID)
srcVol.rebase(newUUID, backingVolPath, volParams['volFormat'],
unsafe=False, rollback=True)
finally:
srcVol.teardown(sdUUID=srcVol.sdUUID, volUUID=srcVol.volUUID)
srcVol.prepare(rw=True, chainrw=True, setrw=True)
try:
# Step 3: Remove the pointer to the backing file from the successor
# by an 'unsafe' rebase: qemu-img rebase -u -b "" -F
# backingFormat -f srcFormat src
srcVol.rebase(volume.BLANK_UUID, "", volParams['volFormat'],
unsafe=True, rollback=False)
finally:
srcVol.teardown(sdUUID=srcVol.sdUUID, volUUID=srcVol.volUUID)
# Step 4: Delete temporary volume
tmpVol.teardown(sdUUID=tmpVol.sdUUID, volUUID=tmpVol.volUUID,
justme=True)
tmpVol.delete(postZero=False, force=True)
# Prepare chain for future erase
chain.remove(srcVolParams['volUUID'])
self.__teardownSubChain(sdDom.sdUUID, srcVolParams['imgUUID'], chain)
return chain
def _baseRawVolumeMerge(self, sdDom, srcVolParams, volParams, chain):
"""
Merge snapshot with base RAW volume
"""
# In this case we need to convert the ancestor->successor subchain to a
# new volume and rebase the successor's children (if any) on top of it.
# Step 1: Create an empty volume named successor_MERGE similar to the
# ancestor volume.
# Step 2: qemuConvert successor -> successor_MERGE
# Step 3: Rename successor to _remove_me__successor
# Step 4: Rename successor_MERGE to successor
# Step 5: Unsafely rebase successor's children on top of the temporary
# volume
srcVol = chain[-1]
with srcVol.scopedPrepare(rw=True, chainrw=True, setrw=True):
# Find out successor's children list
chList = srcVolParams['children']
# Step 1: Create an empty volume named successor_MERGE with
# destination volume's parent parameters
newUUID = srcVol.volUUID + "_MERGE"
sdDom.createVolume(
imgUUID=srcVolParams['imgUUID'], size=srcVolParams['size'],
volFormat=volParams['volFormat'],
preallocate=volParams['prealloc'],
diskType=volParams['disktype'], volUUID=newUUID,
desc=srcVolParams['descr'], srcImgUUID=volume.BLANK_UUID,
srcVolUUID=volume.BLANK_UUID)
newVol = sdDom.produceVolume(imgUUID=srcVolParams['imgUUID'],
volUUID=newUUID)
with newVol.scopedPrepare(rw=True, justme=True, setrw=True):
# Step 2: Convert successor to new volume
# qemu-img convert -f qcow2 successor -O raw newUUID
(rc, out, err) = volume.qemuConvert(
srcVolParams['path'], newVol.getVolumePath(),
srcVolParams['volFormat'], volParams['volFormat'],
vars.task.aborting, size=volParams['apparentsize'],
dstvolType=newVol.getType())
if rc:
self.log.error("qemu-img convert failed: rc=%s, out=%s, "
"err=%s", rc, out, err)
raise se.MergeSnapshotsError(newUUID)
if chList:
newVol.setInternal()
# Step 3: Rename successor to _remove_me__successor
tmpUUID = self.deletedVolumeName(srcVol.volUUID)
srcVol.rename(tmpUUID)
# Step 4: Rename successor_MERGE to successor
newVol.rename(srcVolParams['volUUID'])
# Step 5: Rebase children 'unsafely' on top of new volume
# qemu-img rebase -u -b tmpBackingFile -F backingFormat -f srcFormat
# src
for ch in chList:
ch.prepare(rw=True, chainrw=True, setrw=True, force=True)
backingVolPath = os.path.join('..', srcVolParams['imgUUID'],
srcVolParams['volUUID'])
try:
ch.rebase(srcVolParams['volUUID'], backingVolPath,
volParams['volFormat'], unsafe=True, rollback=True)
finally:
ch.teardown(sdUUID=ch.sdUUID, volUUID=ch.volUUID)
ch.recheckIfLeaf()
# Prepare chain for future erase
rmChain = [vol.volUUID for
vol in chain if vol.volUUID != srcVolParams['volUUID']]
rmChain.append(tmpUUID)
return rmChain
def subChainSizeCalc(self, ancestor, successor, vols):
"""
Do not add additional calls to this function.
TODO:
Should be unified with chainSizeCalc,
but merge should be refactored,
but this file should probably be removed.
"""
chain = []
accumulatedChainSize = 0
endVolName = vols[ancestor].getParent() # TemplateVolName or None
currVolName = successor
while (currVolName != endVolName):
chain.insert(0, currVolName)
accumulatedChainSize += vols[currVolName].getVolumeSize()
currVolName = vols[currVolName].getParent()
return accumulatedChainSize, chain
def merge(self, sdUUID, vmUUID, imgUUID, ancestor, successor, postZero):
"""Merge source volume to the destination volume.
'successor' - source volume UUID
'ancestor' - destination volume UUID
"""
self.log.info("sdUUID=%s vmUUID=%s"
" imgUUID=%s ancestor=%s successor=%s postZero=%s",
sdUUID, vmUUID, imgUUID,
ancestor, successor, str(postZero))
sdDom = sdCache.produce(sdUUID)
allVols = sdDom.getAllVolumes()
volsImgs = sd.getVolsOfImage(allVols, imgUUID)
# Since the image namespace should be locked, producing all the volumes
# is safe. Producing the (eventual) template is safe as well.
# TODO: Split for block and file based volumes for efficiency's sake.
vols = {}
for vName in volsImgs.iterkeys():
vols[vName] = sdDom.produceVolume(imgUUID, vName)
srcVol = vols[successor]
srcVolParams = srcVol.getVolumeParams()
srcVolParams['children'] = []
for vName, vol in vols.iteritems():
if vol.getParent() == successor:
srcVolParams['children'].append(vol)
dstVol = vols[ancestor]
dstParentUUID = dstVol.getParent()
if dstParentUUID != sd.BLANK_UUID:
volParams = vols[dstParentUUID].getVolumeParams()
else:
volParams = dstVol.getVolumeParams()
accSize, chain = self.subChainSizeCalc(ancestor, successor, vols)
imageApparentSize = volParams['size']
# allocate 10% more for cow metadata
reqSize = min(accSize, imageApparentSize) * 1.1
try:
# Start the actual merge image procedure
# IMPORTANT NOTE: volumes in the same image chain might have
# different capacity since the introduction of the disk resize
# feature. This means that when we merge volumes the ancestor
# should get the new size from the successor (in order to be
# able to contain the additional data that we are collapsing).
if dstParentUUID != sd.BLANK_UUID:
# The ancestor isn't a base volume of the chain.
self.log.info("Internal volume merge: src = %s dst = %s",
srcVol.getVolumePath(), dstVol.getVolumePath())
chainToRemove = self._internalVolumeMerge(
sdDom, srcVolParams, volParams, reqSize, chain)
# The ancestor is actually a base volume of the chain.
# We have 2 cases here:
# Case 1: ancestor is a COW volume (use 'rebase' workaround)
# Case 2: ancestor is a RAW volume (use 'convert + rebase')
elif volParams['volFormat'] == volume.RAW_FORMAT:
self.log.info("merge with convert: src = %s dst = %s",
srcVol.getVolumePath(), dstVol.getVolumePath())
chainToRemove = self._baseRawVolumeMerge(
sdDom, srcVolParams, volParams,
[vols[vName] for vName in chain])
else:
self.log.info("4 steps merge: src = %s dst = %s",
srcVol.getVolumePath(), dstVol.getVolumePath())
chainToRemove = self._baseCowVolumeMerge(
sdDom, srcVolParams, volParams, reqSize, chain)
# This is an unrecoverable point; clear all recoveries
vars.task.clearRecoveries()
# mark all snapshots from 'ancestor' to 'successor' as illegal
self.markIllegalSubChain(sdDom, imgUUID, chainToRemove)
except ActionStopped:
raise
except se.StorageException:
self.log.error("Unexpected error", exc_info=True)
raise
except Exception as e:
self.log.error(e, exc_info=True)
raise se.SourceImageActionError(imgUUID, sdUUID, str(e))
try:
# remove all snapshots from 'ancestor' to 'successor'
self.removeSubChain(sdDom, imgUUID, chainToRemove, postZero)
except Exception:
self.log.error("Failure to remove subchain %s -> %s in image %s",
ancestor, successor, imgUUID, exc_info=True)
newVol = sdDom.produceVolume(imgUUID=srcVolParams['imgUUID'],
volUUID=srcVolParams['volUUID'])
try:
newVol.shrinkToOptimalSize()
except qemuImg.QImgError:
self.log.warning("Auto shrink after merge failed", exc_info=True)
self.log.info("Merge src=%s with dst=%s was successfully finished.",
srcVol.getVolumePath(), dstVol.getVolumePath())
def _activateVolumeForImportExport(self, domain, imgUUID, volUUID=None):
chain = self.getChain(domain.sdUUID, imgUUID, volUUID)
template = chain[0].getParentVolume()
if template or len(chain) > 1:
self.log.error("Importing and exporting an image with more "
"than one volume is not supported")
raise se.CopyImageError()
domain.activateVolumes(imgUUID, volUUIDs=[chain[0].volUUID])
return chain[0]
def upload(self, methodArgs, sdUUID, imgUUID, volUUID=None):
domain = sdCache.produce(sdUUID)
vol = self._activateVolumeForImportExport(domain, imgUUID, volUUID)
try:
imageSharing.upload(vol.getVolumePath(), methodArgs)
finally:
domain.deactivateImage(imgUUID)
def download(self, methodArgs, sdUUID, imgUUID, volUUID=None):
domain = sdCache.produce(sdUUID)
vol = self._activateVolumeForImportExport(domain, imgUUID, volUUID)
try:
# Extend the volume (if relevant) to the image size
vol.extend(imageSharing.getSize(methodArgs) / volume.BLOCK_SIZE)
imageSharing.download(vol.getVolumePath(), methodArgs)
finally:
domain.deactivateImage(imgUUID)
# Jumpscale/jumpscale_core8 | lib/JumpScale/clients/graphite/GraphiteClient.py | apache-2.0
from JumpScale import j
import socket
import time
# import urllib.request, urllib.parse, urllib.error
try:
import urllib.request
import urllib.parse
import urllib.error
except:
import urllib.parse as urllib
class GraphiteClient:
def __init__(self):
self.__jslocation__ = "j.clients.graphite"
self._SERVER = '127.0.0.1'
self._CARBON_PORT = 2003
self._GRAPHITE_PORT = 8081
self._url = "http://%s:%s/render" % (self._SERVER, self._GRAPHITE_PORT)
# self.sock.connect((self.CARBON_SERVER, self.CARBON_PORT))
def send(self, msg):
"""
e.g. foo.bar.baz 20
"""
out = ""
for line in msg.split("\n"):
out += '%s %d\n' % (line, int(time.time()))
sock = socket.socket()
sock.connect((self._SERVER, self._CARBON_PORT))
sock.sendall(out.encode()) # sockets require bytes on py3; ascii-safe on py2
sock.close()
def close(self):
pass
def query(self, query_=None, **kwargs):
import requests
query = query_.copy() if query_ else dict()
query.update(kwargs)
query['format'] = 'json'
if 'from_' in query:
query['from'] = query.pop('from_')
qs = urllib.parse.urlencode(query)
url = "%s?%s" % (self._url, qs)
return requests.get(url).json()
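# Minimal usage sketch (not part of the original module): it assumes a carbon
# daemon listening on 127.0.0.1:2003 and the graphite-web render API on port
# 8081, the defaults hard-coded in __init__ above; the metric name below is
# arbitrary. Within JumpScale this client is normally reached through
# j.clients.graphite rather than instantiated directly.
if __name__ == '__main__':
    client = GraphiteClient()
    # send() appends the current timestamp itself, so only "path value"
    # pairs are passed in
    client.send("demo.requests.count 42")
    # render the same series for the last hour; returns the parsed JSON body
    print(client.query({'target': 'demo.requests.count', 'from_': '-1h'}))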
# fidals/refarm-site | catalog/models_operations.py | mit
import abc
import typing
from django.db.migrations.operations.base import Operation
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
# @todo #283:30m Group models.py, models_operations.py, models_expressions.py into the module.
class IndexSQL(abc.ABC):
def __init__(self, name: str):
self.name = name
def _index_name(self, table: str):
return f'{table}_{self.name}_idx'
@abc.abstractmethod
def execute(self, table: str, schema_editor: BaseDatabaseSchemaEditor):
"""Execute SQL operation."""
class AddedIndex(IndexSQL):
def __init__(self, name: str, columns: typing.List[str]):
super().__init__(name)
self.columns = columns
def execute(self, table, schema_editor):
schema_editor.execute(
f'CREATE INDEX {self._index_name(table)} ON {table}'
f'({", ".join(self.columns)});'
)
class DroppedIndex(IndexSQL):
def execute(self, table, schema_editor):
schema_editor.execute(
f'DROP INDEX {self._index_name(table)};'
)
class IndexOperation(Operation):
"""
Operate an index by given IndexSQL objects.
Docs: https://docs.djangoproject.com/en/1.11/ref/migration-operations/#writing-your-own
"""
reduces_to_sql = True
reversible = True
def __init__(self, model_name, forward: IndexSQL, backward: IndexSQL):
self.model_name = model_name
self.forward = forward
self.backward = backward
def state_forwards(self, app_label, state):
"""We have to implement this method for Operation interface."""
def database_forwards(self, app_label, schema_editor, _, to_state):
to_model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, to_model):
table_name = to_model._meta.db_table
self.forward.execute(table_name, schema_editor)
def database_backwards(self, app_label, schema_editor, from_state, _):
from_model = from_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, from_model):
table_name = from_model._meta.db_table
self.backward.execute(table_name, schema_editor)
def describe(self):
return f'Operate the index {self.forward.name} for {self.model_name}'
class RevertedOperation(Operation):
reduces_to_sql = True
reversible = True
def __init__(self, operation: IndexOperation):
self.operation = operation
def state_forwards(self, app_label, state):
"""We have to implement this method for Operation interface."""
def database_forwards(self, app_label, schema_editor, from_state, to_state):
self.operation.database_backwards(app_label, schema_editor, from_state, to_state)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
self.operation.database_forwards(app_label, schema_editor, from_state, to_state)
# Django doesn't provide the ability to add hooks to makemigrations.
# So we have to create migration files and add operations for
# abstract classes (like Tag) manually.
class IndexTagAlphanumeric:
ALPHANUMERIC_NAME = 'alphanumeric_name'
MODEL_NAME = 'tag'
def v1(self) -> typing.List[IndexOperation]:
return [IndexOperation(
model_name=self.MODEL_NAME,
forward=AddedIndex(
name=self.ALPHANUMERIC_NAME,
columns=[
"substring(name, '[a-zA-Zа-яА-Я]+')",
"(substring(name, '[0-9]+\.?[0-9]*')::float)",
],
),
backward=DroppedIndex(name=self.ALPHANUMERIC_NAME),
)]
def v2(self) -> typing.List[IndexOperation]:
"""Preserve whitespaces for alphabetic values of the index."""
old = self.v1()[0]
return [
RevertedOperation(old),
IndexOperation(
model_name=self.MODEL_NAME,
forward=AddedIndex(
name=self.ALPHANUMERIC_NAME,
columns=[
"substring(name, '[a-zA-Zа-яА-Я\s\-_,:;]+')",
"(substring(name, '[0-9]+\.?[0-9]*')::float)",
],
),
backward=DroppedIndex(name=self.ALPHANUMERIC_NAME),
),
]
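# Usage sketch (illustrative, not generated code): because makemigrations
# cannot detect these raw-SQL index operations, a hand-written migration in
# the concrete app that defines the tag model would reference them roughly
# like this; the app label and dependency below are placeholders.
#
#     from django.db import migrations
#     from catalog.models_operations import IndexTagAlphanumeric
#
#     class Migration(migrations.Migration):
#         dependencies = [('catalog', '0001_initial')]
#         operations = IndexTagAlphanumeric().v2()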
# LiberatorUSA/GUCEF | dependencies/curl/tests/python_dependencies/impacket/smb.py
# Copyright (c) 2003-2016 CORE Security Technologies
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Copyright (C) 2001 Michael Teo <[email protected]>
# smb.py - SMB/CIFS library
#
# This software is provided 'as-is', without any express or implied warranty.
# In no event will the author be held liable for any damages arising from the
# use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
#
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
#
# 3. This notice cannot be removed or altered from any source distribution.
#
# Altered source done by Alberto Solino (@agsolino)
# Todo:
# [ ] Try [SMB]transport fragmentation using Transact requests
# [ ] Try other methods of doing write (write_raw, transact2, write, write_and_unlock, write_and_close, write_mpx)
# [-] Try replacements for SMB_COM_NT_CREATE_ANDX (CREATE, T_TRANSACT_CREATE, OPEN_ANDX works)
# [x] Fix forceWriteAndx, which needs to send a RecvRequest, because recv() will not send it
# [x] Fix Recv() when using RecvAndx and the answer comes split in several packets
# [ ] Try [SMB]transport fragmentation with overlapping segments
# [ ] Try [SMB]transport fragmentation with out of order segments
# [x] Do chained AndX requests
# [ ] Transform the rest of the calls to structure
# [X] Implement TRANS/TRANS2 reassembly for list_path
import os
import socket
import string
from binascii import a2b_hex
import datetime
from struct import pack, unpack
from contextlib import contextmanager
from impacket import nmb, ntlm, nt_errors, LOG
from impacket.structure import Structure
from impacket.spnego import SPNEGO_NegTokenInit, TypesMech, SPNEGO_NegTokenResp
# For signing
import hashlib
unicode_support = 0
unicode_convert = 1
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# Dialect for SMB1
SMB_DIALECT = 'NT LM 0.12'
# Shared Device Type
SHARED_DISK = 0x00
SHARED_DISK_HIDDEN = 0x80000000
SHARED_PRINT_QUEUE = 0x01
SHARED_DEVICE = 0x02
SHARED_IPC = 0x03
# Extended attributes mask
ATTR_ARCHIVE = 0x020
ATTR_COMPRESSED = 0x800
ATTR_NORMAL = 0x080
ATTR_HIDDEN = 0x002
ATTR_READONLY = 0x001
ATTR_TEMPORARY = 0x100
ATTR_DIRECTORY = 0x010
ATTR_SYSTEM = 0x004
# Service Type
SERVICE_DISK = 'A:'
SERVICE_PRINTER = 'LPT1:'
SERVICE_IPC = 'IPC'
SERVICE_COMM = 'COMM'
SERVICE_ANY = '?????'
# Server Type (Can be used to mask with SMBMachine.get_type() or SMBDomain.get_type())
SV_TYPE_WORKSTATION = 0x00000001
SV_TYPE_SERVER = 0x00000002
SV_TYPE_SQLSERVER = 0x00000004
SV_TYPE_DOMAIN_CTRL = 0x00000008
SV_TYPE_DOMAIN_BAKCTRL = 0x00000010
SV_TYPE_TIME_SOURCE = 0x00000020
SV_TYPE_AFP = 0x00000040
SV_TYPE_NOVELL = 0x00000080
SV_TYPE_DOMAIN_MEMBER = 0x00000100
SV_TYPE_PRINTQ_SERVER = 0x00000200
SV_TYPE_DIALIN_SERVER = 0x00000400
SV_TYPE_XENIX_SERVER = 0x00000800
SV_TYPE_NT = 0x00001000
SV_TYPE_WFW = 0x00002000
SV_TYPE_SERVER_NT = 0x00004000
SV_TYPE_POTENTIAL_BROWSER = 0x00010000
SV_TYPE_BACKUP_BROWSER = 0x00020000
SV_TYPE_MASTER_BROWSER = 0x00040000
SV_TYPE_DOMAIN_MASTER = 0x00080000
SV_TYPE_LOCAL_LIST_ONLY = 0x40000000
SV_TYPE_DOMAIN_ENUM = 0x80000000
# Options values for SMB.stor_file and SMB.retr_file
SMB_O_CREAT = 0x10 # Create the file if file does not exists. Otherwise, operation fails.
SMB_O_EXCL = 0x00 # When used with SMB_O_CREAT, operation fails if file exists. Cannot be used with SMB_O_OPEN.
SMB_O_OPEN = 0x01 # Open the file if the file exists
SMB_O_TRUNC = 0x02 # Truncate the file if the file exists
# Share Access Mode
SMB_SHARE_COMPAT = 0x00
SMB_SHARE_DENY_EXCL = 0x10
SMB_SHARE_DENY_WRITE = 0x20
SMB_SHARE_DENY_READEXEC = 0x30
SMB_SHARE_DENY_NONE = 0x40
SMB_ACCESS_READ = 0x00
SMB_ACCESS_WRITE = 0x01
SMB_ACCESS_READWRITE = 0x02
SMB_ACCESS_EXEC = 0x03
TRANS_DISCONNECT_TID = 1
TRANS_NO_RESPONSE = 2
STATUS_SUCCESS = 0x00000000
STATUS_LOGON_FAILURE = 0xC000006D
STATUS_LOGON_TYPE_NOT_GRANTED = 0xC000015B
MAX_TFRAG_SIZE = 5840
EVASION_NONE = 0
EVASION_LOW = 1
EVASION_HIGH = 2
EVASION_MAX = 3
RPC_X_BAD_STUB_DATA = 0x6F7
# SMB_FILE_ATTRIBUTES
SMB_FILE_ATTRIBUTE_NORMAL = 0x0000
SMB_FILE_ATTRIBUTE_READONLY = 0x0001
SMB_FILE_ATTRIBUTE_HIDDEN = 0x0002
SMB_FILE_ATTRIBUTE_SYSTEM = 0x0004
SMB_FILE_ATTRIBUTE_VOLUME = 0x0008
SMB_FILE_ATTRIBUTE_DIRECTORY = 0x0010
SMB_FILE_ATTRIBUTE_ARCHIVE = 0x0020
SMB_SEARCH_ATTRIBUTE_READONLY = 0x0100
SMB_SEARCH_ATTRIBUTE_HIDDEN = 0x0200
SMB_SEARCH_ATTRIBUTE_SYSTEM = 0x0400
SMB_SEARCH_ATTRIBUTE_DIRECTORY = 0x1000
SMB_SEARCH_ATTRIBUTE_ARCHIVE = 0x2000
# Session SetupAndX Action flags
SMB_SETUP_GUEST = 0x01
SMB_SETUP_USE_LANMAN_KEY = 0x02
# QUERY_INFORMATION levels
SMB_INFO_ALLOCATION = 0x0001
SMB_INFO_VOLUME = 0x0002
FILE_FS_SIZE_INFORMATION = 0x0003
SMB_QUERY_FS_VOLUME_INFO = 0x0102
SMB_QUERY_FS_SIZE_INFO = 0x0103
SMB_QUERY_FILE_EA_INFO = 0x0103
SMB_QUERY_FS_DEVICE_INFO = 0x0104
SMB_QUERY_FS_ATTRIBUTE_INFO = 0x0105
SMB_QUERY_FILE_BASIC_INFO = 0x0101
SMB_QUERY_FILE_STANDARD_INFO = 0x0102
SMB_QUERY_FILE_ALL_INFO = 0x0107
FILE_FS_FULL_SIZE_INFORMATION = 0x03EF
# SET_INFORMATION levels
SMB_SET_FILE_DISPOSITION_INFO = 0x0102
SMB_SET_FILE_BASIC_INFO = 0x0101
SMB_SET_FILE_END_OF_FILE_INFO = 0x0104
# File System Attributes
FILE_CASE_SENSITIVE_SEARCH = 0x00000001
FILE_CASE_PRESERVED_NAMES = 0x00000002
FILE_UNICODE_ON_DISK = 0x00000004
FILE_PERSISTENT_ACLS = 0x00000008
FILE_FILE_COMPRESSION = 0x00000010
FILE_VOLUME_IS_COMPRESSED = 0x00008000
# FIND_FIRST2 flags and levels
SMB_FIND_CLOSE_AFTER_REQUEST = 0x0001
SMB_FIND_CLOSE_AT_EOS = 0x0002
SMB_FIND_RETURN_RESUME_KEYS = 0x0004
SMB_FIND_CONTINUE_FROM_LAST = 0x0008
SMB_FIND_WITH_BACKUP_INTENT = 0x0010
FILE_DIRECTORY_FILE = 0x00000001
FILE_DELETE_ON_CLOSE = 0x00001000
FILE_NON_DIRECTORY_FILE = 0x00000040
SMB_FIND_INFO_STANDARD = 0x0001
SMB_FIND_FILE_DIRECTORY_INFO = 0x0101
SMB_FIND_FILE_FULL_DIRECTORY_INFO= 0x0102
SMB_FIND_FILE_NAMES_INFO = 0x0103
SMB_FIND_FILE_BOTH_DIRECTORY_INFO= 0x0104
SMB_FIND_FILE_ID_FULL_DIRECTORY_INFO = 0x105
SMB_FIND_FILE_ID_BOTH_DIRECTORY_INFO = 0x106
# DesiredAccess flags
FILE_READ_DATA = 0x00000001
FILE_WRITE_DATA = 0x00000002
FILE_APPEND_DATA = 0x00000004
FILE_EXECUTE = 0x00000020
MAXIMUM_ALLOWED = 0x02000000
GENERIC_ALL = 0x10000000
GENERIC_EXECUTE = 0x20000000
GENERIC_WRITE = 0x40000000
GENERIC_READ = 0x80000000
# ShareAccess flags
FILE_SHARE_NONE = 0x00000000
FILE_SHARE_READ = 0x00000001
FILE_SHARE_WRITE = 0x00000002
FILE_SHARE_DELETE = 0x00000004
# CreateDisposition flags
FILE_SUPERSEDE = 0x00000000
FILE_OPEN = 0x00000001
FILE_CREATE = 0x00000002
FILE_OPEN_IF = 0x00000003
FILE_OVERWRITE = 0x00000004
FILE_OVERWRITE_IF = 0x00000005
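# Illustrative flag combination (an assumption about typical use, not code
# taken from this module): opening an existing file for shared read/write
# access would pass roughly
#     DesiredAccess     = FILE_READ_DATA | FILE_WRITE_DATA
#     ShareAccess       = FILE_SHARE_READ | FILE_SHARE_WRITE
#     CreateDisposition = FILE_OPEN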
def strerror(errclass, errcode):
if errclass == 0x01:
return 'OS error', ERRDOS.get(errcode, 'Unknown error')
elif errclass == 0x02:
return 'Server error', ERRSRV.get(errcode, 'Unknown error')
elif errclass == 0x03:
return 'Hardware error', ERRHRD.get(errcode, 'Unknown error')
# This is not a standard error class for SMB
#elif errclass == 0x80:
# return 'Browse error', ERRBROWSE.get(errcode, 'Unknown error')
elif errclass == 0xff:
return 'Bad command', 'Bad command. Please file bug report'
else:
return 'Unknown error', 'Unknown error'
# Raised when an error has occurred during a session
class SessionError(Exception):
# SMB X/Open error codes for the ERRDOS error class
ERRsuccess = 0
ERRbadfunc = 1
ERRbadfile = 2
ERRbadpath = 3
ERRnofids = 4
ERRnoaccess = 5
ERRbadfid = 6
ERRbadmcb = 7
ERRnomem = 8
ERRbadmem = 9
ERRbadenv = 10
ERRbadaccess = 12
ERRbaddata = 13
ERRres = 14
ERRbaddrive = 15
ERRremcd = 16
ERRdiffdevice = 17
ERRnofiles = 18
ERRgeneral = 31
ERRbadshare = 32
ERRlock = 33
ERRunsup = 50
ERRnetnamedel = 64
ERRnosuchshare = 67
ERRfilexists = 80
ERRinvalidparam = 87
ERRcannotopen = 110
ERRinsufficientbuffer = 122
ERRinvalidname = 123
ERRunknownlevel = 124
ERRnotlocked = 158
ERRrename = 183
ERRbadpipe = 230
ERRpipebusy = 231
ERRpipeclosing = 232
ERRnotconnected = 233
ERRmoredata = 234
ERRnomoreitems = 259
ERRbaddirectory = 267
ERReasnotsupported = 282
ERRlogonfailure = 1326
ERRbuftoosmall = 2123
ERRunknownipc = 2142
ERRnosuchprintjob = 2151
ERRinvgroup = 2455
# here's a special one from observing NT
ERRnoipc = 66
# These errors seem to be only returned by the NT printer driver system
ERRdriveralreadyinstalled = 1795
ERRunknownprinterport = 1796
ERRunknownprinterdriver = 1797
ERRunknownprintprocessor = 1798
ERRinvalidseparatorfile = 1799
ERRinvalidjobpriority = 1800
ERRinvalidprintername = 1801
ERRprinteralreadyexists = 1802
ERRinvalidprintercommand = 1803
ERRinvaliddatatype = 1804
ERRinvalidenvironment = 1805
ERRunknownprintmonitor = 3000
ERRprinterdriverinuse = 3001
ERRspoolfilenotfound = 3002
ERRnostartdoc = 3003
ERRnoaddjob = 3004
ERRprintprocessoralreadyinstalled = 3005
ERRprintmonitoralreadyinstalled = 3006
ERRinvalidprintmonitor = 3007
ERRprintmonitorinuse = 3008
ERRprinterhasjobsqueued = 3009
# Error codes for the ERRSRV class
ERRerror = 1
ERRbadpw = 2
ERRbadtype = 3
ERRaccess = 4
ERRinvnid = 5
ERRinvnetname = 6
ERRinvdevice = 7
ERRqfull = 49
ERRqtoobig = 50
ERRinvpfid = 52
ERRsmbcmd = 64
ERRsrverror = 65
ERRfilespecs = 67
ERRbadlink = 68
ERRbadpermits = 69
ERRbadpid = 70
ERRsetattrmode = 71
ERRpaused = 81
ERRmsgoff = 82
ERRnoroom = 83
ERRrmuns = 87
ERRtimeout = 88
ERRnoresource = 89
ERRtoomanyuids = 90
ERRbaduid = 91
ERRuseMPX = 250
ERRuseSTD = 251
ERRcontMPX = 252
ERRbadPW = None
ERRnosupport = 0
ERRunknownsmb = 22
# Error codes for the ERRHRD class
ERRnowrite = 19
ERRbadunit = 20
ERRnotready = 21
ERRbadcmd = 22
ERRdata = 23
ERRbadreq = 24
ERRseek = 25
ERRbadmedia = 26
ERRbadsector = 27
ERRnopaper = 28
ERRwrite = 29
ERRread = 30
ERRwrongdisk = 34
ERRFCBunavail = 35
ERRsharebufexc = 36
ERRdiskfull = 39
hard_msgs = {
19: ("ERRnowrite", "Attempt to write on write-protected diskette."),
20: ("ERRbadunit", "Unknown unit."),
21: ("ERRnotready", "Drive not ready."),
22: ("ERRbadcmd", "Unknown command."),
23: ("ERRdata", "Data error (CRC)."),
24: ("ERRbadreq", "Bad request structure length."),
25: ("ERRseek", "Seek error."),
26: ("ERRbadmedia", "Unknown media type."),
27: ("ERRbadsector", "Sector not found."),
28: ("ERRnopaper", "Printer out of paper."),
29: ("ERRwrite", "Write fault."),
30: ("ERRread", "Read fault."),
31: ("ERRgeneral", "General failure."),
32: ("ERRbadshare", "An open conflicts with an existing open."),
33: ("ERRlock", "A Lock request conflicted with an existing lock or specified an invalid mode, or an Unlock requested attempted to remove a lock held by another process."),
34: ("ERRwrongdisk", "The wrong disk was found in a drive."),
35: ("ERRFCBUnavail", "No FCBs are available to process request."),
36: ("ERRsharebufexc", "A sharing buffer has been exceeded.")
}
dos_msgs = {
ERRbadfunc: ("ERRbadfunc", "Invalid function."),
ERRbadfile: ("ERRbadfile", "File not found."),
ERRbadpath: ("ERRbadpath", "Directory invalid."),
ERRnofids: ("ERRnofids", "No file descriptors available"),
ERRnoaccess: ("ERRnoaccess", "Access denied."),
ERRbadfid: ("ERRbadfid", "Invalid file handle."),
ERRbadmcb: ("ERRbadmcb", "Memory control blocks destroyed."),
ERRnomem: ("ERRnomem", "Insufficient server memory to perform the requested function."),
ERRbadmem: ("ERRbadmem", "Invalid memory block address."),
ERRbadenv: ("ERRbadenv", "Invalid environment."),
11: ("ERRbadformat", "Invalid format."),
ERRbadaccess: ("ERRbadaccess", "Invalid open mode."),
ERRbaddata: ("ERRbaddata", "Invalid data."),
ERRres: ("ERRres", "reserved."),
ERRbaddrive: ("ERRbaddrive", "Invalid drive specified."),
ERRremcd: ("ERRremcd", "A Delete Directory request attempted to remove the server's current directory."),
ERRdiffdevice: ("ERRdiffdevice", "Not same device."),
ERRnofiles: ("ERRnofiles", "A File Search command can find no more files matching the specified criteria."),
ERRbadshare: ("ERRbadshare", "The sharing mode specified for an Open conflicts with existing FIDs on the file."),
ERRlock: ("ERRlock", "A Lock request conflicted with an existing lock or specified an invalid mode, or an Unlock requested attempted to remove a lock held by another process."),
ERRunsup: ("ERRunsup", "The operation is unsupported"),
ERRnosuchshare: ("ERRnosuchshare", "You specified an invalid share name"),
ERRfilexists: ("ERRfilexists", "The file named in a Create Directory, Make New File or Link request already exists."),
ERRinvalidname: ("ERRinvalidname", "Invalid name"),
ERRbadpipe: ("ERRbadpipe", "Pipe invalid."),
ERRpipebusy: ("ERRpipebusy", "All instances of the requested pipe are busy."),
ERRpipeclosing: ("ERRpipeclosing", "Pipe close in progress."),
ERRnotconnected: ("ERRnotconnected", "No process on other end of pipe."),
ERRmoredata: ("ERRmoredata", "There is more data to be returned."),
ERRinvgroup: ("ERRinvgroup", "Invalid workgroup (try the -W option)"),
ERRlogonfailure: ("ERRlogonfailure", "Logon failure"),
ERRdiskfull: ("ERRdiskfull", "Disk full"),
ERRgeneral: ("ERRgeneral", "General failure"),
ERRunknownlevel: ("ERRunknownlevel", "Unknown info level")
}
server_msgs = {
1: ("ERRerror", "Non-specific error code."),
2: ("ERRbadpw", "Bad password - name/password pair in a Tree Connect or Session Setup are invalid."),
3: ("ERRbadtype", "reserved."),
4: ("ERRaccess", "The requester does not have the necessary access rights within the specified context for the requested function. The context is defined by the TID or the UID."),
5: ("ERRinvnid", "The tree ID (TID) specified in a command was invalid."),
6: ("ERRinvnetname", "Invalid network name in tree connect."),
7: ("ERRinvdevice", "Invalid device - printer request made to non-printer connection or non-printer request made to printer connection."),
49: ("ERRqfull", "Print queue full (files) -- returned by open print file."),
50: ("ERRqtoobig", "Print queue full -- no space."),
51: ("ERRqeof", "EOF on print queue dump."),
52: ("ERRinvpfid", "Invalid print file FID."),
64: ("ERRsmbcmd", "The server did not recognize the command received."),
65: ("ERRsrverror","The server encountered an internal error, e.g., system file unavailable."),
67: ("ERRfilespecs", "The file handle (FID) and pathname parameters contained an invalid combination of values."),
68: ("ERRreserved", "reserved."),
69: ("ERRbadpermits", "The access permissions specified for a file or directory are not a valid combination. The server cannot set the requested attribute."),
70: ("ERRreserved", "reserved."),
71: ("ERRsetattrmode", "The attribute mode in the Set File Attribute request is invalid."),
81: ("ERRpaused", "Server is paused."),
82: ("ERRmsgoff", "Not receiving messages."),
83: ("ERRnoroom", "No room to buffer message."),
87: ("ERRrmuns", "Too many remote user names."),
88: ("ERRtimeout", "Operation timed out."),
89: ("ERRnoresource", "No resources currently available for request."),
90: ("ERRtoomanyuids", "Too many UIDs active on this session."),
91: ("ERRbaduid", "The UID is not known as a valid ID on this session."),
250: ("ERRusempx","Temp unable to support Raw, use MPX mode."),
251: ("ERRusestd","Temp unable to support Raw, use standard read/write."),
252: ("ERRcontmpx", "Continue in MPX mode."),
253: ("ERRreserved", "reserved."),
254: ("ERRreserved", "reserved."),
0xFFFF: ("ERRnosupport", "Function not supported.")
}
    # Error classes
ERRDOS = 0x1
error_classes = { 0: ("SUCCESS", {}),
ERRDOS: ("ERRDOS", dos_msgs),
0x02: ("ERRSRV",server_msgs),
0x03: ("ERRHRD",hard_msgs),
0x04: ("ERRXOS", {} ),
0xE1: ("ERRRMX1", {} ),
0xE2: ("ERRRMX2", {} ),
0xE3: ("ERRRMX3", {} ),
0xFF: ("ERRCMD", {} ) }
def __init__( self, error_string, error_class, error_code, nt_status = 0):
Exception.__init__(self, error_string)
self.nt_status = nt_status
self._args = error_string
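        # When the server uses 32-bit NT status codes (nt_status is non-zero), the
        # full status is packed into error_code below and error_class is zeroed;
        # __str__() then resolves it through nt_errors.ERROR_MESSAGES.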
if nt_status:
self.error_class = 0
self.error_code = (error_code << 16) + error_class
else:
self.error_class = error_class
self.error_code = error_code
def get_error_class( self ):
return self.error_class
def get_error_code( self ):
return self.error_code
def __str__( self ):
error_class = SessionError.error_classes.get( self.error_class, None )
if not error_class:
error_code_str = self.error_code
error_class_str = self.error_class
else:
error_class_str = error_class[0]
error_code = error_class[1].get( self.error_code, None )
if not error_code:
error_code_str = self.error_code
else:
error_code_str = '%s(%s)' % error_code
if self.nt_status:
return 'SMB SessionError: %s(%s)' % nt_errors.ERROR_MESSAGES[self.error_code]
else:
# Fall back to the old format
return 'SMB SessionError: class: %s, code: %s' % (error_class_str, error_code_str)
# Raised when a feature that is present/required in the protocol is not
# currently supported by pysmb
class UnsupportedFeature(Exception): pass
# Contains information about a SMB shared device/service
class SharedDevice:
def __init__(self, name, share_type, comment):
self.__name = name
self.__type = share_type
self.__comment = comment
def get_name(self):
return self.__name
def get_type(self):
return self.__type
def get_comment(self):
return self.__comment
def __repr__(self):
return '<SharedDevice instance: name=' + self.__name + ', type=' + str(self.__type) + ', comment="' + self.__comment + '">'
# Contains information about the shared file/directory
class SharedFile:
def __init__(self, ctime, atime, mtime, filesize, allocsize, attribs, shortname, longname):
self.__ctime = ctime
self.__atime = atime
self.__mtime = mtime
self.__filesize = filesize
self.__allocsize = allocsize
self.__attribs = attribs
try:
self.__shortname = shortname[:string.index(shortname, '\0')]
except ValueError:
self.__shortname = shortname
try:
self.__longname = longname[:string.index(longname, '\0')]
except ValueError:
self.__longname = longname
def get_ctime(self):
return self.__ctime
def get_ctime_epoch(self):
return self.__convert_smbtime(self.__ctime)
def get_mtime(self):
return self.__mtime
def get_mtime_epoch(self):
return self.__convert_smbtime(self.__mtime)
def get_atime(self):
return self.__atime
def get_atime_epoch(self):
return self.__convert_smbtime(self.__atime)
def get_filesize(self):
return self.__filesize
def get_allocsize(self):
return self.__allocsize
def get_attributes(self):
return self.__attribs
def is_archive(self):
return self.__attribs & ATTR_ARCHIVE
def is_compressed(self):
return self.__attribs & ATTR_COMPRESSED
def is_normal(self):
return self.__attribs & ATTR_NORMAL
def is_hidden(self):
return self.__attribs & ATTR_HIDDEN
def is_readonly(self):
return self.__attribs & ATTR_READONLY
def is_temporary(self):
return self.__attribs & ATTR_TEMPORARY
def is_directory(self):
return self.__attribs & ATTR_DIRECTORY
def is_system(self):
return self.__attribs & ATTR_SYSTEM
def get_shortname(self):
return self.__shortname
def get_longname(self):
return self.__longname
def __repr__(self):
return '<SharedFile instance: shortname="' + self.__shortname + '", longname="' + self.__longname + '", filesize=' + str(self.__filesize) + '>'
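    # __convert_smbtime() turns a 64-bit SMB/Windows FILETIME (count of
    # 100-nanosecond intervals since 1601-01-01) into a Unix epoch value in
    # seconds; geo_cal_offset is the 1601->1970 difference in seconds.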
@staticmethod
def __convert_smbtime(t):
x = t >> 32
y = t & 0xffffffff
geo_cal_offset = 11644473600.0 # = 369.0 * 365.25 * 24 * 60 * 60 - (3.0 * 24 * 60 * 60 + 6.0 * 60 * 60)
return (x * 4.0 * (1 << 30) + (y & 0xfff00000)) * 1.0e-7 - geo_cal_offset
# Contain information about a SMB machine
class SMBMachine:
def __init__(self, nbname, nbt_type, comment):
self.__nbname = nbname
self.__type = nbt_type
self.__comment = comment
def __repr__(self):
return '<SMBMachine instance: nbname="' + self.__nbname + '", type=' + hex(self.__type) + ', comment="' + self.__comment + '">'
class SMBDomain:
def __init__(self, nbgroup, domain_type, master_browser):
self.__nbgroup = nbgroup
self.__type = domain_type
self.__master_browser = master_browser
def __repr__(self):
return '<SMBDomain instance: nbgroup="' + self.__nbgroup + '", type=' + hex(self.__type) + ', master browser="' + self.__master_browser + '">'
# Represents a SMB Packet
class NewSMBPacket(Structure):
structure = (
('Signature', '"\xffSMB'),
('Command','B=0'),
('ErrorClass','B=0'),
('_reserved','B=0'),
('ErrorCode','<H=0'),
('Flags1','B=0'),
('Flags2','<H=0'),
('PIDHigh','<H=0'),
('SecurityFeatures','8s=""'),
('Reserved','<H=0'),
('Tid','<H=0xffff'),
('Pid','<H=0'),
('Uid','<H=0'),
('Mid','<H=0'),
('Data','*:'),
)
def __init__(self, **kargs):
Structure.__init__(self, **kargs)
        if 'Flags2' not in self.fields:
            self['Flags2'] = 0
        if 'Flags1' not in self.fields:
            self['Flags1'] = 0
if 'data' not in kargs:
self['Data'] = []
def addCommand(self, command):
if len(self['Data']) == 0:
self['Command'] = command.command
else:
self['Data'][-1]['Parameters']['AndXCommand'] = command.command
self['Data'][-1]['Parameters']['AndXOffset'] = len(self)
self['Data'].append(command)
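    # Illustrative usage sketch (commented out; mirrors how tree_connect_andx()
    # later in this module builds a request): the first addCommand() sets the
    # packet's Command field, every later call patches the previous command's
    # AndXCommand/AndXOffset fields.
    #   smb = NewSMBPacket()
    #   cmd = SMBCommand(SMB.SMB_COM_TREE_CONNECT_ANDX)
    #   cmd['Parameters'] = SMBTreeConnectAndX_Parameters()
    #   cmd['Data'] = SMBTreeConnectAndX_Data()
    #   smb.addCommand(cmd)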
def isMoreData(self):
return (self['Command'] in [SMB.SMB_COM_TRANSACTION, SMB.SMB_COM_READ_ANDX, SMB.SMB_COM_READ_RAW] and
self['ErrorClass'] == 1 and self['ErrorCode'] == SessionError.ERRmoredata)
def isMoreProcessingRequired(self):
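        # STATUS_MORE_PROCESSING_REQUIRED is 0xC0000016; with FLAGS2_NT_STATUS set
        # the 32-bit status overlays the DOS error fields, so its low byte lands in
        # ErrorClass (0x16) and its high word in ErrorCode (0xc000).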
return self['ErrorClass'] == 0x16 and self['ErrorCode'] == 0xc000
def isValidAnswer(self, cmd):
# this was inside a loop reading more from the net (with recv_packet(None))
if self['Command'] == cmd:
if (self['ErrorClass'] == 0x00 and
self['ErrorCode'] == 0x00):
return 1
elif self.isMoreData():
return 1
elif self.isMoreProcessingRequired():
return 1
raise SessionError("SMB Library Error", self['ErrorClass'] + (self['_reserved'] << 8), self['ErrorCode'], self['Flags2'] & SMB.FLAGS2_NT_STATUS)
else:
raise UnsupportedFeature("Unexpected answer from server: Got %d, Expected %d" % (self['Command'], cmd))
class SMBCommand(Structure):
structure = (
('WordCount', 'B=len(Parameters)/2'),
('_ParametersLength','_-Parameters','WordCount*2'),
('Parameters',':'), # default set by constructor
('ByteCount','<H-Data'),
('Data',':'), # default set by constructor
)
def __init__(self, commandOrData = None, data = None, **kargs):
if type(commandOrData) == type(0):
self.command = commandOrData
else:
data = data or commandOrData
Structure.__init__(self, data = data, **kargs)
if data is None:
self['Parameters'] = ''
self['Data'] = ''
class AsciiOrUnicodeStructure(Structure):
UnicodeStructure = ()
AsciiStructure = ()
def __init__(self, flags = 0, **kargs):
if flags & SMB.FLAGS2_UNICODE:
self.structure = self.UnicodeStructure
else:
self.structure = self.AsciiStructure
Structure.__init__(self, **kargs)
class SMBCommand_Parameters(Structure):
pass
class SMBAndXCommand_Parameters(Structure):
commonHdr = (
('AndXCommand','B=0xff'),
('_reserved','B=0'),
('AndXOffset','<H=0'),
)
    structure = ( # default structure, overridden by subclasses
('Data',':=""'),
)
############# TRANSACTIONS RELATED
# TRANS2_QUERY_FS_INFORMATION
# QUERY_FS Information Levels
# SMB_QUERY_FS_ATTRIBUTE_INFO
class SMBQueryFsAttributeInfo(Structure):
structure = (
('FileSystemAttributes','<L'),
('MaxFilenNameLengthInBytes','<L'),
('LengthOfFileSystemName','<L-FileSystemName'),
('FileSystemName',':'),
)
class SMBQueryFsInfoVolume(AsciiOrUnicodeStructure):
commonHdr = (
('ulVolSerialNbr','<L=0xABCDEFAA'),
('cCharCount','<B-VolumeLabel'),
)
AsciiStructure = (
('VolumeLabel','z'),
)
UnicodeStructure = (
('VolumeLabel','u'),
)
# FILE_FS_SIZE_INFORMATION
class FileFsSizeInformation(Structure):
structure = (
('TotalAllocationUnits','<q=148529400'),
('AvailableAllocationUnits','<q=14851044'),
('SectorsPerAllocationUnit','<L=2'),
('BytesPerSector','<L=512'),
)
# SMB_QUERY_FS_SIZE_INFO
class SMBQueryFsSizeInfo(Structure):
structure = (
('TotalAllocationUnits','<q=148529400'),
('TotalFreeAllocationUnits','<q=14851044'),
('SectorsPerAllocationUnit','<L=2'),
('BytesPerSector','<L=512'),
)
# FILE_FS_FULL_SIZE_INFORMATION
class SMBFileFsFullSizeInformation(Structure):
structure = (
('TotalAllocationUnits','<q=148529400'),
('CallerAvailableAllocationUnits','<q=148529400'),
('ActualAvailableAllocationUnits','<q=148529400'),
('SectorsPerAllocationUnit','<L=15'),
('BytesPerSector','<L=512')
)
# SMB_QUERY_FS_VOLUME_INFO
class SMBQueryFsVolumeInfo(Structure):
structure = (
('VolumeCreationTime','<q'),
('SerialNumber','<L=0xABCDEFAA'),
('VolumeLabelSize','<L=len(VolumeLabel)'),
('Reserved','<H=0x10'),
('VolumeLabel',':')
)
# SMB_FIND_FILE_BOTH_DIRECTORY_INFO level
class SMBFindFileBothDirectoryInfo(AsciiOrUnicodeStructure):
commonHdr = (
('NextEntryOffset','<L=0'),
('FileIndex','<L=0'),
('CreationTime','<q'),
('LastAccessTime','<q'),
('LastWriteTime','<q'),
('LastChangeTime','<q'),
('EndOfFile','<q=0'),
('AllocationSize','<q=0'),
('ExtFileAttributes','<L=0'),
)
AsciiStructure = (
('FileNameLength','<L-FileName','len(FileName)'),
('EaSize','<L=0'),
('ShortNameLength','<B=0'),
('Reserved','<B=0'),
('ShortName','24s'),
('FileName',':'),
)
UnicodeStructure = (
('FileNameLength','<L-FileName','len(FileName)*2'),
('EaSize','<L=0'),
('ShortNameLength','<B=0'),
('Reserved','<B=0'),
('ShortName','24s'),
('FileName',':'),
)
# SMB_FIND_FILE_ID_FULL_DIRECTORY_INFO level
class SMBFindFileIdFullDirectoryInfo(AsciiOrUnicodeStructure):
commonHdr = (
('NextEntryOffset','<L=0'),
('FileIndex','<L=0'),
('CreationTime','<q'),
('LastAccessTime','<q'),
('LastWriteTime','<q'),
('LastChangeTime','<q'),
('EndOfFile','<q=0'),
('AllocationSize','<q=0'),
('ExtFileAttributes','<L=0'),
)
AsciiStructure = (
('FileNameLength','<L-FileName','len(FileName)'),
('EaSize','<L=0'),
('FileID','<q=0'),
('FileName',':'),
)
UnicodeStructure = (
('FileNameLength','<L-FileName','len(FileName)*2'),
('EaSize','<L=0'),
('FileID','<q=0'),
('FileName',':'),
)
# SMB_FIND_FILE_ID_BOTH_DIRECTORY_INFO level
class SMBFindFileIdBothDirectoryInfo(AsciiOrUnicodeStructure):
commonHdr = (
('NextEntryOffset','<L=0'),
('FileIndex','<L=0'),
('CreationTime','<q'),
('LastAccessTime','<q'),
('LastWriteTime','<q'),
('LastChangeTime','<q'),
('EndOfFile','<q=0'),
('AllocationSize','<q=0'),
('ExtFileAttributes','<L=0'),
)
AsciiStructure = (
('FileNameLength','<L-FileName','len(FileName)'),
('EaSize','<L=0'),
('ShortNameLength','<B=0'),
('Reserved','<B=0'),
('ShortName','24s'),
('Reserved','<H=0'),
('FileID','<q=0'),
('FileName','z'),
)
UnicodeStructure = (
('FileNameLength','<L-FileName','len(FileName)*2'),
('EaSize','<L=0'),
('ShortNameLength','<B=0'),
('Reserved','<B=0'),
('ShortName','24s'),
('Reserved','<H=0'),
('FileID','<q=0'),
('FileName',':'),
)
# SMB_FIND_FILE_DIRECTORY_INFO level
class SMBFindFileDirectoryInfo(AsciiOrUnicodeStructure):
commonHdr = (
('NextEntryOffset','<L=0'),
('FileIndex','<L=0'),
('CreationTime','<q'),
('LastAccessTime','<q'),
('LastWriteTime','<q'),
('LastChangeTime','<q'),
('EndOfFile','<q=0'),
('AllocationSize','<q=1'),
('ExtFileAttributes','<L=0'),
)
AsciiStructure = (
('FileNameLength','<L-FileName','len(FileName)'),
('FileName','z'),
)
UnicodeStructure = (
('FileNameLength','<L-FileName','len(FileName)*2'),
('FileName',':'),
)
# SMB_FIND_FILE_NAMES_INFO level
class SMBFindFileNamesInfo(AsciiOrUnicodeStructure):
commonHdr = (
('NextEntryOffset','<L=0'),
('FileIndex','<L=0'),
)
AsciiStructure = (
('FileNameLength','<L-FileName','len(FileName)'),
('FileName','z'),
)
UnicodeStructure = (
('FileNameLength','<L-FileName','len(FileName)*2'),
('FileName',':'),
)
# SMB_FIND_FILE_FULL_DIRECTORY_INFO level
class SMBFindFileFullDirectoryInfo(AsciiOrUnicodeStructure):
commonHdr = (
('NextEntryOffset','<L=0'),
('FileIndex','<L=0'),
('CreationTime','<q'),
('LastAccessTime','<q'),
('LastWriteTime','<q'),
('LastChangeTime','<q'),
('EndOfFile','<q=0'),
('AllocationSize','<q=1'),
('ExtFileAttributes','<L=0'),
)
AsciiStructure = (
('FileNameLength','<L-FileName','len(FileName)'),
('EaSize','<L'),
('FileName','z'),
)
UnicodeStructure = (
('FileNameLength','<L-FileName','len(FileName)*2'),
('EaSize','<L'),
('FileName',':'),
)
# SMB_FIND_INFO_STANDARD level
class SMBFindInfoStandard(AsciiOrUnicodeStructure):
commonHdr = (
('ResumeKey','<L=0xff'),
('CreationDate','<H=0'),
('CreationTime','<H=0'),
('LastAccessDate','<H=0'),
('LastAccessTime','<H=0'),
('LastWriteDate','<H=0'),
('LastWriteTime','<H=0'),
('EaSize','<L'),
('AllocationSize','<L=1'),
('ExtFileAttributes','<H=0'),
)
AsciiStructure = (
('FileNameLength','<B-FileName','len(FileName)'),
('FileName','z'),
)
UnicodeStructure = (
('FileNameLength','<B-FileName','len(FileName)*2'),
('FileName',':'),
)
# SET_FILE_INFORMATION structures
# SMB_SET_FILE_DISPOSITION_INFO
class SMBSetFileDispositionInfo(Structure):
structure = (
('DeletePending','<B'),
)
# SMB_SET_FILE_BASIC_INFO
class SMBSetFileBasicInfo(Structure):
structure = (
('CreationTime','<q'),
('LastAccessTime','<q'),
('LastWriteTime','<q'),
('ChangeTime','<q'),
('ExtFileAttributes','<H'),
('Reserved','<L'),
)
# FILE_STREAM_INFORMATION
class SMBFileStreamInformation(Structure):
commonHdr = (
('NextEntryOffset','<L=0'),
('StreamNameLength','<L=0'),
('StreamSize','<q=0'),
('StreamAllocationSize','<q=0'),
('StreamName',':=""'),
)
# FILE_NETWORK_OPEN_INFORMATION
class SMBFileNetworkOpenInfo(Structure):
structure = (
('CreationTime','<q=0'),
('LastAccessTime','<q=0'),
('LastWriteTime','<q=0'),
('ChangeTime','<q=0'),
('AllocationSize','<q=0'),
('EndOfFile','<q=0'),
('FileAttributes','<L=0'),
('Reserved','<L=0'),
)
# SMB_SET_FILE_END_OF_FILE_INFO
class SMBSetFileEndOfFileInfo(Structure):
structure = (
('EndOfFile','<q'),
)
# TRANS2_FIND_NEXT2
class SMBFindNext2_Parameters(AsciiOrUnicodeStructure):
commonHdr = (
('SID','<H'),
('SearchCount','<H'),
('InformationLevel','<H'),
('ResumeKey','<L'),
('Flags','<H'),
)
AsciiStructure = (
('FileName','z'),
)
UnicodeStructure = (
('FileName','u'),
)
class SMBFindNext2Response_Parameters(Structure):
structure = (
('SearchCount','<H'),
('EndOfSearch','<H=1'),
('EaErrorOffset','<H=0'),
('LastNameOffset','<H=0'),
)
class SMBFindNext2_Data(Structure):
structure = (
('GetExtendedAttributesListLength','_-GetExtendedAttributesList', 'self["GetExtendedAttributesListLength"]'),
('GetExtendedAttributesList',':'),
)
# TRANS2_FIND_FIRST2
class SMBFindFirst2Response_Parameters(Structure):
structure = (
('SID','<H'),
('SearchCount','<H'),
('EndOfSearch','<H=1'),
('EaErrorOffset','<H=0'),
('LastNameOffset','<H=0'),
)
class SMBFindFirst2_Parameters(AsciiOrUnicodeStructure):
commonHdr = (
('SearchAttributes','<H'),
('SearchCount','<H'),
('Flags','<H'),
('InformationLevel','<H'),
('SearchStorageType','<L'),
)
AsciiStructure = (
('FileName','z'),
)
UnicodeStructure = (
('FileName','u'),
)
class SMBFindFirst2_Data(Structure):
structure = (
('GetExtendedAttributesListLength','_-GetExtendedAttributesList', 'self["GetExtendedAttributesListLength"]'),
('GetExtendedAttributesList',':'),
)
# TRANS2_SET_PATH_INFORMATION
class SMBSetPathInformation_Parameters(AsciiOrUnicodeStructure):
commonHdr = (
('InformationLevel','<H'),
('Reserved','<L'),
)
AsciiStructure = (
('FileName','z'),
)
UnicodeStructure = (
('FileName','u'),
)
class SMBSetPathInformationResponse_Parameters(Structure):
structure = (
('EaErrorOffset','<H=0'),
)
# TRANS2_SET_FILE_INFORMATION
class SMBSetFileInformation_Parameters(Structure):
structure = (
('FID','<H'),
('InformationLevel','<H'),
('Reserved','<H'),
)
class SMBSetFileInformationResponse_Parameters(Structure):
structure = (
('EaErrorOffset','<H=0'),
)
# TRANS2_QUERY_FILE_INFORMATION
class SMBQueryFileInformation_Parameters(Structure):
structure = (
('FID','<H'),
('InformationLevel','<H'),
)
class SMBQueryFileInformationResponse_Parameters(Structure):
structure = (
('EaErrorOffset','<H=0'),
)
class SMBQueryFileInformation_Data(Structure):
structure = (
('GetExtendedAttributeList',':'),
)
# TRANS2_QUERY_PATH_INFORMATION
class SMBQueryPathInformationResponse_Parameters(Structure):
structure = (
('EaErrorOffset','<H=0'),
)
class SMBQueryPathInformation_Parameters(AsciiOrUnicodeStructure):
commonHdr = (
('InformationLevel','<H'),
('Reserved','<L=0'),
)
AsciiStructure = (
('FileName','z'),
)
UnicodeStructure = (
('FileName','u'),
)
class SMBQueryPathInformation_Data(Structure):
structure = (
('GetExtendedAttributeList',':'),
)
# SMB_QUERY_FILE_EA_INFO
class SMBQueryFileEaInfo(Structure):
structure = (
('EaSize','<L=0'),
)
# SMB_QUERY_FILE_BASIC_INFO
class SMBQueryFileBasicInfo(Structure):
structure = (
('CreationTime','<q'),
('LastAccessTime','<q'),
('LastWriteTime','<q'),
('LastChangeTime','<q'),
('ExtFileAttributes','<L'),
#('Reserved','<L=0'),
)
# SMB_QUERY_FILE_STANDARD_INFO
class SMBQueryFileStandardInfo(Structure):
structure = (
('AllocationSize','<q'),
('EndOfFile','<q'),
('NumberOfLinks','<L=0'),
('DeletePending','<B=0'),
('Directory','<B'),
)
# SMB_QUERY_FILE_ALL_INFO
class SMBQueryFileAllInfo(Structure):
structure = (
('CreationTime','<q'),
('LastAccessTime','<q'),
('LastWriteTime','<q'),
('LastChangeTime','<q'),
('ExtFileAttributes','<L'),
('Reserved','<L=0'),
('AllocationSize','<q'),
('EndOfFile','<q'),
('NumberOfLinks','<L=0'),
('DeletePending','<B=0'),
('Directory','<B'),
('Reserved','<H=0'),
('EaSize','<L=0'),
('FileNameLength','<L-FileName','len(FileName)'),
('FileName',':'),
)
# \PIPE\LANMAN NetShareEnum
class SMBNetShareEnum(Structure):
structure = (
('RAPOpcode','<H=0'),
('ParamDesc','z'),
('DataDesc','z'),
('InfoLevel','<H'),
('ReceiveBufferSize','<H'),
)
class SMBNetShareEnumResponse(Structure):
structure = (
('Status','<H=0'),
('Convert','<H=0'),
('EntriesReturned','<H'),
('EntriesAvailable','<H'),
)
class NetShareInfo1(Structure):
structure = (
('NetworkName','13s'),
('Pad','<B=0'),
('Type','<H=0'),
('RemarkOffsetLow','<H=0'),
('RemarkOffsetHigh','<H=0'),
)
# \PIPE\LANMAN NetServerGetInfo
class SMBNetServerGetInfoResponse(Structure):
structure = (
('Status','<H=0'),
('Convert','<H=0'),
('TotalBytesAvailable','<H'),
)
class SMBNetServerInfo1(Structure):
# Level 1 Response
structure = (
('ServerName','16s'),
('MajorVersion','B=5'),
('MinorVersion','B=0'),
('ServerType','<L=3'),
('ServerCommentLow','<H=0'),
('ServerCommentHigh','<H=0'),
)
# \PIPE\LANMAN NetShareGetInfo
class SMBNetShareGetInfo(Structure):
structure = (
('RAPOpcode','<H=0'),
('ParamDesc','z'),
('DataDesc','z'),
('ShareName','z'),
('InfoLevel','<H'),
('ReceiveBufferSize','<H'),
)
class SMBNetShareGetInfoResponse(Structure):
structure = (
('Status','<H=0'),
('Convert','<H=0'),
('TotalBytesAvailable','<H'),
)
############# Security Features
class SecurityFeatures(Structure):
structure = (
('Key','<L=0'),
('CID','<H=0'),
('SequenceNumber','<H=0'),
)
############# SMB_COM_QUERY_INFORMATION2 (0x23)
class SMBQueryInformation2_Parameters(Structure):
structure = (
('Fid','<H'),
)
class SMBQueryInformation2Response_Parameters(Structure):
structure = (
('CreateDate','<H'),
('CreationTime','<H'),
('LastAccessDate','<H'),
('LastAccessTime','<H'),
('LastWriteDate','<H'),
('LastWriteTime','<H'),
('FileDataSize','<L'),
('FileAllocationSize','<L'),
('FileAttributes','<L'),
)
############# SMB_COM_SESSION_SETUP_ANDX (0x73)
class SMBSessionSetupAndX_Parameters(SMBAndXCommand_Parameters):
structure = (
('MaxBuffer','<H'),
('MaxMpxCount','<H'),
('VCNumber','<H'),
('SessionKey','<L'),
('AnsiPwdLength','<H'),
('UnicodePwdLength','<H'),
('_reserved','<L=0'),
('Capabilities','<L'),
)
class SMBSessionSetupAndX_Extended_Parameters(SMBAndXCommand_Parameters):
structure = (
('MaxBufferSize','<H'),
('MaxMpxCount','<H'),
('VcNumber','<H'),
('SessionKey','<L'),
('SecurityBlobLength','<H'),
('Reserved','<L=0'),
('Capabilities','<L'),
)
class SMBSessionSetupAndX_Data(AsciiOrUnicodeStructure):
AsciiStructure = (
('AnsiPwdLength','_-AnsiPwd','self["AnsiPwdLength"]'),
('UnicodePwdLength','_-UnicodePwd','self["UnicodePwdLength"]'),
('AnsiPwd',':=""'),
('UnicodePwd',':=""'),
('Account','z=""'),
('PrimaryDomain','z=""'),
('NativeOS','z=""'),
('NativeLanMan','z=""'),
)
UnicodeStructure = (
('AnsiPwdLength','_-AnsiPwd','self["AnsiPwdLength"]'),
('UnicodePwdLength','_-UnicodePwd','self["UnicodePwdLength"]'),
('AnsiPwd',':=""'),
('UnicodePwd',':=""'),
('Account','u=""'),
('PrimaryDomain','u=""'),
('NativeOS','u=""'),
('NativeLanMan','u=""'),
)
class SMBSessionSetupAndX_Extended_Data(AsciiOrUnicodeStructure):
AsciiStructure = (
('SecurityBlobLength','_-SecurityBlob','self["SecurityBlobLength"]'),
('SecurityBlob',':'),
('NativeOS','z=""'),
('NativeLanMan','z=""'),
)
UnicodeStructure = (
('SecurityBlobLength','_-SecurityBlob','self["SecurityBlobLength"]'),
('SecurityBlob',':'),
('NativeOS','u=""'),
('NativeLanMan','u=""'),
)
class SMBSessionSetupAndXResponse_Parameters(SMBAndXCommand_Parameters):
structure = (
('Action','<H'),
)
class SMBSessionSetupAndX_Extended_Response_Parameters(SMBAndXCommand_Parameters):
structure = (
('Action','<H=0'),
('SecurityBlobLength','<H'),
)
class SMBSessionSetupAndXResponse_Data(AsciiOrUnicodeStructure):
AsciiStructure = (
('NativeOS','z=""'),
('NativeLanMan','z=""'),
('PrimaryDomain','z=""'),
)
UnicodeStructure = (
('NativeOS','u=""'),
('NativeLanMan','u=""'),
('PrimaryDomain','u=""'),
)
class SMBSessionSetupAndX_Extended_Response_Data(AsciiOrUnicodeStructure):
AsciiStructure = (
('SecurityBlobLength','_-SecurityBlob','self["SecurityBlobLength"]'),
('SecurityBlob',':'),
('NativeOS','z=""'),
('NativeLanMan','z=""'),
)
UnicodeStructure = (
('SecurityBlobLength','_-SecurityBlob','self["SecurityBlobLength"]'),
('SecurityBlob',':'),
('NativeOS','u=""'),
('NativeLanMan','u=""'),
)
############# SMB_COM_TREE_CONNECT (0x70)
class SMBTreeConnect_Parameters(SMBCommand_Parameters):
structure = (
)
class SMBTreeConnect_Data(SMBCommand_Parameters):
structure = (
('PathFormat','"\x04'),
('Path','z'),
('PasswordFormat','"\x04'),
('Password','z'),
('ServiceFormat','"\x04'),
('Service','z'),
)
############# SMB_COM_TREE_CONNECT_ANDX (0x75)
class SMBTreeConnectAndX_Parameters(SMBAndXCommand_Parameters):
structure = (
('Flags','<H=0'),
('PasswordLength','<H'),
)
class SMBTreeConnectAndXResponse_Parameters(SMBAndXCommand_Parameters):
structure = (
('OptionalSupport','<H=0'),
)
class SMBTreeConnectAndXExtendedResponse_Parameters(SMBAndXCommand_Parameters):
structure = (
('OptionalSupport','<H=1'),
('MaximalShareAccessRights','<L=0x1fffff'),
('GuestMaximalShareAccessRights','<L=0x1fffff'),
)
class SMBTreeConnectAndX_Data(AsciiOrUnicodeStructure):
AsciiStructure = (
('_PasswordLength','_-Password','self["_PasswordLength"]'),
('Password',':'),
('Path','z'),
('Service','z'),
)
UnicodeStructure = (
('_PasswordLength','_-Password','self["_PasswordLength"] if self["_PasswordLength"] > 0 else 1'),
('Password',':'),
('Path','u'),
('Service','z'),
)
class SMBTreeConnectAndXResponse_Data(AsciiOrUnicodeStructure):
AsciiStructure = (
('Service','z'),
('PadLen','_-Pad','self["PadLen"]'),
('Pad',':=""'),
('NativeFileSystem','z'),
)
UnicodeStructure = (
('Service','z'),
('PadLen','_-Pad','self["PadLen"]'),
('Pad',':=""'),
('NativeFileSystem','u'),
)
############# SMB_COM_NT_CREATE_ANDX (0xA2)
class SMBNtCreateAndX_Parameters(SMBAndXCommand_Parameters):
structure = (
('_reserved', 'B=0'),
('FileNameLength','<H'), # NameLength
('CreateFlags','<L'), # Flags
('RootFid','<L=0'), # RootDirectoryFID
('AccessMask','<L'), # DesiredAccess
('AllocationSizeLo','<L=0'), # AllocationSize
('AllocationSizeHi','<L=0'),
('FileAttributes','<L=0'), # ExtFileAttributes
('ShareAccess','<L=3'), #
('Disposition','<L=1'), # CreateDisposition
('CreateOptions','<L'), # CreateOptions
('Impersonation','<L=2'),
('SecurityFlags','B=3'),
)
class SMBNtCreateAndXResponse_Parameters(SMBAndXCommand_Parameters):
# XXX Is there a memory leak in the response for NTCreate (where the Data section would be) in Win 2000, Win XP, and Win 2003?
structure = (
('OplockLevel', 'B=0'),
('Fid','<H'),
('CreateAction','<L'),
('CreateTime','<q=0'),
('LastAccessTime','<q=0'),
('LastWriteTime','<q=0'),
('LastChangeTime','<q=0'),
('FileAttributes','<L=0x80'),
('AllocationSize','<q=0'),
('EndOfFile','<q=0'),
('FileType','<H=0'),
('IPCState','<H=0'),
('IsDirectory','B'),
)
class SMBNtCreateAndXExtendedResponse_Parameters(SMBAndXCommand_Parameters):
# [MS-SMB] Extended response description
structure = (
('OplockLevel', 'B=0'),
('Fid','<H'),
('CreateAction','<L'),
('CreateTime','<q=0'),
('LastAccessTime','<q=0'),
('LastWriteTime','<q=0'),
('LastChangeTime','<q=0'),
('FileAttributes','<L=0x80'),
('AllocationSize','<q=0'),
('EndOfFile','<q=0'),
('FileType','<H=0'),
('IPCState','<H=0'),
('IsDirectory','B'),
('VolumeGUID','16s'),
('FileIdLow','<L=0'),
('FileIdHigh','<L=0'),
('MaximalAccessRights','<L=0x12019b'),
('GuestMaximalAccessRights','<L=0x120089'),
)
class SMBNtCreateAndX_Data(AsciiOrUnicodeStructure):
AsciiStructure = (
('FileName','z'),
)
UnicodeStructure = (
('Pad','B'),
('FileName','u'),
)
############# SMB_COM_OPEN_ANDX (0xD2)
class SMBOpenAndX_Parameters(SMBAndXCommand_Parameters):
structure = (
('Flags','<H=0'),
('DesiredAccess','<H=0'),
('SearchAttributes','<H=0'),
('FileAttributes','<H=0'),
('CreationTime','<L=0'),
('OpenMode','<H=1'), # SMB_O_OPEN = 1
('AllocationSize','<L=0'),
('Reserved','8s=""'),
)
class SMBOpenAndX_Data(SMBNtCreateAndX_Data):
pass
class SMBOpenAndXResponse_Parameters(SMBAndXCommand_Parameters):
structure = (
('Fid','<H=0'),
('FileAttributes','<H=0'),
('LastWriten','<L=0'),
('FileSize','<L=0'),
('GrantedAccess','<H=0'),
('FileType','<H=0'),
('IPCState','<H=0'),
('Action','<H=0'),
('ServerFid','<L=0'),
('_reserved','<H=0'),
)
############# SMB_COM_WRITE (0x0B)
class SMBWrite_Parameters(SMBCommand_Parameters):
structure = (
('Fid','<H'),
('Count','<H'),
('Offset','<L'),
('Remaining','<H'),
)
class SMBWriteResponse_Parameters(SMBCommand_Parameters):
structure = (
('Count','<H'),
)
class SMBWrite_Data(Structure):
structure = (
('BufferFormat','<B=1'),
('DataLength','<H-Data'),
('Data',':'),
)
############# SMB_COM_WRITE_ANDX (0x2F)
class SMBWriteAndX_Parameters(SMBAndXCommand_Parameters):
structure = (
('Fid','<H=0'),
('Offset','<L=0'),
('_reserved','<L=0xff'),
('WriteMode','<H=8'),
('Remaining','<H=0'),
('DataLength_Hi','<H=0'),
('DataLength','<H=0'),
('DataOffset','<H=0'),
('HighOffset','<L=0'),
)
class SMBWriteAndX_Data_Short(Structure):
structure = (
('_PadLen','_-Pad','self["DataOffset"] - 59'),
('Pad',':'),
#('Pad','<B=0'),
('DataLength','_-Data','self["DataLength"]'),
('Data',':'),
)
class SMBWriteAndX_Data(Structure):
structure = (
('_PadLen','_-Pad','self["DataOffset"] - 63'),
('Pad',':'),
#('Pad','<B=0'),
('DataLength','_-Data','self["DataLength"]'),
('Data',':'),
)
class SMBWriteAndX_Parameters_Short(SMBAndXCommand_Parameters):
structure = (
('Fid','<H'),
('Offset','<L'),
('_reserved','<L=0xff'),
('WriteMode','<H=8'),
('Remaining','<H'),
('DataLength_Hi','<H=0'),
('DataLength','<H'),
('DataOffset','<H=0'),
)
class SMBWriteAndXResponse_Parameters(SMBAndXCommand_Parameters):
structure = (
('Count','<H'),
('Available','<H'),
('Reserved','<L=0'),
)
############# SMB_COM_WRITE_RAW (0x1D)
class SMBWriteRaw_Parameters(SMBCommand_Parameters):
structure = (
('Fid','<H'),
('Count','<H'),
('_reserved','<H=0'),
('Offset','<L'),
('Timeout','<L=0'),
('WriteMode','<H=0'),
('_reserved2','<L=0'),
('DataLength','<H'),
('DataOffset','<H=0'),
)
############# SMB_COM_READ (0x0A)
class SMBRead_Parameters(SMBCommand_Parameters):
structure = (
('Fid','<H'),
('Count','<H'),
('Offset','<L'),
('Remaining','<H=Count'),
)
class SMBReadResponse_Parameters(Structure):
structure = (
('Count','<H=0'),
('_reserved','8s=""'),
)
class SMBReadResponse_Data(Structure):
structure = (
('BufferFormat','<B=0x1'),
('DataLength','<H-Data'),
('Data',':'),
)
############# SMB_COM_READ_RAW (0x1A)
class SMBReadRaw_Parameters(SMBCommand_Parameters):
structure = (
('Fid','<H'),
('Offset','<L'),
('MaxCount','<H'),
('MinCount','<H=MaxCount'),
('Timeout','<L=0'),
('_reserved','<H=0'),
)
############# SMB_COM_NT_TRANSACT (0xA0)
class SMBNTTransaction_Parameters(SMBCommand_Parameters):
structure = (
('MaxSetupCount','<B=0'),
('Reserved1','<H=0'),
('TotalParameterCount','<L'),
('TotalDataCount','<L'),
('MaxParameterCount','<L=1024'),
('MaxDataCount','<L=65504'),
('ParameterCount','<L'),
('ParameterOffset','<L'),
('DataCount','<L'),
('DataOffset','<L'),
('SetupCount','<B=len(Setup)/2'),
('Function','<H=0'),
('SetupLength','_-Setup','SetupCount*2'),
('Setup',':'),
)
class SMBNTTransactionResponse_Parameters(SMBCommand_Parameters):
structure = (
('Reserved1','3s=""'),
('TotalParameterCount','<L'),
('TotalDataCount','<L'),
('ParameterCount','<L'),
('ParameterOffset','<L'),
('ParameterDisplacement','<L=0'),
('DataCount','<L'),
('DataOffset','<L'),
('DataDisplacement','<L=0'),
('SetupCount','<B=0'),
('SetupLength','_-Setup','SetupCount*2'),
('Setup',':'),
)
class SMBNTTransaction_Data(Structure):
structure = (
('Pad1Length','_-Pad1','self["Pad1Length"]'),
('Pad1',':'),
('NT_Trans_ParametersLength','_-NT_Trans_Parameters','self["NT_Trans_ParametersLength"]'),
('NT_Trans_Parameters',':'),
('Pad2Length','_-Pad2','self["Pad2Length"]'),
('Pad2',':'),
('NT_Trans_DataLength','_-NT_Trans_Data','self["NT_Trans_DataLength"]'),
('NT_Trans_Data',':'),
)
class SMBNTTransactionResponse_Data(Structure):
structure = (
('Pad1Length','_-Pad1','self["Pad1Length"]'),
('Pad1',':'),
('Trans_ParametersLength','_-Trans_Parameters','self["Trans_ParametersLength"]'),
('Trans_Parameters',':'),
('Pad2Length','_-Pad2','self["Pad2Length"]'),
('Pad2',':'),
('Trans_DataLength','_-Trans_Data','self["Trans_DataLength"]'),
('Trans_Data',':'),
)
############# SMB_COM_TRANSACTION2_SECONDARY (0x33)
class SMBTransaction2Secondary_Parameters(SMBCommand_Parameters):
structure = (
('TotalParameterCount','<H'),
('TotalDataCount','<H'),
('ParameterCount','<H'),
('ParameterOffset','<H'),
('DataCount','<H'),
('DataOffset','<H'),
('DataDisplacement','<H=0'),
('FID','<H'),
)
class SMBTransaction2Secondary_Data(Structure):
structure = (
('Pad1Length','_-Pad1','self["Pad1Length"]'),
('Pad1',':'),
('Trans_ParametersLength','_-Trans_Parameters','self["Trans_ParametersLength"]'),
('Trans_Parameters',':'),
('Pad2Length','_-Pad2','self["Pad2Length"]'),
('Pad2',':'),
('Trans_DataLength','_-Trans_Data','self["Trans_DataLength"]'),
('Trans_Data',':'),
)
############# SMB_COM_TRANSACTION2 (0x32)
class SMBTransaction2_Parameters(SMBCommand_Parameters):
structure = (
('TotalParameterCount','<H'),
('TotalDataCount','<H'),
('MaxParameterCount','<H=1024'),
('MaxDataCount','<H=65504'),
('MaxSetupCount','<B=0'),
('Reserved1','<B=0'),
('Flags','<H=0'),
('Timeout','<L=0'),
('Reserved2','<H=0'),
('ParameterCount','<H'),
('ParameterOffset','<H'),
('DataCount','<H'),
('DataOffset','<H'),
('SetupCount','<B=len(Setup)/2'),
('Reserved3','<B=0'),
('SetupLength','_-Setup','SetupCount*2'),
('Setup',':'),
)
class SMBTransaction2Response_Parameters(SMBCommand_Parameters):
structure = (
('TotalParameterCount','<H'),
('TotalDataCount','<H'),
('Reserved1','<H=0'),
('ParameterCount','<H'),
('ParameterOffset','<H'),
('ParameterDisplacement','<H=0'),
('DataCount','<H'),
('DataOffset','<H'),
('DataDisplacement','<H=0'),
('SetupCount','<B=0'),
('Reserved2','<B=0'),
('SetupLength','_-Setup','SetupCount*2'),
('Setup',':'),
)
class SMBTransaction2_Data(Structure):
structure = (
# ('NameLength','_-Name','1'),
# ('Name',':'),
('Pad1Length','_-Pad1','self["Pad1Length"]'),
('Pad1',':'),
('Trans_ParametersLength','_-Trans_Parameters','self["Trans_ParametersLength"]'),
('Trans_Parameters',':'),
('Pad2Length','_-Pad2','self["Pad2Length"]'),
('Pad2',':'),
('Trans_DataLength','_-Trans_Data','self["Trans_DataLength"]'),
('Trans_Data',':'),
)
class SMBTransaction2Response_Data(Structure):
structure = (
('Pad1Length','_-Pad1','self["Pad1Length"]'),
('Pad1',':'),
('Trans_ParametersLength','_-Trans_Parameters','self["Trans_ParametersLength"]'),
('Trans_Parameters',':'),
('Pad2Length','_-Pad2','self["Pad2Length"]'),
('Pad2',':'),
('Trans_DataLength','_-Trans_Data','self["Trans_DataLength"]'),
('Trans_Data',':'),
)
############# SMB_COM_QUERY_INFORMATION (0x08)
class SMBQueryInformation_Data(AsciiOrUnicodeStructure):
AsciiStructure = (
('BufferFormat','B=4'),
('FileName','z'),
)
UnicodeStructure = (
('BufferFormat','B=4'),
('FileName','u'),
)
class SMBQueryInformationResponse_Parameters(Structure):
structure = (
('FileAttributes','<H'),
('LastWriteTime','<L'),
('FileSize','<L'),
('Reserved','"0123456789'),
)
############# SMB_COM_TRANSACTION (0x25)
class SMBTransaction_Parameters(SMBCommand_Parameters):
structure = (
('TotalParameterCount','<H'),
('TotalDataCount','<H'),
('MaxParameterCount','<H=1024'),
('MaxDataCount','<H=65504'),
('MaxSetupCount','<B=0'),
('Reserved1','<B=0'),
('Flags','<H=0'),
('Timeout','<L=0'),
('Reserved2','<H=0'),
('ParameterCount','<H'),
('ParameterOffset','<H'),
('DataCount','<H'),
('DataOffset','<H'),
('SetupCount','<B=len(Setup)/2'),
('Reserved3','<B=0'),
('SetupLength','_-Setup','SetupCount*2'),
('Setup',':'),
)
class SMBTransactionResponse_Parameters(SMBCommand_Parameters):
structure = (
('TotalParameterCount','<H'),
('TotalDataCount','<H'),
('Reserved1','<H=0'),
('ParameterCount','<H'),
('ParameterOffset','<H'),
('ParameterDisplacement','<H=0'),
('DataCount','<H'),
('DataOffset','<H'),
('DataDisplacement','<H=0'),
('SetupCount','<B'),
('Reserved2','<B=0'),
('SetupLength','_-Setup','SetupCount*2'),
('Setup',':'),
)
# TODO: We should merge both of these, but this will require fixing
# the instances where this structure is used on the client side
class SMBTransaction_SData(AsciiOrUnicodeStructure):
AsciiStructure = (
('Name','z'),
('Trans_ParametersLength','_-Trans_Parameters'),
('Trans_Parameters',':'),
('Trans_DataLength','_-Trans_Data'),
('Trans_Data',':'),
)
UnicodeStructure = (
('Pad','B'),
('Name','u'),
('Trans_ParametersLength','_-Trans_Parameters'),
('Trans_Parameters',':'),
('Trans_DataLength','_-Trans_Data'),
('Trans_Data',':'),
)
class SMBTransaction_Data(Structure):
structure = (
('NameLength','_-Name'),
('Name',':'),
('Trans_ParametersLength','_-Trans_Parameters'),
('Trans_Parameters',':'),
('Trans_DataLength','_-Trans_Data'),
('Trans_Data',':'),
)
class SMBTransactionResponse_Data(Structure):
structure = (
('Trans_ParametersLength','_-Trans_Parameters'),
('Trans_Parameters',':'),
('Trans_DataLength','_-Trans_Data'),
('Trans_Data',':'),
)
############# SMB_COM_READ_ANDX (0x2E)
class SMBReadAndX_Parameters(SMBAndXCommand_Parameters):
structure = (
('Fid','<H'),
('Offset','<L'),
('MaxCount','<H'),
('MinCount','<H=MaxCount'),
('_reserved','<L=0x0'),
('Remaining','<H=MaxCount'),
('HighOffset','<L=0'),
)
class SMBReadAndX_Parameters2(SMBAndXCommand_Parameters):
structure = (
('Fid','<H'),
('Offset','<L'),
('MaxCount','<H'),
('MinCount','<H=MaxCount'),
('_reserved','<L=0xffffffff'),
('Remaining','<H=MaxCount'),
)
class SMBReadAndXResponse_Parameters(SMBAndXCommand_Parameters):
structure = (
('Remaining','<H=0'),
('DataMode','<H=0'),
('_reserved','<H=0'),
('DataCount','<H'),
('DataOffset','<H'),
('DataCount_Hi','<L'),
('_reserved2','6s=""'),
)
############# SMB_COM_ECHO (0x2B)
class SMBEcho_Data(Structure):
structure = (
('Data',':'),
)
class SMBEcho_Parameters(Structure):
structure = (
('EchoCount','<H'),
)
class SMBEchoResponse_Data(Structure):
structure = (
('Data',':'),
)
class SMBEchoResponse_Parameters(Structure):
structure = (
('SequenceNumber','<H=1'),
)
############# SMB_COM_QUERY_INFORMATION_DISK (0x80)
class SMBQueryInformationDiskResponse_Parameters(Structure):
structure = (
('TotalUnits','<H'),
('BlocksPerUnit','<H'),
('BlockSize','<H'),
('FreeUnits','<H'),
('Reserved','<H=0'),
)
############# SMB_COM_LOGOFF_ANDX (0x74)
class SMBLogOffAndX(SMBAndXCommand_Parameters):
    structure = ()
############# SMB_COM_CLOSE (0x04)
class SMBClose_Parameters(SMBCommand_Parameters):
structure = (
('FID','<H'),
('Time','<L=0'),
)
############# SMB_COM_FLUSH (0x05)
class SMBFlush_Parameters(SMBCommand_Parameters):
structure = (
('FID','<H'),
)
############# SMB_COM_CREATE_DIRECTORY (0x00)
class SMBCreateDirectory_Data(AsciiOrUnicodeStructure):
AsciiStructure = (
('BufferFormat','<B=4'),
('DirectoryName','z'),
)
UnicodeStructure = (
('BufferFormat','<B=4'),
('DirectoryName','u'),
)
############# SMB_COM_DELETE (0x06)
class SMBDelete_Data(AsciiOrUnicodeStructure):
AsciiStructure = (
('BufferFormat','<B=4'),
('FileName','z'),
)
UnicodeStructure = (
('BufferFormat','<B=4'),
('FileName','u'),
)
class SMBDelete_Parameters(Structure):
structure = (
('SearchAttributes','<H'),
)
############# SMB_COM_DELETE_DIRECTORY (0x01)
class SMBDeleteDirectory_Data(AsciiOrUnicodeStructure):
AsciiStructure = (
('BufferFormat','<B=4'),
('DirectoryName','z'),
)
UnicodeStructure = (
('BufferFormat','<B=4'),
('DirectoryName','u'),
)
############# SMB_COM_CHECK_DIRECTORY (0x10)
class SMBCheckDirectory_Data(AsciiOrUnicodeStructure):
AsciiStructure = (
('BufferFormat','<B=4'),
('DirectoryName','z'),
)
UnicodeStructure = (
('BufferFormat','<B=4'),
('DirectoryName','u'),
)
############# SMB_COM_RENAME (0x07)
class SMBRename_Parameters(SMBCommand_Parameters):
structure = (
('SearchAttributes','<H'),
)
class SMBRename_Data(AsciiOrUnicodeStructure):
AsciiStructure = (
('BufferFormat1','<B=4'),
('OldFileName','z'),
('BufferFormat2','<B=4'),
('NewFileName','z'),
)
UnicodeStructure = (
('BufferFormat1','<B=4'),
('OldFileName','u'),
('BufferFormat2','<B=4'),
('Pad','B=0'),
('NewFileName','u'),
)
############# SMB_COM_OPEN (0x02)
class SMBOpen_Parameters(SMBCommand_Parameters):
structure = (
('DesiredAccess','<H=0'),
('SearchAttributes','<H=0'),
)
class SMBOpen_Data(AsciiOrUnicodeStructure):
AsciiStructure = (
('FileNameFormat','"\x04'),
('FileName','z'),
)
UnicodeStructure = (
('FileNameFormat','"\x04'),
('FileName','z'),
)
class SMBOpenResponse_Parameters(SMBCommand_Parameters):
structure = (
('Fid','<H=0'),
('FileAttributes','<H=0'),
('LastWriten','<L=0'),
('FileSize','<L=0'),
('GrantedAccess','<H=0'),
)
############# EXTENDED SECURITY CLASSES
class SMBExtended_Security_Parameters(Structure):
structure = (
('DialectIndex','<H'),
('SecurityMode','<B'),
('MaxMpxCount','<H'),
('MaxNumberVcs','<H'),
('MaxBufferSize','<L'),
('MaxRawSize','<L'),
('SessionKey','<L'),
('Capabilities','<L'),
('LowDateTime','<L'),
('HighDateTime','<L'),
('ServerTimeZone','<H'),
('ChallengeLength','<B'),
)
class SMBExtended_Security_Data(Structure):
structure = (
('ServerGUID','16s'),
('SecurityBlob',':'),
)
class SMBNTLMDialect_Parameters(Structure):
structure = (
('DialectIndex','<H'),
('SecurityMode','<B'),
('MaxMpxCount','<H'),
('MaxNumberVcs','<H'),
('MaxBufferSize','<L'),
('MaxRawSize','<L'),
('SessionKey','<L'),
('Capabilities','<L'),
('LowDateTime','<L'),
('HighDateTime','<L'),
('ServerTimeZone','<H'),
('ChallengeLength','<B'),
)
class SMBNTLMDialect_Data(Structure):
structure = (
('ChallengeLength','_-Challenge','self["ChallengeLength"]'),
('Challenge',':'),
('Payload',':'),
# For some reason on an old Linux this field is not present, we have to check this out. There must be a flag stating this.
('DomainName','_'),
('ServerName','_'),
)
def __init__(self,data = None, alignment = 0):
Structure.__init__(self,data,alignment)
#self['ChallengeLength']=8
def fromString(self,data):
Structure.fromString(self,data)
self['DomainName'] = ''
self['ServerName'] = ''
class SMB:
# SMB Command Codes
SMB_COM_CREATE_DIRECTORY = 0x00
SMB_COM_DELETE_DIRECTORY = 0x01
SMB_COM_OPEN = 0x02
SMB_COM_CREATE = 0x03
SMB_COM_CLOSE = 0x04
SMB_COM_FLUSH = 0x05
SMB_COM_DELETE = 0x06
SMB_COM_RENAME = 0x07
SMB_COM_QUERY_INFORMATION = 0x08
SMB_COM_SET_INFORMATION = 0x09
SMB_COM_READ = 0x0A
SMB_COM_WRITE = 0x0B
SMB_COM_LOCK_BYTE_RANGE = 0x0C
SMB_COM_UNLOCK_BYTE_RANGE = 0x0D
SMB_COM_CREATE_TEMPORARY = 0x0E
SMB_COM_CREATE_NEW = 0x0F
SMB_COM_CHECK_DIRECTORY = 0x10
SMB_COM_PROCESS_EXIT = 0x11
SMB_COM_SEEK = 0x12
SMB_COM_LOCK_AND_READ = 0x13
SMB_COM_WRITE_AND_UNLOCK = 0x14
SMB_COM_READ_RAW = 0x1A
SMB_COM_READ_MPX = 0x1B
SMB_COM_READ_MPX_SECONDARY = 0x1C
SMB_COM_WRITE_RAW = 0x1D
SMB_COM_WRITE_MPX = 0x1E
SMB_COM_WRITE_MPX_SECONDARY = 0x1F
SMB_COM_WRITE_COMPLETE = 0x20
SMB_COM_QUERY_SERVER = 0x21
SMB_COM_SET_INFORMATION2 = 0x22
SMB_COM_QUERY_INFORMATION2 = 0x23
SMB_COM_LOCKING_ANDX = 0x24
SMB_COM_TRANSACTION = 0x25
SMB_COM_TRANSACTION_SECONDARY = 0x26
SMB_COM_IOCTL = 0x27
SMB_COM_IOCTL_SECONDARY = 0x28
SMB_COM_COPY = 0x29
SMB_COM_MOVE = 0x2A
SMB_COM_ECHO = 0x2B
SMB_COM_WRITE_AND_CLOSE = 0x2C
SMB_COM_OPEN_ANDX = 0x2D
SMB_COM_READ_ANDX = 0x2E
SMB_COM_WRITE_ANDX = 0x2F
SMB_COM_NEW_FILE_SIZE = 0x30
SMB_COM_CLOSE_AND_TREE_DISC = 0x31
SMB_COM_TRANSACTION2 = 0x32
SMB_COM_TRANSACTION2_SECONDARY = 0x33
SMB_COM_FIND_CLOSE2 = 0x34
SMB_COM_FIND_NOTIFY_CLOSE = 0x35
# Used by Xenix/Unix 0x60 - 0x6E
SMB_COM_TREE_CONNECT = 0x70
SMB_COM_TREE_DISCONNECT = 0x71
SMB_COM_NEGOTIATE = 0x72
SMB_COM_SESSION_SETUP_ANDX = 0x73
SMB_COM_LOGOFF_ANDX = 0x74
SMB_COM_TREE_CONNECT_ANDX = 0x75
SMB_COM_QUERY_INFORMATION_DISK = 0x80
SMB_COM_SEARCH = 0x81
SMB_COM_FIND = 0x82
SMB_COM_FIND_UNIQUE = 0x83
SMB_COM_FIND_CLOSE = 0x84
SMB_COM_NT_TRANSACT = 0xA0
SMB_COM_NT_TRANSACT_SECONDARY = 0xA1
SMB_COM_NT_CREATE_ANDX = 0xA2
SMB_COM_NT_CANCEL = 0xA4
SMB_COM_NT_RENAME = 0xA5
SMB_COM_OPEN_PRINT_FILE = 0xC0
SMB_COM_WRITE_PRINT_FILE = 0xC1
SMB_COM_CLOSE_PRINT_FILE = 0xC2
SMB_COM_GET_PRINT_QUEUE = 0xC3
SMB_COM_READ_BULK = 0xD8
SMB_COM_WRITE_BULK = 0xD9
SMB_COM_WRITE_BULK_DATA = 0xDA
# TRANSACT codes
TRANS_TRANSACT_NMPIPE = 0x26
# TRANSACT2 codes
TRANS2_FIND_FIRST2 = 0x0001
TRANS2_FIND_NEXT2 = 0x0002
TRANS2_QUERY_FS_INFORMATION = 0x0003
TRANS2_QUERY_PATH_INFORMATION = 0x0005
TRANS2_QUERY_FILE_INFORMATION = 0x0007
TRANS2_SET_FILE_INFORMATION = 0x0008
TRANS2_SET_PATH_INFORMATION = 0x0006
# Security Share Mode (Used internally by SMB class)
SECURITY_SHARE_MASK = 0x01
SECURITY_SHARE_SHARE = 0x00
SECURITY_SHARE_USER = 0x01
SECURITY_SIGNATURES_ENABLED = 0X04
SECURITY_SIGNATURES_REQUIRED = 0X08
# Security Auth Mode (Used internally by SMB class)
SECURITY_AUTH_MASK = 0x02
SECURITY_AUTH_ENCRYPTED = 0x02
SECURITY_AUTH_PLAINTEXT = 0x00
# Raw Mode Mask (Used internally by SMB class. Good for dialect up to and including LANMAN2.1)
RAW_READ_MASK = 0x01
RAW_WRITE_MASK = 0x02
# Capabilities Mask (Used internally by SMB class. Good for dialect NT LM 0.12)
CAP_RAW_MODE = 0x00000001
CAP_MPX_MODE = 0x0002
CAP_UNICODE = 0x0004
CAP_LARGE_FILES = 0x0008
CAP_EXTENDED_SECURITY = 0x80000000
CAP_USE_NT_ERRORS = 0x40
CAP_NT_SMBS = 0x10
CAP_LARGE_READX = 0x00004000
CAP_LARGE_WRITEX = 0x00008000
CAP_RPC_REMOTE_APIS = 0x20
# Flags1 Mask
FLAGS1_LOCK_AND_READ_OK = 0x01
FLAGS1_PATHCASELESS = 0x08
FLAGS1_CANONICALIZED_PATHS = 0x10
FLAGS1_REPLY = 0x80
# Flags2 Mask
FLAGS2_LONG_NAMES = 0x0001
FLAGS2_EAS = 0x0002
FLAGS2_SMB_SECURITY_SIGNATURE = 0x0004
FLAGS2_IS_LONG_NAME = 0x0040
FLAGS2_DFS = 0x1000
FLAGS2_PAGING_IO = 0x2000
FLAGS2_NT_STATUS = 0x4000
FLAGS2_UNICODE = 0x8000
FLAGS2_COMPRESSED = 0x0008
FLAGS2_SMB_SECURITY_SIGNATURE_REQUIRED = 0x0010
FLAGS2_EXTENDED_SECURITY = 0x0800
# Dialect's Security Mode flags
NEGOTIATE_USER_SECURITY = 0x01
NEGOTIATE_ENCRYPT_PASSWORDS = 0x02
NEGOTIATE_SECURITY_SIGNATURE_ENABLE = 0x04
NEGOTIATE_SECURITY_SIGNATURE_REQUIRED = 0x08
    # Tree Connect AndX Response OptionalSupport flags
SMB_SUPPORT_SEARCH_BITS = 0x01
SMB_SHARE_IS_IN_DFS = 0x02
def __init__(self, remote_name, remote_host, my_name = None, host_type = nmb.TYPE_SERVER, sess_port = 445, timeout=None, UDP = 0, session = None, negPacket = None):
# The uid attribute will be set when the client calls the login() method
self._uid = 0
self.__server_name = ''
self.__server_os = ''
self.__server_os_major = None
self.__server_os_minor = None
self.__server_os_build = None
self.__server_lanman = ''
self.__server_domain = ''
self.__server_dns_domain_name = ''
self.__remote_name = string.upper(remote_name)
self.__remote_host = remote_host
self.__isNTLMv2 = True
self._dialects_parameters = None
self._dialects_data = None
# Credentials
self.__userName = ''
self.__password = ''
self.__domain = ''
self.__lmhash = ''
self.__nthash = ''
self.__aesKey = ''
self.__kdc = ''
self.__TGT = None
self.__TGS = None
# Negotiate Protocol Result, used everywhere
# Could be extended or not, flags should be checked before
self._dialect_data = 0
self._dialect_parameters = 0
self._action = 0
self._sess = None
self.encrypt_passwords = True
self.tid = 0
self.fid = 0
# Signing stuff
self._SignSequenceNumber = 0
self._SigningSessionKey = ''
self._SigningChallengeResponse = ''
self._SignatureEnabled = False
self._SignatureVerificationEnabled = False
self._SignatureRequired = False
        # Base flags (default flags, can be overridden using set_flags())
self.__flags1 = SMB.FLAGS1_PATHCASELESS | SMB.FLAGS1_CANONICALIZED_PATHS
self.__flags2 = SMB.FLAGS2_EXTENDED_SECURITY | SMB.FLAGS2_NT_STATUS | SMB.FLAGS2_LONG_NAMES
if timeout is None:
self.__timeout = 60
else:
self.__timeout = timeout
# If port 445 and the name sent is *SMBSERVER we're setting the name to the IP.
# This is to help some old applications still believing
        # *SMBSERVER will work against modern OSes. If port is NETBIOS_SESSION_PORT the user better
# know about *SMBSERVER's limitations
if sess_port == 445 and remote_name == '*SMBSERVER':
self.__remote_name = remote_host
if session is None:
if not my_name:
my_name = socket.gethostname()
i = string.find(my_name, '.')
if i > -1:
my_name = my_name[:i]
if UDP:
self._sess = nmb.NetBIOSUDPSession(my_name, remote_name, remote_host, host_type, sess_port, self.__timeout)
else:
self._sess = nmb.NetBIOSTCPSession(my_name, remote_name, remote_host, host_type, sess_port, self.__timeout)
# Initialize session values (_dialect_data and _dialect_parameters)
self.neg_session()
# Call login() without any authentication information to
# setup a session if the remote server
# is in share mode.
if (self._dialects_parameters['SecurityMode'] & SMB.SECURITY_SHARE_MASK) == SMB.SECURITY_SHARE_SHARE:
self.login('', '')
else:
self._sess = session
self.neg_session(negPacket = negPacket)
# Call login() without any authentication information to
# setup a session if the remote server
# is in share mode.
if (self._dialects_parameters['SecurityMode'] & SMB.SECURITY_SHARE_MASK) == SMB.SECURITY_SHARE_SHARE:
self.login('', '')
@staticmethod
def ntlm_supported():
return False
def get_remote_name(self):
return self.__remote_name
def get_remote_host(self):
return self.__remote_host
def get_flags(self):
return self.__flags1, self.__flags2
def set_flags(self, flags1=None, flags2=None):
if flags1 is not None:
self.__flags1 = flags1
if flags2 is not None:
self.__flags2 = flags2
def set_timeout(self, timeout):
prev_timeout = self.__timeout
self.__timeout = timeout
return prev_timeout
def get_timeout(self):
return self.__timeout
@contextmanager
def use_timeout(self, timeout):
prev_timeout = self.set_timeout(timeout)
try:
yield
finally:
self.set_timeout(prev_timeout)
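    # Illustrative usage sketch (hypothetical caller code, commented out), where
    # conn is an instance of this SMB class:
    #   with conn.use_timeout(5):
    #       conn.recvSMB()      # temporary 5 second timeout, restored on exit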
def get_session(self):
return self._sess
def get_tid(self):
return self.tid
def get_fid(self):
return self.fid
def isGuestSession(self):
return self._action & SMB_SETUP_GUEST
def doesSupportNTLMv2(self):
return self.__isNTLMv2
def __del__(self):
if self._sess:
self._sess.close()
def recvSMB(self):
r = self._sess.recv_packet(self.__timeout)
return NewSMBPacket(data = r.get_trailer())
@staticmethod
def __decode_trans(params, data):
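        # Parses a Transaction response: the fixed 19-byte parameter block is
        # unpacked below and the parameter/data offsets are rebased by subtracting
        # 55 + setupcnt * 2 before slicing the payload out of the data buffer.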
totparamcnt, totdatacnt, _, paramcnt, paramoffset, paramds, datacnt, dataoffset, datads, setupcnt = unpack('<HHHHHHHHHB', params[:19])
if paramcnt + paramds < totparamcnt or datacnt + datads < totdatacnt:
has_more = 1
else:
has_more = 0
paramoffset = paramoffset - 55 - setupcnt * 2
dataoffset = dataoffset - 55 - setupcnt * 2
return has_more, params[20:20 + setupcnt * 2], data[paramoffset:paramoffset + paramcnt], data[dataoffset:dataoffset + datacnt]
# TODO: Move this to NewSMBPacket, it belongs there
def signSMB(self, packet, signingSessionKey, signingChallengeResponse):
# This logic MUST be applied for messages sent in response to any of the higher-layer actions and in
# compliance with the message sequencing rules.
# * The client or server that sends the message MUST provide the 32-bit sequence number for this
# message, as specified in sections 3.2.4.1 and 3.3.4.1.
# * The SMB_FLAGS2_SMB_SECURITY_SIGNATURE flag in the header MUST be set.
# * To generate the signature, a 32-bit sequence number is copied into the
# least significant 32 bits of the SecuritySignature field and the remaining
# 4 bytes are set to 0x00.
# * The MD5 algorithm, as specified in [RFC1321], MUST be used to generate a hash of the SMB
# message from the start of the SMB Header, which is defined as follows.
# CALL MD5Init( md5context )
# CALL MD5Update( md5context, Connection.SigningSessionKey )
# CALL MD5Update( md5context, Connection.SigningChallengeResponse )
# CALL MD5Update( md5context, SMB message )
# CALL MD5Final( digest, md5context )
# SET signature TO the first 8 bytes of the digest
# The resulting 8-byte signature MUST be copied into the SecuritySignature field of the SMB Header,
# after which the message can be transmitted.
#print "seq(%d) signingSessionKey %r, signingChallengeResponse %r" % (self._SignSequenceNumber, signingSessionKey, signingChallengeResponse)
packet['SecurityFeatures'] = pack('<q',self._SignSequenceNumber)
# Sign with the sequence
m = hashlib.md5()
m.update( signingSessionKey )
m.update( signingChallengeResponse )
m.update( str(packet) )
        # Replace the sequence number with the actual hash
packet['SecurityFeatures'] = m.digest()[:8]
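        # The sequence number advances once per signed message; when we do not
        # verify incoming signatures the (skipped) response still consumes a slot,
        # hence the increment by 2 below.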
if self._SignatureVerificationEnabled:
self._SignSequenceNumber +=1
else:
self._SignSequenceNumber +=2
def checkSignSMB(self, packet, signingSessionKey, signingChallengeResponse):
# Let's check
signature = packet['SecurityFeatures']
#print "Signature received: %r " % signature
self.signSMB(packet, signingSessionKey, signingChallengeResponse)
#print "Signature calculated: %r" % packet['SecurityFeatures']
if self._SignatureVerificationEnabled is not True:
self._SignSequenceNumber -= 1
return packet['SecurityFeatures'] == signature
def sendSMB(self,smb):
smb['Uid'] = self._uid
#At least on AIX, PIDs can exceed 16 bits, so we mask them out
smb['Pid'] = (os.getpid() & 0xFFFF)
# set flags
smb['Flags1'] |= self.__flags1
smb['Flags2'] |= self.__flags2
if self._SignatureEnabled:
smb['Flags2'] |= SMB.FLAGS2_SMB_SECURITY_SIGNATURE
self.signSMB(smb, self._SigningSessionKey, self._SigningChallengeResponse)
self._sess.send_packet(str(smb))
@staticmethod
def isValidAnswer(s, cmd):
while 1:
if s.rawData():
if s.get_command() == cmd:
if s.get_error_class() == 0x00 and s.get_error_code() == 0x00:
return 1
else:
raise SessionError( "SMB Library Error", s.get_error_class()+ (s.get_reserved() << 8), s.get_error_code() , s.get_flags2() & SMB.FLAGS2_NT_STATUS)
else:
break
return 0
def neg_session(self, extended_security = True, negPacket = None):
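        # Negotiates the "NT LM 0.12" dialect. parsePacket() below first reads the
        # reply with the plain NTLM dialect structures and, if the server announces
        # CAP_EXTENDED_SECURITY, reparses it with the extended security structures.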
def parsePacket(smb):
if smb.isValidAnswer(SMB.SMB_COM_NEGOTIATE):
sessionResponse = SMBCommand(smb['Data'][0])
self._dialects_parameters = SMBNTLMDialect_Parameters(sessionResponse['Parameters'])
self._dialects_data = SMBNTLMDialect_Data()
self._dialects_data['ChallengeLength'] = self._dialects_parameters['ChallengeLength']
self._dialects_data.fromString(sessionResponse['Data'])
if self._dialects_parameters['Capabilities'] & SMB.CAP_EXTENDED_SECURITY:
# Whether we choose it or it is enforced by the server, we go for extended security
self._dialects_parameters = SMBExtended_Security_Parameters(sessionResponse['Parameters'])
self._dialects_data = SMBExtended_Security_Data(sessionResponse['Data'])
# Let's setup some variable for later use
if self._dialects_parameters['SecurityMode'] & SMB.SECURITY_SIGNATURES_REQUIRED:
self._SignatureRequired = True
# Interestingly, the security Blob might be missing sometimes.
#spnego = SPNEGO_NegTokenInit(self._dialects_data['SecurityBlob'])
#for i in spnego['MechTypes']:
# print "Mech Found: %s" % MechTypes[i]
return 1
# If not, let's try the old way
else:
if self._dialects_data['ServerName'] is not None:
self.__server_name = self._dialects_data['ServerName']
if self._dialects_parameters['DialectIndex'] == 0xffff:
raise UnsupportedFeature("Remote server does not know NT LM 0.12")
return 1
else:
return 0
if negPacket is None:
smb = NewSMBPacket()
negSession = SMBCommand(SMB.SMB_COM_NEGOTIATE)
flags2 = self.get_flags()[1]
if extended_security is True:
self.set_flags(flags2=flags2|SMB.FLAGS2_EXTENDED_SECURITY)
else:
self.set_flags(flags2=flags2 & (~SMB.FLAGS2_EXTENDED_SECURITY))
negSession['Data'] = '\x02NT LM 0.12\x00'
smb.addCommand(negSession)
self.sendSMB(smb)
while 1:
smb = self.recvSMB()
return parsePacket(smb)
else:
return parsePacket( NewSMBPacket( data = negPacket))
def tree_connect(self, path, password = '', service = SERVICE_ANY):
        LOG.warning("[MS-CIFS] This is an original Core Protocol command. This command has been deprecated. Client implementations SHOULD use SMB_COM_TREE_CONNECT_ANDX")
# return 0x800
if password:
# Password is only encrypted if the server passed us an "encryption" during protocol dialect
if self._dialects_parameters['ChallengeLength'] > 0:
# this code is untested
password = self.get_ntlmv1_response(ntlm.compute_lmhash(password))
if not unicode_support:
if unicode_convert:
path = str(path)
else:
                raise Exception('SMB: Can\'t convert path from unicode!')
smb = NewSMBPacket()
treeConnect = SMBCommand(SMB.SMB_COM_TREE_CONNECT)
treeConnect['Parameters'] = SMBTreeConnect_Parameters()
treeConnect['Data'] = SMBTreeConnect_Data()
treeConnect['Data']['Path'] = path.upper()
treeConnect['Data']['Password'] = password
treeConnect['Data']['Service'] = service
smb.addCommand(treeConnect)
self.sendSMB(smb)
while 1:
smb = self.recvSMB()
if smb.isValidAnswer(SMB.SMB_COM_TREE_CONNECT):
# XXX Here we are ignoring the rest of the response
return smb['Tid']
return smb['Tid']
def get_uid(self):
return self._uid
def set_uid(self, uid):
self._uid = uid
def tree_connect_andx(self, path, password = None, service = SERVICE_ANY, smb_packet=None):
if password:
            # Password is only encrypted if the server passed us an "encryption key" during protocol dialect negotiation
if self._dialects_parameters['ChallengeLength'] > 0:
# this code is untested
password = self.get_ntlmv1_response(ntlm.compute_lmhash(password))
else:
password = '\x00'
if not unicode_support:
if unicode_convert:
path = str(path)
else:
                raise Exception('SMB: Can\'t convert path from unicode!')
if smb_packet is None:
smb = NewSMBPacket()
else:
smb = smb_packet
        # Just in case this came with the full path, let's just leave
# the sharename, we'll take care of the rest
share = path.split('\\')[-1]
try:
_, _, _, _, sockaddr = socket.getaddrinfo(self.get_remote_host(), 80, 0, 0, socket.IPPROTO_TCP)[0]
remote_host = sockaddr[0]
except Exception:
remote_host = self.get_remote_host()
path = '\\\\' + remote_host + '\\' +share
path = path.upper().encode('utf-16le') if self.__flags2 & SMB.FLAGS2_UNICODE else path
treeConnect = SMBCommand(SMB.SMB_COM_TREE_CONNECT_ANDX)
treeConnect['Parameters'] = SMBTreeConnectAndX_Parameters()
treeConnect['Data'] = SMBTreeConnectAndX_Data(flags=self.__flags2)
treeConnect['Parameters']['PasswordLength'] = len(password)
treeConnect['Data']['Password'] = password
treeConnect['Data']['Path'] = path
treeConnect['Data']['Service'] = service
if self.__flags2 & SMB.FLAGS2_UNICODE:
treeConnect['Data']['Pad'] = 0x0
smb.addCommand(treeConnect)
# filename = "\PIPE\epmapper"
# ntCreate = SMBCommand(SMB.SMB_COM_NT_CREATE_ANDX)
# ntCreate['Parameters'] = SMBNtCreateAndX_Parameters()
# ntCreate['Data'] = SMBNtCreateAndX_Data()
# ntCreate['Parameters']['FileNameLength'] = len(filename)
# ntCreate['Parameters']['CreateFlags'] = 0
# ntCreate['Parameters']['AccessMask'] = 0x3
# ntCreate['Parameters']['CreateOptions'] = 0x0
# ntCreate['Data']['FileName'] = filename
# smb.addCommand(ntCreate)
self.sendSMB(smb)
while 1:
smb = self.recvSMB()
if smb.isValidAnswer(SMB.SMB_COM_TREE_CONNECT_ANDX):
# XXX Here we are ignoring the rest of the response
self.tid = smb['Tid']
return self.tid
self.tid = smb['Tid']
return self.tid
    # backwards compatibility
connect_tree = tree_connect_andx
@staticmethod
def getDialect():
return SMB_DIALECT
def get_server_name(self):
#return self._dialects_data['ServerName']
return self.__server_name
def get_session_key(self):
return self._SigningSessionKey
def set_session_key(self, key):
self._SigningSessionKey = key
def get_encryption_key(self):
if 'Challenge' in self._dialects_data.fields:
return self._dialects_data['Challenge']
else:
return None
def get_server_time(self):
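        # The negotiate response carries the server time as a 64-bit FILETIME (100-nanosecond
        # intervals since 1601-01-01); convert it to a Unix timestamp by subtracting the epoch
        # difference and scaling down to seconds.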
timestamp = self._dialects_parameters['HighDateTime']
timestamp <<= 32
timestamp |= self._dialects_parameters['LowDateTime']
timestamp -= 116444736000000000
timestamp /= 10000000
d = datetime.datetime.utcfromtimestamp(timestamp)
return d.strftime("%a, %d %b %Y %H:%M:%S GMT")
def disconnect_tree(self, tid):
smb = NewSMBPacket()
smb['Tid'] = tid
smb.addCommand(SMBCommand(SMB.SMB_COM_TREE_DISCONNECT))
self.sendSMB(smb)
self.recvSMB()
def open(self, tid, filename, open_mode, desired_access):
filename = string.replace(filename,'/', '\\')
filename = filename.encode('utf-16le') if self.__flags2 & SMB.FLAGS2_UNICODE else filename
smb = NewSMBPacket()
smb['Tid'] = tid
openFile = SMBCommand(SMB.SMB_COM_OPEN)
openFile['Parameters'] = SMBOpen_Parameters()
openFile['Parameters']['DesiredAccess'] = desired_access
openFile['Parameters']['OpenMode'] = open_mode
openFile['Parameters']['SearchAttributes'] = ATTR_READONLY | ATTR_HIDDEN | ATTR_ARCHIVE
openFile['Data'] = SMBOpen_Data(flags=self.__flags2)
openFile['Data']['FileName'] = filename
if self.__flags2 & SMB.FLAGS2_UNICODE:
openFile['Data']['Pad'] = 0x0
smb.addCommand(openFile)
self.sendSMB(smb)
smb = self.recvSMB()
if smb.isValidAnswer(SMB.SMB_COM_OPEN):
# XXX Here we are ignoring the rest of the response
openFileResponse = SMBCommand(smb['Data'][0])
openFileParameters = SMBOpenResponse_Parameters(openFileResponse['Parameters'])
return (
openFileParameters['Fid'],
openFileParameters['FileAttributes'],
openFileParameters['LastWriten'],
openFileParameters['FileSize'],
openFileParameters['GrantedAccess'],
)
def open_andx(self, tid, filename, open_mode, desired_access):
filename = string.replace(filename,'/', '\\')
filename = filename.encode('utf-16le') if self.__flags2 & SMB.FLAGS2_UNICODE else filename
smb = NewSMBPacket()
smb['Tid'] = tid
openFile = SMBCommand(SMB.SMB_COM_OPEN_ANDX)
openFile['Parameters'] = SMBOpenAndX_Parameters()
openFile['Parameters']['DesiredAccess'] = desired_access
openFile['Parameters']['OpenMode'] = open_mode
openFile['Parameters']['SearchAttributes'] = ATTR_READONLY | ATTR_HIDDEN | ATTR_ARCHIVE
openFile['Data'] = SMBOpenAndX_Data(flags=self.__flags2)
openFile['Data']['FileName'] = filename
if self.__flags2 & SMB.FLAGS2_UNICODE:
openFile['Data']['Pad'] = 0x0
smb.addCommand(openFile)
self.sendSMB(smb)
smb = self.recvSMB()
if smb.isValidAnswer(SMB.SMB_COM_OPEN_ANDX):
# XXX Here we are ignoring the rest of the response
openFileResponse = SMBCommand(smb['Data'][0])
openFileParameters = SMBOpenAndXResponse_Parameters(openFileResponse['Parameters'])
return (
openFileParameters['Fid'],
openFileParameters['FileAttributes'],
openFileParameters['LastWriten'],
openFileParameters['FileSize'],
openFileParameters['GrantedAccess'],
openFileParameters['FileType'],
openFileParameters['IPCState'],
openFileParameters['Action'],
openFileParameters['ServerFid'],
)
def close(self, tid, fid):
smb = NewSMBPacket()
smb['Tid'] = tid
closeFile = SMBCommand(SMB.SMB_COM_CLOSE)
closeFile['Parameters'] = SMBClose_Parameters()
closeFile['Parameters']['FID'] = fid
smb.addCommand(closeFile)
self.sendSMB(smb)
smb = self.recvSMB()
if smb.isValidAnswer(SMB.SMB_COM_CLOSE):
return 1
return 0
def send_trans(self, tid, setup, name, param, data, noAnswer = 0):
smb = NewSMBPacket()
smb['Tid'] = tid
transCommand = SMBCommand(SMB.SMB_COM_TRANSACTION)
transCommand['Parameters'] = SMBTransaction_Parameters()
transCommand['Data'] = SMBTransaction_Data()
transCommand['Parameters']['Setup'] = setup
transCommand['Parameters']['TotalParameterCount'] = len(param)
transCommand['Parameters']['TotalDataCount'] = len(data)
transCommand['Parameters']['ParameterCount'] = len(param)
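        # Offsets are measured from the start of the SMB packet: 32-byte header, word-count and
        # byte-count fields (3 bytes), 28 bytes of fixed TRANSACTION parameter words, then the
        # setup words and the transaction name.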
transCommand['Parameters']['ParameterOffset'] = 32+3+28+len(setup)+len(name)
transCommand['Parameters']['DataCount'] = len(data)
transCommand['Parameters']['DataOffset'] = transCommand['Parameters']['ParameterOffset'] + len(param)
transCommand['Data']['Name'] = name
transCommand['Data']['Trans_Parameters'] = param
transCommand['Data']['Trans_Data'] = data
if noAnswer:
transCommand['Parameters']['Flags'] = TRANS_NO_RESPONSE
smb.addCommand(transCommand)
self.sendSMB(smb)
def send_trans2(self, tid, setup, name, param, data):
smb = NewSMBPacket()
smb['Tid'] = tid
command = pack('<H', setup)
transCommand = SMBCommand(SMB.SMB_COM_TRANSACTION2)
transCommand['Parameters'] = SMBTransaction2_Parameters()
transCommand['Parameters']['MaxDataCount'] = self._dialects_parameters['MaxBufferSize']
transCommand['Data'] = SMBTransaction2_Data()
transCommand['Parameters']['Setup'] = command
transCommand['Parameters']['TotalParameterCount'] = len(param)
transCommand['Parameters']['TotalDataCount'] = len(data)
if len(param) > 0:
padLen = (4 - (32+2+28 + len(command)) % 4 ) % 4
padBytes = '\xFF' * padLen
transCommand['Data']['Pad1'] = padBytes
else:
transCommand['Data']['Pad1'] = ''
padLen = 0
transCommand['Parameters']['ParameterCount'] = len(param)
transCommand['Parameters']['ParameterOffset'] = 32+2+28+len(command)+len(name) + padLen
if len(data) > 0:
pad2Len = (4 - (32+2+28 + len(command) + padLen + len(param)) % 4) % 4
transCommand['Data']['Pad2'] = '\xFF' * pad2Len
else:
transCommand['Data']['Pad2'] = ''
pad2Len = 0
transCommand['Parameters']['DataCount'] = len(data)
transCommand['Parameters']['DataOffset'] = transCommand['Parameters']['ParameterOffset'] + len(param) + pad2Len
transCommand['Data']['Name'] = name
transCommand['Data']['Trans_Parameters'] = param
transCommand['Data']['Trans_Data'] = data
smb.addCommand(transCommand)
self.sendSMB(smb)
def query_file_info(self, tid, fid, fileInfoClass = SMB_QUERY_FILE_STANDARD_INFO):
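        # TRANS2_QUERY_FILE_INFORMATION takes a 4-byte parameter block: the FID followed by the
        # requested information level, both little-endian 16-bit values.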
self.send_trans2(tid, SMB.TRANS2_QUERY_FILE_INFORMATION, '\x00', pack('<HH', fid, fileInfoClass), '')
resp = self.recvSMB()
if resp.isValidAnswer(SMB.SMB_COM_TRANSACTION2):
trans2Response = SMBCommand(resp['Data'][0])
trans2Parameters = SMBTransaction2Response_Parameters(trans2Response['Parameters'])
# Remove Potential Prefix Padding
return trans2Response['Data'][-trans2Parameters['TotalDataCount']:]
def __nonraw_retr_file(self, tid, fid, offset, datasize, callback):
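        # Pull the file down in chunks of at most max_buf_size bytes using READ_ANDX, handing
        # each chunk to the caller-supplied callback until datasize bytes have been read.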
if (self._dialects_parameters['Capabilities'] & SMB.CAP_LARGE_READX) and self._SignatureEnabled is False:
max_buf_size = 65000
else:
max_buf_size = self._dialects_parameters['MaxBufferSize'] & ~0x3ff # Read in multiple KB blocks
read_offset = offset
while read_offset < datasize:
data = self.read_andx(tid, fid, read_offset, max_buf_size)
callback(data)
read_offset += len(data)
def __nonraw_stor_file(self, tid, fid, offset, datasize, callback):
if (self._dialects_parameters['Capabilities'] & SMB.CAP_LARGE_WRITEX) and self._SignatureEnabled is False:
max_buf_size = 65000
else:
max_buf_size = self._dialects_parameters['MaxBufferSize'] & ~0x3ff # Write in multiple KB blocks
write_offset = offset
while 1:
data = callback(max_buf_size)
if not data:
break
smb = self.write_andx(tid,fid,data, write_offset)
writeResponse = SMBCommand(smb['Data'][0])
writeResponseParameters = SMBWriteAndXResponse_Parameters(writeResponse['Parameters'])
write_offset += writeResponseParameters['Count']
def get_server_domain(self):
return self.__server_domain
def get_server_dns_domain_name(self):
return self.__server_dns_domain_name
def get_server_os(self):
return self.__server_os
def get_server_os_major(self):
return self.__server_os_major
def get_server_os_minor(self):
return self.__server_os_minor
def get_server_os_build(self):
return self.__server_os_build
def set_server_os(self, os):
self.__server_os = os
def get_server_lanman(self):
return self.__server_lanman
def is_login_required(self):
# Login is required if share mode is user.
# Otherwise only public services or services in share mode
# are allowed.
return (self._dialects_parameters['SecurityMode'] & SMB.SECURITY_SHARE_MASK) == SMB.SECURITY_SHARE_USER
def is_signing_required(self):
return self._SignatureRequired
def get_ntlmv1_response(self, key):
challenge = self._dialects_data['Challenge']
return ntlm.get_ntlmv1_response(key, challenge)
def kerberos_login(self, user, password, domain = '', lmhash = '', nthash = '', aesKey = '', kdcHost = '', TGT=None, TGS=None):
# Importing down here so pyasn1 is not required if kerberos is not used.
from impacket.krb5.asn1 import AP_REQ, Authenticator, TGS_REP, seq_set
from impacket.krb5.kerberosv5 import getKerberosTGT, getKerberosTGS
from impacket.krb5 import constants
from impacket.krb5.types import Principal, KerberosTime, Ticket
from pyasn1.codec.der import decoder, encoder
import datetime
# login feature does not support unicode
# disable it if enabled
flags2 = self.__flags2
if flags2 & SMB.FLAGS2_UNICODE:
self.__flags2 = flags2 & (flags2 ^ SMB.FLAGS2_UNICODE)
# If TGT or TGS are specified, they are in the form of:
# TGS['KDC_REP'] = the response from the server
# TGS['cipher'] = the cipher used
# TGS['sessionKey'] = the sessionKey
# If we have hashes, normalize them
if lmhash != '' or nthash != '':
if len(lmhash) % 2: lmhash = '0%s' % lmhash
if len(nthash) % 2: nthash = '0%s' % nthash
try: # just in case they were converted already
lmhash = a2b_hex(lmhash)
nthash = a2b_hex(nthash)
except:
pass
self.__userName = user
self.__password = password
self.__domain = domain
self.__lmhash = lmhash
self.__nthash = nthash
self.__aesKey = aesKey
self.__kdc = kdcHost
self.__TGT = TGT
self.__TGS = TGS
# First of all, we need to get a TGT for the user
userName = Principal(user, type=constants.PrincipalNameType.NT_PRINCIPAL.value)
if TGT is None:
if TGS is None:
tgt, cipher, oldSessionKey, sessionKey = getKerberosTGT(userName, password, domain, lmhash, nthash, aesKey, kdcHost)
else:
tgt = TGT['KDC_REP']
cipher = TGT['cipher']
sessionKey = TGT['sessionKey']
# Now that we have the TGT, we should ask for a TGS for cifs
if TGS is None:
serverName = Principal('cifs/%s' % self.__remote_name, type=constants.PrincipalNameType.NT_SRV_INST.value)
tgs, cipher, oldSessionKey, sessionKey = getKerberosTGS(serverName, domain, kdcHost, tgt, cipher, sessionKey)
else:
tgs = TGS['KDC_REP']
cipher = TGS['cipher']
sessionKey = TGS['sessionKey']
smb = NewSMBPacket()
# Are we required to sign SMB? If so we do it, if not we skip it
if self._SignatureRequired:
smb['Flags2'] |= SMB.FLAGS2_SMB_SECURITY_SIGNATURE
sessionSetup = SMBCommand(SMB.SMB_COM_SESSION_SETUP_ANDX)
sessionSetup['Parameters'] = SMBSessionSetupAndX_Extended_Parameters()
sessionSetup['Data'] = SMBSessionSetupAndX_Extended_Data()
sessionSetup['Parameters']['MaxBufferSize'] = 61440
sessionSetup['Parameters']['MaxMpxCount'] = 2
sessionSetup['Parameters']['VcNumber'] = 1
sessionSetup['Parameters']['SessionKey'] = 0
sessionSetup['Parameters']['Capabilities'] = SMB.CAP_EXTENDED_SECURITY | SMB.CAP_USE_NT_ERRORS | SMB.CAP_UNICODE | SMB.CAP_LARGE_READX | SMB.CAP_LARGE_WRITEX
# Let's build a NegTokenInit with the NTLMSSP
# TODO: In the future we should be able to choose different providers
blob = SPNEGO_NegTokenInit()
# Kerberos v5 mech
blob['MechTypes'] = [TypesMech['MS KRB5 - Microsoft Kerberos 5']]
# Let's extract the ticket from the TGS
tgs = decoder.decode(tgs, asn1Spec = TGS_REP())[0]
ticket = Ticket()
ticket.from_asn1(tgs['ticket'])
# Now let's build the AP_REQ
apReq = AP_REQ()
apReq['pvno'] = 5
apReq['msg-type'] = int(constants.ApplicationTagNumbers.AP_REQ.value)
opts = list()
apReq['ap-options'] = constants.encodeFlags(opts)
seq_set(apReq,'ticket', ticket.to_asn1)
authenticator = Authenticator()
authenticator['authenticator-vno'] = 5
authenticator['crealm'] = domain
seq_set(authenticator, 'cname', userName.components_to_asn1)
now = datetime.datetime.utcnow()
authenticator['cusec'] = now.microsecond
authenticator['ctime'] = KerberosTime.to_asn1(now)
encodedAuthenticator = encoder.encode(authenticator)
# Key Usage 11
# AP-REQ Authenticator (includes application authenticator
# subkey), encrypted with the application session key
# (Section 5.5.1)
encryptedEncodedAuthenticator = cipher.encrypt(sessionKey, 11, encodedAuthenticator, None)
apReq['authenticator'] = None
apReq['authenticator']['etype'] = cipher.enctype
apReq['authenticator']['cipher'] = encryptedEncodedAuthenticator
blob['MechToken'] = encoder.encode(apReq)
sessionSetup['Parameters']['SecurityBlobLength'] = len(blob)
sessionSetup['Parameters'].getData()
sessionSetup['Data']['SecurityBlob'] = blob.getData()
# Fake Data here, don't want to get us fingerprinted
sessionSetup['Data']['NativeOS'] = 'Unix'
sessionSetup['Data']['NativeLanMan'] = 'Samba'
smb.addCommand(sessionSetup)
self.sendSMB(smb)
smb = self.recvSMB()
if smb.isValidAnswer(SMB.SMB_COM_SESSION_SETUP_ANDX):
# We will need to use this uid field for all future requests/responses
self._uid = smb['Uid']
# Now we have to extract the blob to continue the auth process
sessionResponse = SMBCommand(smb['Data'][0])
sessionParameters = SMBSessionSetupAndX_Extended_Response_Parameters(sessionResponse['Parameters'])
sessionData = SMBSessionSetupAndX_Extended_Response_Data(flags = smb['Flags2'])
sessionData['SecurityBlobLength'] = sessionParameters['SecurityBlobLength']
sessionData.fromString(sessionResponse['Data'])
self._action = sessionParameters['Action']
# If smb sign required, let's enable it for the rest of the connection
if self._dialects_parameters['SecurityMode'] & SMB.SECURITY_SIGNATURES_REQUIRED:
self._SigningSessionKey = sessionKey.contents
self._SignSequenceNumber = 2
self._SignatureEnabled = True
# restore unicode flag if needed
if flags2 & SMB.FLAGS2_UNICODE:
self.__flags2 |= SMB.FLAGS2_UNICODE
return 1
else:
raise Exception('Error: Could not login successfully')
def login_extended(self, user, password, domain = '', lmhash = '', nthash = '', use_ntlmv2 = True ):
# login feature does not support unicode
# disable it if enabled
flags2 = self.__flags2
if flags2 & SMB.FLAGS2_UNICODE:
self.__flags2 = flags2 & (flags2 ^ SMB.FLAGS2_UNICODE)
# Once everything's working we should join login methods into a single one
smb = NewSMBPacket()
# Are we required to sign SMB? If so we do it, if not we skip it
if self._SignatureRequired:
smb['Flags2'] |= SMB.FLAGS2_SMB_SECURITY_SIGNATURE
sessionSetup = SMBCommand(SMB.SMB_COM_SESSION_SETUP_ANDX)
sessionSetup['Parameters'] = SMBSessionSetupAndX_Extended_Parameters()
sessionSetup['Data'] = SMBSessionSetupAndX_Extended_Data()
sessionSetup['Parameters']['MaxBufferSize'] = 61440
sessionSetup['Parameters']['MaxMpxCount'] = 2
sessionSetup['Parameters']['VcNumber'] = 1
sessionSetup['Parameters']['SessionKey'] = 0
sessionSetup['Parameters']['Capabilities'] = SMB.CAP_EXTENDED_SECURITY | SMB.CAP_USE_NT_ERRORS | SMB.CAP_UNICODE | SMB.CAP_LARGE_READX | SMB.CAP_LARGE_WRITEX
# Let's build a NegTokenInit with the NTLMSSP
# TODO: In the future we should be able to choose different providers
blob = SPNEGO_NegTokenInit()
# NTLMSSP
blob['MechTypes'] = [TypesMech['NTLMSSP - Microsoft NTLM Security Support Provider']]
auth = ntlm.getNTLMSSPType1('','',self._SignatureRequired, use_ntlmv2 = use_ntlmv2)
blob['MechToken'] = str(auth)
sessionSetup['Parameters']['SecurityBlobLength'] = len(blob)
sessionSetup['Parameters'].getData()
sessionSetup['Data']['SecurityBlob'] = blob.getData()
# Fake Data here, don't want to get us fingerprinted
sessionSetup['Data']['NativeOS'] = 'Unix'
sessionSetup['Data']['NativeLanMan'] = 'Samba'
smb.addCommand(sessionSetup)
self.sendSMB(smb)
smb = self.recvSMB()
if smb.isValidAnswer(SMB.SMB_COM_SESSION_SETUP_ANDX):
# We will need to use this uid field for all future requests/responses
self._uid = smb['Uid']
# Now we have to extract the blob to continue the auth process
sessionResponse = SMBCommand(smb['Data'][0])
sessionParameters = SMBSessionSetupAndX_Extended_Response_Parameters(sessionResponse['Parameters'])
sessionData = SMBSessionSetupAndX_Extended_Response_Data(flags = smb['Flags2'])
sessionData['SecurityBlobLength'] = sessionParameters['SecurityBlobLength']
sessionData.fromString(sessionResponse['Data'])
respToken = SPNEGO_NegTokenResp(sessionData['SecurityBlob'])
# Let's parse some data and keep it to ourselves in case it is asked
ntlmChallenge = ntlm.NTLMAuthChallenge(respToken['ResponseToken'])
if ntlmChallenge['TargetInfoFields_len'] > 0:
av_pairs = ntlm.AV_PAIRS(ntlmChallenge['TargetInfoFields'][:ntlmChallenge['TargetInfoFields_len']])
if av_pairs[ntlm.NTLMSSP_AV_HOSTNAME] is not None:
try:
self.__server_name = av_pairs[ntlm.NTLMSSP_AV_HOSTNAME][1].decode('utf-16le')
except:
# For some reason, we couldn't decode Unicode here.. silently discard the operation
pass
if av_pairs[ntlm.NTLMSSP_AV_DOMAINNAME] is not None:
try:
if self.__server_name != av_pairs[ntlm.NTLMSSP_AV_DOMAINNAME][1].decode('utf-16le'):
self.__server_domain = av_pairs[ntlm.NTLMSSP_AV_DOMAINNAME][1].decode('utf-16le')
except:
# For some reason, we couldn't decode Unicode here.. silently discard the operation
pass
if av_pairs[ntlm.NTLMSSP_AV_DNS_DOMAINNAME] is not None:
try:
self.__server_dns_domain_name = av_pairs[ntlm.NTLMSSP_AV_DNS_DOMAINNAME][1].decode('utf-16le')
except:
# For some reason, we couldn't decode Unicode here.. silently discard the operation
pass
# Parse Version to know the target Operating system name. Not provided elsewhere anymore
if 'Version' in ntlmChallenge.fields:
version = ntlmChallenge['Version']
if len(version) >= 4:
self.__server_os_major, self.__server_os_minor, self.__server_os_build = unpack('<BBH',version[:4])
type3, exportedSessionKey = ntlm.getNTLMSSPType3(auth, respToken['ResponseToken'], user, password, domain, lmhash, nthash, use_ntlmv2 = use_ntlmv2)
if exportedSessionKey is not None:
self._SigningSessionKey = exportedSessionKey
smb = NewSMBPacket()
# Are we required to sign SMB? If so we do it, if not we skip it
if self._SignatureRequired:
smb['Flags2'] |= SMB.FLAGS2_SMB_SECURITY_SIGNATURE
respToken2 = SPNEGO_NegTokenResp()
respToken2['ResponseToken'] = str(type3)
# Reusing the previous structure
sessionSetup['Parameters']['SecurityBlobLength'] = len(respToken2)
sessionSetup['Data']['SecurityBlob'] = respToken2.getData()
# Storing some info for later use
self.__server_os = sessionData['NativeOS']
self.__server_lanman = sessionData['NativeLanMan']
smb.addCommand(sessionSetup)
self.sendSMB(smb)
smb = self.recvSMB()
self._uid = 0
if smb.isValidAnswer(SMB.SMB_COM_SESSION_SETUP_ANDX):
self._uid = smb['Uid']
sessionResponse = SMBCommand(smb['Data'][0])
sessionParameters = SMBSessionSetupAndXResponse_Parameters(sessionResponse['Parameters'])
self._action = sessionParameters['Action']
# If smb sign required, let's enable it for the rest of the connection
if self._dialects_parameters['SecurityMode'] & SMB.SECURITY_SIGNATURES_REQUIRED:
self._SignSequenceNumber = 2
self._SignatureEnabled = True
# restore unicode flag if needed
if flags2 & SMB.FLAGS2_UNICODE:
self.__flags2 |= SMB.FLAGS2_UNICODE
return 1
else:
raise Exception('Error: Could not login successfully')
def getCredentials(self):
return (
self.__userName,
self.__password,
self.__domain,
self.__lmhash,
self.__nthash,
self.__aesKey,
self.__TGT,
self.__TGS)
def getIOCapabilities(self):
res = dict()
if (self._dialects_parameters['Capabilities'] & SMB.CAP_LARGE_READX) and self._SignatureEnabled is False:
max_size = 65000
else:
max_size = self._dialects_parameters['MaxBufferSize'] # Read in multiple KB blocks
res['MaxReadSize'] = max_size
res['MaxWriteSize'] = max_size
return res
def login(self, user, password, domain = '', lmhash = '', nthash = '', ntlm_fallback = True):
# If we have hashes, normalize them
if lmhash != '' or nthash != '':
if len(lmhash) % 2: lmhash = '0%s' % lmhash
if len(nthash) % 2: nthash = '0%s' % nthash
try: # just in case they were converted already
lmhash = a2b_hex(lmhash)
nthash = a2b_hex(nthash)
except:
pass
self.__userName = user
self.__password = password
self.__domain = domain
self.__lmhash = lmhash
self.__nthash = nthash
self.__aesKey = ''
self.__TGT = None
self.__TGS = None
if self._dialects_parameters['Capabilities'] & SMB.CAP_EXTENDED_SECURITY:
try:
self.login_extended(user, password, domain, lmhash, nthash, use_ntlmv2 = True)
except:
# If the target OS is Windows 5.0 or Samba, let's try using NTLMv1
if ntlm_fallback and ((self.get_server_lanman().find('Windows 2000') != -1) or (self.get_server_lanman().find('Samba') != -1)):
self.login_extended(user, password, domain, lmhash, nthash, use_ntlmv2 = False)
self.__isNTLMv2 = False
else:
raise
elif ntlm_fallback:
self.login_standard(user, password, domain, lmhash, nthash)
self.__isNTLMv2 = False
else:
raise SessionError('Cannot authenticate against target, enable ntlm_fallback')
def login_standard(self, user, password, domain = '', lmhash = '', nthash = ''):
# login feature does not support unicode
# disable it if enabled
flags2 = self.__flags2
if flags2 & SMB.FLAGS2_UNICODE:
self.__flags2 = flags2 & (flags2 ^ SMB.FLAGS2_UNICODE)
# Only supports NTLMv1
# Password is only encrypted if the server passed us an "encryption key" during protocol dialect negotiation
if self._dialects_parameters['ChallengeLength'] > 0:
if lmhash != '' or nthash != '':
pwd_ansi = self.get_ntlmv1_response(lmhash)
pwd_unicode = self.get_ntlmv1_response(nthash)
elif password:
lmhash = ntlm.compute_lmhash(password)
nthash = ntlm.compute_nthash(password)
pwd_ansi = self.get_ntlmv1_response(lmhash)
pwd_unicode = self.get_ntlmv1_response(nthash)
else: # NULL SESSION
pwd_ansi = ''
pwd_unicode = ''
else:
pwd_ansi = password
pwd_unicode = ''
smb = NewSMBPacket()
sessionSetup = SMBCommand(SMB.SMB_COM_SESSION_SETUP_ANDX)
sessionSetup['Parameters'] = SMBSessionSetupAndX_Parameters()
sessionSetup['Data'] = SMBSessionSetupAndX_Data()
sessionSetup['Parameters']['MaxBuffer'] = 61440
sessionSetup['Parameters']['MaxMpxCount'] = 2
sessionSetup['Parameters']['VCNumber'] = os.getpid()
sessionSetup['Parameters']['SessionKey'] = self._dialects_parameters['SessionKey']
sessionSetup['Parameters']['AnsiPwdLength'] = len(pwd_ansi)
sessionSetup['Parameters']['UnicodePwdLength'] = len(pwd_unicode)
sessionSetup['Parameters']['Capabilities'] = SMB.CAP_RAW_MODE | SMB.CAP_USE_NT_ERRORS | SMB.CAP_LARGE_READX | SMB.CAP_LARGE_WRITEX
sessionSetup['Data']['AnsiPwd'] = pwd_ansi
sessionSetup['Data']['UnicodePwd'] = pwd_unicode
sessionSetup['Data']['Account'] = str(user)
sessionSetup['Data']['PrimaryDomain'] = str(domain)
sessionSetup['Data']['NativeOS'] = str(os.name)
sessionSetup['Data']['NativeLanMan'] = 'pysmb'
smb.addCommand(sessionSetup)
self.sendSMB(smb)
smb = self.recvSMB()
if smb.isValidAnswer(SMB.SMB_COM_SESSION_SETUP_ANDX):
# We will need to use this uid field for all future requests/responses
self._uid = smb['Uid']
sessionResponse = SMBCommand(smb['Data'][0])
sessionParameters = SMBSessionSetupAndXResponse_Parameters(sessionResponse['Parameters'])
sessionData = SMBSessionSetupAndXResponse_Data(flags = smb['Flags2'], data = sessionResponse['Data'])
self._action = sessionParameters['Action']
# Still gotta figure out how to do this with no EXTENDED_SECURITY
if sessionParameters['Action'] & SMB_SETUP_USE_LANMAN_KEY == 0:
self._SigningChallengeResponse = sessionSetup['Data']['UnicodePwd']
self._SigningSessionKey = nthash
else:
self._SigningChallengeResponse = sessionSetup['Data']['AnsiPwd']
self._SigningSessionKey = lmhash
#self._SignSequenceNumber = 1
#self.checkSignSMB(smb, self._SigningSessionKey ,self._SigningChallengeResponse)
#self._SignatureEnabled = True
self.__server_os = sessionData['NativeOS']
self.__server_lanman = sessionData['NativeLanMan']
self.__server_domain = sessionData['PrimaryDomain']
# restore unicode flag if needed
if flags2 & SMB.FLAGS2_UNICODE:
self.__flags2 |= SMB.FLAGS2_UNICODE
return 1
        else:
            raise Exception('Error: Could not login successfully')
def waitNamedPipe(self, tid, pipe, timeout = 5, noAnswer = 0):
smb = NewSMBPacket()
smb['Tid'] = tid
transCommand = SMBCommand(SMB.SMB_COM_TRANSACTION)
transCommand['Parameters'] = SMBTransaction_Parameters()
transCommand['Data'] = SMBTransaction_Data()
setup = '\x53\x00\x00\x00'
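        # Two setup words: 0x0053 (the WaitNamedPipe transaction subcommand) followed by a zero
        # priority word.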
name = '\\PIPE%s\x00' % pipe
transCommand['Parameters']['Setup'] = setup
transCommand['Parameters']['TotalParameterCount'] = 0
transCommand['Parameters']['TotalDataCount'] = 0
transCommand['Parameters']['MaxParameterCount'] = 0
transCommand['Parameters']['MaxDataCount'] = 0
transCommand['Parameters']['Timeout'] = timeout * 1000
transCommand['Parameters']['ParameterCount'] = 0
transCommand['Parameters']['ParameterOffset'] = 32+3+28+len(setup)+len(name)
transCommand['Parameters']['DataCount'] = 0
transCommand['Parameters']['DataOffset'] = 0
transCommand['Data']['Name'] = name
transCommand['Data']['Trans_Parameters'] = ''
transCommand['Data']['Trans_Data'] = ''
if noAnswer:
transCommand['Parameters']['Flags'] = TRANS_NO_RESPONSE
smb.addCommand(transCommand)
self.sendSMB(smb)
smb = self.recvSMB()
if smb.isValidAnswer(SMB.SMB_COM_TRANSACTION):
return 1
return 0
def read(self, tid, fid, offset=0, max_size = None, wait_answer=1):
if not max_size:
max_size = self._dialects_parameters['MaxBufferSize'] # Read in multiple KB blocks
        # Requesting a larger max_size does not work: the server responds with an error (More data available)
smb = NewSMBPacket()
smb['Tid'] = tid
read = SMBCommand(SMB.SMB_COM_READ)
read['Parameters'] = SMBRead_Parameters()
read['Parameters']['Fid'] = fid
read['Parameters']['Offset'] = offset
read['Parameters']['Count'] = max_size
smb.addCommand(read)
if wait_answer:
while 1:
self.sendSMB(smb)
ans = self.recvSMB()
if ans.isValidAnswer(SMB.SMB_COM_READ):
readResponse = SMBCommand(ans['Data'][0])
readData = SMBReadResponse_Data(readResponse['Data'])
return readData['Data']
return None
def read_andx(self, tid, fid, offset=0, max_size = None, wait_answer=1, smb_packet=None):
if not max_size:
if (self._dialects_parameters['Capabilities'] & SMB.CAP_LARGE_READX) and self._SignatureEnabled is False:
max_size = 65000
else:
max_size = self._dialects_parameters['MaxBufferSize'] # Read in multiple KB blocks
        # Requesting a larger max_size does not work: the server responds with an error (More data available)
if smb_packet is None:
smb = NewSMBPacket()
smb['Tid'] = tid
readAndX = SMBCommand(SMB.SMB_COM_READ_ANDX)
readAndX['Parameters'] = SMBReadAndX_Parameters()
readAndX['Parameters']['Fid'] = fid
readAndX['Parameters']['Offset'] = offset
readAndX['Parameters']['MaxCount'] = max_size
smb.addCommand(readAndX)
else:
smb = smb_packet
if wait_answer:
answer = ''
while 1:
self.sendSMB(smb)
ans = self.recvSMB()
if ans.isValidAnswer(SMB.SMB_COM_READ_ANDX):
# XXX Here we are only using a few fields from the response
readAndXResponse = SMBCommand(ans['Data'][0])
readAndXParameters = SMBReadAndXResponse_Parameters(readAndXResponse['Parameters'])
offset = readAndXParameters['DataOffset']
count = readAndXParameters['DataCount']+0x10000*readAndXParameters['DataCount_Hi']
answer += str(ans)[offset:offset+count]
if not ans.isMoreData():
return answer
max_size = min(max_size, readAndXParameters['Remaining'])
readAndX['Parameters']['Offset'] += count # XXX Offset is not important (apparently)
else:
self.sendSMB(smb)
ans = self.recvSMB()
try:
if ans.isValidAnswer(SMB.SMB_COM_READ_ANDX):
return ans
else:
return None
except:
return ans
return None
def read_raw(self, tid, fid, offset=0, max_size = None, wait_answer=1):
if not max_size:
max_size = self._dialects_parameters['MaxBufferSize'] # Read in multiple KB blocks
        # Requesting a larger max_size does not work: the server responds with an error (More data available)
smb = NewSMBPacket()
smb['Tid'] = tid
readRaw = SMBCommand(SMB.SMB_COM_READ_RAW)
readRaw['Parameters'] = SMBReadRaw_Parameters()
readRaw['Parameters']['Fid'] = fid
readRaw['Parameters']['Offset'] = offset
readRaw['Parameters']['MaxCount'] = max_size
smb.addCommand(readRaw)
self.sendSMB(smb)
if wait_answer:
data = self._sess.recv_packet(self.__timeout).get_trailer()
if not data:
# If there is no data it means there was an error
data = self.read_andx(tid, fid, offset, max_size)
return data
return None
def write(self,tid,fid,data, offset = 0, wait_answer=1):
smb = NewSMBPacket()
smb['Tid'] = tid
write = SMBCommand(SMB.SMB_COM_WRITE)
write['Parameters'] = SMBWrite_Parameters()
write['Data'] = SMBWrite_Data()
write['Parameters']['Fid'] = fid
write['Parameters']['Count'] = len(data)
write['Parameters']['Offset'] = offset
write['Parameters']['Remaining'] = len(data)
write['Data']['Data'] = data
smb.addCommand(write)
self.sendSMB(smb)
if wait_answer:
smb = self.recvSMB()
if smb.isValidAnswer(SMB.SMB_COM_WRITE):
return smb
return None
def write_andx(self,tid,fid,data, offset = 0, wait_answer=1, write_pipe_mode = False, smb_packet=None):
if smb_packet is None:
smb = NewSMBPacket()
smb['Tid'] = tid
writeAndX = SMBCommand(SMB.SMB_COM_WRITE_ANDX)
smb.addCommand(writeAndX)
writeAndX['Parameters'] = SMBWriteAndX_Parameters()
writeAndX['Parameters']['Fid'] = fid
writeAndX['Parameters']['Offset'] = offset
writeAndX['Parameters']['WriteMode'] = 8
writeAndX['Parameters']['Remaining'] = len(data)
writeAndX['Parameters']['DataLength'] = len(data)
writeAndX['Parameters']['DataOffset'] = len(smb) # this length already includes the parameter
writeAndX['Data'] = data
if write_pipe_mode is True:
# First of all we gotta know what the MaxBuffSize is
maxBuffSize = self._dialects_parameters['MaxBufferSize']
if len(data) > maxBuffSize:
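                    # Large pipe writes are split into chunks that fit the negotiated buffer: the
                    # first chunk goes out with WriteMode 0x0c (named-pipe raw mode + message start)
                    # and a two-byte prefix, the remaining chunks with WriteMode 0x04 (raw mode).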
chunks_size = maxBuffSize - 60
writeAndX['Parameters']['WriteMode'] = 0x0c
sendData = '\xff\xff' + data
totalLen = len(sendData)
writeAndX['Parameters']['DataLength'] = chunks_size
writeAndX['Parameters']['Remaining'] = totalLen-2
writeAndX['Data'] = sendData[:chunks_size]
self.sendSMB(smb)
if wait_answer:
smbResp = self.recvSMB()
smbResp.isValidAnswer(SMB.SMB_COM_WRITE_ANDX)
alreadySent = chunks_size
sendData = sendData[chunks_size:]
while alreadySent < totalLen:
writeAndX['Parameters']['WriteMode'] = 0x04
writeAndX['Parameters']['DataLength'] = len(sendData[:chunks_size])
writeAndX['Data'] = sendData[:chunks_size]
self.sendSMB(smb)
if wait_answer:
smbResp = self.recvSMB()
smbResp.isValidAnswer(SMB.SMB_COM_WRITE_ANDX)
alreadySent += writeAndX['Parameters']['DataLength']
sendData = sendData[chunks_size:]
return smbResp
else:
smb = smb_packet
self.sendSMB(smb)
if wait_answer:
smb = self.recvSMB()
if smb.isValidAnswer(SMB.SMB_COM_WRITE_ANDX):
return smb
return None
def write_raw(self,tid,fid,data, offset = 0, wait_answer=1):
        LOG.warning("[MS-CIFS] This command was introduced in the CorePlus dialect, but is often listed as part of the LAN Manager 1.0 dialect. This command has been deprecated. Clients SHOULD use SMB_COM_WRITE_ANDX")
smb = NewSMBPacket()
smb['Tid'] = tid
writeRaw = SMBCommand(SMB.SMB_COM_WRITE_RAW)
writeRaw['Parameters'] = SMBWriteRaw_Parameters()
writeRaw['Parameters']['Fid'] = fid
writeRaw['Parameters']['Offset'] = offset
writeRaw['Parameters']['Count'] = len(data)
writeRaw['Parameters']['DataLength'] = 0
writeRaw['Parameters']['DataOffset'] = 0
smb.addCommand(writeRaw)
self.sendSMB(smb)
self._sess.send_packet(data)
if wait_answer:
smb = self.recvSMB()
if smb.isValidAnswer(SMB.SMB_COM_WRITE_RAW):
return smb
return None
def TransactNamedPipe(self, tid, fid, data = '', noAnswer = 0, waitAnswer = 1, offset = 0):
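        # TransactNmPipe (setup subcommand 0x26) writes `data` to the pipe identified by fid and,
        # unless noAnswer is set, returns the data read back from the pipe in the same transaction.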
self.send_trans(tid,pack('<HH', 0x26, fid),'\\PIPE\\\x00','',data, noAnswer = noAnswer)
if noAnswer or not waitAnswer:
return
smb = self.recvSMB()
if smb.isValidAnswer(SMB.SMB_COM_TRANSACTION):
transResponse = SMBCommand(smb['Data'][0])
transParameters = SMBTransactionResponse_Parameters(transResponse['Parameters'])
return transResponse['Data'][-transParameters['TotalDataCount']:] # Remove Potential Prefix Padding
return None
def TransactNamedPipeRecv(self):
s = self.recvSMB()
if s.isValidAnswer(SMB.SMB_COM_TRANSACTION):
transResponse = SMBCommand(s['Data'][0])
transParameters = SMBTransactionResponse_Parameters(transResponse['Parameters'])
return transResponse['Data'][-transParameters['TotalDataCount']:] # Remove Potential Prefix Padding
return None
def nt_create_andx(self,tid,filename, smb_packet=None, cmd = None, shareAccessMode = FILE_SHARE_READ | FILE_SHARE_WRITE, disposition = FILE_OPEN, accessMask = 0x2019f):
filename = filename.replace('/', '\\')
filename = filename.encode('utf-16le') if self.__flags2 & SMB.FLAGS2_UNICODE else filename
if smb_packet is None:
smb = NewSMBPacket()
smb['Tid'] = tid
else:
smb = smb_packet
if cmd is None:
ntCreate = SMBCommand(SMB.SMB_COM_NT_CREATE_ANDX)
ntCreate['Parameters'] = SMBNtCreateAndX_Parameters()
ntCreate['Data'] = SMBNtCreateAndX_Data(flags=self.__flags2)
ntCreate['Parameters']['FileNameLength'] = len(filename)
ntCreate['Parameters']['CreateFlags'] = 0x16
ntCreate['Parameters']['AccessMask'] = accessMask
ntCreate['Parameters']['CreateOptions'] = 0x40
ntCreate['Parameters']['ShareAccess'] = shareAccessMode
ntCreate['Parameters']['Disposition'] = disposition
ntCreate['Data']['FileName'] = filename
if self.__flags2 & SMB.FLAGS2_UNICODE:
ntCreate['Data']['Pad'] = 0x0
else:
ntCreate = cmd
smb.addCommand(ntCreate)
self.sendSMB(smb)
while 1:
smb = self.recvSMB()
if smb.isValidAnswer(SMB.SMB_COM_NT_CREATE_ANDX):
# XXX Here we are ignoring the rest of the response
ntCreateResponse = SMBCommand(smb['Data'][0])
ntCreateParameters = SMBNtCreateAndXResponse_Parameters(ntCreateResponse['Parameters'])
self.fid = ntCreateParameters['Fid']
return ntCreateParameters['Fid']
def logoff(self):
smb = NewSMBPacket()
logOff = SMBCommand(SMB.SMB_COM_LOGOFF_ANDX)
logOff['Parameters'] = SMBLogOffAndX()
smb.addCommand(logOff)
self.sendSMB(smb)
self.recvSMB()
# Let's clear some fields so you can login again under the same session
self._uid = 0
def list_path(self, service, path = '*', password = None):
path = path.replace('/', '\\')
path = path.encode('utf-16le') if self.__flags2 & SMB.FLAGS2_UNICODE else path
tid = self.tree_connect_andx('\\\\' + self.__remote_name + '\\' + service, password)
try:
findFirstParameter = SMBFindFirst2_Parameters()
findFirstParameter['SearchAttributes'] = SMB_FILE_ATTRIBUTE_DIRECTORY | SMB_FILE_ATTRIBUTE_HIDDEN | \
SMB_FILE_ATTRIBUTE_SYSTEM | SMB_FILE_ATTRIBUTE_READONLY | \
SMB_FILE_ATTRIBUTE_ARCHIVE
findFirstParameter['SearchCount'] = 512
findFirstParameter['Flags'] = SMB_FIND_RETURN_RESUME_KEYS | SMB_FIND_CLOSE_AT_EOS
findFirstParameter['InformationLevel'] = SMB_FIND_FILE_BOTH_DIRECTORY_INFO
findFirstParameter['SearchStorageType'] = 0
findFirstParameter['FileName'] = path + ('\x00\x00' if self.__flags2 & SMB.FLAGS2_UNICODE else '\x00')
self.send_trans2(tid, SMB.TRANS2_FIND_FIRST2, '\x00', findFirstParameter, '')
files = [ ]
totalDataCount = 1
findData = ''
findFirst2ParameterBlock = ''
while len(findData) < totalDataCount:
resp = self.recvSMB()
if resp.isValidAnswer(SMB.SMB_COM_TRANSACTION2):
trans2Response = SMBCommand(resp['Data'][0])
trans2Parameters = SMBTransaction2Response_Parameters(trans2Response['Parameters'])
totalDataCount = trans2Parameters['TotalDataCount']
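                    # ParameterOffset/DataOffset are relative to the start of the SMB packet;
                    # subtracting 55 (32-byte header, word count, fixed TRANS2 response words and
                    # byte count) re-bases them onto trans2Response['Data'].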
findFirst2ParameterBlock += trans2Response['Data'][trans2Parameters['ParameterOffset']-55:][:trans2Parameters['ParameterCount']]
findData += trans2Response['Data'][trans2Parameters['DataOffset']-55:]
findParameterBlock = SMBFindFirst2Response_Parameters(findFirst2ParameterBlock)
# Save the SID for resume operations
sid = findParameterBlock['SID']
while True:
record = SMBFindFileBothDirectoryInfo(data = findData)
shortname = record['ShortName'].decode('utf-16le') if self.__flags2 & SMB.FLAGS2_UNICODE else record['ShortName']
filename = record['FileName'].decode('utf-16le') if self.__flags2 & SMB.FLAGS2_UNICODE else record['FileName']
fileRecord = SharedFile(record['CreationTime'], record['LastAccessTime'], record['LastChangeTime'],
record['EndOfFile'], record['AllocationSize'], record['ExtFileAttributes'],
shortname, filename)
files.append(fileRecord)
if record['NextEntryOffset'] > 0 and len(findData[record['NextEntryOffset']:]) > 0:
findData = findData[record['NextEntryOffset']:]
else:
# More data to search?
if findParameterBlock['EndOfSearch'] == 0:
resume_filename = record['FileName']
findNextParameter = SMBFindNext2_Parameters()
findNextParameter['SID'] = sid
findNextParameter['SearchCount'] = 1024
findNextParameter['InformationLevel'] = SMB_FIND_FILE_BOTH_DIRECTORY_INFO
findNextParameter['ResumeKey'] = 0
findNextParameter['Flags'] = SMB_FIND_RETURN_RESUME_KEYS | SMB_FIND_CLOSE_AT_EOS
findNextParameter['FileName'] = resume_filename + ('\x00\x00' if self.__flags2 & SMB.FLAGS2_UNICODE else '\x00')
self.send_trans2(tid, SMB.TRANS2_FIND_NEXT2, '\x00', findNextParameter, '')
findData = ''
findNext2ParameterBlock = ''
totalDataCount = 1
while len(findData) < totalDataCount:
resp = self.recvSMB()
if resp.isValidAnswer(SMB.SMB_COM_TRANSACTION2):
trans2Response = SMBCommand(resp['Data'][0])
trans2Parameters = SMBTransaction2Response_Parameters(trans2Response['Parameters'])
totalDataCount = trans2Parameters['TotalDataCount']
findNext2ParameterBlock += trans2Response['Data'][trans2Parameters['ParameterOffset']-55:][:trans2Parameters['ParameterCount']]
findData += trans2Response['Data'][trans2Parameters['DataOffset']-55:]
findParameterBlock = SMBFindNext2Response_Parameters(findNext2ParameterBlock)
else:
break
finally:
self.disconnect_tree(tid)
return files
def retr_file(self, service, filename, callback, mode = FILE_OPEN, offset = 0, password = None, shareAccessMode = SMB_ACCESS_READ):
filename = string.replace(filename, '/', '\\')
fid = -1
tid = self.tree_connect_andx('\\\\' + self.__remote_name + '\\' + service, password)
try:
fid = self.nt_create_andx(tid, filename, shareAccessMode = shareAccessMode, accessMask = 0x20089)
res = self.query_file_info(tid, fid)
datasize = SMBQueryFileStandardInfo(res)['EndOfFile']
self.__nonraw_retr_file(tid, fid, offset, datasize, callback)
finally:
if fid >= 0:
self.close(tid, fid)
self.disconnect_tree(tid)
def stor_file(self, service, filename, callback, mode = FILE_OVERWRITE_IF, offset = 0, password = None, shareAccessMode = SMB_ACCESS_WRITE):
filename = string.replace(filename, '/', '\\')
fid = -1
tid = self.tree_connect_andx('\\\\' + self.__remote_name + '\\' + service, password)
try:
fid = self.nt_create_andx(tid, filename, shareAccessMode = shareAccessMode, disposition = mode )
self.__nonraw_stor_file(tid, fid, offset, 0, callback)
finally:
if fid >= 0:
self.close(tid, fid)
self.disconnect_tree(tid)
def stor_file_nonraw(self, service, filename, callback, mode = FILE_OVERWRITE_IF, offset = 0, password = None, shareAccessMode = SMB_ACCESS_WRITE ):
filename = string.replace(filename, '/', '\\')
fid = -1
tid = self.tree_connect_andx('\\\\' + self.__remote_name + '\\' + service, password)
try:
fid = self.nt_create_andx(tid, filename, shareAccessMode = shareAccessMode, disposition = mode)
self.__nonraw_stor_file(tid, fid, offset, 0, callback)
finally:
if fid >= 0:
self.close(tid, fid)
self.disconnect_tree(tid)
def check_dir(self, service, path, password = None):
path = string.replace(path,'/', '\\')
tid = self.tree_connect_andx('\\\\' + self.__remote_name + '\\' + service, password)
try:
smb = NewSMBPacket()
smb['Tid'] = tid
smb['Mid'] = 0
cmd = SMBCommand(SMB.SMB_COM_CHECK_DIRECTORY)
cmd['Parameters'] = ''
cmd['Data'] = SMBCheckDirectory_Data(flags = self.__flags2)
cmd['Data']['DirectoryName'] = path.encode('utf-16le') if self.__flags2 & SMB.FLAGS2_UNICODE else path
smb.addCommand(cmd)
self.sendSMB(smb)
while 1:
s = self.recvSMB()
if s.isValidAnswer(SMB.SMB_COM_CHECK_DIRECTORY):
return
finally:
self.disconnect_tree(tid)
def remove(self, service, path, password = None):
path = string.replace(path,'/', '\\')
# Perform a list to ensure the path exists
self.list_path(service, path, password)
tid = self.tree_connect_andx('\\\\' + self.__remote_name + '\\' + service, password)
try:
smb = NewSMBPacket()
smb['Tid'] = tid
smb['Mid'] = 0
cmd = SMBCommand(SMB.SMB_COM_DELETE)
cmd['Parameters'] = SMBDelete_Parameters()
cmd['Parameters']['SearchAttributes'] = ATTR_HIDDEN | ATTR_SYSTEM | ATTR_ARCHIVE
cmd['Data'] = SMBDelete_Data(flags = self.__flags2)
cmd['Data']['FileName'] = (path + '\x00').encode('utf-16le') if self.__flags2 & SMB.FLAGS2_UNICODE else (path + '\x00')
smb.addCommand(cmd)
self.sendSMB(smb)
while 1:
s = self.recvSMB()
if s.isValidAnswer(SMB.SMB_COM_DELETE):
return
finally:
self.disconnect_tree(tid)
def rmdir(self, service, path, password = None):
path = string.replace(path,'/', '\\')
# Check that the directory exists
self.check_dir(service, path, password)
tid = self.tree_connect_andx('\\\\' + self.__remote_name + '\\' + service, password)
try:
path = path.encode('utf-16le') if self.__flags2 & SMB.FLAGS2_UNICODE else path
smb = NewSMBPacket()
smb['Tid'] = tid
createDir = SMBCommand(SMB.SMB_COM_DELETE_DIRECTORY)
createDir['Data'] = SMBDeleteDirectory_Data(flags=self.__flags2)
createDir['Data']['DirectoryName'] = path
smb.addCommand(createDir)
self.sendSMB(smb)
while 1:
s = self.recvSMB()
if s.isValidAnswer(SMB.SMB_COM_DELETE_DIRECTORY):
return
finally:
self.disconnect_tree(tid)
def mkdir(self, service, path, password = None):
path = string.replace(path,'/', '\\')
tid = self.tree_connect_andx('\\\\' + self.__remote_name + '\\' + service, password)
try:
path = path.encode('utf-16le') if self.__flags2 & SMB.FLAGS2_UNICODE else path
smb = NewSMBPacket()
smb['Tid'] = tid
smb['Mid'] = 0
createDir = SMBCommand(SMB.SMB_COM_CREATE_DIRECTORY)
createDir['Data'] = SMBCreateDirectory_Data(flags=self.__flags2)
createDir['Data']['DirectoryName'] = path
smb.addCommand(createDir)
self.sendSMB(smb)
smb = self.recvSMB()
if smb.isValidAnswer(SMB.SMB_COM_CREATE_DIRECTORY):
return 1
return 0
finally:
self.disconnect_tree(tid)
def rename(self, service, old_path, new_path, password = None):
old_path = string.replace(old_path,'/', '\\')
new_path = string.replace(new_path,'/', '\\')
tid = self.tree_connect_andx('\\\\' + self.__remote_name + '\\' + service, password)
try:
smb = NewSMBPacket()
smb['Tid'] = tid
smb['Mid'] = 0
renameCmd = SMBCommand(SMB.SMB_COM_RENAME)
renameCmd['Parameters'] = SMBRename_Parameters()
renameCmd['Parameters']['SearchAttributes'] = ATTR_SYSTEM | ATTR_HIDDEN | ATTR_DIRECTORY
renameCmd['Data'] = SMBRename_Data(flags = self.__flags2)
renameCmd['Data']['OldFileName'] = old_path.encode('utf-16le') if self.__flags2 & SMB.FLAGS2_UNICODE else old_path
renameCmd['Data']['NewFileName'] = new_path.encode('utf-16le') if self.__flags2 & SMB.FLAGS2_UNICODE else new_path
smb.addCommand(renameCmd)
self.sendSMB(smb)
smb = self.recvSMB()
if smb.isValidAnswer(SMB.SMB_COM_RENAME):
return 1
return 0
finally:
self.disconnect_tree(tid)
def writeFile(self, treeId, fileId, data, offset = 0):
if (self._dialects_parameters['Capabilities'] & SMB.CAP_LARGE_WRITEX) and self._SignatureEnabled is False:
max_buf_size = 65000
else:
max_buf_size = self._dialects_parameters['MaxBufferSize'] & ~0x3ff # Write in multiple KB blocks
write_offset = offset
while 1:
if len(data) == 0:
break
writeData = data[:max_buf_size]
data = data[max_buf_size:]
smb = self.write_andx(treeId,fileId,writeData, write_offset)
writeResponse = SMBCommand(smb['Data'][0])
writeResponseParameters = SMBWriteAndXResponse_Parameters(writeResponse['Parameters'])
write_offset += writeResponseParameters['Count']
def get_socket(self):
return self._sess.get_socket()
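    # Human-readable descriptions for the DOS-, server- and hardware-class SMB error codes.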
ERRDOS = { 1: 'Invalid function',
2: 'File not found',
3: 'Invalid directory',
4: 'Too many open files',
5: 'Access denied',
6: 'Invalid file handle. Please file a bug report.',
7: 'Memory control blocks destroyed',
8: 'Out of memory',
9: 'Invalid memory block address',
10: 'Invalid environment',
11: 'Invalid format',
12: 'Invalid open mode',
13: 'Invalid data',
15: 'Invalid drive',
16: 'Attempt to remove server\'s current directory',
17: 'Not the same device',
18: 'No files found',
32: 'Sharing mode conflicts detected',
33: 'Lock request conflicts detected',
80: 'File already exists'
}
ERRSRV = { 1: 'Non-specific error',
2: 'Bad password',
4: 'Access denied',
5: 'Invalid tid. Please file a bug report.',
6: 'Invalid network name',
7: 'Invalid device',
49: 'Print queue full',
50: 'Print queue full',
51: 'EOF on print queue dump',
52: 'Invalid print file handle',
64: 'Command not recognized. Please file a bug report.',
65: 'Internal server error',
67: 'Invalid path',
69: 'Invalid access permissions',
71: 'Invalid attribute mode',
81: 'Server is paused',
82: 'Not receiving messages',
83: 'No room to buffer messages',
87: 'Too many remote user names',
88: 'Operation timeout',
89: 'Out of resources',
91: 'Invalid user handle. Please file a bug report.',
250: 'Temporarily unable to support raw mode for transfer',
251: 'Temporarily unable to support raw mode for transfer',
252: 'Continue in MPX mode',
65535: 'Unsupported function'
}
ERRHRD = { 19: 'Media is write-protected',
20: 'Unknown unit',
21: 'Drive not ready',
22: 'Unknown command',
23: 'CRC error',
24: 'Bad request',
25: 'Seek error',
26: 'Unknown media type',
27: 'Sector not found',
28: 'Printer out of paper',
29: 'Write fault',
30: 'Read fault',
31: 'General failure',
32: 'Open conflicts with an existing open',
33: 'Invalid lock request',
34: 'Wrong disk in drive',
35: 'FCBs not available',
36: 'Sharing buffer exceeded'
}
| apache-2.0 | 3,336,921,338,414,145,000 | 35.990485 | 215 | 0.553244 | false |
southpaw94/MachineLearning | HPTuning/SVM_X_Validation.py | 1 | 1287 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.tree import DecisionTreeClassifier
# This program demonstrates nested cross-validation for hyperparameter tuning, which is essential
# in reducing over- or under-fitting of the learning algorithm.
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases'+\
'/breast-cancer-wisconsin/wdbc.data', header=None)
X = df.loc[:, 2:].values
y = df.loc[:, 1].values
# All malignant tumors will be represented as class 1, otherwise, class 0
le = LabelEncoder()
y = le.fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, \
test_size=0.20, random_state=1)
gs = GridSearchCV( \
estimator = DecisionTreeClassifier(random_state = 0), \
param_grid = [ \
{'max_depth': [1, 2, 3, 4, 5, 6, 7, None]} \
], \
scoring = 'accuracy', \
cv = 5)
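# Nested cross-validation: GridSearchCV tunes max_depth on inner 5-fold splits, while the
# outer 5-fold cross_val_score loop estimates the generalization performance of the tuned model.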
scores = cross_val_score(gs, \
X_train, \
y_train, \
scoring = 'accuracy', \
cv = 5)
print('CV accuracy: %.3f +/- %.3f' % ( \
np.mean(scores), np.std(scores)))
| gpl-2.0 | 6,369,555,869,987,812,000 | 30.390244 | 78 | 0.656566 | false |
cloudbase/coriolis | coriolis/api/__init__.py | 1 | 5046 | # Copyright (c) 2013 OpenStack Foundation
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
WSGI middleware for OpenStack API controllers.
"""
from paste import urlmap
import routes
from oslo_log import log as logging
from oslo_service import wsgi as base_wsgi
from coriolis.api import wsgi
from coriolis import exception
from coriolis.i18n import _, _LW # noqa
LOG = logging.getLogger(__name__)
def root_app_factory(loader, global_conf, **local_conf):
return urlmap.urlmap_factory(loader, global_conf, **local_conf)
class APIMapper(routes.Mapper):
def routematch(self, url=None, environ=None):
        if url == "":
result = self._match("", environ)
return result[0], result[1]
return routes.Mapper.routematch(self, url, environ)
def connect(self, *args, **kwargs):
# NOTE(inhye): Default the format part of a route to only accept json
# and xml so it doesn't eat all characters after a '.'
# in the url.
kwargs.setdefault('requirements', {})
if not kwargs['requirements'].get('format'):
kwargs['requirements']['format'] = 'json|xml'
return routes.Mapper.connect(self, *args, **kwargs)
class ProjectMapper(APIMapper):
def resource(self, member_name, collection_name, **kwargs):
if 'parent_resource' not in kwargs:
kwargs['path_prefix'] = '{project_id}/'
else:
parent_resource = kwargs['parent_resource']
p_collection = parent_resource['collection_name']
p_member = parent_resource['member_name']
kwargs['path_prefix'] = '{project_id}/%s/:%s_id' % (p_collection,
p_member)
routes.Mapper.resource(self,
member_name,
collection_name,
**kwargs)
class APIRouter(base_wsgi.Router):
"""Routes requests on the API to the appropriate controller and method."""
ExtensionManager = None # override in subclasses
@classmethod
def factory(cls, global_config, **local_config):
return cls()
def __init__(self, ext_mgr=None):
if ext_mgr is None:
if self.ExtensionManager:
ext_mgr = self.ExtensionManager()
else:
raise exception.CoriolisException(
_("Must specify an ExtensionManager class"))
mapper = ProjectMapper()
self.resources = {}
self._setup_routes(mapper, ext_mgr)
self._setup_ext_routes(mapper, ext_mgr)
self._setup_extensions(ext_mgr)
super(APIRouter, self).__init__(mapper)
def _setup_ext_routes(self, mapper, ext_mgr):
for resource in ext_mgr.get_resources():
LOG.debug('Extended resource: %s',
resource.collection)
wsgi_resource = wsgi.Resource(resource.controller)
self.resources[resource.collection] = wsgi_resource
kargs = dict(
controller=wsgi_resource,
collection=resource.collection_actions,
member=resource.member_actions)
if resource.parent:
kargs['parent_resource'] = resource.parent
mapper.resource(resource.collection, resource.collection, **kargs)
if resource.custom_routes_fn:
resource.custom_routes_fn(mapper, wsgi_resource)
def _setup_extensions(self, ext_mgr):
for extension in ext_mgr.get_controller_extensions():
collection = extension.collection
controller = extension.controller
if collection not in self.resources:
LOG.warning(_LW('Extension %(ext_name)s: Cannot extend '
'resource %(collection)s: No such resource'),
{'ext_name': extension.extension.name,
'collection': collection})
continue
LOG.debug('Extension %(ext_name)s extending resource: '
'%(collection)s',
{'ext_name': extension.extension.name,
'collection': collection})
resource = self.resources[collection]
resource.register_actions(controller)
resource.register_extensions(controller)
def _setup_routes(self, mapper, ext_mgr):
raise NotImplementedError
| agpl-3.0 | 3,846,645,010,667,522,000 | 35.832117 | 78 | 0.596512 | false |
NICTA/revrand | tests/test_btypes.py | 1 | 1393 | """Test revrand's bound and parameter types."""
from __future__ import division
import numpy as np
from scipy.stats import gamma
from revrand import Bound, Positive, Parameter
def test_bound():
b = Bound(1, 2)
assert b.lower == 1
assert b.upper == 2
assert b.check(1) is True
assert b.check(3) is False
assert b.clip(5) == 2
def test_positive():
b = Positive(2)
assert b.lower > 0
assert b.upper == 2
assert b.check(1) is True
assert b.check(-1) is False
assert b.clip(-3) == b.lower
def test_parameter():
# Test "Null" parameter
p = Parameter()
assert p.shape == (0,)
assert p.rvs() == []
assert p.has_value is False
assert p.is_random is False
# Test values
v = 1.
p = Parameter(v, Positive())
assert p.value == v
assert p.bounds.lower > 0
assert p.bounds.upper is None
assert p.rvs() == v
assert p.has_value is True
assert p.is_random is False
# Test distributions
p = Parameter(gamma(1), Positive())
assert np.shape(p.rvs()) == ()
assert p.has_value is True
assert p.is_random is True
p = Parameter(gamma(1), Positive(), shape=(2,))
assert np.shape(p.rvs()) == (2,)
assert Positive().check(p.rvs())
p = Parameter(gamma(1), Bound(1, 2), shape=(10, 5))
assert np.shape(p.rvs()) == (10, 5)
assert Bound(1, 2).check(p.rvs())
| apache-2.0 | 2,096,992,696,473,579,300 | 21.467742 | 55 | 0.601579 | false |
Aravinthu/odoo | addons/website/models/ir_http.py | 1 | 10023 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import traceback
import os
import unittest
import werkzeug
import werkzeug.routing
import werkzeug.utils
import odoo
from odoo import api, models
from odoo import SUPERUSER_ID
from odoo.http import request
from odoo.tools import config
from odoo.exceptions import QWebException
from odoo.tools.safe_eval import safe_eval
from odoo.osv.expression import FALSE_DOMAIN
from odoo.addons.http_routing.models.ir_http import ModelConverter
logger = logging.getLogger(__name__)
def sitemap_qs2dom(qs, route, field='name'):
    """ Convert a query_string (which can contain a path) to a domain"""
dom = []
if qs and qs.lower() not in route:
needles = qs.strip('/').split('/')
# needles will be altered and keep only element which one is not in route
# diff(from=['shop', 'product'], to=['shop', 'product', 'product']) => to=['product']
unittest.util.unorderable_list_difference(route.strip('/').split('/'), needles)
if len(needles) == 1:
dom = [(field, 'ilike', needles[0])]
else:
dom = FALSE_DOMAIN
return dom
def _guess_mimetype(ext=False, default=False):
exts = {
'.css': ['text/css', 'website.default_css'],
'.less': ['text/less', 'website.default_less'],
'.js': ['text/javascript', 'website.default_javascript'],
'.xml': ['text/xml', 'website.default_xml'],
'.csv': ['text/csv', 'website.default_csv'],
'.html': ['text/html', False],
}
if not default:
default = exts['.html']
return ext is not False and exts.get(ext, default) or exts
class Http(models.AbstractModel):
_inherit = 'ir.http'
@classmethod
    def _get_converters(cls):
        """ Get the list of converters for custom url patterns that werkzeug needs
        to match a Rule. This override adds the website ones.
"""
return dict(
super(Http, cls)._get_converters(),
model=ModelConverter,
)
@classmethod
    def _auth_method_public(cls):
        """ If no user is logged in, set the public user of the current website, or the
        default public user, as request uid.
After this method `request.env` can be called, since the `request.uid` is
set. The `env` lazy property of `request` will be correct.
"""
if not request.session.uid:
env = api.Environment(request.cr, SUPERUSER_ID, request.context)
website = env['website'].get_current_website()
if website:
request.uid = website.user_id.id
if not request.uid:
super(Http, cls)._auth_method_public()
@classmethod
def _add_dispatch_parameters(cls, func):
if request.is_frontend:
context = dict(request.context)
if not context.get('tz'):
context['tz'] = request.session.get('geoip', {}).get('time_zone')
request.website = request.env['website'].get_current_website() # can use `request.env` since auth methods are called
context['website_id'] = request.website.id
super(Http, cls)._add_dispatch_parameters(func)
if request.is_frontend and request.routing_iteration == 1:
request.website = request.website.with_context(context)
@classmethod
def _get_languages(cls):
if getattr(request, 'website', False):
return request.website.language_ids
return super(Http, cls)._get_languages()
@classmethod
def _get_language_codes(cls):
if request.website:
return request.website._get_languages()
return super(Http, cls)._get_language_codes()
@classmethod
def _get_default_lang(cls):
if getattr(request, 'website', False):
return request.website.default_lang_id
return super(Http, cls)._get_default_lang()
@classmethod
def _serve_page(cls):
req_page = request.httprequest.path
domain = [('url', '=', req_page), '|', ('website_ids', 'in', request.website.id), ('website_ids', '=', False)]
if not request.website.is_publisher:
domain += [('is_visible', '=', True)]
mypage = request.env['website.page'].search(domain, limit=1)
_, ext = os.path.splitext(req_page)
if mypage:
return request.render(mypage.view_id.id, {
# 'path': req_page[1:],
'deletable': True,
'main_object': mypage,
}, mimetype=_guess_mimetype(ext)[0])
return False
@classmethod
def _serve_404(cls):
req_page = request.httprequest.path
return request.website.is_publisher() and request.render('website.page_404', {'path': req_page[1:]}) or False
@classmethod
def _serve_redirect(cls):
req_page = request.httprequest.path
domain = [
'|', ('website_id', '=', request.website.id), ('website_id', '=', False),
('url_from', '=', req_page)
]
return request.env['website.redirect'].search(domain, limit=1)
@classmethod
def _serve_fallback(cls, exception):
# serve attachment before
parent = super(Http, cls)._serve_fallback(exception)
if parent: # attachment
return parent
website_page = cls._serve_page()
if website_page:
return website_page
redirect = cls._serve_redirect()
if redirect:
return request.redirect(redirect.url_to, code=redirect.type)
return cls._serve_404()
@classmethod
def _handle_exception(cls, exception):
code = 500 # default code
is_website_request = bool(getattr(request, 'is_frontend', False) and getattr(request, 'website', False))
if not is_website_request:
# Don't touch non website requests exception handling
return super(Http, cls)._handle_exception(exception)
else:
try:
response = super(Http, cls)._handle_exception(exception)
if isinstance(response, Exception):
exception = response
else:
                    # if parent explicitly returns a plain response, then we don't touch it
return response
except Exception as e:
if 'werkzeug' in config['dev_mode'] and (not isinstance(exception, QWebException) or not exception.qweb.get('cause')):
raise
exception = e
values = dict(
exception=exception,
traceback=traceback.format_exc(),
)
if isinstance(exception, werkzeug.exceptions.HTTPException):
if exception.code is None:
# Hand-crafted HTTPException likely coming from abort(),
# usually for a redirect response -> return it directly
return exception
else:
code = exception.code
if isinstance(exception, odoo.exceptions.AccessError):
code = 403
if isinstance(exception, QWebException):
values.update(qweb_exception=exception)
if isinstance(exception.qweb.get('cause'), odoo.exceptions.AccessError):
code = 403
if code == 500:
logger.error("500 Internal Server Error:\n\n%s", values['traceback'])
if 'qweb_exception' in values:
view = request.env["ir.ui.view"]
views = view._views_get(exception.qweb['template'])
to_reset = views.filtered(lambda view: view.model_data_id.noupdate is True and view.arch_fs)
values['views'] = to_reset
elif code == 403:
logger.warn("403 Forbidden:\n\n%s", values['traceback'])
values.update(
status_message=werkzeug.http.HTTP_STATUS_CODES[code],
status_code=code,
)
if not request.uid:
cls._auth_method_public()
try:
html = request.env['ir.ui.view'].render_template('website.%s' % code, values)
except Exception:
html = request.env['ir.ui.view'].render_template('website.http_error', values)
return werkzeug.wrappers.Response(html, status=code, content_type='text/html;charset=utf-8')
@classmethod
def binary_content(cls, xmlid=None, model='ir.attachment', id=None, field='datas',
unique=False, filename=None, filename_field='datas_fname', download=False,
mimetype=None, default_mimetype='application/octet-stream',
access_token=None, env=None):
env = env or request.env
obj = None
if xmlid:
obj = env.ref(xmlid, False)
elif id and model in env:
obj = env[model].browse(int(id))
if obj and 'website_published' in obj._fields:
if env[obj._name].sudo().search([('id', '=', obj.id), ('website_published', '=', True)]):
env = env(user=SUPERUSER_ID)
return super(Http, cls).binary_content(
xmlid=xmlid, model=model, id=id, field=field, unique=unique, filename=filename,
filename_field=filename_field, download=download, mimetype=mimetype,
default_mimetype=default_mimetype, access_token=access_token, env=env)
class ModelConverter(ModelConverter):
def generate(self, uid, dom=None, args=None):
Model = request.env[self.model].sudo(uid)
domain = safe_eval(self.domain, (args or {}).copy())
if dom:
domain += dom
for record in Model.search_read(domain=domain, fields=['write_date', Model._rec_name]):
if record.get(Model._rec_name, False):
yield {'loc': (record['id'], record[Model._rec_name])}
| agpl-3.0 | 8,303,738,624,251,257,000 | 37.255725 | 134 | 0.583458 | false |
atty303/pyfilesystem | fs/commands/fsinfo.py | 1 | 3443 | #!/usr/bin/env python
from fs.errors import ResourceNotFoundError
from fs.opener import opener
from fs.commands.runner import Command
import sys
from datetime import datetime
class FSInfo(Command):
usage = """fsinfo [OPTION]... [PATH]
Display information regarding an FS resource"""
def get_optparse(self):
optparse = super(FSInfo, self).get_optparse()
optparse.add_option('-k', '--key', dest='keys', action='append', default=[],
help='display KEYS only')
optparse.add_option('-s', '--simple', dest='simple', action='store_true', default=False,
help='info displayed in simple format (no table)')
optparse.add_option('-o', '--omit', dest='omit', action='store_true', default=False,
help='omit path name from output')
optparse.add_option('-d', '--dirsonly', dest='dirsonly', action="store_true", default=False,
help="list directories only", metavar="DIRSONLY")
optparse.add_option('-f', '--filesonly', dest='filesonly', action="store_true", default=False,
help="list files only", metavar="FILESONLY")
return optparse
def do_run(self, options, args):
def wrap_value(val):
if val.rstrip() == '\0':
return self.wrap_error('... missing ...')
return val
def make_printable(text):
if not isinstance(text, basestring):
try:
text = str(text)
except:
try:
text = unicode(text)
except:
text = repr(text)
return text
keys = options.keys or None
for fs, path, is_dir in self.get_resources(args,
files_only=options.filesonly,
dirs_only=options.dirsonly):
if not options.omit:
if options.simple:
file_line = u'%s\n' % self.wrap_filename(path)
else:
file_line = u'[%s] %s\n' % (self.wrap_filename(path), self.wrap_faded(fs.desc(path)))
self.output(file_line)
info = fs.getinfo(path)
for k, v in info.items():
if k.startswith('_'):
del info[k]
elif not isinstance(v, (basestring, int, float, bool, datetime)):
del info[k]
if keys:
table = [(k, make_printable(info.get(k, '\0'))) for k in keys]
else:
keys = sorted(info.keys())
table = [(k, make_printable(info[k])) for k in sorted(info.keys())]
if options.simple:
for row in table:
self.output(row[-1] + '\n')
else:
self.output_table(table, {0:self.wrap_table_header, 1:wrap_value})
def run():
return FSInfo().run()
if __name__ == "__main__":
sys.exit(run())
| bsd-3-clause | 9,063,598,042,925,267,000 | 40 | 105 | 0.448446 | false |
fnaum/rez | src/rez/cli/complete.py | 1 | 3476 | """
Prints package completion strings.
"""
from __future__ import print_function
import argparse
__doc__ = argparse.SUPPRESS
def setup_parser(parser, completions=False):
pass
def command(opts, parser, extra_arg_groups=None):
from rez.cli._util import subcommands
import os
import re
# get comp info from environment variables
comp_line = os.getenv("COMP_LINE", "")
comp_point = os.getenv("COMP_POINT", "")
try:
comp_point = int(comp_point)
except:
comp_point = len(comp_line)
last_word = comp_line.split()[-1]
if comp_line.endswith(last_word):
prefix = last_word
else:
prefix = None
def _pop_arg(l, p):
words = l.split()
arg = None
if words:
arg = words[0]
l_ = l.lstrip()
p -= (len(l) - len(l_) + len(arg))
l = l_[len(arg):]
return l, p, arg
return l, p, arg
# determine subcommand, possibly give subcommand completion
subcommand = None
comp_line, comp_point, cmd = _pop_arg(comp_line, comp_point)
if cmd in ("rez", "rezolve"):
comp_line, comp_point, arg = _pop_arg(comp_line, comp_point)
if arg:
if prefix != arg:
subcommand = arg
else:
subcommand = cmd.split("-", 1)[-1]
if subcommand is None:
cmds = [k for k, v in subcommands.items() if not v.get("hidden")]
if prefix:
cmds = (x for x in cmds if x.startswith(prefix))
print(" ".join(cmds))
if subcommand not in subcommands:
return
# replace '--' with special '--N#' flag so that subcommands can specify
# custom completions.
    regex = re.compile(r"\s--\s")
ddashes = regex.findall(comp_line)
for i, ddash in enumerate(ddashes):
j = comp_line.find(ddash)
while comp_line[j] != "-":
j += 1
j += 2
s = "N%d" % i
comp_line = comp_line[:j] + s + comp_line[j:]
if comp_point >= j:
comp_point += len(s)
# create parser for subcommand
from rez.backport.importlib import import_module
module_name = "rez.cli.%s" % subcommand
mod = import_module(module_name)
parser = argparse.ArgumentParser()
mod.setup_parser(parser, completions=True)
# have to massage input a little so argcomplete behaves
cmd = "rez-%s" % subcommand
comp_line = cmd + comp_line
comp_point += len(cmd)
# generate the completions
from rez.cli._complete_util import RezCompletionFinder
completer = RezCompletionFinder(parser=parser,
comp_line=comp_line,
comp_point=comp_point)
words = completer.completions
print(' '.join(words))
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
| lgpl-3.0 | 7,186,320,333,218,014,000 | 29.226087 | 79 | 0.609033 | false |
amoskong/scylla-cluster-tests | sdcm/utils/common.py | 1 | 48757 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright (c) 2017 ScyllaDB
# pylint: disable=too-many-lines
import itertools
import os
import logging
import random
import socket
import time
import datetime
import errno
import threading
import select
import shutil
import copy
from functools import wraps
from enum import Enum
from collections import defaultdict
import concurrent.futures
from concurrent.futures import ThreadPoolExecutor
from urlparse import urlparse
import hashlib
import boto3
import libcloud.storage.providers
import libcloud.storage.types
from libcloud.compute.providers import get_driver
from libcloud.compute.types import Provider
LOGGER = logging.getLogger('utils')
def _remote_get_hash(remoter, file_path):
try:
result = remoter.run('md5sum {}'.format(file_path), verbose=True)
return result.stdout.strip().split()[0]
except Exception as details: # pylint: disable=broad-except
LOGGER.error(str(details))
return None
def _remote_get_file(remoter, src, dst, user_agent=None):
cmd = 'curl -L {} -o {}'.format(src, dst)
if user_agent:
cmd += ' --user-agent %s' % user_agent
return remoter.run(cmd, ignore_status=True)
def remote_get_file(remoter, src, dst, hash_expected=None, retries=1, user_agent=None): # pylint: disable=too-many-arguments
_remote_get_file(remoter, src, dst, user_agent)
if not hash_expected:
return
while retries > 0 and _remote_get_hash(remoter, dst) != hash_expected:
_remote_get_file(remoter, src, dst, user_agent)
retries -= 1
assert _remote_get_hash(remoter, dst) == hash_expected
class retrying(object): # pylint: disable=invalid-name,too-few-public-methods
"""
    Used as a decorator to retry a function call that may fail with one of the allowed exceptions
"""
def __init__(self, n=3, sleep_time=1, allowed_exceptions=(Exception,), message=""):
        assert n > 0, "Number of retries parameter should be greater than 0 (current: %s)" % n
self.n = n # number of times to retry # pylint: disable=invalid-name
self.sleep_time = sleep_time # number seconds to sleep between retries
self.allowed_exceptions = allowed_exceptions # if Exception is not allowed will raise
self.message = message # string that will be printed between retries
def __call__(self, func):
@wraps(func)
def inner(*args, **kwargs):
if self.n == 1:
# there is no need to retry
return func(*args, **kwargs)
for i in xrange(self.n):
try:
if self.message:
LOGGER.info("%s [try #%s]", self.message, i)
return func(*args, **kwargs)
except self.allowed_exceptions as ex:
LOGGER.debug("'%s': failed with '%r', retrying [#%s]", func.func_name, ex, i)
time.sleep(self.sleep_time)
if i == self.n - 1:
LOGGER.error("'%s': Number of retries exceeded!", func.func_name)
raise
return inner
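# Usage sketch for the decorator above (the decorated function is hypothetical,
# not part of this module):
#
#   @retrying(n=5, sleep_time=2, allowed_exceptions=(IOError,), message="Reading status file")
#   def read_status():
#       with open('/var/run/status') as status_file:
#           return status_file.read()
#
# The call is attempted up to n times with sleep_time seconds between attempts;
# exceptions outside allowed_exceptions propagate immediately, and the last
# failed attempt re-raises.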
def log_run_info(arg):
"""
    Decorator that prints BEGIN before the function runs and END when the function finishes running.
    Uses the function name as the action name, or a string that can be passed to the decorator.
If the function is a method of a class object, the class name will be printed out.
Usage examples:
@log_run_info
def foo(x, y=1):
pass
In: foo(1)
Out:
BEGIN: foo
END: foo (ran 0.000164)s
@log_run_info("Execute nemesis")
def disrupt():
pass
In: disrupt()
Out:
BEGIN: Execute nemesis
END: Execute nemesis (ran 0.000271)s
"""
def _inner(func, msg=None):
@wraps(func)
def inner(*args, **kwargs):
class_name = ""
if args and func.__name__ in dir(args[0]):
class_name = " <%s>" % args[0].__class__.__name__
action = "%s%s" % (msg, class_name)
start_time = datetime.datetime.now()
LOGGER.debug("BEGIN: %s", action)
res = func(*args, **kwargs)
end_time = datetime.datetime.now()
LOGGER.debug("END: %s (ran %ss)", action, (end_time - start_time).total_seconds())
return res
return inner
if callable(arg): # when decorator is used without a string message
return _inner(arg, arg.__name__)
else:
return lambda f: _inner(f, arg)
class Distro(Enum):
UNKNOWN = 0
CENTOS7 = 1
RHEL7 = 2
UBUNTU14 = 3
UBUNTU16 = 4
UBUNTU18 = 5
DEBIAN8 = 6
DEBIAN9 = 7
def get_data_dir_path(*args):
import sdcm
sdcm_path = os.path.realpath(sdcm.__path__[0])
data_dir = os.path.join(sdcm_path, "../data_dir", *args)
return os.path.abspath(data_dir)
def get_job_name():
return os.environ.get('JOB_NAME', 'local_run')
def verify_scylla_repo_file(content, is_rhel_like=True):
LOGGER.info('Verifying Scylla repo file')
if is_rhel_like:
body_prefix = ['#', '[scylla', 'name=', 'baseurl=', 'enabled=', 'gpgcheck=', 'type=',
'skip_if_unavailable=', 'gpgkey=', 'repo_gpgcheck=', 'enabled_metadata=']
else:
body_prefix = ['#', 'deb']
for line in content.split('\n'):
valid_prefix = False
for prefix in body_prefix:
if line.startswith(prefix) or not line.strip():
valid_prefix = True
break
LOGGER.debug(line)
assert valid_prefix, 'Repository content has invalid line: {}'.format(line)
def remove_comments(data):
"""Remove comments line from data
Remove any string which is start from # in data
Arguments:
data {str} -- data expected the command output, file contents
"""
return '\n'.join([i.strip() for i in data.split('\n') if not i.startswith('#')])
class S3Storage(object):
bucket_name = 'cloudius-jenkins-test'
enable_multipart_threshold_size = 1024 * 1024 * 1024 # 1GB
multipart_chunksize = 50 * 1024 * 1024 # 50 MB
num_download_attempts = 5
def __init__(self, bucket=None):
if bucket:
self.bucket_name = bucket
self._bucket = boto3.resource("s3").Bucket(name=self.bucket_name)
self.transfer_config = boto3.s3.transfer.TransferConfig(multipart_threshold=self.enable_multipart_threshold_size,
multipart_chunksize=self.multipart_chunksize,
num_download_attempts=self.num_download_attempts)
def get_s3_fileojb(self, key):
objects = []
for obj in self._bucket.objects.filter(Prefix=key):
objects.append(obj)
return objects
def search_by_path(self, path=''):
files = []
for obj in self._bucket.objects.filter(Prefix=path):
files.append(obj.key)
return files
def generate_url(self, file_path, dest_dir=''):
bucket_name = self.bucket_name
file_name = os.path.basename(os.path.normpath(file_path))
return "https://{bucket_name}.s3.amazonaws.com/{dest_dir}/{file_name}".format(dest_dir=dest_dir,
file_name=file_name,
bucket_name=bucket_name)
def upload_file(self, file_path, dest_dir=''):
s3_url = self.generate_url(file_path, dest_dir)
s3_obj = "{}/{}".format(dest_dir, os.path.basename(file_path))
try:
LOGGER.info("Uploading '{file_path}' to {s3_url}".format(file_path=file_path, s3_url=s3_url))
print "Uploading '{file_path}' to {s3_url}".format(file_path=file_path, s3_url=s3_url)
self._bucket.upload_file(Filename=file_path,
Key=s3_obj,
Config=self.transfer_config)
LOGGER.info("Uploaded to {0}".format(s3_url))
LOGGER.info("Set public read access")
self.set_public_access(key=s3_obj)
return s3_url
except Exception as details: # pylint: disable=broad-except
LOGGER.debug("Unable to upload to S3: %s", details)
return ""
def set_public_access(self, key):
acl_obj = boto3.resource('s3').ObjectAcl(self.bucket_name, key)
grants = copy.deepcopy(acl_obj.grants)
grantees = {
'Grantee': {
"Type": "Group",
"URI": "http://acs.amazonaws.com/groups/global/AllUsers"
},
'Permission': "READ"
}
grants.append(grantees)
acl_obj.put(ACL='', AccessControlPolicy={'Grants': grants, 'Owner': acl_obj.owner})
def download_file(self, link, dst_dir):
key_name = link.replace("https://{0.bucket_name}.s3.amazonaws.com/".format(self), "")
file_name = os.path.basename(key_name)
try:
LOGGER.info("Downloading {0} from {1}".format(key_name, self.bucket_name))
self._bucket.download_file(Key=key_name,
Filename=os.path.join(dst_dir, file_name),
Config=self.transfer_config)
LOGGER.info("Downloaded finished")
return os.path.join(os.path.abspath(dst_dir), file_name)
except Exception as details: # pylint: disable=broad-except
LOGGER.warning("File {} is not downloaded by reason: {}".format(key_name, details))
return ""
def get_latest_gemini_version():
bucket_name = 'downloads.scylladb.com'
results = S3Storage(bucket_name).search_by_path(path='gemini')
versions = set()
for result_file in results:
versions.add(result_file.split('/')[1])
return str(sorted(versions)[-1])
def list_logs_by_test_id(test_id):
log_types = ['db-cluster', 'monitor-set', 'loader-set', 'sct-runner',
'prometheus', 'grafana',
'job', 'monitoring_data_stack', 'events']
results = []
if not test_id:
return results
def convert_to_date(date_str):
try:
t = datetime.datetime.strptime(date_str, "%Y%m%d_%H%M%S") # pylint: disable=invalid-name
except ValueError:
try:
t = datetime.datetime.strptime(date_str, "%Y_%m_%d_%H_%M_%S") # pylint: disable=invalid-name
except ValueError:
t = datetime.datetime(1999, 1, 1, 1, 1, 1) # pylint: disable=invalid-name
return t # pylint: disable=invalid-name
log_files = S3Storage().search_by_path(path=test_id)
for log_file in log_files:
for log_type in log_types:
if log_type in log_file:
results.append({"file_path": log_file,
"type": log_type,
"link": "https://{}.s3.amazonaws.com/{}".format(S3Storage.bucket_name, log_file),
"date": convert_to_date(log_file.split('/')[1])
})
break
results = sorted(results, key=lambda x: x["date"])
return results
def all_aws_regions():
client = boto3.client('ec2')
return [region['RegionName'] for region in client.describe_regions()['Regions']]
AWS_REGIONS = all_aws_regions()
class ParallelObject(object): # pylint: disable=too-few-public-methods
"""
    Run a function over the supplied objects in parallel using threads.
"""
def __init__(self, objects, timeout=6, num_workers=None, disable_logging=False):
self.objects = objects
self.timeout = timeout
self.num_workers = num_workers
self.disable_logging = disable_logging
def run(self, func):
def func_wrap(fun):
def inner(*args, **kwargs):
thread_name = threading.current_thread().name
fun_args = args
fun_kwargs = kwargs
fun_name = fun.__name__
LOGGER.debug("[{thread_name}] {fun_name}({fun_args}, {fun_kwargs})".format(thread_name=thread_name,
fun_name=fun_name,
fun_args=fun_args,
fun_kwargs=fun_kwargs))
return_val = fun(*args, **kwargs)
LOGGER.debug("[{thread_name}] Done.".format(thread_name=thread_name))
return return_val
return inner
with ThreadPoolExecutor(max_workers=self.num_workers) as pool:
LOGGER.debug("Executing in parallel: '{}' on {}".format(func.__name__, self.objects))
if not self.disable_logging:
func = func_wrap(func)
return list(pool.map(func, self.objects, timeout=self.timeout))
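# Illustrative sketch for ParallelObject (the worker function and host names are
# hypothetical):
#
#   def ping(host):
#       return os.system('ping -c 1 %s' % host)
#
#   results = ParallelObject(['host-a', 'host-b'], timeout=30, num_workers=2).run(ping)
#
# run() maps the given function over self.objects in a thread pool and returns
# the list of results; exceeding the timeout raises from the underlying map().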
def clean_cloud_instances(tags_dict):
"""
Remove all instances with specific tags from both AWS/GCE
    :param tags_dict: a dict of tags to select the instances by, e.g. {"TestId": "9bc6879f-b1ef-47e1-99ab-020810aedbcc"}
:return: None
"""
clean_instances_aws(tags_dict)
clean_elastic_ips_aws(tags_dict)
clean_instances_gce(tags_dict)
def aws_tags_to_dict(tags_list):
tags_dict = {}
if tags_list:
for item in tags_list:
tags_dict[item["Key"]] = item["Value"]
return tags_dict
def list_instances_aws(tags_dict=None, region_name=None, running=False, group_as_region=False, verbose=False):
"""
    list all AWS instances with specific tags
    :param tags_dict: a dict of tags to select the instances by, e.g. {"TestId": "9bc6879f-b1ef-47e1-99ab-020810aedbcc"}
:param region_name: name of the region to list
:param running: get all running instances
:param group_as_region: if True the results would be grouped into regions
:param verbose: if True will log progress information
:return: instances dict where region is a key
"""
instances = {}
aws_regions = [region_name] if region_name else AWS_REGIONS
def get_instances(region):
if verbose:
LOGGER.info('Going to list aws region "%s"', region)
time.sleep(random.random())
client = boto3.client('ec2', region_name=region)
custom_filter = []
if tags_dict:
custom_filter = [{'Name': 'tag:{}'.format(key), 'Values': [value]} for key, value in tags_dict.items()]
response = client.describe_instances(Filters=custom_filter)
instances[region] = [instance for reservation in response['Reservations'] for instance in reservation[
'Instances']]
if verbose:
LOGGER.info("%s: done [%s/%s]", region, len(instances.keys()), len(aws_regions))
ParallelObject(aws_regions, timeout=100).run(get_instances)
for curr_region_name in instances:
if running:
instances[curr_region_name] = [i for i in instances[curr_region_name] if i['State']['Name'] == 'running']
else:
instances[curr_region_name] = [i for i in instances[curr_region_name]
if not i['State']['Name'] == 'terminated']
if not group_as_region:
instances = list(itertools.chain(*instances.values())) # flatten the list of lists
total_items = len(instances)
else:
total_items = sum([len(value) for _, value in instances.items()])
if verbose:
LOGGER.info("Found total of %s instances.", len(total_items))
return instances
def clean_instances_aws(tags_dict):
"""
    Remove all AWS instances with specific tags
    :param tags_dict: a dict of tags to select the instances by, e.g. {"TestId": "9bc6879f-b1ef-47e1-99ab-020810aedbcc"}
:return: None
"""
assert tags_dict, "tags_dict not provided (can't clean all instances)"
aws_instances = list_instances_aws(tags_dict=tags_dict, group_as_region=True)
for region, instance_list in aws_instances.items():
client = boto3.client('ec2', region_name=region)
for instance in instance_list:
tags = aws_tags_to_dict(instance.get('Tags'))
name = tags.get("Name", "N/A")
instance_id = instance['InstanceId']
LOGGER.info("Going to delete '{instance_id}' [name={name}] ".format(instance_id=instance_id, name=name))
response = client.terminate_instances(InstanceIds=[instance_id])
LOGGER.debug("Done. Result: %s\n", response['TerminatingInstances'])
def list_elastic_ips_aws(tags_dict=None, region_name=None, group_as_region=False, verbose=False):
"""
    list all AWS elastic ips with specific tags
    :param tags_dict: a dict of tags to select the elastic ips by, e.g. {"TestId": "9bc6879f-b1ef-47e1-99ab-020810aedbcc"}
:param region_name: name of the region to list
:param group_as_region: if True the results would be grouped into regions
:param verbose: if True will log progress information
    :return: elastic ips dict where region is a key
"""
elastic_ips = {}
aws_regions = [region_name] if region_name else AWS_REGIONS
def get_elastic_ips(region):
if verbose:
LOGGER.info('Going to list aws region "%s"', region)
time.sleep(random.random())
client = boto3.client('ec2', region_name=region)
custom_filter = []
if tags_dict:
custom_filter = [{'Name': 'tag:{}'.format(key), 'Values': [value]} for key, value in tags_dict.items()]
response = client.describe_addresses(Filters=custom_filter)
elastic_ips[region] = [ip for ip in response['Addresses']]
if verbose:
LOGGER.info("%s: done [%s/%s]", region, len(elastic_ips.keys()), len(aws_regions))
ParallelObject(aws_regions, timeout=100).run(get_elastic_ips)
if not group_as_region:
elastic_ips = list(itertools.chain(*elastic_ips.values())) # flatten the list of lists
        total_items = len(elastic_ips)
else:
total_items = sum([len(value) for _, value in elastic_ips.items()])
if verbose:
LOGGER.info("Found total of %s ips.", total_items)
return elastic_ips
def clean_elastic_ips_aws(tags_dict):
"""
    Remove all AWS elastic ips with specific tags
    :param tags_dict: a dict of tags to select the elastic ips by, e.g. {"TestId": "9bc6879f-b1ef-47e1-99ab-020810aedbcc"}
:return: None
"""
assert tags_dict, "tags_dict not provided (can't clean all instances)"
aws_instances = list_elastic_ips_aws(tags_dict=tags_dict, group_as_region=True)
for region, eip_list in aws_instances.items():
client = boto3.client('ec2', region_name=region)
for eip in eip_list:
association_id = eip.get('AssociationId', None)
if association_id:
response = client.disassociate_address(AssociationId=association_id)
LOGGER.debug("disassociate_address. Result: %s\n", response)
allocation_id = eip['AllocationId']
LOGGER.info("Going to release '{allocation_id}' [public_ip={public_ip}] ".format(
allocation_id=allocation_id, public_ip=eip['PublicIp']))
response = client.release_address(AllocationId=allocation_id)
LOGGER.debug("Done. Result: %s\n", response)
def get_all_gce_regions():
from sdcm.keystore import KeyStore
gcp_credentials = KeyStore().get_gcp_credentials()
gce_driver = get_driver(Provider.GCE)
compute_engine = gce_driver(gcp_credentials["project_id"] + "@appspot.gserviceaccount.com",
gcp_credentials["private_key"],
project=gcp_credentials["project_id"])
all_gce_regions = [region_obj.name for region_obj in compute_engine.region_list]
return all_gce_regions
def gce_meta_to_dict(metadata):
meta_dict = {}
data = metadata.get("items")
if data:
for item in data:
key = item["key"]
if key: # sometimes key is empty string
meta_dict[key] = item["value"]
return meta_dict
def filter_gce_by_tags(tags_dict, instances):
filtered_instances = []
for instance in instances:
tags = gce_meta_to_dict(instance.extra['metadata'])
found_keys = set(k for k in tags_dict if k in tags and tags_dict[k] == tags[k])
if found_keys == set(tags_dict.keys()):
filtered_instances.append(instance)
return filtered_instances
def list_instances_gce(tags_dict=None, running=False, verbose=False):
"""
    list all GCE instances with specific tags
    :param tags_dict: a dict of tags to select the instances by, e.g. {"TestId": "9bc6879f-b1ef-47e1-99ab-020810aedbcc"}
    :return: list of instances
"""
# avoid cyclic dependency issues, since too many things import utils.py
from sdcm.keystore import KeyStore
gcp_credentials = KeyStore().get_gcp_credentials()
gce_driver = get_driver(Provider.GCE)
compute_engine = gce_driver(gcp_credentials["project_id"] + "@appspot.gserviceaccount.com",
gcp_credentials["private_key"],
project=gcp_credentials["project_id"])
if verbose:
LOGGER.info("Going to get all instances from GCE")
all_gce_instances = compute_engine.list_nodes()
# filter instances by tags since libcloud list_nodes() doesn't offer any filtering
if tags_dict:
instances = filter_gce_by_tags(tags_dict=tags_dict, instances=all_gce_instances)
else:
instances = all_gce_instances
if running:
# https://libcloud.readthedocs.io/en/latest/compute/api.html#libcloud.compute.types.NodeState
instances = [i for i in instances if i.state == 'running']
else:
instances = [i for i in instances if not i.state == 'terminated']
if verbose:
LOGGER.info("Done. Found total of %s instances.", len(instances))
return instances
def clean_instances_gce(tags_dict):
"""
    Remove all GCE instances with specific tags
    :param tags_dict: a dict of tags to select the instances by, e.g. {"TestId": "9bc6879f-b1ef-47e1-99ab-020810aedbcc"}
:return: None
"""
assert tags_dict, "tags_dict not provided (can't clean all instances)"
all_gce_instances = list_instances_gce(tags_dict=tags_dict)
for instance in all_gce_instances:
LOGGER.info("Going to delete: {}".format(instance.name))
# https://libcloud.readthedocs.io/en/latest/compute/api.html#libcloud.compute.base.Node.destroy
res = instance.destroy()
LOGGER.info("{} deleted. res={}".format(instance.name, res))
_SCYLLA_AMI_CACHE = defaultdict(dict)
def get_scylla_ami_versions(region):
"""
get the list of all the formal scylla ami from specific region
:param region: the aws region to look in
:return: list of ami data
:rtype: list
"""
if _SCYLLA_AMI_CACHE[region]:
return _SCYLLA_AMI_CACHE[region]
ec2 = boto3.client('ec2', region_name=region)
response = ec2.describe_images(
Owners=['797456418907'], # ScyllaDB
Filters=[
{'Name': 'name', 'Values': ['ScyllaDB *']},
],
)
_SCYLLA_AMI_CACHE[region] = sorted(response['Images'],
key=lambda x: x['CreationDate'],
reverse=True)
return _SCYLLA_AMI_CACHE[region]
_S3_SCYLLA_REPOS_CACHE = defaultdict(dict)
def get_s3_scylla_repos_mapping(dist_type='centos', dist_version=None):
"""
get the mapping from version prefixes to rpm .repo or deb .list files locations
:param dist_type: which distro to look up centos/ubuntu/debian
    :param dist_version: family name of the distro version
:return: a mapping of versions prefixes to repos
:rtype: dict
"""
if (dist_type, dist_version) in _S3_SCYLLA_REPOS_CACHE:
return _S3_SCYLLA_REPOS_CACHE[(dist_type, dist_version)]
s3_client = boto3.client('s3')
bucket = 'downloads.scylladb.com'
if dist_type == 'centos':
response = s3_client.list_objects(Bucket=bucket, Prefix='rpm/centos/', Delimiter='/')
for repo_file in response['Contents']:
filename = os.path.basename(repo_file['Key'])
# only if path look like 'rpm/centos/scylla-1.3.repo', we deem it formal one
if filename.startswith('scylla-') and filename.endswith('.repo'):
version_prefix = filename.replace('.repo', '').split('-')[-1]
_S3_SCYLLA_REPOS_CACHE[(
dist_type, dist_version)][version_prefix] = "https://s3.amazonaws.com/{bucket}/{path}".format(bucket=bucket, path=repo_file['Key'])
elif dist_type == 'ubuntu' or dist_type == 'debian':
response = s3_client.list_objects(Bucket=bucket, Prefix='deb/{}/'.format(dist_type), Delimiter='/')
for repo_file in response['Contents']:
filename = os.path.basename(repo_file['Key'])
# only if path look like 'deb/debian/scylla-3.0-jessie.list', we deem it formal one
if filename.startswith('scylla-') and filename.endswith('-{}.list'.format(dist_version)):
version_prefix = filename.replace('-{}.list'.format(dist_version), '').split('-')[-1]
_S3_SCYLLA_REPOS_CACHE[(
dist_type, dist_version)][version_prefix] = "https://s3.amazonaws.com/{bucket}/{path}".format(bucket=bucket, path=repo_file['Key'])
else:
raise NotImplementedError("[{}] is not yet supported".format(dist_type))
return _S3_SCYLLA_REPOS_CACHE[(dist_type, dist_version)]
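# Example of the mapping shape returned above (the exact keys and files depend on
# the current bucket contents; the entries below are illustrative only):
#
#   get_s3_scylla_repos_mapping('centos')
#   # -> {'3.0': 'https://s3.amazonaws.com/downloads.scylladb.com/rpm/centos/scylla-3.0.repo', ...}
#   get_s3_scylla_repos_mapping('debian', 'jessie')
#   # -> {'3.0': 'https://s3.amazonaws.com/downloads.scylladb.com/deb/debian/scylla-3.0-jessie.list', ...}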
def pid_exists(pid):
"""
Return True if a given PID exists.
:param pid: Process ID number.
"""
try:
os.kill(pid, 0)
except OSError as detail:
if detail.errno == errno.ESRCH:
return False
return True
def safe_kill(pid, signal):
"""
Attempt to send a signal to a given process that may or may not exist.
    :param pid: Process ID number.
    :param signal: Signal number.
"""
try:
os.kill(pid, signal)
return True
except Exception: # pylint: disable=broad-except
return False
class FileFollowerIterator(object): # pylint: disable=too-few-public-methods
def __init__(self, filename, thread_obj):
self.filename = filename
self.thread_obj = thread_obj
def __iter__(self):
with open(self.filename, 'r') as input_file:
line = ''
while not self.thread_obj.stopped():
poller = select.poll() # pylint: disable=no-member
poller.register(input_file, select.POLLIN) # pylint: disable=no-member
if poller.poll(100):
line += input_file.readline()
if not line or not line.endswith('\n'):
time.sleep(0.1)
continue
yield line
line = ''
yield line
class FileFollowerThread(object):
def __init__(self):
self.executor = concurrent.futures.ThreadPoolExecutor(1)
self._stop_event = threading.Event()
self.future = None
def __enter__(self):
self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
def run(self):
raise NotImplementedError()
def start(self):
self.future = self.executor.submit(self.run)
return self.future
def stop(self):
self._stop_event.set()
def stopped(self):
return self._stop_event.is_set()
def follow_file(self, filename):
return FileFollowerIterator(filename, self)
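# Minimal subclass sketch for FileFollowerThread (class and file names are
# hypothetical):
#
#   class GrepErrorsThread(FileFollowerThread):
#       def run(self):
#           for line in self.follow_file('/tmp/scylla.log'):
#               if 'ERROR' in line:
#                   LOGGER.error(line.strip())
#
#   with GrepErrorsThread():   # __enter__ starts run() in the executor, __exit__ stops it
#       pass  # do the work that produces the log lines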
class ScyllaCQLSession(object):
def __init__(self, session, cluster):
self.session = session
self.cluster = cluster
def __enter__(self):
return self.session
def __exit__(self, exc_type, exc_val, exc_tb):
self.cluster.shutdown()
class MethodVersionNotFound(Exception):
pass
class version(object): # pylint: disable=invalid-name,too-few-public-methods
VERSIONS = {}
"""
Runs a method according to the version attribute of the class method
Limitations: currently, can't work if the same method name in the same file used in different
classes
Example:
In [3]: class VersionedClass(object):
...: def __init__(self, current_version):
...: self.version = current_version
...:
...: @version("1.2")
...: def setup(self):
...: return "1.2"
...:
...: @version("2")
...: def setup(self):
...: return "2"
In [4]: vc = VersionedClass("2")
In [5]: vc.setup()
Out[5]: '2'
In [6]: vc = VersionedClass("1.2")
In [7]: vc.setup()
Out[7]: '1.2'
"""
def __init__(self, ver):
self.version = ver
def __call__(self, func):
self.VERSIONS[(self.version, func.func_name, func.func_code.co_filename)] = func
@wraps(func)
def inner(*args, **kwargs):
cls_self = args[0]
func_to_run = self.VERSIONS.get((cls_self.version, func.func_name, func.func_code.co_filename))
if func_to_run:
return func_to_run(*args, **kwargs)
else:
raise MethodVersionNotFound("Method '{}' with version '{}' not defined in '{}'!".format(
func.func_name,
cls_self.version,
cls_self.__class__.__name__))
return inner
def get_free_port():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('', 0))
addr = sock.getsockname()
port = addr[1]
sock.close()
return port
def get_my_ip():
hostname = socket.gethostname()
ip = socket.gethostbyname(hostname)
return ip
def get_branched_ami(ami_version, region_name):
"""
Get a list of AMIs, based on version match
:param ami_version: branch version to look for, ex. 'branch-2019.1:latest', 'branch-3.1:all'
:param region_name: the region to look AMIs in
:return: list of ec2.images
"""
branch, build_id = ami_version.split(':')
ec2 = boto3.resource('ec2', region_name=region_name)
LOGGER.info("Looking for AMI match [%s]", ami_version)
if build_id == 'latest' or build_id == 'all':
filters = [{'Name': 'tag:branch', 'Values': [branch]}]
else:
filters = [{'Name': 'tag:branch', 'Values': [branch]}, {'Name': 'tag:build-id', 'Values': [build_id]}]
amis = list(ec2.images.filter(Filters=filters))
amis = sorted(amis, key=lambda x: x.creation_date, reverse=True)
assert amis, "AMI matching [{}] wasn't found on {}".format(ami_version, region_name)
if build_id == 'all':
return amis
else:
return amis[:1]
def get_ami_tags(ami_id, region_name):
"""
Get a list of tags of a specific AMI
:param ami_id:
:param region_name: the region to look AMIs in
:return: dict of tags
"""
ec2 = boto3.resource('ec2', region_name=region_name)
test_image = ec2.Image(ami_id)
if test_image.tags:
return {i['Key']: i['Value'] for i in test_image.tags}
else:
return {}
def tag_ami(ami_id, tags_dict, region_name):
tags = [{'Key': key, 'Value': value} for key, value in tags_dict.items()]
ec2 = boto3.resource('ec2', region_name=region_name)
test_image = ec2.Image(ami_id)
tags += test_image.tags
test_image.create_tags(Tags=tags)
LOGGER.info("tagged %s with %s", ami_id, tags)
def get_non_system_ks_cf_list(loader_node, db_node, request_timeout=300, filter_out_table_with_counter=False,
filter_out_mv=False):
"""Get all not system keyspace.tables pairs
Arguments:
loader_node {BaseNode} -- LoaderNoder to send request
db_node_ip {str} -- ip of db_node
"""
# pylint: disable=too-many-locals
def get_tables_columns_list(entity_type):
if entity_type == 'view':
cmd = "paging off; SELECT keyspace_name, view_name FROM system_schema.views"
else:
cmd = "paging off; SELECT keyspace_name, table_name, type FROM system_schema.columns"
result = loader_node.run_cqlsh(cmd=cmd, timeout=request_timeout, verbose=False, target_db_node=db_node,
split=True, connect_timeout=request_timeout)
if not result:
return []
splitter_result = []
for row in result[4:]:
if '|' not in row:
continue
if row.startswith('system'):
continue
splitter_result.append(row.split('|'))
return splitter_result
views_list = set()
if filter_out_mv:
tables = get_tables_columns_list('view')
for table in tables:
views_list.add('.'.join([name.strip() for name in table[:2]]))
views_list = list(views_list)
result = get_tables_columns_list('column')
if not result:
return []
    available_ks_cf = defaultdict(list)
    for row in result:
        ks_cf_name = '.'.join([name.strip() for name in row[:2]])
        if filter_out_mv and ks_cf_name in views_list:
            continue
        column_type = row[2].strip()
        available_ks_cf[ks_cf_name].append(column_type)
    if filter_out_table_with_counter:
        for ks_cf, column_types in available_ks_cf.items():
            if 'counter' in column_types:
                available_ks_cf.pop(ks_cf)
    return available_ks_cf.keys()
def remove_files(path):
LOGGER.debug("Remove path %s", path)
try:
if os.path.isdir(path):
shutil.rmtree(path=path, ignore_errors=True)
if os.path.isfile(path):
os.remove(path)
except Exception as details: # pylint: disable=broad-except
LOGGER.error("Error during remove archived logs %s", details)
def format_timestamp(timestamp):
return datetime.datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')
def makedirs(path):
"""
TODO: when move to python3, this function will be replaced
with os.makedirs function:
os.makedirs(name, mode=0o777, exist_ok=False)
"""
try:
os.makedirs(path)
except OSError:
if os.path.exists(path):
return
raise
def wait_ami_available(client, ami_id):
"""Wait while ami_id become available
Wait while ami_id become available, after
10 minutes return an error
Arguments:
client {boto3.EC2.Client} -- client of EC2 service
ami_id {str} -- ami id to check availability
"""
waiter = client.get_waiter('image_available')
waiter.wait(ImageIds=[ami_id],
WaiterConfig={
'Delay': 30,
'MaxAttempts': 20}
)
def update_certificates():
"""
    Update the server encryption certificate, which might have expired.
"""
try:
from sdcm.remote import LocalCmdRunner
localrunner = LocalCmdRunner()
localrunner.run('openssl x509 -req -in data_dir/ssl_conf/example/db.csr -CA data_dir/ssl_conf/cadb.pem -CAkey data_dir/ssl_conf/example/cadb.key -CAcreateserial -out data_dir/ssl_conf/db.crt -days 365')
localrunner.run('openssl x509 -enddate -noout -in data_dir/ssl_conf/db.crt')
except Exception as ex:
raise Exception('Failed to update certificates by openssl: %s' % ex)
def s3_download_dir(bucket, path, target):
"""
Downloads recursively the given S3 path to the target directory.
:param bucket: the name of the bucket to download from
:param path: The S3 directory to download.
:param target: the local directory to download the files to.
"""
client = boto3.client('s3')
# Handle missing / at end of prefix
if not path.endswith('/'):
path += '/'
if path.startswith('/'):
path = path[1:]
result = client.list_objects_v2(Bucket=bucket, Prefix=path)
# Download each file individually
for key in result['Contents']:
# Calculate relative path
rel_path = key['Key'][len(path):]
# Skip paths ending in /
if not key['Key'].endswith('/'):
local_file_path = os.path.join(target, rel_path)
# Make sure directories exist
local_file_dir = os.path.dirname(local_file_path)
makedirs(local_file_dir)
LOGGER.info("Downloading %s from s3 to %s", key['Key'], local_file_path)
client.download_file(bucket, key['Key'], local_file_path)
def gce_download_dir(bucket, path, target):
"""
Downloads recursively the given google storage path to the target directory.
:param bucket: the name of the bucket to download from
:param path: The google storage directory to download.
:param target: the local directory to download the files to.
"""
from sdcm.keystore import KeyStore
gcp_credentials = KeyStore().get_gcp_credentials()
gce_driver = libcloud.storage.providers.get_driver(libcloud.storage.types.Provider.GOOGLE_STORAGE)
driver = gce_driver(gcp_credentials["project_id"] + "@appspot.gserviceaccount.com",
gcp_credentials["private_key"],
project=gcp_credentials["project_id"])
if not path.endswith('/'):
path += '/'
if path.startswith('/'):
path = path[1:]
container = driver.get_container(container_name=bucket)
dir_listing = driver.list_container_objects(container, ex_prefix=path)
for obj in dir_listing:
rel_path = obj.name[len(path):]
local_file_path = os.path.join(target, rel_path)
local_file_dir = os.path.dirname(local_file_path)
makedirs(local_file_dir)
LOGGER.info("Downloading %s from gcp to %s", obj.name, local_file_path)
obj.download(destination_path=local_file_path, overwrite_existing=True)
def download_dir_from_cloud(url):
"""
download a directory from AWS S3 or from google storage
:param url: a url that starts with `s3://` or `gs://`
:return: the temp directory create with the downloaded content
"""
if url is None:
return url
md5 = hashlib.md5()
md5.update(url)
tmp_dir = os.path.join('/tmp/download_from_cloud', md5.hexdigest())
parsed = urlparse(url)
LOGGER.info("Downloading [%s] to [%s]", url, tmp_dir)
if os.path.isdir(tmp_dir) and os.listdir(tmp_dir):
LOGGER.warning("[{}] already exists, skipping download".format(tmp_dir))
else:
if url.startswith('s3://'):
s3_download_dir(parsed.hostname, parsed.path, tmp_dir)
elif url.startswith('gs://'):
gce_download_dir(parsed.hostname, parsed.path, tmp_dir)
elif os.path.isdir(url):
tmp_dir = url
else:
raise ValueError("Unsupported url schema or non-existing directory [{}]".format(url))
if not tmp_dir.endswith('/'):
tmp_dir += '/'
LOGGER.info("Finished downloading [%s]", url)
return tmp_dir
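# Illustrative call (bucket name and prefix are placeholders):
#
#   local_dir = download_dir_from_cloud('s3://my-bucket/profiles/latest/')
#   # -> '/tmp/download_from_cloud/<md5-of-url>/' with the S3 prefix mirrored locally
#
# gs:// URLs are fetched via gce_download_dir, and an existing local directory
# path is returned unchanged (with a trailing slash appended).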
def filter_aws_instances_by_type(instances):
filtered_instances = {
"db_nodes": [],
"loader_nodes": [],
"monitor_nodes": []
}
for instance in instances:
name = [tag['Value']
for tag in instance['Tags'] if tag['Key'] == 'Name']
if 'db-node' in name[0]:
filtered_instances["db_nodes"].append(instance)
if 'monitor-node' in name[0]:
filtered_instances["monitor_nodes"].append(instance)
if 'loader-node' in name[0]:
filtered_instances["loader_nodes"].append(instance)
return filtered_instances
def filter_gce_instances_by_type(instances):
filtered_instances = {
"db_nodes": [],
"loader_nodes": [],
"monitor_nodes": []
}
for instance in instances:
if 'db-nodes' in instance.name:
filtered_instances["db_nodes"].append(instance)
if 'monitor-node' in instance.name:
filtered_instances["monitor_nodes"].append(instance)
if 'loader-node' in instance.name:
filtered_instances["loader_nodes"].append(instance)
return filtered_instances
BUILDERS = [
{
"name": "aws-scylla-qa-builder3",
"public_ip": "18.235.64.163",
"user": "jenkins",
"key_file": os.path.expanduser("~/.ssh/scylla-qa-ec2")
},
{
"name": "aws-eu-west1-qa-builder1",
"public_ip": "18.203.132.87",
"user": "jenkins",
"key_file": os.path.expanduser("~/.ssh/scylla-qa-ec2")
},
{
"name": "aws-eu-west1-qa-builder2",
"public_ip": "34.244.95.165",
"user": "jenkins",
"key_file": os.path.expanduser("~/.ssh/scylla-qa-ec2")
},
{
"name": "aws-eu-west1-qa-builder4",
"public_ip": "34.253.184.117",
"user": "jenkins",
"key_file": os.path.expanduser("~/.ssh/scylla-qa-ec2")
},
{
"name": "aws-eu-west1-qa-builder4",
"public_ip": "52.211.130.106",
"user": "jenkins",
"key_file": os.path.expanduser("~/.ssh/scylla-qa-ec2")
}
]
def get_builder_by_test_id(test_id):
from sdcm.remote import RemoteCmdRunner
base_path_on_builder = "/home/jenkins/slave/workspace"
found_builders = []
def search_test_id_on_builder(builder):
remoter = RemoteCmdRunner(builder['public_ip'],
user=builder['user'],
key_file=builder['key_file'])
LOGGER.info('Search on %s', builder['name'])
result = remoter.run("find {where} -name test_id | xargs grep -rl {test_id}".format(where=base_path_on_builder,
test_id=test_id),
ignore_status=True, verbose=False)
if not result.exited and not result.stderr:
path = result.stdout.strip()
LOGGER.info("Builder name %s, ip %s, folder %s", builder['name'], builder['public_ip'], path)
return {"builder": builder, "path": os.path.dirname(path)}
else:
LOGGER.info("Nothing found")
return None
search_obj = ParallelObject(BUILDERS, timeout=30, num_workers=len(BUILDERS))
results = search_obj.run(search_test_id_on_builder)
found_builders = [builder for builder in results if builder]
if not found_builders:
LOGGER.info("Nothing found for %s", test_id)
return found_builders
def get_post_behavior_actions(config):
action_per_type = {
"db_nodes": None,
"monitor_nodes": None,
"loader_nodes": None
}
for key in action_per_type:
config_key = 'post_behavior_{}'.format(key)
old_config_key = config.get('failure_post_behavior', 'destroy')
action_per_type[key] = config.get(config_key, old_config_key)
return action_per_type
def clean_aws_instances_according_post_behavior(params, config, logdir): # pylint: disable=invalid-name
status = get_testrun_status(params.get('TestId'), logdir)
def apply_action(instances, action):
if action == 'destroy':
instances_ids = [instance['InstanceId'] for instance in instances]
LOGGER.info('Clean next instances %s', instances_ids)
client.terminate_instances(InstanceIds=instances_ids)
elif action == 'keep-on-failure':
if status:
LOGGER.info('Run failed. Leave instances running')
else:
                LOGGER.info('Run was successful. Killing nodes')
apply_action(instances, action='destroy')
elif action == 'keep':
LOGGER.info('Leave instances running')
else:
LOGGER.warning('Unsupported action %s', action)
aws_instances = list_instances_aws(params, group_as_region=True)
for region, instances in aws_instances.items():
if not instances:
continue
client = boto3.client("ec2", region_name=region)
filtered_instances = filter_aws_instances_by_type(instances)
actions_per_type = get_post_behavior_actions(config)
for instance_set_type, action in actions_per_type.items():
LOGGER.info('Apply action "%s" for %s instances', action, instance_set_type)
apply_action(filtered_instances[instance_set_type], action)
def clean_gce_instances_according_post_behavior(params, config, logdir): # pylint: disable=invalid-name
status = get_testrun_status(params.get('TestId'), logdir)
def apply_action(instances, action):
if action == 'destroy':
            for instance in instances:
LOGGER.info('Destroying instance: %s', instance.name)
instance.destroy()
LOGGER.info('Destroyed instance: %s', instance.name)
elif action == 'keep-on-failure':
if status:
LOGGER.info('Run failed. Leave instances running')
else:
                LOGGER.info('Run was successful. Killing nodes')
apply_action(instances, action='destroy')
elif action == 'keep':
            LOGGER.info('Leave instances running')
else:
LOGGER.warning('Unsupported action %s', action)
gce_instances = list_instances_gce(params)
filtered_instances = filter_gce_instances_by_type(gce_instances)
actions_per_type = get_post_behavior_actions(config)
for instance_set_type, action in actions_per_type.items():
apply_action(filtered_instances[instance_set_type], action)
def search_test_id_in_latest(logdir):
from sdcm.remote import LocalCmdRunner
test_id = None
result = LocalCmdRunner().run('cat {0}/latest/test_id'.format(logdir), ignore_status=True)
if not result.exited and result.stdout:
test_id = result.stdout.strip()
LOGGER.info("Found latest test_id: {}".format(test_id))
LOGGER.info("Collect logs for test-run with test-id: {}".format(test_id))
else:
LOGGER.error('test_id not found. Exit code: %s; Error details %s', result.exited, result.stderr)
return test_id
def get_testrun_dir(base_dir, test_id=None):
from sdcm.remote import LocalCmdRunner
if not test_id:
test_id = search_test_id_in_latest(base_dir)
LOGGER.info('Search dir with logs locally for test id: %s', test_id)
search_cmd = "find {base_dir} -name test_id | xargs grep -rl {test_id}".format(**locals())
result = LocalCmdRunner().run(cmd=search_cmd, ignore_status=True)
LOGGER.info("Search result %s", result)
if result.exited == 0 and result.stdout:
found_dirs = result.stdout.strip().split('\n')
LOGGER.info(found_dirs)
return os.path.dirname(found_dirs[0])
LOGGER.info("No any dirs found locally for current test id")
return None
def get_testrun_status(test_id=None, logdir=None):
testrun_dir = get_testrun_dir(logdir, test_id)
status = None
if testrun_dir:
with open(os.path.join(testrun_dir, 'events_log/critical.log')) as f: # pylint: disable=invalid-name
status = f.readlines()
return status
def download_encrypt_keys():
"""
Download certificate files of encryption at-rest from S3 KeyStore
"""
from sdcm.keystore import KeyStore
ks = KeyStore()
if not os.path.exists('./data_dir/encrypt_conf/CA.pem'):
ks.download_file('CA.pem', './data_dir/encrypt_conf/CA.pem')
if not os.path.exists('./data_dir/encrypt_conf/SCYLLADB.pem'):
ks.download_file('SCYLLADB.pem', './data_dir/encrypt_conf/SCYLLADB.pem')
| agpl-3.0 | -6,150,648,137,884,515,000 | 35.250558 | 210 | 0.595996 | false |
racker/scrivener | scrivener/tests/test_server.py | 1 | 1777 | # Copyright 2012 Rackspace Hosting, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from zope.interface import directlyProvides
from twisted.trial.unittest import TestCase
from twisted.test.proto_helpers import StringTransport
from twisted.internet.interfaces import IStreamServerEndpoint
from twisted.internet.defer import succeed
from scrivener.interfaces import ILogHandler
from scrivener.server import ScribeServerService
class ScribeServerServiceTests(TestCase):
def setUp(self):
self.handler = mock.Mock()
directlyProvides(self.handler, ILogHandler)
self.endpoint = mock.Mock()
directlyProvides(self.endpoint, IStreamServerEndpoint)
self.port = mock.Mock()
def _listen(*args, **kwargs):
return succeed(self.port)
self.endpoint.listen.side_effect = _listen
self.service = ScribeServerService(self.endpoint, self.handler)
self.transport = StringTransport()
def test_startService(self):
self.service.startService()
self.assertEqual(self.endpoint.listen.call_count, 1)
def test_stopService(self):
self.service.startService()
self.service.stopService()
self.assertEqual(self.port.stopListening.call_count, 1)
| apache-2.0 | -3,906,171,968,325,228,500 | 30.732143 | 74 | 0.736072 | false |
MendeleievBros/Mendeleiev-Bros | mendeleiev_bros/escena_niveles.py | 1 | 4374 | # -*- coding: utf-8 -*-
import pilas
archi = open('datos.txt', 'r')
nivel = archi.readline()
pantalla = archi.readline()
idioma = archi.readline()
archi.close()
if idioma == "ES":
from modulos.ES import *
else:
from modulos.EN import *
class Elemento(pilas.actores.Texto):
def __init__(self, texto='', x=0, y=0, nivel=0):
pilas.actores.Texto.__init__(self, texto=texto, x=x, y=y, magnitud=10,
vertical=False, fuente="data/tipo_tabla.ttf", fijo=True, ancho=0)
self.color = pilas.colores.negro
self.nivel = nivel
class EscenaNiveles(pilas.escena.Base):
"Es la escena de presentación donde se elije el nivel."
def __init__(self):
pilas.escena.Base.__init__(self)
def leertxt(self):
archi = open('datos.txt', 'r')
linea = archi.readline()
archi.close()
return linea
def nivel(self, evento):
        # Go over the list of flags to check whether one of them was clicked
        for elemento in self.elementos:
            # Check whether the mouse click collides with the flag area
if elemento.colisiona_con_un_punto(evento.x, evento.y):
if elemento.nivel <= int(self.nivel_guardado):
import escena_juego
pilas.cambiar_escena(escena_juego.Juego(elemento.nivel))
def cuando_vuelves(self):
import escena_menu
pilas.cambiar_escena(escena_menu.EscenaMenu())
def iniciar(self):
pilas.fondos.Fondo("data/guarida.jpg")
pilas.eventos.click_de_mouse.conectar(self.nivel)
self.elementos = []
self.candado = []
self.nivel_guardado = self.leertxt()
self.mostrar_tabla()
self.volver = pilas.actores.Boton(ruta_normal='data/volver.png',
ruta_over='data/volver.png')
self.volver.x = 50
self.volver.y = -140
self.volver.conectar_presionado(self.cuando_vuelves)
def candados(self):
        # show the padlocks on levels that are not yet available
for elemento in self.elementos:
if elemento.nivel > int(self.nivel_guardado):
candado1 = pilas.actores.Actor("data/candado.png")
candado1.x = elemento.x
candado1.y = elemento.y
self.candado.append(candado1)
return True
def mostrar_tabla(self):
self.trans1 = pilas.actores.Actor("data/tabla.png")
self.elementos.append(Elemento(texto="H", x=-230, y=130, nivel=1))
self.elementos.append(Elemento(texto="Li", x=-230, y=90, nivel=3))
self.elementos.append(Elemento(texto="Na", x=-230, y=45, nivel=11))
self.elementos.append(Elemento(texto="K", x=-230, y=0, nivel=19))
self.elementos.append(Elemento(texto="Be", x=-205, y=90, nivel=4))
self.elementos.append(Elemento(texto="Mg", x=-205, y=45, nivel=12))
self.elementos.append(Elemento(texto="Ca", x=-205, y=0, nivel=20))
self.elementos.append(Elemento(texto="B", x=80, y=90, nivel=5))
self.elementos.append(Elemento(texto="Al", x=80, y=45, nivel=13))
self.elementos.append(Elemento(texto="Ge", x=80, y=0, nivel=21))
self.elementos.append(Elemento(texto="C", x=105, y=90, nivel=6))
self.elementos.append(Elemento(texto="Si", x=105, y=45, nivel=14))
self.elementos.append(Elemento(texto="Ga", x=105, y=0, nivel=22))
self.elementos.append(Elemento(texto="N", x=130, y=90, nivel=7))
self.elementos.append(Elemento(texto="P", x=130, y=45, nivel=15))
self.elementos.append(Elemento(texto="As", x=130, y=0, nivel=23))
self.elementos.append(Elemento(texto="O", x=155, y=90, nivel=8))
self.elementos.append(Elemento(texto="S", x=155, y=45, nivel=16))
self.elementos.append(Elemento(texto="Se", x=155, y=0, nivel=24))
self.elementos.append(Elemento(texto="F", x=180, y=90, nivel=9))
self.elementos.append(Elemento(texto="Cl", x=180, y=45, nivel=17))
self.elementos.append(Elemento(texto="Br", x=180, y=0, nivel=25))
self.elementos.append(Elemento(texto="He", x=210, y=130, nivel=2))
self.elementos.append(Elemento(texto="Ne", x=210, y=90, nivel=10))
self.elementos.append(Elemento(texto="Ar", x=210, y=45, nivel=18))
self.elementos.append(Elemento(texto="Kr", x=210, y=0, nivel=26))
self.candados()
| gpl-3.0 | -6,599,656,830,202,659,000 | 41.427184 | 78 | 0.61762 | false |
eranroz/dnase | src/data_provider/dataDownloader.py | 1 | 18314 | """'
Script for download and installation of data and required programs
Some functions requires rsync
@see {transformWig} - another script for transformations
''"""
import argparse
import ftplib
from multiprocessing import Pool
import os
import urllib
import time
from config import DATA_DIR, BIN_DIR, OTHER_DATA, SIGNAL_DIR, WIG_TO_BIG_WIG, BIG_WIG_TO_BED_GRAPH, CHROM_SIZES,\
RAW_DATA_DIR
from data_provider import SeqLoader
SMOOTHING = 20
BED_GRAPH_DIR = os.path.join(DATA_DIR, 'bedGraph')
def setup_environment():
"""
Downloads some required programs from UCSC.
"""
tools = ["fetchChromSizes", "wigToBigWig", "bigWigToBedGraph", "bedGraphToBigWig"]
try:
import urllib.request
urlret = urllib.request.urlretrieve
except ImportError:
import urllib.urlretrieve
urlret = urllib.urlretrieve
for tool in tools:
if not os.path.exists(os.path.join(BIN_DIR, tool)):
urlret("http://hgdownload.cse.ucsc.edu/admin/exe/linux.x86_64/%s" % tool,
os.path.join(BIN_DIR, tool))
def download_dnase_human_data(ignore_files=None):
"""
    Connects to the Genboree Epigenome Atlas to download chromatin accessibility
    experiment results for specific cell types.
    @param ignore_files: files to ignore, for example: '01679.DS17212.wig.gz'
"""
global SMOOTHING
if ignore_files is None:
ignore_files = []
epigenome_atlas = ftplib.FTP(host='ftp.genboree.org')
epigenome_atlas.login()
epigenome_atlas.cwd('EpigenomeAtlas/Current-Release/experiment-sample/Chromatin_Accessibility/')
dirs = epigenome_atlas.nlst()
print('Please select cell type:')
print('-1', 'All')
for i, d in enumerate(dirs):
print(i, d)
cell_type = input('Please enter number: ')
pool_process = Pool()
try:
cell_type = int(cell_type)
if cell_type >= len(dirs):
raise ValueError()
else:
if cell_type == -1:
sel_cell_types = list(dirs)
sel_cell_types = sel_cell_types[1:] # skip the meta dir
sel_cell_types = sel_cell_types[26:]
else:
sel_cell_types = [dirs[cell_type]]
sel_dir = ''
try:
for sel_dir in sel_cell_types:
epigenome_atlas.cwd(sel_dir)
print('cd: ', sel_dir)
wig_files = [fl for fl in epigenome_atlas.nlst() if fl[-6:] == 'wig.gz']
if cell_type > 0:
for i, fl in enumerate(wig_files):
print((i, fl))
selected_wig = input("Which file would you like to download? ")
selected_wigs = [wig_files[int(selected_wig)]]
else:
selected_wigs = wig_files
for selected_wig in selected_wigs:
if any(ig in selected_wig for ig in ignore_files):
print('Ignored:', selected_wig)
continue
if not os.path.exists(os.path.join(RAW_DATA_DIR, selected_wig)):
with open(os.path.join(DATA_DIR, selected_wig), 'wb') as dFile:
print(selected_wig)
epigenome_atlas.retrbinary('RETR %s' % selected_wig, dFile.write)
dFile.close()
print("%s download finished!" % selected_wig)
# create pickled small smoothed file
pool_process.apply_async(serialize_wig_file, (selected_wig,))
else:
print('Skipping - file already downloaded')
if sel_dir != dirs[-1]:
epigenome_atlas.cwd('..')
time.sleep(3) # sleep between directories moves
except KeyboardInterrupt:
print("KeyboardInterrupt: stopping downloading new files. Last dir: ", sel_dir)
epigenome_atlas.close()
pool_process.close()
pool_process.join()
except ValueError:
print("The data you enter couldn't be parsed as index")
def download_ncbi_histone(markers_to_download=None, markers_to_ignore=None,
by_experiments_dir='pub/geo/DATA/roadmapepigenomics/by_experiment/'):
"""
Downloads experiments results from NCBI.
@param markers_to_download: specific experiments to be downloaded. Default: histone modifications+mRNA-Seq and RRBS
@param markers_to_ignore: markers to ignore
@param by_experiments_dir: NCBI directory for downloading experiments
"""
if not markers_to_ignore:
markers_to_ignore = ['DNase']
import subprocess
import time
ncbi_ftp = ftplib.FTP(host='ftp.ncbi.nlm.nih.gov')
ncbi_ftp.login()
ncbi_ftp.cwd('/' + by_experiments_dir)
if markers_to_download is None:
experiments = ncbi_ftp.nlst('./')
local_path = os.path.join(OTHER_DATA, "markers")
if not os.path.exists(local_path):
os.mkdir(local_path)
markers_to_download = [ex for ex in experiments if (ex.startswith('H') or ex in ['mRNA-Seq', 'RRBS']) and not (
os.path.exists(local_path + '/' + ex) and len(os.listdir(local_path + '/' + ex)) > 2)]
enough_data = (ex for ex in markers_to_download if len(list(ncbi_ftp.nlst('./%s' % ex))) > 5)
for ex in enough_data:
print('Synchronizing %s' % ex)
if any(ignore in ex for ignore in markers_to_ignore):
continue
ex_dir = by_experiments_dir + ex
if os.path.exists(local_path + '/' + ex) and len(os.listdir(local_path + '/' + ex)) > 2:
            print('Skipping %s' % ex)
continue
subprocess.call(
["rsync", "-azuP", "--exclude=*.bed.gz", "--include=*.wig.gz", "ftp.ncbi.nlm.nih.gov::%s" % ex_dir,
local_path])
time.sleep(5)
def download_from_source(source_path, file_format="bigWig"):
"""
Downloads based on a SOURCE file:
    * each line in the SOURCE file contains an rsync source directory
    * it looks for files.txt (if it exists) to get metadata on the downloaded files
    @param file_format: file format to download
    @param source_path: path to a SOURCE file; data is downloaded into its directory
@return:
"""
import subprocess
import numpy as np
import re
with open(source_path, 'r') as source_file:
sources = list(source_file.readlines())
local_dir = os.path.dirname(source_path)
meta_data_keys = ['file']
meta_data = np.zeros((0, 1), dtype='S100')
meta_file_path = os.path.join(local_dir, 'files.txt')
for source in sources:
source = source.strip()
print('Download {} => {}'.format(source, local_dir))
subprocess.call(
["rsync", "-azuP", "--include=*.{}".format(file_format), "--include=files.txt", "--exclude=*", source,
local_dir])
if not os.path.exists(meta_file_path):
continue
with open(meta_file_path, 'r') as meta_file:
for track in meta_file.readlines():
# skip non relevant files
file_name, file_data = track.split('\t', 1)
if not file_name.endswith('.' + file_format):
continue
file_keys, file_values = zip(*re.findall('(.+?)=(.+?)[;\n$]', file_data))
file_keys = [key.strip() for key in file_keys]
new_meta_keys = [key for key in file_keys if key not in meta_data_keys]
if any(new_meta_keys):
meta_data_tmp = meta_data
meta_data = np.zeros((meta_data.shape[0], meta_data.shape[1] + len(new_meta_keys)), dtype='S100')
meta_data[:, 0: meta_data_tmp.shape[1]] = meta_data_tmp
meta_data_keys += new_meta_keys
file_keys = map(lambda k: meta_data_keys.index(k), file_keys)
new_row = np.zeros(meta_data.shape[1], dtype='S100')
new_row[0] = file_name
for meta_key, meta_value in zip(file_keys, file_values):
new_row[meta_key] = meta_value
meta_data = np.vstack((meta_data, new_row))
os.remove(meta_file_path) # delete the meta file (avoid conflict with other sources)
meta_data = np.vstack((meta_data_keys, meta_data))
np.savetxt(os.path.join(local_dir, 'metadata.csv'), meta_data, delimiter='\t', fmt="%s")
    print('Consider removing incorrect data! Use metadata.csv to find such entries...')
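# Illustrative sketch (not part of the original tool): a hypothetical SOURCE file for
# download_from_source() lists one rsync source per line, e.g.
#   hgdownload.cse.ucsc.edu::goldenPath/hg19/encodeDCC/wgEncodeUwDnase/
#   hgdownload.cse.ucsc.edu::goldenPath/hg19/encodeDCC/wgEncodeOpenChromDnase/
# and would then be consumed as: download_from_source('/data/dnase/SOURCE', file_format='bigWig')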
def transform_ncbi(wig_directory=SIGNAL_DIR):
"""
Transforms .wig.gz files in wig_directory to pkl files
@param wig_directory: directory with cell types subdirectories, with wig files
"""
pool_process = Pool()
for cell in os.listdir(wig_directory):
cell_path = os.path.join(wig_directory, cell)
cell_files = os.listdir(cell_path)
for f in cell_files:
if not f.endswith('.wig.gz') or 'filtered-density' in f:
continue
output_file = f.replace('.gz', '').replace('.wig', '.%i.npz' % SMOOTHING)
if output_file in cell_files:
continue
pool_process.apply_async(process_ncbi_file, (os.path.join(cell_path, f),))
pool_process.close()
pool_process.join()
print('Finished transforming all files!')
def process_ncbi_file(wig_file):
"""
    Smooth and serialize (pickle/npz) a single NCBI wig file
    @param wig_file: wiggle file to transform
"""
print('Processing %s' % wig_file)
SeqLoader.wig_transform(wig_file, SMOOTHING)
print('end processing %s' % wig_file)
def transform_wig_files(directory=DATA_DIR):
"""
Transforms wig.gz files to npz files and archives to RAW_DATA_DIR
@param directory: directory with wig.gz files to transform
"""
pool_process = Pool()
for f in [f for f in os.listdir(directory) if f.endswith('.wig.gz')]:
pool_process.apply_async(serialize_wig_file, (f, directory))
pool_process.close()
pool_process.join()
def serialize_wig_file(wig_file, directory=DATA_DIR):
"""
serialize wig file to npz file
    @param directory: directory in which the wig file is placed
@param wig_file: wig file to npz/pickle
"""
SeqLoader.wig_transform(os.path.join(directory, wig_file), SMOOTHING)
print(os.path.join(directory, wig_file), '-->', os.path.join(RAW_DATA_DIR, wig_file))
os.rename(os.path.join(directory, wig_file), os.path.join(RAW_DATA_DIR, wig_file))
def serialize_dir(in_directory=RAW_DATA_DIR, out_directory=SIGNAL_DIR, file_type='bigWig'):
"""
Serialize bigwig file to npz file
@param file_type: file types to serialize
@param out_directory: output directory
@param in_directory: input directory
"""
import tempfile
import subprocess
if file_type == 'wig':
return transform_wig_files()
if file_type != 'bigWig':
raise NotImplementedError
for filename in os.listdir(in_directory):
if not filename.endswith(file_type):
continue
src_file = os.path.join(in_directory, filename)
dest_file = os.path.join(out_directory, filename.replace('.' + file_type, ''))
if os.path.exists(dest_file+'.npz'):
continue
with tempfile.NamedTemporaryFile('w+', encoding='ascii') as tmp_file:
subprocess.call([BIG_WIG_TO_BED_GRAPH, src_file, tmp_file.name])
seq = SeqLoader.load_bg(tmp_file.name)
SeqLoader.save_result_dict(dest_file, seq)
print('Finish')
def wig_to_bed_graph(cur_trans):
"""
Transforms wig file to bed graph file
@param cur_trans: file to transform as 3-tuple (original.wig, temp.bw, result.bg))
"""
import subprocess
print('Transforming')
print('->'.join(cur_trans))
subprocess.call([WIG_TO_BIG_WIG, cur_trans[0], CHROM_SIZES, cur_trans[1]])
subprocess.call([BIG_WIG_TO_BED_GRAPH, cur_trans[1], cur_trans[2]])
os.remove(cur_trans[1])
print('Completed')
def raw_data_to_bed_graph(wig_directory=RAW_DATA_DIR, bg_directory=BED_GRAPH_DIR):
"""
Transforms raw data wig files to bed graph files
@param wig_directory: directory with wig files
@param bg_directory: directory with bed graph data
"""
pool_process = Pool()
bed_graphs = [f[:-3] for f in os.listdir(bg_directory)]
need_transform = [(os.path.join(wig_directory, f), os.path.join(bg_directory, f[:-7] + '.bw'),
os.path.join(bg_directory, f[:-7] + '.bg')) for f in os.listdir(wig_directory) if
f[:-7] not in bed_graphs]
for trans in need_transform:
pool_process.apply_async(wig_to_bed_graph, (trans,))
pool_process.close()
pool_process.join()
def ucsc_download(src_path, target_path=None, email=None):
"""
Downloads data from UCSC using FTP
@param src_path: path to download to (local)
@param target_path: path to download from (remote)
@param email: email for authentication
"""
if target_path is None:
target_path = input("In which directory would you like to store the genome?")
if email is None:
email = input("Please enter your mail (will be used to enter to hgdownload ftp")
with ftplib.FTP(host='hgdownload.cse.ucsc.edu') as ucsc_ftp:
ucsc_ftp.login(user="anonymous", passwd=email)
ucsc_ftp.cwd(os.path.dirname(target_path))
if not os.path.exists(src_path):
os.makedirs(src_path)
with open(os.path.join(src_path, os.path.basename(target_path)), 'wb') as dFile:
ucsc_ftp.retrbinary('RETR %s' % os.path.basename(target_path), dFile.write)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(help="")
# downloads genome sequence
parser_download_genome = subparsers.add_parser('download_genome',
help='Downloads genome sequence from hgdownload.cse.ucsc.edu')
parser_download_genome.add_argument('directory', help="Directory to store retrived file")
parser_download_genome.add_argument('--genome', help="Genome to download", default='hg19')
parser_download_genome.add_argument('--email', help="Email for authentication to UCSC", default='')
parser_download_genome.set_defaults(
func=lambda args: ucsc_download(args.directory, "goldenPath/%s/bigZips/%s.2bit" % (args.genome, args.genome),
args.email))
# utility function for downloading from multiple FTPs
parser_download_source = subparsers.add_parser('download_sources',
                                                   help='Downloads data from the rsync sources listed in a SOURCE file')
parser_download_source.add_argument('source',
help="A file with each line containing FTP source to download data from")
parser_download_source.set_defaults(func=lambda args: download_from_source(args.source))
parser_transform_ncbi = subparsers.add_parser('transform_ncbi',
help='Transforms .wig.gz files in SIGNAL_DIR to pkl files')
parser_transform_ncbi.add_argument('--directory', help="directory with cell types subdirectories, with wig files",
default=SIGNAL_DIR)
parser_transform_ncbi.set_defaults(func=lambda args: transform_ncbi(args.directory))
parser_download_ncbi_markers = subparsers.add_parser('ncbiMarkers',
                                                          help='Downloads NCBI markers to OTHER_DATA/markers')
parser_download_ncbi_markers.add_argument('--markers_to_download',
help="specific experiments to be downloaded. " +
"Default: histone modifications+mRNA-Seq and RRBS",
default=None)
parser_download_ncbi_markers.add_argument('--markers_to_ignore', help="markers to ignore",
default=None)
parser_download_ncbi_markers.add_argument('--by_experiments_dir', help="NCBI directory for downloading experiments",
default="pub/geo/DATA/roadmapepigenomics/by_experiment/")
parser_download_ncbi_markers.set_defaults(
func=lambda args: download_ncbi_histone(args.markers_to_download, args.markers_to_ignore,
args.by_experiments_dir))
raw_data_to_bed_graph_parser = subparsers.add_parser('raw_to_bed',
                                                         help='Transforms raw data wig files to bedGraph files')
raw_data_to_bed_graph_parser.add_argument('--wig_directory', help="directory with wig files",
default=RAW_DATA_DIR)
raw_data_to_bed_graph_parser.add_argument('--bg_directory', help="directory with bed graph data",
default=BED_GRAPH_DIR)
raw_data_to_bed_graph_parser.set_defaults(func=lambda args: raw_data_to_bed_graph(args.wig_directory,
args.bg_directory))
wig_to_npz_transform = subparsers.add_parser('wig_to_npz',
help='Transforms .wig.gz files in directory to npz files')
wig_to_npz_transform.add_argument('--directory', help="directory with wig.gz files to transform",
default=DATA_DIR)
wig_to_npz_transform.set_defaults(func=lambda args: transform_wig_files(args.directory))
serialize_dir_transform = subparsers.add_parser('serialize_dir',
help='Serializes wig.gz/bigWig files to npz')
serialize_dir_transform.add_argument('--in_directory', help="Input directory", default=RAW_DATA_DIR)
serialize_dir_transform.add_argument('--out_directory', help="Output directory directory", default=SIGNAL_DIR)
serialize_dir_transform.set_defaults(func=lambda args: serialize_dir(args.in_directory, args.out_directory))
command_args = parser.parse_args()
command_args.func(command_args)
| mit | -2,990,876,278,321,959,400 | 43.024038 | 120 | 0.598395 | false |
linkslice/graphite-tools | codahale_metrics.py | 1 | 7854 | #!/usr/bin/env python
#####################################################
## Parse codahale/yammer/dropwizard JSON metrics ##
## put the tuples into a list, ##
## pickle the list and dump it into the graphite ##
## pickle port ##
#####################################################
import pickle
import socket
import struct
import time
import re
import sys
from base64 import b64encode
from optparse import OptionParser
import urllib2, httplib
import json
socket.setdefaulttimeout(30.0)
def processResponse(data,graphiteRoot,pickleport):
timestamp = time.time()
    output = []
if options.verbose: print >> sys.stderr, data
d = json.loads(data)
try:
# Step through JSON objects and sub objects and sub objects.
for everyone, two in d.iteritems():
if type(two).__name__=='dict':
for attr, value in two.items():
if type(value).__name__=='dict':
try:
for left, right in value.items():
if not ((type(right).__name__ == "float") or (type(right).__name__ == "int")): continue
# strip unicode stuff
if '.' in everyone:
blah = str("%s.%s_%s_%s" % ( graphiteRoot, everyone, attr.replace(' ','_'), left.replace(' ','_')))
output.append((blah, (timestamp,right)))
else:
blah = str("%s.%s.%s_%s" % ( graphiteRoot, everyone, attr.replace(' ','_'), left.replace(' ','_')))
output.append((blah, (timestamp,right)))
# Some 'left' objects at this level are of type unicode.
# So, obviously attempting to walk them like they were a dict type
# is going to generate some exceptions.
# Ignore them and move to the next one.
except AttributeError as uh:
continue
else:
#if type(value).__name__=="dict": continue
# strip unicode stuff
blah = str("%s.%s.%s" % ( graphiteRoot, everyone, attr.replace(' ','_')))
output.append((blah,(timestamp,value)))
else:
# strip unicode stuff
blah = str("%s.%s" % ( graphiteRoot, everyone.replace(' ','_')))
output.append((blah, (timestamp,two)))
# probably not needed any longer
    except KeyError as e:
        print >> sys.stderr, "Critical: Key not found: %s" % e
sys.exit(1)
finally:
#prepare the package for delivery!!!
package = pickle.dumps(output, 1)
size = struct.pack('!L', len(package))
# if verbose is set write the pickle to a file for
# further testing
if options.verbose:
fh = open('data.p', 'wb')
pickle.dump(output, fh)
fh.close()
s = socket.socket()
s.connect(('localhost', pickleport))
s.sendall(size)
s.sendall(package)
sys.exit(0)
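# Rough illustration (assumed payload, not from the original): for a dropwizard JSON document like
#   {"jvm": {"memory": {"heap_used": 123456}}}
# and a graphite root of "metrics.host1", processResponse() queues the tuple
#   ('metrics.host1.jvm.memory_heap_used', (1406885498.3, 123456))
# and ships the pickled list to Carbon's pickle port, length-prefixed via struct.pack('!L', ...).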
class HTTPSClientAuthHandler(urllib2.HTTPSHandler):
def __init__(self, key, cert):
urllib2.HTTPSHandler.__init__(self)
self.key = key
self.cert = cert
def https_open(self, req):
return self.do_open(self.getConnection, req)
def getConnection(self, host, timeout=300):
return httplib.HTTPSConnection(host, key_file=self.key, cert_file=self.cert)
if __name__ == '__main__':
parser = OptionParser()
parser.add_option('-H', '--host', dest='host',
help='Hostname/IP of the web server')
parser.add_option('-p', '--port', dest='port',
type='int', default=80,
help='Port to connect to on the web server')
parser.add_option('-u', '--url', dest='url',
help='URL to retrieve data from')
parser.add_option('-n', '--username', dest='username',
help='Username for accessing the page')
parser.add_option('-w', '--password', dest='password',
help='Password for accessing the page')
parser.add_option('-s', '--service', dest='service',
help='Service you want to query')
parser.add_option('-r', '--resource', dest='resource',
help='Resource you want to query')
parser.add_option('-q', '--query', dest='query',
help='Object to query')
parser.add_option('-S', '--ssl', dest='usingssl',
action="store_true",
help='Enable SSL for HTTP connection')
parser.add_option('-C', '--client', dest='client',
help='Client cert to use')
parser.add_option('-K', '--key', dest='key',
help='Client key to use')
parser.add_option('-R', '--graphite-root', dest='graphiteRoot',
help='Graphite root to store data in')
parser.add_option('-P', '--pickle-port', dest='pickleport',
type='int', default=2004,
help='Pickle port to submit data to')
parser.add_option('-v', '--verbose', dest='verbose',
action="store_true",
help='enable verbose output')
options, args = parser.parse_args()
if not options.host:
print >> sys.stderr, "Critical: You must specify the host."
sys.exit(1)
if not options.url:
print >> sys.stderr, "You must specify a URL."
sys.exit(1)
else:
url = options.url
headers = {}
if options.username and options.password:
authstring = ':'.join((
options.username, options.password)).encode('base64')
headers = {
"Authorization": "Basic " + authstring.rstrip(),
}
# default to use SSL if the port is 443
    if options.usingssl or options.port == 443:
if not options.key:
from httplib import HTTPSConnection
try:
connection = HTTPSConnection(options.host, options.port)
connection.request("GET", url, None, headers)
except:
print >> sys.stderr, "Unable to make HTTPS connection to https://%s:%s%s" % ( options.host, options.port, url )
sys.exit(1)
else:
import urllib2
from httplib import HTTPSConnection
opener = urllib2.build_opener(HTTPSClientAuthHandler(options.key, options.client))
connectString = "https://%s:%s%s" % (options.host, options.port, options.url)
try:
response = opener.open(connectString)
except:
print >> sys.stderr, "Could not connect to %s" % connectString
sys.exit(2)
else:
from httplib import HTTPConnection
try:
connection = HTTPConnection(options.host, options.port)
connection.request("GET", url, None, headers)
except Exception as e:
print >> sys.stderr, "Unable to make HTTP connection to http://%s:%s%s because: %s" % ( options.host, options.port, url, e )
sys.exit(1)
graphiteRoot = "%s.%s" % ( options.graphiteRoot, options.host )
if options.key:
returnCode = response.getcode()
else:
response = connection.getresponse()
returnCode = response.status
if returnCode == 200:
processResponse(response.read(),graphiteRoot,options.pickleport)
elif returnCode == 401:
print "Invalid username or password."
sys.exit(1)
elif returnCode == 404:
print "404 not found."
sys.exit(1)
else:
print "Web service error %: " % returnCode #, (None if not response.reason else response.reason) )
sys.exit(1)
| mit | 7,965,960,671,044,199,000 | 38.467337 | 136 | 0.536542 | false |
MOA-2011/enigma2-plugin-extensions-openwebif | plugin/controllers/views/mobile/channels.py | 1 | 7457 | #!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from Plugins.Extensions.OpenWebif.local import tstrings
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1406885498.38012
__CHEETAH_genTimestamp__ = 'Fri Aug 1 18:31:38 2014'
__CHEETAH_src__ = '/home/wslee2/models/5-wo/force1plus/openpli3.0/build-force1plus/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-1+git5+3c0c4fbdb28d7153bf2140459b553b3d5cdd4149-r0/git/plugin/controllers/views/mobile/channels.tmpl'
__CHEETAH_srcLastModified__ = 'Fri Aug 1 18:30:05 2014'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class channels(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(channels, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(u'''<html>\r
<head>\r
\t<title>OpenWebif</title>\r
\t<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />\r
\t<meta name="viewport" content="user-scalable=no, width=device-width"/>\r
\t<meta name="apple-mobile-web-app-capable" content="yes" />\r
\t<link rel="stylesheet" type="text/css" href="/css/jquery.mobile-1.0.min.css" media="screen"/>\r
\t<link rel="stylesheet" type="text/css" href="/css/iphone.css" media="screen"/>\r
\t<script src="/js/jquery-1.6.2.min.js"></script>\r
\t<script src="/js/jquery.mobile-1.0.min.js"></script>\r
</head>\r
<body> \r
\t<div data-role="page">\r
\r
\t\t<div id="header">\r
\t\t\t<div class="button" onClick="history.back()">''')
_v = VFFSL(SL,"tstrings",True)['back'] # u"$tstrings['back']" on line 17, col 49
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['back']")) # from line 17, col 49.
write(u'''</div>\r
\t\t\t<h1><a style="color:#FFF;text-decoration:none;" href=\'/mobile\'>OpenWebif</a></h1>
\t\t</div>\r
\r
\t\t<div id="contentContainer">\r
\t\t\t<ul data-role="listview" data-inset="true" data-theme="d">\r
\t\t\t\t<li data-role="list-divider" role="heading" data-theme="b">''')
_v = VFFSL(SL,"tstrings",True)['channels'] # u"$tstrings['channels']" on line 23, col 64
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['channels']")) # from line 23, col 64.
write(u'''</li>\r
''')
for channel in VFFSL(SL,"channels",True): # generated from line 24, col 5
write(u'''\t\t\t\t<li>\r
\t\t\t\t<a href="/mobile/channelinfo?sref=''')
_v = VFFSL(SL,"channel.ref",True) # u'$channel.ref' on line 26, col 39
if _v is not None: write(_filter(_v, rawExpr=u'$channel.ref')) # from line 26, col 39.
write(u'''" style="padding: 3px;">\r
\t\t\t\t<span class="ui-li-heading" style="margin-top: 0px; margin-bottom: 3px;">''')
_v = VFFSL(SL,"channel.name",True) # u'$channel.name' on line 27, col 78
if _v is not None: write(_filter(_v, rawExpr=u'$channel.name')) # from line 27, col 78.
write(u'''</span>\r
''')
if VFN(VFFSL(SL,"channel",True),"has_key",False)('now_title'): # generated from line 28, col 5
write(u'''\t\t\t\t<span class="ui-li-desc" style="margin-bottom: 0px;">''')
_v = VFFSL(SL,"channel.now_title",True) # u'$channel.now_title' on line 29, col 58
if _v is not None: write(_filter(_v, rawExpr=u'$channel.now_title')) # from line 29, col 58.
write(u'''</span>\r
''')
write(u'''\t\t\t\t</a>\r
\t\t\t\t</li>\r
''')
write(u'''\t\t\t</ul>\r
\t\t</div>\r
\r
\t\t<div id="footer">\r
\t\t\t<p>OpenWebif Mobile</p>\r
\t\t\t<a onclick="document.location.href=\'/index?mode=fullpage\';return false;" href="#">''')
_v = VFFSL(SL,"tstrings",True)['show_full_openwebif'] # u"$tstrings['show_full_openwebif']" on line 39, col 86
if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['show_full_openwebif']")) # from line 39, col 86.
write(u'''</a>\r
\t\t</div>\r
\t\t\r
\t</div>\r
</body>\r
</html>\r
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_channels= 'respond'
## END CLASS DEFINITION
if not hasattr(channels, '_initCheetahAttributes'):
templateAPIClass = getattr(channels, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(channels)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=channels()).run()
| gpl-2.0 | 5,791,372,461,891,725,000 | 37.637306 | 247 | 0.617541 | false |
commonslabgr/donation-box-pi | donation-box/DonationServer.py | 1 | 16989 | #!/usr/bin/python
####################################################
# Name: Donation Box WebSockets daemon
#
# Description:
# Provides the WebSockets Server which polls data from the DB, notifies any connected clients (browsers)
# and accepts messages (donations) from clients that then writes to the DB
#
# Author: Dimitris Koukoulakis
#
# License: GNU GPL v3.0
####################################################
from __future__ import print_function
import tornado.httpserver
import tornado.websocket
import tornado.ioloop
import tornado.web
import serial
import MySQLdb
import time
import threading
import datetime
import decimal
import json
import urllib2
import logging
import subprocess
import os
import random
from time import gmtime, strftime
import sys
sys.path.insert(0,'/home/commonslab/donation-box/resources')
from Adafruit_Thermal import *
import coopboxqr
#Init
json_data=open('/home/commonslab/donation-box/config.json')
config = json.load(json_data)
json_data.close()
logging.basicConfig(filename='DonationServer.log', level=logging.DEBUG, format='%(levelname)s %(asctime)s: %(message)s')
logging.debug('Donation Box Server started')
coin = 0
#See the config.json file for the configuration
curr = config["General"]["currency"]
init_wait_time = config["General"]["Init Wait time (sec)"]
clients = []
dbserver = config["Database"]["server"]
dbuser = config["Database"]["username"]
dbpass = config["Database"]["password"]
dbname = config["Database"]["name"]
#PRINTER
pr_enabled = config["Printer"]["enabled"]
pr_dev = config["Printer"]["dev"]
pr_baudrate = config["Printer"]["baudrate"]
pr_timeout = config["Printer"]["timeout"]
pr_feedlines = config["Printer"]["feedlines"]
pr_heattime = config["Printer"]["heattime"]
#GAME
game_enabled = config["Game"]["enabled"]
game_run = config["Game"]["run"]
#UI
ui_sendsum = config["UI"]["SendSumDonations"]
#NETWORK
net_enabled = config["Network"]["enabled"]
net_url = config["Network"]["URL"]
net_send = config["Network"]["insert"]
net_get = config["Network"]["get"]
net_getparam = config["Network"]["get_param"]
net_boxid = config["Network"]["boxID"]
#wait at start up for mySQL to load
time.sleep(init_wait_time)
#For normal Screen (No Touch) make donations automatic
#ONLY for single project!
auto_donation = False
if pr_enabled:
printer = Adafruit_Thermal(pr_dev, pr_baudrate, timeout=pr_timeout)
def PrintMSFReceipt():
printer.begin(pr_heattime)
printer.setTimes(0,0) #print as fast as possible
printer.feed(1)
printer.printBitmap(msflogo.width, msflogo.height, msflogo.data)
printer.feed(2)
printer.printBitmap(msfty.width, msfty.height, msfty.data)
printer.feed(1)
printer.printBitmap(msfqr.width, msfqr.height, msfqr.data)
printer.feed(1)
printer.doubleHeightOn()
printer.println(' msf.gr')
printer.println(' +30 210 5200500')
printer.feed(pr_feedlines)
def PrintCoopBoxReceipt(amount,uid):
printer.begin(pr_heattime)
printer.setTimes(0,0) #print as fast as possible
printer.doubleHeightOn()
printer.println(' CODE: {0}'.format(uid))
printer.doubleHeightOff()
printer.feed(1)
printer.println('scan the QR code or go to ')
printer.println('http://thecoopbox.commonslab.gr')
printer.println('and register for your perk')
printer.feed(1)
printer.doubleHeightOn()
printer.println(' {0} EUR'.format(amount))
printer.doubleHeightOff()
printer.feed(1)
#if (amount == '0.50'):
# printer.printBitmap(halfeuro.width, halfeuro.height, halfeuro.data)
#elif (amount == '1.00'):
# printer.printBitmap(oneeuro.width, oneeuro.height, oneeuro.data)
#elif (amount == '2.00'):
# printer.printBitmap(twoeuro.width, twoeuro.height, twoeuro.data)
printer.feed(1)
printer.printBitmap(coopboxqr.width, coopboxqr.height, coopboxqr.data)
printer.feed(pr_feedlines)
def Th_print(currency,value,name,email,prname,prid,donationid,uid):
if not pr_enabled:
logging.debug('Thermal printer is disabled')
return
PrintCoopBoxReceipt(value,uid)
#os.system('/home/commonslab/donation-box/PrintCoopReceipt.py -a {0} -i {1}'.format(value,uid))
#THREAD:Start printing receipt
#p = threading.Thread(target=PrintCoopBoxReceipt(value,uid))
#p.daemon = True
#p.start()
#Generate Unique Donation ID for registering it to the DB and print it for user
def GenerateUID(amount):
#Generate random 5 digit number
r = random.randint(10000,99999)
#Get a list of the digits
l = list(str(r))
#Get the sum of those digits
c = int(l[0])+int(l[1])+int(l[2])+int(l[3])+int(l[4])
#Get the modulus of that sum
c = c%10;
a = str(amount)[0]
'''
if (amount == 1):
a = random.randint(0,2)
elif (amount == 2):
a = random.randint(3,5)
elif (amount == 0.5):
a = random.randint(6,9)
'''
uid = str('{0}{1}{2}').format(a,r,c)
return uid
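# Worked example (illustrative only): for amount=2 and a random r of 48319 the digit sum is
# 4+8+3+1+9 = 25, the check digit is 25 % 10 = 5 and str(2)[0] = '2', so GenerateUID(2) -> '2483195'.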
#Retrieve Donations from server
def RetrieveDonations(pid):
url = net_url+"/"+net_get+"?"+net_getparam+"="+pid
#url = "http://thecoopbox.commonslab.gr/network_output.php?idproject={0}".format(pid)
response = urllib2.urlopen(url)
data = json.loads(response.read())
new_amount = data[0]['amount']
logging.debug(json.dumps(data))
return new_amount
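# Expected response shape (an assumption inferred from the parsing above): a JSON array whose
# first element carries the running total, e.g. [{"amount": "123.50"}].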
#Submit Donation data to server
def SendDonationToServer(prid,value,uid):
from time import gmtime, strftime
timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
#uid = GenerateUID(value)
#data = {"ProjectID":1,"BoxID":1,"DateTime":time.strftime('%Y-%m-%d %H:%M:%S'),"Amount":2}
data = {}
data['ProjectID'] = prid
data['BoxID'] = net_boxid
data['Amount'] = value
data['DonationTime'] = timestamp
data['UID'] = uid
logging.debug(json.dumps(data))
req = urllib2.Request(net_url+'/'+net_send)
req.add_header('Content-Type', 'application/json')
#print "Sending:"
#print json.dumps(data)
logging.debug('Sending: {0}'.format(data))
response = urllib2.urlopen(req, json.dumps(data))
    resp_body = response.read()  # read the body once; a second read() returns an empty string
    logging.debug('Response from {0}/{1}: {2}'.format(net_url, net_send, resp_body))
    if ("successfully" in resp_body):
return True
else:
return False
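# Example payload (hypothetical values) POSTed to <net_url>/<net_send> by SendDonationToServer():
#   {"ProjectID": 1, "BoxID": 1, "Amount": "2.00", "DonationTime": "2016-05-01 12:34:56", "UID": "2483195"}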
#Check for inserted coins and send any to the websocket clients
def UpdateCoins():
#THREAD: This function runs inside the thread
LastTime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
while True:
#Connect to database
        dbConn = MySQLdb.connect(dbserver, dbuser, dbpass, dbname)  # raises MySQLdb.Error if the connection fails
cursor = dbConn.cursor()
try:
cursor.execute('SELECT timeinserted, value, currency FROM coinacceptor WHERE timeinserted > "{0}" ORDER BY timeinserted ASC'.format(LastTime))
#print('SELECT timeinserted, value, currency FROM coinacceptor WHERE timeinserted > "{0}" ORDER BY timeinserted ASC'.format(LastTime))
for (timeinserted, value, currency) in cursor:
LastTime = timeinserted
global coin
coin = value
global curr
curr = currency
#logging.debug('{0}|{1}'.format(coin,curr))
#print('{0}|{1}'.format(coin,curr))
if coin != 0:
#Send value to web socket clients
SendCoins('{0}|{1}'.format(coin,curr))
if auto_donation:
ProcessDonation('PLAY|||0|COOP|1|{0}EUR'.format(value))
cursor.close(); #close the cursor
except MySQLdb.IntegrityError:
logging.error('failed to fetch data')
finally:
cursor.close(); #close the cursor
#Sleep for a while to allow other processes
time.sleep(0.5);
#Check for money that have not been donated yet
def GetCoins():
global dbserver
global dbname
global dbuser
global dbpass
global coin
global curr
#Connect to Database
    dbConn = MySQLdb.connect(dbserver, dbuser, dbpass, dbname)  # raises MySQLdb.Error if the connection fails
cursor = dbConn.cursor()
try:
#See if there are coins inserted that have not been donated
cursor.execute('SELECT currency,value,donationid FROM coinacceptor WHERE donationid < 0')
# Get returned values
for (currency,value,donationid) in cursor:
#TODO: What should happen if one coin is of differenct currency?
curr = currency
coin += value
logging.debug('DonationID: '+repr(donationid)+' Currency: '+repr(curr)+' Value: '+repr(coin))
if coin != 0:
return str('{0}|{1}'.format(coin,curr))
else:
return 0
cursor.close(); #close the cursor
except MySQLdb.IntegrityError:
logging.error('failed to fetch data')
finally:
cursor.close() #close just incase it failed
def InsertRegistration(name,email):
from time import gmtime, strftime
timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    dbConn = MySQLdb.connect(dbserver, dbuser, dbpass, dbname)  # raises MySQLdb.Error if the connection fails
logging.debug('Insert registration to DB')
dbConn.set_character_set('utf8')
cursor = dbConn.cursor()
cursor.execute('SET NAMES utf8;')
cursor.execute('SET CHARACTER SET utf8;')
cursor.execute('SET character_set_connection=utf8;')
logging.debug('Name:'+name+' Email:'+email)
try:
#Insert registration
cursor.execute('INSERT INTO newsletter (email,name,timestamp) VALUES ("{0}","{1}","{2}")'.format(email,name,timestamp))
dbConn.commit()
cursor.close()
except MySQLdb.IntegrityError:
logging.error('failed to fetch data')
for client in clients:
client.write_message("ERROR")
finally:
cursor.close() #close just incase it failed
def InsertDonation(currency,value,name,email,public, prname, prid, uid):
from time import gmtime, strftime
timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    dbConn = MySQLdb.connect(dbserver, dbuser, dbpass, dbname)  # raises MySQLdb.Error if the connection fails
logging.debug('Insert donation to DB')
dbConn.set_character_set('utf8')
cursor = dbConn.cursor()
cursor.execute('SET NAMES utf8;')
cursor.execute('SET CHARACTER SET utf8;')
cursor.execute('SET character_set_connection=utf8;')
logging.debug('Name:'+name+' Email:'+email+' public:'+public+' Project Name:'+prname+' ProjectID:'+prid+' Currency:'+currency+' Value:'+value)
if (public == 'false'):
public = 0
else:
public = 1
try:
#Insert donation
logging.debug('INSERT INTO donations (currency,ammount,projectname,email,name,public,projectid,timestamp, uid) VALUES ("{0}",{1},"{2}","{3}","{4}",{5},{6},"{7}","{8}")'.format(currency,value,prname,email,name,public,prid,timestamp,uid))
cursor.execute('INSERT INTO donations (currency,ammount,projectname,email,name,public,projectid,timestamp, uid) VALUES ("{0}",{1},"{2}","{3}","{4}",{5},{6},"{7}","{8}")'.format(currency,value,prname,email,name,public,prid,timestamp,uid))
dbConn.commit()
#Get donation ID
donationid = cursor.lastrowid
#Update coins inserted with donation ID
cursor.execute('UPDATE coinacceptor SET donationid={0} WHERE donationid=-1'.format(donationid))
dbConn.commit()
cursor.close()
except MySQLdb.IntegrityError:
logging.error('failed to fetch data')
for client in clients:
client.write_message("ERROR")
finally:
cursor.close() #close just incase it failed
for client in clients:
client.write_message("SUCCESS")
#SendCoins('{0}|{1}'.format(value,currency))
logging.info('Data written successfuly')
return donationid;
def GetSumDonations():
return GetDonations(-99)
#Get amount of donations for a project
def GetDonations(pid):
#Connect to Database
    dbConn = MySQLdb.connect(dbserver, dbuser, dbpass, dbname)  # raises MySQLdb.Error if the connection fails
cursor = dbConn.cursor()
value = 0
try:
if (pid == -99):
cursor.execute('SELECT SUM(Ammount) FROM donations')
else:
cursor.execute('SELECT SUM(Ammount) FROM donations WHERE ProjectID = {0}'.format(pid))
        row = cursor.fetchone()
        if row is not None and row[0] is not None:
            value = float(row[0])
cursor.close(); #close the cursor
logging.debug('Get project total amount donated: %s', value)
return value
except MySQLdb.IntegrityError:
logging.error('failed to fetch data')
finally:
cursor.close()
#Send coins that have not been donated to clients
def SendCoins(msg):
logging.debug('COINS|{0}'.format(msg))
for client in clients:
client.write_message('COINS|{0}'.format(msg))
#Reset global vars
global coin
global curr
coin = 0
curr = "EUR"
def SendSumDonations(msg):
logging.debug('PID|-99|TOTAL|{0}'.format(msg))
for client in clients:
client.write_message('PID|-99|TOTAL|{0}'.format(msg))
#Send donations for a specified project ID to clients
def SendDonations(pid, msg):
if (net_enabled):
msg = RetrieveDonations(pid)
logging.debug('PID|{0}|TOTAL|{1}'.format(pid,msg))
for client in clients:
client.write_message('PID|{0}|TOTAL|{1}'.format(pid,msg))
#Process Registration
def ProcessRegistration(msg):
logging.debug('Process registration: %s', msg)
values = msg.split('|')
name = values[1]
email = values[2]
#Insert Newsletter registration to database
InsertRegistration(name,email)
#Flag UIDStored
def UIDStored(uid, value):
    dbConn = MySQLdb.connect(dbserver, dbuser, dbpass, dbname)  # raises MySQLdb.Error if the connection fails
cursor = dbConn.cursor()
try:
#See if there are coins inserted that have not been donated
logging.debug('UPDATE donations SET uidStored={0} WHERE uid="{1}"'.format(value,uid))
cursor.execute('UPDATE donations SET uidStored={0} WHERE uid="{1}"'.format(value,uid))
dbConn.commit()
cursor.close() #close the cursor
except MySQLdb.IntegrityError:
logging.error('UIDStored: failed to fetch data')
finally:
cursor.close() #close just incase it failed
#Process Donation
def ProcessDonation(msg):
logging.debug('Process donation: %s', msg)
values = msg.split('|')
name = values[1]
email = values[2]
public = values[3]
prname = values[4]
#This depends on the Language settings
#projectdetails = values[4].split('?') #contains language info (e.g. 81?lang=el)
#prid = projectdetails[0]
prid = values[5]
#lang = projectdetails[1] #lang support for printer limited to ASCII
dondata = values[6]
l = len(dondata)
donvalue = dondata[0:l-3]
doncurr = dondata[l-3:]
#Switch to Game
if (values[0] == 'PLAY'):
SwitchToGame();
if net_enabled:
uid = GenerateUID(donvalue)
#Insert Donation to Database
donationid = InsertDonation(doncurr,donvalue,name,email,public,prname,prid,uid)
if (SendDonationToServer(prid,donvalue,uid)):
UIDStored(uid, True)
else:
UIDStored(uid, False)
#Print receipt
Th_print(doncurr,donvalue,name,email,prname,prid,donationid,uid)
else:
#Insert Donation to Database
donationid = InsertDonation(doncurr,donvalue,name,email,public,prname,prid,0)
Th_print(doncurr,donvalue,name,email,prname,prid,donationid,0)
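# Message format handled above (field values below are made up for illustration):
#   'DONATION|Jane Doe|[email protected]|true|Some Project|81|2.00EUR'
# i.e. ACTION|name|email|public|project name|project id|<amount><currency (3 chars)>.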
#Close window playing video loop
def CloseVideo():
logging.debug('Close Video window')
os.system("wmctrl -a 'Donation Box |'")
#Process Messages
def processmsg(msg):
logging.debug('Process message: %s', msg)
values = msg.split('|')
if (values[0] == 'REQPROJECTTOTAL'):
s = GetDonations(values[1])
SendDonations(values[1],s)
elif (values[0] == 'NEWSLETTER'):
ProcessRegistration(msg)
elif ( (values[0] == 'DONATION') or (values[0] == 'PLAY') ):
ProcessDonation(msg)
elif (values[0] == 'VIDEO_CLICK'):
CloseVideo()
#Switch to Game Window
def SwitchToGame():
if game_enabled:
logging.debug('Switch to: ')
logging.debug(game_run)
#For Reaction game
#os.system("wmctrl -a reflex_game")
#For MAME or Pacman game
os.system(game_run)
#HTTP Server Handler
class WSHandler(tornado.websocket.WebSocketHandler):
def check_origin(self, origin):
return True
def open(self):
logging.info('New connection was opened')
clients.append(self)
#Get inserted coins that have not been donated
s = GetCoins()
# Send value and currency to web socket client
SendCoins(s)
#Get donations
#s = GetDonations(1) #PID=1 if we run the box as a single donation project, otherwise we need the Project ID
#Send Donations to web socket clients
#SendDonations(1,s)
if (ui_sendsum):
s = GetSumDonations()
SendSumDonations(s)
#Process any received messages
def on_message(self, message):
processmsg(message)
def on_close(self):
logging.info('Connection was closed...')
clients.remove(self)
#THREAD:Start looking for newly inserted coins
t = threading.Thread(target=UpdateCoins)
t.daemon = True
t.start()
application = tornado.web.Application([
(r'/ws', WSHandler),
])
if __name__ == "__main__":
#Start the HTTP server and listen at port 8888
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(8888)
tornado.ioloop.IOLoop.instance().start()
| gpl-3.0 | -4,253,610,225,365,085,700 | 31.421756 | 243 | 0.6825 | false |
lgarren/spack | lib/spack/spack/test/spec_semantics.py | 1 | 27062 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import spack.architecture
import pytest
from spack.spec import Spec, UnsatisfiableSpecError
from spack.spec import substitute_abstract_variants, parse_anonymous_spec
from spack.variant import InvalidVariantValueError
from spack.variant import MultipleValuesInExclusiveVariantError
def target_factory(spec_string, target_concrete):
spec = Spec(spec_string)
if target_concrete:
spec._mark_concrete()
substitute_abstract_variants(spec)
return spec
def argument_factory(argument_spec, left):
try:
# If it's not anonymous, allow it
right = target_factory(argument_spec, False)
except Exception:
right = parse_anonymous_spec(argument_spec, left.name)
return right
def check_satisfies(target_spec, argument_spec, target_concrete=False):
left = target_factory(target_spec, target_concrete)
right = argument_factory(argument_spec, left)
# Satisfies is one-directional.
assert left.satisfies(right)
assert left.satisfies(argument_spec)
# If left satisfies right, then we should be able to constrain
# right by left. Reverse is not always true.
right.copy().constrain(left)
def check_unsatisfiable(target_spec, argument_spec, target_concrete=False):
left = target_factory(target_spec, target_concrete)
right = argument_factory(argument_spec, left)
assert not left.satisfies(right)
assert not left.satisfies(argument_spec)
with pytest.raises(UnsatisfiableSpecError):
right.copy().constrain(left)
def check_constrain(expected, spec, constraint):
exp = Spec(expected)
spec = Spec(spec)
constraint = Spec(constraint)
spec.constrain(constraint)
assert exp == spec
def check_constrain_changed(spec, constraint):
spec = Spec(spec)
assert spec.constrain(constraint)
def check_constrain_not_changed(spec, constraint):
spec = Spec(spec)
assert not spec.constrain(constraint)
def check_invalid_constraint(spec, constraint):
spec = Spec(spec)
constraint = Spec(constraint)
with pytest.raises(UnsatisfiableSpecError):
spec.constrain(constraint)
@pytest.mark.usefixtures('config', 'builtin_mock')
class TestSpecSematics(object):
"""This tests satisfies(), constrain() and other semantic operations
on specs.
"""
def test_satisfies(self):
check_satisfies('[email protected]', '@0:1')
check_satisfies('libdwarf^[email protected]', '^libelf@0:1')
def test_satisfies_namespace(self):
check_satisfies('builtin.mpich', 'mpich')
check_satisfies('builtin.mock.mpich', 'mpich')
# TODO: only works for deps now, but shouldn't we allow for root spec?
# check_satisfies('builtin.mock.mpich', 'mpi')
check_satisfies('builtin.mock.mpich', 'builtin.mock.mpich')
check_unsatisfiable('builtin.mock.mpich', 'builtin.mpich')
def test_satisfies_namespaced_dep(self):
"""Ensure spec from same or unspecified namespace satisfies namespace
constraint."""
check_satisfies('mpileaks ^builtin.mock.mpich', '^mpich')
check_satisfies('mpileaks ^builtin.mock.mpich', '^mpi')
check_satisfies(
'mpileaks ^builtin.mock.mpich', '^builtin.mock.mpich')
check_unsatisfiable(
'mpileaks ^builtin.mock.mpich', '^builtin.mpich')
def test_satisfies_compiler(self):
check_satisfies('foo%gcc', '%gcc')
check_satisfies('foo%intel', '%intel')
check_unsatisfiable('foo%intel', '%gcc')
check_unsatisfiable('foo%intel', '%pgi')
def test_satisfies_compiler_version(self):
check_satisfies('foo%gcc', '%[email protected]')
check_satisfies('foo%intel', '%[email protected]')
check_satisfies('foo%[email protected]', '%[email protected]:4.6')
check_satisfies('[email protected]%[email protected]', '@1:3%[email protected]:4.6')
check_unsatisfiable('foo%[email protected]', '%[email protected]:4.6')
check_unsatisfiable('[email protected]%pgi', '@1:3%pgi')
check_unsatisfiable('[email protected]%[email protected]', '@1:3%[email protected]:4.6')
check_satisfies('foo %[email protected]', '%[email protected]')
check_unsatisfiable('foo %[email protected]', '%[email protected]')
def test_satisfies_architecture(self):
check_satisfies(
'foo platform=test',
'platform=test')
check_satisfies(
'foo platform=linux',
'platform=linux')
check_satisfies(
'foo platform=test',
'platform=test target=frontend')
check_satisfies(
'foo platform=test',
'platform=test os=frontend target=frontend')
check_satisfies(
'foo platform=test os=frontend target=frontend',
'platform=test')
check_unsatisfiable(
'foo platform=linux',
'platform=test os=redhat6 target=x86_32')
check_unsatisfiable(
'foo os=redhat6',
'platform=test os=debian6 target=x86_64')
check_unsatisfiable(
'foo target=x86_64',
'platform=test os=redhat6 target=x86_32')
check_satisfies(
'foo arch=test-None-None',
'platform=test')
check_satisfies(
'foo arch=test-None-frontend',
'platform=test target=frontend')
check_satisfies(
'foo arch=test-frontend-frontend',
'platform=test os=frontend target=frontend')
check_satisfies(
'foo arch=test-frontend-frontend',
'platform=test')
check_unsatisfiable(
'foo arch=test-frontend-frontend',
'platform=test os=frontend target=backend')
check_satisfies(
'foo platform=test target=frontend os=frontend',
'platform=test target=frontend os=frontend')
check_satisfies(
'foo platform=test target=backend os=backend',
'platform=test target=backend os=backend')
check_satisfies(
'foo platform=test target=default_target os=default_os',
'platform=test os=default_os')
check_unsatisfiable(
'foo platform=test target=x86_32 os=redhat6',
'platform=linux target=x86_32 os=redhat6')
def test_satisfies_dependencies(self):
check_satisfies('mpileaks^mpich', '^mpich')
check_satisfies('mpileaks^zmpi', '^zmpi')
check_unsatisfiable('mpileaks^mpich', '^zmpi')
check_unsatisfiable('mpileaks^zmpi', '^mpich')
def test_satisfies_dependency_versions(self):
check_satisfies('mpileaks^[email protected]', '^mpich@1:3')
check_unsatisfiable('mpileaks^[email protected]', '^[email protected]')
check_satisfies(
'mpileaks^[email protected]^[email protected]', '^mpich@1:3^[email protected]:1.6')
check_unsatisfiable(
'mpileaks^[email protected]^[email protected]', '^mpich@1:3^[email protected]:1.6')
check_unsatisfiable(
'mpileaks^[email protected]^[email protected]', '^mpich@1:3^[email protected]:1.6')
check_unsatisfiable(
'mpileaks^[email protected]^[email protected]', '^mpich@1:3^[email protected]:1.6')
def test_satisfies_virtual_dependencies(self):
check_satisfies('mpileaks^mpi', '^mpi')
check_satisfies('mpileaks^mpi', '^mpich')
check_satisfies('mpileaks^mpi', '^zmpi')
check_unsatisfiable('mpileaks^mpich', '^zmpi')
def test_satisfies_virtual_dependency_versions(self):
check_satisfies('mpileaks^[email protected]', '^[email protected]:1.6')
check_unsatisfiable('mpileaks^mpi@3', '^[email protected]:1.6')
check_satisfies('mpileaks^mpi@2:', '^mpich')
check_satisfies('mpileaks^mpi@2:', '^[email protected]')
check_satisfies('mpileaks^mpi@2:', '^[email protected]')
check_satisfies('mpileaks^mpi@1:', '^mpich2')
check_satisfies('mpileaks^mpi@2:', '^mpich2')
check_unsatisfiable('mpileaks^mpi@3:', '^[email protected]')
check_unsatisfiable('mpileaks^mpi@3:', '^mpich2')
check_unsatisfiable('mpileaks^mpi@3:', '^[email protected]')
def test_satisfies_matching_variant(self):
check_satisfies('mpich+foo', 'mpich+foo')
check_satisfies('mpich~foo', 'mpich~foo')
check_satisfies('mpich foo=1', 'mpich foo=1')
# confirm that synonymous syntax works correctly
check_satisfies('mpich+foo', 'mpich foo=True')
check_satisfies('mpich foo=true', 'mpich+foo')
check_satisfies('mpich~foo', 'mpich foo=FALSE')
check_satisfies('mpich foo=False', 'mpich~foo')
def test_satisfies_multi_value_variant(self):
# Check quoting
check_satisfies('multivalue_variant foo="bar,baz"',
'multivalue_variant foo="bar,baz"')
check_satisfies('multivalue_variant foo=bar,baz',
'multivalue_variant foo=bar,baz')
check_satisfies('multivalue_variant foo="bar,baz"',
'multivalue_variant foo=bar,baz')
# A more constrained spec satisfies a less constrained one
check_satisfies('multivalue_variant foo="bar,baz"',
'multivalue_variant foo="bar"')
check_satisfies('multivalue_variant foo="bar,baz"',
'multivalue_variant foo="baz"')
check_satisfies('multivalue_variant foo="bar,baz,barbaz"',
'multivalue_variant foo="bar,baz"')
check_satisfies('multivalue_variant foo="bar,baz"',
'foo="bar,baz"')
check_satisfies('multivalue_variant foo="bar,baz"',
'foo="bar"')
def test_satisfies_single_valued_variant(self):
"""Tests that the case reported in
https://github.com/LLNL/spack/pull/2386#issuecomment-282147639
is handled correctly.
"""
a = Spec('a foobar=bar')
a.concretize()
assert a.satisfies('foobar=bar')
# Assert that an autospec generated from a literal
# gives the right result for a single valued variant
assert 'foobar=bar' in a
assert 'foobar=baz' not in a
assert 'foobar=fee' not in a
# ... and for a multi valued variant
assert 'foo=bar' in a
# Check that conditional dependencies are treated correctly
assert '^b' in a
def test_unsatisfied_single_valued_variant(self):
a = Spec('a foobar=baz')
a.concretize()
assert '^b' not in a
mv = Spec('multivalue_variant')
mv.concretize()
assert '[email protected]' not in mv
def test_indirect_unsatisfied_single_valued_variant(self):
spec = Spec('singlevalue-variant-dependent')
spec.concretize()
assert '[email protected]' not in spec
def test_unsatisfiable_multi_value_variant(self):
# Semantics for a multi-valued variant is different
# Depending on whether the spec is concrete or not
a = target_factory(
'multivalue_variant foo="bar"', target_concrete=True
)
spec_str = 'multivalue_variant foo="bar,baz"'
b = Spec(spec_str)
assert not a.satisfies(b)
assert not a.satisfies(spec_str)
# A concrete spec cannot be constrained further
with pytest.raises(UnsatisfiableSpecError):
a.constrain(b)
a = Spec('multivalue_variant foo="bar"')
spec_str = 'multivalue_variant foo="bar,baz"'
b = Spec(spec_str)
# The specs are abstract and they **could** be constrained
assert a.satisfies(b)
assert a.satisfies(spec_str)
# An abstract spec can instead be constrained
assert a.constrain(b)
a = target_factory(
'multivalue_variant foo="bar,baz"', target_concrete=True
)
spec_str = 'multivalue_variant foo="bar,baz,quux"'
b = Spec(spec_str)
assert not a.satisfies(b)
assert not a.satisfies(spec_str)
# A concrete spec cannot be constrained further
with pytest.raises(UnsatisfiableSpecError):
a.constrain(b)
a = Spec('multivalue_variant foo="bar,baz"')
spec_str = 'multivalue_variant foo="bar,baz,quux"'
b = Spec(spec_str)
# The specs are abstract and they **could** be constrained
assert a.satisfies(b)
assert a.satisfies(spec_str)
# An abstract spec can instead be constrained
assert a.constrain(b)
# ...but will fail during concretization if there are
# values in the variant that are not allowed
with pytest.raises(InvalidVariantValueError):
a.concretize()
# This time we'll try to set a single-valued variant
a = Spec('multivalue_variant fee="bar"')
spec_str = 'multivalue_variant fee="baz"'
b = Spec(spec_str)
# The specs are abstract and they **could** be constrained,
# as before concretization I don't know which type of variant
# I have (if it is not a BV)
assert a.satisfies(b)
assert a.satisfies(spec_str)
# A variant cannot be parsed as single-valued until we try to
# concretize. This means that we can constrain the variant above
assert a.constrain(b)
# ...but will fail during concretization if there are
# multiple values set
with pytest.raises(MultipleValuesInExclusiveVariantError):
a.concretize()
def test_unsatisfiable_variant_types(self):
# These should fail due to incompatible types
        # FIXME: these need to be checked as the new relaxed
        # FIXME: semantics makes them fail (constrain does not raise)
# check_unsatisfiable('multivalue_variant +foo',
# 'multivalue_variant foo="bar"')
# check_unsatisfiable('multivalue_variant ~foo',
# 'multivalue_variant foo="bar"')
check_unsatisfiable(
target_spec='multivalue_variant foo="bar"',
argument_spec='multivalue_variant +foo',
target_concrete=True
)
check_unsatisfiable(
target_spec='multivalue_variant foo="bar"',
argument_spec='multivalue_variant ~foo',
target_concrete=True
)
def test_satisfies_unconstrained_variant(self):
# only asked for mpich, no constraints. Either will do.
check_satisfies('mpich+foo', 'mpich')
check_satisfies('mpich~foo', 'mpich')
check_satisfies('mpich foo=1', 'mpich')
def test_unsatisfiable_variants(self):
# This case is different depending on whether the specs are concrete.
# 'mpich' is not concrete:
check_satisfies('mpich', 'mpich+foo', False)
check_satisfies('mpich', 'mpich~foo', False)
check_satisfies('mpich', 'mpich foo=1', False)
# 'mpich' is concrete:
check_unsatisfiable('mpich', 'mpich+foo', True)
check_unsatisfiable('mpich', 'mpich~foo', True)
check_unsatisfiable('mpich', 'mpich foo=1', True)
def test_unsatisfiable_variant_mismatch(self):
        # No match in specs
check_unsatisfiable('mpich~foo', 'mpich+foo')
check_unsatisfiable('mpich+foo', 'mpich~foo')
check_unsatisfiable('mpich foo=True', 'mpich foo=False')
def test_satisfies_matching_compiler_flag(self):
check_satisfies('mpich cppflags="-O3"', 'mpich cppflags="-O3"')
check_satisfies(
'mpich cppflags="-O3 -Wall"', 'mpich cppflags="-O3 -Wall"'
)
def test_satisfies_unconstrained_compiler_flag(self):
# only asked for mpich, no constraints. Any will do.
check_satisfies('mpich cppflags="-O3"', 'mpich')
def test_unsatisfiable_compiler_flag(self):
# This case is different depending on whether the specs are concrete.
# 'mpich' is not concrete:
check_satisfies('mpich', 'mpich cppflags="-O3"', False)
# 'mpich' is concrete:
check_unsatisfiable('mpich', 'mpich cppflags="-O3"', True)
def test_copy_satisfies_transitive(self):
spec = Spec('dttop')
spec.concretize()
copy = spec.copy()
for s in spec.traverse():
assert s.satisfies(copy[s.name])
assert copy[s.name].satisfies(s)
def test_unsatisfiable_compiler_flag_mismatch(self):
        # No match in specs
check_unsatisfiable(
'mpich cppflags="-O3"', 'mpich cppflags="-O2"')
def test_satisfies_virtual(self):
# Don't use check_satisfies: it checks constrain() too, and
# you can't constrain a non-virtual by a virtual.
assert Spec('mpich').satisfies(Spec('mpi'))
assert Spec('mpich2').satisfies(Spec('mpi'))
assert Spec('zmpi').satisfies(Spec('mpi'))
def test_satisfies_virtual_dep_with_virtual_constraint(self):
"""Ensure we can satisfy virtual constraints when there are multiple
vdep providers in the specs."""
assert Spec('netlib-lapack ^openblas').satisfies(
'netlib-lapack ^openblas'
)
assert not Spec('netlib-lapack ^netlib-blas').satisfies(
'netlib-lapack ^openblas'
)
assert not Spec('netlib-lapack ^openblas').satisfies(
'netlib-lapack ^netlib-blas'
)
assert Spec('netlib-lapack ^netlib-blas').satisfies(
'netlib-lapack ^netlib-blas'
)
def test_satisfies_same_spec_with_different_hash(self):
"""Ensure that concrete specs are matched *exactly* by hash."""
s1 = Spec('mpileaks').concretized()
s2 = s1.copy()
assert s1.satisfies(s2)
assert s2.satisfies(s1)
# Simulate specs that were installed before and after a change to
# Spack's hashing algorithm. This just reverses s2's hash.
s2._hash = s1.dag_hash()[-1::-1]
assert not s1.satisfies(s2)
assert not s2.satisfies(s1)
# ========================================================================
# Indexing specs
# ========================================================================
def test_self_index(self):
s = Spec('callpath')
assert s['callpath'] == s
def test_dep_index(self):
s = Spec('callpath')
s.normalize()
assert s['callpath'] == s
assert type(s['dyninst']) == Spec
assert type(s['libdwarf']) == Spec
assert type(s['libelf']) == Spec
assert type(s['mpi']) == Spec
assert s['dyninst'].name == 'dyninst'
assert s['libdwarf'].name == 'libdwarf'
assert s['libelf'].name == 'libelf'
assert s['mpi'].name == 'mpi'
def test_spec_contains_deps(self):
s = Spec('callpath')
s.normalize()
assert 'dyninst' in s
assert 'libdwarf' in s
assert 'libelf' in s
assert 'mpi' in s
@pytest.mark.usefixtures('config')
def test_virtual_index(self):
s = Spec('callpath')
s.concretize()
s_mpich = Spec('callpath ^mpich')
s_mpich.concretize()
s_mpich2 = Spec('callpath ^mpich2')
s_mpich2.concretize()
s_zmpi = Spec('callpath ^zmpi')
s_zmpi.concretize()
assert s['mpi'].name != 'mpi'
assert s_mpich['mpi'].name == 'mpich'
assert s_mpich2['mpi'].name == 'mpich2'
assert s_zmpi['zmpi'].name == 'zmpi'
for spec in [s, s_mpich, s_mpich2, s_zmpi]:
assert 'mpi' in spec
# ========================================================================
# Constraints
# ========================================================================
def test_constrain_variants(self):
check_constrain('[email protected]:2.5', 'libelf@0:2.5', '[email protected]:3')
check_constrain(
'[email protected]:2.5%[email protected]:4.6',
'libelf@0:2.5%gcc@2:4.6',
'[email protected]:3%[email protected]:4.7'
)
check_constrain('libelf+debug+foo', 'libelf+debug', 'libelf+foo')
check_constrain(
'libelf+debug+foo', 'libelf+debug', 'libelf+debug+foo'
)
check_constrain(
'libelf debug=2 foo=1', 'libelf debug=2', 'libelf foo=1'
)
check_constrain(
'libelf debug=2 foo=1', 'libelf debug=2', 'libelf debug=2 foo=1'
)
check_constrain('libelf+debug~foo', 'libelf+debug', 'libelf~foo')
check_constrain(
'libelf+debug~foo', 'libelf+debug', 'libelf+debug~foo'
)
def test_constrain_multi_value_variant(self):
check_constrain(
'multivalue_variant foo="bar,baz"',
'multivalue_variant foo="bar"',
'multivalue_variant foo="baz"'
)
check_constrain(
'multivalue_variant foo="bar,baz,barbaz"',
'multivalue_variant foo="bar,barbaz"',
'multivalue_variant foo="baz"'
)
def test_constrain_compiler_flags(self):
check_constrain(
'libelf cflags="-O3" cppflags="-Wall"',
'libelf cflags="-O3"',
'libelf cppflags="-Wall"'
)
check_constrain(
'libelf cflags="-O3" cppflags="-Wall"',
'libelf cflags="-O3"',
'libelf cflags="-O3" cppflags="-Wall"'
)
def test_constrain_architecture(self):
check_constrain(
'libelf target=default_target os=default_os',
'libelf target=default_target os=default_os',
'libelf target=default_target os=default_os'
)
check_constrain(
'libelf target=default_target os=default_os',
'libelf',
'libelf target=default_target os=default_os'
)
def test_constrain_compiler(self):
check_constrain(
'libelf %[email protected]', 'libelf %[email protected]', 'libelf %[email protected]'
)
check_constrain(
'libelf %[email protected]', 'libelf', 'libelf %[email protected]'
)
def test_invalid_constraint(self):
check_invalid_constraint('libelf@0:2.0', '[email protected]:3')
check_invalid_constraint(
'libelf@0:2.5%[email protected]:4.9', '[email protected]:3%[email protected]:4.7')
check_invalid_constraint('libelf+debug', 'libelf~debug')
check_invalid_constraint('libelf+debug~foo', 'libelf+debug+foo')
check_invalid_constraint('libelf debug=True', 'libelf debug=False')
check_invalid_constraint(
'libelf cppflags="-O3"', 'libelf cppflags="-O2"')
check_invalid_constraint(
'libelf platform=test target=be os=be', 'libelf target=fe os=fe'
)
def test_constrain_changed(self):
check_constrain_changed('libelf', '@1.0')
check_constrain_changed('libelf', '@1.0:5.0')
check_constrain_changed('libelf', '%gcc')
check_constrain_changed('libelf%gcc', '%[email protected]')
check_constrain_changed('libelf', '+debug')
check_constrain_changed('libelf', '~debug')
check_constrain_changed('libelf', 'debug=2')
check_constrain_changed('libelf', 'cppflags="-O3"')
platform = spack.architecture.platform()
check_constrain_changed(
'libelf', 'target=' + platform.target('default_target').name)
check_constrain_changed(
'libelf', 'os=' + platform.operating_system('default_os').name)
def test_constrain_not_changed(self):
check_constrain_not_changed('libelf', 'libelf')
check_constrain_not_changed('[email protected]', '@1.0')
check_constrain_not_changed('[email protected]:5.0', '@1.0:5.0')
check_constrain_not_changed('libelf%gcc', '%gcc')
check_constrain_not_changed('libelf%[email protected]', '%[email protected]')
check_constrain_not_changed('libelf+debug', '+debug')
check_constrain_not_changed('libelf~debug', '~debug')
check_constrain_not_changed('libelf debug=2', 'debug=2')
check_constrain_not_changed(
'libelf cppflags="-O3"', 'cppflags="-O3"')
platform = spack.architecture.platform()
default_target = platform.target('default_target').name
check_constrain_not_changed(
'libelf target=' + default_target, 'target=' + default_target)
def test_constrain_dependency_changed(self):
check_constrain_changed('libelf^foo', 'libelf^[email protected]')
check_constrain_changed('libelf^foo', 'libelf^[email protected]:5.0')
check_constrain_changed('libelf^foo', 'libelf^foo%gcc')
check_constrain_changed('libelf^foo%gcc', 'libelf^foo%[email protected]')
check_constrain_changed('libelf^foo', 'libelf^foo+debug')
check_constrain_changed('libelf^foo', 'libelf^foo~debug')
platform = spack.architecture.platform()
default_target = platform.target('default_target').name
check_constrain_changed(
'libelf^foo', 'libelf^foo target=' + default_target)
def test_constrain_dependency_not_changed(self):
check_constrain_not_changed('libelf^[email protected]', 'libelf^[email protected]')
check_constrain_not_changed(
'libelf^[email protected]:5.0', 'libelf^[email protected]:5.0')
check_constrain_not_changed('libelf^foo%gcc', 'libelf^foo%gcc')
check_constrain_not_changed(
'libelf^foo%[email protected]', 'libelf^foo%[email protected]')
check_constrain_not_changed(
'libelf^foo+debug', 'libelf^foo+debug')
check_constrain_not_changed(
'libelf^foo~debug', 'libelf^foo~debug')
check_constrain_not_changed(
'libelf^foo cppflags="-O3"', 'libelf^foo cppflags="-O3"')
platform = spack.architecture.platform()
default_target = platform.target('default_target').name
check_constrain_not_changed(
'libelf^foo target=' + default_target,
'libelf^foo target=' + default_target)
def test_exceptional_paths_for_constructor(self):
with pytest.raises(TypeError):
Spec((1, 2))
with pytest.raises(ValueError):
Spec('')
with pytest.raises(ValueError):
Spec('libelf foo')
| lgpl-2.1 | 2,349,253,275,245,285,000 | 36.481994 | 78 | 0.601027 | false |
rl-institut/appBBB | appBBB/results_evaluation.py | 1 | 32709 | from os.path import expanduser
import csv
import pandas as pd
import matplotlib.pyplot as plt
from oemof.core import energy_system as es
from oemof.solph.predefined_objectives import minimize_cost
from oemof.outputlib import to_pandas as tpd
from oemof import db
import helper_BBB as hlsb
def create_es(solver, timesteps, year):
"""
Creates a default energy system to load results into.
"""
simulation = es.Simulation(solver=solver,
timesteps=timesteps,
debug=False,
objective_options={"function": minimize_cost})
# Adding a time index to the energy system
time_index = pd.date_range('1/1/' + year,
periods=len(timesteps),
freq='H')
energysystem = es.EnergySystem(time_idx=time_index,
simulation=simulation)
return energysystem
def color_dict(reg):
"""
Sets colors for entities in plot of electricity sector.
"""
cdict = {
# transformer
"('FixedSrc', '" + reg + "', 'wind_pwr')": 'lightblue',
"('FixedSrc', '" + reg + "', 'pv_pwr')": 'yellow',
"('transformer', '" + reg + "', 'oil')": 'black',
"('transformer', '" + reg + "', 'oil', 'chp')": 'black',
"('transformer', '" + reg + "', 'oil', 'SEchp')": 'black',
"('transformer', '" + reg + "', 'natural_gas')": 'lightgrey',
"('transformer', '" + reg + "', 'natural_gas', 'chp')":
'lightgrey',
"('transformer', '" + reg + "', 'natural_gas', 'SEchp')":
'lightgrey',
"('transformer', '" + reg + "', 'natural_gas_cc')": 'darkgrey',
"('transformer', '" + reg + "', 'natural_gas_cc', 'chp')":
'darkgrey',
"('transformer', '" + reg + "', 'natural_gas_cc', 'SEchp')":
'darkgrey',
"('transformer', '" + reg + "', 'HH', 'bhkw_gas')": 'grey',
"('transformer', '" + reg + "', 'GHD', 'bhkw_gas')": 'grey',
"('transformer', '" + reg + "', 'biomass')": 'lightgreen',
"('transformer', '" + reg + "', 'biomass', 'chp')": 'lightgreen',
"('transformer', '" + reg + "', 'biomass', 'SEchp')":
'lightgreen',
"('transformer', '" + reg + "', 'HH', 'bhkw_bio')": 'green',
"('transformer', '" + reg + "', 'GHD', 'bhkw_bio')": 'green',
"('transformer', '" + reg + "', 'powertoheat')": 'lightsalmon',
"('transformer', '" + reg + "', 'lignite_jw', 'SEchp')": 'brown',
"('transformer', '" + reg + "', 'lignite_sp', 'SEchp')": 'orange',
# demand
"('demand', '" + reg + "', 'elec')": 'red',
"('demand', '" + reg + "', 'elec', 'mob')": 'red',
# shortage / excess
"('bus', '" + reg + "', 'elec')_excess": 'purple',
"('bus', '" + reg + "', 'elec')_shortage": 'blueviolet',
# heat pump
"('transformer', '" + reg + "', 'hp', 'brine', 'ww')": 'blue',
"('transformer', '" + reg + "', 'hp', 'brine', 'heating')":
'blue',
"('transformer', '" + reg + "', 'hp', 'air', 'ww')": 'blue',
"('transformer', '" + reg + "', 'hp', 'air', 'heating')": 'blue',
"('transformer', '" + reg + "', 'hp', 'air', 'ww', 'rod')":
'blue',
"('transformer', '" + reg + "', 'hp', 'air', 'heating', 'rod')":
'blue',
# transport
"transport_('bus', 'UB', 'elec')('bus', 'OS', 'elec')": 'salmon',
"transport_('bus', 'OS', 'elec')('bus', 'UB', 'elec')": 'salmon',
"transport_('bus', 'OS', 'elec')('bus', 'LS', 'elec')":
'chocolate',
"transport_('bus', 'LS', 'elec')('bus', 'OS', 'elec')":
'chocolate',
"transport_('bus', 'OS', 'elec')('bus', 'BE', 'elec')": 'peru',
"transport_('bus', 'BE', 'elec')('bus', 'OS', 'elec')": 'peru',
"transport_('bus', 'LS', 'elec')('bus', 'HF', 'elec')":
'burlywood',
"transport_('bus', 'HF', 'elec')('bus', 'LS', 'elec')":
'burlywood',
"transport_('bus', 'HF', 'elec')('bus', 'PO', 'elec')":
'goldenrod',
"transport_('bus', 'PO', 'elec')('bus', 'HF', 'elec')":
'goldenrod',
"transport_('bus', 'HF', 'elec')('bus', 'BE', 'elec')": 'khaki',
"transport_('bus', 'BE', 'elec')('bus', 'HF', 'elec')": 'khaki',
"transport_('bus', 'PO', 'elec')('bus', 'OS', 'elec')":
'indianred',
"transport_('bus', 'OS', 'elec')('bus', 'PO', 'elec')":
'indianred',
"transport_('bus', 'UB', 'elec')('bus', 'KJ', 'elec')": 'lime',
"transport_('bus', 'UB', 'elec')('bus', 'MV', 'elec')": 'cyan',
"transport_('bus', 'PO', 'elec')('bus', 'MV', 'elec')": 'teal',
"transport_('bus', 'PO', 'elec')('bus', 'ST', 'elec')":
'seagreen',
"transport_('bus', 'HF', 'elec')('bus', 'ST', 'elec')":
'yellowgreen',
"transport_('bus', 'LS', 'elec')('bus', 'SN', 'elec')":
'turquoise',
"transport_('bus', 'BE', 'elec')('bus', 'HF', 'elec')": 'olive',
"transport_('bus', 'BE', 'elec')('bus', 'OS', 'elec')":
'lightseagreen',
"transport_('bus', 'KJ', 'import')('bus', 'UB', 'elec')": 'lime',
"transport_('bus', 'MV', 'import')('bus', 'UB', 'elec')": 'cyan',
"transport_('bus', 'MV', 'import')('bus', 'PO', 'elec')": 'teal',
"transport_('bus', 'ST', 'import')('bus', 'PO', 'elec')":
'seagreen',
"transport_('bus', 'ST', 'import')('bus', 'HF', 'elec')":
'yellowgreen',
"transport_('bus', 'SN', 'import')('bus', 'LS', 'elec')":
'turquoise',
"transport_('bus', 'HF', 'elec')('bus', 'BE', 'elec')": 'olive',
"transport_('bus', 'OS', 'elec')('bus', 'BE', 'elec')":
'lightseagreen'}
return cdict
def color_dict_dh(reg):
"""
Sets colors for entities in plot of district heating.
"""
cdict = {
# transformer
"('transformer', '" + reg + "', 'oil', 'chp')": 'black',
"('transformer', '" + reg + "', 'oil', 'SEchp')": 'black',
"('heat_transformer', '" + reg + "', 'oil')": 'black',
"('transformer', '" + reg + "', 'natural_gas', 'chp')":
'lightgrey',
"('transformer', '" + reg + "', 'natural_gas', 'SEchp')":
'lightgrey',
"('heat_transformer', '" + reg + "', 'natural_gas')":
'lightgrey',
"('transformer', '" + reg + "', 'dh_peak_heating')": 'khaki',
"('transformer', '" + reg + "', 'natural_gas_cc', 'chp')":
'darkgrey',
"('transformer', '" + reg + "', 'natural_gas_cc', 'SEchp')":
'darkgrey',
"('transformer', '" + reg + "', 'biomass', 'chp')": 'lightgreen',
"('transformer', '" + reg + "', 'biomass', 'SEchp')":
'lightgreen',
"('heat_transformer', '" + reg + "', 'biomass')": 'lightgreen',
"('transformer', '" + reg + "', 'lignite_jw', 'SEchp')": 'brown',
"('transformer', '" + reg + "', 'lignite_sp', 'SEchp')": 'orange',
"('transformer', '" + reg + "', 'powertoheat')": 'lightsalmon',
# demand
"('demand', '" + reg + "', 'dh')": 'red',
        # shortage / excess
"('bus', '" + reg + "', 'dh')_excess": 'purple',
"('bus', '" + reg + "', 'dh')_shortage": 'blue'}
return cdict
def stack_plot(energysystem, reg, bus, date_from, date_to):
"""
Creates a stack plot of the specified bus.
"""
# initialize plot
myplot = tpd.DataFramePlot(energy_system=energysystem)
# get dictionary with color of each entity in plot
if bus == 'elec':
cdict = color_dict(reg)
elif bus == 'dh':
cdict = color_dict_dh(reg)
# slice dataframe to prepare for plot function
myplot.slice_unstacked(
bus_uid="('bus', '" + reg + "', '" + bus + "')",
type="input",
date_from=date_from,
date_to=date_to)
myplot.color_from_dict(cdict)
# set plot parameters
fig = plt.figure(figsize=(40, 14))
plt.rc('legend', **{'fontsize': 18})
plt.rcParams.update({'font.size': 18})
plt.style.use('grayscale')
# plot bus
handles, labels = myplot.io_plot(
bus_uid="('bus', '" + reg + "', '" + bus + "')",
cdict=cdict,
line_kwa={'linewidth': 4},
ax=fig.add_subplot(1, 1, 1),
date_from=date_from,
date_to=date_to,
)
myplot.ax.set_ylabel('Power in MW')
myplot.ax.set_xlabel('Date')
myplot.ax.set_title(bus+" bus")
myplot.set_datetime_ticks(tick_distance=24, date_format='%d-%m-%Y')
myplot.outside_legend(handles=handles, labels=labels)
plt.show()
return (fig)
def sum_max_output_of_component(energysystem, from_uid, to_uid):
"""
Returns the sum and the maximum of the flow from entity with 'from_uid'
to entity with 'to_uid'.
"""
results_bus = energysystem.results[[obj for obj in energysystem.entities
if obj.uid == (from_uid)][0]]
results_bus_component = results_bus[[obj for obj in energysystem.entities
if obj.uid == (to_uid)][0]]
return sum(results_bus_component), max(results_bus_component)
def timeseries_of_component(energysystem, from_uid, to_uid):
"""
Returns the flow from entity with 'from_uid' to entity with 'to_uid'.
"""
results_bus = energysystem.results[[obj for obj in energysystem.entities
if obj.uid == (from_uid)][0]]
results_bus_component = results_bus[[obj for obj in energysystem.entities
if obj.uid == (to_uid)][0]]
return results_bus_component
def print_validation_outputs(energysystem, reg, results_dc):
"""
Returns sums and maximums of flows as well as full load hours of
transformers.
"""
# connect to database
conn_oedb = db.connection(section='open_edb')
# get paremeters of transformers from database
(co2_emissions, co2_fix, eta_elec, eta_th, eta_th_chp, eta_el_chp,
eta_chp_flex_el, sigma_chp, beta_chp, opex_var, opex_fix, capex,
c_rate_in, c_rate_out, eta_in, eta_out,
cap_loss, lifetime, wacc) = hlsb.get_parameters(conn_oedb)
# list of possible power plants in region
pp = [
"('FixedSrc', '" + reg + "', 'wind_pwr')",
"('FixedSrc', '" + reg + "', 'pv_pwr')",
"('transformer', '" + reg + "', 'oil')",
"('transformer', '" + reg + "', 'oil', 'chp')",
"('transformer', '" + reg + "', 'oil', 'SEchp')",
"('transformer', '" + reg + "', 'natural_gas')",
"('transformer', '" + reg + "', 'natural_gas', 'chp')",
"('transformer', '" + reg + "', 'natural_gas', 'SEchp')",
"('transformer', '" + reg + "', 'natural_gas_cc')",
"('transformer', '" + reg + "', 'natural_gas_cc', 'chp')",
"('transformer', '" + reg + "', 'natural_gas_cc', 'SEchp')",
"('transformer', '" + reg + "', 'biomass')",
"('transformer', '" + reg + "', 'biomass', 'chp')",
"('transformer', '" + reg + "', 'biomass', 'SEchp')",
"('transformer', '" + reg + "', 'HH', 'bhkw_gas')",
"('transformer', '" + reg + "', 'GHD', 'bhkw_gas')",
"('transformer', '" + reg + "', 'HH', 'bhkw_bio')",
"('transformer', '" + reg + "', 'GHD', 'bhkw_bio')",
"('transformer', '" + reg + "', 'bhkw_bio')",
"('transformer', '" + reg + "', 'bhkw_bio', 'dh')",
"('transformer', '" + reg + "', 'dh_peak_heating')",
"('transformer', '" + reg + "', 'lignite_jw', 'SEchp')",
"('transformer', '" + reg + "', 'lignite_sp', 'SEchp')",
"('transformer', '" + reg + "', 'powertoheat')"]
# list of efficiencies of the above transformers
eta_el = [
1,
1,
eta_elec['oil'],
eta_el_chp['oil'],
eta_chp_flex_el['oil'],
eta_elec['natural_gas'],
eta_el_chp['natural_gas'],
eta_chp_flex_el['natural_gas'],
eta_elec['natural_gas_cc'],
eta_el_chp['natural_gas_cc'],
eta_chp_flex_el['natural_gas_cc'],
eta_elec['biomass'],
eta_el_chp['biomass'],
eta_chp_flex_el['biomass'],
eta_el_chp['bhkw_gas'],
eta_el_chp['bhkw_gas'],
eta_el_chp['bhkw_bio'],
eta_el_chp['bhkw_bio'],
eta_el_chp['bhkw_bio'],
eta_el_chp['bhkw_bio'],
0, # dh_peakheating
eta_chp_flex_el['jaenschwalde'],
eta_chp_flex_el['schwarzepumpe'],
0 # powertoheat
]
# list of CO2 emissions of the above transformers
co2 = [
0,
0,
co2_emissions['oil'],
co2_emissions['oil'],
co2_emissions['oil'],
co2_emissions['natural_gas'],
co2_emissions['natural_gas'],
co2_emissions['natural_gas'],
co2_emissions['natural_gas_cc'],
co2_emissions['natural_gas_cc'],
co2_emissions['natural_gas_cc'],
co2_emissions['biomass'],
co2_emissions['biomass'],
co2_emissions['biomass'],
co2_emissions['bhkw_gas'],
co2_emissions['bhkw_gas'],
co2_emissions['bhkw_bio'],
co2_emissions['bhkw_bio'],
co2_emissions['bhkw_bio'],
co2_emissions['bhkw_bio'],
0, # dh_peakheating
co2_emissions['lignite'],
co2_emissions['lignite'],
0 # powertoheat
]
# get sum and maximum of each flow from transformer to bus as well as
# full load hours of each transformer
ebus = "('bus', '" + reg + "', 'elec')"
dhbus = "('bus', '" + reg + "', 'dh')"
summe_plant_dict = {}
el_energy = list()
dh_energy = list()
for p in pp:
print(p)
# if flow from transformer to electricity bus
try:
summe_plant_dict[p], maximum = sum_max_output_of_component(
energysystem, p, ebus)
print(('sum:' + str(summe_plant_dict[p])))
print(('max:' + str(maximum)))
results_dc['sum ' + reg + str(p)] = summe_plant_dict[p]
results_dc['max ' + reg + str(p)] = maximum
el_energy.append(summe_plant_dict[p])
except:
            print('not available')
results_dc['sum ' + reg + str(p)] = 0
results_dc['max ' + reg + str(p)] = 0
el_energy.append(0)
try:
print(('vlh:' + str(summe_plant_dict[p] / maximum)))
results_dc['vlh ' + reg + str(p)] = summe_plant_dict[p] / maximum
except:
results_dc['vlh ' + reg + str(p)] = 0
print('\n')
# if flow from transformer to district heating bus
try:
summe_plant_dict['dh' + p], maximum = sum_max_output_of_component(
energysystem, p, dhbus)
print(('sum:' + str(summe_plant_dict['dh' + p])))
print(('max:' + str(maximum)))
results_dc['sum '+ reg + str(p) + '_dh'] = \
summe_plant_dict['dh' + p]
results_dc['max '+ reg + str(p) + '_dh'] = maximum
dh_energy.append(summe_plant_dict['dh' + p])
except:
            print('not available')
dh_energy.append(0)
results_dc['sum '+ reg + str(p)+'_dh'] = 0
results_dc['max '+ reg + str(p)+'_dh'] = 0
try:
            print(('vlh:' + str(summe_plant_dict['dh' + p] / maximum)))
            results_dc['vlh ' + reg + str(p) + '_dh'] = (
                summe_plant_dict['dh' + p] / maximum)
except:
results_dc['vlh ' + reg + str(p)+'_dh'] = 0
print('\n')
# get sum and maximum of electricity shortage
shortage_bus = "('bus', '" + reg + "', 'elec')_shortage"
summe_plant, maximum = sum_max_output_of_component(
energysystem, shortage_bus, ebus)
print(('el_shortage_sum:' + str(summe_plant)))
print(('el_shortage_max:' + str(maximum)))
results_dc['el_shortage ' + reg] = str(summe_plant)
results_dc['el_shortage_max ' + reg] = maximum
print('\n')
# get sum and maximum of excess in district heating
excess_dh = "('bus', '" + reg + "', 'dh')_excess"
summe_plant, maximum = sum_max_output_of_component(
energysystem, dhbus, excess_dh)
print(('dh_excess_sum:' + str(summe_plant)))
print(('dh_excess_max:' + str(maximum)))
results_dc['dh_excess_sum ' + reg] = summe_plant
results_dc['dh_excess_max ' + reg] = maximum
# get sum and maximum of electricity excess
excess = "('bus', '" + reg + "', 'elec')_excess"
summe_plant, maximum = sum_max_output_of_component(
energysystem, ebus, excess)
print(('el_excess_sum:' + str(summe_plant)))
print(('el_excess_max:' + str(maximum)))
results_dc['el_excess_sum ' + reg] = summe_plant
results_dc['el_excess_max ' + reg] = maximum
# get sum of flows from wind turbines and pv systems to electricity bus
sum_fee = (summe_plant_dict["('FixedSrc', '" + reg + "', 'wind_pwr')"] +
summe_plant_dict["('FixedSrc', '" + reg + "', 'pv_pwr')"])
print(('share excess wind + pv:' + str((summe_plant / sum_fee) * 100)))
# create dataframe with power output of each transformer, electrical
# efficiency and CO2 per MWh
frame = pd.DataFrame(index=pp)
frame['dh_energy'] = dh_energy
frame['energy_sum'] = el_energy
frame['eta_el'] = eta_el
frame['co2'] = co2
return (results_dc, frame)
def print_exports(energysystem, results_dc, year, path):
"""
Get exports from Brandenburg to neighbor regions and imports from neighbor
regions to Brandenburg.
"""
export_from = ["('bus', 'UB', 'elec')",
"('bus', 'UB', 'elec')",
"('bus', 'PO', 'elec')",
"('bus', 'PO', 'elec')",
"('bus', 'HF', 'elec')",
"('bus', 'LS', 'elec')",
"('bus', 'HF', 'elec')",
"('bus', 'OS', 'elec')"]
import_to = export_from
export_to = ["transport_('bus', 'UB', 'elec')('bus', 'KJ', 'elec')",
"transport_('bus', 'UB', 'elec')('bus', 'MV', 'elec')",
"transport_('bus', 'PO', 'elec')('bus', 'MV', 'elec')",
"transport_('bus', 'PO', 'elec')('bus', 'ST', 'elec')",
"transport_('bus', 'HF', 'elec')('bus', 'ST', 'elec')",
"transport_('bus', 'LS', 'elec')('bus', 'SN', 'elec')",
"transport_('bus', 'BE', 'elec')('bus', 'HF', 'elec')",
"transport_('bus', 'BE', 'elec')('bus', 'OS', 'elec')"]
import_from = ["transport_('bus', 'KJ', 'import')('bus', 'UB', 'elec')",
"transport_('bus', 'MV', 'import')('bus', 'UB', 'elec')",
"transport_('bus', 'MV', 'import')('bus', 'PO', 'elec')",
"transport_('bus', 'ST', 'import')('bus', 'PO', 'elec')",
"transport_('bus', 'ST', 'import')('bus', 'HF', 'elec')",
"transport_('bus', 'SN', 'import')('bus', 'LS', 'elec')",
"transport_('bus', 'HF', 'elec')('bus', 'BE', 'elec')",
"transport_('bus', 'OS', 'elec')('bus', 'BE', 'elec')"]
time_index = pd.date_range('1/1/{0}'.format(year), periods=8760, freq='H')
time_no_export = pd.DataFrame(index=time_index)
exports = pd.DataFrame(index=time_index)
imports = pd.DataFrame(index=time_index)
export_total = 0
for i in range(len(export_from)):
print(export_to[i])
# sum of export
summe_ex, maximum = sum_max_output_of_component(
energysystem, export_from[i], export_to[i])
export_total += summe_ex
print('export:')
print(summe_ex)
results_dc['export ' + export_to[i] + ' summe'] = summe_ex
# maximum of export
print('max:')
print(maximum)
results_dc['export ' + export_to[i] + ' maximum'] = maximum
# timeseries
exports[export_to[i]] = timeseries_of_component(
energysystem, export_from[i], export_to[i])
imports[export_to[i]] = timeseries_of_component(
energysystem, import_from[i], import_to[i])
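        # net export per timestep (positive means export exceeds import)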
time_no_export[export_to[i]] = (exports[export_to[i]] -
imports[export_to[i]])
# total export
    print('export_total:')
    print(export_total)
    results_dc['export total: '] = export_total
# save import and export timeseries to csv
exports.to_csv(path + 'exports.csv')
imports.to_csv(path + 'imports.csv')
time_no_export.to_csv(path + 'no_export.csv')
return (results_dc, time_no_export)
def print_im_exports(energysystem, results_dc, year, path):
"""
Adds flows between regions in Brandenburg and between Brandenburg and
Berlin to results_dc.
"""
export_from = ["('bus', 'UB', 'elec')",
"('bus', 'PO', 'elec')",
"('bus', 'HF', 'elec')",
"('bus', 'LS', 'elec')",
"('bus', 'OS', 'elec')",
"('bus', 'BE', 'elec')"]
export_to = [
"transport_('bus', 'UB', 'elec')('bus', 'OS', 'elec')",
"transport_('bus', 'OS', 'elec')('bus', 'UB', 'elec')",
"transport_('bus', 'OS', 'elec')('bus', 'LS', 'elec')",
"transport_('bus', 'LS', 'elec')('bus', 'OS', 'elec')",
"transport_('bus', 'OS', 'elec')('bus', 'BE', 'elec')",
"transport_('bus', 'BE', 'elec')('bus', 'OS', 'elec')",
"transport_('bus', 'LS', 'elec')('bus', 'HF', 'elec')",
"transport_('bus', 'HF', 'elec')('bus', 'LS', 'elec')",
"transport_('bus', 'HF', 'elec')('bus', 'PO', 'elec')",
"transport_('bus', 'PO', 'elec')('bus', 'HF', 'elec')",
"transport_('bus', 'HF', 'elec')('bus', 'BE', 'elec')",
"transport_('bus', 'BE', 'elec')('bus', 'HF', 'elec')",
"transport_('bus', 'PO', 'elec')('bus', 'OS', 'elec')",
"transport_('bus', 'OS', 'elec')('bus', 'PO', 'elec')"]
time_index = pd.date_range('1/1/{0}'.format(year), periods=8760, freq='H')
BBB_Kuppelstellen = pd.DataFrame(index=time_index)
export_all = 0
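    # try every bus/transport combination; pairs without a matching transport
    # entity raise inside the try block and are simply skipped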
for i in export_from:
print(i)
for k in export_to:
print(k)
try:
summe_ex, maximum = sum_max_output_of_component(
energysystem, i, k)
export_all += summe_ex
print('from '+ i + ' to '+ k)
print(summe_ex)
results_dc['export from ' + i + ' to ' + k] = summe_ex
results_dc['export from ' + i + ' to ' + k + ' maximum'] = \
maximum
BBB_Kuppelstellen['export from ' + i + ' to ' + k] = \
timeseries_of_component(energysystem, i, k)
except:
pass
# total of flows
    print('export_in_BBB_total:')
    print(export_all)
    results_dc['export in BBB total: '] = export_all
# timeseries to csv
BBB_Kuppelstellen.to_csv(path + 'kuppelstellen.csv')
return results_dc
def get_share_ee(energysystem, reg, results_dc):
"""
Get shares of wind and pv on demand fulfillment.
"""
# get feedin timeseries from wind and pv to electricity bus
ebus = "('bus', '" + reg + "', 'elec')"
pv_time = timeseries_of_component(
energysystem, "('FixedSrc', '" + reg + "', 'pv_pwr')", ebus)
wind_time = timeseries_of_component(
energysystem, "('FixedSrc', '" + reg + "', 'wind_pwr')", ebus)
# get electricity demand timeseries
demand_time = timeseries_of_component(
energysystem, ebus, "('demand', '" + reg + "', 'elec')")
# calculate shares
res = pd.DataFrame(index=range(len(demand_time)),
columns=['ee', 'pv', 'wind'])
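    # If wind + pv feed-in exceeds demand in an hour (fee < 0), renewables can
    # only cover the demand itself; the covered amount is then split between
    # pv and wind in proportion to their feed-in.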
for i in range(len(demand_time)):
fee = demand_time[i] - pv_time[i] - wind_time[i]
if fee < 0:
res['ee'][i] = demand_time[i]
res['pv'][i] = demand_time[i] * pv_time[i] / (
pv_time[i] + wind_time[i])
res['wind'][i] = demand_time[i] * wind_time[i] / (
pv_time[i] + wind_time[i])
else:
res['ee'][i] = pv_time[i] + wind_time[i]
res['pv'][i] = pv_time[i]
res['wind'][i] = wind_time[i]
ee_share = sum(res['ee']) / sum(demand_time)
pv_share = sum(res['pv']) / sum(demand_time)
wind_share = sum(res['wind']) / sum(demand_time)
# print shares and add to results_dc
print('ee share:')
print(ee_share)
results_dc['ee share ' + reg] = ee_share
print('pv share:')
print(pv_share)
results_dc['pv share ' + reg] = pv_share
print('wind share:')
print(wind_share)
results_dc['wind share ' + reg] = wind_share
return results_dc
def co2(energysystem):
"""
Calculate total CO2 emissions.
"""
# retrieve specific CO2 emissions from database
conn_oedb = db.connection(section='open_edb')
(co2_emissions, co2_fix, eta_elec, eta_th, eta_th_chp, eta_el_chp,
eta_chp_flex_el, sigma_chp, beta_chp, opex_var, opex_fix, capex,
c_rate_in, c_rate_out, eta_in, eta_out,
cap_loss, lifetime, wacc) = hlsb.get_parameters(conn_oedb)
    # fossil resources
global_ressources = ['natural_gas', 'natural_gas_cc', 'lignite',
'oil', 'waste', 'hard_coal']
    # create list of global resource buses in BB
list_global_ressource_buses = []
for ressource in global_ressources:
list_global_ressource_buses += ["('bus', 'BB', '" + ressource + "')"]
    # create list with entities of global resource buses
global_ressource_buses_bb = [obj for obj in energysystem.entities
if any(bus in obj.uid for bus in list_global_ressource_buses)]
# get yearly energy
co2 = 0
for bus in global_ressource_buses_bb:
for output in bus.outputs:
summe, maximum = sum_max_output_of_component(
energysystem, bus.uid, output.uid)
co2 += summe * co2_emissions[bus.type]
# biogas
biogas_transformer = [obj for obj in energysystem.entities
if 'bhkw_bio' in obj.uid and 'transformer' in obj.uid]
bb_regions = ['PO', 'UB', 'HF', 'OS', 'LS']
biogas_transformer_bb = [obj for obj in biogas_transformer
if any(region in obj.uid for region in bb_regions)]
    # add CO2 emissions from biogas-fired CHP units in Brandenburg
for transformer in biogas_transformer_bb:
summe, maximum = sum_max_output_of_component(
energysystem, transformer.inputs[0].uid, transformer.uid)
co2 += summe * co2_emissions[transformer.inputs[0].type]
print('Total CO2 emissions in BB:')
print(co2)
return co2
def get_supply_demand_timeseries(energysystem, year, path):
"""
Writes timeseries of all inputs and outputs of the electricity bus of
each region as well as their sums to dataframe and saves to csv.
"""
time_index = pd.date_range('1/1/{0}'.format(year), periods=8760, freq='H')
# create dataframe for timeseries sums of outputs and inputs of electricity
# bus
supply_demand_sum = pd.DataFrame(index=time_index)
for region in energysystem.regions:
reg = region.name
# create dataframe for timeseries of outputs and inputs of electricity
# bus
elec_out = pd.DataFrame(index=time_index)
elec_in = pd.DataFrame(index=time_index)
# get electricity bus entity and its results
elec_bus = [obj for obj in energysystem.entities
if obj.uid == ("('bus', '" + reg + "', 'elec')")][0]
elec_bus_results = energysystem.results[[obj for obj in
energysystem.entities
if obj.uid == ("('bus', '" + reg + "', 'elec')")][0]]
# get outputs of electricity bus
for obj in energysystem.entities:
if 'demand' in obj.uid or 'hp' in obj.uid:
try:
elec_out[obj.uid] = elec_bus_results[[obj][0]]
except:
pass
# get inputs of electricity bus
for obj in energysystem.entities:
if ('transformer' in obj.uid or 'transport' in obj.uid or
'FixedSrc' in obj.uid):
obj_in = energysystem.results[[obj][0]]
try:
elec_in[obj.uid] = obj_in[[elec_bus][0]]
except:
pass
# save to csv
elec_in.to_csv(path + reg + '_all_times_in.csv')
elec_out.to_csv(path + reg + '_all_times_out.csv')
# get residual as well as sum of all inputs and all outputs
supply_demand_sum[reg] = elec_in.sum(axis=1) - elec_out.sum(axis=1)
supply_demand_sum[reg + 'in'] = elec_in.sum(axis=1)
supply_demand_sum[reg + 'out'] = elec_out.sum(axis=1)
# save to csv
supply_demand_sum.to_csv(path + 'supply_minus_demand.csv')
return supply_demand_sum
if __name__ == "__main__":
# load results
path_to_dump = expanduser("~") + '/.oemof/dumps/'
year = 2010
# create dummy energy system
energysystem = create_es('cbc', [t for t in range(8760)], str(year))
# load dumped energy system
energysystem.restore(path_to_dump)
# weeks for stack plot
date_from = {}
date_to = {}
date_from['spring'] = "2010-03-17 00:00:00"
date_to['spring'] = "2010-03-24 00:00:00"
date_from['summer'] = "2010-06-17 00:00:00"
date_to['summer'] = "2010-06-24 00:00:00"
date_from['autumn'] = "2010-09-17 00:00:00"
date_to['autumn'] = "2010-09-24 00:00:00"
date_from['winter'] = "2010-12-17 00:00:00"
date_to['winter'] = "2010-12-24 00:00:00"
# empty results_dc dictionary to write results into
results_dc = {}
# get all inputs and outputs of electricity bus of each region
get_supply_demand_timeseries(energysystem, year, path_to_dump)
# get exports from Brandenburg to neighbor regions and imports from
# neighbor regions to Brandenburg
print_exports(energysystem, results_dc, year, path_to_dump)
# add flows between regions in Brandenburg and between Brandenburg and
# Berlin to results_dc
print_im_exports(energysystem, results_dc, year, path_to_dump)
# calculates total CO2 emissions
results_dc['co2_all_BB'] = co2(energysystem)
transformer_results_df = pd.DataFrame()
for reg in ('HF', 'LS', 'UB', 'PO', 'BE', 'OS'):
# create stack plots for electricity bus and district heating bus for
# winter week
week = 'winter'
for bus in ('elec', 'dh'):
fig = stack_plot(
energysystem, reg, bus, date_from[week], date_to[week])
fig.savefig(path_to_dump + reg + '_' + bus + '_' + week + '.png')
# add sums and maximums of flows as well as full load hours of
# transformers
# return value frame is a dataframe with power output of each
# transformer, electrical efficiency and CO2 per MWh
results_dc, frame = print_validation_outputs(
energysystem, reg, results_dc)
transformer_results_df = transformer_results_df.append(frame)
# get shares of wind and pv of demand fulfillment
get_share_ee(energysystem, reg, results_dc)
# write to csv
transformer_results_df.to_csv(path_to_dump + 'co2_el_energy.csv')
f = open(path_to_dump + '_results.csv', 'w', newline='')
w = csv.writer(f, delimiter=';')
w.writerow(list(results_dc.keys()))
w.writerow(list(results_dc.values()))
    f.close() | gpl-3.0 | -4,810,676,121,799,167,000 | 39.734745 | 79 | 0.504234 | false |
team-vigir/vigir_behaviors | vigir_flexbe_states/src/vigir_flexbe_states/footstep_plan_relative_state.py | 1 | 4234 | #!/usr/bin/env python
import rospy
import math
import actionlib
from flexbe_core import EventState, Logger
from flexbe_core.proxy import ProxyActionClient
from vigir_footstep_planning_msgs.msg import *
from std_msgs.msg import String, Header
'''
Created on 02/24/2015
@author: Philipp Schillinger and Spyros Maniatopoulos
'''
class FootstepPlanRelativeState(EventState):
'''
Implements a state where the robot plans a relative motion, e.g. 2m to the left.
Please note that the distance is only approximate, actual distance depends on exact step alignment.
-- direction int One of the class constants to specify a direction.
># distance float Distance to walk, given in meters.
#> plan_header Header The header of the plan to perform the walking.
<= planned Successfully created a plan.
<= failed Failed to create a plan.
'''
DIRECTION_LEFT = 3 # PatternParameters.STRAFE_LEFT
DIRECTION_RIGHT = 4 # PatternParameters.STRAFE_RIGHT
DIRECTION_FORWARD = 1 # PatternParameters.FORWARD
DIRECTION_BACKWARD = 2 # PatternParameters.BACKARD
def __init__(self, direction):
'''
Constructor
'''
super(FootstepPlanRelativeState, self).__init__(outcomes=['planned', 'failed'],
input_keys=['distance'],
output_keys=['plan_header'])
if not rospy.has_param("behavior/step_distance_forward"):
Logger.logerr("Need to specify parameter behavior/step_distance_forward at the parameter server")
return
if not rospy.has_param("behavior/step_distance_sideward"):
Logger.logerr("Need to specify parameter behavior/step_distance_sideward at the parameter server")
return
self._step_distance_forward = rospy.get_param("behavior/step_distance_forward")
self._step_distance_sideward = rospy.get_param("behavior/step_distance_sideward")
self._action_topic = '/vigir/footstep_manager/step_plan_request'
self._client = ProxyActionClient({self._action_topic: StepPlanRequestAction})
self._done = False
self._failed = False
self._direction = direction
def execute(self, userdata):
if self._failed:
userdata.plan_header = None
return 'failed'
if self._done:
return 'planned'
if self._client.has_result(self._action_topic):
result = self._client.get_result(self._action_topic)
if result.status.warning != ErrorStatus.NO_WARNING:
Logger.logwarn('Planning footsteps warning:\n%s' % result.status.warning_msg)
if result.status.error == ErrorStatus.NO_ERROR:
userdata.plan_header = result.step_plan.header
num_steps = len(result.step_plan.steps)
Logger.loginfo('Received plan with %d steps' % num_steps)
self._done = True
return 'planned'
else:
userdata.plan_header = None # as recommended: dont send out incomplete plan
Logger.logerr('Planning footsteps failed:\n%s' % result.status.error_msg)
self._failed = True
return 'failed'
def on_enter(self, userdata):
self._failed = False
self._done = False
# Create footstep planner request
strafing = self._direction == PatternParameters.STRAFE_LEFT or self._direction == PatternParameters.STRAFE_RIGHT
pattern_parameters = PatternParameters()
pattern_parameters.mode = self._direction
pattern_parameters.step_distance_forward = self._step_distance_forward if not strafing else 0.0 # will it ignore?
pattern_parameters.step_distance_sideward = self._step_distance_sideward if strafing else 0.0 # will it ignore?
pattern_parameters.close_step = True
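		# The requested distance is only approximated: it is rounded to a whole
		# number of pattern steps of the configured step length.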
step_distance = pattern_parameters.step_distance_sideward if strafing else pattern_parameters.step_distance_forward
pattern_parameters.steps = int(round(userdata.distance / step_distance))
request = StepPlanRequest()
request.parameter_set_name = String('drc_step_no_collision')
request.header = Header(frame_id = '/world', stamp = rospy.Time.now())
request.planning_mode = StepPlanRequest.PLANNING_MODE_PATTERN
request.pattern_parameters = pattern_parameters
action_goal = StepPlanRequestGoal(plan_request = request)
try:
self._client.send_goal(self._action_topic, action_goal)
except Exception as e:
Logger.logwarn('Was unable to create footstep pattern for wide stance:\n%s' % str(e))
self._failed = True
| bsd-3-clause | -8,746,519,600,787,208,000 | 33.422764 | 117 | 0.730279 | false |
IMDProjects/ServiceManager | ServiceManager.py | 1 | 3754 | #-------------------------------------------------------------------------------
# ServiceManager.py
#
# Purpose: Creates, updates, deletes services in ArcGIS Online
#
#
# Prerequisites/Inputs:
# TokenManager: authentication token for NPS ArcGIS Online
# ServiceConfiguration: service configuration structure
# ServiceSource: service content
#
# XML metadata template in known subfolder (<somewhere>/Templates/Metadata)
# Working Folder/Workspace
#
# Outputs:
# Create: feature service in ArcGIS Online repository
# Manage: updated feature service in ArcGIS Online repository
# Delete: log of deleted service(s)
#
# Created by: NPS Inventory and Monitoring Division Staff
# Update date: 20161019
#
#
#
#-------------------------------------------------------------------------------
import urllib
import urllib2
import arcrest
#import TokenManager
from TokenManager import TokenManager
import ServiceConfiguration
from ServiceConfiguration import ServiceConfiguration
#import ServiceSource
from ServiceSource import ServiceSource
class ServiceManager(object):
'''
INFO
----
Object to manage ArcGIS Online feature services
'''
token = None
admin = None
def __init__(self):
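        # Fetch a Portal token and admin handle once; they are cached on the
        # class so later instances reuse them.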
if self.token == None:
tm = TokenManager("Portal", "https://nps.maps.arcgis.com", "IMDGISTeam", "G3010g!c2016", "https://irma.nps.gov")
tm.getToken()
self.token = tm.token
if self.admin == None:
self.admin = tm.admin
def getConfiguration(self, itype, title, description, url=None, tags=None, snippet=None, accessInformation=None, metadata=None):
sc = ServiceConfiguration(itype=itype, title=title
, description=description
, url=url
, tags=tags
, snippet=snippet
, accessInformation=accessInformation
, metadata=metadata)
return sc.itemParams
if __name__=='__main__':
sm = ServiceManager()
serviceToken = sm.token
admin = sm.admin
print serviceToken
content = admin.content
userInfo = content.users.user()
ss = ServiceSource()
# Data Store/ServCat example
ss.sourceFilter = ss.dsscConnection("GRI", "GeospatialDataset")
ss.sourceList = ss.getDSSCSources("http://irmaservices.nps.gov/datastore/v4/rest/AdvancedSearch/Composite?top=2000&format=json")
# ArcGIS Server example
#ss.agsConnection("https://inp2300fcvhafo1", "arcgis_admin", "admin2016...")
#ss.sourceList = ss.getAGSSources(ss.agsServer, "Inventory_Geology")
    # Metadata: may work if this is converted to an XML object: , metadata="https://irma.nps.gov/DataStore/DownloadFile/544273"
for i in range(1, len(ss.sourceList['serviceName'])):
itemParameters = sm.getConfiguration(itype="Map Service"
, title=ss.sourceList['serviceName'][i]
, description=ss.sourceList['description'][i]
, url=ss.sourceList['serviceURL'][i].replace('http','https')
#, url=urllib.urlencode(ss.sourceList['serviceURL'][i])
, tags="National Park Service (NPS) Geologic Resources Inventory (GRI), Geology"
, snippet="Digital Data, Digital Geologic Map, NPS Geologic Resources Inventory"
, accessInformation="National Park Service (NPS) Geologic Resources Inventory (GRI) program, National Park Service (NPS) Inventory and Monitoring Division")
print ss.sourceList['serviceURL'][i]
#print str(itemParameters)
# This request works although the overwrite and folder params are ignored
item = userInfo.addItem(itemParameters=itemParameters, overwrite=True)
print item.title
| mit | 5,012,614,175,353,012,000 | 36.919192 | 176 | 0.644113 | false |
tangentlabs/django-fancypages | fancypages/contrib/oscar_fancypages/abstract_models.py | 1 | 2983 | # -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
from ...models import ContentBlock
class AbstractSingleProductBlock(ContentBlock):
name = _("Single Product")
code = 'single-product'
group = _("Catalogue")
template_name = "fancypages/blocks/productblock.html"
product = models.ForeignKey(
'catalogue.Product', verbose_name=_("Single Product"), null=True)
def __unicode__(self):
if self.product:
return u"Product '{0}'".format(self.product.upc)
return u"Product '{0}'".format(self.id)
class Meta:
abstract = True
class AbstractHandPickedProductsPromotionBlock(ContentBlock):
name = _("Hand Picked Products Promotion")
code = 'promotion-hand-picked-products'
group = _("Catalogue")
template_name = "fancypages/blocks/promotionblock.html"
promotion = models.ForeignKey(
'promotions.HandPickedProductList', null=True,
verbose_name=_("Hand Picked Products Promotion"))
def __unicode__(self):
if self.promotion:
return u"Promotion '{0}'".format(self.promotion.pk)
return u"Promotion '{0}'".format(self.id)
class Meta:
abstract = True
class AbstractAutomaticProductsPromotionBlock(ContentBlock):
name = _("Automatic Products Promotion")
code = 'promotion-ordered-products'
group = _("Catalogue")
template_name = "fancypages/blocks/promotionblock.html"
promotion = models.ForeignKey(
'promotions.AutomaticProductList',
verbose_name=_("Automatic Products Promotion"), null=True)
def __unicode__(self):
if self.promotion:
return u"Promotion '{0}'".format(self.promotion.pk)
return u"Promotion '{0}'".format(self.id)
class Meta:
abstract = True
class AbstractOfferBlock(ContentBlock):
name = _("Offer Products")
code = 'products-range'
group = _("Catalogue")
template_name = "fancypages/blocks/offerblock.html"
offer = models.ForeignKey(
'offer.ConditionalOffer', verbose_name=_("Offer"), null=True)
@property
def products(self):
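        # Products covered by the offer's range; a range flagged as including
        # all products falls back to every browsable, discountable product.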
Product = models.get_model('catalogue', 'Product')
product_range = self.offer.condition.range
if product_range.includes_all_products:
return Product.browsable.filter(is_discountable=True)
return product_range.included_products.filter(is_discountable=True)
def __unicode__(self):
if self.offer:
return u"Offer '{0}'".format(self.offer.pk)
return u"Offer '{0}'".format(self.id)
class Meta:
abstract = True
class AbstractPrimaryNavigationBlock(ContentBlock):
name = _("Primary Navigation")
code = 'primary-navigation'
group = _("Content")
template_name = "fancypages/blocks/primary_navigation_block.html"
def __unicode__(self):
return u'Primary Navigation'
class Meta:
abstract = True
| bsd-3-clause | -2,053,354,243,874,709,800 | 28.83 | 75 | 0.65404 | false |
cpcloud/ibis | ibis/impala/tests/test_exprs.py | 1 | 52316 | import unittest
from decimal import Decimal
from io import StringIO
import pandas as pd
import pandas.util.testing as tm
import pytest
import ibis
import ibis.expr.api as api
import ibis.expr.types as ir
from ibis import literal as L
from ibis.common.exceptions import RelationError
from ibis.expr.datatypes import Category
from ibis.expr.tests.mocks import MockConnection
from ibis.impala.compiler import ( # noqa: E402
ImpalaDialect,
ImpalaExprTranslator,
to_sql,
)
from ibis.sql.tests.test_compiler import ExprTestCases # noqa: E402
pytest.importorskip('hdfs')
pytest.importorskip('sqlalchemy')
pytest.importorskip('impala.dbapi')
pytestmark = pytest.mark.impala
def approx_equal(a, b, eps):
assert abs(a - b) < eps
class ExprSQLTest:
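    # Mixin with helpers that translate ibis expressions to Impala SQL and
    # compare the generated strings against expected values.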
def _check_expr_cases(self, cases, named=False):
for expr, expected in cases:
repr(expr)
result = self._translate(expr, named=named)
assert result == expected
def _translate(self, expr, context=None, named=False):
if context is None:
context = ImpalaDialect.make_context()
translator = ImpalaExprTranslator(expr, context=context, named=named)
return translator.get_result()
class TestValueExprs(unittest.TestCase, ExprSQLTest):
def setUp(self):
self.con = MockConnection()
self.table = self.con.table('alltypes')
self.int_cols = ['a', 'b', 'c', 'd']
self.bool_cols = ['h']
self.float_cols = ['e', 'f']
def _check_literals(self, cases):
for value, expected in cases:
lit_expr = L(value)
result = self._translate(lit_expr)
assert result == expected
def test_string_literals(self):
cases = [
('simple', "'simple'"),
('I can\'t', "'I can\\'t'"),
('An "escape"', "'An \"escape\"'"),
]
for value, expected in cases:
lit_expr = L(value)
result = self._translate(lit_expr)
assert result == expected
def test_decimal_builtins(self):
t = self.con.table('tpch_lineitem')
col = t.l_extendedprice
cases = [
(col.precision(), 'precision(`l_extendedprice`)'),
(col.scale(), 'scale(`l_extendedprice`)'),
]
self._check_expr_cases(cases)
def test_number_boolean_literals(self):
cases = [(5, '5'), (1.5, '1.5'), (True, 'TRUE'), (False, 'FALSE')]
self._check_literals(cases)
def test_column_ref_table_aliases(self):
context = ImpalaDialect.make_context()
table1 = ibis.table([('key1', 'string'), ('value1', 'double')])
table2 = ibis.table([('key2', 'string'), ('value and2', 'double')])
context.set_ref(table1, 't0')
context.set_ref(table2, 't1')
expr = table1['value1'] - table2['value and2']
result = self._translate(expr, context=context)
expected = 't0.`value1` - t1.`value and2`'
assert result == expected
def test_column_ref_quoting(self):
schema = [('has a space', 'double')]
table = ibis.table(schema)
self._translate(table['has a space'], named='`has a space`')
def test_identifier_quoting(self):
schema = [('date', 'double'), ('table', 'string')]
table = ibis.table(schema)
self._translate(table['date'], named='`date`')
self._translate(table['table'], named='`table`')
def test_named_expressions(self):
a, b, g = self.table.get_columns(['a', 'b', 'g'])
cases = [
(g.cast('double').name('g_dub'), 'CAST(`g` AS double) AS `g_dub`'),
(g.name('has a space'), '`g` AS `has a space`'),
(((a - b) * a).name('expr'), '(`a` - `b`) * `a` AS `expr`'),
]
return self._check_expr_cases(cases, named=True)
def test_binary_infix_operators(self):
# For each function, verify that the generated code is what we expect
a, b, h = self.table.get_columns(['a', 'b', 'h'])
bool_col = a > 0
cases = [
(a + b, '`a` + `b`'),
(a - b, '`a` - `b`'),
(a * b, '`a` * `b`'),
(a / b, '`a` / `b`'),
(a ** b, 'pow(`a`, `b`)'),
(a < b, '`a` < `b`'),
(a <= b, '`a` <= `b`'),
(a > b, '`a` > `b`'),
(a >= b, '`a` >= `b`'),
(a == b, '`a` = `b`'),
(a != b, '`a` != `b`'),
(h & bool_col, '`h` AND (`a` > 0)'),
(h | bool_col, '`h` OR (`a` > 0)'),
# xor is brute force
(h ^ bool_col, '(`h` OR (`a` > 0)) AND NOT (`h` AND (`a` > 0))'),
]
self._check_expr_cases(cases)
def test_binary_infix_parenthesization(self):
a, b, c = self.table.get_columns(['a', 'b', 'c'])
cases = [
((a + b) + c, '(`a` + `b`) + `c`'),
(a.log() + c, 'ln(`a`) + `c`'),
(b + (-(a + c)), '`b` + (-(`a` + `c`))'),
]
self._check_expr_cases(cases)
def test_between(self):
cases = [(self.table.f.between(0, 1), '`f` BETWEEN 0 AND 1')]
self._check_expr_cases(cases)
def test_isnull_notnull(self):
cases = [
(self.table['g'].isnull(), '`g` IS NULL'),
(self.table['a'].notnull(), '`a` IS NOT NULL'),
(
(self.table['a'] + self.table['b']).isnull(),
'`a` + `b` IS NULL',
),
]
self._check_expr_cases(cases)
def test_casts(self):
a, d, g = self.table.get_columns(['a', 'd', 'g'])
cases = [
(a.cast('int16'), 'CAST(`a` AS smallint)'),
(a.cast('int32'), 'CAST(`a` AS int)'),
(a.cast('int64'), 'CAST(`a` AS bigint)'),
(a.cast('float'), 'CAST(`a` AS float)'),
(a.cast('double'), 'CAST(`a` AS double)'),
(a.cast('string'), 'CAST(`a` AS string)'),
(d.cast('int8'), 'CAST(`d` AS tinyint)'),
(g.cast('double'), 'CAST(`g` AS double)'),
(g.cast('timestamp'), 'CAST(`g` AS timestamp)'),
]
self._check_expr_cases(cases)
def test_misc_conditionals(self):
a = self.table.a
cases = [(a.nullif(0), 'nullif(`a`, 0)')]
self._check_expr_cases(cases)
def test_decimal_casts(self):
cases = [
(
L('9.9999999').cast('decimal(38, 5)'),
"CAST('9.9999999' AS decimal(38, 5))",
),
(
self.table.f.cast('decimal(12, 2)'),
"CAST(`f` AS decimal(12, 2))",
),
]
self._check_expr_cases(cases)
def test_negate(self):
cases = [
(-self.table['a'], '-`a`'),
(-self.table['f'], '-`f`'),
(-self.table['h'], 'NOT `h`'),
]
self._check_expr_cases(cases)
def test_timestamp_extract_field(self):
fields = [
'year',
'month',
'day',
'hour',
'minute',
'second',
'millisecond',
]
cases = [
(
getattr(self.table.i, field)(),
"extract(`i`, '{0}')".format(field),
)
for field in fields
]
self._check_expr_cases(cases)
# integration with SQL translation
expr = self.table[
self.table.i.year().name('year'),
self.table.i.month().name('month'),
self.table.i.day().name('day'),
]
result = to_sql(expr)
expected = """SELECT extract(`i`, 'year') AS `year`, extract(`i`, 'month') AS `month`,
extract(`i`, 'day') AS `day`
FROM alltypes"""
assert result == expected
def test_timestamp_now(self):
cases = [(ibis.now(), 'now()')]
self._check_expr_cases(cases)
def test_timestamp_deltas(self):
units = [
('years', 'year'),
('months', 'month'),
('weeks', 'week'),
('days', 'day'),
('hours', 'hour'),
('minutes', 'minute'),
('seconds', 'second'),
]
t = self.table.i
f = '`i`'
cases = []
for unit, compiled_unit in units:
K = 5
offset = ibis.interval(**{unit: K})
add_template = 'date_add({1}, INTERVAL {2} {0})'
sub_template = 'date_sub({1}, INTERVAL {2} {0})'
cases.append(
(t + offset, add_template.format(compiled_unit.upper(), f, K))
)
cases.append(
(t - offset, sub_template.format(compiled_unit.upper(), f, K))
)
self._check_expr_cases(cases)
def test_timestamp_literals(self):
from pandas import Timestamp
tv1 = '2015-01-01 12:34:56'
ex1 = "'2015-01-01 12:34:56'"
cases = [
(L(Timestamp(tv1)), ex1),
(L(Timestamp(tv1).to_pydatetime()), ex1),
(ibis.timestamp(tv1), ex1),
]
self._check_expr_cases(cases)
def test_timestamp_day_of_week(self):
timestamp_value = L('2015-09-01 01:00:23', type='timestamp')
cases = [
(
timestamp_value.day_of_week.index(),
"pmod(dayofweek('2015-09-01 01:00:23') - 2, 7)",
),
(
timestamp_value.day_of_week.full_name(),
"dayname('2015-09-01 01:00:23')",
),
]
self._check_expr_cases(cases)
def test_timestamp_from_integer(self):
col = self.table.c
cases = [
(
col.to_timestamp(),
'CAST(from_unixtime(`c`, "yyyy-MM-dd HH:mm:ss") '
'AS timestamp)',
),
(
col.to_timestamp('ms'),
'CAST(from_unixtime(CAST(floor(`c` / 1000) AS int), '
'"yyyy-MM-dd HH:mm:ss") '
'AS timestamp)',
),
(
col.to_timestamp('us'),
'CAST(from_unixtime(CAST(floor(`c` / 1000000) AS int), '
'"yyyy-MM-dd HH:mm:ss") '
'AS timestamp)',
),
]
self._check_expr_cases(cases)
def test_correlated_predicate_subquery(self):
t0 = self.table
t1 = t0.view()
expr = t0.g == t1.g
ctx = ImpalaDialect.make_context()
ctx.make_alias(t0)
# Grab alias from parent context
subctx = ctx.subcontext()
subctx.make_alias(t1)
subctx.make_alias(t0)
result = self._translate(expr, context=subctx)
expected = "t0.`g` = t1.`g`"
assert result == expected
def test_any_all(self):
t = self.table
bool_expr = t.f == 0
cases = [
(bool_expr.any(), 'max(`f` = 0)'),
(-bool_expr.any(), 'max(`f` = 0) = FALSE'),
(bool_expr.all(), 'min(`f` = 0)'),
(-bool_expr.all(), 'min(`f` = 0) = FALSE'),
]
self._check_expr_cases(cases)
class TestUnaryBuiltins(unittest.TestCase, ExprSQLTest):
def setUp(self):
self.con = MockConnection()
self.table = self.con.table('functional_alltypes')
def test_numeric_unary_builtins(self):
# No argument functions
functions = [
'abs',
'ceil',
'floor',
'exp',
'sqrt',
('log', 'ln'),
('approx_median', 'appx_median'),
('approx_nunique', 'ndv'),
'ln',
'log2',
'log10',
'nullifzero',
'zeroifnull',
]
cases = []
for what in functions:
if isinstance(what, tuple):
ibis_name, sql_name = what
else:
ibis_name = sql_name = what
for cname in ['double_col', 'int_col']:
expr = getattr(self.table[cname], ibis_name)()
cases.append(
(expr, '{0}({1})'.format(sql_name, '`{0}`'.format(cname)))
)
self._check_expr_cases(cases)
def test_log_other_bases(self):
cases = [(self.table.double_col.log(5), 'log(5, `double_col`)')]
self._check_expr_cases(cases)
def test_round(self):
cases = [
(self.table.double_col.round(), 'round(`double_col`)'),
(self.table.double_col.round(0), 'round(`double_col`, 0)'),
(self.table.double_col.round(2), 'round(`double_col`, 2)'),
(
self.table.double_col.round(self.table.tinyint_col),
'round(`double_col`, `tinyint_col`)',
),
]
self._check_expr_cases(cases)
def test_sign(self):
cases = [
(
self.table.tinyint_col.sign(),
'CAST(sign(`tinyint_col`) AS tinyint)',
),
(self.table.float_col.sign(), 'sign(`float_col`)'),
(
self.table.double_col.sign(),
'CAST(sign(`double_col`) AS double)',
),
]
self._check_expr_cases(cases)
def test_hash(self):
expr = self.table.int_col.hash()
assert isinstance(expr, ir.IntegerColumn)
assert isinstance(self.table.int_col.sum().hash(), ir.IntegerScalar)
cases = [(self.table.int_col.hash(), 'fnv_hash(`int_col`)')]
self._check_expr_cases(cases)
def test_reduction_where(self):
cond = self.table.bigint_col < 70
c = self.table.double_col
tmp = (
'{0}(CASE WHEN `bigint_col` < 70 THEN `double_col` '
'ELSE NULL END)'
)
cases = [
(c.sum(where=cond), tmp.format('sum')),
(c.count(where=cond), tmp.format('count')),
(c.mean(where=cond), tmp.format('avg')),
(c.max(where=cond), tmp.format('max')),
(c.min(where=cond), tmp.format('min')),
(c.std(where=cond), tmp.format('stddev_samp')),
(c.std(where=cond, how='pop'), tmp.format('stddev_pop')),
(c.var(where=cond), tmp.format('var_samp')),
(c.var(where=cond, how='pop'), tmp.format('var_pop')),
]
self._check_expr_cases(cases)
def test_reduction_invalid_where(self):
condbad_literal = L('T')
c = self.table.double_col
for reduction in [c.sum, c.count, c.mean, c.max, c.min]:
with self.assertRaises(TypeError):
reduction(where=condbad_literal)
class TestCaseExprs(unittest.TestCase, ExprSQLTest, ExprTestCases):
def setUp(self):
self.con = MockConnection()
self.table = self.con.table('alltypes')
def test_isnull_1_0(self):
expr = self.table.g.isnull().ifelse(1, 0)
result = self._translate(expr)
expected = 'CASE WHEN `g` IS NULL THEN 1 ELSE 0 END'
assert result == expected
# inside some other function
result = self._translate(expr.sum())
expected = 'sum(CASE WHEN `g` IS NULL THEN 1 ELSE 0 END)'
assert result == expected
def test_simple_case(self):
expr = self._case_simple_case()
result = self._translate(expr)
expected = """CASE `g`
WHEN 'foo' THEN 'bar'
WHEN 'baz' THEN 'qux'
ELSE 'default'
END"""
assert result == expected
def test_search_case(self):
expr = self._case_search_case()
result = self._translate(expr)
expected = """CASE
WHEN `f` > 0 THEN `d` * 2
WHEN `c` < 0 THEN `a` * 2
ELSE CAST(NULL AS bigint)
END"""
assert result == expected
def test_where_use_if(self):
expr = ibis.where(self.table.f > 0, self.table.e, self.table.a)
assert isinstance(expr, ir.FloatingValue)
result = self._translate(expr)
expected = "if(`f` > 0, `e`, `a`)"
assert result == expected
def test_nullif_ifnull(self):
table = self.con.table('tpch_lineitem')
f = table.l_quantity
cases = [
(f.nullif(f), 'nullif(`l_quantity`, `l_quantity`)'),
(
(f == 0).nullif(f == 0),
'nullif(`l_quantity` = 0, `l_quantity` = 0)',
),
(
(f != 0).nullif(f == 0),
'nullif(`l_quantity` != 0, `l_quantity` = 0)',
),
(f.fillna(0), 'isnull(`l_quantity`, CAST(0 AS decimal(12, 2)))'),
]
self._check_expr_cases(cases)
def test_decimal_fillna_cast_arg(self):
table = self.con.table('tpch_lineitem')
f = table.l_extendedprice
cases = [
(
f.fillna(0),
'isnull(`l_extendedprice`, CAST(0 AS decimal(12, 2)))',
),
(f.fillna(0.0), 'isnull(`l_extendedprice`, 0.0)'),
]
self._check_expr_cases(cases)
def test_identical_to(self):
t = self.con.table('functional_alltypes')
expr = t.tinyint_col.identical_to(t.double_col)
result = to_sql(expr)
expected = """\
SELECT `tinyint_col` IS NOT DISTINCT FROM `double_col` AS `tmp`
FROM functional_alltypes"""
assert result == expected
def test_identical_to_special_case(self):
expr = ibis.NA.cast('int64').identical_to(ibis.NA.cast('int64'))
result = to_sql(expr)
assert result == 'SELECT TRUE AS `tmp`'
class TestBucketHistogram(unittest.TestCase, ExprSQLTest):
def setUp(self):
self.con = MockConnection()
self.table = self.con.table('alltypes')
def test_bucket_to_case(self):
buckets = [0, 10, 25, 50]
expr1 = self.table.f.bucket(buckets)
expected1 = """\
CASE
WHEN (`f` >= 0) AND (`f` < 10) THEN 0
WHEN (`f` >= 10) AND (`f` < 25) THEN 1
WHEN (`f` >= 25) AND (`f` <= 50) THEN 2
ELSE CAST(NULL AS tinyint)
END"""
expr2 = self.table.f.bucket(buckets, close_extreme=False)
expected2 = """\
CASE
WHEN (`f` >= 0) AND (`f` < 10) THEN 0
WHEN (`f` >= 10) AND (`f` < 25) THEN 1
WHEN (`f` >= 25) AND (`f` < 50) THEN 2
ELSE CAST(NULL AS tinyint)
END"""
expr3 = self.table.f.bucket(buckets, closed='right')
expected3 = """\
CASE
WHEN (`f` >= 0) AND (`f` <= 10) THEN 0
WHEN (`f` > 10) AND (`f` <= 25) THEN 1
WHEN (`f` > 25) AND (`f` <= 50) THEN 2
ELSE CAST(NULL AS tinyint)
END"""
expr4 = self.table.f.bucket(
buckets, closed='right', close_extreme=False
)
expected4 = """\
CASE
WHEN (`f` > 0) AND (`f` <= 10) THEN 0
WHEN (`f` > 10) AND (`f` <= 25) THEN 1
WHEN (`f` > 25) AND (`f` <= 50) THEN 2
ELSE CAST(NULL AS tinyint)
END"""
expr5 = self.table.f.bucket(buckets, include_under=True)
expected5 = """\
CASE
WHEN `f` < 0 THEN 0
WHEN (`f` >= 0) AND (`f` < 10) THEN 1
WHEN (`f` >= 10) AND (`f` < 25) THEN 2
WHEN (`f` >= 25) AND (`f` <= 50) THEN 3
ELSE CAST(NULL AS tinyint)
END"""
expr6 = self.table.f.bucket(
buckets, include_under=True, include_over=True
)
expected6 = """\
CASE
WHEN `f` < 0 THEN 0
WHEN (`f` >= 0) AND (`f` < 10) THEN 1
WHEN (`f` >= 10) AND (`f` < 25) THEN 2
WHEN (`f` >= 25) AND (`f` <= 50) THEN 3
WHEN `f` > 50 THEN 4
ELSE CAST(NULL AS tinyint)
END"""
expr7 = self.table.f.bucket(
buckets, close_extreme=False, include_under=True, include_over=True
)
expected7 = """\
CASE
WHEN `f` < 0 THEN 0
WHEN (`f` >= 0) AND (`f` < 10) THEN 1
WHEN (`f` >= 10) AND (`f` < 25) THEN 2
WHEN (`f` >= 25) AND (`f` < 50) THEN 3
WHEN `f` >= 50 THEN 4
ELSE CAST(NULL AS tinyint)
END"""
expr8 = self.table.f.bucket(
buckets, closed='right', close_extreme=False, include_under=True
)
expected8 = """\
CASE
WHEN `f` <= 0 THEN 0
WHEN (`f` > 0) AND (`f` <= 10) THEN 1
WHEN (`f` > 10) AND (`f` <= 25) THEN 2
WHEN (`f` > 25) AND (`f` <= 50) THEN 3
ELSE CAST(NULL AS tinyint)
END"""
expr9 = self.table.f.bucket(
[10], closed='right', include_over=True, include_under=True
)
expected9 = """\
CASE
WHEN `f` <= 10 THEN 0
WHEN `f` > 10 THEN 1
ELSE CAST(NULL AS tinyint)
END"""
expr10 = self.table.f.bucket(
[10], include_over=True, include_under=True
)
expected10 = """\
CASE
WHEN `f` < 10 THEN 0
WHEN `f` >= 10 THEN 1
ELSE CAST(NULL AS tinyint)
END"""
cases = [
(expr1, expected1),
(expr2, expected2),
(expr3, expected3),
(expr4, expected4),
(expr5, expected5),
(expr6, expected6),
(expr7, expected7),
(expr8, expected8),
(expr9, expected9),
(expr10, expected10),
]
self._check_expr_cases(cases)
def test_cast_category_to_int_noop(self):
# Because the bucket result is an integer, no explicit cast is
# necessary
expr = self.table.f.bucket(
[10], include_over=True, include_under=True
).cast('int32')
expected = """\
CASE
WHEN `f` < 10 THEN 0
WHEN `f` >= 10 THEN 1
ELSE CAST(NULL AS tinyint)
END"""
expr2 = self.table.f.bucket(
[10], include_over=True, include_under=True
).cast('double')
expected2 = """\
CAST(CASE
WHEN `f` < 10 THEN 0
WHEN `f` >= 10 THEN 1
ELSE CAST(NULL AS tinyint)
END AS double)"""
self._check_expr_cases([(expr, expected), (expr2, expected2)])
def test_bucket_assign_labels(self):
buckets = [0, 10, 25, 50]
bucket = self.table.f.bucket(buckets, include_under=True)
size = self.table.group_by(bucket.name('tier')).size()
labelled = size.tier.label(
['Under 0', '0 to 10', '10 to 25', '25 to 50'], nulls='error'
).name('tier2')
expr = size[labelled, size['count']]
expected = """\
SELECT
CASE `tier`
WHEN 0 THEN 'Under 0'
WHEN 1 THEN '0 to 10'
WHEN 2 THEN '10 to 25'
WHEN 3 THEN '25 to 50'
ELSE 'error'
END AS `tier2`, `count`
FROM (
SELECT
CASE
WHEN `f` < 0 THEN 0
WHEN (`f` >= 0) AND (`f` < 10) THEN 1
WHEN (`f` >= 10) AND (`f` < 25) THEN 2
WHEN (`f` >= 25) AND (`f` <= 50) THEN 3
ELSE CAST(NULL AS tinyint)
END AS `tier`, count(*) AS `count`
FROM alltypes
GROUP BY 1
) t0"""
result = to_sql(expr)
assert result == expected
self.assertRaises(ValueError, size.tier.label, ['a', 'b', 'c'])
self.assertRaises(
ValueError, size.tier.label, ['a', 'b', 'c', 'd', 'e']
)
class TestInNotIn(unittest.TestCase, ExprSQLTest):
def setUp(self):
self.con = MockConnection()
self.table = self.con.table('alltypes')
def test_field_in_literals(self):
values = ['foo', 'bar', 'baz']
values_formatted = tuple(set(values))
cases = [
(self.table.g.isin(values), "`g` IN {}".format(values_formatted)),
(
self.table.g.notin(values),
"`g` NOT IN {}".format(values_formatted),
),
]
self._check_expr_cases(cases)
def test_literal_in_list(self):
cases = [
(
L(2).isin([self.table.a, self.table.b, self.table.c]),
'2 IN (`a`, `b`, `c`)',
),
(
L(2).notin([self.table.a, self.table.b, self.table.c]),
'2 NOT IN (`a`, `b`, `c`)',
),
]
self._check_expr_cases(cases)
def test_isin_notin_in_select(self):
values = ['foo', 'bar']
values_formatted = tuple(set(values))
filtered = self.table[self.table.g.isin(values)]
result = to_sql(filtered)
expected = """SELECT *
FROM alltypes
WHERE `g` IN {}"""
assert result == expected.format(values_formatted)
filtered = self.table[self.table.g.notin(values)]
result = to_sql(filtered)
expected = """SELECT *
FROM alltypes
WHERE `g` NOT IN {}"""
assert result == expected.format(values_formatted)
class TestCoalesceGreaterLeast(unittest.TestCase, ExprSQLTest):
def setUp(self):
self.con = MockConnection()
self.table = self.con.table('functional_alltypes')
def test_coalesce(self):
t = self.table
cases = [
(
ibis.coalesce(t.string_col, 'foo'),
"coalesce(`string_col`, 'foo')",
),
(
ibis.coalesce(t.int_col, t.bigint_col),
'coalesce(`int_col`, `bigint_col`)',
),
]
self._check_expr_cases(cases)
def test_greatest(self):
t = self.table
cases = [
(
ibis.greatest(t.string_col, 'foo'),
"greatest(`string_col`, 'foo')",
),
(
ibis.greatest(t.int_col, t.bigint_col),
'greatest(`int_col`, `bigint_col`)',
),
]
self._check_expr_cases(cases)
def test_least(self):
t = self.table
cases = [
(ibis.least(t.string_col, 'foo'), "least(`string_col`, 'foo')"),
(
ibis.least(t.int_col, t.bigint_col),
'least(`int_col`, `bigint_col`)',
),
]
self._check_expr_cases(cases)
class TestAnalyticFunctions(unittest.TestCase, ExprSQLTest):
def setUp(self):
self.con = MockConnection()
self.table = self.con.table('functional_alltypes')
def test_analytic_exprs(self):
t = self.table
w = ibis.window(order_by=t.float_col)
cases = [
(
ibis.row_number().over(w),
'(row_number() OVER (ORDER BY `float_col`) - 1)',
),
(t.string_col.lag(), 'lag(`string_col`)'),
(t.string_col.lag(2), 'lag(`string_col`, 2)'),
(t.string_col.lag(default=0), 'lag(`string_col`, 1, 0)'),
(t.string_col.lead(), 'lead(`string_col`)'),
(t.string_col.lead(2), 'lead(`string_col`, 2)'),
(t.string_col.lead(default=0), 'lead(`string_col`, 1, 0)'),
(t.double_col.first(), 'first_value(`double_col`)'),
(t.double_col.last(), 'last_value(`double_col`)'),
# (t.double_col.nth(4), 'first_value(lag(double_col, 4 - 1))')
(t.double_col.ntile(3), 'ntile(3)'),
(t.double_col.percent_rank(), 'percent_rank()'),
]
self._check_expr_cases(cases)
class TestStringBuiltins(unittest.TestCase, ExprSQLTest):
def setUp(self):
self.con = MockConnection()
self.table = self.con.table('functional_alltypes')
def test_unary_ops(self):
s = self.table.string_col
cases = [
(s.lower(), 'lower(`string_col`)'),
(s.upper(), 'upper(`string_col`)'),
(s.reverse(), 'reverse(`string_col`)'),
(s.strip(), 'trim(`string_col`)'),
(s.lstrip(), 'ltrim(`string_col`)'),
(s.rstrip(), 'rtrim(`string_col`)'),
(s.capitalize(), 'initcap(`string_col`)'),
(s.length(), 'length(`string_col`)'),
(s.ascii_str(), 'ascii(`string_col`)'),
]
self._check_expr_cases(cases)
def test_substr(self):
        # Databases index characters starting from 1, while ibis uses 0-based positions
cases = [
(self.table.string_col.substr(2), 'substr(`string_col`, 2 + 1)'),
(
self.table.string_col.substr(0, 3),
'substr(`string_col`, 0 + 1, 3)',
),
]
self._check_expr_cases(cases)
def test_strright(self):
cases = [(self.table.string_col.right(4), 'strright(`string_col`, 4)')]
self._check_expr_cases(cases)
def test_like(self):
cases = [
(self.table.string_col.like('foo%'), "`string_col` LIKE 'foo%'"),
(
self.table.string_col.like(['foo%', '%bar']),
"`string_col` LIKE 'foo%' OR `string_col` LIKE '%bar'",
),
]
self._check_expr_cases(cases)
def test_rlike(self):
ex = r"regexp_like(`string_col`, '[\d]+')"
cases = [
(self.table.string_col.rlike(r'[\d]+'), ex),
(self.table.string_col.re_search(r'[\d]+'), ex),
]
self._check_expr_cases(cases)
def test_re_extract(self):
sql = r"regexp_extract(`string_col`, '[\d]+', 0)"
cases = [(self.table.string_col.re_extract(r'[\d]+', 0), sql)]
self._check_expr_cases(cases)
def test_re_replace(self):
sql = r"regexp_replace(`string_col`, '[\d]+', 'aaa')"
cases = [(self.table.string_col.re_replace(r'[\d]+', 'aaa'), sql)]
self._check_expr_cases(cases)
def test_parse_url(self):
sql = "parse_url(`string_col`, 'HOST')"
cases = [(self.table.string_col.parse_url('HOST'), sql)]
self._check_expr_cases(cases)
def test_repeat(self):
cases = [(self.table.string_col.repeat(2), 'repeat(`string_col`, 2)')]
self._check_expr_cases(cases)
def test_translate(self):
cases = [
(
self.table.string_col.translate('a', 'b'),
"translate(`string_col`, 'a', 'b')",
)
]
self._check_expr_cases(cases)
def test_find(self):
s = self.table.string_col
i1 = self.table.tinyint_col
cases = [
(s.find('a'), "locate('a', `string_col`) - 1"),
(s.find('a', 2), "locate('a', `string_col`, 3) - 1"),
(
s.find('a', start=i1),
"locate('a', `string_col`, `tinyint_col` + 1) - 1",
),
]
self._check_expr_cases(cases)
def test_lpad(self):
cases = [
(self.table.string_col.lpad(1, 'a'), "lpad(`string_col`, 1, 'a')"),
(self.table.string_col.lpad(25), "lpad(`string_col`, 25, ' ')"),
]
self._check_expr_cases(cases)
def test_rpad(self):
cases = [
(self.table.string_col.rpad(1, 'a'), "rpad(`string_col`, 1, 'a')"),
(self.table.string_col.rpad(25), "rpad(`string_col`, 25, ' ')"),
]
self._check_expr_cases(cases)
def test_find_in_set(self):
cases = [
(
self.table.string_col.find_in_set(['a']),
"find_in_set(`string_col`, 'a') - 1",
),
(
self.table.string_col.find_in_set(['a', 'b']),
"find_in_set(`string_col`, 'a,b') - 1",
),
]
self._check_expr_cases(cases)
def test_string_join(self):
cases = [(L(',').join(['a', 'b']), "concat_ws(',', 'a', 'b')")]
self._check_expr_cases(cases)
def test_embedded_identifier_quoting(alltypes):
t = alltypes
expr = t[[(t.double_col * 2).name('double(fun)')]]['double(fun)'].sum()
expr.execute()
def test_table_info(alltypes):
buf = StringIO()
alltypes.info(buf=buf)
assert buf.getvalue() is not None
@pytest.mark.parametrize(('expr', 'expected'), [(L(1) + L(2), 3)])
def test_execute_exprs_no_table_ref(con, expr, expected):
result = con.execute(expr)
assert result == expected
# ExprList
exlist = ibis.api.expr_list(
[L(1).name('a'), ibis.now().name('b'), L(2).log().name('c')]
)
con.execute(exlist)
def test_summary_execute(alltypes):
table = alltypes
# also test set_column while we're at it
table = table.set_column('double_col', table.double_col * 2)
expr = table.double_col.summary()
repr(expr)
result = expr.execute()
assert isinstance(result, pd.DataFrame)
expr = table.group_by('string_col').aggregate(
[
table.double_col.summary().prefix('double_'),
table.float_col.summary().prefix('float_'),
table.string_col.summary().suffix('_string'),
]
)
result = expr.execute()
assert isinstance(result, pd.DataFrame)
def test_distinct_array(con, alltypes):
table = alltypes
expr = table.string_col.distinct()
result = con.execute(expr)
assert isinstance(result, pd.Series)
def test_decimal_metadata(con):
table = con.table('tpch_lineitem')
expr = table.l_quantity
assert expr.type().precision == 12
assert expr.type().scale == 2
    # TODO: what if the user's impyla version does not have decimal metadata?
def test_builtins_1(con, alltypes):
table = alltypes
i1 = table.tinyint_col
i4 = table.int_col
i8 = table.bigint_col
d = table.double_col
s = table.string_col
exprs = [
api.now(),
api.e,
# hash functions
i4.hash(),
d.hash(),
s.hash(),
# modulus cases
i1 % 5,
i4 % 10,
20 % i1,
d % 5,
i1.zeroifnull(),
i4.zeroifnull(),
i8.zeroifnull(),
i4.to_timestamp('s'),
i4.to_timestamp('ms'),
i4.to_timestamp('us'),
i8.to_timestamp(),
d.abs(),
d.cast('decimal(12, 2)'),
d.cast('int32'),
d.ceil(),
d.exp(),
d.isnull(),
d.fillna(0),
d.floor(),
d.log(),
d.ln(),
d.log2(),
d.log10(),
d.notnull(),
d.zeroifnull(),
d.nullifzero(),
d.round(),
d.round(2),
d.round(i1),
i1.sign(),
i4.sign(),
d.sign(),
# conv
i1.convert_base(10, 2),
i4.convert_base(10, 2),
i8.convert_base(10, 2),
s.convert_base(10, 2),
d.sqrt(),
d.zeroifnull(),
# nullif cases
5 / i1.nullif(0),
5 / i1.nullif(i4),
5 / i4.nullif(0),
5 / d.nullif(0),
api.literal(5).isin([i1, i4, d]),
# tier and histogram
d.bucket([0, 10, 25, 50, 100]),
d.bucket([0, 10, 25, 50], include_over=True),
d.bucket([0, 10, 25, 50], include_over=True, close_extreme=False),
d.bucket([10, 25, 50, 100], include_under=True),
d.histogram(10),
d.histogram(5, base=10),
d.histogram(base=10, binwidth=5),
# coalesce-like cases
api.coalesce(
table.int_col, api.null(), table.smallint_col, table.bigint_col, 5
),
api.greatest(table.float_col, table.double_col, 5),
api.least(table.string_col, 'foo'),
# string stuff
s.contains('6'),
s.like('6%'),
s.re_search(r'[\d]+'),
s.re_extract(r'[\d]+', 0),
s.re_replace(r'[\d]+', 'a'),
s.repeat(2),
s.translate("a", "b"),
s.find("a"),
s.lpad(10, 'a'),
s.rpad(10, 'a'),
s.find_in_set(["a"]),
s.lower(),
s.upper(),
s.reverse(),
s.ascii_str(),
s.length(),
s.strip(),
s.lstrip(),
s.strip(),
# strings with int expr inputs
s.left(i1),
s.right(i1),
s.substr(i1, i1 + 2),
s.repeat(i1),
]
proj_exprs = [expr.name('e%d' % i) for i, expr in enumerate(exprs)]
projection = table[proj_exprs]
projection.limit(10).execute()
_check_impala_output_types_match(con, projection)
def _check_impala_output_types_match(con, table):
query = to_sql(table)
t = con.sql(query)
def _clean_type(x):
if isinstance(x, Category):
x = x.to_integer_type()
return x
left, right = t.schema(), table.schema()
for i, (n, left, right) in enumerate(
zip(left.names, left.types, right.types)
):
left = _clean_type(left)
right = _clean_type(right)
if left != right:
pytest.fail(
'Value for {0} had left type {1}'
' and right type {2}'.format(n, left, right)
)
@pytest.mark.parametrize(
('expr', 'expected'),
[
# mod cases
(L(50) % 5, 0),
(L(50000) % 10, 0),
(250 % L(50), 0),
# nullif cases
(5 / L(50).nullif(0), 0.1),
(5 / L(50).nullif(L(50000)), 0.1),
(5 / L(50000).nullif(0), 0.0001),
(L(50000).zeroifnull(), 50000),
],
)
def test_int_builtins(con, expr, expected):
result = con.execute(expr)
assert result == expected, to_sql(expr)
def test_column_types(alltypes):
df = alltypes.execute()
assert df.tinyint_col.dtype.name == 'int8'
assert df.smallint_col.dtype.name == 'int16'
assert df.int_col.dtype.name == 'int32'
assert df.bigint_col.dtype.name == 'int64'
assert df.float_col.dtype.name == 'float32'
assert df.double_col.dtype.name == 'float64'
assert df.timestamp_col.dtype.name == 'datetime64[ns]'
@pytest.mark.parametrize(
('expr', 'expected'),
[
(L(50000).to_timestamp('s'), pd.to_datetime(50000, unit='s')),
(L(50000).to_timestamp('ms'), pd.to_datetime(50000, unit='ms')),
(L(5 * 10 ** 8).to_timestamp(), pd.to_datetime(5 * 10 ** 8, unit='s')),
(
ibis.timestamp('2009-05-17 12:34:56').truncate('y'),
pd.Timestamp('2009-01-01'),
),
(
ibis.timestamp('2009-05-17 12:34:56').truncate('M'),
pd.Timestamp('2009-05-01'),
),
(
ibis.timestamp('2009-05-17 12:34:56').truncate('month'),
pd.Timestamp('2009-05-01'),
),
(
ibis.timestamp('2009-05-17 12:34:56').truncate('d'),
pd.Timestamp('2009-05-17'),
),
(
ibis.timestamp('2009-05-17 12:34:56').truncate('h'),
pd.Timestamp('2009-05-17 12:00'),
),
(
ibis.timestamp('2009-05-17 12:34:56').truncate('m'),
pd.Timestamp('2009-05-17 12:34'),
),
(
ibis.timestamp('2009-05-17 12:34:56').truncate('minute'),
pd.Timestamp('2009-05-17 12:34'),
),
],
)
def test_timestamp_builtins(con, expr, expected):
result = con.execute(expr)
assert result == expected, to_sql(expr)
@pytest.mark.parametrize(
('expr', 'expected'),
[
(L(-5).abs(), 5),
(L(5.245).cast('int32'), 5),
(L(5.245).ceil(), 6),
(L(5.245).isnull(), False),
(L(5.245).floor(), 5),
(L(5.245).notnull(), True),
(L(5.245).round(), 5),
(L(5.245).round(2), Decimal('5.25')),
(L(5.245).sign(), 1),
],
)
def test_decimal_builtins(con, expr, expected):
result = con.execute(expr)
assert result == expected, to_sql(expr)
@pytest.mark.parametrize(
('func', 'expected'),
[
pytest.param(lambda dc: dc, '5.245', id='id'),
pytest.param(lambda dc: dc % 5, '0.245', id='mod'),
pytest.param(lambda dc: dc.fillna(0), '5.245', id='fillna'),
pytest.param(lambda dc: dc.exp(), '189.6158', id='exp'),
pytest.param(lambda dc: dc.log(), '1.65728', id='log'),
pytest.param(lambda dc: dc.log2(), '2.39094', id='log2'),
pytest.param(lambda dc: dc.log10(), '0.71975', id='log10'),
pytest.param(lambda dc: dc.sqrt(), '2.29019', id='sqrt'),
pytest.param(lambda dc: dc.zeroifnull(), '5.245', id='zeroifnull'),
pytest.param(lambda dc: -dc, '-5.245', id='neg'),
],
)
def test_decimal_builtins_2(con, func, expected):
dc = L('5.245').cast('decimal(12, 5)')
expr = func(dc)
result = con.execute(expr)
tol = Decimal('0.0001')
approx_equal(Decimal(result), Decimal(expected), tol)
@pytest.mark.parametrize(
('expr', 'expected'),
[
(L('abcd').length(), 4),
(L('ABCD').lower(), 'abcd'),
(L('abcd').upper(), 'ABCD'),
(L('abcd').reverse(), 'dcba'),
(L('abcd').ascii_str(), 97),
(L(' a ').strip(), 'a'),
(L(' a ').lstrip(), 'a '),
(L(' a ').rstrip(), ' a'),
(L('abcd').capitalize(), 'Abcd'),
(L('abcd').substr(0, 2), 'ab'),
(L('abcd').left(2), 'ab'),
(L('abcd').right(2), 'cd'),
(L('abcd').repeat(2), 'abcdabcd'),
# global replace not available in Impala yet
# (L('aabbaabbaa').replace('bb', 'B'), 'aaBaaBaa'),
(L('0123').translate('012', 'abc'), 'abc3'),
(L('abcd').find('a'), 0),
(L('baaaab').find('b', 2), 5),
(L('abcd').lpad(1, '-'), 'a'),
(L('abcd').lpad(5), ' abcd'),
(L('abcd').rpad(1, '-'), 'a'),
(L('abcd').rpad(5), 'abcd '),
(L('abcd').find_in_set(['a', 'b', 'abcd']), 2),
(L(', ').join(['a', 'b']), 'a, b'),
(L('abcd').like('a%'), True),
(L('abcd').re_search('[a-z]'), True),
(L('abcd').re_extract('[a-z]', 0), 'a'),
(L('abcd').re_replace('(b)', '2'), 'a2cd'),
],
)
def test_string_functions(con, expr, expected):
result = con.execute(expr)
assert result == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
(L("https://www.cloudera.com").parse_url('HOST'), "www.cloudera.com"),
(
L('https://www.youtube.com/watch?v=kEuEcWfewf8&t=10').parse_url(
'QUERY', 'v'
),
'kEuEcWfewf8',
),
],
)
def test_parse_url(con, expr, expected):
result = con.execute(expr)
assert result == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
(L(7) / 2, 3.5),
(L(7) // 2, 3),
(L(7).floordiv(2), 3),
(L(2).rfloordiv(7), 3),
],
)
def test_div_floordiv(con, expr, expected):
result = con.execute(expr)
assert result == expected
@pytest.mark.xfail(
raises=RelationError,
reason='Equality was broken, and fixing it broke this test',
)
def test_filter_predicates(con):
t = con.table('tpch_nation')
predicates = [
lambda x: x.n_name.lower().like('%ge%'),
lambda x: x.n_name.lower().contains('ge'),
lambda x: x.n_name.lower().rlike('.*ge.*'),
]
expr = t
for pred in predicates:
expr = expr[pred(expr)].projection([expr])
expr.execute()
def test_histogram_value_counts(alltypes):
t = alltypes
expr = t.double_col.histogram(10).value_counts()
expr.execute()
def test_casted_expr_impala_bug(alltypes):
# Per GH #396. Prior to Impala 2.3.0, there was a bug in the query
# planner that caused this expression to fail
expr = alltypes.string_col.cast('double').value_counts()
expr.execute()
def test_decimal_timestamp_builtins(con):
table = con.table('tpch_lineitem')
dc = table.l_quantity
ts = table.l_receiptdate.cast('timestamp')
exprs = [
dc % 10,
dc + 5,
dc + dc,
dc / 2,
dc * 2,
dc ** 2,
dc.cast('double'),
api.where(table.l_discount > 0, dc * table.l_discount, api.NA),
dc.fillna(0),
ts < (ibis.now() + ibis.interval(months=3)),
ts < (ibis.timestamp('2005-01-01') + ibis.interval(months=3)),
# hashing
dc.hash(),
ts.hash(),
# truncate
ts.truncate('y'),
ts.truncate('q'),
ts.truncate('month'),
ts.truncate('d'),
ts.truncate('w'),
ts.truncate('h'),
ts.truncate('minute'),
]
timestamp_fields = [
'years',
'months',
'days',
'hours',
'minutes',
'seconds',
'weeks',
]
for field in timestamp_fields:
if hasattr(ts, field):
exprs.append(getattr(ts, field)())
offset = ibis.interval(**{field: 2})
exprs.append(ts + offset)
exprs.append(ts - offset)
proj_exprs = [expr.name('e%d' % i) for i, expr in enumerate(exprs)]
projection = table[proj_exprs].limit(10)
projection.execute()
def test_timestamp_scalar_in_filter(alltypes):
# #310
table = alltypes
expr = table.filter(
[
table.timestamp_col
< (ibis.timestamp('2010-01-01') + ibis.interval(months=3)),
table.timestamp_col < (ibis.now() + ibis.interval(days=10)),
]
).count()
expr.execute()
def test_aggregations(alltypes):
table = alltypes.limit(100)
d = table.double_col
s = table.string_col
cond = table.string_col.isin(['1', '7'])
exprs = [
table.bool_col.count(),
d.sum(),
d.mean(),
d.min(),
d.max(),
s.approx_nunique(),
d.approx_median(),
s.group_concat(),
d.std(),
d.std(how='pop'),
d.var(),
d.var(how='pop'),
table.bool_col.any(),
table.bool_col.notany(),
-table.bool_col.any(),
table.bool_col.all(),
table.bool_col.notall(),
-table.bool_col.all(),
table.bool_col.count(where=cond),
d.sum(where=cond),
d.mean(where=cond),
d.min(where=cond),
d.max(where=cond),
d.std(where=cond),
d.var(where=cond),
]
metrics = [expr.name('e%d' % i) for i, expr in enumerate(exprs)]
agged_table = table.aggregate(metrics)
agged_table.execute()
def test_analytic_functions(alltypes):
t = alltypes.limit(1000)
g = t.group_by('string_col').order_by('double_col')
f = t.float_col
exprs = [
f.lag(),
f.lead(),
f.rank(),
f.dense_rank(),
f.percent_rank(),
f.ntile(buckets=7),
f.first(),
f.last(),
f.first().over(ibis.window(preceding=10)),
f.first().over(ibis.window(following=10)),
ibis.row_number(),
f.cumsum(),
f.cummean(),
f.cummin(),
f.cummax(),
# boolean cumulative reductions
(f == 0).cumany(),
(f == 0).cumall(),
f.sum(),
f.mean(),
f.min(),
f.max(),
]
proj_exprs = [expr.name('e%d' % i) for i, expr in enumerate(exprs)]
proj_table = g.mutate(proj_exprs)
proj_table.execute()
def test_anti_join_self_reference_works(con, alltypes):
t = alltypes.limit(100)
t2 = t.view()
case = t[-(t.string_col == t2.string_col).any()]
con.explain(case)
def test_tpch_self_join_failure(con):
region = con.table('tpch_region')
nation = con.table('tpch_nation')
customer = con.table('tpch_customer')
orders = con.table('tpch_orders')
fields_of_interest = [
region.r_name.name('region'),
nation.n_name.name('nation'),
orders.o_totalprice.name('amount'),
orders.o_orderdate.cast('timestamp').name('odate'),
]
joined_all = (
region.join(nation, region.r_regionkey == nation.n_regionkey)
.join(customer, customer.c_nationkey == nation.n_nationkey)
.join(orders, orders.o_custkey == customer.c_custkey)[
fields_of_interest
]
)
year = joined_all.odate.year().name('year')
total = joined_all.amount.sum().cast('double').name('total')
annual_amounts = joined_all.group_by(['region', year]).aggregate(total)
current = annual_amounts
prior = annual_amounts.view()
yoy_change = (current.total - prior.total).name('yoy_change')
yoy = current.join(
prior,
(
(current.region == prior.region)
& (current.year == (prior.year - 1))
),
)[current.region, current.year, yoy_change]
# no analysis failure
con.explain(yoy)
def test_tpch_correlated_subquery_failure(con):
# #183 and other issues
region = con.table('tpch_region')
nation = con.table('tpch_nation')
customer = con.table('tpch_customer')
orders = con.table('tpch_orders')
fields_of_interest = [
customer,
region.r_name.name('region'),
orders.o_totalprice.name('amount'),
orders.o_orderdate.cast('timestamp').name('odate'),
]
tpch = (
region.join(nation, region.r_regionkey == nation.n_regionkey)
.join(customer, customer.c_nationkey == nation.n_nationkey)
.join(orders, orders.o_custkey == customer.c_custkey)[
fields_of_interest
]
)
t2 = tpch.view()
conditional_avg = t2[(t2.region == tpch.region)].amount.mean()
amount_filter = tpch.amount > conditional_avg
expr = tpch[amount_filter].limit(0)
con.explain(expr)
def test_non_equijoin(con):
t = con.table('functional_alltypes').limit(100)
t2 = t.view()
expr = t.join(t2, t.tinyint_col < t2.timestamp_col.minute()).count()
# it works
expr.execute()
def test_char_varchar_types(con):
sql = """\
SELECT CAST(string_col AS varchar(20)) AS varchar_col,
CAST(string_col AS CHAR(5)) AS char_col
FROM functional_alltypes"""
t = con.sql(sql)
assert isinstance(t.varchar_col, ir.StringColumn)
assert isinstance(t.char_col, ir.StringColumn)
def test_unions_with_ctes(con, alltypes):
t = alltypes
expr1 = t.group_by(['tinyint_col', 'string_col']).aggregate(
t.double_col.sum().name('metric')
)
expr2 = expr1.view()
join1 = expr1.join(expr2, expr1.string_col == expr2.string_col)[[expr1]]
join2 = join1.view()
expr = join1.union(join2)
con.explain(expr)
def test_head(con):
t = con.table('functional_alltypes')
result = t.head().execute()
expected = t.limit(5).execute()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
('left', 'right', 'expected'),
[
(ibis.NA.cast('int64'), ibis.NA.cast('int64'), True),
(L(1), L(1), True),
(ibis.NA.cast('int64'), L(1), False),
(L(1), ibis.NA.cast('int64'), False),
(L(0), L(1), False),
(L(1), L(0), False),
],
)
def test_identical_to(con, left, right, expected):
expr = left.identical_to(right)
result = con.execute(expr)
assert result == expected
def test_not(alltypes):
t = alltypes.limit(10)
expr = t.projection([(~t.double_col.isnull()).name('double_col')])
result = expr.execute().double_col
expected = ~t.execute().double_col.isnull()
tm.assert_series_equal(result, expected)
def test_where_with_timestamp():
t = ibis.table(
[('uuid', 'string'), ('ts', 'timestamp'), ('search_level', 'int64')],
name='t',
)
expr = t.group_by(t.uuid).aggregate(
min_date=t.ts.min(where=t.search_level == 1)
)
result = ibis.impala.compile(expr)
expected = """\
SELECT `uuid`,
min(CASE WHEN `search_level` = 1 THEN `ts` ELSE NULL END) AS `min_date`
FROM t
GROUP BY 1"""
assert result == expected
def test_filter_with_analytic():
x = ibis.table(ibis.schema([('col', 'int32')]), 'x')
with_filter_col = x[x.columns + [ibis.null().name('filter')]]
filtered = with_filter_col[with_filter_col['filter'].isnull()]
subquery = filtered[filtered.columns]
with_analytic = subquery[['col', subquery.count().name('analytic')]]
expr = with_analytic[with_analytic.columns]
result = ibis.impala.compile(expr)
expected = """\
SELECT `col`, `analytic`
FROM (
SELECT `col`, count(*) OVER () AS `analytic`
FROM (
SELECT `col`, `filter`
FROM (
SELECT *
FROM (
SELECT `col`, NULL AS `filter`
FROM x
) t3
WHERE `filter` IS NULL
) t2
) t1
) t0"""
assert result == expected
def test_named_from_filter_groupby():
t = ibis.table([('key', 'string'), ('value', 'double')], name='t0')
gb = t.filter(t.value == 42).groupby(t.key)
sum_expr = lambda t: (t.value + 1 + 2 + 3).sum() # noqa: E731
expr = gb.aggregate(abc=sum_expr)
expected = """\
SELECT `key`, sum(((`value` + 1) + 2) + 3) AS `abc`
FROM t0
WHERE `value` = 42
GROUP BY 1"""
assert ibis.impala.compile(expr) == expected
expr = gb.aggregate(foo=sum_expr)
expected = """\
SELECT `key`, sum(((`value` + 1) + 2) + 3) AS `foo`
FROM t0
WHERE `value` = 42
GROUP BY 1"""
assert ibis.impala.compile(expr) == expected
def test_nunique_where():
t = ibis.table([('key', 'string'), ('value', 'double')], name='t0')
expr = t.key.nunique(where=t.value >= 1.0)
expected = """\
SELECT count(DISTINCT CASE WHEN `value` >= 1.0 THEN `key` ELSE NULL END) AS `nunique`
FROM t0""" # noqa: E501
result = ibis.impala.compile(expr)
assert result == expected
| apache-2.0 | 8,254,318,533,257,342,000 | 28.129176 | 94 | 0.512291 | false |
palpen/wage_calculator | calculate_hours.py | 1 | 2217 |
import re
import argparse
def wage_calculator(log_txt_file, month, year, wage):
date_pattern = "\*\*Date\*\*" # pattern identifying the date in the file
hours_full = []
with log_txt_file as f:
        for line in f:
if re.search(r"{0}".format(date_pattern), line): # go to the relevant line
if re.search(month, line) and re.search(str(year), line): # within line, go to desired month/year
# skips two lines to the line containing the number of hours worked
f.next()
hours_line = f.next()
hours_list_str = re.findall(r'[-+]?\d*\.*\d+', hours_line) # put hours in a list
hours_list = [float(x) for x in hours_list_str]
hours_full += hours_list
sum_hours_date = sum(hours_list)
print line.rstrip()
print "Hours logged: " + str(hours_list)
print "Total hours for the day " + str(sum_hours_date) + "\n"
hours_total = sum(hours_full)
print "-----------"
print "Total hours worked in {0} {1}: **{2}** \n".format(month, year,
hours_total)
print "Hourly wage: **${0}/hr** \n".format(wage)
print "Total wage for {0} {1}: **${2}**".format(month, year,
hours_total * wage)
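# Illustrative sketch of the log format this parser assumes (not part of the
# original script; the task/hours wording below is hypothetical). Each entry
# starts with a "**Date**" line naming the month and year, the next line is
# skipped, and the numbers on the line after that are extracted as hours:
#
#   **Date** 14 October 2016
#   Worked on data cleaning and meetings
#   Hours: 2.5, 1.0, 0.75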
def main():
parser = argparse.ArgumentParser()
parser.add_argument("file",
help="Text file containing hours logged (e.g. ra_hours.txt)",
type=argparse.FileType('r')
)
parser.add_argument("month",
help="The month for which we want the income",
type=str)
parser.add_argument("year",
help="Enter year",
type=int)
parser.add_argument("wage",
help="Enter hourly wage",
type=float)
args = parser.parse_args()
wage_calculator(args.file, args.month, args.year, args.wage)
if __name__ == '__main__':
main()
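# Example invocation (the file name and wage are hypothetical), matching the
# argparse arguments defined in main():
#   python calculate_hours.py ra_hours.txt October 2016 25.0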
| mit | 1,043,829,032,182,839,900 | 32.089552 | 114 | 0.483085 | false |
kovidgoyal/build-calibre | scripts/pkgs/pyqt.py | 1 | 1772 | #!/usr/bin/env python2
# vim:fileencoding=utf-8
# License: GPLv3 Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
from __future__ import (unicode_literals, division, absolute_import,
print_function)
import os
from .constants import PYTHON, MAKEOPTS, build_dir, PREFIX, isosx, iswindows
from .utils import run, replace_in_file
def main(args):
b = build_dir()
if isosx:
b = os.path.join(b, 'python/Python.framework/Versions/2.7')
elif iswindows:
b = os.path.join(b, 'private', 'python')
lp = os.path.join(PREFIX, 'qt', 'lib')
sip, qmake = 'sip', 'qmake'
if iswindows:
sip += '.exe'
qmake += '.exe'
sp = 'Lib' if iswindows else 'lib/python2.7'
cmd = [PYTHON, 'configure.py', '--confirm-license', '--sip=%s/bin/%s' % (PREFIX, sip), '--qmake=%s/qt/bin/%s' % (PREFIX, qmake),
'--bindir=%s/bin' % b, '--destdir=%s/%s/site-packages' % (b, sp), '--verbose', '--sipdir=%s/share/sip/PyQt5' % b,
'--no-stubs', '-c', '-j5', '--no-designer-plugin', '--no-qml-plugin', '--no-docstrings']
if iswindows:
cmd.append('--spec=win32-msvc2015')
cmd.append('--sip-incdir=%s/private/python/include' % PREFIX)
run(*cmd, library_path=lp)
if iswindows:
# In VisualStudio 15 Update 3 the compiler crashes on the below
# statement
replace_in_file('QtGui/sipQtGuipart2.cpp', 'return new ::QPicture[sipNrElem]', 'return NULL')
run('nmake')
run('nmake install')
else:
run('make ' + MAKEOPTS, library_path=lp)
run('make install')
def post_install_check():
run(PYTHON, '-c', 'import sip, sipconfig; from PyQt5 import QtCore, QtGui, QtWebKit', library_path=os.path.join(PREFIX, 'qt', 'lib'))
| gpl-3.0 | -6,079,356,304,098,510,000 | 39.272727 | 137 | 0.603273 | false |
Bl4ckb0ne/ring-api | ring_api/server/flask/api/certificate.py | 1 | 2365 | #
# Copyright (C) 2016 Savoir-faire Linux Inc
#
# Authors: Seva Ivanov <[email protected]>
# Simon Zeni <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from flask import jsonify, request
from flask_restful import Resource
import numpy as np
class Certificates(Resource):
def __init__(self, dring):
self.dring = dring
def get(self, cert_id=None):
if (cert_id):
return jsonify({
'status': 200,
'details': self.dring.config.get_certificate_details(cert_id)
})
return jsonify({
'status': 200,
'pinned': self.dring.config.get_pinned_certificates()
})
def post(self, cert_id):
data = request.get_json(force=True)
if (not data):
return jsonify({
'status': 404,
'message': 'data not found'
})
if ('action' not in data):
return jsonify({
'status': 400,
'message': 'action not found in request data'
})
result = None
if (data.get('action') == 'pin'):
# temporary
local = True if data.get('local') in ["True", "true"] else False
result = self.dring.config.pin_certificate(cert_id, local)
elif (data.get('action') == 'unpin'):
result = self.dring.config.unpin_certificate(cert_id)
else:
return jsonify({
'status': 400,
'message': 'wrong action type'
})
return jsonify({
'status': 200,
'action': result
})
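# Illustrative JSON bodies for the POST handler above (not part of the original
# module; URL routing and the cert_id value are assumed to be defined elsewhere):
#   {"action": "pin", "local": "true"}   -> pin_certificate(cert_id, local=True)
#   {"action": "unpin"}                  -> unpin_certificate(cert_id)
# Note that "local" is compared against the strings "True"/"true" in post().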
| gpl-3.0 | -8,790,291,132,487,526,000 | 29.320513 | 80 | 0.587738 | false |
AntonioMtn/NZBMegaSearch | werkzeug/contrib/testtools.py | 1 | 2435 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.testtools
~~~~~~~~~~~~~~~~~~~~~~~~~~
This module implements extended wrappers for simplified testing.
`TestResponse`
A response wrapper which adds various cached attributes for
simplified assertions on various content types.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from ..utils import cached_property, import_string
from ..wrappers import Response
from warnings import warn
warn(DeprecationWarning('werkzeug.contrib.testtools is deprecated and '
'will be removed with Werkzeug 1.0'))
class ContentAccessors(object):
"""
A mixin class for response objects that provides a couple of useful
accessors for unittesting.
"""
def xml(self):
"""Get an etree if possible."""
if 'xml' not in self.mimetype:
raise AttributeError(
'Not a XML response (Content-Type: %s)'
% self.mimetype)
for module in ['xml.etree.ElementTree', 'ElementTree',
'elementtree.ElementTree']:
etree = import_string(module, silent=True)
if etree is not None:
                return etree.XML(self.data)
raise RuntimeError('You must have ElementTree installed '
'to use TestResponse.xml')
xml = cached_property(xml)
def lxml(self):
"""Get an lxml etree if possible."""
if ('html' not in self.mimetype and 'xml' not in self.mimetype):
raise AttributeError('Not an HTML/XML response')
from lxml import etree
try:
from lxml.html import fromstring
except ImportError:
fromstring = etree.HTML
        if self.mimetype == 'text/html':
return fromstring(self.data)
return etree.XML(self.data)
lxml = cached_property(lxml)
def json(self):
"""Get the result of simplejson.loads if possible."""
if 'json' not in self.mimetype:
raise AttributeError('Not a JSON response')
try:
from simplejson import loads
except ImportError:
from json import loads
return loads(self.data)
json = cached_property(json)
class TestResponse(Response, ContentAccessors):
"""Pass this to `werkzeug.test.Client` for easier unittesting."""
| gpl-2.0 | -8,992,439,196,236,989,000 | 33.295775 | 76 | 0.614374 | false |
phako/Totem | src/plugins/iplayer/iplayer.py | 1 | 10459 | # -*- coding: utf-8 -*-
import gettext
from gi.repository import GObject, Peas, Totem # pylint: disable-msg=E0611
import iplayer2
import threading
gettext.textdomain ("totem")
D_ = gettext.dgettext
_ = gettext.gettext
class IplayerPlugin (GObject.Object, Peas.Activatable):
__gtype_name__ = 'IplayerPlugin'
object = GObject.property (type = GObject.Object)
def __init__ (self):
GObject.Object.__init__ (self)
self.debug = False
self.totem = None
self.programme_download_lock = threading.Lock ()
self.tv_feed = None
self.tv_tree_store = None
def do_activate (self):
self.totem = self.object
# Build the interface
builder = Totem.plugin_load_interface ("iplayer", "iplayer.ui", True,
self.totem.get_main_window (),
self)
container = builder.get_object ('iplayer_vbox')
self.tv_tree_store = builder.get_object ('iplayer_programme_store')
programme_list = builder.get_object ('iplayer_programme_list')
programme_list.connect ('row-expanded', self.__row_expanded_cb)
programme_list.connect ('row-activated', self.__row_activated_cb)
container.show_all ()
self.tv_feed = iplayer2.Feed ('tv')
# Add the interface to Totem's sidebar
self.totem.add_sidebar_page ("iplayer", _(u"BBC iPlayer"), container)
# Get the channel category listings
self._populate_channel_list (self.tv_feed, self.tv_tree_store)
def do_deactivate (self):
self.totem.remove_sidebar_page ("iplayer")
def _populate_channel_list (self, feed, tree_store):
if self.debug:
print "Populating channel list…"
# Add all the channels as top-level rows in the tree store
channels = feed.channels ()
for channel_id, title in channels.items ():
tree_store.append (None, (title, channel_id, None))
# Add the channels' categories in a thread, since they each require a
# network request
thread = PopulateChannelsThread (self.__populate_channel_list_cb,
feed, tree_store)
thread.start ()
def __populate_channel_list_cb (self, tree_store, parent_path, values):
# Callback from PopulateChannelsThread to add stuff to the tree store
if values == None:
self.totem.action_error (_(u'Error listing channel categories'),
_(u'There was an unknown error getting '\
'the list of television channels '\
'available on BBC iPlayer.'))
return False
parent_iter = tree_store.get_iter (parent_path)
category_iter = tree_store.append (parent_iter, values)
# Append a dummy child row so that the expander's visible; we can
# then queue off the expander to load the programme listing for this
# category
tree_store.append (category_iter, [_(u'Loading…'), None, None])
return False
def __row_expanded_cb (self, tree_view, row_iter, path):
tree_model = tree_view.get_model ()
if self.debug:
print "__row_expanded_cb called."
# We only care about the category level (level 1), and only when
# it has the "Loading..." placeholder child row
if (get_iter_level (tree_model, row_iter) != 1 or
tree_model.iter_n_children (row_iter) != 1):
return
# Populate it with programmes asynchronously
self._populate_programme_list (self.tv_feed, tree_model, row_iter)
def __row_activated_cb (self, tree_view, path, view_column):
tree_store = tree_view.get_model ()
tree_iter = tree_store.get_iter (path)
if tree_iter == None:
return
mrl = tree_store.get_value (tree_iter, 2)
# Only allow programme rows to be activated, not channel or category
# rows
if mrl == None:
return
# Add the programme to the playlist and play it
title = tree_store.get_value (tree_iter, 0)
self.totem.add_to_playlist_and_play (mrl, title, True)
def _populate_programme_list (self, feed, tree_store, category_iter):
if self.debug:
print "Populating programme list…"
category_path = tree_store.get_path (category_iter)
thread = PopulateProgrammesThread (self.programme_download_lock,
self.__populate_programme_list_cb,
feed, (tree_store, category_path))
thread.start ()
def __populate_programme_list_cb (self, tree_store, category_path, values,
remove_placeholder):
# Callback from PopulateProgrammesThread to add stuff to the tree store
if values == None:
# Translators: the "programme feed" is the list of TV shows
# available to watch online
self.totem.action_error (_(u'Error getting programme feed'),
_(u'There was an error getting the list '\
'of programmes for this channel and '\
'category combination.'))
return False
category_iter = tree_store.get_iter (category_path)
if category_iter != None:
tree_store.append (category_iter, values)
# Remove the placeholder row
children = tree_store.iter_children (category_iter)
if remove_placeholder and children != None:
tree_store.remove (children)
return False
def get_iter_level (tree_model, tree_iter):
i = 0
while True:
tree_iter = tree_model.iter_parent (tree_iter)
if tree_iter == None:
break
i += 1
return i
def category_name_to_id (category_name):
return category_name.lower ().replace (' ', '_').replace ('&', 'and')
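# For example (illustrative value, not from the original plugin):
#   category_name_to_id('Films & Drama')  ->  'films_and_drama'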
class PopulateChannelsThread (threading.Thread):
# Class to populate the channel list from the Internet
def __init__ (self, callback, feed, tree_model):
self.callback = callback
self.feed = feed
self.tree_model = tree_model
threading.Thread.__init__ (self)
def run (self):
shown_error = False
tree_iter = self.tree_model.get_iter_first ()
while (tree_iter != None):
channel_id = self.tree_model.get_value (tree_iter, 1)
parent_path = self.tree_model.get_path (tree_iter)
try:
# Add this channel's categories as sub-rows
# We have to pass a path because the model could theoretically
# be modified while the idle function is waiting in the queue,
# invalidating an iter
for name, _count in self.feed.get (channel_id).categories ():
category_id = category_name_to_id (name)
GObject.idle_add (self.callback,
self.tree_model, parent_path,
[name, category_id, None])
except StandardError:
# Only show the error once, rather than for each channel
# (it gets a bit grating)
if not shown_error:
GObject.idle_add (self.callback,
self.tree_model, parent_path, None)
shown_error = True
tree_iter = self.tree_model.iter_next (tree_iter)
class PopulateProgrammesThread (threading.Thread):
# Class to populate the programme list for a channel/category combination
# from the Internet
def __init__ (self, download_lock, callback, feed,
(tree_model, category_path)):
self.download_lock = download_lock
self.callback = callback
self.feed = feed
self.tree_model = tree_model
self.category_path = category_path
threading.Thread.__init__ (self)
def run (self):
self.download_lock.acquire ()
category_iter = self.tree_model.get_iter (self.category_path)
if category_iter == None:
GObject.idle_add (self.callback,
self.tree_model, self.category_path, None, False)
self.download_lock.release ()
return
category_id = self.tree_model.get_value (category_iter, 1)
parent_iter = self.tree_model.iter_parent (category_iter)
channel_id = self.tree_model.get_value (parent_iter, 1)
# Retrieve the programmes and return them
feed = self.feed.get (channel_id).get (category_id)
if feed == None:
GObject.idle_add (self.callback,
self.tree_model, self.category_path, None, False)
self.download_lock.release ()
return
# Get the programmes
try:
programmes = feed.list ()
except StandardError:
GObject.idle_add (self.callback,
self.tree_model, self.category_path, None, False)
self.download_lock.release ()
return
# Add the programmes to the tree store
remove_placeholder = True
for programme in programmes:
programme_item = programme.programme
# Get the media, which gives the stream URI.
# We go for mobile quality, since the higher-quality streams are
# RTMP-only which isn't currently supported by GStreamer or xine
# TODO: Use higher-quality streams once
# http://bugzilla.gnome.org/show_bug.cgi?id=566604 is fixed
media = programme_item.get_media_for ('mobile')
if media == None:
# Not worth displaying an error in the interface for this
print "Programme has no HTTP streams"
continue
GObject.idle_add (self.callback,
self.tree_model, self.category_path,
[programme.get_title (), programme.get_summary (),
media.url],
remove_placeholder)
remove_placeholder = False
self.download_lock.release ()
| gpl-2.0 | 8,621,740,412,142,376,000 | 38.296992 | 80 | 0.568736 | false |
NERC-CEH/ecomaps | ecomaps/services/analysis.py | 1 | 11358 | import datetime
from random import randint
from sqlalchemy.orm import subqueryload, subqueryload_all, aliased, contains_eager, joinedload
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.sql import Alias, or_, asc, desc
from ecomaps.model import Dataset, Analysis, AnalysisCoverageDatasetColumn
from ecomaps.services.general import DatabaseService
from ecomaps.model import AnalysisCoverageDataset
import urllib2
__author__ = 'Phil Jenkins (Tessella)'
class AnalysisService(DatabaseService):
"""Provides operations on Analysis objects"""
def get_analyses_for_user(self, user_id):
"""Gets a list of analyses for the given user
Params:
user_id: Get analyses for the user with this ID
"""
with self.readonly_scope() as session:
return session.query(Analysis) \
.options(subqueryload(Analysis.point_dataset)) \
.options(subqueryload(Analysis.coverage_datasets)) \
.options(subqueryload(Analysis.run_by_user)) \
.filter(or_(Analysis.viewable_by == user_id, Analysis.run_by == user_id),
Analysis.deleted != True) \
.all()
def get_public_analyses(self):
"""Gets all analyses that are classed as 'public' i.e. they
aren't restricted to a particular user account"""
with self.readonly_scope() as session:
return session.query(Analysis) \
.options(subqueryload(Analysis.point_dataset)) \
.options(subqueryload(Analysis.coverage_datasets)) \
.options(subqueryload(Analysis.run_by_user)) \
.filter(Analysis.viewable_by == None,
Analysis.deleted != True) \
.all()
def publish_analysis(self, analysis_id):
"""Publishes the analysis with the supplied ID
Params:
analysis_id: ID of the analysis to publish
"""
with self.transaction_scope() as session:
try:
analysis = session.query(Analysis).filter(Analysis.id == analysis_id,
Analysis.deleted != True).one()
except NoResultFound:
return None
# Now update the "viewable by" field - setting to None
# infers that the analysis is published
analysis.viewable_by = None
analysis.result_dataset.viewable_by_user_id = None
def get_analysis_by_id(self, analysis_id, user_id):
"""Returns a single analysis with the given ID
Params:
analysis_id - ID of the analysis to look for
"""
with self.readonly_scope() as session:
try:
return session.query(Analysis)\
.options(joinedload(Analysis.run_by_user)) \
.filter(Analysis.id == analysis_id,
or_(or_(Analysis.viewable_by == user_id,
Analysis.viewable_by == None),
Analysis.run_by == user_id),
Analysis.deleted != True).one()
except NoResultFound:
return None
def create(self, name, point_dataset_id, coverage_dataset_ids,
user_id, unit_of_time, random_group, model_variable,
data_type, model_id, analysis_description,input_hash,
time_indicies):
"""Creates a new analysis object
Params:
name - Friendly name for the analysis
point_dataset_id - Id of dataset containing point data
coverage_dataset_ids - List of coverage dataset ids, which should be
in the format <id>_<column_name>
user_id - Who is creating this analysis?
unit_of_time - unit of time selected
random_group - additional input into the model
model_variable - the variable that is being modelled
data_type - data type of the variable
model_id - id of the model to be used to generate the results
analysis_description - a short string describing the analysis
input_hash - used to quickly identify a duplicate analysis in terms of inputs
time_indicies - if any columns in coverage datasets need time slicing, the index (i.e. the time slice)
to take will be stored against each relevant column in here
Returns:
ID of newly-inserted analysis
"""
with self.transaction_scope() as session:
analysis = Analysis()
analysis.name = name
analysis.run_by = user_id
analysis.viewable_by = user_id
analysis.point_data_dataset_id = int(point_dataset_id)
analysis.deleted = False
analysis.model_id = model_id
analysis.description = analysis_description
# Hook up the coverage datasets
for id in coverage_dataset_ids:
coverage_ds = AnalysisCoverageDataset()
# The coverage dataset 'ID' is actually a
# composite in the form <id>_<column-name>
ds_id, column_name = id.split('_')
id_as_int = int(ds_id)
coverage_ds.dataset_id = id_as_int
col = AnalysisCoverageDatasetColumn()
# Check to see if we need to record a time index against
# this column, will be used for time-slicing later
if id in time_indicies:
col.time_index = time_indicies[id]
col.column = column_name
coverage_ds.columns.append(col)
analysis.coverage_datasets.append(coverage_ds)
# Parameters that are used in the analysis
analysis.unit_of_time = unit_of_time
analysis.random_group = random_group
analysis.model_variable = model_variable
analysis.data_type = data_type
# Hash of input values for future comparison
analysis.input_hash = input_hash
session.add(analysis)
# Flush and refresh to give us the generated ID for this new analysis
session.flush([analysis])
session.refresh(analysis)
return analysis.id
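    # Illustrative call (all values are hypothetical, for documentation only):
    #
    #   analysis_id = analysis_service.create(
    #       name='land cover vs temperature', point_dataset_id=3,
    #       coverage_dataset_ids=['7_temp', '7_rain'],   # "<dataset id>_<column name>"
    #       user_id=1, unit_of_time='month', random_group='site',
    #       model_variable='presence', data_type='binary', model_id=2,
    #       analysis_description='example run', input_hash='abc123',
    #       time_indicies={'7_temp': 4})                 # optional time slice per column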
def get_netcdf_file(self, url):
        ''' Gets the file containing the results data
'''
file_name = url + ".dods"
return urllib2.urlopen(file_name)
def get_analysis_for_result_dataset(self, dataset_id):
""" Gets the analysis ID with the given result dataset ID
@param dataset_id: ID of the (result) dataset contained within the analysis
"""
with self.readonly_scope() as session:
return session.query(Analysis.id).filter(Analysis.result_dataset_id == dataset_id,
Analysis.deleted != True).one()[0]
def sort_and_filter_private_analyses_by_column(self,user_id,column,order, filter_variable):
"""Sorts the private analyses by the column name, and applies a filter on the model variable value selected
Params:
user_id: unique id of the user
column: The name of the column to sort on
order: either "asc" or "desc"
filter_variable: the model_variable value used to filter the analyses
"""
with self.readonly_scope() as session:
query = session.query(Analysis) \
.options(subqueryload(Analysis.point_dataset)) \
.options(subqueryload(Analysis.coverage_datasets)) \
.options(subqueryload(Analysis.run_by_user)) \
.filter(or_(Analysis.viewable_by == user_id, Analysis.run_by == user_id),
Analysis.deleted != True)
if filter_variable:
query = query.filter(Analysis.model_variable == filter_variable)
if order == "asc":
return query.order_by(asc(column)).all()
elif order == "desc":
return query.order_by(desc(column)).all()
else:
return query.all()
def sort_and_filter_public_analyses_by_column(self,column, order, filter_variable):
"""Sorts the public analyses by the column name and applies a filter on the model variable value selected
Params:
column: The name of the column to sort on
order: either "asc" or "desc"
filter_variable: the model_variable value used to filter the analyses
"""
with self.readonly_scope() as session:
query = session.query(Analysis) \
.options(subqueryload(Analysis.point_dataset)) \
.options(subqueryload(Analysis.coverage_datasets)) \
.options(subqueryload(Analysis.run_by_user)) \
.filter(Analysis.viewable_by == None,
Analysis.deleted != True)
if filter_variable:
query = query.filter(Analysis.model_variable == filter_variable)
if order == "asc":
return query.order_by(asc(column)).all()
elif order == "desc":
return query.order_by(desc(column)).all()
else:
return query.all()
def get_public_analyses_with_identical_input(self, input_hash):
""" Gets a list of published analyses matching the input hash
@param: Hash of input values used to determine whether an analysis has been run before
"""
with self.readonly_scope() as session:
try:
# Only pull out public analyses for now
return session.query(Analysis) \
.filter(Analysis.input_hash == input_hash,
Analysis.viewable_by == None,
Analysis.deleted != True) \
.all()[0]
except:
return None
def delete_private_analysis(self, analysis_id):
"""Deletion is only a 'soft' delete i.e. a flag will be set so that the analysis is not viewable by the user.
This is so that if the user wants to recover the analysis, the can be reversed.
Params
analysis_id = id of the analysis to delete
"""
with self.transaction_scope() as session:
analysis = session.query(Analysis).filter(Analysis.id == analysis_id,
Analysis.deleted != True).one()
analysis.deleted = True
def get_all_model_variables(self):
"""Return all the distinct model variable values across all the analyses
"""
with self.readonly_scope() as session:
try:
return session.query(Analysis.model_variable).distinct()
except NoResultFound:
return None | gpl-2.0 | 3,181,348,859,315,533,300 | 39.137809 | 118 | 0.558549 | false |
kamimura/py-convert-temperatures | converter.py | 1 | 1936 | #!/usr/bin/env python3
#-*- coding: utf-8 -*-
# To add a new temperature scale, an if branch must be added to each of the two conversion functions below
def convert_to_celsius(t, source):
if source == "Kelvin":
return t - 273.15
elif source == "Celsius":
return t
elif source == "Fahrenheit":
return (t - 32) * 5 / 9
elif source == "Rankine":
return (t - 491.67) * 5 / 9
elif source == "Delisle":
return 100 - t * 2 / 3
elif source == "Newton":
return t * 100 / 33
elif source == "Reaumur":
return t * 5 / 4
elif source == "Romer":
return (t - 7.5) * 40 / 21
else:
raise Exception("convert_to_celsius: {0}".format(source))
def convert_from_celsius(t, target):
if target == "Kelvin":
return t + 273.15
elif target == "Celsius":
return t
elif target == "Fahrenheit":
return t * 9 / 5 + 32
elif target == "Rankine":
return (t + 273.15) * 9 / 5
elif target == "Delisle":
return (100 - t) * 3 / 2
elif target == "Newton":
return t * 33 / 100
elif target == "Reaumur":
return t * 4 / 5
elif target == "Romer":
return t * 21 / 40 + 7.5
else:
raise Exception("convert_from_celsius: {0}".format(target))
def convert_temperatures(t, source, target):
return convert_from_celsius(convert_to_celsius(t, source), target)
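def _example_conversions():
    # Illustrative sanity checks, not part of the original module; the expected
    # values follow directly from the conversion formulas above.
    assert abs(convert_temperatures(100, "Celsius", "Fahrenheit") - 212.0) < 1e-9
    assert abs(convert_temperatures(0, "Kelvin", "Celsius") - (-273.15)) < 1e-9
    assert abs(convert_temperatures(32, "Fahrenheit", "Kelvin") - 273.15) < 1e-9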
if __name__ == '__main__':
units = ["Kelvin", "Celsius", "Fahrenheit", "Rankine", "Delisle","Newton",
"Reaumur", "Romer"]
# http://en.wikipedia.org/wiki/Comparison_of_temperature_scales#Comparison_of_temperature_scales
print("Absolute zero")
for target in units:
print("{0}: {1:.2f}".format(
target, convert_temperatures(0, "Kelvin", target)))
print("Ice melts")
for target in units:
print("{0}: {1:.2f}".format(
target, convert_temperatures(32, "Fahrenheit", target)))
| mit | 1,126,159,050,346,238,600 | 30.737705 | 100 | 0.558884 | false |
dubourg/openturns | python/test/t_Gumbel_std.py | 1 | 5074 | #! /usr/bin/env python
from __future__ import print_function
from openturns import *
TESTPREAMBLE()
RandomGenerator.SetSeed(0)
try:
    # Instantiate one distribution object
distribution = Gumbel(2.0, -0.5)
print("Distribution ", repr(distribution))
print("Distribution ", distribution)
# Is this distribution elliptical ?
print("Elliptical = ", distribution.isElliptical())
# Is this distribution continuous ?
print("Continuous = ", distribution.isContinuous())
# Test for realization of distribution
oneRealization = distribution.getRealization()
print("oneRealization=", repr(oneRealization))
# Test for sampling
size = 10000
oneSample = distribution.getSample(size)
print("oneSample first=", repr(oneSample[0]), " last=", repr(oneSample[1]))
print("mean=", repr(oneSample.computeMean()))
print("covariance=", repr(oneSample.computeCovariance()))
# Define a point
point = NumericalPoint(distribution.getDimension(), 1.0)
print("Point= ", repr(point))
# Show PDF and CDF at point
eps = 1e-5
    # derivative of PDF with regard to its arguments
DDF = distribution.computeDDF(point)
print("ddf =", repr(DDF))
# by the finite difference technique
print("ddf (FD)=", repr(NumericalPoint(1, (distribution.computePDF(
point + NumericalPoint(1, eps)) - distribution.computePDF(point + NumericalPoint(1, -eps))) / (2.0 * eps))))
# PDF value
LPDF = distribution.computeLogPDF(point)
print("log pdf=%.6f" % LPDF)
PDF = distribution.computePDF(point)
print("pdf =%.6f" % PDF)
# by the finite difference technique from CDF
print("pdf (FD)=%.6f" % ((distribution.computeCDF(point + NumericalPoint(1, eps)) -
distribution.computeCDF(point + NumericalPoint(1, -eps))) / (2.0 * eps)))
    # CDF and complementary CDF values
CDF = distribution.computeCDF(point)
print("cdf=%.6f" % CDF)
CCDF = distribution.computeComplementaryCDF(point)
print("ccdf=%.6f" % CCDF)
PDFgr = distribution.computePDFGradient(point)
print("pdf gradient =", repr(PDFgr))
# by the finite difference technique
PDFgrFD = NumericalPoint(2)
PDFgrFD[0] = (Gumbel(distribution.getAlpha() + eps, distribution.getBeta()).computePDF(point)
- Gumbel(distribution.getAlpha() - eps, distribution.getBeta()).computePDF(point)) / (2.0 * eps)
PDFgrFD[1] = (Gumbel(distribution.getAlpha(), distribution.getBeta() + eps).computePDF(point)
- Gumbel(distribution.getAlpha(), distribution.getBeta() - eps).computePDF(point)) / (2.0 * eps)
print("pdf gradient (FD)=", repr(PDFgrFD))
    # derivative of the CDF with regard to the parameters of the distribution
CDFgr = distribution.computeCDFGradient(point)
print("cdf gradient =", repr(CDFgr))
# by the finite difference technique
CDFgrFD = NumericalPoint(2)
CDFgrFD[0] = (Gumbel(distribution.getAlpha() + eps, distribution.getBeta()).computeCDF(point)
- Gumbel(distribution.getAlpha() - eps, distribution.getBeta()).computeCDF(point)) / (2.0 * eps)
CDFgrFD[1] = (Gumbel(distribution.getAlpha(), distribution.getBeta() + eps).computeCDF(point)
- Gumbel(distribution.getAlpha(), distribution.getBeta() - eps).computeCDF(point)) / (2.0 * eps)
print("cdf gradient (FD)=", repr(CDFgrFD))
# quantile
quantile = distribution.computeQuantile(0.95)
print("quantile=", repr(quantile))
print("cdf(quantile)=%.6f" % distribution.computeCDF(quantile))
mean = distribution.getMean()
print("mean=", repr(mean))
standardDeviation = distribution.getStandardDeviation()
print("standard deviation=", repr(standardDeviation))
skewness = distribution.getSkewness()
print("skewness=", repr(skewness))
kurtosis = distribution.getKurtosis()
print("kurtosis=", repr(kurtosis))
covariance = distribution.getCovariance()
print("covariance=", repr(covariance))
parameters = distribution.getParametersCollection()
print("parameters=", repr(parameters))
for i in range(6):
print("standard moment n=", i, " value=",
distribution.getStandardMoment(i))
print("Standard representative=", distribution.getStandardRepresentative())
# Specific to this distribution
mu = distribution.getMu()
print("mu=%.6f" % mu)
sigma = distribution.getSigma()
print("sigma=%.6f" % sigma)
newDistribution = Gumbel(mu, sigma, Gumbel.MUSIGMA)
print("alpha from (mu, sigma)=%.6f" % newDistribution.getAlpha())
print("beta from (mu, sigma)=%.6f" % newDistribution.getBeta())
a = distribution.getA()
print("a=%.6f" % a)
b = distribution.getB()
print("b=%.6f" % b)
newDistribution = Gumbel(a, b, Gumbel.AB)
print("alpha from (a, b)=%.6f" % newDistribution.getAlpha())
print("beta from (a, b)=%.6f" % newDistribution.getBeta())
except:
import sys
print("t_Gumbel.py", sys.exc_info()[0], sys.exc_info()[1])
| gpl-3.0 | -9,043,513,947,748,732,000 | 41.283333 | 116 | 0.665353 | false |
3dfxsoftware/cbss-addons | account_bank_statement_vauxoo/model/account_journal.py | 1 | 5195 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# "Nhomar Hernandez <[email protected]>"
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.netsvc as netsvc
import logging
class account_journal(osv.Model):
_inherit = 'account.journal'
_columns = {
'default_interim_account_id': fields.many2one('account.account',
'Interim Account',
help="""In banks you probably want send account move
lines to a interim account before affect the default
debit and credit account who
will have the booked
balance for this kind of operations, in this field
you configure this account."""),
'default_income_account_id': fields.many2one('account.account',
'Extra Income Account',
help="""In banks you probably want as counter part for extra
banking income money use an specific account in this field
you can canfigure this account"""),
'default_expense_account_id': fields.many2one('account.account',
'Expense Account',
help="""In banks you probable wants send account move lines to an
extra account to be able to record account move lines due to bank
comisions and bank debit notes, in this field you configure this
account."""),
'concept_ids': fields.one2many('account.journal.bs.config', 'bsl_id',
'Concept Lines', required=False),
'moveper_line': fields.boolean('One Move per Line', required=False,
help="""Do you want one move per line or one move per bank
statement,True: One Per Line False:
One Per bank statement"""),
}
class account_journal_bs_config(osv.Model):
_name = 'account.journal.bs.config'
_order = 'sequence asc'
logger = netsvc.Logger()
_columns = {
'sequence': fields.integer('Label'),
'bsl_id': fields.many2one('account.journal', 'Journal',
required=False),
'partner_id': fields.many2one('res.partner', 'Partner',
required=False),
'account_id': fields.many2one('account.account', 'Account',
required=False),
'expresion': fields.char('Text To be Compared', size=128,
required=True, readonly=False),
        'name': fields.char('Concept Label', size=128, required=True,
readonly=False),
}
_defaults = {
'sequence': 10,
}
def _check_expresion(self, cr, user, ids, context=None):
"""
A user defined constraints listed in {_constraints}
@param cr: cursor to database
@param user: id of current user
@param ids: list of record ids on which constraints executes
@return: return True if all constraints satisfied, False otherwise
"""
try:
exp_ = self.browse(cr, user, ids, context=context)[0].expresion
exp = eval(exp_)
            self.logger.notifyChannel('Chain. '+str(exp), netsvc.LOG_DEBUG,
                                      'Successfully Validated')
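            # The stored expression must evaluate to a Python list of strings.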
if type(exp) is list:
return True
else:
self.logger.notifyChannel(
'Chain. '+str(exp_), netsvc.LOG_ERROR,
'Fail With You must use a list')
return False
except Exception, var:
self.logger.notifyChannel('Chain. '+str(exp_), netsvc.LOG_ERROR,
'Fail With %s' % var)
return False
_constraints = [
        (_check_expresion, '''Error: the expression is not a list.
            It should look like:
            ["stringA","stringB","stringC"]
            otherwise it is invalid''', ['expresion']),
]
account_journal_bs_config
| gpl-2.0 | 8,968,663,386,985,846,000 | 44.156522 | 119 | 0.536491 | false |
Kaftanov/Cchat | chat-server/server.py | 1 | 7250 | #!/usr/bin/env python3
"""
#############################
Server application || TCP, socket
version python: python3
#############################
"""
import select
import signal
import socket
import sys
import uuid
import datetime
from communication import send, receive
from messages import Messages
from dbworker import DbHandler
from cmdworker import Commands
class Server:
"""
    Attributes contained in the object:
    listen_count: max number of queued connections for listen()
    serv_host: location of the server on the network
    serv_port: server's port
    command_list: special server commands for users,
    contains: ['/online', /info, ]
    command_string: string which contains a command
    user_list: list of connected client sockets
    user_dict: nested dict which looks like: {'sid_value': {
    'login': .., 'first_name': .., 'second_name': .., 'password': ..,
    'hostname':..}, ..}
    socket_sid_dict: contains session id values (sid_value) linked with sockets
    Methods the Server class contains:
__init__
info: initialize socket
variable: listen_count, serv_host, serv_port
sighandler
info: shutting down server and closing all sockets
variable: without variable
serve
info: main server's loop
variable: without variable
exec_commands
info: execute commands from 'command_list'
variable: command_string
validation_user
info: checking if user's password is valid
variable: dict with key ['password']
broadcast_message
info: sending message on all socket, which contain in self.user_list
variable: string text
get_sid
info: get session id from socket dict
variable: socket
"""
def __init__(self, listen_count=None, serv_host=None, serv_port=None):
if listen_count is None:
listen_count = 5
if serv_host is None:
serv_host = 'localhost'
if serv_port is None:
serv_port = 3490
# set server messages worker
self.MsgWorker = Messages(host=serv_host, port=serv_port, backlog=listen_count)
# set data base worker
self.DbWorker = DbHandler()
# set command worker
self.CmdWorker = Commands()
self.uid_link = {}
self.user_list = []
self.server_password = 'qwerty'
# initialize server socket
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server.bind((serv_host, serv_port))
self.server.listen(listen_count)
print(self.MsgWorker.welcome_string())
# set signal handler
signal.signal(signal.SIGINT, self.sighandler)
def sighandler(self, signum, frame):
""" Shutdown the server if typing Ctrl + C """
for sock in self.user_list:
sock.close()
self.server.close()
sys.exit('Shutting down server...')
def generate_uid(self, login):
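        # Reuse the stored uid for a known login; otherwise mint a new UUID4.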
uid = self.DbWorker.get_uid_by_login(login)
return uid if uid else str(uuid.uuid4())
def authenticate_user(self, data):
try:
login = data['login']
password = data['password']
uid = self.generate_uid(login)
if data['type'] == 'log':
if password == self.DbWorker.get_passwd_by_login(login):
self.DbWorker.update_state(uid=uid, state=1, date='NULL')
else:
return False,
elif data['type'] == 'reg':
user_form = {'uid': uid, 'login': login, 'password': password,
'state': 1, 'left': 'NULL'}
self.DbWorker.add_user(user_form)
else:
return False,
message = self.MsgWorker.print_new_user(login)
return True, uid, message
except KeyError as error:
print(error)
return False,
def broadcast_message(self, message, sockt=None):
""" Broadcast messages for all users"""
if sockt is None:
for sock in self.user_list:
send(sock, message)
else:
for sock in self.user_list:
if sock is not sockt:
send(sock, message)
def run_server_loop(self):
input_socket_list = [self.server]
is_running = True
while is_running:
try:
in_fds, out_fds, err_fds = select.select(input_socket_list,
self.user_list, [])
except select.error as error:
print(error)
break
except socket.error as error:
print(error)
break
for sock in in_fds:
if sock is self.server:
user, user_address = self.server.accept()
data = receive(user)
request = self.authenticate_user(data)
if request[0]:
message = request[2]
self.broadcast_message(message)
self.uid_link[user] = request[1]
input_socket_list.append(user)
self.user_list.append(user)
send(user, 'Success')
else:
send(user, 'Error')
continue
else:
try:
data = receive(sock)
if data:
print(data)
if data in self.CmdWorker.command_list:
send(sock, self.CmdWorker.execute_commands(data))
else:
user = self.DbWorker.get_user(self.uid_link[sock])['login']
head = '%s~%s' % (user, self.MsgWorker.time())
message = data
self.broadcast_message({'head': head, 'message': message}, sock)
else:
time = self.CmdWorker.time()
self.DbWorker.update_state(self.uid_link[sock], 0, time)
sock.close()
input_socket_list.remove(sock)
self.user_list.remove(sock)
message = self.MsgWorker.print_user_left(self.DbWorker.get_user(
self.uid_link[sock])['login'])
self.broadcast_message(message)
except socket.error as error:
print(error)
input_socket_list.remove(sock)
self.user_list.remove(sock)
self.server.close()
if __name__ == "__main__":
Server().run_server_loop()
| gpl-3.0 | -5,659,754,009,381,891,000 | 36.371134 | 96 | 0.495448 | false |
kcolletripp/popular | search/views.py | 1 | 1297 | from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect, HttpResponse
from django.views import generic
from .models import Target
from .forms import TargetForm
import utils
# Create your views here.
#class IndexView(generic.ListView):
# template_name = 'search/index.html'
# context_object_name = 'latest_searches'
#
# def get_queryset(self):
# #Return the last 5 published questions. (not including future releases)
# return Target.objects.all().order_by('-target_name')[:5]
#class ResultsView(generic.DetailView):
# model = Target
# template_name = 'search/result.html'
def index(request):
latest_searches = Target.objects.all().order_by('-target_name')[:5]
form = TargetForm()
context = {'latest_searches': latest_searches, 'form':form} #dictionary
return render(request, 'search/index.html', context)
def result(request):
context = {}
if request.method == 'POST':
form = TargetForm(request.POST)
target = form.save()
context['target'] = target #'key':value
context['views'] = utils.get_wiki(target)
else:
#GET, or first time request
form = TargetForm()
context['form'] = form
return render(request, 'search/result.html', context)
| gpl-3.0 | 1,606,601,490,411,531,800 | 31.425 | 80 | 0.680031 | false |
yastrov/battleship | battleship/serverfactory.py | 1 | 1999 | # coding=utf-8
"""
This factory creates BattleshipProtocol() instances for every
connected user. If the number of connected users reaches max_clients, the factory creates an instance
of ErrorBattleshipProtocol() for new users.
This class also creates, on its own initialization, an instance of the Battleship class, which is the game's main loop.
See the buildProtocol() method in this class and the initClient() method in the Battleship class.
"""
from twisted.internet.protocol import ServerFactory
from protocol import BattleshipProtocol
from service import Battleship
from errorprotocol import ErrorBattleshipProtocol
from twisted.python import log
class BattleshipServerFactory(ServerFactory):
"""
Battleship server factory. Process incoming client requests
"""
protocol = BattleshipProtocol
def __init__(self, max_clients, service):
"""
Battleship server factory constructor
"""
log.msg('Battleship server initialized')
# parameters
self.battleship_service = Battleship(max_clients)
self.service = service
def buildProtocol(self, addr):
"""
        This method is called when a new client connects.
        Create a new BattleshipProtocol if clients < max_clients,
        or
        send an error to the client if clients >= max_clients
"""
if len(self.battleship_service.clients) < self.battleship_service.max_clients:
p = self.protocol()
p.factory = self
p = self.battleship_service.initClient(p, addr)
log.msg('class BattleshipServerFactory, method buildProtocol: protocol was built')
return p
else:
"""
            If the number of players is more than self.max_clients, close connections for all new clients
"""
p = ErrorBattleshipProtocol()
p.factory = self
return p | apache-2.0 | 6,821,497,901,575,590,000 | 34.381818 | 112 | 0.646823 | false |
marshallmcdonnell/journals | journals/databases/icat/sns/icat.py | 1 | 8206 | #!/usr/bin/env python
#import flask
from __future__ import print_function
import requests
import xmljson
import json
import lxml
import decimal
import pandas
from journals.utilities import process_numbers
#uri = "http://icat.sns.gov:2080/icat-rest-ws/experiment/SNS"
#uri = "http://icat.sns.gov:2080/icat-rest-ws/experiment/SNS/NOM"
#uri = "http://icat.sns.gov:2080/icat-rest-ws/experiment/SNS/NOM/IPTS-"+ipts+"/meta"
class ICAT(object):
def __init__(self, instrument):
self._base_uri = "http://icat.sns.gov:2080/icat-rest-ws"
self._ipts_uri = self._base_uri + "/experiment/SNS/"+instrument
self._run_uri = self._base_uri + "/dataset/SNS/"+instrument
self._data = None
self._los_data = dict()
self._meta_ipts_data = dict()
self._runs = list()
self._ipts_list = list()
self.key_list = ['ipts', 'duration', 'startTime', 'totalCounts', 'protonCharge', 'title']
# Unit Functions
#---------------
def _uri2xml(self,uri):
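        # Fetch the URI and parse the response body into an lxml element tree.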
xml_data = requests.get(uri)
xml_data = lxml.etree.XML(xml_data.content)
return xml_data
def _xml2json(self,xml_data):
return xmljson.badgerfish.data(xml_data)
def _uri2xml2json(self,uri):
xml_data = self._uri2xml(uri)
json_data = self._xml2json(xml_data)
return json_data
def _get_list_of_all_ipts(self):
uri = self._ipts_uri
json_data = self._uri2xml2json(uri)
for x in json_data['proposals']['proposal']:
if isinstance(x['$'], str):
if x['$'].startswith('IPTS'):
self._ipts_list.append(int(x['$'].split('-')[1].split('.')[0]))
def _get_xml_data_tree(self,data):
        xml_tree = lxml.etree.tostring(data, pretty_print=True)
return xml_tree
def _get_runs_from_ipts(self,data):
return [ element.get('id') for element in data.iter() if element.tag == 'run' ]
def _get_los_for_run(self,run,json_data):
json_metadata = json_data['metadata']
try:
ipts_pulled = json_metadata['proposal']['$'].split('-')[1]
except:
ipts_pulled = None
los_data = dict()
uid = run
meta_dict = self._get_meta_for_run(json_metadata)
meta_dict['ipts'] = ipts_pulled
los_data[uid] = meta_dict
self._update_master_los(los_data)
'''
NOTE: Below, the check for list is specific to IPTSs w/ proposal lists. These are:
Index IPTS
----- ----
88 8814
119 9818
'''
def _get_meta_for_ipts(self,runs,proposal_json):
if type(proposal_json) == list:
ipts_pulled = int(proposal_json[0]['@id'].split('-')[1])
runs_data = process_numbers(proposal_json[0]['runRange']['$'])
for i, proposal in enumerate(proposal_json[1:]):
runs_data += process_numbers(proposal_json[0]['runRange']['$'])
startTime = [(':'.join( proposal_json[0]['createTime']['$'].split(':')[0:3])).split('.')[0]]
for i, proposal in enumerate(proposal_json[1:]):
startTime += [(':'.join( proposal_json[i+1]['createTime']['$'].split(':')[0:3])).split('.')[0]]
else:
ipts_pulled = int(proposal_json['@id'].split('-')[1])
runs_data = process_numbers(proposal_json['runRange']['$'])
startTime = [(':'.join( proposal_json['createTime']['$'].split(':')[0:3])).split('.')[0]]
meta_ipts_data = dict()
meta_ipts_data[ipts_pulled] = {'runs' : runs_data,
'createtime' : startTime}
self._update_master_meta_ipts_data(meta_ipts_data)
def _update_master_meta_ipts_data(self,meta_ipts_data):
self._meta_ipts_data.update(meta_ipts_data)
def _get_los_for_ipts(self,runs,proposal_json):
if type(proposal_json) == list:
ipts_pulled = int(proposal_json[0]['@id'].split('-')[1])
runs_data = proposal_json[0]['runs']['run']
for i, proposal in enumerate(proposal_json[1:]):
runs_data += proposal_json[i+1]['runs']['run']
else:
ipts_pulled = int(proposal_json['@id'].split('-')[1])
runs_data = proposal_json['runs']['run']
los_data = dict()
if len(runs) == 1:
uid = proposal_json['runs']['run']['@id']
x = proposal_json['runs']['run']
meta_dict = self._get_meta_for_run(x)
meta_dict['ipts'] = ipts_pulled
los_data[uid] = meta_dict
else:
for x in runs_data:
uid = x['@id']
meta_dict = self._get_meta_for_run(x)
meta_dict['ipts'] = ipts_pulled
los_data[uid] = meta_dict
self._update_master_los(los_data)
def _update_master_los(self,los_data):
self._los_data.update(los_data)
def _get_meta_for_run(self,metadata):
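        # Copy the selected metadata keys, normalising duration to minutes and trimming timestamps.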
meta = dict.fromkeys(self.key_list)
print(meta)
for key in self.key_list:
if key in metadata:
if key == 'duration':
meta[key] = str(int(float(metadata[key]['$'])/60.))+'min'
elif key == 'startTime':
meta[key] = (':'.join( metadata[key]['$'].split(':')[0:3])).split('.')[0]
elif key == 'totalCounts':
meta[key] = '{:.2E}'.format(decimal.Decimal(metadata[key]['$']))
elif key == 'protonCharge':
meta[key] = float("{0:.2f}".format(metadata[key]['$'] / 1e12) )
else:
meta[key] = metadata[key]['$']
return meta
# Main Functions
#------------------
def initializeMetaIptsData(self):
ipts_list = self.getListOfIPTS()
self.getIPTSs( ipts_list[-2:], data='meta')
def getMetaIptsData(self):
return self._meta_ipts_data
def applyIptsFilter(self,ipts_list):
self.reset_los()
self.getIPTSs(ipts_list)
def getDataFrame(self):
data = self.get_los()
df = pandas.DataFrame.from_dict(data,orient='index')
df = df.reset_index()
df = df.rename(columns={'index': '#Scan', 'duration': 'time', 'protonCharge': 'PC/pC'})
col_order = ['#Scan', 'ipts', 'time', 'startTime', 'totalCounts', 'PC/pC', 'title']
df = df[col_order]
return df
def getListOfIPTS(self):
if not self._ipts_list:
self._get_list_of_all_ipts()
return sorted(self._ipts_list)
def getIPTSs(self,proposals,**kwargs):
for i, ipts in enumerate(proposals):
self.getIPTS(ipts,**kwargs)
def getIPTS(self,ipts,data='all'):
uri = self._ipts_uri + "/IPTS-"+str(ipts)+"/"+data
xml_data = self._uri2xml(uri)
runs = self._get_runs_from_ipts(xml_data)
json_data = self._xml2json(xml_data)
if data == 'all':
try:
self._get_los_for_ipts(runs,json_data['proposals']['proposal'])
except KeyError:
print(ipts, json_data['proposals'])
if data == 'meta':
self._get_meta_for_ipts(runs,json_data['proposals']['proposal'])
def getRun(self,run):
uri = self._run_uri+'/'+ str(run)+"/metaOnly"
json_data = self._uri2xml2json(uri)
self._get_los_for_run(run, json_data)
def reset_los(self):
self._los_data = dict()
def get_los(self):
return self._los_data
def print_runs(self):
if self._runs is None:
self._get_runs()
for run in self._runs:
print(run)
def print_los(self):
if self._los_data is None:
print(self._los_data, "( No data yet in los dictionary. )")
los_data = self._los_data
print("#Scan IPTS time starttime totalCounts PC/C title")
for run in sorted(los_data.keys()):
print(run, end=' ')
for key in self.key_list:
print(los_data[run][key], end=' ')
print()
| mit | 5,587,422,821,829,324,000 | 33.334728 | 111 | 0.530466 | false |
RyanWangGit/HandySearch | scraper/scraper/exporters.py | 1 | 1245 | """
Item exporters are used to export/serialize items into a sqlite3 database.
"""
from scrapy.exporters import BaseItemExporter
import sqlite3
class SqliteWebpageExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
super().__init__()
self._configure(kwargs)
self.conn = sqlite3.connect(file.name)
self.conn.execute(r"""CREATE TABLE IF NOT EXISTS `webpages`(
`id` INTEGER PRIMARY KEY,
`title` VARCHAR DEFAULT NULL,
`content` VARCHAR DEFAULT NULL,
`url` VARCHAR DEFAULT NULL UNIQUE
);
""")
self.conn.commit()
self.conn.text_factory = str
def export_item(self, item):
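        # Insert the scraped page into the webpages table and commit immediately.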
self.start_exporting()
self.conn.execute(r"""INSERT INTO webpages(title, content, url) VALUES (?, ?, ?)""",
(item['title'], item['content'], item['url']))
self.conn.commit()
self.finish_exporting()
def __del__(self):
self.conn.close()
| gpl-3.0 | 1,078,980,320,858,903,800 | 37.90625 | 92 | 0.473092 | false |
karesansui/karesansui | bin/stop_network.py | 1 | 3039 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of Karesansui.
#
# Copyright (C) 2012 HDE, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import os
import sys
import logging
from optparse import OptionParser
from ksscommand import KssCommand, KssCommandException, KssCommandOptException
import __cmd__
try:
import karesansui
from karesansui import __version__
from karesansui.lib.virt.virt import KaresansuiVirtConnection
from karesansui.lib.utils import load_locale
except ImportError, e:
print >>sys.stderr, "[Error] some packages not found. - %s" % e
sys.exit(1)
_ = load_locale()
usage = '%prog [options]'
def getopts():
optp = OptionParser(usage=usage, version=__version__)
optp.add_option('-n', '--name', dest='name', help=_('Network name'))
return optp.parse_args()
def chkopts(opts):
if not opts.name:
raise KssCommandOptException('ERROR: %s option is required.' % '-n or --name')
class StopNetwork(KssCommand):
def process(self):
(opts, args) = getopts()
chkopts(opts)
self.up_progress(10)
conn = KaresansuiVirtConnection(readonly=False)
try:
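            # Confirm the network is defined (active or inactive) before attempting to stop it.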
active_networks = conn.list_active_network()
inactive_networks = conn.list_inactive_network()
if not (opts.name in active_networks or opts.name in inactive_networks):
raise KssCommandException('Network not found. - net=%s' % (opts.name))
self.up_progress(10)
conn.stop_network(opts.name)
self.up_progress(40)
if opts.name in conn.list_active_network():
raise KssCommandException('Failed to stop network. - net=%s' % (opts.name))
self.logger.info('Stopped network. - net=%s' % (opts.name))
print >>sys.stdout, _('Stopped network. - net=%s') % (opts.name)
return True
finally:
conn.close()
if __name__ == "__main__":
target = StopNetwork()
sys.exit(target.run())
| mit | 6,552,379,057,524,915,000 | 33.146067 | 91 | 0.678513 | false |
DynamoDS/Coulomb | SessionTools/stacktrace_extractor.py | 1 | 5506 | from __future__ import print_function
import json
import xml.etree.ElementTree as xmlElementTree
import base64
import os
import sys
from os.path import isfile, join
import gzip
import datetime
import random
import traceback
import sys
import features_JSON
import features_XML
VERSION="StackTraces-2018-08-16" # This is the name of the feature set, update it with any major changes
# Progress counters
processed = 0
skipped = 0
err_count = 0
i = 0
if len(sys.argv) != 2:
print ("Usage: python stacktrace_extractor.py PathToSessions")
path = sys.argv[1]
paths = []
# Walk over the dataset computing a list of all the files to process
print ('Enumerating files')
for root, subdirs, files in os.walk(path):
for ff in files:
i = i + 1
if i % 1000 == 0:
print (i)
path = join(root,ff)
if (not path.endswith('sorted.gz')):
continue
paths.append(path)
# Randomise them in order to avoid collisions + leapfrogging problems with distributed workers
# If processing locally removing this may slightly improve performance
random.shuffle(paths)
print ('Paths to process: ' + str(len(paths)))
for path in paths:
# Report progress
print (str(datetime.datetime.now()) + ": " + path + ": processed: " + str(processed) + ", errs: " + str(err_count) + ", results_exist: " + str(skipped) + ", total: " + str(processed + skipped) )
out_path = path + ".features" + "." + VERSION
# skip files that have been processed already
if os.path.exists(out_path) and os.path.getmtime(out_path) > os.path.getmtime(path):
skipped = skipped + 1
continue
try:
f = gzip.open (path)
fo = open(out_path , 'w')
processed = processed + 1
isJSON = False
# Initialise structures for this day
stackTraces = []
tags = set()
userId = None
version = None
sessionStartMicroTime = 0
sessionEndMicroTime = 0
sessionDate = ''
# Helper function to export data so far
# TODO: De-nest this function
def writeDataToFile():
if (len(stackTraces) == 0): # If no stack traces, skip
if os.path.exists(out_path):
os.remove(out_path)
return
print (json.dumps(
{
"StackTraces" : stackTraces,
"Tags" : list(tags),
"UserID": userId,
"WorkspaceVersion": version,
"SessionDuration": sessionEndMicroTime - sessionStartMicroTime,
"Date": sessionDate
}), file=fo)
# Process each line of the session file
for ln in f:
if ln.startswith("Downloading phase"): # Marker from the download script, ignore
continue
data = json.loads(ln)
# Compute the first day
if sessionStartMicroTime == 0:
sessionStartMicroTime = int(data["MicroTime"])
sessionDate = data["DateTime"].split(" ")[0]
# If a day has rolled over, export the data
if sessionDate != data["DateTime"].split(" ")[0]:
print (path + " has session over multiple days")
# Split the session: write session so far to file, then reset data collection.
writeDataToFile()
stackTraces = []
tags = set()
sessionStartMicroTime = int(data["MicroTime"])
sessionDate = data["DateTime"].split(" ")[0]
sessionEndMicroTime = int(data["MicroTime"])
# Keep track of what we've seen in the data file
tags.add(data["Tag"])
# Data is in base64 to protect against special characters, unpack it
b64decodedData = base64.b64decode(data["Data"])
# Split what to do based on what kind of message this is
# Text entered into the search box
if data["Tag"] == "StackTrace":
stackTraces.append(b64decodedData)
# A workspace being reported
if data["Tag"] == "Workspace":
if b64decodedData == '': # An empty workspace, ignore
continue
# Select which feature extraction library to use depending on what version on the file format
feature_lib = None
if b64decodedData.startswith("<"):
feature_lib = features_XML
else:
isJSON = True
continue # Skip JSON coded files for now
# feature_lib = features_JSON
# Extract version number (first time only)
if (version == None):
version = feature_lib.getVersion(b64decodedData)
# Extract user ID (first time only)
if userId == None:
userId = data["UserID"]
except Exception as e:
        # If there was a problem, get the stack trace for what happened
exc_type, exc_value, exc_traceback = sys.exc_info()
# Log it
print (e)
print (path)
traceback.print_tb(exc_traceback, file=sys.stdout)
# Remove partial results
fo.flush()
os.remove(out_path)
err_count = err_count + 1
continue
# Flush any further data to the file
writeDataToFile()
| mit | 1,103,576,669,027,327,500 | 31.579882 | 198 | 0.562841 | false |
tomekwojcik/ForgeryPy | tests/test_date.py | 1 | 1148 | # -*- coding: utf-8 -*-
import datetime
from unittest import TestCase
from forgery_py.forgery import date
class DateForgeryTestCase(TestCase):
def test_day_of_week(self):
result = date.day_of_week()
assert result in date.DAYS
result = date.day_of_week(abbr=True)
assert result in date.DAYS_ABBR
def test_month(self):
result = date.month()
assert result in date.MONTHS
result = date.month(abbr=True)
assert result in date.MONTHS_ABBR
result = date.month(numerical=True)
assert result in range(1, 13)
def test_year(self):
today = datetime.date.today()
result = date.year()
assert result > today.year
result = date.year(past=True)
assert result < today.year
def test_day(self):
result = date.day()
assert result in range(1, 32)
result = date.day(2)
assert result in range(1, 3)
def test_date(self):
today = datetime.date.today()
result = date.date()
assert result > today
result = date.date(past=True)
assert result < today | mit | -7,785,290,931,851,571,000 | 21.98 | 44 | 0.599303 | false |
nasa/39A | spaceapps/locations/forms.py | 1 | 3715 | from django.forms import Form, ModelForm, CharField, ChoiceField
from django.forms.models import (
modelformset_factory,
inlineformset_factory,
BaseInlineFormSet,
)
from django.forms.formsets import formset_factory, BaseFormSet
import selectable.forms as selectable
from awards.models import LocalAward, Nomination
from registration.models import Registration
from projects.models import Project
from .lookups import UserLookup
from .models import (
Location,
Sponsor,
Lead,
Resource,
)
class LocalAwardForm(Form):
def __init__(self, projects, *args, **kwargs):
super(LocalAwardForm, self).__init__(*args, **kwargs)
self.fields['project'] = ChoiceField(choices=projects)
choices=(('1', 'First',))
title = CharField(max_length=100)
project = ChoiceField(choices=choices)
class LocationForm(ModelForm):
class Meta:
model = Location
exclude = ('name', 'slug', 'private', 'start', 'end' )
class LeadForm(ModelForm):
class Meta(object):
model = Lead
widgets = {
'lead': selectable.AutoCompleteSelectWidget(
lookup_class=UserLookup)
}
class CheckInForm(ModelForm):
test = 'happy'
class Meta(object):
model = Registration
# fields = ('check_in', )
CheckInFormSet = modelformset_factory(
Registration,
form=CheckInForm,
extra=0,
)
SponsorFormSet = inlineformset_factory(
Location,
Sponsor,
extra=1,
)
LeadFormSet = inlineformset_factory(
Location,
Lead,
form=LeadForm,
extra=1,
)
ResourceFormSet = inlineformset_factory(
Location,
Resource,
extra=1,
)
class LocalAwardBaseFormSet(BaseFormSet):
def __init__(self, projects, *args, **kwargs):
self.projects = projects
super(LocalAwardBaseFormSet, self).__init__(*args, **kwargs)
def _construct_forms(self):
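        # Build each form, passing the shared projects queryset through as a kwarg.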
self.forms = []
for i in xrange(self.total_form_count()):
self.forms.append(self._construct_form(i, projects=self.projects))
LocalAwardFormSet = formset_factory(
LocalAwardForm,
formset=LocalAwardBaseFormSet,
extra=1,
)
class AwardBaseFormSet(BaseInlineFormSet):
def __init__(self, *args, **kwargs):
self.projects = kwargs.pop('projects')
super(AwardBaseFormSet, self).__init__(*args, **kwargs)
def _construct_forms(self):
self.forms = []
for i in xrange(self.total_form_count()):
self.forms.append(self._construct_form(i, projects=self.projects))
class AwardForm(ModelForm):
class Meta:
model = LocalAward
def __init__(self, *args, **kwargs):
projects = kwargs.pop('projects')
super(AwardForm, self).__init__(*args, **kwargs)
if projects is not None:
self.fields['project'].queryset = Project.objects.filter(
id__in=projects).distinct()
AwardFormSet = inlineformset_factory(
Location,
LocalAward,
form=AwardForm,
formset=AwardBaseFormSet,
extra=1,
)
class NominationForm(ModelForm):
class Meta:
model = LocalAward
def __init__(self, *args, **kwargs):
projects = kwargs.pop('projects')
super(NominationForm, self).__init__(*args, **kwargs)
if projects is not None:
self.fields['project'].queryset = Project.objects.filter(
id__in=projects).distinct()
NominationFormSet = inlineformset_factory(
Location,
Nomination,
form=NominationForm,
formset=AwardBaseFormSet,
extra=2,
max_num=2,
)
| apache-2.0 | -321,959,692,586,215,400 | 25.161972 | 78 | 0.621534 | false |
linsalrob/EdwardsLab | bin/NSF_bibtex_by_year.py | 1 | 1668 | """
Parse a bibtex file and only print those entries within a certain number of years
"""
import os
import sys
import argparse
import datetime
from pybtex.database import parse_file, BibliographyData
if __name__ == "__main__":
now = datetime.datetime.now()
earlyyear = now.year - 4
    parser = argparse.ArgumentParser(description='Parse a bibtex file and only print entries within a certain number of years')
parser.add_argument('-f', help='bibtex file', required=True)
parser.add_argument('-y', help="Earliest year to report conflict (default={})".format(earlyyear), default=earlyyear, type=int)
args = parser.parse_args()
entries = set()
dupentries=False
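    # pybtex chokes on duplicate entries, so scan the raw file for duplicate keys first.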
with open(args.f, 'r') as bin:
for l in bin:
if l.startswith('@'):
l = l.replace('@misc', '')
l = l.replace('@article', '')
l = l.replace('@inproceedings', '')
if l in entries:
sys.stderr.write("Duplicate entry " + l.replace('{', '').replace(',', ''))
dupentries=True
entries.add(l)
if dupentries:
sys.stderr.write("FATAL: The bibtex file has duplicate entries in it. Please remove them before trying to continue\n")
sys.stderr.write("(It is an issue with Google Scholar, but pybtex breaks with duplicate entries. Sorry)\n")
sys.exit(-1)
bib = parse_file(args.f, 'bibtex')
for e in bib.entries:
if 'year' in bib.entries[e].fields:
if int(bib.entries[e].fields['year']) >= args.y:
bib_data = BibliographyData({e : bib.entries[e]})
print(bib_data.to_string('bibtex'))
| mit | 4,710,846,875,079,895,000 | 35.26087 | 130 | 0.598321 | false |
izapolsk/integration_tests | cfme/tests/containers/test_reports.py | 1 | 15141 | import re
from traceback import format_exc
import pytest
from wrapanapi.utils import eval_strings
from cfme import test_requirements
from cfme.containers.provider import ContainersProvider
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.wait import TimedOutError
pytestmark = [
pytest.mark.usefixtures('setup_provider'),
pytest.mark.meta(
server_roles='+ems_metrics_coordinator +ems_metrics_collector +ems_metrics_processor'),
pytest.mark.tier(1),
pytest.mark.long_running,
pytest.mark.provider([ContainersProvider], scope='function'),
test_requirements.containers
]
@pytest.fixture(scope='module')
def node_hardwares_db_data(appliance):
"""Grabbing hardwares table data for nodes"""
db = appliance.db.client
hardwares_table = db['hardwares']
container_nodes = db['container_nodes']
out = {}
for node in db.session.query(container_nodes).all():
out[node.name] = hardwares_table.__table__.select().where(
hardwares_table.id == node.id
).execute().fetchone()
return out
def get_vpor_data_by_name(vporizer_, name):
return [vals for vals in vporizer_ if vals.resource_name == name]
def get_report(appliance, menu_name, candu=False):
"""Queue a report by menu name , wait for finish and return it"""
try:
saved_report = appliance.collections.reports.instantiate(
type='Configuration Management',
subtype='Containers',
menu_name=menu_name,
is_candu=candu
).queue(wait_for_finish=True)
except TimedOutError:
pytest.skip('Could not find report "{}" in containers.\nTraceback:\n{}'
.format(menu_name, format_exc()))
return saved_report
def test_container_reports_base_on_options(soft_assert, appliance):
"""This test verifies that all containers options are available in the report 'based on'
Dropdown in the report creation
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
view = navigate_to(appliance.collections.reports, 'Add')
for base_on in (
'Chargeback for Images',
'Container Images',
'Container Services',
'Container Templates',
'Containers',
re.compile(r'Performance - Container\s*Nodes'),
re.compile(r'Performance - Container\s*Projects'),
'Performance - Containers'
):
compare = (base_on.match if hasattr(base_on, 'match') else base_on.__eq__)
option = [opt for opt in view.base_report_on.all_options
if compare(str(opt.text))]
soft_assert(option, 'Could not find option "{}" for base report on.'.format(base_on))
def test_report_pods_per_ready_status(appliance, soft_assert, provider):
"""Testing 'Pods per Ready Status' report, see polarion case for more info
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
pods_per_ready_status = provider.pods_per_ready_status()
report = get_report(appliance, 'Pods per Ready Status')
for row in report.data.rows:
name = row['# Pods per Ready Status']
readiness_ui = bool(eval_strings([row['Ready Condition Status']]).pop())
if soft_assert(name in pods_per_ready_status, # this check based on BZ#1435958
'Could not find pod "{}" in openshift.'
.format(name)):
expected_readiness = bool(all(pod for pod in pods_per_ready_status.get(name, False)))
soft_assert(expected_readiness == readiness_ui,
'For pod "{}" expected readiness is "{}" Found "{}"'
.format(name, expected_readiness, readiness_ui))
def test_report_nodes_by_capacity(appliance, soft_assert, node_hardwares_db_data):
"""Testing 'Nodes By Capacity' report, see polarion case for more info
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
report = get_report(appliance, 'Nodes By Capacity')
for row in report.data.rows:
hw = node_hardwares_db_data[row['Name']]
soft_assert(hw.cpu_total_cores == int(row['CPU Cores']),
'Number of CPU cores is wrong: expected {}'
' got {}'.format(hw.cpu_total_cores, row['CPU Cores']))
# The following block is to convert whatever we have to MB
memory_ui = float(re.sub(r'[a-zA-Z,]', '', row['Memory']))
if 'gb' in row['Memory'].lower():
memory_mb_ui = memory_ui * 1024
# Shift hw.memory_mb to GB, round to the number of decimals of memory_mb_db
# and shift back to MB:
memory_mb_db = round(hw.memory_mb / 1024.0,
len(str(memory_mb_ui).split('.')[1])) * 1024
else: # Assume it's MB
memory_mb_ui = memory_ui
memory_mb_db = hw.memory_mb
soft_assert(memory_mb_ui == memory_mb_db,
'Memory (MB) is wrong for node "{}": expected {} got {}'
.format(row['Name'], memory_mb_ui, memory_mb_db))
def test_report_nodes_by_cpu_usage(appliance, soft_assert, vporizer):
"""Testing 'Nodes By CPU Usage' report, see polarion case for more info
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
report = get_report(appliance, 'Nodes By CPU Usage')
for row in report.data.rows:
vpor_values = get_vpor_data_by_name(vporizer, row["Name"])[0]
usage_db = round(vpor_values.max_cpu_usage_rate_average, 2)
usage_report = round(float(row['CPU Usage (%)']), 2)
soft_assert(usage_db == usage_report,
'CPU usage is wrong for node "{}": expected {} got {}'
.format(row['Name'], usage_db, usage_report))
def test_report_nodes_by_memory_usage(appliance, soft_assert, vporizer):
"""Testing 'Nodes By Memory Usage' report, see polarion case for more info
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
report = get_report(appliance, 'Nodes By Memory Usage')
for row in report.data.rows:
vpor_values = get_vpor_data_by_name(vporizer, row["Name"])[0]
usage_db = round(vpor_values.max_mem_usage_absolute_average, 2)
usage_report = round(float(row['Memory Usage (%)']), 2)
soft_assert(usage_db == usage_report,
'CPU usage is wrong for node "{}": expected {} got {}.'
.format(row['Name'], usage_db, usage_report))
def test_report_number_of_nodes_per_cpu_cores(appliance, soft_assert, node_hardwares_db_data):
"""Testing 'Number of Nodes per CPU Cores' report, see polarion case for more info
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
report = get_report(appliance, 'Nodes by Number of CPU Cores')
for row in report.data.rows:
hw = node_hardwares_db_data[row['Name']]
soft_assert(hw.cpu_total_cores == int(row['Hardware Number of CPU Cores']),
'Hardware Number of CPU Cores is wrong for node "{}": expected {} got {}.'
.format(row['Name'], hw.cpu_total_cores, row['Hardware Number of CPU Cores']))
def test_report_projects_by_number_of_pods(appliance, soft_assert):
"""Testing 'Projects by Number of Pods' report, see polarion case for more info
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
container_projects = appliance.db.client['container_projects']
container_pods = appliance.db.client['container_groups']
report = get_report(appliance, 'Projects by Number of Pods')
for row in report.data.rows:
pods_count = len(container_pods.__table__.select().where(
container_pods.container_project_id ==
container_projects.__table__.select().where(
container_projects.name == row['Project Name']).execute().fetchone().id
).execute().fetchall())
soft_assert(pods_count == int(row['Number of Pods']),
'Number of pods is wrong for project "{}". expected {} got {}.'
.format(row['Project Name'], pods_count, row['Number of Pods']))
def test_report_projects_by_cpu_usage(appliance, soft_assert, vporizer):
"""Testing 'Projects By CPU Usage' report, see polarion case for more info
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
report = get_report(appliance, 'Projects By CPU Usage')
for row in report.data.rows:
vpor_values = get_vpor_data_by_name(vporizer, row["Name"])[0]
usage_db = round(vpor_values.max_cpu_usage_rate_average, 2)
usage_report = round(float(row['CPU Usage (%)']), 2)
soft_assert(usage_db == usage_report,
'CPU usage is wrong for project "{}": expected {} got {}'
.format(row['Name'], usage_db, usage_report))
def test_report_projects_by_memory_usage(appliance, soft_assert, vporizer):
"""Testing 'Projects By Memory Usage' report, see polarion case for more info
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
report = get_report(appliance, 'Projects By Memory Usage')
for row in report.data.rows:
vpor_values = get_vpor_data_by_name(vporizer, row["Name"])[0]
usage_db = round(vpor_values.max_mem_usage_absolute_average, 2)
usage_report = round(float(row['Memory Usage (%)']), 2)
soft_assert(usage_db == usage_report,
'CPU usage is wrong for project "{}": expected {} got {}.'
.format(row['Name'], usage_db, usage_report))
def test_report_pod_counts_for_container_images_by_project(appliance, provider, soft_assert):
"""Testing 'Pod counts For Container Images by Project' report,\
see polarion case for more info
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
report = get_report(appliance, 'Pod counts For Container Images by Project', candu=True)
pods_api = provider.mgmt.list_pods()
pods_per_project = {}
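    # Group the pods returned by the API by their project (namespace).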
for project in provider.mgmt.list_project_names():
pods_per_project[project] = [
pd for pd in pods_api if pd.metadata.namespace == project]
rows = list(report.data.rows)
for row in rows:
project_name, pod_name = row['Project Name'], row['Pod Name']
pod = [pd for pd in pods_per_project[project_name] if pd.metadata.name == pod_name]
soft_assert(pod, 'Could not find pod "{}" of project "{}" in the report.'
.format(pod_name, project_name))
pod = pod.pop()
for pd in pods_per_project[project_name]:
expected_image = pd.spec.containers[0].image
pod_images = [r['Image Name'] for r in rows if r['Pod Name'] == pod_name]
# Use 'in' since the image name in the API may include also registry and tag
soft_assert([img_nm for img_nm in pod_images if img_nm in expected_image],
'Could not find image "{}" in pod "{}". Pod images in report: {}'
.format(expected_image, pod_name, pod_images))
def test_report_recently_discovered_pods(appliance, provider, soft_assert):
"""Testing 'Recently Discovered Pods' report, see polarion case for more info
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
report = get_report(appliance, 'Recently Discovered Pods')
pods_in_report = [row['Name'] for row in report.data.rows]
pods_per_ready_status = provider.pods_per_ready_status()
for pod in pods_per_ready_status.keys():
soft_assert(pod in pods_in_report,
'Could not find pod "{}" in report.'.format(pod))
def test_report_number_of_images_per_node(appliance, provider, soft_assert):
"""Testing 'Number of Images per Node' report, see polarion case for more info
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
pods_api = provider.mgmt.list_pods()
report = get_report(appliance, 'Number of Images per Node', candu=True)
report_data = list(report.data.rows)
for pod in pods_api:
expected_image = pod.spec.containers[0].image
node = pod.spec.node_name
pod_name = pod.metadata.name
pod_images = [row['Image Name'] for row in report_data
if row['Pod Name'] == pod_name and
row['Node Name'] == node]
# Use 'in' since the image name in the API may include also registry and tag
is_image = [img_nm for img_nm in pod_images if img_nm in expected_image]
soft_assert(is_image,
'Expected image for pod "{0}" in node {1} is "{2}". found images: {3}'
.format(pod_name, node, expected_image, pod_images))
def test_report_projects_by_number_of_containers(appliance, provider, soft_assert):
"""Testing 'Projects by Number of Containers' report, see polarion case for more info
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
report = get_report(appliance, 'Projects by Number of Containers')
pods_api = provider.mgmt.list_pods()
# Since there is no provider column, in case of more than 1 provider we get some projects
# multiple times in the report. Because of that for each project name we are collecting
# all the 'Containers Count' columns and then checking that the containers count that we
# fetched from the API is found _in_ the counts under this project name
projects_containers_count = {}
for row in report.data.rows:
if row['Project Name'] not in projects_containers_count:
projects_containers_count[row['Project Name']] = []
projects_containers_count[row['Project Name']].append(int(row['Containers Count']))
for project_name, containers_counts in projects_containers_count.items():
containers_counts_api = sum(
[len(pod.spec.containers) for pod in pods_api
if pod.metadata.namespace == project_name]
)
soft_assert(containers_counts_api in containers_counts,
'Expected containers count for project {} should be {}. Found {} instead.'
                    .format(project_name, containers_counts_api, containers_counts))
| gpl-2.0 | -8,170,377,183,069,185,000 | 38.429688 | 98 | 0.625586 | false |
Samurais/sce_deck | usr/send_mail_notifications.py | 1 | 2677 | import smtplib
import time
import sys
import os
print "Usage:python", sys.argv[0], "[recipient](1..n) reports_dir"
print "Example:python", sys.argv[0], " [email protected] [email protected] /opt/reports"
# constants
fromaddr = "[email protected]"
password = '********'
def timestamp():
ISOTIMEFORMAT = "%Y-%m-%d-%X"
return str(time.strftime(ISOTIMEFORMAT))
def send_mail(fromaddr, password, toaddrs, reports_dir):
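    # Open a TLS session to Gmail's SMTP server and send the report to each recipient.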
server = smtplib.SMTP('smtp.gmail.com:587')
server.ehlo()
server.starttls()
server.login(fromaddr, password)
for x in toaddrs:server.sendmail(fromaddr, x, mail_content(x, reports_dir))
server.quit()
def read_surefire_report(reports_dir):
report_lines = []
if os.path.isdir(reports_dir):
reports = os.listdir(reports_dir)
for x in reports:
if x.endswith(".txt"):
with open("%s/%s" % (reports_dir, x), "r") as f:
report_lines.extend(f.readlines())
return report_lines
def mail_content(recipient, reports_dir):
recipient_name = recipient.split("@")[0]
site_endpoint = "http://idlerx.cn.ibm.com:8080/sce-deck"
mail_body = [
"From: Decker<%s>" % fromaddr,
"To: %s<%s>" % (recipient_name, recipient),
"Subject: SCE-DECK-REPORT-" + timestamp(),
"",
"hi,%s" % recipient_name,
"",
"I am glad to tell you that SCE Deck for BPM was rebuilt successfully just now.",
"Some reports may help you get the latest status about BPM v85 SCE Prj.",
"",
"Junit Testcase %s/surefire-report.html" % site_endpoint,
"Test Coverage %s/cobertura/index.html" % site_endpoint,
"Tag List %s/taglist.html" % site_endpoint,
"Duplicate code detection %s/cpd.html" % site_endpoint ,
"Verification of coding rules %s/pmd.html" % site_endpoint,
"",
"Test Result Quick View:"
]
mail_body.extend(read_surefire_report(reports_dir))
mail_body.extend(
[ "What's more ?",
"Project,Dependencies,javadoc ,test javadoc ,Test Source Xref, Source Xref ...",
site_endpoint,
"",
"----------------------",
"Decker Sce",
"BPM SCE Automation Manager",
"----------------------",
"The Journey is the Reward."
])
return "\r\n".join(mail_body)
if __name__ == "__main__":
recipients = sys.argv[1:-1:] if len(sys.argv[:]) > 2 else []
reports_dir = sys.argv[-1]
if len(recipients) > 0:
print ">> Send mail to ", ",".join(recipients)
print ">> Reports - ", reports_dir
send_mail(fromaddr, password, recipients, reports_dir)
else:
print ">> No boby is in the recipients."
| apache-2.0 | -2,456,349,963,366,588,400 | 33.320513 | 87 | 0.595443 | false |
tzicatl/lfs-shipping-ups | shipping_ups/plugin.py | 1 | 2998 | from django.contrib.sites.models import get_current_site
from lfs.cart.utils import get_cart
from lfs.customer.utils import get_customer
from lfs.plugins import ShippingMethodPriceCalculator
from ups.client import UPSClient, UPSError
from ups.model import Package, Address
from .models import UPSConfiguration
class UPSPriceCalculator(ShippingMethodPriceCalculator):
#Cache price
_price = None
def _ups_config(self):
site = get_current_site(self.request)
return UPSConfiguration.objects.get(site=site)
def _get_quote(self):
ups_cfg = self._ups_config()
credentials = {
'username': ups_cfg.username,
'password': ups_cfg.password,
'access_license': ups_cfg.access_license,
'shipper_number': ups_cfg.shipper_number,
}
shipper = Address(
name=ups_cfg.shipper_name,
address=ups_cfg.shipper_address,
city=ups_cfg.shipper_city,
state=ups_cfg.shipper_state,
zip=ups_cfg.shipper_zipcode,
country=ups_cfg.shipper_country.code
)
customer = get_customer(self.request)
ship_address = customer.get_selected_shipping_address()
recipient = Address(
name=' '.join([ship_address.firstname or '', ship_address.lastname or '']),
address=' '.join([ship_address.line1 or '', ship_address.line2 or '']),
city=ship_address.city,
state=ship_address.state,
zip=ship_address.zip_code,
country=ship_address.country.code
)
cart = get_cart(self.request)
#weight, length, width, height
product_info = [0, 0, 0, 0]
for line_item in cart.get_items():
product_info[0] += line_item.product.weight * line_item.amount
product_info[1] += line_item.product.length * line_item.amount
product_info[2] += line_item.product.width * line_item.amount
product_info[3] += line_item.product.height * line_item.amount
#import pdb; pdb.set_trace()
quote = 0.0
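        # Only request a UPS rate when every accumulated package dimension is non-zero.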
if all(product_info):
packages = [Package(*product_info)]
ups = UPSClient(credentials)
response = ups.rate(
packages=packages,
packaging_type=ups_cfg.default_packaging_type,
shipper=shipper,
recipient=recipient
)
quote = float(response['info'][0]['cost'])
return quote
def get_price_net(self):
return self.get_price_gross()
def get_price_gross(self):
#XXX No error handler :P
# return self.get_price_net() * ((100 + self.shipping_method.tax.rate) / 100)
try:
if self._price is None:
self._price = self._get_quote()
return self._price
except UPSError:
return 0.0
def get_tax(self):
#How do I calculate taxes?
return 0.0 | bsd-3-clause | -7,931,353,110,327,888,000 | 31.956044 | 87 | 0.588726 | false |
mlperf/training_results_v0.6 | NVIDIA/benchmarks/minigo/implementations/tensorflow/minigo/rl_loop/example_buffer.py | 1 | 12801 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime as dt
import fire
import functools
import itertools
import multiprocessing as mp
import os
import sys
import random
import subprocess
import time
from collections import deque
sys.path.insert(0, '.')
from absl import flags
import tensorflow as tf
from tqdm import tqdm
import numpy as np
import preprocessing
from utils import timer, ensure_dir_exists
from rl_loop import fsdb
READ_OPTS = preprocessing.TF_RECORD_CONFIG
LOCAL_DIR = "data/"
# How many positions to look at per generation.
# Per AGZ, 2048 minibatch * 1k = 2M positions/generation
EXAMPLES_PER_GENERATION = 2 ** 21
MINIMUM_NEW_GAMES = 12000
AVG_GAMES_PER_MODEL = 20000
def pick_examples_from_tfrecord(filename, sampling_frac=0.02):
# tf_record_iterator is deprecated. Silence those warnings for now.
# TODO(tommadams): remove this once
# https://github.com/tensorflow/minigo/issues/740 is fixed.
v = tf.logging.get_verbosity()
tf.logging.set_verbosity(tf.logging.ERROR)
protos = list(tf.python_io.tf_record_iterator(filename, READ_OPTS))
tf.logging.set_verbosity(v)
number_samples = np.random.poisson(len(protos) * sampling_frac)
choices = random.sample(protos, min(len(protos), number_samples))
return choices
def choose(game, sampling_frac=0.02):
examples = pick_examples_from_tfrecord(game, sampling_frac)
timestamp = file_timestamp(game)
return [(timestamp, ex) for ex in examples]
def file_timestamp(filename):
return int(os.path.basename(filename).split('-')[0])
def _ts_to_str(timestamp):
return dt.datetime.fromtimestamp(timestamp).strftime("%Y-%m-%d %H:%M:%S")
class ExampleBuffer():
def __init__(self, max_size=2**21, sampling_frac=0.02):
self.examples = deque(maxlen=max_size)
self.max_size = max_size
self.sampling_frac = sampling_frac
self.func = functools.partial(choose, sampling_frac=sampling_frac)
self.total_updates = 0
def parallel_fill(self, games, threads=8):
""" games is a list of .tfrecord.zz game records. """
games.sort(key=os.path.basename)
# A couple extra in case parsing fails
max_games = int(self.max_size / self.sampling_frac / 200) + 480
if len(games) > max_games:
games = games[-max_games:]
with mp.pool.ThreadPool(threads) as pool:
res = tqdm(pool.imap(self.func, games), total=len(games))
self.examples.extend(itertools.chain.from_iterable(res))
print("Got", len(self.examples), "examples")
def update(self, new_games):
""" new_games is a list of .tfrecord.zz new game records. """
new_games.sort(key=os.path.basename)
first_new_game = None
for idx, game in enumerate(new_games):
timestamp = file_timestamp(game)
if timestamp <= self.examples[-1][0]:
continue
elif first_new_game is None:
first_new_game = idx
num_new_games = len(new_games) - idx
                print("Found {}/{} new games".format(
                    num_new_games, len(new_games)))
                self.total_updates += num_new_games
self.examples.extend(self.func(game))
if first_new_game is None:
print("No new games", file_timestamp(
new_games[-1]), self.examples[-1][0])
def flush(self, path, num_out=1):
num_worker = num_out
# random.shuffle on deque is O(n^2) convert to list for O(n)
self.examples = list(self.examples)
random.shuffle(self.examples)
example_list = [ex[1] for ex in self.examples]
length = len(example_list)//num_out
example_list = example_list[:length*num_out]
i_list = []
for i in range(num_out):
i_list.append((path+'_'+str(i), example_list[i*length:(i+1)*length], False))
with timer("Writing examples to " + path):
with mp.pool.ThreadPool(num_out) as pool:
pool.starmap(preprocessing.write_tf_examples, i_list)
self.examples.clear()
self.examples = deque(maxlen=self.max_size)
@property
def count(self):
return len(self.examples)
def __str__(self):
if self.count == 0:
return "ExampleBuffer: 0 positions"
return "ExampleBuffer: {} positions sampled from {} to {}".format(
self.count,
_ts_to_str(self.examples[0][0]),
_ts_to_str(self.examples[-1][0]))
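# Minimal usage sketch for ExampleBuffer (file paths here are hypothetical;
# the entry points below wire it up against fsdb and rsync'd selfplay dirs):
#   buf = ExampleBuffer(max_size=2**21, sampling_frac=0.02)
#   buf.parallel_fill(["data/000123-deadbeef.tfrecord.zz"], threads=8)
#   buf.flush("data/golden_chunks/000124.tfrecord.zz", num_out=1)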
def files_for_model(model):
return tf.gfile.Glob(os.path.join(LOCAL_DIR, model[1], '*.zz'))
def smart_rsync(
from_model_num=0,
source_dir=None,
dest_dir=LOCAL_DIR):
source_dir = source_dir or fsdb.selfplay_dir()
from_model_num = 0 if from_model_num < 0 else from_model_num
models = [m for m in fsdb.get_models() if m[0] >= from_model_num]
for _, model in models:
_rsync_dir(os.path.join(
source_dir, model), os.path.join(dest_dir, model))
def time_rsync(from_date,
source_dir=None,
dest_dir=LOCAL_DIR):
source_dir = source_dir or fsdb.selfplay_dir()
while from_date < dt.datetime.utcnow():
src = os.path.join(source_dir, from_date.strftime("%Y-%m-%d-%H"))
if tf.gfile.Exists(src):
_rsync_dir(src, os.path.join(
dest_dir, from_date.strftime("%Y-%m-%d-%H")))
from_date = from_date + dt.timedelta(hours=1)
def _rsync_dir(source_dir, dest_dir):
ensure_dir_exists(dest_dir)
with open('.rsync_log', 'ab') as rsync_log:
subprocess.call(['gsutil', '-m', 'rsync', source_dir, dest_dir],
stderr=rsync_log)
def _determine_chunk_to_make(write_dir):
"""
Returns the full path of the chunk to make (gs://...)
and a boolean, indicating whether we should wait for a new model
or if we're 'behind' and should just write out our current chunk immediately
True == write immediately.
"""
models = fsdb.get_models()
# Last model is N. N+1 (should be) training. We should gather games for N+2.
chunk_to_make = os.path.join(write_dir, str(
models[-1][0] + 1) + '.tfrecord.zz')
if not tf.gfile.Exists(chunk_to_make):
# N+1 is missing. Write it out ASAP
print("Making chunk ASAP:", chunk_to_make)
return chunk_to_make, True
chunk_to_make = os.path.join(write_dir, str(
models[-1][0] + 2) + '.tfrecord.zz')
while tf.gfile.Exists(chunk_to_make):
print("Chunk for next model ({}) already exists. Sleeping.".format(
chunk_to_make))
time.sleep(5 * 60)
models = fsdb.get_models()
chunk_to_make = os.path.join(write_dir, str(
models[-1][0] + 2) + '.tfrecord.zz')
print("Making chunk:", chunk_to_make)
return chunk_to_make, False
def get_window_size(chunk_num):
""" Adjust the window size by how far we are through a run.
At the start of the run, there's a benefit to 'expiring' the completely
random games a little sooner, and scaling up to the 500k game window
specified in the paper.
"""
return min(500000, (chunk_num + 5) * (AVG_GAMES_PER_MODEL // 2))
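# Worked values (derived from AVG_GAMES_PER_MODEL = 20000 above):
# get_window_size(0) == 50000, get_window_size(20) == 250000, and the window
# caps at 500000 games once chunk_num reaches 45.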
def fill_and_wait_time(bufsize=EXAMPLES_PER_GENERATION,
write_dir=None,
threads=32,
start_from=None):
start_from = start_from or dt.datetime.utcnow()
write_dir = write_dir or fsdb.golden_chunk_dir()
buf = ExampleBuffer(bufsize)
chunk_to_make, fast_write = _determine_chunk_to_make(write_dir)
hours = fsdb.get_hour_dirs()
with timer("Rsync"):
time_rsync(min(dt.datetime.strptime(
hours[-1], "%Y-%m-%d-%H/"), start_from))
start_from = dt.datetime.utcnow()
hours = fsdb.get_hour_dirs()
files = (tf.gfile.Glob(os.path.join(LOCAL_DIR, d, "*.zz"))
for d in reversed(hours) if tf.gfile.Exists(os.path.join(LOCAL_DIR, d)))
    # get_window_size expects a chunk number, so derive it from the chunk filename
    files = itertools.islice(files, get_window_size(
        int(os.path.basename(chunk_to_make).split('.')[0])))
models = fsdb.get_models()
buf.parallel_fill(
list(itertools.chain.from_iterable(files)), threads=threads)
print("Filled buffer, watching for new games")
while (fsdb.get_latest_model() == models[-1] or buf.total_updates < MINIMUM_NEW_GAMES):
with timer("Rsync"):
time_rsync(start_from - dt.timedelta(minutes=60))
start_from = dt.datetime.utcnow()
hours = sorted(fsdb.get_hour_dirs(LOCAL_DIR))
new_files = list(map(lambda d: tf.gfile.Glob(
os.path.join(LOCAL_DIR, d, '*.zz')), hours[-2:]))
buf.update(list(itertools.chain.from_iterable(new_files)))
if fast_write:
break
time.sleep(30)
        if fsdb.get_latest_model() != models[-1]:
            print("New model! Waiting for games. Got",
                  buf.total_updates, "new games so far")
latest = fsdb.get_latest_model()
print("New model!", latest[1], "!=", models[-1][1])
print(buf)
buf.flush(chunk_to_make)
def fill_and_wait_models(bufsize=EXAMPLES_PER_GENERATION,
write_dir=None,
threads=8,
model_window=100,
skip_first_rsync=False):
""" Fills a ringbuffer with positions from the most recent games, then
continually rsync's and updates the buffer until a new model is promoted.
Once it detects a new model, iit then dumps its contents for training to
immediately begin on the next model.
"""
write_dir = write_dir or fsdb.golden_chunk_dir()
buf = ExampleBuffer(bufsize)
models = fsdb.get_models()[-model_window:]
if not skip_first_rsync:
with timer("Rsync"):
smart_rsync(models[-1][0] - 6)
files = tqdm(map(files_for_model, models), total=len(models))
buf.parallel_fill(list(itertools.chain(*files)), threads=threads)
print("Filled buffer, watching for new games")
while fsdb.get_latest_model()[0] == models[-1][0]:
with timer("Rsync"):
smart_rsync(models[-1][0] - 2)
new_files = tqdm(map(files_for_model, models[-2:]), total=len(models))
buf.update(list(itertools.chain(*new_files)))
time.sleep(60)
latest = fsdb.get_latest_model()
print("New model!", latest[1], "!=", models[-1][1])
print(buf)
buf.flush(os.path.join(write_dir, str(latest[0] + 1) + '.tfrecord.zz'))
def make_chunk_for(output_dir=LOCAL_DIR,
local_dir=LOCAL_DIR,
game_dir=None,
model_num=1,
positions=EXAMPLES_PER_GENERATION,
threads=8,
sampling_frac=0.02):
"""
Explicitly make a golden chunk for a given model `model_num`
(not necessarily the most recent one).
While we haven't yet got enough samples (EXAMPLES_PER_GENERATION)
Add samples from the games of previous model.
"""
game_dir = game_dir or fsdb.selfplay_dir()
ensure_dir_exists(output_dir)
models = [model for model in fsdb.get_models() if model[0] < model_num]
buf = ExampleBuffer(positions, sampling_frac=sampling_frac)
files = []
for _, model in sorted(models, reverse=True):
local_model_dir = os.path.join(local_dir, model)
if not tf.gfile.Exists(local_model_dir):
print("Rsyncing", model)
_rsync_dir(os.path.join(game_dir, model), local_model_dir)
files.extend(tf.gfile.Glob(os.path.join(local_model_dir, '*.zz')))
print("{}: {} games".format(model, len(files)))
if len(files) * 200 * sampling_frac > positions:
break
print("Filling from {} files".format(len(files)))
buf.parallel_fill(files, threads=threads)
print(buf)
output = os.path.join(output_dir, str(model_num) + '.tfrecord.zz')
print("Writing to", output)
buf.flush(output)
if __name__ == "__main__":
import sys
remaining_argv = flags.FLAGS(sys.argv, known_only=True)
fire.Fire({
'fill_and_wait_models': fill_and_wait_models,
'fill_and_wait_time': fill_and_wait_time,
'smart_rsync': smart_rsync,
'make_chunk_for': make_chunk_for,
}, remaining_argv[1:])
| apache-2.0 | -7,449,819,088,656,408,000 | 35.470085 | 91 | 0.617452 | false |
wanghuok02/iherb | iherb/spiders/iherbspider.py | 1 | 1439 | import scrapy
import logging
from scrapy.spiders import Spider
from scrapy.selector import Selector
from iherb.items import IherbItem
class IherbSpider(Spider):
name = "iherbspider"
allowed_domains = ["iherb.cn"]
max_page = 5
cur_page = 1
start_urls = [
"http://www.iherb.cn/Supplements?oos=true&disc=false&p=1"
]
def parse(self, response):
brands = response.xpath("//article[div[span[@itemprop='price']]]/a[1]/@title").extract()
desc = response.xpath("//article[div[span[@itemprop='price']]]/a[1]/@title").extract()
urls = response.xpath("//article[div[span[@itemprop='price']]]/a[1]/@href").extract()
prices = response.xpath("//span[@itemprop='price']/@content").extract()
items = []
length = len(brands)
for it in range(length):
item = IherbItem()
item['url'] = urls[it]
item['brand'] = brands[it].split(',')[0]
item['desc'] = brands[it].split(',')[1]
item['price'] = prices[it][1:]
#items.append(item)
yield item
if(self.cur_page <= 431):
self.cur_page += 1
self.logger.info("cur_page*********************************** %s", self.cur_page)
yield scrapy.Request("http://www.iherb.cn/Supplements?oos=true&disc=false&p="+str(self.cur_page), self.parse)
| apache-2.0 | -8,967,152,911,173,662,000 | 35.923077 | 122 | 0.542738 | false |
emanuele/jstsp2015 | simulation.py | 1 | 7420 | """Simulation estimating Type I and Type II error of CBT and KTST.
Author: Sandro Vega-Pons, Emanuele Olivetti
"""
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances
from kernel_two_sample_test import MMD2u, compute_null_distribution
from sklearn.svm import SVC
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import StratifiedKFold, cross_val_score
# from multiprocessing import cpu_count
from joblib import Parallel, delayed
# Temporarily stop warnings to cope with the too verbose sklearn
# GridSearchCV.score warning:
import warnings
warnings.simplefilter("ignore")
# boundaries for seeds generation during parallel processing:
MAX_INT = np.iinfo(np.uint32(1)).max
MIN_INT = np.iinfo(np.uint32(1)).min
def estimate_pvalue(score_unpermuted, scores_null):
iterations = len(scores_null)
p_value = max(1.0/iterations, (scores_null > score_unpermuted).sum() /
float(iterations))
return p_value
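# Note the 1/iterations floor above: with 100 permutations and no null score
# exceeding the unpermuted one, the reported p-value is 0.01 rather than 0,
# e.g. estimate_pvalue(0.9, np.zeros(100)) returns 0.01 (illustrative values).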
def compute_svm_score(K, y, n_folds, scoring='accuracy', random_state=0):
cv = StratifiedKFold(y, n_folds=n_folds, shuffle=True,
random_state=random_state)
clf = SVC(C=1.0, kernel='precomputed')
scores = cross_val_score(clf, K, y, scoring=scoring, cv=cv, n_jobs=1)
score = scores.mean()
return score
def compute_svm_score_nestedCV(K, y, n_folds, scoring='accuracy',
random_state=None,
param_grid=[{'C': np.logspace(-5, 5, 20)}]):
cv = StratifiedKFold(y, n_folds=n_folds, shuffle=True,
random_state=random_state)
scores = np.zeros(n_folds)
for i, (train, test) in enumerate(cv):
cvclf = SVC(kernel='precomputed')
y_train = y[train]
cvcv = StratifiedKFold(y_train, n_folds=n_folds,
shuffle=True,
random_state=random_state)
clf = GridSearchCV(cvclf, param_grid=param_grid, scoring=scoring,
cv=cvcv, n_jobs=1)
clf.fit(K[:, train][train, :], y_train)
scores[i] = clf.score(K[test, :][:, train], y[test])
return scores.mean()
if __name__ == '__main__':
np.random.seed(0)
print("JSTSP Simulation Experiments.")
nA = 20 # size of class A
nB = 20 # size of class B
d = 5 # number of dimensions
# separation between the two normally-distributed classes:
delta = 0.75
twist = np.ones(d)
print("nA = %s" % nA)
print("nB = %s" % nB)
print("d = %s" % d)
print("delta = %s" % delta)
print("twist = %s" % twist)
muA = np.zeros(d)
muB = np.ones(d) * delta
covA = np.eye(d)
covB = np.eye(d) * twist
seed_data = 0 # random generation of data
rng_data = np.random.RandomState(seed_data)
seed_ktst = 0 # random permutations of KTST
rng_ktst = np.random.RandomState(seed_ktst)
seed_cv = 0 # random splits of cross-validation
rng_cv = np.random.RandomState(seed_cv)
svm_param_grid = [{'C': np.logspace(-5, 5, 20)}]
# svm_param_grid = [{'C': np.logspace(-3, 2, 10)}]
repetitions = 100
print("This experiments will be repeated on %s randomly-sampled datasets."
% repetitions)
scores = np.zeros(repetitions)
p_value_scores = np.zeros(repetitions)
mmd2us = np.zeros(repetitions)
p_value_mmd2us = np.zeros(repetitions)
for r in range(repetitions):
print("")
print("Repetition %s" % r)
A = rng_data.multivariate_normal(muA, covA, size=nA)
B = rng_data.multivariate_normal(muB, covB, size=nB)
X = np.vstack([A, B])
y = np.concatenate([np.zeros(nA), np.ones(nB)])
distances = pairwise_distances(X, metric='euclidean')
sigma2 = np.median(distances) ** 2.0
K = np.exp(- distances * distances / sigma2)
# K = X.dot(X.T)
iterations = 10000
mmd2u_unpermuted = MMD2u(K, nA, nB)
print("mmd2u: %s" % mmd2u_unpermuted)
mmd2us[r] = mmd2u_unpermuted
mmd2us_null = compute_null_distribution(K, nA, nB, iterations,
random_state=rng_ktst)
p_value_mmd2u = estimate_pvalue(mmd2u_unpermuted, mmd2us_null)
print("mmd2u p-value: %s" % p_value_mmd2u)
p_value_mmd2us[r] = p_value_mmd2u
scoring = 'accuracy'
n_folds = 5
iterations = 1
# score_unpermuted = compute_svm_score_nestedCV(K, y, n_folds,
# scoring=scoring,
# random_state=rng_cv)
rngs = [np.random.RandomState(rng_cv.randint(low=MIN_INT, high=MAX_INT)) for i in range(iterations)]
scores_unpermuted = Parallel(n_jobs=-1)(delayed(compute_svm_score_nestedCV)(K, y, n_folds, scoring, rngs[i], param_grid=svm_param_grid) for i in range(iterations))
score_unpermuted = np.mean(scores_unpermuted)
print("accuracy: %s" % score_unpermuted)
scores[r] = score_unpermuted
# print("Doing permutations:"),
iterations = 100
scores_null = np.zeros(iterations)
# for i in range(iterations):
# if (i % 10) == 0:
# print(i)
# yi = rng_cv.permutation(y)
# scores_null[i] = compute_svm_score_nestedCV(K, yi, n_folds,
# scoring=scoring,
# random_state=rng_cv)
rngs = [np.random.RandomState(rng_cv.randint(low=MIN_INT, high=MAX_INT)) for i in range(iterations)]
yis = [np.random.permutation(y) for i in range(iterations)]
scores_null = Parallel(n_jobs=-1)(delayed(compute_svm_score_nestedCV)(K, yis[i], n_folds, scoring, rngs[i], param_grid=svm_param_grid) for i in range(iterations))
p_value_score = estimate_pvalue(score_unpermuted, scores_null)
p_value_scores[r] = p_value_score
print("%s p-value: %s" % (scoring, p_value_score))
p_value_threshold = 0.05
mmd2u_power = (p_value_mmd2us[:r+1] <= p_value_threshold).mean()
scores_power = (p_value_scores[:r+1] <= p_value_threshold).mean()
print("p_value_threshold: %s" % p_value_threshold)
print("Partial results - MMD2u: %s , %s: %s" %
(mmd2u_power, scoring, scores_power))
print("")
print("FINAL RESULTS:")
p_value_threshold = 0.1
print("p_value_threshold: %s" % p_value_threshold)
mmd2u_power = (p_value_mmd2us <= p_value_threshold).mean()
scores_power = (p_value_scores <= p_value_threshold).mean()
print("MMD2u Power: %s" % mmd2u_power)
print("%s Power: %s" % (scoring, scores_power))
print("")
p_value_threshold = 0.05
print("p_value_threshold: %s" % p_value_threshold)
mmd2u_power = (p_value_mmd2us <= p_value_threshold).mean()
scores_power = (p_value_scores <= p_value_threshold).mean()
print("MMD2u Power: %s" % mmd2u_power)
print("%s Power: %s" % (scoring, scores_power))
print("")
p_value_threshold = 0.01
print("p_value_threshold: %s" % p_value_threshold)
mmd2u_power = (p_value_mmd2us <= p_value_threshold).mean()
scores_power = (p_value_scores <= p_value_threshold).mean()
print("MMD2u Power: %s" % mmd2u_power)
print("%s Power: %s" % (scoring, scores_power))
print("")
| mit | -3,438,818,910,070,712,000 | 37.645833 | 171 | 0.592453 | false |
JBed/edx-spark | Lab_3/lab3.py | 1 | 17280 | import re
import datetime
from pyspark.sql import Row
month_map = {'Jan': 1, 'Feb': 2, 'Mar':3, 'Apr':4, 'May':5, 'Jun':6, 'Jul':7,
'Aug':8, 'Sep': 9, 'Oct':10, 'Nov': 11, 'Dec': 12}
def parse_apache_time(s):
""" Convert Apache time format into a Python datetime object
Args:
s (str): date and time in Apache time format
Returns:
datetime: datetime object (ignore timezone for now)
"""
return datetime.datetime(int(s[7:11]),
month_map[s[3:6]],
int(s[0:2]),
int(s[12:14]),
int(s[15:17]),
int(s[18:20]))
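# Illustrative call (timestamp format only, not taken from the project data):
# parse_apache_time('01/Aug/1995:00:00:01 -0400') returns
# datetime.datetime(1995, 8, 1, 0, 0, 1); the timezone offset is ignored.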
def parseApacheLogLine(logline):
""" Parse a line in the Apache Common Log format
Args:
logline (str): a line of text in the Apache Common Log format
Returns:
tuple: either a dictionary containing the parts of the Apache Access Log and 1,
or the original invalid log line and 0
"""
match = re.search(APACHE_ACCESS_LOG_PATTERN, logline)
if match is None:
return (logline, 0)
size_field = match.group(9)
if size_field == '-':
size = long(0)
else:
size = long(match.group(9))
return (Row(
host = match.group(1),
client_identd = match.group(2),
user_id = match.group(3),
date_time = parse_apache_time(match.group(4)),
method = match.group(5),
endpoint = match.group(6),
protocol = match.group(7),
response_code = int(match.group(8)),
content_size = size
), 1)
APACHE_ACCESS_LOG_PATTERN = '^(\S+) (\S+) (\S+).*\[([\w:/]+\s[+\-]\d{4})\] "(\S+) (\S*) (\S* *)" (\d{3}) (\S+)'
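# For a typical (made-up) Common Log line such as
#   '127.0.0.1 - - [01/Aug/1995:00:00:01 -0400] "GET /index.html HTTP/1.0" 200 1839'
# the groups map to: (1) host, (2) client identd, (3) user id, (4) timestamp,
# (5) method, (6) endpoint, (7) protocol, (8) response code, (9) content size.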
import sys
import os
from test_helper import Test
baseDir = os.path.join('data')
inputPath = os.path.join('cs100', 'lab2', 'apache.access.log.PROJECT')
logFile = os.path.join(baseDir, inputPath)
def parseLogs():
""" Read and parse log file """
parsed_logs = (sc
.textFile(logFile)
.map(parseApacheLogLine)
.cache())
access_logs = (parsed_logs
.filter(lambda s: s[1] == 1)
.map(lambda s: s[0])
.cache())
failed_logs = (parsed_logs
.filter(lambda s: s[1] == 0)
.map(lambda s: s[0]))
failed_logs_count = failed_logs.count()
if failed_logs_count > 0:
print 'Number of invalid logline: {}'.format(failed_logs.count())
for line in failed_logs.take(20):
print 'Invalid logline: {}'.format(line)
    print 'Read {} lines, successfully parsed {} lines, failed to parse {} lines'.format(
        parsed_logs.count(), access_logs.count(), failed_logs.count())
return parsed_logs, access_logs, failed_logs
parsed_logs, access_logs, failed_logs = parseLogs()
APACHE_ACCESS_LOG_PATTERN = '^(\S+) (\S+) (\S+).*\[([\w:\/]+\s[+\-]\d{4})\] "(\S+) (\S*)( *\S+ *)*" (\d{3}) (\S+)'
parsed_logs, access_logs, failed_logs = parseLogs()
Test.assertEquals(failed_logs.count(), 0, 'incorrect failed_logs.count()')
Test.assertEquals(parsed_logs.count(), 1043177 , 'incorrect parsed_logs.count()')
Test.assertEquals(access_logs.count(), parsed_logs.count(), 'incorrect access_logs.count()')
content_sizes = access_logs.map(lambda log: log.content_size).cache()
print 'Content Size Avg: {}, Min: {}, Max: {}'.format(
    content_sizes.reduce(lambda a, b : a + b) / content_sizes.count(),
    content_sizes.min(),
    content_sizes.max())
responseCodeToCount = (access_logs
.map(lambda log: (log.response_code, 1))
.reduceByKey(lambda a, b : a + b)
.cache())
responseCodeToCountList = responseCodeToCount.take(100)
print 'Found {} response codes'.format(len(responseCodeToCountList))
print 'Response Code Counts: {}'.format(responseCodeToCountList)
assert len(responseCodeToCountList) == 7
assert sorted(responseCodeToCountList) == [(200, 940847), (302, 16244), (304, 79824), (403, 58), (404, 6185), (500, 2), (501, 17)]
labels = responseCodeToCount.map(lambda (x, y): x).collect()
print labels
count = access_logs.count()
fracs = responseCodeToCount.map(lambda (x, y): (float(y) / count)).collect()
print fracs
import matplotlib.pyplot as plt
def pie_pct_format(value):
""" Determine the appropriate format string for the pie chart percentage label
Args:
value: value of the pie slice
Returns:
        str: formatted string label; if the slice is too small to fit, returns an empty string for the label
"""
return '' if value < 7 else '{}'.format(value)
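# e.g. pie_pct_format(3.2) returns '' (slice too small to label), while
# pie_pct_format(45.7) returns '45.7'.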
fig = plt.figure(figsize=(4.5, 4.5), facecolor='white', edgecolor='white')
colors = ['yellowgreen', 'lightskyblue', 'gold', 'purple', 'lightcoral', 'yellow', 'black']
explode = (0.05, 0.05, 0.1, 0, 0, 0, 0)
patches, texts, autotexts = plt.pie(fracs, labels=labels, colors=colors,
explode=explode, autopct=pie_pct_format,
shadow=False, startangle=125)
for text, autotext in zip(texts, autotexts):
if autotext.get_text() == '':
        text.set_text('')  # If the slice is too small to fit, don't show a text label
plt.legend(labels, loc=(0.80, -0.1), shadow=True)
hostCountPairTuple = access_logs.map(lambda log: (log.host, 1))
hostSum = hostCountPairTuple.reduceByKey(lambda a, b : a + b)
hostMoreThan10 = hostSum.filter(lambda s: s[1] > 10)
hostsPick20 = (hostMoreThan10
.map(lambda s: s[0])
.take(20))
print 'Any 20 hosts that have accessed more than 10 times: {}'.format(hostsPick20)
endpoints = (access_logs
.map(lambda log: (log.endpoint, 1))
.reduceByKey(lambda a, b : a + b)
.cache())
ends = endpoints.map(lambda (x, y): x).collect()
counts = endpoints.map(lambda (x, y): y).collect()
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, len(ends), 0, max(counts)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Endpoints')
plt.ylabel('Number of Hits')
plt.plot(counts)
endpointCounts = (access_logs
.map(lambda log: (log.endpoint, 1))
.reduceByKey(lambda a, b : a + b))
topEndpoints = endpointCounts.takeOrdered(10, lambda s: -1 * s[1])
print 'Top Ten Endpoints: {}'.format(topEndpoints)
assert topEndpoints == [(u'/images/NASA-logosmall.gif', 59737), (u'/images/KSC-logosmall.gif', 50452), (u'/images/MOSAIC-logosmall.gif', 43890), (u'/images/USA-logosmall.gif', 43664), (u'/images/WORLD-logosmall.gif', 43277), (u'/images/ksclogo-medium.gif', 41336), (u'/ksc.html', 28582), (u'/history/apollo/images/apollo-logo1.gif', 26778), (u'/images/launch-logo.gif', 24755), (u'/', 20290)], 'incorrect Top Ten Endpoints'
not200 = access_logs.filter(lambda x: x.response_code != 200)
endpointCountPairTuple = not200.map(lambda x: (x.endpoint,1))
endpointSum = endpointCountPairTuple.reduceByKey(lambda a,b: a+b)
topTenErrURLs = endpointSum.takeOrdered(10, key=lambda x: -x[1])
print 'Top Ten failed URLs: {}'.format(topTenErrURLs)
Test.assertEquals(endpointSum.count(), 7689, 'incorrect count for endpointSum')
Test.assertEquals(topTenErrURLs, [(u'/images/NASA-logosmall.gif', 8761), (u'/images/KSC-logosmall.gif', 7236), (u'/images/MOSAIC-logosmall.gif', 5197), (u'/images/USA-logosmall.gif', 5157), (u'/images/WORLD-logosmall.gif', 5020), (u'/images/ksclogo-medium.gif', 4728), (u'/history/apollo/images/apollo-logo1.gif', 2907), (u'/images/launch-logo.gif', 2811), (u'/', 2199), (u'/images/ksclogosmall.gif', 1622)], 'incorrect Top Ten failed URLs (topTenErrURLs)')
hosts = access_logs.map(lambda x: x.host)
uniqueHosts = hosts.distinct()
uniqueHostCount = uniqueHosts.count()
print 'Unique hosts: {}'.format(uniqueHostCount)
Test.assertEquals(uniqueHostCount, 54507, 'incorrect uniqueHostCount')
dayToHostPairTuple = access_logs.map(lambda x: (x.date_time.day,x.host))
dayGroupedHosts = dayToHostPairTuple.groupByKey()
dayHostCount = dayGroupedHosts.map(lambda x: (x[0],set(x[1])))
dailyHosts = (dayHostCount.map(lambda x: (x[0],len(x[1]))))
dailyHostsList = dailyHosts.takeOrdered(30)
print 'Unique hosts per day: {}'.format(dailyHostsList)
dailyHosts.cache()
Test.assertEquals(dailyHosts.count(), 21, 'incorrect dailyHosts.count()')
Test.assertEquals(dailyHostsList, [(1, 2582), (3, 3222), (4, 4190), (5, 2502), (6, 2537), (7, 4106), (8, 4406), (9, 4317), (10, 4523), (11, 4346), (12, 2864), (13, 2650), (14, 4454), (15, 4214), (16, 4340), (17, 4385), (18, 4168), (19, 2550), (20, 2560), (21, 4134), (22, 4456)], 'incorrect dailyHostsList')
Test.assertTrue(dailyHosts.is_cached, 'incorrect dailyHosts.is_cached')
daysWithHosts = sorted(dailyHosts.map(lambda x: x[0]).collect())
hosts = [x[1] for x in sorted(dailyHosts.collect())]
print '{}'.format(hosts)
test_days = range(1, 23)
test_days.remove(2)
Test.assertEquals(daysWithHosts, test_days, 'incorrect days')
Test.assertEquals(hosts, [2582, 3222, 4190, 2502, 2537, 4106, 4406, 4317, 4523, 4346, 2864, 2650, 4454, 4214, 4340, 4385, 4168, 2550, 2560, 4134, 4456], 'incorrect hosts')
fig = plt.figure(figsize=(8,4.5), facecolor='white', edgecolor='white')
plt.axis([min(daysWithHosts), max(daysWithHosts), 0, max(hosts)+500])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('Hosts')
plt.plot(daysWithHosts, hosts)
dayAndHostTuple = access_logs.map(lambda x: (x.date_time.day,x.host))
dailyHostsList
dayReqs = access_logs.map(lambda x: (x.date_time.day,1)).reduceByKey(lambda a,b: a+b)
summed = dayReqs.reduceByKey(lambda a,b: a+b)
joined = dayReqs.join(dailyHosts)
avgDailyReqPerHost = joined.map(lambda x: (x[0],x[1][0]/x[1][1]))
avgDailyReqPerHostList = avgDailyReqPerHost.takeOrdered(30)
print 'Average number of daily requests per host is {}'.format(avgDailyReqPerHostList)
avgDailyReqPerHost.cache()
Test.assertEquals(avgDailyReqPerHostList, [(1, 13), (3, 12), (4, 14), (5, 12), (6, 12), (7, 13), (8, 13), (9, 14), (10, 13), (11, 14), (12, 13), (13, 13), (14, 13), (15, 13), (16, 13), (17, 13), (18, 13), (19, 12), (20, 12), (21, 13), (22, 12)], 'incorrect avgDailyReqPerHostList')
Test.assertTrue(avgDailyReqPerHost.is_cached, 'incorrect avgDailyReqPerHost.is_cache')
daysWithAvg = sorted(avgDailyReqPerHost.map(lambda x: x[0]).collect())
avgs = [x[1] for x in avgDailyReqPerHost.takeOrdered(30, key=lambda x: x[0])]
print '{}'.format(avgs)
Test.assertEquals(daysWithAvg, [1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22], 'incorrect days')
Test.assertEquals(avgs, [13, 12, 14, 12, 12, 13, 13, 14, 13, 14, 13, 13, 13, 13, 13, 13, 13, 12, 12, 13, 12], 'incorrect avgs')
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(daysWithAvg), 0, max(avgs)+2])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('Average')
plt.plot(daysWithAvg, avgs)
badRecords = (access_logs.filter(lambda x: x.response_code == 404))
print 'Found {} 404 URLs'.format(badRecords.count())
badRecords.cache()
Test.assertEquals(badRecords.count(), 6185, 'incorrect badRecords.count()')
Test.assertTrue(badRecords.is_cached, 'incorrect badRecords.is_cached')
badEndpoints = badRecords.map(lambda x: x.endpoint)
badUniqueEndpoints = badEndpoints.distinct()
badUniqueEndpointsPick40 = badUniqueEndpoints.take(40)
print '404 URLS: {}'.format(badUniqueEndpointsPick40)
badUniqueEndpointsSet40 = set(badUniqueEndpointsPick40)
Test.assertEquals(len(badUniqueEndpointsSet40), 40, 'badUniqueEndpointsPick40 not distinct')
badEndpointsCountPairTuple = badRecords.map(lambda x: (x.endpoint,1))
badEndpointsSum = badEndpointsCountPairTuple.reduceByKey(lambda a,b: a+b)
print 'Top twenty {}'.format(badEndpointsSum)
badEndpointsTop20 = badEndpointsSum.takeOrdered(20, key= lambda x: -x[1])
print 'Top Twenty 404 URLs: {}'.format(badEndpointsTop20)
Test.assertEquals(badEndpointsTop20, [(u'/pub/winvn/readme.txt', 633), (u'/pub/winvn/release.txt', 494), (u'/shuttle/missions/STS-69/mission-STS-69.html', 431), (u'/images/nasa-logo.gif', 319), (u'/elv/DELTA/uncons.htm', 178), (u'/shuttle/missions/sts-68/ksc-upclose.gif', 156), (u'/history/apollo/sa-1/sa-1-patch-small.gif', 146), (u'/images/crawlerway-logo.gif', 120), (u'/://spacelink.msfc.nasa.gov', 117), (u'/history/apollo/pad-abort-test-1/pad-abort-test-1-patch-small.gif', 100), (u'/history/apollo/a-001/a-001-patch-small.gif', 97), (u'/images/Nasa-logo.gif', 85), (u'/shuttle/resources/orbiters/atlantis.gif', 64), (u'/history/apollo/images/little-joe.jpg', 62), (u'/images/lf-logo.gif', 59), (u'/shuttle/resources/orbiters/discovery.gif', 56), (u'/shuttle/resources/orbiters/challenger.gif', 54), (u'/robots.txt', 53), (u'/elv/new01.gif', 44), (u'/history/apollo/pad-abort-test-2/pad-abort-test-2-patch-small.gif', 38)], 'incorrect badEndpointsTop20')
errHostsCountPairTuple = badRecords.map(lambda x: (x.host,1))
errHostsSum = errHostsCountPairTuple.reduceByKey(lambda a,b: a+b)
errHostsTop25 = errHostsSum.takeOrdered(25, key= lambda x: -x[1])
print 'Top 25 hosts that generated errors: {}'.format(errHostsTop25)
Test.assertEquals(len(errHostsTop25), 25, 'length of errHostsTop25 is not 25')
Test.assertEquals(len(set(errHostsTop25) - set([(u'maz3.maz.net', 39), (u'piweba3y.prodigy.com', 39), (u'gate.barr.com', 38), (u'm38-370-9.mit.edu', 37), (u'ts8-1.westwood.ts.ucla.edu', 37), (u'nexus.mlckew.edu.au', 37), (u'204.62.245.32', 33), (u'163.206.104.34', 27), (u'spica.sci.isas.ac.jp', 27), (u'www-d4.proxy.aol.com', 26), (u'www-c4.proxy.aol.com', 25), (u'203.13.168.24', 25), (u'203.13.168.17', 25), (u'internet-gw.watson.ibm.com', 24), (u'scooter.pa-x.dec.com', 23), (u'crl5.crl.com', 23), (u'piweba5y.prodigy.com', 23), (u'onramp2-9.onr.com', 22), (u'slip145-189.ut.nl.ibm.net', 22), (u'198.40.25.102.sap2.artic.edu', 21), (u'gn2.getnet.com', 20), (u'msp1-16.nas.mr.net', 20), (u'isou24.vilspa.esa.es', 19), (u'dial055.mbnet.mb.ca', 19), (u'tigger.nashscene.com', 19)])), 0, 'incorrect errHostsTop25')
errDateCountPairTuple = badRecords.map(lambda x: (x.date_time.day,1))
errDateSum = errDateCountPairTuple.reduceByKey(lambda a,b: a+b)
#print '{}'.format(errDateSum.take(10))
errDateSorted = (errDateSum)
#print errDateSorted
errByDate = errDateSorted.takeOrdered(30)
print '404 Errors by day: {}'.format(errByDate)
errDateSorted.cache()
Test.assertEquals(errByDate, [(1, 243), (3, 303), (4, 346), (5, 234), (6, 372), (7, 532), (8, 381), (9, 279), (10, 314), (11, 263), (12, 195), (13, 216), (14, 287), (15, 326), (16, 258), (17, 269), (18, 255), (19, 207), (20, 312), (21, 305), (22, 288)], 'incorrect errByDate')
Test.assertTrue(errDateSorted.is_cached, 'incorrect errDateSorted.is_cached')
daysWithErrors404 = errDateSorted.map(lambda x: x[0]).takeOrdered(30)
errors404ByDay = [x[1] for x in errDateSorted.takeOrdered(30, key= lambda x: x[0])]
print errors404ByDay
Test.assertEquals(daysWithErrors404, [1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22], 'incorrect daysWithErrors404')
Test.assertEquals(errors404ByDay, [243, 303, 346, 234, 372, 532, 381, 279, 314, 263, 195, 216, 287, 326, 258, 269, 255, 207, 312, 305, 288], 'incorrect errors404ByDay')
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(daysWithErrors404), 0, max(errors404ByDay)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Day')
plt.ylabel('404 Errors')
plt.plot(daysWithErrors404, errors404ByDay)
pass
topErrDate = errDateSorted.takeOrdered(5, key= lambda x: -x[1])
print 'Top Five dates for 404 requests: {}'.format(topErrDate)
Test.assertEquals(topErrDate, [(7, 532), (8, 381), (6, 372), (4, 346), (15, 326)], 'incorrect topErrDate')
hourCountPairTuple = badRecords.map(lambda x: (x.date_time.hour,1))
hourRecordsSum = hourCountPairTuple.reduceByKey(lambda a,b: a+b)
hourRecordsSorted = hourRecordsSum
errHourList = hourRecordsSorted.takeOrdered(30)
print 'Top hours for 404 requests: {}'.format(errHourList)
hourRecordsSorted.cache()
Test.assertEquals(errHourList, [(0, 175), (1, 171), (2, 422), (3, 272), (4, 102), (5, 95), (6, 93), (7, 122), (8, 199), (9, 185), (10, 329), (11, 263), (12, 438), (13, 397), (14, 318), (15, 347), (16, 373), (17, 330), (18, 268), (19, 269), (20, 270), (21, 241), (22, 234), (23, 272)], 'incorrect errHourList')
Test.assertTrue(hourRecordsSorted.is_cached, 'incorrect hourRecordsSorted.is_cached')
hoursWithErrors404 = hourRecordsSorted.map(lambda x: x[0]).takeOrdered(30)
errors404ByHours = [x[1] for x in hourRecordsSorted.takeOrdered(30, key=lambda x: x[0])]
Test.assertEquals(hoursWithErrors404, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23], 'incorrect hoursWithErrors404')
Test.assertEquals(errors404ByHours, [175, 171, 422, 272, 102, 95, 93, 122, 199, 185, 329, 263, 438, 397, 318, 347, 373, 330, 268, 269, 270, 241, 234, 272], 'incorrect errors404ByHours')
fig = plt.figure(figsize=(8,4.2), facecolor='white', edgecolor='white')
plt.axis([0, max(hoursWithErrors404), 0, max(errors404ByHours)])
plt.grid(b=True, which='major', axis='y')
plt.xlabel('Hour')
plt.ylabel('404 Errors')
plt.plot(hoursWithErrors404, errors404ByHours)
| apache-2.0 | 6,074,960,403,950,983,000 | 43.766839 | 961 | 0.657697 | false |
azlanismail/prismgames | examples/games/car/networkx/readwrite/gexf.py | 1 | 33475 | """
****
GEXF
****
Read and write graphs in GEXF format.
GEXF (Graph Exchange XML Format) is a language for describing complex
network structures, their associated data and dynamics.
This implementation does not support mixed graphs (directed and
undirected edges together).
Format
------
GEXF is an XML format. See http://gexf.net/format/schema.html for the
specification and http://gexf.net/format/basic.html for examples.
"""
# Based on GraphML NetworkX GraphML reader
import itertools
import networkx as nx
from networkx.utils import open_file, make_str
try:
from xml.etree.cElementTree import Element, ElementTree, tostring
except ImportError:
try:
from xml.etree.ElementTree import Element, ElementTree, tostring
except ImportError:
pass
__author__ = """\n""".join(['Aric Hagberg ([email protected])'])
__all__ = ['write_gexf', 'read_gexf', 'relabel_gexf_graph', 'generate_gexf']
@open_file(1,mode='wb')
def write_gexf(G, path, encoding='utf-8',prettyprint=True,version='1.1draft'):
"""Write G in GEXF format to path.
"GEXF (Graph Exchange XML Format) is a language for describing
complex networks structures, their associated data and dynamics" [1]_.
Parameters
----------
G : graph
A NetworkX graph
path : file or string
File or filename to write.
Filenames ending in .gz or .bz2 will be compressed.
encoding : string (optional)
Encoding for text data.
prettyprint : bool (optional)
If True use line breaks and indenting in output XML.
Examples
--------
>>> G=nx.path_graph(4)
>>> nx.write_gexf(G, "test.gexf")
Notes
-----
    This implementation does not support mixed graphs (directed and undirected
edges together).
The node id attribute is set to be the string of the node label.
    If you want to specify an id, set it as node data, e.g.
node['a']['id']=1 to set the id of node 'a' to 1.
References
----------
.. [1] GEXF graph format, http://gexf.net/format/
"""
writer = GEXFWriter(encoding=encoding,prettyprint=prettyprint,
version=version)
writer.add_graph(G)
writer.write(path)
def generate_gexf(G, encoding='utf-8',prettyprint=True,version='1.1draft'):
"""Generate lines of GEXF format representation of G"
"GEXF (Graph Exchange XML Format) is a language for describing
complex networks structures, their associated data and dynamics" [1]_.
Parameters
----------
G : graph
A NetworkX graph
encoding : string (optional)
Encoding for text data.
prettyprint : bool (optional)
If True use line breaks and indenting in output XML.
Examples
--------
>>> G=nx.path_graph(4)
>>> linefeed=chr(10) # linefeed=\n
>>> s=linefeed.join(nx.generate_gexf(G)) # doctest: +SKIP
>>> for line in nx.generate_gexf(G): # doctest: +SKIP
... print line
Notes
-----
    This implementation does not support mixed graphs (directed and undirected
edges together).
The node id attribute is set to be the string of the node label.
    If you want to specify an id, set it as node data, e.g.
node['a']['id']=1 to set the id of node 'a' to 1.
References
----------
.. [1] GEXF graph format, http://gexf.net/format/
"""
writer = GEXFWriter(encoding=encoding,prettyprint=prettyprint,
version=version)
writer.add_graph(G)
for line in str(writer).splitlines():
yield line
@open_file(0,mode='rb')
def read_gexf(path,node_type=str,relabel=False,version='1.1draft'):
"""Read graph in GEXF format from path.
"GEXF (Graph Exchange XML Format) is a language for describing
complex networks structures, their associated data and dynamics" [1]_.
Parameters
----------
path : file or string
File or filename to write.
Filenames ending in .gz or .bz2 will be compressed.
node_type: Python type (default: str)
Convert node ids to this type
relabel : bool (default: False)
If True relabel the nodes to use the GEXF node "label" attribute
instead of the node "id" attribute as the NetworkX node label.
Returns
-------
graph: NetworkX graph
If no parallel edges are found a Graph or DiGraph is returned.
Otherwise a MultiGraph or MultiDiGraph is returned.
Notes
-----
    This implementation does not support mixed graphs (directed and undirected
edges together).
References
----------
.. [1] GEXF graph format, http://gexf.net/format/
"""
reader = GEXFReader(node_type=node_type,version=version)
if relabel:
G=relabel_gexf_graph(reader(path))
else:
G=reader(path)
return G
class GEXF(object):
# global register_namespace
versions={}
d={'NS_GEXF':"http://www.gexf.net/1.1draft",
'NS_VIZ':"http://www.gexf.net/1.1draft/viz",
'NS_XSI':"http://www.w3.org/2001/XMLSchema-instance",
'SCHEMALOCATION':' '.join(['http://www.gexf.net/1.1draft',
'http://www.gexf.net/1.1draft/gexf.xsd'
]),
'VERSION':'1.1'
}
versions['1.1draft']=d
d={'NS_GEXF':"http://www.gexf.net/1.2draft",
'NS_VIZ':"http://www.gexf.net/1.2draft/viz",
'NS_XSI':"http://www.w3.org/2001/XMLSchema-instance",
'SCHEMALOCATION':' '.join(['http://www.gexf.net/1.2draft',
'http://www.gexf.net/1.2draft/gexf.xsd'
]),
'VERSION':'1.2'
}
versions['1.2draft']=d
types=[(int,"integer"),
(float,"float"),
(float,"double"),
(bool,"boolean"),
(list,"string"),
(dict,"string"),
]
try: # Python 3.x
blurb = chr(1245) # just to trigger the exception
types.extend([
(str,"liststring"),
(str,"anyURI"),
(str,"string")])
except ValueError: # Python 2.6+
types.extend([
(str,"liststring"),
(str,"anyURI"),
(str,"string"),
(unicode,"liststring"),
(unicode,"anyURI"),
(unicode,"string")])
xml_type = dict(types)
python_type = dict(reversed(a) for a in types)
convert_bool={'true':True,'false':False}
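    # Resulting lookups, for illustration: xml_type[int] == 'integer',
    # xml_type[bool] == 'boolean', python_type['double'] is float and
    # convert_bool['true'] is True.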
# try:
# register_namespace = ET.register_namespace
# except AttributeError:
# def register_namespace(prefix, uri):
# ET._namespace_map[uri] = prefix
def set_version(self,version):
d=self.versions.get(version)
if d is None:
raise nx.NetworkXError('Unknown GEXF version %s'%version)
self.NS_GEXF = d['NS_GEXF']
self.NS_VIZ = d['NS_VIZ']
self.NS_XSI = d['NS_XSI']
        self.SCHEMALOCATION = d['SCHEMALOCATION']
self.VERSION=d['VERSION']
self.version=version
# register_namespace('viz', d['NS_VIZ'])
class GEXFWriter(GEXF):
# class for writing GEXF format files
# use write_gexf() function
def __init__(self, graph=None, encoding="utf-8",
mode='static',prettyprint=True,
version='1.1draft'):
try:
import xml.etree.ElementTree
except ImportError:
raise ImportError('GEXF writer requires '
'xml.elementtree.ElementTree')
self.prettyprint=prettyprint
self.mode=mode
self.encoding = encoding
self.set_version(version)
self.xml = Element("gexf",
{'xmlns':self.NS_GEXF,
'xmlns:xsi':self.NS_XSI,
'xmlns:viz':self.NS_VIZ,
'xsi:schemaLocation':self.SCHEMALOCATION,
'version':self.VERSION})
# counters for edge and attribute identifiers
self.edge_id=itertools.count()
self.attr_id=itertools.count()
# default attributes are stored in dictionaries
self.attr={}
self.attr['node']={}
self.attr['edge']={}
self.attr['node']['dynamic']={}
self.attr['node']['static']={}
self.attr['edge']['dynamic']={}
self.attr['edge']['static']={}
if graph is not None:
self.add_graph(graph)
def __str__(self):
if self.prettyprint:
self.indent(self.xml)
s=tostring(self.xml).decode(self.encoding)
return s
def add_graph(self, G):
# Add a graph element to the XML
if G.is_directed():
default='directed'
else:
default='undirected'
graph_element = Element("graph",defaultedgetype=default,mode=self.mode)
self.graph_element=graph_element
self.add_nodes(G,graph_element)
self.add_edges(G,graph_element)
self.xml.append(graph_element)
def add_nodes(self, G, graph_element):
nodes_element = Element('nodes')
for node,data in G.nodes_iter(data=True):
node_data=data.copy()
node_id = make_str(node_data.pop('id', node))
kw={'id':node_id}
label = make_str(node_data.pop('label', node))
kw['label']=label
try:
pid=node_data.pop('pid')
kw['pid'] = make_str(pid)
except KeyError:
pass
# add node element with attributes
node_element = Element("node", **kw)
# add node element and attr subelements
default=G.graph.get('node_default',{})
node_data=self.add_parents(node_element, node_data)
if self.version=='1.1':
node_data=self.add_slices(node_element, node_data)
else:
node_data=self.add_spells(node_element, node_data)
node_data=self.add_viz(node_element,node_data)
node_data=self.add_attributes("node", node_element,
node_data, default)
nodes_element.append(node_element)
graph_element.append(nodes_element)
def add_edges(self, G, graph_element):
def edge_key_data(G):
# helper function to unify multigraph and graph edge iterator
if G.is_multigraph():
for u,v,key,data in G.edges_iter(data=True,keys=True):
edge_data=data.copy()
edge_data.update(key=key)
edge_id=edge_data.pop('id',None)
if edge_id is None:
edge_id=next(self.edge_id)
yield u,v,edge_id,edge_data
else:
for u,v,data in G.edges_iter(data=True):
edge_data=data.copy()
edge_id=edge_data.pop('id',None)
if edge_id is None:
edge_id=next(self.edge_id)
yield u,v,edge_id,edge_data
edges_element = Element('edges')
for u,v,key,edge_data in edge_key_data(G):
kw={'id':make_str(key)}
try:
edge_weight=edge_data.pop('weight')
kw['weight']=make_str(edge_weight)
except KeyError:
pass
try:
edge_type=edge_data.pop('type')
kw['type']=make_str(edge_type)
except KeyError:
pass
edge_element = Element("edge",
source=make_str(u),target=make_str(v),
**kw)
default=G.graph.get('edge_default',{})
edge_data=self.add_viz(edge_element,edge_data)
edge_data=self.add_attributes("edge", edge_element,
edge_data, default)
edges_element.append(edge_element)
graph_element.append(edges_element)
def add_attributes(self, node_or_edge, xml_obj, data, default):
# Add attrvalues to node or edge
attvalues=Element('attvalues')
if len(data)==0:
return data
if 'start' in data or 'end' in data:
mode='dynamic'
else:
mode='static'
for k,v in data.items():
# rename generic multigraph key to avoid any name conflict
if k == 'key':
k='networkx_key'
attr_id = self.get_attr_id(make_str(k), self.xml_type[type(v)],
node_or_edge, default, mode)
if type(v)==list:
# dynamic data
for val,start,end in v:
e=Element("attvalue")
e.attrib['for']=attr_id
e.attrib['value']=make_str(val)
e.attrib['start']=make_str(start)
e.attrib['end']=make_str(end)
attvalues.append(e)
else:
# static data
e=Element("attvalue")
e.attrib['for']=attr_id
e.attrib['value']=make_str(v)
attvalues.append(e)
xml_obj.append(attvalues)
return data
def get_attr_id(self, title, attr_type, edge_or_node, default, mode):
# find the id of the attribute or generate a new id
try:
return self.attr[edge_or_node][mode][title]
except KeyError:
# generate new id
new_id=str(next(self.attr_id))
self.attr[edge_or_node][mode][title] = new_id
attr_kwargs = {"id":new_id, "title":title, "type":attr_type}
attribute=Element("attribute",**attr_kwargs)
# add subelement for data default value if present
default_title=default.get(title)
if default_title is not None:
default_element=Element("default")
default_element.text=make_str(default_title)
attribute.append(default_element)
# new insert it into the XML
attributes_element=None
for a in self.graph_element.findall("attributes"):
# find existing attributes element by class and mode
a_class=a.get('class')
a_mode=a.get('mode','static') # default mode is static
if a_class==edge_or_node and a_mode==mode:
attributes_element=a
if attributes_element is None:
# create new attributes element
attr_kwargs = {"mode":mode,"class":edge_or_node}
attributes_element=Element('attributes', **attr_kwargs)
self.graph_element.insert(0,attributes_element)
attributes_element.append(attribute)
return new_id
def add_viz(self,element,node_data):
viz=node_data.pop('viz',False)
if viz:
color=viz.get('color')
if color is not None:
if self.VERSION=='1.1':
e=Element("{%s}color"%self.NS_VIZ,
r=str(color.get('r')),
g=str(color.get('g')),
b=str(color.get('b')),
)
else:
e=Element("{%s}color"%self.NS_VIZ,
r=str(color.get('r')),
g=str(color.get('g')),
b=str(color.get('b')),
a=str(color.get('a')),
)
element.append(e)
size=viz.get('size')
if size is not None:
e=Element("{%s}size"%self.NS_VIZ,value=str(size))
element.append(e)
thickness=viz.get('thickness')
if thickness is not None:
e=Element("{%s}thickness"%self.NS_VIZ,value=str(thickness))
element.append(e)
shape=viz.get('shape')
if shape is not None:
if shape.startswith('http'):
e=Element("{%s}shape"%self.NS_VIZ,
value='image',uri=str(shape))
else:
                    e=Element("{%s}shape"%self.NS_VIZ,value=str(shape))
element.append(e)
position=viz.get('position')
if position is not None:
e=Element("{%s}position"%self.NS_VIZ,
x=str(position.get('x')),
y=str(position.get('y')),
z=str(position.get('z')),
)
element.append(e)
return node_data
def add_parents(self,node_element,node_data):
parents=node_data.pop('parents',False)
if parents:
parents_element=Element('parents')
for p in parents:
e=Element('parent')
e.attrib['for']=str(p)
parents_element.append(e)
node_element.append(parents_element)
return node_data
def add_slices(self,node_element,node_data):
slices=node_data.pop('slices',False)
if slices:
slices_element=Element('slices')
for start,end in slices:
e=Element('slice',start=str(start),end=str(end))
slices_element.append(e)
node_element.append(slices_element)
return node_data
def add_spells(self,node_element,node_data):
spells=node_data.pop('spells',False)
if spells:
spells_element=Element('spells')
for start,end in spells:
e=Element('spell',start=str(start),end=str(end))
spells_element.append(e)
node_element.append(spells_element)
return node_data
def write(self, fh):
# Serialize graph G in GEXF to the open fh
if self.prettyprint:
self.indent(self.xml)
document = ElementTree(self.xml)
header='<?xml version="1.0" encoding="%s"?>'%self.encoding
fh.write(header.encode(self.encoding))
document.write(fh, encoding=self.encoding)
def indent(self, elem, level=0):
# in-place prettyprint formatter
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
self.indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
class GEXFReader(GEXF):
# Class to read GEXF format files
# use read_gexf() function
def __init__(self, node_type=None,version='1.1draft'):
try:
import xml.etree.ElementTree
except ImportError:
raise ImportError('GEXF reader requires '
'xml.elementtree.ElementTree')
self.node_type=node_type
# assume simple graph and test for multigraph on read
self.simple_graph=True
self.set_version(version)
def __call__(self, stream):
self.xml = ElementTree(file=stream)
g=self.xml.find("{%s}graph" % self.NS_GEXF)
if g is not None:
return self.make_graph(g)
# try all the versions
for version in self.versions:
self.set_version(version)
g=self.xml.find("{%s}graph" % self.NS_GEXF)
if g is not None:
return self.make_graph(g)
raise nx.NetworkXError("No <graph> element in GEXF file")
def make_graph(self, graph_xml):
# mode is "static" or "dynamic"
graph_mode = graph_xml.get("mode", "")
self.dynamic=(graph_mode=='dynamic')
# start with empty DiGraph or MultiDiGraph
edgedefault = graph_xml.get("defaultedgetype", None)
if edgedefault=='directed':
G=nx.MultiDiGraph()
else:
G=nx.MultiGraph()
# graph attributes
graph_start=graph_xml.get('start')
if graph_start is not None:
G.graph['start']=graph_start
graph_end=graph_xml.get('end')
if graph_end is not None:
G.graph['end']=graph_end
# node and edge attributes
attributes_elements=graph_xml.findall("{%s}attributes"%self.NS_GEXF)
# dictionaries to hold attributes and attribute defaults
node_attr={}
node_default={}
edge_attr={}
edge_default={}
for a in attributes_elements:
attr_class = a.get("class")
if attr_class=='node':
na,nd = self.find_gexf_attributes(a)
node_attr.update(na)
node_default.update(nd)
G.graph['node_default']=node_default
elif attr_class=='edge':
ea,ed = self.find_gexf_attributes(a)
edge_attr.update(ea)
edge_default.update(ed)
G.graph['edge_default']=edge_default
else:
                raise nx.NetworkXError('Unknown attribute class: %s' % attr_class)
# Hack to handle Gephi0.7beta bug
# add weight attribute
ea={'weight':{'type': 'double', 'mode': 'static', 'title': 'weight'}}
ed={}
edge_attr.update(ea)
edge_default.update(ed)
G.graph['edge_default']=edge_default
# add nodes
nodes_element=graph_xml.find("{%s}nodes" % self.NS_GEXF)
if nodes_element is not None:
for node_xml in nodes_element.findall("{%s}node" % self.NS_GEXF):
self.add_node(G, node_xml, node_attr)
# add edges
edges_element=graph_xml.find("{%s}edges" % self.NS_GEXF)
if edges_element is not None:
for edge_xml in edges_element.findall("{%s}edge" % self.NS_GEXF):
self.add_edge(G, edge_xml, edge_attr)
# switch to Graph or DiGraph if no parallel edges were found.
if self.simple_graph:
if G.is_directed():
G=nx.DiGraph(G)
else:
G=nx.Graph(G)
return G
def add_node(self, G, node_xml, node_attr, node_pid=None):
# add a single node with attributes to the graph
# get attributes and subattributues for node
data = self.decode_attr_elements(node_attr, node_xml)
data = self.add_parents(data, node_xml) # add any parents
if self.version=='1.1':
data = self.add_slices(data, node_xml) # add slices
else:
data = self.add_spells(data, node_xml) # add spells
data = self.add_viz(data, node_xml) # add viz
data = self.add_start_end(data, node_xml) # add start/end
# find the node id and cast it to the appropriate type
node_id = node_xml.get("id")
if self.node_type is not None:
node_id=self.node_type(node_id)
# every node should have a label
node_label = node_xml.get("label")
data['label']=node_label
# parent node id
node_pid = node_xml.get("pid", node_pid)
if node_pid is not None:
data['pid']=node_pid
# check for subnodes, recursive
subnodes=node_xml.find("{%s}nodes" % self.NS_GEXF)
if subnodes is not None:
for node_xml in subnodes.findall("{%s}node" % self.NS_GEXF):
self.add_node(G, node_xml, node_attr, node_pid=node_id)
G.add_node(node_id, data)
def add_start_end(self, data, xml):
# start and end times
node_start = xml.get("start")
if node_start is not None:
data['start']=node_start
node_end = xml.get("end")
if node_end is not None:
data['end']=node_end
return data
def add_viz(self, data, node_xml):
# add viz element for node
viz={}
color=node_xml.find("{%s}color"%self.NS_VIZ)
if color is not None:
if self.VERSION=='1.1':
viz['color']={'r':int(color.get('r')),
'g':int(color.get('g')),
'b':int(color.get('b'))}
else:
viz['color']={'r':int(color.get('r')),
'g':int(color.get('g')),
'b':int(color.get('b')),
'a':float(color.get('a')),
}
size=node_xml.find("{%s}size"%self.NS_VIZ)
if size is not None:
viz['size']=float(size.get('value'))
thickness=node_xml.find("{%s}thickness"%self.NS_VIZ)
if thickness is not None:
viz['thickness']=float(thickness.get('value'))
shape=node_xml.find("{%s}shape"%self.NS_VIZ)
if shape is not None:
viz['shape']=shape.get('shape')
if viz['shape']=='image':
viz['shape']=shape.get('uri')
position=node_xml.find("{%s}position"%self.NS_VIZ)
if position is not None:
viz['position']={'x':float(position.get('x',0)),
'y':float(position.get('y',0)),
'z':float(position.get('z',0))}
if len(viz)>0:
data['viz']=viz
return data
def add_parents(self, data, node_xml):
parents_element=node_xml.find("{%s}parents"%self.NS_GEXF)
if parents_element is not None:
data['parents']=[]
for p in parents_element.findall("{%s}parent"%self.NS_GEXF):
parent=p.get('for')
data['parents'].append(parent)
return data
def add_slices(self, data, node_xml):
slices_element=node_xml.find("{%s}slices"%self.NS_GEXF)
if slices_element is not None:
data['slices']=[]
for s in slices_element.findall("{%s}slice"%self.NS_GEXF):
start=s.get('start')
end=s.get('end')
data['slices'].append((start,end))
return data
def add_spells(self, data, node_xml):
spells_element=node_xml.find("{%s}spells"%self.NS_GEXF)
if spells_element is not None:
data['spells']=[]
for s in spells_element.findall("{%s}spell"%self.NS_GEXF):
start=s.get('start')
end=s.get('end')
data['spells'].append((start,end))
return data
def add_edge(self, G, edge_element, edge_attr):
# add an edge to the graph
# raise error if we find mixed directed and undirected edges
edge_direction = edge_element.get("type")
if G.is_directed() and edge_direction=='undirected':
raise nx.NetworkXError(\
"Undirected edge found in directed graph.")
if (not G.is_directed()) and edge_direction=='directed':
raise nx.NetworkXError(\
"Directed edge found in undirected graph.")
# Get source and target and recast type if required
source = edge_element.get("source")
target = edge_element.get("target")
if self.node_type is not None:
source=self.node_type(source)
target=self.node_type(target)
data = self.decode_attr_elements(edge_attr, edge_element)
data = self.add_start_end(data,edge_element)
# GEXF stores edge ids as an attribute
# NetworkX uses them as keys in multigraphs
# if networkx_key is not specified as an attribute
edge_id = edge_element.get("id")
if edge_id is not None:
data["id"] = edge_id
# check if there is a 'multigraph_key' and use that as edge_id
multigraph_key = data.pop('networkx_key',None)
if multigraph_key is not None:
edge_id=multigraph_key
weight = edge_element.get('weight')
if weight is not None:
data['weight']=float(weight)
edge_label = edge_element.get("label")
if edge_label is not None:
data['label']=edge_label
if G.has_edge(source,target):
# seen this edge before - this is a multigraph
self.simple_graph=False
G.add_edge(source, target, key=edge_id, **data)
if edge_direction=='mutual':
G.add_edge(target, source, key=edge_id, **data)
def decode_attr_elements(self, gexf_keys, obj_xml):
# Use the key information to decode the attr XML
attr = {}
# look for outer "<attvalues>" element
attr_element=obj_xml.find("{%s}attvalues" % self.NS_GEXF)
if attr_element is not None:
# loop over <attvalue> elements
for a in attr_element.findall("{%s}attvalue" % self.NS_GEXF):
key = a.get('for') # for is required
try: # should be in our gexf_keys dictionary
title=gexf_keys[key]['title']
except KeyError:
raise nx.NetworkXError("No attribute defined for=%s"%key)
atype=gexf_keys[key]['type']
value=a.get('value')
if atype=='boolean':
value=self.convert_bool[value]
else:
value=self.python_type[atype](value)
if gexf_keys[key]['mode']=='dynamic':
# for dynamic graphs use list of three-tuples
# [(value1,start1,end1), (value2,start2,end2), etc]
start=a.get('start')
end=a.get('end')
if title in attr:
attr[title].append((value,start,end))
else:
attr[title]=[(value,start,end)]
else:
# for static graphs just assign the value
attr[title] = value
return attr
def find_gexf_attributes(self, attributes_element):
# Extract all the attributes and defaults
attrs = {}
defaults = {}
mode=attributes_element.get('mode')
for k in attributes_element.findall("{%s}attribute" % self.NS_GEXF):
attr_id = k.get("id")
title=k.get('title')
atype=k.get('type')
attrs[attr_id]={'title':title,'type':atype,'mode':mode}
# check for the "default" subelement of key element and add
default=k.find("{%s}default" % self.NS_GEXF)
if default is not None:
if atype=='boolean':
value=self.convert_bool[default.text]
else:
value=self.python_type[atype](default.text)
defaults[title]=value
return attrs,defaults
def relabel_gexf_graph(G):
"""Relabel graph using "label" node keyword for node label.
Parameters
----------
G : graph
A NetworkX graph read from GEXF data
Returns
-------
H : graph
      A NetworkX graph with relabeled nodes
Notes
-----
This function relabels the nodes in a NetworkX graph with the
"label" attribute. It also handles relabeling the specific GEXF
node attributes "parents", and "pid".
"""
# build mapping of node labels, do some error checking
try:
mapping=[(u,G.node[u]['label']) for u in G]
except KeyError:
raise nx.NetworkXError('Failed to relabel nodes: '
'missing node labels found. '
'Use relabel=False.')
x,y=zip(*mapping)
if len(set(y))!=len(G):
raise nx.NetworkXError('Failed to relabel nodes: '
'duplicate node labels found. '
'Use relabel=False.')
mapping=dict(mapping)
H=nx.relabel_nodes(G,mapping)
# relabel attributes
for n in G:
m=mapping[n]
H.node[m]['id']=n
if 'pid' in H.node[m]:
H.node[m]['pid']=mapping[G.node[n]['pid']]
if 'parents' in H.node[m]:
H.node[m]['parents']=[mapping[p] for p in G.node[n]['parents']]
return H
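# Typical round trip (the file name is hypothetical):
#   G = nx.read_gexf('net.gexf')   # nodes keyed by the GEXF "id" attribute
#   H = relabel_gexf_graph(G)      # nodes keyed by the GEXF "label" attribute
# read_gexf(path, relabel=True) performs the same relabeling in one step.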
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import xml.etree.cElementTree
except:
raise SkipTest("xml.etree.cElementTree not available")
# fixture for nose tests
def teardown_module(module):
import os
try:
os.unlink('test.gexf')
except:
pass
| gpl-2.0 | 3,307,386,771,986,776,000 | 34.705044 | 79 | 0.522121 | false |
odoo-argentina/account | l10n_ar_account/models/account_invoice.py | 1 | 12294 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import models, fields, api, _
from openerp.exceptions import UserError
import re
import logging
_logger = logging.getLogger(__name__)
class AccountInvoice(models.Model):
_inherit = "account.invoice"
currency_rate = fields.Float(
string='Currency Rate',
copy=False,
digits=(16, 4),
# TODO make it editable, we have to change move create method
readonly=True,
)
document_letter_id = fields.Many2one(
related='document_type_id.document_letter_id',
)
afip_responsible_type_id = fields.Many2one(
'afip.responsible.type',
string='AFIP Responsible Type',
readonly=True,
copy=False,
)
invoice_number = fields.Integer(
compute='_get_invoice_number',
string="Invoice Number",
)
point_of_sale_number = fields.Integer(
compute='_get_invoice_number',
string="Point Of Sale",
)
vat_base_amount = fields.Monetary(
compute="_get_argentina_amounts",
string='VAT Base Amount'
)
vat_exempt_base_amount = fields.Monetary(
compute="_get_argentina_amounts",
string='VAT Exempt Base Amount'
)
    # TODO remove: we should not need these anymore because we modified
    # compute all so that it creates these taxes
    # zero-rated VAT base (we have to add it because odoo does not create
    # the lines for taxes whose amount is zero)
# vat_zero_base_amount = fields.Monetary(
# compute="_get_argentina_amounts",
# string='VAT Zero Base Amount'
# )
    # VAT untaxed ("no gravado") base (we have to add it because odoo does
    # not create the lines for taxes whose amount is zero)
vat_untaxed_base_amount = fields.Monetary(
compute="_get_argentina_amounts",
string='VAT Untaxed Base Amount'
)
vat_amount = fields.Monetary(
compute="_get_argentina_amounts",
string='VAT Amount'
)
other_taxes_amount = fields.Monetary(
compute="_get_argentina_amounts",
string='Other Taxes Amount'
)
vat_tax_ids = fields.One2many(
compute="_get_argentina_amounts",
comodel_name='account.invoice.tax',
string='VAT Taxes'
)
not_vat_tax_ids = fields.One2many(
compute="_get_argentina_amounts",
comodel_name='account.invoice.tax',
string='Not VAT Taxes'
)
afip_incoterm_id = fields.Many2one(
'afip.incoterm',
'Incoterm',
readonly=True,
states={'draft': [('readonly', False)]}
)
point_of_sale_type = fields.Selection(
related='journal_id.point_of_sale_type',
readonly=True,
)
    # we add these fields in this module but they are actually used by
    # electronic invoicing (FE); we understand they could be needed for other
    # types too, so for now we only make them required if the point of sale
    # is of the electronic type
afip_concept = fields.Selection(
compute='_get_concept',
# store=True,
selection=[('1', 'Producto / Exportación definitiva de bienes'),
('2', 'Servicios'),
('3', 'Productos y Servicios'),
('4', '4-Otros (exportación)'),
],
string="AFIP concept",
)
afip_service_start = fields.Date(
string='Service Start Date'
)
afip_service_end = fields.Date(
string='Service End Date'
)
@api.one
def _get_argentina_amounts(self):
"""
"""
# vat values
# we exclude exempt vats and untaxed (no gravados)
wihtout_tax_id = self.tax_line_ids.filtered(lambda r: not r.tax_id)
if wihtout_tax_id:
raise UserError(_(
"Some Invoice Tax Lines don't have a tax_id asociated, please "
"correct them or try to refresh invoice "))
vat_taxes = self.tax_line_ids.filtered(
lambda r: (
r.tax_id.tax_group_id.type == 'tax' and
r.tax_id.tax_group_id.tax == 'vat' and
r.tax_id.tax_group_id.afip_code not in [1, 2]))
vat_amount = sum(vat_taxes.mapped('amount'))
self.vat_tax_ids = vat_taxes
self.vat_amount = vat_amount
self.vat_base_amount = sum(vat_taxes.mapped('base_amount'))
# vat exempt values
# exempt taxes are the ones with code 2
vat_exempt_taxes = self.tax_line_ids.filtered(
lambda r: (
r.tax_id.tax_group_id.type == 'tax' and
r.tax_id.tax_group_id.tax == 'vat' and
r.tax_id.tax_group_id.afip_code == 2))
self.vat_exempt_base_amount = sum(
vat_exempt_taxes.mapped('base_amount'))
        # vat_untaxed_base_amount values ("no gravado")
        # vat untaxed taxes are the ones with code 1
vat_untaxed_taxes = self.tax_line_ids.filtered(
lambda r: (
r.tax_id.tax_group_id.type == 'tax' and
r.tax_id.tax_group_id.tax == 'vat' and
r.tax_id.tax_group_id.afip_code == 1))
self.vat_untaxed_base_amount = sum(
vat_untaxed_taxes.mapped('base_amount'))
# other taxes values
not_vat_taxes = self.tax_line_ids - vat_taxes
other_taxes_amount = sum(not_vat_taxes.mapped('amount'))
self.not_vat_tax_ids = not_vat_taxes
self.other_taxes_amount = other_taxes_amount
@api.one
@api.depends('document_number', 'number')
def _get_invoice_number(self):
""" Funcion que calcula numero de punto de venta y numero de factura
a partir del document number. Es utilizado principalmente por el modulo
de vat ledger citi
"""
# TODO mejorar estp y almacenar punto de venta y numero de factura por
# separado, de hecho con esto hacer mas facil la carga de los
# comprobantes de compra
str_number = self.document_number or self.number or False
if str_number and self.state not in [
'draft', 'proforma', 'proforma2', 'cancel']:
if self.document_type_id.code in [33, 99, 331, 332]:
point_of_sale = '0'
# leave only numbers and convert to integer
invoice_number = str_number
            # import clearances ("despachos de importacion")
elif self.document_type_id.code == 66:
point_of_sale = '0'
invoice_number = '0'
elif "-" in str_number:
splited_number = str_number.split('-')
invoice_number = splited_number.pop()
point_of_sale = splited_number.pop()
elif "-" not in str_number and len(str_number) == 12:
point_of_sale = str_number[:4]
invoice_number = str_number[-8:]
else:
raise UserError(_(
'Could not get invoice number and point of sale for '
'invoice id %i') % (self.id))
self.invoice_number = int(
re.sub("[^0-9]", "", invoice_number))
self.point_of_sale_number = int(
re.sub("[^0-9]", "", point_of_sale))
@api.one
@api.depends(
'invoice_line_ids',
'invoice_line_ids.product_id',
'invoice_line_ids.product_id.type',
'localization',
)
def _get_concept(self):
afip_concept = False
if self.point_of_sale_type in ['online', 'electronic']:
            # exports
invoice_lines = self.invoice_line_ids
product_types = set(
[x.product_id.type for x in invoice_lines if x.product_id])
consumible = set(['consu', 'product'])
service = set(['service'])
mixed = set(['consu', 'service', 'product'])
# default value "product"
afip_concept = '1'
if product_types.issubset(mixed):
afip_concept = '3'
if product_types.issubset(service):
afip_concept = '2'
if product_types.issubset(consumible):
afip_concept = '1'
if self.document_type_id.code in [19, 20, 21]:
                # TODO verify this: since for exports type 3 does not exist
                # and type 4 (others) does, we treat a mixed case as others
if afip_concept == '3':
afip_concept = '4'
self.afip_concept = afip_concept
@api.multi
def get_localization_invoice_vals(self):
self.ensure_one()
if self.localization == 'argentina':
commercial_partner = self.partner_id.commercial_partner_id
currency_rate = self.currency_id.compute(
1., self.company_id.currency_id)
return {
'afip_responsible_type_id': (
commercial_partner.afip_responsible_type_id.id),
'currency_rate': currency_rate,
}
else:
return super(
AccountInvoice, self).get_localization_invoice_vals()
@api.multi
def _get_available_journal_document_types(self):
"""
        This function searches for available document types regarding:
* Journal
* Partner
* Company
* Documents configuration
        If needed, we can make this function inheritable and customizable per
localization
"""
self.ensure_one()
if self.localization != 'argentina':
return super(
AccountInvoice, self)._get_available_journal_document_types()
invoice_type = self.type
journal_document_types = journal_document_type = self.env[
'account.journal.document.type']
if invoice_type in [
'out_invoice', 'in_invoice', 'out_refund', 'in_refund']:
if self.use_documents:
letters = self.journal_id.get_journal_letter(
counterpart_partner=self.commercial_partner_id)
domain = [
('journal_id', '=', self.journal_id.id),
'|',
('document_type_id.document_letter_id', 'in', letters.ids),
('document_type_id.document_letter_id', '=', False),
]
            # If internal_type is in the context we try to search a specific
            # document type, e.g. used on debit notes
internal_type = self._context.get('internal_type', False)
if internal_type:
journal_document_type = journal_document_type.search(
domain + [
('document_type_id.internal_type',
'=', internal_type)], limit=1)
# For domain, we search all documents
journal_document_types = journal_document_types.search(domain)
            # If no specific document type was found, we choose another one
if not journal_document_type and journal_document_types:
journal_document_type = journal_document_types[0]
if invoice_type == 'in_invoice':
other_document_types = (
self.commercial_partner_id.other_document_type_ids)
domain = [
('journal_id', '=', self.journal_id.id),
('document_type_id',
'in', other_document_types.ids),
]
other_journal_document_types = self.env[
'account.journal.document.type'].search(domain)
journal_document_types += other_journal_document_types
            # if we have some document type specific to the partner, we choose it
if other_journal_document_types:
journal_document_type = other_journal_document_types[0]
return {
'available_journal_document_types': journal_document_types,
'journal_document_type': journal_document_type,
}
| agpl-3.0 | 2,440,302,413,177,547,000 | 37.898734 | 79 | 0.549219 | false |
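To make the document-number handling in _get_invoice_number above easier to follow, here is a minimal standalone sketch of the same split logic; the function name and sample value are hypothetical and not part of the addon.
import re
def split_document_number(str_number):
    # Mirrors the "PPPP-NNNNNNNN" and plain 12-digit branches handled above.
    if '-' in str_number:
        parts = str_number.split('-')
        invoice_number = parts.pop()
        point_of_sale = parts.pop()
    elif len(str_number) == 12:
        point_of_sale = str_number[:4]
        invoice_number = str_number[-8:]
    else:
        raise ValueError('Unrecognized document number: %s' % str_number)
    return (int(re.sub('[^0-9]', '', point_of_sale)),
            int(re.sub('[^0-9]', '', invoice_number)))
assert split_document_number('0001-00000123') == (1, 123)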
Azure/azure-sdk-for-python | sdk/servicebus/azure-servicebus/samples/async_samples/schedule_messages_and_cancellation_async.py | 1 | 2329 | #!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""
Example to show scheduling messages to and cancelling messages from a Service Bus Queue asynchronously.
"""
# pylint: disable=C0111
import os
import asyncio
import datetime
from azure.servicebus.aio import ServiceBusClient
from azure.servicebus import ServiceBusMessage
CONNECTION_STR = os.environ['SERVICE_BUS_CONNECTION_STR']
QUEUE_NAME = os.environ["SERVICE_BUS_QUEUE_NAME"]
async def schedule_single_message(sender):
message = ServiceBusMessage("ServiceBusMessage to be scheduled")
scheduled_time_utc = datetime.datetime.utcnow() + datetime.timedelta(seconds=30)
sequence_number = await sender.schedule_messages(message, scheduled_time_utc)
return sequence_number
async def schedule_multiple_messages(sender):
messages_to_schedule = []
for _ in range(10):
messages_to_schedule.append(ServiceBusMessage("Message to be scheduled"))
scheduled_time_utc = datetime.datetime.utcnow() + datetime.timedelta(seconds=30)
sequence_numbers = await sender.schedule_messages(messages_to_schedule, scheduled_time_utc)
return sequence_numbers
async def main():
servicebus_client = ServiceBusClient.from_connection_string(conn_str=CONNECTION_STR, logging_enable=True)
async with servicebus_client:
sender = servicebus_client.get_queue_sender(queue_name=QUEUE_NAME)
async with sender:
sequence_number = await schedule_single_message(sender)
print("Single message is scheduled and sequence number is {}".format(sequence_number))
sequence_numbers = await schedule_multiple_messages(sender)
print("Multiple messages are scheduled and sequence numbers are {}".format(sequence_numbers))
await sender.cancel_scheduled_messages(sequence_number)
await sender.cancel_scheduled_messages(sequence_numbers)
print("All scheduled messages are cancelled.")
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| mit | -1,515,303,370,977,743,400 | 40.589286 | 109 | 0.682267 | false |
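For comparison, a synchronous variant of the same flow might look like the sketch below; it assumes the synchronous azure.servicebus client mirrors the async API used in the sample (same environment variables and method names).
import os
import datetime
from azure.servicebus import ServiceBusClient, ServiceBusMessage
conn_str = os.environ['SERVICE_BUS_CONNECTION_STR']
queue_name = os.environ['SERVICE_BUS_QUEUE_NAME']
with ServiceBusClient.from_connection_string(conn_str) as client:
    with client.get_queue_sender(queue_name=queue_name) as sender:
        when = datetime.datetime.utcnow() + datetime.timedelta(seconds=30)
        # Schedule one message, then immediately cancel it by sequence number.
        seq = sender.schedule_messages(ServiceBusMessage("to be scheduled"), when)
        sender.cancel_scheduled_messages(seq)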
aspilotros/YouTube_views_forecasting | CountingLines3-noplot.py | 1 | 3206 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 15 17:54:35 2017
@author: ale
"""
#sys.stdout.write(frmt_date)
# Counting the number of lines in a file each 5 min
import time
import datetime as dt
import matplotlib.pyplot as plt
import plotly as py
import plotly.graph_objs as go
import numpy as np
root1 = input('insert the complete path that hosts the output folder')
root2 = input('output folder name')
name = root1 + '/' + root2 + '/key.done'
name1 = root1 + '/' + root2 + '/key.disabled'
name2 = root1 + '/' + root2 + '/key.invalidrequest'
name3 = root1 + '/' + root2 + '/key.nostatyet'
name4 = root1 + '/' + root2 + '/key.notfound'
name5 = root1 + '/' + root2 + '/key.private'
name6 = root1 + '/' + root2 + '/key.quotalimit'
j=0
counts=[]
counts1=[]
counts2=[]
counts3=[]
counts4=[]
counts5=[]
counts6=[]
while True:
handle = open(name, 'r')
handle1 = open(name1, 'r')
handle2 = open(name2, 'r')
handle3 = open(name3, 'r')
handle4 = open(name4, 'r')
handle5 = open(name5, 'r')
handle6 = open(name6, 'r')
counts.append(0)
counts1.append(0)
counts2.append(0)
counts3.append(0)
counts4.append(0)
counts5.append(0)
counts6.append(0)
for line in handle:
counts[j]=counts[j]+1
for line1 in handle1:
counts1[j]=counts1[j]+1
for line2 in handle2:
counts2[j]=counts2[j]+1
for line3 in handle3:
counts3[j]=counts3[j]+1
for line4 in handle4:
counts4[j]=counts4[j]+1
for line5 in handle5:
counts5[j]=counts5[j]+1
for line6 in handle6:
counts6[j]=counts6[j]+1
total=counts[j]+counts1[j]+counts2[j]+counts3[j]+counts4[j]+counts5[j]+counts6[j]
epoch_now = time.time()
frmt_date = dt.datetime.utcfromtimestamp(epoch_now)
frmt_date=frmt_date+dt.timedelta(hours=2)
frmt_date = frmt_date.strftime("%Y/%m/%d %H:%M")
#plt.plot(epoch_now,counts, 'r--', counts1, 'b--', counts2, 'g--', counts3, 'rs', counts4, 'bs', counts5, 'gs', counts6, 'r^')
#plt.show()
# Create traces
print (
    'line in file = ', counts[j], 'time = ', frmt_date, ' out of total =', total, '\n',
'done ',counts[j],' disabled ',counts1[j],' invalidreq',counts2[j],' notstatyet ',counts3[j],' notfound ',counts4[j],' private ',counts5[j],' quotalimit ',counts6[j])
    # plotting every 12 cycles, i.e. every 12*300 sec = every hour
'''
if j % 12 == 11:
trace0 = go.Scatter(
x = np.arange(j),
y = counts,
mode = 'markers',
name = 'done'
)
trace1 = go.Scatter(
x = np.arange(j),
y = counts1+counts2+counts3+counts4+counts5,
mode = 'markers',
name = 'key not available'
)
trace2 = go.Scatter(
x = np.arange(j),
y = counts6,
mode = 'lines',
name = 'quotalimit'
)
data = [trace0, trace1, trace2]
py.offline.plot({
"data": data,
"layout": go.Layout(title="Crawler Stats")
})
'''
j=j+1
time.sleep(300)
| mit | -4,667,322,212,081,878,000 | 22.925373 | 178 | 0.554273 | false |
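A more compact way to count the lines of each file (and close the handles deterministically) is sketched below; the helper name is made up and the snippet is only illustrative.
def count_lines(path):
    # The context manager closes the file even if reading fails.
    with open(path) as handle:
        return sum(1 for _ in handle)
# e.g. counts.append(count_lines(name)); counts6.append(count_lines(name6))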
rdireen/nearside | nearside/cylindrical/structures.py | 1 | 3017 | # Copyright (C) 2015 Randy Direen <[email protected]>
#
# This file is part of NearSide.
#
# NearSide is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NearSide is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NearSide. If not, see <http://www.gnu.org/licenses/>
"""***************************************************************************
Holds all the structures for cylindrical measurements
Randy Direen
3/06/2015
A description
***************************************************************************"""
#--------------------------Place in each *.py file----------------------------
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from six.moves import range #use range instead of xrange
#-----------------------------------------------------------------------------
#---------------------------------------------------------------------Built-ins
import json
from os.path import dirname
#--------------------------------------------------------------------3rd Party
import numpy as np
import spherepy as sp
#------------------------------------------------------------------------Custom
import nearside.probe as pb
#==============================================================================
# Global Declarations
#==============================================================================
err_msg = {}
#=============================================================================
# Objects
#=============================================================================
#-=-=-=-=-=-=-=-=-=-=-= COEFFICIENT REPRESENTATIONS =-=-=-=-=-=-=-=-=-=-=-=-=-
# The coefficients represent the device or environment that has been measured.
# These coefficients can be transformed back to field values.
class CylindricalScalarCoeffs(object):
pass
class CylindricalVectorCoeffs(object):
pass
#-=-=-=-=-=-=-=-=-=-=-= MEASURED ON UNIFORM GRID =-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# These objects use the algorithms that require data to be equally spaced in
# the theta direction and the phi direction.
class CylindricalMeasurementScalarUniform(object):
pass
class CylindricalMeasurementTransverseUniform(object):
pass
#-=-=-=-=-=-=-=-=-=-=-= MEASURED ON NON UNIFORM GRID =-=-=-=-=-=-=-=-=-=-=-=-=
# These objects use the algorithms that DO NOT require data to be equally
# spaced in the theta direction and the phi direction.
class CylindricalMeasurementScalarNonUniform(object):
pass
class CylindricalMeasurementTransverseNonUniform(object):
    pass
| gpl-3.0 | -3,671,326,859,326,461,000 | 33.295455 | 79 | 0.526019 | false
dataversioncontrol/dvc | dvc/state.py | 1 | 16303 | """Manages state database used for checksum caching."""
from __future__ import unicode_literals
import os
import sqlite3
import dvc.logger as logger
from dvc.config import Config
from dvc.utils import file_md5, remove, current_timestamp
from dvc.exceptions import DvcException
from dvc.utils.fs import get_mtime_and_size, get_inode
class StateVersionTooNewError(DvcException):
"""Thrown when dvc version is older than the state database version."""
def __init__(self, dvc_version, expected, actual):
super(StateVersionTooNewError, self).__init__(
"you are using an old version '{dvc_version}' of dvc that is "
"using state file version '{expected}' which is not compatible "
"with the state file version '{actual}' that is used in this "
"repo. Please upgrade right now!".format(
dvc_version=dvc_version, expected=expected, actual=actual
)
)
def _file_metadata_changed(actual_mtime, mtime, actual_size, size):
return actual_mtime != mtime or actual_size != size
class State(object): # pylint: disable=too-many-instance-attributes
"""Class for the state database.
Args:
repo (dvc.repo.Repo): repo instance that this state belongs to.
config (configobj.ConfigObj): config for the state.
Raises:
StateVersionTooNewError: thrown when dvc version is older than the
state database version.
"""
VERSION = 3
STATE_FILE = "state"
STATE_TABLE = "state"
STATE_TABLE_LAYOUT = (
"inode INTEGER PRIMARY KEY, "
"mtime TEXT NOT NULL, "
"size TEXT NOT NULL, "
"md5 TEXT NOT NULL, "
"timestamp TEXT NOT NULL"
)
STATE_INFO_TABLE = "state_info"
STATE_INFO_TABLE_LAYOUT = "count INTEGER"
STATE_INFO_ROW = 1
LINK_STATE_TABLE = "link_state"
LINK_STATE_TABLE_LAYOUT = (
"path TEXT PRIMARY KEY, "
"inode INTEGER NOT NULL, "
"mtime TEXT NOT NULL"
)
STATE_ROW_LIMIT = 100000000
STATE_ROW_CLEANUP_QUOTA = 50
MAX_INT = 2 ** 63 - 1
MAX_UINT = 2 ** 64 - 2
def __init__(self, repo, config):
self.repo = repo
self.dvc_dir = repo.dvc_dir
self.root_dir = repo.root_dir
self.row_limit = 100
self.row_cleanup_quota = 50
state_config = config.get(Config.SECTION_STATE, {})
self.row_limit = state_config.get(
Config.SECTION_STATE_ROW_LIMIT, self.STATE_ROW_LIMIT
)
self.row_cleanup_quota = state_config.get(
Config.SECTION_STATE_ROW_CLEANUP_QUOTA,
self.STATE_ROW_CLEANUP_QUOTA,
)
if not self.dvc_dir:
self.state_file = None
return
self.state_file = os.path.join(self.dvc_dir, self.STATE_FILE)
# https://www.sqlite.org/tempfiles.html
self.temp_files = [
self.state_file + "-journal",
self.state_file + "-wal",
]
self.database = None
self.cursor = None
self.inserts = 0
def __enter__(self):
self.load()
def __exit__(self, typ, value, tbck):
self.dump()
def _collect(self, path):
if os.path.isdir(path):
return self.repo.cache.local.collect_dir_cache(path)
return (file_md5(path)[0], None)
def changed(self, path, md5):
"""Check if file/directory has the expected md5.
Args:
path (str): path to the file/directory to check.
md5 (str): expected md5.
Returns:
bool: True if path has the expected md5, False otherwise.
"""
actual = self.update(path)
msg = "File '{}', md5 '{}', actual '{}'"
logger.debug(msg.format(path, md5, actual))
if not md5 or not actual:
return True
return actual.split(".")[0] != md5.split(".")[0]
def _execute(self, cmd):
logger.debug(cmd)
return self.cursor.execute(cmd)
def _fetchall(self):
ret = self.cursor.fetchall()
logger.debug("fetched: {}".format(ret))
return ret
def _to_sqlite(self, num):
assert num >= 0
assert num < self.MAX_UINT
        # NOTE: sqlite stores uints as signed ints, so maximum uint is 2^63-1
# see http://jakegoulding.com/blog/2011/02/06/sqlite-64-bit-integers/
if num > self.MAX_INT:
ret = -(num - self.MAX_INT)
else:
ret = num
assert self._from_sqlite(ret) == num
return ret
def _from_sqlite(self, num):
assert abs(num) <= self.MAX_INT
if num < 0:
return abs(num) + self.MAX_INT
assert num < self.MAX_UINT
assert num >= 0
return num
def _prepare_db(self, empty=False):
from dvc import VERSION
if not empty:
cmd = "PRAGMA user_version;"
self._execute(cmd)
ret = self._fetchall()
assert len(ret) == 1
assert len(ret[0]) == 1
assert isinstance(ret[0][0], int)
version = ret[0][0]
if version > self.VERSION:
raise StateVersionTooNewError(VERSION, self.VERSION, version)
elif version < self.VERSION:
msg = (
"State file version '{}' is too old. "
"Reformatting to the current version '{}'."
)
logger.warning(msg.format(version, self.VERSION))
cmd = "DROP TABLE IF EXISTS {};"
self._execute(cmd.format(self.STATE_TABLE))
self._execute(cmd.format(self.STATE_INFO_TABLE))
self._execute(cmd.format(self.LINK_STATE_TABLE))
# Check that the state file is indeed a database
cmd = "CREATE TABLE IF NOT EXISTS {} ({})"
self._execute(cmd.format(self.STATE_TABLE, self.STATE_TABLE_LAYOUT))
self._execute(
cmd.format(self.STATE_INFO_TABLE, self.STATE_INFO_TABLE_LAYOUT)
)
self._execute(
cmd.format(self.LINK_STATE_TABLE, self.LINK_STATE_TABLE_LAYOUT)
)
cmd = (
"INSERT OR IGNORE INTO {} (count) SELECT 0 "
"WHERE NOT EXISTS (SELECT * FROM {})"
)
self._execute(cmd.format(self.STATE_INFO_TABLE, self.STATE_INFO_TABLE))
cmd = "PRAGMA user_version = {};"
self._execute(cmd.format(self.VERSION))
def load(self):
"""Loads state database."""
retries = 1
while True:
assert self.database is None
assert self.cursor is None
assert self.inserts == 0
empty = not os.path.exists(self.state_file)
self.database = sqlite3.connect(self.state_file)
self.cursor = self.database.cursor()
# Try loading once to check that the file is indeed a database
# and reformat it if it is not.
try:
self._prepare_db(empty=empty)
return
except sqlite3.DatabaseError:
self.cursor.close()
self.database.close()
self.database = None
self.cursor = None
self.inserts = 0
if retries > 0:
os.unlink(self.state_file)
retries -= 1
else:
raise
def _vacuum(self):
# NOTE: see https://bugs.python.org/issue28518
self.database.isolation_level = None
self._execute("VACUUM")
self.database.isolation_level = ""
def dump(self):
"""Saves state database."""
assert self.database is not None
cmd = "SELECT count from {} WHERE rowid={}"
self._execute(cmd.format(self.STATE_INFO_TABLE, self.STATE_INFO_ROW))
ret = self._fetchall()
assert len(ret) == 1
assert len(ret[0]) == 1
count = self._from_sqlite(ret[0][0]) + self.inserts
if count > self.row_limit:
msg = "cleaning up state, this might take a while."
logger.warning(msg)
delete = count - self.row_limit
delete += int(self.row_limit * (self.row_cleanup_quota / 100.0))
cmd = (
"DELETE FROM {} WHERE timestamp IN ("
"SELECT timestamp FROM {} ORDER BY timestamp ASC LIMIT {});"
)
self._execute(
cmd.format(self.STATE_TABLE, self.STATE_TABLE, delete)
)
self._vacuum()
cmd = "SELECT COUNT(*) FROM {}"
self._execute(cmd.format(self.STATE_TABLE))
ret = self._fetchall()
assert len(ret) == 1
assert len(ret[0]) == 1
count = ret[0][0]
cmd = "UPDATE {} SET count = {} WHERE rowid = {}"
self._execute(
cmd.format(
self.STATE_INFO_TABLE,
self._to_sqlite(count),
self.STATE_INFO_ROW,
)
)
self._update_cache_directory_state()
self.database.commit()
self.cursor.close()
self.database.close()
self.database = None
self.cursor = None
self.inserts = 0
def _do_update(self, path, known_checksum=None):
"""
Make sure the stored info for the given path is up to date.
"""
if not os.path.exists(path):
return None, None
actual_mtime, actual_size = get_mtime_and_size(path)
actual_inode = get_inode(path)
existing_record = self.get_state_record_for_inode(actual_inode)
if existing_record:
md5, info = self._update_existing_state_record(
path,
actual_inode,
actual_mtime,
actual_size,
existing_record,
known_checksum,
)
else:
md5, info = self._insert_new_state_record(
path, actual_inode, actual_mtime, actual_size, known_checksum
)
return md5, info
def _update_existing_state_record(
self,
path,
actual_inode,
actual_mtime,
actual_size,
existing_record,
known_checksum=None,
):
mtime, size, md5, _ = existing_record
if _file_metadata_changed(actual_mtime, mtime, actual_size, size):
md5, info = self._update_state_for_path_changed(
path, actual_inode, actual_mtime, actual_size, known_checksum
)
else:
info = None
self._update_state_record_timestamp_for_inode(actual_inode)
return md5, info
def _update_state_record_timestamp_for_inode(self, actual_inode):
cmd = 'UPDATE {} SET timestamp = "{}" WHERE inode = {}'
self._execute(
cmd.format(
self.STATE_TABLE,
current_timestamp(),
self._to_sqlite(actual_inode),
)
)
def _update_state_for_path_changed(
self,
path,
actual_inode,
actual_mtime,
actual_size,
known_checksum=None,
):
if known_checksum:
md5, info = known_checksum, None
else:
md5, info = self._collect(path)
cmd = (
"UPDATE {} SET "
'mtime = "{}", size = "{}", '
'md5 = "{}", timestamp = "{}" '
"WHERE inode = {}"
)
self._execute(
cmd.format(
self.STATE_TABLE,
actual_mtime,
actual_size,
md5,
current_timestamp(),
self._to_sqlite(actual_inode),
)
)
return md5, info
def _insert_new_state_record(
self, path, actual_inode, actual_mtime, actual_size, known_checksum
):
if known_checksum:
md5, info = known_checksum, None
else:
md5, info = self._collect(path)
cmd = (
"INSERT INTO {}(inode, mtime, size, md5, timestamp) "
'VALUES ({}, "{}", "{}", "{}", "{}")'
)
self._execute(
cmd.format(
self.STATE_TABLE,
self._to_sqlite(actual_inode),
actual_mtime,
actual_size,
md5,
current_timestamp(),
)
)
self.inserts += 1
return md5, info
def get_state_record_for_inode(self, inode):
cmd = "SELECT mtime, size, md5, timestamp from {} " "WHERE inode={}"
cmd = cmd.format(self.STATE_TABLE, self._to_sqlite(inode))
self._execute(cmd)
results = self._fetchall()
if results:
            # uniqueness constraint on inode
assert len(results) == 1
return results[0]
return None
def update(self, path, known_checksum=None):
"""Gets the checksum for the specified path. Checksum will be
retrieved from the state database if available, otherwise it will be
computed and cached in the state database for the further use.
Args:
path (str): path to get the checksum for.
Returns:
str: checksum for the specified path.
"""
return self._do_update(path, known_checksum)[0]
def update_info(self, path):
"""Gets the checksum and the directory info (if applicable) for the
specified path.
Args:
path (str): path to get the checksum and the directory info for.
Returns:
tuple: checksum for the specified path along with a directory info
(list of {relative_path: checksum} entries for each file in the
directory) if applicable, otherwise None.
"""
md5, info = self._do_update(path)
if not info:
info = self.repo.cache.local.load_dir_cache(md5)
return (md5, info)
def update_link(self, path):
"""Adds the specified path to the list of links created by dvc. This
list is later used on `dvc checkout` to cleanup old links.
Args:
path (str): path to add to the list of links.
"""
if not os.path.exists(path):
return
mtime, _ = get_mtime_and_size(path)
inode = get_inode(path)
relpath = os.path.relpath(path, self.root_dir)
cmd = (
"REPLACE INTO {}(path, inode, mtime) "
'VALUES ("{}", {}, "{}")'.format(
self.LINK_STATE_TABLE, relpath, self._to_sqlite(inode), mtime
)
)
self._execute(cmd)
def remove_unused_links(self, used):
"""Removes all saved links except the ones that are used.
Args:
used (list): list of used links that should not be removed.
"""
unused = []
self._execute("SELECT * FROM {}".format(self.LINK_STATE_TABLE))
for row in self.cursor:
relpath, inode, mtime = row
inode = self._from_sqlite(inode)
path = os.path.join(self.root_dir, relpath)
if path in used:
continue
if not os.path.exists(path):
continue
actual_inode = get_inode(path)
actual_mtime, _ = get_mtime_and_size(path)
if inode == actual_inode and mtime == actual_mtime:
logger.debug("Removing '{}' as unused link.".format(path))
remove(path)
unused.append(relpath)
for relpath in unused:
cmd = 'DELETE FROM {} WHERE path = "{}"'
self._execute(cmd.format(self.LINK_STATE_TABLE, relpath))
def _update_cache_directory_state(self):
cache_path = self.repo.cache.local.cache_dir
mtime, size = get_mtime_and_size(cache_path)
inode = get_inode(cache_path)
cmd = (
"INSERT OR REPLACE INTO {}(inode, size, mtime, timestamp, md5) "
'VALUES ({}, "{}", "{}", "{}", "")'.format(
self.STATE_TABLE,
self._to_sqlite(inode),
size,
mtime,
current_timestamp(),
)
)
self._execute(cmd)
| apache-2.0 | -3,852,605,697,348,844,000 | 30.412331 | 79 | 0.532601 | false |
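The signed/unsigned mapping used by _to_sqlite/_from_sqlite above can be exercised on its own; this is a small self-contained sketch (the function names and sample inode value are hypothetical).
MAX_INT = 2 ** 63 - 1
def to_sqlite(num):
    # Fold an unsigned 64-bit value into SQLite's signed integer range.
    return -(num - MAX_INT) if num > MAX_INT else num
def from_sqlite(num):
    # Undo the folding applied by to_sqlite().
    return abs(num) + MAX_INT if num < 0 else num
inode = 2 ** 63 + 12345  # larger than MAX_INT, as large inodes can be
assert from_sqlite(to_sqlite(inode)) == inode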
sony/nnabla | python/src/nnabla/backward_function/image_augmentation.py | 1 | 1336 | # Copyright 2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla.functions as F
def image_augmentation_backward(inputs, shape=None, pad=(0, 0), min_scale=1.0, max_scale=1.0, angle=0.0, aspect_ratio=1.0, distortion=0.0, flip_lr=False, flip_ud=False, brightness=0.0, brightness_each=False, contrast=1.0, contrast_center=0.0, contrast_each=False, noise=0.0, seed=-1):
"""
Args:
      inputs (list of nn.Variable): Incoming grads/inputs to/of the forward function.
kwargs (dict of arguments): Dictionary of the corresponding function arguments.
Return:
list of Variable: Return the gradients wrt inputs of the corresponding function.
"""
dy = inputs[0]
x0 = inputs[1]
raise NotImplementedError(
"image_augmentation_backward is not implemented.")
| apache-2.0 | 1,924,461,627,481,144,800 | 42.096774 | 284 | 0.729042 | false |
RowsberryPi/rowsberrypi | pyrow/statshow.py | 1 | 2273 | #!/usr/bin/env python
#Copyright (c) 2011, Sam Gambrell
#Licensed under the Simplified BSD License.
#This is an example file to show how to make use of pyrow
#Have the rowing machine on and plugged into the computer before starting the program
#The program will display any changes to the machine status, stroke state, or workout state
#NOTE: This code has not been thoroughly tested and may not function as advertised.
#Please report any findings to the author so that they may be addressed in a stable release.
from . import pyrow, find
import time
import logging
if __name__ == '__main__':
#Connecting to erg
ergs = list(find())
if len(ergs) == 0:
exit("No ergs found.")
erg = pyrow(ergs[0])
logging.info("Connected to erg.")
#Create a dictionary of the different status states
state = ['Error', 'Ready', 'Idle', 'Have ID', 'N/A', 'In Use',
'Pause', 'Finished', 'Manual', 'Offline']
stroke = ['Wait for min speed', 'Wait for acceleration', 'Drive', 'Dwelling', 'Recovery']
workout = ['Waiting begin', 'Workout row', 'Countdown pause', 'Interval rest',
'Work time inverval', 'Work distance interval', 'Rest end time', 'Rest end distance',
'Time end rest', 'Distance end rest', 'Workout end', 'Workout terminate',
'Workout logged', 'Workout rearm']
command = ['CSAFE_GETSTATUS_CMD', 'CSAFE_PM_GET_STROKESTATE', 'CSAFE_PM_GET_WORKOUTSTATE']
#prime status number
cstate = -1
cstroke = -1
cworkout = -1
erg.set_workout(distance=2000, split=100, pace=120)
#Inf loop
while 1:
results = erg.send(command)
if cstate != (results['CSAFE_GETSTATUS_CMD'][0] & 0xF):
cstate = results['CSAFE_GETSTATUS_CMD'][0] & 0xF
logging.debug("State %s: %s", str(cstate), state[cstate])
if cstroke != results['CSAFE_PM_GET_STROKESTATE'][0]:
cstroke = results['CSAFE_PM_GET_STROKESTATE'][0]
logging.debug("Stroke %s: %s", str(cstroke), stroke[cstroke])
if cworkout != results['CSAFE_PM_GET_WORKOUTSTATE'][0]:
cworkout = results['CSAFE_PM_GET_WORKOUTSTATE'][0]
logging.debug("Workout %s: %s", str(cworkout), workout[cworkout])
time.sleep(1)
| bsd-2-clause | -7,115,628,451,162,838,000 | 38.877193 | 100 | 0.636604 | false |
tensorflow/datasets | tensorflow_datasets/text/lm1b.py | 1 | 3711 | # coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Language Model 1 Billion dataset."""
import os
from absl import logging
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
_CITATION = """\
@article{DBLP:journals/corr/ChelbaMSGBK13,
author = {Ciprian Chelba and
Tomas Mikolov and
Mike Schuster and
Qi Ge and
Thorsten Brants and
Phillipp Koehn},
title = {One Billion Word Benchmark for Measuring Progress in Statistical Language
Modeling},
journal = {CoRR},
volume = {abs/1312.3005},
year = {2013},
url = {http://arxiv.org/abs/1312.3005},
archivePrefix = {arXiv},
eprint = {1312.3005},
timestamp = {Mon, 13 Aug 2018 16:46:16 +0200},
biburl = {https://dblp.org/rec/bib/journals/corr/ChelbaMSGBK13},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
_DESCRIPTION = """\
A benchmark corpus to be used for measuring progress in statistical language \
modeling. This has almost one billion words in the training data.
"""
_DOWNLOAD_URL = ("http://www.statmt.org/lm-benchmark/"
"1-billion-word-language-modeling-benchmark-r13output.tar.gz")
_TOP_LEVEL_DIR = "1-billion-word-language-modeling-benchmark-r13output"
_TRAIN_FILE_FORMAT = os.path.join(_TOP_LEVEL_DIR,
"training-monolingual.tokenized.shuffled",
"news.en-*")
_HELDOUT_FILE_FORMAT = os.path.join(_TOP_LEVEL_DIR,
"heldout-monolingual.tokenized.shuffled",
"news.en.heldout-*")
def _train_data_filenames(tmp_dir):
return tf.io.gfile.glob(os.path.join(tmp_dir, _TRAIN_FILE_FORMAT))
def _test_data_filenames(tmp_dir):
return tf.io.gfile.glob(os.path.join(tmp_dir, _HELDOUT_FILE_FORMAT))
class Lm1b(tfds.core.GeneratorBasedBuilder):
"""1 Billion Word Language Model Benchmark dataset."""
VERSION = tfds.core.Version("1.1.0")
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
"text": tfds.features.Text(),
}),
supervised_keys=("text", "text"),
homepage="http://www.statmt.org/lm-benchmark/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
lm1b_path = dl_manager.download_and_extract(_DOWNLOAD_URL)
train_files = _train_data_filenames(lm1b_path)
test_files = _test_data_filenames(lm1b_path)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN, gen_kwargs={"files": train_files}),
tfds.core.SplitGenerator(
name=tfds.Split.TEST, gen_kwargs={"files": test_files}),
]
def _generate_examples(self, files):
for filepath in files:
logging.info("generating examples from = %s", filepath)
with tf.io.gfile.GFile(filepath) as f:
for idx, line in enumerate(f):
yield "%s_%d" % (os.path.basename(filepath), idx), {
"text": line.strip(),
}
| apache-2.0 | 4,412,130,310,659,180,000 | 34.009434 | 88 | 0.642684 | false |
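Once registered, the builder above is normally consumed through tfds.load; a minimal usage sketch follows (note the full corpus download is large, on the order of gigabytes, so this is illustrative rather than something to run casually).
import tensorflow_datasets as tfds
ds = tfds.load('lm1b', split='train')
for example in ds.take(1):
    print(example['text'])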
sadig/DC2 | components/dc2-admincenter/dc2/admincenter/lib/auth/kerberos.py | 1 | 2306 | # -*- coding: utf-8 -*-
#
# (DC)² - DataCenter Deployment Control
# Copyright (C) 2010, 2011, 2012, 2013, 2014 Stephan Adig <[email protected]>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import sys
import os
try:
import web
except ImportError as e:
print(e)
print('you did not install web.py')
print(e)
sys.exit(1)
try:
import krbV
except ImportError as e:
print(e)
print('you don\'t have python-krbV installed')
print(e)
sys.exit(1)
try:
from dc2.lib.auth.kerberos.authentication import run
from dc2.lib.auth.kerberos.authentication import krb5_format_principal_name
from dc2.lib.auth.kerberos.authentication import get_ccache_name
except ImportError as e:
print(e)
print("You didn't install dc2.lib")
print(e)
sys.exit(1)
from exceptions import KerberosAuthError
ENCODING = 'UTF-8'
def do_kinit(username=None, password=None):
if username is None or password is None:
raise ValueError('Username and Password can\'t be None')
if username == '' or password == '':
raise ValueError('Username and Password can\'t be empty strings')
realm = krbV.default_context().default_realm.decode(ENCODING)
principal = krb5_format_principal_name(username, realm)
ccache_name = get_ccache_name()
(stdout, stderr, returncode) = run(
['/usr/bin/kinit', principal],
env={'KRB5CCNAME': ccache_name},
stdin=password, raiseonerr=False)
os.environ['KRB5CCNAME'] = ccache_name
web.ctx.session.krb5ccname = ccache_name
if returncode != 0:
raise KerberosAuthError(principal=principal, message=unicode(stderr))
| gpl-2.0 | -8,005,599,435,126,051,000 | 32.405797 | 79 | 0.708894 | false |
tdhooper/starstoloves | starstoloves/lib/search/tests/test_result.py | 1 | 2284 | import pytest
from starstoloves.lib.track.lastfm_track import LastfmTrack
from ..result import LastfmResultParser
pytestmark = pytest.mark.django_db
@pytest.fixture()
def result_parser(request):
return LastfmResultParser()
class TestResultParser():
many_results = {
'trackmatches': {
'track': [
{
'name': 'trackA',
'artist': 'artistA',
'url': 'urlA',
'listeners': '222',
},{
'name': 'trackB',
'artist': 'artistB',
'url': 'urlB',
'listeners': '888',
},
]
}
}
single_result = {
'trackmatches': {
'track': {
'name': 'trackA',
'artist': 'artistA',
'url': 'urlA',
'listeners': '222',
}
}
}
no_results = {
'trackmatches': "\n"
}
def test_parse_returns_lastfm_tracks(self, result_parser):
tracks = result_parser.parse(self.single_result)
assert isinstance(tracks[0], LastfmTrack)
def test_parse_extracts_track_details(self, result_parser):
tracks = result_parser.parse(self.many_results)
assert [track.track_name for track in tracks] == ['trackA', 'trackB']
assert [track.artist_name for track in tracks] == ['artistA', 'artistB']
assert [track.url for track in tracks] == ['urlA', 'urlB']
assert [track.listeners for track in tracks] == [222, 888]
def test_parse_extracts_track_details_when_there_is_only_one(self, result_parser):
tracks = result_parser.parse(self.single_result)
assert [track.track_name for track in tracks] == ['trackA']
assert [track.artist_name for track in tracks] == ['artistA']
assert [track.url for track in tracks] == ['urlA']
assert [track.listeners for track in tracks] == [222]
def test_parse_returns_none_when_there_are_no_tracks(self, result_parser):
assert result_parser.parse(self.no_results) is None
def test_parse_returns_none_when_given_an_error(self, result_parser):
assert result_parser.parse(TypeError) is None
| gpl-2.0 | 778,271,204,807,938,200 | 28.662338 | 86 | 0.549037 | false |
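Outside the test suite, the parser under test might be driven as in the sketch below; the response dict reuses the single-result fixture above, the import path follows this test module's own imports, and running it may still require the project's Django settings, as the django_db marker suggests.
from starstoloves.lib.search.result import LastfmResultParser
response = {
    'trackmatches': {
        'track': {
            'name': 'trackA',
            'artist': 'artistA',
            'url': 'urlA',
            'listeners': '222',
        }
    }
}
tracks = LastfmResultParser().parse(response)
print(tracks[0].track_name, tracks[0].listeners)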
AragurDEV/yowsup | yowsup/layers/protocol_profiles/protocolentities/iq_picture_get_result.py | 1 | 1785 | from .iq_picture import PictureIqProtocolEntity
from yowsup.structs import ProtocolTreeNode
class ResultGetPictureIqProtocolEntity(PictureIqProtocolEntity):
'''
<iq type="result" from="{{jid}}" id="{{id}}">
<picture type="image | preview" id="{{another_id}}">
{{Binary bytes of the picture.}}
</picture>
</iq>
'''
def __init__(self, jid, pictureData, pictureId, preview = True, _id = None):
super(ResultGetPictureIqProtocolEntity, self).__init__(jid, _id, "result")
self.setResultPictureProps(pictureData, pictureId, preview)
def setResultPictureProps(self, pictureData, pictureId, preview = True):
self.preview = preview
self.pictureData = pictureData
self.pictureId = pictureId
def isPreview(self):
return self.preview
def getPictureData(self):
return self.pictureData.encode('latin-1')
def getPictureId(self):
return self.pictureId
def writeToFile(self, path):
with open(path, "wb") as outFile:
outFile.write(self.getPictureData())
def toProtocolTreeNode(self):
node = super(ResultGetPictureIqProtocolEntity, self).toProtocolTreeNode()
pictureNode = ProtocolTreeNode({"type": "preview" if self.isPreview() else "image" }, data = self.getPictureData())
node.addChild(pictureNode)
return node
@staticmethod
def fromProtocolTreeNode(node):
entity = PictureIqProtocolEntity.fromProtocolTreeNode(node)
entity.__class__ = ResultGetPictureIqProtocolEntity
pictureNode = node.getChild("picture")
entity.setResultPictureProps(pictureNode.getData(), pictureNode.getAttributeValue("id"), pictureNode.getAttributeValue("type") == "preview")
return entity
| gpl-3.0 | -5,251,179,448,151,242,000 | 38.666667 | 148 | 0.677871 | false |
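A hypothetical construction of the entity defined above, dumping the received picture bytes to disk; the jid, picture data and output path are made-up values, and the import path simply follows this file's location in the package.
from yowsup.layers.protocol_profiles.protocolentities.iq_picture_get_result \
    import ResultGetPictureIqProtocolEntity
entity = ResultGetPictureIqProtocolEntity(
    "[email protected]", pictureData="<binary jpeg bytes>", pictureId="1234",
    preview=False)
entity.writeToFile("/tmp/profile_picture.jpg")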
Instanssi/Instanssi.org | Instanssi/kompomaatti/admin.py | 1 | 1416 | # -*- coding: utf-8 -*-
from django.contrib import admin
from imagekit.admin import AdminThumbnail
from Instanssi.kompomaatti.models import Compo, Entry, Event, Vote, VoteCodeRequest, Profile, Competition, \
CompetitionParticipation, TicketVoteCode, VoteGroup, EntryCollection
class TicketVoteCodeAdmin(admin.ModelAdmin):
list_display = [
'associated_to',
'event',
'ticket',
'time',
]
class VoteCodeRequestAdmin(admin.ModelAdmin):
list_display = [
'user',
'event',
'status',
'text',
]
class EntryAdmin(admin.ModelAdmin):
list_display = [
'name',
'compo',
'user',
'creator',
'entryfile',
'disqualified',
'admin_thumbnail',
]
admin_thumbnail = AdminThumbnail(image_field='imagefile_thumbnail')
class EntryCollectionAdmin(admin.ModelAdmin):
list_display = [
'compo',
'file',
'updated_at',
]
admin.site.register(Compo)
admin.site.register(Entry, EntryAdmin)
admin.site.register(Event)
admin.site.register(Vote)
admin.site.register(VoteGroup)
admin.site.register(TicketVoteCode, TicketVoteCodeAdmin)
admin.site.register(VoteCodeRequest, VoteCodeRequestAdmin)
admin.site.register(EntryCollection, EntryCollectionAdmin)
admin.site.register(Profile)
admin.site.register(Competition)
admin.site.register(CompetitionParticipation)
| mit | -8,645,906,533,803,368,000 | 23.413793 | 108 | 0.680085 | false |
Workday/OpenFrame | tools/perf/benchmarks/start_with_url2.py | 1 | 1913 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from benchmarks import startup2
import page_sets
from telemetry import benchmark
# TODO(gabadie): Replaces start_with_url.* by start_with_url2.* after confirming
# that both benchmarks produce the same results.
# Disable accessing protected member for startup2._StartupPerfBenchmark. It
# needs to be protected to not be listed in the list of benchmarks to run, even
# though its purpose is only to factorise common code between startup
# benchmarks.
# pylint: disable=protected-access
@benchmark.Enabled('has tabs')
@benchmark.Enabled('android')
@benchmark.Disabled('chromeos', 'linux', 'mac', 'win')
class StartWithUrlColdTBM(startup2._StartupPerfBenchmark):
"""Measures time to start Chrome cold with startup URLs."""
page_set = page_sets.StartupPagesPageSetTBM
options = {'pageset_repeat': 5}
def SetExtraBrowserOptions(self, options):
options.clear_sytem_cache_for_browser_and_profile_on_start = True
super(StartWithUrlColdTBM, self).SetExtraBrowserOptions(options)
@classmethod
def Name(cls):
return 'start_with_url2.cold.startup_pages'
@benchmark.Enabled('has tabs')
@benchmark.Enabled('android')
@benchmark.Disabled('chromeos', 'linux', 'mac', 'win')
class StartWithUrlWarmTBM(startup2._StartupPerfBenchmark):
"""Measures stimetime to start Chrome warm with startup URLs."""
page_set = page_sets.StartupPagesPageSetTBM
options = {'pageset_repeat': 11}
@classmethod
def Name(cls):
return 'start_with_url2.warm.startup_pages'
@classmethod
def ValueCanBeAddedPredicate(cls, value, is_first_result):
del value # unused
# Ignores first results because the first invocation is actualy cold since
# we are loading the profile for the first time.
return not is_first_result
| bsd-3-clause | 5,598,138,598,909,910,000 | 33.160714 | 80 | 0.755358 | false |
dr4ke616/LazyTorrent | twisted/plugins/torrent_plugin.py | 1 | 1555 | from zope.interface import implements
from twisted.python import usage
from twisted.plugin import IPlugin
from twisted.application import internet
from twisted.application.service import IServiceMaker
from mamba.utils import config
from mamba.enterprise import database
from mamba.core.session import Session
from mamba.core.services.threadpool import ThreadPoolService
from torrent import MambaApplicationFactory
settings = config.Application('config/application.json')
class Options(usage.Options):
optParameters = [
['port', 'p', settings.port, 'The port number to listen on']
]
class MambaServiceMaker(object):
implements(IServiceMaker, IPlugin)
tapname = settings.name.lower()
description = settings.description
options = Options
def makeService(self, options):
"""Construct a TCPServer from a factory defined in torrent
"""
factory, application = MambaApplicationFactory(settings)
factory.sessionFactory = Session
httpserver = internet.TCPServer(int(options['port']), factory)
httpserver.setName('{} Application'.format(settings.name))
application.addService(httpserver)
thread_pool = ThreadPoolService(database.Database.pool)
application.addService(thread_pool)
return application
# Now construct an object which *provides* the relevant interfaces
# The name of this variable is irrelevant, as long as there is *some*
# name bound to a provider of IPlugin and IServiceMaker
mamba_service_maker = MambaServiceMaker()
| gpl-3.0 | 8,606,297,883,575,277,000 | 29.490196 | 70 | 0.748553 | false |
GiulianoFranchetto/zephyr | doc/extensions/zephyr/application.py | 1 | 15282 | # Copyright (c) 2017 Open Source Foundries Limited.
#
# SPDX-License-Identifier: Apache-2.0
'''Sphinx extensions related to managing Zephyr applications.'''
from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
# TODO: extend and modify this for Windows.
#
# This could be as simple as generating a couple of sets of instructions, one
# for Unix environments, and another for Windows.
class ZephyrAppCommandsDirective(Directive):
r'''
This is a Zephyr directive for generating consistent documentation
of the shell commands needed to manage (build, flash, etc.) an application.
For example, to generate commands to build samples/hello_world for
qemu_x86 use::
.. zephyr-app-commands::
:zephyr-app: samples/hello_world
:board: qemu_x86
:goals: build
Directive options:
\:tool:
which tool to use. Valid options are currently 'cmake', 'west' and 'all'.
The default is 'all'.
\:app:
path to the application to build.
\:zephyr-app:
path to the application to build, this is an app present in the upstream
zephyr repository. Mutually exclusive with \:app:.
\:cd-into:
if set, build instructions are given from within the \:app: folder,
instead of outside of it.
\:generator:
which build system to generate. Valid options are
currently 'ninja' and 'make'. The default is 'ninja'. This option
is not case sensitive.
\:host-os:
which host OS the instructions are for. Valid options are
'unix', 'win' and 'all'. The default is 'all'.
\:board:
if set, the application build will target the given board.
\:shield:
if set, the application build will target the given shield.
\:conf:
if set, the application build will use the given configuration
file. If multiple conf files are provided, enclose the
space-separated list of files with quotes, e.g., "a.conf b.conf".
\:gen-args:
if set, additional arguments to the CMake invocation
\:build-args:
if set, additional arguments to the build invocation
\:build-dir:
if set, the application build directory will *APPEND* this
(relative, Unix-separated) path to the standard build directory. This is
mostly useful for distinguishing builds for one application within a
single page.
\:goals:
a whitespace-separated list of what to do with the app (in
'build', 'flash', 'debug', 'debugserver', 'run'). Commands to accomplish
these tasks will be generated in the right order.
\:maybe-skip-config:
if set, this indicates the reader may have already
created a build directory and changed there, and will tweak the text to
note that doing so again is not necessary.
\:compact:
if set, the generated output is a single code block with no
additional comment lines
'''
has_content = False
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
option_spec = {
'tool': directives.unchanged,
'app': directives.unchanged,
'zephyr-app': directives.unchanged,
'cd-into': directives.flag,
'generator': directives.unchanged,
'host-os': directives.unchanged,
'board': directives.unchanged,
'shield': directives.unchanged,
'conf': directives.unchanged,
'gen-args': directives.unchanged,
'build-args': directives.unchanged,
'build-dir': directives.unchanged,
'goals': directives.unchanged_required,
'maybe-skip-config': directives.flag,
'compact': directives.flag
}
TOOLS = ['cmake', 'west', 'all']
GENERATORS = ['make', 'ninja']
HOST_OS = ['unix', 'win', 'all']
IN_TREE_STR = '# From the root of the zephyr repository'
def run(self):
# Re-run on the current document if this directive's source changes.
self.state.document.settings.env.note_dependency(__file__)
# Parse directive options. Don't use os.path.sep or os.path.join here!
# That would break if building the docs on Windows.
tool = self.options.get('tool', 'all').lower()
app = self.options.get('app', None)
zephyr_app = self.options.get('zephyr-app', None)
cd_into = 'cd-into' in self.options
generator = self.options.get('generator', 'ninja').lower()
host_os = self.options.get('host-os', 'all').lower()
board = self.options.get('board', None)
shield = self.options.get('shield', None)
conf = self.options.get('conf', None)
gen_args = self.options.get('gen-args', None)
build_args = self.options.get('build-args', None)
build_dir_append = self.options.get('build-dir', '').strip('/')
goals = self.options.get('goals').split()
skip_config = 'maybe-skip-config' in self.options
compact = 'compact' in self.options
if tool not in self.TOOLS:
raise self.error('Unknown tool {}; choose from: {}'.format(
tool, self.TOOLS))
if app and zephyr_app:
raise self.error('Both app and zephyr-app options were given.')
if generator not in self.GENERATORS:
raise self.error('Unknown generator {}; choose from: {}'.format(
generator, self.GENERATORS))
if host_os not in self.HOST_OS:
raise self.error('Unknown host-os {}; choose from: {}'.format(
host_os, self.HOST_OS))
if compact and skip_config:
raise self.error('Both compact and maybe-skip-config options were given.')
app = app or zephyr_app
in_tree = self.IN_TREE_STR if zephyr_app else None
# Allow build directories which are nested.
build_dir = ('build' + '/' + build_dir_append).rstrip('/')
num_slashes = build_dir.count('/')
# Create host_os array
host_os = [host_os] if host_os != "all" else [v for v in self.HOST_OS
if v != 'all']
# Create tools array
tools = [tool] if tool != "all" else [v for v in self.TOOLS
if v != 'all']
# Build the command content as a list, then convert to string.
content = []
tool_comment = None
if len(tools) > 1:
tool_comment = 'Using {}:'
run_config = {
'host_os': host_os,
'app': app,
'in_tree': in_tree,
'cd_into': cd_into,
'board': board,
'shield': shield,
'conf': conf,
'gen_args': gen_args,
'build_args': build_args,
'build_dir': build_dir,
'goals': goals,
'compact': compact,
'skip_config': skip_config,
'generator': generator
}
if 'west' in tools:
w = self._generate_west(**run_config)
if tool_comment:
paragraph = nodes.paragraph()
paragraph += nodes.Text(tool_comment.format('west'))
content.append(paragraph)
content.append(self._lit_block(w))
else:
content.extend(w)
if 'cmake' in tools:
c = self._generate_cmake(**run_config)
if tool_comment:
paragraph = nodes.paragraph()
paragraph += nodes.Text(tool_comment.format(
'CMake and {}'.format( generator)))
content.append(paragraph)
content.append(self._lit_block(c))
else:
content.extend(c)
if not tool_comment:
content = [self._lit_block(content)]
return content
def _lit_block(self, content):
content = '\n'.join(content)
# Create the nodes.
literal = nodes.literal_block(content, content)
self.add_name(literal)
literal['language'] = 'console'
return literal
def _generate_west(self, **kwargs):
content = []
board = kwargs['board']
app = kwargs['app']
in_tree = kwargs['in_tree']
goals = kwargs['goals']
cd_into = kwargs['cd_into']
build_dir = kwargs['build_dir']
compact = kwargs['compact']
kwargs['board'] = None
cmake_args = self._cmake_args(**kwargs)
cmake_args = ' --{}'.format(cmake_args) if cmake_args != '' else ''
# ignore zephyr_app since west needs to run within
# the installation. Instead rely on relative path.
src = ' {}'.format(app) if app and not cd_into else ''
dst = ' -d {}'.format(build_dir) if build_dir != 'build' else ''
if in_tree and not compact:
content.append(in_tree)
if cd_into and app:
content.append('cd {}'.format(app))
if 'build' in goals:
build_args = ' -b {}{}{}{}'.format(board, dst, src, cmake_args)
content.append('west build{}'.format(build_args))
goal_args = '{}'.format(dst)
if 'sign' in goals:
content.append('west sign{}'.format(goal_args))
for goal in goals:
if goal == 'build' or goal == 'sign':
continue
elif goal == 'flash':
content.append('west flash{}'.format(goal_args))
elif goal == 'debug':
content.append('west debug{}'.format(goal_args))
elif goal == 'debugserver':
content.append('west debugserver{}'.format(goal_args))
elif goal == 'attach':
content.append('west attach{}'.format(goal_args))
else:
content.append('west build -t {}{}'.format(goal, goal_args))
return content
def _mkdir(self, mkdir, build_dir, host_os, skip_config):
content = []
if skip_config:
content.append("# If you already made a build directory ({}) and ran cmake, just 'cd {}' instead.".format(build_dir, build_dir)) # noqa: E501
if host_os == 'all':
content.append('mkdir {} && cd {}'.format(build_dir, build_dir))
if host_os == "unix":
content.append('{} {} && cd {}'.format(mkdir, build_dir, build_dir))
elif host_os == "win":
build_dir = build_dir.replace('/','\\')
content.append('mkdir {} & cd {}'.format(build_dir, build_dir))
return content
def _cmake_args(self, **kwargs):
generator = kwargs['generator']
board = kwargs['board']
shield = kwargs['shield']
conf = kwargs['conf']
gen_args = kwargs['gen_args']
board_arg = ' -DBOARD={}'.format(board) if board else ''
shield_arg = ' -DSHIELD={}'.format(shield) if shield else ''
conf_arg = ' -DCONF_FILE={}'.format(conf) if conf else ''
gen_args = ' {}'.format(gen_args) if gen_args else ''
return '{}{}{}{}'.format(board_arg, shield_arg, conf_arg, gen_args)
def _cd_into(self, mkdir, **kwargs):
app = kwargs['app']
host_os = kwargs['host_os']
compact = kwargs['compact']
build_dir = kwargs['build_dir']
skip_config = kwargs['skip_config']
content = []
os_comment = None
if len(host_os) > 1:
os_comment = '# On {}'
num_slashes = build_dir.count('/')
if not app and mkdir and num_slashes == 0:
# When there's no app and a single level deep build dir,
# simplify output
content.extend(self._mkdir(mkdir, build_dir, 'all',
skip_config))
if not compact:
content.append('')
return content
for host in host_os:
if host == "unix":
if os_comment:
content.append(os_comment.format('Linux/macOS'))
if app:
content.append('cd {}'.format(app))
elif host == "win":
if os_comment:
content.append(os_comment.format('Windows'))
if app:
backslashified = app.replace('/', '\\')
content.append('cd {}'.format(backslashified))
if mkdir:
content.extend(self._mkdir(mkdir, build_dir, host, skip_config))
if not compact:
content.append('')
return content
def _generate_cmake(self, **kwargs):
generator = kwargs['generator']
host_os = kwargs['host_os']
cd_into = kwargs['cd_into']
app = kwargs['app']
in_tree = kwargs['in_tree']
build_dir = kwargs['build_dir']
build_args = kwargs['build_args']
skip_config = kwargs['skip_config']
goals = kwargs['goals']
compact = kwargs['compact']
content = []
if in_tree and not compact:
content.append(in_tree)
if cd_into:
num_slashes = build_dir.count('/')
mkdir = 'mkdir' if num_slashes == 0 else 'mkdir -p'
content.extend(self._cd_into(mkdir, **kwargs))
# Prepare cmake/ninja/make variables
source_dir = ' ' + '/'.join(['..' for i in range(num_slashes + 1)])
cmake_build_dir = ''
tool_build_dir = ''
else:
source_dir = ' {}'.format(app) if app else ' .'
cmake_build_dir = ' -B{}'.format(build_dir)
tool_build_dir = ' -C{}'.format(build_dir)
# Now generate the actual cmake and make/ninja commands
gen_arg = ' -GNinja' if generator == 'ninja' else ''
build_args = ' {}'.format(build_args) if build_args else ''
cmake_args = self._cmake_args(**kwargs)
if not compact:
if not cd_into and skip_config:
content.append("# If you already ran cmake with -B{}, you " \
"can skip this step and run {} directly.".
format(build_dir, generator)) # noqa: E501
else:
content.append('# Use cmake to configure a {}-based build' \
'system:'.format(generator.capitalize())) # noqa: E501
content.append('cmake{}{}{}{}'.format(cmake_build_dir, gen_arg,
cmake_args, source_dir))
if not compact:
content.extend(['',
'# Now run ninja on the generated build system:'])
if 'build' in goals:
content.append('{}{}{}'.format(generator, tool_build_dir,
build_args))
for goal in goals:
if goal == 'build':
continue
content.append('{}{} {}'.format(generator, tool_build_dir, goal))
return content
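    # Illustrative result of _generate_cmake (values are hypothetical): with
    # generator='ninja', board='qemu_x86', app='samples/hello_world',
    # build_dir='build', cd_into unset, compact=True and goals=['build'],
    # the generated commands are:
    #   cmake -Bbuild -GNinja -DBOARD=qemu_x86 samples/hello_world
    #   ninja -Cbuild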
def setup(app):
app.add_directive('zephyr-app-commands', ZephyrAppCommandsDirective)
return {
'version': '1.0',
'parallel_read_safe': True,
'parallel_write_safe': True
}
| apache-2.0 | -3,486,946,015,469,314,600 | 36.273171 | 154 | 0.551171 | false |
google-research/open-covid-19-data | src/scripts/locations/generate_iso_3166_1.py | 1 | 7123 | # Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=no-value-for-parameter
import requests
import pandas as pd
import streamlit as st
import os
import sys
PIPELINE_DIR = os.path.join(os.path.dirname(__file__), '../../', 'src/pipeline')
sys.path.append(PIPELINE_DIR)
import path_utils
################################################################################
##### Query wikidata for all ISO-3166-1 countries ######
################################################################################
# Wikidata query for ISO-3166-1 codes
# Use at https://query.wikidata.org/
# Workaround for a bug in generating urls for wikidata queries:
# Use the UI at https://query.wikidata.org/ to get the query url by entering these queries
# and then click the "Link" button -> SPARQL endpoint -> copy link address.
# This gives you the url for the query.
# SELECT DISTINCT ?country ?countryLabel ?capital ?capitalLabel
# WHERE
# {
# ?country wdt:P31 wd:Q3624078 .
# #not a former country
# FILTER NOT EXISTS {?country wdt:P31 wd:Q3024240}
# #and no an ancient civilisation (needed to exclude ancient Egypt)
# FILTER NOT EXISTS {?country wdt:P31 wd:Q28171280}
# OPTIONAL { ?country wdt:P36 ?capital } .
#
# SERVICE wikibase:label { bd:serviceParam wikibase:language "en" }
# }
# ORDER BY ?countryLabel
iso_3166_1_url = 'https://query.wikidata.org/sparql?query=%23added%20before%202016-10%0ASELECT%20DISTINCT%20%3Fcountry%20%3FcountryLabel%20%3FthreeLetterCode%20%3FnumericCode%20%3FtwoLetterCode%0AWHERE%0A%7B%0A%20%20%3Fcountry%20wdt%3AP298%20%3FthreeLetterCode.%0A%20%20%3Fcountry%20wdt%3AP299%20%3FnumericCode.%0A%20%20%3Fcountry%20wdt%3AP297%20%3FtwoLetterCode.%0A%20%20%23not%20a%20former%20country%0A%20%20FILTER%20NOT%20EXISTS%20%7B%3Fcountry%20wdt%3AP31%20wd%3AQ3024240%7D%0A%20%20%23and%20no%20an%20ancient%20civilisation%20(needed%20to%20exclude%20ancient%20Egypt)%0A%20%20FILTER%20NOT%20EXISTS%20%7B%3Fcountry%20wdt%3AP31%20wd%3AQ28171280%7D%0A%0A%20%20SERVICE%20wikibase%3Alabel%20%7B%20bd%3AserviceParam%20wikibase%3Alanguage%20%22en%22%20%7D%0A%7D%0AORDER%20BY%20%3FcountryLabel' # pylint: disable=line-too-long
countries = requests.get(iso_3166_1_url, params={'format': 'json'}).json()['results']['bindings']
country_df = pd.json_normalize(countries)
country_df = country_df.rename(columns={
'country.value': 'wikidata_id',
'twoLetterCode.value': 'country_iso_3166-1_alpha-2',
'numericCode.value': 'country_iso_3166-1_numeric',
'threeLetterCode.value': 'region_code',
'countryLabel.value': 'region_name'
})
country_df = country_df[['wikidata_id', 'country_iso_3166-1_alpha-2', 'country_iso_3166-1_numeric',
'region_code', 'region_name']]
country_df['wikidata_id'] = country_df['wikidata_id'].apply(lambda s: s.split('/')[-1])
country_df['region_code_type'] = 'iso_3166-1'
country_df['country_iso_3166-1_alpha-3'] = country_df['region_code']
country_df['region_code_level'] = 1
country_df['parent_region_code'] = 'WORLD'
country_df['subdivision_type'] = 'countries'
country_df['region_type'] = 'country'
country_df['leaf_region_code'] = country_df['region_code']
country_df['level_1_region_code'] = country_df['region_code']
country_df['level_2_region_code'] = None
country_df['level_3_region_code'] = None
st.subheader('Countries including duplicate ISO-3166-1 / ISO-3166-2 regions')
st.write(country_df)
################################################################################
##### Remove duplicates for regions that could appear as either Level 1 ######
##### or as Level 2 regions, based on whether data sources are separate ######
################################################################################
# Treat Netherlands + Aruba + Curaçao + Sint Maarten (Dutch part) as a single level 1 entity
country_df = country_df[country_df['wikidata_id'] != 'Q55']
# Keep Western Sahara wikidata entry (Q6250) instead of Q40362
country_df = country_df[country_df['wikidata_id'] != 'Q40362']
# These regions appear as both ISO-1 and ISO-2, but we will count them as ISO-2
# so we remove them from the ISO-1 list
# Leave as ISO1 because they have separate data sources: Taiwain, Hong Kong, Macao
regions_to_remove_from_iso1 = {
'ALA': 'Åland Islands', # Finland: FI-01
'BLM': 'Saint Barthélemy', # France: FR-BL Saint Barthélemy (BL)
'GUF': 'French Guiana', # France: FR-GF French Guiana (GF)
'GLP': 'Guadeloupe', # France: FR-GP Guadeloupe (GP)
'MAF': 'Saint Martin (French part)', # France: FR-MF Saint Martin (MF)
'MTQ': 'Martinique', # France: FR-MQ Martinique (MQ)
'NCL': 'New Caledonia', # France: FR-NC New Caledonia (NC)
'PYF': 'French Polynesia', # France: FR-PF French Polynesia (PF)
'SPM': 'Saint Pierre and Miquelon', # France: FR-PM Saint Pierre and Miquelon (PM)
'REU': 'Réunion', # France: FR-RE Réunion (RE)
'ATF': 'French Southern and Antarctic Lands', # France: FR-TF French Southern Territories (TF)
'WLF': 'Wallis and Futuna', # France: FR-WF Wallis and Futuna (WF)
'MYT': 'Mayotte', # France: FR-YT Mayotte (YT)
'SJM': 'Svalbard and Jan Mayen', # Norway: NO-21 Svalbard, NO-22 Jan Mayen
'BES': 'Caribbean Netherlands', # Netherlands: NL-BQ1 Bonaire (BQ), NL-BQ2 Saba (BQ), NL-BQ3 Sint Eustatius (BQ)
'ABW': 'Aruba', # Netherlands: NL-AW Aruba (AW)
'CUW': 'Curaçao', # Netherlands: NL-CW Curaçao (CW)
'SXM': 'Sint Maarten (Dutch part)', # Netherlands: NL-SX Sint Maarten (SX)
'ASM': 'American Samoa', # United States: US-AS
'GUM': 'Guam', # United States: US-GU
'MNP': 'Northern Mariana Islands', # United States: US-MP
'PRI': 'Puerto Rico', # United States: US-PR
'UMI': 'United States Minor Outlying Islands', # United States: US-UM
'VIR': 'United States Virgin Islands', # United States: US-VI
}
st.write(len(regions_to_remove_from_iso1))
country_df = country_df[~country_df['region_code'].isin(regions_to_remove_from_iso1.keys())]
st.subheader('Countries without duplicate ISO-3166-1 / ISO-3166-2 regions')
################################################################################
##### Generate datacommons ids using the known format for the dcids ######
################################################################################
country_df['datacommons_id'] = country_df.apply(lambda x: 'country/' + x['region_code'], axis=1)
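# e.g. a row whose region_code is 'USA' gets datacommons_id 'country/USA'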
st.write(country_df)
st.write(country_df.shape)
country_df.to_csv(
os.path.join(path_utils.path_to('locations_intermediate_dir'), 'iso_3166_1_locations.csv'), index=False)
| apache-2.0 | -6,122,616,209,773,287,000 | 49.460993 | 824 | 0.657625 | false |
LamCiuLoeng/fd | rpac/model/__init__.py | 1 | 2426 | # -*- coding: utf-8 -*-
"""The application's model objects"""
from zope.sqlalchemy import ZopeTransactionExtension
from sqlalchemy.orm import scoped_session, sessionmaker
#from sqlalchemy import MetaData
from sqlalchemy.ext.declarative import declarative_base
# Global session manager: DBSession() returns the Thread-local
# session object appropriate for the current web request.
maker = sessionmaker( autoflush = True, autocommit = False,
extension = ZopeTransactionExtension() )
DBSession = scoped_session( maker )
# Base class for all of our model classes: By default, the data model is
# defined with SQLAlchemy's declarative extension, but if you need more
# control, you can switch to the traditional method.
DeclarativeBase = declarative_base()
# There are two convenient ways for you to spare some typing.
# You can have a query property on all your model classes by doing this:
# DeclarativeBase.query = DBSession.query_property()
# Or you can use a session-aware mapper as it was used in TurboGears 1:
# DeclarativeBase = declarative_base(mapper=DBSession.mapper)
# Global metadata.
# The default metadata is the one from the declarative base.
metadata = DeclarativeBase.metadata
# If you have multiple databases with overlapping table names, you'll need a
# metadata for each database. Feel free to rename 'metadata2'.
#metadata2 = MetaData()
#####
# Generally you will not want to define your table's mappers, and data objects
# here in __init__ but will want to create modules them in the model directory
# and import them at the bottom of this file.
#
######
def init_model( engine ):
"""Call me before using any of the tables or classes in the model."""
DBSession.configure( bind = engine )
# If you are using reflection to introspect your database and create
# table objects for you, your tables must be defined and mapped inside
# the init_model function, so that the engine is available if you
# use the model outside tg2, you need to make sure this is called before
# you use the model.
#
# See the following example:
#global t_reflected
#t_reflected = Table("Reflected", metadata,
# autoload=True, autoload_with=engine)
#mapper(Reflected, t_reflected)
# Import your model modules here.
qry = DBSession.query
from auth import User, Group, Permission
from logic import *
from ordering import *
from master import *
| mit | -2,430,196,997,975,855,000 | 34.676471 | 78 | 0.741962 | false |
gtaylor/paypal-python | paypal/interface.py | 1 | 19787 | # coding=utf-8
"""
The end developer will do most of their work with the PayPalInterface class
found in this module. Configuration, querying, and manipulation can all be done
with it.
"""
import types
import logging
from pprint import pformat
import warnings
import requests
from paypal.settings import PayPalConfig
from paypal.response import PayPalResponse
from paypal.response_list import PayPalResponseList
from paypal.exceptions import (PayPalError,
PayPalAPIResponseError,
PayPalConfigError)
from paypal.compat import is_py3
if is_py3:
#noinspection PyUnresolvedReferences
from urllib.parse import urlencode
else:
from urllib import urlencode
logger = logging.getLogger('paypal.interface')
class PayPalInterface(object):
__credentials = ['USER', 'PWD', 'SIGNATURE', 'SUBJECT']
"""
The end developers will do 95% of their work through this class. API
queries, configuration, etc, all go through here. See the __init__ method
for config related details.
"""
def __init__(self, config=None, **kwargs):
"""
Constructor, which passes all config directives to the config class
via kwargs. For example:
paypal = PayPalInterface(API_USERNAME='somevalue')
Optionally, you may pass a 'config' kwarg to provide your own
PayPalConfig object.
"""
if config:
# User provided their own PayPalConfig object.
self.config = config
else:
# Take the kwargs and stuff them in a new PayPalConfig object.
self.config = PayPalConfig(**kwargs)
def _encode_utf8(self, **kwargs):
"""
UTF8 encodes all of the NVP values.
"""
if is_py3:
# This is only valid for Python 2. In Python 3, unicode is
# everywhere (yay).
return kwargs
unencoded_pairs = kwargs
for i in unencoded_pairs.keys():
#noinspection PyUnresolvedReferences
if isinstance(unencoded_pairs[i], types.UnicodeType):
unencoded_pairs[i] = unencoded_pairs[i].encode('utf-8')
return unencoded_pairs
def _check_required(self, requires, **kwargs):
"""
Checks kwargs for the values specified in 'requires', which is a tuple
of strings. These strings are the NVP names of the required values.
"""
for req in requires:
# PayPal api is never mixed-case.
if req.lower() not in kwargs and req.upper() not in kwargs:
raise PayPalError('missing required : %s' % req)
def _sanitize_locals(self, data):
"""
Remove the 'self' key in locals()
It's more explicit to do it in one function
"""
if 'self' in data:
data = data.copy()
del data['self']
return data
def _call(self, method, **kwargs):
"""
Wrapper method for executing all API commands over HTTP. This method is
further used to implement wrapper methods listed here:
https://www.x.com/docs/DOC-1374
``method`` must be a supported NVP method listed at the above address.
``kwargs`` the actual call parameters
"""
post_params = self._get_call_params(method, **kwargs)
payload = post_params['data']
api_endpoint = post_params['url']
# This shows all of the key/val pairs we're sending to PayPal.
if logger.isEnabledFor(logging.DEBUG):
logger.debug('PayPal NVP Query Key/Vals:\n%s' % pformat(payload))
http_response = requests.post(**post_params)
response = PayPalResponse(http_response.text, self.config)
logger.debug('PayPal NVP API Endpoint: %s' % api_endpoint)
if not response.success:
raise PayPalAPIResponseError(response)
return response
def _get_call_params(self, method, **kwargs):
"""
Returns the prepared call parameters. Mind, these will be keyword
arguments to ``requests.post``.
``method`` the NVP method
``kwargs`` the actual call parameters
"""
payload = {'METHOD': method,
'VERSION': self.config.API_VERSION}
certificate = None
if self.config.API_AUTHENTICATION_MODE == "3TOKEN":
payload['USER'] = self.config.API_USERNAME
payload['PWD'] = self.config.API_PASSWORD
payload['SIGNATURE'] = self.config.API_SIGNATURE
elif self.config.API_AUTHENTICATION_MODE == "CERTIFICATE":
payload['USER'] = self.config.API_USERNAME
payload['PWD'] = self.config.API_PASSWORD
certificate = (self.config.API_CERTIFICATE_FILENAME,
self.config.API_KEY_FILENAME)
elif self.config.API_AUTHENTICATION_MODE == "UNIPAY":
payload['SUBJECT'] = self.config.UNIPAY_SUBJECT
none_configs = [config for config, value in payload.items()
if value is None]
if none_configs:
raise PayPalConfigError(
"Config(s) %s cannot be None. Please, check this "
"interface's config." % none_configs)
# all keys in the payload must be uppercase
for key, value in kwargs.items():
payload[key.upper()] = value
return {'data': payload,
'cert': certificate,
'url': self.config.API_ENDPOINT,
'timeout': self.config.HTTP_TIMEOUT,
'verify': self.config.API_CA_CERTS}
def address_verify(self, email, street, zip):
"""Shortcut for the AddressVerify method.
``email``::
Email address of a PayPal member to verify.
Maximum string length: 255 single-byte characters
Input mask: ?@?.??
``street``::
First line of the billing or shipping postal address to verify.
To pass verification, the value of Street must match the first three
single-byte characters of a postal address on file for the PayPal member.
Maximum string length: 35 single-byte characters.
Alphanumeric plus - , . ‘ # \
Whitespace and case of input value are ignored.
``zip``::
Postal code to verify.
To pass verification, the value of Zip mustmatch the first five
single-byte characters of the postal code of the verified postal
address for the verified PayPal member.
Maximumstring length: 16 single-byte characters.
Whitespace and case of input value are ignored.
"""
args = self._sanitize_locals(locals())
return self._call('AddressVerify', **args)
def create_recurring_payments_profile(self, **kwargs):
"""Shortcut for the CreateRecurringPaymentsProfile method.
Currently, this method only supports the Direct Payment flavor.
It requires standard credit card information and a few additional
parameters related to the billing. e.g.:
profile_info = {
# Credit card information
'creditcardtype': 'Visa',
'acct': '4812177017895760',
'expdate': '102015',
'cvv2': '123',
'firstname': 'John',
'lastname': 'Doe',
'street': '1313 Mockingbird Lane',
'city': 'Beverly Hills',
'state': 'CA',
'zip': '90110',
'countrycode': 'US',
'currencycode': 'USD',
# Recurring payment information
'profilestartdate': '2010-10-25T0:0:0',
'billingperiod': 'Month',
'billingfrequency': '6',
'amt': '10.00',
'desc': '6 months of our product.'
}
response = create_recurring_payments_profile(**profile_info)
The above NVPs compose the bare-minimum request for creating a
profile. For the complete list of parameters, visit this URI:
https://www.x.com/docs/DOC-1168
"""
return self._call('CreateRecurringPaymentsProfile', **kwargs)
def do_authorization(self, transactionid, amt):
"""Shortcut for the DoAuthorization method.
Use the TRANSACTIONID from DoExpressCheckoutPayment for the
``transactionid``. The latest version of the API does not support the
creation of an Order from `DoDirectPayment`.
The `amt` should be the same as passed to `DoExpressCheckoutPayment`.
Flow for a payment involving a `DoAuthorization` call::
1. One or many calls to `SetExpressCheckout` with pertinent order
details, returns `TOKEN`
1. `DoExpressCheckoutPayment` with `TOKEN`, `PAYMENTACTION` set to
Order, `AMT` set to the amount of the transaction, returns
`TRANSACTIONID`
1. `DoAuthorization` with `TRANSACTIONID` and `AMT` set to the
amount of the transaction.
1. `DoCapture` with the `AUTHORIZATIONID` (the `TRANSACTIONID`
returned by `DoAuthorization`)
"""
args = self._sanitize_locals(locals())
return self._call('DoAuthorization', **args)
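    # Illustrative Order/Authorization/Capture flow built on the wrappers in
    # this class (credentials, token, payer id and amounts are hypothetical):
    #   iface = PayPalInterface(API_USERNAME=..., API_PASSWORD=...,
    #                           API_SIGNATURE=...)
    #   setec = iface.set_express_checkout(PAYMENTREQUEST_0_AMT='10.00', ...)
    #   doec = iface.do_express_checkout_payment(TOKEN=setec.TOKEN,
    #                                            PAYMENTACTION='Order',
    #                                            PAYERID='...', AMT='10.00')
    #   auth = iface.do_authorization(doec.TRANSACTIONID, '10.00')
    #   iface.do_capture(auth.TRANSACTIONID, '10.00')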
def do_capture(self, authorizationid, amt, completetype='Complete',
**kwargs):
"""Shortcut for the DoCapture method.
Use the TRANSACTIONID from DoAuthorization, DoDirectPayment or
DoExpressCheckoutPayment for the ``authorizationid``.
The `amt` should be the same as the authorized transaction.
"""
kwargs.update(self._sanitize_locals(locals()))
return self._call('DoCapture', **kwargs)
def do_direct_payment(self, paymentaction="Sale", **kwargs):
"""Shortcut for the DoDirectPayment method.
``paymentaction`` could be 'Authorization' or 'Sale'
To issue a Sale immediately::
charge = {
'amt': '10.00',
'creditcardtype': 'Visa',
'acct': '4812177017895760',
'expdate': '012010',
'cvv2': '962',
'firstname': 'John',
'lastname': 'Doe',
'street': '1 Main St',
'city': 'San Jose',
'state': 'CA',
'zip': '95131',
'countrycode': 'US',
'currencycode': 'USD',
}
direct_payment("Sale", **charge)
Or, since "Sale" is the default:
direct_payment(**charge)
To issue an Authorization, simply pass "Authorization" instead
of "Sale".
You may also explicitly set ``paymentaction`` as a keyword argument:
...
direct_payment(paymentaction="Sale", **charge)
"""
kwargs.update(self._sanitize_locals(locals()))
return self._call('DoDirectPayment', **kwargs)
def do_void(self, **kwargs):
"""Shortcut for the DoVoid method.
Use the TRANSACTIONID from DoAuthorization, DoDirectPayment or
DoExpressCheckoutPayment for the ``AUTHORIZATIONID``.
Required Kwargs
---------------
* AUTHORIZATIONID
"""
return self._call('DoVoid', **kwargs)
def get_express_checkout_details(self, **kwargs):
"""Shortcut for the GetExpressCheckoutDetails method.
Required Kwargs
---------------
* TOKEN
"""
return self._call('GetExpressCheckoutDetails', **kwargs)
def get_transaction_details(self, **kwargs):
"""Shortcut for the GetTransactionDetails method.
Use the TRANSACTIONID from DoAuthorization, DoDirectPayment or
DoExpressCheckoutPayment for the ``transactionid``.
Required Kwargs
---------------
* TRANSACTIONID
"""
return self._call('GetTransactionDetails', **kwargs)
def transaction_search(self, **kwargs):
"""Shortcut for the TransactionSearch method.
Returns a PayPalResponseList object, which merges the L_ syntax list
to a list of dictionaries with properly named keys.
Note that the API will limit returned transactions to 100.
Required Kwargs
---------------
* STARTDATE
Optional Kwargs
---------------
STATUS = one of ['Pending','Processing','Success','Denied','Reversed']
"""
plain = self._call('TransactionSearch', **kwargs)
return PayPalResponseList(plain.raw, self.config)
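    # Example (hypothetical interface object and date):
    #   iface.transaction_search(STARTDATE='2014-01-01T00:00:00Z')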
def set_express_checkout(self, **kwargs):
"""Start an Express checkout.
You'll want to use this in conjunction with
:meth:`generate_express_checkout_redirect_url` to create a payment,
then figure out where to redirect the user to for them to
authorize the payment on PayPal's website.
Required Kwargs
---------------
* PAYMENTREQUEST_0_AMT
* PAYMENTREQUEST_0_PAYMENTACTION
* RETURNURL
* CANCELURL
"""
return self._call('SetExpressCheckout', **kwargs)
def refund_transaction(self, transactionid=None, payerid=None, **kwargs):
"""Shortcut for RefundTransaction method.
Note new API supports passing a PayerID instead of a transaction id,
exactly one must be provided.
Optional:
INVOICEID
REFUNDTYPE
AMT
CURRENCYCODE
NOTE
RETRYUNTIL
REFUNDSOURCE
MERCHANTSTOREDETAILS
REFUNDADVICE
REFUNDITEMDETAILS
MSGSUBID
MERCHANSTOREDETAILS has two fields:
STOREID
TERMINALID
"""
# This line seems like a complete waste of time... kwargs should not
# be populated
if (transactionid is None) and (payerid is None):
raise PayPalError(
'RefundTransaction requires either a transactionid or '
'a payerid')
if (transactionid is not None) and (payerid is not None):
raise PayPalError(
'RefundTransaction requires only one of transactionid %s '
'and payerid %s' % (transactionid, payerid))
if transactionid is not None:
kwargs['TRANSACTIONID'] = transactionid
else:
kwargs['PAYERID'] = payerid
return self._call('RefundTransaction', **kwargs)
def do_express_checkout_payment(self, **kwargs):
"""Finishes an Express checkout.
TOKEN is the token that was returned earlier by
:meth:`set_express_checkout`. This identifies the transaction.
Required
--------
* TOKEN
* PAYMENTACTION
* PAYERID
* AMT
"""
return self._call('DoExpressCheckoutPayment', **kwargs)
def generate_express_checkout_redirect_url(self, token, useraction=None):
"""Returns the URL to redirect the user to for the Express checkout.
Express Checkouts must be verified by the customer by redirecting them
to the PayPal website. Use the token returned in the response from
:meth:`set_express_checkout` with this function to figure out where
to redirect the user to.
The button text on the PayPal page can be controlled via `useraction`.
The documented possible values are `commit` and `continue`. However,
any other value will only result in a warning.
:param str token: The unique token identifying this transaction.
:param str useraction: Control the button text on the PayPal page.
:rtype: str
:returns: The URL to redirect the user to for approval.
"""
url_vars = (self.config.PAYPAL_URL_BASE, token)
url = "%s?cmd=_express-checkout&token=%s" % url_vars
if useraction:
if not useraction.lower() in ('commit', 'continue'):
warnings.warn('useraction=%s is not documented' % useraction,
RuntimeWarning)
url += '&useraction=%s' % useraction
return url
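    # Typical Express Checkout handoff (sketch; values are hypothetical):
    #   setec = iface.set_express_checkout(PAYMENTREQUEST_0_AMT='10.00',
    #                                      PAYMENTREQUEST_0_PAYMENTACTION='Sale',
    #                                      RETURNURL='...', CANCELURL='...')
    #   redirect_url = iface.generate_express_checkout_redirect_url(setec.TOKEN)
    #   # send the buyer to redirect_url to approve the payment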
def generate_cart_upload_redirect_url(self, **kwargs):
"""https://www.sandbox.paypal.com/webscr
?cmd=_cart
&upload=1
"""
required_vals = ('business', 'item_name_1', 'amount_1', 'quantity_1')
self._check_required(required_vals, **kwargs)
url = "%s?cmd=_cart&upload=1" % self.config.PAYPAL_URL_BASE
additional = self._encode_utf8(**kwargs)
additional = urlencode(additional)
return url + "&" + additional
def get_recurring_payments_profile_details(self, profileid):
"""Shortcut for the GetRecurringPaymentsProfile method.
This returns details for a recurring payment plan. The ``profileid`` is
a value included in the response retrieved by the function
``create_recurring_payments_profile``. The profile details include the
data provided when the profile was created as well as default values
for ignored fields and some pertinent stastics.
e.g.:
response = create_recurring_payments_profile(**profile_info)
profileid = response.PROFILEID
details = get_recurring_payments_profile(profileid)
The response from PayPal is somewhat self-explanatory, but for a
description of each field, visit the following URI:
https://www.x.com/docs/DOC-1194
"""
args = self._sanitize_locals(locals())
return self._call('GetRecurringPaymentsProfileDetails', **args)
def manage_recurring_payments_profile_status(self, profileid, action,
note=None):
"""Shortcut to the ManageRecurringPaymentsProfileStatus method.
``profileid`` is the same profile id used for getting profile details.
``action`` should be either 'Cancel', 'Suspend', or 'Reactivate'.
``note`` is optional and is visible to the user. It contains the
reason for the change in status.
"""
args = self._sanitize_locals(locals())
if not note:
del args['note']
return self._call('ManageRecurringPaymentsProfileStatus', **args)
def update_recurring_payments_profile(self, profileid, **kwargs):
"""Shortcut to the UpdateRecurringPaymentsProfile method.
``profileid`` is the same profile id used for getting profile details.
The keyed arguments are data in the payment profile which you wish to
change. The profileid does not change. Anything else will take the new
value. Most of, though not all of, the fields available are shared
with creating a profile, but for the complete list of parameters, you
can visit the following URI:
https://www.x.com/docs/DOC-1212
"""
kwargs.update(self._sanitize_locals(locals()))
return self._call('UpdateRecurringPaymentsProfile', **kwargs)
def bm_create_button(self, **kwargs):
"""Shortcut to the BMCreateButton method.
See the docs for details on arguments:
https://cms.paypal.com/mx/cgi-bin/?cmd=_render-content&content_ID=developer/e_howto_api_nvp_BMCreateButton
The L_BUTTONVARn fields are especially important, so make sure to
read those and act accordingly. See unit tests for some examples.
"""
kwargs.update(self._sanitize_locals(locals()))
return self._call('BMCreateButton', **kwargs)
| apache-2.0 | -4,156,943,831,509,777,000 | 36.471591 | 114 | 0.598838 | false |
NeCTAR-RC/ceilometer | ceilometer/compute/pollsters/cpu.py | 1 | 4105 | # -*- encoding: utf-8 -*-
#
# Copyright © 2012 eNovance <[email protected]>
# Copyright © 2012 Red Hat, Inc
#
# Author: Julien Danjou <[email protected]>
# Author: Eoghan Glynn <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometer.compute import plugin
from ceilometer.compute.pollsters import util
from ceilometer.compute.virt import inspector as virt_inspector
from ceilometer.openstack.common.gettextutils import _ # noqa
from ceilometer.openstack.common import log
from ceilometer import sample
LOG = log.getLogger(__name__)
class CPUPollster(plugin.ComputePollster):
def get_samples(self, manager, cache, resources):
for instance in resources:
LOG.info(_('checking instance %s'), instance.id)
instance_name = util.instance_name(instance)
try:
cpu_info = manager.inspector.inspect_cpus(instance_name)
LOG.info(_("CPUTIME USAGE: %(instance)s %(time)d"),
{'instance': instance.__dict__,
'time': cpu_info.time})
cpu_num = {'cpu_number': cpu_info.number}
yield util.make_sample_from_instance(
instance,
name='cpu',
type=sample.TYPE_CUMULATIVE,
unit='ns',
volume=cpu_info.time,
additional_metadata=cpu_num,
)
except virt_inspector.InstanceNotFoundException as err:
# Instance was deleted while getting samples. Ignore it.
LOG.debug(_('Exception while getting samples %s'), err)
except NotImplementedError:
# Selected inspector does not implement this pollster.
LOG.debug(_('Obtaining CPU time is not implemented for %s'
), manager.inspector.__class__.__name__)
except Exception as err:
LOG.error(_('could not get CPU time for %(id)s: %(e)s') % (
{'id': instance.id, 'e': err}))
LOG.exception(err)
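# Note: CPUPollster above emits the cumulative guest CPU time ('cpu', in ns),
# while CPUUtilPollster below emits a utilisation percentage ('cpu_util')
# computed by the inspector over the last inspection interval.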
class CPUUtilPollster(plugin.ComputePollster):
def get_samples(self, manager, cache, resources):
self._inspection_duration = self._record_poll_time()
for instance in resources:
LOG.debug(_('Checking CPU util for instance %s'), instance.id)
try:
cpu_info = manager.inspector.inspect_cpu_util(
instance, self._inspection_duration)
LOG.debug(_("CPU UTIL: %(instance)s %(util)d"),
({'instance': instance.__dict__,
'util': cpu_info.util}))
yield util.make_sample_from_instance(
instance,
name='cpu_util',
type=sample.TYPE_GAUGE,
unit='%',
volume=cpu_info.util,
)
except virt_inspector.InstanceNotFoundException as err:
# Instance was deleted while getting samples. Ignore it.
LOG.debug(_('Exception while getting samples %s'), err)
except NotImplementedError:
# Selected inspector does not implement this pollster.
LOG.debug(_('Obtaining CPU Util is not implemented for %s'
), manager.inspector.__class__.__name__)
except Exception as err:
LOG.error(_('Could not get CPU Util for %(id)s: %(e)s'), (
{'id': instance.id, 'e': err}))
| apache-2.0 | -2,454,638,988,634,142,700 | 43.597826 | 75 | 0.574945 | false |
openstack/ironic | ironic/common/context.py | 1 | 1975 | # -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_context import context
class RequestContext(context.RequestContext):
"""Extends security contexts from the oslo.context library."""
def __init__(self, is_public_api=False, **kwargs):
"""Initialize the RequestContext
:param is_public_api: Specifies whether the request should be processed
without authentication.
:param kwargs: additional arguments passed to oslo.context.
"""
super(RequestContext, self).__init__(**kwargs)
self.is_public_api = is_public_api
def to_policy_values(self):
policy_values = super(RequestContext, self).to_policy_values()
policy_values.update({
'project_name': self.project_name,
'is_public_api': self.is_public_api,
})
return policy_values
def ensure_thread_contain_context(self):
"""Ensure threading contains context
For async/periodic tasks, the context of local thread is missing.
Set it with request context and this is useful to log the request_id
in log messages.
"""
if context.get_current():
return
self.update_store()
def get_admin_context():
"""Create an administrator context."""
context = RequestContext(auth_token=None,
project_id=None,
overwrite=False)
return context
| apache-2.0 | -8,362,864,803,926,115,000 | 33.051724 | 79 | 0.65519 | false |
openstack/os-win | os_win/tests/unit/test_utils.py | 1 | 14238 | # Copyright 2015 Cloudbase Solutions SRL
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for the os_win._utils module.
"""
from unittest import mock
import ddt
from os_win import _utils
from os_win import constants
from os_win import exceptions
from os_win.tests.unit import test_base
@ddt.ddt
class UtilsTestCase(test_base.BaseTestCase):
@mock.patch('oslo_concurrency.processutils.execute')
def test_execute(self, mock_execute):
_utils.execute(mock.sentinel.cmd, kwarg=mock.sentinel.kwarg)
mock_execute.assert_called_once_with(mock.sentinel.cmd,
kwarg=mock.sentinel.kwarg)
def test_parse_server_string(self):
result = _utils.parse_server_string('::1')
self.assertEqual(('::1', ''), result)
result = _utils.parse_server_string('[::1]:8773')
self.assertEqual(('::1', '8773'), result)
result = _utils.parse_server_string('2001:db8::192.168.1.1')
self.assertEqual(('2001:db8::192.168.1.1', ''), result)
result = _utils.parse_server_string('[2001:db8::192.168.1.1]:8773')
self.assertEqual(('2001:db8::192.168.1.1', '8773'), result)
result = _utils.parse_server_string('192.168.1.1')
self.assertEqual(('192.168.1.1', ''), result)
result = _utils.parse_server_string('192.168.1.2:8773')
self.assertEqual(('192.168.1.2', '8773'), result)
result = _utils.parse_server_string('192.168.1.3')
self.assertEqual(('192.168.1.3', ''), result)
result = _utils.parse_server_string('www.example.com:8443')
self.assertEqual(('www.example.com', '8443'), result)
result = _utils.parse_server_string('www.example.com')
self.assertEqual(('www.example.com', ''), result)
# error case
result = _utils.parse_server_string('www.exa:mple.com:8443')
self.assertEqual(('', ''), result)
result = _utils.parse_server_string('')
self.assertEqual(('', ''), result)
def _get_fake_func_with_retry_decorator(self, side_effect,
decorator=_utils.retry_decorator,
*args, **kwargs):
func_side_effect = mock.Mock(side_effect=side_effect)
@decorator(*args, **kwargs)
def fake_func(*_args, **_kwargs):
return func_side_effect(*_args, **_kwargs)
return fake_func, func_side_effect
@mock.patch.object(_utils, 'time')
def test_retry_decorator(self, mock_time):
err_code = 1
max_retry_count = 5
max_sleep_time = 2
timeout = max_retry_count + 1
mock_time.time.side_effect = range(timeout)
raised_exc = exceptions.Win32Exception(message='fake_exc',
error_code=err_code)
side_effect = [raised_exc] * max_retry_count
side_effect.append(mock.sentinel.ret_val)
(fake_func,
fake_func_side_effect) = self._get_fake_func_with_retry_decorator(
error_codes=err_code,
exceptions=exceptions.Win32Exception,
max_retry_count=max_retry_count,
max_sleep_time=max_sleep_time,
timeout=timeout,
side_effect=side_effect)
ret_val = fake_func(mock.sentinel.arg,
kwarg=mock.sentinel.kwarg)
self.assertEqual(mock.sentinel.ret_val, ret_val)
fake_func_side_effect.assert_has_calls(
[mock.call(mock.sentinel.arg, kwarg=mock.sentinel.kwarg)] *
(max_retry_count + 1))
self.assertEqual(max_retry_count + 1, mock_time.time.call_count)
mock_time.sleep.assert_has_calls(
[mock.call(sleep_time)
for sleep_time in [1, 2, 2, 2, 1]])
@mock.patch.object(_utils, 'time')
def _test_retry_decorator_exceeded(self, mock_time, expected_try_count,
mock_time_side_eff=None,
timeout=None, max_retry_count=None):
raised_exc = exceptions.Win32Exception(message='fake_exc')
mock_time.time.side_effect = mock_time_side_eff
(fake_func,
fake_func_side_effect) = self._get_fake_func_with_retry_decorator(
exceptions=exceptions.Win32Exception,
timeout=timeout,
side_effect=raised_exc)
self.assertRaises(exceptions.Win32Exception, fake_func)
fake_func_side_effect.assert_has_calls(
[mock.call()] * expected_try_count)
def test_retry_decorator_tries_exceeded(self):
self._test_retry_decorator_exceeded(
max_retry_count=2,
expected_try_count=3)
def test_retry_decorator_time_exceeded(self):
self._test_retry_decorator_exceeded(
mock_time_side_eff=[0, 1, 4],
timeout=3,
expected_try_count=1)
@mock.patch('time.sleep')
def _test_retry_decorator_no_retry(self, mock_sleep,
expected_exceptions=(),
expected_error_codes=()):
err_code = 1
raised_exc = exceptions.Win32Exception(message='fake_exc',
error_code=err_code)
fake_func, fake_func_side_effect = (
self._get_fake_func_with_retry_decorator(
error_codes=expected_error_codes,
exceptions=expected_exceptions,
side_effect=raised_exc))
self.assertRaises(exceptions.Win32Exception,
fake_func, mock.sentinel.arg,
fake_kwarg=mock.sentinel.kwarg)
self.assertFalse(mock_sleep.called)
fake_func_side_effect.assert_called_once_with(
mock.sentinel.arg, fake_kwarg=mock.sentinel.kwarg)
def test_retry_decorator_unexpected_err_code(self):
self._test_retry_decorator_no_retry(
expected_exceptions=exceptions.Win32Exception,
expected_error_codes=2)
def test_retry_decorator_unexpected_exc(self):
self._test_retry_decorator_no_retry(
expected_exceptions=(IOError, AttributeError))
@mock.patch('time.sleep')
def test_retry_decorator_explicitly_avoid_retry(self, mock_sleep):
# Tests the case when there is a function aware of the retry
# decorator and explicitly requests that no retry should be
# performed.
def func_side_effect(fake_arg, retry_context):
self.assertEqual(mock.sentinel.arg, fake_arg)
self.assertEqual(retry_context, dict(prevent_retry=False))
retry_context['prevent_retry'] = True
raise exceptions.Win32Exception(message='fake_exc',
error_code=1)
fake_func, mock_side_effect = (
self._get_fake_func_with_retry_decorator(
exceptions=exceptions.Win32Exception,
side_effect=func_side_effect,
pass_retry_context=True))
self.assertRaises(exceptions.Win32Exception,
fake_func, mock.sentinel.arg)
self.assertEqual(1, mock_side_effect.call_count)
self.assertFalse(mock_sleep.called)
@mock.patch.object(_utils.socket, 'getaddrinfo')
def test_get_ips(self, mock_getaddrinfo):
ips = ['1.2.3.4', '5.6.7.8']
mock_getaddrinfo.return_value = [
(None, None, None, None, (ip, 0)) for ip in ips]
resulted_ips = _utils.get_ips(mock.sentinel.addr)
self.assertEqual(ips, resulted_ips)
mock_getaddrinfo.assert_called_once_with(
mock.sentinel.addr, None, 0, 0, 0)
@mock.patch('eventlet.tpool.execute')
@mock.patch('eventlet.getcurrent')
@ddt.data(mock.Mock(), None)
def test_avoid_blocking_call(self, gt_parent, mock_get_current_gt,
mock_execute):
mock_get_current_gt.return_value.parent = gt_parent
mock_execute.return_value = mock.sentinel.ret_val
def fake_blocking_func(*args, **kwargs):
self.assertEqual((mock.sentinel.arg, ), args)
self.assertEqual(dict(kwarg=mock.sentinel.kwarg),
kwargs)
return mock.sentinel.ret_val
fake_blocking_func_decorated = (
_utils.avoid_blocking_call_decorator(fake_blocking_func))
ret_val = fake_blocking_func_decorated(mock.sentinel.arg,
kwarg=mock.sentinel.kwarg)
self.assertEqual(mock.sentinel.ret_val, ret_val)
if gt_parent:
mock_execute.assert_called_once_with(fake_blocking_func,
mock.sentinel.arg,
kwarg=mock.sentinel.kwarg)
else:
self.assertFalse(mock_execute.called)
@mock.patch.object(_utils, 'time')
@ddt.data(True, False)
def test_wmi_retry_decorator(self, expect_hres, mock_time):
expected_hres = 0x8007beef
expected_err_code = expected_hres if expect_hres else 0xbeef
other_hres = 0x80070001
max_retry_count = 5
# The second exception will contain an unexpected error code,
# in which case we expect the function to propagate the error.
expected_try_count = 2
side_effect = [test_base.FakeWMIExc(hresult=expected_hres),
test_base.FakeWMIExc(hresult=other_hres)]
decorator = (_utils.wmi_retry_decorator_hresult if expect_hres
else _utils.wmi_retry_decorator)
(fake_func,
fake_func_side_effect) = self._get_fake_func_with_retry_decorator(
error_codes=expected_err_code,
max_retry_count=max_retry_count,
decorator=decorator,
side_effect=side_effect)
self.assertRaises(test_base.FakeWMIExc,
fake_func,
mock.sentinel.arg,
kwarg=mock.sentinel.kwarg)
fake_func_side_effect.assert_has_calls(
[mock.call(mock.sentinel.arg, kwarg=mock.sentinel.kwarg)] *
expected_try_count)
def test_get_com_error_hresult(self):
fake_hres = -5
expected_hres = (1 << 32) + fake_hres
mock_excepinfo = [None] * 5 + [fake_hres]
mock_com_err = mock.Mock(excepinfo=mock_excepinfo)
ret_val = _utils.get_com_error_hresult(mock_com_err)
self.assertEqual(expected_hres, ret_val)
    def test_get_com_error_hresult_missing_excepinfo(self):
ret_val = _utils.get_com_error_hresult(None)
self.assertIsNone(ret_val)
def test_hresult_to_err_code(self):
        # This could differ based on the error source.
        # Only the last 2 bytes of the hresult represent the error code.
fake_file_exists_hres = -0x7ff8ffb0
file_exists_err_code = 0x50
ret_val = _utils.hresult_to_err_code(fake_file_exists_hres)
self.assertEqual(file_exists_err_code, ret_val)
@mock.patch.object(_utils, 'get_com_error_hresult')
@mock.patch.object(_utils, 'hresult_to_err_code')
def test_get_com_error_code(self, mock_hres_to_err_code, mock_get_hresult):
ret_val = _utils.get_com_error_code(mock.sentinel.com_err)
self.assertEqual(mock_hres_to_err_code.return_value, ret_val)
mock_get_hresult.assert_called_once_with(mock.sentinel.com_err)
mock_hres_to_err_code.assert_called_once_with(
mock_get_hresult.return_value)
@ddt.data(_utils._WBEM_E_NOT_FOUND, mock.sentinel.wbem_error)
def test_is_not_found_exc(self, hresult):
exc = test_base.FakeWMIExc(hresult=hresult)
result = _utils._is_not_found_exc(exc)
expected = hresult == _utils._WBEM_E_NOT_FOUND
self.assertEqual(expected, result)
@mock.patch.object(_utils, 'get_com_error_hresult')
def test_not_found_decorator(self, mock_get_com_error_hresult):
mock_get_com_error_hresult.side_effect = lambda x: x
translated_exc = exceptions.HyperVVMNotFoundException
@_utils.not_found_decorator(
translated_exc=translated_exc)
def f(to_call):
to_call()
to_call = mock.Mock()
to_call.side_effect = exceptions.x_wmi(
'expected error', com_error=_utils._WBEM_E_NOT_FOUND)
self.assertRaises(translated_exc, f, to_call)
to_call.side_effect = exceptions.x_wmi()
self.assertRaises(exceptions.x_wmi, f, to_call)
def test_hex_str_to_byte_array(self):
fake_hex_str = '0x0010A'
resulted_array = _utils.hex_str_to_byte_array(fake_hex_str)
expected_array = bytearray([0, 1, 10])
self.assertEqual(expected_array, resulted_array)
def test_byte_array_to_hex_str(self):
fake_byte_array = bytearray(range(3))
resulted_string = _utils.byte_array_to_hex_str(fake_byte_array)
expected_string = '000102'
self.assertEqual(expected_string, resulted_string)
def test_required_vm_version(self):
@_utils.required_vm_version()
def foo(bar, vmsettings):
pass
mock_vmsettings = mock.Mock()
for good_version in [constants.VM_VERSION_5_0,
constants.VM_VERSION_254_0]:
mock_vmsettings.Version = good_version
foo(mock.sentinel.bar, mock_vmsettings)
for bad_version in ['4.99', '254.1']:
mock_vmsettings.Version = bad_version
self.assertRaises(exceptions.InvalidVMVersion, foo,
mock.sentinel.bar, mock_vmsettings)
| apache-2.0 | -7,459,312,965,330,679,000 | 38.994382 | 79 | 0.599874 | false |
Jimdo/thumbor-logdrain-metrics | vows/metrics_vows.py | 1 | 2062 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/globocom/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
from pyvows import Vows, expect
from thumbor.context import Context, ServerParameters
from thumbor.importer import Importer
from thumbor.config import Config
import thumbor.metrics
import thumbor_logdrain_metrics.metrics.logdrain_metrics
@Vows.batch
class MetricsVows(Vows.Context):
class CanCreateContextWithMetrics(Vows.Context):
def topic(self):
conf = Config()
conf.METRICS = 'thumbor_logdrain_metrics.metrics.logdrain_metrics'
conf.THUMBOR_LOGDRAIN_METRICS_TIMINGS_MAPPING = {'test.count' : 'count', 'test.time' : 'time'}
imp = Importer(conf)
imp.import_modules()
return Context(None, conf, imp)
def should_not_fail_on_use(self, topic):
expect(topic.metrics.incr('test.count')).not_to_be_an_error()
expect(topic.metrics.incr('test.count', 2)).not_to_be_an_error()
expect(topic.metrics.timing('test.time', 100)).not_to_be_an_error()
class CanCreateContextWithMetricsAndThreashold(Vows.Context):
def topic(self):
conf = Config()
conf.METRICS = 'thumbor_logdrain_metrics.metrics.logdrain_metrics'
conf.THUMBOR_LOGDRAIN_METRICS_TIMINGS_MAPPING = {'test.count' : 'count', 'test.time' : 'time'}
conf.THUMBOR_LOGDRAIN_METRICS_THRESHOLD = 2
imp = Importer(conf)
imp.import_modules()
return Context(None, conf, imp)
def should_not_accumulate_values(self, topic):
expect(topic.metrics.incr('test.count')).not_to_be_an_error()
expect(topic.metrics.incr('test.count', 2)).not_to_be_an_error()
expect(topic.metrics.timing('test.time', 100)).not_to_be_an_error()
expect(topic.metrics.timing('test.time', 200)).not_to_be_an_error()
| mit | 3,042,607,727,211,385,000 | 38.653846 | 106 | 0.654704 | false |
MVilstrup/mosquito_1 | mosquito/networking/sink.py | 1 | 1926 | # Task sink
# Binds PULL socket to tcp://localhost:5558
# Collects results from workers via that socket
import zmq
class Sink(object):
def __init__(self,
result_port,
send_port,
break_port="tcp://127.0.0.1:9999"):
context = zmq.Context()
# Socket to receive messages on
self.receive_socket = context.socket(zmq.PULL)
self.receive_socket.bind(result_port)
# Socket to reschedule domains that timed out
self.send_socket = context.socket(zmq.PUSH)
self.send_socket.bind(send_port)
        # Socket used to receive the break (QUIT) signal
        self.break_socket = context.socket(zmq.SUB)
        # Subscribe to everything so the QUIT signal is actually delivered
        self.break_socket.setsockopt_string(zmq.SUBSCRIBE, u"")
        self.break_socket.bind(break_port)
        # Poller used to switch between the two sockets
        self.poller = zmq.Poller()
        self.poller.register(self.receive_socket, zmq.POLLIN)
        self.poller.register(self.break_socket, zmq.POLLIN)
def start(self):
should_continue = True
while should_continue:
            sockets = dict(self.poller.poll())
if self.receive_socket in sockets and sockets[
self.receive_socket] == zmq.POLLIN:
message = self.receive_socket.recv_json()
self._handle_messages(message)
if self.break_socket in sockets and sockets[
self.break_socket] == zmq.POLLIN:
signal = self.break_socket.recv_string()
if signal == "QUIT":
should_continue = False
return
def _handle_messages(self, message):
        raise NotImplementedError("_handle_messages should be implemented")
def send_json(self, message):
return self.send_socket.send_json(message)
def send_string(self, message):
return self.send_socket.send_string(message)
def send(self, message):
return self.send_socket.send(message)
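# Illustrative usage (hypothetical subclass and endpoint URIs):
#   class PrintSink(Sink):
#       def _handle_messages(self, message):
#           print(message)
#   PrintSink("tcp://127.0.0.1:5558", "tcp://127.0.0.1:5557").start()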
| apache-2.0 | 3,985,627,639,154,338,300 | 31.644068 | 64 | 0.59865 | false |
gouzongmei/t1 | src/kimchi/model/vms.py | 1 | 40069 | #
# Project Kimchi
#
# Copyright IBM, Corp. 2014
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from lxml.builder import E
import lxml.etree as ET
from lxml import etree, objectify
import os
import random
import string
import time
import uuid
from xml.etree import ElementTree
import libvirt
from cherrypy.process.plugins import BackgroundTask
from kimchi import model, vnc
from kimchi.config import READONLY_POOL_TYPE, config
from kimchi.exception import InvalidOperation, InvalidParameter
from kimchi.exception import NotFoundError, OperationFailed
from kimchi.model.config import CapabilitiesModel
from kimchi.model.tasks import TaskModel
from kimchi.model.templates import TemplateModel
from kimchi.model.utils import get_vm_name
from kimchi.model.utils import get_metadata_node
from kimchi.model.utils import set_metadata_node
from kimchi.rollbackcontext import RollbackContext
from kimchi.screenshot import VMScreenshot
from kimchi.utils import add_task, get_next_clone_name, import_class
from kimchi.utils import kimchi_log, run_setfacl_set_attr
from kimchi.utils import template_name_from_uri
from kimchi.xmlutils.utils import xpath_get_text, xml_item_update
from kimchi.xmlutils.utils import dictize
DOM_STATE_MAP = {0: 'nostate',
1: 'running',
2: 'blocked',
3: 'paused',
4: 'shutdown',
5: 'shutoff',
6: 'crashed',
7: 'pmsuspended'}
GUESTS_STATS_INTERVAL = 5
VM_STATIC_UPDATE_PARAMS = {'name': './name',
'cpus': './vcpu',
'memory': './memory'}
VM_LIVE_UPDATE_PARAMS = {}
stats = {}
XPATH_DOMAIN_DISK = "/domain/devices/disk[@device='disk']/source/@file"
XPATH_DOMAIN_DISK_BY_FILE = "./devices/disk[@device='disk']/source[@file='%s']"
XPATH_DOMAIN_NAME = '/domain/name'
XPATH_DOMAIN_MAC = "/domain/devices/interface[@type='network']/mac/@address"
XPATH_DOMAIN_MAC_BY_ADDRESS = "./devices/interface[@type='network']/"\
"mac[@address='%s']"
XPATH_DOMAIN_UUID = '/domain/uuid'
class VMsModel(object):
def __init__(self, **kargs):
self.conn = kargs['conn']
self.objstore = kargs['objstore']
self.caps = CapabilitiesModel(**kargs)
self.guests_stats_thread = BackgroundTask(GUESTS_STATS_INTERVAL,
self._update_guests_stats)
self.guests_stats_thread.start()
def _update_guests_stats(self):
vm_list = self.get_list()
for name in vm_list:
try:
dom = VMModel.get_vm(name, self.conn)
vm_uuid = dom.UUIDString()
info = dom.info()
state = DOM_STATE_MAP[info[0]]
if state != 'running':
stats[vm_uuid] = {}
continue
if stats.get(vm_uuid, None) is None:
stats[vm_uuid] = {}
timestamp = time.time()
prevStats = stats.get(vm_uuid, {})
seconds = timestamp - prevStats.get('timestamp', 0)
stats[vm_uuid].update({'timestamp': timestamp})
self._get_percentage_cpu_usage(vm_uuid, info, seconds)
self._get_network_io_rate(vm_uuid, dom, seconds)
self._get_disk_io_rate(vm_uuid, dom, seconds)
except Exception as e:
# VM might be deleted just after we get the list.
# This is OK, just skip.
kimchi_log.debug('Error processing VM stats: %s', e.message)
continue
def _get_percentage_cpu_usage(self, vm_uuid, info, seconds):
prevCpuTime = stats[vm_uuid].get('cputime', 0)
cpus = info[3]
cpuTime = info[4] - prevCpuTime
base = (((cpuTime) * 100.0) / (seconds * 1000.0 * 1000.0 * 1000.0))
percentage = max(0.0, min(100.0, base / cpus))
stats[vm_uuid].update({'cputime': info[4], 'cpu': percentage})
def _get_network_io_rate(self, vm_uuid, dom, seconds):
prevNetRxKB = stats[vm_uuid].get('netRxKB', 0)
prevNetTxKB = stats[vm_uuid].get('netTxKB', 0)
currentMaxNetRate = stats[vm_uuid].get('max_net_io', 100)
rx_bytes = 0
tx_bytes = 0
tree = ElementTree.fromstring(dom.XMLDesc(0))
for target in tree.findall('devices/interface/target'):
dev = target.get('dev')
io = dom.interfaceStats(dev)
rx_bytes += io[0]
tx_bytes += io[4]
netRxKB = float(rx_bytes) / 1000
netTxKB = float(tx_bytes) / 1000
rx_stats = (netRxKB - prevNetRxKB) / seconds
tx_stats = (netTxKB - prevNetTxKB) / seconds
rate = rx_stats + tx_stats
max_net_io = round(max(currentMaxNetRate, int(rate)), 1)
stats[vm_uuid].update({'net_io': rate, 'max_net_io': max_net_io,
'netRxKB': netRxKB, 'netTxKB': netTxKB})
def _get_disk_io_rate(self, vm_uuid, dom, seconds):
prevDiskRdKB = stats[vm_uuid].get('diskRdKB', 0)
prevDiskWrKB = stats[vm_uuid].get('diskWrKB', 0)
currentMaxDiskRate = stats[vm_uuid].get('max_disk_io', 100)
rd_bytes = 0
wr_bytes = 0
tree = ElementTree.fromstring(dom.XMLDesc(0))
for target in tree.findall("devices/disk/target"):
dev = target.get("dev")
io = dom.blockStats(dev)
rd_bytes += io[1]
wr_bytes += io[3]
diskRdKB = float(rd_bytes) / 1024
diskWrKB = float(wr_bytes) / 1024
rd_stats = (diskRdKB - prevDiskRdKB) / seconds
wr_stats = (diskWrKB - prevDiskWrKB) / seconds
rate = rd_stats + wr_stats
max_disk_io = round(max(currentMaxDiskRate, int(rate)), 1)
stats[vm_uuid].update({'disk_io': rate,
'max_disk_io': max_disk_io,
'diskRdKB': diskRdKB,
'diskWrKB': diskWrKB})
def create(self, params):
conn = self.conn.get()
t_name = template_name_from_uri(params['template'])
vm_uuid = str(uuid.uuid4())
vm_list = self.get_list()
name = get_vm_name(params.get('name'), t_name, vm_list)
# incoming text, from js json, is unicode, do not need decode
if name in vm_list:
raise InvalidOperation("KCHVM0001E", {'name': name})
vm_overrides = dict()
pool_uri = params.get('storagepool')
if pool_uri:
vm_overrides['storagepool'] = pool_uri
vm_overrides['fc_host_support'] = self.caps.fc_host_support
t = TemplateModel.get_template(t_name, self.objstore, self.conn,
vm_overrides)
if not self.caps.qemu_stream and t.info.get('iso_stream', False):
raise InvalidOperation("KCHVM0005E")
t.validate()
# Store the icon for displaying later
icon = t.info.get('icon')
if icon:
try:
with self.objstore as session:
session.store('vm', vm_uuid, {'icon': icon})
except Exception as e:
# It is possible to continue Kimchi executions without store
# vm icon info
kimchi_log.error('Error trying to update database with guest '
'icon information due error: %s', e.message)
# If storagepool is SCSI, volumes will be LUNs and must be passed by
# the user from UI or manually.
vol_list = []
if t._get_storage_type() not in ["iscsi", "scsi"]:
vol_list = t.fork_vm_storage(vm_uuid)
graphics = params.get('graphics', {})
stream_protocols = self.caps.libvirt_stream_protocols
xml = t.to_vm_xml(name, vm_uuid,
libvirt_stream_protocols=stream_protocols,
qemu_stream_dns=self.caps.qemu_stream_dns,
graphics=graphics,
volumes=vol_list)
try:
conn.defineXML(xml.encode('utf-8'))
except libvirt.libvirtError as e:
if t._get_storage_type() not in READONLY_POOL_TYPE:
for v in vol_list:
vol = conn.storageVolLookupByPath(v['path'])
vol.delete(0)
raise OperationFailed("KCHVM0007E", {'name': name,
'err': e.get_error_message()})
VMModel.vm_update_os_metadata(VMModel.get_vm(name, self.conn), t.info,
self.caps.metadata_support)
return name
def get_list(self):
return self.get_vms(self.conn)
@staticmethod
def get_vms(conn):
conn = conn.get()
names = [dom.name().decode('utf-8') for dom in conn.listAllDomains(0)]
return sorted(names, key=unicode.lower)
class VMModel(object):
def __init__(self, **kargs):
self.conn = kargs['conn']
self.objstore = kargs['objstore']
self.caps = CapabilitiesModel(**kargs)
self.vmscreenshot = VMScreenshotModel(**kargs)
self.users = import_class('kimchi.model.users.UsersModel')(**kargs)
self.groups = import_class('kimchi.model.groups.GroupsModel')(**kargs)
self.vms = VMsModel(**kargs)
self.task = TaskModel(**kargs)
self.storagepool = model.storagepools.StoragePoolModel(**kargs)
self.storagevolume = model.storagevolumes.StorageVolumeModel(**kargs)
self.storagevolumes = model.storagevolumes.StorageVolumesModel(**kargs)
cls = import_class('kimchi.model.vmsnapshots.VMSnapshotModel')
self.vmsnapshot = cls(**kargs)
cls = import_class('kimchi.model.vmsnapshots.VMSnapshotsModel')
self.vmsnapshots = cls(**kargs)
def update(self, name, params):
dom = self.get_vm(name, self.conn)
dom = self._static_vm_update(dom, params)
self._live_vm_update(dom, params)
return dom.name().decode('utf-8')
def clone(self, name):
"""Clone a virtual machine based on an existing one.
The new virtual machine will have the exact same configuration as the
original VM, except for the name, UUID, MAC addresses and disks. The
name will have the form "<name>-clone-<number>", with <number> starting
at 1; the UUID will be generated randomly; the MAC addresses will be
generated randomly with no conflicts within the original and the new
VM; and the disks will be new volumes [mostly] on the same storage
pool, with the same content as the original disks. The storage pool
'default' will always be used when cloning SCSI and iSCSI disks and
when the original storage pool cannot hold the new volume.
An exception will be raised if the virtual machine <name> is not
shutoff, if there is no available space to copy a new volume to the
storage pool 'default' (when there was also no space to copy it to the
original storage pool) and if one of the virtual machine's disks belong
to a storage pool not supported by Kimchi.
Parameters:
name -- The name of the existing virtual machine to be cloned.
Return:
A Task running the clone operation.
"""
name = name.decode('utf-8')
# VM must be shutoff in order to clone it
info = self.lookup(name)
if info['state'] != u'shutoff':
raise InvalidParameter('KCHVM0033E', {'name': name})
# this name will be used as the Task's 'target_uri' so it needs to be
# defined now.
new_name = get_next_clone_name(self.vms.get_list(), name)
# create a task with the actual clone function
taskid = add_task(u'/vms/%s' % new_name, self._clone_task,
self.objstore,
{'name': name, 'new_name': new_name})
return self.task.lookup(taskid)
def _clone_task(self, cb, params):
"""Asynchronous function which performs the clone operation.
Parameters:
cb -- A callback function to signal the Task's progress.
params -- A dict with the following values:
"name": the name of the original VM.
"new_name": the name of the new VM.
"""
name = params['name']
new_name = params['new_name']
vir_conn = self.conn.get()
# fetch base XML
cb('reading source VM XML')
try:
vir_dom = vir_conn.lookupByName(name)
flags = libvirt.VIR_DOMAIN_XML_SECURE
xml = vir_dom.XMLDesc(flags).decode('utf-8')
except libvirt.libvirtError, e:
raise OperationFailed('KCHVM0035E', {'name': name,
'err': e.message})
# update UUID
cb('updating VM UUID')
old_uuid = xpath_get_text(xml, XPATH_DOMAIN_UUID)[0]
new_uuid = unicode(uuid.uuid4())
xml = xml_item_update(xml, './uuid', new_uuid)
# update MAC addresses
cb('updating VM MAC addresses')
xml = self._clone_update_mac_addresses(xml)
with RollbackContext() as rollback:
# copy disks
cb('copying VM disks')
xml = self._clone_update_disks(xml, rollback)
# update objstore entry
cb('updating object store')
self._clone_update_objstore(old_uuid, new_uuid, rollback)
# update name
cb('updating VM name')
xml = xml_item_update(xml, './name', new_name)
# create new guest
cb('defining new VM')
try:
vir_conn.defineXML(xml)
except libvirt.libvirtError, e:
raise OperationFailed('KCHVM0035E', {'name': name,
'err': e.message})
rollback.commitAll()
cb('OK', True)
@staticmethod
def _clone_update_mac_addresses(xml):
"""Update the MAC addresses with new values in the XML descriptor of a
cloning domain.
The new MAC addresses will be generated randomly, and their values are
guaranteed to be distinct from the ones in the original VM.
Arguments:
xml -- The XML descriptor of the original domain.
Return:
The XML descriptor <xml> with the new MAC addresses instead of the
old ones.
"""
old_macs = xpath_get_text(xml, XPATH_DOMAIN_MAC)
new_macs = []
for mac in old_macs:
while True:
new_mac = model.vmifaces.VMIfacesModel.random_mac()
# make sure the new MAC doesn't conflict with the original VM
# and with the new values on the new VM.
if new_mac not in (old_macs + new_macs):
new_macs.append(new_mac)
break
xml = xml_item_update(xml, XPATH_DOMAIN_MAC_BY_ADDRESS % mac,
new_mac, 'address')
return xml
def _clone_update_disks(self, xml, rollback):
"""Clone disks from a virtual machine. The disks are copied as new
volumes and the new VM's XML is updated accordingly.
Arguments:
xml -- The XML descriptor of the original VM + new value for
"/domain/uuid".
rollback -- A rollback context so the new volumes can be removed if an
error occurs during the cloning operation.
Return:
The XML descriptor <xml> with the new disk paths instead of the
old ones.
"""
# the UUID will be used to create the disk paths
uuid = xpath_get_text(xml, XPATH_DOMAIN_UUID)[0]
all_paths = xpath_get_text(xml, XPATH_DOMAIN_DISK)
vir_conn = self.conn.get()
for i, path in enumerate(all_paths):
try:
vir_orig_vol = vir_conn.storageVolLookupByPath(path)
vir_pool = vir_orig_vol.storagePoolLookupByVolume()
orig_pool_name = vir_pool.name().decode('utf-8')
orig_vol_name = vir_orig_vol.name().decode('utf-8')
except libvirt.libvirtError, e:
domain_name = xpath_get_text(xml, XPATH_DOMAIN_NAME)[0]
raise OperationFailed('KCHVM0035E', {'name': domain_name,
'err': e.message})
orig_pool = self.storagepool.lookup(orig_pool_name)
orig_vol = self.storagevolume.lookup(orig_pool_name, orig_vol_name)
new_pool_name = orig_pool_name
new_pool = orig_pool
if orig_pool['type'] in ['dir', 'netfs', 'logical']:
# if a volume in a pool 'dir', 'netfs' or 'logical' cannot hold
# a new volume with the same size, the pool 'default' should
# be used
if orig_vol['capacity'] > orig_pool['available']:
kimchi_log.warning('storage pool \'%s\' doesn\'t have '
'enough free space to store image '
'\'%s\'; falling back to \'default\'',
orig_pool_name, path)
new_pool_name = u'default'
new_pool = self.storagepool.lookup(u'default')
# ...and if even the pool 'default' cannot hold a new
# volume, raise an exception
if orig_vol['capacity'] > new_pool['available']:
domain_name = xpath_get_text(xml, XPATH_DOMAIN_NAME)[0]
raise InvalidOperation('KCHVM0034E',
{'name': domain_name})
elif orig_pool['type'] in ['scsi', 'iscsi']:
# SCSI and iSCSI always fall back to the storage pool 'default'
kimchi_log.warning('cannot create new volume for clone in '
'storage pool \'%s\'; falling back to '
'\'default\'', orig_pool_name)
new_pool_name = u'default'
new_pool = self.storagepool.lookup(u'default')
# if the pool 'default' cannot hold a new volume, raise
# an exception
if orig_vol['capacity'] > new_pool['available']:
domain_name = xpath_get_text(xml, XPATH_DOMAIN_NAME)[0]
raise InvalidOperation('KCHVM0034E', {'name': domain_name})
else:
# unexpected storage pool type
raise InvalidOperation('KCHPOOL0014E',
{'type': orig_pool['type']})
# new volume name: <UUID>-<loop-index>.<original extension>
# e.g. 1234-5678-9012-3456-0.img
ext = os.path.splitext(path)[1]
new_vol_name = u'%s-%d%s' % (uuid, i, ext)
task = self.storagevolume.clone(orig_pool_name, orig_vol_name,
new_name=new_vol_name)
self.task.wait(task['id'], 3600) # 1 h
# get the new volume path and update the XML descriptor
new_vol = self.storagevolume.lookup(new_pool_name, new_vol_name)
xml = xml_item_update(xml, XPATH_DOMAIN_DISK_BY_FILE % path,
new_vol['path'], 'file')
# remove the new volume should an error occur later
rollback.prependDefer(self.storagevolume.delete, new_pool_name,
new_vol_name)
return xml
def _clone_update_objstore(self, old_uuid, new_uuid, rollback):
"""Update Kimchi's object store with the cloning VM.
Arguments:
old_uuid -- The UUID of the original VM.
        new_uuid -- The UUID of the new, cloning VM.
rollback -- A rollback context so the object store entry can be removed
if an error occurs during the cloning operation.
"""
with self.objstore as session:
try:
vm = session.get('vm', old_uuid)
icon = vm['icon']
session.store('vm', new_uuid, {'icon': icon})
except NotFoundError:
# if we cannot find an object store entry for the original VM,
# don't store one with an empty value.
pass
else:
# we need to define a custom function to prepend to the
# rollback context because the object store session needs to be
# opened and closed correctly (i.e. "prependDefer" only
# accepts one command at a time but we need more than one to
# handle an object store).
def _rollback_objstore():
with self.objstore as session_rb:
session_rb.delete('vm', new_uuid, ignore_missing=True)
# remove the new object store entry should an error occur later
rollback.prependDefer(_rollback_objstore)
def _build_access_elem(self, users, groups):
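        """Build the <access> metadata element holding the users and groups
        allowed to access this guest under the configured authentication
        method."""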
auth = config.get("authentication", "method")
auth_elem = E.auth(type=auth)
for user in users:
auth_elem.append(E.user(user))
for group in groups:
auth_elem.append(E.group(group))
access = E.access()
access.append(auth_elem)
return access
def _vm_update_access_metadata(self, dom, params):
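        """Validate the 'users' and 'groups' lists in params and persist them
        in the guest's access metadata."""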
users = groups = None
if "users" in params:
users = params["users"]
for user in users:
if not self.users.validate(user):
raise InvalidParameter("KCHVM0027E",
{'users': user})
if "groups" in params:
groups = params["groups"]
for group in groups:
if not self.groups.validate(group):
raise InvalidParameter("KCHVM0028E",
{'groups': group})
if users is None and groups is None:
return
old_users, old_groups = self._get_access_info(dom)
users = old_users if users is None else users
groups = old_groups if groups is None else groups
node = self._build_access_elem(users, groups)
set_metadata_node(dom, node, self.caps.metadata_support)
def _get_access_info(self, dom):
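        """Return the (users, groups) allowed to access the guest, read from
        its metadata."""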
users = groups = list()
access_xml = (get_metadata_node(dom, "access",
self.caps.metadata_support) or
"""<access></access>""")
access_info = dictize(access_xml)
auth = config.get("authentication", "method")
if ('auth' in access_info['access'] and
('type' in access_info['access']['auth'] or
len(access_info['access']['auth']) > 1)):
users = xpath_get_text(access_xml,
"/access/auth[@type='%s']/user" % auth)
groups = xpath_get_text(access_xml,
"/access/auth[@type='%s']/group" % auth)
elif auth == 'pam':
# Compatible to old permission tagging
users = xpath_get_text(access_xml, "/access/user")
groups = xpath_get_text(access_xml, "/access/group")
return users, groups
@staticmethod
def vm_get_os_metadata(dom, metadata_support):
os_xml = (get_metadata_node(dom, "os", metadata_support) or
"""<os></os>""")
os_elem = ET.fromstring(os_xml)
return (os_elem.attrib.get("version"), os_elem.attrib.get("distro"))
@staticmethod
def vm_update_os_metadata(dom, params, metadata_support):
distro = params.get("os_distro")
version = params.get("os_version")
if distro is None:
return
os_elem = E.os({"distro": distro, "version": version})
set_metadata_node(dom, os_elem, metadata_support)
def _update_graphics(self, dom, xml, params):
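        """Update the graphics password and its expiration time in the given
        XML; if the guest is running, also apply the change to the live
        domain."""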
root = objectify.fromstring(xml)
graphics = root.devices.find("graphics")
if graphics is None:
return xml
password = params['graphics'].get("passwd")
if password is not None and len(password.strip()) == 0:
password = "".join(random.sample(string.ascii_letters +
string.digits, 8))
if password is not None:
graphics.attrib['passwd'] = password
expire = params['graphics'].get("passwdValidTo")
to = graphics.attrib.get('passwdValidTo')
if to is not None:
if (time.mktime(time.strptime(to, '%Y-%m-%dT%H:%M:%S'))
- time.time() <= 0):
expire = expire if expire is not None else 30
if expire is not None:
expire_time = time.gmtime(time.time() + float(expire))
valid_to = time.strftime('%Y-%m-%dT%H:%M:%S', expire_time)
graphics.attrib['passwdValidTo'] = valid_to
if not dom.isActive():
return ET.tostring(root, encoding="utf-8")
xml = dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE)
dom.updateDeviceFlags(etree.tostring(graphics),
libvirt.VIR_DOMAIN_AFFECT_LIVE)
return xml
def _static_vm_update(self, dom, params):
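        """Apply the static (persistent) parameter updates by rewriting the
        guest's XML and redefining it in libvirt."""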
state = DOM_STATE_MAP[dom.info()[0]]
old_xml = new_xml = dom.XMLDesc(0)
for key, val in params.items():
if key in VM_STATIC_UPDATE_PARAMS:
if key == 'memory':
                    # Libvirt saves memory in KiB. Retrieved xml has memory
                    # in KiB too, so the new value must be in KiB here
val = val * 1024
if type(val) == int:
val = str(val)
xpath = VM_STATIC_UPDATE_PARAMS[key]
new_xml = xml_item_update(new_xml, xpath, val)
if 'graphics' in params:
new_xml = self._update_graphics(dom, new_xml, params)
conn = self.conn.get()
try:
if 'name' in params:
if state == 'running':
msg_args = {'name': dom.name(), 'new_name': params['name']}
raise InvalidParameter("KCHVM0003E", msg_args)
# Undefine old vm, only if name is going to change
dom.undefine()
root = ET.fromstring(new_xml)
currentMem = root.find('.currentMemory')
if currentMem is not None:
root.remove(currentMem)
dom = conn.defineXML(ET.tostring(root, encoding="utf-8"))
except libvirt.libvirtError as e:
dom = conn.defineXML(old_xml)
raise OperationFailed("KCHVM0008E", {'name': dom.name(),
'err': e.get_error_message()})
return dom
def _live_vm_update(self, dom, params):
self._vm_update_access_metadata(dom, params)
def _has_video(self, dom):
dom = ElementTree.fromstring(dom.XMLDesc(0))
return dom.find('devices/video') is not None
def lookup(self, name):
dom = self.get_vm(name, self.conn)
info = dom.info()
state = DOM_STATE_MAP[info[0]]
screenshot = None
# (type, listen, port, passwd, passwdValidTo)
graphics = self._vm_get_graphics(name)
graphics_port = graphics[2]
graphics_port = graphics_port if state == 'running' else None
try:
if state == 'running' and self._has_video(dom):
screenshot = self.vmscreenshot.lookup(name)
elif state == 'shutoff':
# reset vm stats when it is powered off to avoid sending
# incorrect (old) data
stats[dom.UUIDString()] = {}
except NotFoundError:
pass
with self.objstore as session:
try:
extra_info = session.get('vm', dom.UUIDString())
except NotFoundError:
extra_info = {}
icon = extra_info.get('icon')
vm_stats = stats.get(dom.UUIDString(), {})
res = {}
res['cpu_utilization'] = vm_stats.get('cpu', 0)
res['net_throughput'] = vm_stats.get('net_io', 0)
res['net_throughput_peak'] = vm_stats.get('max_net_io', 100)
res['io_throughput'] = vm_stats.get('disk_io', 0)
res['io_throughput_peak'] = vm_stats.get('max_disk_io', 100)
users, groups = self._get_access_info(dom)
return {'name': name,
'state': state,
'stats': res,
'uuid': dom.UUIDString(),
'memory': info[2] >> 10,
'cpus': info[3],
'screenshot': screenshot,
'icon': icon,
# (type, listen, port, passwd, passwdValidTo)
'graphics': {"type": graphics[0],
"listen": graphics[1],
"port": graphics_port,
"passwd": graphics[3],
"passwdValidTo": graphics[4]},
'users': users,
'groups': groups,
'access': 'full',
'persistent': True if dom.isPersistent() else False
}
def _vm_get_disk_paths(self, dom):
xml = dom.XMLDesc(0)
xpath = "/domain/devices/disk[@device='disk']/source/@file"
return xpath_get_text(xml, xpath)
@staticmethod
def get_vm(name, conn):
conn = conn.get()
try:
# outgoing text to libvirt, encode('utf-8')
return conn.lookupByName(name.encode("utf-8"))
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
raise NotFoundError("KCHVM0002E", {'name': name})
else:
raise OperationFailed("KCHVM0009E", {'name': name,
'err': e.message})
def delete(self, name):
conn = self.conn.get()
dom = self.get_vm(name, self.conn)
self._vmscreenshot_delete(dom.UUIDString())
paths = self._vm_get_disk_paths(dom)
info = self.lookup(name)
if info['state'] == 'running':
self.poweroff(name)
# delete existing snapshots before deleting VM
# libvirt's Test driver does not support the function
# "virDomainListAllSnapshots", so "VMSnapshots.get_list" will raise
# "OperationFailed" in that case.
try:
snapshot_names = self.vmsnapshots.get_list(name)
except OperationFailed, e:
kimchi_log.error('cannot list snapshots: %s; '
'skipping snapshot deleting...' % e.message)
else:
for s in snapshot_names:
self.vmsnapshot.delete(name, s)
try:
dom.undefine()
except libvirt.libvirtError as e:
raise OperationFailed("KCHVM0021E",
{'name': name, 'err': e.get_error_message()})
for path in paths:
try:
vol = conn.storageVolLookupByPath(path)
pool = vol.storagePoolLookupByVolume()
xml = pool.XMLDesc(0)
pool_type = xpath_get_text(xml, "/pool/@type")[0]
if pool_type not in READONLY_POOL_TYPE:
vol.delete(0)
# Update objstore to remove the volume
with self.objstore as session:
session.delete('storagevolume', path,
ignore_missing=True)
except libvirt.libvirtError as e:
kimchi_log.error('Unable to get storage volume by path: %s' %
e.message)
except Exception as e:
raise OperationFailed('KCHVOL0017E', {'err': e.message})
try:
with self.objstore as session:
if path in session.get_list('storagevolume'):
n = session.get('storagevolume', path)['ref_cnt']
session.store('storagevolume', path, {'ref_cnt': n-1})
except Exception as e:
raise OperationFailed('KCHVOL0017E', {'err': e.message})
try:
with self.objstore as session:
session.delete('vm', dom.UUIDString(), ignore_missing=True)
except Exception as e:
            # It is possible to delete the VM without deleting its database info
kimchi_log.error('Error deleting vm information from database: '
'%s', e.message)
vnc.remove_proxy_token(name)
def start(self, name):
# make sure the ISO file has read permission
dom = self.get_vm(name, self.conn)
xml = dom.XMLDesc(0)
xpath = "/domain/devices/disk[@device='cdrom']/source/@file"
isofiles = xpath_get_text(xml, xpath)
for iso in isofiles:
run_setfacl_set_attr(iso)
dom = self.get_vm(name, self.conn)
try:
dom.create()
except libvirt.libvirtError as e:
raise OperationFailed("KCHVM0019E",
{'name': name, 'err': e.get_error_message()})
def poweroff(self, name):
dom = self.get_vm(name, self.conn)
try:
dom.destroy()
except libvirt.libvirtError as e:
raise OperationFailed("KCHVM0020E",
{'name': name, 'err': e.get_error_message()})
def shutdown(self, name):
dom = self.get_vm(name, self.conn)
try:
dom.shutdown()
except libvirt.libvirtError as e:
raise OperationFailed("KCHVM0029E",
{'name': name, 'err': e.get_error_message()})
def reset(self, name):
dom = self.get_vm(name, self.conn)
try:
dom.reset(flags=0)
except libvirt.libvirtError as e:
raise OperationFailed("KCHVM0022E",
{'name': name, 'err': e.get_error_message()})
def _vm_get_graphics(self, name):
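        """Return a (type, listen, port, passwd, passwdValidTo) tuple
        describing the guest's graphics device."""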
dom = self.get_vm(name, self.conn)
xml = dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE)
expr = "/domain/devices/graphics/@type"
res = xpath_get_text(xml, expr)
graphics_type = res[0] if res else None
expr = "/domain/devices/graphics/@listen"
res = xpath_get_text(xml, expr)
graphics_listen = res[0] if res else None
graphics_port = graphics_passwd = graphics_passwdValidTo = None
if graphics_type:
expr = "/domain/devices/graphics[@type='%s']/@port"
res = xpath_get_text(xml, expr % graphics_type)
graphics_port = int(res[0]) if res else None
expr = "/domain/devices/graphics[@type='%s']/@passwd"
res = xpath_get_text(xml, expr % graphics_type)
graphics_passwd = res[0] if res else None
expr = "/domain/devices/graphics[@type='%s']/@passwdValidTo"
res = xpath_get_text(xml, expr % graphics_type)
if res:
to = time.mktime(time.strptime(res[0], '%Y-%m-%dT%H:%M:%S'))
graphics_passwdValidTo = to - time.mktime(time.gmtime())
return (graphics_type, graphics_listen, graphics_port,
graphics_passwd, graphics_passwdValidTo)
def connect(self, name):
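        """Register a proxy token for the guest's graphics port so a console
        client can connect; fails if the guest has no graphics port."""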
# (type, listen, port, passwd, passwdValidTo)
graphics_port = self._vm_get_graphics(name)[2]
if graphics_port is not None:
vnc.add_proxy_token(name, graphics_port)
else:
raise OperationFailed("KCHVM0010E", {'name': name})
def _vmscreenshot_delete(self, vm_uuid):
screenshot = VMScreenshotModel.get_screenshot(vm_uuid, self.objstore,
self.conn)
screenshot.delete()
try:
with self.objstore as session:
session.delete('screenshot', vm_uuid)
except Exception as e:
            # It is possible to continue Kimchi execution without deleting
            # screenshots
            kimchi_log.error('Error trying to delete vm screenshot from '
                             'database due to error: %s', e.message)
class VMScreenshotModel(object):
def __init__(self, **kargs):
self.objstore = kargs['objstore']
self.conn = kargs['conn']
def lookup(self, name):
dom = VMModel.get_vm(name, self.conn)
d_info = dom.info()
vm_uuid = dom.UUIDString()
if DOM_STATE_MAP[d_info[0]] != 'running':
raise NotFoundError("KCHVM0004E", {'name': name})
screenshot = self.get_screenshot(vm_uuid, self.objstore, self.conn)
img_path = screenshot.lookup()
# screenshot info changed after scratch generation
try:
with self.objstore as session:
session.store('screenshot', vm_uuid, screenshot.info)
except Exception as e:
            # It is possible to continue Kimchi execution without storing
            # screenshots
            kimchi_log.error('Error trying to update database with guest '
                             'screenshot information due to error: %s', e.message)
return img_path
@staticmethod
def get_screenshot(vm_uuid, objstore, conn):
try:
with objstore as session:
try:
params = session.get('screenshot', vm_uuid)
except NotFoundError:
params = {'uuid': vm_uuid}
session.store('screenshot', vm_uuid, params)
except Exception as e:
# The 'except' outside of 'with' is necessary to catch possible
# exception from '__exit__' when calling 'session.store'
            # It is possible to continue Kimchi VM execution without
            # screenshots
            kimchi_log.error('Error trying to update database with guest '
                             'screenshot information due to error: %s', e.message)
return LibvirtVMScreenshot(params, conn)
class LibvirtVMScreenshot(VMScreenshot):
def __init__(self, vm_uuid, conn):
VMScreenshot.__init__(self, vm_uuid)
self.conn = conn
def _generate_scratch(self, thumbnail):
def handler(stream, buf, opaque):
fd = opaque
os.write(fd, buf)
fd = os.open(thumbnail, os.O_WRONLY | os.O_TRUNC | os.O_CREAT, 0644)
try:
conn = self.conn.get()
dom = conn.lookupByUUIDString(self.vm_uuid)
vm_name = dom.name()
stream = conn.newStream(0)
dom.screenshot(stream, 0, 0)
stream.recvAll(handler, fd)
except libvirt.libvirtError:
try:
stream.abort()
except:
pass
raise NotFoundError("KCHVM0006E", {'name': vm_name})
else:
stream.finish()
finally:
os.close(fd)
| lgpl-3.0 | -8,180,474,466,544,877,000 | 39.028971 | 79 | 0.549827 | false |
eppye-bots/bots | bots/botsinit.py | 1 | 15231 | from __future__ import unicode_literals
import sys
if sys.version_info[0] > 2:
basestring = unicode = str
import configparser as ConfigParser
else:
import ConfigParser
import os
import encodings
import codecs
import logging
import logging.handlers
#bots-modules
from . import botsglobal
from . import botslib
from . import node
class BotsConfig(ConfigParser.RawConfigParser):
''' As ConfigParser, but with defaults.
'''
def get(self,section, option, default='', **kwargs):
if self.has_option(section, option):
return ConfigParser.RawConfigParser.get(self,section, option)
elif default == '':
raise botslib.BotsError('No entry "%(option)s" in section "%(section)s" in "bots.ini".',{'option':option,'section':section})
else:
return default
def getint(self,section, option, default, **kwargs):
if self.has_option(section, option):
return ConfigParser.RawConfigParser.getint(self,section, option)
else:
return default
def getboolean(self,section, option, default, **kwargs):
if self.has_option(section, option):
return ConfigParser.RawConfigParser.getboolean(self,section, option)
else:
return default
def generalinit(configdir):
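    ''' Initialise bots from the given config directory: import settings.py,
        read bots.ini, resolve the usersys and botssys directories, set up
        the bots character sets and initialise django.
    '''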
##########################################################################
#Configdir: settings.py & bots.ini#########################################
#Configdir MUST be importable. So configdir is relative to PYTHONPATH. Try several options for this import.
    try: #first check if configdir is outside bots-directory: import configdir.settings.py
importnameforsettings = os.path.normpath(os.path.join(configdir,'settings')).replace(os.sep,'.')
settings = botslib.botsbaseimport(importnameforsettings)
except ImportError: #normal: configdir is in bots directory: import bots.configdir.settings.py
try:
importnameforsettings = os.path.normpath(os.path.join('bots',configdir,'settings')).replace(os.sep,'.')
settings = botslib.botsbaseimport(importnameforsettings)
except ImportError: #set pythonpath to config directory first
if not os.path.exists(configdir): #check if configdir exists.
            raise botslib.PanicError('In initialisation: path to configuration does not exist: "%(configdir)s".',{'configdir':configdir})
addtopythonpath = os.path.abspath(os.path.dirname(configdir))
moduletoimport = os.path.basename(configdir)
sys.path.append(addtopythonpath)
importnameforsettings = os.path.normpath(os.path.join(moduletoimport,'settings')).replace(os.sep,'.')
settings = botslib.botsbaseimport(importnameforsettings)
#settings is imported, so now we know where to find settings.py: importnameforsettings
    #note: the imported settings.py itself is NOT used, this is done via django.conf.settings
configdirectory = os.path.abspath(os.path.dirname(settings.__file__))
#Read configuration-file bots.ini.
botsglobal.ini = BotsConfig()
botsglobal.ini.read(os.path.join(configdirectory,'bots.ini'))
# 'directories','botspath': absolute path for bots directory
botsglobal.ini.set('directories','botspath',os.path.abspath(os.path.dirname(__file__)))
# 'directories','config': absolute path for config directory
botsglobal.ini.set('directories','config',configdirectory)
#set config as originally received; used in starting engine via bots-monitor
botsglobal.ini.set('directories','config_org',configdir)
############################################################################
#Usersys####################################################################
#usersys MUST be importable. So usersys is relative to PYTHONPATH. Try several options for this import.
usersys = botsglobal.ini.get('directories','usersys','usersys')
try: #usersys outside bots-directory: import usersys
importnameforusersys = os.path.normpath(usersys).replace(os.sep,'.')
importedusersys = botslib.botsbaseimport(importnameforusersys)
except ImportError: #usersys is in bots directory: import bots.usersys
try:
importnameforusersys = os.path.normpath(os.path.join('bots',usersys)).replace(os.sep,'.')
importedusersys = botslib.botsbaseimport(importnameforusersys)
except ImportError: #set pythonpath to usersys directory first
            if not os.path.exists(usersys): #check if usersys exists.
                raise botslib.PanicError('In initialisation: path to usersys does not exist: "%(usersys)s".',{'usersys':usersys})
addtopythonpath = os.path.abspath(os.path.dirname(usersys)) #????
moduletoimport = os.path.basename(usersys)
sys.path.append(addtopythonpath)
importnameforusersys = os.path.normpath(usersys).replace(os.sep,'.')
importedusersys = botslib.botsbaseimport(importnameforusersys)
# 'directories','usersysabs': absolute path for config usersysabs
botsglobal.ini.set('directories','usersysabs',os.path.abspath(os.path.dirname(importedusersys.__file__))) #???Find pathname usersys using imported usersys
# botsglobal.usersysimportpath: used for imports from usersys
botsglobal.usersysimportpath = importnameforusersys
botsglobal.ini.set('directories','templatehtml',botslib.join(botsglobal.ini.get('directories','usersysabs'),'grammars/templatehtml/templates'))
############################################################################
#Botssys####################################################################
# 'directories','botssys': absolute path for config botssys
botssys = botsglobal.ini.get('directories','botssys','botssys')
botsglobal.ini.set('directories','botssys_org',botssys) #store original botssys setting
botsglobal.ini.set('directories','botssys',botslib.join(botssys)) #use absolute path
botsglobal.ini.set('directories','data',botslib.join(botssys,'data'))
botsglobal.ini.set('directories','logging',botslib.join(botssys,'logging'))
############################################################################
#other inits##############################################################
if botsglobal.ini.get('webserver','environment','development') != 'development': #values in bots.ini are also used in setting up cherrypy
logging.raiseExceptions = 0 # during production: if errors occurs in writing to log: ignore error. (leads to a missing log line, better than error;-).
botslib.dirshouldbethere(botsglobal.ini.get('directories','data'))
botslib.dirshouldbethere(botsglobal.ini.get('directories','logging'))
initbotscharsets() #initialise bots charsets
node.Node.checklevel = botsglobal.ini.getint('settings','get_checklevel',1)
botslib.settimeout(botsglobal.ini.getint('settings','globaltimeout',10))
############################################################################
#Init django#################################################################################
os.environ['DJANGO_SETTINGS_MODULE'] = importnameforsettings
import django
if hasattr(django,'setup'):
django.setup()
from django.conf import settings
botsglobal.settings = settings #settings are accessed using botsglobal
#**********************************************************************************
#*** bots specific handling of character-sets (eg UNOA charset) *******************
def initbotscharsets():
'''set up right charset handling for specific charsets (UNOA, UNOB, UNOC, etc).'''
#tell python how to search a codec defined by bots. Bots searches for this in usersys/charset
codecs.register(codec_search_function)
#syntax has parameters checkcharsetin or checkcharsetout. These can have value 'botsreplace'
#eg: 'checkcharsetin':'botsreplace', #strict, ignore or botsreplace
#in case of errors: the 'wrong' character is replaced with char as set in bots.ini. Default value in bots.ini is ' ' (space)
botsglobal.botsreplacechar = unicode(botsglobal.ini.get('settings','botsreplacechar',' '))
codecs.register_error('botsreplace', botsreplacechar_handler) #need to register the handler for botsreplacechar
#set aliases for the charsets in bots.ini
for key, value in botsglobal.ini.items('charsets'):
encodings.aliases.aliases[key] = value
def codec_search_function(encoding):
try:
module,filename = botslib.botsimport('charsets',encoding)
    except botslib.BotsImportError: #charsetscript not there; other errors like syntax errors are not caught
return None
else:
if hasattr(module,'getregentry'):
return module.getregentry()
else:
return None
def botsreplacechar_handler(info):
    '''replaces a char outside a charset by a user defined char. Useful eg for fixed records: recordlength does not change.'''
return (botsglobal.botsreplacechar, info.start+1)
#*** end of bots specific handling of character-sets ******************************
#**********************************************************************************
def connect():
''' connect to database for non-django modules eg engine '''
if botsglobal.settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3':
#sqlite has some more fiddling; in separate file. Mainly because of some other method of parameter passing.
if not os.path.isfile(botsglobal.settings.DATABASES['default']['NAME']):
raise botslib.PanicError('Could not find database file for SQLite')
from . import botssqlite
botsglobal.db = botssqlite.connect(database = botsglobal.settings.DATABASES['default']['NAME'])
elif botsglobal.settings.DATABASES['default']['ENGINE'] == 'django.db.backends.mysql':
import MySQLdb
from MySQLdb import cursors
botsglobal.db = MySQLdb.connect(host=botsglobal.settings.DATABASES['default']['HOST'],
port=int(botsglobal.settings.DATABASES['default']['PORT']),
db=botsglobal.settings.DATABASES['default']['NAME'],
user=botsglobal.settings.DATABASES['default']['USER'],
passwd=botsglobal.settings.DATABASES['default']['PASSWORD'],
cursorclass=cursors.DictCursor,
**botsglobal.settings.DATABASES['default']['OPTIONS'])
elif botsglobal.settings.DATABASES['default']['ENGINE'] == 'django.db.backends.postgresql_psycopg2':
import psycopg2
import psycopg2.extensions
import psycopg2.extras
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
botsglobal.db = psycopg2.connect(host=botsglobal.settings.DATABASES['default']['HOST'],
port=botsglobal.settings.DATABASES['default']['PORT'],
database=botsglobal.settings.DATABASES['default']['NAME'],
user=botsglobal.settings.DATABASES['default']['USER'],
password=botsglobal.settings.DATABASES['default']['PASSWORD'],
connection_factory=psycopg2.extras.DictConnection)
botsglobal.db.set_client_encoding('UNICODE')
else:
raise botslib.PanicError('Unknown database engine "%(engine)s".',{'engine':botsglobal.settings.DATABASES['default']['ENGINE']})
#*******************************************************************
#*** init logging **************************************************
#*******************************************************************
logging.addLevelName(25, 'STARTINFO')
convertini2logger = {'DEBUG':logging.DEBUG,'INFO':logging.INFO,'WARNING':logging.WARNING,'ERROR':logging.ERROR,'CRITICAL':logging.CRITICAL,'STARTINFO':25}
def initenginelogging(logname):
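    ''' Set up rotating file logging (and optional console logging) for the
        engine; returns the configured logger.
    '''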
#initialise file logging: create main logger 'bots'
logger = logging.getLogger(logname)
logger.setLevel(convertini2logger[botsglobal.ini.get('settings','log_file_level','INFO')])
if botsglobal.ini.get('settings','log_file_number',None) == 'daily':
handler = logging.handlers.TimedRotatingFileHandler(os.path.join(botsglobal.ini.get('directories','logging'),logname+'.log'),when='midnight',backupCount=10)
else:
handler = logging.handlers.RotatingFileHandler(botslib.join(botsglobal.ini.get('directories','logging'),logname+'.log'),backupCount=botsglobal.ini.getint('settings','log_file_number',10))
handler.doRollover() #each run a new log file is used; old one is rotated
fileformat = logging.Formatter('%(asctime)s %(levelname)-8s %(name)s : %(message)s','%Y%m%d %H:%M:%S')
handler.setFormatter(fileformat)
logger.addHandler(handler)
    #initialise file logging: logger for trace of mapping; tried to use filters but could not get this to work.
botsglobal.logmap = logging.getLogger('engine.map')
if not botsglobal.ini.getboolean('settings','mappingdebug',False):
botsglobal.logmap.setLevel(logging.CRITICAL)
#logger for reading edifile. is now used only very limited (1 place); is done with 'if'
#~ botsglobal.ini.getboolean('settings','readrecorddebug',False)
# initialise console/screen logging
if botsglobal.ini.getboolean('settings','log_console',True):
console = logging.StreamHandler()
console.setLevel(logging.INFO)
        consoleformat = logging.Formatter('%(levelname)-8s %(message)s')
        console.setFormatter(consoleformat) # add formatter to console
logger.addHandler(console) # add console to logger
return logger
def initserverlogging(logname):
# initialise file logging
logger = logging.getLogger(logname)
logger.setLevel(convertini2logger[botsglobal.ini.get(logname,'log_file_level','INFO')])
handler = logging.handlers.TimedRotatingFileHandler(os.path.join(botsglobal.ini.get('directories','logging'),logname+'.log'),when='midnight',backupCount=10)
fileformat = logging.Formatter('%(asctime)s %(levelname)-9s: %(message)s','%Y%m%d %H:%M:%S')
handler.setFormatter(fileformat)
logger.addHandler(handler)
# initialise console/screen logging
if botsglobal.ini.getboolean(logname,'log_console',True):
console = logging.StreamHandler()
console.setLevel(convertini2logger[botsglobal.ini.get(logname,'log_console_level','STARTINFO')])
consoleformat = logging.Formatter('%(asctime)s %(levelname)-9s: %(message)s','%Y%m%d %H:%M:%S')
console.setFormatter(consoleformat) # add formatter to console
logger.addHandler(console) # add console to logger
return logger
| gpl-3.0 | -7,892,572,289,152,995,000 | 62.812766 | 195 | 0.627142 | false |
westernx/sgfs | sgfs/cache.py | 1 | 5707 | from subprocess import call
import collections
import errno
import logging
import os
import sqlite3
from sgsession import Entity
log = logging.getLogger(__name__)
class PathCache(collections.MutableMapping):
def __init__(self, sgfs, project_root):
self.sgfs = sgfs
self.project_root = os.path.abspath(project_root)
        # We are in the middle of a transition of where the SQLite file
# is located, and for now we prioritize the old location.
for name in ('.sgfs-cache.sqlite', '.sgfs/cache.sqlite'):
db_path = os.path.join(project_root, name)
if os.path.exists(db_path):
break
else:
# If it doesn't exist then touch it with read/write permissions for all.
db_dir = os.path.dirname(db_path)
umask = os.umask(0)
try:
try:
os.makedirs(db_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
os.umask(0111)
call(['touch', db_path])
finally:
os.umask(umask)
self.conn = sqlite3.connect(db_path)
self.conn.text_factory = str
with self.conn:
self.conn.execute('CREATE TABLE IF NOT EXISTS entity_paths (entity_type TEXT, entity_id INTEGER, path TEXT)')
self.conn.execute('CREATE UNIQUE INDEX IF NOT EXISTS entity_paths_entity ON entity_paths(entity_type, entity_id)')
def __repr__(self):
return '<%s for %r at 0x%x>' % (self.__class__.__name__, self.project_root, id(self))
def __setitem__(self, entity, path):
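        """Cache the path for an entity; the path is stored relative to the
        project root."""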
if not isinstance(entity, Entity):
raise TypeError('path cache keys must be entities; got %r %r' % (type(entity), entity))
if not isinstance(path, basestring):
raise TypeError('path cache values must be basestring; got %r %r' % (type(path), path))
path = os.path.relpath(os.path.abspath(path), self.project_root)
with self.conn:
self.conn.execute('INSERT OR REPLACE into entity_paths values (?, ?, ?)', (entity['type'], entity['id'], path))
def get(self, entity, default=None, check_tags=True):
"""Get a path for an entity.
:param Entity entity: The entity to look up in the path cache.
:param default: What to return if the entity is not in the cache;
defaults to ``None``.
:param bool check_tags: Should we check for the entity in the directory
tags at the cached path before returning it?
:returns: The cached path.
"""
if not isinstance(entity, Entity):
raise TypeError('path cache keys are entities; got %r %r' % (type(entity), entity))
with self.conn:
c = self.conn.cursor()
c.execute('SELECT path FROM entity_paths WHERE entity_type = ? AND entity_id = ?', (entity['type'], entity['id']))
row = c.fetchone()
if row is None:
return default
path = os.path.abspath(os.path.join(self.project_root, row[0]))
# Make sure that the entity is actually tagged in the given directory.
# This guards against moving tagged directories. This does NOT
# effectively guard against copied directories.
if check_tags:
if not any(tag['entity'] is entity for tag in self.sgfs.get_directory_entity_tags(path)):
log.warning('%s %d is not tagged at %s' % (
entity['type'], entity['id'], path,
))
return default
return path
def __getitem__(self, entity):
path = self.get(entity)
if path is None:
raise KeyError(entity)
else:
return path
def __delitem__(self, entity):
if not isinstance(entity, Entity):
raise TypeError('path cache keys must be entities; got %r %r' % (type(entity), entity))
with self.conn:
self.conn.execute('DELETE FROM entity_paths WHERE entity_type = ? AND entity_id = ?', (entity['type'], entity['id']))
def __len__(self):
with self.conn:
c = self.conn.cursor()
return c.execute('SELECT COUNT(1) FROM entity_paths').fetchone()[0]
def __iter__(self):
with self.conn:
c = self.conn.cursor()
for row in c.execute('SELECT entity_type, entity_id FROM entity_paths'):
yield self.sgfs.session.merge(dict(type=row[0], id=row[1]))
def walk_directory(self, path, entity_type=None, must_exist=True):
relative = os.path.relpath(path, self.project_root)
# Special case the Projects.
if relative == '.':
relative = ''
if relative.startswith('.'):
raise ValueError('path not in project; %r' % path)
with self.conn:
c = self.conn.cursor()
if entity_type is not None:
c.execute('SELECT entity_type, entity_id, path FROM entity_paths WHERE entity_type = ? AND path LIKE ?', (entity_type, relative + '%'))
else:
c.execute('SELECT entity_type, entity_id, path FROM entity_paths WHERE path LIKE ?', (relative + '%', ))
for row in c:
entity = self.sgfs.session.merge(dict(type=row[0], id=row[1]))
path = os.path.join(self.project_root, row[2])
if must_exist and not os.path.exists(path):
continue
yield path, entity
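# Example usage (illustrative sketch; the `sgfs` instance, `shot` entity and
# project root below are assumptions, not part of this module):
#
#   cache = PathCache(sgfs, '/projects/example')
#   cache[shot] = '/projects/example/SEQ/AA/AA_001'
#   print cache.get(shot)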
| bsd-3-clause | -2,746,726,976,391,183,000 | 37.823129 | 151 | 0.557736 | false |
dc3-plaso/dfvfs | tests/file_io/vhdi_file_io.py | 1 | 2952 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the file-like object implementation using pyvhdi."""
import unittest
from dfvfs.lib import errors
from dfvfs.path import os_path_spec
from dfvfs.path import vhdi_path_spec
from tests import test_lib as shared_test_lib
from tests.file_io import test_lib
@shared_test_lib.skipUnlessHasTestFile([u'image.vhd'])
class DynamicVHDIFileTest(test_lib.ImageFileTestCase):
"""The unit test for the VHD image file-like object."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
super(DynamicVHDIFileTest, self).setUp()
test_file = self._GetTestFilePath([u'image.vhd'])
self._os_path_spec = os_path_spec.OSPathSpec(location=test_file)
self._vhdi_path_spec = vhdi_path_spec.VHDIPathSpec(
parent=self._os_path_spec)
def testOpenCloseInode(self):
"""Test the open and close functionality using an inode."""
self._TestOpenCloseInode(self._vhdi_path_spec)
def testOpenCloseLocation(self):
"""Test the open and close functionality using a location."""
self._TestOpenCloseLocation(self._vhdi_path_spec)
# Try open with a path specification that has no parent.
path_spec = vhdi_path_spec.VHDIPathSpec(parent=self._os_path_spec)
path_spec.parent = None
with self.assertRaises(errors.PathSpecError):
self._TestOpenCloseLocation(path_spec)
def testSeek(self):
"""Test the seek functionality."""
self._TestSeek(self._vhdi_path_spec)
def testRead(self):
"""Test the read functionality."""
self._TestRead(self._vhdi_path_spec)
@shared_test_lib.skipUnlessHasTestFile([u'image-differential.vhd'])
class DifferentialVHDIFileTest(test_lib.ImageFileTestCase):
"""The unit test for the VHD image file-like object."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
super(DifferentialVHDIFileTest, self).setUp()
test_file = self._GetTestFilePath([u'image-differential.vhd'])
self._os_path_spec = os_path_spec.OSPathSpec(location=test_file)
self._vhdi_path_spec = vhdi_path_spec.VHDIPathSpec(
parent=self._os_path_spec)
def testOpenCloseInode(self):
"""Test the open and close functionality using an inode."""
self._TestOpenCloseInode(self._vhdi_path_spec)
def testOpenCloseLocation(self):
"""Test the open and close functionality using a location."""
self._TestOpenCloseLocation(self._vhdi_path_spec)
# Try open with a path specification that has no parent.
path_spec = vhdi_path_spec.VHDIPathSpec(parent=self._os_path_spec)
path_spec.parent = None
with self.assertRaises(errors.PathSpecError):
self._TestOpenCloseLocation(path_spec)
def testSeek(self):
"""Test the seek functionality."""
self._TestSeek(self._vhdi_path_spec)
def testRead(self):
"""Test the read functionality."""
self._TestRead(self._vhdi_path_spec)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 1,324,908,433,610,755,600 | 32.545455 | 70 | 0.712398 | false |
rammstein/0install | zeroinstall/injector/driver.py | 1 | 7097 | """
This class brings together a L{solve.Solver} to choose a set of implmentations, a
L{fetch.Fetcher} to download additional components, and the user's configuration
settings.
@since: 0.53
"""
# Copyright (C) 2011, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _, logger
import os, logging
from zeroinstall.injector import arch, reader
from zeroinstall.injector.model import network_offline
from zeroinstall.support import tasks
class Driver(object):
"""Chooses a set of implementations based on a policy.
Typical use:
1. Create a Driver object, giving it the requirements about the program to be run.
2. Call L{solve_with_downloads}. If more information is needed, a L{fetch.Fetcher} will be used to download it.
3. When all downloads are complete, the L{solver} contains the chosen versions.
4. Use L{get_uncached_implementations} to find where to get these versions and download them
using L{download_uncached_implementations}.
@ivar target_arch: target architecture for binaries (deprecated)
@type target_arch: L{arch.Architecture}
@ivar solver: solver used to choose a set of implementations
@type solver: L{solve.Solver}
@ivar watchers: callbacks to invoke after solving
"""
__slots__ = ['watchers', 'requirements', 'config', 'target_arch', 'solver']
def __init__(self, config, requirements):
"""@param config: The configuration settings to use
@type config: L{config.Config}
@param requirements: Details about the program we want to run
@type requirements: L{requirements.Requirements}
@since: 0.53"""
self.watchers = []
assert config
self.config = config
assert requirements
self.requirements = requirements
self.target_arch = arch.get_architecture(requirements.os, requirements.cpu)
from zeroinstall.injector.solver import DefaultSolver
self.solver = DefaultSolver(self.config)
logger.debug(_("Supported systems: '%s'"), arch.os_ranks)
logger.debug(_("Supported processors: '%s'"), arch.machine_ranks)
self.solver.extra_restrictions = requirements.get_extra_restrictions(self.config.iface_cache)
def get_uncached_implementations(self):
"""List all chosen implementations which aren't yet available locally.
@rtype: [(L{model.Interface}, L{model.Implementation})]"""
iface_cache = self.config.iface_cache
stores = self.config.stores
uncached = []
for uri, selection in self.solver.selections.selections.items():
impl = selection.impl
assert impl, self.solver.selections
if not impl.is_available(stores):
uncached.append((iface_cache.get_interface(uri), impl))
return uncached
@tasks.async
def solve_with_downloads(self, force = False, update_local = False):
"""Run the solver, then download any feeds that are missing or
that need to be updated. Each time a new feed is imported into
the cache, the solver is run again, possibly adding new downloads.
@param force: whether to download even if we're already ready to run.
@type force: bool
@param update_local: fetch PackageKit feeds even if we're ready to run.
@type update_local: bool"""
downloads_finished = set() # Successful or otherwise
downloads_in_progress = {} # URL -> Download
# There are three cases:
# 1. We want to run immediately if possible. If not, download all the information we can.
# (force = False, update_local = False)
# 2. We're in no hurry, but don't want to use the network unnecessarily.
# We should still update local information (from PackageKit).
# (force = False, update_local = True)
# 3. The user explicitly asked us to refresh everything.
# (force = True)
try_quick_exit = not (force or update_local)
while True:
self.solver.solve_for(self.requirements)
for w in self.watchers: w()
if try_quick_exit and self.solver.ready:
break
try_quick_exit = False
if not self.solver.ready:
force = True
for f in self.solver.feeds_used:
if f in downloads_finished or f in downloads_in_progress:
continue
if os.path.isabs(f):
if force:
try:
self.config.iface_cache.get_feed(f, force = True)
except reader.MissingLocalFeed as ex:
logger.warning("Reloading %s: %s", f, ex,
exc_info = True if logger.isEnabledFor(logging.INFO) else None)
downloads_in_progress[f] = tasks.IdleBlocker('Refresh local feed')
continue
elif f.startswith('distribution:'):
if force or update_local:
downloads_in_progress[f] = self.config.fetcher.download_and_import_feed(f, self.config.iface_cache)
elif force and self.config.network_use != network_offline:
downloads_in_progress[f] = self.config.fetcher.download_and_import_feed(f, self.config.iface_cache)
					# Once we've started downloading some things,
					# we might as well get them all.
force = True
if not downloads_in_progress:
if self.config.network_use == network_offline:
logger.info(_("Can't choose versions and in off-line mode, so aborting"))
break
# Wait for at least one download to finish
blockers = downloads_in_progress.values()
yield blockers
tasks.check(blockers, self.config.handler.report_error)
for f in list(downloads_in_progress.keys()):
if f in downloads_in_progress and downloads_in_progress[f].happened:
del downloads_in_progress[f]
downloads_finished.add(f)
# Need to refetch any "distribution" feed that
# depends on this one
distro_feed_url = 'distribution:' + f
if distro_feed_url in downloads_finished:
downloads_finished.remove(distro_feed_url)
if distro_feed_url in downloads_in_progress:
del downloads_in_progress[distro_feed_url]
@tasks.async
def solve_and_download_impls(self, refresh = False, select_only = False):
"""Run L{solve_with_downloads} and then get the selected implementations too.
@type refresh: bool
@type select_only: bool
@raise SafeException: if we couldn't select a set of implementations
@since: 0.40"""
refreshed = self.solve_with_downloads(refresh)
if refreshed:
yield refreshed
tasks.check(refreshed)
if not self.solver.ready:
raise self.solver.get_failure_reason()
if not select_only:
downloaded = self.download_uncached_implementations()
if downloaded:
yield downloaded
tasks.check(downloaded)
def need_download(self):
"""Decide whether we need to download anything (but don't do it!)
@return: true if we MUST download something (feeds or implementations)
@rtype: bool"""
self.solver.solve_for(self.requirements)
for w in self.watchers: w()
if not self.solver.ready:
return True # Maybe a newer version will work?
if self.get_uncached_implementations():
return True
return False
def download_uncached_implementations(self):
"""Download all implementations chosen by the solver that are missing from the cache.
@rtype: L{zeroinstall.support.tasks.Blocker}"""
assert self.solver.ready, "Solver is not ready!\n%s" % self.solver.selections
return self.solver.selections.download_missing(self.config, include_packages = True)
| lgpl-2.1 | -4,867,127,432,715,335,000 | 35.963542 | 113 | 0.722418 | false |
gleicon/RedisLive | src/dataprovider/redisprovider.py | 1 | 11475 | from api.util import settings
from datetime import datetime, timedelta
import redis
import json
import ast
class RedisStatsProvider(object):
"""A Redis based persistance to store and fetch stats"""
def __init__(self):
# redis server to use to store stats
stats_server = settings.get_redis_stats_server()
self.server = stats_server["server"]
self.port = stats_server["port"]
self.conn = redis.StrictRedis(host=self.server, port=self.port, db=0)
def save_memory_info(self, server, timestamp, used, peak):
"""Saves used and peak memory stats,
Args:
server (str): The server ID
timestamp (datetime): The time of the info.
used (int): Used memory value.
peak (int): Peak memory value.
"""
data = {"timestamp": timestamp.strftime('%s'),
"used": used,
"peak": peak}
self.conn.zadd(server + ":memory", timestamp.strftime('%s'), data)
def save_info_command(self, server, timestamp, info):
"""Save Redis info command dump
Args:
server (str): id of server
timestamp (datetime): Timestamp.
info (dict): The result of a Redis INFO command.
"""
self.conn.set(server + ":Info", json.dumps(info))
def save_monitor_command(self, server, timestamp, command, keyname,
argument):
"""save information about every command
Args:
server (str): Server ID
timestamp (datetime): Timestamp.
command (str): The Redis command used.
keyname (str): The key the command acted on.
argument (str): The args sent to the command.
"""
epoch = timestamp.strftime('%s')
current_date = timestamp.strftime('%y%m%d')
# start a redis MULTI/EXEC transaction
pipeline = self.conn.pipeline()
# store top command and key counts in sorted set for every second
# top N are easily available from sorted set in redis
# also keep a sorted set for every day
        # switch to daily stats when stats requested are for a longer time period
command_count_key = server + ":CommandCount:" + epoch
pipeline.zincrby(command_count_key, command, 1)
command_count_key = server + ":DailyCommandCount:" + current_date
pipeline.zincrby(command_count_key, command, 1)
key_count_key = server + ":KeyCount:" + epoch
pipeline.zincrby(key_count_key, keyname, 1)
key_count_key = server + ":DailyKeyCount:" + current_date
        pipeline.zincrby(key_count_key, keyname, 1)
# keep aggregate command in a hash
command_count_key = server + ":CommandCountBySecond"
pipeline.hincrby(command_count_key, epoch, 1)
command_count_key = server + ":CommandCountByMinute"
field_name = current_date + ":" + str(timestamp.hour) + ":"
field_name += str(timestamp.minute)
pipeline.hincrby(command_count_key, field_name, 1)
command_count_key = server + ":CommandCountByHour"
field_name = current_date + ":" + str(timestamp.hour)
pipeline.hincrby(command_count_key, field_name, 1)
command_count_key = server + ":CommandCountByDay"
field_name = current_date
pipeline.hincrby(command_count_key, field_name, 1)
# commit transaction to redis
pipeline.execute()
def get_info(self, server):
"""Get info about the server
Args:
server (str): The server ID
"""
info = self.conn.get(server + ":Info")
# FIXME: If the collector has never been run we get a 500 here. `None`
# is not a valid type to pass to json.loads.
info = json.loads(info)
return info
def get_memory_info(self, server, from_date, to_date):
"""Get stats for Memory Consumption between a range of dates
Args:
server (str): The server ID
from_date (datetime): Get memory info from this date onwards.
to_date (datetime): Get memory info up to this date.
"""
memory_data = []
start = int(from_date.strftime("%s"))
end = int(to_date.strftime("%s"))
rows = self.conn.zrangebyscore(server + ":memory", start, end)
for row in rows:
# TODO: Check to see if there's not a better way to do this. Using
# eval feels like it could be wrong/dangerous... but that's just a
# feeling.
row = ast.literal_eval(row)
parts = []
# convert the timestamp
timestamp = datetime.fromtimestamp(int(row['timestamp']))
timestamp = timestamp.strftime('%Y-%m-%d %H:%M:%S')
memory_data.append([timestamp, row['peak'], row['used']])
return memory_data
def get_command_stats(self, server, from_date, to_date, group_by):
"""Get total commands processed in the given time period
Args:
server (str): The server ID
from_date (datetime): Get data from this date.
to_date (datetime): Get data to this date.
group_by (str): How to group the stats.
"""
s = []
time_stamps = []
key_name = ""
if group_by == "day":
key_name = server + ":CommandCountByDay"
t = from_date.date()
while t <= to_date.date():
s.append(t.strftime('%y%m%d'))
time_stamps.append(t.strftime('%s'))
t = t + timedelta(days=1)
elif group_by == "hour":
key_name = server + ":CommandCountByHour"
t = from_date
            while t <= to_date:
field_name = t.strftime('%y%m%d') + ":" + str(t.hour)
s.append(field_name)
time_stamps.append(t.strftime('%s'))
t = t + timedelta(seconds=3600)
elif group_by == "minute":
key_name = server + ":CommandCountByMinute"
t = from_date
while t <= to_date:
field_name = t.strftime('%y%m%d') + ":" + str(t.hour)
field_name += ":" + str(t.minute)
s.append(field_name)
time_stamps.append(t.strftime('%s'))
t = t + timedelta(seconds=60)
else:
key_name = server + ":CommandCountBySecond"
start = int(from_date.strftime("%s"))
end = int(to_date.strftime("%s"))
for x in range(start, end + 1):
s.append(str(x))
time_stamps.append(x)
data = []
counts = self.conn.hmget(key_name, s)
for x in xrange(0,len(counts)):
# the default time format string
time_fmt = '%Y-%m-%d %H:%M:%S'
if group_by == "day":
time_fmt = '%Y-%m-%d'
elif group_by == "hour":
time_fmt = '%Y-%m-%d %H:00:00'
elif group_by == "minute":
time_fmt = '%Y-%m-%d %H:%M:00'
# get the count.
try:
if counts[x] is not None:
count = int(counts[x])
else:
count = 0
except Exception as e:
count = 0
# convert the timestamp
timestamp = int(time_stamps[x])
timestamp = datetime.fromtimestamp(timestamp)
timestamp = timestamp.strftime(time_fmt)
# add to the data
data.append([count, timestamp])
return reversed(data)
def get_top_commands_stats(self, server, from_date, to_date):
"""Get top commands processed in the given time period
Args:
server (str): Server ID
from_date (datetime): Get stats from this date.
to_date (datetime): Get stats to this date.
"""
counts = self.get_top_counts(server, from_date, to_date, "CommandCount",
"DailyCommandCount")
return reversed(counts)
def get_top_keys_stats(self, server, from_date, to_date):
"""Gets top comm processed
Args:
server (str): Server ID
from_date (datetime): Get stats from this date.
to_date (datetime): Get stats to this date.
"""
return self.get_top_counts(server, from_date, to_date, "KeyCount",
"DailyKeyCount")
# Helper methods
def get_top_counts(self, server, from_date, to_date, seconds_key_name,
day_key_name, result_count=None):
"""Top counts are stored in a sorted set for every second and for every
day. ZUNIONSTORE across the timeperiods generates the results.
Args:
server (str): The server ID
from_date (datetime): Get stats from this date.
to_date (datetime): Get stats to this date.
seconds_key_name (str): The key for stats at second resolution.
day_key_name (str): The key for stats at daily resolution.
Kwargs:
result_count (int): The number of results to return. Default: 10
"""
if result_count is None:
result_count = 10
# get epoch
start = int(from_date.strftime("%s"))
end = int(to_date.strftime("%s"))
diff = to_date - from_date
# start a redis MULTI/EXEC transaction
pipeline = self.conn.pipeline()
# store the set names to use in ZUNIONSTORE in a list
s = []
if diff.days > 2 :
# when difference is over 2 days, no need to check counts for every second
# Calculate:
# counts of every second on the start day
# counts of every day in between
# counts of every second on the end day
next_day = from_date.date() + timedelta(days=1)
prev_day = to_date.date() - timedelta(days=1)
from_date_end_epoch = int(next_day.strftime("%s")) - 1
to_date_begin_epoch = int(to_date.date().strftime("%s"))
# add counts of every second on the start day
for x in range(start, from_date_end_epoch + 1):
s.append(":".join([server, seconds_key_name, str(x)]))
# add counts of all days in between
t = next_day
while t <= prev_day:
s.append(":".join([server, day_key_name, t.strftime('%y%m%d')]))
t = t + timedelta(days=1)
# add counts of every second on the end day
for x in range(to_date_begin_epoch, end + 1):
s.append(server + ":" + seconds_key_name + ":" + str(x))
else:
# add counts of all seconds between start and end date
for x in range(start, end + 1):
s.append(server + ":" + seconds_key_name + ":" + str(x))
# store the union of all the sets in a temp set
temp_key_name = "_top_counts"
pipeline.zunionstore(temp_key_name, s)
pipeline.zrange(temp_key_name, 0, result_count - 1, True, True)
pipeline.delete(temp_key_name)
# commit transaction to redis
results = pipeline.execute()
result_data = []
for val, count in results[-2]:
result_data.append([val, count])
return result_data
| mit | -5,711,488,420,907,123,000 | 35.313291 | 88 | 0.546231 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_10_01/models/verification_ip_flow_result_py3.py | 1 | 1304 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VerificationIPFlowResult(Model):
"""Results of IP flow verification on the target resource.
:param access: Indicates whether the traffic is allowed or denied.
Possible values include: 'Allow', 'Deny'
:type access: str or ~azure.mgmt.network.v2017_10_01.models.Access
:param rule_name: Name of the rule. If input is not matched against any
security rule, it is not displayed.
:type rule_name: str
"""
_attribute_map = {
'access': {'key': 'access', 'type': 'str'},
'rule_name': {'key': 'ruleName', 'type': 'str'},
}
def __init__(self, *, access=None, rule_name: str=None, **kwargs) -> None:
super(VerificationIPFlowResult, self).__init__(**kwargs)
self.access = access
self.rule_name = rule_name
| mit | -3,858,672,163,984,985,000 | 37.352941 | 78 | 0.597393 | false |
030io/se-seed | tests/test_jbm.py | 1 | 1595 | # -*- coding: utf-8 -*-
__author__ = 'chinfeng'
from unittest import TestCase
import jbmongo
import random
class JBMongoTestCase(TestCase):
def setUp(self):
        # First we need a database; it is the key that ties the context together, and we name it Context
self._dbc = jbmongo.DBContext()
def test_coll_definition(self):
dbc = self._dbc
        # To use it, we first need to define a model
base_document = dbc.base_document()
class Company(base_document):
pass
        # Create a new company
com = Company()
com.title = 'JB-Man有限公司'
id = com.save()
        # Test reading it back
com_found = Company.find_one(dict(_id=id))
self.assertIn('_id', com_found)
        self.assertEqual(com_found['_id'], id)  # access style 1
        self.assertEqual(com_found._id, id)  # access style 2
        self.assertEqual(com_found.title, 'JB-Man有限公司')  # check data consistency
def test_find(self):
dbc = self._dbc
base_document = dbc.base_document()
class Person(base_document):
pass
class MassageStick(base_document):
pass
_bird = random.randint(1, 1000000)
for p in (Person(bird_index=_bird, pain=True) for i in range(10)):
p.save()
for s in (MassageStick(comfort_index=_bird) for i in range(20)):
s.save()
persons = Person.find(dict(bird_index=_bird))
for p in persons:
self.assertEqual(p.bird_index, _bird)
self.assertEqual(p.pain, True)
| lgpl-3.0 | 3,465,320,103,526,201,300 | 27.333333 | 74 | 0.57301 | false |
CRFS/python3-ncplib | examples/time.py | 1 | 1346 | """
NCP time data example.
Connects to a node and requests a single time capture to be performed. Prints the result to stdout.
"""
import asyncio
import ncplib
# The node to connect to. Can be a DNS name or an IP address.
NODE_HOST = "127.0.0.1"
# Frequency.
FREQ_HZ = 2400.12e6 # 2400.12 MHz
# Realtime bandwidth.
RTBW_HZ = 10e6 # 10 MHz
# Capture length.
DURATION_S = 1e-3 # 1 ms.
def split_milli(value):
n = int(value * 1e3)
return (n // 1000000000, n % 1000000000)
def split_nano(value):
n = int(value * 1e9)
return (n // 1000000000, n % 1000000000)
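# Worked example (values follow from the constants above): split_milli(FREQ_HZ)
# with FREQ_HZ = 2400.12e6 returns (2400, 120000000), i.e. whole MHz plus the
# millihertz remainder, and split_nano(DURATION_S) with DURATION_S = 1e-3
# returns (0, 1000000), i.e. whole seconds plus nanoseconds. These pairs feed
# the FCTR/FCTM, RBME/RBMI and LSEC/LNAN parameters of the DSPC command below.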
async def main():
"""
The async main method.
Connects to a node and requests a single time capture to be performed. Prints the result to stdout.
"""
# Connect to the node.
async with await ncplib.connect(NODE_HOST) as connection:
# Send a single DSPC command to the node.
fctr, fctm = split_milli(FREQ_HZ)
rbme, rbmi = split_milli(RTBW_HZ)
lsec, lnan = split_nano(DURATION_S)
response = connection.send("DSPC", "TIME", FCTR=fctr, FCTM=fctm, RBME=rbme, RBMI=rbmi, LSEC=lsec, LNAN=lnan)
# Wait for the node to reply.
field = await response.recv()
print(field)
# Run the async main method if this file is run as a script.
if __name__ == "__main__":
asyncio.run(main())
| mit | -7,047,356,326,211,177,000 | 24.396226 | 116 | 0.643388 | false |
cmput404wi16/metablog | account/models.py | 1 | 1216 | from django.db import models
from django.core import serializers
# Create your models here.
class User(models.Model):
uid = models.CharField(max_length=250, primary_key=True)
nameFirst = models.CharField(max_length=250)
nameLast = models.CharField(max_length=250)
githubId = models.CharField(max_length=25, default="abramhindle")
email = models.EmailField(max_length=254, default="")
# TODO check to see if this would be simpler
# friends = models.CharField(max_length=250)
# pending = models.CharField(max_length=250)
following = models.CharField(max_length=250, default="")
# TODO remove followers
followers = models.CharField(max_length=250, default="")
origin = models.CharField(max_length=250, default="local")
password = models.CharField(max_length=25, default="default")
profileImg = models.ImageField(upload_to="account/img/")
def getUserData(self):
return serializers.serialize('json', self.objects.defer("password"))
class AdminUser(models.Model):
#Still in dev, for now class exist
#methods need to be added
user = models.OneToOneField( User,
on_delete=models.CASCADE,
primary_key=True )
    def whatDoesTheScouterSay(self):
print ("It's over 9000!")
| mit | 4,983,296,740,722,061,000 | 35.848485 | 70 | 0.73273 | false |
zoltan-fedor/robo_car | nodes/drive_control.py | 1 | 11947 | #!/usr/bin/env python
import rospy
from std_msgs.msg import String
#from std_msgs.msg import UInt16
from std_msgs.msg import Int32
import time
from pyfirmata import Arduino
on_hardware = True # whether we are running this node on the actual car (so it can access the IO board)
wheelpin = 5 # to which pin on the IO shield the wheel cable got connected
drivepin = 3 # to which pin on the IO shield the drive cable got connected
if on_hardware == True: # running on hardware -- we need to set the board connection
board = Arduino('/dev/ttyACM99', baudrate=57600)
board.servo_config(wheelpin, min_pulse=1, max_pulse=100, angle=90) # set initial direction to straight forward
board.servo_config(drivepin, min_pulse=1, max_pulse=20, angle=90) # set initial speed to natural
speed_natural = 90
speed_current_angle = speed_natural # this variable will carry the actual speed at any time and will be used to determine direction of change (in case of decay or full stop)
speed_min_angle_reverse = 75 # this is the angle below which the car start moving in reverse
speed_min_angle_forward = 107 # this is the angle above which the car start moving forward
speed_max_angle_reverse = 65 # maximum angle allowed in reverse (which is actually a minimum mathematically, as the angle goes 0-90)
speed_max_angle_forward = 115 # maximum angle allowed in forward
speed_decay_angle = 2 # how much we decrease the angle when there is a decay request
speed_change_angle = 1 # when we receive a request to change the speed, this is the angle change we will do
speed_change_angle_decrease = 2 # this is the speed slowdown (braking)
speed_direction_change_delay = 2 # in sec - delay enforced between changing direction (forward-backward)
last_stop_timestamp = 0.0 # the last time we have reached the zero speed from a non-zero speed (used with the speed_direction_change_delay)
direction_natural = 90 # this is the natural (straight ahead) position of the wheel in angles
direction_current_angle = direction_natural # this variable will carry the actual direction angle at any time
direction_max_angle_left = 30 # maximum angle allowed when setting the direction to the left (which is actually a minimum mathematically, as the angle goes 0-90)
direction_max_angle_right = 145 # maximum angle allowed when setting the direction to the right
direction_decay_angle = 1 # how much we decrease the angle when there is a decay request
direction_change_angle = 7 # when we receive a request to change the direction, this is the angle change we will do
####
# create the publishers to which this node will publish data to
pub_speed_angle = rospy.Publisher('drive_control_speed_angle', Int32, queue_size=10)
pub_direction_angle = rospy.Publisher('drive_control_direction_angle', Int32, queue_size=10)
def message_callback(data):
rospy.loginfo(rospy.get_caller_id() + "I heard speed/direction message %s", data.data)
speed_direction_instructions(data.data)
def decay_callback(data):
rospy.loginfo(rospy.get_caller_id() + "I heard decay message %s", data.data)
speed_direction_instructions(data.data)
def listener():
global pub_drive_angle
# In ROS, nodes are uniquely named. If two nodes with the same
# node are launched, the previous one is kicked off. The
# anonymous=True flag means that rospy will choose a unique
# name for our 'listener' node so that multiple listeners can
# run simultaneously.
rospy.init_node('drive_control', anonymous=True)
if on_hardware == True:
rospy.loginfo("Running on hardware")
else:
rospy.loginfo("Not running on hardware (in simulation")
####
# initialization
# before starting we need to set the car to idle and wheels facing forward
set_speed_angle(speed_current_angle) # sets the speed to the current angle, which has a default of 90 at start
set_direction_angle(direction_current_angle) # sets the direction to the current angle, which has a default of 90 at start
rospy.loginfo("Started.")
####
# subscribe to the topics
rospy.Subscriber("drive_control_publish", String, message_callback)
rospy.Subscriber("drive_control_decay_publish", String, decay_callback)
# spin() simply keeps python from exiting until this node is stopped
rospy.spin()
# this function will set the speed and direction of the car, based on the received instruction
def speed_direction_instructions(instruction):
global speed_current_angle # so we can change this global variable in this function
if(instruction == 'decay'): # this was a decay request
if(speed_current_angle <= speed_min_angle_reverse): # we are currently moving in reverse
change_speed(speed_decay_angle)
if(speed_current_angle >= speed_min_angle_forward): # we are currently moving forward
change_speed(-1*speed_decay_angle)
if(direction_current_angle < direction_natural + direction_decay_angle and direction_current_angle > direction_natural - direction_decay_angle):
# we need to set the direction to the natural direction, because the current angle is within the decay range of natural
change_direction(direction_natural)
else: # we are not within a decay range from the natural direction, so we will decay the direction angle by the decay
if(direction_current_angle < direction_natural): # current direction is to the left
change_direction(direction_decay_angle)
if(direction_current_angle > direction_natural): # current direction is to the right
change_direction(-1*direction_decay_angle)
if(instruction == 'U'): # this is a speed up request (~up button pressed)
if(speed_current_angle > speed_min_angle_reverse and speed_current_angle < speed_min_angle_forward): # currently we are in the neutral zone
speed_current_angle = speed_min_angle_forward
if(speed_current_angle <= speed_min_angle_reverse): # we are currently moving in reverse
change_speed(speed_change_angle_decrease)
if(speed_current_angle >= speed_min_angle_forward): # we are currently moving forward
change_speed(speed_change_angle)
if(instruction == 'D'): # this is a speed down request (~down button pressed)
if(speed_current_angle > speed_min_angle_reverse and speed_current_angle < speed_min_angle_forward): # currently we are in the neutral zone
speed_current_angle = speed_min_angle_reverse
if(speed_current_angle <= speed_min_angle_reverse): # we are currently moving in reverse
change_speed(-1*speed_change_angle)
if(speed_current_angle >= speed_min_angle_forward): # we are currently moving forward
change_speed(-1*speed_change_angle_decrease)
if(instruction == 'L'): # this is a turn left request (~left button pressed)
change_direction(direction_change_angle)
if(instruction == 'R'): # this is a turn right request (~right button pressed)
change_direction(-1*direction_change_angle)
# this function is called with the angle change request and will change the current angle with the amount requested
def change_speed(angle_change):
new_angle = speed_current_angle + angle_change
set_speed_angle(new_angle)
rospy.loginfo("Changed the speed by angle %i", angle_change)
# this function is called with the angle change request and will change the current angle with the amount requested
def change_direction(angle_change):
new_angle = direction_current_angle + angle_change
set_direction_angle(new_angle)
rospy.loginfo("Changed the direction by angle %i", angle_change)
# sets the speed to the angle requested
def set_speed_angle(angle):
global speed_current_angle # so we can change this global variable in this function
global last_stop_timestamp # so we can set this global variable in this function
movement_allowed = 'yes'
#rospy.loginfo("Value of speed_current_angle is %i", speed_current_angle)
#rospy.loginfo("Value of new angle to be set is %i", angle)
if(angle < speed_max_angle_reverse or angle > speed_max_angle_forward):
rospy.loginfo("Out of range angle was requested for speed: %i", angle)
else:
# the old (current) speed is NOT in the zero range but the new speed is in the zero range, then we need to set the last_stop_timestamp,
# which later we will use to determine whether the speed_direction_change_delay has passed yet
        # but we only set this if it hasn't been set already
if((speed_current_angle <= speed_min_angle_reverse or speed_current_angle >= speed_min_angle_forward)
and angle > speed_min_angle_reverse and angle < speed_min_angle_forward
and last_stop_timestamp == 0.0):
last_stop_timestamp = rospy.get_time() # populate the last_stop_timestamp with the unix timestamp (example: 1424637131.834309)
rospy.loginfo("Last stop timestamp set %f", last_stop_timestamp)
movement_allowed = 'yes'
else:
# the old (current) speed is in the zero range but the new speed is NOT in the zero range, then we need to check the last_stop_timestamp,
# whether the speed_direction_change_delay has passed already
if(speed_current_angle >= speed_min_angle_reverse and speed_current_angle <= speed_min_angle_forward
and (angle < speed_min_angle_reverse or angle > speed_min_angle_forward )):
                # if the speed_direction_change_delay has already passed or there wasn't one then we can start moving
if(rospy.get_time() > (last_stop_timestamp + speed_direction_change_delay)):
movement_allowed = 'yes'
last_stop_timestamp = 0.0
else:
movement_allowed = 'no'
if on_hardware == True: # running on hardware -- we need to actually write this value to the PWM
board.digital[drivepin].write(speed_natural) # we set the angle to the middle of the neutral zone, so we don't send power to the motors
rospy.loginfo("No movement allowed, because the speed_direction_change_delay hasn't passed yet!")
else:
movement_allowed = 'yes'
last_stop_timestamp = 0.0
if(movement_allowed == 'yes'):
if(angle > speed_min_angle_reverse and angle < speed_min_angle_forward): # if the request came to set the angle within the neutral range, then we set it to 90, so we don't send power to the motors
angle = speed_natural
if on_hardware == True: # running on hardware -- we need to actually write this value to the PWM
board.digital[drivepin].write(angle)
speed_current_angle = angle # overwrite the global variable with the new value
pub_speed_angle.publish(angle) # publish the angle we set to a topic so others can see it
rospy.loginfo("Set the speed to angle %i", angle)
# sets the direction to the angle requested
def set_direction_angle(angle):
global direction_current_angle # so we can change this global variable in this function
if(angle < direction_max_angle_left or angle > direction_max_angle_right):
rospy.loginfo("Out of range angle was requested for direction: %i", angle)
else:
if on_hardware == True: # running on hardware -- we need to actually write this value to the PWM
board.digital[wheelpin].write(angle)
direction_current_angle = angle # overwrite the global variable with the new value
pub_direction_angle.publish(angle) # publish the angle we set to a topic so others can see it
rospy.loginfo("Set the direction to angle %i", angle)
if __name__ == '__main__':
listener()
| mit | -8,419,473,920,683,098,000 | 59.035176 | 208 | 0.696493 | false |
TetraAsh/baruwa2 | baruwa/controllers/lists.py | 1 | 12838 | # -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4
# Baruwa - Web 2.0 MailScanner front-end.
# Copyright (C) 2010-2012 Andrew Colin Kissa <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"Lists controller"
import socket
import struct
import logging
from urlparse import urlparse
from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from webhelpers import paginate
from pylons.i18n.translation import _
from sqlalchemy import desc
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.exc import NoResultFound
from repoze.what.predicates import not_anonymous
from sphinxapi import SphinxClient, SPH_MATCH_EXTENDED2
#from repoze.what.plugins.pylonshq import ActionProtector
from repoze.what.plugins.pylonshq import ControllerProtector
from baruwa.lib.dates import now
from baruwa.lib.base import BaseController, render
from baruwa.lib.helpers import flash, flash_alert
from baruwa.model.meta import Session
from baruwa.model.lists import List
from baruwa.lib.audit import audit_log
from baruwa.model.domains import Domain
from baruwa.lib.misc import check_num_param
from baruwa.lib.misc import ipaddr_is_valid, convert_list_to_json
from baruwa.forms.lists import list_forms
from baruwa.tasks.settings import update_serial
#from baruwa.lib.auth.predicates import CanAccessAccount
from baruwa.model.accounts import User, Address, domain_owners
from baruwa.lib.regex import EMAIL_RE, IPV4_NET_OR_RANGE_RE, DOM_RE
from baruwa.lib.audit.msgs.lists import *
log = logging.getLogger(__name__)
def make_item(form):
"Make a list item"
litem = List()
litem.user = c.user
litem.list_type = form.list_type.data
litem.from_address = form.from_address.data
return litem
def _set_type(obj):
"Set type of object"
if EMAIL_RE.match(obj.from_address):
obj.from_addr_type = 1
return
if DOM_RE.match(obj.from_address):
obj.from_addr_type = 2
return
if IPV4_NET_OR_RANGE_RE.match(obj.from_address):
obj.from_addr_type = 3
return
if ipaddr_is_valid(obj.from_address):
obj.from_addr_type = 4
return
@ControllerProtector(not_anonymous())
class ListsController(BaseController):
def __before__(self):
"set context"
BaseController.__before__(self)
if self.identity:
c.user = self.identity['user']
else:
c.user = None
c.selectedtab = 'lists'
c.is_ajax = request.is_xhr
def _user_addresses(self):
"Return user addresses"
userid = self.identity['user'].id
query1 = Session.query(User.email.label('email'))\
.filter_by(active=True, account_type=3, id=userid)
query2 = Session.query(Address.address.label('email'))\
.filter_by(enabled=True, user_id=userid)
return query1.union(query2)
def _get_listitem(self, itemid):
"Get a list item"
try:
item = Session.query(List).get(itemid)
except NoResultFound:
item = None
return item
def index(self, list_type=1, direction='dsc', order_by='id',
page=1, format=None):
"Page through lists"
total_found = 0
search_time = 0
num_items = session.get('lists_num_items', 10)
if direction == 'dsc':
sort = desc(order_by)
else:
sort = order_by
q = request.GET.get('q', None)
kwds = {}
if q:
kwds['presliced_list'] = True
conn = SphinxClient()
conn.SetMatchMode(SPH_MATCH_EXTENDED2)
conn.SetFilter('list_type', [int(list_type),])
if page == 1:
conn.SetLimits(0, num_items, 500)
else:
page = int(page)
offset = (page - 1) * num_items
conn.SetLimits(offset, num_items, 500)
try:
results = conn.Query(q, 'lists, lists_rt')
except (socket.timeout, struct.error):
redirect(request.path_qs)
if results and results['matches']:
ids = [hit['id'] for hit in results['matches']]
total_found = results['total_found']
search_time = results['time']
items = Session.query(List)\
.filter(List.list_type == list_type)\
.filter(List.id.in_(ids))\
.order_by(sort)\
.all()
listcount = total_found
else:
items = []
itemcount = 0
listcount = 0
else:
items = Session.query(List)\
.filter(List.list_type == list_type)\
.order_by(sort)
itemcount = Session.query(List.id)\
.filter(List.list_type == list_type)
if c.user.account_type != 1 and itemcount:
items = items.filter(List.user_id == c.user.id)
itemcount = itemcount.filter(List.user_id == c.user.id)
if not 'listcount' in locals():
listcount = itemcount.count()
records = paginate.Page(items,
page=int(page),
items_per_page=num_items,
item_count=listcount,
**kwds)
if format == 'json':
response.headers['Content-Type'] = 'application/json'
data = convert_list_to_json(records, list_type)
return data
c.list_type = list_type
c.page = records
c.direction = direction
c.order_by = order_by
c.q = q
c.total_found = total_found
c.search_time = search_time
return render('/lists/index.html')
def new(self):
"Add a new list item"
c.form = list_forms[c.user.account_type](request.POST,
csrf_context=session)
if c.user.is_domain_admin:
orgs = [group.id for group in c.user.organizations]
query = Session.query(Domain.name).join(domain_owners)\
.filter(domain_owners.c.organization_id.in_(orgs))
options = [(domain.name, domain.name) for domain in query]
c.form.to_domain.choices = options
if c.user.is_peleb:
query = self._user_addresses()
options = [(item.email, item.email) for item in query]
c.form.to_address.choices = options
if request.POST and c.form.validate():
# item = List()
# item.user = c.user
# item.list_type = c.form.list_type.data
# item.from_address = c.form.from_address.data
item = make_item(c.form)
_set_type(item)
aliases = []
if c.user.is_superadmin or c.user.is_peleb:
if c.form.to_address.data != '':
item.to_address = c.form.to_address.data
if ('add_to_alias' in c.form and c.form.add_to_alias.data
and c.user.is_peleb):
for new_addr in options:
if new_addr[0] == item.to_address:
continue
newitem = make_item(c.form)
_set_type(newitem)
newitem.to_address = new_addr[0]
aliases.append(newitem)
else:
item.to_address = 'any'
if c.user.is_domain_admin:
if c.form.to_address.data in ['', 'any']:
item.to_address = c.form.to_domain.data
if c.form.add_to_alias.data:
for dom in options:
if dom[0] == item.to_address:
continue
newitem = make_item(c.form)
_set_type(newitem)
newitem.to_address = dom[0]
aliases.append(newitem)
else:
item.to_address = "%s@%s" % (c.form.to_address.data,
c.form.to_domain.data)
if c.form.add_to_alias.data:
for dom in options:
newitem = make_item(c.form)
_set_type(newitem)
newitem.to_address = "%s@%s" % \
(c.form.to_address.data, dom[0])
if newitem.to_address == item.to_address:
continue
aliases.append(newitem)
try:
Session.add(item)
Session.commit()
for alias in aliases:
try:
Session.add(alias)
Session.commit()
except IntegrityError:
pass
update_serial.delay()
if item.list_type == 1:
listname = _('Approved senders')
else:
listname = _('Banned senders')
info = LISTADD_MSG % dict(s=item.from_address, l=listname)
audit_log(c.user.username,
3, unicode(info), request.host,
request.remote_addr, now())
flash(_('The item has been added to the list'))
if not request.is_xhr:
redirect(url('lists-index',
list_type=c.form.list_type.data))
except IntegrityError:
Session.rollback()
flash_alert(_('The list item already exists'))
return render('/lists/add.html')
def list_delete(self, listid):
"Delete a list item"
item = self._get_listitem(listid)
if not item:
abort(404)
if c.user.account_type != 1 and c.user.id != item.user_id:
abort(403)
c.form = list_forms[c.user.account_type](request.POST,
item,
csrf_context=session)
if not c.user.is_superadmin:
del c.form.add_to_alias
if c.user.is_domain_admin:
orgs = [group.id for group in c.user.organizations]
query = Session.query(Domain.name).join(domain_owners)\
.filter(domain_owners.c.organization_id.in_(orgs))
options = [(domain.name, domain.name) for domain in query]
c.form.to_domain.choices = options
if c.user.is_peleb:
query = self._user_addresses()
options = [(addr.email, addr.email) for addr in query]
c.form.to_address.choices = options
c.id = item.id
if request.POST and c.form.validate():
if item.list_type == 1:
listname = _('Approved senders')
else:
listname = _('Banned senders')
name = item.from_address
Session.delete(item)
Session.commit()
update_serial.delay()
info = LISTDEL_MSG % dict(s=name, l=listname)
audit_log(c.user.username,
4, unicode(info), request.host,
request.remote_addr, now())
flash(_('The item has been deleted'))
if not request.is_xhr:
redirect(url(controller='lists'))
else:
c.delflag = True
return render('/lists/delete.html')
def setnum(self, format=None):
"Set number of items returned"
num = check_num_param(request)
if num and int(num) in [10, 20, 50, 100]:
num = int(num)
session['lists_num_items'] = num
session.save()
nextpage = request.headers.get('Referer', '/')
if '://' in nextpage:
from_url = urlparse(nextpage)
nextpage = from_url[2]
redirect(nextpage)
| gpl-3.0 | 3,271,341,334,908,312,600 | 38.140244 | 80 | 0.531391 | false |
luisza/dfva_client | src/client_fva/ui/requestauthentication.py | 1 | 10627 | import time
from PyQt5 import QtWidgets, QtCore, QtGui
from PyQt5.QtCore import Qt, pyqtSlot, QThread, pyqtSignal
from PyQt5.QtWidgets import QCompleter, QTableWidgetItem
from client_fva.models.ContactDropDown import ContactModel
from client_fva.models.MyRequest import MyRequestModel
from client_fva.session_storage import SessionStorage
from client_fva.ui.requestauthenticationui import Ui_RequestAuthentication
from client_fva.user_settings import UserSettings
class PersonAuthenticationOpers(QThread):
has_result = pyqtSignal(int)
has_changes = pyqtSignal(str, int, bool, str)
remove_check = pyqtSignal(str)
new_code = pyqtSignal(str, str)
def __init__(self, tid, person, identifications, user):
self.identifications = identifications
self.person = person
super(PersonAuthenticationOpers, self).__init__()
self.tid = tid
self.result = None
self.pending_check = {}
self.wait_time = UserSettings.getInstance().check_wait_time
self.session_storage = SessionStorage.getInstance()
self.myrequest = MyRequestModel(db=self.session_storage.db, user=user)
def log_transaction(self, identification, data):
self.has_changes.emit(identification, data['status'], False, data['status_text'])
self.myid = self.myrequest.add_myrequest(identification, 'autenticación', '', '', signed_document_path="",
transaction_status=data['status'], transaction_text=data['status_text'])
def log_check_transaction(self, identification, data):
self.has_changes.emit(identification, data['status'], data['received_notification'], data['status_text'])
self.myrequest.update_myrequest(self.myid, transaction_status=data['status'],
transaction_text=data['status_text'])
def run(self):
transactions = []
for identification in self.identifications:
result = self.person.authenticate(identification)
self.log_transaction(identification, result)
if result['status'] == 0:
self.pending_check[identification] = result['id']
self.session_storage.transactions[result['id_transaction']] = result['code']
transactions.append(result['id_transaction'])
self.new_code.emit(identification, result['code'])
else:
self.remove_check.emit(identification)
while self.pending_check:
for identification in list(self.pending_check.keys()):
result = self.person.check_authenticate(self.pending_check[identification])
self.log_check_transaction(identification, result)
if result['received_notification']:
del self.pending_check[identification]
self.remove_check.emit(identification)
time.sleep(self.wait_time)
for trans in transactions:
if trans in self.session_storage.transactions:
del self.session_storage.transactions[trans]
self.has_result.emit(self.tid)
class RequestAuthentication(Ui_RequestAuthentication):
CONNECTING = 0
CONNECTED = 1
REJECTED = 2
ERROR = 3
def __init__(self, widget, main_app, db, serial):
Ui_RequestAuthentication.__init__(self)
self.widget = widget
self.main_app = main_app
self.session_storage = SessionStorage.getInstance()
self.setupUi(self.widget)
self.person = self.session_storage.session_info[serial]['personclient']
self.user = self.session_storage.session_info[serial]['user']
self.contacts_model = ContactModel(user=self.user, db=db)
completer = QCompleter()
completer.setModel(self.contacts_model)
completer.setCaseSensitivity(Qt.CaseInsensitive)
completer.setFilterMode(Qt.MatchContains)
self.searchContact.setCompleter(completer)
self.add_contact.clicked.connect(lambda: self.add_contact_to_list())
self.requestAuthentication.clicked.connect(self.request_authentication)
self.cleanbtn.clicked.connect(self.cleantable)
self.auth_list = []
self.status_widgets = {}
self.code_widgets = {}
self.initialize()
def initialize(self):
self.contacts.setColumnCount(4)
self.contacts.setHorizontalHeaderItem(0, QTableWidgetItem("Estado"))
self.contacts.setHorizontalHeaderItem(1, QTableWidgetItem("Identificación"))
self.contacts.setHorizontalHeaderItem(2, QTableWidgetItem("Nombre"))
self.contacts.setHorizontalHeaderItem(3, QTableWidgetItem("Código"))
self.contacts.resizeColumnsToContents()
self.contacts_count = 0
self.contacts.contextMenuEvent = self.context_element_menu_event
def inactive_btn(self):
self.cleanbtn.setEnabled(False)
self.add_contact.setEnabled(False)
self.requestAuthentication.setEnabled(False)
def active_btn(self):
self.cleanbtn.setEnabled(True)
self.add_contact.setEnabled(True)
self.requestAuthentication.setEnabled(True)
def insert_item(self, identification, name):
status_widget = QTableWidgetItem()
status_widget.setIcon(QtGui.QIcon(":/images/autentication.png"))
code_widget = QTableWidgetItem("")
self.contacts.insertRow(self.contacts.rowCount())
self.contacts.setItem(self.contacts_count, 0, status_widget)
self.contacts.setItem(self.contacts_count, 1, QTableWidgetItem(identification))
self.contacts.setItem(self.contacts_count, 2, QTableWidgetItem(name))
self.contacts.setItem(self.contacts_count, 3, code_widget)
self.contacts_count += 1
self.status_widgets[identification] = status_widget
self.code_widgets[identification] = code_widget
self.contacts.resizeColumnsToContents()
def change_person_status(self,status_widget, status, error_text="Error o rechazo por parte del usuario"):
if status == self.CONNECTING:
status_widget.setIcon(QtGui.QIcon(":/images/connecting.png"))
status_widget.setToolTip('Conectando al servicio de firmado')
elif status == self.CONNECTED:
status_widget.setIcon(QtGui.QIcon(":/images/connected.png"))
status_widget.setToolTip('Persona autenticada satisfactoriamente')
elif status == self.REJECTED:
status_widget.setIcon(QtGui.QIcon(":/images/rejected.png"))
status_widget.setToolTip('Persona autenticada satisfactoriamente')
elif status == self.ERROR:
status_widget.setIcon(QtGui.QIcon(":/images/error.png"))
status_widget.setToolTip(error_text)
def add_contact_to_list(self):
txt = self.searchContact.text()
id = self.contacts_model.deserialize_contact(txt)
if id:
if id not in self.auth_list:
if id != txt:
self.insert_item(id, txt)
else:
self.insert_item(id, '')
self.auth_list.append(id)
self.searchContact.setText('')
else:
QtWidgets.QMessageBox.warning(self.widget, 'Contacto ya existente',
"El contacto seleccionado fue agregado a la lista anteriormente.")
else:
QtWidgets.QMessageBox.warning(self.widget, 'Contacto no identificado',
"Lo ingresado no es un nombre de contacto o un número de identificación válido. Formato: 08-8888-8888 o 15 números para extranjeros")
def request_authentication(self):
self.inactive_btn()
self.requestAuthProgressBar.setRange(0, len(self.auth_list))
self.auth_pending = len(self.auth_list)
self.update_process_bar(0, "Enviando peticiones de autenticación")
self.pao = PersonAuthenticationOpers(1, self.person, self.auth_list, self.user)
self.pao.has_result.connect(self.end_authentication)
self.pao.has_changes.connect(self.check_transaction_change)
self.pao.remove_check.connect(self.check_transaction_end)
self.pao.new_code.connect(self.add_new_code)
self.pao.start()
def context_element_menu_event(self, pos):
if self.contacts.selectedIndexes():
selected = self.contacts.currentIndex()
if selected.isValid():
row, column = selected.row(), selected.column()
menu = QtWidgets.QMenu()
menu.setStyleSheet("QMenu::item{color:rgb(76, 118, 82);background-color:rgb(216, 230, 225);}")
delete_action = menu.addAction("Delete")
delete_action.setIcon(QtGui.QIcon(":images/delete.png"))
action = menu.exec_(self.contacts.mapToGlobal(pos.pos()))
if action == delete_action:
self.delete_element(row)
def delete_element(self, row):
self.contacts.removeRow(row)
self.auth_list.pop(row)
self.contacts_count -= 1
def cleantable(self):
for x in range(len(self.auth_list)):
self.contacts.removeRow(0)
self.auth_list.pop()
self.contacts.setRowCount(0)
self.contacts_count=0
def update_process_bar(self, value, text):
self.requestAuthProgressBar.setValue(value)
if text:
self.requestAuthProgressBar.setFormat(text)
def end_authentication(self, id):
self.update_process_bar(len(self.auth_list), 'Solicitud de autorizaciones completo')
self.active_btn()
def check_transaction_end(self, identification):
self.auth_pending -= 1
self.update_process_bar(len(self.auth_list) - self.auth_pending,
'Solicitudes faltantes %d'%self.auth_pending)
def check_transaction_change(self, identification, status, recieved, text):
# transaction_status
icon_status = 0
icon_tooltip = ''
if status == 0:
if recieved:
icon_status = self.CONNECTED
else:
icon_status = self.CONNECTING
elif status == 2:
icon_status = self.REJECTED
icon_tooltip = text
else:
icon_status = self.ERROR
icon_tooltip = text
self.change_person_status(self.status_widgets[identification], icon_status, icon_tooltip)
def add_new_code(self, identification, code):
self.code_widgets[identification].setText(code)
self.contacts.resizeColumnsToContents() | gpl-3.0 | -6,719,245,680,836,708,000 | 42.52459 | 150 | 0.647424 | false |
gonesurfing/Quisk_rpi_remote | sdriqpkg/quisk_hardware.py | 1 | 2872 | # Please do not change this hardware control module.
# It provides support for the SDR-IQ by RfSpace.
from __future__ import print_function
import _quisk as QS
import sdriq
from quisk_hardware_model import Hardware as BaseHardware
class Hardware(BaseHardware):
decimations = [1250, 600, 500, 360]
def __init__(self, app, conf):
BaseHardware.__init__(self, app, conf)
self.use_sidetone = 1
self.clock = conf.sdriq_clock
self.rf_gain_labels = ('RF +30', 'RF +20', 'RF +10', 'RF 0 dB')
if conf.fft_size_multiplier == 0:
conf.fft_size_multiplier = 3 # Set size needed by VarDecim
def open(self):
return sdriq.open_samples() # Return a config message
def close(self):
sdriq.close_samples()
def OnButtonRfGain(self, event):
"""Set the SDR-IQ preamp gain and attenuator state.
sdriq.gain_sdriq(gstate, gain)
gstate == 0: Gain must be 0, -10, -20, or -30
gstate == 1: Attenuator is on and gain is 0 to 127 (7 bits)
gstate == 2: Attenuator is off and gain is 0 to 127 (7 bits)
gain for 34, 24, 14, 4 db is 127, 39, 12, 4.
"""
btn = event.GetEventObject()
n = btn.index
if n == 0:
sdriq.gain_sdriq(2, 127)
elif n == 1:
sdriq.gain_sdriq(2, 39)
elif n == 2:
sdriq.gain_sdriq(2, 12)
elif n == 3:
sdriq.gain_sdriq(1, 12)
else:
print ('Unknown RfGain')
def ChangeFrequency(self, tune, vfo, source='', band='', event=None):
if vfo:
sdriq.freq_sdriq(vfo)
return tune, vfo
def ChangeBand(self, band):
# band is a string: "60", "40", "WWV", etc.
btn = self.application.BtnRfGain
if btn:
if band in ('160', '80', '60', '40'):
btn.SetLabel('RF +10', True)
elif band in ('20',):
btn.SetLabel('RF +20', True)
else:
btn.SetLabel('RF +20', True)
def VarDecimGetChoices(self): # return text labels for the control
l = [] # a list of sample rates
for dec in self.decimations:
l.append(str(int(float(self.clock) / dec / 1e3 + 0.5)))
return l
def VarDecimGetLabel(self): # return a text label for the control
return "Sample rate ksps"
def VarDecimGetIndex(self): # return the current index
return self.index
def VarDecimSet(self, index=None): # set decimation, return sample rate
if index is None: # initial call to set decimation before the call to open()
rate = self.application.vardecim_set # May be None or from different hardware
try:
dec = int(float(self.clock / rate + 0.5))
self.index = self.decimations.index(dec)
except:
try:
self.index = self.decimations.index(self.conf.sdriq_decimation)
except:
self.index = 0
else:
self.index = index
dec = self.decimations[self.index]
sdriq.set_decimation(dec)
return int(float(self.clock) / dec + 0.5)
| gpl-2.0 | -3,532,599,379,455,024,600 | 33.60241 | 84 | 0.625 | false |
yuxng/Deep_ISM | ISM/lib/datasets/imdb.py | 1 | 3612 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import os
import os.path as osp
import PIL
import numpy as np
import scipy.sparse
import datasets
from ism.config import cfg
class imdb(object):
"""Image database."""
def __init__(self, name):
self._name = name
self._num_classes = 0
self._classes = []
self._image_index = []
self._roidb = None
self._roidb_handler = self.default_roidb
# Use this dict for storing dataset specific config options
self.config = {}
@property
def name(self):
return self._name
@property
def num_classes(self):
return len(self._classes)
@property
def classes(self):
return self._classes
@property
def image_index(self):
return self._image_index
@property
def roidb_handler(self):
return self._roidb_handler
@roidb_handler.setter
def roidb_handler(self, val):
self._roidb_handler = val
@property
def roidb(self):
# A roidb is a list of dictionaries, each with the following keys:
# boxes
# gt_overlaps
# gt_classes
# flipped
if self._roidb is not None:
return self._roidb
self._roidb = self.roidb_handler()
return self._roidb
@property
def cache_path(self):
cache_path = osp.abspath(osp.join(datasets.ROOT_DIR, 'data', 'cache'))
if not os.path.exists(cache_path):
os.makedirs(cache_path)
return cache_path
@property
def num_images(self):
return len(self.image_index)
def image_path_at(self, i):
raise NotImplementedError
def default_roidb(self):
raise NotImplementedError
def evaluate_detections(self, all_boxes, output_dir=None):
"""
all_boxes is a list of length number-of-classes.
Each list element is a list of length number-of-images.
Each of those list elements is either an empty list []
or a numpy array of detection.
all_boxes[class][image] = [] or np.array of shape #dets x 5
"""
raise NotImplementedError
def evaluate_proposals(self, all_boxes, output_dir=None):
"""
all_boxes is a list of length number-of-classes.
Each list element is a list of length number-of-images.
Each of those list elements is either an empty list []
or a numpy array of detection.
all_boxes[class][image] = [] or np.array of shape #dets x 5
"""
raise NotImplementedError
def append_flipped_images(self):
num_images = self.num_images
for i in xrange(num_images):
entry = {'image' : self.roidb[i]['image'],
'depth' : self.roidb[i]['depth'],
'label' : self.roidb[i]['label'],
'meta_data' : self.roidb[i]['meta_data'],
'class_colors' : self.roidb[i]['class_colors'],
'flipped' : True}
self.roidb.append(entry)
self._image_index = self._image_index * 2
print 'finish appending flipped images'
def competition_mode(self, on):
"""Turn competition mode on or off."""
pass
def fast_hist(self, a, b, n):
k = (a >= 0) & (a < n)
return np.bincount(n * a[k].astype(int) + b[k], minlength=n**2).reshape(n, n)
| mit | 5,910,891,603,174,442,000 | 28.365854 | 85 | 0.564784 | false |
astonshane/davisputnamGo | davisputnam/graph.py | 1 | 1255 | import json
import sys
from pprint import pprint
from ete3 import Tree, TreeStyle, TextFace
from PIL import Image, ImageDraw
def parseTree(root):
tree = Tree()
tree.name = root['Name']
tree.add_face(TextFace(root['Split'], fgcolor="red"), column=0, position="branch-bottom")
if root['Children']:
for child in root['Children']:
tree.children.append(parseTree(child))
return tree
'''
with open('sample_tree.json', 'w') as outfile:
json.dump(obj, outfile, sort_keys=True, indent=4, separators=(',', ': '))
'''
if __name__ == '__main__':
ts = TreeStyle()
ts.show_leaf_name = False
root = json.loads(open(sys.argv[1]).read())
pprint(root)
tree_root = parseTree(root)
print tree_root
for child in tree_root.traverse():
# add a marker with the name of each node, at each node
child.add_face(TextFace(child.name), column=0, position="branch-top")
# render the file and save it
fname = sys.argv[1][:-4] + "png"
tree_root.render(fname, tree_style=ts, w=5000)
im = Image.open(fname)
(x, y) = im.size
draw = ImageDraw.Draw(im)
draw.rectangle((0, y*.45, x*.25, y), fill="white")
im.save(fname, "PNG")
# tree_root.show(tree_style=ts)
| mit | -8,743,040,261,105,409,000 | 25.702128 | 93 | 0.623108 | false |
stanislavb/nagios-snmp-location | nagios-plugins/check_snmp_location.py | 1 | 1637 | #!/usr/bin/env python
# This script checks the standard SNMP location oid
# and saves it in a memcached database with hostname as key.
#
# FreeBSD requirements:
# Compile net-snmp with python bindings
# Install py-memcached
# Nagios exit codes:
# 0 OK
# 1 WARNING
# 2 CRITICAL
# 3 UNKNOWN
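# Example invocation (host name is a placeholder):
#   ./check_snmp_location.py -H switch01.example.org -C public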
import netsnmp
import memcache
from optparse import OptionParser
from sys import exit
# Config
# Either hostname/IP or UNIX socket
memcached_address = ['unix:/var/run/memcached/memcached.sock']
default_community = "public"
location_oid = '1.3.6.1.2.1.1.6'
snmp_version = 2
# Command line option parsing and help text (-h)
usage = "usage: %prog -H host_or_IP -C snmp_community"
parser = OptionParser(usage=usage)
parser.add_option("-H", "--host", dest="host", help="hostname or IP address")
parser.add_option("-C", "--community", dest="community", default=default_community, help="SNMP community")
(options, args) = parser.parse_args()
# We must have a host
if not options.host:
print("UNKNOWN: No hostname or IP to check")
exit(3) # UNKNOWN
# Let's get SNMP location
var = netsnmp.Varbind(location_oid, '0')
res = netsnmp.snmpget(var, Version=snmp_version, DestHost=options.host, Community=options.community, Retries=1)
location = res[0]
if location is not None:
print("OK: " + location)
# Memcached
try:
mc = memcache.Client(memcached_address, debug=0)
mc.set(options.host, location)
except Exception:
# We don't care if memcached doesn't work
pass
exit(0) # OK
print("UNKNOWN: error for host " + options.host + " and SNMP community " + options.community)
exit(3) # UNKNOWN
| unlicense | 1,020,803,993,351,950,000 | 27.224138 | 111 | 0.703115 | false |
flavio/scsgate | scsgate/reactor.py | 1 | 1869 | """ This module contains the definition of the Reactor class.
This one is useful when dealing with concurrent access to the SCSGate
device """
import queue
import threading
from scsgate.tasks import MonitorTask, ExecutionError
class Reactor(threading.Thread):
""" Allows concurrent access to the SCSGate device """
def __init__(self, connection, handle_message, logger=None):
""" Initialize the instance
Arguments
connection: a scsgate.Connection object
handle_message: callback function to invoke whenever a new message
is received
logger: instance of logger
"""
threading.Thread.__init__(self)
self._connection = connection
self._handle_message = handle_message
self._terminate = False
self._logger = logger
self._request_queue = queue.Queue()
def run(self):
""" Starts the thread """
task = None
monitor_task = MonitorTask(
notification_endpoint=self._handle_message)
while True:
if self._terminate:
self._logger.info("scsgate.Reactor exiting")
self._connection.close()
break
try:
task = self._request_queue.get_nowait()
self._logger.debug("scsgate.Reactor: got task {}".format(task))
except queue.Empty:
task = monitor_task
try:
task.execute(connection=self._connection)
except ExecutionError as err:
self._logger.error(err)
def stop(self):
""" Blocks the thread, performs cleanup of the associated
connection """
self._terminate = True
def append_task(self, task):
""" Adds a tasks to the list of the jobs to execute """
self._request_queue.put(task)
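# Minimal usage sketch (illustrative; assumes an already configured
# scsgate.Connection and a callable that handles decoded messages):
#   reactor = Reactor(connection, handle_message=on_message, logger=logger)
#   reactor.start()            # MonitorTask runs whenever the queue is idle
#   reactor.append_task(task)  # queue a scsgate task for execution
#   reactor.stop()             # stop the loop and close the connection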
| mit | 3,353,941,284,899,801,000 | 29.639344 | 79 | 0.596041 | false |
asedunov/intellij-community | python/helpers/pydev/_pydevd_bundle/pydevd_comm.py | 1 | 60832 | ''' pydevd - a debugging daemon
This is the daemon you launch for python remote debugging.
Protocol:
each command has a format:
id\tsequence-num\ttext
id: protocol command number
sequence-num: each request has a sequence number. Sequence numbers
originating at the debugger are odd, sequence numbers originating
at the daemon are even. Every response uses the same sequence number
as the request.
payload: it is protocol dependent. When response is a complex structure, it
is returned as XML. Each attribute value is urlencoded, and then the whole
payload is urlencoded again to prevent stray characters corrupting protocol/xml encodings
Commands:
NUMBER NAME FROM* ARGUMENTS RESPONSE NOTE
100 series: program execution
101 RUN JAVA - -
102 LIST_THREADS JAVA RETURN with XML listing of all threads
103 THREAD_CREATE PYDB - XML with thread information
104 THREAD_KILL JAVA id (or * to exit) kills the thread
PYDB id nofies JAVA that thread was killed
105 THREAD_SUSPEND JAVA XML of the stack, suspends the thread
reason for suspension
PYDB id notifies JAVA that thread was suspended
106 CMD_THREAD_RUN JAVA id resume the thread
PYDB id \t reason notifies JAVA that thread was resumed
107 STEP_INTO JAVA thread_id
108 STEP_OVER JAVA thread_id
109 STEP_RETURN JAVA thread_id
110 GET_VARIABLE JAVA thread_id \t frame_id \t GET_VARIABLE with XML of var content
FRAME|GLOBAL \t attributes*
111 SET_BREAK JAVA file/line of the breakpoint
112 REMOVE_BREAK JAVA file/line of the return
113 CMD_EVALUATE_EXPRESSION JAVA expression result of evaluating the expression
114 CMD_GET_FRAME JAVA request for frame contents
115 CMD_EXEC_EXPRESSION JAVA
116 CMD_WRITE_TO_CONSOLE PYDB
117 CMD_CHANGE_VARIABLE
118 CMD_RUN_TO_LINE
119 CMD_RELOAD_CODE
120 CMD_GET_COMPLETIONS JAVA
500 series diagnostics/ok
501 VERSION either Version string (1.0) Currently just used at startup
502 RETURN either Depends on caller -
900 series: errors
901 ERROR either - This is reserved for unexpected errors.
* JAVA - remote debugger, the java end
* PYDB - pydevd, the python end
'''
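# Illustrative example of the wire format described above (payload contents
# vary per command; this is only a sketch): a CMD_THREAD_RUN (106) request
# with sequence number 13 and a thread id payload travels as one line:
#     106\t13\t<thread_id>\n
# and the matching response/notification reuses sequence number 13.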
import os
from _pydev_bundle.pydev_imports import _queue
from _pydev_imps._pydev_saved_modules import time
from _pydev_imps._pydev_saved_modules import thread
from _pydev_imps._pydev_saved_modules import threading
from _pydev_imps._pydev_saved_modules import socket
from socket import socket, AF_INET, SOCK_STREAM, SHUT_RD, SHUT_WR, SOL_SOCKET, SO_REUSEADDR, SHUT_RDWR, timeout
from _pydevd_bundle.pydevd_constants import DebugInfoHolder, dict_contains, get_thread_id, IS_JYTHON, IS_PY2, IS_PY3K, IS_PY36_OR_GREATER, \
STATE_RUN
try:
from urllib import quote_plus, unquote, unquote_plus
except:
from urllib.parse import quote_plus, unquote, unquote_plus #@Reimport @UnresolvedImport
import pydevconsole
from _pydevd_bundle import pydevd_vars
from _pydevd_bundle import pydevd_xml
from _pydevd_bundle import pydevd_tracing
from _pydevd_bundle import pydevd_vm_type
from pydevd_file_utils import get_abs_path_real_path_and_base_from_frame, NORM_PATHS_AND_BASE_CONTAINER, norm_file_to_client
import sys
import traceback
from _pydevd_bundle.pydevd_utils import quote_smart as quote, compare_object_attrs, cmp_to_key, to_string
from _pydev_bundle import pydev_log
from _pydev_bundle import _pydev_completer
from _pydevd_bundle.pydevd_tracing import get_exception_traceback_str
from _pydevd_bundle import pydevd_console
from _pydev_bundle.pydev_monkey import disable_trace_thread_modules, enable_trace_thread_modules
CMD_RUN = 101
CMD_LIST_THREADS = 102
CMD_THREAD_CREATE = 103
CMD_THREAD_KILL = 104
CMD_THREAD_SUSPEND = 105
CMD_THREAD_RUN = 106
CMD_STEP_INTO = 107
CMD_STEP_OVER = 108
CMD_STEP_RETURN = 109
CMD_GET_VARIABLE = 110
CMD_SET_BREAK = 111
CMD_REMOVE_BREAK = 112
CMD_EVALUATE_EXPRESSION = 113
CMD_GET_FRAME = 114
CMD_EXEC_EXPRESSION = 115
CMD_WRITE_TO_CONSOLE = 116
CMD_CHANGE_VARIABLE = 117
CMD_RUN_TO_LINE = 118
CMD_RELOAD_CODE = 119
CMD_GET_COMPLETIONS = 120
# Note: renumbered (conflicted on merge)
CMD_CONSOLE_EXEC = 121
CMD_ADD_EXCEPTION_BREAK = 122
CMD_REMOVE_EXCEPTION_BREAK = 123
CMD_LOAD_SOURCE = 124
CMD_ADD_DJANGO_EXCEPTION_BREAK = 125
CMD_REMOVE_DJANGO_EXCEPTION_BREAK = 126
CMD_SET_NEXT_STATEMENT = 127
CMD_SMART_STEP_INTO = 128
CMD_EXIT = 129
CMD_SIGNATURE_CALL_TRACE = 130
CMD_SET_PY_EXCEPTION = 131
CMD_GET_FILE_CONTENTS = 132
CMD_SET_PROPERTY_TRACE = 133
# Pydev debug console commands
CMD_EVALUATE_CONSOLE_EXPRESSION = 134
CMD_RUN_CUSTOM_OPERATION = 135
CMD_GET_BREAKPOINT_EXCEPTION = 136
CMD_STEP_CAUGHT_EXCEPTION = 137
CMD_SEND_CURR_EXCEPTION_TRACE = 138
CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED = 139
CMD_IGNORE_THROWN_EXCEPTION_AT = 140
CMD_ENABLE_DONT_TRACE = 141
CMD_SHOW_CONSOLE = 142
CMD_GET_ARRAY = 143
CMD_STEP_INTO_MY_CODE = 144
CMD_GET_CONCURRENCY_EVENT = 145
CMD_SHOW_RETURN_VALUES = 146
CMD_INPUT_REQUESTED = 147
CMD_GET_DESCRIPTION = 148
CMD_PROCESS_CREATED = 149
CMD_SHOW_CYTHON_WARNING = 150
CMD_VERSION = 501
CMD_RETURN = 502
CMD_ERROR = 901
ID_TO_MEANING = {
'101': 'CMD_RUN',
'102': 'CMD_LIST_THREADS',
'103': 'CMD_THREAD_CREATE',
'104': 'CMD_THREAD_KILL',
'105': 'CMD_THREAD_SUSPEND',
'106': 'CMD_THREAD_RUN',
'107': 'CMD_STEP_INTO',
'108': 'CMD_STEP_OVER',
'109': 'CMD_STEP_RETURN',
'110': 'CMD_GET_VARIABLE',
'111': 'CMD_SET_BREAK',
'112': 'CMD_REMOVE_BREAK',
'113': 'CMD_EVALUATE_EXPRESSION',
'114': 'CMD_GET_FRAME',
'115': 'CMD_EXEC_EXPRESSION',
'116': 'CMD_WRITE_TO_CONSOLE',
'117': 'CMD_CHANGE_VARIABLE',
'118': 'CMD_RUN_TO_LINE',
'119': 'CMD_RELOAD_CODE',
'120': 'CMD_GET_COMPLETIONS',
'121': 'CMD_CONSOLE_EXEC',
'122': 'CMD_ADD_EXCEPTION_BREAK',
'123': 'CMD_REMOVE_EXCEPTION_BREAK',
'124': 'CMD_LOAD_SOURCE',
'125': 'CMD_ADD_DJANGO_EXCEPTION_BREAK',
'126': 'CMD_REMOVE_DJANGO_EXCEPTION_BREAK',
'127': 'CMD_SET_NEXT_STATEMENT',
'128': 'CMD_SMART_STEP_INTO',
'129': 'CMD_EXIT',
'130': 'CMD_SIGNATURE_CALL_TRACE',
'131': 'CMD_SET_PY_EXCEPTION',
'132': 'CMD_GET_FILE_CONTENTS',
'133': 'CMD_SET_PROPERTY_TRACE',
'134': 'CMD_EVALUATE_CONSOLE_EXPRESSION',
'135': 'CMD_RUN_CUSTOM_OPERATION',
'136': 'CMD_GET_BREAKPOINT_EXCEPTION',
'137': 'CMD_STEP_CAUGHT_EXCEPTION',
'138': 'CMD_SEND_CURR_EXCEPTION_TRACE',
'139': 'CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED',
'140': 'CMD_IGNORE_THROWN_EXCEPTION_AT',
'141': 'CMD_ENABLE_DONT_TRACE',
'142': 'CMD_SHOW_CONSOLE',
'143': 'CMD_GET_ARRAY',
'144': 'CMD_STEP_INTO_MY_CODE',
'145': 'CMD_GET_CONCURRENCY_EVENT',
'146': 'CMD_SHOW_RETURN_VALUES',
'147': 'CMD_INPUT_REQUESTED',
'148': 'CMD_GET_DESCRIPTION',
'149': 'CMD_PROCESS_CREATED',
'150': 'CMD_SHOW_CYTHON_WARNING',
'501': 'CMD_VERSION',
'502': 'CMD_RETURN',
'901': 'CMD_ERROR',
}
MAX_IO_MSG_SIZE = 1000 #if the io is too big, we'll not send all (could make the debugger too non-responsive)
#this number can be changed if there's need to do so
VERSION_STRING = "@@BUILD_NUMBER@@"
from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding
file_system_encoding = getfilesystemencoding()
#--------------------------------------------------------------------------------------------------- UTILITIES
#=======================================================================================================================
# pydevd_log
#=======================================================================================================================
def pydevd_log(level, *args):
""" levels are:
0 most serious warnings/errors
1 warnings/significant events
2 informational trace
"""
if level <= DebugInfoHolder.DEBUG_TRACE_LEVEL:
        #yes, we can have errors printing if the console of the program has already been closed (and we're still trying to print something)
try:
sys.stderr.write('%s\n' % (args,))
except:
pass
#=======================================================================================================================
# GlobalDebuggerHolder
#=======================================================================================================================
class GlobalDebuggerHolder:
'''
Holder for the global debugger.
'''
global_dbg = None # Note: don't rename (the name is used in our attach to process)
#=======================================================================================================================
# get_global_debugger
#=======================================================================================================================
def get_global_debugger():
return GlobalDebuggerHolder.global_dbg
GetGlobalDebugger = get_global_debugger # Backward-compatibility
#=======================================================================================================================
# set_global_debugger
#=======================================================================================================================
def set_global_debugger(dbg):
GlobalDebuggerHolder.global_dbg = dbg
#------------------------------------------------------------------- ACTUAL COMM
#=======================================================================================================================
# PyDBDaemonThread
#=======================================================================================================================
class PyDBDaemonThread(threading.Thread):
created_pydb_daemon_threads = {}
def __init__(self):
threading.Thread.__init__(self)
self.setDaemon(True)
self.killReceived = False
self.pydev_do_not_trace = True
self.is_pydev_daemon_thread = True
def run(self):
created_pydb_daemon = self.created_pydb_daemon_threads
created_pydb_daemon[self] = 1
try:
try:
if IS_JYTHON and not isinstance(threading.currentThread(), threading._MainThread):
# we shouldn't update sys.modules for the main thread, cause it leads to the second importing 'threading'
# module, and the new instance of main thread is created
import org.python.core as PyCore #@UnresolvedImport
ss = PyCore.PySystemState()
# Note: Py.setSystemState() affects only the current thread.
PyCore.Py.setSystemState(ss)
self._on_run()
except:
if sys is not None and traceback is not None:
traceback.print_exc()
finally:
del created_pydb_daemon[self]
def _on_run(self):
raise NotImplementedError('Should be reimplemented by: %s' % self.__class__)
def do_kill_pydev_thread(self):
#that was not working very well because jython gave some socket errors
self.killReceived = True
def _stop_trace(self):
if self.pydev_do_not_trace:
disable_tracing = True
if pydevd_vm_type.get_vm_type() == pydevd_vm_type.PydevdVmType.JYTHON and sys.hexversion <= 0x020201f0:
# don't run untraced threads if we're in jython 2.2.1 or lower
# jython bug: if we start a thread and another thread changes the tracing facility
# it affects other threads (it's not set only for the thread but globally)
# Bug: http://sourceforge.net/tracker/index.php?func=detail&aid=1870039&group_id=12867&atid=112867
disable_tracing = False
if disable_tracing:
pydevd_tracing.SetTrace(None) # no debugging on this thread
#=======================================================================================================================
# ReaderThread
#=======================================================================================================================
class ReaderThread(PyDBDaemonThread):
""" reader thread reads and dispatches commands in an infinite loop """
def __init__(self, sock):
PyDBDaemonThread.__init__(self)
self.sock = sock
self.setName("pydevd.Reader")
from _pydevd_bundle.pydevd_process_net_command import process_net_command
self.process_net_command = process_net_command
self.global_debugger_holder = GlobalDebuggerHolder
def do_kill_pydev_thread(self):
#We must close the socket so that it doesn't stay halted there.
self.killReceived = True
try:
self.sock.shutdown(SHUT_RD) #shutdown the socket for read
except:
#just ignore that
pass
def _on_run(self):
self._stop_trace()
read_buffer = ""
try:
while not self.killReceived:
try:
r = self.sock.recv(1024)
except:
if not self.killReceived:
traceback.print_exc()
self.handle_except()
return #Finished communication.
#Note: the java backend is always expected to pass utf-8 encoded strings. We now work with unicode
#internally and thus, we may need to convert to the actual encoding where needed (i.e.: filenames
#on python 2 may need to be converted to the filesystem encoding).
if hasattr(r, 'decode'):
r = r.decode('utf-8')
read_buffer += r
if DebugInfoHolder.DEBUG_RECORD_SOCKET_READS:
sys.stderr.write('debugger: received >>%s<<\n' % (read_buffer,))
sys.stderr.flush()
if len(read_buffer) == 0:
self.handle_except()
break
while read_buffer.find('\n') != -1:
command, read_buffer = read_buffer.split('\n', 1)
args = command.split('\t', 2)
try:
cmd_id = int(args[0])
pydev_log.debug('Received command: %s %s\n' % (ID_TO_MEANING.get(str(cmd_id), '???'), command,))
self.process_command(cmd_id, int(args[1]), args[2])
except:
traceback.print_exc()
sys.stderr.write("Can't process net command: %s\n" % command)
sys.stderr.flush()
except:
traceback.print_exc()
self.handle_except()
def handle_except(self):
self.global_debugger_holder.global_dbg.finish_debugging_session()
def process_command(self, cmd_id, seq, text):
self.process_net_command(self.global_debugger_holder.global_dbg, cmd_id, seq, text)
#----------------------------------------------------------------------------------- SOCKET UTILITIES - WRITER
#=======================================================================================================================
# WriterThread
#=======================================================================================================================
class WriterThread(PyDBDaemonThread):
""" writer thread writes out the commands in an infinite loop """
def __init__(self, sock):
PyDBDaemonThread.__init__(self)
self.sock = sock
self.setName("pydevd.Writer")
self.cmdQueue = _queue.Queue()
if pydevd_vm_type.get_vm_type() == 'python':
self.timeout = 0
else:
self.timeout = 0.1
def add_command(self, cmd):
""" cmd is NetCommand """
if not self.killReceived: #we don't take new data after everybody die
self.cmdQueue.put(cmd)
def _on_run(self):
""" just loop and write responses """
self._stop_trace()
get_has_timeout = sys.hexversion >= 0x02030000 # 2.3 onwards have it.
try:
while True:
try:
try:
if get_has_timeout:
cmd = self.cmdQueue.get(1, 0.1)
else:
time.sleep(.01)
cmd = self.cmdQueue.get(0)
except _queue.Empty:
if self.killReceived:
try:
self.sock.shutdown(SHUT_WR)
self.sock.close()
except:
pass
return #break if queue is empty and killReceived
else:
continue
except:
#pydevd_log(0, 'Finishing debug communication...(1)')
                    #when releasing the thread here, we could get errors because we were shutting down
                    #but the thread had not been released yet
return
out = cmd.outgoing
if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 1:
out_message = 'sending cmd --> '
out_message += "%20s" % ID_TO_MEANING.get(out[:3], 'UNKNOWN')
out_message += ' '
out_message += unquote(unquote(out)).replace('\n', ' ')
try:
sys.stderr.write('%s\n' % (out_message,))
except:
pass
if IS_PY3K:
out = bytearray(out, 'utf-8')
self.sock.send(out) #TODO: this does not guarantee that all message are sent (and jython does not have a send all)
if cmd.id == CMD_EXIT:
break
if time is None:
break #interpreter shutdown
time.sleep(self.timeout)
except Exception:
GlobalDebuggerHolder.global_dbg.finish_debugging_session()
if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 0:
traceback.print_exc()
def empty(self):
return self.cmdQueue.empty()
#--------------------------------------------------- CREATING THE SOCKET THREADS
#=======================================================================================================================
# start_server
#=======================================================================================================================
def start_server(port):
""" binds to a port, waits for the debugger to connect """
s = socket(AF_INET, SOCK_STREAM)
s.settimeout(None)
try:
from socket import SO_REUSEPORT
s.setsockopt(SOL_SOCKET, SO_REUSEPORT, 1)
except ImportError:
s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
s.bind(('', port))
pydevd_log(1, "Bound to port ", str(port))
try:
s.listen(1)
newSock, _addr = s.accept()
pydevd_log(1, "Connection accepted")
        # the server socket is no longer needed once the connection is accepted, so shut it down and close it
s.shutdown(SHUT_RDWR)
s.close()
return newSock
except:
sys.stderr.write("Could not bind to port: %s\n" % (port,))
sys.stderr.flush()
traceback.print_exc()
sys.exit(1) #TODO: is it safe?
#=======================================================================================================================
# start_client
#=======================================================================================================================
def start_client(host, port):
""" connects to a host/port """
pydevd_log(1, "Connecting to ", host, ":", str(port))
s = socket(AF_INET, SOCK_STREAM)
MAX_TRIES = 100
i = 0
while i<MAX_TRIES:
try:
s.connect((host, port))
except:
i+=1
time.sleep(0.2)
continue
pydevd_log(1, "Connected.")
return s
sys.stderr.write("Could not connect to %s: %s\n" % (host, port))
sys.stderr.flush()
traceback.print_exc()
sys.exit(1) #TODO: is it safe?
#------------------------------------------------------------------------------------ MANY COMMUNICATION STUFF
#=======================================================================================================================
# NetCommand
#=======================================================================================================================
class NetCommand:
""" Commands received/sent over the network.
Command can represent command received from the debugger,
or one to be sent by daemon.
"""
next_seq = 0 # sequence numbers
def __init__(self, id, seq, text):
""" smart handling of parameters
if sequence is 0, new sequence will be generated
if text has carriage returns they'll be replaced"""
self.id = id
if seq == 0:
NetCommand.next_seq += 2
seq = NetCommand.next_seq
self.seq = seq
self.text = text
encoded = quote(to_string(text), '/<>_=" \t')
self.outgoing = '%s\t%s\t%s\n' % (id, seq, encoded)
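# Illustrative sketch (not part of the original pydevd code): how a command is
# framed on the wire. The outgoing payload is "<id>\t<seq>\t<quoted text>\n",
# mirroring the '\n' / '\t' framing that ReaderThread parses for incoming
# commands; the concrete values below are examples only.
def _example_wire_format():
    # passing seq=0 lets NetCommand auto-generate a sequence number
    # (NetCommand.next_seq is incremented by 2 for each auto-generated id)
    cmd = NetCommand(CMD_VERSION, 0, VERSION_STRING)
    return cmd.outgoing  # e.g. "<CMD_VERSION>\t<seq>\t<quoted version string>\n"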
#=======================================================================================================================
# NetCommandFactory
#=======================================================================================================================
class NetCommandFactory:
def _thread_to_xml(self, thread):
""" thread information as XML """
name = pydevd_xml.make_valid_xml_value(thread.getName())
cmdText = '<thread name="%s" id="%s" />' % (quote(name), get_thread_id(thread))
return cmdText
def make_error_message(self, seq, text):
cmd = NetCommand(CMD_ERROR, seq, text)
if DebugInfoHolder.DEBUG_TRACE_LEVEL > 2:
sys.stderr.write("Error: %s" % (text,))
return cmd
def make_thread_created_message(self, thread):
cmdText = "<xml>" + self._thread_to_xml(thread) + "</xml>"
return NetCommand(CMD_THREAD_CREATE, 0, cmdText)
def make_process_created_message(self):
cmdText = '<process/>'
return NetCommand(CMD_PROCESS_CREATED, 0, cmdText)
def make_show_cython_warning_message(self):
try:
return NetCommand(CMD_SHOW_CYTHON_WARNING, 0, '')
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_custom_frame_created_message(self, frameId, frameDescription):
frameDescription = pydevd_xml.make_valid_xml_value(frameDescription)
cmdText = '<xml><thread name="%s" id="%s"/></xml>' % (frameDescription, frameId)
return NetCommand(CMD_THREAD_CREATE, 0, cmdText)
def make_list_threads_message(self, seq):
""" returns thread listing as XML """
try:
t = threading.enumerate()
cmd_text = ["<xml>"]
append = cmd_text.append
for i in t:
                if i.isAlive():
append(self._thread_to_xml(i))
append("</xml>")
return NetCommand(CMD_RETURN, seq, ''.join(cmd_text))
except:
return self.make_error_message(seq, get_exception_traceback_str())
def make_variable_changed_message(self, seq, payload):
# notify debugger that value was changed successfully
return NetCommand(CMD_RETURN, seq, payload)
def make_io_message(self, v, ctx, dbg=None):
'''
@param v: the message to pass to the debug server
@param ctx: 1 for stdio 2 for stderr
@param dbg: If not none, add to the writer
'''
try:
if len(v) > MAX_IO_MSG_SIZE:
v = v[0:MAX_IO_MSG_SIZE]
v += '...'
v = pydevd_xml.make_valid_xml_value(quote(v, '/>_= \t'))
net = NetCommand(str(CMD_WRITE_TO_CONSOLE), 0, '<xml><io s="%s" ctx="%s"/></xml>' % (v, ctx))
except:
net = self.make_error_message(0, get_exception_traceback_str())
if dbg:
dbg.writer.add_command(net)
return net
def make_version_message(self, seq):
try:
return NetCommand(CMD_VERSION, seq, VERSION_STRING)
except:
return self.make_error_message(seq, get_exception_traceback_str())
def make_thread_killed_message(self, id):
try:
return NetCommand(CMD_THREAD_KILL, 0, str(id))
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_thread_suspend_str(self, thread_id, frame, stop_reason, message):
""" <xml>
<thread id="id" stop_reason="reason">
<frame id="id" name="functionName " file="file" line="line">
<var variable stuffff....
</frame>
</thread>
"""
cmd_text_list = ["<xml>"]
append = cmd_text_list.append
make_valid_xml_value = pydevd_xml.make_valid_xml_value
if message:
message = make_valid_xml_value(message)
append('<thread id="%s" stop_reason="%s" message="%s">' % (thread_id, stop_reason, message))
curr_frame = frame
try:
while curr_frame:
#print cmdText
my_id = id(curr_frame)
#print "id is ", my_id
if curr_frame.f_code is None:
break #Iron Python sometimes does not have it!
my_name = curr_frame.f_code.co_name #method name (if in method) or ? if global
if my_name is None:
break #Iron Python sometimes does not have it!
#print "name is ", my_name
abs_path_real_path_and_base = get_abs_path_real_path_and_base_from_frame(curr_frame)
myFile = norm_file_to_client(abs_path_real_path_and_base[0])
if file_system_encoding.lower() != "utf-8" and hasattr(myFile, "decode"):
# myFile is a byte string encoded using the file system encoding
# convert it to utf8
myFile = myFile.decode(file_system_encoding).encode("utf-8")
#print "file is ", myFile
#myFile = inspect.getsourcefile(curr_frame) or inspect.getfile(frame)
myLine = str(curr_frame.f_lineno)
#print "line is ", myLine
#the variables are all gotten 'on-demand'
#variables = pydevd_xml.frame_vars_to_xml(curr_frame.f_locals)
variables = ''
append('<frame id="%s" name="%s" ' % (my_id , make_valid_xml_value(my_name)))
append('file="%s" line="%s">' % (quote(myFile, '/>_= \t'), myLine))
append(variables)
append("</frame>")
curr_frame = curr_frame.f_back
except :
traceback.print_exc()
append("</thread></xml>")
return ''.join(cmd_text_list)
def make_thread_suspend_message(self, thread_id, frame, stop_reason, message):
try:
return NetCommand(CMD_THREAD_SUSPEND, 0, self.make_thread_suspend_str(thread_id, frame, stop_reason, message))
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_thread_run_message(self, id, reason):
try:
return NetCommand(CMD_THREAD_RUN, 0, str(id) + "\t" + str(reason))
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_get_variable_message(self, seq, payload):
try:
return NetCommand(CMD_GET_VARIABLE, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_array_message(self, seq, payload):
try:
return NetCommand(CMD_GET_ARRAY, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_description_message(self, seq, payload):
try:
return NetCommand(CMD_GET_DESCRIPTION, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_frame_message(self, seq, payload):
try:
return NetCommand(CMD_GET_FRAME, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_evaluate_expression_message(self, seq, payload):
try:
return NetCommand(CMD_EVALUATE_EXPRESSION, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_completions_message(self, seq, payload):
try:
return NetCommand(CMD_GET_COMPLETIONS, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_file_contents(self, seq, payload):
try:
return NetCommand(CMD_GET_FILE_CONTENTS, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_send_breakpoint_exception_message(self, seq, payload):
try:
return NetCommand(CMD_GET_BREAKPOINT_EXCEPTION, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_send_curr_exception_trace_message(self, seq, thread_id, curr_frame_id, exc_type, exc_desc, trace_obj):
try:
while trace_obj.tb_next is not None:
trace_obj = trace_obj.tb_next
exc_type = pydevd_xml.make_valid_xml_value(str(exc_type)).replace('\t', ' ') or 'exception: type unknown'
exc_desc = pydevd_xml.make_valid_xml_value(str(exc_desc)).replace('\t', ' ') or 'exception: no description'
payload = str(curr_frame_id) + '\t' + exc_type + "\t" + exc_desc + "\t" + \
self.make_thread_suspend_str(thread_id, trace_obj.tb_frame, CMD_SEND_CURR_EXCEPTION_TRACE, '')
return NetCommand(CMD_SEND_CURR_EXCEPTION_TRACE, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_send_curr_exception_trace_proceeded_message(self, seq, thread_id):
try:
return NetCommand(CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED, 0, str(thread_id))
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_send_console_message(self, seq, payload):
try:
return NetCommand(CMD_EVALUATE_CONSOLE_EXPRESSION, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_custom_operation_message(self, seq, payload):
try:
return NetCommand(CMD_RUN_CUSTOM_OPERATION, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_load_source_message(self, seq, source, dbg=None):
try:
net = NetCommand(CMD_LOAD_SOURCE, seq, '%s' % source)
except:
net = self.make_error_message(0, get_exception_traceback_str())
if dbg:
dbg.writer.add_command(net)
return net
def make_show_console_message(self, thread_id, frame):
try:
return NetCommand(CMD_SHOW_CONSOLE, 0, self.make_thread_suspend_str(thread_id, frame, CMD_SHOW_CONSOLE, ''))
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_input_requested_message(self, started):
try:
return NetCommand(CMD_INPUT_REQUESTED, 0, started)
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_exit_message(self):
try:
net = NetCommand(CMD_EXIT, 0, '')
except:
net = self.make_error_message(0, get_exception_traceback_str())
return net
INTERNAL_TERMINATE_THREAD = 1
INTERNAL_SUSPEND_THREAD = 2
#=======================================================================================================================
# InternalThreadCommand
#=======================================================================================================================
class InternalThreadCommand:
""" internal commands are generated/executed by the debugger.
The reason for their existence is that some commands have to be executed
on specific threads. These are the InternalThreadCommands that get
        posted to PyDB.cmdQueue.
"""
def can_be_executed_by(self, thread_id):
'''By default, it must be in the same thread to be executed
'''
return self.thread_id == thread_id or self.thread_id.endswith('|' + thread_id)
def do_it(self, dbg):
raise NotImplementedError("you have to override do_it")
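# Illustrative sketch (an assumption about how PyDB drains its cmdQueue; the
# real dispatch loop lives outside this excerpt): a queued internal command is
# only executed once can_be_executed_by() accepts the current thread id,
# otherwise it stays queued for the thread it was posted for.
def _example_dispatch(dbg, queued_cmd, current_thread_id):
    if queued_cmd.can_be_executed_by(current_thread_id):
        queued_cmd.do_it(dbg)
        return True
    return False  # leave it in the queue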
class ReloadCodeCommand(InternalThreadCommand):
def __init__(self, module_name, thread_id):
self.thread_id = thread_id
self.module_name = module_name
self.executed = False
self.lock = thread.allocate_lock()
def can_be_executed_by(self, thread_id):
if self.thread_id == '*':
return True #Any thread can execute it!
return InternalThreadCommand.can_be_executed_by(self, thread_id)
def do_it(self, dbg):
self.lock.acquire()
try:
if self.executed:
return
self.executed = True
finally:
self.lock.release()
module_name = self.module_name
if not dict_contains(sys.modules, module_name):
if '.' in module_name:
new_module_name = module_name.split('.')[-1]
if dict_contains(sys.modules, new_module_name):
module_name = new_module_name
if not dict_contains(sys.modules, module_name):
sys.stderr.write('pydev debugger: Unable to find module to reload: "' + module_name + '".\n')
# Too much info...
# sys.stderr.write('pydev debugger: This usually means you are trying to reload the __main__ module (which cannot be reloaded).\n')
else:
sys.stderr.write('pydev debugger: Start reloading module: "' + module_name + '" ... \n')
from _pydevd_bundle import pydevd_reload
if pydevd_reload.xreload(sys.modules[module_name]):
sys.stderr.write('pydev debugger: reload finished\n')
else:
sys.stderr.write('pydev debugger: reload finished without applying any change\n')
#=======================================================================================================================
# InternalTerminateThread
#=======================================================================================================================
class InternalTerminateThread(InternalThreadCommand):
def __init__(self, thread_id):
self.thread_id = thread_id
def do_it(self, dbg):
pydevd_log(1, "killing ", str(self.thread_id))
cmd = dbg.cmd_factory.make_thread_killed_message(self.thread_id)
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalRunThread
#=======================================================================================================================
class InternalRunThread(InternalThreadCommand):
def __init__(self, thread_id):
self.thread_id = thread_id
def do_it(self, dbg):
t = pydevd_find_thread_by_id(self.thread_id)
if t:
t.additional_info.pydev_step_cmd = -1
t.additional_info.pydev_step_stop = None
t.additional_info.pydev_state = STATE_RUN
#=======================================================================================================================
# InternalStepThread
#=======================================================================================================================
class InternalStepThread(InternalThreadCommand):
def __init__(self, thread_id, cmd_id):
self.thread_id = thread_id
self.cmd_id = cmd_id
def do_it(self, dbg):
t = pydevd_find_thread_by_id(self.thread_id)
if t:
t.additional_info.pydev_step_cmd = self.cmd_id
t.additional_info.pydev_state = STATE_RUN
#=======================================================================================================================
# InternalSetNextStatementThread
#=======================================================================================================================
class InternalSetNextStatementThread(InternalThreadCommand):
def __init__(self, thread_id, cmd_id, line, func_name):
self.thread_id = thread_id
self.cmd_id = cmd_id
self.line = line
if IS_PY2:
if isinstance(func_name, unicode):
# On cython with python 2.X it requires an str, not unicode (but on python 3.3 it should be a str, not bytes).
func_name = func_name.encode('utf-8')
self.func_name = func_name
def do_it(self, dbg):
t = pydevd_find_thread_by_id(self.thread_id)
if t:
t.additional_info.pydev_step_cmd = self.cmd_id
t.additional_info.pydev_next_line = int(self.line)
t.additional_info.pydev_func_name = self.func_name
t.additional_info.pydev_state = STATE_RUN
#=======================================================================================================================
# InternalGetVariable
#=======================================================================================================================
class InternalGetVariable(InternalThreadCommand):
""" gets the value of a variable """
def __init__(self, seq, thread_id, frame_id, scope, attrs):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.scope = scope
self.attributes = attrs
def do_it(self, dbg):
""" Converts request into python variable """
try:
xml = "<xml>"
_typeName, valDict = pydevd_vars.resolve_compound_variable(self.thread_id, self.frame_id, self.scope, self.attributes)
if valDict is None:
valDict = {}
keys = valDict.keys()
if _typeName != "OrderedDict" and not IS_PY36_OR_GREATER:
if hasattr(keys, 'sort'):
keys.sort(compare_object_attrs) #Python 3.0 does not have it
else:
if IS_PY3K:
keys = sorted(keys, key=cmp_to_key(compare_object_attrs)) #Jython 2.1 does not have it (and all must be compared as strings).
else:
keys = sorted(keys, cmp=compare_object_attrs) #Jython 2.1 does not have it (and all must be compared as strings).
for k in keys:
xml += pydevd_xml.var_to_xml(valDict[k], to_string(k))
xml += "</xml>"
cmd = dbg.cmd_factory.make_get_variable_message(self.sequence, xml)
dbg.writer.add_command(cmd)
except Exception:
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error resolving variables " + get_exception_traceback_str())
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalGetArray
#=======================================================================================================================
class InternalGetArray(InternalThreadCommand):
def __init__(self, seq, roffset, coffset, rows, cols, format, thread_id, frame_id, scope, attrs):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.scope = scope
self.name = attrs.split("\t")[-1]
self.attrs = attrs
self.roffset = int(roffset)
self.coffset = int(coffset)
self.rows = int(rows)
self.cols = int(cols)
self.format = format
def do_it(self, dbg):
try:
frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
var = pydevd_vars.eval_in_context(self.name, frame.f_globals, frame.f_locals)
xml = pydevd_vars.table_like_struct_to_xml(var, self.name, self.roffset, self.coffset, self.rows, self.cols, self.format )
cmd = dbg.cmd_factory.make_get_array_message(self.sequence, xml)
dbg.writer.add_command(cmd)
except:
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error resolving array: " + get_exception_traceback_str())
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalChangeVariable
#=======================================================================================================================
class InternalChangeVariable(InternalThreadCommand):
""" changes the value of a variable """
def __init__(self, seq, thread_id, frame_id, scope, attr, expression):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.scope = scope
self.attr = attr
self.expression = expression
def do_it(self, dbg):
""" Converts request into python variable """
try:
result = pydevd_vars.change_attr_expression(self.thread_id, self.frame_id, self.attr, self.expression, dbg)
xml = "<xml>"
xml += pydevd_xml.var_to_xml(result, "")
xml += "</xml>"
cmd = dbg.cmd_factory.make_variable_changed_message(self.sequence, xml)
dbg.writer.add_command(cmd)
except Exception:
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error changing variable attr:%s expression:%s traceback:%s" % (self.attr, self.expression, get_exception_traceback_str()))
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalGetFrame
#=======================================================================================================================
class InternalGetFrame(InternalThreadCommand):
    """ gets the variables of a frame """
def __init__(self, seq, thread_id, frame_id):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
def do_it(self, dbg):
""" Converts request into python variable """
try:
frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
if frame is not None:
hidden_ns = pydevconsole.get_ipython_hidden_vars()
xml = "<xml>"
xml += pydevd_xml.frame_vars_to_xml(frame.f_locals, hidden_ns)
del frame
xml += "</xml>"
cmd = dbg.cmd_factory.make_get_frame_message(self.sequence, xml)
dbg.writer.add_command(cmd)
else:
#pydevd_vars.dump_frames(self.thread_id)
#don't print this error: frame not found: means that the client is not synchronized (but that's ok)
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Frame not found: %s from thread: %s" % (self.frame_id, self.thread_id))
dbg.writer.add_command(cmd)
except:
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error resolving frame: %s from thread: %s" % (self.frame_id, self.thread_id))
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalEvaluateExpression
#=======================================================================================================================
class InternalEvaluateExpression(InternalThreadCommand):
    """ evaluates an expression in a given frame """
def __init__(self, seq, thread_id, frame_id, expression, doExec, doTrim, temp_name):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.expression = expression
self.doExec = doExec
self.doTrim = doTrim
self.temp_name = temp_name
def do_it(self, dbg):
""" Converts request into python variable """
try:
result = pydevd_vars.evaluate_expression(self.thread_id, self.frame_id, self.expression, self.doExec)
if self.temp_name != "":
pydevd_vars.change_attr_expression(self.thread_id, self.frame_id, self.temp_name, self.expression, dbg, result)
xml = "<xml>"
xml += pydevd_xml.var_to_xml(result, self.expression, self.doTrim)
xml += "</xml>"
cmd = dbg.cmd_factory.make_evaluate_expression_message(self.sequence, xml)
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating expression " + exc)
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalGetCompletions
#=======================================================================================================================
class InternalGetCompletions(InternalThreadCommand):
""" Gets the completions in a given scope """
def __init__(self, seq, thread_id, frame_id, act_tok):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.act_tok = act_tok
def do_it(self, dbg):
""" Converts request into completions """
try:
remove_path = None
try:
frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
if frame is not None:
msg = _pydev_completer.generate_completions_as_xml(frame, self.act_tok)
cmd = dbg.cmd_factory.make_get_completions_message(self.sequence, msg)
dbg.writer.add_command(cmd)
else:
cmd = dbg.cmd_factory.make_error_message(self.sequence, "InternalGetCompletions: Frame not found: %s from thread: %s" % (self.frame_id, self.thread_id))
dbg.writer.add_command(cmd)
finally:
if remove_path is not None:
sys.path.remove(remove_path)
except:
exc = get_exception_traceback_str()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating expression " + exc)
dbg.writer.add_command(cmd)
# =======================================================================================================================
# InternalGetDescription
# =======================================================================================================================
class InternalGetDescription(InternalThreadCommand):
""" Fetch the variable description stub from the debug console
"""
def __init__(self, seq, thread_id, frame_id, expression):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.expression = expression
def do_it(self, dbg):
""" Get completions and write back to the client
"""
try:
frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
description = pydevd_console.get_description(frame, self.thread_id, self.frame_id, self.expression)
description = pydevd_xml.make_valid_xml_value(quote(description, '/>_= \t'))
description_xml = '<xml><var name="" type="" value="%s"/></xml>' % description
cmd = dbg.cmd_factory.make_get_description_message(self.sequence, description_xml)
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error in fetching description" + exc)
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalGetBreakpointException
#=======================================================================================================================
class InternalGetBreakpointException(InternalThreadCommand):
""" Send details of exception raised while evaluating conditional breakpoint """
def __init__(self, thread_id, exc_type, stacktrace):
self.sequence = 0
self.thread_id = thread_id
self.stacktrace = stacktrace
self.exc_type = exc_type
def do_it(self, dbg):
try:
callstack = "<xml>"
makeValid = pydevd_xml.make_valid_xml_value
for filename, line, methodname, methodobj in self.stacktrace:
if file_system_encoding.lower() != "utf-8" and hasattr(filename, "decode"):
# filename is a byte string encoded using the file system encoding
# convert it to utf8
filename = filename.decode(file_system_encoding).encode("utf-8")
callstack += '<frame thread_id = "%s" file="%s" line="%s" name="%s" obj="%s" />' \
% (self.thread_id, makeValid(filename), line, makeValid(methodname), makeValid(methodobj))
callstack += "</xml>"
cmd = dbg.cmd_factory.make_send_breakpoint_exception_message(self.sequence, self.exc_type + "\t" + callstack)
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error Sending Exception: " + exc)
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalSendCurrExceptionTrace
#=======================================================================================================================
class InternalSendCurrExceptionTrace(InternalThreadCommand):
""" Send details of the exception that was caught and where we've broken in.
"""
def __init__(self, thread_id, arg, curr_frame_id):
'''
:param arg: exception type, description, traceback object
'''
self.sequence = 0
self.thread_id = thread_id
self.curr_frame_id = curr_frame_id
self.arg = arg
def do_it(self, dbg):
try:
cmd = dbg.cmd_factory.make_send_curr_exception_trace_message(self.sequence, self.thread_id, self.curr_frame_id, *self.arg)
del self.arg
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error Sending Current Exception Trace: " + exc)
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalSendCurrExceptionTraceProceeded
#=======================================================================================================================
class InternalSendCurrExceptionTraceProceeded(InternalThreadCommand):
    """ Notify the client that the current exception trace was sent and the debugger has proceeded.
"""
def __init__(self, thread_id):
self.sequence = 0
self.thread_id = thread_id
def do_it(self, dbg):
try:
cmd = dbg.cmd_factory.make_send_curr_exception_trace_proceeded_message(self.sequence, self.thread_id)
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error Sending Current Exception Trace Proceeded: " + exc)
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalEvaluateConsoleExpression
#=======================================================================================================================
class InternalEvaluateConsoleExpression(InternalThreadCommand):
""" Execute the given command in the debug console """
def __init__(self, seq, thread_id, frame_id, line, buffer_output=True):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.line = line
self.buffer_output = buffer_output
def do_it(self, dbg):
""" Create an XML for console output, error and more (true/false)
<xml>
<output message=output_message></output>
<error message=error_message></error>
<more>true/false</more>
</xml>
"""
try:
frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
if frame is not None:
console_message = pydevd_console.execute_console_command(
frame, self.thread_id, self.frame_id, self.line, self.buffer_output)
cmd = dbg.cmd_factory.make_send_console_message(self.sequence, console_message.to_xml())
else:
from _pydevd_bundle.pydevd_console import ConsoleMessage
console_message = ConsoleMessage()
console_message.add_console_message(
pydevd_console.CONSOLE_ERROR,
"Select the valid frame in the debug view (thread: %s, frame: %s invalid)" % (self.thread_id, self.frame_id),
)
cmd = dbg.cmd_factory.make_error_message(self.sequence, console_message.to_xml())
except:
exc = get_exception_traceback_str()
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating expression " + exc)
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalRunCustomOperation
#=======================================================================================================================
class InternalRunCustomOperation(InternalThreadCommand):
""" Run a custom command on an expression
"""
def __init__(self, seq, thread_id, frame_id, scope, attrs, style, encoded_code_or_file, fnname):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.scope = scope
self.attrs = attrs
self.style = style
self.code_or_file = unquote_plus(encoded_code_or_file)
self.fnname = fnname
def do_it(self, dbg):
try:
res = pydevd_vars.custom_operation(self.thread_id, self.frame_id, self.scope, self.attrs,
self.style, self.code_or_file, self.fnname)
resEncoded = quote_plus(res)
cmd = dbg.cmd_factory.make_custom_operation_message(self.sequence, resEncoded)
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error in running custom operation" + exc)
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalConsoleGetCompletions
#=======================================================================================================================
class InternalConsoleGetCompletions(InternalThreadCommand):
""" Fetch the completions in the debug console
"""
def __init__(self, seq, thread_id, frame_id, act_tok):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.act_tok = act_tok
def do_it(self, dbg):
""" Get completions and write back to the client
"""
try:
frame = pydevd_vars.find_frame(self.thread_id, self.frame_id)
completions_xml = pydevd_console.get_completions(frame, self.act_tok)
cmd = dbg.cmd_factory.make_send_console_message(self.sequence, completions_xml)
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error in fetching completions" + exc)
dbg.writer.add_command(cmd)
#=======================================================================================================================
# InternalConsoleExec
#=======================================================================================================================
class InternalConsoleExec(InternalThreadCommand):
    """ executes an expression in the debug console """
def __init__(self, seq, thread_id, frame_id, expression):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.expression = expression
def do_it(self, dbg):
""" Converts request into python variable """
try:
try:
#don't trace new threads created by console command
disable_trace_thread_modules()
result = pydevconsole.console_exec(self.thread_id, self.frame_id, self.expression, dbg)
xml = "<xml>"
xml += pydevd_xml.var_to_xml(result, "")
xml += "</xml>"
cmd = dbg.cmd_factory.make_evaluate_expression_message(self.sequence, xml)
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating console expression " + exc)
dbg.writer.add_command(cmd)
finally:
enable_trace_thread_modules()
sys.stderr.flush()
sys.stdout.flush()
#=======================================================================================================================
# pydevd_find_thread_by_id
#=======================================================================================================================
def pydevd_find_thread_by_id(thread_id):
try:
# there was a deadlock here when I did not remove the tracing function when thread was dead
threads = threading.enumerate()
for i in threads:
tid = get_thread_id(i)
if thread_id == tid or thread_id.endswith('|' + tid):
return i
sys.stderr.write("Could not find thread %s\n" % thread_id)
sys.stderr.write("Available: %s\n" % [get_thread_id(t) for t in threads])
sys.stderr.flush()
except:
traceback.print_exc()
return None
| apache-2.0 | 896,342,124,411,002,400 | 40.837689 | 191 | 0.513513 | false |
suutari-ai/shoop | shuup/core/api/users.py | 3 | 1754 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
from django_filters.rest_framework import DjangoFilterBackend, FilterSet
from rest_framework.serializers import ModelSerializer
from rest_framework.viewsets import ModelViewSet
from shuup.api.mixins import PermissionHelperMixin
class UserSerializer(ModelSerializer):
class Meta:
fields = "__all__"
model = get_user_model()
fields = "__all__"
class UserFilter(FilterSet):
class Meta:
model = get_user_model()
fields = ['email']
class UserViewSet(PermissionHelperMixin, ModelViewSet):
"""
retrieve: Fetches a user by its ID.
list: Lists all users.
    delete: Deletes a user.
If the object is related to another one and the relationship is protected, an error will be returned.
create: Creates a new user.
update: Fully updates an existing user.
You must specify all parameters to make it possible to overwrite all attributes.
partial_update: Updates an existing user.
You can update only a set of attributes.
"""
queryset = get_user_model().objects.all()
serializer_class = UserSerializer
filter_backends = (DjangoFilterBackend,)
filter_class = UserFilter
def get_view_name(self):
return _("Users")
@classmethod
def get_help_text(cls):
return _("Users can be listed, fetched, created, updated and deleted.")
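# Minimal wiring sketch (assumption: the actual URL registration lives in the
# shuup API setup, not in this module) -- the viewset is exposed through a DRF
# router, e.g.:
#
#     from rest_framework.routers import DefaultRouter
#     router = DefaultRouter()
#     router.register("users", UserViewSet)
#     urlpatterns = router.urls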
| agpl-3.0 | 8,015,017,041,828,015,000 | 27.754098 | 105 | 0.706956 | false |
m0mik/gr-osmosdr | grc/gen_osmosdr_blocks.py | 1 | 11905 | """
Copyright 2012 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
MAIN_TMPL = """\
<?xml version="1.0"?>
<block>
<name>$(title) $sourk.title()</name>
<key>$(prefix)_$(sourk)</key>
<category>$($sourk.title())s</category>
<throttle>1</throttle>
<import>import osmosdr</import>
<make>osmosdr.$(sourk)( args="numchan=" + str(\$nchan) + " " + \$args )
self.\$(id).set_sample_rate(\$sample_rate)
#for $n in range($max_nchan)
\#if \$nchan() > $n
self.\$(id).set_center_freq(\$freq$(n), $n)
self.\$(id).set_freq_corr(\$corr$(n), $n)
#if $sourk == 'source':
self.\$(id).set_dc_offset_mode(\$dc_offset_mode$(n), $n)
self.\$(id).set_iq_balance_mode(\$iq_balance_mode$(n), $n)
self.\$(id).set_gain_mode(\$gain_mode$(n), $n)
#end if
self.\$(id).set_gain(\$gain$(n), $n)
self.\$(id).set_if_gain(\$if_gain$(n), $n)
self.\$(id).set_bb_gain(\$bb_gain$(n), $n)
self.\$(id).set_antenna(\$ant$(n), $n)
self.\$(id).set_bandwidth(\$bw$(n), $n)
\#end if
#end for
</make>
<callback>set_sample_rate(\$sample_rate)</callback>
#for $n in range($max_nchan)
<callback>set_center_freq(\$freq$(n), $n)</callback>
<callback>set_freq_corr(\$corr$(n), $n)</callback>
#if $sourk == 'source':
<callback>set_dc_offset_mode(\$dc_offset_mode$(n), $n)</callback>
<callback>set_iq_balance_mode(\$iq_balance_mode$(n), $n)</callback>
<callback>set_gain_mode(\$gain_mode$(n), $n)</callback>
#end if
<callback>set_gain(\$gain$(n), $n)</callback>
<callback>set_if_gain(\$if_gain$(n), $n)</callback>
<callback>set_bb_gain(\$bb_gain$(n), $n)</callback>
<callback>set_antenna(\$ant$(n), $n)</callback>
<callback>set_bandwidth(\$bw$(n), $n)</callback>
#end for
<param>
<name>$(dir.title())put Type</name>
<key>type</key>
<type>enum</type>
<option>
<name>Complex float32</name>
<key>fc32</key>
<opt>type:fc32</opt>
</option>
</param>
<param>
<name>Device Arguments</name>
<key>args</key>
<value></value>
<type>string</type>
<hide>
\#if \$args()
none
\#else
part
\#end if
</hide>
</param>
<param>
<name>Num Channels</name>
<key>nchan</key>
<value>1</value>
<type>int</type>
#for $n in range(1, $max_nchan+1)
<option>
<name>$(n)</name>
<key>$n</key>
</option>
#end for
</param>
<param>
<name>Sample Rate (sps)</name>
<key>sample_rate</key>
<value>samp_rate</value>
<type>real</type>
</param>
$params
<check>$max_nchan >= \$nchan</check>
<check>\$nchan > 0</check>
<$sourk>
<name>$dir</name>
<type>\$type.type</type>
<nports>\$nchan</nports>
</$sourk>
<doc>
The osmocom $sourk block:
While primarily being developed for the OsmoSDR hardware, this block also supports:
#if $sourk == 'source':
* FUNcube Dongle through libgnuradio-fcd
* FUNcube Dongle Pro+ through gr-fcdproplus
* sysmocom OsmoSDR Devices through libosmosdr
* RTL2832U based DVB-T dongles through librtlsdr
* RTL-TCP spectrum server (see librtlsdr project)
* MSi2500 based DVB-T dongles through libmirisdr
* gnuradio .cfile input through libgnuradio-blocks
* RFSPACE SDR-IQ, SDR-IP, NetSDR (incl. X2 option)
* AirSpy Wideband Receiver through libairspy
#end if
* Great Scott Gadgets HackRF through libhackrf
* Nuand LLC bladeRF through libbladeRF library
* Ettus USRP Devices through Ettus UHD library
* Fairwaves UmTRX through Fairwaves' fork of UHD
By using the osmocom $sourk block you can take advantage of a common software api in your application(s) independent of the underlying radio hardware.
Output Type:
This parameter controls the data type of the stream in gnuradio. Only complex float32 samples are supported at the moment.
Device Arguments:
The device argument is a comma delimited string used to locate devices on your system. Device arguments for multiple devices may be given by separating them with a space.
Use the device id or name/serial (if applicable) to specify a certain device or list of devices. If left blank, the first device found will be used.
Examples:
Optional arguments are placed into [] brackets, remove the brackets before using them! Specific variable values are separated with a |, choose one of them. Variable values containing spaces shall be enclosed in '' as demonstrated in examples section below.
Lines ending with ... mean it's possible to bind devices together by specifying multiple device arguments separated with a space.
#if $sourk == 'source':
fcd=0[,device=hw:2][,type=2]
miri=0[,buffers=32] ...
rtl=serial_number ...
rtl=0[,rtl_xtal=28.8e6][,tuner_xtal=28.8e6] ...
rtl=1[,buffers=32][,buflen=N*512] ...
rtl=2[,direct_samp=0|1|2][,offset_tune=0|1] ...
rtl_tcp=127.0.0.1:1234[,psize=16384][,direct_samp=0|1|2][,offset_tune=0|1] ...
osmosdr=0[,buffers=32][,buflen=N*512] ...
file='/path/to/your file',rate=1e6[,freq=100e6][,repeat=true][,throttle=true] ...
netsdr=127.0.0.1[:50000][,nchan=2]
sdr-ip=127.0.0.1[:50000]
sdr-iq=/dev/ttyUSB0
airspy=0
#end if
hackrf=0[,buffers=32]
bladerf=0[,fpga='/path/to/the/bitstream.rbf']
uhd[,serial=...][,lo_offset=0][,mcr=52e6][,nchan=2][,subdev='\\\\'B:0 A:0\\\\''] ...
Num Channels:
Selects the total number of channels in this multi-device configuration. Required when specifying multiple device arguments.
Sample Rate:
The sample rate is the number of samples per second output by this block on each channel.
Frequency:
The center frequency is the frequency the RF chain is tuned to.
Freq. Corr.:
The frequency correction factor in parts per million (ppm). Set to 0 if unknown.
#if $sourk == 'source':
DC Offset Mode:
Controls the behavior of hardware DC offset correction.
Off: Disable correction algorithm (pass through).
Manual: Keep last estimated correction when switched from Automatic to Manual.
Automatic: Periodically find the best solution to compensate for DC offset.
This functionality is available for USRP devices only.
IQ Balance Mode:
Controls the behavior of software IQ imbalance correction.
Off: Disable correction algorithm (pass through).
Manual: Keep last estimated correction when switched from Automatic to Manual.
Automatic: Periodically find the best solution to compensate for image signals.
This functionality depends on http://cgit.osmocom.org/cgit/gr-iqbal/
Gain Mode:
Chooses between the manual (default) and automatic gain mode where appropriate.
To allow manual control of RF/IF/BB gain stages, manual gain mode must be configured.
Currently, only RTL-SDR devices support automatic gain mode.
#end if
RF Gain:
Overall RF gain of the device.
IF Gain:
Overall intermediate frequency gain of the device.
This setting is available for RTL-SDR and OsmoSDR devices with E4000 tuners and HackRF Jawbreaker in receive and transmit mode. Observations lead to a reasonable gain range from 15 to 30dB.
BB Gain:
Overall baseband gain of the device.
This setting is available for HackRF Jawbreaker in receive mode. Observations lead to a reasonable gain range from 15 to 30dB.
Antenna:
For devices with only one antenna, this may be left blank.
Otherwise, the user should specify one of the possible antenna choices.
Bandwidth:
Set the bandpass filter on the radio frontend. To use the default (automatic) bandwidth filter setting, this should be zero.
See the OsmoSDR project page for more detailed documentation:
http://sdr.osmocom.org/trac/wiki/GrOsmoSDR
http://sdr.osmocom.org/trac/wiki/rtl-sdr
http://sdr.osmocom.org/trac/
</doc>
</block>
"""
PARAMS_TMPL = """
<param>
<name>Ch$(n): Frequency (Hz)</name>
<key>freq$(n)</key>
<value>100e6</value>
<type>real</type>
<hide>\#if \$nchan() > $n then 'none' else 'all'#</hide>
</param>
<param>
<name>Ch$(n): Freq. Corr. (ppm)</name>
<key>corr$(n)</key>
<value>0</value>
<type>real</type>
<hide>\#if \$nchan() > $n then 'none' else 'all'#</hide>
</param>
#if $sourk == 'source':
<param>
<name>Ch$(n): DC Offset Mode</name>
<key>dc_offset_mode$(n)</key>
<value>0</value>
<type>int</type>
<hide>\#if \$nchan() > $n then 'none' else 'all'#</hide>
<option>
<name>Off</name>
<key>0</key>
</option>
<option>
<name>Manual</name>
<key>1</key>
</option>
<option>
<name>Automatic</name>
<key>2</key>
</option>
</param>
<param>
<name>Ch$(n): IQ Balance Mode</name>
<key>iq_balance_mode$(n)</key>
<value>0</value>
<type>int</type>
<hide>\#if \$nchan() > $n then 'none' else 'all'#</hide>
<option>
<name>Off</name>
<key>0</key>
</option>
<option>
<name>Manual</name>
<key>1</key>
</option>
<option>
<name>Automatic</name>
<key>2</key>
</option>
</param>
<param>
<name>Ch$(n): Gain Mode</name>
<key>gain_mode$(n)</key>
<value>0</value>
<type>int</type>
<hide>\#if \$nchan() > $n then 'none' else 'all'#</hide>
<option>
<name>Manual</name>
<key>0</key>
</option>
<option>
<name>Automatic</name>
<key>1</key>
</option>
</param>
#end if
<param>
<name>Ch$(n): RF Gain (dB)</name>
<key>gain$(n)</key>
<value>10</value>
<type>real</type>
<hide>\#if \$nchan() > $n then 'none' else 'all'#</hide>
</param>
<param>
<name>Ch$(n): IF Gain (dB)</name>
<key>if_gain$(n)</key>
<value>20</value>
<type>real</type>
<hide>\#if \$nchan() > $n then 'none' else 'all'#</hide>
</param>
<param>
<name>Ch$(n): BB Gain (dB)</name>
<key>bb_gain$(n)</key>
<value>20</value>
<type>real</type>
<hide>\#if \$nchan() > $n then 'none' else 'all'#</hide>
</param>
<param>
<name>Ch$(n): Antenna</name>
<key>ant$(n)</key>
<value></value>
<type>string</type>
<hide>
\#if not \$nchan() > $n
all
\#elif \$ant$(n)()
none
\#else
part
\#end if
</hide>
</param>
<param>
<name>Ch$(n): Bandwidth (Hz)</name>
<key>bw$(n)</key>
<value>0</value>
<type>real</type>
<hide>
\#if not \$nchan() > $n
all
\#elif \$bw$(n)()
none
\#else
part
\#end if
</hide>
</param>
"""
def parse_tmpl(_tmpl, **kwargs):
from Cheetah import Template
return str(Template.Template(_tmpl, kwargs))
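# Usage sketch, mirroring the __main__ block below: Cheetah substitutes plain
# $-placeholders such as $n, $title and $sourk at generation time, while
# escaped \$ placeholders (e.g. \$nchan, \$freq0) survive into the generated
# GRC XML for GRC itself to evaluate. For example,
#     parse_tmpl(PARAMS_TMPL, n=0, sourk='source')
# yields the Ch0 <param> entries, and the <make> template above expands (with
# nchan=1 and args="rtl=0,buffers=32" -- example values only) to the device
# string "numchan=1 rtl=0,buffers=32" handed to gr-osmosdr.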
max_num_channels = 5
import os.path
if __name__ == '__main__':
import sys
for file in sys.argv[1:]:
head, tail = os.path.split(file)
if tail.startswith('rtlsdr'):
title = 'RTL-SDR'
prefix = 'rtlsdr'
elif tail.startswith('osmosdr'):
title = 'osmocom'
prefix = 'osmosdr'
else: raise Exception, 'file %s has wrong syntax!'%tail
if tail.endswith ('source.xml'):
sourk = 'source'
dir = 'out'
elif tail.endswith ('sink.xml'):
sourk = 'sink'
dir = 'in'
else: raise Exception, 'is %s a source or sink?'%file
params = ''.join([parse_tmpl(PARAMS_TMPL, n=n, sourk=sourk) for n in range(max_num_channels)])
open(file, 'w').write(parse_tmpl(MAIN_TMPL,
max_nchan=max_num_channels,
params=params,
title=title,
prefix=prefix,
sourk=sourk,
dir=dir,
))
| gpl-3.0 | -936,261,014,248,224,000 | 29.762274 | 256 | 0.646031 | false |
ptonini/Stratus | lib/classes.py | 1 | 7366 | from time import sleep
__author__ = 'ptonini'
import re
import os
import sys
import time
from mutagen.mp3 import MP3
from mutagen.easyid3 import EasyID3
class Tracks:
def __init__(self, source):
if isinstance(source, dict):
self.__dict__.update(source)
elif isinstance(source, list):
full_filename = source[0] + source[1]
try:
os.path.isfile(full_filename)
audio = MP3(full_filename)
tag = EasyID3(full_filename)
except Exception:
print 'Invalid file', full_filename
else:
self.path = source[0]
self.filename = source[1]
self.full_filename = full_filename
self.timestamp = int(os.path.getmtime(self.full_filename))
self.length = audio.info.length
if 'genre' in tag:
self.genre = tag['genre'][0]
if 'artist' in tag:
self.artist = tag['artist'][0]
if 'performer' in tag:
self.album_artist = tag['performer'][0]
if 'album' in tag:
self.album = tag['album'][0]
if "date" in tag:
self.year = tag['date'][0]
if 'tracknumber' in tag:
self.track_num = tag['tracknumber'][0]
if 'title' in tag:
self.title = tag['title'][0]
if 'discnumber' in tag:
self.disc_num = tag['discnumber'][0]
else:
self.disc_num = "1"
def update_db(self, db):
if hasattr(self, '_id'):
db.tracks.update({'_id': self._id}, self.__dict__)
print 'Updated to DB:', self.filename
else:
track_count = db.tracks.find({'filename': self.filename}).count()
if track_count == 0:
db.tracks.insert(self.__dict__)
print 'Added to DB:', self.filename
elif track_count > 1:
print 'Error: duplicate tracks on database:', self.filename
def upload_to_gmusic(self, mm):
if not hasattr(self, 'gmusic_id'):
r = mm.upload(self.full_filename, enable_matching=True)
if not r[0] == {}:
self.gmusic_id = r[0][self.full_filename]
print 'Uploaded:', self.filename
elif not r[1] == {}:
self.gmusic_id = r[1][self.full_filename]
print 'Matched: ', self.filename
elif not r[2] == {}:
if 'TrackSampleResponse code 4' in r[2][self.full_filename]:
self.gmusic_id = re.search("\((.*)\)", str(r[2][self.full_filename])).group(1)
print 'Exists: ', self.filename
else:
                    print 'Error: could not upload or match', self.filename
class Playlists:
def __init__(self, source, db=None, playlists_home=None):
if isinstance(source, dict):
if 'id' in source:
self.full_filename = playlists_home + '/' + source['name'].encode('utf-8') + '.m3u'
self.name = source['name']
self.timestamp = int(int(source['lastModifiedTimestamp'])/1000000)
self.tracks = list()
print self.name
for track in source['tracks']:
self.tracks.append(db.tracks.find_one({'gmusic_id': track['trackId']})['_id'])
self.gmusic_id = source['id']
else:
self.__dict__.update(source)
elif isinstance(source, list):
self.full_filename = os.path.join(source[0], source[1])
self.name = source[1][:-4]
self.timestamp = int(os.path.getmtime(self.full_filename))
with open(self.full_filename, 'r+') as file:
self.tracks = list()
for line in file.readlines():
if line != '\n':
self.tracks.append(db.tracks.find_one({'filename': line[:-1]})['_id'])
def update_db(self, db):
if hasattr(self, '_id'):
print 'Updating playlist "' + self.name + '" on database'
self.__find_one_and_update_db(db, {'_id': self._id})
else:
count = db.playlists.find({'name': self.name}).count()
if count == 0:
print 'Adding playlist "' + self.name + '" to database.'
db.playlists.insert(self.__dict__)
elif count == 1:
print 'Updating playlist "' + self.name + '" on database'
self.__find_one_and_update_db(db, {'name': self.name})
else:
print 'Error: duplicate playlists on database:', self.name
def update_gmusic(self, db, mc, gm_playlists):
        if hasattr(self, 'gmusic_id'):
            matched_gmusic_id = False
            for gm_playlist in gm_playlists:
                if self.gmusic_id == gm_playlist['id']:
                    self.__find_most_recent_and_update_gmusic(db, mc, gm_playlist)
                    matched_gmusic_id = True
                    break
            if not matched_gmusic_id:
print 'Error - could not match gmusic_id:', self.name
else:
matched_lists = list()
for gm_playlist in gm_playlists:
if self.name == gm_playlist['name']:
matched_lists.append(gm_playlist)
if len(matched_lists) == 0:
self.gmusic_id = mc.create_playlist(self.name)
self.__build_list_and_update_gmusic(db, mc)
elif len(matched_lists) == 1:
self.gmusic_id = matched_lists[0]['id']
self.__find_most_recent_and_update_gmusic(db, mc, matched_lists[0])
else:
print 'Error - duplicate playlists on gmusic:', matched_lists[0]['name']
def __find_one_and_update_db(self, db, criteria):
playlist = db.playlists.find_one(criteria)
if self.timestamp < playlist['timestamp']:
self.tracks = playlist['tracks']
db.playlists.update(criteria, self.__dict__)
def __build_list_and_update_gmusic(self, db, mc):
new_list = list()
for track_id in self.tracks:
new_list.append(db.tracks.find_one({'_id': track_id})['gmusic_id'])
try:
mc.add_songs_to_playlist(self.gmusic_id, new_list)
except:
print 'Error'
sys.exit(1)
def __find_most_recent_and_update_gmusic(self, db, mc, gm_playlist):
gm_timestamp = int(gm_playlist['lastModifiedTimestamp'])/1000000
if self.timestamp > gm_timestamp:
old_list = list()
for entry in gm_playlist['tracks']:
old_list.append(entry['id'])
print 'Updating playlist "' + self.name + '"',
mc.remove_entries_from_playlist(old_list)
time.sleep(len(old_list)/90 )
self.__build_list_and_update_gmusic(db, mc)
print ' finished'
else:
self.timestamp = gm_timestamp
track_list = list()
for track in gm_playlist['tracks']:
track_list.append(db.tracks.find_one({'gmusic_id': track['trackId']})['_id'])
self.tracks = track_list
| unlicense | -336,839,685,327,072,450 | 40.150838 | 99 | 0.509503 | false |
2gis/vmmaster | vmpool/app.py | 1 | 1295 | # coding: utf-8
import logging
from flask import Flask
from core.config import config
from core.utils import JSONEncoder
log = logging.getLogger(__name__)
class Provider(Flask):
def __init__(self, *args, **kwargs):
from core.db import Database
from core.sessions import Sessions
from vmpool.virtual_machines_pool import VirtualMachinesPool
super(Provider, self).__init__(*args, **kwargs)
self.running = True
self.json_encoder = JSONEncoder
self.database = Database()
self.sessions = Sessions(self.database, self.app_context)
self.pool = VirtualMachinesPool(app=self, name=config.PROVIDER_NAME)
self.pool.start_workers()
def cleanup(self):
try:
log.info("Cleanup...")
self.pool.stop_workers()
log.info("Cleanup was done")
        except Exception:
log.exception("Cleanup was finished with errors")
def stop(self):
self.running = False
def register_blueprints(app):
from vmpool.api import api
app.register_blueprint(api, url_prefix='/api')
def create_app():
if config is None:
raise Exception("Need to setup config.py in application directory")
app = Provider(__name__)
register_blueprints(app)
return app
| mit | -5,654,950,707,962,474,000 | 25.428571 | 76 | 0.644015 | false |
bitforks/drawbot | drawBot/ui/codeEditor.py | 1 | 49280 |
import AppKit
import objc
from keyword import kwlist
import re
from pygments.lexers import PythonLexer, get_lexer_by_name
from pygments.token import *
from pygments.style import Style
from pygments.styles.default import DefaultStyle
try:
import jedi
hasJedi = True
except:
hasJedi = False
from vanilla import *
from lineNumberRulerView import NSLineNumberRuler
from drawBot.misc import getDefault, getFontDefault, getColorDefault, DrawBotError
from drawBot.drawBotDrawingTools import _drawBotDrawingTool
variableChars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_"
fallbackTextColor = AppKit.NSColor.blackColor()
fallbackBackgroundColor = AppKit.NSColor.whiteColor()
fallbackHightLightColor = AppKit.NSColor.selectedTextBackgroundColor()
fallbackFont = AppKit.NSFont.fontWithName_size_("Menlo", 10)
if not fallbackFont:
fallbackFont = AppKit.NSFont.fontWithName_size_("Monaco", 10)
basicLineHeightMultiple = 1.2
basicParagraph = AppKit.NSMutableParagraphStyle.alloc().init()
basicParagraph.setDefaultTabInterval_(28.0)
basicParagraph.setTabStops_(AppKit.NSArray.array())
basicParagraph.setLineHeightMultiple_(basicLineHeightMultiple)
fallbackTypeAttributes = {
AppKit.NSFontAttributeName: fallbackFont,
AppKit.NSLigatureAttributeName: 0,
AppKit.NSParagraphStyleAttributeName: basicParagraph
}
fallbackTracebackAttributes = dict(fallbackTypeAttributes)
fallbackTracebackAttributes[AppKit.NSForegroundColorAttributeName] = AppKit.NSColor.redColor()
fallbackStyles = [
(Token, '#000000'),
(Text, ''),
(Error, '#FF0000'),
(Punctuation, '#4C4C4C'),
(Keyword, '#4978FC'),
(Keyword.Namespace, '#1950FD'),
(Number, '#CC5858'),
(Number.Float, ''),
(Number.Oct, ''),
(Number.Hex, ''),
(Name, ''),
(Name.Tag, '#fb660a'),
(Name.Variable, '#fb660a'),
(Name.Attribute, '#ff0086'),
(Name.Function, '#ff0086'),
(Name.Class, '#ff0086'),
(Name.Constant, '#0086d2'),
(Name.Namespace, ''),
(Name.Builtin, '#31A73E'),
(Name.Builtin.Pseudo, '#FF8700'),
(Name.Exception, '#FF1400'),
(Name.Decorator, ''),
(Operator, '#6D37C9'),
(Operator.Word, '#6D37C9'),
(Comment, '#A3A3A3'),
(String, '#FC00E7'),
(String.Doc, '#FC00E7'),
]
fallbackStyleDict = {}
for key, value in fallbackStyles:
fallbackStyleDict[str(key)] = value
def styleFromDefault():
styles = dict()
tokens = getDefault("PyDETokenColors", fallbackStyleDict)
for key, value in tokens.items():
token = string_to_tokentype(key)
if value and not value.startswith("#"):
value = "#%s" % value
styles[token] = value
style = type('DrawBotStyle', (Style,), dict(styles=styles))
style.background_color = _NSColorToHexString(getColorDefault("PyDEBackgroundColor", fallbackBackgroundColor))
style.highlight_color = _NSColorToHexString(getColorDefault("PyDEHightLightColor", fallbackHightLightColor))
return style
def outputTextAttributesForStyles(styles=None, isError=False):
if styles is None:
styles = styleFromDefault()
if isError:
style = styles.style_for_token(Error)
else:
style = styles.style_for_token(Token)
attr = _textAttributesForStyle(style)
for key in (AppKit.NSForegroundColorAttributeName, AppKit.NSUnderlineColorAttributeName):
if key in attr:
attr[key] = _hexToNSColor(attr[key])
return attr
class _JumpToLineSheet(object):
def __init__(self, callback, parentWindow):
self._callback = callback
self.w = Sheet((210, 80), parentWindow=parentWindow)
self.w.text = TextBox((15, 15, 200, 22), "Jump to line number:")
self.w.lineNumber = EditText((-55, 17, -15, 18), sizeStyle="small")
self.w.cancelButton = Button((-170, -30, -80, 20), "Cancel", callback=self.cancelCallback, sizeStyle="small")
self.w.cancelButton.bind(".", ["command"])
self.w.cancelButton.bind(unichr(27), [])
self.w.okButton = Button((-70, -30, -10, 20), "OK", callback=self.okCallback, sizeStyle="small")
self.w.setDefaultButton(self.w.okButton)
self.w.open()
def okCallback(self, sender):
value = self.w.lineNumber.get()
try:
value = int(value.strip())
except:
value = None
self._callback(value)
self.closeCallback(sender)
def cancelCallback(self, sender):
self._callback(None)
self.closeCallback(sender)
def closeCallback(self, sender):
self.w.close()
def _hexToNSColor(color, default=AppKit.NSColor.blackColor()):
if color is None:
return default
if len(color) != 6:
return default
r = int(color[0:2], 16) / 255.
g = int(color[2:4], 16) / 255.
b = int(color[4:6], 16) / 255.
return AppKit.NSColor.colorWithCalibratedRed_green_blue_alpha_(r, g, b, 1)
def _hexStringToNSColor(txt, default=AppKit.NSColor.blackColor()):
if not txt.startswith("#"):
raise DrawBotError("Not a hex color, should start with '#'")
return _hexToNSColor(txt[1:], default)
def _NSColorToHexString(color):
color = color.colorUsingColorSpaceName_(AppKit.NSCalibratedRGBColorSpace)
r = color.redComponent() * 255
g = color.greenComponent() * 255
b = color.blueComponent() * 255
return "#%02X%02X%02X" % (r, g, b)
def _reverseMap(sourceMap):
destMap = dict()
for key, item in sourceMap.items():
destMap[item] = key
return destMap
_textAttributesForStyleCache = {}
def _clearTextAttributesForStyleCache():
_textAttributesForStyleCache.clear()
def _textAttributesForStyle(style, font=None, token=None):
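    # Translate a pygments style dict into NSAttributedString attributes (font traits, colors, underline), cached per token.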
if font is None:
font = getFontDefault("PyDEFont", fallbackFont)
if token and token in _textAttributesForStyleCache:
return _textAttributesForStyleCache[token]
attr = {
AppKit.NSLigatureAttributeName: 0,
AppKit.NSParagraphStyleAttributeName: basicParagraph,
}
if style.get("italic", False) and style.get("bold", False):
fontManager = AppKit.NSFontManager.sharedFontManager()
boldItalic = fontManager.convertFont_toHaveTrait_(font, AppKit.NSBoldFontMask | AppKit.NSItalicFontMask)
if boldItalic is not None:
font = boldItalic
elif style.get("italic", False):
fontManager = AppKit.NSFontManager.sharedFontManager()
italic = fontManager.convertFont_toHaveTrait_(font, AppKit.NSItalicFontMask)
if italic is not None:
font = italic
elif style.get("bold", False):
fontManager = AppKit.NSFontManager.sharedFontManager()
bold = fontManager.convertFont_toHaveTrait_(font, AppKit.NSBoldFontMask)
if bold is not None:
font = bold
attr[AppKit.NSFontAttributeName] = font
if style.get("color", False):
attr[AppKit.NSForegroundColorAttributeName] = style["color"]
if style.get("bgcolor", False):
attr[AppKit.NSBackgroundColorAttributeName] = style["bgcolor"]
if style.get("underline", False):
attr[AppKit.NSUnderlineStyleAttributeName] = AppKit.NSUnderlineStyleSingle
if style["color"]:
attr[AppKit.NSUnderlineColorAttributeName] = style["color"]
if token:
_textAttributesForStyleCache[token] = attr
return attr
_multiLineRE = re.compile(
r"(\'\'\'|\"\"\"|/\*|<!--)"
r".*?"
r"(\'\'\'|\"\"\"|\*/|--!>)"
, re.DOTALL
)
_whiteSpaceRE = re.compile(r"[ \t]+")
def _findWhitespace(s, pos=0):
m = _whiteSpaceRE.match(s, pos)
if m is None:
return pos
return m.end()
def _pythonWordCompletions(text, charRange):
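    # Combine the drawBot drawing API names with jedi completions (when jedi is available) and filter them by the typed prefix.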
if not hasJedi:
return [], 0
partialString = text.substringWithRange_(charRange)
keyWords = list(_drawBotDrawingTool.__all__)
try:
lines = text[:charRange.location].count("\n") + 1
if len(text) == charRange.location:
columns = None
else:
columns = 0
if text:
while text[charRange.location-columns] != "\n":
columns += 1
script = jedi.api.Script(source=text, line=lines, column=columns)
keyWords += [c.word for c in script.complete()]
except:
pass
keyWords = [word for word in sorted(keyWords) if word.startswith(partialString)]
return keyWords, 0
languagesIDEBehavior = {
"Python": {
"openToCloseMap": {"(": ")", "[": "]", "{": "}", "<": ">"},
"indentWithEndOfLine": [":", "(", "[", "{"],
"comment": "#",
"keywords": kwlist,
"wordCompletions": _pythonWordCompletions,
"dropPathFormatting": 'u"%s"',
"dropPathsFormatting": '[%s]',
"dropPathsSeperator": ", "
},
}
downArrowSelectionDirection = 0
upArrowSelectionDirection = 1
class CodeNSTextView(AppKit.NSTextView):
jumpToLineWindowClass = _JumpToLineSheet
def init(self):
self = super(CodeNSTextView, self).init()
self._highlightStyle = DefaultStyle
self._languagesIDEBehavior = dict()
self._fallbackTextColor = fallbackTextColor
self._fallbackBackgroundColor = fallbackBackgroundColor
self._fallbackHightLightColor = fallbackHightLightColor
self._fallbackFont = fallbackFont
self.setTypingAttributes_(_textAttributesForStyle(dict(color=self._fallbackTextColor)))
self.setUsesFontPanel_(False)
self.setRichText_(False)
self.setAllowsUndo_(True)
try:
self.setUsesFindBar_(True)
except:
self.setUsesFindPanel_(True)
self._usesTabs = False
self._indentSize = 4
self._ignoreProcessEditing = False
self._lexer = None
self.highlightStyleMap = dict()
nc = AppKit.NSNotificationCenter.defaultCenter()
nc.addObserver_selector_name_object_(self, "userDefaultChanged:", "drawBotUserDefaultChanged", None)
self._arrowSelectionDirection = None
self._canDrag = False
self._liveCoding = False
return self
def __del__(self):
nc = AppKit.NSNotificationCenter.defaultCenter()
nc.removeObserver_(self)
def setLexer_(self, lexer):
if lexer is None:
raise "Cannot set a None type for lexer, must be a subclass of pygment Lexer."
self._lexer = lexer
if self.window():
self.resetHighLightSyntax()
def lexer(self):
return self._lexer
def setHighlightStyle_(self, style):
self._highlightStyle = style
self._buildhighlightStyleMap()
if self.window():
self.resetHighLightSyntax()
def highlightStyle(self):
return self._highlightStyle
def setLanguagesIDEBehavior_(self, languagesIDEBehavior):
self._languagesIDEBehavior = languagesIDEBehavior
def languagesIDEBehavior(self):
return self._languagesIDEBehavior
def languagesIDEBehaviorForLanguage_(self, language):
return self._languagesIDEBehavior.get(language)
def _buildhighlightStyleMap(self):
# cache all tokens with nscolors
styles = self.highlightStyle()
backgroundColor = _hexStringToNSColor(styles.background_color, self._fallbackBackgroundColor)
self.setBackgroundColor_(backgroundColor)
selectionColor = _hexStringToNSColor(styles.highlight_color, self._fallbackHightLightColor)
self.setSelectedTextAttributes_({AppKit.NSBackgroundColorAttributeName: selectionColor})
self.highlightStyleMap = dict()
for token, style in styles:
for key in "color", "bgcolor", "border":
style[key] = _hexToNSColor(style[key], None)
self.highlightStyleMap[token] = style
def setUsesTabs_(self, usesTabs):
oldIndent = self.indent()
self._usesTabs = usesTabs
newIndent = self.indent()
string = self.string()
string = string.replace(oldIndent, newIndent)
self.setString_(string)
def usesTabs(self):
return self._usesTabs
def setIndentSize_(self, size):
        oldIndent = self.indent()
self._indentSize = size
newIndent = self.indent()
if not self.usesTabs():
string = self.string()
string = string.replace(oldIndent, newIndent)
self.setString_(string)
def indentSize(self):
return self._indentSize
def indent(self):
if self.usesTabs():
return "\t"
else:
return " " * self.indentSize()
# overwritting NSTextView methods
def setBackgroundColor_(self, color):
        # invert the insertion pointer color
# and the fallback text color and background color
color = color.colorUsingColorSpaceName_(AppKit.NSCalibratedRGBColorSpace)
r = color.redComponent()
g = color.greenComponent()
b = color.blueComponent()
s = sum([r, g, b]) / 3.
inverseColor = s < .6
if inverseColor:
self._fallbackBackgroundColor = AppKit.NSColor.blackColor()
self._fallbackTextColor = AppKit.NSColor.whiteColor()
self.setInsertionPointColor_(AppKit.NSColor.whiteColor())
else:
self._fallbackBackgroundColor = AppKit.NSColor.whiteColor()
self._fallbackTextColor = AppKit.NSColor.blackColor()
self.setInsertionPointColor_(AppKit.NSColor.blackColor())
if self.enclosingScrollView():
self.enclosingScrollView().setBackgroundColor_(color)
self._updateRulersColors()
super(CodeNSTextView, self).setBackgroundColor_(color)
def changeColor_(self, color):
# prevent external color overwrite,
pass
def changeAttributes_(self, attr):
# prevent external attributes overwrite
pass
def smartInsertDeleteEnabled(self):
return False
def isAutomaticTextReplacementEnabled(self):
return True
def isAutomaticQuoteSubstitutionEnabled(self):
return False
def isAutomaticLinkDetectionEnabled(self):
return False
def isAutomaticDataDetectionEnabled(self):
return False
def isAutomaticDashSubstitutionEnabled(self):
return False
def isAutomaticSpellingCorrectionEnabled(self):
return False
    # highlighting
def resetHighLightSyntax(self):
self._ignoreProcessEditing = True
self._highlightSyntax(0, self.string())
self._ignoreProcessEditing = False
def _highlightSyntax(self, location, text):
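        # Tokenize the given text with the current lexer and apply the matching attributes to the text storage, starting at `location`.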
if self.lexer() is None:
return
font = getFontDefault("PyDEFont", self._fallbackFont)
length = len(self.string())
setAttrs = self.textStorage().setAttributes_range_
if text.endswith("\n"):
text = text[:-1]
# setAttrs = self.layoutManager().addTemporaryAttributes_forCharacterRange_
self.textStorage().beginEditing()
totLenValue = 0
for pos, token, value in self.lexer().get_tokens_unprocessed(text):
style = self.highlightStyleMap.get(token)
lenValue = len(value)
if location + pos + lenValue > length:
lenValue = length - (location + pos)
if lenValue > 0:
setAttrs(_textAttributesForStyle(style, font), (location + pos, lenValue))
totLenValue += lenValue
self.textStorage().fixFontAttributeInRange_((location, totLenValue))
self.textStorage().endEditing()
# key down
def keyDown_(self, event):
char = event.characters()
selectedRange = self.selectedRange()
if AppKit.NSEvent.modifierFlags() & AppKit.NSCommandKeyMask and selectedRange and char in (AppKit.NSUpArrowFunctionKey, AppKit.NSDownArrowFunctionKey, AppKit.NSLeftArrowFunctionKey, AppKit.NSRightArrowFunctionKey):
value = self._getSelectedValueForRange(selectedRange)
if value is not None:
                    altDown = AppKit.NSEvent.modifierFlags() & AppKit.NSAlternateKeyMask
                    shiftDown = AppKit.NSEvent.modifierFlags() & AppKit.NSShiftKeyMask
add = 1
if altDown:
add = .1
if char == AppKit.NSDownArrowFunctionKey:
add *= -1
elif char == AppKit.NSLeftArrowFunctionKey:
add *= -1
if shiftDown and altDown:
add /= 10
elif shiftDown:
add *= 10
if isinstance(value, tuple):
valueX, valueY = value
if char in [AppKit.NSUpArrowFunctionKey, AppKit.NSDownArrowFunctionKey]:
valueY += add
else:
valueX += add
value = "%s, %s" % (valueX, valueY)
else:
value += add
self._insertTextAndRun("%s" % value, selectedRange)
return
txt = self.string().substringWithRange_(selectedRange)
if txt == "True":
self._insertTextAndRun("False", selectedRange)
return
if txt == "False":
self._insertTextAndRun("True", selectedRange)
return
super(CodeNSTextView, self).keyDown_(event)
selectedRange = self.selectedRange()
self._balanceParenForChar(char, selectedRange.location)
if self.isLiveCoding():
self.performSelectorInBackground_withObject_("_runInternalCode", None)
def flagsChanged_(self, event):
self._arrowSelectionDirection = None
super(CodeNSTextView, self).flagsChanged_(event)
def mouseDown_(self, event):
self._canDrag = False
if AppKit.NSEvent.modifierFlags() & AppKit.NSCommandKeyMask and self.selectedRange():
self._canDrag = True
self.undoManager().beginUndoGrouping()
selRng = self.selectedRange()
txt = self.string().substringWithRange_(selRng)
if txt == "True":
self._insertTextAndRun("False", selRng)
elif txt == "False":
self._insertTextAndRun("True", selRng)
return
super(CodeNSTextView, self).mouseDown_(event)
def mouseDragged_(self, event):
if self._canDrag:
try:
selRng = self.selectedRange()
value = self._getSelectedValueForRange(selRng)
if value is not None:
                    altDown = event.modifierFlags() & AppKit.NSAlternateKeyMask
                    shiftDown = event.modifierFlags() & AppKit.NSShiftKeyMask
add = 1
if altDown and shiftDown:
add = .01
elif altDown:
add = .1
elif shiftDown:
add = 10
if isinstance(value, tuple):
valueX, valueY = value
valueX += int(event.deltaX()*2) * add
valueY -= int(event.deltaY()*2) * add
txtValue = "%s, %s" % (valueX, valueY)
else:
value += int(event.deltaX()*2) * add
txtValue = "%s" % value
self._insertTextAndRun(txtValue, selRng)
except:
pass
super(CodeNSTextView, self).mouseDragged_(event)
def mouseUp_(self, event):
if self._canDrag:
self.undoManager().endUndoGrouping()
super(CodeNSTextView, self).mouseUp_(event)
def insertTab_(self, sender):
string = self.string()
if string:
selectedRange = self.selectedRange()
try:
char = string[selectedRange.location-1]
except:
char = ""
if char == ".":
self.setSelectedRange_((selectedRange.location-1, 1))
self.insertText_("self.")
return
if self.usesTabs():
return super(CodeNSTextView, self).insertTab_(sender)
self.insertText_(self.indent())
def insertNewline_(self, sender):
selectedRange = self.selectedRange()
super(CodeNSTextView, self).insertNewline_(sender)
languageData = self.languagesIDEBehaviorForLanguage_(self.lexer().name)
if languageData:
leadingSpace = ""
line, lineRange = self._getTextForRange(selectedRange)
m = _whiteSpaceRE.match(line)
if m is not None:
leadingSpace = m.group()
line = line.strip()
if line and line[-1] in languageData["indentWithEndOfLine"]:
leadingSpace += self.indent()
if leadingSpace:
self.insertText_(leadingSpace)
def deleteBackward_(self, sender):
self._deleteIndentation(sender, False, super(CodeNSTextView, self).deleteBackward_)
def deleteForward_(self, sender):
self._deleteIndentation(sender, True, super(CodeNSTextView, self).deleteForward_)
def moveLeft_(self, sender):
super(CodeNSTextView, self).moveLeft_(sender)
string = self.string()
if not string:
return
selectedRange = self.selectedRange()
char = string[selectedRange.location]
self._balanceParenForChar(char, selectedRange.location+1)
def moveRight_(self, sender):
super(CodeNSTextView, self).moveRight_(sender)
string = self.string()
if not string:
return
selectedRange = self.selectedRange()
char = string[selectedRange.location-1]
self._balanceParenForChar(char, selectedRange.location)
def moveWordLeft_(self, sender):
ranges = self.selectedRanges()
if len(ranges) == 1:
newRange = ranges[0].rangeValue()
location = self._getLeftWordRange(newRange)
self.setSelectedRange_((location, 0))
else:
super(CodeNSTextView, self).moveWordLeft_(sender)
def moveWordLeftAndModifySelection_(self, sender):
ranges = self.selectedRanges()
if self._arrowSelectionDirection is None:
self._arrowSelectionDirection = downArrowSelectionDirection
if len(ranges) == 1:
newRange = ranges[0].rangeValue()
testLocation = -1
if newRange.length and self._arrowSelectionDirection != downArrowSelectionDirection:
testLocation = self._getLeftWordRange(AppKit.NSRange(newRange.location + newRange.length, 0))
if AppKit.NSLocationInRange(testLocation, newRange) or AppKit.NSMaxRange(newRange) == testLocation:
newRange = AppKit.NSRange(newRange.location, testLocation - newRange.location)
else:
location = self._getLeftWordRange(newRange)
newRange = AppKit.NSRange(location, newRange.location - location + newRange.length)
if newRange.length == 0:
self._arrowSelectionDirection = None
self.setSelectedRange_(newRange)
else:
super(CodeNSTextView, self).moveWordLeftAndModifySelection_(sender)
def moveWordRight_(self, sender):
ranges = self.selectedRanges()
if len(ranges) == 1:
newRange = ranges[0].rangeValue()
location = self._getRightWordRange(newRange)
self.setSelectedRange_((location, 0))
else:
super(CodeNSTextView, self).moveWordRight_(sender)
def moveWordRightAndModifySelection_(self, sender):
ranges = self.selectedRanges()
if self._arrowSelectionDirection is None:
self._arrowSelectionDirection = upArrowSelectionDirection
if len(ranges) == 1:
newRange = ranges[0].rangeValue()
testLocation = -1
if newRange.length and self._arrowSelectionDirection != upArrowSelectionDirection:
testLocation = self._getRightWordRange(AppKit.NSRange(newRange.location, 0))
if AppKit.NSLocationInRange(testLocation, newRange) or AppKit.NSMaxRange(newRange) == testLocation:
newRange = AppKit.NSRange(testLocation, newRange.location - testLocation + newRange.length)
else:
location = self._getRightWordRange(newRange)
newRange = AppKit.NSRange(newRange.location, location - newRange.location)
if newRange.length == 0:
self._arrowSelectionDirection = None
self.setSelectedRange_(newRange)
else:
super(CodeNSTextView, self).moveWordRightAndModifySelection_(sender)
def deleteWordBackward_(self, sender):
ranges = self.selectedRanges()
if len(ranges) == 1:
newRange = ranges[0].rangeValue()
if newRange.length == 0:
self.moveWordLeftAndModifySelection_(sender)
super(CodeNSTextView, self).deleteWordForward_(sender)
def deleteWordForward_(self, sender):
ranges = self.selectedRanges()
if len(ranges) == 1:
newRange = ranges[0].rangeValue()
if newRange.length == 0:
self.moveWordRightAndModifySelection_(sender)
super(CodeNSTextView, self).deleteWordForward_(sender)
# text completion
def rangeForUserCompletion(self):
charRange = super(CodeNSTextView, self).rangeForUserCompletion()
text = self.string()
partialString = text.substringWithRange_(charRange)
if "." in partialString:
dotSplit = partialString.split(".")
partialString = dotSplit.pop()
move = len(".".join(dotSplit))
charRange.location += move + 1
charRange.length = len(partialString)
for c in partialString:
if c not in variableChars:
return (AppKit.NSNotFound, 0)
return charRange
def completionsForPartialWordRange_indexOfSelectedItem_(self, charRange, index):
languageData = self.languagesIDEBehaviorForLanguage_(self.lexer().name)
if languageData is None:
return [], 0
text = self.string()
func = languageData.get("wordCompletions", self._genericCompletions)
return func(text, charRange)
def _genericCompletions(self, text, charRange):
partialString = text.substringWithRange_(charRange)
keyWords = list()
index = 0
languageData = self.languagesIDEBehaviorForLanguage_(self.lexer().name)
if languageData is None:
return keyWords, index
if partialString:
_reWords = re.compile(r"\b%s\w+\b" % partialString)
keyWords = _reWords.findall(text)
keyWords = list(set(keyWords + languageData.get("keywords", [])))
keyWords = [word for word in sorted(keyWords) if word.startswith(partialString)]
return keyWords, index
def selectionRangeForProposedRange_granularity_(self, proposedRange, granularity):
location = proposedRange.location
if granularity == AppKit.NSSelectByWord and proposedRange.length == 0 and location != 0:
text = self.string()
lenText = len(text)
length = 1
found = False
while not found:
location -= 1
length += 1
if location <= 0:
found = True
else:
c = text.substringWithRange_((location, 1))[0]
if c not in variableChars:
location += 1
found = True
found = False
while not found:
length += 1
if location + length >= lenText:
found = True
else:
c = text.substringWithRange_((location, length))[-1]
if c not in variableChars:
length -= 1
found = True
return location, length
else:
return super(CodeNSTextView, self).selectionRangeForProposedRange_granularity_(proposedRange, granularity)
# drop
def acceptableDragTypes(self):
acceptableDragTypes = super(CodeNSTextView, self).acceptableDragTypes()
return list(acceptableDragTypes) + [AppKit.NSFilenamesPboardType]
def draggingEntered_(self, dragInfo):
pboard = dragInfo.draggingPasteboard()
types = pboard.types()
if AppKit.NSFilenamesPboardType in types:
languageData = self.languagesIDEBehaviorForLanguage_(self.lexer().name)
if languageData is not None:
formatter = languageData.get("dropPathFormatting")
if formatter:
paths = pboard.propertyListForType_(AppKit.NSFilenamesPboardType)
dropText = ""
if len(paths) == 1:
dropText = formatter % paths[0]
else:
formattedPaths = []
for path in paths:
formattedPaths.append(formatter % path)
multiLineFormater = languageData.get("dropPathsFormatting", "%s")
seperator = languageData.get("dropPathsSeperator", "\n")
dropText = multiLineFormater % seperator.join(formattedPaths)
if dropText:
pboard.declareTypes_owner_([AppKit.NSPasteboardTypeString], self)
pboard.setString_forType_(dropText, AppKit.NSPasteboardTypeString)
return super(CodeNSTextView, self).draggingEntered_(dragInfo)
# menu
def indent_(self, sender):
def indentFilter(lines):
indent = self.indent()
indentedLines = []
for line in lines:
if line.strip():
indentedLines.append(indent + line)
else:
indentedLines.append(line)
return indentedLines
self._filterLines(indentFilter)
def dedent_(self, sender):
def dedentFilter(lines):
indent = self.indent()
dedentedLines = []
indentSize = len(indent)
for line in lines:
if line.startswith(indent):
line = line[indentSize:]
dedentedLines.append(line)
return dedentedLines
self._filterLines(dedentFilter)
def comment_(self, sender):
def commentFilter(lines):
languageData = self.languagesIDEBehaviorForLanguage_(self.lexer().name)
if languageData is None:
return lines
commentTag = languageData.get("comment")
if commentTag is None:
return lines
commentTag += " "
commentEndTag = languageData.get("commentEnd", "")
if commentEndTag:
commentEndTag = " " + commentEndTag
commentedLines = []
pos = 100
for line in lines:
if not line.strip():
continue
pos = min(pos, _findWhitespace(line))
for line in lines:
if line.strip():
addEnd = ""
if line[-1] == "\n":
line = line.replace("\n", "")
addEnd = "\n"
commentedLines.append(line[:pos] + commentTag + line[pos:] + commentEndTag + addEnd)
else:
commentedLines.append(line)
return commentedLines
self._filterLines(commentFilter)
def uncomment_(self, sender):
def uncommentFilter(lines):
languageData = self.languagesIDEBehaviorForLanguage_(self.lexer().name)
if languageData is None:
return lines
commentTag = languageData.get("comment", "")
commentEndTag = languageData.get("commentEnd", "")
_commentRE = re.compile(r"[ \t]*(%s)[ ]?" % commentTag)
commentedLines = []
commentMatch = _commentRE.match
for line in lines:
m = commentMatch(line)
if m is not None:
start = m.start(1)
end = m.end()
line = line[:start] + line[end:]
line = line.replace(commentEndTag, "")
commentedLines.append(line)
return commentedLines
self._filterLines(uncommentFilter)
def _jumpToLine(self, lineNumber):
lines = 1
string = self.string()
length = len(string)
tempRange = AppKit.NSMakeRange(0, length)
found = None
while tempRange.location < length:
tempRange = string.lineRangeForRange_(AppKit.NSMakeRange(tempRange.location, 0))
if lines == lineNumber:
found = tempRange
break
tempRange.location = AppKit.NSMaxRange(tempRange)
lines += 1
if found:
self.setSelectedRange_(found)
self.scrollRangeToVisible_(found)
def jumpToLine_(self, sender):
self.jumpToLineWindowClass(self._jumpToLine, self.window())
def jumpToLineNumber_(self, lineNumber):
self._jumpToLine(lineNumber)
def liveCoding_(self, sender):
self._liveCoding = not self._liveCoding
def isLiveCoding(self):
return self._liveCoding
def validateUserInterfaceItem_(self, item):
if item.action() == "liveCoding:":
item.setState_(self.isLiveCoding())
return super(CodeNSTextView, self).validateUserInterfaceItem_(item)
# notifications
def textStorageDidProcessEditing_(self, notification):
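        # Re-highlight only the edited region, padded a little and widened to cover any multi-line string it overlaps.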
if self._ignoreProcessEditing:
return
string = self.string()
if not string:
# no text to color
return
length = len(string)
textStorage = self.textStorage()
lineStart, lineLength = textStorage.editedRange()
lineStart -= 200
lineLength += 200
if lineStart <= 0:
lineStart = 0
if lineStart > length:
lineStart = length
lineLength = 0
if lineStart + lineLength > length:
lineLength = length - lineStart
lineStart, lineLength = string.lineRangeForRange_((lineStart, lineLength))
for quoteMatch in _multiLineRE.finditer(string):
start, end = quoteMatch.start(), quoteMatch.end()
quoteRange = (start, end-start)
if AppKit.NSLocationInRange(lineStart, quoteRange) or AppKit.NSLocationInRange(lineStart+lineLength, quoteRange):
quoteStart, quoteLenght = string.lineRangeForRange_(quoteRange)
lineStart, lineLength = AppKit.NSUnionRange(quoteRange, (lineStart, lineLength))
break
text = string.substringWithRange_((lineStart, lineLength))
self._highlightSyntax(lineStart, text)
def viewDidMoveToWindow(self):
self._buildhighlightStyleMap()
self.resetHighLightSyntax()
notificationCenter = AppKit.NSNotificationCenter.defaultCenter()
notificationCenter.addObserver_selector_name_object_(self, "textStorageDidProcessEditing:", AppKit.NSTextStorageDidProcessEditingNotification, self.textStorage())
def dealloc(self):
notificationCenter = AppKit.NSNotificationCenter.defaultCenter()
notificationCenter.removeObserver_(self)
super(CodeNSTextView, self).dealloc()
def userDefaultChanged_(self, notification):
if self.window():
_clearTextAttributesForStyleCache()
style = styleFromDefault()
self.setTypingAttributes_(_textAttributesForStyle(dict(color=self._fallbackTextColor)))
self.setHighlightStyle_(style)
# helpers
def _updateRulersColors(self):
scrollView = self.enclosingScrollView()
if scrollView and scrollView.hasVerticalRuler():
ruler = scrollView.verticalRulerView()
if hasattr(ruler, "setTextColor_"):
numberStyle = self.highlightStyleMap.get(Comment)
if numberStyle:
ruler.setTextColor_(numberStyle["color"])
if hasattr(ruler, "setRulerBackgroundColor_"):
styles = self.highlightStyle()
backgroundColor = _hexStringToNSColor(styles.background_color, self._fallbackBackgroundColor)
ruler.setRulerBackgroundColor_(backgroundColor)
def _deleteIndentation(self, sender, isForward, superFunc):
selectedRange = self.selectedRange()
if self.usesTabs() or selectedRange.length:
return superFunc(sender)
string = self.string()
if not string:
return superFunc(sender)
possibleIndentStart = selectedRange.location - self.indentSize()
possibleIndentEnd = self.indentSize()
if isForward:
possibleIndentStart = selectedRange.location
if possibleIndentStart < 0:
return superFunc(sender)
possibleIndent = None
if possibleIndentStart + possibleIndentEnd > len(string):
return superFunc(sender)
possibleIndent = string.substringWithRange_((possibleIndentStart, possibleIndentEnd))
if possibleIndent == self.indent():
self.setSelectedRange_((possibleIndentStart, possibleIndentEnd))
self.insertText_("")
else:
superFunc(sender)
def _findMatchingParen(self, location, char, matchChar, end):
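        # Walk forward for an opening character, or backward for a closing one, counting nested pairs until the balancing character is found.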
add = 1
if end:
add = -1
location -= 2
string = self.string()
found = None
stack = 0
while location >= 0 and location < len(string):
c = string[location]
if c == char:
stack += 1
elif stack != 0 and c == matchChar:
stack -= 1
elif c == matchChar:
found = location
break
location += add
return found
def _balanceParenForChar(self, char, location):
if self.lexer() is None:
return
languageData = self.languagesIDEBehaviorForLanguage_(self.lexer().name)
if languageData is None:
return
openToCloseMap = languageData["openToCloseMap"]
if char in openToCloseMap.keys():
self._balanceParens(location=location, char=char, matchChar=openToCloseMap[char], end=False)
elif char in openToCloseMap.values():
openToCloseMap = _reverseMap(openToCloseMap)
self._balanceParens(location=location, char=char, matchChar=openToCloseMap[char], end=True)
def _balanceParens(self, location, char, matchChar, end):
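        # Briefly highlight the matching bracket with the selection color and a small glow, then restore the original attributes after a short delay.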
found = self._findMatchingParen(location, char, matchChar, end)
if found is not None:
oldAttrs, effRng = self.textStorage().attributesAtIndex_effectiveRange_(found, None)
styles = self.highlightStyle()
selectionColor = _hexStringToNSColor(styles.highlight_color, self._fallbackHightLightColor)
textColor = oldAttrs.get(AppKit.NSForegroundColorAttributeName, self._fallbackTextColor)
shadow = AppKit.NSShadow.alloc().init()
shadow.setShadowOffset_((0, 0))
shadow.setShadowColor_(textColor)
shadow.setShadowBlurRadius_(3)
balancingAttrs = {
AppKit.NSBackgroundColorAttributeName: selectionColor,
AppKit.NSShadowAttributeName: shadow
}
self.layoutManager().setTemporaryAttributes_forCharacterRange_(balancingAttrs, (found, 1))
self.performSelector_withObject_afterDelay_("_resetBalanceParens:", (oldAttrs, effRng), 0.2)
def _resetBalanceParens_(self, (attrs, rng)):
self.layoutManager().setTemporaryAttributes_forCharacterRange_(attrs, rng)
def _filterLines(self, filterFunc):
selectedRange = self.selectedRange()
lines, linesRange = self._getTextForRange(selectedRange)
filteredLines = filterFunc(lines.splitlines(True))
filteredLines = "".join(filteredLines)
if lines == filteredLines:
return
self.setSelectedRange_(linesRange)
self.insertText_(filteredLines)
newSelRng = linesRange.location, len(filteredLines)
self.setSelectedRange_(newSelRng)
def _getLeftWordRange(self, newRange):
if newRange.location == 0:
return 0
text = self.string()
location = newRange.location - 1
c = text.substringWithRange_((location, 1))[0]
isChar = foundChar = c in variableChars
count = 0
while isChar == foundChar:
count += 1
location -= 1
if location <= 0:
location = 0
foundChar = not isChar
else:
c = text.substringWithRange_((location, 1))[0]
foundChar = c in variableChars
if count == 1 and isChar != foundChar:
isChar = not isChar
if location != 0:
location += 1
return location
def _getRightWordRange(self, newRange):
text = self.string()
lenText = len(text)
location = newRange.location + newRange.length
if location >= lenText:
return lenText
count = 0
c = text.substringWithRange_((location, 1))[0]
isChar = foundChar = c in variableChars
while isChar == foundChar:
count += 1
location += 1
if location >= lenText:
location = lenText
foundChar = not isChar
else:
c = text.substringWithRange_((location, 1))[0]
foundChar = c in variableChars
if count == 1 and isChar != foundChar:
isChar = not isChar
return location
def _getTextForRange(self, lineRange):
string = self.string()
lineRange = string.lineRangeForRange_(lineRange)
return string.substringWithRange_(lineRange), lineRange
def _getSelectedValueForRange(self, selectedRange):
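        # Interpret the selected text as a number (or an x, y pair); return None when it cannot be dragged as a value.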
value = None
try:
txt = self.string().substringWithRange_(selectedRange)
for c in txt:
if c not in "0123456789.,- ":
raise DrawBotError("No dragging possible")
exec("value = %s" % txt)
except:
pass
return value
def _insertTextAndRun(self, txt, txtRange):
self.insertText_(txt)
newRange = AppKit.NSMakeRange(txtRange.location, len(txt))
self.setSelectedRange_(newRange)
return self._runInternalCode()
def _runInternalCode(self):
pool = AppKit.NSAutoreleasePool.alloc().init()
try:
window = self.window()
if window is not None:
doc = window.document()
if doc is not None:
doc.runCode_(self)
return True
except:
return False
class CodeEditor(TextEditor):
nsTextViewClass = CodeNSTextView
def __init__(self, *args, **kwargs):
codeAttr = dict()
for key in "lexer", "highlightStyle", "usesTabs", "indentSize", "languagesIDEBehavior", "showlineNumbers":
value = None
if key in kwargs:
value = kwargs.get(key)
del kwargs[key]
codeAttr[key] = value
super(CodeEditor, self).__init__(*args, **kwargs)
if isinstance(codeAttr["lexer"], str):
try:
codeAttr["lexer"] = get_lexer_by_name(codeAttr["lexer"])
except:
codeAttr["lexer"] = None
if codeAttr["lexer"] is None:
codeAttr["lexer"] = PythonLexer()
self.setLexer(codeAttr["lexer"])
if codeAttr["highlightStyle"] is None:
codeAttr["highlightStyle"] = styleFromDefault()
if codeAttr["highlightStyle"] is not None:
self.setHighlightStyle(codeAttr["highlightStyle"])
if codeAttr["usesTabs"] is not None:
self.setUsesTabs(codeAttr["usesTabs"])
if codeAttr["indentSize"] is not None:
self.setIndentSize(codeAttr["indentSize"])
if codeAttr["languagesIDEBehavior"] is not None:
            languagesIDEBehavior.update(codeAttr["languagesIDEBehavior"])
self.setLanguagesIDEBehavior(languagesIDEBehavior)
if codeAttr["showlineNumbers"] is None:
codeAttr["showlineNumbers"] = True
ruler = NSLineNumberRuler.alloc().init()
ruler.setClientView_(self.getNSTextView())
ruler.setRulerBackgroundColor_(AppKit.NSColor.colorWithCalibratedWhite_alpha_(.95, 1))
self.getNSScrollView().setVerticalRulerView_(ruler)
self.getNSScrollView().setHasHorizontalRuler_(False)
self.getNSScrollView().setHasVerticalRuler_(codeAttr["showlineNumbers"])
self.getNSScrollView().setRulersVisible_(True)
def setHighlightStyle(self, style):
self.getNSTextView().setHighlightStyle_(style)
def setLexer(self, lexer):
self.getNSTextView().setLexer_(lexer)
def setLanguagesIDEBehavior(self, languagesIDEBehavior):
self.getNSTextView().setLanguagesIDEBehavior_(languagesIDEBehavior)
def setUsesTabs(self, value):
self.getNSTextView().setUsesTabs_(value)
def usesTabs(self):
return self.getNSTextView().usesTabs()
def setIndentSize(self, value):
self.getNSTextView().setIndentSize_(value)
def indentSize(self):
return self.getNSTextView().indentSize()
def comment(self):
self.getNSTextView().comment_(self)
def uncomment(self):
self.getNSTextView().uncomment_(self)
def indent(self):
self.getNSTextView().indent_(self)
def dedent(self):
self.getNSTextView().dedent_(self)
def jumpToLine(self, lineNumber=None):
if lineNumber is None:
self.getNSTextView().jumpToLine_(self)
else:
self.getNSTextView().jumpToLineNumber_(lineNumber)
def toggleLineNumbers(self):
self.getNSScrollView().setHasVerticalRuler_(not self.getNSScrollView().hasVerticalRuler())
class OutPutCodeNSTextView(CodeNSTextView):
def init(self):
self = super(OutPutCodeNSTextView, self).init()
self._items = []
self.setTextAttributes()
return self
def clear(self):
self._items = []
self.setString_("")
def appendText_isError_(self, text, isError):
self._items.append((text, isError))
attrs = self.textAttributes
if isError:
attrs = self.tracebackAttributes
text = AppKit.NSAttributedString.alloc().initWithString_attributes_(text, attrs)
self.textStorage().appendAttributedString_(text)
def userDefaultChanged_(self, notification):
super(OutPutCodeNSTextView, self).userDefaultChanged_(notification)
self.setTextAttributes()
def setTextAttributes(self):
self.setString_("")
styles = styleFromDefault()
self.setHighlightStyle_(styles)
self.textAttributes = outputTextAttributesForStyles(styles)
self.tracebackAttributes = outputTextAttributesForStyles(styles, isError=True)
backgroundColor = _hexStringToNSColor(styles.background_color, self._fallbackBackgroundColor)
self.setBackgroundColor_(backgroundColor)
selectionColor = _hexStringToNSColor(styles.highlight_color, self._fallbackHightLightColor)
self.setSelectedTextAttributes_({AppKit.NSBackgroundColorAttributeName: selectionColor})
self.setFont_(getFontDefault("PyDEFont", self._fallbackFont))
items = self._items
self._items = []
for text, isError in items:
self.appendText_isError_(text, isError)
class OutPutEditor(TextEditor):
nsTextViewClass = OutPutCodeNSTextView
def append(self, text, isError=False):
self.getNSTextView().appendText_isError_(text, isError)
def clear(self):
self.getNSTextView().clear()
def forceUpdate(self):
self.getNSTextView().display()
def scrollToEnd(self):
self.getNSTextView().scrollRangeToVisible_((len(self.get()), 0))
| bsd-2-clause | -5,953,409,070,966,837,000 | 35.941529 | 222 | 0.605499 | false |
hep-gc/cloudscheduler | web_frontend/cloudscheduler/csv2/group_views.py | 1 | 50458 |
from django.conf import settings
config = settings.CSV2_CONFIG
from django.shortcuts import render, get_object_or_404, redirect
from django.views.decorators.csrf import requires_csrf_token
from django.http import HttpResponse
from django.core.exceptions import PermissionDenied
from cloudscheduler.lib.fw_config import configure_fw
from cloudscheduler.lib.view_utils import \
lno, \
manage_group_users, \
manage_user_group_verification, \
qt, \
render, \
set_user_groups, \
table_fields, \
validate_by_filtered_table_entries, \
validate_fields
from collections import defaultdict
import bcrypt
from cloudscheduler.lib.schema import *
import re
from cloudscheduler.lib.web_profiler import silk_profile as silkp
# lno: GV - error code identifier.
MODID= 'GV'
#-------------------------------------------------------------------------------
GROUP_KEYS = {
'auto_active_group': False,
# Named argument formats (anything else is a string).
'format': {
'group_name': 'lowerdash',
'csrfmiddlewaretoken': 'ignore',
'group': 'ignore',
'htcondor_fqdn': 'fqdn,htcondor_host_id',
'job_cpus': 'integer',
'job_disk': 'integer',
'job_ram': 'integer',
'job_swap': 'integer',
'username': 'ignore',
'user_option': ['add', 'delete'],
'vm_keep_alive': 'integer',
'server_meta_ctl': 'reject',
'instances_ctl': 'reject',
'personality_ctl': 'reject',
'image_meta_ctl': 'reject',
'job_scratch': 'reject',
'personality_size_ctl': 'reject',
'server_groups_ctl': 'reject',
'security_group_rules_ctl': 'reject',
'keypairs_ctl': 'reject',
'security_groups_ctl': 'reject',
'server_group_members_ctl': 'reject',
'floating_ips_ctl': 'reject',
},
'array_fields': [
'username',
],
}
GROUP_ADD_KEYS = {
'not_empty': [
'htcondor_fqdn',
],
}
UNPRIVILEGED_GROUP_KEYS = {
'auto_active_group': True,
# Named argument formats (anything else is a string).
'format': {
'csrfmiddlewaretoken': 'ignore',
'group': 'ignore',
'htcondor_fqdn': 'fqdn,htcondor_host_id',
'job_cpus': 'integer',
'job_disk': 'integer',
'job_ram': 'integer',
'job_swap': 'integer',
'vm_keep_alive': 'integer',
'job_scratch': 'reject',
},
}
METADATA_KEYS = {
# Should the active_group be automatically inserted into the primary keys.
'auto_active_group': True,
'format': {
'enabled': 'dboolean',
'priority': 'integer',
'metadata': 'metadata',
'metadata_name': 'lowerdash',
'mime_type': ('csv2_mime_types', 'mime_type'),
'csrfmiddlewaretoken': 'ignore',
'group': 'ignore',
},
'mandatory': [
'metadata_name',
],
'not_empty': [
'metadata_name',
],
}
METADATA_ADD_KEYS = {
'mandatory': [
'metadata',
],
}
IGNORE_METADATA_NAME = {
'format': {
'metadata_name': 'ignore',
},
}
IGNORE_KEYS = {
'format': {
'alias_name': 'ignore',
'cloud_name': 'ignore',
'fingerprint': 'ignore',
'id': 'ignore',
'key_name': 'ignore',
'name': 'ignore',
'username': 'ignore',
'vmid': 'ignore',
},
}
LIST_KEYS = {
# Named argument formats (anything else is a string).
'format': {
'csrfmiddlewaretoken': 'ignore',
'group': 'ignore',
},
}
#-------------------------------------------------------------------------------
def validate_url_fields(prefix, request, template, actual_fields, expected_fields):
"""Ensure values required in a URL are given, that they are not empty, and that they match the lower format."""
for field in expected_fields:
if field in actual_fields:
if actual_fields[field] == '':
return render(request, template, {'response_code': 1, 'message': '%s, value specified for "%s" must not be the empty string.' % (prefix, field)})
elif not re.fullmatch('([a-z0-9_.:]-?)*[a-z0-9_.:]', actual_fields[field]):
return render(request, template, {'response_code': 1, 'message': '%s, value specified for "%s" must be all lowercase letters, digits, dashes, underscores, periods, and colons, and cannot contain more than one consecutive dash or start or end with a dash.' % (prefix, field)})
else:
return render(request, template, {'response_code': 1, 'message': '%s, request did not contain mandatory parameter "%s".' % (prefix, field)})
#-------------------------------------------------------------------------------
@silkp(name='Group Add')
def add(request):
"""
This function should receive a post request with a payload of group configuration
to add the specified group.
"""
# open the database.
config.db_open()
config.refresh()
# Retrieve the active user, associated group list and optionally set the active group.
rc, msg, active_user = set_user_groups(config, request)
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s %s' % (lno(MODID), msg))
if request.method == 'POST':
# Validate input fields.
rc, msg, fields, tables, columns = validate_fields(config, request, [GROUP_KEYS, GROUP_ADD_KEYS], ['csv2_groups', 'csv2_user_groups', 'csv2_user,n', 'csv2_group_metadata,n'], active_user)
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group add %s' % (lno(MODID), msg))
if 'vm_flavor' in fields and fields['vm_flavor']:
rc, msg = validate_by_filtered_table_entries(config, fields['vm_flavor'], 'vm_flavor', 'cloud_flavors', 'name', [['group_name', fields['group_name']]])
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group add, "%s" failed - %s.' % (lno(MODID), fields['group_name'], msg))
if 'vm_image' in fields and fields['vm_image']:
rc, msg = validate_by_filtered_table_entries(config, fields['vm_image'], 'vm_image', 'cloud_images', 'name', [['group_name', fields['group_name']]])
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group add, "%s" failed - %s.' % (lno(MODID), fields['group_name'], msg))
if 'vm_keyname' in fields and fields['vm_keyname']:
rc, msg = validate_by_filtered_table_entries(config, fields['vm_keyname'], 'vm_keyname', 'cloud_keypairs', 'key_name', [['group_name', fields['group_name']]])
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group add, "%s" failed - %s.' % (lno(MODID), fields['group_name'], msg))
if 'vm_network' in fields and fields['vm_network']:
rc, msg = validate_by_filtered_table_entries(config, fields['vm_network'], 'vm_network', 'cloud_networks', 'name', [['group_name', fields['group_name']]])
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group add, "%s" failed - %s.' % (lno(MODID), fields['group_name'], msg))
if 'vm_security_groups' in fields and fields['vm_security_groups']:
rc, msg = validate_by_filtered_table_entries(config, fields['vm_security_groups'], 'vm_security_groups', 'cloud_security_groups', 'name', [['group_name', fields['group_name']]], allow_value_list=True)
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group add, "%s" failed - %s.' % (lno(MODID), fields['group_name'], msg))
# Validity check the specified users.
if 'username' in fields:
rc, msg = manage_user_group_verification(config, tables, fields['username'], None)
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group add, "%s" failed - %s.' % (lno(MODID), fields['group_name'], msg))
# Add the group.
table = 'csv2_groups'
rc, msg = config.db_insert(table, table_fields(fields, table, columns, 'insert'))
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group add "%s" failed - %s.' % (lno(MODID), fields['group_name'], msg))
# Add user_groups.
if 'username' in fields:
rc, msg = manage_group_users(config, tables, fields['group_name'], fields['username'])
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group add "%s" failed - %s.' % (lno(MODID), fields['group_name'], msg))
# Add the default metadata file.
filepath = '/opt/cloudscheduler/metadata/'
filename = 'default.yaml.j2'
        with open(filepath + filename, "r") as metadata_file:
            filedata = metadata_file.read()
table = 'csv2_group_metadata'
meta_dict = {
"group_name": fields['group_name'],
"metadata_name": filename,
"enabled": 1,
"priority": 0,
"metadata": filedata,
"mime_type": "cloud-config"
}
rc, msg = config.db_insert(table, meta_dict)
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group add "%s" failed - %s.' % (lno(MODID), fields['group_name'], msg))
# Commit the updates, configure firewall and return.
config.db_commit()
configure_fw(config)
config.db_close()
return group_list(request, active_user=active_user, response_code=0, message='group "%s" successfully added.' % (fields['group_name']))
### Bad request.
else:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group add, invalid method "%s" specified.' % (lno(MODID), request.method))
#-------------------------------------------------------------------------------
@silkp(name='Group Defaults')
def defaults(request, active_user=None, response_code=0, message=None):
"""
Update and list group defaults.
"""
# open the database.
config.db_open()
# Retrieve the active user, associated group list and optionally set the active group.
rc, msg, active_user = set_user_groups(config, request, super_user=False)
if rc == 0:
user_groups_set = True
message = None
if request.method == 'POST':
# Validate input fields.
rc, msg, fields, tables, columns = validate_fields(config, request, [UNPRIVILEGED_GROUP_KEYS], ['csv2_groups'], active_user)
if rc != 0:
config.db_close()
return render(request, 'csv2/group_defaults.html', {'response_code': 1, 'message': '%s default update/list %s' % (lno(MODID), msg), 'active_user': active_user.username, 'active_group': active_user.active_group, 'user_groups': active_user.user_groups})
if rc == 0 and ('vm_flavor' in fields) and (fields['vm_flavor']):
rc, msg = validate_by_filtered_table_entries(config, fields['vm_flavor'], 'vm_flavor', 'cloud_flavors', 'name', [['group_name', fields['group_name']]])
if rc == 0 and ('vm_image' in fields) and (fields['vm_image']):
rc, msg = validate_by_filtered_table_entries(config, fields['vm_image'], 'vm_image', 'cloud_images', 'name', [['group_name', fields['group_name']]])
if rc == 0 and ('vm_keyname' in fields) and (fields['vm_keyname']):
rc, msg = validate_by_filtered_table_entries(config, fields['vm_keyname'], 'vm_keyname', 'cloud_keypairs', 'key_name', [['group_name', fields['group_name']]])
if rc == 0 and ('vm_network' in fields) and (fields['vm_network']):
rc, msg = validate_by_filtered_table_entries(config, fields['vm_network'], 'vm_network', 'cloud_networks', 'name', [['group_name', fields['group_name']]])
if rc == 0 and ('vm_security_groups' in fields) and (fields['vm_security_groups']):
rc, msg = validate_by_filtered_table_entries(config, fields['vm_security_groups'], 'vm_security_groups', 'cloud_security_groups', 'name', [['group_name', fields['group_name']]], allow_value_list=True)
if rc == 0:
# Update the group defaults.
table = 'csv2_groups'
where_clause = "group_name='%s'" % active_user.active_group
rc, msg = config.db_update(table, table_fields(fields, table, columns, 'update'), where=where_clause)
if rc == 0:
# Commit the updates, configure firewall and return.
config.db_commit()
configure_fw(config)
message = 'group defaults "%s" successfully updated.' % (active_user.active_group)
else:
message = '%s group defaults update "%s" failed - %s.' % (lno(MODID), active_user.active_group, msg)
else:
message = '%s group defaults update %s.' % (lno(MODID), msg)
else:
user_groups_set = False
message = '%s %s' % (lno(MODID), msg)
# Prepare default CLI/Error response.
defaults_list = []
image_list = []
flavor_list = []
metadata_dict = {}
keypairs_list = []
network_list = []
security_groups_list = []
# If User/Groups successfully set, retrieve group information.
if user_groups_set:
where_clause = "group_name='%s'" % active_user.active_group
rc, msg, defaults_list = config.db_query("csv2_groups", where=where_clause)
# Replace None values with "".
for defaults in defaults_list:
for key, value in defaults.items():
                if value is None:
                    defaults_list[0][key] = ""
# # And additional information for the web page.
# if request.META['HTTP_ACCEPT'] != 'application/json':
# Get all the images in group:
rc, msg, image_list = config.db_query("cloud_images", where=where_clause)
# Get all the flavors in group:
rc, msg, flavor_list = config.db_query("cloud_flavors", where=where_clause)
# Get all keynames in group:
rc, msg, keypairs_list = config.db_query("cloud_keypairs", where=where_clause)
# Get all networks in group:
rc, msg, network_list = config.db_query("cloud_networks", where=where_clause)
# Get all security_groups in group:
rc, msg, security_groups_list = config.db_query("cloud_security_groups", where=where_clause)
# Get the group default metadata list:
rc, msg, _group_list = config.db_query("view_groups_with_metadata_info", where=where_clause)
_group_list, metadata_dict = qt(
_group_list,
keys = {
'primary': [
'group_name',
],
'secondary': [
'metadata_name',
'metadata_enabled',
'metadata_priority',
'metadata_mime_type'
]
},
prune=['password']
)
# Render the page.
context = {
'active_user': active_user.username,
'active_group': active_user.active_group,
'user_groups': active_user.user_groups,
'defaults_list': defaults_list,
'image_list': image_list,
'flavor_list': flavor_list,
'metadata_dict': metadata_dict,
'keypairs_list': keypairs_list,
'network_list': network_list,
'security_groups_list': security_groups_list,
'response_code': rc,
'message': message,
'is_superuser': active_user.is_superuser,
'version': config.get_version()
}
config.db_close()
return render(request, 'csv2/group_defaults.html', context)
#-------------------------------------------------------------------------------
@silkp(name='Group Delete')
def delete(request):
"""
    This function should receive a post request with a payload of the group name
to be deleted.
"""
# open the database.
config.db_open()
# Retrieve the active user, associated group list and optionally set the active group.
rc, msg, active_user = set_user_groups(config, request)
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s %s' % (lno(MODID), msg))
if request.method == 'POST':
# Validate input fields.
rc, msg, fields, tables, columns = validate_fields(config, request, [GROUP_KEYS, IGNORE_METADATA_NAME, IGNORE_KEYS], [
'csv2_groups',
'csv2_group_metadata',
'csv2_clouds',
'csv2_cloud_aliases',
'csv2_cloud_metadata',
'csv2_group_metadata_exclusions',
'csv2_user_groups',
'csv2_vms',
'cloud_keypairs',
'cloud_networks',
'cloud_security_groups',
'cloud_limits',
'cloud_images',
'cloud_flavors'
], active_user)
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group delete %s' % (lno(MODID), msg))
# Delete any group metadata files for the group.
where_clause = "group_name='%s'" % fields['group_name']
rc, msg, _group_list = config.db_query("view_groups_with_metadata_names", where=where_clause)
for row in _group_list:
if row['group_name'] == fields['group_name'] and row['metadata_names']:
metadata_names = row['metadata_names'].split(',')
table = 'csv2_group_metadata'
for metadata_name in metadata_names:
# Delete the group metadata files.
where_clause = "group_name='%s' and metadata_name='%s'" % (fields['group_name'], metadata_name)
rc, msg = config.db_delete(table, where=where_clause)
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group metadata file delete "%s::%s" failed - %s.' % (lno(MODID), fields['group_name'], metadata_name, msg))
# Delete the csv2_clouds.
table = 'csv2_clouds'
where_clause = "group_name='%s'" % fields['group_name']
rc, msg = config.db_delete(table, where=where_clause)
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group resources delete "%s" failed - %s.' % (lno(MODID), fields['group_name'], msg))
# Delete the csv2_cloud_metadata.
table = 'csv2_cloud_metadata'
rc, msg = config.db_delete(table, where=where_clause)
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group resource metadata delete "%s" failed - %s.' % (lno(MODID), fields['group_name'], msg))
# Delete the csv2_group_metadata_exclusions.
table = 'csv2_group_metadata_exclusions'
rc, msg = config.db_delete(table, where=where_clause)
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s delete group metadata exclusions for group "%s" failed - %s.' % (lno(MODID), fields['group_name'], msg))
# Delete the csv2_user_groups.
table = 'csv2_user_groups'
rc, msg = config.db_delete(table, where=where_clause)
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group users delete "%s" failed - %s.' % (lno(MODID), fields['group_name'], msg))
# Delete the csv2_cloud_aliases.
table = 'csv2_cloud_aliases'
rc, msg = config.db_delete(table, where=where_clause)
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group resources delete "%s" failed - %s.' % (lno(MODID), fields['group_name'], msg))
# Delete the csv2_vms.
table = 'csv2_vms'
rc, msg = config.db_delete(table, where=where_clause)
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group VMs defaults delete "%s" failed - %s.' % (lno(MODID), fields['group_name'], msg))
# Delete the cloud_keypairs.
table = 'cloud_keypairs'
rc, msg = config.db_delete(table, where=where_clause)
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group keynames delete "%s" failed - %s.' % (lno(MODID), fields['group_name'], msg))
# Delete the cloud_networks.
table = 'cloud_networks'
rc, msg = config.db_delete(table, where=where_clause)
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group networks delete "%s" failed - %s.' % (lno(MODID), fields['group_name'], msg))
# Delete the cloud_security_groups.
table = 'cloud_security_groups'
rc, msg = config.db_delete(table, where=where_clause)
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group security groups delete "%s" failed - %s.' % (lno(MODID), fields['group_name'], msg))
# Delete the cloud_limits.
table = 'cloud_limits'
rc, msg = config.db_delete(table, where=where_clause)
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group limits delete "%s" failed - %s.' % (lno(MODID), fields['group_name'], msg))
# Delete the cloud_images.
table = 'cloud_images'
rc, msg = config.db_delete(table, where=where_clause)
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group images delete "%s" failed - %s.' % (lno(MODID), fields['group_name'], msg))
# Delete the cloud_flavors.
table = 'cloud_flavors'
rc, msg = config.db_delete(table, where=where_clause)
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group flavors delete "%s" failed - %s.' % (lno(MODID), fields['group_name'], msg))
# Delete the group.
table = 'csv2_groups'
rc, msg = config.db_delete(table, where=where_clause)
if rc == 0:
# Commit the deletions, configure firewall and return.
config.db_commit()
configure_fw(config)
config.db_close()
return group_list(request, active_user=active_user, response_code=0, message='group "%s" successfully deleted.' % (fields['group_name']))
else:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group delete "%s" failed - %s.' % (lno(MODID), fields['group_name'], msg))
### Bad request.
else:
# return group_list(request, active_user=active_user, response_code=1, message='%s group delete request did not contain mandatory parameter "group_name".' % lno(MODID))
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group delete, invalid method "%s" specified.' % (lno(MODID), request.method))
#-------------------------------------------------------------------------------
@silkp(name='Group List')
def group_list(request, active_user=None, response_code=0, message=None):
group_list_path = '/group/list/'
    if request.path != group_list_path and request.META['HTTP_ACCEPT'] == 'application/json':
return render(request, 'csv2/clouds.html', {'response_code': response_code, 'message': message, 'active_user': active_user.username, 'active_group': active_user.active_group, 'user_groups': active_user.user_groups})
# open the database.
config.db_open()
# Retrieve the active user, associated group list and optionally set the active group.
if active_user is None:
rc, msg, active_user = set_user_groups(config, request)
if rc != 0:
config.db_close()
return render(request, 'csv2/groups.html', {'response_code': 1, 'message': '%s %s' % (lno(MODID), msg)})
# Validate input fields (should be none).
rc, msg, fields, tables, columns = validate_fields(config, request, [LIST_KEYS], [], active_user)
    if rc != 0 and request.path == group_list_path:
config.db_close()
return render(request, 'csv2/groups.html', {'response_code': 1, 'message': '%s group list, %s' % (lno(MODID), msg)})
# Retrieve group information.
if request.META['HTTP_ACCEPT'] == 'application/json':
rc, msg, _group_list = config.db_query("view_groups_with_metadata_names", order_by="group_name")
metadata_dict = {}
else:
rc, msg, _group_list_raw = config.db_query("view_groups_with_metadata_names", order_by="group_name")
_group_list, metadata_dict = qt(
_group_list_raw,
keys = {
'primary': [
'group_name',
],
'secondary': [
'metadata_names',
'metadata_enabled',
'metadata_priority',
'metadata_mime_type',
]
},
prune=['password']
)
# Retrieve user/groups list (dictionary containing list for each user).
rc, msg, groups_per_user_raw = config.db_query("view_user_groups")
groups_per_user = qt(
groups_per_user_raw,
prune=['password']
)
rc, msg, group_defaults = config.db_query("csv2_groups")
# Position the page.
# obj_act_id = request.path.split('/')
# if selector:
# if selector == '-':
# current_group = ''
# else:
# current_group = selector
# elif len(obj_act_id) > 3 and len(obj_act_id[3]) > 0:
# current_group = str(obj_act_id[3])
# else:
if len(_group_list) > 0:
current_group = str(_group_list[0]['group_name'])
else:
current_group = ''
# Render the page.
context = {
'active_user': active_user.username,
'active_group': active_user.active_group,
'user_groups': active_user.user_groups,
'group_defaults': group_defaults,
'group_list': _group_list,
'groups_per_user': groups_per_user,
'metadata_dict': metadata_dict,
'current_group': current_group,
'response_code': response_code,
'message': message,
'is_superuser': active_user.is_superuser,
'version': config.get_version()
}
config.db_close()
return render(request, 'csv2/groups.html', context)
#-------------------------------------------------------------------------------
@silkp(name='Group Metadata Add')
def metadata_add(request):
"""
    This function should receive a post request with a payload of a metadata file
to add to a given group.
"""
# open the database.
config.db_open()
# Retrieve the active user, associated group list and optionally set the active group.
rc, msg, active_user = set_user_groups(config, request, super_user=False)
if rc != 0:
config.db_close()
return render(request, 'csv2/blank_msg.html', {'response_code': 1, 'message': '%s %s' % (lno(MODID), msg)})
if request.method == 'POST':
# Validate input fields.
rc, msg, fields, tables, columns = validate_fields(config, request, [METADATA_KEYS, METADATA_ADD_KEYS], ['csv2_group_metadata'], active_user)
if rc != 0:
config.db_close()
return render(request, 'csv2/blank_msg.html', {'response_code': 1, 'message': '%s group metadata-add %s' % (lno(MODID), msg)})
# Add the group metadata file.
table = 'csv2_group_metadata'
rc, msg = config.db_insert(table, table_fields(fields, table, columns, 'insert'))
if rc == 0:
config.db_close(commit=True)
return render(request, 'csv2/reload_parent.html', {'group_name': fields['group_name'], 'response_code': 0, 'message': 'group metadata file "%s::%s" successfully added.' % (active_user.active_group, fields['metadata_name'])})
else:
config.db_close()
return render(request, 'csv2/blank_msg.html', {'response_code': 1, 'message': '%s group metadata-add "%s::%s" failed - %s.' % (lno(MODID), active_user.active_group, fields['metadata_name'], msg)})
### Bad request.
else:
config.db_close()
return render(request, 'csv2/blank_msg.html', {'response_code': 1, 'message': '%s group metadata_add, invalid method "%s" specified.' % (lno(MODID), request.method)})
#-------------------------------------------------------------------------------
@silkp(name='Group Metadata Delete')
def metadata_delete(request):
"""
    This function should receive a post request with a payload of a metadata file
name to be deleted from the given group.
"""
context = {}
# open the database.
config.db_open()
# Retrieve the active user, associated group list and optionally set the active group.
rc, msg, active_user = set_user_groups(config, request, super_user=False)
if rc != 0:
config.db_close()
return render(request, 'csv2/blank_msg.html', {'response_code': 1, 'message': '%s %s' % (lno(MODID), msg)})
if request.method == 'POST':
# Validate input fields.
rc, msg, fields, tables, columns = validate_fields(config, request, [METADATA_KEYS], ['csv2_group_metadata', 'csv2_group_metadata_exclusions,n'], active_user)
if rc != 0:
config.db_close()
return render(request, 'csv2/blank_msg.html', {'response_code': 1, 'message': '%s group metadata-delete %s' % (lno(MODID), msg)})
# Delete the csv2_group_metadata_exclusions.
table = 'csv2_group_metadata_exclusions'
where_clause = "group_name='%s' and metadata_name='%s'" % (fields['group_name'], fields['metadata_name'])
rc, msg = config.db_delete(table, where=where_clause)
if rc != 0:
config.db_close()
return render(request, 'csv2/blank_msg.html', {'response_code': 1, 'message': '%s delete group metadata exclusion for group=%s, metadata=%s failed - %s.' % (lno(MODID), fields['group_name'], fields['metadata_name'], msg)})
# Delete the group metadata file.
table = 'csv2_group_metadata'
where_clause = "group_name='%s' and metadata_name='%s'" % (active_user.active_group, fields['metadata_name'])
rc, msg = config.db_delete(table, where=where_clause)
if rc == 0:
config.db_close(commit=True)
return render(request, 'csv2/reload_parent.html', {'group_name': fields['group_name'], 'response_code': 0, 'message': 'group metadata file "%s::%s" successfully deleted.' % (active_user.active_group, fields['metadata_name'])})
else:
config.db_close()
return render(request, 'csv2/blank_msg.html', {'response_code': 1, 'message': '%s group metadata-delete "%s::%s" failed - %s.' % (lno(MODID), active_user.active_group, fields['metadata_name'], msg)})
### Bad request.
else:
# return render(request, 'csv2/blank_msg.html', {'response_code': 1, 'message': '%s group metadata-delete request did not contain mandatory parameter "metadata_name".' % lno(MODID)})
config.db_close()
return render(request, 'csv2/blank_msg.html', {'response_code': 1, 'message': '%s group metadata-delete, invalid method "%s" specified.' % (lno(MODID), request.method)})
#-------------------------------------------------------------------------------
@silkp(name='Group Metadata Fetch')
def metadata_fetch(request, response_code=0, message=None, metadata_name=None):
context = {}
# open the database.
config.db_open()
# Retrieve the active user, associated group list and optionally set the active group.
rc, msg, active_user = set_user_groups(config, request, super_user=False)
if rc != 0:
config.db_close()
return render(request, 'csv2/blank_msg.html', {'response_code': 1, 'message': '%s %s' % (lno(MODID), msg)})
# Get mime type list:
rc, msg, mime_types_list = config.db_query("csv2_mime_types")
# If we are NOT returning from an update, we are fetching from webpage
    if metadata_name is None:
field_error = validate_url_fields('%s group metadata_fetch' % lno(MODID), request, 'csv2/blank_msg.html', active_user.kwargs, ['metadata_name'])
if field_error:
return field_error
metadata_name = active_user.kwargs['metadata_name']
# Retrieve metadata file.
if metadata_name:
METADATA = "csv2_group_metadata"
where_clause = "group_name='%s' and metadata_name='%s'" % (active_user.active_group, metadata_name)
rc, msg, METADATAobj = config.db_query(METADATA, where=where_clause)
if METADATAobj:
for row in METADATAobj:
context = {
'group_name': row["group_name"],
'metadata': row["metadata"],
'metadata_enabled': row["enabled"],
'metadata_priority': row["priority"],
'metadata_mime_type': row["mime_type"],
'metadata_name': row["metadata_name"],
'mime_types_list': mime_types_list,
'response_code': response_code,
'message': message,
'is_superuser': active_user.is_superuser,
'version': config.get_version()
}
config.db_close()
return render(request, 'csv2/meta_editor.html', context)
config.db_close()
return render(request, 'csv2/blank_msg.html', {'response_code': 1, 'message': 'group metadata_fetch, file "%s::%s" does not exist.' % (active_user.active_group, metadata_name)})
config.db_close()
return render(request, 'csv2/blank_msg.html', {'response_code': 1, 'message': 'group metadata_fetch, metadata file name omitted.'})
#-------------------------------------------------------------------------------
@silkp(name='Group Metadata List')
@requires_csrf_token
def metadata_list(request):
# open the database.
config.db_open()
# Retrieve the active user, associated group list and optionally set the active group.
rc, msg, active_user = set_user_groups(config, request, super_user=False)
if rc != 0:
config.db_close()
return defaults(request, active_user=active_user, response_code=1, message='%s %s' % (lno(MODID), msg))
# Validate input fields (should be none).
rc, msg, fields, tables, columns = validate_fields(config, request, [LIST_KEYS], [], active_user)
if rc != 0:
config.db_close()
return render(request, 'csv2/blank_msg.html', {'response_code': 1, 'message': '%s group metadata-list, %s' % (lno(MODID), msg)})
# Retrieve cloud/metadata information.
where_clause = "group_name='%s'" % (active_user.active_group)
rc, msg, group_metadata_list = config.db_query("csv2_group_metadata", where=where_clause)
# Render the page.
context = {
'active_user': active_user.username,
'active_group': active_user.active_group,
'user_groups': active_user.user_groups,
'group_metadata_list': group_metadata_list,
'response_code': 0,
'message': None,
'is_superuser': active_user.is_superuser,
'version': config.get_version()
}
config.db_close()
return render(request, 'csv2/blank_msg.html', context)
#-------------------------------------------------------------------------------
@silkp(name="Group Metadata New")
@requires_csrf_token
def metadata_new(request):
context = {}
# open the database.
config.db_open()
# Retrieve the active user, associated group list and optionally set the active group.
rc, msg, active_user = set_user_groups(config, request, super_user=False)
if rc != 0:
config.db_close()
return render(request, 'csv2/blank_msg.html', {'response_code': 1, 'message': '%s %s' % (lno(MODID), msg)})
# Get mime type list:
rc, msg, mime_types_list = config.db_query("csv2_mime_types")
context = {
'group_name': active_user.active_group,
'metadata': "",
'metadata_enabled': 0,
'metadata_priority': 0,
'metadata_mime_type': "",
'metadata_name': "",
'mime_types_list': mime_types_list,
'response_code': 0,
'message': "new-group-metadata",
'is_superuser': active_user.is_superuser,
'version': config.get_version()
}
config.db_close()
return render(request, 'csv2/meta_editor.html', context)
#-------------------------------------------------------------------------------
@silkp(name='Group Metadata Query')
@requires_csrf_token
def metadata_query(request):
# open the database.
config.db_open()
# Retrieve the active user, associated group list and optionally set the active group.
rc, msg, active_user = set_user_groups(config, request, super_user=False)
if rc != 0:
config.db_close()
return defaults(request, active_user=active_user, response_code=1, message='%s group metadata_query %s' % (lno(MODID), msg))
fields = active_user.kwargs
field_error = validate_url_fields('%s group metadata_query' % lno(MODID), request, 'csv2/blank_msg.html', fields, ['metadata_name'])
if field_error:
return field_error
# Retrieve cloud/metadata information.
where_clause = "group_name='%s' and metadata_name='%s'" % (active_user.active_group, fields['metadata_name'])
rc, msg, group_metadata_list = config.db_query("csv2_group_metadata", where=where_clause)
config.db_close()
metadata_exists = bool(group_metadata_list)
# Render the page.
context = {
'active_user': active_user.username,
'active_group': active_user.active_group,
'user_groups': active_user.user_groups,
'metadata_exists': metadata_exists,
'response_code': 0,
'message': None,
'is_superuser': active_user.is_superuser,
'version': config.get_version()
}
return render(request, 'csv2/blank_msg.html', context)
#-------------------------------------------------------------------------------
@silkp(name='Group Metadata Update')
def metadata_update(request):
"""
    This function should receive a post request with a payload of a metadata file
as a replacement for the specified file.
"""
context = {}
# open the database.
config.db_open()
# Retrieve the active user, associated group list and optionally set the active group.
rc, msg, active_user = set_user_groups(config, request, super_user=False)
if rc != 0:
config.db_close()
return defaults(request, active_user=active_user, response_code=1, message='%s %s' % (lno(MODID), msg))
if request.method == 'POST':
# Validate input fields.
rc, msg, fields, tables, columns = validate_fields(config, request, [METADATA_KEYS], ['csv2_group_metadata'], active_user)
if rc != 0:
config.db_close()
return render(request, 'csv2/blank_msg.html', {'response_code': 1, 'message': '%s group metadata-update %s' % (lno(MODID), msg)})
# Update the group metadata file.
table = 'csv2_group_metadata'
updates = table_fields(fields, table, columns, 'update')
        if len(updates) < 3:  # updates always include the two keys (group_name & metadata_name), so fewer than 3 fields means there is nothing to update.
config.db_close()
return render(request, 'csv2/blank_msg.html', {'response_code': 1, 'message': '%s group metadata-update "%s::%s" specified no fields to update and was ignored.' % (lno(MODID), active_user.active_group, fields['metadata_name'])})
where_clause = 'group_name="%s" and metadata_name="%s"' % (active_user.active_group, fields['metadata_name'])
rc, msg = config.db_update(table, updates, where=where_clause)
if rc == 0:
config.db_close(commit=True)
message='group metadata file "%s::%s" successfully updated.' % (fields['group_name'], fields['metadata_name'])
return metadata_fetch(request, response_code=0, message=message, metadata_name=fields['metadata_name'])
else:
config.db_close()
return render(request, 'csv2/blank_msg.html', {'response_code': 1, 'message': '%s group metadata-update "%s::%s" failed - %s.' % (lno(MODID), active_user.active_group, fields['metadata_name'], msg)})
### Bad request.
else:
config.db_close()
return render(request, 'csv2/blank_msg.html', {'response_code': 1, 'message': '%s group metadata_update, invalid method "%s" specified.' % (lno(MODID), request.method)})
#-------------------------------------------------------------------------------
@silkp(name='Group Update')
def update(request):
"""
    This function should receive a post request with a payload of group configuration
to update a given group.
"""
# open the database.
config.db_open()
config.refresh()
# Retrieve the active user, associated group list and optionally set the active group.
rc, msg, active_user = set_user_groups(config, request)
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s %s' % (lno(MODID), msg))
if request.method == 'POST':
# Validate input fields.
rc, msg, fields, tables, columns = validate_fields(config, request, [GROUP_KEYS], ['csv2_groups','csv2_user_groups', 'csv2_user,n'], active_user)
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group update %s' % (lno(MODID), msg))
if 'vm_flavor' in fields and fields['vm_flavor']:
rc, msg = validate_by_filtered_table_entries(config, fields['vm_flavor'], 'vm_flavor', 'cloud_flavors', 'name', [['group_name', fields['group_name']]])
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group update, "%s" failed - %s.' % (lno(MODID), fields['group_name'], msg))
if 'vm_image' in fields and fields['vm_image']:
rc, msg = validate_by_filtered_table_entries(config, fields['vm_image'], 'vm_image', 'cloud_images', 'name', [['group_name', fields['group_name']]])
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group update, "%s" failed - %s.' % (lno(MODID), fields['group_name'], msg))
if 'vm_keyname' in fields and fields['vm_keyname']:
rc, msg = validate_by_filtered_table_entries(config, fields['vm_keyname'], 'vm_keyname', 'cloud_keypairs', 'key_name', [['group_name', fields['group_name']]])
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group update, "%s" failed - %s.' % (lno(MODID), fields['group_name'], msg))
if 'vm_network' in fields and fields['vm_network']:
rc, msg = validate_by_filtered_table_entries(config, fields['vm_network'], 'vm_network', 'cloud_networks', 'name', [['group_name', fields['group_name']]])
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group update, "%s" failed - %s.' % (lno(MODID), fields['group_name'], msg))
if 'vm_security_groups' in fields and fields['vm_security_groups']:
rc, msg = validate_by_filtered_table_entries(config, fields['vm_security_groups'], 'vm_security_groups', 'cloud_security_groups', 'name', [['group_name', fields['group_name']]], allow_value_list=True)
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group update, "%s" failed - %s.' % (lno(MODID), fields['group_name'], msg))
# Validity check the specified users.
if 'username' in fields:
rc, msg = manage_user_group_verification(config, tables, fields['username'], None)
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group update, "%s" failed - %s.' % (lno(MODID), fields['group_name'], msg))
# Update the group.
table = 'csv2_groups'
group_updates = table_fields(fields, table, columns, 'update')
        # group_updates always includes the group name, so it must have more than one entry for there to actually be a change.
if len(group_updates) > 1:
where_clause = 'group_name="%s"' % fields['group_name']
rc, msg = config.db_update(table, group_updates, where=where_clause)
if rc != 0:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group update, "%s" failed - %s.' % (lno(MODID), fields['group_name'], msg))
else:
if 'username' not in fields and request.META['HTTP_ACCEPT'] == 'application/json':
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group update must specify at least one field to update.' % lno(MODID))
# Update user groups.
if request.META['HTTP_ACCEPT'] == 'application/json':
if 'username' in fields:
if 'user_option' in fields and fields['user_option'] == 'delete':
rc, msg = manage_group_users(config, tables, fields['group_name'], users=fields['username'], option='delete')
else:
rc, msg = manage_group_users(config, tables, fields['group_name'], users=fields['username'], option='add')
else:
if 'username' in fields:
rc, msg = manage_group_users(config, tables, fields['group_name'], fields['username'])
else:
rc, msg = manage_group_users(config, tables, fields['group_name'], None)
if rc == 0:
# Commit the updates, configure firewall and return.
config.db_commit()
configure_fw(config)
config.db_close()
return group_list(request, active_user=active_user, response_code=0, message='group "%s" successfully updated.' % (fields['group_name']))
else:
config.db_close()
return group_list(request, active_user=active_user, response_code=1, message='%s group update "%s" failed - %s.' % (lno(MODID), fields['group_name'], msg))
### Bad request.
else:
return group_list(request, active_user=active_user, response_code=1, message='%s group update, invalid method "%s" specified.' % (lno(MODID), request.method))
| apache-2.0 | -8,947,802,555,058,953,000 | 45.376838 | 291 | 0.559475 | false |
hongzhouye/frankenstein | tools/mol_utils.py | 1 | 8589 | """Utils functions for module MOL
"""
import os
import numpy as np
from frankenstein.tools.io_utils import zmat2xyz
from frankenstein.data.atom_data import get_atomic_number
def get_enuc(Zs, xyzs):
"""Compute nuclear repulsion for a give molecule
Note:
The coordinates must be in unit of Bohr. (1 Bohr = 1.88972612457 Ang)
"""
natom = len(Zs)
assert(len(xyzs) == 3*natom)
rs = np.asarray(xyzs).reshape(natom, 3)
enuc = 0
for i in range(natom):
for j in range(i+1, natom):
enuc += Zs[i]*Zs[j] / np.sum((rs[i]-rs[j])**2.)**0.5
return enuc
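def _example_enuc_h2():
    """Illustrative usage sketch (hypothetical helper, not in the original
    module): nuclear repulsion of H2 at a 1.4 Bohr bond length, which by the
    pairwise formula above is simply 1*1/1.4.
    """
    return get_enuc([1, 1], [0.0, 0.0, 0.0, 0.0, 0.0, 1.4])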
# utils for geometry
def parse_gfile(gfile):
"""Parse input geometry file into standard geometry string
"""
if gfile[-4:] == ".xyz":
fname = gfile
elif gfile[-5:] == ".zmat":
fname = ".tmp.xyz"
zmat2xyz(gfile, fname)
else:
raise ValueError("Unknown format of input geometry file {:s}".format(gfile))
with open(fname, "r") as f:
natom = int(f.readline())
f.readline()
gstr = ";".join([" ".join(f.readline().split()[:4]) for i in range(natom)])
if fname == ".tmp.xyz":
os.system("rm .tmp.xyz")
return gstr
def standardize_gstr(gstr):
"""Put input geometry string into standard format
[NOTE] If input string is in Z-mat format, transformation to xyz will be performed first.
"""
atoms = [spg.strip() for spg in gstr.split(";")]
atom0 = atoms[0].split()
if len(atom0) == 1:
fzmat = ".tmp.zmat"
with open(fzmat, "w") as f:
f.write("{:d}\n\n".format(len(atoms)))
f.write("\n".join(atoms))
gstr = parse_gfile(fzmat)
os.system("rm {:s}".format(fzmat))
elif len(atom0) == 4:
gstr = ";".join([" ".join(atom.split()) for atom in atoms])
else:
raise ValueError("Unknown format of input geometry string\n{:s}".format(gstr))
return gstr
def parse_gstr(gstr, scale=1.88972612457):
"""Get atomic numbers and (scaled) atomic coordinates
Inp:
scale (float, optional, default: 1.88972612457):
            Scaling factor for coordinates. The default assumes input coordinates are in angstrom and transforms them into bohr.
"""
axyzs = [atom.split() for atom in gstr.split(";")]
natom = len(axyzs)
atoms = [None] * natom
xyzs = np.zeros(3*natom)
for ia in range(natom):
atoms[ia] = axyzs[ia][0]
xyzs[ia*3:(ia+1)*3] = list(map(float, axyzs[ia][1:]))
xyzs *= scale
xyzs = xyzs.tolist()
Zs = [get_atomic_number(atoms[ia]) for ia in range(natom)]
return atoms, Zs, xyzs
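def _example_parse_water_gstr():
    """Illustrative usage sketch (hypothetical helper, not in the original
    module): parse a water geometry string; Angstrom coordinates are scaled
    to Bohr by the default factor 1.88972612457.
    """
    gstr = "O 0.000 0.000 0.117;H 0.000 0.757 -0.471;H 0.000 -0.757 -0.471"
    atoms, Zs, xyzs = parse_gstr(gstr)
    # atoms == ['O', 'H', 'H'], Zs == [8, 1, 1], len(xyzs) == 9
    return atoms, Zs, xyzs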
class GEOM:
"""Parse user-inpute geometry
"""
def __init__(self, gfs):
"""Constructor
Inp:
gfs (str):
Geometry file or string.
Geometry file must end with either ".xyz" or ".zmat" and follow format:
```
Natom
comment
Atom1 x y z
Atom2 x y z
...
```
for ".xyz", or
```
Natom
comment
Atom1
Atom2 1 dist(1,2)
...
```
for ".zmat". Geometry string follows the same format as either file format, but (1) without heading lines (Natom + comment), and (2) line separation is replaced by semicolon. For example, for xyz format,
gstr = "Atom1 x y z; Atom2 x y z; ..."
[NOTE] Input Z-mat format will be transformed into xyz format automatically! And only the latter will be stored.
Properties:
gtype : "file" or "str"
gstr : "Atom1 x y z;Atom2 x y z;..."
"""
self.parse_gfs(gfs)
self.lspace = 8 # number of spaces on the left (for printing)
def parse_gfs(self, gfs):
"""Parsing geometry string or file into standard form.
"""
if ".xyz" in gfs or ".zmat" in gfs:
self.gtype = "file"
self.gstr = parse_gfile(gfs)
else:
self.gtype = "str"
self.gstr = standardize_gstr(gfs)
def parse_gstr(self, scale=1.88972612457):
return parse_gstr(self.gstr, scale=scale)
def __str__(self):
gstr_out = " "*(self.lspace//2) + "Nuclear Coordinates:\n"
atoms = self.gstr.split(";")
for ia in range(len(atoms)):
axyz = atoms[ia].split()
axyz[0] = " " * self.lspace + axyz[0]
atoms[ia] = " ".join(axyz)
gstr_out += "\n".join(atoms)
return gstr_out
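def _example_geom_from_string():
    """Illustrative usage sketch (hypothetical helper, not in the original
    module): build a GEOM from an xyz-style string, print the standardized
    coordinates, and recover (atoms, Zs, xyzs) with xyzs in Bohr.
    """
    geom = GEOM("H 0 0 0; H 0 0 0.74")
    print(geom)
    return geom.parse_gstr()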
def get_Zxyz(geom, scale=1.88972612457, retSymb=False):
"""Get atom symbols and coordinates
Note:
The default of "scale" assumes input geometry uses unit "Angstrom" and
        transforms it into "Bohr". Use "scale = 1." to stay with "Angstrom".
"""
gstr_raw = parse_gfile(geom)
gstr = standardize_gstr(gstr_raw)
atoms, Zs, xyzs = parse_gstr(gstr, scale)
if retSymb:
return atoms, xyzs
else:
return Zs, xyzs
def get_noccs(Zs, charge, spin):
"""Determine # of alpha and beta electrons
Inp:
Zs (list of int):
A list of atomic numbers (i.e., nuclear charges) for each atom
charge (int):
Net charge (nelectron - sum(Zs))
spin (int):
Spin multiplicity (2S + 1)
Out:
noccs (list):
[nocca, noccb]
"""
nuc_charge = int(sum(Zs))
nocca = (nuc_charge - charge + spin - 1) // 2
noccb = nocca + 1 - spin
if nuc_charge - (nocca + noccb) != charge:
raise RuntimeError(("Bad combination of spin (={:d}) and "
"charge (={:d})").format(spin, charge))
return [nocca, noccb]
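def _example_noccs_o2_triplet():
    """Illustrative usage sketch (hypothetical helper, not in the original
    module): neutral triplet O2 has 16 electrons and spin multiplicity 3,
    giving (nocca, noccb) = (9, 7).
    """
    return get_noccs([8, 8], charge=0, spin=3)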
def get_orth_mat(S, orth_method, ao_lindep_thresh):
"""Compute matrix X that is used for orthogonalizing basis functions
Inp:
S (np.ndarray, nao*nao):
AO overlap matrix
orth_method (str):
Either "symm" or "cano"
ao_lindep_thresh (int):
10**-ao_lindep_thresh is the threshold for "basically zero"
eigenvalues (only used and must be given in "cano"
orthogonalization)
Out:
X (np.ndarray, nao*nmo):
Meaning clear from eqn, h_orth = X.T @ h @ X.
nmo = nao for orth_method = "symm"
nmo = # of linearly dependent AOs for orth_method = "cano"
smin (float):
smallest eigenvalue of S
"""
e, u = np.linalg.eigh(S)
n_lindep = int(np.sum(e < 10**-ao_lindep_thresh))
smin = e[0]
if orth_method.upper() == "SYMM":
if n_lindep > 0:
raise RuntimeError("""orth_method = "symm" cannot handle linear dependency in AO basis. Please use a more tolerant ao_lindep_thresh (default: 6) or use orth_method = "cano".""")
X = u @ np.diag(e**-0.5) @ u.T
Xinv = u @ np.diag(e**0.5) @ u.T
elif orth_method.upper() == "CANO":
X = u[:,n_lindep:] @ np.diag(e[n_lindep:]**-0.5)
Xinv = np.diag(e[n_lindep:]**0.5) @ u[:,n_lindep:].T
else:
raise RuntimeError("Unknown orth_method {:s}.".format(orth_method))
return X, Xinv, smin
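def _example_orth_symm():
    """Illustrative usage sketch (hypothetical helper, not in the original
    module): symmetric orthogonalization of a 2x2 overlap matrix; the
    returned X satisfies X.T @ S @ X = I.
    """
    S = np.array([[1.0, 0.4], [0.4, 1.0]])
    X, Xinv, smin = get_orth_mat(S, "symm", ao_lindep_thresh=6)
    assert np.allclose(X.T @ S @ X, np.eye(2))
    return X, Xinv, smin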
# utils for basis (TODO: move these functions to basis_utils.py)
def get_pure_by_l(ls, pures):
"""1. Check if same l has same purity; 2. return pures by l
"""
max_l = max(ls)
pure_by_l = [None] * (max_l+1)
for l, pure in zip(ls, pures):
if pure_by_l[l] is None:
pure_by_l[l] = pure
else:
if pure_by_l[l] != pure:
raise ValueError("Two shells with same angular momentum have different purity.")
return pure_by_l
def get_norb_l(l, pure):
"""Get number of orbitals for a given angular momentum
"""
if pure:
return 2 * l + 1
else:
return (l + 1) * (l + 2) // 2
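def _example_norb_d_shell():
    """Illustrative usage sketch (hypothetical helper, not in the original
    module): a d shell (l=2) spans 5 spherical components but 6 cartesian
    ones, as given by 2l+1 and (l+1)(l+2)/2 respectively.
    """
    return get_norb_l(2, True), get_norb_l(2, False)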
def get_idao_by_l(ls, pures):
"""Get starting index of each group of AO (grouped by angular momentum)
Inp:
ls ([int] * nbas):
A list of angular momentum
pures ([bool] * nbas):
            Indicates whether each l in ls is spherical (pure=True) or cartesian.
Output:
idao_by_l ([ [int] * nbas_this_l ] * max_l)
"""
max_l = max(ls)
idao_by_l = [[] for i in range(max_l+1)]
p0 = 0
for i in range(len(ls)):
l, pure = ls[i], pures[i]
p1 = p0 + get_norb_l(l, pure)
idao_by_l[l].append(p0)
p0 = p1
return idao_by_l
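def _example_idao_by_l():
    """Illustrative usage sketch (hypothetical helper, not in the original
    module): for a spherical s, p, s, d shell sequence the AO offsets are
    [[0, 4], [1], [5]] -- the two s shells start at 0 and 4, the p shell
    at 1, and the d shell at 5.
    """
    return get_idao_by_l([0, 1, 0, 2], [True, True, True, True])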
| bsd-3-clause | -3,732,571,148,976,621,600 | 29.031469 | 219 | 0.544301 | false |