repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
wetneb/pyoai | src/oaipmh/error.py | 2 | 1480 |
class ErrorBase(Exception):
def oainame(self):
name = self.__class__.__name__
# strip off 'Error' part
name = name[:-5]
# lowercase error name
name = name[0].lower() + name[1:]
return name
class BadArgumentError(ErrorBase):
pass
class BadVerbError(ErrorBase):
pass
class BadResumptionTokenError(ErrorBase):
pass
class CannotDisseminateFormatError(ErrorBase):
pass
class IdDoesNotExistError(ErrorBase):
pass
class NoRecordsMatchError(ErrorBase):
pass
class NoMetadataFormatsError(ErrorBase):
pass
class NoSetHierarchyError(ErrorBase):
pass
class UnknownError(ErrorBase):
pass
# errors not defined by OAI-PMH but which can occur in a client when
# the server is somehow misbehaving
class ClientError(Exception):
def details(self):
"""Error details in human readable text.
"""
raise NotImplementedError
class XMLSyntaxError(ClientError):
"""The OAI-PMH XML can not be parsed as it is not well-formed.
"""
def details(self):
return ("The data delivered by the server could not be parsed, as it "
"is not well-formed XML.")
class DatestampError(ClientError):
"""The OAI-PMH datestamps were not proper UTC datestamps as by spec.
"""
def __init__(self, datestamp):
self.datestamp = datestamp
def details(self):
return ("An illegal datestamp was encountered: %s" % self.datestamp)
| bsd-3-clause | 9,183,814,333,440,261,000 | 23.262295 | 78 | 0.666216 | false |
Unthinkingbit/bitcointools | base58.py | 1 | 3506 | #!/usr/bin/env python
"""encode/decode base58 in the same way that Bitcoin does"""
import hashlib
import math
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
getNewRIPEMD160 = None
getNewSHA256 = None
def b58encode(v):
""" encode v, which is a string of bytes, to base58.
"""
long_value = 0L
for (i, c) in enumerate(v[::-1]):
long_value += (256**i) * ord(c)
result = ''
while long_value >= __b58base:
div, mod = divmod(long_value, __b58base)
result = __b58chars[mod] + result
long_value = div
result = __b58chars[long_value] + result
# Bitcoin does a little leading-zero-compression:
# leading 0-bytes in the input become leading-1s
nPad = 0
for c in v:
if c == '\0': nPad += 1
else: break
return (__b58chars[0]*nPad) + result
def b58decode(v, length):
""" decode v into a string of len bytes
"""
long_value = 0L
for (i, c) in enumerate(v[::-1]):
long_value += __b58chars.find(c) * (__b58base**i)
result = ''
while long_value >= 256:
div, mod = divmod(long_value, 256)
result = chr(mod) + result
long_value = div
result = chr(long_value) + result
nPad = 0
for c in v:
if c == __b58chars[0]: nPad += 1
else: break
result = chr(0)*nPad + result
if length is not None and len(result) != length:
return None
return result
def getNewRIPEMD160ByCrypto(public_key=""):
return RIPEMD160.new(public_key)
def getNewRIPEMD160ByHashlib(public_key=""):
newRIPEMD160 = hashlib.new('ripemd160')
newRIPEMD160.update(public_key)
return newRIPEMD160
def getNewSHA256ByCrypto(public_key=""):
return SHA256.new(public_key)
def getNewSHA256ByHashlib(public_key=""):
return hashlib.sha256(public_key)
try:
# Python Crypto library is at: http://www.dlitz.net/software/pycrypto/
# Needed for RIPEMD160 hash function, used to compute
# Bitcoin addresses from internal public keys.
import Crypto.Hash.RIPEMD160 as RIPEMD160
getNewRIPEMD160 = getNewRIPEMD160ByCrypto
except ImportError:
try:
test = getNewRIPEMD160ByHashlib()
getNewRIPEMD160 = getNewRIPEMD160ByHashlib
except ImportError:
print("Can not import RIPEMD160")
try:
# Python Crypto library is at: http://www.dlitz.net/software/pycrypto/
# Needed for RIPEMD160 hash function, used to compute
# Bitcoin addresses from internal public keys.
import Crypto.Hash.SHA256 as SHA256
getNewSHA256 = getNewSHA256ByCrypto
except ImportError:
try:
test = getNewSHA256ByHashlib()
getNewSHA256 = getNewSHA256ByHashlib
except ImportError:
print("Can not import SHA256")
def hash_160(public_key):
if getNewSHA256 == None or getNewRIPEMD160 == None:
return ''
h1 = getNewSHA256(public_key).digest()
h2 = getNewRIPEMD160(h1).digest()
return h2
def public_key_to_bc_address(public_key):
h160 = hash_160(public_key)
return hash_160_to_bc_address(h160)
def hash_160_to_bc_address(h160):
if getNewSHA256 == None:
return ''
vh160 = "\x00"+h160 # \x00 is version 0
h3=getNewSHA256(getNewSHA256(vh160).digest()).digest()
addr=vh160+h3[0:4]
return b58encode(addr)
def bc_address_to_hash_160(addr):
bytes = b58decode(addr, 25)
return bytes[1:21]
if __name__ == '__main__':
x = '005cc87f4a3fdfe3a2346b6953267ca867282630d3f9b78e64'.decode('hex_codec')
encoded = b58encode(x)
print encoded, '19TbMSWwHvnxAKy12iNm3KdbGfzfaMFViT'
print b58decode(encoded, len(x)).encode('hex_codec'), x.encode('hex_codec')
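# Taken together, the helpers above implement the usual Bitcoin address
# pipeline: hash_160() computes RIPEMD160(SHA256(public_key)), and
# hash_160_to_bc_address() prepends the version byte 0x00, appends the first
# four bytes of SHA256(SHA256(versioned_hash)) as a checksum, and base58-encodes
# the result. bc_address_to_hash_160() reverses the encoding and strips the
# version byte and checksum. The __main__ block checks that
# b58decode(b58encode(x), len(x)) round-trips the original bytes.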
| mit | -4,130,492,044,310,019,000 | 25.969231 | 80 | 0.692527 | false |
redouanelg/DINAE | DINAEmnist.py | 1 | 4377 |
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 24 16:09:10 2017
@author: redouane lguensat
"""
from keras.layers import Input, Dense
from keras.models import Model
input_img = Input(shape=(784,))
encoded = Dense(32, activation='relu')(input_img)
#encoded = Dense(64, activation='relu')(encoded)
#encoded = Dense(32, activation='relu')(encoded)
#decoded = Dense(64, activation='relu')(encoded)
#decoded = Dense(128, activation='relu')(encoded) #!!!!!!
decoded = Dense(784, activation='sigmoid')(encoded)#!!!!!!
# this model maps an input to its reconstruction
autoencoder = Model(input=input_img, output=decoded)
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
from keras.datasets import mnist
import numpy as np
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
print x_train.shape
print x_test.shape
autoencoder.fit(x_train, x_train,
nb_epoch=100,
batch_size=256,
shuffle=True,
validation_data=(x_test, x_test))
############## create image with artificial missing data ############
nbTest=10
patchsize=28
Testimage=np.zeros((nbTest,patchsize*patchsize));
GroundTruth=np.zeros((nbTest,patchsize*patchsize))
OBSvec=np.zeros((nbTest,patchsize*patchsize))
for i in range(nbTest):
Testimage[i]=x_test[i,:].copy()
GroundTruth[i]=x_test[i,:].copy()
missRate=0.5
missInd=np.nonzero(np.random.choice([0, 1], size=(Testimage.shape[1]), p=[1-missRate, missRate]))
Testimage[i,missInd[0]]=float('nan')
OBSvec[i]=Testimage[i].copy()
######################### DINAE
ImageReconstructedResult=np.zeros((nbTest,patchsize*patchsize));
rmseFinal=[]
for ii in range(nbTest):
rmseVec=[];
indMissingTest=np.where(np.isnan(Testimage[ii]))[0]
Testimage[ii,indMissingTest]=0 #np.nanmean(Testimage); #or simply 0
# iter 1
iteration=1
tempmax=np.amax(Testimage[ii])
Testimage[ii]=Testimage[ii]/tempmax
ImageReconstructed = autoencoder.predict(Testimage[None,ii])
Testimage[ii,indMissingTest]=ImageReconstructed[0,indMissingTest]
from sklearn.metrics import mean_squared_error
from math import sqrt
rmseError1 = sqrt(mean_squared_error(GroundTruth[ii],ImageReconstructed[0]*tempmax))
rmseVec.append(rmseError1)
print rmseVec
# next iterations
rmseError=rmseError1
OldrmseError=rmseError
NewrmseError=0
for j in range(1,100):
OldrmseError=rmseError
ImageReconstructed = autoencoder.predict(Testimage[None,ii])
Testimage[ii,indMissingTest]=ImageReconstructed[0,indMissingTest];
rmseError = sqrt(mean_squared_error(GroundTruth[ii],ImageReconstructed[0]*tempmax))
iteration=iteration+1
NewrmseError=rmseError
rmseVec.append(rmseError)
if NewrmseError < OldrmseError:
ImageReconstructedResult[ii,:]=ImageReconstructed[0].copy();
print j
continue
else:
break
print rmseVec
rmseFinal.append(rmseVec[-1])
print "rmsefinal is %f" %np.mean(rmseFinal)
################" reconstruction
import matplotlib.pyplot as plt
n = 10 # how many digits we will display
plt.figure(figsize=(20, 4))
for i in range(n):
# display original
ax = plt.subplot(3, n, i + 1)
plt.imshow(GroundTruth[i].reshape(28, 28))
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# display corrupted
ax = plt.subplot(3, n, i + 1 + n)
plt.imshow(OBSvec[i].reshape(28, 28))
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# display reconstruction
ax = plt.subplot(3, n, i + 1 + 2*n)
plt.imshow(ImageReconstructedResult[i].reshape(28, 28))
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
##################### HISTORY
## list all data in history
#print(history.history.keys())
## summarize history for loss
#plt.plot(history.history['loss'])
#plt.plot(history.history['val_loss'])
#plt.title('model loss')
#plt.ylabel('loss')
#plt.xlabel('epoch')
#plt.legend(['train', 'test'], loc='upper left')
#plt.show()
| mit | 239,313,385,951,253,300 | 28.574324 | 101 | 0.657528 | false |
thomasquintana/jobber | profile.py | 1 | 2236 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Thomas Quintana <[email protected]>
"""
Simulates the overhead of a system with 10000 actors that do nothing,
each processing 1000 messages and then shutting down.
Run with command:
python -m cProfile -s time profile.py
"""
AMOUNT_PROCESSORS = 10000
AMOUNT_MESSAGES = 1000
import unittest
from mock import create_autospec, Mock
from jobber.constants import (ACTOR_PROCESSOR_COMPLETED, ACTOR_SCHEDULER_RUNNING,
ACTOR_SCHEDULER_STOPPED, ACTOR_SCHEDULER_STOPPING)
from jobber.core.scheduler.shortest_job_next_scheduler import SJNScheduler
from jobber.core.actor.processor import ActorProcessor
from jobber.core.scheduler.actor_heap import ShortestJobNextHeap
from jobber.core.actor.actor import Actor
from jobber.core.messages.poison_pill import PoisonPill
from jobber.core.exceptions.no_messages_exception import NoMessagesException
class MockMessage(object):
pass
def stresstest():
scheduler = SJNScheduler()
mock_actor = create_autospec(Actor())
processors = [ActorProcessor(mock_actor) for _ in range(AMOUNT_PROCESSORS)]
for processor in processors:
for _ in range(AMOUNT_MESSAGES):
processor._receive_message(MockMessage())
for processor in processors:
scheduler.schedule(processor)
scheduler._state = ACTOR_SCHEDULER_RUNNING
scheduler.shutdown()
scheduler._state == ACTOR_SCHEDULER_STOPPED
scheduler.start()
if __name__=='__main__':
stresstest()
| apache-2.0 | 1,259,529,836,510,303,700 | 32.878788 | 81 | 0.76297 | false |
guacamoleo/Tensile | Tensile/ClientWriter.py | 1 | 38398 | ################################################################################
# Copyright (C) 2016 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell cop-
# ies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IM-
# PLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNE-
# CTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
################################################################################
from Common import globalParameters, HR, pushWorkingPath, popWorkingPath, print1, CHeader, printWarning
from SolutionStructs import Solution
from SolutionWriter import SolutionWriter
import YAMLIO
import os
from subprocess import Popen
from shutil import copy as shutil_copy
from shutil import rmtree
################################################################################
# Main
################################################################################
def main( config ):
libraryLogicPath = os.path.join(globalParameters["WorkingPath"], \
globalParameters["LibraryLogicPath"])
pushWorkingPath(globalParameters["LibraryClientPath"])
##############################################################################
# Copy Source Files
##############################################################################
pushWorkingPath("source")
filesToCopy = [
"Client.cpp",
"Client.h",
"DeviceStats.h",
"ReferenceCPU.h",
"MathTemplates.cpp",
"MathTemplates.h",
"KernelHeader.h",
"Tools.h",
"CMakeLists.txt",
"TensileConfig.cmake",
"TensileConfigVersion.cmake"
]
for f in filesToCopy:
shutil_copy(
os.path.join(globalParameters["SourcePath"], f),
globalParameters["WorkingPath"] )
if globalParameters["RuntimeLanguage"] == "OCL":
shutil_copy(
os.path.join(globalParameters["SourcePath"], "FindOpenCL.cmake"),
globalParameters["WorkingPath"] )
else:
shutil_copy(
os.path.join(globalParameters["SourcePath"], "FindHIP.cmake"),
globalParameters["WorkingPath"] )
shutil_copy(
os.path.join(globalParameters["SourcePath"], "FindHCC.cmake"),
globalParameters["WorkingPath"] )
##############################################################################
# Read Logic Files
##############################################################################
logicFiles = [os.path.join(libraryLogicPath, f) for f \
in os.listdir(libraryLogicPath) \
if (os.path.isfile(os.path.join(libraryLogicPath, f)) \
and os.path.splitext(f)[1]==".yaml")]
print1("LogicFiles: %s" % logicFiles)
functions = []
functionNames = []
enableHalf = False
for logicFileName in logicFiles:
(scheduleName, deviceNames, problemType, solutionsForType, \
indexOrder, exactLogic, rangeLogic) \
= YAMLIO.readLibraryLogicForSchedule(logicFileName)
if problemType["DataType"].isHalf():
enableHalf = True
functions.append((scheduleName, problemType))
functionNames.append("tensile_%s" % (problemType))
globalParameters["EnableHalf"] = enableHalf
##############################################################################
# Write Generated Header
##############################################################################
forBenchmark = False
solutions = None
problemSizes = None
stepName = None
writeClientParameters(forBenchmark, solutions, problemSizes, stepName, \
functions)
popWorkingPath() # source
##############################################################################
# Run Build Script
##############################################################################
# if redo=true, clobber the build directory
if globalParameters["ForceRedoLibraryClient"]:
rmtree(os.path.join(globalParameters["WorkingPath"], "build"), \
ignore_errors=True)
pushWorkingPath("build")
# write runScript
path = globalParameters["WorkingPath"]
forBenchmark = False
runScriptName = writeRunScript(path, libraryLogicPath, forBenchmark)
# run runScript
process = Popen(runScriptName, cwd=globalParameters["WorkingPath"])
process.communicate()
if process.returncode:
printWarning("Benchmark Process exited with code %u" % process.returncode)
popWorkingPath() # build
popWorkingPath() # LibraryClient
################################################################################
# Write Run Script
################################################################################
def writeRunScript(path, libraryLogicPath, forBenchmark):
# create run.bat or run.sh which builds and runs
runScriptName = os.path.join(path, \
"run.%s" % ("bat" if os.name == "nt" else "sh") )
runScriptFile = open(runScriptName, "w")
echoLine = "@echo." if os.name == "nt" else "echo"
if os.name != "nt":
runScriptFile.write("#!/bin/sh\n")
q = "" if os.name == "nt" else "\""
runScriptFile.write("%s && echo %s%s%s && echo %s# Configuring CMake for Client%s && echo %s%s%s\n" \
% (echoLine, q, HR, q, q, q, q, HR, q))
runScriptFile.write("cmake")
# runtime and kernel language
runScriptFile.write(" -DTensile_RUNTIME_LANGUAGE=%s" \
% globalParameters["RuntimeLanguage"])
if globalParameters["EnableHalf"]:
runScriptFile.write(" -DTensile_ENABLE_HALF=ON")
if forBenchmark:
# for benchmark client
runScriptFile.write(" -DTensile_CLIENT_BENCHMARK=ON")
else:
# for library client
runScriptFile.write(" -DTensile_ROOT=%s" \
% os.path.join(globalParameters["ScriptPath"], "..") )
runScriptFile.write(" -DTensile_CLIENT_BENCHMARK=OFF")
runScriptFile.write(" -DTensile_LOGIC_PATH=%s" % libraryLogicPath)
runScriptFile.write(" -DTensile_LIBRARY_PRINT_DEBUG=%s" \
% ("ON" if globalParameters["LibraryPrintDebug"] else "OFF"))
runScriptFile.write(" -DTensile_SHORT_FILE_NAMES=%s" \
% ("ON" if globalParameters["ShortNames"] else "OFF"))
if globalParameters["CMakeCXXFlags"]:
runScriptFile.write(" -DCMAKE_CXX_FLAGS=%s" \
% globalParameters["CMakeCXXFlags"] )
if globalParameters["CMakeCFlags"]:
runScriptFile.write(" -DCMAKE_C_FLAGS=%s" \
% globalParameters["CMakeCFlags"] )
# for both
if os.name == "nt":
runScriptFile.write(" -DCMAKE_GENERATOR_PLATFORM=x64")
runScriptFile.write(" -DTensile_MERGE_FILES=%s" \
% ("ON" if globalParameters["MergeFiles"] else "OFF"))
runScriptFile.write(" ../source\n")
runScriptFile.write("%s && echo %s%s%s && echo %s# Building Client%s && echo %s%s%s\n" \
% (echoLine, q, HR, q, q, q, q, HR, q))
runScriptFile.write("cmake --build . --config %s%s\n" \
% (globalParameters["CMakeBuildType"], " -- -j 8" \
if os.name != "nt" else "") )
if forBenchmark:
if os.name == "nt":
runScriptFile.write(os.path.join(globalParameters["CMakeBuildType"], \
"client.exe") )
else:
if globalParameters["PinClocks"] and globalParameters["ROCmSMIPath"]:
runScriptFile.write("%s -d 0 --setfan 255 --setsclk 7\n" % globalParameters["ROCmSMIPath"])
runScriptFile.write("sleep 1\n")
runScriptFile.write("%s -d 0 -a\n" % globalParameters["ROCmSMIPath"])
runScriptFile.write("./client")
clp = ""
clp += " --platform-idx %u" % globalParameters["Platform"]
clp += " --device-idx %u" % globalParameters["Device"]
clp += " --init-alpha %u" % globalParameters["DataInitTypeAlpha"]
clp += " --init-beta %u" % globalParameters["DataInitTypeBeta"]
clp += " --init-c %u" % globalParameters["DataInitTypeC"]
clp += " --init-ab %u" % globalParameters["DataInitTypeAB"]
clp += " --print-valids %u" % globalParameters["ValidationPrintValids"]
clp += " --print-max %u" % globalParameters["ValidationMaxToPrint"]
clp += " --num-benchmarks %u" % globalParameters["NumBenchmarks"]
clp += " --num-elements-to-validate %u" % globalParameters["NumElementsToValidate"]
clp += " --num-enqueues-per-sync %u" % globalParameters["EnqueuesPerSync"]
clp += " --num-syncs-per-benchmark %u" % globalParameters["SyncsPerBenchmark"]
clp += " --use-gpu-timer %u" % globalParameters["KernelTime"]
clp += " --sleep-percent %u" % globalParameters["SleepPercent"]
runScriptFile.write(clp)
runScriptFile.write("\n")
if os.name != "nt":
if globalParameters["PinClocks"] and globalParameters["ROCmSMIPath"]:
runScriptFile.write("%s -d 0 --resetclocks\n" % globalParameters["ROCmSMIPath"])
runScriptFile.write("%s -d 0 --setfan 50\n" % globalParameters["ROCmSMIPath"])
else:
executablePath = os.path.join(globalParameters["WorkingPath"])
if os.name == "nt":
executablePath = os.path.join(executablePath, \
globalParameters["CMakeBuildType"], \
"client.exe")
else:
executablePath = os.path.join(executablePath, "client")
runScriptFile.write("%s && echo %s%s%s && echo %s# Library Client:%s && echo %s# %s%s && %s\n" \
% (echoLine, q, HR, q, q, q, q, executablePath, q, executablePath) )
runScriptFile.close()
if os.name != "nt":
os.chmod(runScriptName, 0777)
return runScriptName
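# For reference, the benchmark-mode script this returns looks roughly like the
# following on Linux (illustrative only; the real flag values come from
# globalParameters):
#
#   #!/bin/sh
#   echo && echo "..." && echo "# Configuring CMake for Client" && echo "..."
#   cmake -DTensile_RUNTIME_LANGUAGE=HIP -DTensile_CLIENT_BENCHMARK=ON ... ../source
#   echo && echo "..." && echo "# Building Client" && echo "..."
#   cmake --build . --config Release -- -j 8
#   ./client --platform-idx 0 --device-idx 0 --num-benchmarks 1 ... --sleep-percent 0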
################################################################################
# Write Generated Benchmark Parameters
################################################################################
def writeClientParameters(forBenchmark, solutions, problemSizes, stepName, \
functionList):
h = ""
##############################################################################
# Min Naming
##############################################################################
if forBenchmark:
kernels = []
for solution in solutions:
solutionKernels = solution.getKernels()
for kernel in solutionKernels:
if kernel not in kernels:
kernels.append(kernel)
solutionSerialNaming = Solution.getSerialNaming(solutions)
kernelSerialNaming = Solution.getSerialNaming(kernels)
solutionMinNaming = Solution.getMinNaming(solutions)
kernelMinNaming = Solution.getMinNaming(kernels)
solutionWriter = SolutionWriter( \
solutionMinNaming, solutionSerialNaming, \
kernelMinNaming, kernelSerialNaming)
if forBenchmark:
if globalParameters["MergeFiles"]:
h += "#include \"Solutions.h\"\n"
else:
for solution in solutions:
solutionName = solutionWriter.getSolutionName(solution)
h += "#include \"" + solutionName + ".h\"\n"
h += "\n"
else:
h += "#include \"Tensile.h\"\n"
h += "typedef enum {\n"
h += " enum_float,\n"
h += " enum_double,\n"
h += " enum_TensileComplexFloat,\n"
h += " enum_TensileComplexDouble\n"
h += "#ifdef Tensile_ENABLE_HALF\n"
h += " ,enum_TensileHalf\n"
h += "#endif\n"
h += "} DataTypeEnum;\n"
h += "\n"
h += "const char indexChars[%u] = \"%s" \
% (len(globalParameters["IndexChars"])+1, \
globalParameters["IndexChars"][0])
for i in range(1, len(globalParameters["IndexChars"])):
h += globalParameters["IndexChars"][i]
h += "\";\n"
h += "unsigned int functionIdx;\n"
h += "unsigned int dataTypeIdx;\n"
h += "unsigned int problemTypeIdx;\n"
h += "\n"
##############################################################################
# Problem Types
##############################################################################
#dataTypes = []
#problemTypes = []
#functionSerialToDataTypeAndIdx = []
dataTypes = []
problemTypes = []
problemTypesForDataType = {} # for data type
schedulesForProblemType = {} # for problem type
functionInfo = [] # dataTypeIdx, problemTypeIdx, idxWithinDataType, idxWithinProblemType
if forBenchmark:
problemType = solutions[0]["ProblemType"]
dataType = problemType["DataType"]
dataTypes.append(dataType)
problemTypes.append(problemType)
problemTypesForDataType[dataType] = [problemType]
schedulesForProblemType[problemType] = solutions
numProblemTypes = 1
for solution in solutions:
functionInfo.append([ 0, 0, 0, 0, 0, 0 ])
else:
for functionIdx in range(0, len(functionList)):
function = functionList[functionIdx]
scheduleName = function[0]
problemType = function[1]
dataType = problemType["DataType"]
if dataType not in dataTypes:
dataTypes.append(dataType)
problemTypesForDataType[dataType] = []
if problemType not in problemTypesForDataType[dataType]:
problemTypesForDataType[dataType].append(problemType)
schedulesForProblemType[problemType] = []
schedulesForProblemType[problemType].append(scheduleName)
# sort
dataTypes = sorted(dataTypes)
for dataType in dataTypes:
problemTypesForDataType[dataType] = \
sorted(problemTypesForDataType[dataType])
for problemType in problemTypesForDataType[dataType]:
schedulesForProblemType[problemType] = \
sorted(schedulesForProblemType[problemType])
# assign info
functionIdxSerial = 0
problemTypeIdxSerial = 0
for dataTypeIdxSerial in range(0, len(dataTypes)):
dataType = dataTypes[dataTypeIdxSerial]
functionIdxForDataType = 0
for problemTypeIdxForDataType in range(0, \
len(problemTypesForDataType[dataType])):
problemType = \
problemTypesForDataType[dataType][problemTypeIdxForDataType]
problemTypes.append(problemType)
functionIdxForProblemType = 0
for functionIdxForProblemType in range(0, \
len(schedulesForProblemType[problemType])):
functionInfo.append([ \
dataTypeIdxSerial, \
problemTypeIdxForDataType, \
problemTypeIdxSerial, \
functionIdxSerial,\
functionIdxForDataType,\
functionIdxForProblemType, \
])
functionIdxForProblemType += 1
functionIdxForDataType += 1
functionIdxSerial += 1
problemTypeIdxSerial += 1
numProblemTypes = problemTypeIdxSerial
numFunctions = functionIdxSerial
h += "const unsigned int numFunctions = %u;\n" % numFunctions
##############################################################################
# Data Types
##############################################################################
h += "/* data types */\n"
numDataTypes = len(dataTypes)
h += "const unsigned int numDataTypes = %u;\n" % numDataTypes
h += "const DataTypeEnum dataTypeEnums[numDataTypes] = { enum_%s" \
% dataTypes[0].toCpp()
for dataTypeIdx in range(1, numDataTypes):
h += ", enum_%s" % dataTypes[dataTypeIdx].toCpp();
h += " };\n"
# bytes per elements
h += "const unsigned int bytesPerElement[numDataTypes] = { %u" \
% (dataTypes[0].numBytes())
for dataTypeIdx in range(1, numDataTypes):
dataType = dataTypes[dataTypeIdx]
h += ", %u" % dataType.numBytes()
h += " };\n"
# flops per mac
h += "const unsigned int numFlopsPerMac[numDataTypes] = { %u" \
% (2 if dataTypes[0].isReal() else 8)
for dataTypeIdx in range(1, numDataTypes):
dataType = dataTypes[dataTypeIdx]
h += ", %u" % (2 if dataType.isReal() else 8)
h += " };\n"
for dataTypeIdx in range(0, numDataTypes):
h += "#define Tensile_DATA_TYPE_%s\n" \
% dataTypes[dataTypeIdx].toCpp().upper()
##############################################################################
# Problem Types
##############################################################################
h += "/* problem types */\n"
h += "const unsigned int numProblemTypes = %u;\n" % numProblemTypes
# Num C Indices
h += "const unsigned int numIndicesC[numProblemTypes] = { %u" \
% problemTypes[0]["NumIndicesC"]
for problemTypeIdx in range(1, numProblemTypes):
problemType = problemTypes[problemTypeIdx]
h += ", %u" % problemType["NumIndicesC"]
h += " };\n"
# Num AB Indices
maxNumIndicesAB = len(problemTypes[0]["IndexAssignmentsA"])
h += "const unsigned int numIndicesAB[numProblemTypes] = { %u" \
% len(problemTypes[0]["IndexAssignmentsA"])
for problemTypeIdx in range(1, numProblemTypes):
problemType = problemTypes[problemTypeIdx]
numIndicesAB = len(problemType["IndexAssignmentsA"])
h += ", %u" % numIndicesAB
maxNumIndicesAB = max(numIndicesAB, maxNumIndicesAB)
h += " };\n"
h += "const unsigned int maxNumIndicesAB = %u;\n" % maxNumIndicesAB
# Index Assignments A
h += "const unsigned int indexAssignmentsA[numProblemTypes][maxNumIndicesAB] = {\n"
for problemTypeIdx in range(0, numProblemTypes):
problemType = problemTypes[problemTypeIdx]
indices = problemType["IndexAssignmentsA"]
h += " { %u" % indices[0]
for i in range(1, maxNumIndicesAB):
if i < len(indices):
h += ", %u" % indices[i]
else:
h += ", static_cast<unsigned int>(-1)"
if problemTypeIdx < numProblemTypes-1:
h += " },\n"
else:
h += " }\n"
h += "};\n"
# Index Assignments B
h += "const unsigned int indexAssignmentsB[numProblemTypes][maxNumIndicesAB] = {\n"
for problemTypeIdx in range(0, numProblemTypes):
problemType = problemTypes[problemTypeIdx]
indices = problemType["IndexAssignmentsB"]
h += " { %u" % indices[0]
for i in range(1, maxNumIndicesAB):
if i < len(indices):
h += ", %u" % indices[i]
else:
h += ", static_cast<unsigned int>(-1)"
if problemTypeIdx < numProblemTypes-1:
h += " },\n"
else:
h += " }\n"
h += "};\n"
# beta
h += "bool useBeta[numProblemTypes] = { %s" \
% ("true" if problemTypes[0]["UseBeta"] else "false")
for problemTypeIdx in range(1, numProblemTypes):
problemType = problemTypes[problemTypeIdx]
h += ", %s" % ("true" if problemType["UseBeta"] else "false")
h += " };\n"
# Complex Conjugates
h += "const bool complexConjugateA[numProblemTypes] = { %s" \
% ("true" if problemTypes[0]["ComplexConjugateA"] else "false" )
for problemTypeIdx in range(1, numProblemTypes):
problemType = problemTypes[problemTypeIdx]
h += ", %s" % ("true" if problemTypes[0]["ComplexConjugateA"] else "false" )
h += " };\n"
h += "const bool complexConjugateB[numProblemTypes] = { %s" \
% ("true" if problemTypes[0]["ComplexConjugateB"] else "false" )
for problemTypeIdx in range(1, numProblemTypes):
problemType = problemTypes[problemTypeIdx]
h += ", %s" % ("true" if problemTypes[0]["ComplexConjugateB"] else "false" )
h += " };\n"
h += "\n"
if not forBenchmark:
h += "// dataTypeIdxSerial, problemTypeIdxForDataType, problemTypeIdxSerial, functionIdxSerial, functionIdxForDataType, functionIdxForProblemType\n"
first = True
h += "const unsigned int functionInfo[numFunctions][6] = {\n"
for info in functionInfo:
h += "%s{ %u, %u, %u, %u, %u, %u }" % (" " if first else ",\n ", \
info[0], info[1], info[2], info[3], info[4], info[5] )
first = False
h += " };\n"
##############################################################################
# Problem Sizes
##############################################################################
maxNumIndices = problemTypes[0]["TotalIndices"]
if not forBenchmark:
for problemType in problemTypes:
maxNumIndices = max(problemType["TotalIndices"], maxNumIndices)
h += "const unsigned int maxNumIndices = %u;\n" % maxNumIndices
h += "const unsigned int totalIndices[numProblemTypes] = { %u" \
% problemTypes[0]["TotalIndices"]
for problemTypeIdx in range(1, numProblemTypes):
h += ", %u" % problemTypes[problemTypeIdx]["TotalIndices"]
h += " };\n"
if forBenchmark:
h += "const unsigned int numProblems = %u;\n" \
% problemSizes.totalProblemSizes
h += "const unsigned int problemSizes[numProblems][%u] = {\n" \
% problemTypes[0]["TotalIndices"]
for i in range(0, problemSizes.totalProblemSizes):
line = " {%5u" %problemSizes.sizes[i][0]
for j in range(1, problemTypes[0]["TotalIndices"]):
line += ",%5u" % problemSizes.sizes[i][j]
line += " }"
h += line
if i < problemSizes.totalProblemSizes-1:
h += ","
else:
h += "};"
h += "\n"
else:
h += "unsigned int userSizes[maxNumIndices];\n"
if forBenchmark:
h += "/* problem sizes */\n"
"""
h += "const bool indexIsSized[maxNumIndices] = {"
for i in range(0, problemSizes.totalIndices):
h += " %s" % ("true" if problemSizes.indexIsSized[i] else "false")
if i < problemSizes.totalIndices-1:
h += ","
h += " };\n"
h += "const unsigned int numIndicesSized = %u;\n" \
% len(problemSizes.indicesSized)
h += "const unsigned int indicesSized[numIndicesSized][4] = {\n"
h += "// { min, stride, stride_incr, max }\n"
for i in range(0, len(problemSizes.indicesSized)):
r = problemSizes.indicesSized[i]
h += " { %u, %u, %u, %u }" % (r[0], r[1], r[2], r[3])
if i < len(problemSizes.indicesSized)-1:
h += ","
h += "\n"
h += " };\n"
numIndicesMapped = len(problemSizes.indicesMapped)
h += "const unsigned int numIndicesMapped = %u;\n" % numIndicesMapped
if numIndicesMapped > 0:
h += "#define Tensile_INDICES_MAPPED 1\n"
h += "const unsigned int indicesMapped[numIndicesMapped] = {"
for i in range(0, numIndicesMapped):
h += " %u" % problemSizes.indicesMapped[i]
if i < numIndicesMapped-1:
h += ","
h += " };\n"
else:
h += "#define Tensile_INDICES_MAPPED 0\n"
"""
##############################################################################
# Max Problem Sizes
##############################################################################
if forBenchmark:
h += "size_t maxSizeC = %u;\n" % (problemSizes.maxC)
h += "size_t maxSizeA = %u;\n" % (problemSizes.maxA)
h += "size_t maxSizeB = %u;\n" % (problemSizes.maxB)
h += "\n"
else:
h += "size_t maxSizeC;\n"
h += "size_t maxSizeA;\n"
h += "size_t maxSizeB;\n"
h += "\n"
##############################################################################
# Current Problem Size
##############################################################################
h += "/* current problem size */\n"
#h += "unsigned int fullSizes[maxNumIndices];\n"
#h += "unsigned int currentSizedIndexSizes[numIndicesSized];\n"
#h += "unsigned int currentSizedIndexIncrements[numIndicesSized];\n"
h += "\n"
##############################################################################
# Solutions
##############################################################################
if forBenchmark:
h += "/* solutions */\n"
# Problem Type Indices
h += "const unsigned int maxNumSolutions = %u;\n" % len(solutions)
h += "float solutionPerf[numProblems][maxNumSolutions]; // milliseconds\n"
h += "\n"
# Solution Ptrs
h += "typedef TensileStatus (*SolutionFunctionPointer)(\n"
argList = solutionWriter.getArgList(solutions[0]["ProblemType"], True, True, True)
for i in range(0, len(argList)):
h += " %s %s%s" % (argList[i][0], argList[i][1], \
",\n" if i < len(argList)-1 else ");\n\n")
h += "const SolutionFunctionPointer solutions[maxNumSolutions] = {\n"
for i in range(0, len(solutions)):
solution = solutions[i]
solutionName = solutionWriter.getSolutionName(solution)
h += " %s" % solutionName
if i < len(solutions)-1:
h += ","
h += "\n"
h += " };\n"
h += "\n"
# Solution Names
h += "const char *solutionNames[maxNumSolutions] = {\n"
for i in range(0, len(solutions)):
solution = solutions[i]
solutionName = solutionWriter.getSolutionName(solution)
h += " \"%s\"" % solutionName
if i < len(solutions)-1:
h += ","
h += "\n"
h += " };\n"
h += "\n"
else:
# Function Names
functionNames = []
for dataType in dataTypes:
for problemType in problemTypesForDataType[dataType]:
for scheduleName in schedulesForProblemType[problemType]:
#functionNames.append("tensile_%s_%s" % (scheduleName, problemType))
functionNames.append("tensile_%s" % (problemType))
h += "const char *functionNames[numFunctions] = {\n"
for functionIdx in range(0, len(functionNames)):
functionName = functionNames[functionIdx]
h += " \"%s\"%s\n" % (functionName, \
"," if functionIdx < len(functionNames)-1 else "" )
h += " };\n"
##############################################################################
# Runtime Structures
##############################################################################
h += "/* runtime structures */\n"
h += "TensileStatus status;\n"
if globalParameters["RuntimeLanguage"] == "OCL":
h += "cl_platform_id platform;\n"
h += "cl_device_id device;\n"
h += "cl_context context;\n"
h += "cl_command_queue stream;\n"
else:
h += "hipStream_t stream;\n"
#h += "int deviceIdx = %u;\n" \
# % (globalParameters["Device"])
h += "\n"
h += "void *deviceC;\n"
h += "void *deviceA;\n"
h += "void *deviceB;\n"
##############################################################################
# Benchmarking and Validation Parameters
##############################################################################
h += "\n/* benchmarking parameters */\n"
#h += "const bool measureKernelTime = %s;\n" \
# % ("true" if globalParameters["KernelTime"] else "false")
#h += "const unsigned int numEnqueuesPerSync = %u;\n" \
# % (globalParameters["EnqueuesPerSync"])
#h += "const unsigned int numSyncsPerBenchmark = %u;\n" \
# % (globalParameters["SyncsPerBenchmark"])
#h += "unsigned int numElementsToValidate = %s;\n" \
# % (str(globalParameters["NumElementsToValidate"]) \
# if globalParameters["NumElementsToValidate"] >= 0 \
# else "0xFFFFFFFF" )
#h += "unsigned int validationMaxToPrint = %u;\n" \
# % globalParameters["ValidationMaxToPrint"]
#h += "bool validationPrintValids = %s;\n" \
# % ("true" if globalParameters["ValidationPrintValids"] else "false")
h += "size_t validationStride;\n"
#h += "unsigned int dataInitTypeC = %s;\n" % globalParameters["DataInitTypeC"]
#h += "unsigned int dataInitTypeAB = %s;\n" % globalParameters["DataInitTypeAB"]
h += "\n"
##############################################################################
# Generated Call to Reference
##############################################################################
h += "/* generated call to reference */\n"
h += "template<typename DataType>\n"
h += "TensileStatus generatedCallToReferenceCPU(\n"
h += " const unsigned int *sizes,\n"
h += " DataType *referenceC,\n"
h += " DataType *initialA,\n"
h += " DataType *initialB,\n"
h += " DataType alpha,\n"
h += " DataType beta) {\n"
h += " return tensileReferenceCPU(\n"
h += " referenceC,\n"
h += " initialA,\n"
h += " initialB,\n"
h += " alpha,\n"
h += " beta,\n"
h += " totalIndices[problemTypeIdx],\n"
h += " sizes,\n"
h += " numIndicesC[problemTypeIdx],\n"
h += " numIndicesAB[problemTypeIdx],\n"
h += " indexAssignmentsA[problemTypeIdx],\n"
h += " indexAssignmentsB[problemTypeIdx],\n"
h += " complexConjugateA[problemTypeIdx],\n"
h += " complexConjugateB[problemTypeIdx],\n"
h += " validationStride );\n"
h += "};\n"
h += "\n"
##############################################################################
# Generated Call to Solution
##############################################################################
if forBenchmark:
problemType = solutions[0]["ProblemType"]
h += "/* generated call to solution */\n"
h += "template<typename DataType>\n"
h += "TensileStatus generatedCallToSolution(\n"
h += " unsigned int solutionIdx,\n"
h += " const unsigned int *sizes,\n"
h += " DataType alpha,\n"
h += " DataType beta, \n"
h += " unsigned int numEvents = 0, \n"
if globalParameters["RuntimeLanguage"] == "OCL":
h += " cl_event *event_wait_list = NULL,\n"
h += " cl_event *outputEvent = NULL ) {\n"
else:
h += " hipEvent_t *startEvent = NULL,\n"
h += " hipEvent_t *stopEvent = NULL ) {\n"
h += " // calculate parameters assuming packed data\n"
# strides
indexChars = globalParameters["IndexChars"]
firstStride = 1
if problemType["UseInitialStrides"]:
firstStride = 0
lastStrideC = problemType["NumIndicesC"]
lastStrideA = len(problemType["IndexAssignmentsA"])
lastStrideB = len(problemType["IndexAssignmentsB"])
# calculate strides
for i in range(0,lastStrideC):
h += " unsigned int strideC%u%s = 1" % (i, indexChars[i])
for j in range(0, i):
h += "*sizes[%i]" % j
h += ";\n"
for i in range(0,lastStrideA):
h += " unsigned int strideA%u%s = 1" % (i, \
indexChars[problemType["IndexAssignmentsA"][i]])
for j in range(0, i):
h += "*sizes[%i]" % \
problemType["IndexAssignmentsA"][j]
h += ";\n"
for i in range(0,lastStrideB):
h += " unsigned int strideB%u%s = 1" % (i, \
indexChars[problemType["IndexAssignmentsB"][i]])
for j in range(0, i):
h += "*sizes[%i]" % \
problemType["IndexAssignmentsB"][j]
h += ";\n"
for i in range(0, problemType["TotalIndices"]):
h += " unsigned int size%s = sizes[%u];\n" % (indexChars[i], i)
h += "\n"
# function call
h += " // call solution function\n"
if globalParameters["RuntimeLanguage"] == "OCL":
h += " return solutions[solutionIdx]( static_cast<cl_mem>(deviceC), static_cast<cl_mem>(deviceA), static_cast<cl_mem>(deviceB),\n"
else:
typeName = dataTypes[0].toCpp()
h += " return solutions[solutionIdx]( static_cast<%s *>(deviceC), static_cast<%s *>(deviceA), static_cast<%s *>(deviceB),\n" \
% (typeName, typeName, typeName)
h += " alpha,\n"
if problemType["UseBeta"]:
h += " beta,\n"
h += " 0, 0, 0, // offsets\n"
for i in range(firstStride,lastStrideC):
h += " strideC%u%s,\n" % (i, indexChars[i])
for i in range(firstStride,lastStrideA):
h += " strideA%u%s,\n" % (i, \
indexChars[problemType["IndexAssignmentsA"][i]])
for i in range(firstStride,lastStrideB):
h += " strideB%u%s,\n" % (i, \
indexChars[problemType["IndexAssignmentsB"][i]])
for i in range(0, problemType["TotalIndices"]):
h += " size%s,\n" % indexChars[i]
h += " stream,\n"
if globalParameters["RuntimeLanguage"] == "OCL":
h += " numEvents, event_wait_list, outputEvent ); // events\n"
else:
h += " numEvents, startEvent, stopEvent); // events\n"
h += "};\n"
h += "\n"
else:
############################################################################
# Generated Call to Function
############################################################################
for enqueue in [True, False]:
functionName = "tensile" if enqueue else "tensileGetSolutionName"
returnName = "TensileStatus" if enqueue else "const char *"
h += "/* generated call to function */\n"
h += "template<typename DataType>\n"
h += "%s generatedCallTo_%s(\n" % (returnName, functionName)
h += " unsigned int *sizes,\n"
h += " DataType alpha,\n"
h += " DataType beta, \n"
h += " unsigned int numEvents = 0, \n"
if globalParameters["RuntimeLanguage"] == "OCL":
h += " cl_event *event_wait_list = NULL,\n"
h += " cl_event *outputEvent = NULL );\n\n"
else:
h += " hipEvent_t *startEvent = NULL,\n"
h += " hipEvent_t *stopEvent = NULL );\n\n"
for dataType in dataTypes:
typeName = dataType.toCpp()
functionsForDataType = []
for problemType in problemTypesForDataType[dataType]:
for scheduleName in schedulesForProblemType[problemType]:
functionsForDataType.append([scheduleName, problemType])
h += "template<>\n"
h += "inline %s generatedCallTo_%s<%s>(\n" \
% (returnName, functionName, typeName)
h += " unsigned int *sizes,\n"
h += " %s alpha,\n" % typeName
h += " %s beta,\n" % typeName
h += " unsigned int numEvents, \n"
if globalParameters["RuntimeLanguage"] == "OCL":
h += " cl_event *event_wait_list,\n"
h += " cl_event *outputEvent ) {\n\n"
else:
h += " hipEvent_t *startEvent,\n"
h += " hipEvent_t *stopEvent ) {\n\n"
h += " unsigned int functionIdxForDataType = functionInfo[functionIdx][4];\n"
for functionIdx in range(0, len(functionsForDataType)):
function = functionsForDataType[functionIdx]
scheduleName = function[0]
problemType = function[1]
if len(functionsForDataType)> 1:
if functionIdx == 0:
h += " if (functionIdxForDataType == %u) {\n" % functionIdx
elif functionIdx == len(functionsForDataType)-1:
h += " } else {\n"
else:
h += " } else if (functionIdxForDataType == %u) {\n" \
% functionIdx
# strides
indexChars = globalParameters["IndexChars"]
firstStride = 1
if problemType["UseInitialStrides"]:
firstStride = 0
lastStrideC = problemType["NumIndicesC"]
lastStrideA = len(problemType["IndexAssignmentsA"])
lastStrideB = len(problemType["IndexAssignmentsB"])
# calculate strides
for i in range(0,lastStrideC):
h += " unsigned int strideC%u%s = 1" % (i, indexChars[i])
for j in range(0, i):
h += "*sizes[%i]" % j
h += ";\n"
for i in range(0,lastStrideA):
h += " unsigned int strideA%u%s = 1" % (i, \
indexChars[problemType["IndexAssignmentsA"][i]])
for j in range(0, i):
h += "*sizes[%i]" % \
problemType["IndexAssignmentsA"][j]
h += ";\n"
for i in range(0,lastStrideB):
h += " unsigned int strideB%u%s = 1" % (i, \
indexChars[problemType["IndexAssignmentsB"][i]])
for j in range(0, i):
h += "*sizes[%i]" % \
problemType["IndexAssignmentsB"][j]
h += ";\n"
for i in range(0, problemType["TotalIndices"]):
h += " unsigned int size%s = sizes[%u];\n" % (indexChars[i], i)
# function call
h += " // call solution function\n"
h += " return %s_%s(\n" % (functionName, problemType)
if enqueue:
if globalParameters["RuntimeLanguage"] == "OCL":
h += " static_cast<cl_mem>(deviceC),\n"
h += " static_cast<cl_mem>(deviceA),\n"
h += " static_cast<cl_mem>(deviceB),\n"
else:
h += " static_cast<%s *>(deviceC),\n" % typeName
h += " static_cast<%s *>(deviceA),\n" % typeName
h += " static_cast<%s *>(deviceB),\n" % typeName
h += " alpha,\n"
if problemType["UseBeta"]:
h += " beta,\n"
h += " 0, 0, 0, // offsets\n"
for i in range(firstStride,lastStrideC):
h += " strideC%u%s,\n" % (i, indexChars[i])
for i in range(firstStride,lastStrideA):
h += " strideA%u%s,\n" % (i, \
indexChars[problemType["IndexAssignmentsA"][i]])
for i in range(firstStride,lastStrideB):
h += " strideB%u%s,\n" % (i, \
indexChars[problemType["IndexAssignmentsB"][i]])
for i in range(0, problemType["TotalIndices"]):
h += " size%s,\n" % indexChars[i]
h += " stream"
if enqueue:
if globalParameters["RuntimeLanguage"] == "OCL":
h += ",\n numEvents, event_wait_list, outputEvent"
else:
h += ",\n numEvents, startEvent, stopEvent"
h += ");\n"
if len(functionsForDataType) > 1:
h += " }\n" # close last if
h += "};\n" # close callToFunction
##############################################################################
# Results File Name
##############################################################################
if forBenchmark:
h += "/* results file name */\n"
resultsFileName = os.path.join(globalParameters["WorkingPath"], \
"../../Data","%s.csv" % stepName)
resultsFileName = resultsFileName.replace("\\", "\\\\")
h += "const char *resultsFileName = \"%s\";\n" % resultsFileName
##############################################################################
# Write File
##############################################################################
clientParametersFile = open(os.path.join(globalParameters["WorkingPath"], \
"ClientParameters.h"), "w")
clientParametersFile.write(CHeader)
clientParametersFile.write(h)
clientParametersFile.close()
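# The string assembled above is emitted as ClientParameters.h, which the
# benchmark/library client compiles against. Roughly, the generated header
# contains declarations of this shape (values are illustrative, not actual
# generator output):
#   const unsigned int numFunctions = 1;
#   const unsigned int numDataTypes = 1;
#   const DataTypeEnum dataTypeEnums[numDataTypes] = { enum_float };
#   const unsigned int numProblemTypes = 1;
#   const char *resultsFileName = ".../Data/<stepName>.csv";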
| mit | 2,385,873,656,909,588,000 | 40.421791 | 152 | 0.550992 | false |
nitsas/codejamsolutions | Senate Evacuation/test_runme.py | 1 | 2184 | #!/usr/bin/env python3
"""
Unit Tests for the Senate Evacuation problem
for Google Code Jam 2016
Round 1C
Link to problem description:
https://code.google.com/codejam/contest/4314486/dashboard#s=p0
Author:
Chris Nitsas
(nitsas)
Language:
Python 3(.4)
Date:
May, 2016
Usage:
python3 test_runme.py
"""
import io
import os
import sys
import unittest
# modules I've written:
import runme
def are_extra_samples_present():
return os.path.isfile('extra_sample.in') and os.path.isfile('extra_sample.out')
def contents_of(output_file):
with open(output_file, 'r', encoding='utf-8') as f:
return f.read()
def output_of_runme_on(input_file):
# call runme.main and get its output into from_main
with io.StringIO() as target_output_stream:
# redirect stdout to an io.StringIO object to run main
sys.stdout, old_stdout = target_output_stream, sys.stdout
runme.main(input_file)
from_main = target_output_stream.getvalue()
# get original stdout back
sys.stdout = old_stdout
return from_main
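# Note: swapping sys.stdout for an io.StringIO object is what lets the test
# capture everything runme.main() prints; on Python 3.4+ the same effect could
# be achieved with contextlib.redirect_stdout, but the manual swap keeps this
# helper dependency-free.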
class TestRunme(unittest.TestCase):
"""
Simple tests for the Senate Evacuation problem
for Google Code Jam 2016
Round 1C
"""
# define if needed
# def setUp(self):
# pass
#
# define if needed
# def tearDown(self):
# pass
#
# def test_something(self):
# # use self.assertEqual(), self.assertTrue() or self.assertRaises()
# pass
#
def test_main_on_sample_in(self):
input_file, output_file = 'sample.in', 'sample.out'
# compare runme.main's results with sample.out's contents
self.assertEqual(output_of_runme_on(input_file),
contents_of(output_file))
@unittest.skipIf(not are_extra_samples_present(), 'no extra samples')
def test_main_on_extra_sample_in(self):
input_file, output_file = 'extra_sample.in', 'extra_sample.out'
# compare runme.main's results with extra_sample.out's contents
self.assertEqual(output_of_runme_on(input_file),
contents_of(output_file))
if __name__ == '__main__':
unittest.main()
| mit | 5,893,676,638,818,437,000 | 24.103448 | 83 | 0.642857 | false |
eayunstack/python-neutronclient | neutronclient/neutron/v2_0/network.py | 1 | 8760 | # Copyright 2012 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import argparse
from neutronclient._i18n import _
from neutronclient.common import exceptions
from neutronclient.common import utils
from neutronclient.neutron import v2_0 as neutronV20
from neutronclient.neutron.v2_0 import availability_zone
from neutronclient.neutron.v2_0 import dns
from neutronclient.neutron.v2_0.qos import policy as qos_policy
def _format_subnets(network):
try:
return '\n'.join([' '.join([s['id'], s.get('cidr', '')])
for s in network['subnets']])
except (TypeError, KeyError):
return ''
class ListNetwork(neutronV20.ListCommand):
"""List networks that belong to a given tenant."""
# Length of a query filter on subnet id
# id=<uuid>& (with len(uuid)=36)
subnet_id_filter_len = 40
resource = 'network'
_formatters = {'subnets': _format_subnets, }
list_columns = ['id', 'name', 'subnets']
pagination_support = True
sorting_support = True
filter_attrs = [
'tenant_id',
'name',
'admin_state_up',
{'name': 'status',
'help': _("Filter %s according to their operation status."
"(For example: ACTIVE, ERROR etc)"),
'boolean': False,
'argparse_kwargs': {'type': utils.convert_to_uppercase}},
{'name': 'shared',
'help': _('Filter and list the networks which are shared.'),
'boolean': True},
{'name': 'router:external',
'help': _('Filter and list the networks which are external.'),
'boolean': True},
{'name': 'tags',
'help': _("Filter and list %s which has all given tags. "
"Multiple tags can be set like --tags <tag[,tag...]>"),
'boolean': False,
'argparse_kwargs': {'metavar': 'TAG'}},
{'name': 'tags_any',
'help': _("Filter and list %s which has any given tags. "
"Multiple tags can be set like --tags-any <tag[,tag...]>"),
'boolean': False,
'argparse_kwargs': {'metavar': 'TAG'}},
{'name': 'not_tags',
'help': _("Filter and list %s which does not have all given tags. "
"Multiple tags can be set like --not-tags <tag[,tag...]>"),
'boolean': False,
'argparse_kwargs': {'metavar': 'TAG'}},
{'name': 'not_tags_any',
'help': _("Filter and list %s which does not have any given tags. "
"Multiple tags can be set like --not-tags-any "
"<tag[,tag...]>"),
'boolean': False,
'argparse_kwargs': {'metavar': 'TAG'}},
]
def extend_list(self, data, parsed_args):
"""Add subnet information to a network list."""
neutron_client = self.get_client()
search_opts = {'fields': ['id', 'cidr']}
if self.pagination_support:
page_size = parsed_args.page_size
if page_size:
search_opts.update({'limit': page_size})
subnet_ids = []
for n in data:
if 'subnets' in n:
subnet_ids.extend(n['subnets'])
def _get_subnet_list(sub_ids):
search_opts['id'] = sub_ids
return neutron_client.list_subnets(
**search_opts).get('subnets', [])
try:
subnets = _get_subnet_list(subnet_ids)
except exceptions.RequestURITooLong as uri_len_exc:
# The URI is too long because of too many subnet_id filters
# Use the excess attribute of the exception to know how many
# subnet_id filters can be inserted into a single request
subnet_count = len(subnet_ids)
max_size = ((self.subnet_id_filter_len * subnet_count) -
uri_len_exc.excess)
chunk_size = max_size // self.subnet_id_filter_len
subnets = []
for i in range(0, subnet_count, chunk_size):
subnets.extend(
_get_subnet_list(subnet_ids[i: i + chunk_size]))
subnet_dict = dict([(s['id'], s) for s in subnets])
for n in data:
if 'subnets' in n:
n['subnets'] = [(subnet_dict.get(s) or {"id": s})
for s in n['subnets']]
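# Worked example of the chunking above (numbers are illustrative): with
# subnet_id_filter_len = 40, subnet_count = 500 and an excess of 2000
# characters reported by RequestURITooLong, max_size = 40*500 - 2000 = 18000
# and chunk_size = 18000 // 40 = 450, so the subnet details are fetched in two
# requests of at most 450 ids each.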
class ListExternalNetwork(ListNetwork):
"""List external networks that belong to a given tenant."""
pagination_support = True
sorting_support = True
def retrieve_list(self, parsed_args):
external = '--router:external=True'
if external not in self.values_specs:
self.values_specs.append('--router:external=True')
return super(ListExternalNetwork, self).retrieve_list(parsed_args)
class ShowNetwork(neutronV20.ShowCommand):
"""Show information of a given network."""
resource = 'network'
class CreateNetwork(neutronV20.CreateCommand, qos_policy.CreateQosPolicyMixin):
"""Create a network for a given tenant."""
resource = 'network'
def add_known_arguments(self, parser):
parser.add_argument(
'--admin-state-down',
dest='admin_state', action='store_false',
help=_('Set admin state up to false.'))
parser.add_argument(
'--admin_state_down',
dest='admin_state', action='store_false',
help=argparse.SUPPRESS)
parser.add_argument(
'--shared',
action='store_true',
help=_('Set the network as shared.'),
default=argparse.SUPPRESS)
parser.add_argument(
'--provider:network_type',
metavar='<network_type>',
help=_('The physical mechanism by which the virtual network'
' is implemented.'))
parser.add_argument(
'--provider:physical_network',
metavar='<physical_network_name>',
help=_('Name of the physical network over which the virtual '
'network is implemented.'))
parser.add_argument(
'--provider:segmentation_id',
metavar='<segmentation_id>',
help=_('VLAN ID for VLAN networks or tunnel-id for GRE/VXLAN '
'networks.'))
utils.add_boolean_argument(
parser,
'--vlan-transparent',
default=argparse.SUPPRESS,
help=_('Create a VLAN transparent network.'))
parser.add_argument(
'name', metavar='NAME',
help=_('Name of the network to be created.'))
parser.add_argument(
'--description',
help=_('Description of network.'))
self.add_arguments_qos_policy(parser)
availability_zone.add_az_hint_argument(parser, self.resource)
dns.add_dns_argument_create(parser, self.resource, 'domain')
def args2body(self, parsed_args):
body = {'name': parsed_args.name,
'admin_state_up': parsed_args.admin_state}
neutronV20.update_dict(parsed_args, body,
['shared', 'tenant_id',
'vlan_transparent',
'provider:network_type',
'provider:physical_network',
'provider:segmentation_id',
'description'])
self.args2body_qos_policy(parsed_args, body)
availability_zone.args2body_az_hint(parsed_args, body)
dns.args2body_dns_create(parsed_args, body, 'domain')
return {'network': body}
class DeleteNetwork(neutronV20.DeleteCommand):
"""Delete a given network."""
resource = 'network'
class UpdateNetwork(neutronV20.UpdateCommand, qos_policy.UpdateQosPolicyMixin):
"""Update network's information."""
resource = 'network'
def add_known_arguments(self, parser):
self.add_arguments_qos_policy(parser)
dns.add_dns_argument_update(parser, self.resource, 'domain')
def args2body(self, parsed_args):
body = {}
self.args2body_qos_policy(parsed_args, body)
dns.args2body_dns_update(parsed_args, body, 'domain')
return {'network': body}
| apache-2.0 | 5,446,808,212,747,097,000 | 36.758621 | 79 | 0.573973 | false |
ffalcinelli/wstunnel | setup.py | 1 | 4807 | #!/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2013 Fabio Falcinelli
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
from setuptools import setup, find_packages
import wstunnel
__author__ = 'fabio'
kwargs = dict(name='wstunnel',
version='0.0.6',
description='A Python WebSocket Tunnel',
author='Fabio Falcinelli',
author_email='[email protected]',
url='https://github.com/ffalcinelli/wstunnel',
keywords=['tunneling', 'websocket', 'ssl'],
packages=find_packages(),
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Intended Audience :: Telecommunications Industry',
'License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
],
setup_requires=['nose'],
test_suite='nose.collector')
kwargs["download_url"] = 'https://github.com/ffalcinelli/wstunnel/tarball/{0}'.format(kwargs.get("version"))
install_requires = ["PyYAML>=3.10",
"tornado>=3.0.2",
"nose>=1.3.0",
"mock>=1.0.1"]
if not sys.platform.startswith("win"):
kwargs["install_requires"] = install_requires
kwargs["entry_points"] = {
"console_scripts": [
"wstuncltd = wstunnel.daemon.wstuncltd:main",
"wstunsrvd = wstunnel.daemon.wstunsrvd:main",
]
}
else:
install_requires.extend(["pywin32>=218",
"py2exe>=0.6.9", ])
if "py2exe" in sys.argv:
if wstunnel.PY2:
from wstunnel.svc import wstunsrvd, wstuncltd
import py2exe
class Target:
def __init__(self, **kw):
self.__dict__.update(kw)
# for the versioninfo resources
self.version = kwargs["version"]
self.company_name = "N.A."
self.copyright = "Copyright (c) 2014 Fabio Falcinelli"
self.name = kwargs["name"]
tunclt_svc = Target(
# used for the versioninfo resource
description=wstuncltd.wstuncltd._svc_description_,
# what to build. For a service, the module name (not the
# filename) must be specified!
modules=["wstunnel.svc.wstuncltd"],
cmdline_style='pywin32',
)
tunsrv_svc = Target(
# used for the versioninfo resource
description=wstunsrvd.wstunsrvd._svc_description_,
# what to build. For a service, the module name (not the
# filename) must be specified!
modules=["wstunnel.svc.wstunsrvd"],
cmdline_style='pywin32',
)
kwargs["service"] = [tunclt_svc, tunsrv_svc]
kwargs["options"] = {
"py2exe": {
"compressed": 1,
"optimize": 2,
}
}
else:
sys.stderr.write("Warning: you're using python {0}.{1}.{2} "
"which is not supported yet by py2exe.\n".format(sys.version_info[0],
sys.version_info[1],
sys.version_info[2]))
sys.exit(-1)
else:
kwargs["entry_points"] = {
"console_scripts": [
"wstuncltd = wstunnel.svc.wstuncltd:main",
"wstunsrvd = wstunnel.svc.wstunsrvd:main",
]
}
setup(**kwargs) | lgpl-3.0 | 5,581,825,389,117,905,000 | 38.735537 | 108 | 0.524027 | false |
garyd203/flying-circus | src/flyingcircus/_raw/budgets.py | 1 | 1096 | """Raw representations of every data type in the AWS Budgets service.
See Also:
`AWS developer guide for Budgets
<https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/>`_
This file is automatically generated, and should not be directly edited.
"""
from attr import attrib
from attr import attrs
from ..core import ATTRSCONFIG
from ..core import Resource
from ..core import ResourceProperties
from ..core import create_object_converter
__all__ = ["Budget", "BudgetProperties"]
@attrs(**ATTRSCONFIG)
class BudgetProperties(ResourceProperties):
Budget = attrib(default=None)
NotificationsWithSubscribers = attrib(default=None)
@attrs(**ATTRSCONFIG)
class Budget(Resource):
"""A Budget for Budgets.
See Also:
`AWS Cloud Formation documentation for Budget
<http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-budgets-budget.html>`_
"""
RESOURCE_TYPE = "AWS::Budgets::Budget"
Properties: BudgetProperties = attrib(
factory=BudgetProperties, converter=create_object_converter(BudgetProperties)
)
| lgpl-3.0 | -4,699,627,189,435,388,000 | 26.4 | 106 | 0.737226 | false |
py-eww/eww | eww/command.py | 1 | 15282 | # -*- coding: utf-8 -*-
"""
eww.command
~~~~~~~~~~~
This is our custom command module. It is a subclass of
:py:class:`cmd.Cmd`. The most significant change is using classes rather
than functions for the commands.
Due to this change, we don't use CamelCase for command class names here.
Strictly that's ok via PEP8 since we are kinda treating these like
callables. Just a heads up.
"""
# PyLint picks up a lot of things here that it shouldn't. We clean up here.
# pylint: disable=too-few-public-methods, no-self-use, invalid-name
# pylint: disable=too-many-public-methods, redefined-outer-name
# pylint: disable=maybe-no-member, no-member, star-args, bad-builtin
import cmd
import code
import logging
from math import ceil
import os
import shlex
from StringIO import StringIO
import sys
import __builtin__
try:
import pygal
except ImportError: # pragma: no cover
# Just in case pygal isn't installed
pass
from .parser import Parser, ParserError, Opt
from .quitterproxy import safe_quit
from .shared import COUNTER_STORE, GRAPH_STORE
LOGGER = logging.getLogger(__name__)
class Command(cmd.Cmd):
"""Our cmd subclass where we implement all console functionality."""
class BaseCmd(object):
"""The base class for all commands."""
# You should define the following properties on all subclasses
name = 'Undefined'
description = 'Undefined'
usage = 'Undefined'
options = []
def run(self, line):
"""Performs the requested command. You should definitely override
this.
Args:
line (str): A command line argument to be parsed.
Returns:
bool: True to exit, None otherwise.
"""
pass
class EOF_command(BaseCmd):
"""Implements support for EOF being interpreted as an exit request."""
name = 'EOF'
description = 'An EOF will trigger this command and exit the console.'
usage = 'N/A'
def run(self, line):
"""Returns True to trigger an exit.
Args:
line (str): A command line argument to be parsed.
Returns:
bool: True
"""
return True
class exit_command(BaseCmd):
"""Implements support for the 'exit' command to leave the console."""
name = 'exit'
description = 'Exits the console. (same as quit)'
usage = 'exit'
def run(self, line):
"""Returns True to trigger an exit.
Args:
line (str): A command line argument to be parsed.
Returns:
bool: True
"""
return True
class quit_command(BaseCmd):
"""Implements support for the 'quit' command to leave the console."""
name = 'quit'
description = 'Quits the console. (same as exit)'
usage = 'quit'
def run(self, line):
"""Returns True to trigger an exit.
Args:
line (str): A command line argument to be parsed.
Returns:
bool: True
"""
return True
class repl_command(BaseCmd):
"""Drops the user into a python REPL."""
name = 'repl'
description = 'Provides an interactive REPL.'
usage = 'repl'
def register_quit(self):
"""Registers our custom quit function to prevent stdin from being
closed.
Returns:
None
"""
__builtin__.quit.register(safe_quit)
__builtin__.exit.register(safe_quit)
def unregister_quit(self):
"""Unregisters our custom quit function.
Returns:
None
"""
__builtin__.quit.unregister()
__builtin__.exit.unregister()
def run(self, line):
"""Implements the repl.
Args:
line (str): A command line argument to be parsed.
Returns:
None
"""
print 'Dropping to REPL...'
repl = code.InteractiveConsole()
try:
self.register_quit()
banner = 'Python ' + sys.version + ' on ' + sys.platform + '\n'
banner += 'Note: This interpreter is running *inside* of your '
banner += 'application. Be careful.'
repl.interact(banner)
except SystemExit:
# This catches the exit or quit from the REPL.
pass
finally:
self.unregister_quit()
print "Exiting REPL..."
class stats_command(BaseCmd):
"""A command for inspecting stats and generating graphs."""
name = 'stats'
description = 'Outputs recorded stats and generates graphs.'
usage = 'stats [args] [stat_name]'
# Declare options
options = []
options.append(Opt('-g', '--graph',
dest='graph',
default=False,
action='store_true',
help='Create graph'))
options.append(Opt('-f', '--file',
dest='file',
default=False,
action='store',
type='string',
help='Filename to use when saving graph'))
options.append(Opt('-t', '--title',
dest='title',
default=False,
action='store',
type='string',
help='Graph title'))
def __init__(self):
"""Init."""
super(Command.stats_command, self).__init__()
self.parser = Parser()
self.parser.add_options(self.options)
# Pygal won't support more than this currently
self.max_points = 30
def display_stat_summary(self):
"""Prints a summary of collected stats.
Returns:
None
"""
if not COUNTER_STORE and not GRAPH_STORE:
print "No stats recorded."
return
if COUNTER_STORE:
print "Counters:"
for stat in COUNTER_STORE:
print " ", stat + ':' + str(COUNTER_STORE[stat])
if GRAPH_STORE:
print "Graphs:"
for stat in GRAPH_STORE:
print " ", stat + ':' + str(len(GRAPH_STORE[stat]))
def display_single_stat(self, stat_name):
"""Prints a specific stat.
Args:
stat_name (str): The stat name to display details of.
Returns:
None
"""
if stat_name in COUNTER_STORE:
print COUNTER_STORE[stat_name]
return
if stat_name in GRAPH_STORE:
print list(GRAPH_STORE[stat_name])
return
else:
print 'No stat recorded with that name.'
def reduce_data(self, data):
"""Shrinks len(data) to ``self.max_points``.
Args:
data (iterable): An iterable greater than ``self.max_points``.
Returns:
list: A list with a fair sampling of objects from ``data``,
and a length of ``self.max_points.``
"""
# Thanks to Adam Forsyth for this implementation
shrunk = []
size = float(len(data))
for num in range(self.max_points):
shrunk.append(data[int(ceil(num * size / self.max_points))])
return shrunk
def generate_graph(self, options, stat_name):
"""Generate a graph of ``stat_name``.
Args:
options (dict): A dictionary of option values generated from
our parser.
stat_name (str): A graph name to create a graph from.
Returns:
None
"""
if stat_name not in GRAPH_STORE:
print 'No graph records exist for name', stat_name
return
if 'pygal' not in sys.modules: # pragma: no cover
print 'Pygal library unavailable. Try running `pip install',
print 'pygal`.'
return
data = list(GRAPH_STORE[stat_name])
graph = pygal.Line()
if options['title']:
graph.title = options['title']
else:
graph.title = stat_name
if len(data) > self.max_points:
data = self.reduce_data(data)
x_labels, y_labels = zip(*data)
graph.x_labels = map(str, x_labels)
graph.add(stat_name, y_labels)
graph_svg = graph.render()
filename = options['file'] or stat_name
filename += '.svg'
try:
with open(filename, 'w') as svg_file:
svg_file.write(graph_svg)
print 'Chart written to', filename # pragma: no cover
except IOError:
print 'Unable to write to', os.getcwd() + '/' + filename
def run(self, line):
"""Outputs recorded stats and generates graphs.
Args:
line (str): A command line argument to be parsed.
Returns:
None
"""
if not line:
self.display_stat_summary()
return
try:
options, remainder = self.parser.parse_args(shlex.split(line))
except ParserError as error_msg:
print error_msg
return
options = vars(options)
if not remainder:
# User entered something goofy
help_cmd = Command.help_command()
help_cmd.display_command_detail('stats')
return
if options['graph']:
self.generate_graph(options, remainder[0])
return
else:
self.display_single_stat(remainder[0])
return
class help_command(BaseCmd):
"""When called with no arguments, this presents a friendly help page.
When called with an argument, it presents command specific help.
"""
name = 'help'
description = 'help provides in-console documentation.'
usage = 'help [command]'
# Declare options
options = []
def __init__(self):
"""Init."""
super(Command.help_command, self).__init__()
self.parser = Parser()
self.parser.add_options(self.options)
def get_commands(self):
"""Returns a list of command classes.
Returns:
list: A list of command classes (not instantiated).
"""
commands = []
blacklist = ['EOF_command']
# First we get a list of all command names
all_names = dir(Command)
# Then find on-topic names
for name in all_names:
if name.endswith('_command') and name not in blacklist:
# Pull names and descriptions
cls = getattr(Command, name)
commands.append(cls)
return commands
def display_commands(self):
"""Displays all included commands.
Returns:
None
"""
commands = self.get_commands()
print 'Available Commands:'
print ''
for command in commands:
print ' ', command.name, '-', command.description
print ''
print 'For more info on a specific command, enter "help <command>"'
def display_command_detail(self, command_name):
"""Displays detailed command help.
Args:
command_name (str): A command name to print detailed help for.
Returns:
None
"""
name = command_name + '_command'
try:
cls = getattr(Command, name)
except AttributeError:
print command_name, 'is not a valid command.'
return
print 'Usage:'
print ' ', cls.usage
print ''
print 'Description:'
print ' ', cls.description
if not cls.options:
# All done
return
else:
print ''
# There are a lot of edge cases around pretty printing options.
# This is not elegant, but it's the least brittle option.
output = StringIO()
parser = Parser()
parser.add_options(cls.options)
parser.print_help(file=output)
output = output.getvalue()
# Massage output
output = output.split('\n')
# Remove trailing newline
output = output[:-1]
# Print everything after Options
start = output.index('Options:')
for line in output[start:]:
print line
def run(self, line):
"""Provides help documentation.
Args:
line (str): A command line argument to be parsed.
Returns:
None
"""
if not line:
self.display_commands()
return
try:
options, remainder = self.parser.parse_args(shlex.split(line))
del options # To shutup pylint
except ParserError as error_msg:
print error_msg
return
self.display_command_detail(remainder[0])
def onecmd(self, line):
"""We override cmd.Cmd.onecmd in order to support our class-based
commands. Changes are noted via comments.
Args:
line (str): A command (with arguments) to be executed.
Returns:
bool: True if a command is designed to exit, otherwise None.
"""
cmd, arg, line = self.parseline(line)
if not line:
return self.emptyline()
if cmd is None:
return self.default(line)
self.lastcmd = line
if line == 'EOF':
self.lastcmd = ''
if cmd == '':
return self.default(line)
else:
try:
# Changes start
cmd_class = getattr(Command, cmd + '_command')
cmd_class = cmd_class()
# Changes end
except AttributeError:
return self.default(line)
# Changes start
return cmd_class.run(arg)
# Changes end
def default(self, line):
"""The first responder when a command is unrecognized."""
print 'Command unrecognized.'
| mit | 357,748,505,772,380,400 | 28.501931 | 79 | 0.499018 | false |
alessio/dokku | contrib/dokku-installer.py | 1 | 7718 | #!/usr/bin/env python2.7
import cgi
import json
import os
import SimpleHTTPServer
import SocketServer
import subprocess
import sys
import threading
VERSION = 'v0.4.11'
hostname = ''
try:
command = "bash -c '[[ $(dig +short $HOSTNAME) ]] && echo $HOSTNAME || wget -q -O - icanhazip.com'"
hostname = subprocess.check_output(command, shell=True)
if ':' in hostname:
hostname = ''
except subprocess.CalledProcessError:
pass
key_file = os.getenv('KEY_FILE', '/root/.ssh/authorized_keys')
admin_keys = []
if os.path.isfile(key_file):
try:
command = "cat {0}".format(key_file)
admin_keys = subprocess.check_output(command, shell=True).strip().split("\n")
except subprocess.CalledProcessError:
pass
def check_boot():
if 'onboot' not in sys.argv:
return
init_dir = os.getenv('INIT_DIR', '/etc/init')
systemd_dir = os.getenv('SYSTEMD_DIR', '/etc/systemd/system')
nginx_dir = os.getenv('NGINX_DIR', '/etc/nginx/conf.d')
if os.path.exists(init_dir):
with open('{0}/dokku-installer.conf'.format(init_dir), 'w') as f:
f.write("start on runlevel [2345]\n")
f.write("exec {0} selfdestruct\n".format(os.path.abspath(__file__)))
if os.path.exists(systemd_dir):
with open('{0}/dokku-installer.service'.format(systemd_dir), 'w') as f:
f.write("[Unit]\n")
f.write("Description=Dokku web-installer\n")
f.write("\n")
f.write("[Service]\n")
f.write("ExecStart={0} selfdestruct\n".format(os.path.abspath(__file__)))
f.write("\n")
f.write("[Install]\n")
f.write("WantedBy=multi-user.target\n")
f.write("WantedBy=graphical.target\n")
if os.path.exists(nginx_dir):
with open('{0}/dokku-installer.conf'.format(nginx_dir), 'w') as f:
f.write("upstream dokku-installer { server 127.0.0.1:2000; }\n")
f.write("server {\n")
f.write(" listen 80;\n")
f.write(" location / {\n")
f.write(" proxy_pass http://dokku-installer;\n")
f.write(" }\n")
f.write("}\n")
subprocess.call('rm -f /etc/nginx/sites-enabled/*', shell=True)
sys.exit(0)
class GetHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_GET(self):
content = PAGE.replace('{VERSION}', VERSION)
content = content.replace('{HOSTNAME}', hostname)
content = content.replace('{ADMIN_KEYS}', "\n".join(admin_keys))
self.send_response(200)
self.end_headers()
self.wfile.write(content)
def do_POST(self):
if self.path not in ['/setup', '/setup/']:
return
params = cgi.FieldStorage(fp=self.rfile,
headers=self.headers,
environ={
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': self.headers['Content-Type']})
dokku_root = os.getenv('DOKKU_ROOT', '/home/dokku')
if 'vhost' in params and params['vhost'].value == 'true':
with open('{0}/VHOST'.format(dokku_root), 'w') as f:
f.write(params['hostname'].value)
else:
try:
os.remove('{0}/VHOST'.format(dokku_root))
except OSError:
pass
with open('{0}/HOSTNAME'.format(dokku_root), 'w') as f:
f.write(params['hostname'].value)
command = ['sshcommand', 'acl-add', 'dokku', 'admin']
for key in params['keys'].value.split("\n"):
proc = subprocess.Popen(command, stdin=subprocess.PIPE)
proc.stdin.write(key)
proc.stdin.close()
proc.wait()
if 'selfdestruct' in sys.argv:
DeleteInstallerThread()
self.send_response(200)
self.end_headers()
self.wfile.write(json.dumps({'status': 'ok'}))
class DeleteInstallerThread(object):
def __init__(self, interval=1):
thread = threading.Thread(target=self.run, args=())
thread.daemon = True
thread.start()
def run(self):
command = "rm /etc/nginx/conf.d/dokku-installer.conf && /etc/init.d/nginx stop && /etc/init.d/nginx start"
try:
subprocess.call(command, shell=True)
except:
pass
command = "rm -f /etc/init/dokku-installer.conf /etc/systemd/system/dokku-installer.service && stop dokku-installer"
try:
subprocess.call(command, shell=True)
except:
pass
def main():
check_boot()
port = int(os.getenv('PORT', 2000))
httpd = SocketServer.TCPServer(("", port), GetHandler)
print "Listening on 0.0.0.0:{0}, CTRL+C to stop".format(port)
httpd.serve_forever()
PAGE = """
<html>
<head>
<title>Dokku Setup</title>
<link rel="stylesheet" href="//netdna.bootstrapcdn.com/bootstrap/3.0.0/css/bootstrap.min.css" />
<script src="//ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script>
</head>
<body>
<div class="container" style="width: 640px;">
<form id="form" role="form">
<h1>Dokku Setup <small>{VERSION}</small></h1>
<div class="form-group">
<h3><small style="text-transform: uppercase;">Admin Access</small></h3>
<label for="key">Public Key</label><br />
<textarea class="form-control" name="keys" rows="7" id="key">{ADMIN_KEYS}</textarea>
</div>
<div class="form-group">
<h3><small style="text-transform: uppercase;">Hostname Configuration</small></h3>
<div class="form-group">
<label for="hostname">Hostname</label>
<input class="form-control" type="text" id="hostname" name="hostname" value="{HOSTNAME}" />
</div>
<div class="checkbox">
<label><input id="vhost" name="vhost" type="checkbox" value="true"> Use <abbr title="Nginx will be run on port 80 and backend to your apps based on hostname">virtualhost naming</abbr> for apps</label>
</div>
<p>Your app URLs will look like:</p>
<pre id="example">http://hostname:port</pre>
</div>
<button type="button" onclick="setup()" class="btn btn-primary">Finish Setup</button> <span style="padding-left: 20px;" id="result"></span>
</form>
</div>
<div id="error-output"></div>
<script>
function setup() {
if ($.trim($("#key").val()) == "") {
alert("Your admin public key cannot be blank.")
return
}
if ($.trim($("#hostname").val()) == "") {
alert("Your hostname cannot be blank.")
return
}
data = $("#form").serialize()
$("input,textarea,button").prop("disabled", true);
$.post('/setup', data)
.done(function() {
$("#result").html("Success!")
window.location.href = "http://progrium.viewdocs.io/dokku/application-deployment";
})
.fail(function(data) {
$("#result").html("Something went wrong...")
$("#error-output").html(data.responseText)
});
}
function update() {
if ($("#vhost").is(":checked") && $("#hostname").val().match(/^(\d{1,3}\.){3}\d{1,3}$/)) {
alert("In order to use virtualhost naming, the hostname must not be an IP but a valid domain name.")
$("#vhost").prop('checked', false);
}
if ($("#vhost").is(':checked')) {
$("#example").html("http://<app-name>."+$("#hostname").val())
} else {
$("#example").html("http://"+$("#hostname").val()+":<app-port>")
}
}
$("#vhost").change(update);
$("#hostname").change(update);
update();
</script>
</body>
</html>
"""
if __name__ == "__main__":
main()
| mit | 5,662,286,861,856,601,000 | 34.081818 | 208 | 0.564006 | false |
delmarrerikaine/dmc-2017 | src/final.py | 1 | 9016 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import gc
from sklearn.preprocessing import MinMaxScaler
from sklearn import linear_model
from func import toCategorical, solveNA, Dummies, solveCategorical, moreFeautures
import os
from IPython import get_ipython
ipython = get_ipython()
#change this manualy
path = '/media/roman/Main/Programing/contest/dmc2017/dmc-2017/'
os.chdir(path)
###############################################################################
### Create Scaler using data from train and class ###
###############################################################################
items = pd.read_csv('data/raw/items.csv',sep='|')
train = pd.read_csv('data/raw/train.csv',sep='|')
train_items=pd.merge(train,items,on='pid')
del train
gc.collect
train_items=toCategorical(train_items)
coef=train_items['competitorPrice'].as_matrix()/train_items['rrp'].as_matrix()
coef=coef[np.logical_not(np.isnan(coef))]
coef_competitorPrice_to_rrp=coef.mean()
items=solveNA(items,train_items,coef_competitorPrice_to_rrp,1)
train_items=solveNA(train_items,train_items,coef_competitorPrice_to_rrp,0)
train_items=Dummies(train_items)
train_items=moreFeautures(train_items)
items = solveCategorical('revenue',train_items,items,1)
items_pred=list(items.columns)
t1=['pid']
for p in items_pred:
if 'revenue' in p:
t1.append(p)
items_pred=t1
train_items=pd.merge(train_items,items[items_pred],on='pid')
ipython.magic("%reset -f out")
predictors=[ 'adFlag',
'genericProduct',
'rrp',
'availability_1',
'availability_2',
'availability_3',
'availability_4',
'unit_CM',
'unit_G',
'unit_KG',
'unit_L',
'unit_M',
'unit_ML',
'unit_P',
'unit_ST',
'salesIndex_40',
'salesIndex_44',
'salesIndex_52',
'salesIndex_53',
'campaignIndex_A',
'campaignIndex_B',
'campaignIndex_C',
'day_of_week',
'discount',
'compDiscount',
'revenue_group_mean',
'revenue_group_count',
'revenue_content_mean',
'revenue_content_count',
'revenue_pharmForm_mean',
'revenue_pharmForm_count',
'revenue_category_mean',
'revenue_category_count',
'revenue_manufacturer_mean',
'revenue_manufacturer_count',
'revenue_group_content_mean',
'revenue_group_content_count',
'revenue_group_pharmForm_mean',
'revenue_group_pharmForm_count',
'revenue_group_category_mean',
'revenue_group_category_count',
'revenue_group_manufacturer_mean',
'revenue_group_manufacturer_count',
'revenue_content_pharmForm_mean',
'revenue_content_pharmForm_count',
'revenue_content_category_mean',
'revenue_content_category_count',
'revenue_content_manufacturer_mean',
'revenue_content_manufacturer_count',
'revenue_pharmForm_category_mean',
'revenue_pharmForm_category_count',
'revenue_pharmForm_manufacturer_mean',
'revenue_pharmForm_manufacturer_count',
'revenue_category_manufacturer_mean',
'revenue_category_manufacturer_count',
'revenue_group_content_pharmForm_mean',
'revenue_group_content_pharmForm_count',
'revenue_group_content_category_mean',
'revenue_group_content_category_count',
'revenue_group_content_manufacturer_mean',
'revenue_group_content_manufacturer_count',
'revenue_group_pharmForm_category_mean',
'revenue_group_pharmForm_category_count',
'revenue_group_pharmForm_manufacturer_mean',
'revenue_group_pharmForm_manufacturer_count',
'revenue_group_category_manufacturer_mean',
'revenue_group_category_manufacturer_count',
'revenue_content_pharmForm_category_mean',
'revenue_content_pharmForm_category_count',
'revenue_content_pharmForm_manufacturer_mean',
'revenue_content_pharmForm_manufacturer_count',
'revenue_content_category_manufacturer_mean',
'revenue_content_category_manufacturer_count',
'revenue_pharmForm_category_manufacturer_mean',
'revenue_pharmForm_category_manufacturer_count']
y_train = train_items['revenue']
x_train = train_items[predictors]
del train_items
gc.collect
ipython.magic("%reset -f in")
items_init = ['pid',
'manufacturer',
'group',
'content',
'unit',
'pharmForm',
'genericProduct',
'salesIndex',
'category',
'campaignIndex',
'rrp']
clas = pd.read_csv('data/raw/class.csv',sep='|')
clas_items=pd.merge(clas,items[items_init],on='pid')
clas_items=toCategorical(clas_items)
clas_items=solveNA(clas_items,clas_items,coef_competitorPrice_to_rrp,2)
clas_items=Dummies(clas_items)
clas_items=moreFeautures(clas_items)
clas_items=pd.merge(clas_items,items[items_pred],on='pid')
x_test = clas_items[predictors]
x_test = x_test.fillna(x_test.mean())
del clas, clas_items
gc.collect
ipython.magic("%reset -f in")
x_train = x_train.append(x_test, ignore_index = True)
del x_test
gc.collect
ipython.magic("%reset -f in")
ipython.magic("%reset -f out")
scaler = MinMaxScaler()
scaler = scaler.fit(x_train)
del items, coef, y_train, x_train
gc.collect
###############################################################################
### Train model Ridge and Lasso ###
###############################################################################
items = pd.read_csv('data/raw/items.csv',sep='|')
train = pd.read_csv('data/raw/train.csv',sep='|')
train_items=pd.merge(train,items,on='pid')
del train
gc.collect
train_items=toCategorical(train_items)
coef=train_items['competitorPrice'].as_matrix()/train_items['rrp'].as_matrix()
coef=coef[np.logical_not(np.isnan(coef))]
coef_competitorPrice_to_rrp=coef.mean()
items=solveNA(items,train_items,coef_competitorPrice_to_rrp,1)
train_items=solveNA(train_items,train_items,coef_competitorPrice_to_rrp,0)
train_items=Dummies(train_items)
train_items=moreFeautures(train_items)
items = solveCategorical('revenue',train_items,items,1)
items_pred=list(items.columns)
t1=['pid']
for p in items_pred:
if 'revenue' in p:
t1.append(p)
items_pred=t1
train_items=pd.merge(train_items,items[items_pred],on='pid')
ipython.magic("%reset -f out")
y_train = train_items['revenue']
x_train = train_items[predictors]
del train_items
gc.collect
ipython.magic("%reset -f in")
x_train=scaler.transform(x_train)
model_ridge = linear_model.Ridge(alpha=6, fit_intercept=True, max_iter=10000)
model_ridge.fit(x_train, y_train)
model_lasso = linear_model.LassoCV(alphas = [1, 0.16, 0.1, 0.001, 0.0005], copy_X = False)
model_lasso.fit(x_train, y_train)
del y_train,x_train
gc.collect
###############################################################################
### Make prediction ###
###############################################################################
items_init = ['pid',
'manufacturer',
'group',
'content',
'unit',
'pharmForm',
'genericProduct',
'salesIndex',
'category',
'campaignIndex',
'rrp']
clas = pd.read_csv('data/raw/class.csv',sep='|')
clas_items=pd.merge(clas,items[items_init],on='pid')
clas_items=toCategorical(clas_items)
clas_items=solveNA(clas_items,clas_items,coef_competitorPrice_to_rrp,2)
clas_items=Dummies(clas_items)
clas_items=moreFeautures(clas_items)
clas_items=pd.merge(clas_items,items[items_pred],on='pid')
submission = pd.DataFrame({
"lineID": clas_items["lineID"],
"revenue": np.zeros(shape=(1210767,))
})
x_test = clas_items[predictors]
del clas_items,clas,items
gc.collect
ipython.magic("%reset -f in")
x_test = x_test.fillna(x_test.mean())
x_test = scaler.transform(x_test)
lasso_preds = model_lasso.predict(x_test)
ridge_preds = model_ridge.predict(x_test)
predictions = pd.DataFrame({"ridge":ridge_preds, "lasso":lasso_preds})
predictions[predictions['ridge']<0]=0
predictions[predictions['lasso']<0]=0
lasso_preds = predictions['lasso']
ridge_preds = predictions['ridge']
com_pred = (lasso_preds + ridge_preds) / 2.0
submission['revenue']=com_pred
submission_sorted=submission.sort_values("lineID");
submission_sorted.to_csv(
"data/Uni_Polytechnic_Lviv_1.csv", index=False, sep='|') | mit | -1,936,272,717,447,411,200 | 32.520446 | 90 | 0.592613 | false |
mareklibra/cockpit | test/verify/storagelib.py | 1 | 15431 | # -*- coding: utf-8 -*-
# This file is part of Cockpit.
#
# Copyright (C) 2015 Red Hat, Inc.
#
# Cockpit is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# Cockpit is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Cockpit; If not, see <http://www.gnu.org/licenses/>.
import os
import re
from testlib import *
# A helper for dialog_set_val and dialog_expect
class CheckBoxText:
def __init__(self, val):
self.val = val
class StorageCase(MachineCase):
def setUp(self):
if "atomic" in os.getenv("TEST_OS", ""):
self.skipTest("No storage on Atomic")
super(StorageCase, self).setUp()
self.storagectl_cmd = self.machine.execute("for cmd in storagedctl storagectl udisksctl; do if which $cmd 2>/dev/null; then break; fi; done").strip()
if "udisksctl" in self.storagectl_cmd:
ver = self.machine.execute("busctl --system get-property org.freedesktop.UDisks2 /org/freedesktop/UDisks2/Manager org.freedesktop.UDisks2.Manager Version || true")
else:
ver = self.machine.execute("busctl --system get-property org.storaged.Storaged /org/storaged/Storaged/Manager org.storaged.Storaged.Manager Version || true")
m = re.match('s "(.*)"', ver)
if m:
self.storaged_version = list(map(int, m.group(1).split(".")))
else:
self.storaged_version = [ 0 ]
self.storaged_is_old_udisks = ("udisksctl" in self.storagectl_cmd and self.storaged_version < [2, 6, 0])
if "debian" in self.machine.image or "ubuntu" in self.machine.image:
# Debian's udisks has a patch to use FHS /media directory
self.mount_root = "/media"
else:
self.mount_root = "/run/media"
def inode(self, f):
return self.machine.execute("stat -L '%s' -c %%i" % f)
def retry(self, setup, check, teardown):
def step():
if setup:
setup()
if check():
return True
if teardown:
teardown()
return False
self.browser.wait(step)
# Content
def content_row_tbody(self, index):
return "#detail-content > table > tbody:nth-of-type(%d)" % index
def content_row_expand(self, index):
b = self.browser
tbody = self.content_row_tbody(index)
b.wait_present(tbody)
if not "open" in (b.attr(tbody, "class") or ""):
b.click(tbody + " tr.listing-ct-item")
b.wait_present(tbody + ".open")
def content_row_action(self, index, title):
btn = self.content_row_tbody(index) + " .listing-ct-item .listing-ct-actions button:contains(%s)" % title
self.browser.wait_present(btn)
self.browser.click(btn)
# The row might come and go a couple of times until it has the
# expected content. However, wait_in_text can not deal with a
# temporarily disappearing element, so we use self.retry.
def content_row_wait_in_col(self, row_index, col_index, val):
col = self.content_row_tbody(row_index) +" .listing-ct-item :nth-child(%d)" % (col_index + 1)
self.retry(None, lambda: self.browser.is_present(col) and val in self.browser.text(col), None)
def content_head_action(self, index, title):
self.content_row_expand(index)
btn = self.content_row_tbody(index) + " .listing-ct-head .listing-ct-actions button:contains(%s)" % title
self.browser.wait_present(btn)
self.browser.click(btn)
def content_tab_expand(self, row_index, tab_index):
tab_btn = self.content_row_tbody(row_index) + " .listing-ct-head li:nth-child(%d) a" % tab_index
tab = self.content_row_tbody(row_index) + " .listing-ct-body:nth-child(%d)" % (tab_index + 1)
self.content_row_expand(row_index)
self.browser.wait_present(tab_btn)
self.browser.click(tab_btn)
self.browser.wait_present(tab)
return tab
def content_tab_action(self, row_index, tab_index, title):
tab = self.content_tab_expand(row_index, tab_index)
btn = tab + " button:contains(%s)" % title
self.browser.wait_present(btn)
self.browser.wait_attr(btn, "disabled", None)
self.browser.click(btn)
def wait_content_tab_action_disabled(self, row_index, tab_index, title):
tab = self.content_tab_expand(row_index, tab_index)
btn = tab + " button.disabled:contains(%s)" % title
self.browser.wait_present(btn)
# To check what's in a tab, we need to open the row and select the
# tab.
#
# However, sometimes we open the wrong row or the wrong tab
# because the right row or right tab still has to be created and
# take its right place. If the right row or tab finally appears,
# it won't be open at that point and we will miss it if we only
# open a row/tab once. So we just run the whole process in a big
# retry loop.
#
# XXX - Clicking a button in a tab has the same problem, but we
# ignore that for now.
def content_tab_wait_in_info(self, row_index, tab_index, title, val, alternate_val=None):
b = self.browser
def setup():
pass
def check():
row = self.content_row_tbody(row_index)
row_item = row + " tr.listing-ct-item"
tab_btn = row + " .listing-ct-head li:nth-child(%d) a" % tab_index
tab = row + " .listing-ct-body:nth-child(%d)" % (tab_index + 1)
cell = tab + " table.info-table-ct tr:contains(%s) > td:nth-child(2)" % title
if not b.is_present(row + ".open"):
if not b.is_present(row_item):
return False
b.click(row_item)
if not b.is_present(row + ".open"):
return False
if not b.is_present(tab):
if not b.is_present(tab_btn):
return False
b.click(tab_btn)
if not b.is_present(tab):
return False
if not b.is_present(cell):
return False
return val in b.text(cell) or (alternate_val is not None and alternate_val in b.text(cell))
def teardown():
pass
self.retry(setup, check, teardown)
def content_tab_info_row(self, row_index, tab_index, title):
tab = self.content_tab_expand(row_index, tab_index)
return tab + " table.info-table-ct tr:contains(%s)" % title
def content_tab_info_action(self, row_index, tab_index, title):
tab = self.content_tab_expand(row_index, tab_index)
link = tab + " table.info-table-ct tr:contains(%s) td:nth-child(2) a" % title
self.browser.wait_present(link)
self.browser.click(link)
# Dialogs
def dialog_wait_open(self):
self.browser.wait_present('#dialog')
self.browser.wait_visible('#dialog')
def dialog_wait_alert(self, text):
self.browser.wait_in_text('#dialog .alert-message', text)
def dialog_field(self, field):
return '#dialog [data-field="%s"]' % field
def dialog_val(self, field):
sel = self.dialog_field(field)
ftype = self.browser.attr(sel, "data-field-type")
if ftype == "TextInputChecked":
if self.browser.is_present(sel + " input[type=checkbox]:not(:checked)"):
return False
else:
return self.browser.val(sel + " input[type=text]")
else:
return self.browser.val(self.dialog_field(field))
def dialog_set_val(self, field, val):
if isinstance(val, bool):
self.browser.set_checked(self.dialog_field(field), val)
elif isinstance(val, dict):
for label in val:
self.dialog_select(field, label, val[label])
elif isinstance(val, int):
# size slider
self.browser.set_val(self.dialog_field(field) + " .size-unit", "1048576")
self.browser.set_val(self.dialog_field(field) + " .size-text", str(val))
elif isinstance(val, CheckBoxText):
sel = self.dialog_field(field);
if val.val == False:
self.browser.set_checked(sel + " input[type=checkbox]", False)
else:
self.browser.set_checked(sel + " input[type=checkbox]", True)
self.browser.set_val(sel + " input[type=text]", val.val)
else:
sel = self.dialog_field(field)
ftype = self.browser.attr(sel, "data-field-type")
if ftype == "select":
self.browser.click(sel + " button.dropdown-toggle")
self.browser.click(sel + " li[data-data=%s] a" % val)
elif ftype == "text-input":
self.browser.set_input_text(sel, val)
elif ftype == "TextInputChecked":
if val == False:
self.browser.set_checked(sel + " input[type=checkbox]", False)
else:
self.browser.set_checked(sel + " input[type=checkbox]", True)
self.browser.set_val(sel + " input[type=text]", val)
else:
self.browser.set_val(self.dialog_field(field), val)
def dialog_set_expander(self, field, val):
self.browser.call_js_func(
"""(function (sel, val) {
if ((ph_find(sel).className.indexOf('collapsed') >= 0) == val)
ph_click(sel);
})""", self.dialog_field(field), val)
def dialog_set_combobox(self, field, val):
self.browser.set_val(self.dialog_field(field) + " input[type=text]", val)
def dialog_combobox_choices(self, field):
return self.browser.call_js_func("""(function (sel) {
var lis = ph_find(sel).querySelectorAll('li');
var result = [];
for (i = 0; i < lis.length; ++i)
result.push(lis[i].textContent);
return result;
})""", self.dialog_field(field))
def dialog_is_present(self, field, label):
return self.browser.is_present('%s :contains("%s") input' % (self.dialog_field(field), label))
def dialog_select(self, field, label, val):
self.browser.set_checked('%s :contains("%s") input' % (self.dialog_field(field), label), val)
def dialog_wait_val(self, field, val):
if isinstance(val, int):
# size slider
self.browser.wait_val(self.dialog_field(field) + " .size-unit", "1048576")
self.browser.wait_val(self.dialog_field(field) + " .size-text", str(val))
else:
self.browser.wait_val(self.dialog_field(field), val)
def dialog_wait_error(self, field, val):
# XXX - allow for more than one error
self.browser.wait_present('#dialog .dialog-error')
self.browser.wait_in_text('#dialog .dialog-error', val)
def dialog_wait_not_visible(self, field):
self.browser.wait_not_visible(self.dialog_field(field))
def dialog_apply(self):
self.browser.click('#dialog button.apply')
def dialog_cancel(self):
self.browser.click('#dialog button.cancel')
def dialog_wait_close(self):
self.browser.wait_not_present('#dialog')
def dialog_check(self, expect):
for f in expect:
if isinstance(expect[f], CheckBoxText):
sel = self.dialog_field(f);
if expect[f].val == False:
return self.browser.is_present(sel + " input[type=checkbox]:not(:checked)")
else:
return (self.browser.is_present(sel + " input[type=checkbox]:checked") and
self.browser.val(sel + " input[type=text]") == expect[f].val)
elif not self.dialog_val(f) == expect[f]:
return False
return True
def dialog(self, values, expect={}):
self.dialog_wait_open()
for f in expect:
self.dialog_wait_val(f, expect[f])
for f in values:
self.dialog_set_val(f, values[f])
self.dialog_apply()
self.dialog_wait_close()
def confirm(self):
self.dialog({})
# There is a lot of asynchronous activity in the storage stack.
# For example, changing fstab or crypttab via the storaged API
# will not immediately update the Configuration properties of
# block objects. The storaged daemon will only do that once it
# gets a change notification for those files, which happens some
# time later. As another example, wiping a block device has to be
# noticed by udev and be propagated back to the daemon before it
# updates its properties.
#
# Concretely, the tests have to mainly deal with the cases listed
# below, and we offer some functions to help with that.
#
# - Waiting until a expected change to fstab or crypttab has
# arrived in storaged. This is important so that it will mount
# filesystems to the expected places, and will clean up fstab in
# the expected ways, among other things.
#
# This is done with wait_in_storaged_configuration and
# wait_not_in_storaged_configuration.
#
# - Waiting until a expected change to fstab or crypttab has
# arrived in Cockpit. This is important so that dialogs will
# show the right things, and try to modify the right
# configuration.
#
# This is done by repeatedly opening a dialog until it shows the
# right values, via dialog_with_retry.
#
# - Waiting until a block device is considered 'free' and can be
# used as a physical volume or raid member.
#
# This is also done by repeatedly opening a dialog until all
# needed block devices are listed.
def dialog_with_retry(self, trigger, values, expect):
def setup():
trigger()
self.dialog_wait_open()
def check():
if callable(expect):
return expect()
else:
return self.dialog_check(expect)
def teardown():
self.dialog_cancel()
self.dialog_wait_close()
self.retry(setup, check, teardown)
if values:
for f in values:
self.dialog_set_val(f, values[f])
self.dialog_apply()
else:
self.dialog_cancel()
self.dialog_wait_close()
def wait_in_storaged_configuration(self, mount_point):
wait(lambda: mount_point in self.machine.execute("%s dump | grep Configuration" % self.storagectl_cmd))
def wait_not_in_storaged_configuration(self, mount_point):
wait(lambda: mount_point not in self.machine.execute("%s dump | grep Configuration" % self.storagectl_cmd))
| lgpl-2.1 | -9,075,807,588,054,946,000 | 40.369973 | 175 | 0.590435 | false |
rbiswas4/FluctuationsInCosmology | interfacecosmology/psutils.py | 1 | 25544 | #!/usr/bin/env python
#
#This is a set of wrappers designed to use methods of obtaining linear
#quantities of interest from outputs of actual programs taht do the
#calculations, like CAMB with the help of utilities for specific programs.
#
#USEFUL ROUTINES:
#
#powerspectrum: obtains the linear power spectrum of various quantities from
#---------------
# standard outputs of programs like CAMB
#sigma
#---------------
#sigmaM
#--------------
#CHANGES:
#Only assign values to cosmo as default if values from cosmo are being used
#otherwise pass as whole
#
#Fixed the spelling of filterradiusformass and its calls. Checked that the
#tests at the bottom of the file still work.
#R. Biswas, Thu Nov 14 15:31:46 CST 2013
#
#Fixed bug in function sigmaM, where filterradius is called without
#optional argument cosmo and z. The same bug was there in sigmaM (where
#it was being called without z, and derivatives of sigma and the mass
# function calculation.
#R. Biswas, Thu Nov 14 17:59:14 CST 2013
import sys
import matplotlib.pyplot as plt
import utils.typeutils as tu
import massfunctions as mf
import growthfunction
import numpy as np
import camb_utils.cambio as cio
import utils.filters as filters
verbose = False
def loginterp(xreq, xnative, ynative , left = np.nan , right = np.nan):
	"""linearly interpolates log(ynative) against log(xnative) and
	evaluates the result at log(xreq), returning the exponentiated
	values (i.e. a log-log interpolation). Values of xreq outside the
	native range return left / right (default np.nan)."""
	logxreq = np.log(xreq)
	npinterp = np.interp(logxreq , np.log(xnative), np.log(ynative),
		left = np.log(left), right = np.log(right))
	return np.exp(npinterp)
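#Example (illustrative): loginterp reproduces a pure power law exactly;
#with xnative = [1., 10.], ynative = [1., 100.] (i.e. y = x^2),
#loginterp(np.array([2.]), np.array([1., 10.]), np.array([1., 100.]))
#returns approximately array([ 4.]).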
def critdensity(h = 1.0,
unittype = 'kgperm3') :
"""Returns the critical density today $\Omega_{crit}(z=0) as a
function of h through the formula
$10^4 h^2 (3.0/(8.0 \pi G) )) in units of kg /m^3 or solar
masses / Mpc^3 .
args:
h: float, optional, defaults to 1.0
= H0/100
unittype: string ,optional, defaults to SI
kgperm3: SI units
solarmassperMpc3: units of M_{\sun} / Mpc^3
returns:
critical density. If no argument is supplied the correct
critical density is the return times h^2
example usage:
>>> from astropy.cosmology import Planck13 as cosmo
>>> print critdensity (cosmo.h )
>>> print cosmo.critical_density0
		>>> #The two values above should agree up to unit conversion
status:
Tested with above tests and found to work. The value of 10^{-27}
is due to the difference in units with astropy.
R. Biswas, Sun Aug 18 12:41:42 CDT 2013
		BUGFIX: unittype ought to be kgperm3 in the if loop, but was
		written as kmperm3. Fixed typo.
R. Biswas, Wed Nov 13 19:22:28 CST 2013
notes :
The answer is ~ 10^{-27} while the answer in gm/cm^3 is
~10^{-30} as quoted in PDG for example.
TODO: This function will be removed to cosmodefs
"""
from astropy import units as u
from astropy import constants as ct
kmperMpc = (u.km / u.Mpc).decompose().scale
H0 = 100.0 * kmperMpc *h # in units of per sec
rhocrit = H0*H0 * 3.0 /(8.0 * np.pi * ct.G.value)
#Multiply mass in kg by convtosolmass to get mass in solar mass
convtosolmass = u.kg.to(u.solMass)
#Multiply distance in m to distance in Mpc
convtoMpc = (u.m.to(u.Mpc))
if unittype == "kgperm3" :
rhocritu = rhocrit
if unittype == "solarmassperMpc3":
rhocritu = rhocrit*convtosolmass/convtoMpc**3.0
return rhocritu
def __rhobg ( z = 0.0 , bgtype = "cb", unittype = "solarmassperMpc3",
cosmo = None):
"""returns the background density at redshift z. If bgtype = "matter"
then the function returns the background matter (CDM + Baron + massive
neutrino) density.
args:
z:
optional, float , defaults to 0.
redshift at which the background density is
calculated
bgtype :
string, optional, defaults to "matter"
choices: "matter"
"cb" : baryon - cdm
unittype :
string, optional ,defaults to "Msun/Mpc3"
defines the unit of the return
solarmassperMpc3:
Units of solar mass per Mpc cube
SI :
cosmo : w0wa cosmology
returns:
float, the background density in units of type unittype
example usage:
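		(illustrative sketch; assumes cosmo is an FCPL / astropy-style
		model exposing Om0, Ob0, Oc0 and H0, as used elsewhere in this
		module)
		>>> rho_cb = __rhobg(z = 1.0, bgtype = "cb",
		...	unittype = "solarmassperMpc3", cosmo = cosmo)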
status:
notes:
Will change later. This exists because our definition of
matter includes neutrinos, which I want to remove later
so that matter is baryon CDM by definition.
Remove to cosmodefs.
"""
#if cosmo == None:
# from astropy.cosmology import Planck13 as cosmo
if bgtype == "matter":
Om0 = cosmo.Om0
if bgtype == "cb":
Om0 = cosmo.Ob0 + cosmo.Oc0
h = cosmo.H0 /100.
rho = critdensity(h = h, unittype = 'solarmassperMpc3')*Om0*(1.+z)**3.0
return rho
def filterradiusformass( M ,
z = 0. ,
bgtype = "cb" ,
cosmo = None):
"""
	returns a radius in units of Mpc which encloses a mass M of the
	homogeneous density of particles specified as bgtype at the redshift
	z for a cosmology cosmo.
	args:
		M :
			mass in solar masses
		z : float, defaults to 0.
			redshift
		bgtype : string, defaults to "cb"
			background density to use in converting
			mass to a radius
		cosmo : w0wa cosmology, must be supplied (no default)
returns :
Radius in Mpc
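	notes :
		The radius is defined through the homogeneous background,
		$M = (4\pi/3) \bar\rho_{bg}(z) R^3$, i.e.
		$R = (3 M /(4\pi \bar\rho_{bg}(z)))^{1/3}$, and is returned
		in Mpc (not Mpc/h).
	example usage (illustrative sketch; cosmo is assumed to be an
	FCPL / astropy-style model as used elsewhere in this module):
		>>> R = filterradiusformass(1.0e14, z = 0.0, bgtype = "cb",
		...	cosmo = cosmo)
		>>> RinMpcoverh = R * cosmo.h #convert to Mpc/h for sigma()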
"""
#if cosmo == None:
# from astropy.cosmology import Planck13 as cosmo
rhobg = __rhobg ( z , bgtype = bgtype, unittype = "solarmassperMpc3", cosmo = cosmo)
#Assume mass is made of all matter
Rcube = 3* M / rhobg /4.0 / np.pi
R = np.power (Rcube, 1.0/3.0)
return R
def powerspectrumfromfile(fname,
koverh = None ,
pstype = "matter" ,
h = 0.71 ,
ns = 0.963 ,
As = 2.1e-9 ,
Omegacdm = None ,
Omegab = None ):
"""
****************************************
DEPRECATED: USE cambio functions instead
*****************************************
returns a tuple of koverh values and the interpolated power
	spectra at these values of koverh using a CAMB output which
may be a power spectrum output or transfer function output.
If the output is a transfer function output, then ns, h,
and As must be supplied
args:
returns:
tuple of (koverh, ps values)
"""
#decide if file is transfer function or Power spectrum output
psfile = False
tffile = False
Unknown = True
tmpfile = np.loadtxt(fname)
shapetuple = np.shape(tmpfile)
if shapetuple[-1] == 7:
tffile = True
Unknown = False
if shapetuple[-1] ==2 :
psfile = True
Unknown = False
if koverh == None:
koverh = tmpfile[:,0]
if Unknown:
#file is not CAMB transfer function or power spectrum output
raise ValueError("Unknown filename supplied")
if psfile :
if pstype != "matter" :
raise ValueError ("Cannot obtain non-matter power spectrum from CAMB power spectrum file")
return (koverh , powerspectrum(koverh, fname ) )
if tffile :
if pstype == "matter" :
transfer = cio.loadtransfers(rootname = None,
filename = fname)
ps = cio.matterpowerfromtransfersforsinglespecies( koverh ,
transfer ,
h ,
As ,
ns )
return (ps [:,0], ps[:,1])
elif pstype == "cb" :
#LOST CODE HERE
return 0
def powerspectrum ( koverh ,
asciifile = None ,
pstype = "matter",
sigma8type = "matter" ,
method = "CAMBoutfile",
z = 0.0 ,
cosmo = None ,
interpmethod = 'log' ,
**params):
"""
returns linearly interpolated values of the powerspectrum in the
powerspectrumfile with k values in units of h/Mpc. Using
this with koverh = None, returns the values in the table.
args:
koverh : array-like of floats or Nonetype, mandatory
k in units of h/Mpc
asciifile: string,
Filename for power spectrum or CAMB transfer function.
			power spectrum or transfer function input will be
recognized from CAMB file structure.
cosmo : interfacecosmology/astropy cosmological model
method : string, optional , defaults to "CAMBoutfile"
Method of obtaining power spectrum with fixed options
options:
-------
CAMBoutfile :assume that the asciifile output of CAMB
is at desired redshift
CAMBoutgrowth :Use the asciifile from CAMB output at
z = 0.0 , and use a growth function to find
the power spectrum at z = z
interpmethod: string, optional, defaults to 'log'
options:
				'log': linearly interpolates
					log(koverh) vs log(PS) (PS in units of (Mpc/h)^3)
				'linear' : linearly interpolates koverh and PS
pstype : string, optional, defaults to 'matter'
sets the way the perturbations are counted in order
			to calculate the matter power spectrum, though the
			perturbations are evolved correctly according to the
			cosmological model.
OPTIONS:
--------
'matter': Conventional matter power spectrum, as would
be calculated in CAMB, including the density
contrast for CDM, baryon and massive neutrinos
			'cb'  : Counts only the CDM and baryons in calculating
the matter power spectrum.
'cbmatter': Counts only the CDM and baryon fluctuations
but the entire matter (CDM + baryons + massive
neutrinos) for the background density
returns:
tuple (koverh , power spectrum)
notes: should be able to obtain the powerspectrum in a variety of
methods with code being added
Override Rules:
sigma8 overrides As
params dictionary overrides cosmo
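	example usage (illustrative sketch; the transfer-function filename
	and the cosmo object are assumptions, not part of this module):
		>>> #CAMB transfer-function output at z = 0 and an FCPL /
		>>> #astropy-style model with Ob0, Oc0, On0, h, ns, sigma8 set
		>>> koverh, pk = powerspectrum(koverh = None,
		...	asciifile = "LCDM_transfer_out.dat", pstype = "cb",
		...	method = "CAMBoutgrowth", z = 0.5, cosmo = cosmo)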
"""
#Make sure evaluation method is implemented
if not method in ["CAMBoutfile","CAMBoutgrowth"]:
raise ValueError("Method not defined")
if method in ["CAMBoutfile", "CAMBoutgrowth"] :
#Query CAMB file type: power spectrum or transfer
psfile, tkfile, Unknown = cambasciifiletype (asciifile)
if method == "CAMBoutgrowth":
#cannot calculate growth if cosmo not provided
if cosmo == None and z >0.000001 :
			raise ValueError("Method does not work if cosmo is not defined")
Dg = 1.0
if cosmo !=None:
if z > 0.000001:
Dg = cosmo.growth(z)[0]
# decide whether As or sigma8 is to be used
# if sigma8 provided use it, otherwise As
sigma8 = None
As = None
if params.has_key("sigma8"):
if params.sigma8 != None :
sigma8 = params["sigma8"]
if params.As != None :
As = params["As"]
if cosmo != None :
if cosmo.sigma8 != None:
if sigma8 == None:
sigma8 = cosmo.sigma8
if cosmo.As != None:
if As == None :
As = cosmo.As
#If neither As or sigma8 are provided fail!
if As == None and sigma8 == None and not psfile :
raise ValueError("without As or sigma8 provided, matter power spectrum cannot be calculated from transfer functions\n")
if sigma8 != None:
As = 1.0
if params != None:
paramdict = params
paramdict["As"] = As
#print "VALUES passed on from powerspectrum routine \n"
if verbose:
print "sigma8 = ", sigma8, " As ", cosmo.As
#print paramdict["As"], "IN powerspectrum"
pstmp = __powerspectrum ( koverh = None,
asciifile = asciifile ,
pstype = pstype ,
method = method ,
z = z ,
cosmo = cosmo ,
**paramdict )
#If sigma8 is given, we need to normalize power spectrum
#power spectrum to normalize is pssigma8
if sigma8 != None:
if pstype !=sigma8type:
if verbose:
print "evaluate sigmatype ps \n"
pssigma8 = __powerspectrum ( koverh = None,
asciifile = asciifile ,
pstype = sigma8type ,
method = method ,
z = z ,
cosmo = cosmo ,
**paramdict)
else:
pssigma8 = pstmp
if sigma8 != None :
Asrel = getAsrel (pssigma8 , sigma8, cosmo = cosmo,
filt= filters.Wtophatkspacesq, **paramdict)
#print "Now As has been determined to be ", sigma8type , Asrel
v = pstmp[0], Asrel*pstmp[1]
else :
v = pstmp
if koverh != None:
if interpmethod == "linear":
ret = koverh, np.interp(koverh, v[0], v[1],
left = np.nan , right = np.nan)
else:
interpmethod = "log"
ret = koverh, loginterp(koverh, v[0], v[1],
left = np.nan , right = np.nan)
else:
ret = v
if method == "CAMBoutgrowth" :
return ret[0],Dg*Dg*ret[1]
else:
return ret
def getvalsfromparams(cosmo, **params):
"""
TO DO
provide a general function to pass values into cosmo and params
"""
return None
def cambasciifiletype( fname ) :
	# Decide whether this is a matter or transfer file
psfile = False
tkfile = False
Unknown = True
tmpfile = np.loadtxt(fname )
shapetuple = np.shape(tmpfile)
if shapetuple[-1] == 7:
tkfile = True
Unknown = False
if shapetuple[-1] ==2 :
psfile = True
Unknown = False
if Unknown:
#file is not CAMB transfer function or power spectrum output
raise ValueError("Unknown filename supplied")
return psfile, tkfile, Unknown
def __powerspectrum ( koverh ,
asciifile = None ,
pstype = "matter",
method = "CAMBoutfile",
z = 0.0 ,
cosmo = None ,
**params):
"""
DO NOT CALL DIRECTLY. CALL powerspectrum instead
returns linearly interpolated values of the powerspectrum in the
powerspectrumfile with k values in units of h/Mpc. Using
this with koverh = None, returns the values in the table.
args:
koverh : array-like of floats or Nonetype, mandatory
k in units of h/Mpc
asciifile: string,
Filename for power spectrum or CAMB transfer function.
			power spectrum or transfer function input will be
recognized from CAMB file structure.
method : string, optional , defaults to "CAMBoutfile"
Method of obtaining power spectrum with fixed options
options:
-------
CAMBoutfile :assume that the asciifile output of CAMB
is at desired redshift
CAMBoutgrowth :Use the asciifile from CAMB output at
z = 0.0 , and use a growth function to find
the power spectrum at z = z
returns:
tuple (koverh , power spectrum)
notes: should be able to obtain the powerspectrum in a variety of
methods with code being added
"""
#ensure we are supposed to read CAMB outfiles
if not method in ["CAMBoutfile","CAMBoutgrowth"]:
raise ValueError("Method not defined")
# # Decide whether this ia a matter or transfer file
#This has been made a function
# psfile = False
# tkfile = False
# Unknown = True
#
# shapetuple = np.shape(tmpfile)
# if shapetuple[-1] == 7:
# tkfile = True
# Unknown = False
# if shapetuple[-1] ==2 :
# psfile = True
# Unknown = False
psfile, tkfile, Unknown = cambasciifiletype ( asciifile )
tmpfile = np.loadtxt(asciifile)
if koverh == None:
koverh = tmpfile[:,0]
if Unknown:
#file is not CAMB transfer function or power spectrum output
raise ValueError("Unknown filename supplied")
if psfile:
pk = cio.loadpowerspectrum(asciifile)
if not np.all(np.diff(pk[:,0])>0.):
raise ValueError("The k values in the power spectrum file are not in ascending order")
if koverh == None :
return (pk[:,0], pk[:,1])
return koverh, np.interp( koverh, pk[:,0],pk[:,1],left = np.nan, right = np.nan)
if tkfile:
#print "AS " , params["As"]
#print cosmo.Ob0, cosmo.Oc0
if pstype == "cb":
#print "filename ", asciifile
pk = cio.cbpowerspectrum ( transferfile = asciifile ,
Omegacdm = cosmo.Oc0,
Omegab = cosmo.Ob0,
h = cosmo.h,
Omeganu = cosmo.On0,
As = params["As"],
#As = cosmo.As,
ns = cosmo.ns,
koverh = None )
return (pk [:,0], pk[:,1])
if pstype == "cbmatter":
Omegam = cosmo.Om0
Omegacb = cosmo.Ob0 + cosmo.Oc0
ratiosq = (Omegacb/Omegam)**2.0
#print "filename ", asciifile
pk = cio.cbpowerspectrum ( transferfile = asciifile ,
Omegacdm = cosmo.Oc0,
Omegab = cosmo.Ob0,
h = cosmo.h,
Omeganu = cosmo.On0,
As = params["As"],
#As = cosmo.As,
ns = cosmo.ns,
koverh = None )
return (pk [:,0], pk[:,1]*ratiosq)
if pstype == "matter" :
if koverh == None :
koverh = tmpfile[:,0]
transfer = cio.loadtransfers( filename = asciifile)
transfertuple = (transfer[:,0], transfer[:,-1])
ps = cio.matterpowerfromtransfersforsinglespecies(
koverh ,
transfer = transfertuple,
h = cosmo.h ,
As = params["As"],
ns = cosmo.ns)
return (ps [:,0], ps[:,1])
return koverh, pk
def sigma(ps , R = 8 , khmin = 1e-5, khmax = 2.0, logkhint = 0.005, cosmo = None, filt = filters.Wtophatkspacesq, **params) :
"""
returns the square root of the variance of isotropic, homogeneous
fluctuations filtered with a single scale filter at a scale of
R Mpc/h.
args:
ps: tuple of koverh , power spectrum values
R : array-like float, optional defaults to 8.0
radius in units of Mpc/h over which the filtering
is done
filt: function describing the shape of the filter
default is filters.Wtophatkspacesq which is
the Fourier transform of the tophat at R Mpc/h
cosmo: cosmological model
khmin: float, optional defaults to 1e-5
min value of k/h used in evaluating the integral.
usage:
>>> pk = np.loadtxt("powerspectrum")
>>> sigma (ps = (pk[:,0],pk[:,1]), cosmo = cosmo)
"""
sigsq= sigmasq(ps = ps , R =R, khmin = khmin , khmax = khmax ,
logkhint = logkhint , cosmo=cosmo , filt = filt , **params )
return np.sqrt(sigsq )
def sigmasq (ps , R = 8. , usenative = True, khmin = 0.9e-5 , khmax = 5.0, logkhint = 0.005 ,
cosmo = None, filt= filters.Wtophatkspacesq, **params) :
"""
Returns the variance of the overdensity field smoothed at
a radius of R Mpc/h using a filter specified by filt
args:
ps: tuple of koverh, power spectrum values
R : float array like
distance scale in units of Mpc/h over which
the filtering is done
usenative: bool, optional , defaults to True
Use values provided in ps, rather than
interpolation
cosmo: Model, whose hubble constant will be used
khmin: float, value below which the integral will not be
calculated
returns :
array of sigmasq values
notes:
- We need h, even if CAMB power spectrum is given
- If interpolation is used only, and the range provided
is outside the range of the data, only those points
in the original range will be used. extrapolation
is dangerous, particularly at high k, unless it is
made to drop as a power law.
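		- The quantity evaluated is (sketch of the standard
		expression) $\sigma^2(R) = \frac{1}{2\pi^2}\int dk\,
		k^2 P(k) W^2(kR)$, with k in Mpc^{-1}, P(k) converted to
		Mpc^3 (hence the factors of h below), and W set by filt
		(by default the Fourier transform of a spherical tophat
		of radius R Mpc/h).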
"""
import numpy as np
import scipy.integrate as si
h = cosmo.H0/100.0
if usenative :
mask = ps[1] is not np.nan
#khvals = ps[0][mask]
khvals = ps[0]
else:
logkhmin = np.log(khmin)
logkhmax = np.log(khmax)
logkh = np.arange(logkhmin, logkhmax , logkhint)
khvals = np.exp(logkh)
logkhmin = max(min(ps[0]),logkhmin)
logkhmax = min(max(ps[0]),logkhmax)
mask = khvals >= khmin
khvals = khvals[mask]
k = khvals * h
psinterp = np.interp (khvals , ps[0], ps[1], left = np.nan, right = np.nan)
#plt.loglog(khvals, psinterp, label="interp")
#plt.loglog(ps[0], ps[1], label="native")
#plt.legend(loc= "best")
#plt.show()
if tu.isiterable(R):
R = np.asarray(R)
kr = np.outer( R, khvals )
else:
kr = R* khvals
kwinsq= filt (kr, R)
#kwin = 3*(np.sin(kr)-kr*np.cos(kr))/(kr)**3
#kwinsq = kwin *kwin
ksqWsqPk = k*k *kwinsq* psinterp /2. /np.pi/ np.pi/h /h/h
sigmasq = si.simps ( ksqWsqPk, x = k, even = 'avg')
return sigmasq
def getcosmo(cosmo, cambtf_file, sigma8 = None) :
"""
returns an FCPL object with the same cosmology as cosmo, except
that the amplitude is a CMB normalized As, such that the cambtf_file
produces the input sigma8, or the cosmo.sigma8
args:
returns:
cosmo with the amplitude set correctly so that
the sigma8 values match
"""
	# note: this aliases (does not copy) the input model, so setamplitude()
	# below also updates the caller's cosmo in place
	Acosmo = cosmo
sig8 = sigma8
if sig8 is None:
sig8 = cosmo.sigma8
Acosmo.setamplitude(As =1.0, sigma8 = None)
cambtmp = powerspectrum(koverh = None, asciifile = cambtf_file,
cosmo = Acosmo )
As = getAsrel(cambtmp, sigma8 = sig8, cosmo = Acosmo)
Acosmo.setamplitude ( As = As, sigma8 = None)
return Acosmo
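# Illustrative usage sketch (hypothetical): the transfer-file name and sigma8
# value are assumptions; getcosmo returns the model with a CMB-normalized As.
# 	normalized = getcosmo(cosmo, "LCDM_def_transfer_out.dat", sigma8=0.8)
# 	koverh, pk = powerspectrum(koverh=None, asciifile="LCDM_def_transfer_out.dat",
# 	                           cosmo=normalized)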
def getAsrel (ps , sigma8, khmin = 1.0e-5 , khmax = 2.0, logkhint = 0.005 ,
cosmo = None, filt= filters.Wtophatkspacesq, **params) :
"""
returns a relative value of As by which to multiply the power spectrum
values in order to obtain sigma8
args:
returns:
float, Asrel
"""
sigsq = sigmasq (ps , khmin= khmin, khmax =khmax,logkhint =logkhint, cosmo = cosmo, filt = filt , **params)
#print "sigma8 ", sigma8
#print "sigsq ", sigsq
Asrel = sigma8*sigma8 / sigsq
#print "Asrel in Asre", Asrel
return Asrel
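# Illustrative usage sketch (hypothetical names): getAsrel is a multiplicative
# factor, so scaling the spectrum by it makes sigma(R = 8 Mpc/h) match sigma8.
# 	asrel = getAsrel((koverh, pk), sigma8=0.8, cosmo=cosmo)
# 	pk_rescaled = asrel * np.asarray(pk)
# 	# sigma(ps=(koverh, pk_rescaled), R=8.0, cosmo=cosmo) ~= 0.8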
def sigmaM (M ,
ps ,
bgtype = "matter",
khmin = 1.0e-5 ,
khmax = 2.0 ,
logkhint = 0.005 ,
z = 0.0 ,
cosmo = None ,
**params):
"""Returns the standard deviation of the overdensity fields
smoothed at a radius corresponding to mass M.
args:
M: array like , mandatory
mass of halo in units of solar masses
		ps: tuple, mandatory
			(koverh, power spectrum values), consistent with bgtype
		bgtype: string, optional, defaults to "matter"
			background density used to convert the mass M to a filter radius R
		z: float, optional, defaults to 0.0
			redshift at which the filter radius is evaluated
		khmin: float, optional, defaults to 1e-5
			minimum k/h used in the variance integral
		cosmo: cosmological model (provides H0), mandatory
notes:
the bgtype matters for converting Mass M to R.
Also ps must be set accordingly
"""
if tu.isiterable(M):
M = np.asarray(M)
#if cosmo == None:
# from astropy.cosmology import Planck13 as cosmo
h = cosmo.H0/100.0
R = filterradiusformass( M , bgtype= bgtype, z = z, cosmo = cosmo)
RinMpcoverh = R*h
#print "RinMpcoverh ***************"
#print RinMpcoverh
#return RinMpcoverh
return sigma( ps , R = RinMpcoverh, khmin = khmin , khmax = khmax, logkhint = logkhint , cosmo= cosmo, **params)
def dlnsigmadlnM (M ,
ps ,
bgtype = "matter",
cosmo = None ,
khmin = 1.0e-5 ,
khmax = 2.0 ,
logkhint = 0.005 ,
z = 0.0 ,
**params ) :
"""
	returns the logarithmic derivative d ln(sigma) / d ln M at the given
	values of M (callers such as dndlnM negate this to obtain
	d ln(sigma^{-1}) / d ln M)
args:
M: array-like, mandatory
mass of halo in units of solar mass
ps : tuple, mandatory
(koverh , ps)
z : Redshift. SHOULD ALWAYS BE SET TO 0. left for historical
reasons.
notes:
		d ln(sigma) / d ln M = (M / sigma) * (d sigma / dR) * (dR / dM)
		Evaluated as (1 / sigma) * (d sigma / dR) * (dR / d ln M),
		where d sigma / dR comes from sigmasq evaluated with the derivative
		of the filter (filters.dWtophatkspacesqdlnR).
"""
sig = sigmaM (M , ps , khmin = khmin ,
khmax = khmax ,
logkhint = logkhint ,
z = z ,
bgtype = bgtype ,
cosmo = cosmo ,
**params)
h = cosmo.h
R = filterradiusformass( M , bgtype = bgtype, z = 0, cosmo = cosmo)
dlnRdlnM = 1.0/3.0
RinMpcoverh = R*h
#d ln sigma /d ln R = d ln sigma^2 / d ln R / sigma^2/ 2.0
#sigmasq with filter of dWtophatkspacesqdlnR
# is dln sigma^2/ d ln R
dlnsigdlnR = sigmasq (R = RinMpcoverh , ps = ps, z = z ,
bgtype = bgtype, filt = filters.dWtophatkspacesqdlnR, cosmo = cosmo , khmin = khmin ,
khmax = khmax , logkhint = logkhint, **params )/sig/ sig/2.0
#return sig
return dlnsigdlnR *dlnRdlnM
def dndlnM ( M ,
ps ,
z = 0. ,
khmin = 1.0e-5,
khmax = 2.0 ,
logkhint = 0.005 ,
bgtype = "matter",
powerspectrumfile = "LCDM_matterpower.dat" ,
cosmo = None,
deltac = 1.674 ,
**params ):
"""
returns the mass function dn/dln(M) in units of h^3 Mpc^{-3}
args:
M: mandatory, arraylike
mass bin in units of solar Mass
powerspectrumfile : optional, string, defaults to
LCDM_matterpower.dat
name of the power spectrum file from CAMB
cosmo: optional defaults to Planck13
cosmology model
returns:
numpy array containing mass function in units of Mpc^{-3}
CHANGES:
added argument deltac with default value 1.674
"""
h = cosmo.H0/100.0
#rhocr = critdensity( h = h ,
# unittype = "solarmassperMpc3")
sig = sigmaM (M ,
ps ,
bgtype = bgtype,
khmin = khmin ,
khmax = khmax ,
logkhint = logkhint ,
z = z,
cosmo = cosmo ,
**params)
dlsinvdlM = -dlnsigmadlnM (M ,
ps ,
z = z ,
bgtype = bgtype ,
cosmo = cosmo ,
khmin = khmin ,
khmax = khmax ,
logkhint = logkhint ,
**params )
f_sigma = mf.__fsigmaBhattacharya (
sigma = sig,
deltac = deltac ,
z = z ,
A0 = 0.333 ,
a0 = 0.788 ,
p0 = 0.807 ,
q0 = 1.795 ,
alpha1 = 0.11 ,
alpha2 = 0.01 ,
alpha3 = 0.0 ,
alpha4 = 0.0,
Mlow = 6e11 ,
Mhigh = 3e15)
rhobg = __rhobg( z =z , bgtype = bgtype,
unittype = "solarmassperMpc3", cosmo = cosmo)
dndlnM = rhobg *f_sigma *dlsinvdlM /M
#dndlnM = dlsinvdlM *f_sigma/M * rhobg
#critdensity(h = cosmo.h, unittype = "solarmassperMpc3")*cosmo.Om0
return dndlnM
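# Illustrative usage sketch (hypothetical): the CAMB file name is an
# assumption; any (koverh, P(k)) tuple and a cosmology providing H0 work.
def _example_dndlnM(powerspectrumfile="LCDM_matterpower.dat", cosmo=None):
	import numpy as np
	import camb_utils.cambio as cio

	pk = cio.loadpowerspectrum(powerspectrumfile)
	masses = 10.0 ** np.arange(10.0, 15.5, 0.1)  # halo masses in solar masses
	# mass function dn/dln(M) in h^3 Mpc^-3 over the chosen mass grid
	return masses, dndlnM(masses, ps=(pk[:, 0], pk[:, 1]), cosmo=cosmo)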
if __name__=="__main__":
import numpy as np
import matplotlib.pyplot as plt
import camb_utils.cambio as cio
import sys
#pk = cio.loadpowerspectrum ("example_data/LCDM_def_matterpower.dat")
pk = cio.loadpowerspectrum ("LCDM_matterpower.dat")
ts = cio.loadtransfers(filename = "example_data/LCDM_def_transfer_out.dat")
#print np.shape(ts)
#print pk[:,0]
pkt = cio.matterpowerfromtransfersforsinglespecies(koverh = pk[:,0],
transfer = (ts[:,0],ts[:,-1]), h = 0.71, As = 2.1e-9, ns = 0.963)
plt.loglog ( pk[:,0], pk[:,1])
plt.loglog ( pkt[:,0], pkt[:,1])
plt.figure()
from astropy.cosmology import Planck13 as cosmo
#print sigma(ps = (pk[:,0],pk[:,1]) , R = 8.0, cosmo = cosmo)
#plt.show()
sys.exit()
M = 10.**(np.arange(7,16,0.2))
R = np.arange(0.0005, 50.0,0.1)
#R = np.array([4,8,12])
#print sigma (8.0)
plt.figure()
	plt.plot(R, sigma(ps=(pk[:, 0], pk[:, 1]), R=R, cosmo=cosmo))
plt.xlabel("R ( Mpc /h )")
plt.ylabel(r'$\sigma (R)$')
plt.figure()
	plt.plot(M, filterradiusformass(M, cosmo=cosmo))
plt.xscale('log')
plt.xlabel("M ")
plt.ylabel(r'$R(M) Mpc $')
plt.figure()
	sigM = sigmaM(M, ps=(pk[:, 0], pk[:, 1]), cosmo=cosmo)
	plt.plot(M, sigM, "o")
	plt.plot(M, 1. / sigM, 'o')
plt.xlabel("M ")
plt.ylabel(r'$\sigma (M)$')
plt.xscale('log')
plt.figure()
	plt.plot(M, -dlnsigmadlnM(M, ps=(pk[:, 0], pk[:, 1]), cosmo=cosmo), "o")
plt.xlabel(r'$M (M_\odot$')
plt.ylabel(r'$\frac{d ln \sigma^{-1}}{d ln(M)}$')
plt.xscale('log')
plt.tight_layout()
	plt.savefig("dlninvsigmadlnM.pdf")
plt.figure()
	#plt.plot(1. / sigM, dndlnM(M, ps=(pk[:, 0], pk[:, 1]), cosmo=cosmo), "o")
	plt.plot(M, dndlnM(M, ps=(pk[:, 0], pk[:, 1]), cosmo=cosmo), "o")
plt.xscale('log')
plt.yscale('log')
plt.show()
#print filterradiusformass ( M =
#plt.show()
| mit | -3,501,540,544,829,631,000 | 24.672362 | 127 | 0.65393 | false |
necroguemancer/google-play-discord-bot | utils.py | 1 | 1935 | from time import sleep, strftime
from datetime import datetime
# from requests.packages.urllib3.exceptions import InsecureRequestWarning
import requests, random
try:
from faker import Faker
fake = Faker()
except Exception:
print("Run \"pip install Faker\" using the correct pip path and you should be fine.")
# import sys; sys.exit(1)
def string_to_dict(headers):
headers_dict = {}
for line in headers.split("\n"):
if not line: continue
line = line.strip()
key, *values = line.split(" ")
key = key[:-1]
if not (key and values): continue
headers_dict[key] = " ".join(values)
return headers_dict
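# Illustrative usage sketch (hypothetical header values): string_to_dict turns
# raw "Key: value" header text, e.g. pasted from a browser's network tab, into
# a requests-compatible dict.
def _example_string_to_dict():
	raw = ("Host: play.google.com\n"
		"User-Agent: Mozilla/5.0\n"
		"Accept-Language: en-US")
	return string_to_dict(raw)
	# -> {'Host': 'play.google.com', 'User-Agent': 'Mozilla/5.0',
	#     'Accept-Language': 'en-US'}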
def get_time():
return "[" + strftime("%m/%d %H:%M:%S") + "]"
def dump(r):
with open("dump.html", "w") as f:
f.write(str(r))
def clean(text):
return ''.join([i if ord(i) < 128 else ' ' for i in text])
class ThreadManager(object):
"""docstring for ThreadManager"""
def __init__(self, MAX_THREADS = 30, MESSAGES = False, TIME = True):
super(ThreadManager, self).__init__()
self.MAX_THREADS = MAX_THREADS
self.MESSAGES = MESSAGES
self.TIME = TIME
self.threads = []
def load(self, thread):
self.threads.append(thread)
def clear(self):
self.threads = []
def start(self):
start_time = datetime.now()
THREAD_COUNT = 0
for t in self.threads:
t.daemon = True
t.start()
THREAD_COUNT += 1
if THREAD_COUNT >= self.MAX_THREADS:
if self.MESSAGES:
print("Waiting for a thread to end.")
t.join()
if self.MESSAGES:
print("Starting a new thread now.")
THREAD_COUNT -= 1
if self.MESSAGES:
print("Waiting for all threads to end.")
for t in self.threads:
t.join()
if self.TIME:
print(datetime.now() - start_time)
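# Illustrative usage sketch (hypothetical worker function): queue a batch of
# threads and let ThreadManager cap how many are started before waiting.
def _example_thread_manager():
	from threading import Thread

	def _work(n):
		sleep(0.05)  # `sleep` comes from the module-level import above

	manager = ThreadManager(MAX_THREADS=5, MESSAGES=False)
	for i in range(20):
		manager.load(Thread(target=_work, args=(i,)))
	manager.start()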
def get_user_agent():
return fake.user_agent()
def get_random_name():
return "{}{}{}".format(fake.first_name(), fake.last_name(), random.randint(1, 100))
# requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
| gpl-3.0 | 4,605,339,655,682,512,000 | 23.493671 | 86 | 0.663049 | false |
kparal/anaconda | tests/dracut_tests/test_driver_updates.py | 1 | 26143 | # test_driver_updates.py - unittests for driver_updates.py
# Ignore any interruptible calls
# pylint: disable=interruptible-system-call
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
import os
import tempfile
import shutil
import sys
sys.path.append(os.path.normpath(os.path.dirname(__file__)+'/../../dracut'))
from driver_updates import copy_files, move_files, iter_files, ensure_dir
from driver_updates import append_line, mkdir_seq
def touch(path):
    try:
        with open(path, 'a'):
            pass
    except IOError as e:
        if e.errno != 17:  # 17 == EEXIST
            raise
def makedir(path):
ensure_dir(path)
return path
def makefile(path):
makedir(os.path.dirname(path))
touch(path)
return path
def makefiles(*paths):
return [makefile(p) for p in paths]
class FileTestCaseBase(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp(prefix="test_driver_updates.")
self.srcdir = self.tmpdir+'/src/'
self.destdir = self.tmpdir+'/dest/'
def tearDown(self):
shutil.rmtree(self.tmpdir, ignore_errors=True)
def makefiles(self, *paths):
return [makefile(os.path.normpath(self.tmpdir+'/'+p)) for p in paths]
class SelfTestCase(FileTestCaseBase):
def test_makefiles(self):
"""check test helpers"""
filepaths = ["sub/dir/test.file", "testfile"]
self.makefiles(*filepaths)
for f in filepaths:
self.assertTrue(os.path.exists(self.tmpdir+'/'+f))
class TestCopyFiles(FileTestCaseBase):
def test_basic(self):
"""copy_file: copy files into destdir, leaving existing contents"""
files = self.makefiles("src/file1", "src/subdir/file2")
self.makefiles("dest/file3")
copy_files(files, self.destdir)
result = set(os.listdir(self.destdir))
self.assertEqual(result, set(["file1", "file2", "file3"]))
def test_overwrite(self):
"""copy_file: overwrite files in destdir if they have the same name"""
src, dest = self.makefiles("src/file1", "dest/file1")
with open(src, 'w') as outf:
outf.write("srcfile")
with open(dest, 'w') as outf:
outf.write("destfile")
copy_files([src], self.destdir)
self.assertEqual(os.listdir(self.destdir), ["file1"])
self.assertEqual(open(dest).read(), "srcfile")
def test_samefile(self):
"""copy_file: skip files already in destdir"""
(dest,) = self.makefiles("dest/file1")
with open(dest, 'w') as outf:
outf.write("destfile")
copy_files([dest], self.destdir)
self.assertEqual(os.listdir(self.destdir), ["file1"])
self.assertEqual(open(dest).read(), "destfile")
def test_copy_to_parent(self):
"""copy_file: skip files in subdirs of destdir"""
files = self.makefiles("dest/subdir/file1")
copy_files(files, self.destdir)
self.assertEqual(list(iter_files(self.destdir)), files)
class TestIterFiles(FileTestCaseBase):
def test_basic(self):
"""iter_files: iterates over full paths to files under topdir"""
files = set(self.makefiles("src/file1", "dest/file2", "src/sub/file3"))
makedir(self.tmpdir+'/empty/dir')
result = set(iter_files(self.tmpdir))
self.assertEqual(files, result)
def test_pattern(self):
"""iter_files: match filename against glob pattern"""
self.makefiles("src/file1.so", "src/sub.ko/file2")
goodfiles = set(self.makefiles("src/sub/file1.ko", "src/file2.ko.xz"))
result = set(iter_files(self.tmpdir, pattern="*.ko*"))
self.assertEqual(result, goodfiles)
class TestMoveFiles(FileTestCaseBase):
def test_basic(self):
"""move_files: move files to destdir"""
files = self.makefiles("src/file1", "src/subdir/file2")
move_files(files, self.destdir)
self.assertEqual(set(os.listdir(self.destdir)), set(["file1", "file2"]))
self.assertEqual(list(iter_files(self.srcdir)), [])
def test_overwrite(self):
"""move_files: overwrite files with the same name"""
src, dest = self.makefiles("src/file1", "dest/file1")
with open(src, 'w') as outf:
outf.write("srcfile")
with open(dest, 'w') as outf:
outf.write("destfile")
move_files([src], self.destdir)
self.assertEqual(os.listdir(self.destdir), ["file1"])
self.assertEqual(open(dest).read(), "srcfile")
self.assertEqual(list(iter_files(self.srcdir)), [])
def test_samefile(self):
"""move_files: leave files alone if they're already in destdir"""
(dest,) = self.makefiles("dest/file1")
with open(dest, 'w') as outf:
outf.write("destfile")
move_files([dest], self.destdir)
self.assertEqual(os.listdir(self.destdir), ["file1"])
self.assertEqual(open(dest).read(), "destfile")
def test_move_to_parent(self):
"""move_files: leave files alone if they're in a subdir of destdir"""
files = set(self.makefiles("dest/subdir/file1", "dest/file2"))
move_files(files, self.destdir)
self.assertEqual(set(iter_files(self.destdir)), files)
class TestAppendLine(FileTestCaseBase):
def test_empty(self):
"""append_line: create file + append \\n when needed"""
line = "this is a line of text with no newline"
outfile = self.tmpdir+'/outfile'
append_line(outfile, line)
self.assertEqual(open(outfile).read(), line+'\n')
def test_append(self):
"""append_line: adds a line to the end of an existing file"""
oldlines = ["line one", "line two", "and I'm line three"]
outfile = self.tmpdir+'/outfile'
with open(outfile, 'w') as outf:
for line in oldlines:
outf.write(line+'\n')
line = "this line contains a newline already\n"
append_line(outfile, line)
self.assertEqual(open(outfile).read(), '\n'.join(oldlines+[line]))
from driver_updates import read_lines
class TestReadLine(FileTestCaseBase):
def test_empty(self):
"""read_lines: return [] for empty file"""
[empty] = self.makefiles("emptyfile")
self.assertEqual(read_lines(empty), [])
def test_missing(self):
"""read_lines: return [] for missing file"""
self.assertEqual(read_lines(self.tmpdir+'/no-such-file'),[])
def test_readlines(self):
"""read_lines: returns a list of lines without trailing newlines"""
filedata = 'line one\nline two\n\nline four\n'
outfile = self.tmpdir+'/outfile'
with open(outfile, 'w') as outf:
outf.write(filedata)
lines = read_lines(outfile)
self.assertEqual(lines, ['line one', 'line two','','line four'])
def test_readline_and_append_line(self):
"""read_lines: returns items as passed to append_line"""
filename = self.tmpdir+'/outfile'
items = ["one", "two", "five"]
for i in items:
append_line(filename, i)
self.assertEqual(items, read_lines(filename))
class TestMkdirSeq(FileTestCaseBase):
def test_basic(self):
"""mkdir_seq: first dir ends with 1"""
newdir = mkdir_seq(self.srcdir+'/DD-')
self.assertEqual(newdir, self.srcdir+'/DD-1')
self.assertTrue(os.path.isdir(newdir))
def test_one_exists(self):
"""mkdir_seq: increment number if file exists"""
firstdir = mkdir_seq(self.srcdir+'/DD-')
newdir = mkdir_seq(self.srcdir+'/DD-')
self.assertEqual(newdir, self.srcdir+'/DD-2')
self.assertTrue(os.path.isdir(newdir))
self.assertTrue(os.path.isdir(firstdir))
from driver_updates import find_repos, save_repo, ARCH
# As far as we know, this is what makes a valid repo: rhdd3 + rpms/`uname -m`/
def makerepo(topdir, desc=None):
descfile = makefile(topdir+'/rhdd3')
if not desc:
desc = os.path.basename(topdir)
with open(descfile, "w") as outf:
outf.write(desc+"\n")
makedir(topdir+'/rpms/'+ARCH)
class TestFindRepos(FileTestCaseBase):
def test_basic(self):
"""find_repos: return RPM dir if a valid repo is found"""
makerepo(self.tmpdir)
repos = find_repos(self.tmpdir)
self.assertEqual(repos, [self.tmpdir+'/rpms/'+ARCH])
self.assertTrue(os.path.isdir(repos[0]))
def test_multiple_subdirs(self):
"""find_repos: descend multiple subdirs if needed"""
makerepo(self.tmpdir+'/driver1')
makerepo(self.tmpdir+'/sub/driver1')
makerepo(self.tmpdir+'/sub/driver2')
repos = find_repos(self.tmpdir)
self.assertEqual(len(repos),3)
class TestSaveRepo(FileTestCaseBase):
def test_basic(self):
"""save_repo: copies a directory to /run/install/DD-X"""
makerepo(self.srcdir)
repo = find_repos(self.srcdir)[0]
makefile(repo+'/fake-something.rpm')
saved = save_repo(repo, target=self.destdir)
self.assertEqual(set(os.listdir(saved)), set(["fake-something.rpm"]))
self.assertEqual(saved, os.path.join(self.destdir, "DD-1"))
from driver_updates import mount, umount, mounted
class MountTestCase(unittest.TestCase):
@mock.patch('driver_updates.mkdir_seq')
@mock.patch('driver_updates.subprocess.check_call')
def test_mkdir(self, check_call, mkdir):
"""mount: makes mountpoint if needed"""
dev, mnt = '/dev/fake', '/media/DD-1'
mkdir.return_value = mnt
mountpoint = mount(dev)
mkdir.assert_called_once_with('/media/DD-')
check_call.assert_called_once_with(["mount", dev, mnt])
self.assertEqual(mnt, mountpoint)
@mock.patch('driver_updates.mkdir_seq')
@mock.patch('driver_updates.subprocess.check_call')
def test_basic(self, check_call, mkdir):
"""mount: calls mount(8) to mount a device/image"""
dev, mnt = '/dev/fake', '/media/fake'
mount(dev, mnt)
check_call.assert_called_once_with(["mount", dev, mnt])
self.assertFalse(mkdir.called)
@mock.patch('driver_updates.subprocess.call')
def test_umount(self, call):
"""umount: calls umount(8)"""
mnt = '/mnt/fake'
umount(mnt)
call.assert_called_once_with(["umount", mnt])
@mock.patch('driver_updates.mount')
@mock.patch('driver_updates.umount')
def test_mount_manager(self, mock_umount, mock_mount):
"""mounted: context manager mounts/umounts as expected"""
dev, mnt = '/dev/fake', '/media/fake'
mock_mount.return_value = mnt
with mounted(dev, mnt) as mountpoint:
mock_mount.assert_called_once_with(dev, mnt)
self.assertFalse(mock_umount.called)
self.assertEqual(mountpoint, mnt)
mock_umount.assert_called_once_with(mnt)
# NOTE: dd_list and dd_extract get tested pretty thoroughly in tests/dd_tests,
# so this is a slightly higher-level test case
from driver_updates import dd_list, dd_extract, Driver
fake_module = Driver(
source='/repo/path/to/fake-driver-1.0-1.rpm',
name='fake-driver',
flags='modules firmwares',
description='Wow this is totally a fake driver.\nHooray for this',
repo='/repo/path/to'
)
fake_enhancement = Driver(
source='/repo/path/to/fake-enhancement-1.0-1.rpm',
name='fake-enhancement',
flags='binaries libraries',
description='This is enhancing the crap out of the installer.\n\nYeah.',
repo=fake_module.repo
)
def dd_list_output(driver):
out='{0.source}\n{0.name}\n{0.flags}\n{0.description}\n---\n'.format(driver)
return out.encode('utf-8')
class DDUtilsTestCase(unittest.TestCase):
@mock.patch("driver_updates.subprocess.check_output")
def test_dd_list(self, check_output):
"""dd_list: returns a list of Driver objects parsed from output"""
output = dd_list_output(fake_module)+dd_list_output(fake_enhancement)
check_output.return_value = output
anaconda, kernel = '19.0', os.uname()[2]
result = dd_list(fake_module.repo)
cmd = check_output.call_args[0][0]
self.assertIn(kernel, cmd)
self.assertIn(anaconda, cmd)
self.assertIn(fake_module.repo, cmd)
self.assertTrue(cmd[0].endswith("dd_list"))
self.assertEqual(len(result), 2)
mod, enh = sorted(result, key=lambda d: d.name)
self.assertEqual(mod.__dict__, fake_module.__dict__)
self.assertEqual(enh.__dict__, fake_enhancement.__dict__)
@mock.patch("driver_updates.subprocess.check_output")
def test_dd_extract(self, check_output):
"""dd_extract: call binary with expected arguments"""
rpm = "/some/kind/of/path.rpm"
outdir = "/output/dir"
dd_extract(rpm, outdir)
cmd = check_output.call_args[0][0]
self.assertIn(os.uname()[2], cmd)
self.assertIn(rpm, cmd)
self.assertIn(outdir, cmd)
self.assertIn("-blmf", cmd)
self.assertTrue(cmd[0].endswith("dd_extract"))
from driver_updates import extract_drivers, grab_driver_files, load_drivers
@mock.patch("driver_updates.ensure_dir")
@mock.patch("driver_updates.save_repo")
@mock.patch("driver_updates.append_line")
@mock.patch("driver_updates.dd_extract")
class ExtractDriversTestCase(unittest.TestCase):
def test_drivers(self, mock_extract, mock_append, mock_save, *args):
"""extract_drivers: save repo, write pkglist"""
extract_drivers(drivers=[fake_enhancement, fake_module])
# extracts all listed modules
mock_extract.assert_has_calls([
mock.call(fake_enhancement.source, "/updates"),
mock.call(fake_module.source, "/updates")
], any_order=True)
pkglist = "/run/install/dd_packages"
mock_append.assert_called_once_with(pkglist, fake_module.name)
mock_save.assert_called_once_with(fake_module.repo)
def test_enhancements(self, mock_extract, mock_append, mock_save, *args):
"""extract_drivers: extract selected drivers, don't save enhancements"""
extract_drivers(drivers=[fake_enhancement])
mock_extract.assert_called_once_with(
fake_enhancement.source, "/updates"
)
self.assertFalse(mock_append.called)
self.assertFalse(mock_save.called)
def test_repo(self, mock_extract, mock_append, mock_save, *args):
"""extract_drivers(repos=[...]) extracts all drivers from named repos"""
with mock.patch("driver_updates.dd_list", side_effect=[
[fake_enhancement],
[fake_enhancement, fake_module]]):
extract_drivers(repos=['enh_repo', 'mod_repo'])
mock_extract.assert_has_calls([
mock.call(fake_enhancement.source, "/updates"),
mock.call(fake_enhancement.source, "/updates"),
mock.call(fake_module.source, "/updates")
])
pkglist = "/run/install/dd_packages"
mock_append.assert_called_once_with(pkglist, fake_module.name)
mock_save.assert_called_once_with(fake_module.repo)
class GrabDriverFilesTestCase(FileTestCaseBase):
def test_basic(self):
"""grab_driver_files: copy drivers into place, return module list"""
# create a bunch of fake extracted files
outdir = self.tmpdir + '/extract-outdir'
moddir = outdir + "/lib/modules/%s/kernel/" % os.uname()[2]
fwdir = outdir + "/lib/firmware/"
modules = makefiles(moddir+"net/funk.ko", moddir+"fs/lolfs.ko.xz")
firmware = makefiles(fwdir+"funk.fw")
makefiles(outdir+"/usr/bin/monkey", outdir+"/other/dir/blah.ko")
mod_upd_dir = self.tmpdir+'/module-updates'
fw_upd_dir = self.tmpdir+'/fw-updates'
# use our updates dirs instead of the default updates dirs
with mock.patch.multiple("driver_updates",
MODULE_UPDATES_DIR=mod_upd_dir,
FIRMWARE_UPDATES_DIR=fw_upd_dir):
modnames = grab_driver_files(outdir)
self.assertEqual(set(modnames), set(["funk", "lolfs"]))
modfiles = set(['funk.ko', 'lolfs.ko.xz'])
fwfiles = set(['funk.fw'])
# modules/firmware are *not* in their old locations
self.assertEqual([f for f in modules+firmware if os.path.exists(f)], [])
# modules are in the system's updates dir
self.assertEqual(set(os.listdir(mod_upd_dir)), modfiles)
# modules are also in outdir's updates dir
self.assertEqual(set(os.listdir(outdir+'/'+mod_upd_dir)), modfiles)
# repeat for firmware
self.assertEqual(set(os.listdir(fw_upd_dir)), fwfiles)
self.assertEqual(set(os.listdir(outdir+'/'+fw_upd_dir)), fwfiles)
class LoadDriversTestCase(unittest.TestCase):
@mock.patch("driver_updates.subprocess.call")
def test_basic(self, call):
"""load_drivers: runs depmod and modprobes all named modules"""
modnames = ['mod1', 'mod2']
load_drivers(modnames)
call.assert_has_calls([
mock.call(["depmod", "-a"]),
mock.call(["modprobe", "-a"] + modnames)
])
from driver_updates import process_driver_disk
class ProcessDriverDiskTestCase(unittest.TestCase):
def setUp(self):
# an iterable that returns fake mountpoints, for mocking mount()
self.fakemount = ["/mnt/DD-%i" % n for n in range(1,10)]
# an iterable that returns fake repos, for mocking find_repos()
self.frepo = {
'/mnt/DD-1': ['/mnt/DD-1/repo1'],
'/mnt/DD-2': ['/mnt/DD-2/repo1', '/mnt/DD-2/repo2'],
}
# fake iso listings for iso_dir
self.fiso = {
'/mnt/DD-1': [],
'/mnt/DD-2': [],
'/mnt/DD-3': [],
}
# a context-manager object to be returned by the mock mounted()
mounted_ctx = mock.MagicMock(
__enter__=mock.MagicMock(side_effect=self.fakemount), # mount
__exit__=mock.MagicMock(return_value=None), # umount
)
self.modlist = []
# set up our patches
patches = (
mock.patch("driver_updates.mounted", return_value=mounted_ctx),
mock.patch("driver_updates.find_repos", side_effect=self.frepo.get),
mock.patch("driver_updates.find_isos", side_effect=self.fiso.get),
mock.patch("driver_updates.extract_drivers", return_value=True),
mock.patch("driver_updates.load_drivers"),
mock.patch('driver_updates.grab_driver_files',
side_effect=lambda: self.modlist),
)
self.mocks = {p.attribute:p.start() for p in patches}
for p in patches: self.addCleanup(p.stop)
def test_basic(self):
"""process_driver_disk: mount disk, extract RPMs, grab + load drivers"""
dev = '/dev/fake'
process_driver_disk(dev)
# did we mount the initial device, and then the .iso we find therein?
self.mocks['mounted'].assert_called_once_with(dev)
self.mocks['extract_drivers'].assert_called_once_with(repos=self.frepo['/mnt/DD-1'])
self.mocks['grab_driver_files'].assert_called_once_with()
self.mocks['load_drivers'].assert_called_once_with(self.modlist)
def test_recursive(self):
"""process_driver_disk: recursively process .isos at toplevel"""
dev = '/dev/fake'
# first mount has no repos, but an iso
self.frepo['/mnt/DD-1'] = []
self.fiso['/mnt/DD-1'].append('magic.iso')
self.fiso['/mnt/DD-2'].append('ignored.iso')
process_driver_disk(dev)
# did we mount the initial device, and the iso therein?
# also: we ignore ignored.iso because magic.iso is a proper DD
self.mocks['mounted'].assert_has_calls([
mock.call(dev), mock.call('magic.iso')
])
# we extracted drivers from the repo(s) in magic.iso
self.mocks['extract_drivers'].assert_called_once_with(repos=self.frepo['/mnt/DD-2'])
self.mocks['grab_driver_files'].assert_called_once_with()
self.mocks['load_drivers'].assert_called_once_with(self.modlist)
def test_no_drivers(self):
"""process_driver_disk: don't run depmod etc. if no new drivers"""
dev = '/dev/fake'
self.mocks['extract_drivers'].return_value = False
process_driver_disk(dev)
self.assertFalse(self.mocks['grab_driver_files'].called)
self.assertFalse(self.mocks['load_drivers'].called)
from driver_updates import finish, mark_finished, all_finished
class FinishedTestCase(FileTestCaseBase):
def test_mark_finished(self):
"""mark_finished: appends a line to /tmp/dd_finished"""
requeststr = "WOW SOMETHING OR OTHER"
mark_finished(requeststr, topdir=self.tmpdir)
finished = self.tmpdir+'/dd_finished'
self.assertTrue(os.path.exists(finished))
self.assertEqual(read_lines(finished), [requeststr])
def test_all_finished(self):
"""all_finished: True if all lines from dd_todo are in dd_finished"""
todo = self.tmpdir+'/dd_todo'
requests = ['one', 'two', 'final thingy']
with open(todo, 'w') as outf:
outf.write(''.join(r+'\n' for r in requests))
self.assertEqual(set(read_lines(todo)), set(requests))
for r in reversed(requests):
self.assertFalse(all_finished(topdir=self.tmpdir))
mark_finished(r, topdir=self.tmpdir)
self.assertTrue(all_finished(topdir=self.tmpdir))
def test_extra_finished(self):
"""all_finished: True if dd_finished has more items than dd_todo"""
self.test_all_finished()
mark_finished("BONUS", topdir=self.tmpdir)
self.assertTrue(all_finished(topdir=self.tmpdir))
def test_finish(self):
"""finish: mark request finished, and write dd.done if all complete"""
todo = self.tmpdir+'/dd_todo'
done = self.tmpdir+'/dd.done'
requests = ['one', 'two', 'final thingy']
with open(todo, 'w') as outf:
outf.write(''.join(r+'\n' for r in requests))
for r in reversed(requests):
print("marking %s" % r)
self.assertFalse(os.path.exists(done))
finish(r, topdir=self.tmpdir)
self.assertTrue(os.path.exists(done))
from driver_updates import get_deviceinfo, DeviceInfo
blkid_out = b'''\
DEVNAME=/dev/sda2
UUID=0f21a3d1-dcd3-4ab4-a292-c5556850d561
TYPE=ext4
DEVNAME=/dev/sda1
UUID=C53C-EE46
TYPE=vfat
DEVNAME=/dev/sda3
UUID=4126dbb6-c7d3-47b4-b1fc-9bb461df0067
TYPE=btrfs
DEVNAME=/dev/loop0
UUID=6f16967e-0388-4276-bd8d-b88e5b217a55
TYPE=ext4
'''
disk_labels = {
'/dev/sdb1': 'metroid_srv',
'/dev/loop0': 'I\\x20\u262d\\x20COMMUNISM',
'/dev/sda3': 'metroid_root'
}
devicelist = [
DeviceInfo(DEVNAME='/dev/sda2', TYPE='ext4',
UUID='0f21a3d1-dcd3-4ab4-a292-c5556850d561'),
DeviceInfo(DEVNAME='/dev/sda1', TYPE='vfat',
UUID='C53C-EE46'),
DeviceInfo(DEVNAME='/dev/sda3', TYPE='btrfs', LABEL='metroid_root',
UUID='4126dbb6-c7d3-47b4-b1fc-9bb461df0067'),
DeviceInfo(DEVNAME='/dev/loop0', TYPE='ext4',
LABEL='I\\x20\u262d\\x20COMMUNISM',
UUID='6f16967e-0388-4276-bd8d-b88e5b217a55'),
]
# also covers blkid, get_disk_labels, DeviceInfo
class DeviceInfoTestCase(unittest.TestCase):
@mock.patch('driver_updates.subprocess.check_output',return_value=blkid_out)
@mock.patch('driver_updates.get_disk_labels',return_value=disk_labels)
def test_basic(self, get_disk_labels, check_output):
"""get_deviceinfo: parses DeviceInfo from blkid etc."""
disks = get_deviceinfo()
self.assertEqual(len(disks), 4)
disks.sort(key=lambda d: d.device)
loop, efi, boot, root = disks
self.assertEqual(vars(boot), vars(devicelist[0]))
self.assertEqual(vars(efi), vars(devicelist[1]))
self.assertEqual(vars(root), vars(devicelist[2]))
self.assertEqual(vars(loop), vars(devicelist[3]))
def test_shortdev(self):
d = DeviceInfo(DEVNAME="/dev/disk/by-label/OEMDRV")
with mock.patch("os.path.realpath", return_value="/dev/i2o/hdb"):
self.assertEqual(d.shortdev, "i2o/hdb")
# TODO: test TextMenu itself
# py2/3 compat
if sys.version_info.major == 3:
from io import StringIO
else:
from io import BytesIO as StringIO
from driver_updates import device_menu
class DeviceMenuTestCase(unittest.TestCase):
def setUp(self):
patches = (
mock.patch('driver_updates.get_deviceinfo',return_value=devicelist),
)
self.mocks = {p.attribute:p.start() for p in patches}
for p in patches: self.addCleanup(p.stop)
def test_device_menu_exit(self):
"""device_menu: 'c' exits the menu"""
with mock.patch('driver_updates._input', side_effect=['c']):
dev = device_menu()
self.assertEqual(dev, [])
self.assertEqual(self.mocks['get_deviceinfo'].call_count, 1)
def test_device_menu_refresh(self):
"""device_menu: 'r' makes the menu refresh"""
with mock.patch('driver_updates._input', side_effect=['r','c']):
device_menu()
self.assertEqual(self.mocks['get_deviceinfo'].call_count, 2)
@mock.patch("sys.stdout", new_callable=StringIO)
def test_device_menu(self, stdout):
"""device_menu: choosing a number returns that Device"""
choose_num='2'
with mock.patch('driver_updates._input', return_value=choose_num):
result = device_menu()
# if you hit '2' you should get the corresponding device from the list
self.assertEqual(len(result), 1)
dev = result[0]
self.assertEqual(vars(dev), vars(devicelist[int(choose_num)-1]))
# find the corresponding line on-screen
screen = [l.strip() for l in stdout.getvalue().splitlines()]
match = [l for l in screen if l.startswith(choose_num+')')]
self.assertEqual(len(match), 1)
line = match.pop(0)
# the device name (at least) should be on this line
self.assertIn(os.path.basename(dev.device), line)
| gpl-2.0 | 3,576,363,055,649,127,000 | 40.300158 | 92 | 0.628467 | false |
googleapis/python-dataflow-client | google/cloud/dataflow_v1beta3/types/metrics.py | 1 | 16973 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.dataflow.v1beta3",
manifest={
"ExecutionState",
"MetricStructuredName",
"MetricUpdate",
"GetJobMetricsRequest",
"JobMetrics",
"GetJobExecutionDetailsRequest",
"ProgressTimeseries",
"StageSummary",
"JobExecutionDetails",
"GetStageExecutionDetailsRequest",
"WorkItemDetails",
"WorkerDetails",
"StageExecutionDetails",
},
)
class ExecutionState(proto.Enum):
r"""The state of some component of job execution."""
EXECUTION_STATE_UNKNOWN = 0
EXECUTION_STATE_NOT_STARTED = 1
EXECUTION_STATE_RUNNING = 2
EXECUTION_STATE_SUCCEEDED = 3
EXECUTION_STATE_FAILED = 4
EXECUTION_STATE_CANCELLED = 5
class MetricStructuredName(proto.Message):
r"""Identifies a metric, by describing the source which generated
the metric.
Attributes:
origin (str):
Origin (namespace) of metric name. May be
blank for user-define metrics; will be
"dataflow" for metrics defined by the Dataflow
service or SDK.
name (str):
Worker-defined metric name.
context (Sequence[google.cloud.dataflow_v1beta3.types.MetricStructuredName.ContextEntry]):
Zero or more labeled fields which identify the part of the
job this metric is associated with, such as the name of a
step or collection.
For example, built-in counters associated with steps will
            have context['step'] = <step-name>. Counters associated with
            PCollections in the SDK will have
            context['pcollection'] = <pcollection-name>.
"""
origin = proto.Field(proto.STRING, number=1,)
name = proto.Field(proto.STRING, number=2,)
context = proto.MapField(proto.STRING, proto.STRING, number=3,)
class MetricUpdate(proto.Message):
r"""Describes the state of a metric.
Attributes:
name (google.cloud.dataflow_v1beta3.types.MetricStructuredName):
Name of the metric.
kind (str):
Metric aggregation kind. The possible metric
aggregation kinds are "Sum", "Max", "Min",
"Mean", "Set", "And", "Or", and "Distribution".
The specified aggregation kind is case-
insensitive.
If omitted, this is not an aggregated value but
instead a single metric sample value.
cumulative (bool):
True if this metric is reported as the total
cumulative aggregate value accumulated since the
worker started working on this WorkItem. By
default this is false, indicating that this
metric is reported as a delta that is not
associated with any WorkItem.
scalar (google.protobuf.struct_pb2.Value):
Worker-computed aggregate value for
aggregation kinds "Sum", "Max", "Min", "And",
and "Or". The possible value types are Long,
Double, and Boolean.
mean_sum (google.protobuf.struct_pb2.Value):
Worker-computed aggregate value for the "Mean" aggregation
kind. This holds the sum of the aggregated values and is
used in combination with mean_count below to obtain the
actual mean aggregate value. The only possible value types
are Long and Double.
mean_count (google.protobuf.struct_pb2.Value):
Worker-computed aggregate value for the "Mean" aggregation
kind. This holds the count of the aggregated values and is
used in combination with mean_sum above to obtain the actual
mean aggregate value. The only possible value type is Long.
set_ (google.protobuf.struct_pb2.Value):
Worker-computed aggregate value for the "Set"
aggregation kind. The only possible value type
is a list of Values whose type can be Long,
Double, or String, according to the metric's
type. All Values in the list must be of the
same type.
distribution (google.protobuf.struct_pb2.Value):
A struct value describing properties of a
distribution of numeric values.
gauge (google.protobuf.struct_pb2.Value):
A struct value describing properties of a
Gauge. Metrics of gauge type show the value of a
metric across time, and is aggregated based on
the newest value.
internal (google.protobuf.struct_pb2.Value):
Worker-computed aggregate value for internal
use by the Dataflow service.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Timestamp associated with the metric value.
Optional when workers are reporting work
progress; it will be filled in responses from
the metrics API.
"""
name = proto.Field(proto.MESSAGE, number=1, message="MetricStructuredName",)
kind = proto.Field(proto.STRING, number=2,)
cumulative = proto.Field(proto.BOOL, number=3,)
scalar = proto.Field(proto.MESSAGE, number=4, message=struct_pb2.Value,)
mean_sum = proto.Field(proto.MESSAGE, number=5, message=struct_pb2.Value,)
mean_count = proto.Field(proto.MESSAGE, number=6, message=struct_pb2.Value,)
set_ = proto.Field(proto.MESSAGE, number=7, message=struct_pb2.Value,)
distribution = proto.Field(proto.MESSAGE, number=11, message=struct_pb2.Value,)
gauge = proto.Field(proto.MESSAGE, number=12, message=struct_pb2.Value,)
internal = proto.Field(proto.MESSAGE, number=8, message=struct_pb2.Value,)
update_time = proto.Field(proto.MESSAGE, number=9, message=timestamp_pb2.Timestamp,)
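# Illustrative usage sketch (hypothetical values): building a structured name
# with step context and a cumulative "Sum" update; the step and metric names
# below are made up.
# 	name = MetricStructuredName(
# 	    origin="dataflow",
# 	    name="ElementCount",
# 	    context={"step": "s2"},
# 	)
# 	update = MetricUpdate(
# 	    name=name,
# 	    kind="Sum",
# 	    cumulative=True,
# 	    scalar=struct_pb2.Value(number_value=1234.0),
# 	)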
class GetJobMetricsRequest(proto.Message):
r"""Request to get job metrics.
Attributes:
project_id (str):
A project id.
job_id (str):
The job to get metrics for.
start_time (google.protobuf.timestamp_pb2.Timestamp):
Return only metric data that has changed
since this time. Default is to return all
information about all metrics for the job.
location (str):
The [regional endpoint]
(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints)
that contains the job specified by job_id.
"""
project_id = proto.Field(proto.STRING, number=1,)
job_id = proto.Field(proto.STRING, number=2,)
start_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
location = proto.Field(proto.STRING, number=4,)
class JobMetrics(proto.Message):
r"""JobMetrics contains a collection of metrics describing the
detailed progress of a Dataflow job. Metrics correspond to user-
defined and system-defined metrics in the job.
This resource captures only the most recent values of each
metric; time-series data can be queried for them (under the same
metric names) from Cloud Monitoring.
Attributes:
metric_time (google.protobuf.timestamp_pb2.Timestamp):
Timestamp as of which metric values are
current.
metrics (Sequence[google.cloud.dataflow_v1beta3.types.MetricUpdate]):
All metrics for this job.
"""
metric_time = proto.Field(proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp,)
metrics = proto.RepeatedField(proto.MESSAGE, number=2, message="MetricUpdate",)
class GetJobExecutionDetailsRequest(proto.Message):
r"""Request to get job execution details.
Attributes:
project_id (str):
A project id.
job_id (str):
The job to get execution details for.
location (str):
The [regional endpoint]
(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints)
that contains the job specified by job_id.
page_size (int):
If specified, determines the maximum number
of stages to return. If unspecified, the
service may choose an appropriate default, or
may return an arbitrarily large number of
results.
page_token (str):
If supplied, this should be the value of next_page_token
returned by an earlier call. This will cause the next page
of results to be returned.
"""
project_id = proto.Field(proto.STRING, number=1,)
job_id = proto.Field(proto.STRING, number=2,)
location = proto.Field(proto.STRING, number=3,)
page_size = proto.Field(proto.INT32, number=4,)
page_token = proto.Field(proto.STRING, number=5,)
class ProgressTimeseries(proto.Message):
r"""Information about the progress of some component of job
execution.
Attributes:
current_progress (float):
The current progress of the component, in the range [0,1].
data_points (Sequence[google.cloud.dataflow_v1beta3.types.ProgressTimeseries.Point]):
History of progress for the component.
Points are sorted by time.
"""
class Point(proto.Message):
r"""A point in the timeseries.
Attributes:
time (google.protobuf.timestamp_pb2.Timestamp):
The timestamp of the point.
value (float):
The value of the point.
"""
time = proto.Field(proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp,)
value = proto.Field(proto.DOUBLE, number=2,)
current_progress = proto.Field(proto.DOUBLE, number=1,)
data_points = proto.RepeatedField(proto.MESSAGE, number=2, message=Point,)
class StageSummary(proto.Message):
r"""Information about a particular execution stage of a job.
Attributes:
stage_id (str):
ID of this stage
state (google.cloud.dataflow_v1beta3.types.ExecutionState):
State of this stage.
start_time (google.protobuf.timestamp_pb2.Timestamp):
Start time of this stage.
end_time (google.protobuf.timestamp_pb2.Timestamp):
End time of this stage.
If the work item is completed, this is the
actual end time of the stage. Otherwise, it is
the predicted end time.
progress (google.cloud.dataflow_v1beta3.types.ProgressTimeseries):
Progress for this stage.
Only applicable to Batch jobs.
metrics (Sequence[google.cloud.dataflow_v1beta3.types.MetricUpdate]):
Metrics for this stage.
"""
stage_id = proto.Field(proto.STRING, number=1,)
state = proto.Field(proto.ENUM, number=2, enum="ExecutionState",)
start_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
end_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,)
progress = proto.Field(proto.MESSAGE, number=5, message="ProgressTimeseries",)
metrics = proto.RepeatedField(proto.MESSAGE, number=6, message="MetricUpdate",)
class JobExecutionDetails(proto.Message):
r"""Information about the execution of a job.
Attributes:
stages (Sequence[google.cloud.dataflow_v1beta3.types.StageSummary]):
The stages of the job execution.
next_page_token (str):
If present, this response does not contain all requested
tasks. To obtain the next page of results, repeat the
request with page_token set to this value.
"""
@property
def raw_page(self):
return self
stages = proto.RepeatedField(proto.MESSAGE, number=1, message="StageSummary",)
next_page_token = proto.Field(proto.STRING, number=2,)
class GetStageExecutionDetailsRequest(proto.Message):
r"""Request to get information about a particular execution stage
of a job. Currently only tracked for Batch jobs.
Attributes:
project_id (str):
A project id.
job_id (str):
The job to get execution details for.
location (str):
The [regional endpoint]
(https://cloud.google.com/dataflow/docs/concepts/regional-endpoints)
that contains the job specified by job_id.
stage_id (str):
The stage for which to fetch information.
page_size (int):
If specified, determines the maximum number
of work items to return. If unspecified, the
service may choose an appropriate default, or
may return an arbitrarily large number of
results.
page_token (str):
If supplied, this should be the value of next_page_token
returned by an earlier call. This will cause the next page
of results to be returned.
start_time (google.protobuf.timestamp_pb2.Timestamp):
Lower time bound of work items to include, by
start time.
end_time (google.protobuf.timestamp_pb2.Timestamp):
Upper time bound of work items to include, by
start time.
"""
project_id = proto.Field(proto.STRING, number=1,)
job_id = proto.Field(proto.STRING, number=2,)
location = proto.Field(proto.STRING, number=3,)
stage_id = proto.Field(proto.STRING, number=4,)
page_size = proto.Field(proto.INT32, number=5,)
page_token = proto.Field(proto.STRING, number=6,)
start_time = proto.Field(proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp,)
end_time = proto.Field(proto.MESSAGE, number=8, message=timestamp_pb2.Timestamp,)
class WorkItemDetails(proto.Message):
r"""Information about an individual work item execution.
Attributes:
task_id (str):
Name of this work item.
attempt_id (str):
Attempt ID of this work item
start_time (google.protobuf.timestamp_pb2.Timestamp):
Start time of this work item attempt.
end_time (google.protobuf.timestamp_pb2.Timestamp):
End time of this work item attempt.
If the work item is completed, this is the
actual end time of the work item. Otherwise, it
is the predicted end time.
state (google.cloud.dataflow_v1beta3.types.ExecutionState):
State of this work item.
progress (google.cloud.dataflow_v1beta3.types.ProgressTimeseries):
Progress of this work item.
metrics (Sequence[google.cloud.dataflow_v1beta3.types.MetricUpdate]):
Metrics for this work item.
"""
task_id = proto.Field(proto.STRING, number=1,)
attempt_id = proto.Field(proto.STRING, number=2,)
start_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
end_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,)
state = proto.Field(proto.ENUM, number=5, enum="ExecutionState",)
progress = proto.Field(proto.MESSAGE, number=6, message="ProgressTimeseries",)
metrics = proto.RepeatedField(proto.MESSAGE, number=7, message="MetricUpdate",)
class WorkerDetails(proto.Message):
r"""Information about a worker
Attributes:
worker_name (str):
Name of this worker
work_items (Sequence[google.cloud.dataflow_v1beta3.types.WorkItemDetails]):
Work items processed by this worker, sorted
by time.
"""
worker_name = proto.Field(proto.STRING, number=1,)
work_items = proto.RepeatedField(
proto.MESSAGE, number=2, message="WorkItemDetails",
)
class StageExecutionDetails(proto.Message):
r"""Information about the workers and work items within a stage.
Attributes:
workers (Sequence[google.cloud.dataflow_v1beta3.types.WorkerDetails]):
Workers that have done work on the stage.
next_page_token (str):
If present, this response does not contain all requested
tasks. To obtain the next page of results, repeat the
request with page_token set to this value.
"""
@property
def raw_page(self):
return self
workers = proto.RepeatedField(proto.MESSAGE, number=1, message="WorkerDetails",)
next_page_token = proto.Field(proto.STRING, number=2,)
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | -5,073,147,159,870,581,000 | 40.196602 | 98 | 0.654274 | false |
gam-phon/taiga-back | taiga/hooks/github/services.py | 1 | 1901 | # Copyright (C) 2014-2016 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2016 Jesús Espino <[email protected]>
# Copyright (C) 2014-2016 David Barragán <[email protected]>
# Copyright (C) 2014-2016 Alejandro Alonso <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import uuid
from django.core.urlresolvers import reverse
from taiga.users.models import User
from taiga.users.models import AuthData
from taiga.base.utils.urls import get_absolute_url
# Set this in settings.PROJECT_MODULES_CONFIGURATORS["github"]
def get_or_generate_config(project):
config = project.modules_config.config
if config and "github" in config:
g_config = project.modules_config.config["github"]
else:
g_config = {"secret": uuid.uuid4().hex}
url = reverse("github-hook-list")
url = get_absolute_url(url)
url = "%s?project=%s" % (url, project.id)
g_config["webhooks_url"] = url
return g_config
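# Illustrative usage sketch (hypothetical): get_or_generate_config only builds
# the dict; persisting it back onto the project is up to the caller, e.g.
# 	config = project.modules_config.config or {}
# 	config["github"] = get_or_generate_config(project)
# 	project.modules_config.config = config
# 	project.modules_config.save()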
def get_github_user(github_id):
user = None
if github_id:
try:
user = AuthData.objects.get(key="github", value=github_id).user
except AuthData.DoesNotExist:
pass
if user is None:
user = User.objects.get(is_system=True, username__startswith="github")
return user
| agpl-3.0 | 6,165,237,721,531,637,000 | 34.166667 | 78 | 0.714587 | false |
postlund/home-assistant | homeassistant/components/harmony/remote.py | 1 | 14127 | """Support for Harmony Hub devices."""
import asyncio
import json
import logging
import aioharmony.exceptions as aioexc
from aioharmony.harmonyapi import (
ClientCallbackType,
HarmonyAPI as HarmonyClient,
SendCommandDevice,
)
import voluptuous as vol
from homeassistant.components import remote
from homeassistant.components.remote import (
ATTR_ACTIVITY,
ATTR_DELAY_SECS,
ATTR_DEVICE,
ATTR_HOLD_SECS,
ATTR_NUM_REPEATS,
DEFAULT_DELAY_SECS,
PLATFORM_SCHEMA,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_HOST,
CONF_NAME,
CONF_PORT,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.util import slugify
from .const import DOMAIN, SERVICE_CHANGE_CHANNEL, SERVICE_SYNC
_LOGGER = logging.getLogger(__name__)
ATTR_CHANNEL = "channel"
ATTR_CURRENT_ACTIVITY = "current_activity"
DEFAULT_PORT = 8088
DEVICES = []
CONF_DEVICE_CACHE = "harmony_device_cache"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(ATTR_ACTIVITY): cv.string,
vol.Required(CONF_NAME): cv.string,
vol.Optional(ATTR_DELAY_SECS, default=DEFAULT_DELAY_SECS): vol.Coerce(float),
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}
)
HARMONY_SYNC_SCHEMA = vol.Schema({vol.Optional(ATTR_ENTITY_ID): cv.entity_ids})
HARMONY_CHANGE_CHANNEL_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_CHANNEL): cv.positive_int,
}
)
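# Illustrative payloads (hypothetical entity id and channel) that validate
# against the service schemas above:
# 	HARMONY_SYNC_SCHEMA({"entity_id": ["remote.living_room_hub"]})
# 	HARMONY_CHANGE_CHANNEL_SCHEMA(
# 	    {"entity_id": ["remote.living_room_hub"], "channel": 5}
# 	)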
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Harmony platform."""
activity = None
if CONF_DEVICE_CACHE not in hass.data:
hass.data[CONF_DEVICE_CACHE] = []
if discovery_info:
# Find the discovered device in the list of user configurations
override = next(
(
c
for c in hass.data[CONF_DEVICE_CACHE]
if c.get(CONF_NAME) == discovery_info.get(CONF_NAME)
),
None,
)
port = DEFAULT_PORT
delay_secs = DEFAULT_DELAY_SECS
if override is not None:
activity = override.get(ATTR_ACTIVITY)
delay_secs = override.get(ATTR_DELAY_SECS)
port = override.get(CONF_PORT, DEFAULT_PORT)
host = (discovery_info.get(CONF_NAME), discovery_info.get(CONF_HOST), port)
# Ignore hub name when checking if this hub is known - ip and port only
if host[1:] in ((h.host, h.port) for h in DEVICES):
_LOGGER.debug("Discovered host already known: %s", host)
return
elif CONF_HOST in config:
host = (config.get(CONF_NAME), config.get(CONF_HOST), config.get(CONF_PORT))
activity = config.get(ATTR_ACTIVITY)
delay_secs = config.get(ATTR_DELAY_SECS)
else:
hass.data[CONF_DEVICE_CACHE].append(config)
return
name, address, port = host
_LOGGER.info(
"Loading Harmony Platform: %s at %s:%s, startup activity: %s",
name,
address,
port,
activity,
)
harmony_conf_file = hass.config.path(
"{}{}{}".format("harmony_", slugify(name), ".conf")
)
try:
device = HarmonyRemote(
name, address, port, activity, harmony_conf_file, delay_secs
)
if not await device.connect():
raise PlatformNotReady
DEVICES.append(device)
async_add_entities([device])
register_services(hass)
except (ValueError, AttributeError):
raise PlatformNotReady
def register_services(hass):
"""Register all services for harmony devices."""
hass.services.async_register(
DOMAIN, SERVICE_SYNC, _sync_service, schema=HARMONY_SYNC_SCHEMA
)
hass.services.async_register(
DOMAIN,
SERVICE_CHANGE_CHANNEL,
_change_channel_service,
schema=HARMONY_CHANGE_CHANNEL_SCHEMA,
)
async def _apply_service(service, service_func, *service_func_args):
"""Handle services to apply."""
entity_ids = service.data.get("entity_id")
if entity_ids:
_devices = [device for device in DEVICES if device.entity_id in entity_ids]
else:
_devices = DEVICES
for device in _devices:
await service_func(device, *service_func_args)
async def _sync_service(service):
await _apply_service(service, HarmonyRemote.sync)
async def _change_channel_service(service):
channel = service.data.get(ATTR_CHANNEL)
await _apply_service(service, HarmonyRemote.change_channel, channel)
class HarmonyRemote(remote.RemoteDevice):
"""Remote representation used to control a Harmony device."""
def __init__(self, name, host, port, activity, out_path, delay_secs):
"""Initialize HarmonyRemote class."""
self._name = name
self.host = host
self.port = port
self._state = None
self._current_activity = None
self._default_activity = activity
self._client = HarmonyClient(ip_address=host)
self._config_path = out_path
self._delay_secs = delay_secs
self._available = False
async def async_added_to_hass(self):
"""Complete the initialization."""
_LOGGER.debug("%s: Harmony Hub added", self._name)
# Register the callbacks
self._client.callbacks = ClientCallbackType(
new_activity=self.new_activity,
config_updated=self.new_config,
connect=self.got_connected,
disconnect=self.got_disconnected,
)
# Store Harmony HUB config, this will also update our current
# activity
await self.new_config()
async def shutdown(_):
"""Close connection on shutdown."""
_LOGGER.debug("%s: Closing Harmony Hub", self._name)
try:
await self._client.close()
except aioexc.TimeOut:
_LOGGER.warning("%s: Disconnect timed-out", self._name)
self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, shutdown)
@property
def name(self):
"""Return the Harmony device's name."""
return self._name
@property
def should_poll(self):
"""Return the fact that we should not be polled."""
return False
@property
def device_state_attributes(self):
"""Add platform specific attributes."""
return {ATTR_CURRENT_ACTIVITY: self._current_activity}
@property
def is_on(self):
"""Return False if PowerOff is the current activity, otherwise True."""
return self._current_activity not in [None, "PowerOff"]
@property
def available(self):
"""Return True if connected to Hub, otherwise False."""
return self._available
async def connect(self):
"""Connect to the Harmony HUB."""
_LOGGER.debug("%s: Connecting", self._name)
try:
if not await self._client.connect():
_LOGGER.warning("%s: Unable to connect to HUB.", self._name)
await self._client.close()
return False
except aioexc.TimeOut:
_LOGGER.warning("%s: Connection timed-out", self._name)
return False
return True
def new_activity(self, activity_info: tuple) -> None:
"""Call for updating the current activity."""
activity_id, activity_name = activity_info
_LOGGER.debug("%s: activity reported as: %s", self._name, activity_name)
self._current_activity = activity_name
self._state = bool(activity_id != -1)
self._available = True
self.async_schedule_update_ha_state()
async def new_config(self, _=None):
"""Call for updating the current activity."""
_LOGGER.debug("%s: configuration has been updated", self._name)
self.new_activity(self._client.current_activity)
await self.hass.async_add_executor_job(self.write_config_file)
async def got_connected(self, _=None):
"""Notification that we're connected to the HUB."""
_LOGGER.debug("%s: connected to the HUB.", self._name)
if not self._available:
# We were disconnected before.
await self.new_config()
async def got_disconnected(self, _=None):
"""Notification that we're disconnected from the HUB."""
_LOGGER.debug("%s: disconnected from the HUB.", self._name)
self._available = False
# We're going to wait for 10 seconds before announcing we're
# unavailable, this to allow a reconnection to happen.
await asyncio.sleep(10)
if not self._available:
# Still disconnected. Let the state engine know.
self.async_schedule_update_ha_state()
async def async_turn_on(self, **kwargs):
"""Start an activity from the Harmony device."""
_LOGGER.debug("%s: Turn On", self.name)
activity = kwargs.get(ATTR_ACTIVITY, self._default_activity)
if activity:
activity_id = None
if activity.isdigit() or activity == "-1":
_LOGGER.debug("%s: Activity is numeric", self.name)
if self._client.get_activity_name(int(activity)):
activity_id = activity
if activity_id is None:
_LOGGER.debug("%s: Find activity ID based on name", self.name)
activity_id = self._client.get_activity_id(str(activity).strip())
if activity_id is None:
_LOGGER.error("%s: Activity %s is invalid", self.name, activity)
return
try:
await self._client.start_activity(activity_id)
except aioexc.TimeOut:
_LOGGER.error("%s: Starting activity %s timed-out", self.name, activity)
else:
_LOGGER.error("%s: No activity specified with turn_on service", self.name)
async def async_turn_off(self, **kwargs):
"""Start the PowerOff activity."""
_LOGGER.debug("%s: Turn Off", self.name)
try:
await self._client.power_off()
except aioexc.TimeOut:
_LOGGER.error("%s: Powering off timed-out", self.name)
async def async_send_command(self, command, **kwargs):
"""Send a list of commands to one device."""
_LOGGER.debug("%s: Send Command", self.name)
device = kwargs.get(ATTR_DEVICE)
if device is None:
_LOGGER.error("%s: Missing required argument: device", self.name)
return
device_id = None
if device.isdigit():
_LOGGER.debug("%s: Device %s is numeric", self.name, device)
if self._client.get_device_name(int(device)):
device_id = device
if device_id is None:
_LOGGER.debug(
"%s: Find device ID %s based on device name", self.name, device
)
device_id = self._client.get_device_id(str(device).strip())
if device_id is None:
_LOGGER.error("%s: Device %s is invalid", self.name, device)
return
num_repeats = kwargs[ATTR_NUM_REPEATS]
delay_secs = kwargs.get(ATTR_DELAY_SECS, self._delay_secs)
hold_secs = kwargs[ATTR_HOLD_SECS]
_LOGGER.debug(
"Sending commands to device %s holding for %s seconds "
"with a delay of %s seconds",
device,
hold_secs,
delay_secs,
)
# Creating list of commands to send.
snd_cmnd_list = []
for _ in range(num_repeats):
for single_command in command:
send_command = SendCommandDevice(
device=device_id, command=single_command, delay=hold_secs
)
snd_cmnd_list.append(send_command)
if delay_secs > 0:
snd_cmnd_list.append(float(delay_secs))
_LOGGER.debug("%s: Sending commands", self.name)
try:
result_list = await self._client.send_commands(snd_cmnd_list)
except aioexc.TimeOut:
_LOGGER.error("%s: Sending commands timed-out", self.name)
return
for result in result_list:
_LOGGER.error(
"Sending command %s to device %s failed with code %s: %s",
result.command.command,
result.command.device,
result.code,
result.msg,
)
async def change_channel(self, channel):
"""Change the channel using Harmony remote."""
_LOGGER.debug("%s: Changing channel to %s", self.name, channel)
try:
await self._client.change_channel(channel)
except aioexc.TimeOut:
_LOGGER.error("%s: Changing channel to %s timed-out", self.name, channel)
async def sync(self):
"""Sync the Harmony device with the web service."""
_LOGGER.debug("%s: Syncing hub with Harmony cloud", self.name)
try:
await self._client.sync()
except aioexc.TimeOut:
_LOGGER.error("%s: Syncing hub with Harmony cloud timed-out", self.name)
else:
await self.hass.async_add_executor_job(self.write_config_file)
def write_config_file(self):
"""Write Harmony configuration file."""
_LOGGER.debug(
"%s: Writing hub configuration to file: %s", self.name, self._config_path
)
if self._client.config is None:
_LOGGER.warning("%s: No configuration received from hub", self.name)
return
try:
with open(self._config_path, "w+", encoding="utf-8") as file_out:
json.dump(self._client.json_config, file_out, sort_keys=True, indent=4)
except IOError as exc:
_LOGGER.error(
"%s: Unable to write HUB configuration to %s: %s",
self.name,
self._config_path,
exc,
)
| apache-2.0 | -3,844,972,485,420,197,000 | 33.040964 | 88 | 0.597791 | false |
QISKit/qiskit-sdk-py | test/python/quantum_info/test_local_invariance.py | 1 | 2191 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""Tests for local invariance routines."""
import unittest
from numpy.testing import assert_allclose
from qiskit.execute import execute
from qiskit.circuit import QuantumCircuit, QuantumRegister
from qiskit.test import QiskitTestCase
from qiskit.providers.basicaer import UnitarySimulatorPy
from qiskit.quantum_info.synthesis.local_invariance import two_qubit_local_invariants
class TestLocalInvariance(QiskitTestCase):
"""Test local invariance routines"""
def test_2q_local_invariance_simple(self):
"""Check the local invariance parameters
for known simple cases.
"""
sim = UnitarySimulatorPy()
qr = QuantumRegister(2, name='q')
qc = QuantumCircuit(qr)
U = execute(qc, sim).result().get_unitary()
vec = two_qubit_local_invariants(U)
assert_allclose(vec, [1, 0, 3])
qr = QuantumRegister(2, name='q')
qc = QuantumCircuit(qr)
qc.cx(qr[1], qr[0])
U = execute(qc, sim).result().get_unitary()
vec = two_qubit_local_invariants(U)
assert_allclose(vec, [0, 0, 1])
qr = QuantumRegister(2, name='q')
qc = QuantumCircuit(qr)
qc.cx(qr[1], qr[0])
qc.cx(qr[0], qr[1])
U = execute(qc, sim).result().get_unitary()
vec = two_qubit_local_invariants(U)
assert_allclose(vec, [0, 0, -1])
qr = QuantumRegister(2, name='q')
qc = QuantumCircuit(qr)
qc.swap(qr[1], qr[0])
U = execute(qc, sim).result().get_unitary()
vec = two_qubit_local_invariants(U)
assert_allclose(vec, [-1, 0, -3])
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 3,291,327,062,508,422,700 | 31.701493 | 85 | 0.650844 | false |
debug-icons-project/debug-icons-tools | code/check_icon_names.py | 1 | 2994 | import os
ICON_DATABASE_FOLDER = "../icon-database"
def check_for_context_problems(themes):
print "Checking the following themes for icons in multiple contexts:"
print ", ".join(themes)
print
Icons = {}
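    # Icons maps icon name -> context -> list of themes providing that icon in that context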
for theme in themes:
with open(os.path.join(ICON_DATABASE_FOLDER, theme + ".txt"), "r") as f:
icons = f.read().splitlines()
for icon in icons:
context, name = icon.split("/")
if not Icons.has_key(name):
Icons[name] = {}
if not Icons[name].has_key(context):
Icons[name][context] = []
Icons[name][context].append(theme)
names = Icons.keys()
names.sort()
for name in names:
data = Icons[name]
number_of_contexts_for_current_icon = len(data.keys())
if number_of_contexts_for_current_icon == 1:
# everything is fine, the icon has the same context in all themes
# themes = data[data.keys()[0]]
# number_of_themes = len(themes)
# if number_of_themes != 1:
# print name, themes
# print name, data
# print
# print
pass
else:
print name
for category in data.keys():
# print category, data[category]
for theme in data[category]:
print " %-13s:" %category, theme
print
# print
correct_icons = 0
incorrect_icons = 0
for name, data in Icons.iteritems():
number_of_contexts_for_current_icon = len(data.keys())
if number_of_contexts_for_current_icon == 1:
correct_icons += 1
else:
incorrect_icons += 1
print "Icons with unique contexts: ", correct_icons
print "Icons with multiple contexts:", incorrect_icons
if __name__ == "__main__":
import sys
nparams = len(sys.argv) - 1
# Please note:
# - all themes must be in the "../base_themes/" subfolder!
    # - the theme name is the folder of the theme, not the one given in the index.theme file
# - one could implement direct support of locally installed theme files in /usr/share/icons
# but creating symlinks in the ".../base_themes/" folder might be easier
# if there are parameters passed via command line then these are treated as theme names...
if nparams >= 1:
themes = sys.argv[1:]
# ... otherwise use all the available theme folders
else:
        # get all files in the base theme folder
        themes = os.listdir(ICON_DATABASE_FOLDER)
        # keep only regular files (skip directories)
        themes = [f for f in themes if os.path.isfile(os.path.join(ICON_DATABASE_FOLDER, f))]
        # keep only files ending in '.txt' and strip the extension
        themes = [f[:-4] for f in themes if f.endswith(".txt")]
check_for_context_problems(themes)
| mit | 1,633,209,410,626,736,400 | 25.732143 | 95 | 0.565464 | false |
Johnzero/erp | openerp/addons/fg_account/report/period_check.py | 1 | 6379 | # -*- coding: utf-8 -*-
import tools
from osv import fields, osv
class reconcile_item(osv.osv_memory):
_name = "fg_account.reconcile.item"
_columns = {
'ref_doc':fields.reference('单据', selection=[('fg_sale.order','销售订单'),('fg_account.bill','收款单')],
size=128, readonly=True),
'o_date': fields.date('单据日期', readonly=True),
'name':fields.char('单号', size=24),
'o_partner': fields.many2one('res.partner', '客户', readonly=True),
't':fields.char('项目', size=12, readonly=True),
'reconciled':fields.boolean('已对账', readonly=True),
'cleared':fields.boolean('已清账', readonly=True),
'amount': fields.float('金额', digits=(16,4), readonly=True),
'balance':fields.float('余额', digits=(16,4), readonly=True),
'note':fields.text('附注'),
}
_order = 'o_date asc'
def button_view(self, cr, uid, ids, context=None):
record = self.browse(cr, uid, ids)[0]
r = {
'type': 'ir.actions.act_window',
'name': '查看单据',
'view_mode': 'form',
'view_type': 'form',
'res_model': record.ref_doc._table_name,
'res_id': record.ref_doc.id,
'target': 'new',
'context': context,
}
#if record.ref_doc._table_name == 'fg_account.bill':
# r['res_id'] = record.id - 1000000000
#
#print r
return r
class period_check(osv.osv):
_name = "fg_account.period.check"
_auto = False
_rec_name = 'ref_doc'
_columns = {
'ref_doc':fields.reference('单据', selection=[('fg_sale.order','销售订单'),('fg_account.bill','收款单')],
size=128, readonly=True),
'o_date': fields.date('单据日期', readonly=True),
'name':fields.char('单号', size=24),
'o_partner': fields.many2one('res.partner', '客户', readonly=True),
't':fields.char('项目', size=12, readonly=True),
'reconciled':fields.boolean('已对账', readonly=True),
'cleared':fields.boolean('已清账', readonly=True),
'amount': fields.float('金额', digits=(16,4), readonly=True),
'due_date_from':fields.function(lambda *a,**k:{}, method=True, type='date',string="开始日期"),
'due_date_to':fields.function(lambda *a,**k:{}, method=True, type='date',string="结束日期"),
'note':fields.text('附注'),
}
_order = 'o_date asc'
def button_view(self, cr, uid, ids, context=None):
record = self.browse(cr, uid, ids)[0]
r = {
'type': 'ir.actions.act_window',
'name': '查看单据',
'view_mode': 'form',
'view_type': 'form',
'res_model': record.ref_doc._table_name,
'res_id': record.id,
'target': 'new',
'context': context,
}
if record.ref_doc._table_name == 'fg_account.bill':
r['res_id'] = record.id - 1000000000
return r
def button_clear(self, cr, uid, ids, context=None):
order_obj = self.pool.get('fg_sale.order')
#this should all be order.
#check_record's id IS the id of order.
order_obj.write(cr, uid, ids, {'clear':True})
return True
def button_unclear(self, cr, uid, ids, context=None):
order_obj = self.pool.get('fg_sale.order')
#this should all be order.
#check_record's id IS the id of order.
order_obj.write(cr, uid, ids, {'clear':False})
return True
def init(self, cr):
tools.drop_view_if_exists(cr, 'fg_account_period_check')
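        # The view unions three sources: normal deliveries, returns (minus orders) and
        # confirmed bills; bill ids are offset by 1,000,000,000 to keep view ids unique.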
cr.execute("""
create or replace view fg_account_period_check as (
(
SELECT
o."id" AS ID,
o.name as name,
'fg_sale.order,' || o."id" AS ref_doc,
o.date_order AS o_date,
o.partner_id AS o_partner,
'发货额' AS T,
o.reconciled AS reconciled,
SUM(line.subtotal_amount)AS amount,
o.note AS note,
o.clear as cleared
FROM
fg_sale_order_line line
JOIN fg_sale_order o ON o."id" = line.order_id
WHERE
o."state" = 'done'
AND NOT o.minus
GROUP BY
o. ID,
o."name",
o.date_confirm,
o.partner_id
)
UNION ALL
(
SELECT
o."id" AS ID,
o.name as name,
'fg_sale.order,' || o."id" AS ref_doc,
o.date_order AS o_date,
o.partner_id AS o_partner,
'退货' AS T,
o.reconciled AS reconciled,
SUM(line.subtotal_amount)AS amount,
o.note AS note,
o.clear as cleared
FROM
fg_sale_order_line line
JOIN fg_sale_order o ON o."id" = line.order_id
WHERE
o."state" = 'done'
AND o.minus
GROUP BY
o. ID,
o."name",
o.date_confirm,
o.partner_id
)
UNION ALL
(
SELECT
(bill."id"+ 1000000000) AS ID,
bill.name as name,
'fg_account.bill,' || bill."id" AS ref_doc,
bill.date_check AS o_date,
            bill.partner_id AS o_partner,
cate."name" AS T,
bill.reconciled AS reconciled,
(0-bill.amount) AS amount,
bill.note AS note,
False as cleared
FROM
fg_account_bill bill
JOIN fg_account_bill_category cate ON bill.category_id = cate. ID
WHERE
bill."state" IN('check', 'done')
)
ORDER BY id desc
)
""")
| agpl-3.0 | -1,519,215,445,223,832,300 | 32.938202 | 105 | 0.460833 | false |
kdschlosser/SonyAPI | setup.py | 1 | 1842 | # -*- coding: utf-8 -*-
#
# SonyAPI
# External control of Sony Bravia Generation 3 TV's
# Copyright (C) 2017 Kevin G. Schlosser
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
from setuptools import setup, find_packages
from SonyAPI.version import (
__version__,
__author__,
__author_email__,
__url__,
__download_url__,
__description__,
__requirements__,
__keywords__,
__classifiers__,
__license__
)
sys.path.insert(0, '.')
PACKAGE_DIR = os.path.abspath(os.path.dirname(__file__))
# Get the long description from the relevant file
with open(os.path.join(PACKAGE_DIR, 'README.txt'), 'r') as f:
__long_description__ = f.read().encode('utf-8')
setup(
name='SonyAPI',
version=__version__,
description=__description__,
long_description=__long_description__,
install_requires=__requirements__,
maintainer=__author__,
author=__author__,
author_email=__author_email__,
zip_safe=True,
packages=find_packages(),
include_package_data=True,
url=__url__,
download_url=__download_url__,
keywords=__keywords__,
classifiers=__classifiers__,
license=__license__,
)
| gpl-2.0 | 2,833,613,687,066,089,000 | 28.238095 | 73 | 0.679696 | false |
MiniSEC/GRR_clone | lib/flows/general/grep.py | 1 | 4124 | #!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
"""A simple grep flow."""
import time
from grr.lib import aff4
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib import type_info
from grr.lib import utils
class Grep(flow.GRRFlow):
"""Greps a file on the client for a pattern or a regex.
This flow operates on files only, see GlobAndGrep if you want to grep a
directory.
Returns to parent flow:
RDFValueArray of BufferReference objects.
"""
category = "/Filesystem/"
XOR_IN_KEY = 37
XOR_OUT_KEY = 57
flow_typeinfo = type_info.TypeDescriptorSet(
type_info.GrepspecType(
description="The file which will be grepped.",
name="request"),
type_info.String(
description="The output collection.",
name="output",
default="analysis/grep/{u}-{t}"),
)
@flow.StateHandler(next_state=["StoreResults"])
def Start(self):
"""Start Grep flow."""
self.state.request.xor_in_key = self.XOR_IN_KEY
self.state.request.xor_out_key = self.XOR_OUT_KEY
# For literal matches we xor the search term. In the event we search the
# memory this stops us matching the GRR client itself.
if self.state.request.literal:
self.state.request.literal = utils.Xor(self.state.request.literal,
self.XOR_IN_KEY)
self.state.Register("output_collection", None)
self.CallClient("Grep", self.state.request, next_state="StoreResults")
@flow.StateHandler()
def StoreResults(self, responses):
if responses.success:
output = self.state.output.format(t=time.time(),
u=self.state.context.user)
out_urn = self.client_id.Add(output)
fd = aff4.FACTORY.Create(out_urn, "GrepResultsCollection",
mode="w", token=self.token)
self.state.output_collection = fd
if self.state.request.HasField("literal"):
self.state.request.literal = utils.Xor(self.state.request.literal,
self.XOR_IN_KEY)
fd.Set(fd.Schema.DESCRIPTION("Grep by %s: %s" % (
self.state.context.user, str(self.state.request))))
for response in responses:
response.data = utils.Xor(response.data,
self.XOR_OUT_KEY)
response.length = len(response.data)
fd.Add(response)
self.SendReply(response)
else:
self.Notify("FlowStatus", self.session_id,
"Error grepping file: %s." % responses.status)
@flow.StateHandler()
def End(self):
if self.state.output_collection is not None:
self.state.output_collection.Flush()
self.Notify("ViewObject", self.state.output_collection.urn,
u"Grep completed. %d hits" %
len(self.state.output_collection))
class GrepAndDownload(flow.GRRFlow):
"""Downloads file if a signature is found.
This flow greps a file on the client for a literal or regex and, if the
pattern is found, downloads the file.
"""
category = "/Filesystem/"
flow_typeinfo = (Grep.flow_typeinfo)
@flow.StateHandler(next_state=["DownloadFile"])
def Start(self):
self.state.request.mode = rdfvalue.GrepSpec.Mode.FIRST_HIT
self.CallFlow("Grep", request=self.state.request, next_state="DownloadFile")
@flow.StateHandler(next_state=["StoreDownload", "End"])
def DownloadFile(self, responses):
if responses:
self.Log("Grep completed with %s hits, downloading file.", len(responses))
self.CallFlow("FastGetFile", pathspec=responses.First().pathspec,
next_state="StoreDownload")
else:
self.Log("Grep did not yield any results.")
@flow.StateHandler()
def StoreDownload(self, responses):
if not responses.success:
raise flow.FlowError("Error while downloading file: %s" %
responses.status.error_message)
else:
stat = responses.First()
self.Notify("ViewObject", stat.aff4path,
"File downloaded successfully")
| apache-2.0 | -9,200,251,228,935,758,000 | 30.968992 | 80 | 0.634336 | false |
KelSolaar/Umbra | umbra/globals/ui_constants.py | 1 | 7560 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
**ui_constants.py**
**Platform:**
Windows, Linux, Mac Os X.
**Description:**
Defines **Umbra** package ui constants through the :class:`UiConstants` class.
**Others:**
"""
from __future__ import unicode_literals
__author__ = "Thomas Mansencal"
__copyright__ = "Copyright (C) 2008 - 2014 - Thomas Mansencal"
__license__ = "GPL V3.0 - http://www.gnu.org/licenses/"
__maintainer__ = "Thomas Mansencal"
__email__ = "[email protected]"
__status__ = "Production"
__all__ = ["UiConstants"]
class UiConstants():
"""
Defines **Umbra** package ui constants.
"""
ui_file = "Umbra.ui"
"""
:param ui_file: Application ui file.
:type ui_file: unicode
"""
processing_ui_file = "Processing.ui"
"""
:param processing_ui_file: Processing ui file.
:type processing_ui_file: unicode
"""
reporter_ui_file = "Reporter.ui"
"""
:param reporter_ui_file: Reporter ui file.
:type reporter_ui_file: unicode
"""
windows_stylesheet_file = "styles/Windows_styleSheet.qss"
"""
:param windows_stylesheet_file: Application Windows Os stylesheet file.
:type windows_stylesheet_file: unicode
"""
darwin_stylesheet_file = "styles/Darwin_styleSheet.qss"
"""
:param darwin_stylesheet_file: Application Mac Os X Os stylesheet file.
:type darwin_stylesheet_file: unicode
"""
linux_stylesheet_file = "styles/Linux_styleSheet.qss"
"""
:param linux_stylesheet_file: Application Linux Os stylesheet file.
:type linux_stylesheet_file: unicode
"""
windows_full_screen_stylesheet_file = "styles/Windows_FullScreen_styleSheet.qss"
"""
:param windows_full_screen_stylesheet_file: Application Windows Os fullscreen stylesheet file.
:type windows_full_screen_stylesheet_file: unicode
"""
darwin_full_screen_stylesheet_file = "styles/Darwin_FullScreen_styleSheet.qss"
"""
:param darwin_full_screen_stylesheet_file: Application Mac Os X Os fullscreen stylesheet file.
:type darwin_full_screen_stylesheet_file: unicode
"""
linux_full_screen_stylesheet_file = "styles/Linux_FullScreen_styleSheet.qss"
"""
:param linux_full_screen_stylesheet_file: Application Linux Os fullscreen stylesheet file.
:type linux_full_screen_stylesheet_file: unicode
"""
windows_style = "plastique"
"""
:param windows_style: Application Windows Os style.
:type windows_style: unicode
"""
darwin_style = "plastique"
"""
:param darwin_style: Application Mac Os X Os style.
:type darwin_style: unicode
"""
linux_style = "plastique"
"""
:param linux_style: Application Linux Os style.
:type linux_style: unicode
"""
settings_file = "preferences/Default_Settings.rc"
"""
:param settings_file: Application defaults settings file.
:type settings_file: unicode
"""
layouts_file = "layouts/Default_Layouts.rc"
"""
:param layouts_file: Application defaults layouts file.
:type layouts_file: unicode
"""
application_windows_icon = "images/Icon_Dark.png"
"""
:param application_windows_icon: Application icon file.
:type application_windows_icon: unicode
"""
splash_screen_image = "images/Umbra_SpashScreen.png"
"""
:param splash_screen_image: Application splashscreen image.
:type splash_screen_image: unicode
"""
logo_image = "images/Umbra_Logo.png"
"""
:param logo_image: Application logo image.
:type logo_image: unicode
"""
default_toolbar_icon_size = 32
"""
:param default_toolbar_icon_size: Application toolbar icons size.
:type default_toolbar_icon_size: int
"""
custom_layouts_icon = "images/Custom_Layouts.png"
"""
:param custom_layouts_icon: Application **Custom Layouts** icon.
:type custom_layouts_icon: unicode
"""
custom_layouts_hover_icon = "images/Custom_Layouts_Hover.png"
"""
:param custom_layouts_hover_icon: Application **Custom Layouts** hover icon.
:type custom_layouts_hover_icon: unicode
"""
custom_layouts_active_icon = "images/Custom_Layouts_Active.png"
"""
:param custom_layouts_active_icon: Application **Custom Layouts** active icon.
:type custom_layouts_active_icon: unicode
"""
miscellaneous_icon = "images/Miscellaneous.png"
"""
:param miscellaneous_icon: Application **Miscellaneous** icon.
:type miscellaneous_icon: unicode
"""
miscellaneous_hover_icon = "images/Miscellaneous_Hover.png"
"""
:param miscellaneous_hover_icon: Application **Miscellaneous** hover icon.
:type miscellaneous_hover_icon: unicode
"""
miscellaneous_active_icon = "images/Miscellaneous_Active.png"
"""
:param miscellaneous_active_icon: Application **Miscellaneous** active icon.
:type miscellaneous_active_icon: unicode
"""
development_icon = "images/Development.png"
"""
:param development_icon: Application **Development** icon.
:type development_icon: unicode
"""
development_hover_icon = "images/Development_Hover.png"
"""
:param development_hover_icon: Application **Development** hover icon.
:type development_hover_icon: unicode
"""
development_active_icon = "images/Development_Active.png"
"""
:param development_active_icon: Application **Development** active icon.
:type development_active_icon: unicode
"""
preferences_icon = "images/Preferences.png"
"""
:param preferences_icon: Application **Preferences** icon.
:type preferences_icon: unicode
"""
preferences_hover_icon = "images/Preferences_Hover.png"
"""
:param preferences_hover_icon: Application **Preferences** hover icon.
:type preferences_hover_icon: unicode
"""
preferences_active_icon = "images/Preferences_Active.png"
"""
:param preferences_active_icon: Application **Preferences** active icon.
:type preferences_active_icon: unicode
"""
startup_layout = "startup_centric"
"""
:param startup_layout: Application startup layout.
:type startup_layout: unicode
"""
help_file = "http://thomasmansencal.com/Sharing/Umbra/Support/Documentation/Help/Umbra_Manual.html"
"""
:param help_file: Application online help file.
:type help_file: unicode
"""
api_file = "http://thomasmansencal.com/Sharing/Umbra/Support/Documentation/Api/index.html"
"""
:param api_file: Application online Api file.
:type api_file: unicode
"""
development_layout = "development_centric"
"""
:param development_layout: Application development layout.
:type development_layout: unicode
"""
python_grammar_file = "grammars/Python/Python.grc"
"""
:param python_grammar_file: Python language grammar file.
:type python_grammar_file: unicode
"""
logging_grammar_file = "grammars/Logging/Logging.grc"
"""
:param logging_grammar_file: Logging language grammar file.
:type logging_grammar_file: unicode
"""
text_grammar_file = "grammars/Text/Text.grc"
"""
:param text_grammar_file: Text language grammar file.
:type text_grammar_file: unicode
"""
invalid_link_html_file = "htmls/Invalid_Link.html"
"""
:param invalid_link_html_file: Invalid link html file.
:type invalid_link_html_file: unicode
"""
crittercism_id = "5075c158d5f9b9796b000002"
"""
:param crittercism_id: Crittercism Id.
:type crittercism_id: unicode
"""
| gpl-3.0 | 371,396,141,367,701,060 | 29.857143 | 103 | 0.667725 | false |
kalcho83/black-hat-python | bhpnet.py | 1 | 8452 | #!/opt/local/bin/python2.7
import sys
import socket
import getopt
import threading
import subprocess
# define some global variables
listen = False
command = False
upload = False
execute = ""
target = ""
upload_destination = ""
port = 0
# this runs a command and returns the output
def run_command(command):
# trim the newline
command = command.rstrip()
# run the command and get the output back
try:
output = subprocess.check_output(command,stderr=subprocess.STDOUT, shell=True)
except:
output = "Failed to execute command.\r\n"
# send the output back to the client
return output
# this handles incoming client connections
def client_handler(client_socket):
global upload
global execute
global command
# check for upload
if len(upload_destination):
# read in all of the bytes and write to our destination
file_buffer = ""
# keep reading data until none is available
while True:
data = client_socket.recv(1024)
if not data:
break
else:
file_buffer += data
# now we take these bytes and try to write them out
try:
file_descriptor = open(upload_destination,"wb")
file_descriptor.write(file_buffer)
file_descriptor.close()
# acknowledge that we wrote the file out
client_socket.send("Successfully saved file to %s\r\n" % upload_destination)
except:
client_socket.send("Failed to save file to %s\r\n" % upload_destination)
# check for command execution
if len(execute):
# run the command
output = run_command(execute)
client_socket.send(output)
# now we go into another loop if a command shell was requested
if command:
while True:
# show a simple prompt
client_socket.send("<BHP:#> ")
# now we receive until we see a linefeed (enter key)
cmd_buffer = ""
while "\n" not in cmd_buffer:
cmd_buffer += client_socket.recv(1024)
# we have a valid command so execute it and send back the results
response = run_command(cmd_buffer)
# send back the response
client_socket.send(response)
# this is for incoming connections
def server_loop():
global target
global port
# if no target is defined we listen on all interfaces
if not len(target):
target = "0.0.0.0"
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((target,port))
server.listen(5)
while True:
client_socket, addr = server.accept()
# spin off a thread to handle our new client
client_thread = threading.Thread(target=client_handler,args=(client_socket,))
client_thread.start()
# if we don't listen we are a client....make it so.
def client_sender(buffer):
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
# connect to our target host
client.connect((target,port))
# if we detect input from stdin send it
# if not we are going to wait for the user to punch some in
if len(buffer):
client.send(buffer)
while True:
# now wait for data back
recv_len = 1
response = ""
while recv_len:
data = client.recv(4096)
recv_len = len(data)
response+= data
if recv_len < 4096:
break
print response,
# wait for more input
buffer = raw_input("")
buffer += "\n"
# send it off
client.send(buffer)
except:
# just catch generic errors - you can do your homework to beef this up
print "[*] Exception! Exiting."
# teardown the connection
client.close()
def usage():
print "Netcat Replacement"
print
print "Usage: bhpnet.py -t target_host -p port"
print "-l --listen - listen on [host]:[port] for incoming connections"
print "-e --execute=file_to_run - execute the given file upon receiving a connection"
print "-c --command - initialize a command shell"
print "-u --upload=destination - upon receiving connection upload a file and write to [destination]"
print
print
print "Examples: "
print "bhpnet.py -t 192.168.0.1 -p 5555 -l -c"
print "bhpnet.py -t 192.168.0.1 -p 5555 -l -u=c:\\target.exe"
print "bhpnet.py -t 192.168.0.1 -p 5555 -l -e=\"cat /etc/passwd\""
print "echo 'ABCDEFGHI' | ./bhpnet.py -t 192.168.11.12 -p 135"
sys.exit(0)
def main():
global listen
global port
global execute
global command
global upload_destination
global target
if not len(sys.argv[1:]):
usage()
# read the commandline options
try:
        opts, args = getopt.getopt(sys.argv[1:],"hle:t:p:cu:",["help","listen","execute=","target=","port=","command","upload="])
except getopt.GetoptError as err:
print str(err)
usage()
for o,a in opts:
if o in ("-h","--help"):
usage()
elif o in ("-l","--listen"):
listen = True
elif o in ("-e", "--execute"):
execute = a
elif o in ("-c", "--commandshell"):
command = True
elif o in ("-u", "--upload"):
upload_destination = a
elif o in ("-t", "--target"):
target = a
elif o in ("-p", "--port"):
port = int(a)
else:
assert False,"Unhandled Option"
# are we going to listen or just send data from stdin
if not listen and len(target) and port > 0:
# read in the buffer from the commandline
# this will block, so send CTRL-D if not sending input
# to stdin
buffer = sys.stdin.read()
# send data off
client_sender(buffer)
# we are going to listen and potentially
# upload things, execute commands and drop a shell back
# depending on our command line options above
if listen:
server_loop()
main() | gpl-3.0 | -4,140,704,907,797,089,300 | 34.074689 | 133 | 0.428064 | false |
callowayproject/django-massmedia | example/settings.py | 1 | 4213 | # Django settings for example project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
import os
import sys
APP = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
PROJ_ROOT = os.path.abspath(os.path.dirname(__file__))
sys.path.append(APP)
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'dev.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.abspath(os.path.join(PROJ_ROOT, 'media', 'uploads'))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/uploads/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.abspath(os.path.join(PROJ_ROOT, 'media', 'static'))
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'g2_39yupn*6j4p*cg2%w643jiq-1n_annua*%i8+rq0dx9p=$n'
ROOT_URLCONF = 'example.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJ_ROOT, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'massmedia',
'django.contrib.flatpages',
'testapp',
'mathfilters',
)
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
MMEDIA_IMAGE_STORAGE = 'media_storage.MediaStorage'
MASSMEDIA_SERVICES = {
'YOUTUBE': {
'EMAIL': '',
'USERNAME': '',
'PASSWORD': '',
},
}
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
| apache-2.0 | -6,627,286,543,488,898,000 | 32.975806 | 123 | 0.688346 | false |
liamneath1/hailey.io | hailey_web/hailey_api/migrations/0001_initial.py | 1 | 1312 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-14 22:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='StockMarket',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ticker_code', models.CharField(max_length=20)),
('index_code', models.IntegerField()),
('date', models.DateField()),
('open_price', models.FloatField()),
('close_price', models.FloatField(blank=True, null=True)),
('high_price', models.FloatField(blank=True, null=True)),
('low_price', models.FloatField(blank=True, null=True)),
('volume', models.FloatField(blank=True, null=True)),
('misc', models.TextField(blank=True, null=True)),
],
options={
'db_table': 'stockmarket',
},
),
migrations.AlterUniqueTogether(
name='stockmarket',
unique_together=set([('ticker_code', 'index_code', 'date')]),
),
]
| mit | 1,092,914,653,890,573,000 | 33.526316 | 114 | 0.539634 | false |
red-hat-storage/rhcephpkg | setup.py | 1 | 5632 | from time import sleep
import os
import re
import subprocess
import sys
from setuptools.command.test import test as TestCommand
from setuptools import setup, Command
try:
# Python 2 backwards compat
from __builtin__ import raw_input as input
except ImportError:
pass
readme = os.path.join(os.path.dirname(__file__), 'README.rst')
LONG_DESCRIPTION = open(readme).read()
def read_module_contents():
with open('rhcephpkg/__init__.py') as app_init:
return app_init.read()
module_file = read_module_contents()
metadata = dict(re.findall(r"__([a-z]+)__\s*=\s*'([^']+)'", module_file))
version = metadata['version']
class BumpCommand(Command):
""" Bump the __version__ number and commit all changes. """
user_options = [('version=', 'v', 'version number to use')]
def initialize_options(self):
new_version = metadata['version'].split('.')
new_version[-1] = str(int(new_version[-1]) + 1) # Bump the final part
self.version = ".".join(new_version)
def finalize_options(self):
pass
def run(self):
print('old version: %s new version: %s' %
(metadata['version'], self.version))
try:
input('Press enter to confirm, or ctrl-c to exit >')
except KeyboardInterrupt:
raise SystemExit("\nNot proceeding")
old = "__version__ = '%s'" % metadata['version']
new = "__version__ = '%s'" % self.version
module_file = read_module_contents()
with open('rhcephpkg/__init__.py', 'w') as fileh:
fileh.write(module_file.replace(old, new))
# Commit everything with a standard commit message
cmd = ['git', 'commit', '-a', '-m', 'version %s' % self.version]
print(' '.join(cmd))
subprocess.check_call(cmd)
class ReleaseCommand(Command):
""" Tag and push a new release. """
user_options = [('sign', 's', 'GPG-sign the Git tag and release files')]
def initialize_options(self):
self.sign = False
def finalize_options(self):
pass
def run(self):
# Create Git tag
tag_name = 'v%s' % version
cmd = ['git', 'tag', '-a', tag_name, '-m', 'version %s' % version]
if self.sign:
cmd.append('-s')
print(' '.join(cmd))
subprocess.check_call(cmd)
# Push Git tag to origin remote
cmd = ['git', 'push', 'origin', tag_name]
print(' '.join(cmd))
subprocess.check_call(cmd)
# Wait for CI to build this tag, so we can push directly to master
sha1 = self.sha1()
print('waiting 5 min for Travis CI to mark %s as green' % sha1)
sleep(5 * 60)
state = self.ci_state(sha1)
while state == 'pending':
print('Travis CI is %s for %s ...' % (state, sha1))
sleep(45)
state = self.ci_state(sha1)
assert state == 'success'
# Push master to the remote
cmd = ['git', 'push', 'origin', 'master']
print(' '.join(cmd))
subprocess.check_call(cmd)
# Create source package
cmd = ['python', 'setup.py', 'sdist']
print(' '.join(cmd))
subprocess.check_call(cmd)
tarball = 'dist/%s-%s.tar.gz' % ('rhcephpkg', version)
# GPG sign
if self.sign:
cmd = ['gpg2', '-b', '-a', tarball]
print(' '.join(cmd))
subprocess.check_call(cmd)
# Upload
cmd = ['twine', 'upload', tarball]
if self.sign:
cmd.append(tarball + '.asc')
print(' '.join(cmd))
subprocess.check_call(cmd)
def sha1(self):
cmd = ['git', 'rev-parse', 'HEAD']
print(' '.join(cmd))
output = subprocess.check_output(cmd).strip()
if sys.version_info[0] == 2:
return output
return output.decode('utf-8')
def ci_state(self, sha1):
""" Look up GitHub's status for this sha1 ref """
import requests
# See https://developer.github.com/v3/repos/statuses/
url = 'https://api.github.com/' \
'repos/red-hat-storage/rhcephpkg/commits/%s/status' % sha1
preview = 'application/vnd.github.howard-the-duck-preview+json'
response = requests.get(url, headers={'Accept': preview})
response.raise_for_status()
data = response.json()
return data['state']
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = ''
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
args = 'rhcephpkg --flake8 ' + self.pytest_args
errno = pytest.main(args.split())
sys.exit(errno)
setup(
name='rhcephpkg',
description='Packaging tool for Red Hat Ceph Storage product',
packages=['rhcephpkg'],
author='Ken Dreyer',
author_email='[email protected]',
url='https://github.com/red-hat-storage/rhcephpkg',
version=metadata['version'],
license='MIT',
zip_safe=False,
keywords='packaging, build, rpkg',
long_description=LONG_DESCRIPTION,
scripts=['bin/rhcephpkg'],
install_requires=[
'gbp',
'python-bugzilla',
'python-jenkins>=1.0.0',
'six',
'tambo>=0.1.0',
],
tests_require=[
'pytest',
'pytest-flake8',
],
cmdclass={'test': PyTest, 'bump': BumpCommand, 'release': ReleaseCommand},
)
| mit | 3,729,046,440,790,301,700 | 28.957447 | 78 | 0.571023 | false |
macedir/Software-Development | Python/P1.py | 1 | 1648 | # KYLE'S CHANGE COUNTER VERSION 1000
# INCLUDES UNSOLVABLE PENNY ERROR
import math
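# denomination value -> display name; iterated largest-first below for a greedy change count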
dict = {
100: 'hundred dollar bill',
50: 'fifty dollar bill',
20: 'twenty dollar bill',
10: 'ten dollar bill',
5: 'five dollar bill',
2: 'toonie',
1: 'loonie',
.25: 'quarter',
.10: 'dime',
.05: 'nickel',
.01: 'penny'
}
newDict = {}
##target = 999.99
##wl = target
while True:
print(' ')
inputamnt = input("How much? ")
try:
wl = float(inputamnt)
target = wl
def determineAmount(work, factor):
amountReq = work // factor
#amountReqFull = amountReq * factor
return amountReq
print('For $' + str(target) + ', you will need:')
for k in sorted(dict, reverse = True):
workInt = math.floor((determineAmount(wl, k)))
wl = (wl - (workInt*k))
newDict[k] = workInt
            if workInt != 0:
                if k != .01:
if workInt > 1:
print(str(workInt) + ' ' + dict[k] + 's')
else:
print(str(workInt) + ' ' + dict[k])
elif workInt > 1:
print(str(workInt) + ' pennies')
else:
print(str(workInt) + ' penny')
print(' ')
print('Margin of error of +-1 penny\n------------------------------')
except ValueError:
print('That is not a number\n------------------------------\n')
| gpl-3.0 | 3,040,184,352,776,137,700 | 27.428571 | 77 | 0.426578 | false |
xmendez/wfuzz | src/wfuzz/factories/fuzzresfactory.py | 1 | 4150 | import copy
from .fuzzfactory import reqfactory
from .payman import payman_factory
from ..fuzzobjects import FuzzResult, FuzzType, FuzzWord, FuzzWordType
from ..helpers.obj_factory import ObjectFactory, SeedBuilderHelper
class FuzzResultFactory(ObjectFactory):
def __init__(self):
ObjectFactory.__init__(
self,
{
"fuzzres_from_options_and_dict": FuzzResultDictioBuilder(),
"fuzzres_from_allvar": FuzzResultAllVarBuilder(),
"fuzzres_from_recursion": FuzzResRecursiveBuilder(),
"seed_from_recursion": SeedRecursiveBuilder(),
"seed_from_options": SeedResultBuilder(),
"seed_from_options_and_dict": FuzzResultDictSeedBuilder(),
"baseline_from_options": BaselineResultBuilder(),
},
)
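# Each builder class below is registered under a key in the factory above; its __call__
# assembles the corresponding FuzzResult (result, seed, baseline or recursive variant)
# from the options and payload data it is given.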
class FuzzResultDictioBuilder:
def __call__(self, options, dictio_item):
res = copy.deepcopy(options["compiled_seed"])
res.item_type = FuzzType.RESULT
res.discarded = False
res.payload_man.update_from_dictio(dictio_item)
res.update_from_options(options)
SeedBuilderHelper.replace_markers(res.history, res.payload_man)
res.nres = next(FuzzResult.newid)
return res
class SeedResultBuilder:
def __call__(self, options):
seed = reqfactory.create("seed_from_options", options)
res = FuzzResult(seed)
res.payload_man = payman_factory.create("payloadman_from_request", seed)
return res
class BaselineResultBuilder:
def __call__(self, options):
raw_seed = reqfactory.create("request_from_options", options)
baseline_payloadman = payman_factory.create(
"payloadman_from_baseline", raw_seed
)
if baseline_payloadman.payloads:
res = FuzzResult(raw_seed)
res.payload_man = baseline_payloadman
res.update_from_options(options)
res.is_baseline = True
SeedBuilderHelper.replace_markers(raw_seed, baseline_payloadman)
return res
else:
return None
class FuzzResultAllVarBuilder:
def __call__(self, options, var_name, payload):
fuzzres = copy.deepcopy(options["compiled_seed"])
fuzzres.item_type = FuzzType.RESULT
fuzzres.discarded = False
fuzzres.payload_man = payman_factory.create("empty_payloadman", payload)
fuzzres.payload_man.update_from_dictio([payload])
fuzzres.history.wf_allvars_set = {var_name: payload.content}
fuzzres.nres = next(FuzzResult.newid)
return fuzzres
class FuzzResultDictSeedBuilder:
def __call__(self, options, dictio):
fuzzres = copy.deepcopy(dictio[0].content)
fuzzres.history.update_from_options(options)
fuzzres.update_from_options(options)
fuzzres.payload_man = payman_factory.create("empty_payloadman", dictio[0])
fuzzres.payload_man.update_from_dictio(dictio)
return fuzzres
class SeedRecursiveBuilder:
def __call__(self, seed):
new_seed = copy.deepcopy(seed)
new_seed.history.url = seed.history.recursive_url + "FUZZ"
new_seed.rlevel += 1
if new_seed.rlevel_desc:
new_seed.rlevel_desc += " - "
new_seed.rlevel_desc += seed.payload_man.description()
new_seed.item_type = FuzzType.SEED
new_seed.discarded = False
new_seed.payload_man = payman_factory.create(
"payloadman_from_request", new_seed.history
)
return new_seed
class FuzzResRecursiveBuilder:
def __call__(self, seed, url):
fr = copy.deepcopy(seed)
fr.history.url = str(url)
fr.rlevel = seed.rlevel + 1
if fr.rlevel_desc:
fr.rlevel_desc += " - "
fr.rlevel_desc += seed.payload_man.description()
fr.item_type = FuzzType.BACKFEED
fr.discarded = False
fr.is_baseline = False
fr.payload_man = payman_factory.create(
"empty_payloadman", FuzzWord(url, FuzzWordType.WORD)
)
return fr
resfactory = FuzzResultFactory()
| gpl-2.0 | -3,708,989,993,210,402,300 | 31.170543 | 82 | 0.630361 | false |
WayStudios/fora | fora/core/user.py | 1 | 3473 | # fora
# class User
# Xu [[email protected]] Copyright 2015
from fora.core.dbsession import (
DBSession,
OR
)
from fora.models.user import UserModel
import uuid
from datetime import datetime
class User(object):
""" This class contains core functionality of fora user manipulation.
"""
model = None
def __init__(self):
self.model = None
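    # The accessors below act as combined getters/setters: call with no argument to read
    # the underlying model field, or pass a value to update it.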
def exists(self):
        return self.model is not None
    def is_guest(self):
        return self.model is None
def id(self):
return self.model.id
def uuid(self, new_uuid = None):
if not new_uuid:
return self.model.uuid
self.model.uuid = new_uuid
def email_address(self, new_email_address = None):
if not new_email_address:
return self.model.email_address
self.model.email_address = new_email_address
def username(self, new_username = None):
if not new_username:
return self.model.username
self.model.username = new_username
def password(self, new_password = None):
if not new_password:
return self.model.password
self.model.password = new_password
def is_active(self, new_is_active = None):
if new_is_active == None:
return self.model.is_active
self.model.is_active = new_is_active
def is_deleted(self, new_is_deleted = None):
if new_is_deleted == None:
return self.model.is_deleted
self.model.is_deleted = new_is_deleted
def create_date(self, new_create_date = None):
if not new_create_date:
return self.model.create_date
self.model.create_date = new_create_date
def update_date(self, new_update_date = None):
if not new_update_date:
return self.model.update_date
self.model.update_date = new_update_date
@staticmethod
def get_user_by_uuid(uuid):
result = DBSession.query(UserModel).filter(UserModel.uuid == uuid).first()
obj = User()
obj.model = result
return obj
@staticmethod
def get_user_by_email_address(email_address):
result = DBSession.query(UserModel).filter(UserModel.email_address == email_address).first()
obj = User()
obj.model = result
return obj
@staticmethod
def get_user_by_username(username):
result = DBSession.query(UserModel).filter(UserModel.username == username).first()
obj = User()
obj.model = result
return obj
@staticmethod
def get_user_by_identity(identity):
result = DBSession.query(UserModel).filter(OR(UserModel.username == identity, UserModel.email_address == identity, UserModel.uuid == identity)).first()
obj = User()
obj.model = result
return obj
@staticmethod
def get_users():
results = DBSession.query(UserModel).all()
objs = {}
for result in results:
objs[result.id] = User()
objs[result.id].model = result
return objs
@staticmethod
def create_user(username, email_address, password, is_active = True, is_deleted = False):
result = UserModel(uuid = str(uuid.uuid4()), email_address = email_address, username = username, password = password, is_active = is_active, is_deleted = is_deleted, create_date = datetime.utcnow(), update_date = datetime.utcnow())
DBSession.add(result)
obj = User()
obj.model = result
return obj
| bsd-3-clause | -2,055,657,441,272,897,500 | 35.557895 | 239 | 0.624532 | false |
phate/jive | scripts/arrayliterals.py | 1 | 2660 | import re
import sys
r = re.compile('([a-zA-Z0-9_*_ ]*\\[\\]) *')
r = re.compile('\\(([a-zA-Z0-9_*_ ]*)\\[\\]\\)')
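# This script rewrites C-style compound array literals such as "(int[]){1, 2, 3}" into a
# named temporary array declared just before the enclosing statement, then refers to the
# temporary by name in place of the literal.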
def scan_stmt_starts(text):
starts = []
next_line_stmt = False
beginning_decl = False
nested_decls = set()
depth = 0
for n in range(len(text)):
c = text[n]
if c == '{':
if beginning_decl:
nested_decls.add(depth)
depth += 1
elif c == '}':
depth -= 1
if depth in nested_decls:
nested_decls.remove(depth)
elif c == '=':
beginning_decl = True
if c not in '= \t\n':
beginning_decl = False
if c == ';' or c == '{' or c == '}':
next_line_stmt = not bool(nested_decls)
elif c == '\n' and next_line_stmt:
starts.append((n + 1, get_indent(text, n + 1)))
next_line_stmt = False
else:
next_line_stmt = False
assert depth == 0
return starts
def get_indent(text, pos):
indent = ''
while pos < len(text) and text[pos] == '\t':
indent += text[pos]
pos += 1
return indent
#for start, indent in scan_stmt_starts(text):
#print '---'
#print indent, start
#print text[start:text.find('\n', start)]
def find_stmt_start(text, pos):
last_start, last_indent = 0, ''
for start, indent in scan_stmt_starts(text):
if start > pos:
return last_start, last_indent
last_start = start
last_indent = indent
return last_start, last_indent
def find_closing_brace(text, pos):
depth = 1
while depth > 0 and pos < len(text):
if text[pos] in '([{':
depth += 1
elif text[pos] in ')]}':
depth -= 1
pos += 1
return pos
def is_macro_def(text, pos):
while pos > 0:
if text[pos] == '#':
return True
elif text[pos] == '\n':
return False
else:
pos -= 1
return False
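# Convert the first array literal found in the text (skipping preprocessor macro definitions).
# Returns the rewritten text, or None when nothing is left to convert.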
def convert_single(text, counter):
for m in r.finditer(text):
start, end = m.start(), m.end()
if is_macro_def(text, start):
continue
arraytype = m.group(1)
stmt_start, stmt_indent = find_stmt_start(text, start)
values_start = text.find('{', end)
values_end = find_closing_brace(text, values_start + 1)
values = text[values_start:values_end]
before_stmt = text[:stmt_start]
var = 'tmparray%d' % counter
inserted_tmp = stmt_indent + arraytype + ' ' + var +'[] = ' + values + ';\n';
remainder = text[stmt_start:start] + var + text[values_end:]
return text[:stmt_start] + inserted_tmp + remainder
return None
def convert_all(text):
counter = 0
while True:
new_text = convert_single(text, counter)
if not new_text: return text
text = new_text
counter += 1
filename = sys.argv[1]
text = file(filename).read()
new_text = convert_all(text)
if new_text != text:
sys.stderr.write(filename + '\n')
file(filename, 'w').write(new_text)
| lgpl-2.1 | 9,154,337,267,762,679,000 | 20.803279 | 79 | 0.608271 | false |
dongsenfo/pymatgen | pymatgen/analysis/defects/tests/test_defect_compatibility.py | 1 | 19109 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
import numpy as np
from pymatgen.core import PeriodicSite
from pymatgen.io.vasp import Vasprun, Poscar, Outcar
from pymatgen.analysis.defects.core import Vacancy, Interstitial, DefectEntry
from pymatgen.analysis.defects.defect_compatibility import DefectCompatibility
from pymatgen.util.testing import PymatgenTest
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", 'test_files')
class DefectCompatibilityTest(PymatgenTest):
def setUp(self):
struc = PymatgenTest.get_structure("VO2")
struc.make_supercell(3)
self.vac = Vacancy(struc, struc.sites[0], charge=-3)
abc = self.vac.bulk_structure.lattice.abc
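        # synthetic planar-averaged potentials for the Freysoldt correction: a flat bulk
        # reference and a sinusoidal defect profile sampled every 0.2 along each lattice vector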
axisdata = [np.arange(0., lattval, 0.2) for lattval in abc]
bldata = [np.array([1. for u in np.arange(0., lattval, 0.2)]) for lattval in abc]
dldata = [
np.array([(-1 - np.cos(2 * np.pi * u / lattval)) for u in np.arange(0., lattval, 0.2)]) for lattval in abc
]
self.frey_params = {'axis_grid': axisdata, 'bulk_planar_averages': bldata,
'defect_planar_averages': dldata, 'dielectric': 15,
'initial_defect_structure': struc.copy(),
'defect_frac_sc_coords': struc.sites[0].frac_coords[:]}
kumagai_bulk_struc = Poscar.from_file(os.path.join( test_dir, 'defect', 'CONTCAR_bulk')).structure
bulk_out = Outcar( os.path.join( test_dir, 'defect', 'OUTCAR_bulk.gz'))
defect_out = Outcar( os.path.join( test_dir, 'defect', 'OUTCAR_vac_Ga_-3.gz'))
self.kumagai_vac = Vacancy(kumagai_bulk_struc, kumagai_bulk_struc.sites[0], charge=-3)
kumagai_defect_structure = self.kumagai_vac.generate_defect_structure()
self.kumagai_params = {'bulk_atomic_site_averages': bulk_out.electrostatic_potential,
'defect_atomic_site_averages': defect_out.electrostatic_potential,
'site_matching_indices': [[ind, ind-1] for ind in range(len(kumagai_bulk_struc))],
'defect_frac_sc_coords': [0.,0.,0.],
'initial_defect_structure': kumagai_defect_structure,
'dielectric': 18.118 * np.identity(3),
                               'gamma': 0.153156 #not necessary to load gamma, but speeds up unit test
}
v = Vasprun(os.path.join(test_dir, 'vasprun.xml'))
eigenvalues = v.eigenvalues.copy()
kptweights = v.actual_kpoints_weights
potalign = -0.1
vbm = v.eigenvalue_band_properties[2]
cbm = v.eigenvalue_band_properties[1]
self.bandfill_params = { 'eigenvalues': eigenvalues,
'kpoint_weights': kptweights,
'potalign': potalign,
'vbm': vbm, 'cbm': cbm }
self.band_edge_params = {'hybrid_cbm': 1., 'hybrid_vbm': -1., 'vbm': -0.5,
'cbm': 0.6, 'num_hole_vbm': 1., 'num_elec_cbm': 1.}
def test_process_entry(self):
# basic process with no corrections
dentry = DefectEntry(self.vac, 0., corrections={}, parameters={'vbm': 0., 'cbm': 0.}, entry_id=None)
dc = DefectCompatibility()
dentry = dc.process_entry( dentry)
self.assertIsNotNone( dentry)
# process with corrections from parameters used in other unit tests
params = self.frey_params.copy()
params.update(self.bandfill_params)
params.update({'hybrid_cbm': params['cbm'] + .2, 'hybrid_vbm': params['vbm'] - .4, })
dentry = DefectEntry(self.vac, 0., corrections={}, parameters=params, entry_id=None)
dc = DefectCompatibility()
dentry = dc.process_entry( dentry)
self.assertAlmostEqual( dentry.corrections['bandedgeshifting_correction'], 1.2)
self.assertAlmostEqual( dentry.corrections['bandfilling_correction'], 0.0)
self.assertAlmostEqual( dentry.corrections['charge_correction'], 5.44595036)
        # test with delocalized free carriers, which forces skipping the charge correction
# modify the eigenvalue list to have free holes
hole_eigenvalues = {}
for spinkey, spinset in params['eigenvalues'].items():
hole_eigenvalues[spinkey] = []
for kptset in spinset:
hole_eigenvalues[spinkey].append([])
for eig in kptset:
if (eig[0] < params['vbm']) and (eig[0] > params['vbm'] - .8):
hole_eigenvalues[spinkey][-1].append([eig[0], 0.5])
else:
hole_eigenvalues[spinkey][-1].append(eig)
params.update( {'eigenvalues': hole_eigenvalues})
dentry = DefectEntry(self.vac, 0., corrections={}, parameters=params, entry_id=None)
dc = DefectCompatibility( free_chg_cutoff=0.8)
dentry = dc.process_entry( dentry)
self.assertAlmostEqual( dentry.corrections['bandedgeshifting_correction'], 1.19999999)
self.assertAlmostEqual( dentry.corrections['bandfilling_correction'], -1.62202400)
self.assertAlmostEqual( dentry.corrections['charge_correction'], 0.)
# turn off band filling and band edge shifting
dc = DefectCompatibility( free_chg_cutoff=0.8, use_bandfilling=False, use_bandedgeshift=False)
dentry = dc.process_entry( dentry)
self.assertAlmostEqual( dentry.corrections['bandedgeshifting_correction'], 0.)
self.assertAlmostEqual( dentry.corrections['bandfilling_correction'], 0.)
self.assertAlmostEqual( dentry.corrections['charge_correction'], 0.)
def test_perform_all_corrections(self):
        #return entry even if insufficient values are provided
# for freysoldt, kumagai, bandfilling, or band edge shifting
de = DefectEntry(self.vac, 0., corrections={}, parameters={}, entry_id=None)
dc = DefectCompatibility()
dentry = dc.perform_all_corrections( de)
self.assertIsNotNone( dentry)
#all other correction applications are tested in unit tests below
def test_perform_freysoldt(self):
de = DefectEntry(self.vac, 0., corrections={}, parameters=self.frey_params, entry_id=None)
dc = DefectCompatibility()
dentry = dc.perform_freysoldt( de)
val = dentry.parameters['freysoldt_meta']
self.assertAlmostEqual(val['freysoldt_electrostatic'], 0.975893)
self.assertAlmostEqual(val['freysoldt_potential_alignment_correction'], 4.4700574)
self.assertAlmostEqual(val['freysoldt_potalign'], 1.4900191)
self.assertTrue('pot_corr_uncertainty_md' in val.keys())
self.assertTrue('pot_plot_data' in val.keys())
def test_perform_kumagai(self):
de = DefectEntry( self.kumagai_vac, 0., parameters=self.kumagai_params)
dc = DefectCompatibility()
dentry = dc.perform_kumagai( de)
val = dentry.parameters['kumagai_meta']
self.assertAlmostEqual(val['kumagai_electrostatic'], 0.88236299)
self.assertAlmostEqual(val['kumagai_potential_alignment_correction'], 2.09704862)
self.assertAlmostEqual(val['kumagai_potalign'], 0.69901620)
self.assertTrue('pot_corr_uncertainty_md' in val.keys())
self.assertTrue('pot_plot_data' in val.keys())
def test_run_bandfilling(self):
de = DefectEntry(self.vac, 0., corrections={}, parameters=self.bandfill_params, entry_id=None)
dc = DefectCompatibility()
dentry = dc.perform_bandfilling( de)
val = dentry.parameters['bandfilling_meta']
self.assertAlmostEqual(val['num_hole_vbm'], 0.)
self.assertAlmostEqual(val['num_elec_cbm'], 0.)
self.assertAlmostEqual(val['bandfilling_correction'], 0.)
def test_run_band_edge_shifting(self):
de = DefectEntry(self.vac, 0., corrections={}, parameters=self.band_edge_params, entry_id=None)
dc = DefectCompatibility()
dentry = dc.perform_band_edge_shifting( de)
val = dentry.parameters['bandshift_meta']
self.assertEqual(val['vbmshift'], -0.5)
self.assertEqual(val['cbmshift'], 0.4)
self.assertEqual(val['bandedgeshifting_correction'], 1.5)
def test_delocalization_analysis(self):
        #return entry even if insufficient values are provided
# for delocalization analysis with freysoldt, kumagai,
# bandfilling, or band edge shifting
de = DefectEntry(self.vac, 0., corrections={}, parameters={}, entry_id=None)
dc = DefectCompatibility()
dentry = dc.delocalization_analysis( de)
self.assertIsNotNone( dentry)
#all other correction applications are tested in unit tests below
def test_check_freysoldt_delocalized(self):
de = DefectEntry(self.vac, 0., corrections={}, parameters=self.frey_params, entry_id=None)
de.parameters.update( {'is_compatible': True}) #needs to be initialized with this here for unittest
dc = DefectCompatibility( plnr_avg_var_tol=0.1, plnr_avg_minmax_tol=0.5)
dentry = dc.perform_freysoldt( de)
# check case which fits under compatibility constraints
dentry = dc.check_freysoldt_delocalized( dentry)
frey_delocal = dentry.parameters['delocalization_meta']['plnr_avg']
self.assertTrue( frey_delocal['is_compatible'])
ans_var = [0.00038993, 0.02119532, 0.02119532]
ans_window = [0.048331509, 0.36797169, 0.36797169]
for ax in range(3):
ax_metadata = frey_delocal['metadata'][ax]
self.assertTrue( ax_metadata['frey_variance_compatible'])
self.assertAlmostEqual( ax_metadata['frey_variance'], ans_var[ax])
self.assertTrue( ax_metadata['frey_minmax_compatible'])
self.assertAlmostEqual( ax_metadata['frey_minmax_window'], ans_window[ax])
self.assertTrue( dentry.parameters['is_compatible'])
# check planar delocalization on 2nd and 3rd axes
dc = DefectCompatibility( plnr_avg_var_tol=0.1, plnr_avg_minmax_tol=0.2)
dentry.parameters.update( {'is_compatible': True})
dentry = dc.check_freysoldt_delocalized( dentry)
frey_delocal = dentry.parameters['delocalization_meta']['plnr_avg']
self.assertFalse( frey_delocal['is_compatible'])
ax_metadata = frey_delocal['metadata'][0]
self.assertTrue( ax_metadata['frey_variance_compatible'])
self.assertTrue( ax_metadata['frey_minmax_compatible'])
for ax in [1,2]:
ax_metadata = frey_delocal['metadata'][ax]
self.assertTrue( ax_metadata['frey_variance_compatible'])
self.assertFalse( ax_metadata['frey_minmax_compatible'])
self.assertFalse( dentry.parameters['is_compatible'])
# check variance based delocalization on 2nd and 3rd axes
dc = DefectCompatibility( plnr_avg_var_tol=0.01, plnr_avg_minmax_tol=0.5)
dentry.parameters.update( {'is_compatible': True})
dentry = dc.check_freysoldt_delocalized( dentry)
frey_delocal = dentry.parameters['delocalization_meta']['plnr_avg']
self.assertFalse( frey_delocal['is_compatible'])
ax_metadata = frey_delocal['metadata'][0]
self.assertTrue( ax_metadata['frey_variance_compatible'])
self.assertTrue( ax_metadata['frey_minmax_compatible'])
for ax in [1,2]:
ax_metadata = frey_delocal['metadata'][ax]
self.assertFalse( ax_metadata['frey_variance_compatible'])
self.assertTrue( ax_metadata['frey_minmax_compatible'])
self.assertFalse( dentry.parameters['is_compatible'])
def test_check_kumagai_delocalized(self):
de = DefectEntry( self.kumagai_vac, 0., parameters=self.kumagai_params)
de.parameters.update( {'is_compatible': True}) #needs to be initialized with this here for unittest
dc = DefectCompatibility( atomic_site_var_tol=13.3, atomic_site_minmax_tol=20.95)
dentry = dc.perform_kumagai( de)
# check case which fits under compatibility constraints
dentry = dc.check_kumagai_delocalized( dentry)
kumagai_delocal = dentry.parameters['delocalization_meta']['atomic_site']
self.assertTrue( kumagai_delocal['is_compatible'])
kumagai_md = kumagai_delocal['metadata']
true_variance = 13.262304401193997
true_minmax = 20.9435
self.assertTrue(kumagai_md['kumagai_variance_compatible'])
self.assertAlmostEqual(kumagai_md['kumagai_variance'], true_variance)
self.assertTrue(kumagai_md['kumagai_minmax_compatible'])
self.assertAlmostEqual(kumagai_md['kumagai_minmax_window'], true_minmax)
self.assertTrue( dentry.parameters['is_compatible'])
        # break variance compatibility
dc = DefectCompatibility( atomic_site_var_tol=0.1, atomic_site_minmax_tol=20.95)
de.parameters.update( {'is_compatible': True})
dentry = dc.perform_kumagai( de)
dentry = dc.check_kumagai_delocalized( dentry)
kumagai_delocal = dentry.parameters['delocalization_meta']['atomic_site']
self.assertFalse( kumagai_delocal['is_compatible'])
kumagai_md = kumagai_delocal['metadata']
self.assertFalse(kumagai_md['kumagai_variance_compatible'])
self.assertAlmostEqual(kumagai_md['kumagai_variance'], true_variance)
self.assertTrue(kumagai_md['kumagai_minmax_compatible'])
self.assertAlmostEqual(kumagai_md['kumagai_minmax_window'], true_minmax)
self.assertFalse( dentry.parameters['is_compatible'])
        # break minmax compatibility
dc = DefectCompatibility(atomic_site_var_tol=13.3, atomic_site_minmax_tol=0.5)
de.parameters.update({'is_compatible': True})
dentry = dc.perform_kumagai(de)
dentry = dc.check_kumagai_delocalized(dentry)
kumagai_delocal = dentry.parameters['delocalization_meta']['atomic_site']
self.assertFalse(kumagai_delocal['is_compatible'])
kumagai_md = kumagai_delocal['metadata']
self.assertTrue(kumagai_md['kumagai_variance_compatible'])
self.assertAlmostEqual(kumagai_md['kumagai_variance'], true_variance)
self.assertFalse(kumagai_md['kumagai_minmax_compatible'])
self.assertAlmostEqual(kumagai_md['kumagai_minmax_window'], true_minmax)
self.assertFalse(dentry.parameters['is_compatible'])
def test_check_final_relaxed_structure_delocalized(self):
# test structure delocalization analysis
# first test no movement in atoms
initial_defect_structure = self.vac.generate_defect_structure()
final_defect_structure = initial_defect_structure.copy()
sampling_radius = 4.55
defect_frac_sc_coords = self.vac.site.frac_coords[:]
params = {'initial_defect_structure': initial_defect_structure,
'final_defect_structure': final_defect_structure,
'sampling_radius': sampling_radius,
'defect_frac_sc_coords': defect_frac_sc_coords,
'is_compatible': True}
dentry = DefectEntry(self.vac, 0., corrections={}, parameters=params, entry_id=None)
dc = DefectCompatibility( tot_relax_tol=0.1, perc_relax_tol=0.1, defect_tot_relax_tol=0.1)
dentry = dc.check_final_relaxed_structure_delocalized( dentry)
struc_delocal = dentry.parameters['delocalization_meta']['structure_relax']
self.assertTrue( dentry.parameters['is_compatible'])
self.assertTrue( struc_delocal['is_compatible'])
self.assertTrue( struc_delocal['metadata']['structure_tot_relax_compatible'])
self.assertEqual( struc_delocal['metadata']['tot_relax_outside_rad'], 0.)
self.assertTrue( struc_delocal['metadata']['structure_perc_relax_compatible'])
self.assertEqual( struc_delocal['metadata']['perc_relax_outside_rad'], 0.)
self.assertEqual( len(struc_delocal['metadata']['full_structure_relax_data']), len(initial_defect_structure))
self.assertIsNone( struc_delocal['metadata']['defect_index'])
defect_delocal = dentry.parameters['delocalization_meta']['defectsite_relax']
self.assertTrue( defect_delocal['is_compatible'])
self.assertIsNone( defect_delocal['metadata']['relax_amount'])
# next test for when structure has delocalized outside of radius from defect
pert_struct_fin_struct = initial_defect_structure.copy()
pert_struct_fin_struct.perturb( 0.1)
dentry.parameters.update( {'final_defect_structure': pert_struct_fin_struct})
dentry = dc.check_final_relaxed_structure_delocalized( dentry)
struc_delocal = dentry.parameters['delocalization_meta']['structure_relax']
self.assertFalse( dentry.parameters['is_compatible'])
self.assertFalse( struc_delocal['is_compatible'])
self.assertFalse( struc_delocal['metadata']['structure_tot_relax_compatible'])
self.assertAlmostEqual( struc_delocal['metadata']['tot_relax_outside_rad'], 12.5)
self.assertFalse( struc_delocal['metadata']['structure_perc_relax_compatible'])
self.assertAlmostEqual( struc_delocal['metadata']['perc_relax_outside_rad'], 77.63975155)
# now test for when an interstitial defect has migrated too much
inter_def_site = PeriodicSite('H', [7.58857304, 11.70848069, 12.97817518],
self.vac.bulk_structure.lattice, to_unit_cell=True,
coords_are_cartesian=True)
inter = Interstitial(self.vac.bulk_structure, inter_def_site, charge=0)
initial_defect_structure = inter.generate_defect_structure()
final_defect_structure = initial_defect_structure.copy()
poss_deflist = sorted(
final_defect_structure.get_sites_in_sphere(inter.site.coords,
2, include_index=True), key=lambda x: x[1])
def_index = poss_deflist[0][2]
final_defect_structure.translate_sites(indices=[def_index],
vector=[0., 0., 0.008]) #fractional coords translation
defect_frac_sc_coords = inter_def_site.frac_coords[:]
params = {'initial_defect_structure': initial_defect_structure,
'final_defect_structure': final_defect_structure,
'sampling_radius': sampling_radius,
'defect_frac_sc_coords': defect_frac_sc_coords,
'is_compatible': True}
dentry = DefectEntry(inter, 0., corrections={}, parameters=params, entry_id=None)
dentry = dc.check_final_relaxed_structure_delocalized( dentry)
defect_delocal = dentry.parameters['delocalization_meta']['defectsite_relax']
self.assertFalse( defect_delocal['is_compatible'])
self.assertAlmostEqual( defect_delocal['metadata']['relax_amount'], 0.10836054)
if __name__ == "__main__":
unittest.main()
| mit | -3,447,476,710,233,725,400 | 52.228412 | 118 | 0.646397 | false |
glenngillen/dotfiles | .vscode/extensions/ms-python.python-2021.5.842923320/pythonFiles/lib/jedilsp/pygls/protocol.py | 1 | 29516 | ############################################################################
# Copyright(c) Open Law Library. All rights reserved. #
# See ThirdPartyNotices.txt in the project root for additional notices. #
# #
# Licensed under the Apache License, Version 2.0 (the "License") #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http: // www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
############################################################################
import asyncio
import enum
import functools
import json
import logging
import re
import sys
import traceback
import uuid
from collections import namedtuple
from concurrent.futures import Future
from functools import partial
from itertools import zip_longest
from typing import List
from pygls.capabilities import ServerCapabilitiesBuilder
from pygls.constants import ATTR_FEATURE_TYPE
from pygls.exceptions import (JsonRpcException, JsonRpcInternalError, JsonRpcInvalidParams,
JsonRpcMethodNotFound, JsonRpcRequestCancelled,
MethodTypeNotRegisteredError)
from pygls.feature_manager import (FeatureManager, assign_help_attrs, get_help_attrs,
is_thread_function)
from pygls.lsp import (JsonRPCNotification, JsonRPCRequestMessage, JsonRPCResponseMessage,
get_method_params_type, get_method_return_type, is_instance)
from pygls.lsp.methods import (CLIENT_REGISTER_CAPABILITY, CLIENT_UNREGISTER_CAPABILITY, EXIT,
TEXT_DOCUMENT_PUBLISH_DIAGNOSTICS, WINDOW_LOG_MESSAGE,
WINDOW_SHOW_MESSAGE, WORKSPACE_APPLY_EDIT, WORKSPACE_CONFIGURATION,
WORKSPACE_EXECUTE_COMMAND)
from pygls.lsp.types import (ApplyWorkspaceEditParams, ApplyWorkspaceEditResponse, Diagnostic,
DidChangeTextDocumentParams, DidChangeWorkspaceFoldersParams,
DidCloseTextDocumentParams, DidOpenTextDocumentParams,
ExecuteCommandParams, InitializeParams, InitializeResult,
LogMessageParams, MessageType, PublishDiagnosticsParams,
RegistrationParams, ShowMessageParams, UnregistrationParams,
WorkspaceEdit)
from pygls.uris import from_fs_path
from pygls.workspace import Workspace
logger = logging.getLogger(__name__)
def call_user_feature(base_func, method_name):
"""Wraps generic LSP features and calls user registered feature
immediately after it.
"""
@functools.wraps(base_func)
def decorator(self, *args, **kwargs):
ret_val = base_func(self, *args, **kwargs)
try:
user_func = self.fm.features[method_name]
self._execute_notification(user_func, *args, **kwargs)
except KeyError:
pass
except Exception:
logger.exception('Failed to handle user defined notification "%s": %s',
method_name, args)
return ret_val
return decorator
def dict_to_object(**d):
"""Create nested objects (namedtuple) from dict."""
type_name = d.pop('type_name', 'Object')
return json.loads(
json.dumps(d),
object_hook=lambda p: namedtuple(type_name, p.keys(), rename=True)(*p.values())
)
def default_serializer(o):
"""JSON serializer for complex objects."""
if isinstance(o, enum.Enum):
return o.value
return o.__dict__
def deserialize_command(params):
"""Function used to deserialize command arguments to a specific class
or a namedtuple."""
# TODO: Register/Look up custom command arguments' types
# Currently command parameters are type of 'any', but eventually we would
# want to register an argument type of our custom command and to
# deserialize it properly.
temp_obj = dict_to_object(**params, type_name='CommandParams')
params['arguments'] = getattr(temp_obj, 'arguments', None)
return params
def deserialize_params(data, get_params_type):
"""Function used to deserialize params to a specific class."""
try:
method = data['method']
params = data['params']
if not isinstance(params, dict):
return data
try:
params_type = get_params_type(method)
if params_type is None:
params_type = dict_to_object
elif params_type.__name__ == ExecuteCommandParams.__name__:
params = deserialize_command(params)
except MethodTypeNotRegisteredError:
params_type = dict_to_object
try:
data['params'] = params_type(**params)
except TypeError:
raise ValueError(
f'Could not instantiate "{params_type.__name__}" from params: {params}')
except KeyError:
pass
return data
def deserialize_message(data, get_params_type=get_method_params_type):
"""Function used to deserialize data received from client."""
if 'jsonrpc' in data:
try:
deserialize_params(data, get_params_type)
except ValueError:
raise JsonRpcInvalidParams()
if 'id' in data:
if 'method' in data:
return JsonRPCRequestMessage(**data)
else:
return JsonRPCResponseMessage(**data)
else:
return JsonRPCNotification(**data)
return data
def to_lsp_name(method_name):
"""Convert method name to LSP real name
Example:
text_document__did_open -> textDocument/didOpen
"""
method_name = method_name.replace('__', '/')
m_chars = list(method_name)
m_replaced = []
for i, ch in enumerate(m_chars):
if ch == '_':
continue
if m_chars[i - 1] == '_':
m_replaced.append(ch.capitalize())
continue
m_replaced.append(ch)
return ''.join(m_replaced)
class JsonRPCProtocol(asyncio.Protocol):
"""Json RPC protocol implementation using on top of `asyncio.Protocol`.
Specification of the protocol can be found here:
https://www.jsonrpc.org/specification
This class provides bidirectional communication which is needed for LSP.
"""
CANCEL_REQUEST = '$/cancelRequest'
CHARSET = 'utf-8'
CONTENT_TYPE = 'application/vscode-jsonrpc'
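    # Matches one wire message: any leading headers, the Content-Length header,
    # any remaining headers, a blank line, then the JSON body (starting with '{').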
MESSAGE_PATTERN = re.compile(
rb'^(?:[^\r\n]+\r\n)*'
+ rb'Content-Length: (?P<length>\d+)\r\n'
+ rb'(?:[^\r\n]+\r\n)*\r\n'
+ rb'(?P<body>{.*)',
re.DOTALL,
)
VERSION = '2.0'
def __init__(self, server):
self._server = server
self._shutdown = False
self._client_request_futures = {}
self._server_request_futures = {}
self.fm = FeatureManager(server)
self.transport = None
self._message_buf = []
def __call__(self):
return self
def _check_ret_type_and_send_response(self, method_name, method_type, msg_id, result):
"""Check if registered feature returns appropriate result type."""
if method_type == ATTR_FEATURE_TYPE:
return_type = get_method_return_type(method_name)
if not is_instance(result, return_type):
error = JsonRpcInternalError().to_dict()
self._send_response(msg_id, error=error)
self._send_response(msg_id, result=result)
def _execute_notification(self, handler, *params):
"""Executes notification message handler."""
if asyncio.iscoroutinefunction(handler):
future = asyncio.ensure_future(handler(*params))
future.add_done_callback(self._execute_notification_callback)
else:
if is_thread_function(handler):
self._server.thread_pool.apply_async(handler, (*params, ))
else:
handler(*params)
def _execute_notification_callback(self, future):
"""Success callback used for coroutine notification message."""
if future.exception():
error = JsonRpcInternalError.of(sys.exc_info()).to_dict()
logger.exception('Exception occurred in notification: "%s"', error)
# Revisit. Client does not support response with msg_id = None
# https://stackoverflow.com/questions/31091376/json-rpc-2-0-allow-notifications-to-have-an-error-response
# self._send_response(None, error=error)
def _execute_request(self, msg_id, handler, params):
"""Executes request message handler."""
method_name, method_type = get_help_attrs(handler)
if asyncio.iscoroutinefunction(handler):
future = asyncio.ensure_future(handler(params))
self._client_request_futures[msg_id] = future
future.add_done_callback(partial(self._execute_request_callback,
method_name, method_type, msg_id))
else:
# Can't be canceled
if is_thread_function(handler):
self._server.thread_pool.apply_async(
handler, (params, ),
callback=partial(
self._check_ret_type_and_send_response,
method_name, method_type, msg_id,
),
error_callback=partial(self._execute_request_err_callback, msg_id))
else:
self._check_ret_type_and_send_response(
method_name, method_type, msg_id, handler(params))
def _execute_request_callback(self, method_name, method_type, msg_id, future):
"""Success callback used for coroutine request message."""
try:
if not future.cancelled():
self._check_ret_type_and_send_response(
method_name, method_type, msg_id, result=future.result())
else:
self._send_response(
msg_id,
error=JsonRpcRequestCancelled(f'Request with id "{msg_id}" is canceled')
)
self._client_request_futures.pop(msg_id, None)
except Exception:
error = JsonRpcInternalError.of(sys.exc_info()).to_dict()
logger.exception('Exception occurred for message "%s": %s', msg_id, error)
self._send_response(msg_id, error=error)
def _execute_request_err_callback(self, msg_id, exc):
"""Error callback used for coroutine request message."""
exc_info = (type(exc), exc, None)
error = JsonRpcInternalError.of(exc_info).to_dict()
logger.exception('Exception occurred for message "%s": %s', msg_id, error)
self._send_response(msg_id, error=error)
def _get_handler(self, feature_name):
"""Returns builtin or used defined feature by name if exists."""
try:
return self.fm.builtin_features[feature_name]
except KeyError:
try:
return self.fm.features[feature_name]
except KeyError:
raise JsonRpcMethodNotFound.of(feature_name)
def _handle_cancel_notification(self, msg_id):
"""Handles a cancel notification from the client."""
future = self._client_request_futures.pop(msg_id, None)
if not future:
logger.warning('Cancel notification for unknown message id "%s"', msg_id)
return
# Will only work if the request hasn't started executing
if future.cancel():
logger.info('Cancelled request with id "%s"', msg_id)
def _handle_notification(self, method_name, params):
"""Handles a notification from the client."""
if method_name == JsonRPCProtocol.CANCEL_REQUEST:
self._handle_cancel_notification(params.id)
return
try:
handler = self._get_handler(method_name)
self._execute_notification(handler, params)
except KeyError:
logger.warning('Ignoring notification for unknown method "%s"', method_name)
except Exception:
logger.exception('Failed to handle notification "%s": %s', method_name, params)
def _handle_request(self, msg_id, method_name, params):
"""Handles a request from the client."""
try:
handler = self._get_handler(method_name)
# workspace/executeCommand is a special case
if method_name == WORKSPACE_EXECUTE_COMMAND:
handler(params, msg_id)
else:
self._execute_request(msg_id, handler, params)
except JsonRpcException as e:
logger.exception('Failed to handle request %s %s %s', msg_id, method_name, params)
self._send_response(msg_id, None, e.to_dict())
except Exception:
logger.exception('Failed to handle request %s %s %s', msg_id, method_name, params)
err = JsonRpcInternalError.of(sys.exc_info()).to_dict()
self._send_response(msg_id, None, err)
def _handle_response(self, msg_id, result=None, error=None):
"""Handles a response from the client."""
future = self._server_request_futures.pop(msg_id, None)
if not future:
logger.warning('Received response to unknown message id "%s"', msg_id)
return
if error is not None:
logger.debug('Received error response to message "%s": %s', msg_id, error)
future.set_exception(JsonRpcException.from_dict(error))
else:
logger.debug('Received result for message "%s": %s', msg_id, result)
future.set_result(result)
def _procedure_handler(self, message):
"""Delegates message to handlers depending on message type."""
if message.jsonrpc != JsonRPCProtocol.VERSION:
logger.warning('Unknown message "%s"', message)
return
if self._shutdown and getattr(message, 'method', '') != EXIT:
logger.warning('Server shutting down. No more requests!')
return
if isinstance(message, JsonRPCNotification):
logger.debug('Notification message received.')
self._handle_notification(message.method, message.params)
elif isinstance(message, JsonRPCResponseMessage):
logger.debug('Response message received.')
self._handle_response(message.id, message.result, message.error)
elif isinstance(message, JsonRPCRequestMessage):
logger.debug('Request message received.')
self._handle_request(message.id, message.method, message.params)
def _send_data(self, data):
"""Sends data to the client."""
if not data:
return
try:
body = data.json(by_alias=True, exclude_unset=True, encoder=default_serializer)
logger.info('Sending data: %s', body)
body = body.encode(self.CHARSET)
header = (
f'Content-Length: {len(body)}\r\n'
f'Content-Type: {self.CONTENT_TYPE}; charset={self.CHARSET}\r\n\r\n'
).encode(self.CHARSET)
self.transport.write(header + body)
except Exception:
logger.error(traceback.format_exc())
def _send_response(self, msg_id, result=None, error=None):
"""Sends a JSON RPC response to the client.
Args:
msg_id(str): Id from request
result(any): Result returned by handler
error(any): Error returned by handler
"""
response = JsonRPCResponseMessage(id=msg_id,
jsonrpc=JsonRPCProtocol.VERSION,
result=result,
error=error)
if error is None:
del response.error
else:
del response.result
self._send_data(response)
def connection_lost(self, exc):
"""Method from base class, called when connection is lost, in which case we
want to shutdown the server's process as well.
"""
logger.error('Connection to the client is lost! Shutting down the server.')
sys.exit(1)
def connection_made(self, transport: asyncio.BaseTransport):
"""Method from base class, called when connection is established"""
self.transport = transport
def data_received(self, data: bytes):
"""Method from base class, called when server receives the data"""
logger.debug('Received %r', data)
while len(data):
# Append the incoming chunk to the message buffer
self._message_buf.append(data)
# Look for the body of the message
message = b''.join(self._message_buf)
found = JsonRPCProtocol.MESSAGE_PATTERN.fullmatch(message)
body = found.group('body') if found else b''
length = int(found.group('length')) if found else 1
if len(body) < length:
# Message is incomplete; bail until more data arrives
return
# Message is complete;
# extract the body and any remaining data,
# and reset the buffer for the next message
body, data = body[:length], body[length:]
self._message_buf = []
# Parse the body
self._procedure_handler(
json.loads(body.decode(self.CHARSET),
object_hook=deserialize_message))
def notify(self, method: str, params=None):
"""Sends a JSON RPC notification to the client."""
logger.debug('Sending notification: "%s" %s', method, params)
request = JsonRPCNotification(
jsonrpc=JsonRPCProtocol.VERSION,
method=method,
params=params
)
self._send_data(request)
def send_request(self, method, params=None, callback=None):
"""Sends a JSON RPC request to the client.
Args:
method(str): The method name of the message to send
params(any): The payload of the message
Returns:
Future that will be resolved once a response has been received
"""
msg_id = str(uuid.uuid4())
logger.debug('Sending request with id "%s": %s %s', msg_id, method, params)
request = JsonRPCRequestMessage(
id=msg_id,
jsonrpc=JsonRPCProtocol.VERSION,
method=method,
params=params
)
future = Future()
# If callback function is given, call it when result is received
if callback:
def wrapper(future: Future):
result = future.result()
logger.info('Configuration for %s received: %s', params, result)
callback(result)
future.add_done_callback(wrapper)
self._server_request_futures[msg_id] = future
self._send_data(request)
return future
def send_request_async(self, method, params=None):
"""Calls `send_request` and wraps `concurrent.futures.Future` with
`asyncio.Future` so it can be used with `await` keyword.
Args:
method(str): The method name of the message to send
params(any): The payload of the message
Returns:
`asyncio.Future` that can be awaited
"""
return asyncio.wrap_future(self.send_request(method, params))
def thread(self):
"""Decorator that mark function to execute it in a thread."""
return self.fm.thread()
class LSPMeta(type):
"""Wraps LSP built-in features (`bf_` naming convention).
Built-in features cannot be overridden but user defined features with
the same LSP name will be called after them.
"""
def __new__(mcs, cls_name, cls_bases, cls):
for attr_name, attr_val in cls.items():
if callable(attr_val) and attr_name.startswith('bf_'):
method_name = to_lsp_name(attr_name[3:])
wrapped = call_user_feature(attr_val, method_name)
assign_help_attrs(wrapped, method_name, ATTR_FEATURE_TYPE)
cls[attr_name] = wrapped
logger.debug('Added decorator for lsp method: "%s"', attr_name)
return super().__new__(mcs, cls_name, cls_bases, cls)
class LanguageServerProtocol(JsonRPCProtocol, metaclass=LSPMeta):
"""A class that represents language server protocol.
It contains implementations for generic LSP features.
Attributes:
workspace(Workspace): In memory workspace
"""
def __init__(self, server):
super().__init__(server)
self.workspace = None
self._register_builtin_features()
def _register_builtin_features(self):
"""Registers generic LSP features from this class."""
for name in dir(self):
attr = getattr(self, name)
if callable(attr) and name.startswith('bf_'):
lsp_name = to_lsp_name(name[3:])
self.fm.add_builtin_feature(lsp_name, attr)
def apply_edit(self, edit: WorkspaceEdit, label: str = None) -> \
ApplyWorkspaceEditResponse:
"""Sends apply edit request to the client."""
return self.send_request(WORKSPACE_APPLY_EDIT,
ApplyWorkspaceEditParams(edit=edit, label=label))
def bf_exit(self, *args):
"""Stops the server process."""
self.transport.close()
sys.exit(0 if self._shutdown else 1)
def bf_initialize(self, params: InitializeParams):
"""Method that initializes language server.
It will compute and return server capabilities based on
registered features.
"""
logger.info('Language server initialized %s', params)
self._server.process_id = params.process_id
# Initialize server capabilities
self.client_capabilities = params.capabilities
self.server_capabilities = ServerCapabilitiesBuilder(
self.client_capabilities,
self.fm.features.keys(),
self.fm.feature_options,
list(self.fm.commands.keys()),
self._server.sync_kind,
).build()
logger.debug('Server capabilities: %s', self.server_capabilities.dict())
root_path = params.root_path
root_uri = params.root_uri or from_fs_path(root_path)
# Initialize the workspace
workspace_folders = params.workspace_folders or []
self.workspace = Workspace(root_uri, self._server.sync_kind, workspace_folders)
return InitializeResult(capabilities=self.server_capabilities)
def bf_initialized(self, *args):
"""Notification received when client and server are connected."""
pass
def bf_shutdown(self, *args):
"""Request from client which asks server to shutdown."""
for future in self._client_request_futures.values():
future.cancel()
for future in self._server_request_futures.values():
future.cancel()
self._shutdown = True
return None
def bf_text_document__did_change(self,
params: DidChangeTextDocumentParams):
"""Updates document's content.
        (Incremental (from server capabilities); not configurable for now)
"""
for change in params.content_changes:
self.workspace.update_document(params.text_document, change)
def bf_text_document__did_close(self,
params: DidCloseTextDocumentParams):
"""Removes document from workspace."""
self.workspace.remove_document(params.text_document.uri)
def bf_text_document__did_open(self,
params: DidOpenTextDocumentParams):
"""Puts document to the workspace."""
self.workspace.put_document(params.text_document)
def bf_workspace__did_change_workspace_folders(
self,
params: DidChangeWorkspaceFoldersParams):
"""Adds/Removes folders from the workspace."""
logger.info('Workspace folders changed: %s', params)
added_folders = params.event.added or []
removed_folders = params.event.removed or []
for f_add, f_remove in zip_longest(added_folders, removed_folders):
if f_add:
self.workspace.add_folder(f_add)
if f_remove:
self.workspace.remove_folder(f_remove.uri)
def bf_workspace__execute_command(self,
params: ExecuteCommandParams,
msg_id):
"""Executes commands with passed arguments and returns a value."""
cmd_handler = self.fm.commands[params.command]
self._execute_request(msg_id, cmd_handler, params.arguments)
def get_configuration(self, params, callback):
"""Sends configuration request to the client.
Args:
params(dict): ConfigurationParams from lsp specs
            callback(callable): Callable which will be called after
response from the client is received
Returns:
concurrent.futures.Future object that will be resolved once a
response has been received
"""
return self.send_request(WORKSPACE_CONFIGURATION, params, callback)
def get_configuration_async(self, params):
"""Calls `get_configuration` method but designed to use with coroutines
Args:
params(dict): ConfigurationParams from lsp specs
Returns:
asyncio.Future that can be awaited
"""
return asyncio.wrap_future(self.get_configuration(params, None))
def publish_diagnostics(self, doc_uri: str, diagnostics: List[Diagnostic]):
"""Sends diagnostic notification to the client."""
self.notify(TEXT_DOCUMENT_PUBLISH_DIAGNOSTICS,
PublishDiagnosticsParams(uri=doc_uri, diagnostics=diagnostics))
def register_capability(self, params: RegistrationParams, callback):
"""Register a new capability on the client.
Args:
params(RegistrationParams): RegistrationParams from lsp specs
            callback(callable): Callable which will be called after
response from the client is received
Returns:
concurrent.futures.Future object that will be resolved once a
response has been received
"""
return self.send_request(CLIENT_REGISTER_CAPABILITY, params, callback)
def register_capability_async(self, params: RegistrationParams):
"""Register a new capability on the client.
Args:
params(RegistrationParams): RegistrationParams from lsp specs
Returns:
asyncio.Future object that will be resolved once a
response has been received
"""
return asyncio.wrap_future(self.register_capability(params, None))
def show_message(self, message, msg_type=MessageType.Info):
"""Sends message to the client to display message."""
self.notify(WINDOW_SHOW_MESSAGE, ShowMessageParams(type=msg_type, message=message))
def show_message_log(self, message, msg_type=MessageType.Log):
"""Sends message to the client's output channel."""
self.notify(WINDOW_LOG_MESSAGE, LogMessageParams(type=msg_type, message=message))
def unregister_capability(self, params: UnregistrationParams, callback):
"""Unregister a new capability on the client.
Args:
params(UnregistrationParams): UnregistrationParams from lsp specs
callback(callable): Callabe which will be called after
response from the client is received
Returns:
concurrent.futures.Future object that will be resolved once a
response has been received
"""
return self.send_request(CLIENT_UNREGISTER_CAPABILITY, params, callback)
def unregister_capability_async(self, params: UnregistrationParams):
"""Unregister a new capability on the client.
Args:
params(UnregistrationParams): UnregistrationParams from lsp specs
callback(callable): Callabe which will be called after
response from the client is received
Returns:
asyncio.Future object that will be resolved once a
response has been received
"""
return asyncio.wrap_future(self.unregister_capability(params, None))
| mit | 1,117,900,913,965,417,300 | 38.09404 | 117 | 0.599336 | false |
stevepeak/debris | tests/test_object_from_routes.py | 1 | 3517 | import os
import json
import redis
import unittest
import bmemcached
import debris
class User(object):
__metaclass__ = debris.Object
def __init__(self, id, **data):
self.id = str(id)
self.name = data['name']
self.email = data['email']
class Tests(unittest.TestCase):
data = {
"1": {"id": "1", "name": "Elaina Dach", "email": "[email protected]"},
"2": {"id": "2", "name": "Lucas Jaskolski", "email": "[email protected]"},
"3": {"id": "3", "name": "Ms. Agustin Walter", "email": "[email protected]"},
"4": {"id": "4", "name": "Mr. Agustina Ward IV", "email": "[email protected]"},
"5": {"id": "5", "name": "Dr. Brock Sanford IV", "email": "[email protected]"},
"6": {"id": "6", "name": "Itzel Schimmel", "email": "[email protected]"},
"7": {"id": "7", "name": "Ms. Desiree Simonis", "email": "[email protected]"},
"8": {"id": "8", "name": "Gabe Kling", "email": "[email protected]"},
"9": {"id": "9", "name": "Raheem Kreiger", "email": "[email protected]"},
"10": {"id": "10", "name": "Maximilian Kulas", "email": "[email protected]"}
}
@classmethod
def setUpClass(self):
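        # Pre-seed redis and memcached with a few users; with the "get" order
        # configured below, the remaining ids are expected to come from postgresql.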
r = redis.Redis()
r.set("User.3", json.dumps(self.data["3"]))
r.set("User.6", json.dumps(self.data["6"]))
m = bmemcached.Client()
m.set("User.4", json.dumps(self.data["4"]))
m.set("User.9", json.dumps(self.data["9"]))
debris.config({
"services": {
"redis": {},
"memcached": {},
"postgresql": {},
"memory": {}
},
"objects": {
"User": {
"get": [
{"service": "redis"},
{"service": "memcached"},
{
"service": "postgresql",
"query": "select name, email from users where id=%(id)s limit 1;",
"query[]": "select id, name, email from unnest(%(id)s) as s inner join users u on u.id=s::int limit %(limit)s;"
}
],
"put": [
{
"service": "redis"
# "ttl": 3600 # future
}
]
}
}
})
def test_single(self):
for uid in xrange(1, 11):
u = User(uid)
self.assertEqual(u.id, str(uid))
self.assertEqual(u.name, self.data[str(uid)]["name"])
self.assertEqual(u.email, self.data[str(uid)]["email"])
self.assertIs(u, User(uid))
def test_multi(self):
ids = [1,4,5,7,9]
users = User(ids)
for u in users:
self.assertIsInstance(u, User)
self.assertIs(u, User(u.id))
self.assertIn(int(u.id), ids)
ids.remove(int(u.id))
self.assertEqual(u.name, self.data[u.id]["name"])
self.assertEqual(u.email, self.data[u.id]["email"])
def test_same_same(self):
"objects with same arguments will always be the same on ram"
self.assertIs(User(1), User(1))
self.assertIsNot(User(1), User(2))
self.assertIs(User(1), User(1, name="john"))
self.assertEqual(User(1, name="john", email="").name, "Elaina Dach")
| apache-2.0 | -4,427,584,538,980,168,700 | 36.414894 | 139 | 0.466022 | false |
simonpessemesse/seguinus | chambres/joursFerie.py | 1 | 5799 | #!/usr/bin/python
# -*- coding: iso-8859-1 -*-
def datepaques(an):
"""Calcule la date de Paques d'une annee donnee an (=nombre entier)"""
a = an // 100
b = an % 100
c = (3 * (a + 25)) // 4
d = (3 * (a + 25)) % 4
e = (8 * (a + 11)) // 25
f = (5 * a + b) % 19
g = (19 * f + c - e) % 30
h = (f + 11 * g) // 319
j = (60 * (5 - d) + b) // 4
k = (60 * (5 - d) + b) % 4
m = (2 * j - k - g + h) % 7
n = (g - h + m + 114) // 31
p = (g - h + m + 114) % 31
jour = p + 1
mois = n
return [jour, mois, an]
def datechaine(d, sep='/'):
"""Transforme une date liste=[j,m,a] en une date chaîne 'jj/mm/aaaa'"""
return ("%02d" + sep + "%02d" + sep + "%0004d") % (d[0], d[1], d[2])
def dateliste(c, sep='/'):
"""Transforme une date chaîne 'j/m/a' en une date liste [j,m,a]"""
j, m, a = c.split(sep)
return [int(j), int(m), int(a)]
def jourplus(d, n=1):
"""Donne la date du nième jour suivant d=[j, m, a] (n>=0)"""
j, m, a = d
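    # fm[m] = number of days in month m (index 0 unused)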
fm = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    if (a % 4 == 0 and a % 100 != 0) or a % 400 == 0: # leap year?
fm[2] = 29
for i in range(0, n):
j += 1
if j > fm[m]:
j = 1
m += 1
if m > 12:
m = 1
a += 1
return [j, m, a]
def jourmoins(d, n=-1):
"""Donne la date du nième jour précédent d=[j, m, a] (n<=0)"""
j, m, a = d
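    # fm[m] = number of days in month m (index 0 unused)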
fm = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    if (a % 4 == 0 and a % 100 != 0) or a % 400 == 0: # leap year?
fm[2] = 29
for i in range(0, abs(n)):
j -= 1
if j < 1:
m -= 1
if m < 1:
m = 12
a -= 1
j = fm[m]
return [j, m, a]
def numjoursem(d):
"""Donne le numero du jour de la semaine d'une date d=[j,m,a]
lundi=1, mardi=2, ..., dimanche=7
Algorithme de Maurice Kraitchik (1882–1957)"""
j, m, a = d
if m < 3:
m += 12
a -= 1
n = (j + 2 * m + (3 * (m + 1)) // 5 + a + a // 4 - a // 100 + a // 400 + 2) % 7
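    # map the raw value of n onto the Monday=1 ... Sunday=7 convention used by this module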
return [6, 7, 1, 2, 3, 4, 5][n]
def joursem(d):
"""Donne le jour de semaine en texte a partir de son numero
lundi=1, mardi=2, ..., dimanche=7"""
return ["", "lundi", "mardi", "mercredi", "jeudi", "vendredi", "samedi",
"dimanche"][numjoursem(d)]
def joursferiesliste(an, sd=0):
"""Liste des jours feries France en date-liste de l'annee an (nb entier).
sd=0 (=defaut): tous les jours feries.
sd=1: idem sans les sammedis-dimanches.
sd=2: tous + les 2 jours feries supplementaires d'Alsace-Moselle.
sd=3: idem sd=2 sans les samedis-dimanches"""
    F = [] # = list of holiday dates, each as a date-list d=[j,m,a]
    L = [] # = list of holiday labels
dp = datepaques(an)
# Jour de l'an
d = [1, 1, an]
nj = numjoursem(d)
if (sd == 0) or (sd == 1 and nj < 6) or (sd == 2) or (sd == 3 and nj < 6):
F.append(d)
L.append("Jour de l'an")
    # Vendredi saint (Good Friday, Alsace-Moselle only)
d = jourmoins(dp, -2)
if sd >= 2:
F.append(d)
L.append("Vendredi saint")
# Dimanche de Paques
d = dp
if (sd == 0) or (sd == 2):
F.append(d)
L.append("Dimanche de Paques")
# Lundi de Paques
d = jourplus(dp, +1)
F.append(d)
L.append("Lundi de Paques")
# Fete du travail
d = [1, 5, an]
nj = numjoursem(d)
if (sd == 0) or (sd == 1 and nj < 6) or (sd == 2) or (sd == 3 and nj < 6):
F.append(d)
L.append("Fete du travail")
# Victoire des allies 1945
d = [8, 5, an]
nj = numjoursem(d)
if (sd == 0) or (sd == 1 and nj < 6) or (sd == 2) or (sd == 3 and nj < 6):
F.append(d)
L.append("Victoire des allies 1945")
# Jeudi de l'Ascension
d = jourplus(dp, +39)
F.append(d)
L.append("Jeudi de l'Ascension")
# Dimanche de Pentecote
d = jourplus(dp, +49)
if (sd == 0) or (sd == 2):
F.append(d)
L.append("Dimanche de Pentecote")
# Lundi de Pentecote
d = jourplus(d, +1)
F.append(d)
L.append("Lundi de Pentecote")
# Fete Nationale
d = [14, 7, an]
nj = numjoursem(d)
if (sd == 0) or (sd == 1 and nj < 6) or (sd == 2) or (sd == 3 and nj < 6):
F.append(d)
L.append("Fete Nationale")
# Assomption
d = [15, 8, an]
nj = numjoursem(d)
if (sd == 0) or (sd == 1 and nj < 6) or (sd == 2) or (sd == 3 and nj < 6):
F.append(d)
L.append("Assomption")
# Toussaint
d = [1, 11, an]
nj = numjoursem(d)
if (sd == 0) or (sd == 1 and nj < 6) or (sd == 2) or (sd == 3 and nj < 6):
F.append(d)
L.append("Toussaint")
# Armistice 1918
d = [11, 11, an]
nj = numjoursem(d)
if (sd == 0) or (sd == 1 and nj < 6) or (sd == 2) or (sd == 3 and nj < 6):
F.append(d)
L.append("Armistice 1918")
# Jour de Noel
d = [25, 12, an]
nj = numjoursem(d)
if (sd == 0) or (sd == 1 and nj < 6) or (sd == 2) or (sd == 3 and nj < 6):
F.append(d)
L.append("Jour de Noel")
    # Saint Etienne (St. Stephen's Day, Alsace-Moselle only)
d = [26, 12, an]
nj = numjoursem(d)
if (sd == 2) or (sd == 3 and nj < 6):
F.append(d)
L.append("Saint Etienne")
return F, L
def estferie(d, sd=0):
"""estferie(d,sd=0): => dit si une date d=[j,m,a] donnee est feriee France
si la date est feriee, renvoie son libelle
sinon, renvoie une chaine vide"""
j, m, a = d.day, d.month, d.year
F, L = joursferiesliste(a, sd)
for i in range(0, len(F)):
if j == F[i][0] and m == F[i][1] and a == F[i][2]:
return L[i]
return ""
| gpl-2.0 | 4,589,852,060,434,486,000 | 27.24878 | 83 | 0.473493 | false |
anrl/gini3 | frontend/src/gbuilder/UI/MainWindow.py | 1 | 59141 | """The main window for gbuilder 2.0"""
import os, time, math, subprocess, sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from DropBar import *
from LogWindow import *
from Canvas import *
from Node import *
from Edge import *
from Configuration import *
from Core.globals import *
import thread
import socket
import atexit
import fcntl
import struct
from ExportWindow import *
from SendDirectoryWindow import *
from Properties import *
from Systray import *
from Network.gclient import *
from Core.Compiler import *
from TabWidget import *
from Tutorial import *
from TaskManagerWindow import *
import Core.globals
from Wireless.ClientAPI import *
from Wireless.ServerAPI import *
class MainWindow(Systray):
def __init__(self,app):
"""
Create a main window for the application
"""
defaultOptions["palette"]=app.palette()
Systray.__init__(self)
self.expansions=0
self.client=None
self.server=None
self.wserverIP=None
self.wserverPort=None
self.wgini_client=None
self.wgini_server=None
self.running=False
self.recovery=False
mainWidgets["main"] = self
mainWidgets["app"] = app
self.canvas = Canvas(self)
mainWidgets["canvas"] = self.canvas
self.tabWidget = TabWidget(self)
mainWidgets["tab"] = self.tabWidget
self.setCentralWidget(self.tabWidget)
#self.setCentralWidget(self.canvas)
self.createActions()
self.createMenus()
self.createToolBars()
self.createStatusBar()
self.createDockWindows()
self.createConfigWindows()
self.createPopupWindows()
self.createProgressBar()
self.newScene()
self.debugWindow.hide()
self.tm.hide()
self.routes.hide()
self.setVisible(True)
self.center()
self.saveLayout(environ["config"] + "defaultLayout")
self.setStyleSheet("""QToolTip {
background-color: black;
color: white;
border: black solid 1px
}""")
self.defaultLayout = True
if options["restore"]:
self.loadLayout()
self.defaultLayout = False
self.loadProject()
atexit.register(self.cleanup)
def center(self):
"""
Center the window.
"""
screen = QtGui.QDesktopWidget().screenGeometry()
rect = self.geometry()
self.move((screen.width()-rect.width())/2, (screen.height()-rect.height())/2)
self.show()
def getProject(self):
"""
Return the project.
"""
return self.project
def startTutorial(self):
"""
Start the interactive tutorial.
"""
if isinstance(mainWidgets["canvas"], Tutorial):
self.log.append("You are already doing the tutorial! If you would like to stop or restart, select 'Close' from the File menu now.")
return
if not self.closeTopology():
return
self.project = "Tutorial"
self.filename = ""
self.canvas = Tutorial(self)
mainWidgets["canvas"] = self.canvas
self.tabWidget.removeTab(0)
self.tabWidget.addTab(self.canvas, "Tutorial")
self.canvas.start()
for nodeType in nodeTypes.keys():
itemTypes = nodeTypes[nodeType]
itemTypes[nodeType] = 0
self.properties.clear()
self.interfaces.clear()
self.routes.clear()
self.resetLayout(True)
self.lockDocks()
def lockDocks(self):
"""
Lock the dock windows so they cannot be moved, closed or resized.
"""
self.tm.hide()
for dock in self.docks.values():
dock.setFeatures(dock.NoDockWidgetFeatures)
def unlockDocks(self):
"""
Unlock the dock windows.
"""
self.tm.show()
for dock in self.docks.values():
dock.setFeatures(dock.DockWidgetClosable | dock.DockWidgetMovable | dock.DockWidgetFloatable)
def faq(self):
"""
Open the FAQ in the default browser.
"""
olddir = os.getcwd()
os.chdir(environ["doc"])
loadpath = os.getcwd()
os.chdir(olddir)
if environ["os"] == "Windows":
url = QtCore.QUrl("file:///" + loadpath + "/FAQ.html")
else:
url = QtCore.QUrl("file://" + loadpath + "/FAQ.html")
QtGui.QDesktopServices.openUrl(url)
def closeTopology(self,usedyRouters=usedyRouters):
"""
Close the current topology.
"""
if self.running:
self.log.append("You cannot close a topology when one is still running!")
return False
scene = self.canvas.scene()
if scene and scene.items():
reply = QtGui.QMessageBox.warning(self, self.tr(Core.globals.PROG_NAME), self.tr("Save before closing?"), QtGui.QMessageBox.Yes | QtGui.QMessageBox.No | QtGui.QMessageBox.Cancel)
if reply == QtGui.QMessageBox.Yes:
if not self.saveTopology():
return False
elif reply == QtGui.QMessageBox.No:
pass
else:
return False
if isinstance(mainWidgets["canvas"], Tutorial):
self.canvas = Canvas(self)
mainWidgets["canvas"] = self.canvas
self.tabWidget.removeTab(0)
self.tabWidget.addTab(self.canvas, "Default Project")
self.project = ""
self.unlockDocks()
self.filename = ""
scene = Scene(self.canvas)
scene.setItemIndexMethod(QtGui.QGraphicsScene.NoIndex)
self.canvas.setScene(scene)
self.expansions = 0
for nodeType in nodeTypes.keys():
itemTypes = nodeTypes[nodeType]
itemTypes[nodeType] = 0
if usedyRouters:
for yunid, yun in usedyRouters.iteritems():
availableyRouters.append(yun)
availableyRouters.sort(key=lambda YunEntity: YunEntity['ID'])
usedyRouters = {}
self.properties.clear()
self.interfaces.clear()
self.routes.clear()
return True
def sendFile(self):
"""
Start a process to select and send a file to the server.
"""
if not self.server or self.server.poll() != None:
self.log.append("Please start the server first!")
return
if not self.client or not self.client.isConnected():
self.startClient()
filename = self.loadFile("All Files (*.*)")
if not filename:
return
self.sendWindow.setFilename(filename)
self.sendWindow.show()
def newScene(self):
"""
Close the current topology and create a new one.
"""
if self.running:
self.log.append("You cannot create a new topology when one is still running!")
return
if isinstance(mainWidgets["canvas"], Tutorial):
self.log.append("You cannot create a new topology during the tutorial!")
return
if not self.closeTopology(usedyRouters):
return
self.expandScene()
def expandScene(self):
"""
Expand the scene based on number of expansions.
"""
x = 175 + self.expansions * 30
y = 160 + self.expansions * 20
scene = self.canvas.scene()
item = QtGui.QGraphicsLineItem(-x, -y, x, y)
scene.addItem(item)
scene.removeItem(item)
self.expansions += 1
def newProject(self):
"""
Create a new project for device sharing.
"""
if self.running:
self.log.append("You cannot create a new project when one is still running!")
return
if isinstance(mainWidgets["canvas"], Tutorial):
self.log.append("You cannot create a new project during the tutorial!")
return
filename = self.saveFile("gproj")
if filename.isEmpty():
return
        projectname = str(filename).split("/")[-1].rsplit(".gproj", 1)[0]
from Core.Item import nodeTypes
for nodeType in nodeTypes:
if projectname.startswith(nodeType + "_"):
self.popup.setWindowTitle("Invalid Project Name")
self.popup.setText("You cannot name a project starting with the name of a device and underscore!")
self.popup.show()
return
self.project = str(filename)
file = QtCore.QFile(filename)
if not file.open(QtCore.QFile.WriteOnly | QtCore.QFile.Text):
QtGui.QMessageBox.warning(self, self.tr("Save Error"),
self.tr("Cannot write file %1:\n%2.")
.arg(self.filename)
.arg(file.errorString()))
return
out = QtCore.QTextStream(file)
QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
if options["username"]:
out << "username=" + options["username"] + "\n"
else:
self.log.append("Warning, no username is specified!")
if options["session"]:
out << "session=" + options["session"] + "\n"
elif options["server"]:
out << "server=" + options["server"] + "\n"
else:
self.log.append("Warning, no server or session name is specified!")
QtGui.QApplication.restoreOverrideCursor()
self.tabWidget.addTab(self.canvas, projectname)
def openProject(self):
"""
Load an existing project for device sharing.
"""
if self.running:
self.log.append("You cannot open a project when one is still running!")
return
if isinstance(mainWidgets["canvas"], Tutorial):
self.log.append("You cannot open a project during the tutorial!")
return
filename = self.loadFile("GPROJ (*.gproj)")
if filename.isEmpty():
return
self.project = str(filename)
self.loadProject()
def loadProject(self):
"""
Load project file data into options.
"""
if not self.project:
self.tabWidget.addTab(self.canvas, "Default Project")
return
file = QtCore.QFile(self.project)
if not file.open(QtCore.QFile.ReadOnly | QtCore.QFile.Text):
QtGui.QMessageBox.warning(self, self.tr("Load Error"),
self.tr("Cannot read file %1:\n%2.")
.arg(self.project)
.arg(file.errorString()))
self.tabWidget.addTab(self.canvas, "Default Project")
return
_in = QtCore.QTextStream(file)
QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
while not _in.atEnd():
line = str(_in.readLine())
option, value = line.split("=", 1)
options[option] = value
self.configWindow.updateSettings()
QtGui.QApplication.restoreOverrideCursor()
        projectname = self.project.split("/")[-1].rsplit(".gproj", 1)[0]
self.tabWidget.addTab(self.canvas, projectname)
def closeProject(self):
"""
Close the current project.
"""
if self.running:
self.log.append("You cannot close a project when it is still running!")
return
if isinstance(mainWidgets["canvas"], Tutorial):
self.log.append("You cannot close a project during the tutorial!")
return
if self.tabWidget.count() == 1:
self.tabWidget.addTab(self.canvas, "Default Project")
self.project = ""
else:
self.tabWidget.removeTab(0)
def export(self):
"""
Open an export window to generate an image from the canvas.
"""
self.exportWindow.show()
def startBackend(self):
"""
Start the backend server.
"""
self.startServer()
#self.startClient()
def setRecovery(self, recovery):
"""
Set the recovering state of the topology.
"""
self.recovery = recovery
def isRunning(self):
"""
Returns whether a topology is running or not.
"""
return self.running
def startWServer(self):
"""
Call the startwgini_server function
"""
self.startwgini_server()
#thread.join()
def startServer(self):
"""
Start the server backend of gbuilder, which controls running topologies.
"""
if self.server and self.server.poll() == None:
self.log.append("A server is already running!")
return
base = "ssh -t " + options["username"] + "@" + options["server"]
tunnel = " -L " + options["localPort"] + ":localhost:" + options["remotePort"]
server = "bash -c -i 'gserver " + options["remotePort"] + "' || sleep 5"
command = ""
gserver = "gserver"
if environ["os"] == "Windows":
startpath = environ["tmp"] + "gserver.start"
try:
startFile = open(startpath, "w")
startFile.write("echo -ne \"\\033]0;" + gserver + "\\007\"\n")
startFile.write(server)
startFile.close()
except:
self.log.append("Failed to write to start file!")
return
command += "putty -"
if options["session"]:
command += "load " + options["session"] + " -l " + options["username"] + " -t"
else:
command += base
command += tunnel + " -m \"" + startpath + "\""
else:
command += "rxvt -T \"" + gserver + "\" -e " + base + tunnel + " \" " + server + "\""
self.server = subprocess.Popen(str(command), shell=True,preexec_fn=os.setpgrp)
def startwgini_server(self):
"""
Start the wireless GINI server
"""
base = "ssh -t " + options["username"] + "@" + options["wserver"]
tunnel = " -L " + options["wlocalPort"] + ":localhost:" + options["wremotePort"]
server = "bash -c -i 'ServerAPI'"
command = ""
gserver = "WGINI Server"
command += "rxvt -T \"" + gserver + "\" -e " + base + tunnel + " \" " + server + "\""
sudoPassword = 'livelifeking123'
command1 = 'route add -net 192.168.0.0 gw 192.168.54.24 netmask 255.255.255.0 eth1' #change accordingly!
p = os.system('echo %s|sudo -S %s' % (sudoPassword, command1))
self.wgini_server = subprocess.Popen(str(command), shell=True,preexec_fn=os.setpgrp)
def startClient(self):
"""
Start the client of gbuilder, which communicates with the server.
"""
self.client = Client(self)
self.client.connectTo("localhost", int(options["localPort"]), 10)
#self.client.start()
mainWidgets["client"] = self.client
def compile(self):
"""
Compile the current topology.
"""
if self.running:
self.log.append("You cannot compile a topology when one is still running!")
return False
if self.saveTopology() == False:
return False
scene = self.canvas.scene()
compiler = Compiler(scene.items(), self.filename)
xmlFile = compiler.compile()
self.properties.display()
self.interfaces.display()
self.routes.display()
if xmlFile:
self.statusBar().showMessage(self.tr("Compiled '%1'").arg(xmlFile), 2000)
return True
else:
self.statusBar().showMessage(self.tr("Compile failed"), 2000)
return False
def run(self):
"""
Run the current topology.
"""
if not self.server or self.server.poll() != None:
self.log.append("Please start the server first!")
return
if not self.client or not self.client.isConnected():
self.startClient()
if self.isRunning() and not self.recovery:
self.log.append("A topology is already running, please stop it first!")
return
scene = self.canvas.scene()
items = scene.items()
if items:
if self.recovery:
self.recovery = False
elif options["autocompile"] and not self.compile():
return
else:
self.log.append("Please create or load a topology first!")
return
options["elasticMode"] = False
xmlFile = self.filename.replace(".gsav", ".xml")
if not os.access(xmlFile, os.F_OK):
self.log.append("Please compile the topology first!")
return
self.tm.show()
#self.progressBar.setValue(0)
self.client.process("file . " + xmlFile)
self.client.send("init " + self.project.split("/")[-1].strip(".gproj"))
self.client.send("canvas %d,%d" % (scene.width(), scene.height()))
for item in items:
if item.device_type == "Mobile" or item.device_type == "Wireless_access_point":
x = item.pos().x()
y = item.pos().y()
self.client.send("mobile %s %d,%d" % (item.getName(), x, y))
self.client.process("start " + xmlFile)
self.running = True
self.canvas.setAcceptDrops(False)
scene = self.canvas.scene()
scene.startRefresh()
scene.clearSelection()
self.properties.clear()
self.interfaces.clear()
self.routes.clear()
def stop(self):
"""
Stop the current running topology.
"""
if not self.server or self.server.poll() != None:
self.log.append("Please start the server first!")
return
if not self.client or not self.client.isConnected():
self.startClient()
if (self.wgini_client is not None) and usedyRouters:
status = self.wgini_client.Delete()
self.log.append(status)
if self.recovery:
self.recovery = False
scene = self.canvas.scene()
activeDevices = False
from Core.Device import Device
for item in scene.items():
if not isinstance(item, Device):
continue
if item.device_type == "Router":
item.stop()
if item.status:
activeDevices = True
if not activeDevices:
self.stopped()
elif not scene.isRefreshing():
scene.startRefresh()
self.client.process("stop")
def stopped(self):
"""
Handle a fully stopped topology.
"""
self.running = False
self.canvas.scene().stopRefresh()
self.tm.hide()
self.canvas.setAcceptDrops(True)
olddir = os.getcwd()
os.chdir(environ["tmp"])
for tmpfile in os.listdir("."):
if tmpfile.startswith("."):
continue
try:
os.remove(tmpfile)
except:
continue
os.chdir(olddir)
def loadFile(self, filetype):
"""
Load a file through a file dialog.
"""
# Qt is very picky in the filename structure but python is not, so we use python
# to form the correct path which will work for both Windows and Linux
olddir = os.getcwd()
os.chdir(environ["sav"])
loadpath = os.getcwd()
os.chdir(olddir)
filename = QtGui.QFileDialog.getOpenFileName(self,
self.tr("Choose a file name"), loadpath,
self.tr(filetype))
return filename
def loadrealTopologyfile(self, filetype):
"""
Load a real topology name
"""
self.popup.setWindowTitle("Topology Names")
self.popup.setText("You are about to select from the list:\n1.Ernet")
self.popup.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)
self.popup.show()
retval = self.popup.exec_()
        if retval == 1024:  # QMessageBox.Ok
olddir = os.getcwd()
os.chdir(environ["sav"])
os.chdir("exist")
loadpath = os.getcwd()
os.chdir(olddir)
filename = QtGui.QFileDialog.getOpenFileName(self,
self.tr("Choose a file name"), loadpath,
self.tr(filetype))
return filename
def loadrealTopology(self):
"""
Load a real topology.
"""
if self.running:
self.log.append("You cannot load a topology when one is still running!")
return
if isinstance(mainWidgets["canvas"], Tutorial):
self.log.append("You cannot load a topology during the tutorial!")
return
def loadIntoScene(line, *args):
scene = self.canvas.scene()
itemType,arg = line.split(":")
args = str(arg).strip("()").split(",")
if itemType == "edge":
source = scene.findItem(args[0])
dest = scene.findItem(args[1])
if source.device_type == "Mobile" or dest.device_type == "Mobile":
item = Wireless_Connection(source, dest)
else:
item = Connection(source, dest)
scene.addItem(item)
else:
devType, index = str(itemType).rsplit("_", 1)
item = deviceTypes[devType]()
item.setIndex(int(index))
scene.addItem(item)
item.setPos(float(args[0]), float(args[1]))
item.nudge()
return item
def loadProperties(itemDict):
currentInterfaceTarget = None
currentRouteSubnet = None
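            # The number of leading tabs on each saved line encodes nesting:
            # 1 = device property, 2 = interface target, 3 = interface property,
            # 4 = route subnet, 5 = route entry property.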
for item, properties in itemDict.iteritems():
for line in properties:
count = 0
while line.find("\t") == 0:
line = line[1:]
count += 1
prop, value = line.split(":", 1)
if count == 1:
item.setProperty(prop, value)
elif count == 2:
currentInterfaceTarget = self.canvas.scene().findItem(value)
elif count == 3:
item.setInterfaceProperty(prop, value, currentInterfaceTarget)
elif count == 4:
currentRouteSubnet = value
item.addEntry("", "", value, currentInterfaceTarget)
elif count == 5:
item.setEntryProperty(prop, value, currentRouteSubnet, currentInterfaceTarget)
filename = self.loadrealTopologyfile("GSAV (*.gsav)")
if not filename:
return
file = QtCore.QFile(filename)
if not file.open(QtCore.QFile.ReadOnly | QtCore.QFile.Text):
QtGui.QMessageBox.warning(self, self.tr("Load Error"),
self.tr("Cannot read file %1:\n%2.")
.arg(filename)
.arg(file.errorString()))
return
self.newScene()
self.filename = str(filename)
_in = QtCore.QTextStream(file)
yRouters = False
if "yRouter" in str(_in.readAll()):
yRouters = True
QtGui.QMessageBox.warning(self, self.tr("Load Warning"), self.tr("This file contains yRouters, which may not be physically available right now. Any yRouters no longer physically available will automatically be removed from the topology."))
if not self.wgini_server:
if not self.startWGINIClient():
QtGui.QMessageBox.warning(self, self.tr("Load Error"), self.tr("Cannot open file with yRouters without connecting to wireless server."))
return
if yRouters:
self.discover()
QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
itemDict = {}
_in.seek(0)
line = str(_in.readLine())
lines = []
while not _in.atEnd():
item=loadIntoScene(line)
line=str(_in.readLine())
while line.find("\t") == 0:
lines.append(line)
line=str(_in.readLine())
itemDict[item] = lines
lines = []
loadProperties(itemDict)
QtGui.QApplication.restoreOverrideCursor()
self.statusBar().showMessage(self.tr("Loaded '%1'").arg(filename), 2000)
def loadTopology(self):
"""
Load a topology.
"""
if self.running:
self.log.append("You cannot load a topology when one is still running!")
return
if isinstance(mainWidgets["canvas"], Tutorial):
self.log.append("You cannot load a topology during the tutorial!")
return
def loadIntoScene(line, *args):
scene = self.canvas.scene()
itemType,arg = line.split(":")
args = str(arg).strip("()").split(",")
if itemType == "edge":
source = scene.findItem(args[0])
dest = scene.findItem(args[1])
if source.device_type == "Mobile" or dest.device_type == "Mobile":
item = Wireless_Connection(source, dest)
else:
item = Connection(source, dest)
scene.addItem(item)
else:
devType, index = str(itemType).rsplit("_", 1)
item = deviceTypes[devType]()
item.setIndex(int(index))
scene.addItem(item)
item.setPos(float(args[0]), float(args[1]))
item.nudge()
return item
def loadProperties(itemDict):
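            # The property lines read below are tab-indented; the tab depth
            # encodes what each line describes:
            #   1 tab  -> a property of the device itself
            #   2 tabs -> the interface that the following lines apply to
            #   3 tabs -> a property of that interface
            #   4 tabs -> a route subnet (a new routing entry is created)
            #   5 tabs -> a property of that routing entry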
currentInterfaceTarget = None
currentRouteSubnet = None
for item, properties in itemDict.iteritems():
for line in properties:
count = 0
while line.find("\t") == 0:
line = line[1:]
count += 1
prop, value = line.split(":", 1)
if count == 1:
item.setProperty(prop, value)
elif count == 2:
currentInterfaceTarget = self.canvas.scene().findItem(value)
elif count == 3:
item.setInterfaceProperty(prop, value, currentInterfaceTarget)
elif count == 4:
currentRouteSubnet = value
item.addEntry("", "", value, currentInterfaceTarget)
elif count == 5:
item.setEntryProperty(prop, value, currentRouteSubnet, currentInterfaceTarget)
filename = self.loadFile("GSAV (*.gsav)")
if filename.isEmpty():
return
file = QtCore.QFile(filename)
if not file.open(QtCore.QFile.ReadOnly | QtCore.QFile.Text):
QtGui.QMessageBox.warning(self, self.tr("Load Error"),
self.tr("Cannot read file %1:\n%2.")
.arg(filename)
.arg(file.errorString()))
return
self.newScene()
self.filename = str(filename)
_in = QtCore.QTextStream(file)
yRouters = False
if "yRouter" in str(_in.readAll()):
yRouters = True
QtGui.QMessageBox.warning(self, self.tr("Load Warning"), self.tr("This file contains yRouters, which may not be physically available right now. Any yRouters no longer physically available will automatically be removed from the topology."))
if not self.wgini_server:
if not self.startWGINIClient():
QtGui.QMessageBox.warning(self, self.tr("Load Error"), self.tr("Cannot open file with yRouters without connecting to wireless server."))
return
if yRouters:
self.discover()
QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
itemDict = {}
_in.seek(0)
line = str(_in.readLine())
lines = []
while not _in.atEnd():
item=loadIntoScene(line)
line=str(_in.readLine())
while line.find("\t") == 0:
lines.append(line)
line=str(_in.readLine())
itemDict[item] = lines
lines = []
loadProperties(itemDict)
QtGui.QApplication.restoreOverrideCursor()
self.statusBar().showMessage(self.tr("Loaded '%1'").arg(filename), 2000)
def saveFile(self, filetype):
"""
Save a file through a file dialog.
"""
olddir = os.getcwd()
os.chdir(environ["sav"])
savepath = os.getcwd()
os.chdir(olddir)
filename = QtGui.QFileDialog.getSaveFileName(self,
self.tr("Choose a file name"), savepath,
self.tr(filetype.upper() + " (*.%s)" % filetype))
if filename.isEmpty():
return filename
if not filename.toLower().endsWith("." + filetype):
filename += "." + filetype
return filename
def saveTopologyAs(self):
"""
Save a topology under a given filename.
"""
if not self.canvas.scene().items():
self.log.append("There is nothing to save!")
return False
filename = self.saveFile("gsav")
if filename.isEmpty():
return False
self.filename = str(filename)
return self.saveTopology()
def saveTopology(self):
"""
Save a topology.
"""
scene=self.canvas.scene()
if not scene.items():
self.log.append("There is nothing to save!")
return False
        # no filename yet (first save), so prompt for one
if not self.filename:
return self.saveTopologyAs()
if usedyRouters:
self.popup.setWindowTitle("Save Warning")
self.popup.setText("This topology contains yRouters, which may not be available when loading the project later.")
self.popup.show()
file = QtCore.QFile(self.filename)
if not file.open(QtCore.QFile.WriteOnly | QtCore.QFile.Text):
QtGui.QMessageBox.warning(self, self.tr("Save Error"),
self.tr("Cannot write file %1:\n%2.")
.arg(self.filename)
.arg(file.errorString()))
return False
out = QtCore.QTextStream(file)
QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
outstring = ""
for item in scene.items():
if isinstance(item, Node):
outstring += item.toString()
for item in scene.items():
if isinstance(item, Edge):
outstring += item.toString()
out << outstring
QtGui.QApplication.restoreOverrideCursor()
self.statusBar().showMessage(self.tr("Saved '%1'").arg(self.filename), 2000)
return True
def copy(self):
"""
Copy selected text from the log into the paste buffer.
"""
self.log.copy()
def config(self):
"""
Open the options window.
"""
self.configWindow.show()
def startWGINIClient(self):
"""
Start wireless GINI client
"""
ok=None
if not self.server or self.server.poll() is not None:
self.log.append("You must start the main server before you can start the wireless client!")
elif not self.wgini_server or self.wgini_server.poll() is not None:
self.popup.setWindowTitle("Start server")
self.popup.setText("You must start the WGINI server first! Please start it from the system tray above canvas.")
self.popup.show()
elif self.wgini_client is not None:
self.log.append("Wireless GINI client is already running!")
else:
windowTitle = "Client data"
labelText = "Enter wireless client IP:"
text, ok = self.inputDialog.getText(self.inputDialog, windowTitle, labelText)
if ok:
if not text:
self.log.append("Nothing entered; starting wireless GINI client cancelled!")
return False
else:
ipportip=text
                    try:
                        socket.inet_aton(str(ipportip))
                    except socket.error:
                        self.log.append("Invalid entry, starting wireless GINI client cancelled.")
                        return False
self.wserverIP = get_ip_address('eth1')
self.wserverPort = '60000'
wclientIP = str(ipportip)
try:
self.wgini_client= WGINI_Client(self.wserverIP,self.wserverPort,wclientIP)
mainWidgets["wgini_client"]=self.wgini_client
self.log.append("Wireless GINI client connected at %s" %(ipportip[0]))
return True
except:
self.log.append("Starting wireless GINI client failed.")
return False
else:
return False
def get_ip_address(ifname):
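        # Asks the kernel (Linux-only) for the IPv4 address bound to the
        # interface named ``ifname`` (e.g. 'eth1') using the SIOCGIFADDR
        # ioctl; the packed 256-byte buffer is the ifreq structure the ioctl
        # expects, and bytes 20:24 of the reply hold the address.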
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
def discover(self):
"""
Add yRouters within range of topology
"""
if self.wgini_client is None:
self.log.append("You must connect to the wireless server before you can discover any new devices!")
if not self.startWGINIClient():
return
if self.isRunning() and not self.recovery:
self.log.append("A topology is currently running, please stop it before discovering any new devices!")
return
if isinstance(mainWidgets["canvas"],Tutorial):
self.log.append("You cannot discover any new devices during this tutorial!")
return
if not self.client or not self.client.isConnected():
self.startClient()
self.popup.setWindowTitle("yRouter discovery")
tempList=self.wgini_client.Check()
scene=self.canvas.scene()
removed = 0
        for yid, yun in usedyRouters.items():  # items() copies, so entries can be deleted below
if (yun not in tempList) or (yun in tempList and ((yun['MaxWlan'] - yun['CurrWlan']) == 0)):
self.popup.setText("yRouter_%d is no longer available. It will be removed from the topology." %yid)
self.popup.show()
yRouter=scene.findItem(self.device_type + "_%d" %yid)
yRouter.delete()
del usedyRouters[yid]
removed += 1
found=0
updated=0
for yun in tempList:
            openYun = yun['MaxWlan'] - yun['CurrWlan']
            if (openYun == 0):
if yun['ID'] in usedyRouters.keys():
self.popup.setText("yRouter_%d is no longer available. It will be removed from the topology." %yun['ID'])
self.popup.show()
yRouter = scene.findItem(self.device_type + "_%d" %yun['ID'])
yRouter.delete()
del usedyRouters[yun['ID']]
removed += 1
else:
continue
elif (yun['ID'] not in yRouters.keys()):
yRouters[yun['ID']] = yun
availableyRouters.append(yun)
found += 1
else:
if not yRouters[yun['ID']] == yun:
yRouters[yun['ID']] = yun
                    yRouter = next(y for y in availableyRouters if y['ID'] == yun['ID'])
                    availableyRouters.remove(yRouter)
availableyRouters.append(yun)
updated +=1
availableyRouters.sort(key=lambda YunEntity: YunEntity['ID'])
if found == 0 and updated == 0 and removed == 0:
text = "No yRouters found, updated, or removed."
else:
if found == 0:
text = "No yRouters found, "
else:
text = "%d yRouters found, " %found
if updated == 0:
text += "no yRouters updated, "
else:
text += "%d yRouters updated, " %updated
if removed == 0:
text += "no yRouters removed."
else:
text += "%d yRouters removed." %removed
if mainWidgets["drop"].commonDropArea.yRouterDrop is not None:
mainWidgets["drop"].commonDropArea.yRouterDrop.update()
if mainWidgets["drop"].netDropArea.yRouterDrop is not None:
mainWidgets["drop"].netDropArea.yRouterDrop.update()
self.log.append(text)
def arrange(self):
"""
Rearrange the topology based on the distance between nodes.
"""
if self.isRunning():
self.log.append("Cannot arrange while running!")
return
if isinstance(mainWidgets["canvas"], Tutorial):
mainWidgets["log"].append("Cannot arrange during the tutorial!")
return
options["elasticMode"] = not options["elasticMode"]
def about(self):
"""
Show the about window.
"""
QtGui.QMessageBox.about(self,
self.tr("About %s %s"
% (Core.globals.PROG_NAME,
Core.globals.PROG_VERSION)),
self.tr("<b>%s %s</b><br>Written by Daniel Ng<br>under the supervision of Muthucumaru Maheswaran"
% (Core.globals.PROG_NAME,
Core.globals.PROG_VERSION)))
def createActions(self):
"""
Create the actions used in the menus and toolbars.
"""
self.newSceneAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "new.png"), self.tr("&New"), self)
self.newSceneAct.setShortcut(self.tr("Ctrl+N"))
self.newSceneAct.setStatusTip(self.tr("Create a new topology"))
self.connect(self.newSceneAct, QtCore.SIGNAL("triggered()"), self.newScene)
self.closeAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "close.png"), self.tr("&Close"), self)
self.closeAct.setShortcut(self.tr("Ctrl+W"))
self.closeAct.setStatusTip(self.tr("Close the current topology"))
self.connect(self.closeAct, QtCore.SIGNAL("triggered()"), self.closeTopology)
self.loadAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "open.png"), self.tr("&Open..."), self)
self.loadAct.setShortcut(self.tr("Ctrl+O"))
self.loadAct.setStatusTip(self.tr("Load a topology"))
self.connect(self.loadAct, QtCore.SIGNAL("triggered()"), self.loadTopology)
self.saveAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "save.png"), self.tr("&Save..."), self)
self.saveAct.setShortcut(self.tr("Ctrl+S"))
self.saveAct.setStatusTip(self.tr("Save the current topology"))
self.connect(self.saveAct, QtCore.SIGNAL("triggered()"), self.saveTopology)
self.saveAsAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "save.png"), self.tr("&Save As..."), self)
self.saveAsAct.setShortcut(self.tr("Ctrl+Shift+S"))
self.saveAsAct.setStatusTip(self.tr("Save the current topology under a given filename"))
self.connect(self.saveAsAct, QtCore.SIGNAL("triggered()"), self.saveTopologyAs)
self.sendFileAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "send.png"), self.tr("&Send File..."), self)
self.sendFileAct.setShortcut(self.tr("Ctrl+F"))
self.sendFileAct.setStatusTip(self.tr("Choose a file to send to the server"))
self.connect(self.sendFileAct, QtCore.SIGNAL("triggered()"), self.sendFile)
self.exportAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "export.png"), self.tr("&Export..."), self)
self.exportAct.setShortcut(self.tr("Ctrl+P"))
self.exportAct.setStatusTip(self.tr("Export the current topology as an image"))
self.connect(self.exportAct, QtCore.SIGNAL("triggered()"), self.export)
self.copyAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "copy.png"), self.tr("&Copy"), self)
self.copyAct.setShortcut(self.tr("Ctrl+C"))
self.copyAct.setStatusTip(self.tr("Copy the selected text"))
self.connect(self.copyAct, QtCore.SIGNAL("triggered()"), self.copy)
self.startWGINIClientAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "startClient.png"), self.tr("&Start WGINI Client"), self)
self.startWGINIClientAct.setShortcut(self.tr("Ctrl+W"))
self.startWGINIClientAct.setStatusTip(self.tr("Start wireless GINI client"))
self.connect(self.startWGINIClientAct, QtCore.SIGNAL("triggered()"), self.startWGINIClient)
self.discoverAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "discover.png"), self.tr("&Discover"), self)
self.discoverAct.setShortcut(self.tr("Ctrl+Shift+Y"))
self.discoverAct.setStatusTip(self.tr("Discover nearby yRouters"))
self.connect(self.discoverAct, QtCore.SIGNAL("triggered()"), self.discover)
self.compileAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "compile.png"), self.tr("&Compile"), self)
self.compileAct.setShortcut(self.tr("Ctrl+E"))
self.compileAct.setStatusTip(self.tr("Compile the current topology"))
self.connect(self.compileAct, QtCore.SIGNAL("triggered()"), self.compile)
self.runAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "run.png"), self.tr("&Run"), self)
self.runAct.setShortcut(self.tr("Ctrl+R"))
self.runAct.setStatusTip(self.tr("Run the current topology"))
self.connect(self.runAct, QtCore.SIGNAL("triggered()"), self.run)
self.stopAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "stop.png"), self.tr("&Stop"), self)
self.stopAct.setShortcut(self.tr("Ctrl+D"))
self.stopAct.setStatusTip(self.tr("Stop the current topology"))
self.connect(self.stopAct, QtCore.SIGNAL("triggered()"), self.stop)
self.startServerAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "startServer.png"), self.tr("&Start Server"), self)
self.startServerAct.setShortcut(self.tr("Ctrl+T"))
self.startServerAct.setStatusTip(self.tr("Start the server"))
self.connect(self.startServerAct, QtCore.SIGNAL("triggered()"), self.startBackend)
self.startwServerAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "startServer.png"), self.tr("&Start WGINI Server"), self)
self.startwServerAct.setShortcut(self.tr("Ctrl+W"))
self.startwServerAct.setStatusTip(self.tr("Start the WGINI server"))
self.connect(self.startwServerAct, QtCore.SIGNAL("triggered()"), self.startWServer)
self.optionsAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "options.png"), self.tr("&Options"), self)
self.optionsAct.setShortcut(self.tr("F2"))
self.optionsAct.setStatusTip(self.tr("Show the options window"))
self.connect(self.optionsAct, QtCore.SIGNAL("triggered()"), self.config)
self.arrangeAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "arrange.png"), self.tr("&Arrange"), self)
self.arrangeAct.setShortcut(self.tr("Ctrl+A"))
self.arrangeAct.setStatusTip(self.tr("Arranges the current topology"))
self.connect(self.arrangeAct, QtCore.SIGNAL("triggered()"), self.arrange)
self.resetLayoutAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "layout.png"), self.tr("Reset Layout"), self)
self.resetLayoutAct.setStatusTip(self.tr("Reset dock windows to the saved layout"))
self.connect(self.resetLayoutAct, QtCore.SIGNAL("triggered()"), self.resetLayout)
self.expandSceneAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "expand.png"), self.tr("Expand Scene"), self)
self.expandSceneAct.setStatusTip(self.tr("Expand the scene for more space"))
self.connect(self.expandSceneAct, QtCore.SIGNAL("triggered()"), self.expandScene)
self.quitAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "exit.png"), self.tr("&Quit"), self)
self.quitAct.setShortcut(self.tr("Ctrl+Q"))
self.quitAct.setStatusTip(self.tr("Quit the application"))
self.connect(self.quitAct, QtCore.SIGNAL("triggered()"), self.quit)
self.newProjectAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "new.png"), self.tr("&New"), self)
self.newProjectAct.setShortcut(self.tr("Ctrl+Shift+N"))
self.newProjectAct.setStatusTip(self.tr("Create a new project"))
self.connect(self.newProjectAct, QtCore.SIGNAL("triggered()"), self.newProject)
self.openProjectAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "open.png"), self.tr("&Open"), self)
self.openProjectAct.setShortcut(self.tr("Ctrl+Shift+O"))
self.openProjectAct.setStatusTip(self.tr("Open an existing project"))
self.connect(self.openProjectAct, QtCore.SIGNAL("triggered()"), self.openProject)
self.closeProjectAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "close.png"), self.tr("&Close"), self)
self.closeProjectAct.setShortcut(self.tr("Ctrl+Shift+W"))
self.closeProjectAct.setStatusTip(self.tr("Close the current project"))
self.connect(self.closeProjectAct, QtCore.SIGNAL("triggered()"), self.closeProject)
self.tutorialAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "tutorial.png"), self.tr("&Tutorial"), self)
self.connect(self.tutorialAct, QtCore.SIGNAL("triggered()"), self.startTutorial)
self.faqAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "help.png"), self.tr("&FAQ"), self)
self.connect(self.faqAct, QtCore.SIGNAL("triggered()"), self.faq)
self.aboutAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "giniLogo.png"), self.tr("&About"), self)
self.aboutAct.setStatusTip(self.tr("Show the application's About box"))
self.connect(self.aboutAct, QtCore.SIGNAL("triggered()"), self.about)
self.aboutQtAct = QtGui.QAction(QtGui.QIcon(environ["images"] + "Qt-logo.png"), self.tr("About &Qt"), self)
self.aboutQtAct.setStatusTip(self.tr("Show the Qt library's About box"))
self.connect(self.aboutQtAct, QtCore.SIGNAL("triggered()"),
QtGui.qApp, QtCore.SLOT("aboutQt()"))
def createMenus(self):
"""
Create the menus with actions.
"""
self.fileMenu = self.menuBar().addMenu(self.tr("&File"))
self.fileMenu.setPalette(defaultOptions["palette"])
self.fileMenu.addAction(self.newSceneAct)
self.fileMenu.addAction(self.loadAct)
self.fileMenu.addAction(self.saveAct)
self.fileMenu.addAction(self.saveAsAct)
self.fileMenu.addAction(self.sendFileAct)
self.fileMenu.addAction(self.exportAct)
self.fileMenu.addAction(self.closeAct)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.quitAct)
self.projectMenu = self.menuBar().addMenu(self.tr("&Project"))
self.projectMenu.setPalette(defaultOptions["palette"])
self.projectMenu.addAction(self.newProjectAct)
self.projectMenu.addAction(self.openProjectAct)
self.projectMenu.addAction(self.closeProjectAct)
self.editMenu = self.menuBar().addMenu(self.tr("&Edit"))
self.editMenu.setPalette(defaultOptions["palette"])
self.editMenu.addAction(self.copyAct)
self.editMenu.addAction(self.arrangeAct)
self.editMenu.addAction(self.resetLayoutAct)
self.editMenu.addAction(self.expandSceneAct)
self.runMenu = self.menuBar().addMenu(self.tr("&Run"))
self.runMenu.setPalette(defaultOptions["palette"])
self.runMenu.addAction(self.startWGINIClientAct)
self.runMenu.addAction(self.discoverAct)
self.runMenu.addAction(self.compileAct)
self.runMenu.addAction(self.runAct)
self.runMenu.addAction(self.stopAct)
self.runMenu.addAction(self.startServerAct)
self.runMenu.addAction(self.startwServerAct)
self.configMenu = self.menuBar().addMenu(self.tr("&Config"))
self.configMenu.setPalette(defaultOptions["palette"])
self.configMenu.addAction(self.optionsAct)
self.menuBar().addSeparator()
self.helpMenu = self.menuBar().addMenu(self.tr("&Help"))
self.helpMenu.setPalette(defaultOptions["palette"])
self.helpMenu.addAction(self.tutorialAct)
self.helpMenu.addAction(self.faqAct)
self.helpMenu.addAction(self.aboutAct)
self.helpMenu.addAction(self.aboutQtAct)
def createPopupMenu(self):
"""
Customize the popup menu so that it is visible.
"""
popupMenu = QtGui.QMainWindow.createPopupMenu(self)
popupMenu.setPalette(defaultOptions["palette"])
return popupMenu
def createToolBars(self):
"""
Create the toolbars with actions.
"""
self.fileToolBar = self.addToolBar(self.tr("File"))
self.fileToolBar.addAction(self.newSceneAct)
self.fileToolBar.addAction(self.loadAct)
self.fileToolBar.addAction(self.saveAct)
self.fileToolBar.addAction(self.sendFileAct)
self.fileToolBar.addAction(self.exportAct)
self.fileToolBar.addAction(self.closeAct)
self.editToolBar = self.addToolBar(self.tr("Edit"))
self.editToolBar.addAction(self.copyAct)
self.editToolBar.addAction(self.resetLayoutAct)
self.editToolBar.addAction(self.expandSceneAct)
self.runToolBar = self.addToolBar(self.tr("Run"))
self.runToolBar.addAction(self.startServerAct)
self.runToolBar.addAction(self.discoverAct)
self.runToolBar.addAction(self.compileAct)
self.runToolBar.addAction(self.runAct)
self.runToolBar.addAction(self.stopAct)
self.runToolBar.addAction(self.startWGINIClientAct)
self.runToolBar.addAction(self.startwServerAct)
def createStatusBar(self):
"""
Create the status bar.
"""
self.statusBar().showMessage(self.tr("Ready"))
def createProgressBar(self):
"""
Create the progress bar.
"""
self.progressBar = QtGui.QProgressBar()
self.progressBar.setRange(0, 10000)
self.progressBar.setValue(0)
self.statusBar().addPermanentWidget(self.progressBar)
self.progressBar.show()
def getDeviceCount(self, alive=False):
"""
Return the interfaceable device count, or the alive ones if alive=True.
"""
from Core.Interfaceable import Interfaceable
count = 0.0
for item in self.canvas.scene().items():
if isinstance(item, Interfaceable):
if item.device_type != "REALM":
if alive and item.status in ("", "dead"):
continue
count += 1.0
return count
def updateProgressBar(self):
"""
Update the progress bar.
"""
maxVal = self.progressBar.maximum()
finalVal = (self.getDeviceCount(True) / self.getDeviceCount()) * maxVal
if finalVal < 0:
finalVal = 0
self.progressBar.setValue(finalVal)
if finalVal == 0:
return True
return False
def createConfigWindows(self):
"""
Create the options window.
"""
self.configWindow = ConfigDialog(self)
def createDockWindows(self):
"""
Create the dock windows: dropbar, log, properties, interfaces, routes.
"""
self.log = LogWindow(self.tr("Log"), self)
self.log.append("Welcome to %s %s!\n"
% (Core.globals.PROG_NAME, Core.globals.PROG_VERSION))
self.log.append("To open an existing topology, please click the 'Open' icon from the tray above canvas!")
self.log.setGeometry(QtCore.QRect(0, 0, 800, 114))
mainWidgets["log"] = self.log
self.dropbar = DropBar(self.tr("Components"), self)
self.dropbar.setGeometry(QtCore.QRect(0, 0, 129, 390))
mainWidgets["drop"] = self.dropbar
self.properties = PropertiesWindow(self)
self.properties.setWindowTitle("Properties")
mainWidgets["properties"] = self.properties
self.interfaces = InterfacesWindow(self)
self.interfaces.setWindowTitle("Interfaces")
mainWidgets["interfaces"] = self.interfaces
self.routes = RoutesWindow(self.interfaces, self)
self.routes.setWindowTitle("Routes")
mainWidgets["routes"] = self.routes
self.tm = TaskManagerWindow(self)
self.tm.setWindowTitle("Task Manager")
mainWidgets["tm"] = self.tm
self.debugWindow = QtGui.QDockWidget(self.tr("Debug Window"))
self.debugWindow.setWidget(DebugWindow(self))
self.docks = {"Components":self.dropbar, "Log":self.log, "Properties":self.properties, "Interfaces":self.interfaces, "Routes":self.routes, "Task Manager":self.tm}
self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.dropbar)
self.addDockWidget(QtCore.Qt.BottomDockWidgetArea, self.log)
self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.properties)
self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.interfaces)
self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.routes)
self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.tm)
self.tm.setFloating(True)
self.routes.setFloating(True)
self.debugWindow.setFloating(True)
def createPopupWindows(self):
"""
Create the different popup windows.
"""
self.exportWindow = ExportWindow(self)
self.sendWindow = SendDirectoryWindow(self)
self.popup = QtGui.QMessageBox(self)
self.popup.setIcon(QtGui.QMessageBox.Warning)
self.popup.setWindowIcon(QtGui.QIcon(environ["images"]+"giniLogo.png"))
mainWidgets["popup"] = self.popup
# specific dialog for client IP and port input
self.inputDialog = QtGui.QInputDialog(self)
self.inputDialog.setWindowIcon(QtGui.QIcon(environ["images"]+"giniLogo.png"))
mainWidgets["dialog"] = self.inputDialog
def keyPressEvent(self, event):
"""
Handle specific shortcut keys.
"""
key = event.key()
scene = self.canvas.scene()
if key == QtCore.Qt.Key_Escape:
scene.clearSelection()
elif key == QtCore.Qt.Key_Delete:
for item in scene.selectedItems():
item.delete()
elif key == QtCore.Qt.Key_C:
items = scene.items()
if not items:
return
selected = scene.selectedItems()
scene.clearSelection()
if selected:
index = items.index(selected[0])
items[index - 1].setSelected(True)
else:
items[0].setSelected(True)
elif key == QtCore.Qt.Key_H:
for dock in self.docks.values():
dock.setFloating(not dock.isFloating())
elif key == QtCore.Qt.Key_F10:
self.debugWindow.show()
def cleanup(self):
if self.server != None:
self.server.kill()
class DebugWindow(QtGui.QWidget):
def __init__(self, parent):
QtGui.QWidget.__init__(self)
self.parent = parent
self.layout = QtGui.QVBoxLayout()
#self.list = QtGui.QListWidget()
self.button = QtGui.QPushButton("Execute")
self.lineedit = QtGui.QLineEdit()
#self.layout.addWidget(self.list)
self.layout.addWidget(self.lineedit)
self.layout.addWidget(self.button)
self.setLayout(self.layout)
self.windows = {}
for key, val in mainWidgets.iteritems():
if key != "app" and key != "client" and val != None:
self.windows[key] = val
self.connect(self.button, QtCore.SIGNAL("clicked()"), self.execute)
def fill(self):
scene = mainWidgets["canvas"].scene()
for i in range(125):
scene.addItem(UML())
def execute(self):
canvas = mainWidgets["canvas"]
scene = canvas.scene()
#self.list.clear()
#for item in scene.items():
# try:
# self.list.addItem(item.getName() + "(%d,%d)" % (item.pos().x(), item.pos().y()))
# except:
# pass
#for name, window in self.windows.iteritems():
# self.list.addItem(name + ":" + str(window.geometry()))
text = str(self.lineedit.text())
if text:
lines = text.split(";")
for line in lines:
print eval(line)
if isinstance(canvas, Tutorial):
canvas.next()
| mit | 2,184,146,233,005,499,600 | 36.032797 | 248 | 0.561083 | false |
daonb/django-oklinks | oklinks/models.py | 1 | 1797 | import os
from django.db import models
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
from django.core.files.storage import FileSystemStorage
from django.conf import settings
from managers import LinksManager, LinkTypeManager
class LinkType(models.Model):
title = models.CharField(max_length=200, verbose_name=_('title'))
image = models.ImageField(upload_to='icons')
objects = LinkTypeManager()
class Meta:
verbose_name = _('link type')
verbose_name_plural = _('link types')
def __unicode__(self):
return self.title
class Link(models.Model):
url = models.URLField(verbose_name='URL', max_length=1000,
verify_exists=False)
title = models.CharField(max_length=200, verbose_name=_('title'))
content_type = models.ForeignKey(ContentType,
verbose_name=_('content type'),
related_name="content_type_set_for_%(class)s")
object_pk = models.TextField(_('object ID'))
content_object = generic.GenericForeignKey(ct_field="content_type", fk_field="object_pk")
link_type = models.ForeignKey(LinkType, default=LinkTypeManager.default)
active = models.BooleanField(default=True)
objects = LinksManager()
class Meta:
verbose_name = _('link')
verbose_name_plural = _('links')
def __unicode__(self):
return "%s: %s" % (self.title, self.url)
class LinkedFile(models.Model):
link = models.ForeignKey(Link, null=True, blank=True, default=None)
sha1 = models.CharField(max_length=1000, null=True)
last_updated = models.DateTimeField(auto_now=True, null=True)
link_file = models.FileField(upload_to='link_files')
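# Example usage (a sketch, not from the original module; ``some_object`` stands
# for any saved model instance): the generic foreign key lets a Link attach to
# an arbitrary object.
#
#   link = Link.objects.create(url='http://example.com/', title='Example',
#                              content_object=some_object)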
| bsd-3-clause | 1,131,894,914,825,749,500 | 35.673469 | 93 | 0.687257 | false |
aurelo/lphw | source/ex48/tests/ex48_tests.py | 1 | 1933 | from nose.tools import *
from source.ex48 import lexicon
def setup():
print "SETUP!"
def teardown():
print "TEAR DOWN!"
def test_basic():
print "I RAN!"
def test_directions():
assert_equal(lexicon.scan("north"), [("direction", "north")])
result = lexicon.scan("north south east")
assert_equal(result, [('direction', 'north'),
('direction', 'south'),
('direction', 'east')])
def test_verbs():
assert_equal(lexicon.scan('go'), [('verb', 'go')])
result = lexicon.scan("go kill EAT")
assert_equals(result, [('verb', 'go'),
('verb', "kill"),
('verb', "EAT")])
def test_stops():
assert_equals(lexicon.scan('the'), [('stop', 'the')])
result = lexicon.scan("the in of")
assert_equals(result, [('stop', 'the'),
('stop', 'in'),
('stop', 'of')])
def test_nouns():
assert_equals(lexicon.scan("bear"), [('noun', 'bear')])
result = lexicon.scan("bear princess")
assert_equals(result, [('noun', 'bear'),
('noun', 'princess')])
def test_number():
assert_equals(lexicon.scan('1234'), [('number', '1234')])
result = lexicon.scan('3 91234')
assert_equals(result, [('number', '3'),
('number', '91234')])
def test_errors():
assert_equals(lexicon.scan('ASDFADFASDF'), [('error', 'ASDFADFASDF')])
def test_sentence():
result = lexicon.scan('go north and kill the bear 9 times')
assert_equals(result, [('verb', 'go'),
('direction', 'north'),
('stop', 'and'),
('verb', 'kill'),
('stop', 'the'),
('noun', 'bear'),
('number', '9'),
('error', 'times')])
| mit | -657,871,801,129,793,000 | 27.426471 | 74 | 0.46508 | false |
mavroskardia/ilovemymudder | mudder/src/common/utils.py | 1 | 2484 | import sys
import os
import shutil
import io
import datetime
import time
import importlib
import threading
class HijackedStdOut(io.TextIOWrapper):
def write(self, s):
if s == '\n':
super().write(s)
return
s = '{:%Y.%m.%d %H:%M:%S} => {}'.format(datetime.datetime.now(), s)
super().write(s)
self.flush()
class HijackedStdIn(io.TextIOWrapper):
pass
class HijackedStdInBuffer(io.BufferedRandom):
pass
def hijack_stdout():
sys.stdout = HijackedStdOut(buffer=sys.stdout.buffer)
def hijack_stdin():
sys.stdin = HijackedStdIn(buffer=HijackedStdInBuffer())
return sys.stdin
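# Example usage (a sketch): once hijack_stdout() has been called, anything
# written to stdout goes through HijackedStdOut.write and gets a timestamp
# prefix, e.g.
#
#   hijack_stdout()
#   print('server started')   # -> "2015.03.14 09:26:53 => server started"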
def clean():
dirs_to_remove = []
for path, dirs, files in os.walk(os.curdir):
if path.endswith('__pycache__'):
dirs_to_remove.append(path)
for d in dirs_to_remove:
print('cleaning', d)
shutil.rmtree(d)
class ModuleWatcher(object):
watched_files = []
def __init__(self, module, interval=2):
self.module = module
self.interval = interval
self.thread = threading.Thread(target=self.loop)
self.done = False
if module.__file__ in ModuleWatcher.watched_files:
raise Exception('This file is already being watched')
ModuleWatcher.watched_files.append(module.__file__)
def watch(self, action=None):
self.action = action
self.filename = self.module.__file__
self.t0 = os.stat(self.filename).st_mtime
self.thread.start()
def loop(self):
while not self.done:
dt = os.stat(self.filename).st_mtime
if dt != self.t0:
print('{} was modified, reloading...'.format(self.module))
importlib.reload(self.module)
self.t0 = dt
if self.action: self.action()
time.sleep(self.interval)
def stop(self):
self.done = True
self.thread.join()
def watch_and_reload(module):
print('watching module {} for changes'.format(module))
mw = ModuleWatcher(module)
mw.watch()
return mw
if __name__ == '__main__':
if sys.argv[1] == 'clean':
clean()
elif sys.argv[1] == 'watch':
mod = importlib.import_module('.test', package='src.common')
watch = watch_and_reload(mod)
while True:
try:
time.sleep(1)
except KeyboardInterrupt:
watch.stop()
break
| mit | 892,111,590,700,638,300 | 23.84 | 75 | 0.579308 | false |
lefakkomies/pynomo | examples/ex_type1_nomo_1.py | 1 | 1854 | """
ex_type1_nomo_1.py
Simple nomogram of type 1: F1 + F2 + F3 = 0
Copyright (C) 2007-2009 Leif Roschier
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
sys.path.insert(0, "..")
# sys.path[:0] = [".."]
from pynomo.nomographer import Nomographer
N_params_1 = {
'u_min': 0.0,
'u_max': 10.0,
'function': lambda u: u,
'title': r'$u_1$',
'tick_levels': 2,
'tick_text_levels': 1,
}
N_params_2 = {
'u_min': 0.0,
'u_max': 10.0,
'function': lambda u: u,
'title': r'$u_2$',
'tick_levels': 2,
'tick_text_levels': 1,
}
N_params_3 = {
'u_min': 0.0,
'u_max': -10.0,
'function': lambda u: u,
'title': r'$u_3$',
'tick_levels': 2,
'tick_text_levels': 1,
}
block_1_params = {
'block_type': 'type_1',
'width': 10.0,
'height': 10.0,
'f1_params': N_params_1,
'f2_params': N_params_2,
'f3_params': N_params_3,
'isopleth_values': [[6, 2, 'x']],
}
main_params = {
'filename': 'ex_type1_nomo_1.pdf',
'paper_height': 10.0,
'paper_width': 10.0,
'block_params': [block_1_params],
'transformations': [('rotate', 0.01), ('scale paper',)],
'title_str': r'$u_1+u_2+u_3=0$',
'debug': False,
}
Nomographer(main_params)
| gpl-3.0 | 1,613,223,115,143,167,700 | 24.39726 | 73 | 0.599784 | false |
wdmchaft/taskcoach | taskcoachlib/gui/viewer/category.py | 1 | 10590 | # -*- coding: utf-8 -*-
'''
Task Coach - Your friendly task manager
Copyright (C) 2004-2010 Task Coach developers <[email protected]>
Copyright (C) 2008 Rob McMullen <[email protected]>
Copyright (C) 2008 Thomas Sonne Olesen <[email protected]>
Task Coach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Task Coach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import wx
from taskcoachlib import patterns, command, widgets
from taskcoachlib.domain import category
from taskcoachlib.i18n import _
from taskcoachlib.gui import uicommand, menu, dialog, render
import base, mixin
class BaseCategoryViewer(mixin.AttachmentDropTargetMixin,
mixin.SortableViewerForCategoriesMixin,
mixin.SearchableViewerMixin,
mixin.NoteColumnMixin, mixin.AttachmentColumnMixin,
base.SortableViewerWithColumns, base.TreeViewer):
SorterClass = category.CategorySorter
defaultTitle = _('Categories')
defaultBitmap = 'folder_blue_arrow_icon'
def __init__(self, *args, **kwargs):
kwargs.setdefault('settingsSection', 'categoryviewer')
super(BaseCategoryViewer, self).__init__(*args, **kwargs)
for eventType in (category.Category.subjectChangedEventType(),
category.Category.filterChangedEventType(),
category.Category.foregroundColorChangedEventType(),
category.Category.backgroundColorChangedEventType(),
category.Category.fontChangedEventType(),
category.Category.iconChangedEventType(),
category.Category.selectedIconChangedEventType(),
category.Category.exclusiveSubcategoriesChangedEventType()):
patterns.Publisher().registerObserver(self.onAttributeChanged,
eventType)
def onEveryMinute(self, event):
pass
def domainObjectsToView(self):
return self.taskFile.categories()
def curselectionIsInstanceOf(self, class_):
return class_ == category.Category
def createWidget(self):
imageList = self.createImageList() # Has side-effects
self._columns = self._createColumns()
itemPopupMenu = self.createCategoryPopupMenu()
columnPopupMenu = menu.ColumnPopupMenu(self)
self._popupMenus.extend([itemPopupMenu, columnPopupMenu])
widget = widgets.CheckTreeCtrl(self, self._columns,
self.onSelect, self.onCheck,
uicommand.CategoryEdit(viewer=self, categories=self.presentation()),
uicommand.CategoryDragAndDrop(viewer=self, categories=self.presentation()),
uicommand.EditSubject(viewer=self),
itemPopupMenu, columnPopupMenu,
**self.widgetCreationKeywordArguments())
widget.AssignImageList(imageList) # pylint: disable-msg=E1101
return widget
def createCategoryPopupMenu(self, localOnly=False):
return menu.CategoryPopupMenu(self.parent, self.settings, self.taskFile,
self, localOnly)
def _createColumns(self):
# pylint: disable-msg=W0142
kwargs = dict(renderDescriptionCallback=lambda category: category.description(),
resizeCallback=self.onResizeColumn)
columns = [widgets.Column('subject', _('Subject'),
category.Category.subjectChangedEventType(),
sortCallback=uicommand.ViewerSortByCommand(viewer=self,
value='subject'),
imageIndexCallback=self.subjectImageIndex,
width=self.getColumnWidth('subject'),
**kwargs),
widgets.Column('description', _('Description'),
category.Category.descriptionChangedEventType(),
sortCallback=uicommand.ViewerSortByCommand(viewer=self,
value='description'),
renderCallback=lambda category: category.description(),
width=self.getColumnWidth('description'),
**kwargs),
widgets.Column('attachments', '',
category.Category.attachmentsChangedEventType(), # pylint: disable-msg=E1101
width=self.getColumnWidth('attachments'),
alignment=wx.LIST_FORMAT_LEFT,
imageIndexCallback=self.attachmentImageIndex,
headerImageIndex=self.imageIndex['paperclip_icon'],
renderCallback=lambda category: '', **kwargs)]
if self.settings.getboolean('feature', 'notes'):
columns.append(widgets.Column('notes', '',
category.Category.notesChangedEventType(), # pylint: disable-msg=E1101
width=self.getColumnWidth('notes'),
alignment=wx.LIST_FORMAT_LEFT,
imageIndexCallback=self.noteImageIndex,
headerImageIndex=self.imageIndex['note_icon'],
renderCallback=lambda category: '', **kwargs))
return columns
def getImageIndices(self, category):
bitmap = category.icon(recursive=True)
bitmap_selected = category.selectedIcon(recursive=True) or bitmap
return self.imageIndex[bitmap] if bitmap else -1, self.imageIndex[bitmap_selected] if bitmap_selected else -1
def subjectImageIndex(self, category, which):
normalImageIndex, expandedImageIndex = self.getImageIndices(category)
expanded = which in [wx.TreeItemIcon_Expanded,
wx.TreeItemIcon_SelectedExpanded]
return expandedImageIndex if expanded else normalImageIndex
def createToolBarUICommands(self):
commands = super(BaseCategoryViewer, self).createToolBarUICommands()
commands[-2:-2] = [None,
uicommand.CategoryNew(categories=self.presentation(),
settings=self.settings),
uicommand.CategoryNewSubCategory(categories=self.presentation(),
viewer=self),
uicommand.CategoryEdit(categories=self.presentation(),
viewer=self),
uicommand.CategoryDelete(categories=self.presentation(),
viewer=self)]
return commands
def createColumnUICommands(self):
commands = [\
uicommand.ToggleAutoColumnResizing(viewer=self,
settings=self.settings),
None,
uicommand.ViewColumn(menuText=_('&Description'),
helpText=_('Show/hide description column'),
setting='description', viewer=self),
uicommand.ViewColumn(menuText=_('&Attachments'),
helpText=_('Show/hide attachments column'),
setting='attachments', viewer=self)]
if self.settings.getboolean('feature', 'notes'):
commands.append(uicommand.ViewColumn(menuText=_('&Notes'),
helpText=_('Show/hide notes column'),
setting='notes', viewer=self))
return commands
def onAttributeChanged(self, event):
if category.Category.exclusiveSubcategoriesChangedEventType() in event.types():
# We need to refresh the children of the changed item as well
# because they have to use radio buttons instead of checkboxes, or
# vice versa:
items = event.sources()
for item in items.copy():
items |= set(item.children())
self.widget.RefreshItems(*items)
else:
super(BaseCategoryViewer, self).onAttributeChanged(event)
def onCheck(self, event):
categoryToFilter = self.widget.GetItemPyData(event.GetItem())
categoryToFilter.setFiltered(event.GetItem().IsChecked())
self.onSelect(event) # Notify status bar
def getIsItemChecked(self, item):
if isinstance(item, category.Category):
return item.isFiltered()
return False
def getItemParentHasExclusiveChildren(self, item):
parent = item.parent()
return parent and parent.hasExclusiveSubcategories()
def isShowingCategories(self):
return True
def statusMessages(self):
status1 = _('Categories: %d selected, %d total')%\
(len(self.curselection()), len(self.presentation()))
filteredCategories = self.presentation().filteredCategories()
status2 = _('Status: %d filtered')%len(filteredCategories)
return status1, status2
def itemEditorClass(self):
return dialog.editor.CategoryEditor
def newItemCommandClass(self):
return command.NewCategoryCommand
def editItemCommandClass(self):
return command.EditCategoryCommand
def newSubItemCommandClass(self):
return command.NewSubCategoryCommand
def deleteItemCommandClass(self):
return command.DeleteCategoryCommand
class CategoryViewer(BaseCategoryViewer):
def __init__(self, *args, **kwargs):
super(CategoryViewer, self).__init__(*args, **kwargs)
self.filterUICommand.setChoice(self.settings.getboolean('view',
'categoryfiltermatchall'))
def getToolBarUICommands(self):
''' UI commands to put on the toolbar of this viewer. '''
toolBarUICommands = super(CategoryViewer, self).getToolBarUICommands()
toolBarUICommands.insert(-2, None) # Separator
# pylint: disable-msg=W0201
self.filterUICommand = \
uicommand.CategoryViewerFilterChoice(settings=self.settings)
toolBarUICommands.insert(-2, self.filterUICommand)
return toolBarUICommands
| gpl-3.0 | 5,805,815,929,631,619,000 | 46.066667 | 117 | 0.622285 | false |
singingwolfboy/flask-dance | flask_dance/consumer/storage/__init__.py | 1 | 1267 | from abc import ABCMeta, abstractmethod
class BaseStorage(metaclass=ABCMeta):
@abstractmethod
def get(self, blueprint):
return None
@abstractmethod
def set(self, blueprint, token):
return None
@abstractmethod
def delete(self, blueprint):
return None
class NullStorage(BaseStorage):
"""
This mock storage will never store OAuth tokens.
If you try to retrieve a token from this storage, you will always
get ``None``.
"""
def get(self, blueprint):
return None
def set(self, blueprint, token):
return None
def delete(self, blueprint):
return None
class MemoryStorage(BaseStorage):
"""
This mock storage stores an OAuth token in memory and so that it can
be retrieved later. Since the token is not persisted in any way,
this is mostly useful for writing automated tests.
The initializer accepts a ``token`` argument, for setting the
initial value of the token.
"""
def __init__(self, token=None, *args, **kwargs):
self.token = token
def get(self, blueprint):
return self.token
def set(self, blueprint, token):
self.token = token
def delete(self, blueprint):
self.token = None
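# Example usage (a sketch; make_github_blueprint is one of the contrib
# blueprint factories, and the ``storage`` argument is how a blueprint is
# wired to one of these storages):
#
#   from flask_dance.contrib.github import make_github_blueprint
#   blueprint = make_github_blueprint(
#       client_id="fake-id", client_secret="fake-secret",
#       storage=MemoryStorage({"access_token": "fake-token"}),
#   )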
| mit | 1,565,694,463,115,637,000 | 22.036364 | 72 | 0.648777 | false |
schenc3/InteractiveROSETTA | InteractiveROSETTA/scripts/io_tools/process_pdb.py | 1 | 117335 | #!/usr/bin/env python
# :noTabs=true:
"""
create a directory of the contents in a PDB
splits into chains, grouped chains (pairings parsed from the header),
individual HETATM PDB lines, sequence files (FASTA), etc.
verbosely:
This method behaves slightly differently for PDB files with multiple models,
    nucleic acids, duplicate complexes, etc., so if you are interested in the
    specifics, please read the source code
In short, it tries to write:
header.txt a text file of the header lines
numbering_map.txt a text file showing 1-indexed PDB numbering
clean.pdb only ATOM lines
hetatm.pdb only HETATM lines, may be split by resName
.fa sequences of all peptides and nucleic acids
subdirectories for each protein model/subunit (similar info)
does not write a text file for the "trailer" (lines after the coordinates)
converts lines (ATOM or HETATM) that can be converted based on <conversion>
(generally) and <na_conversion> (specific for nucleic acids, relevant
because RNA and DNA may require different treatment...)
!!!WARNING!!! defaults:
CSE CYS converts SelenoCysteinE to Cysteine
HYP PRO converts HYdroxylProline to Proline
CYD CYS does NOT convert "CYsteine Disulfides to Cysteine"
HIP HIS converts "HIP" to Histidine (~double protonation)
HID HIS converts "HID" to Histidine (~single delta N proton)
HIE HIS converts "HIE" to Histidine (~single epsilon N proton)
todo:
ensure hetatm conversions step, illegal atoms!!!!
alternate conformations (mostly supported now)
convert DNA to Rosetta DNA
convert ligands to params
convert water to TP3 (or TP5)
Methods for cleaning and parsing PDB files
Most importantly, the process_pdb method does a lot to clean PDB files
from RCSB
Requires:
Biopython
Author: Evan H. Baugh
"""
################################################################################
# IMPORT
# common modules
import optparse # for commandline
import os
import shutil
# bigger modules
from Bio.Alphabet import IUPAC
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.PDB import PDBIO
from Bio.PDB import PDBParser
from Bio.PDB import PPBuilder # no longer used, much faster way to do this
#from Bio.PDB import Select # no longer used...kinda hard to use
from Bio.PDB.Structure import Structure
from Bio.PDB.Model import Model
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
# custom modules
#from helper import get_root_filename , create_directory , copy_file
#from settings import SEQFORMAT , SEQFORMAT_EXTENSION_MAP , NUCLEIC_SEQUENCE_LETTERS_MAP , NA_CODES , three2one , WATER_CONVERSION , one2three , three2three , NA_CONVERSIONS_ROSETTA
#from biopython_settings import DNAAlphabet , ProteinAlphabet
#from seq_basics import write_sequence , get_sequence
################################################################################
# SETTINGS
# unholy settings...too many...
SEQFORMAT = 'fasta'
SEQFORMAT_EXTENSION_MAP = {
'fasta' : 'fa' ,
'genbank' : 'gb' ,
'clustal' : 'aln' ,
'stockholm' : 'ann'
}
# mapping for sequence file extensions
# update when you start using something new
SEQFORMAT_MAP = {
'fa' : 'fasta' ,
'fas' : 'fasta' ,
'fasta' : 'fasta' ,
'gbk' : 'genbank' ,
'gb' : 'genbank' ,
'aln' : 'clustal' ,
'ann' : 'stockholm' , # Pfam uses these
'pir' : 'pir' , # used by Modeller...
'sp' : 'swiss' # uniprot/swissprot
}
# Biopython Alphabets
DNAAlphabet = IUPAC.unambiguous_dna # requires Biopython
ProteinAlphabet = IUPAC.protein # requires Biopython
# simple amino acid mapping
one2three = {
'A':'ALA',
'C':'CYS',
'D':'ASP',
'E':'GLU',
'F':'PHE',
'G':'GLY',
'H':'HIS',
'I':'ILE',
'K':'LYS',
'L':'LEU',
'M':'MET',
'N':'ASN',
'P':'PRO',
'Q':'GLN',
'R':'ARG',
'S':'SER',
'T':'THR',
'V':'VAL',
'W':'TRP',
'Y':'TYR',
}
# the reverse of the above, plus pseudo-standard and alternate-case codes
three2one = {
'ALA':'A',
'CYS':'C',
'ASP':'D',
'GLU':'E',
'PHE':'F',
'GLY':'G',
'HIS':'H',
'ILE':'I',
'LYS':'K',
'LEU':'L',
'MET':'M',
'ASN':'N',
'PRO':'P',
'GLN':'Q',
'ARG':'R',
'SER':'S',
'THR':'T',
'VAL':'V',
'TRP':'W',
'TYR':'Y',
# pseudo-standard 3 letter codes for the standard aa
'CYD' : 'C' ,
'CYZ' : 'C' ,
'HID' : 'H' ,
'HIE' : 'H' ,
'HIP' : 'H' ,
# just to be sure...
'ala':'A',
'cys':'C',
'asp':'D',
'glu':'E',
'phe':'F',
'gly':'G',
'his':'H',
'ile':'I',
'lys':'K',
'leu':'L',
'met':'M',
'asn':'N',
'pro':'P',
'gln':'Q',
'arg':'R',
'ser':'S',
'thr':'T',
'val':'V',
'trp':'W',
'tyr':'Y',
'Ala':'A',
'Cys':'C',
'Asp':'D',
'Glu':'E',
'Phe':'F',
'Gly':'G',
'His':'H',
'Ile':'I',
'Lys':'K',
'Leu':'L',
'Met':'M',
'Asn':'N',
'Pro':'P',
'Gln':'Q',
'Arg':'R',
'Ser':'S',
'Thr':'T',
'Val':'V',
'Trp':'W',
'Tyr':'Y',
}
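# Example (a sketch): mapping a PDB residue name to its one-letter code,
# falling back to 'X' for residue names not in the table
#
#   one_letter = three2one.get( resName.strip() , 'X' )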
###################
# HETATM CONVERSION
# unsure about these...may include ATOM or HETATM lines...
#from http://astral.stanford.edu/scopseq-1.55/release-notes-1.55.txt
three2three = {
'AIB' : 'ALA' , # HETEROATOM THAT MAY BE TREATED AS ALA
'ALA' : 'ALA' , # ALA
'ALM' : 'ALA' , # HETEROATOM THAT MAY BE TREATED AS ALA
'AYA' : 'ALA' , # HETEROATOM THAT MAY BE TREATED AS ALA
'BNN' : 'ALA' , # HETEROATOM THAT MAY BE TREATED AS ALA
'CHG' : 'ALA' , # HETEROATOM THAT MAY BE TREATED AS ALA
'CSD' : 'ALA' , # HETEROATOM THAT MAY BE TREATED AS ALA
'DAL' : 'ALA' , # HETEROATOM THAT MAY BE TREATED AS ALA
'DHA' : 'ALA' , # HETEROATOM THAT MAY BE TREATED AS ALA
'DNP' : 'ALA' , # HETEROATOM THAT MAY BE TREATED AS ALA
'FLA' : 'ALA' , # HETEROATOM THAT MAY BE TREATED AS ALA
'HAC' : 'ALA' , # HETEROATOM THAT MAY BE TREATED AS ALA
'PRR' : 'ALA' , # HETEROATOM THAT MAY BE TREATED AS ALA
'MAA' : 'ALA' , # HETEROATOM THAT MAY BE TREATED AS ALA
'TIH' : 'ALA' , # HETEROATOM THAT MAY BE TREATED AS ALA
'TPQ' : 'ALA' , # HETEROATOM THAT MAY BE TREATED AS ALA
'0CS':'ALA', ## 0CS ALA 3-[(S)-HYDROPEROXYSULFINYL]-L-ALANINE
'2BU':'ALA', ## 2BU ADE
'2OP':'ALA', ## 2OP (2S 2-HYDROXYPROPANAL
'4F3':'ALA', ## 4F3 ALA CYCLIZED
'AA4':'ALA', ## AA4 ALA 2-AMINO-5-HYDROXYPENTANOIC ACID
'ABA':'ALA', ## ABA ALA ALPHA-AMINOBUTYRIC ACID
'AHO':'ALA', ## AHO ALA N-ACETYL-N-HYDROXY-L-ORNITHINE
'AHP':'ALA', ## AHP ALA 2-AMINO-HEPTANOIC ACID
'AIB':'ALA', ## AIB ALA ALPHA-AMINOISOBUTYRIC ACID
'ALA':'ALA', ## ALA ALA
'ALC':'ALA', ## ALC ALA 2-AMINO-3-CYCLOHEXYL-PROPIONIC ACID
'ALM':'ALA', ## ALM ALA 1-METHYL-ALANINAL
'ALN':'ALA', ## ALN ALA NAPHTHALEN-2-YL-3-ALANINE
'ALS':'ALA', ## ALS ALA 2-AMINO-3-OXO-4-SULFO-BUTYRIC ACID
'ALT':'ALA', ## ALT ALA THIOALANINE
'AP7':'ALA', ## AP7 ADE
'APH':'ALA', ## APH ALA P-AMIDINOPHENYL-3-ALANINE
'AYA':'ALA', ## AYA ALA N-ACETYLALANINE
'AYG':'ALA', ## AYG ALA
'B2A':'ALA', ## B2A ALA ALANINE BORONIC ACID
'B3A':'ALA', ## B3A ALA (3S)-3-AMINOBUTANOIC ACID
'BAL':'ALA', ## BAL ALA BETA-ALANINE
'BNN':'ALA', ## BNN ALA ACETYL-P-AMIDINOPHENYLALANINE
'C12':'ALA', ## C12 ALA
'C99':'ALA', ## C99 ALA
'CAB':'ALA', ## CAB ALA 4-CARBOXY-4-AMINOBUTANAL
'CH6':'ALA', ## CH6 ALA
'CH7':'ALA', ## CH7 ALA
'CLB':'ALA', ## CLB ALA
'CLD':'ALA', ## CLD ALA
'CLV':'ALA', ## CLV ALA
'CQR':'ALA', ## CQR ALA
'CR2':'ALA', ## CR2 ALA POST-TRANSLATIONAL MODIFICATION
'CR5':'ALA', ## CR5 ALA
'CR7':'ALA', ## CR7 ALA
'CR8':'ALA', ## CR8 ALA
'CRK':'ALA', ## CRK ALA
'CRW':'ALA', ## CRW ALA
'CRX':'ALA', ## CRX ALA
'CSI':'ALA', ## CSI ALA
'CSY':'ALA', ## CSY ALA MODIFIED TYROSINE COMPLEX
'CWR':'ALA', ## CWR ALA
'DAB':'ALA', ## DAB ALA 2,4-DIAMINOBUTYRIC ACID
'DAL':'ALA', ## DAL ALA D-ALANINE
'DAM':'ALA', ## DAM ALA N-METHYL-ALPHA-BETA-DEHYDROALANINE
'DBU':'ALA', ## DBU ALA (2E)-2-AMINOBUT-2-ENOIC ACID
'DBZ':'ALA', ## DBZ ALA 3-(BENZOYLAMINO)-L-ALANINE
'DHA':'ALA', ## DHA ALA 2-AMINO-ACRYLIC ACID
'DPP':'ALA', ## DPP ALA DIAMMINOPROPANOIC ACID
'FGL':'ALA', ## FGL ALA 2-AMINOPROPANEDIOIC ACID
'DYG':'ALA', ## DYG ALA
'GMU':'ALA', ## GMU 5MU
'HHK':'ALA', ## HHK ALA (2S)-2,8-DIAMINOOCTANOIC ACID
'HMF':'ALA', ## HMF ALA 2-AMINO-4-PHENYL-BUTYRIC ACID
'IAM':'ALA', ## IAM ALA 4-[(ISOPROPYLAMINO)METHYL]PHENYLALANINE
'IGL':'ALA', ## IGL ALA ALPHA-AMINO-2-INDANACETIC ACID
'KYN':'ALA', ## KYN ALA KYNURENINE
'LAL':'ALA', ## LAL ALA N,N-DIMETHYL-L-ALANINE
'MAA':'ALA', ## MAA ALA N-METHYLALANINE
'MDO':'ALA', ## MDO ALA
'MFC':'ALA', ## MFC ALA CYCLIZED
'NAL':'ALA', ## NAL ALA BETA-(2-NAPHTHYL)-ALANINE
'NAM':'ALA', ## NAM ALA NAM NAPTHYLAMINOALANINE
'NCB':'ALA', ## NCB ALA CHEMICAL MODIFICATION
'NRQ':'ALA', ## NRQ ALA
'NYC':'ALA', ## NYC ALA
'ORN':'ALA', ## ORN ALA ORNITHINE
'PIA':'ALA', ## PIA ALA FUSION OF ALA 65, TYR 66, GLY 67
'PRR':'ALA', ## PRR ALA 3-(METHYL-PYRIDINIUM)ALANINE
'PYA':'ALA', ## PYA ALA 3-(1,10-PHENANTHROL-2-YL)-L-ALANINE
'PYC':'ALA', ## PYC ALA PYRROLE-2-CARBOXYLATE
'PYT':'ALA', ## PYT ALA MODIFIED ALANINE
'RC7':'ALA', ## RC7 ALA
'SEC':'ALA', ## SEC ALA 2-AMINO-3-SELENINO-PROPIONIC ACID
'SIC':'ALA', ## SIC ALA
'SUI':'ALA', ## SUI ALA
'TIH':'ALA', ## TIH ALA BETA(2-THIENYL)ALANINE
'TPQ':'ALA', ## TPQ ALA 2,4,5-TRIHYDROXYPHENYLALANINE
'UMA':'ALA', ## UMA ALA
'X9Q':'ALA', ## X9Q ALA
'XXY':'ALA', ## XXY ALA
'XYG':'ALA', ## XYG ALA
# 'ASX' : 'B' , # why is this here!?
'BCS' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'BUC' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'C5C' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'C6C' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'CCS' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'CEA' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'CME' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'CSO' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'CSP' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'CSS' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'CSX' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'CSW' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'CY1' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'CY3' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'CYG' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'CYM' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'CYS' : 'CYS' , # CYS
'CYQ' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'DCY' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'EFC' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'OCS' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'PEC' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'PR3' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'SCH' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'SCS' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'SCY' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'SHC' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'SMC' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'SOC' : 'CYS' , # HETEROATOM THAT MAY BE TREATED AS CYS
'5CS':'CYS', ## 5CS CYS
'AGT':'CYS', ## AGT CYS AGMATINE-CYSTEINE ADDUCT
'BBC':'CYS', ## BBC CYS
'BCS':'CYS', ## BCS CYS BENZYLCYSTEINE
'BCX':'CYS', ## BCX CYS BETA-3-CYSTEINE
'BPE':'CYS', ## BPE CYS
'BUC':'CYS', ## BUC CYS S,S-BUTYLTHIOCYSTEINE
'C3Y':'CYS', ## C3Y CYS MODIFIED CYSTEINE
'C5C':'CYS', ## C5C CYS S-CYCLOPENTYL THIOCYSTEINE
'C6C':'CYS', ## C6C CYS S-CYCLOHEXYL THIOCYSTEINE
'CAF':'CYS', ## CAF CYS S-DIMETHYLARSINOYL-CYSTEINE
'CAS':'CYS', ## CAS CYS S-(DIMETHYLARSENIC)CYSTEINE
'CCS':'CYS', ## CCS CYS CARBOXYMETHYLATED CYSTEINE
'CME':'CYS', ## CME CYS MODIFIED CYSTEINE
'CML':'CYS', ## CML CYS
'CMT':'CYS', ## CMT CYS O-METHYLCYSTEINE
'CS1':'CYS', ## CS1 CYS S-(2-ANILINYL-SULFANYL)-CYSTEINE
'CS3':'CYS', ## CS3 CYS
'CS4':'CYS', ## CS4 CYS
'CSA':'CYS', ## CSA CYS S-ACETONYLCYSTEIN
'CSB':'CYS', ## CSB CYS CYS BOUND TO LEAD ION
'CSD':'CYS', ## CSD CYS 3-SULFINOALANINE
'CSE':'CYS', ## CSE CYS SELENOCYSTEINE
'CSO':'CYS', ## CSO CYS INE S-HYDROXYCYSTEINE
'CSR':'CYS', ## CSR CYS S-ARSONOCYSTEINE
'CSS':'CYS', ## CSS CYS 1,3-THIAZOLE-4-CARBOXYLIC ACID
'CSU':'CYS', ## CSU CYS CYSTEINE-S-SULFONIC ACID
'CSW':'CYS', ## CSW CYS CYSTEINE-S-DIOXIDE
'CSX':'CYS', ## CSX CYS OXOCYSTEINE
'CSZ':'CYS', ## CSZ CYS S-SELANYL CYSTEINE
'CY0':'CYS', ## CY0 CYS MODIFIED CYSTEINE
'CY1':'CYS', ## CY1 CYS ACETAMIDOMETHYLCYSTEINE
'CY3':'CYS', ## CY3 CYS 2-AMINO-3-MERCAPTO-PROPIONAMIDE
'CY4':'CYS', ## CY4 CYS S-BUTYRYL-CYSTEIN
'CY7':'CYS', ## CY7 CYS MODIFIED CYSTEINE
#'CYD':'CYS', ## CYD CYS
'CYF':'CYS', ## CYF CYS FLUORESCEIN LABELLED CYS380 (P14)
'CYG':'CYS', ## CYG CYS
'CYQ':'CYS', ## CYQ CYS
'CYR':'CYS', ## CYR CYS
'CYS':'CYS', ## CYS CYS
'CZ2':'CYS', ## CZ2 CYS S-(DIHYDROXYARSINO)CYSTEINE
'CZZ':'CYS', ## CZZ CYS THIARSAHYDROXY-CYSTEINE
'DCY':'CYS', ## DCY CYS D-CYSTEINE
'DYS':'CYS', ## DYS CYS
'EFC':'CYS', ## EFC CYS S,S-(2-FLUOROETHYL)THIOCYSTEINE
'FOE':'CYS', ## FOE CYS
'GT9':'CYS', ## GT9 CYS SG ALKYLATED
'GYC':'CYS', ## GYC CYS
'HTI':'CYS', ## HTI CYS
'KOR':'CYS', ## KOR CYS MODIFIED CYSTEINE
'M0H':'CYS', ## M0H CYS S-(HYDROXYMETHYL)-L-CYSTEINE
'MCS':'CYS', ## MCS CYS MALONYLCYSTEINE
'NPH':'CYS', ## NPH CYS
'NYS':'CYS', ## NYS CYS
'OCS':'CYS', ## OCS CYS CYSTEINE SULFONIC ACID
'OCY':'CYS', ## OCY CYS HYDROXYETHYLCYSTEINE
'P1L':'CYS', ## P1L CYS S-PALMITOYL CYSTEINE
'PBB':'CYS', ## PBB CYS S-(4-BROMOBENZYL)CYSTEINE
'PEC':'CYS', ## PEC CYS S,S-PENTYLTHIOCYSTEINE
'PR3':'CYS', ## PR3 CYS INE DTT-CYSTEINE
'PYX':'CYS', ## PYX CYS S-[S-THIOPYRIDOXAMINYL]CYSTEINE
'R1A':'CYS', ## R1A CYS
'R1B':'CYS', ## R1B CYS
'R1F':'CYS', ## R1F CYS
'R7A':'CYS', ## R7A CYS
'RCY':'CYS', ## RCY CYS
'SAH':'CYS', ## SAH CYS S-ADENOSYL-L-HOMOCYSTEINE
'SC2':'CYS', ## SC2 CYS N-ACETYL-L-CYSTEINE
'SCH':'CYS', ## SCH CYS S-METHYL THIOCYSTEINE GROUP
'SCS':'CYS', ## SCS CYS MODIFIED CYSTEINE
'SCY':'CYS', ## SCY CYS CETYLATED CYSTEINE
'SHC':'CYS', ## SHC CYS S-HEXYLCYSTEINE
'SMC':'CYS', ## SMC CYS POST-TRANSLATIONAL MODIFICATION
'SNC':'CYS', ## SNC CYS S-NITROSO CYSTEINE
'SOC':'CYS', ## SOC CYS DIOXYSELENOCYSTEINE
'TEE':'CYS', ## TEE CYS POST-TRANSLATIONAL MODIFICATION
'TNB':'CYS', ## TNB CYS S-(2,3,6-TRINITROPHENYL)CYSTEINE
'TYX':'CYS', ## TYX CYS S-(2-ANILINO-2-OXOETHYL)-L-CYSTEINE
'YCM':'CYS', ## YCM CYS S-(2-AMINO-2-OXOETHYL)-L-CYSTEINE
'2AS' : 'ASP' , # HETEROATOM THAT MAY BE TREATED AS ASP
'ASA' : 'ASP' , # HETEROATOM THAT MAY BE TREATED AS ASP
'ASB' : 'ASP' , # HETEROATOM THAT MAY BE TREATED AS ASP
'ASK' : 'ASP' , # HETEROATOM THAT MAY BE TREATED AS ASP
'ASL' : 'ASP' , # HETEROATOM THAT MAY BE TREATED AS ASP
'ASP' : 'ASP' , # ASP
'ASQ' : 'ASP' , # HETEROATOM THAT MAY BE TREATED AS ASP
'BHD' : 'ASP' , # HETEROATOM THAT MAY BE TREATED AS ASP
'DAS' : 'ASP' , # HETEROATOM THAT MAY BE TREATED AS ASP
'DSP' : 'ASP' , # HETEROATOM THAT MAY BE TREATED AS ASP
'3MD':'ASP', ## 3MD ASP 2S,3S-3-METHYLASPARTIC ACID
'A0A':'ASP', ## A0A ASP ASPARTYL-FORMYL MIXED ANHYDRIDE
'ACB':'ASP', ## ACB ASP 3-METHYL-ASPARTIC ACID
'AKL':'ASP', ## AKL ASP 3-AMINO-5-CHLORO-4-OXOPENTANOIC ACID
'ASA':'ASP', ## ASA ASP ASPARTIC ALDEHYDE
'ASB':'ASP', ## ASB ASP ASPARTIC ACID-4-CARBOXYETHYL ESTER
'ASI':'ASP', ## ASI ASP L-ISO-ASPARTATE
'ASK':'ASP', ## ASK ASP DEHYDROXYMETHYLASPARTIC ACID
'ASL':'ASP', ## ASL ASP ASPARTIC ACID-4-CARBOXYETHYL ESTER
'ASP':'ASP', ## ASP ASP
'B3D':'ASP', ## B3D ASP 3-AMINOPENTANEDIOIC ACID
'BFD':'ASP', ## BFD ASP ASPARTATE BERYLLIUM FLUORIDE
'BHD':'ASP', ## BHD ASP BETA-HYDROXYASPARTIC ACID
'DAS':'ASP', ## DAS ASP D-ASPARTIC ACID
'DMK':'ASP', ## DMK ASP DIMETHYL ASPARTIC ACID
'IAS':'ASP', ## IAS ASP ASPARTYL GROUP
'OHS':'ASP', ## OHS ASP O-(CARBOXYSULFANYL)-4-OXO-L-HOMOSERINE
'OXX':'ASP', ## OXX ASP OXALYL-ASPARTYL ANHYDRIDE
'PHD':'ASP', ## PHD ASP 2-AMINO-4-OXO-4-PHOSPHONOOXY-BUTYRIC ACID
'SNN':'ASP', ## SNN ASP POST-TRANSLATIONAL MODIFICATION
'5HP' : 'GLU' , # HETEROATOM THAT MAY BE TREATED AS GLU
'CGU' : 'GLU' , # HETEROATOM THAT MAY BE TREATED AS GLU
'DGL' : 'GLU' , # HETEROATOM THAT MAY BE TREATED AS GLU
'GGL' : 'GLU' , # HETEROATOM THAT MAY BE TREATED AS GLU
'GLU' : 'GLU' , # GLU
'GMA' : 'GLU' , # HETEROATOM THAT MAY BE TREATED AS GLU
'PCA' : 'GLU' , # HETEROATOM THAT MAY BE TREATED AS GLU
'AB7':'GLU', ## AB7 GLU ALPHA-AMINOBUTYRIC ACID
'AR4':'GLU', ## AR4 GLU
'B3E':'GLU', ## B3E GLU (3S)-3-AMINOHEXANEDIOIC ACID
'CGU':'GLU', ## CGU GLU CARBOXYLATION OF THE CG ATOM
'DGL':'GLU', ## DGL GLU D-GLU
'GLU':'GLU', ## GLU GLU
'GMA':'GLU', ## GMA GLU 1-AMIDO-GLUTAMIC ACID
'ILG':'GLU', ## ILG GLU GLU LINKED TO NEXT RESIDUE VIA CG
'LME':'GLU', ## LME GLU (3R)-3-METHYL-L-GLUTAMIC ACID
'MEG':'GLU', ## MEG GLU (2S,3R)-3-METHYL-GLUTAMIC ACID
'DAH' : 'PHE' , # HETEROATOM THAT MAY BE TREATED AS PHE
'DPN' : 'PHE' , # HETEROATOM THAT MAY BE TREATED AS PHE
'HPQ' : 'PHE' , # HETEROATOM THAT MAY BE TREATED AS PHE
'PHE' : 'PHE' , # PHE
'PHI' : 'PHE' , # HETEROATOM THAT MAY BE TREATED AS PHE
'PHL' : 'PHE' , # HETEROATOM THAT MAY BE TREATED AS PHE
'1PA':'PHE', ## 1PA PHE PHENYLMETHYLACETIC ACID ALANINE
'23F':'PHE', ## 23F PHE (2Z)-2-AMINO-3-PHENYLACRYLIC ACID
'4PH':'PHE', ## 4PH PHE 4-METHYL-L-PHENYLALANINE
'B2F':'PHE', ## B2F PHE PHENYLALANINE BORONIC ACID
'BIF':'PHE', ## BIF PHE
'CHS':'PHE', ## CHS PHE 4-AMINO-5-CYCLOHEXYL-3-HYDROXY-PENTANOIC AC
    'DAH':'PHE',        ## DAH PHE  3,4-DIHYDROXYPHENYLALANINE
'DPH':'PHE', ## DPH PHE DEAMINO-METHYL-PHENYLALANINE
'DPN':'PHE', ## DPN PHE D-CONFIGURATION
'FCL':'PHE', ## FCL PHE 3-CHLORO-L-PHENYLALANINE
'FOG':'PHE', ## FOG PHE PHENYLALANINOYL-[1-HYDROXY]-2-PROPYLENE
'FRF':'PHE', ## FRF PHE PHE FOLLOWED BY REDUCED PHE
'HPE':'PHE', ## HPE PHE HOMOPHENYLALANINE
'HPH':'PHE', ## HPH PHE PHENYLALANINOL GROUP
'HPQ':'PHE', ## HPQ PHE HOMOPHENYLALANINYLMETHANE
'MEA':'PHE', ## MEA PHE N-METHYLPHENYLALANINE
'MTY':'PHE', ## MTY PHE 3-HYDROXYPHENYLALANINE
'NFA':'PHE', ## NFA PHE MODIFIED PHENYLALANINE
'PBF':'PHE', ## PBF PHE PARA-(BENZOYL)-PHENYLALANINE
'PCS':'PHE', ## PCS PHE PHENYLALANYLMETHYLCHLORIDE
'PF5':'PHE', ## PF5 PHE 2,3,4,5,6-PENTAFLUORO-L-PHENYLALANINE
'PFF':'PHE', ## PFF PHE 4-FLUORO-L-PHENYLALANINE
'PHA':'PHE', ## PHA PHE PHENYLALANINAL
'PHE':'PHE', ## PHE PHE
'PHI':'PHE', ## PHI PHE IODO-PHENYLALANINE
'PHL':'PHE', ## PHL PHE L-PHENYLALANINOL
'PHM':'PHE', ## PHM PHE PHENYLALANYLMETHANE
'PM3':'PHE', ## PM3 PHE
'PPN':'PHE', ## PPN PHE THE LIGAND IS A PARA-NITRO-PHENYLALANINE
'PRQ':'PHE', ## PRQ PHE PHENYLALANINE
'PSA':'PHE', ## PSA PHE
'SMF':'PHE', ## SMF PHE 4-SULFOMETHYL-L-PHENYLALANINE
'GL3' : 'GLY' , # HETEROATOM THAT MAY BE TREATED AS GLY
'GLY' : 'GLY' , # GLY
'GLZ' : 'GLY' , # HETEROATOM THAT MAY BE TREATED AS GLY
'GSC' : 'GLY' , # HETEROATOM THAT MAY BE TREATED AS GLY
'MPQ' : 'GLY' , # HETEROATOM THAT MAY BE TREATED AS GLY
'MSA' : 'GLY' , # HETEROATOM THAT MAY BE TREATED AS GLY
'NMC' : 'GLY' , # HETEROATOM THAT MAY BE TREATED AS GLY
'SAR' : 'GLY' , # HETEROATOM THAT MAY BE TREATED AS GLY
'ACY':'GLY', ## ACY GLY POST-TRANSLATIONAL MODIFICATION
'CHG':'GLY', ## CHG GLY CYCLOHEXYL GLYCINE
'CHP':'GLY', ## CHP GLY 3-CHLORO-4-HYDROXYPHENYLGLYCINE
'GHP':'GLY', ## GHP GLY 4-HYDROXYPHENYLGLYCINE
'GL3':'GLY', ## GL3 GLY POST-TRANSLATIONAL MODIFICATION
'GLY':'GLY', ## GLY GLY
'GLZ':'GLY', ## GLZ GLY AMINO-ACETALDEHYDE
'GYS':'GLY', ## GYS GLY
'IPG':'GLY', ## IPG GLY N-ISOPROPYL GLYCINE
'MEU':'GLY', ## MEU GLY O-METHYL-GLYCINE
'MPQ':'GLY', ## MPQ GLY N-METHYL-ALPHA-PHENYL-GLYCINE
'MSA':'GLY', ## MSA GLY (2-S-METHYL) SARCOSINE
'NMC':'GLY', ## NMC GLY N-CYCLOPROPYLMETHYL GLYCINE
'PG9':'GLY', ## PG9 GLY D-PHENYLGLYCINE
'SAR':'GLY', ## SAR GLY SARCOSINE
'SHP':'GLY', ## SHP GLY (4-HYDROXYMALTOSEPHENYL)GLYCINE
'TBG':'GLY', ## TBG GLY T-BUTYL GLYCINE
'3AH' : 'HIS' , # HETEROATOM THAT MAY BE TREATED AS HIS
'DHI' : 'HIS' , # HETEROATOM THAT MAY BE TREATED AS HIS
'HIC' : 'HIS' , # HETEROATOM THAT MAY BE TREATED AS HIS
'HIS' : 'HIS' , # HIS
'MHS' : 'HIS' , # HETEROATOM THAT MAY BE TREATED AS HIS
'NEM' : 'HIS' , # HETEROATOM THAT MAY BE TREATED AS HIS
'NEP' : 'HIS' , # HETEROATOM THAT MAY BE TREATED AS HIS
'HID' : 'HIS' , # single delta N protonation
'HIE' : 'HIS' , # single epsilon N protonation
'3AH':'HIS', ## 3AH HIS
'DDE':'HIS', ## DDE HIS
'DHI':'HIS', ## DHI HIS D-HISTIDINE
'HIA':'HIS', ## HIA HIS L-HISTIDINE AMIDE
'HIC':'HIS', ## HIC HIS 4-METHYL-HISTIDINE
'HIP':'HIS', ## HIP HIS ND1-PHOSPHONOHISTIDINE...or commonly used doubly protonated state
'HIQ':'HIS', ## HIQ HIS MODIFIED HISTIDINE
'HIS':'HIS', ## HIS HIS
'HSO':'HIS', ## HSO HIS HISTIDINOL
'MHS':'HIS', ## MHS HIS 1-N-METHYLHISTIDINE
'NEP':'HIS', ## NEP HIS N1-PHOSPHONOHISTIDINE
'NZH':'HIS', ## NZH HIS
'OHI':'HIS', ## OHI HIS 3-(2-OXO-2H-IMIDAZOL-4-YL)-L-ALANINE
'PSH':'HIS', ## PSH HIS 1-THIOPHOSPHONO-L-HISTIDINE
'DIL' : 'ILE' , # HETEROATOM THAT MAY BE TREATED AS ILE
'IIL' : 'ILE' , # HETEROATOM THAT MAY BE TREATED AS ILE
'ILE' : 'ILE' , # ILE
'B2I':'ILE', ## B2I ILE ISOLEUCINE BORONIC ACID
'DIL':'ILE', ## DIL ILE D-ISOLEUCINE
'IIL':'ILE', ## IIL ILE ISO-ISOLEUCINE
'ILE':'ILE', ## ILE ILE
'ILX':'ILE', ## ILX ILE 4,5-DIHYDROXYISOLEUCINE
'IML':'ILE', ## IML ILE N-METHYLATED
'ALY' : 'LYS' , # HETEROATOM THAT MAY BE TREATED AS LYS
'DLY' : 'LYS' , # HETEROATOM THAT MAY BE TREATED AS LYS
'KCX' : 'LYS' , # HETEROATOM THAT MAY BE TREATED AS LYS
'LLP' : 'LYS' , # HETEROATOM THAT MAY BE TREATED AS LYS
'LLY' : 'LYS' , # HETEROATOM THAT MAY BE TREATED AS LYS
'LYM' : 'LYS' , # HETEROATOM THAT MAY BE TREATED AS LYS
'LYS' : 'LYS' , # LYS
'LYZ' : 'LYS' , # HETEROATOM THAT MAY BE TREATED AS LYS
'MLY' : 'LYS' , # HETEROATOM THAT MAY BE TREATED AS LYS
'SHR' : 'LYS' , # HETEROATOM THAT MAY BE TREATED AS LYS
'TRG' : 'LYS' , # HETEROATOM THAT MAY BE TREATED AS LYS
'6CL':'LYS', ## 6CL LYS 6-CARBOXYLYSINE
'ALY':'LYS', ## ALY LYS N(6)-ACETYLLYSINE
'API':'LYS', ## API LYS 2,6-DIAMINOPIMELIC ACID
'APK':'LYS', ## APK LYS
'AZK':'LYS', ## AZK LYS (2S)-2-AMINO-6-TRIAZANYLHEXAN-1-OL
'B3K':'LYS', ## B3K LYS (3S)-3,7-DIAMINOHEPTANOIC ACID
'BLY':'LYS', ## BLY LYS LYSINE BORONIC ACID
'C1X':'LYS', ## C1X LYS MODIFIED LYSINE
'CLG':'LYS', ## CLG LYS
'CLH':'LYS', ## CLH LYS
'CYJ':'LYS', ## CYJ LYS MODIFIED LYSINE
'DLS':'LYS', ## DLS LYS DI-ACETYL-LYSINE
'DLY':'LYS', ## DLY LYS D-LYSINE
'DNL':'LYS', ## DNL LYS 6-AMINO-HEXANAL
'FHL':'LYS', ## FHL LYS MODIFIED LYSINE
'GPL':'LYS', ## GPL LYS LYSINE GUANOSINE-5'-MONOPHOSPHATE
'IT1':'LYS', ## IT1 LYS
'KCX':'LYS', ## KCX LYS CARBAMOYLATED LYSINE
'KGC':'LYS', ## KGC LYS
'KST':'LYS', ## KST LYS N~6~-(5-CARBOXY-3-THIENYL)-L-LYSINE
'LA2':'LYS', ## LA2 LYS
'LCK':'LYS', ## LCK LYS
'LCX':'LYS', ## LCX LYS CARBAMYLATED LYSINE
'LDH':'LYS', ## LDH LYS N~6~-ETHYL-L-LYSINE
    'LET':'LYS',        ## LET LYS  MODIFIED LYSINE
'LLP':'LYS', ## LLP LYS
'LLY':'LYS', ## LLY LYS NZ-(DICARBOXYMETHYL)LYSINE
'LSO':'LYS', ## LSO LYS MODIFIED LYSINE
'LYM':'LYS', ## LYM LYS DEOXY-METHYL-LYSINE
'LYN':'LYS', ## LYN LYS 2,6-DIAMINO-HEXANOIC ACID AMIDE
'LYP':'LYS', ## LYP LYS N~6~-METHYL-N~6~-PROPYL-L-LYSINE
'LYR':'LYS', ## LYR LYS MODIFIED LYSINE
'LYS':'LYS', ## LYS LYS
'LYX':'LYS', ## LYX LYS N''-(2-COENZYME A)-PROPANOYL-LYSINE
'LYZ':'LYS', ## LYZ LYS 5-HYDROXYLYSINE
'M2L':'LYS', ## M2L LYS
'M3L':'LYS', ## M3L LYS N-TRIMETHYLLYSINE
'MCL':'LYS', ## MCL LYS NZ-(1-CARBOXYETHYL)-LYSINE
'MLY':'LYS', ## MLY LYS METHYLATED LYSINE
'MLZ':'LYS', ## MLZ LYS N-METHYL-LYSINE
'OBS':'LYS', ## OBS LYS MODIFIED LYSINE
'SLZ':'LYS', ## SLZ LYS L-THIALYSINE
'XX1':'LYS', ## XX1 LYS N~6~-7H-PURIN-6-YL-L-LYSINE
'BUG' : 'LEU' , # HETEROATOM THAT MAY BE TREATED AS LEU
'CLE' : 'LEU' , # HETEROATOM THAT MAY BE TREATED AS LEU
'DLE' : 'LEU' , # HETEROATOM THAT MAY BE TREATED AS LEU
'LEU' : 'LEU' , # LEU
'MLE' : 'LEU' , # HETEROATOM THAT MAY BE TREATED AS LEU
'NLE' : 'LEU' , # HETEROATOM THAT MAY BE TREATED AS LEU
'NLN' : 'LEU' , # HETEROATOM THAT MAY BE TREATED AS LEU
'NLP' : 'LEU' , # HETEROATOM THAT MAY BE TREATED AS LEU
'1LU':'LEU', ## 1LU LEU 4-METHYL-PENTANOIC ACID-2-OXYL GROUP
'2ML':'LEU', ## 2ML LEU 2-METHYLLEUCINE
'BLE':'LEU', ## BLE LEU LEUCINE BORONIC ACID
'BUG':'LEU', ## BUG LEU TERT-LEUCYL AMINE
'CLE':'LEU', ## CLE LEU LEUCINE AMIDE
'DCL':'LEU', ## DCL LEU 2-AMINO-4-METHYL-PENTANYL GROUP
'DLE':'LEU', ## DLE LEU D-LEUCINE
'DNE':'LEU', ## DNE LEU D-NORLEUCINE
'DNG':'LEU', ## DNG LEU N-FORMYL-D-NORLEUCINE
'DNM':'LEU', ## DNM LEU D-N-METHYL NORLEUCINE
'FLE':'LEU', ## FLE LEU FUROYL-LEUCINE
'HLU':'LEU', ## HLU LEU BETA-HYDROXYLEUCINE
'LED':'LEU', ## LED LEU POST-TRANSLATIONAL MODIFICATION
'LEF':'LEU', ## LEF LEU 2-5-FLUOROLEUCINE
'LEU':'LEU', ## LEU LEU
'LNT':'LEU', ## LNT LEU
'MHL':'LEU', ## MHL LEU N-METHYLATED, HYDROXY
'MLE':'LEU', ## MLE LEU N-METHYLATED
'MLL':'LEU', ## MLL LEU METHYL L-LEUCINATE
'MNL':'LEU', ## MNL LEU 4,N-DIMETHYLNORLEUCINE
'NLE':'LEU', ## NLE LEU NORLEUCINE
'NLN':'LEU', ## NLN LEU NORLEUCINE AMIDE
'NLO':'LEU', ## NLO LEU O-METHYL-L-NORLEUCINE
'PLE':'LEU', ## PLE LEU LEUCINE PHOSPHINIC ACID
'PPH':'LEU', ## PPH LEU PHENYLALANINE PHOSPHINIC ACID
'CXM' : 'MET' , # HETEROATOM THAT MAY BE TREATED AS MET
'FME' : 'MET' , # HETEROATOM THAT MAY BE TREATED AS MET
'MET' : 'MET' , # MET
'MSE' : 'MET' , # HETEROATOM THAT MAY BE TREATED AS MET
'OMT' : 'MET' , # HETEROATOM THAT MAY BE TREATED AS MET
'AME':'MET', ## AME MET ACETYLATED METHIONINE
'CXM':'MET', ## CXM MET N-CARBOXYMETHIONINE
'ESC':'MET', ## ESC MET 2-AMINO-4-ETHYL SULFANYL BUTYRIC ACID
'FME':'MET', ## FME MET FORMYL-METHIONINE
'FOR':'MET', ## FOR MET
'MET':'MET', ## MET MET
'MHO':'MET', ## MHO MET POST-TRANSLATIONAL MODIFICATION
'MME':'MET', ## MME MET N-METHYL METHIONINE
    'MSE':'MET',        ## MSE MET  SELENOMETHIONINE
'MSO':'MET', ## MSO MET METHIONINE SULFOXIDE
'OMT':'MET', ## OMT MET METHIONINE SULFONE
'SME':'MET', ## SME MET METHIONINE SULFOXIDE
'ASN' : 'ASN' , # ASN
'MEN' : 'ASN' , # HETEROATOM THAT MAY BE TREATED AS ASN
'AFA':'ASN', ## AFA ASN N-[7-METHYL-OCT-2,4-DIENOYL]ASPARAGINE
'AHB':'ASN', ## AHB ASN BETA-HYDROXYASPARAGINE
'ASN':'ASN', ## ASN ASN
'B3X':'ASN', ## B3X ASN (3S)-3,5-DIAMINO-5-OXOPENTANOIC ACID
'DMH':'ASN', ## DMH ASN N4,N4-DIMETHYL-ASPARAGINE
'DSG':'ASN', ## DSG ASN D-ASPARAGINE
'MEN':'ASN', ## MEN ASN GAMMA METHYL ASPARAGINE
'DPR' : 'PRO' , # HETEROATOM THAT MAY BE TREATED AS PRO
'PRO' : 'PRO' , # PRO
'1AB':'PRO', ## 1AB PRO 1,4-DIDEOXY-1,4-IMINO-D-ARABINITOL
'2MT':'PRO', ## 2MT PRO
'4FB':'PRO', ## 4FB PRO (4S)-4-FLUORO-L-PROLINE
'DPL':'PRO', ## DPL PRO 4-OXOPROLINE
'DPR':'PRO', ## DPR PRO D-PROLINE
'H5M':'PRO', ## H5M PRO TRANS-3-HYDROXY-5-METHYLPROLINE
'HY3':'PRO', ## HY3 PRO 3-HYDROXYPROLINE
'HYP':'PRO', ## HYP PRO 4-HYDROXYPROLINE
'LPD':'PRO', ## LPD PRO L-PROLINAMIDE
'P2Y':'PRO', ## P2Y PRO (2S)-PYRROLIDIN-2-YLMETHYLAMINE
'PCA':'PRO', ## PCA PRO 5-OXOPROLINE
'POM':'PRO', ## POM PRO CIS-5-METHYL-4-OXOPROLINE
'PRO':'PRO', ## PRO PRO
'PRS':'PRO', ## PRS PRO THIOPROLINE
'DGN' : 'GLN' , # HETEROATOM THAT MAY BE TREATED AS GLN
'GLN' : 'GLN' , # GLN
'DGN':'GLN', ## DGN GLN D-GLUTAMINE
'GHG':'GLN', ## GHG GLN GAMMA-HYDROXY-GLUTAMINE
'GLH':'GLN', ## GLH GLN
'GLN':'GLN', ## GLN GLN
'MGN':'GLN', ## MGN GLN 2-METHYL-GLUTAMINE
'ACL' : 'ARG' , # HETEROATOM THAT MAY BE TREATED AS ARG
'AGM' : 'ARG' , # HETEROATOM THAT MAY BE TREATED AS ARG
'ARG' : 'ARG' , # ARG
'ARM' : 'ARG' , # HETEROATOM THAT MAY BE TREATED AS ARG
'DAR' : 'ARG' , # HETEROATOM THAT MAY BE TREATED AS ARG
'HAR' : 'ARG' , # HETEROATOM THAT MAY BE TREATED AS ARG
'HMR' : 'ARG' , # HETEROATOM THAT MAY BE TREATED AS ARG
'2MR':'ARG', ## 2MR ARG N3, N4-DIMETHYLARGININE
'AAR':'ARG', ## AAR ARG ARGININEAMIDE
'ACL':'ARG', ## ACL ARG DEOXY-CHLOROMETHYL-ARGININE
'AGM':'ARG', ## AGM ARG 4-METHYL-ARGININE
'ALG':'ARG', ## ALG ARG GUANIDINOBUTYRYL GROUP
'AR2':'ARG', ## AR2 ARG ARGINYL-BENZOTHIAZOLE-6-CARBOXYLIC ACID
'ARG':'ARG', ## ARG ARG
'ARM':'ARG', ## ARM ARG DEOXY-METHYL-ARGININE
'ARO':'ARG', ## ARO ARG C-GAMMA-HYDROXY ARGININE
'BOR':'ARG', ## BOR ARG
'CIR':'ARG', ## CIR ARG CITRULLINE
'DA2':'ARG', ## DA2 ARG MODIFIED ARGININE
'DAR':'ARG', ## DAR ARG D-ARGININE
'HMR':'ARG', ## HMR ARG BETA-HOMOARGININE
'HRG':'ARG', ## HRG ARG L-HOMOARGININE
'MAI':'ARG', ## MAI ARG DEOXO-METHYLARGININE
'MGG':'ARG', ## MGG ARG MODIFIED D-ARGININE
'NMM':'ARG', ## NMM ARG MODIFIED ARGININE
'OPR':'ARG', ## OPR ARG C-(3-OXOPROPYL)ARGININE
'ORQ':'ARG', ## ORQ ARG N~5~-ACETYL-L-ORNITHINE
'TYZ':'ARG', ## TYZ ARG PARA ACETAMIDO BENZOIC ACID
'DSN' : 'SER' , # HETEROATOM THAT MAY BE TREATED AS SER
'MIS' : 'SER' , # HETEROATOM THAT MAY BE TREATED AS SER
'OAS' : 'SER' , # HETEROATOM THAT MAY BE TREATED AS SER
'SAC' : 'SER' , # HETEROATOM THAT MAY BE TREATED AS SER
'SEL' : 'SER' , # HETEROATOM THAT MAY BE TREATED AS SER
'SEP' : 'SER' , # HETEROATOM THAT MAY BE TREATED AS SER
'SER' : 'SER' , # SER
'SET' : 'SER' , # HETEROATOM THAT MAY BE TREATED AS SER
'SVA' : 'SER' , # HETEROATOM THAT MAY BE TREATED AS SER
'B3S':'SER', ## B3S SER (3R)-3-AMINO-4-HYDROXYBUTANOIC ACID
'BG1':'SER', ## BG1 SER
'DHL':'SER', ## DHL SER POST-TRANSLATIONAL MODIFICATION
'DSE':'SER', ## DSE SER D-SERINE N-METHYLATED
'DSN':'SER', ## DSN SER D-SERINE
'FGP':'SER', ## FGP SER
'GVL':'SER', ## GVL SER SERINE MODIFED WITH PHOSPHOPANTETHEINE
'HSE':'SER', ## HSE SER L-HOMOSERINE
'HSL':'SER', ## HSL SER HOMOSERINE LACTONE
'MC1':'SER', ## MC1 SER METHICILLIN ACYL-SERINE
'MIS':'SER', ## MIS SER MODIFIED SERINE
'N10':'SER', ## N10 SER O-[(HEXYLAMINO)CARBONYL]-L-SERINE
'NC1':'SER', ## NC1 SER NITROCEFIN ACYL-SERINE
'OAS':'SER', ## OAS SER O-ACETYLSERINE
'OSE':'SER', ## OSE SER O-SULFO-L-SERINE
'PG1':'SER', ## PG1 SER BENZYLPENICILLOYL-ACYLATED SERINE
'PYR':'SER', ## PYR SER CHEMICALLY MODIFIED
'S1H':'SER', ## S1H SER 1-HEXADECANOSULFONYL-O-L-SERINE
'SAC':'SER', ## SAC SER N-ACETYL-SERINE
'SBD':'SER', ## SBD SER
'SBG':'SER', ## SBG SER MODIFIED SERINE
'SBL':'SER', ## SBL SER
'SDP':'SER', ## SDP SER
'SEB':'SER', ## SEB SER O-BENZYLSULFONYL-SERINE
'SEL':'SER', ## SEL SER 2-AMINO-1,3-PROPANEDIOL
    'SEP':'SER',        ## SEP SER  PHOSPHOSERINE
'SER':'SER', ## SER SER
'SET':'SER', ## SET SER AMINOSERINE
'SGB':'SER', ## SGB SER MODIFIED SERINE
'SGR':'SER', ## SGR SER MODIFIED SERINE
'SOY':'SER', ## SOY SER OXACILLOYL-ACYLATED SERINE
'SUN':'SER', ## SUN SER TABUN CONJUGATED SERINE
'SVA':'SER', ## SVA SER SERINE VANADATE
'SVV':'SER', ## SVV SER MODIFIED SERINE
'SVX':'SER', ## SVX SER MODIFIED SERINE
'SVY':'SER', ## SVY SER MODIFIED SERINE
'SVZ':'SER', ## SVZ SER MODIFIED SERINE
'SXE':'SER', ## SXE SER MODIFIED SERINE
'ALO' : 'THR' , # HETEROATOM THAT MAY BE TREATED AS THR
'BMT' : 'THR' , # HETEROATOM THAT MAY BE TREATED AS THR
'DTH' : 'THR' , # HETEROATOM THAT MAY BE TREATED AS THR
'THR' : 'THR' , # THR
'TPO' : 'THR' , # HETEROATOM THAT MAY BE TREATED AS THR
'AEI':'THR', ## AEI THR ACYLATED THR
'ALO':'THR', ## ALO THR ALLO-THREONINE
'BMT':'THR', ## BMT THR
'CRO':'THR', ## CRO THR CYCLIZED
'CTH':'THR', ## CTH THR 4-CHLOROTHREONINE
'DTH':'THR', ## DTH THR D-THREONINE
'OLT':'THR', ## OLT THR O-METHYL-L-THREONINE
'TBM':'THR', ## TBM THR
'TH5':'THR', ## TH5 THR O-ACETYL-L-THREONINE
'THC':'THR', ## THC THR N-METHYLCARBONYLTHREONINE
'THR':'THR', ## THR THR
'TMD':'THR', ## TMD THR N-METHYLATED, EPSILON C ALKYLATED
    'TPO':'THR',        ## TPO THR  PHOSPHOTHREONINE
'DIV' : 'VAL' , # HETEROATOM THAT MAY BE TREATED AS VAL
'DVA' : 'VAL' , # HETEROATOM THAT MAY BE TREATED AS VAL
'MVA' : 'VAL' , # HETEROATOM THAT MAY BE TREATED AS VAL
'VAL' : 'VAL' , # VAL
'B2V':'VAL', ## B2V VAL VALINE BORONIC ACID
'DIV':'VAL', ## DIV VAL D-ISOVALINE
'DVA':'VAL', ## DVA VAL D-VALINE
'MNV':'VAL', ## MNV VAL N-METHYL-C-AMINO VALINE
'MVA':'VAL', ## MVA VAL N-METHYLATED
'NVA':'VAL', ## NVA VAL NORVALINE
'VAD':'VAL', ## VAD VAL DEAMINOHYDROXYVALINE
'VAF':'VAL', ## VAF VAL METHYLVALINE
'VAL':'VAL', ## VAL VAL
'VDL':'VAL', ## VDL VAL (2R,3R)-2,3-DIAMINOBUTANOIC ACID
'VLL':'VAL', ## VLL VAL (2S)-2,3-DIAMINOBUTANOIC ACID
'VME':'VAL', ## VME VAL O- METHYLVALINE
'DTR' : 'TRP' , # HETEROATOM THAT MAY BE TREATED AS TRP
'HTR' : 'TRP' , # HETEROATOM THAT MAY BE TREATED AS TRP
'LTR' : 'TRP' , # HETEROATOM THAT MAY BE TREATED AS TRP
'TPL' : 'TRP' , # HETEROATOM THAT MAY BE TREATED AS TRP
'TRO' : 'TRP' , # HETEROATOM THAT MAY BE TREATED AS TRP
'TRP' : 'TRP' , # TRP
'BTR':'TRP', ## BTR TRP 6-BROMO-TRYPTOPHAN
'1TQ':'TRP', ## 1TQ TRP 6-(FORMYLAMINO)-7-HYDROXY-L-TRYPTOPHAN
'23S':'TRP', ## 23S TRP MODIFIED TRYPTOPHAN
'32S':'TRP', ## 32S TRP MODIFIED TRYPTOPHAN
'32T':'TRP', ## 32T TRP MODIFIED TRYPTOPHAN
'4DP':'TRP', ## 4DP TRP
'4FW':'TRP', ## 4FW TRP 4-FLUOROTRYPTOPHANE
'4HT':'TRP', ## 4HT TRP 4-HYDROXYTRYPTOPHAN
'4IN':'TRP', ## 4IN TRP 4-AMINO-L-TRYPTOPHAN
'6CW':'TRP', ## 6CW TRP 6-CHLORO-L-TRYPTOPHAN
'DTR':'TRP', ## DTR TRP D-TRYPTOPHAN
'FTR':'TRP', ## FTR TRP FLUOROTRYPTOPHANE
'HTR':'TRP', ## HTR TRP BETA-HYDROXYTRYPTOPHANE
'PAT':'TRP', ## PAT TRP ALPHA-PHOSPHONO-TRYPTOPHAN
'TOX':'TRP', ## TOX TRP
    'TPL':'TRP',        ## TPL TRP  TRYPTOPHANOL
'TQQ':'TRP', ## TQQ TRP
'TRF':'TRP', ## TRF TRP N1-FORMYL-TRYPTOPHAN
'TRN':'TRP', ## TRN TRP AZA-TRYPTOPHAN
'TRO':'TRP', ## TRO TRP 2-HYDROXY-TRYPTOPHAN
'TRP':'TRP', ## TRP TRP
'TRQ':'TRP', ## TRQ TRP
'TRW':'TRP', ## TRW TRP
'TRX':'TRP', ## TRX TRP 6-HYDROXYTRYPTOPHAN
'TTQ':'TRP', ## TTQ TRP 6-AMINO-7-HYDROXY-L-TRYPTOPHAN
'DTY' : 'TYR' , # HETEROATOM THAT MAY BE TREATED AS TYR
'IYR' : 'TYR' , # HETEROATOM THAT MAY BE TREATED AS TYR
'PAQ' : 'TYR' , # HETEROATOM THAT MAY BE TREATED AS TYR
'PTR' : 'TYR' , # HETEROATOM THAT MAY BE TREATED AS TYR
'STY' : 'TYR' , # HETEROATOM THAT MAY BE TREATED AS TYR
'TYB' : 'TYR' , # HETEROATOM THAT MAY BE TREATED AS TYR
'TYQ' : 'TYR' , # HETEROATOM THAT MAY BE TREATED AS TYR
    'TYR' : 'TYR' ,  # TYR
    'TYS' : 'TYR' ,  # HETEROATOM THAT MAY BE TREATED AS TYR
'TYY' : 'TYR' , # HETEROATOM THAT MAY BE TREATED AS TYR
'1TY':'TYR', ## 1TY TYR
'2TY':'TYR', ## 2TY TYR
'3TY':'TYR', ## 3TY TYR MODIFIED TYROSINE
'B3Y':'TYR', ## B3Y TYR
'CRQ':'TYR', ## CRQ TYR
'DBY':'TYR', ## DBY TYR 3,5 DIBROMOTYROSINE
'DPQ':'TYR', ## DPQ TYR TYROSINE DERIVATIVE
'DTY':'TYR', ## DTY TYR D-TYROSINE
'ESB':'TYR', ## ESB TYR
'FLT':'TYR', ## FLT TYR FLUOROMALONYL TYROSINE
'FTY':'TYR', ## FTY TYR DEOXY-DIFLUOROMETHELENE-PHOSPHOTYROSINE
'IYR':'TYR', ## IYR TYR 3-IODO-TYROSINE
'MBQ':'TYR', ## MBQ TYR
'NIY':'TYR', ## NIY TYR META-NITRO-TYROSINE
'NBQ':'TYR', ## NBQ TYR
'OTY':'TYR', ## OTY TYR
'PAQ':'TYR', ## PAQ TYR SEE REMARK 999
'PTH':'TYR', ## PTH TYR METHYLENE-HYDROXY-PHOSPHOTYROSINE
'PTM':'TYR', ## PTM TYR ALPHA-METHYL-O-PHOSPHOTYROSINE
'PTR':'TYR', ## PTR TYR O-PHOSPHOTYROSINE
'TCQ':'TYR', ## TCQ TYR MODIFIED TYROSINE
'TTS':'TYR', ## TTS TYR
'TY2':'TYR', ## TY2 TYR 3-AMINO-L-TYROSINE
'TY3':'TYR', ## TY3 TYR 3-HYDROXY-L-TYROSINE
'TYB':'TYR', ## TYB TYR TYROSINAL
'TYC':'TYR', ## TYC TYR L-TYROSINAMIDE
'TYI':'TYR', ## TYI TYR 3,5-DIIODOTYROSINE
'TYN':'TYR', ## TYN TYR ADDUCT AT HYDROXY GROUP
'TYO':'TYR', ## TYO TYR
'TYQ':'TYR', ## TYQ TYR AMINOQUINOL FORM OF TOPA QUINONONE
'TYR':'TYR', ## TYR TYR
    'TYS':'TYR',        ## TYS TYR  SULPHONATED TYROSINE
'TYT':'TYR', ## TYT TYR
'TYY':'TYR', ## TYY TYR IMINOQUINONE FORM OF TOPA QUINONONE
'YOF':'TYR', ## YOF TYR 3-FLUOROTYROSINE
# 'GLX' : 'Z' # why is this here!?
}
####################
# NUCLEIC ACID STUFF
# for sequences...
NUCLEIC_SEQUENCE_LETTERS_MAP = {
'A' : 'A' ,
'G' : 'G' ,
'C' : 'C' ,
'T' : 'T' ,
'U' : 'U' ,
'a' : 'A' ,
'g' : 'G' ,
'c' : 'C' ,
't' : 'T' ,
'u' : 'U' ,
'DA' : 'A' ,
'DG' : 'G' ,
'DC' : 'C' ,
'DT' : 'T' ,
'dA' : 'A' ,
'dG' : 'G' ,
'dC' : 'C' ,
'dT' : 'T' ,
'ADE' : 'A' ,
'GUA' : 'G' ,
'CYT' : 'C' ,
'THY' : 'T' ,
'URA' : 'U' ,
'rA' : 'A' ,
'rG' : 'G',
'rC' : 'C' ,
'rU' : 'U' ,
# HETATM lines
'1MA' : 'A' ,
'1MG' : 'G' ,
'2MG' : 'G' ,
'7MG' : 'G' ,
'OMG' : 'G' ,
'YG' : 'G' ,
'5MC' : 'C' ,
'CB2' : 'C' ,
'CBR' : 'C' ,
'DC' : 'C' ,
'OMC' : 'C' ,
'5BU' : 'U' ,
'5MU' : 'U' ,
'H2U' : 'U' ,
'PSU' : 'U' ,
'URI' : 'U'
}
# line_edit = line_edit.replace( 'HO2\'', '2HO*' )
# line_edit = line_edit.replace( 'HO5\'', '5HO*' )
# line_edit = line_edit.replace( 'H5\'\'', '2H5*' )
# line_edit = line_edit.replace('\'','*')
# line_edit = line_edit.replace('OP1','O1P')
# line_edit = line_edit.replace('OP2','O2P')
NA_CODES = {}
NA_CONVERSIONS_ROSETTA = {}
#####
# DNA
# codes whose presence indicates DNA definitively
NA_CODES['DNA'] = {
'T' : 'T' ,
't' : 'T' ,
'DA' : 'A' ,
'DG' : 'G' ,
'DC' : 'C' ,
'DT' : 'T' ,
'dA' : 'A' ,
'dG' : 'G' ,
'dC' : 'C' ,
'dT' : 'T' ,
'THY' : 'T'
}
# convert from sequence to the resName for PDB format
NA_CONVERSIONS_ROSETTA['DNA'] = {
'A' : 'A' ,
'G' : 'G' ,
'C' : 'C' ,
'T' : 'T' ,
'ADE' : 'A' ,
'GUA' : 'G' ,
'CYT' : 'C' ,
'THY' : 'T' ,
'1MA' : 'A' ,
'1MG' : 'G' ,
'2MG' : 'G' ,
'7MG' : 'G' ,
'OMG' : 'G' ,
'YG' : 'G' ,
'5MC' : 'C' ,
'CB2' : 'C' ,
'CBR' : 'C' ,
'DC' : 'C' ,
'OMC' : 'C' ,
}
# water! hooray!
WATER_CONVERSION = {
'W' : 'TP3' ,
'HOH' : 'TP3' ,
'H2O' : 'TP3' ,
'WAT' : 'TP3' ,
'TP3' : 'TP3' ,
'TP5' : 'TP3'
}
# fun with water
#WATER_CODE = 'TP3' # for possible use in PyRosetta
#WATER_CODES = ['W' , 'HOH' , 'H2O' , 'WAT' , 'TP3' , 'TP5'] # resNames
################################################################################
# METHODS
get_file_extension = lambda in_filename: in_filename.split( '.' )[-1]
get_file_extension.__doc__ = 'Returns the file extension of <in_filename>\n\nin_filename.split( \'.\' )[-1]'
# hacky version
get_root_filename = lambda in_filename: in_filename[:-len( get_file_extension( in_filename ) ) - 1]
get_root_filename.__doc__ = 'Returns the \"root filename\" of <in_filename> (pre file extension)\n\nin_filename[:-len( in_filename.split( \'.\' )[-1] ) - 1]\na little hacky...'
# better version
#get_root_filename = lambda in_filename: ''.join( [i for i in in_filename.split( '.' )[:-1]] )
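# hedged example (pure string manipulation, no file access; the filename is hypothetical):
#     get_file_extension( '1YY9.clean.pdb' )    # 'pdb'
#     get_root_filename( '1YY9.clean.pdb' )     # '1YY9.clean'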
# helper for creating a directory, checks and deletes existing name
def create_directory( dir_name , tagline = ' to sort the data' ):
"""
Creates the directory <dir_name>
WARNING: this will delete the directory and its contents if it already
exists!
Optionally output something special in <tagline>
"""
# check if it exists
print 'Creating a new directory ' + os.path.relpath( dir_name ) + tagline
if os.path.isdir( dir_name ):
print 'a directory named ' + os.path.relpath( dir_name ) + ' already exists, deleting it now...'
shutil.rmtree( dir_name )
os.mkdir( dir_name )
# copy helper
def copy_file( filename , destination , display = False ):
"""
Copy <filename> to/into <destination>
just a cp wrapper...what?
"""
if display: # optional
if os.path.isdir( destination ):
print 'placing a copy of ' + os.path.relpath( filename ) + ' into the ' + os.path.relpath( destination ) + ' directory'
elif os.path.isfile( destination ):
print 'copying ' + os.path.relpath( filename ) + ' to ' + os.path.relpath( destination )
shutil.copy( filename , destination )
################################################################################
# SEQUENCE HANDLING HELPERS
# basic converters...its done a lot
# loading wrapper...basically cause "from Bio import SeqIO" is too long
def load_sequence( filename , ignore_empty = True , seqformat_map = SEQFORMAT_MAP ):
"""
Returns the list of sequences in <filename> as Biopython SeqRecord
objects
automatically handles different file format as specified by <seqformat_map>
Optionally <ignore_empty> sequences (SeqID in file but no sequence)
To get string, use get_sequence
"""
# determine the file format
seq_format = get_file_extension( filename )
# load ALL the sequences!
sequences = [i for i in SeqIO.parse( filename , seqformat_map[seq_format] )]
if ignore_empty:
sequences = [i for i in sequences if str( i.seq )]
# or just one...
if len( sequences ) == 1:
sequences = sequences[0]
return sequences
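# hedged example usage (assumes a FASTA file named "example.fa" exists and that
# SEQFORMAT_MAP maps the "fa" extension to Biopython's "fasta" format):
#     records = load_sequence( 'example.fa' )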
# general converter!
def get_sequence( sequence , seq_format = SEQFORMAT , uppercase = True , ignore_empty = True , get_ids = False ):
"""
Returns a string or list of string depending on the input <sequence>
can accept:
a filename for a <seq_format> file
a Biopython Seq object
a Biopython SeqRecord object
a string
a list of any of the above (can be heterogenous)
Optionally change the sequence to <uppercase> (ambiguous positions are
sometimes lowercase)
Optionally <ignore_empty> sequences (SeqID in file but no sequence)
Optionally <get_ids> , returning a parallel list of SeqIDs and descriptions
"""
# sort the input data type
# for common Biopython objects
if type( sequence ) == Seq:
sequence = str( sequence )
elif type( sequence ) == SeqRecord:
seq_ids = str( sequence.id )
seq_des = str( sequence.description )
sequence = str( sequence.seq )
# input file
elif '.' in sequence: # should never occur!
# its a filename (?) so try to load it, it will error properly
sequence = load_sequence( sequence , ignore_empty )
# sort by number
if type( sequence ) == list: # in accordance with the above
# optionally get the ids
if get_ids:
seq_ids = [str( i.id ) for i in sequence]
seq_des = [str( i.description )*( not i.description == i.id ) for i in sequence]
sequence = [str( i.seq ) for i in sequence]
else:
if get_ids:
seq_ids = str( sequence.id )
seq_des = str( sequence.description )*( not sequence.description == sequence.id )
sequence = str( sequence.seq )
# list of any of the above
elif type( sequence ) == list:
# then sort based on individual types...
sequence = [get_sequence( i , seq_format , uppercase , ignore_empty , get_ids ) for i in sequence]
if get_ids:
seq_ids = [i[1] for i in sequence]
seq_des = [i[2] for i in sequence]
sequence = [i[0] for i in sequence]
# should be an input single string
else:
seq_ids = ''
seq_des = ''
# optionally force UPPER case
if uppercase:
if type( sequence ) == str:
# single sequence
sequence = sequence.upper()
else:
# multiple
sequence = [i.upper() for i in sequence]
# optionally return the id and descriptions too
if get_ids:
return sequence , seq_ids , seq_des
return sequence
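# hedged example (the filename "example.fa" is hypothetical; get_ids also returns IDs/descriptions):
#     seqs , ids , descriptions = get_sequence( 'example.fa' , get_ids = True )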
# general writer
# return the filename
def write_sequence( sequence , out_filename = '' , seq_format = SEQFORMAT , seqid = 'unknown' , description = '' , alphabet = DNAAlphabet , seq_format_map = SEQFORMAT_EXTENSION_MAP ):
"""
Write <sequence> to <out_filename> as <seq_format> using <alphabet>
Robust to sequence inputs that are:
str (filename or sequence)
Seq
SeqRecord
"""
# sort the input data type
unknown = 1
# for common Biopython objects
if isinstance( sequence , str ):
if '.' in sequence: # should never occur, okay, I made it occur
print 'it appears you input a path or filename...so its already a file!'
return sequence
sequence = SeqRecord( Seq( sequence , alphabet ) ) # already default ID of unknown
sequence.id = seqid
sequence.description = description
elif isinstance( sequence , unicode ): # hacky, unicode vs str
sequence = str( sequence )
if '.' in sequence: # should never occur
print 'it appears you input a path or filename...so its already a file!'
return sequence
sequence = SeqRecord( Seq( sequence , alphabet ) ) # already default ID of unknown
sequence.id = seqid
sequence.description = description
elif isinstance( sequence , Seq ):
sequence = SeqRecord( sequence )
sequence.id = seqid
sequence.description = description
elif isinstance( sequence , list ):
# yay, do it all over again :(
# make recursive
# assume all members are the same type...else its an error anyway
if isinstance( sequence[0] , str ):
for i in xrange( len( sequence ) ):
sequence[i] = SeqRecord( Seq( sequence[i] , alphabet ) )
sequence[i].id = seqid + '_' + str( unknown )
sequence[i].description = description
unknown += 1
elif isinstance( sequence[0] , Seq ):
for i in xrange( len( sequence ) ):
sequence[i] = SeqRecord( i )
sequence[i].id = seqid + '_' + str( unknown )
sequence[i].description = description
unknown += 1
# now that all are Biopython SeqRecords, write to file!
if not out_filename:
if type( sequence ) == list:
out_filename = sequence[0].id + '.' + seq_format_map[seq_format]
else:
out_filename = sequence.id + '.' + seq_format_map[seq_format]
SeqIO.write( sequence , out_filename , seq_format )
print 'Successfully wrote the sequence(s) to ' + os.path.relpath( out_filename )
return out_filename
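# hedged example (both the sequence and the output filename "my_seq.fa" are made up;
# uses the default DNAAlphabet):
#     write_sequence( 'ACGTACGT' , 'my_seq.fa' , 'fasta' , 'my_seq' )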
################################################################################
# FULL RAW PROCESSING
# 1R69 - single model, single chain
# 1A17 - another random choice for testing
# 1BUW
# 1C17
# 1JYX
# 1M2V
# 1TF6
# 2C35
# 3G3O
# 1YY8 - AB and CD, single model
# 1NMR - multiple models
# 1LR1 - multiple models AND chains
# 1VTL - protein and DNA, single model
# 1UN6 - protein and RNA, single model
# the big boy...
def process_pdb( pdb_filename , seqformat = SEQFORMAT , seqformat_extension_map = SEQFORMAT_EXTENSION_MAP , conversion = three2three , na_conversion = NA_CONVERSIONS_ROSETTA , na_alphabet = DNAAlphabet , protein_alphabet = ProteinAlphabet ):
"""
Create a directory from <pdb_filename> containing relevant information
stored in the PDB file
This method behaves slightly differently for PDB files with multiple models,
nucleic acids, duplicate complexes, etc.
so if you are interested in the specifics, please read the source code
In short, it tries to write:
header.txt a text file of the header lines
numbering_map.txt a text file showing 1-indexed PDB numbering
clean.pdb only ATOM lines
hetatm.pdb only HETATM lines, may be split by resName
.fa sequences of all peptides and nucleic acids
subdirectories for each protein model/subunit (similar info)
does not write a text file for the "trailer" (lines after the coordinates)
converts lines (ATOM or HETATM) that can be converted based on <conversion>
(generally) and <na_conversion> (specific for nucleic acids, relevant
because RNA and DNA may require different treatment...)
!!!WARNING!!! defaults:
CSE CYS converts SelenoCysteinE to Cysteine
HYP PRO converts HYdroxylProline to Proline
CYD CYS does NOT convert "CYsteine Disulfides to Cysteine"
HIP HIS converts "HIP" to Histidine (~double protonation)
HID HIS converts "HID" to Histidine (~single delta N proton)
HIE HIS converts "HIE" to Histidine (~single epsilon N proton)
todo:
        ensure hetatm conversions skip illegal atoms!!!!
alternate conformations
convert DNA to Rosetta DNA
convert ligands to params
convert water to TP3 (or TP5)
"""
# process input, optionally a list
if isinstance( pdb_filename , list ):
print 'Multiple PDB codes detected, processing them individually...'
# use this list comprehension, get them all!
filenames = [process_pdb( i , seqformat , seqformat_extension_map , conversion , na_conversion , na_alphabet , protein_alphabet ) for i in pdb_filename]
print 'Finished the whole list, enjoy!'
return filenames
####################
# NEW DIRECTORY ETC.
# get root name
pdb_filename = os.path.abspath( pdb_filename )
root_name = get_root_filename( pdb_filename )
best_guess = pdb_filename
# make a new directory, a whole lot is gonna go here...
create_directory( root_name , ' to sort the data' )
# move the pdb here
copy_file( pdb_filename , root_name )
# oh, and go there too
original_dir = os.getcwd()
os.chdir( root_name )
# "update" the target
pdb_filename = root_name + '/' + os.path.split( pdb_filename )[-1]
root_name = get_root_filename( pdb_filename )
##############
# PRE CLEANING
# does not need to know if nucleics or not
    # conversions!
# ...bad...overwrite the file!...but no filename management
convert_pdb_resnames_to_ATOM_lines( pdb_filename , pdb_filename , root_name +'_conversion_report.txt' , conversion )
# produce a PDB with just the protein lines
best_guess = clean_ATOM_lines_from_pdb( pdb_filename )
# extract numbering
# don't bother storing the map
extract_numbering_map_from_pdb( pdb_filename , 'numbering_map.txt' )
# extract HETATM lines
clean_HETATM_lines_from_pdb( pdb_filename )
# write out alternate conformations for the cleaned file
alternate_conformations = clean_alternate_conformations_from_pdb( best_guess )
##########################
# HEADER PARSING
# extract info from header
# this information is accessible from the PDBParser header...sorta...
# get the number of models
models = extract_number_of_models_from_pdb_header( pdb_filename )
# get the subunit complexes
complexes = extract_duplicate_chains_from_pdb_header( pdb_filename )
# write the header (?)
# get the header
header = extract_header_from_pdb( pdb_filename )
###################
# HUNT DOWN HETATMS
# use the map in the header and extracted chemical formulas to search pubchem
# get map
# per hetatm type
# get formula
# get number of residues -> needed to interpret formula...
# search pubchem, download best sdf if exact match and at least < atoms
# create directory for these params etc.
##########################
# ASSESS NUCLEIC SITUATION
# HERE!
# choose your fate!, removes nucleic lines
has_nucleic = clean_nucleic_acid_lines_from_pdb( pdb_filename )
# get proteins if nucleics
if has_nucleic:
# get a PDB of protein only, use this from now on
print 'Scanners indicate there are nucleic acid lines in ' + os.path.relpath( pdb_filename ) + '\nSadly, a lot of toys do not play well with these so a few extra steps are required...'
# write nucleic sequences
temp , nucleic_types = extract_nucleic_acid_sequences_from_pdb( root_name + '.nucleic.pdb' , seqformat = seqformat , alphabet = na_alphabet , seqformat_extension_map = seqformat_extension_map )
# care not for the sequences
# make a Rosetta ready nucleic PDB!!!
# SO BAD! overwrite!
# BAH!!!
na_chains = split_pdb_into_chains( root_name + '.nucleic.pdb' , 0 , True ) # just 0 model...
for i in na_chains.keys():
# BETTER BE IN BOTH!!!
convert_pdb_resnames_to_ATOM_lines( na_chains[i] , na_chains[i] , 'nucleic_chain_'+ i +'_conversion_report.txt' , na_conversion[nucleic_types[i]] )
# check for protein :)
has_protein = clean_protein_lines_from_pdb( pdb_filename )
if not has_protein:
            print 'The additional features are only available for proteins\nScanners indicate that this PDB has ONLY nucleic acids (no proteins) :(\nthe remaining methods rely on the Biopython PDBParser...and things get messy with nucleic acids\nEven so, the only feature you\'re missing out on is splitting into subdirectories for each chain, and since the PDB is just nucleic acid, that isn\'t as helpful'
# premature exit
os.chdir( original_dir )
return best_guess
# change the name of the best guess to .protein.pdb
best_guess = root_name + '.protein.pdb'
pdb_filename = root_name + '.protein.pdb'
# get the nucleic chains
nucleic_chains = extract_chains_from_pdb( root_name + '.nucleic.pdb' )
############
# PDB PARSER
# does NOT loop over ANY nucleic acid chains!
# prepare to load...
parser = PDBParser( PERMISSIVE = 1 )
writer = PDBIO()
struct = parser.get_structure( root_name , pdb_filename )
# verify models and chains
temp = len( struct.child_list ) # number of models
if not temp == models:
print 'Huh? the PDB file header claims there are ' + str( models ) + ' models but the PDB file has ' + str( temp ) + ' models...\nUsing the ACTUAL number of models (' + str( temp ) + ')'
models = temp
# check from reading the CHAIN
if not complexes:
print 'No chain/subunit information found in the header (or no header),\nassuming all individual sequences are unique i.e. if AB and copy CD, will make A, B, C, and D instead of AB and CD'
        # complexes = temp # unnecessary, automatically happens below...
# add all new ids
temp = struct[0].child_dict.keys() # it better have at least 1 model...
# for the nucleic case...
if has_nucleic:
# HERE!
# remove nucleic lines...
for i in xrange( len( complexes ) ):
for j in nucleic_chains:
if j in complexes[i]:
complexes[i] = complexes[i].replace( j ,'' )
# sanity check...
complexes = [i for i in complexes if i]
# assume all models contain all chains...idk how this would ever NOT occur...
# this also produces a directory for EACH chain as the default behavior!!!
complexes += [i for i in temp if i and not i in complexes and not i in nucleic_chains]
else:
# normal protein stuff
complexes += [i for i in temp if i and not i in complexes]
# okay...this should be figured out...but isn't that big of a deal
# found with 1JGO
# print complexes
# print complexes
# complexes = [i for i in complexes if i]
# input('dd')
################################
# CREATE AND FILL SUBDIRECTORIES
# again, this step is skipped for pure nucleic acid...
# exit condition, only 1 model and 1 chain
if models > 1 or len( complexes ) > 1:
# over the models
for model in struct.child_dict.keys():
# over the chains
for complx in complexes:
# print '='*60 + complx
# remove nucleic subunits
# HERE!
if has_nucleic:
for chain in nucleic_chains:
complx = complx.replace( chain , '' ) # delete the chain from the complex
# check that all members are present
chains = struct[model].child_dict.keys()
missing = [l for l in complx if not l in chains]
# report this!
if missing:
# add models bool for str here?
                    print 'Expected model ' + str( model + 1 ) + ' to have chains ' + complx + ' but it is missing chains ' + ', '.join( missing ) + '!'
# create the new directory
# only number if more than 1 model
dir_name = complx + str( model + 1 )*bool( models - 1 )
new_dir = os.path.split( root_name )[0] + '/' + dir_name
print 'Creating the subdirectory ' + os.path.relpath( new_dir )
os.mkdir( new_dir )
# create a copy of the complex, only the chains of interest
# make an empty structure
temp = Structure( 'temp' )
temp_model = Model( model ) # and an empty model
temp.add( temp_model )
# add the complex
for chain in complx:
temp[model].add( struct[model][chain] )
# get the chain sequence
seqid = dir_name + ('_model_' + str( model + 1 ))*bool( models - 1 ) + '_chain_' + chain
seq_filename = new_dir + '/' + os.path.split( root_name )[-1] + ('_model_' + str( model + 1 ))*bool( models - 1 ) + '_chain_' + chain + '.' + seqformat_extension_map[seqformat]
description = '(from model ' + str( model + 1 ) + ')'
temp_seq = extract_protein_sequence_from_pdb( temp , True , # MUST insert disorder...
seq_filename , seqid , description , model , chain ,
True , seqformat , protein_alphabet , seqformat_extension_map )
# also, make sure at least one copy (from the first model) is in the main dir
seq_filename = root_name + '_chain_' + chain + '.' + seqformat_extension_map[seqformat]
if not os.path.exists( seq_filename ):
print 'Putting a copy of the sequence in the new directory'
# assumes all the models have the same sequence
write_sequence( temp_seq , seq_filename , seqformat ,
os.path.split( root_name )[-1] + ' chain ' + chain ,
description , protein_alphabet , seqformat_extension_map )
# write out the model+chain
writer.set_structure( temp )
print 'Writing a copy of model ' + str( model + 1 ) + ' chain(s) ' + complx + ' to ' + new_dir + '.pdb'
writer.save( new_dir + '/' + dir_name + '.pdb' )#, selection )
                # also write a cleaned PDB file, only ATOM lines
clean_ATOM_lines_from_pdb( new_dir + '/' + dir_name + '.pdb' )
# also write any alternate conformations
clean_alternate_conformations_from_pdb( new_dir + '/' + dir_name + '.pdb' )
# also get specific HETATMs...this is getting bulky...
clean_HETATM_lines_from_pdb( new_dir + '/' + dir_name + '.pdb' )
# no need to clean DNA
else:
# only 1 model AND only 1 chain
# still write it please :)
model = 0
chain = complexes[0]
# may seem silly, but this edge case will prevent needless re-parsing
# get the chain sequence
seqid = os.path.split( root_name )[-1] + '_chain_' + complexes[0]
extract_protein_sequence_from_pdb( struct , True ,
seqid + '.' + seqformat_extension_map[seqformat] , seqid , '' ,
model , chain , True ,
seqformat = seqformat , alphabet = protein_alphabet , seqformat_extension_map = seqformat_extension_map )
# debug summary...
temp = os.listdir( os.getcwd() )
temp.sort()
print 'New Files in the ' + root_name + ' directory :\n' + '\n'.join( ['\t'+ i for i in temp] )
    # return back one directory
os.chdir( original_dir ) # yeah...its hacky
return best_guess
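# hedged example (assumes "1YY9.pdb" has been downloaded to the working directory;
# a "1YY9" directory is created and filled as described in the docstring above):
#     best_guess = process_pdb( '1YY9.pdb' )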
################################################################################
# HEADER STUFF
# extract header text
def extract_header_from_pdb( pdb_filename , header_filename = 'header.txt' ):
# write the header (?)
# get the header
f = open( pdb_filename , 'r' )
header = ''
while True: # should error from f.next() if improper input...
# next line
line = f.next()
# exit condition
if 'ATOM' == line[:4] or 'MODEL' == line[:5] or 'HETATM' == line[:6]:
break
header += line
f.close()
# write the header
if header_filename:
print 'Writing a copy of the header lines to the file ' + header_filename
f = open( header_filename , 'w' )
f.write( header )
f.close()
return header
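# hedged example (assumes "1YY9.pdb" exists; also writes "header.txt" by default):
#     header_text = extract_header_from_pdb( '1YY9.pdb' )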
# return any predicted chain pairs
def extract_duplicate_chains_from_pdb_header( pdb_filename ):
# load the raw data
f = open( pdb_filename , 'r' )
complexes = []
keep_going = True
while keep_going:
# next line
line = f.next()
# ...think about this...
# check if chain info, extract the matching subunits
if line[:6] == 'COMPND' and 'CHAIN:' in line:
duplicate = line.split( 'CHAIN: ' )[-1].replace( ';' , '' ).strip().split( ', ' ) # ignore ";\n"
if len( duplicate ) > 1:
complexes.append( duplicate )
# stop condition
elif not ('HEADER' in line or 'TITLE' in line or 'COMPND' in line or 'CAVEAT' in line):
keep_going = False
f.close()
# convert complexes
if complexes:
if not sum( [len( c ) - len( complexes[0] ) for c in complexes] ):
# all are the same length
complexes = [''.join( [c[i] for c in complexes] ) for i in xrange( len( complexes[0] ) )]
else:
# uh oh...
# could be all should be unique...which puts us in exception land anyway
# assume that last listed are aberrantly unpaired
lowest = min( [len( c ) for c in complexes] )
temp = [''.join( [c[i] for c in complexes] ) for i in xrange( lowest )]
for c in complexes:
temp += c[lowest:]
complexes = temp
return complexes
# return number of models, scanned from header
def extract_number_of_models_from_pdb_header( pdb_filename ):
# get the number of models
f = open( pdb_filename , 'r' )
models = 1
keep_going = True
while keep_going:
# next line
line = f.next()
# check for models
if line[:6] == 'NUMMDL':
models = int( line.replace( 'NUMMDL' , '' ).strip() )
keep_going = False
elif line[:4] == 'ATOM':
keep_going = False
f.close()
return models
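# hedged example (assumes "1NMR.pdb", a multi-model NMR entry, exists locally):
#     models = extract_number_of_models_from_pdb_header( '1NMR.pdb' )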
# return resolution, scanned from header
# other information? R-value? R-free?
# other places to extract the quality...?
def extract_resolution_information_from_pdb_header( pdb_filename ):
# load it
f = open( pdb_filename , 'r' )
# ewww....should be a "for" loop that breaks...
keep_going = True
experimental_data = 'X-RAY DIFFRACTION'
resolution = None
while keep_going:
# next line
line = f.next()
# check for models
if line[:6] == 'EXPDTA':
# print 'found exp data'
experimental_data = line[6:].strip()
elif line[:10] == 'REMARK 2':
# check for NMR
# print 'found remark'
# print line
if 'ANGSTROMS' in line:
# print 'found resolution'
resolution = float( line[23:].strip().split( 'ANGSTROMS' )[0].strip() )
keep_going = False
elif line[:4] == 'ATOM':
keep_going = False
f.close()
return resolution , experimental_data
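# hedged example (assumes "1YY9.pdb" exists; resolution stays None if no REMARK 2 value is found):
#     resolution , method = extract_resolution_information_from_pdb_header( '1YY9.pdb' )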
# return the HETNAM records, scanned from header
def extract_HETNAM_from_pdb_header( pdb_filename ):
    # get the HETNAM records from the header
f = open( pdb_filename , 'r' )
hetname_map = {}
keep_going = True
while keep_going:
# next line
line = f.next()
# check for models
if line[:6] == 'HETNAM':
hetname = line[6:].strip().split( ' ' )
hetkey = hetname[0]
hetname = ''.join( [i + ' ' for i in hetname[1:]] )[:-1]
hetname_map[hetkey] = hetname
elif line[:4] == 'ATOM':
keep_going = False
f.close()
return hetname_map
################################################################################
# DIVIDE AND JOIN
# split or join PDB files
# simple wrapper
def morph_atomName2element( atomName ):
"""
Returns the element in <atomName>
raw PDB atomNames are supposed to have the element as the first character
"""
element = atomName[:2].strip()
# remove number characters
for i in '0123456789':
element = element.replace( i , '' )
return element
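# hedged example (pure string manipulation):
#     morph_atomName2element( ' CA ' )    # 'C' (an alpha carbon, not calcium)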
# make sure a filename, Structure, or Model returns the Model of interest
# not tested recently...
def load_pdb( pdb , model = 0 ):
"""
Returns the <model> of <pdb> if its a Structure object (or a filename)
"""
# sort the input
if isinstance( pdb , str ):
# filename
print 'Input filename ' + pdb + ', loading the structure now'
parser = PDBParser( PERMISSIVE = 1 )
pdb = parser.get_structure( 'temp' , pdb )
# default to first one if empty...
if not model:
model = pdb.child_dict.keys()[0]
print 'extracting the first model (' + str( model ) + ')'
pdb = pdb[model] # get the first model
# tried doing this a prettier way...
# check for specific methods and data types for clues...
elif isinstance( pdb.child_dict.keys()[0] , int ):
# its a Biopython structure
# default to first one if empty...
if not model:
model = pdb.child_dict.keys()[0]
print 'Input Biopython Structure, extracting the first model (' + str( model ) + ')'
pdb = pdb[model] # get the first model
elif 'child_dict' in dir( pdb ):
# ...could be any number of things...including what we want!
# hooray! everything is okay
None
else:
# not supported!
raise IOError( 'That data structure is not currently supported...' )
return pdb
# check the PDB for models and split into separate PDBs
def split_pdb_into_models( pdb_filename ):
"""
Writes a single PDB file for every model in <pdb_filename>
uses the Biopython PDBParser and PDBIO
"""
# make tools
parser = PDBParser( PERMISSIVE = 1 )
writer = PDBIO()
pdb_filename = os.path.abspath( pdb_filename )
root_name = get_root_filename( pdb_filename )
struct = parser.get_structure( root_name , pdb_filename )
# over the models
for i in struct.child_dict.keys():
# get just the model
temp = Structure( 'temp' )
temp.add( struct[i] )
# write it
writer.set_structure( temp )
out_filename = root_name + '_model_' + str( i + 1 ) + '.pdb'
print 'Model ' + str( i + 1 ) + ' written to ' + out_filename
writer.save( out_filename )
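# hedged example (assumes "1NMR.pdb" exists; writes one "1NMR_model_<n>.pdb" per model):
#     split_pdb_into_models( '1NMR.pdb' )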
# check the PDB for chains and split into separate PDBs
def split_pdb_into_chains( pdb_filename , model = 0 , export = False ):
"""
Writes a single PDB file for every chain in <pdb_filename>
uses the Biopython PDBParser and PDBIO
"""
# make tools
parser = PDBParser( PERMISSIVE = 1 )
writer = PDBIO()
pdb_filename = os.path.abspath( pdb_filename )
root_name = get_root_filename( pdb_filename )
struct = parser.get_structure( root_name , pdb_filename )
# assume there is only 1 model
# over the chains
chains = {}
for i in struct[model].child_dict.keys():
# get just the model
temp = Structure( 'temp' )
temp_mod = Model( 0 )
temp_mod.add( struct[0][i] )
temp.add( temp_mod )
# write it
writer.set_structure( temp )
out_filename = root_name + '_chain_' + i + '.pdb'
# chains.append( 'Chain ' + i + ' written to ' + out_filename )
chains[i] = out_filename
writer.save( out_filename )
# debug output
for i in chains.keys():
print 'Chain ' + i + ' written to ' + chains[i]
# optionally export
if export:
return chains
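# hedged example (assumes "1YY8.pdb" exists; writes one "1YY8_chain_<X>.pdb" per chain):
#     chain_files = split_pdb_into_chains( '1YY8.pdb' , export = True )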
# add all files together in the provided order
# not tested recently...
def join_pdb_files( files , out_filename = '' ):
"""
Combines the contents of all <files> and writes it out to <out_filename>
a very simple method
"""
# default filename
out_filename_provided = True
if not out_filename:
out_filename_provided = False
text = ''
for i in files:
# open it
f = open( i , 'r' )
# add the text
text += f.read()
f.close()
# check if the name should be added
if not out_filename_provided:
if '.' in i:
out_filename += i[:i.find( '.' )]
else:
out_filename += i
# write the bastard love child
f = open( out_filename , 'w' )
f.write( text )
f.close()
# extract the chains from the PDB
# only considers ATOM lines, mainly for use with clean_nucleic_acid_lines_from_pdb
def extract_chains_from_pdb( pdb_filename , only = ['ATOM'] ):
"""
Returns the chains found in <pdb_filename>
Only consider lines starting with <only>
"""
pdb_filename = os.path.abspath( pdb_filename )
if os.path.exists( pdb_filename ):
# load the data
f = open( pdb_filename , 'r' )
data = [i for i in f.xreadlines() if i[:6].strip() in only]
f.close()
# find unique chains
chains = []
for i in data:
if not i[21] in chains:
chains.append( i[21] )
return chains
else:
print 'No such file or directory named ' + os.path.relpath( pdb_filename )
return False
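# hedged example (assumes "1YY8.pdb" exists; returns the chain IDs found on ATOM lines):
#     chains = extract_chains_from_pdb( '1YY8.pdb' )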
# extract the name mapping
def extract_numbering_map_from_pdb( pdb_filename , out_filename = '' , only = ['ATOM'] ):
"""
Returns a map (dict) from residues in <pdb_filename> that are 1-indexed
and a reverse map (dict)
Only consider lines starting with <only>
Optionally write the results to <out_filename>
"""
pdb_filename = os.path.abspath( pdb_filename )
if os.path.exists( pdb_filename ):
# load the raw data
f = open( pdb_filename , 'r' )
d = [i for i in f.xreadlines() if i[:6].strip() in only]
f.close()
# extract dict of pairs
pdb_map = {}
reverse_map = {}
count = 0
text = ''
for i in d:
# basic info
chain = i[21]
resseq = i[22:26].strip()
icode = i[26] # the icode
key = chain + resseq + icode
if not key in pdb_map.keys():
count += 1
pdb_map[key] = count
reverse_map[count] = key
text += key + '\t' + str( count ) + '\n'
# optionally write to file
# no defaulting!
if out_filename:
# default filename
# f = open( get_root_filename( pdb_filename ) + '_PDB_numbering.txt' , 'w' )
# f.write( ''.join( [i +'\t'+ str( pdb_map[i] ) +'\n' for i in pdb_map.keys()] ) )
print 'Writing the PDB numbering of ' + pdb_filename + ' to ' + out_filename
f = open( out_filename , 'w' )
f.write( text )
f.close()
return pdb_map , reverse_map
else:
print 'No such file or directory named ' + os.path.relpath( pdb_filename )
return False
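# hedged example (assumes "1YY9.pdb" exists; the output filename is made up):
#     pdb_map , reverse_map = extract_numbering_map_from_pdb( '1YY9.pdb' , '1YY9_numbering.txt' )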
# extract a protein sequence from a PDB
# make this better? specify the chain?
# for now, only works if single chain...
def extract_protein_sequence_from_pdb( pdb , include_breaks = True ,
out_filename = '' , seqid = '' , description = '' ,
model = 0 , chain = 'A' , export = True ,
seqformat = SEQFORMAT , alphabet = ProteinAlphabet ,
seqformat_extension_map = SEQFORMAT_EXTENSION_MAP ):
"""
Returns the protein sequences found in <pdb> in <model>
Optionally <export> the sequence
Optionally write to <out_filename> with <seqid>
note: does NOT scan for protein chains, it only dumps out the full
protein sequence in the PDB file
individual chains can be extracted using process_pdb
"""
# ensure pdb is proper, must be a model
pdb = load_pdb( pdb , model ) # necessary model?
# format the chain input
if not isinstance( chain , list ):
chain = [chain]
# over the desired chains
# ugh...this should all be rewritten...
sequences = []
for c in chain:
# get it
if include_breaks:
# extract the sequence as a Biopython Seq object
# convert the model into a Structure, for getting the sequence
for_seq = Structure( 'temp' )
# ...oh yeah...must be model 0 and up >:[
temp_model = Model( 0 ) # hardcoded...
for_seq.add( temp_model )
# for ch in pdb.child_dict.keys():
# copy it all over directly
for_seq[0].add( pdb[c] )
            # gap regions marked as "|"
seq_builder = PPBuilder()
pp = seq_builder.build_peptides( for_seq )
seq = Seq( '|'.join( [str( frag.get_sequence() ) for frag in pp] ) , alphabet )
# for frag in pp:
# seq += frag.get_sequence() + '|' # already a Biopython Seq
seqr = SeqRecord( seq )
seqr.description = description + ' missing residues (gap regions as \"|\")'*( '|' in seq )
else:
# just iterate and extract!
seq = Seq( ''.join( [three2one[i.resname] for i in pdb.get_residues() if i.resname in three2one.keys() and i.get_parent().id == c] ) , alphabet )
seqr = SeqRecord( seq )
seqr.description = description
# prepare to write
# seq = seq[:-1]
# seqr.description = 'missing residues (gap regions as \"|\")'*( '|' in seq ) # no need if no gaps
seqr.id = seqid
sequences.append( seqr )
# optionally write the sequence
if out_filename:
write_sequence( sequences , out_filename , seqformat , alphabet , seqformat_extension_map )
# optionally export the sequence
if export:
return get_sequence( sequences )
# return str( seq )
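# hedged example (assumes "1YY9.clean.pdb" exists and that 'A' is a valid chain ID in it):
#     seq = extract_protein_sequence_from_pdb( '1YY9.clean.pdb' , chain = 'A' )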
# extract and write a file from the PDB
def extract_nucleic_acid_sequences_from_pdb( pdb_filename , out_filename = '' , NA = NUCLEIC_SEQUENCE_LETTERS_MAP , DNA = NA_CODES['DNA'] , seqformat = SEQFORMAT , alphabet = DNAAlphabet , seqformat_extension_map = SEQFORMAT_EXTENSION_MAP ):
"""
Returns the protein sequences found in <pdb_filename>
Only consider resNames in <NA>
Optionally write to <out_filename>
"""
pdb_filename = os.path.abspath( pdb_filename )
if os.path.exists( pdb_filename ):
# load the data
f = open( pdb_filename , 'r' )
d = f.readlines()
f.close()
# print about fails/assumptions
        print 'Extracting nucleic sequences from ' + os.path.relpath( pdb_filename ) + '\nFor visibility, this method assumes A LOT!\n1. nucleotides are identified by unique resSeq codes (with a proper resName)\n2. sequences are identified by unique chain IDs\n3. RNA is the default state\n4. DNA is identified by \"DG\" (etc.) OR \"T\" resi codes\n5. All sequences are continuous\n6. All sequences are recorded 5\' -> 3\' (and written to file in this order)'
# check for nucleic lines - No, do while parsing
# extract sequence
NA_keys = NA.keys()
DNA_keys = DNA.keys()
# molecule = 'RNA'
molecule_types = {}
sequences = {}
last = None
for line in d:
# must have C1 and a nucleic resi code to be considered a nucleotide
resname = line[17:20].strip()
resseq = line[22:27].strip() # resseq
if (line[:5] == 'ATOM ' or line[:4] == 'TER ') and resname in NA_keys:# and line[13:16].strip() == 'C1\'':
# only novel lines
if resseq == last:
continue
last = resseq # if the remainder will execute...
# check for DNA
chain = line[21]
if [True for i in DNA_keys if i in resname]:
# its DNA
molecule_types[chain] = 'DNA'
# consider the whole chain DNA if ANY of the exclusive codes are present
# sometimes DNA is abbreviated without the "d" to designate "deoxy"
# remember the letter
if chain in sequences.keys():
# add the letter
sequences[chain] += NA[resname] # map the code
else:
# create it as well
sequences[chain] = NA[resname]
molecule_types[chain] = 'RNA' # default
# default out name
root_filename = get_root_filename( pdb_filename )
if not out_filename:
out_filename = root_filename
# write the sequences
for chain in sequences.keys():
# verify its not just a nucleotide
seq = sequences[chain]
if len( seq ) > 1:
# determine the molecule type
                # record a proper id
seqr = SeqRecord( Seq( seq , alphabet ) ) # even if RNA (?)
seqr.id = os.path.split( root_filename )[-1] + '_chain_' + chain
seqr.description = molecule_types[chain]
# oh yeah, write it, prints out by itself
out_filename = seqr.id + '.' + seqformat_extension_map[seqformat]
write_sequence( seqr , out_filename , seqformat , alphabet , seqformat_extension_map )
return sequences , molecule_types # empty dict will evaluate as false
else:
print 'No such file or directory named ' + os.path.relpath( pdb_filename )
return False
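# hedged example (assumes "1VTL.pdb", a protein/DNA complex, exists; FASTA files are written per chain):
#     sequences , molecule_types = extract_nucleic_acid_sequences_from_pdb( '1VTL.pdb' )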
################################################################################
# CLEANING METHODS
# HERE !!!
# a dirty input produces a cleaned output file :)
# default behavior is to produce output
# removes non ATOM lines from <pdb_file> and writes to <out_file>
def clean_ATOM_lines_from_pdb( pdb_filename , out_filename = '' , HETATM_include = [] , excluded_atoms = ['CN'] , accepted_fields = ['ATOM ' , 'TER '] ):
"""
Writes all lines in the PDB file <pdb_filename> beginning with "ATOM" or
"TER" into <out_filename> (defaults to <pdb_file>.clean.pdb)
Optionally include HETATM lines with resNames in <HETATM_include>
Returns True if successful
...pretty much the same as:
grep "ATOM" pdb_filename > out_filename
example:
        clean_ATOM_lines_from_pdb('1YY9.pdb')
See also:
Pose
Pose.dump_pdb
pose_from_pdb
pose_from_rcsb
"""
# get the file rootname
pdb_filename = os.path.abspath( pdb_filename )
root_filename = get_root_filename( pdb_filename )
if not root_filename: # in case it is improper, no "."
root_filename = pdb_filename
# an optional argument for PDB files not ending in .pdb
# if not edit:
# edit = 255
# if the file exists
if os.path.exists( pdb_filename ):
# find all ATOM and TER lines
f = open( pdb_filename , 'r' )
data = f.readlines()
f.close()
good = []
for i in data:
if [True for j in accepted_fields if i[:len( j )] == j]:
# if i[:5] == 'ATOM ' or i[:4] == 'TER ':
# add your preference rules for ligands, DNA, water, etc.
# check for excluded atoms
if i[12:16].strip() in excluded_atoms:
# skip it, do not add to the list
continue
good.append( i )
elif i[:6] == 'HETATM' and i[17:20] in HETATM_include:
# save for later, more processing
good.append( i )
# stop condition
if not good:
# tell the user and exit
print 'No ATOM or HETATM lines in ' + os.path.relpath( pdb_filename )
return False
# default output file to <pdb_filename>.clean.pdb
if not out_filename:
out_filename = root_filename + '.clean.pdb'
# write the found lines
print 'if the file ' + os.path.relpath( out_filename ) + ' already exists, it will be overwritten!'
f = open( out_filename , 'w' )
f.writelines( good )
f.close()
print 'PDB file ' + os.path.relpath( pdb_filename ) + ' successfully cleaned, non-ATOM lines removed\nclean data written to ' + os.path.relpath( out_filename )
return out_filename
else:
print 'No such file or directory named ' + os.path.relpath( pdb_filename )
return False
# if you would prefer a simpler call using grep, it looks something like this
# os.system("grep \"ATOM\" %s.pdb > %s.clean.pdb"%(pdb_file[:edit],pdb_file[:edit]))
# split the ATOM lines, only look for DNA lines
def clean_nucleic_acid_lines_from_pdb( pdb_filename , out_filename = '' , NA = NUCLEIC_SEQUENCE_LETTERS_MAP.keys() ):
"""
Scan <pdb_filename> for any nucleic acid lines and writes these to
<out_filename>
defines nucleic acid resNames (three letter codes) as those with
stripped codes in <NA>
default definition of nucleic acid resNames can be adjusted in settings.py
"""
# get the file rootname
pdb_filename = os.path.abspath( pdb_filename )
root_filename = get_root_filename( pdb_filename )
if not root_filename: # in case it is improper, no "."
root_filename = pdb_filename
# if the file exists
if os.path.exists( pdb_filename ):
# find all ATOM and TER lines
f = open( pdb_filename , 'r' )
data = f.readlines()
f.close()
good = []
for i in data:
if (i[:5] == 'ATOM ' or i[:4] == 'TER ') and i[17:20].strip() in NA:
# add your preference rules for ligands, DNA, water, etc.
good.append( i )
# stop condition
if not good:
# tell the user and exit
print 'No nucleic acid lines in ' + os.path.relpath( pdb_filename )
return False
# default output file to <pdb_filename>.clean.pdb
if not out_filename:
out_filename = root_filename + '.nucleic.pdb'
# write the found lines
print 'if the file ' + os.path.relpath( out_filename ) + ' already exists, it will be overwritten!'
f = open( out_filename , 'w' )
f.writelines( good )
f.close()
print 'PDB file ' + os.path.relpath( pdb_filename ) + ' successfully cleaned, DNA/RNA lines extracted\nclean data written to ' + os.path.relpath( out_filename )
return out_filename
else:
print 'No such file or directory named '+ os.path.relpath( pdb_filename )
return False
# split the ATOM lines, only look for not RNA/DNA lines
def clean_protein_lines_from_pdb( pdb_filename , out_filename = '' , NA = NUCLEIC_SEQUENCE_LETTERS_MAP.keys() ):
"""
Scan <pdb_filename> for any nucleic acid lines and writes all "ATOM" lines
that are NOT nucleic acids to <out_filename>
defines nucleic acid resNames (three letter codes) as those with
stripped codes in <NA>
default definition of nucleic acid resNames can be adjusted in settings.py
"""
# get the file rootname
pdb_filename = os.path.abspath( pdb_filename )
root_filename = get_root_filename( pdb_filename )
if not root_filename: # in case it is improper, no "."
root_filename = pdb_filename
# if the file exists
if os.path.exists( pdb_filename ):
# find all ATOM and TER lines
f = open( pdb_filename , 'r' )
data = f.readlines()
f.close()
good = []
for i in data:
if (i[:5] == 'ATOM ' or i[:4] == 'TER ') and not i[17:20].strip() in NA:
# add your preference rules for ligands, DNA, water, etc.
good.append( i )
# stop condition
if not good:
# tell the user and exit
print 'No protein lines in ' + os.path.relpath( pdb_filename )
return False
# default output file to <pdb_filename>.clean.pdb
if not out_filename:
out_filename = root_filename + '.protein.pdb'
# write the found lines
print 'if the file ' + os.path.relpath( out_filename ) + ' already exists, it will be overwritten!'
f = open( out_filename , 'w' )
f.writelines( good )
f.close()
print 'PDB file ' + os.path.relpath( pdb_filename ) + ' successfully cleaned, protein lines extracted\nclean data written to ' + os.path.relpath( out_filename )
return True
else:
print 'No such file or directory named '+ os.path.relpath( pdb_filename )
return False
# scan for HETATMs, rewrite without all these lines, record specific ones
def clean_HETATM_lines_from_pdb( pdb_filename , out_filename = '' , only = '' , write_unique = True ):
"""
Writes all lines in the PDB file <pdb_filename> beginning with "HETATM"
into <out_filename> (defaults to <pdb_filename>.hetatm.pdb)
Optionally write PDB files for all unique residue type codes in the HETATM
lines if <write_unique> is True (default True)
OR
Writes all lines in the PDB file <pdb_filename> beginning with "HETATM"
AND with the resName <only>
Returns True if successful
"""
# get the file rootname
pdb_filename = os.path.abspath( pdb_filename )
root_filename = get_root_filename( pdb_filename )
if not root_filename: # in case it is improper, no "."
root_filename = pdb_filename
# if the file exists
if os.path.exists( pdb_filename ):
# find all HETATM
f = open( pdb_filename , 'r' )
data = f.readlines()
f.close()
good = []
unique = []
for i in data:
resn = i[17:20].strip()
if i[:6] == 'HETATM' and (not only or resn in only):
# save for later, more processing
good.append( i )
# look for unique resn names
if not only and not resn in unique:
unique.append( resn )
# stop condition
if not good:
# tell the user and exit
print 'No HETATM lines in ' + os.path.relpath( pdb_filename )
return False
# default output file to <pdb_filename>.clean.pdb
if not out_filename:
if not only:
out_filename = root_filename + '.hetatm.pdb'
elif only in WATER_CONVERSION.keys(): # just waters...
out_filename = root_filename.replace( '.hetatm' , '' ) + '.waters.pdb'
else:
# its anything else, name based on the code
out_filename = root_filename.replace( '.hetatm' , '' ) + '.' + only + '.pdb'
# write the found lines
print 'if the file ' + os.path.relpath( out_filename ) + ' already exists, it will be overwritten!'
f = open( out_filename , 'w' )
f.writelines( good )
f.close()
# change this!
if not only:
print 'PDB ' + os.path.relpath( pdb_filename ) + ' successfully cleaned, non-HETATM lines removed\nclean data written to ' + os.path.relpath( out_filename )
else:
print 'All ' + only + ' lines in PDB file ' + os.path.relpath( pdb_filename ) + ' written to ' + os.path.relpath( out_filename )
# optionally redo for all unique members
if not only and write_unique:
if len( unique ) > 1:
# do them all
# for resn in unique:
# clean_HETATM_lines_from_pdb( out_filename , '' , resn )
unique_filenames = [clean_HETATM_lines_from_pdb( out_filename , '' , resn ) for resn in unique]
return out_filename , unique_filenames
else:
# only 1 HETATM type...
unique = unique[0]
print 'Only 1 type of HETATM found, ' + unique
if unique in WATER_CONVERSION.keys():
unique = 'waters'
# print 'Renaming ' + root_filename + '.hetatm.pdb to ' + root_filename + '.' + unique + '.pdb'
# shutil.move( root_filename + '.hetatm.pdb' , root_filename + '.' + unique + '.pdb' )
temp = root_filename + '.' + unique + '.pdb'
print 'Renaming ' + os.path.relpath( out_filename ) + ' to ' + os.path.relpath( temp )
shutil.move( out_filename , temp )
out_filename = temp
return out_filename
else:
print 'No such file or directory named ' + os.path.relpath( pdb_filename )
return False
# scan for alternate location fields
def clean_alternate_conformations_from_pdb( pdb_filename , remove_identifier = True ):
"""
Writes PDB files for each of the alternate conformations found in
<pdb_filename>
"""
# get the file rootname
pdb_filename = os.path.abspath( pdb_filename )
root_filename = get_root_filename( pdb_filename )
if not root_filename: # in case it is improper, no "."
root_filename = pdb_filename
# verify it exists
if not os.path.exists( pdb_filename ):
# for pipelines etc.
print 'No such file or directory named ' + os.path.relpath( pdb_filename )
return False
# find all alternate conformations
f = open( pdb_filename , 'r' )
lines = f.readlines()
f.close()
# for storage
non_alternating = ['']
alternate_conformations = []
last_line_alternate = False
index = 0
alternate_index = -1
conformation_names = []
resis = set()
for i in lines:
# skip non ATOM lines...fix this later to support header?
if not i[:6].strip() in ['ATOM' , 'HETATM']:
last_line_alternate = False
continue
# sort it
if i[16].strip():
conformation = i[16]
resis.add( i[21] +':'+ i[22:27].strip() )
# optionally remove the alternate conformation identifier
if remove_identifier:
i = i[:16] + ' ' + i[17:]
# did we just transition into an alt conf region?
if last_line_alternate:
# still in the same region
if not conformation in alternate_conformations[alternate_index].keys():
alternate_conformations[alternate_index][conformation] = i
if not conformation in conformation_names:
conformation_names.append( conformation )
else:
alternate_conformations[alternate_index][conformation] += i
else:
# in a new region
# if alternate_conformations:
# conformation_names = list( set( conformation_names + alternations_conformations[-1].keys() ) )
# number_of_conformations = max( number_of_conformations , len( alternate_conformations[-1].keys() ) )
alternate_index += 1
alternate_conformations.append( {conformation : i} )
if not conformation in conformation_names:
conformation_names.append( conformation )
last_line_alternate = True
else:
# did we just transition into an alt conf region?
if last_line_alternate:
# entered a new region
index += 1
non_alternating.append( i )
else:
# in the same region
non_alternating[index] += i
last_line_alternate = False
# exit condition
conformation_names.sort() # intuitive order...
if not conformation_names:
print 'No alternate conformations detected (17th column)'
return False
else:
print 'found ' + str( len( conformation_names ) ) + ' alternate conformations: ' + ', '.join( conformation_names )
print 'alternate locations found for residues: ' + ', '.join( list( resis ) )
# print index , alternate_index , number_of_conformations
# write out the alternate conformations
conformation_filenames = []
for i in conformation_names:
# make a text by building from fragments
text = ''
for j in xrange( len( non_alternating ) - 2 ):
text += non_alternating[j]
if i in alternate_conformations[j].keys():
text += alternate_conformations[j][i]
else:
# default to the "first" alt conf ID
key = 0
while not conformation_names[key] in alternate_conformations[j].keys():
key += 1
key = conformation_names[key]
text += alternate_conformations[j][key]
# add edge case
text += non_alternating[-1]
# write the file
out_filename = root_filename + '_conformation_' + i +'.pdb'
print 'writing conformation ' + i + ' out to ' + os.path.relpath( out_filename ) + ' ...'
f = open( out_filename , 'w' )
f.write( text )
f.close()
conformation_filenames.append( out_filename )
return conformation_filenames
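# Illustrative sketch (added, not from the original author): assuming an input
# file '1ABC.pdb' whose column 17 carries alternate location codes A and B,
# the call below would be expected to write '1ABC_conformation_A.pdb' and
# '1ABC_conformation_B.pdb' and return both filenames.
# conformation_filenames = clean_alternate_conformations_from_pdb( '1ABC.pdb' )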
################################################################################
# CONVERTERS
# rewrite the hetatm lines in the pdb file
def convert_pdb_resnames_to_ATOM_lines( hetatm_pdb_filename , out_filename = '' , report_filename = '' , conversion = three2three ):
"""
Rewrites all HETATM lines in <hetatm_pdb_filename> found as keys in
the dict <conversion> and replaces them with their values
also rewrites the "HETATM" record as "ATOM "
used to convert HETATM lines that are proxies for amino acids
"""
hetatm_pdb_filename = os.path.abspath( hetatm_pdb_filename )
# handle defaults
if not out_filename:
# override
print 'no output filename provided, overwriting ' + hetatm_pdb_filename
out_filename = hetatm_pdb_filename
# make sure it exists
if os.path.isfile( hetatm_pdb_filename ):
# load in the lines
f = open( hetatm_pdb_filename , 'r' )
d = f.readlines()
f.close()
# change to the desired format
converted = []
for line in xrange( len( d ) ):
record = d[line][:6].strip()
resname = d[line][17:20].strip()
# go ahead and just rewrite
if record in ['ATOM' , 'HETATM'] and not resname in one2three.values() and resname in conversion.keys():
new = conversion[resname]
d[line] = d[line][:17] + new.rjust(3) + d[line][20:]
# for records...
temp = resname + ' lines converted to ' + new
if not temp in converted:
converted.append( temp )
# check the record...all to ATOM
if record == 'HETATM':
d[line] = 'ATOM '+ d[line][6:]
# debug output
if converted:
converted = '\n'.join( converted )
print converted
if report_filename:
print 'summary of converted lines written to ' + report_filename
f = open( report_filename , 'w' )
f.write( converted )
f.close()
# write it back
f = open( out_filename , 'w' )
f.writelines( d )
f.close()
else:
print 'No such file named ' + os.path.relpath( hetatm_pdb_filename )
return False
# useful?
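# Hedged usage sketch (hypothetical filenames; assumes the three2three map
# carries entries such as 'MSE' -> 'MET'): this would rewrite matching HETATM
# records as standard "ATOM  " lines and save a summary of converted residues.
# convert_pdb_resnames_to_ATOM_lines( '1ABC.hetatm.pdb' ,
#     out_filename = '1ABC.converted.pdb' , report_filename = '1ABC.report.txt' )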
# rewrite the water lines in the pdb file to the standard...from settings?
def convert_water_containing_pdb( hetatm_pdb_filename , conversion = WATER_CONVERSION ):
"""
Rewrites all HETATM "water" lines in <hetatm_pdb_filename> to resNames
based on <conversion>
adjust the definition of water (<look_for>) and what to switch to in
settings.py
not currently used...
"""
hetatm_pdb_filename = os.path.abspath( hetatm_pdb_filename )
if os.path.isfile( hetatm_pdb_filename ):
# load in the lines
f = open( hetatm_pdb_filename , 'r' )
d = f.readlines()
f.close()
# change to the desired format
for line in xrange( len( d ) ):
resname = d[line][17:20]
if resname.strip() in WATER_CONVERSION.keys():
d[line] = d[line][:17] + WATER_CONVERSION[resname].rjust(3) + d[line][20:]
# write it back...bad!
f = open( hetatm_pdb_filename , 'w' )
f.writelines( d )
f.close()
else:
print 'No such file named ' + os.path.relpath( hetatm_pdb_filename )
return False
# removes lines from <pdb_file> and writes to <out_file> ending in new
def clean_ATOM_non_new_lines_from_pdb( pdb_filename , out_filename = '' ):
"""
Write all lines in the PDB file <pdb_filename> as long as the last three
characters on the line aren't "new"
used to clean Hydrogens added using Reduce
"""
# get the file rootname
pdb_filename = os.path.abspath( pdb_filename )
root_filename = get_root_filename( pdb_filename )
if not root_filename: # in case it is improper, no "."
root_filename = pdb_filename
# an optional argument for PDB files not ending in .pdb
# if not edit:
# edit = 255
# if the file exists
if os.path.exists( pdb_filename ):
# find all ATOM and TER lines
f = open( pdb_filename , 'r' )
data = f.readlines()
f.close()
good = []
for i in data:
if (i[:5] == 'ATOM ' or i[:4] == 'TER ') and not i.strip()[-3:] == 'new' and i[17:20] in one2three.values():
good.append( i )
# stop condition
if not good:
# tell the user and exit
print 'No ATOM non-new lines in ' + os.path.relpath( pdb_filename )
return False
# default output file to <pdb_filename>.clean.pdb
if not out_filename:
out_filename = root_filename + '.non_new.pdb'
# write the found lines
print 'if the file ' + os.path.relpath( out_filename ) + ' already exists, it will be overwritten!'
f = open( out_filename , 'w' )
f.writelines( good )
f.close()
print 'PDB file ' + os.path.relpath( pdb_filename ) + ' successfully cleaned, non-ATOM lines and ATOM lines ending in \"new\" removed\nclean data written to ' + os.path.relpath( out_filename )
return out_filename
else:
print 'No such file or directory named ' + os.path.relpath( pdb_filename )
return False
################################################################################
# MAIN
if __name__ == '__main__':
# parser object for managing input options
parser = optparse.OptionParser()
# essential data
parser.add_option( '-p' , dest = 'pdb_filename' ,
default = '' ,
help = 'the pdb filename to process' )
parser.add_option( '-f' , dest = 'seqformat' ,
default = SEQFORMAT ,
help = 'sequence file format, based on settings (!) and Biopython' )
# the other options for the method...are for interactive use
# hard to manipulate from the commandline...
(options,args) = parser.parse_args()
# check inputs
# no edits/modifications
# kinda silly, but I do this as "my style", easy to modify cleanly
pdb_filename = options.pdb_filename
seqformat = options.seqformat
process_pdb( pdb_filename , seqformat )
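# Hypothetical command-line sketch (the script name and the format value are
# assumptions, not taken from the source); the options above simply forward
# the PDB filename and sequence format to process_pdb():
# python this_script.py -p 1YY9.pdb -f fasta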
################################################################################
################################################################################
# UNFINISHED!!!
# scan for repeated chains and delete them, rewrite it
def clean_redundancy_from_pdb( in_filename , out_filename = '' ):
"""
Not currently supported
"""
print 'This is not currently supported sorry...\nIt should look for redundant copies...though it seems the best way to do this is to read directly from the header...but...even for the same sequence, the PDB file may have slightly different coordinates..so how to choose?\nUse process_pdb instead, a separate method is not supported because of this choice problem'
# rewrite a dna or rna pdb to be rosetta friendly
def convert_nucleic_acids_for_rosetta( nucleic_pdb_filename ):
"""
Not currently supported
"""
print '...still researching...for whatever reason, most DNA PDB coordinates are accepted in Rosetta (and thus do not crash PyRosetta) however, I cannot get RNA loading to work no matter what (!!??!)\nthey can be ~loaded by modifying the database, but this does not seem to actually do anything, although...make_pose_from_sequence can then make RNA polymers, generating a nonstandard ResidueSet also does not work...perhaps cause the lines are ATOM?'
"""
deprecated stuff...just in case...
# f = open( pdb_filename , 'r' )
# complexes = []
# keep_going = True
# while keep_going:
# next line
# line = f.next()
# ...think about this...
# check if chain info, extract the matching subunits
# if 'CHAIN:' in line:
# dupl = line.split( 'CHAIN: ' )[-1].replace( ';' , '' ).strip().split( ', ' ) # ignore ";\n"
# if len( dupl ) > 1:
# complexes.append( dupl )
# stop condition
# elif not ('HEADER' in line or 'TITLE' in line or 'COMPND' in line):
# keep_going = False
# f.close()
# convert complexes
# if complexes:
# if not sum( [len( c ) - len( complexes[0] ) for c in complexes] ):
# all are the same length
# complexes = [''.join( [c[i] for c in complexes] ) for i in xrange( len( complexes[0] ) )]
# else:
# uh oh...
# could be all should be unique...which puts us in exception land anyway
# assume that last listed are aberrantly unpaired
# lowest = min( [len( c ) for c in complexes] )
# temp = [''.join( [c[i] for c in complexes] ) for i in xrange( lowest )]
# for c in complexes:
# temp += c[lowest:]
# complexes = temp
# shutil.copy( seq_filename , seqid2 )
# extract_protein_sequence_from_pdb( temp , '' + seqid + '.fa' , seqid , model , chain )
# so...this Biopython object just doesn't work...
# make a new selection
# selection = Select()
# selection.accept_model( i )
# for l in c:
# selection.accept_chain( l )
# return the filename of the "best" PDB made for Rosetta
# also return PDB numbering map?
# if has_nucleic:
# pdb_filename = root_name + '/' + pdb_filename
# else:
# pdb_filename = root_name + '/' + root_name + '.clean.pdb'
# extract numbering of the best
# pdb_map , reverse_map = extract_numbering_map_from_pdb( pdb_filename , pdb_filename[:-4] + '_numbering_map.txt' )
# uh oh, bug...dont wanna fix now
# until this is proper, leave this to Rosetta...
# return pdb_filename #, pdb_map
# if not chain:
# chain = pdb.child_dict.keys()[0]
# copy the chain
# conver the model into a Structure, for getting the sequence
# for_seq = Structure( 'temp' )
# ...oh yeah...must be model 0 and up >:[
# temp_model = Model( 0 )
# for_seq.add( temp_model )
# for ch in pdb.child_dict.keys():
# copy it all over directly
# for_seq[0].add( pdb[ch] )
# extract the sequence as a Biopython Seq object
# gap regions marked as "|"
# seq_builder = PPBuilder()
# pp = seq_builder.build_peptides( for_seq )
# seq = Seq( '' , ProteinAlphabet )
# for frag in pp:
# seq += frag.get_sequence() + '|' # already a Biopython Seq
# or...just do this...
# from making sequences for subdirectories...
# temp_seq = SeqRecord( Seq( temp_seq , protein_alphabet ) )
# temp_seq.id = os.path.split( root_name )[-1] + ' chain ' + chain
# temp_seq.description = '(from model ' + str( model + 1 ) + ')'
"""
| gpl-2.0 | 6,684,947,784,690,174,000 | 40.890396 | 461 | 0.522973 | false |
better-dem/portal | core/models.py | 1 | 13724 | from __future__ import unicode_literals
from django.contrib.gis.db import models
from django.db import transaction
from django.db.models.signals import post_save, m2m_changed
from django.contrib.auth.models import User, Permission
from django.contrib.sessions.models import Session
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.apps import apps
import sys, random, string
from django.core.exceptions import ValidationError
from django.db.models import Transform
### Start functions for accessing particpation app API
def get_core_app():
all_apps = apps.get_app_configs()
return [a for a in all_apps if a.name=="core" and len(get_app_project_models(a))==1][0]
def get_registered_participation_apps():
all_apps = apps.get_app_configs()
participation_apps = [a for a in all_apps if not a.name=="core" and len(get_app_project_models(a))>=1]
return participation_apps
def get_app_project_models(app):
return [m for m in app.get_models() if issubclass(m, ParticipationProject)]
def get_app_item_models(app):
return [m for m in app.get_models() if issubclass(m, ParticipationItem)]
def get_app_by_name(name):
all_apps = apps.get_app_configs()
participation_apps = [a for a in all_apps if a.name==name and len(get_app_project_models(a))>=1]
return participation_apps[0]
def get_app_for_model(model):
apps = get_registered_participation_apps()
for a in apps:
if model in a.get_models():
return a
def get_item_subclass_test(app):
item_models = get_app_item_models(app)
if len(item_models) == 0:
raise Exception("app"+app.name+" has no Participation Item models")
subclass_names = [m.__name__.lower().replace("_","") for m in item_models]
def test(x):
for s in subclass_names:
try:
return getattr(x, s)
except:
continue
raise Exception()
return test
def get_project_subclass_test(app):
project_models = get_app_project_models(app)
if len(project_models) == 0:
raise Exception("app"+app.name+" has no Participation Project models")
subclass_names = [m.__name__.lower().replace("_","") for m in project_models]
def test(x):
for s in subclass_names:
try:
return getattr(x, s)
except:
continue
raise Exception()
return test
def get_provider_permission(app):
project_model = get_app_project_models(app)[0]
model_name = project_model.__name__.lower().replace("_","")
app_name = app.name.lower()
content_type = ContentType.objects.get(app_label=app_name, model=model_name)
perm = Permission.objects.get(content_type=content_type, codename__startswith="add_")
return perm
def get_default_user():
return User.objects.get_or_create(
username = "default",
defaults = {"email":"[email protected]",
"password": ''.join(random.choice(string.ascii_uppercase + string.digits) for i in range(20)),
"last_login": "2016-12-15T15:53:48.874Z",
"is_superuser": False,
"first_name": "",
"last_name": "",
"is_staff": False,
"is_active": True,
"date_joined": "2016-11-09T22:26:10.731Z",
})[0]
def get_usa():
return GeoTag.objects.get_or_create(name="United States of America", defaults={"detail": "North America", "feature_type": "CO"})[0]
def get_tag_category(t):
try:
m=t.geotag
return "Location"
except:
return "Topic"
def get_task_for_job_state(ljs):
app = get_app_by_name(ljs.app_name)
return app.get_task(ljs.name)
def get_overviews():
ans = []
for app in get_registered_participation_apps():
for index, description in getattr(app, "overviews", []):
ans.append(["/apps/{}/overview/{}".format(app.name, index), description])
return ans
### Start core models
class ParticipationProject(models.Model):
name = models.CharField(max_length = 500)
owner_profile = models.ForeignKey('UserProfile', on_delete=models.CASCADE)
is_active = models.BooleanField(default=True)
group = models.ForeignKey('UserGroup', blank=True, null=True)
def update_items(self):
raise Exception("Please overwrite the update_items() method for your participation app")
def get_inherited_instance(self):
ans = self
for t in [get_project_subclass_test(app) for app in get_registered_participation_apps()]:
try:
ans = t(self)
except:
continue
else:
return ans
raise Exception("unknown subclass type")
def delete_project_link(self):
app = get_app_for_model(self.get_inherited_instance().__class__)
return "/apps/"+app.label+"/delete_project/"+str(self.id)
def edit_project_link(self):
app = get_app_for_model(self.get_inherited_instance().__class__)
if app.are_projects_editable:
return "/apps/"+app.label+"/edit_project/"+str(self.id)
class ParticipationItem(models.Model):
name = models.CharField(max_length = 500)
creation_time = models.DateTimeField(auto_now_add=True)
participation_project = models.ForeignKey('ParticipationProject', on_delete=models.CASCADE)
display_image_file = models.FilePathField(max_length=500, blank=True)
visits = models.IntegerField(default=0)
tags = models.ManyToManyField('Tag')
is_active = models.BooleanField(default=True)
def participate_link(self):
app = get_app_for_model(self.get_inherited_instance().__class__)
return "/apps/"+app.label+"/participate/"+str(self.id)
def get_inherited_instance(self):
ans = self
for t in [get_item_subclass_test(app) for app in get_registered_participation_apps()]:
try:
ans = t(self)
except:
continue
else:
return ans
raise Exception("unknown subclass type")
def set_relavent_tags(self):
raise Exception("set_relevant_tags() method needs to be implemented by all ParticipationItem subclasses.")
def get_inline_display(self):
return self.name
def set_display_image(self):
"""
Overwrite this function with app-specific method
for setting an item's display image.
It can take a long time, since it will be run by workers, not by web server
"""
return None
class UserProfile(models.Model):
user = models.OneToOneField(User)
tags = models.ManyToManyField('Tag')
TEACHER = "Teacher"
STUDENT = "Student"
JOURNALIST = "Journalist"
OC = "Ordinary Citizen"
role = models.CharField(max_length=50, choices= ((i,i) for i in [TEACHER, STUDENT, JOURNALIST, OC]), default=OC)
bookmarks = models.ManyToManyField(ParticipationItem)
class UserGroup(models.Model):
name = models.CharField(max_length=200)
owner = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
COURSE = "Course"
group_type = models.CharField(max_length=50, choices= ((i,i) for i in [COURSE]), default=COURSE)
max_invitations = models.PositiveIntegerField(default=25)
class GroupMembership(models.Model):
"""
This class is to support group membership and related flows
some examples:
- a group moderator should be able to invite people by entering a list of emails
- a member should be able to claim an invitation by using a code even if they aren't registered with the invitation email
- a registered user should be able to claim an invitation without the code if their email matches
"""
group = models.ForeignKey(UserGroup, on_delete=models.CASCADE)
member = models.ForeignKey(UserProfile, on_delete=models.CASCADE, blank=True, null=True)
member_name = models.CharField(max_length=100) # the name used by the group admin to identify this member
invitation_code = models.CharField(max_length=100, blank=True, null=True)
invitation_email = models.EmailField(blank=True, null=True)
def save(self, *args, **kwargs):
if bool(self.invitation_code) == bool(self.member):
raise ValidationError('Exactly one of (member, invitation_code) is required.')
return super(GroupMembership, self).save(*args, **kwargs)
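# Illustrative sketch of the two states allowed by save() above (field values
# are hypothetical, not from the original project):
# a pending invitation carries a code but no member yet:
# GroupMembership(group=g, member=None, member_name='Jane Doe',
#                 invitation_code='a1b2c3', invitation_email='[email protected]').save()
# a claimed membership carries a member and no code:
# GroupMembership(group=g, member=jane_profile, member_name='Jane Doe').save()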
class LongJobState(models.Model):
"""
A model to track an app's state relative to the periodic long background jobs it has to perform.
For example, scraping/crawling jobs or pulling from APIs.
This is used to allow the portal to schedule these jobs while leaving workers available for basic site functionality.
"""
app_name = models.TextField(max_length = 300) # app name
name = models.TextField(max_length = 100) # a unique-per-app job name
job_period = models.IntegerField(default=60*60*24) # in seconds, default is one day. Jobs are prioritized based on how far past due they are
job_timeout = models.IntegerField() # in seconds
most_recent_update = models.DateTimeField()
class Meta:
unique_together=(("app_name", "name"),)
class Donation(models.Model):
userprofile = models.ForeignKey(UserProfile, blank=True, null=True, on_delete = models.SET_NULL)
amount = models.FloatField()
is_recurring = models.BooleanField(default=False)
timestamp = models.DateTimeField(auto_now_add=True)
stripe_customer_id = models.CharField(max_length=100)
stripe_full_response = models.TextField() # json object returned by stripe
class Tag(models.Model):
name = models.CharField(max_length = 300)
# ex: county + state + country name if the tag is a city
detail = models.CharField(max_length = 300, blank=True, null=True)
class Meta:
unique_together = (("name", "detail"),)
def get_name(self):
if not self.detail is None:
return self.name + ", " + self.detail
return self.name
class GeoTag(Tag):
"""
A tag which refers to a physical location / region
"""
polygon = models.PolygonField(geography = True, blank=True, null=True)
polygon_area = models.FloatField(blank = True, null=True)
point = models.PointField(geography = True, blank=True, null=True)
population = models.PositiveIntegerField(blank=True, null=True)
COUNTRY="CO"
STATE_OR_PROVINCE="SP"
CITY="CI"
OTHER="OT"
UNKNOWN="UN"
FEATURE_TYPE_CHOICES = ((COUNTRY, "Country"),(STATE_OR_PROVINCE, "State or Province"),(CITY, "City or town"),(OTHER, "Other"),(UNKNOWN, "Unknown"))
feature_type = models.CharField(max_length=2, choices=FEATURE_TYPE_CHOICES, default=UNKNOWN)
class ReferenceDocument(models.Model):
"""
A reference to some document, such as a bill, a budget document, a report, a ruling, etc.
"""
name = models.CharField(max_length = 500)
url = models.URLField()
mimetype = models.CharField(max_length = 50, default="text/html")
first_paragraph = models.TextField(blank=True, null=True)
external_api = models.CharField(max_length = 100, blank=True, null=True) # if the document is pulled from an external API, this shows the API's name
external_id = models.CharField(max_length = 100, blank=True, null=True, db_index=True) # a field used for identification from some other API
class Event(models.Model):
"""
Event model used to record the context for various events such as key performance indicators
(issue reports, donate button clicks)
"""
user_profile = models.ForeignKey(UserProfile)
ip_addr = models.CharField(max_length = 100, blank=True, null=True)
referring_url = models.CharField(max_length = 500, blank=True, null=True)
path = models.CharField(max_length=500, blank=True, null=True)
timestamp = models.DateTimeField(auto_now_add=True)
class IssueReport(models.Model):
event = models.ForeignKey(Event)
title = models.CharField(max_length = 100)
description = models.TextField()
issue_type = models.CharField(max_length=2, choices=(("PC", "Propaganda, campaigning, or biased content"), ("BR", "Bug or website error"), ("IA", "Inaccurate content"), ("FR", "Request a feature"), ))
def validate_shortcut_string(s):
if "/" in s or "." in s:
raise ValidationError("shortcut cannot contain slashes or periods")
if not all([char.isalpha() or char.isdigit() or char in ["_", "-"] for char in s]):
raise ValidationError("shortcuts must be letters, numbers, underscores or dashes")
if not s.lower() == s:
raise ValidationError("shortcut must be all lowercase")
class Shortcut(models.Model):
shortcut_string = models.CharField(max_length=500, unique=True, validators=[validate_shortcut_string])
target_item = models.ForeignKey(ParticipationItem)
### Signal handling
def create_user_profile(sender, instance, created, **kwargs):
if created:
new_profile = UserProfile()
new_profile.user = instance
new_profile.save()
new_profile.tags.add(get_usa())
post_save.connect(create_user_profile, sender=User)
def process_new_item(sender, instance, created, **kwargs):
if created:
# process display image
instance.set_display_image()
instance.save()
# tag the item
instance.set_relevant_tags()
def register_participation_item_subclass(cls):
post_save.connect(process_new_item, sender=cls)
### Custom sql
class AbsoluteValue(Transform):
lookup_name = 'abs'
function = 'ABS'
models.FloatField.register_lookup(AbsoluteValue)
| agpl-3.0 | 2,237,050,102,311,999,200 | 39.011662 | 204 | 0.666205 | false |
jordifierro/abidria-api | experiences/entities.py | 1 | 2739 | class Experience:
def __init__(self, title, description, author_id,
author_username=None, id=None, picture=None, is_mine=False, is_saved=False):
self._id = id
self._title = title
self._description = description
self._picture = picture
self._author_id = author_id
self._author_username = author_username
self._is_mine = is_mine
self._is_saved = is_saved
@property
def id(self):
return self._id
@property
def title(self):
return self._title
@property
def description(self):
return self._description
@property
def picture(self):
return self._picture
@property
def author_id(self):
return self._author_id
@property
def author_username(self):
return self._author_username
@property
def is_mine(self):
return self._is_mine
@property
def is_saved(self):
return self._is_saved
def builder(self):
return Experience.Builder(self)
def __eq__(self, other):
return self.__dict__ == other.__dict__
class Builder:
def __init__(self, experience):
self._id = experience.id
self._title = experience.title
self._description = experience.description
self._picture = experience.picture
self._author_id = experience.author_id
self._author_username = experience.author_username
self._is_mine = experience.is_mine
self._is_saved = experience.is_saved
def id(self, id):
self._id = id
return self
def title(self, title):
self._title = title
return self
def description(self, description):
self._description = description
return self
def picture(self, picture):
self._picture = picture
return self
def author_id(self, author_id):
self._author_id = author_id
return self
def author_username(self, author_username):
self._author_username = author_username
return self
def is_mine(self, is_mine):
self._is_mine = is_mine
return self
def is_saved(self, is_saved):
self._is_saved = is_saved
return self
def build(self):
return Experience(id=self._id, title=self._title, description=self._description,
picture=self._picture, author_id=self._author_id,
author_username=self._author_username, is_mine=self._is_mine,
is_saved=self._is_saved)
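# Illustrative usage of the builder above (variable names are hypothetical):
# updated = experience.builder().title('New title').is_saved(True).build()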
| mit | -958,076,121,102,915,600 | 26.39 | 93 | 0.551661 | false |
redCOR-Public/PiBot-A | robot-oa.py | 1 | 1993 | #!/usr/bin/python
# ========================================================
# Python script for PiBot-A: obstacle avoidance
# Version 1.0 - by Thomas Schoch - www.retas.de
# ========================================================
from __future__ import print_function #+# only needed if print is used!
from pololu_drv8835_rpi import motors, MAX_SPEED
from time import sleep
import RPi.GPIO as GPIO
# Signal handler for SIGTERM
import signal, sys
def sigterm_handler(signal, frame):
motors.setSpeeds(0, 0)
sys.exit(0)
signal.signal(signal.SIGTERM, sigterm_handler)
# GPIO pins of sensors
GPIO.setmode(GPIO.BCM)
GPIO_right = 21
GPIO_middle = 26
GPIO_left = 20
# Configure sensors as input
GPIO.setup(GPIO_right, GPIO.IN)
GPIO.setup(GPIO_middle, GPIO.IN)
GPIO.setup(GPIO_left, GPIO.IN)
try:
# Start moving forward
motors.setSpeeds(MAX_SPEED, MAX_SPEED)
while True: # Main loop
# Read sensor input (positive logic)
INPUT_right = not GPIO.input(GPIO_right)
INPUT_middle = not GPIO.input(GPIO_middle)
INPUT_left = not GPIO.input(GPIO_left)
# Set motor speeds dependent on sensor input
if INPUT_left and INPUT_right:
# Obstacle immediately ahead: move a bit backward,
# turn left a little bit and then proceed forward
motors.setSpeeds(-200, -200)
sleep (1)
motors.setSpeeds(-200, 200)
sleep (0.3)
motors.setSpeeds(MAX_SPEED, MAX_SPEED)
elif INPUT_middle: # turn left
motors.setSpeeds(100, MAX_SPEED)
elif INPUT_left: # turn right
motors.setSpeeds(MAX_SPEED, 200)
elif INPUT_right: # turn left
motors.setSpeeds(200, MAX_SPEED)
else:
# No sensor input: drive forward
motors.setSpeeds(MAX_SPEED, MAX_SPEED)
# Repeat this loop every 0.1 seconds
sleep (0.1)
finally:
# Stop motors in case of <Ctrl-C> or SIGTERM:
motors.setSpeeds(0, 0)
| mit | 6,732,276,785,605,981,000 | 28.308824 | 58 | 0.601606 | false |
mattvonrocketstein/smash | smashlib/plugins/cmd_env.py | 1 | 1353 | """ smashlib.plugins.env_command
"""
import os
from smashlib import get_smash
from smashlib.plugins import Plugin
from smashlib.patches.base import PatchMagic
from smashlib.completion import smash_env_complete
env_completer = lambda himself, event: smash_env_complete(event.symbol)
env_regex = r'env [A-Za-z0-9_]+$'
class PatchEnv(PatchMagic):
"""
Patches builtin "env" command to add support for wildcard queries.
Example:
smash$ env XTERM*
{ 'XTERM_LOCALE': 'en_US.UTF-8',
'XTERM_SHELL': '/bin/bash',
'XTERM_VERSION': 'XTerm(297)' }
"""
name = 'env'
def __call__(self, parameter_s=''):
split = '=' if '=' in parameter_s else ' '
bits = parameter_s.split(split)
if len(bits) == 1 and bits[0]:
varname = bits[0]
if varname[-1].endswith('*'):
return dict([[k, v] for k, v in os.environ.items()
if k.startswith(varname[:-1])])
return self.original(parameter_s)
class EnvCommand(Plugin):
verbose = True
def init(self):
self.contribute_patch(PatchEnv)
self.contribute_completer(
env_regex, env_completer)
def load_ipython_extension(ip):
""" called by %load_ext magic"""
return EnvCommand(get_ipython()).install()
| mit | -4,364,809,888,031,154,000 | 25.019231 | 74 | 0.589061 | false |
teknologkoren/teknologkoren-se | teknologkoren_se/util.py | 1 | 2092 | from urllib.parse import urlparse, urljoin
from flask import g, request, session, url_for
from teknologkoren_se import app
def paginate(content, page, page_size):
"""Return a page of content.
Calculates which posts to have on a specific page based on which
page they're on and how many objects there are per page.
"""
start_index = (page-1) * page_size
end_index = start_index + page_size
pagination = content[start_index:end_index]
return pagination
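# Example (illustrative): with page_size=10, paginate(posts, page=2, page_size=10)
# returns posts[10:20], i.e. the second page of ten posts.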
def url_for_other_page(page):
"""Return url for a page number."""
args = request.view_args.copy()
args['page'] = page
return url_for(request.endpoint, **args)
def is_safe_url(target):
"""Tests if the url is a safe target for redirection.
Does so by checking that the url is still using http or https
and that the url is still our site.
"""
test_url = urlparse(urljoin(request.host_url, target))
return test_url.scheme in ('http', 'https') and \
test_url.netloc in app.config['ALLOWED_HOSTS']
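# Illustrative behaviour, assuming ALLOWED_HOSTS contains 'example.com':
# is_safe_url('https://example.com/profile') -> True
# is_safe_url('ftp://example.com/file') -> False (scheme is not http/https)
# is_safe_url('https://evil.example.net/') -> False (host is not ours)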
def get_redirect_target():
"""Get where we want to redirect to.
Checks the 'next' argument in the request and if nothing there, use
the http referrer. Also checks whether the target is safe to
redirect to (no 'open redirects').
"""
for target in (request.values.get('next'), request.referrer):
if not target:
continue
if target == request.url:
continue
if is_safe_url(target):
return target
def bp_url_processors(bp):
@bp.url_defaults
def add_language_code(endpoint, values):
if not values.get('lang_code', None):
values['lang_code'] = getattr(g, 'lang_code', None) or \
session.get('lang_code')
@bp.url_value_preprocessor
def pull_lang_code(endpoint, values):
lang_code = values.pop('lang_code')
if lang_code in ('sv', 'en'):
# Valid lang_code, set the global lang_code and cookie
g.lang_code = lang_code
session['lang_code'] = g.lang_code
| mpl-2.0 | 5,092,879,950,161,629,000 | 30.223881 | 71 | 0.630497 | false |
SDeans0/Moodle | matchToCloze.py | 1 | 7517 | # Written by Sam Deans.
# Twitter/GitHub: @sdeans0
# Licensed under the Apache License, Version 2.0 (see below)
# This program is for turning matching type Moodle questions to Cloze type questions in
# Moodle xml format.
# Run it from the command line by importing the moduleand running the
# matchToCloze.main('filename') function.
import xml.etree.ElementTree as ET
from random import random
def main(filename):
'''This takes a Moodle xml document and writes a new one with the matching type questions
from the old one parsed as Clozes'''
root = loadXML(filename)
questions = getQuestions(root)
answers = getAnswers(root)
stems = getStems(root)
gotName = getName(root)
gotGeneralFeedback = getGeneralFeedback(root)
gotPenalty = getPenalty(root)
gotHidden = getHidden(root)
quiz = ET.Element('quiz')
for index in range(len(gotName)):
wrappedClozeText = clozeSyntactify(questions[index],answers[index], stems[index])
quiz = clozeSkeleton(quiz,gotName[index],wrappedClozeText,gotGeneralFeedback[index],gotPenalty[index],gotHidden[index])
newFileName = changeFileName(filename)
output = ET.ElementTree(quiz)
output.write(newFileName, method = 'html')
# It might be worth importing xml.minidom to make a more neatly formatted XML document
# - this does not seem to be a problem in Moodle though
def loadXML(filename):
'''Loads and xml file and returns the root of the tree'''
tree = ET.parse(filename)
root = tree.getroot()
return root
def changeFileName(filename):
'''Alters the filename inputted to reflect that the output is of Clozes derived from
matching type questions'''
newFileName = filename[:-4] + '-Match-to-Cloze.xml'
return newFileName
def getQuestions(root):
'''Returns the text of each matching subquestions in a nested list:
[[Subquestions from Q1],[Subquestions from Q2],etc]'''
questions = []
for index in range(0,len(root)):
if root[index].attrib == {'type':'matching'}:
subquestions = []
for element in root[index].findall('subquestion'):
subquestions.append(element[0].text[3:-4])
questions.append(subquestions)
return questions
def getAnswers(root):
'''Returns the answers to each subquestion in a nested list:
[[Answers to subquestions from Q1],[Answers to subquestions from Q2],etc]'''
answers = []
for index in range(0,len(root)):
if root[index].attrib == {'type':'matching'}:
subquestions = []
for subquestion in root[index].findall('subquestion'):
for answer in subquestion.findall('answer'):
subquestions.append(answer[0].text)
answers.append(subquestions)
return answers
def getName(root):
'''Returns a list of the titles of each matching question'''
names = []
for index in range(0,len(root)):
if root[index].attrib == {'type':'matching'}:
names.append(root[index][0][0].text)
return names
def getStems(root):
'''Returns the content of the "Question Text" box which explains the theme of the
subquestions'''
stems = []
for index in range(0,len(root)):
if root[index].attrib == {'type':'matching'}:
stems.append(root[index][1][0].text)
print stems
return stems
def getGeneralFeedback(root):
'''Returns the content of the "General Feedback" box which explains the solutions to
the subquestions'''
genFeedbacks = []
for index in range(0,len(root)):
if root[index].attrib == {'type':'matching'}:
genFeedbacks.append(root[index][2][0].text)
return genFeedbacks
def getPenalty(root):
'''Returns a list of the penalties for multiple tries (percent of whole marks)
for each matching question'''
penalties = []
for index in range(0,len(root)):
if root[index].attrib == {'type':'matching'}:
penalties.append(root[index][4].text)
return penalties
def getHidden(root):
'''Returns a list of whether each question is hidden (0 or 1)'''
hiddens = []
for index in range(0,len(root)):
if root[index].attrib == {'type':'matching'}:
hiddens.append(root[index][4].text)
return hiddens
def clozeSyntactify(question, answers, stem): #Questions and answers are lists of the same length
'''Takes the list of subquestions, answers to these, and the overall stem of a matching
question and returns the text of a Cloze analog with newlines between each question'''
clozeExpressionList = []
if len(question) != len(answers):
print 'Error: subquestion and answer lists are not the same length'
for index in range(len(answers)):
answerList = []
for item in answers:
if item == answers[index]:
continue
else:
answerList.append(item)
clozeExpression = '<p><br>' + question[index] + ' {1:MC:=%s' % (answers[index])
for item in answerList:
clozeExpression += '~%s' % (item)
clozeExpression += '}</p>\n'
clozeExpressionList.append(clozeExpression)
clozeText = stem + ' \n <br>' + ''.join(clozeExpressionList)
return clozeText
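# Illustrative sketch with made-up data: for questions ['Capital of France?',
# 'Capital of Germany?'], answers ['Paris', 'Berlin'] and stem 'Match the capitals',
# the first generated expression resembles:
# <p><br>Capital of France? {1:MC:=Paris~Berlin}</p>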
def safeHTML(clozeText):
'''Designed to add a CDATA tag to the Cloze text'''
# This needs some attention - it might be better to work this in terms of forming an
# element instance rather than adding plain text.
wrappedClozeText = '<![CDATA[' + clozeText + ']]>'
return wrappedClozeText
def clozeSkeleton(quiz,gotName,wrappedClozeText,gotGeneralFeedback,gotPenalty,gotHidden):
'''clozeSkeleton takes the cloze text, the name, the general feedback, penalty and
whether the question is hidden, and creates an element which is a full cloze question
in Moodle XML format. It builds this as an sub element of the quiz entered.'''
serialNumber = int(6 * 10**6 + random() * 10**4) # At some point in the future this could
# become a bug. Just make it 10**7 or 10**8 or something to avoid the indexing being
# the same. Could replace with hash('gotName')
comment = ET.Comment(' question: %d ' % (serialNumber))
quiz.append(comment)
question = ET.SubElement(quiz, 'question', {'type':'cloze'})
name = ET.SubElement(question, 'name')
nametext = ET.SubElement(name, 'text')
nametext.text = gotName
questiontext = ET.SubElement(question, 'questiontext')
questiontexttext = ET.SubElement(questiontext, 'text')
questiontexttext.text = wrappedClozeText
generalfeedback = ET.SubElement(question, 'generalfeedback')
generalfeedbacktext = ET.SubElement(generalfeedback, 'text')
generalfeedbacktext.text = gotGeneralFeedback
penalty = ET.SubElement(question, 'penalty')
penalty.text = gotPenalty
hidden = ET.SubElement(question, 'hidden')
hidden.text = gotHidden
return quiz
# Copyright 2015 Sam Deans
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| apache-2.0 | -6,817,513,452,035,199,000 | 37.352041 | 127 | 0.673407 | false |
elyezer/robottelo | tests/foreman/ui/test_subnet.py | 1 | 9765 | # -*- encoding: utf-8 -*-
"""Test class for Subnet UI
:Requirement: Subnet
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: UI
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
from fauxfactory import gen_ipaddr, gen_netmask, gen_string
from nailgun import entities
from robottelo.datafactory import (
generate_strings_list,
invalid_values_list,
valid_data_list,
)
from robottelo.decorators import run_only_on, tier1, tier2
from robottelo.test import UITestCase
from robottelo.ui.base import UIError
from robottelo.ui.factory import make_subnet
from robottelo.ui.locators import common_locators, locators, tab_locators
from robottelo.ui.session import Session
class SubnetTestCase(UITestCase):
"""Implements Subnet tests in UI"""
@classmethod
def setUpClass(cls):
super(SubnetTestCase, cls).setUpClass()
cls.organization = entities.Organization().create()
@run_only_on('sat')
@tier1
def test_positive_create_with_name(self):
"""Create new subnet using different names
:id: 2318f13c-db38-4919-831f-667fc6e2e7bf
:expectedresults: Subnet is created
:CaseImportance: Critical
"""
with Session(self.browser) as session:
for name in generate_strings_list():
with self.subTest(name):
make_subnet(
session,
subnet_name=name,
subnet_network=gen_ipaddr(ip3=True),
subnet_mask=gen_netmask(),
)
self.assertIsNotNone(self.subnet.search(name))
@run_only_on('sat')
@tier1
def test_positive_create_with_long_name(self):
"""Create new subnet with 255 characters in name
:id: b86772ad-a8ff-4c2b-93f4-4a715e4da59b
:expectedresults: Subnet is created with 255 chars
:CaseImportance: Critical
"""
with Session(self.browser) as session:
for name in valid_data_list():
with self.subTest(name):
make_subnet(
session,
subnet_name=name,
subnet_network=gen_ipaddr(ip3=True),
subnet_mask=gen_netmask(),
)
self.assertIsNotNone(
self.subnet.search(name))
@run_only_on('sat')
@tier2
def test_positive_add_domain(self):
"""Create new subnet and associate domain with it
:id: adbc7189-b451-49df-aa10-2ae732832dfe
:expectedresults: Subnet is created with domain associated
:CaseLevel: Integration
"""
name = gen_string('alpha')
domain = entities.Domain(
organization=[self.organization]
).create()
with Session(self.browser) as session:
make_subnet(
session,
org=self.organization.name,
subnet_name=name,
subnet_network=gen_ipaddr(ip3=True),
subnet_mask=gen_netmask(),
domains=[domain.name],
)
self.subnet.search_and_click(name)
self.subnet.click(tab_locators['subnet.tab_domain'])
element = self.subnet.wait_until_element(
common_locators['entity_deselect'] % domain.name)
checkbox_element = self.subnet.wait_until_element(
common_locators['entity_checkbox'] % domain.name)
# Depending upon the number of domains, either a checkbox or a
# selection list appears.
if element is None and checkbox_element is None:
raise UIError('Neither checkbox or select list is present')
if checkbox_element:
self.assertTrue(checkbox_element.is_selected())
@run_only_on('sat')
@tier1
def test_negative_create_with_invalid_name(self):
"""Create new subnet with invalid names
:id: d53056ad-a219-40d5-b20e-95ad343c9d38
:expectedresults: Subnet is not created
:CaseImportance: Critical
"""
with Session(self.browser) as session:
for name in invalid_values_list(interface='ui'):
with self.subTest(name):
make_subnet(
session,
subnet_name=name,
subnet_network=gen_ipaddr(ip3=True),
subnet_mask=gen_netmask(),
)
self.assertIsNotNone(session.nav.wait_until_element(
common_locators['haserror']))
@run_only_on('sat')
@tier1
def test_negative_create_with_invalid_params(self):
"""Create new subnet with negative values
:id: 5caa6aed-2bba-43d8-bb40-2d80b9d42b69
:expectedresults: Subnet is not created
:CaseImportance: Critical
"""
with Session(self.browser) as session:
make_subnet(
session,
subnet_name=gen_string('alpha'),
subnet_network='292.256.256.0',
subnet_mask='292.292.292.0',
subnet_gateway='292.256.256.254',
subnet_primarydns='292.256.256.2',
subnet_secondarydns='292.256.256.3',
)
self.assertIsNotNone(session.nav.wait_until_element(
locators['subnet.network_haserror']))
self.assertIsNotNone(session.nav.wait_until_element(
locators['subnet.mask_haserror']))
self.assertIsNotNone(session.nav.wait_until_element(
locators['subnet.gateway_haserror']))
self.assertIsNotNone(session.nav.wait_until_element(
locators['subnet.dnsprimary_haserror']))
self.assertIsNotNone(session.nav.wait_until_element(
locators['subnet.dnssecondary_haserror']))
@run_only_on('sat')
@tier1
def test_positive_delete(self):
"""Delete an existing subnet
:id: cb1265de-a0ed-40b7-ba25-fe92251b9001
:expectedresults: Subnet is deleted
:CaseImportance: Critical
"""
with Session(self.browser) as session:
for name in generate_strings_list():
with self.subTest(name):
make_subnet(
session,
subnet_name=name,
subnet_network=gen_ipaddr(ip3=True),
subnet_mask=gen_netmask(),
)
self.subnet.delete(name)
@run_only_on('sat')
@tier1
def test_negative_delete(self):
"""Delete subnet. Attempt to delete subnet, but cancel in the
confirmation dialog box.
:id: 9eed9020-8d13-4ba0-909a-db44ad0aecb6
:expectedresults: Subnet is not deleted
:CaseImportance: Critical
"""
name = gen_string('utf8')
with Session(self.browser) as session:
make_subnet(
session,
subnet_name=name,
subnet_network=gen_ipaddr(ip3=True),
subnet_mask=gen_netmask(),
)
self.subnet.delete(name, really=False)
@run_only_on('sat')
@tier1
def test_positive_update_name(self):
"""Update Subnet name
:id: ec9f11e3-27a7-45d8-91fe-f04c20b595bc
:expectedresults: Subnet name is updated
:CaseImportance: Critical
"""
name = gen_string('alpha')
with Session(self.browser) as session:
make_subnet(
session,
subnet_name=name,
subnet_network=gen_ipaddr(ip3=True),
subnet_mask=gen_netmask(),
)
for new_name in generate_strings_list():
with self.subTest(new_name):
self.subnet.update(name, new_subnet_name=new_name)
result_object = self.subnet.search_and_validate(new_name)
self.assertEqual(new_name, result_object['name'])
name = new_name # for next iteration
@run_only_on('sat')
@tier1
def test_positive_update_network(self):
"""Update Subnet network
:id: f79d3b1b-6101-4009-88ad-b259d4794e6c
:expectedresults: Subnet network is updated
:CaseImportance: Critical
"""
name = gen_string('alpha')
new_network = gen_ipaddr(ip3=True)
with Session(self.browser) as session:
make_subnet(
session,
subnet_name=name,
subnet_network=gen_ipaddr(ip3=True),
subnet_mask=gen_netmask(),
)
self.subnet.update(name, new_subnet_network=new_network)
result_object = self.subnet.search_and_validate(name)
self.assertEqual(new_network, result_object['network'])
@run_only_on('sat')
@tier1
def test_positive_update_mask(self):
"""Update Subnet mask
:id: 6cc5de06-5463-4919-abe4-92cef4506a54
:expectedresults: Subnet mask is updated
:CaseImportance: Critical
"""
name = gen_string('alpha')
new_mask = gen_netmask(16, 31)
with Session(self.browser) as session:
make_subnet(
session,
subnet_name=name,
subnet_network=gen_ipaddr(ip3=True),
subnet_mask=gen_netmask(1, 15),
)
self.subnet.update(name, new_subnet_mask=new_mask)
result_object = self.subnet.search_and_validate(name)
self.assertEqual(new_mask, result_object['mask'])
| gpl-3.0 | 1,911,977,296,066,574,800 | 32.101695 | 77 | 0.563441 | false |
joereynolds/Mr-Figs | src/game.py | 1 | 1318 | import pygame
import json
import src.config as config
from src.user_data import UserData
class Game(object):
def __init__(self, fps):
self.done = False
self.fps = fps
self.clock = pygame.time.Clock()
def run(self, scene):
"""Our main function call. inits pygame, starts our fps clock,
and then begins our main loop
@fps = The fps you desire for the program
@scene = The scene from environment.py that you wish to use
for processing, rendering, and updating.
"""
pygame.init()
pygame.display.set_caption(config.game_title)
with open(UserData.FULL_PATH) as user_config:
game_data = json.load(user_config)
if game_data['settings']['music']:
pygame.mixer.pre_init(44100, -16, 2, 512)
pygame.mixer.init()
pygame.mixer.music.load('./assets/audio/music/carmack.ogg')
pygame.mixer.music.play(-1)
delta_time = 0
self.clock.tick(self.fps)
while not self.done:
scene.process_input()
scene.update(delta_time)
scene.render()
scene = scene.next
pygame.display.flip()
delta_time = self.clock.tick(self.fps) / 1000.0
pygame.quit()
| gpl-3.0 | 129,886,711,976,486,270 | 28.288889 | 71 | 0.575873 | false |
Ezra/qwertonic | music.py | 1 | 2072 | ###music.py
###Created by Joseph Rollinson, [email protected]
###Last Modified: 12/07/11
###Requires: pyo
###Turns pyo into a note class that is very easy to run.
###Also contains functions to run pyo music server.
import pyo
class note(object):
'''creates a note that can be played'''
def __init__(self,frequency=440, attack=.01, decay=.2, sustain=.5, release=.1, duration=1, mul=1):
#some of this might not need to be saved later, for space saving.
self.frequency = frequency
self.attack = attack
self.decay = decay
self.sustain = sustain
self.release = release
self.duration = duration
self.mul = mul
self.envelope = pyo.Adsr(attack = attack,
decay = decay,
sustain = sustain,
release = release,
dur = duration,
mul = mul)
self.mod = pyo.Sine(freq = 0, mul = 25)
self.wave = pyo.Sine(freq = self.frequency + self.mod, mul = self.envelope)
self.wave.out()
def play(self,modulation=0):
'''plays the note'''
self.mod.setFreq(modulation)
self.wave.setFreq(self.frequency+self.mod)
self.envelope.play()
def stop(self):
self.envelope.stop()
def setFrequency(self,frequency):
'''sets the frequency of the note'''
self.frequency = frequency
##def getNotes():
## '''returns a list of notes from middle C to the next B'''
## return map( lambda frequency: note(frequency), freqs)
def musicServer():
'''Returns a music server'''
s = pyo.Server()
s.setVerbosity(2)
s.boot()
return s
def startServer(server):
server.start()
def stopServer(server):
server.stop()
server.shutdown()
def guiMusicServer(server):
'''displays music server's gui'''
server.gui(locals())
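# Hedged usage sketch (not part of the original module):
# s = musicServer()
# startServer(s)
# a4 = note(frequency=440)
# a4.play(modulation=5) # slight vibrato-like modulation
# stopServer(s)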
| bsd-2-clause | -5,588,362,250,769,846,000 | 31.419355 | 102 | 0.543919 | false |
yuanagain/seniorthesis | src/2017-04-06.py | 1 | 6190 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import math
import numdifftools as nd
default_lambda_1, default_lambda_2, default_lambda_3 = 0.086, 0.141, 0.773
default_start = (0.372854105052, 0.393518965248, -0.0359026080443, -0.216701666067)
x_0 = default_start
res = 0.01
dt = res
def quad_sq_distance(x, y):
"""Computes the squared distance"""
dists = [ x[i] - y[i] for i in range(len(x) )]
dists = [ dists[i]**2 for i in range(len(x) )]
return sum(dists)
def plot_quad(ws, xs, ys, zs, plot_type = 0, txt = ""):
if plot_type == 0:
print("Plotting Double Plot Quad Viz")
plt.figure(1)
plt.subplot(2, 1, 1)
plt.subplots_adjust(top=0.85)
plt.plot(xs, ws)
#plt.yscale('linear')
plt.title('xy')
plt.grid(True)
#plt.gca().set_aspect('equal')
plt.subplot(2, 1, 2)
plt.plot(ys, zs)
#plt.yscale('linear')
plt.title('wz')
plt.grid(True)
#plt.gca().set_aspect('equal')
plt.suptitle(txt, fontsize=14)
plt.show()
elif plot_type == 1:
print("Plotting Overlain Double Plot Quad Viz")
plt.figure(1)
plt.plot(xs, ws)
plt.plot(ys, zs)
#plt.yscale('linear')
plt.title('x-w, y-z')
plt.grid(True)
#plt.gca().set_aspect('equal')
plt.suptitle(txt, fontsize=14)
plt.show()
elif plot_type == 2:
print("Plotting Sphere Plot Quad Viz")
fig = plt.figure()
ax = fig.gca(projection='3d')
plt.subplots_adjust(top=0.85)
plt.suptitle(txt, fontsize=14)
qdist = quad_distance(ws, xs, ys, zs)
ws = np.divide(ws, qdist)
xs = np.divide(xs, qdist)
ys = np.divide(ys, qdist)
zs = np.divide(zs, qdist)
ax.plot(xs, ys, zs)
ax.set_xlabel("X Axis")
ax.set_ylabel("Y Axis")
ax.set_zlabel("Z Axis")
ax.set_title("Nonrigorous Solution")
plt.show()
else:
print("Invalid Plot Type")
def experiment_1(start_pt = default_start,
T = 1,
lmbda = [default_lambda_1, default_lambda_2, default_lambda_3],
res = 0.001,
expmt = "search"):
## define evaluation function
def dots(x_0, lmbda):
"""
dz1/dt = lambda_2 * z1^2 - (lambda_2 + lambda_3) * z1 * z2
dz2/dt = lambda_1 * z2^2 - (lambda_1 + lambda_3) * z1 * z2
http://www.math.kit.edu/iag3/~herrlich/seite/wws-11/media/wws-talk-valdez.pdf
"""
x_1 = x_0[0]
y_1 = x_0[1]
x_2 = x_0[2]
y_2 = x_0[3]
# print(lmbda)
lambda_1 = lmbda[0]
lambda_2 = lmbda[1]
lambda_3 = lmbda[2]
x_1_dot = lambda_2 * (x_1**2 - y_1**2) - (lambda_2 + lambda_3) * (x_1*x_2 - y_1*y_2)
y_1_dot = 2 * lambda_2 * x_1 * y_1 - (lambda_2 + lambda_3) * (x_1*y_2 + y_1*x_2)
x_2_dot = lambda_1 * (x_2**2 - y_2**2) - (lambda_1 + lambda_3) * (x_1*x_2 - y_1*y_2)
y_2_dot = 2 * lambda_1 * x_2 * y_2 - (lambda_1 +lambda_3) * (x_1*y_2 + y_1*x_2)
return [x_1_dot, y_1_dot, x_2_dot, y_2_dot]
def f(x_0, lmbda, T = 1):
"""Find f(x_0 + T)"""
### TODO: refactor, make into array, then transpose
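        # Forward-Euler integration of the system from t = 0 to t = T with step size dt; returns the final state.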
stepCnt = math.ceil(T / dt)
# Need one more for the initial values
ws = np.empty((stepCnt + 1, ))
xs = np.empty((stepCnt + 1, ))
ys = np.empty((stepCnt + 1, ))
zs = np.empty((stepCnt + 1, ))
# Setting initial values
x_1 = x_0[0]
y_1 = x_0[1]
x_2 = x_0[2]
y_2 = x_0[3]
ws[0], xs[0], ys[0], zs[0] = x_1, y_1, x_2, y_2
# Stepping through "time".
for i in range(stepCnt):
derivs = dots([ ws[i], xs[i], ys[i], zs[i] ], lmbda )
ws[i + 1] = ws[i] + (derivs[0] * dt)
xs[i + 1] = xs[i] + (derivs[1] * dt)
ys[i + 1] = ys[i] + (derivs[2] * dt)
zs[i + 1] = zs[i] + (derivs[3] * dt)
return [ ws[-1], xs[-1], ys[-1], zs[-1] ]
def g(x_0, lmbda, T = 1):
"""objective function"""
return quad_sq_distance( f(x_0, lmbda, T), f(x_0, lmbda, 0) )
def g_T(x_0):
"""g instantiated with a fixed period"""
return g(x_0, lmbda, T)
def newton_search(x_0, T = 1, N = 25):
x = x_0
hessian = nd.core.Hessian(g_T)
jacobian = nd.core.Jacobian(g_T)
for i in range(N):
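            # Newton step: x <- x - H(x)^-1 * grad g(x); the Jacobian of the scalar objective serves as its gradient, hence the transpose.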
adjust = np.matmul(np.linalg.inv(hessian(x)), np.transpose( jacobian(x)))
adjust = np.transpose(adjust)[0]
#print(x)
#print(adjust)
x = list_subtract(x, adjust)
print(g_T(x))
print(x)
def plot_sim_path(x_0, T):
stepCnt = math.ceil(T / dt)
# Need one more for the initial values
ws = np.empty((stepCnt + 1,))
xs = np.empty((stepCnt + 1,))
ys = np.empty((stepCnt + 1,))
zs = np.empty((stepCnt + 1,))
# Setting initial values
x_1 = x_0[0]
y_1 = x_0[1]
x_2 = x_0[2]
y_2 = x_0[3]
ws[0], xs[0], ys[0], zs[0] = x_1, y_1, x_2, y_2
# Stepping through "time".
for i in range(stepCnt):
# Derivatives of the W, X, Y, Z state
derivs = dots([ ws[i], xs[i], ys[i], zs[i] ], lmbda )
ws[i + 1] = ws[i] + (derivs[0] * dt)
xs[i + 1] = xs[i] + (derivs[1] * dt)
ys[i + 1] = ys[i] + (derivs[2] * dt)
zs[i + 1] = zs[i] + (derivs[3] * dt)
plot_quad(ws, xs, ys, zs, 0)
if expmt == 'search':
newton_search(start_pt)
if expmt == 'plot':
plot_sim_path(x_0, T)
experiment_1((10.2,
9.3,
14.4,
12.2) , expmt = 'plot')
experiment_1((4.2, 3.3, 4.4, 2.2),
T = 10000,
lmbda = [0.086, 0.141, 0.773],
expmt = 'plot')
experiment_1((4.2, 3.3, 4.4, 2.2),
T = 1000,
lmbda = [0.086, 0.141, 0.773],
expmt = 'search')
| mit | 6,692,264,478,511,544,000 | 26.757848 | 92 | 0.47609 | false |
Maverun/Nurevam | Bot/cogs/remind.py | 1 | 14685 | from datetime import datetime, timedelta
from discord.ext import commands
from .utils import utils
import traceback
import asyncio
import discord
import pytz
loop_list = {}
class Remind(commands.Cog): #This is to remind user about task they set.
def __init__(self,bot):
self.bot = bot
self.redis = bot.db.redis
self.loop = asyncio.get_event_loop()
self.loop_reminder_timer = self.loop.create_task(self.timer())
def cog_unload(self):
self.loop_reminder_timer.cancel()
for val in loop_list.values():
val.cancel()
utils.prPurple("unload remindme task")
async def clear(self,gid,uid,mid):
lp = loop_list.pop(mid,None) #pop out of list. Cast int just in case
if lp is not None:
lp.cancel() #just in case it was running and someone CANCEL IT
await self.redis.lrem(f"{gid}:Remindme:Person:{uid}",1,mid)
await self.redis.hdel(f"{gid}:Remindme:member",mid)
await self.redis.hdel(f"{gid}:Remindme:data", mid)
await self.redis.hdel(f"{gid}:Remindme:channel", mid)
await self.redis.hdel(f"{gid}:Remindme:time", mid)
async def timer(self): #Checking if there is remindme task that bot lost during shutdown/restart (losing data from memory)
await asyncio.sleep(5)#give it a moment..
utils.prYellow("Remindme Timer start")
guild_list = list(self.bot.guilds)
for guild in guild_list: #checking each guild
get_time = datetime.now().timestamp()
utils.prLightPurple(f"Checking {guild.name}")
data = await self.redis.hgetall(f"{guild.id}:Remindme:data")
if data: #if there is exist data then get info about info about channel
author_list = await self.redis.hgetall(f"{guild.id}:Remindme:member")
channel = await self.redis.hgetall(f"{guild.id}:Remindme:channel")
time = await self.redis.hgetall(f"{guild.id}:Remindme:time")
for mid in data: #run every Id in data and return timer
try:
                        if author_list.get(mid): #to be compatible with old legacy.
check_str = f"{guild.id}:Remindme:Person:{author_list.get(mid)}"
if mid not in await self.redis.lrange(check_str,0,-1):
utils.prRed("RM:No longer in Person, so delete....")
await self.clear(guild.id,author_list.get(mid),mid)
continue #Somehow if it cant delete old one we might do it here.
chan = guild.get_channel(int(channel[mid]))
author = guild.get_member(int(author_list[mid]))
                    #Once the legacy format is gone, there may be some leftovers,
                    #such as reminders that were set for a very long time (lasting years);
                    #those will most likely be deleted.
remain_time = int(time[mid]) - int(get_time)
utils.prYellow(f"Time: {remain_time},Channel: {channel[mid]}, Message: {data[mid]}")
if remain_time <= 0:
if chan:
await chan.send(f"{author.mention}\nI am deeply"
" sorry for not reminding you earlier!"
" You were reminded of the following:\n"
f"```fix\n {data[mid]} \n```")
await self.clear(guild.id,author.id,mid)
else:
if author_list.get(mid):
task = self.loop.create_task(self.time_send(
chan,author,data[mid],
remain_time,guild.id,mid))
else: #Old legacy... Soon to be delete once confirm
task = self.loop.create_task(self.old_time_send(
channel[mid],data[mid],
remain_time,guild.id,mid))
loop_list[mid] = task
except:
utils.prRed(traceback.format_exc())
async def time_send(self,channel,author,msg,time,guild,mid):
await asyncio.sleep(time)
#if it not in list, then dont send it as it is likely cancel.
if channel and loop_list.get(mid): #Making sure it not in list...
await self.send_msg(channel,author,msg)
await self.clear(guild,author.id,mid)
async def old_time_send(self,channel,msg,time,guild,x): #Legacy. Will delete
await asyncio.sleep(time)
channel = self.bot.get_channel(int(channel))
if channel:
await channel.send(msg)
await self.redis.hdel("{}:Remindme:data".format(guild), x)
await self.redis.hdel("{}:Remindme:channel".format(guild), x)
await self.redis.hdel("{}:Remindme:time".format(guild), x)
async def send_msg(self,ctx,author,msg):
await ctx.send(f"{author.mention} Reminder:\n```fix\n{msg}\n```")
@commands.command(hidden = True)
async def setTimezoneRemind(self,ctx,timez):
try:
#so we are checking if this timezone exists,
#if no error, we are clear.
            #I will make this command make more sense or look prettier
            #when I get a chance to rewrite it.... #TODO
if timez.lower() == "none":
await self.redis.delete("Profile:{}:Remind_Timezone".format(ctx.author.id))
return await ctx.send("I have removed timezone in your profile!")
tz = pytz.timezone(timez)
await self.redis.set("Profile:{}:Remind_Timezone".format(ctx.author.id),tz.zone)
return await ctx.send("Timezone set for your remind only!",delete_after = 30)
except pytz.UnknownTimeZoneError:
            await ctx.send("There is no such timezone, please check the list at <https://en.wikipedia.org/wiki/List_of_tz_database_time_zones> under **TZ database Name**",delete_after = 30)
@commands.command(hidden = True)
@commands.has_permissions(manage_guild = True)
async def setServerTimezoneRemind(self,ctx,timez):
try:
#Similar as setTimezoneRemind ^^^
if timez.lower() == "none":
await self.redis.delete("{}:Remindme:zone".format(ctx.guild.id))
return await ctx.send("I have removed timezone in the server overall!")
tz = pytz.timezone(timez)
await self.redis.set(f"{ctx.guild.id}:Remindme:zone",tz.zone)
return await ctx.send("Timezone set for your server!",delete_after = 30)
except pytz.UnknownTimeZoneError:
            await ctx.send("There is no such timezone, please check the list at <https://en.wikipedia.org/wiki/List_of_tz_database_time_zones> under **TZ database Name**",delete_after = 30)
async def split_time(self,ctx,t):
t = t.replace(".",":")
t = t.split(":")
if all(x.isdigit() for x in t) is False:
            await self.bot.say(ctx,content = "You entered the format wrong! It should look like this: {}remindtime hh:mm:ss message".format(ctx.prefix))
return None
return [int(x) for x in t] #Returning them but first make sure its int!
@commands.command(hidden=True,pass_context=True,aliases=["rt"])
async def remindtime(self,ctx,get_time,*,message=""):
#Split them and check if they are valid.
time = await self.split_time(ctx, get_time)
if time is None: return
if len(time) == 1:
time.append(0)
time.append(0)
elif len(time) == 2:
time.append(0)
if 0 > time[0] or time[0] > 23 or 0 > time[1] or time[1] > 59 or 0 > time[2] or time[2] > 59:
            return await self.bot.say(ctx,content = "You entered a number outside the allowed range!")
#we are grabbing timezone from user set, if user didnt set,
#it will return None, and when we create timezone,
#it will auto select UTC format.
tz = await self.redis.get(f"Profile:{ctx.author.id}:Remind_Timezone")
if tz is None:
tz = await self.redis.get(f"{ctx.guild.id}:Remindme:zone")
timez = pytz.timezone(tz or "UTC") #if none, then UTC default.
time_set = datetime.now(timez).replace(hour = time[0],
minute = time[1],
second = time[2])
time_now = datetime.now(timez)
delta_time = time_set - time_now
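        # If the requested clock time has already passed today, schedule the reminder for the same time tomorrow.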
if time_set < time_now:
delta_time += timedelta(days=1)
utils.prGreen(ctx)
await self.remindme_base(ctx,
str(timedelta(seconds=int(delta_time.total_seconds())))
,message=message)
@commands.command(hidden=True,pass_context=True,aliases=["rm"])
async def remindme(self,ctx,get_time,*,message=""):
await self.remindme_base(ctx,get_time,message=message)
async def remindme_base(self,ctx,get_time,*,message=""):
#Split them and check if they are valid.
time = await self.split_time(ctx,get_time)
if time is None: return
remind_time = 0
msg = "Time set "
if len(time) == 3:
remind_time += time[0]*3600 + time[1]*60 + time[2]
msg += "{} hours {} minute {} second".format(time[0],time[1],time[2])
elif len(time) == 2:
remind_time += time[0]*60 + time[1]
msg += "{} minute {} second".format(time[0],time[1])
else:
remind_time += time[0]
msg += "{} second".format(time[0])
if not message: message = "unspecified reminder"
rid = None
if remind_time >= 60:
            #if it's more than 1 min, add an id so it can still remind you in case
            #the bot goes down...
time = datetime.now().timestamp() + remind_time
#making ID of Message, User/Member, Guild
print(ctx)
mid = ctx.message.id
uid = ctx.author.id
gid = ctx.guild.id
cid = ctx.channel.id
            #we use a linked-list idea where we push the msg ID to the tail
            #This keeps the IDs in order so we can cancel them when needed
rid = await self.redis.rpush(f"{gid}:Remindme:Person:{uid}",mid)
#Setting MSGID to UserID, so we can find who responsiblity for this
await self.redis.hset(f"{gid}:Remindme:member",mid,uid)
await self.redis.hset(f"{gid}:Remindme:data",mid,message)
await self.redis.hset(f"{gid}:Remindme:channel",mid,cid)
await self.redis.hset(f"{gid}:Remindme:time",mid,int(time))
msg = f"{msg}\nID: {rid}" if rid else msg
await ctx.send(msg,delete_after=30)
task = self.loop.create_task( self.time_send(ctx.channel, ctx.author,
message, remind_time,
ctx.guild.id, str(ctx.message.id)))
loop_list[str(ctx.message.id)] = task
@commands.command(aliases = ["rl"], hidden = True)
async def remindlist(self, ctx ):
#There we will show a list of user's ID reminder.
uid = ctx.author.id
gid = ctx.guild.id
current_time = datetime.now().timestamp()
id_list = await self.redis.lrange(f"{gid}:Remindme:Person:{uid}",0,-1)
data_list = await self.redis.hgetall(f"{gid}:Remindme:data")
time_list = await self.redis.hgetall(f"{gid}:Remindme:time")
if not any(id_list): return await ctx.send("You haven't set any reminder!")
id_col = time_col = msg_col = ""
for i, rid in enumerate(id_list,start = 1):
old_time = time_list.get(rid,None)
if old_time is None: continue #TODO TEMP FIX
remain_time = int(old_time) - current_time
hold = [-1,-1,-1]
if remain_time >= 3600:
hold[0] = remain_time/3600 #hours
remain_time %= 3600 #get remiander min
if remain_time >= 60: #if min leftover
hold[1] = remain_time/60 #min
remain_time %= 60 #get remainder second
hold[2] = remain_time
ft = ["h","m","s"]
#we will then convert them to time message (5H,2M) etc.
#Cast int to cut off decimal
rtmsg = " ".join(f"{int(hold[i])}{ft[i]}" for i in range(3) if hold[i] != -1 )
#now we will set message, with 30 char of "data" to remind user
msg = data_list[rid]
id_col += f"{i}\n"
time_col += f"{rtmsg}\n"
msg_col += f"{msg[:30]}"
msg_col += "...\n" if len(msg) > 30 else "\n"
#set up embeds and add each to each field then send
e = discord.Embed()
e.add_field(name = "ID",value = id_col)
e.add_field(name = "Time Remain",value = time_col)
e.add_field(name = "Message",value = msg_col)
await ctx.send(embed = e)
@commands.command(aliases = ["rc"], hidden = True)
async def remindcancel(self, ctx, raw_rid:commands.Greedy[int],
is_all:str=""):
#We will just assume user know what they are doing lol
gid = ctx.guild.id
uid = ctx.author.id
if is_all == "all":
raw_len = await self.redis.llen(f"{gid}:Remindme:Person:{uid}")
raw_rid = [x for x in range(raw_len)]
if len(raw_rid) == 0:
return await ctx.send("You need to enter IDs (or \"all\")!")
raw_rid = sorted(raw_rid, reverse = True) #Sorting and in reverse
#Just in case user enter 1 3 then realized need to include 2.
for ri in raw_rid:
#First we will get what element it is at. Index start at 0 duh.
rid = await self.redis.lindex(f"{gid}:Remindme:Person:{uid}",ri-1)
#if we get none, out of range!
if rid is None:
return await ctx.send("Out of range!", delete_after = 30)
#Since we are here, then that mean it is inside, and we will just pop it
await self.clear(gid,uid,rid) #Clear up from DB
        await ctx.send("Done.\nNote: Any IDs after the ones you entered will shift down by 1")
def setup(bot):
bot.add_cog(Remind(bot))
| mit | 6,257,882,285,061,678,000 | 49.463918 | 197 | 0.55383 | false |
antong/ldaptor | ldaptor/apps/webui/search.py | 1 | 13213 | from zope.interface import Interface, implements
from webut.skin import iskin
from ldaptor.protocols.ldap import ldapclient, ldapsyntax
from ldaptor.protocols.ldap import distinguishedname, ldapconnector
from ldaptor.protocols import pureldap
from ldaptor import ldapfilter, interfaces
from twisted.internet import reactor
from ldaptor.apps.webui import config, iwebui
from ldaptor.apps.webui.uriquote import uriQuote
from ldaptor.apps.webui.i18n import _
from ldaptor.apps.webui import i18n
from ldaptor import weave
import os
from nevow import rend, inevow, loaders, url, tags
from formless import annotate, webform, iformless, configurable
class IMove(Interface):
"""Entries being moved in the tree."""
pass
class MoveItem(configurable.Configurable):
def getBindingNames(self, ctx):
return ['move', 'cancel']
def bind_move(self, ctx):
return annotate.MethodBinding(
'move',
annotate.Method(arguments=[
annotate.Argument('context', annotate.Context()),
],
label=_('Move')),
action=_('Move'))
def bind_cancel(self, ctx):
return annotate.MethodBinding(
'cancel',
annotate.Method(arguments=[
annotate.Argument('context', annotate.Context()),
],
label=_('Cancel')),
action=_('Cancel'))
def _remove(self, context):
session = context.locate(inevow.ISession)
move = session.getComponent(IMove)
if move is None:
return
try:
move.remove(self.original)
except ValueError:
pass
def move(self, context):
cfg = context.locate(interfaces.ILDAPConfig)
newDN = distinguishedname.DistinguishedName(
self.original.dn.split()[:1]
+ iwebui.ICurrentDN(context).split())
origDN = self.original.dn
d = self.original.move(newDN)
d.addCallback(lambda dummy: _('Moved %s to %s.') % (origDN, newDN))
def _cb(r, context):
self._remove(context)
return r
d.addCallback(_cb, context)
return d
def cancel(self, context):
self._remove(context)
return _('Cancelled move of %s') % self.original.dn
def strScope(scope):
if scope == pureldap.LDAP_SCOPE_wholeSubtree:
return _('whole subtree')
elif scope == pureldap.LDAP_SCOPE_singleLevel:
return _('single level')
elif scope == pureldap.LDAP_SCOPE_baseObject:
return _('baseobject')
else:
raise RuntimeError, 'scope is not known: %r' % scope
class SearchForm(configurable.Configurable):
implements(inevow.IContainer)
filter = None
def __init__(self):
super(SearchForm, self).__init__(None)
self.data = {}
def getBindingNames(self, ctx):
return ['search']
def bind_search(self, ctx):
l = []
l.append(annotate.Argument('ctx', annotate.Context()))
for field in config.getSearchFieldNames():
l.append(annotate.Argument('search_%s' % field,
annotate.String(label=field)))
l.append(annotate.Argument('searchfilter',
annotate.String(label=_("Advanced search"))))
l.append(annotate.Argument(
'scope',
annotate.Choice(label=_("Search depth"),
choices=[ pureldap.LDAP_SCOPE_wholeSubtree,
pureldap.LDAP_SCOPE_singleLevel,
pureldap.LDAP_SCOPE_baseObject,
],
stringify=strScope,
default=pureldap.LDAP_SCOPE_wholeSubtree)))
return annotate.MethodBinding(
name='search',
action=_("Search"),
typeValue=annotate.Method(arguments=l,
label=_('Search')))
def search(self, ctx, scope, searchfilter, **kw):
filt=[]
for k,v in kw.items():
assert k.startswith('search_')
if not k.startswith("search_"):
continue
k=k[len("search_"):]
if v is None:
continue
v=v.strip()
if v=='':
continue
# TODO escape ) in v
# TODO handle unknown filter name right (old form open in browser etc)
filter_ = config.getSearchFieldByName(k, vars={'input': v})
filt.append(ldapfilter.parseFilter(filter_))
if searchfilter:
try:
filt.append(ldapfilter.parseFilter(searchfilter))
except ldapfilter.InvalidLDAPFilter, e:
raise annotate.ValidateError(
{'searchfilter': str(e), },
partialForm=inevow.IRequest(ctx).args)
if filt:
if len(filt)==1:
query=filt[0]
else:
query=pureldap.LDAPFilter_and(filt)
else:
query=pureldap.LDAPFilterMatchAll
self.data.update(kw)
# annotate.Choice in nevow 0.3 maps choices to a list, and
# passes indexes to this list to client. annotate.Choice in
# 0.4pre converts choice to string and back with callbacks,
# defaulting to str, and leaving the value as string. We
# can't use the 0.4pre mechanism as long as we need 0.3
# compatibility, so work around that by explicitly making sure
# scope is an integer.
scope = int(scope)
self.data['scope'] = scope
self.data['searchfilter'] = searchfilter
self.filter = query
return self
def child(self, context, name):
fn = getattr(self, 'child_%s' % name, None)
if fn is None:
return None
else:
return fn(context)
def child_filter(self, context):
return self.filter.asText()
def child_results(self, context):
assert self.filter is not None
cfg = context.locate(interfaces.ILDAPConfig)
c=ldapconnector.LDAPClientCreator(reactor, ldapclient.LDAPClient)
curDN = iwebui.ICurrentDN(context)
d=c.connectAnonymously(curDN,
cfg.getServiceLocationOverrides())
def _search(proto, dn, searchFilter, scope):
baseEntry = ldapsyntax.LDAPEntry(client=proto, dn=dn)
d=baseEntry.search(filterObject=searchFilter,
scope=scope,
sizeLimit=20,
sizeLimitIsNonFatal=True)
def _cb(result, proto):
proto.unbind()
return result
d.addBoth(_cb, proto)
return d
d.addCallback(_search, curDN, self.filter, self.data['scope'])
return d
def child_base(self, context):
cfg = context.locate(interfaces.ILDAPConfig)
c=ldapconnector.LDAPClientCreator(reactor, ldapclient.LDAPClient)
d=c.connectAnonymously(iwebui.ICurrentDN(context),
cfg.getServiceLocationOverrides())
def _search(proto, base):
baseEntry = ldapsyntax.LDAPEntry(client=proto,
dn=base)
d=baseEntry.search(scope=pureldap.LDAP_SCOPE_baseObject,
sizeLimit=1)
def _cb(result, proto):
proto.unbind()
return result
d.addBoth(_cb, proto)
return d
d.addCallback(_search, iwebui.ICurrentDN(context))
def _first(results, dn):
assert len(results)==1, \
"Expected one result, not %r" % results
return {'dn': dn,
'attributes': results[0],
}
d.addCallback(_first, iwebui.ICurrentDN(context))
return d
def __nonzero__(self):
return self.filter is not None
def getSearchForm(ctx):
try:
hand = ctx.locate(inevow.IHand)
except KeyError:
pass
else:
if isinstance(hand, SearchForm):
return hand
return SearchForm()
class SearchPage(rend.Page):
implements(iskin.ISkinnable)
title = _('Ldaptor Search Page')
addSlash = True
docFactory = loaders.xmlfile(
'search.xhtml',
templateDir=os.path.split(os.path.abspath(__file__))[0])
def render_form(self, ctx, data):
conf = getSearchForm(ctx)
formDefaults = ctx.locate(iformless.IFormDefaults)
methodDefaults = formDefaults.getAllDefaults('search')
for k,v in conf.data.items():
if v is not None:
methodDefaults[k] = str(v)
return webform.renderForms()
def render_keyvalue(self, context, data):
return weave.keyvalue(context, data)
def render_keyvalue_item(self, context, data):
return weave.keyvalue_item(context, data)
def render_passthrough(self, context, data):
return context.tag.clear()[data]
def data_status(self, context, data):
try:
obj = context.locate(inevow.IStatusMessage)
except KeyError:
return ''
if isinstance(obj, SearchForm):
return ''
else:
return obj
def render_data(self, ctx, data):
return ctx.tag.clear()[data]
def render_if(self, context, data):
r=context.tag.allPatterns(str(bool(data)))
return context.tag.clear()[r]
def data_search(self, ctx, data):
return getSearchForm(ctx)
def data_header(self, ctx, data):
u=url.URL.fromContext(ctx)
u=u.parentdir().clear()
l=[]
l.append(tags.a(href=u.sibling("add"))[_("add new entry")])
return l
def data_navilink(self, context, data):
cfg = context.locate(interfaces.ILDAPConfig)
dn = iwebui.ICurrentDN(context)
r=[]
while dn!=distinguishedname.DistinguishedName(stringValue=''): #TODO and while inside base?
firstPart=dn.split()[0]
r.append(('../../%s' % uriQuote(str(dn)),
str(firstPart)))
dn=dn.up()
return r
def render_link(self, context, (url, desc)):
context.fillSlots('url', url)
context.fillSlots('description', desc)
return context.tag
def render_linkedDN(self, ctx, data):
dn = data
cfg = ctx.locate(interfaces.ILDAPConfig)
baseDN = iwebui.ICurrentDN(ctx)
ctx.tag.clear()
while (dn!=baseDN
and dn!=distinguishedname.DistinguishedName(stringValue='')):
firstPart=dn.split()[0]
u = url.here.parentdir().parentdir().child(dn)
segments = inevow.ICurrentSegments(ctx)
if segments[-1] == '':
u = u.child(segments[-2]).child(segments[-1])
else:
u = u.child(segments[-1])
for segment in inevow.IRemainingSegments(ctx):
u = u.child(segment)
ctx.tag[tags.a(href=u)[str(firstPart)], ',']
dn=dn.up()
ctx.tag['%s\n' % str(dn)]
return ctx.tag
def render_entryLinks(self, ctx, data):
u = url.URL.fromContext(ctx).parentdir().clear()
l = [ (u.sibling('edit').child(uriQuote(data)),
_('edit')),
(u.sibling('move').child(uriQuote(data)),
_('move')),
(u.sibling('delete').child(uriQuote(data)),
_('delete')),
(u.sibling('change_password').child(uriQuote(data)),
_('change password')),
]
return self.render_sequence(ctx, l)
def render_listLen(self, context, data):
if data is None:
length = 0
else:
length = len(data)
return context.tag.clear()[length]
def render_mass_change_password(self, ctx, data):
u = url.URL.fromContext(ctx)
u = u.parentdir().sibling("mass_change_password")
u = u.child(uriQuote(data))
return ctx.tag(href=u)
def data_move(self, context, data):
session = context.locate(inevow.ISession)
if not session.getLoggedInRoot().loggedIn:
return []
move = session.getComponent(IMove)
if move is None:
return []
return move
def locateConfigurable(self, context, name):
if name == '':
return getSearchForm(context)
elif name.startswith('move_'):
dn = name[len('move_'):]
session = context.locate(inevow.ISession)
move = session.getComponent(IMove)
if move is not None:
for entry in move:
if entry.dn == dn:
return iformless.IConfigurable(MoveItem(entry))
raise KeyError, name
def render_move(self, context, data):
return webform.renderForms('move_%s' % data.dn)[context.tag]
render_i18n = i18n.render()
def getSearchPage():
r = SearchPage()
return r
| lgpl-2.1 | -8,235,966,525,162,713,000 | 32.282116 | 99 | 0.558919 | false |
Azure/azure-sdk-for-python | sdk/sql/azure-mgmt-sqlvirtualmachine/azure/mgmt/sqlvirtualmachine/_configuration.py | 1 | 3338 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials import TokenCredential
class SqlVirtualMachineManagementClientConfiguration(Configuration):
"""Configuration for SqlVirtualMachineManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: Subscription ID that identifies an Azure subscription.
:type subscription_id: str
"""
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
**kwargs # type: Any
):
# type: (...) -> None
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
super(SqlVirtualMachineManagementClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2017-03-01-preview"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-sqlvirtualmachine/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| mit | 768,652,286,479,161,600 | 46.014085 | 129 | 0.676153 | false |
crypticmac/McGregor | contrib/bitrpc/bitrpc.py | 1 | 9665 | from jsonrpc import ServiceProxy
import sys
import string
import getpass
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "encryptwallet":
try:
pwd = getpass.getpass(prompt="Enter passphrase: ")
pwd2 = getpass.getpass(prompt="Repeat passphrase: ")
if pwd == pwd2:
access.encryptwallet(pwd)
print "\n---Wallet encrypted. Server stopping, restart to run with encrypted wallet---\n"
else:
print "\n---Passphrases do not match---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a McGregor address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a McGregor address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
			print access.getwork(data)
except:
			print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = getpass.getpass(prompt="Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = getpass.getpass(prompt="Enter old wallet passphrase: ")
pwd2 = getpass.getpass(prompt="Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| mit | -8,488,040,174,819,469,000 | 27.679525 | 101 | 0.568546 | false |
Ebag333/Pyfa | gui/itemStats.py | 1 | 50776 | # =============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
import re
import os
import csv
import sys
import subprocess
import wx
import wx.html
import wx.lib.mixins.listctrl as listmix
import config
from eos.types import Fit, Ship, Citadel, Module, Skill, Booster, Implant, Drone, Mode, Fighter
from service.market import Market
from service.attribute import Attribute
import gui.mainFrame
from gui.bitmapLoader import BitmapLoader
from gui.utils.numberFormatter import formatAmount
from gui.contextMenu import ContextMenu
class ItemStatsDialog(wx.Dialog):
counter = 0
def __init__(
self,
victim,
fullContext=None,
pos=wx.DefaultPosition,
size=wx.DefaultSize,
maximized=False
):
wx.Dialog.__init__(
self,
gui.mainFrame.MainFrame.getInstance(),
wx.ID_ANY,
title="Item stats",
pos=pos,
size=size,
style=wx.CAPTION | wx.CLOSE_BOX | wx.MINIMIZE_BOX | wx.MAXIMIZE_BOX | wx.RESIZE_BORDER | wx.SYSTEM_MENU
)
empty = getattr(victim, "isEmpty", False)
if empty:
self.Hide()
self.Destroy()
return
srcContext = fullContext[0]
try:
itmContext = fullContext[1]
except IndexError:
itmContext = None
item = getattr(victim, "item", None) if srcContext.lower() not in (
"projectedcharge",
"fittingcharge"
) else getattr(victim, "charge", None)
if item is None:
sMkt = Market.getInstance()
item = sMkt.getItem(victim.ID)
victim = None
self.context = itmContext
if item.icon is not None:
before, sep, after = item.icon.iconFile.rpartition("_")
iconFile = "%s%s%s" % (before, sep, "0%s" % after if len(after) < 2 else after)
itemImg = BitmapLoader.getBitmap(iconFile, "icons")
if itemImg is not None:
self.SetIcon(wx.IconFromBitmap(itemImg))
self.SetTitle("%s: %s%s" % ("%s Stats" % itmContext if itmContext is not None else "Stats", item.name,
" (%d)" % item.ID if config.debug else ""))
self.SetMinSize((300, 200))
if "wxGTK" in wx.PlatformInfo: # GTK has huge tab widgets, give it a bit more room
self.SetSize((580, 500))
else:
self.SetSize((550, 500))
# self.SetMaxSize((500, -1))
self.mainSizer = wx.BoxSizer(wx.VERTICAL)
self.container = ItemStatsContainer(self, victim, item, itmContext)
self.mainSizer.Add(self.container, 1, wx.EXPAND)
if "wxGTK" in wx.PlatformInfo:
self.closeBtn = wx.Button(self, wx.ID_ANY, u"Close", wx.DefaultPosition, wx.DefaultSize, 0)
self.mainSizer.Add(self.closeBtn, 0, wx.ALL | wx.ALIGN_RIGHT, 5)
self.closeBtn.Bind(wx.EVT_BUTTON, self.closeEvent)
self.SetSizer(self.mainSizer)
self.parentWnd = gui.mainFrame.MainFrame.getInstance()
dlgsize = self.GetSize()
psize = self.parentWnd.GetSize()
ppos = self.parentWnd.GetPosition()
ItemStatsDialog.counter += 1
self.dlgOrder = ItemStatsDialog.counter
counter = ItemStatsDialog.counter
dlgStep = 30
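        # Cascade each new stats window dlgStep pixels down and right; reset the cascade counter when a window would fall outside the parent.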
if counter * dlgStep > ppos.x + psize.width - dlgsize.x or counter * dlgStep > ppos.y + psize.height - dlgsize.y:
ItemStatsDialog.counter = 1
dlgx = ppos.x + counter * dlgStep
dlgy = ppos.y + counter * dlgStep
if pos == wx.DefaultPosition:
self.SetPosition((dlgx, dlgy))
else:
self.SetPosition(pos)
if maximized:
self.Maximize(True)
else:
if size != wx.DefaultSize:
self.SetSize(size)
self.parentWnd.RegisterStatsWindow(self)
self.Show()
self.Bind(wx.EVT_CLOSE, self.closeEvent)
self.Bind(wx.EVT_ACTIVATE, self.OnActivate)
def OnActivate(self, event):
self.parentWnd.SetActiveStatsWindow(self)
def closeEvent(self, event):
if self.dlgOrder == ItemStatsDialog.counter:
ItemStatsDialog.counter -= 1
self.parentWnd.UnregisterStatsWindow(self)
self.Destroy()
class ItemStatsContainer(wx.Panel):
def __init__(self, parent, stuff, item, context=None):
wx.Panel.__init__(self, parent)
sMkt = Market.getInstance()
mainSizer = wx.BoxSizer(wx.VERTICAL)
self.nbContainer = wx.Notebook(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, 0)
mainSizer.Add(self.nbContainer, 1, wx.EXPAND | wx.ALL, 2)
if item.traits is not None:
self.traits = ItemTraits(self.nbContainer, stuff, item)
self.nbContainer.AddPage(self.traits, "Traits")
self.desc = ItemDescription(self.nbContainer, stuff, item)
self.nbContainer.AddPage(self.desc, "Description")
self.params = ItemParams(self.nbContainer, stuff, item, context)
self.nbContainer.AddPage(self.params, "Attributes")
items = sMkt.getVariationsByItems([item])
if len(items) > 1:
self.compare = ItemCompare(self.nbContainer, stuff, item, items, context)
self.nbContainer.AddPage(self.compare, "Compare")
self.reqs = ItemRequirements(self.nbContainer, stuff, item)
self.nbContainer.AddPage(self.reqs, "Requirements")
self.effects = ItemEffects(self.nbContainer, stuff, item)
self.nbContainer.AddPage(self.effects, "Effects")
if stuff is not None:
self.affectedby = ItemAffectedBy(self.nbContainer, stuff, item)
self.nbContainer.AddPage(self.affectedby, "Affected by")
self.nbContainer.Bind(wx.EVT_LEFT_DOWN, self.mouseHit)
self.SetSizer(mainSizer)
self.Layout()
def __del__(self):
pass
def mouseHit(self, event):
tab, _ = self.nbContainer.HitTest(event.Position)
if tab != -1:
self.nbContainer.SetSelection(tab)
class AutoListCtrl(wx.ListCtrl, listmix.ListCtrlAutoWidthMixin, listmix.ListRowHighlighter):
def __init__(self, parent, ID, pos=wx.DefaultPosition, size=wx.DefaultSize, style=0):
wx.ListCtrl.__init__(self, parent, ID, pos, size, style)
listmix.ListCtrlAutoWidthMixin.__init__(self)
listmix.ListRowHighlighter.__init__(self)
class AutoListCtrlNoHighlight(wx.ListCtrl, listmix.ListCtrlAutoWidthMixin, listmix.ListRowHighlighter):
def __init__(self, parent, ID, pos=wx.DefaultPosition, size=wx.DefaultSize, style=0):
wx.ListCtrl.__init__(self, parent, ID, pos, size, style)
listmix.ListCtrlAutoWidthMixin.__init__(self)
class ItemTraits(wx.Panel):
def __init__(self, parent, stuff, item):
wx.Panel.__init__(self, parent)
mainSizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(mainSizer)
self.traits = wx.html.HtmlWindow(self)
self.traits.SetPage(item.traits.traitText)
mainSizer.Add(self.traits, 1, wx.ALL | wx.EXPAND, 0)
self.Layout()
class ItemDescription(wx.Panel):
def __init__(self, parent, stuff, item):
wx.Panel.__init__(self, parent)
mainSizer = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(mainSizer)
bgcolor = wx.SystemSettings_GetColour(wx.SYS_COLOUR_WINDOW)
fgcolor = wx.SystemSettings_GetColour(wx.SYS_COLOUR_WINDOWTEXT)
self.description = wx.html.HtmlWindow(self)
if not item.description:
return
desc = item.description.replace("\n", "<br>")
# Strip font tags
desc = re.sub("<( *)font( *)color( *)=(.*?)>(?P<inside>.*?)<( *)/( *)font( *)>", "\g<inside>", desc)
# Strip URLs
desc = re.sub("<( *)a(.*?)>(?P<inside>.*?)<( *)/( *)a( *)>", "\g<inside>", desc)
desc = "<body bgcolor='" + bgcolor.GetAsString(wx.C2S_HTML_SYNTAX) + "' text='" + fgcolor.GetAsString(
wx.C2S_HTML_SYNTAX) + "' >" + desc + "</body>"
self.description.SetPage(desc)
mainSizer.Add(self.description, 1, wx.ALL | wx.EXPAND, 0)
self.Layout()
class ItemParams(wx.Panel):
def __init__(self, parent, stuff, item, context=None):
wx.Panel.__init__(self, parent)
mainSizer = wx.BoxSizer(wx.VERTICAL)
self.paramList = AutoListCtrl(self, wx.ID_ANY,
style=wx.LC_REPORT | wx.LC_SINGLE_SEL | wx.LC_VRULES | wx.NO_BORDER)
mainSizer.Add(self.paramList, 1, wx.ALL | wx.EXPAND, 0)
self.SetSizer(mainSizer)
self.toggleView = 1
self.stuff = stuff
self.item = item
self.attrInfo = {}
self.attrValues = {}
self._fetchValues()
self.m_staticline = wx.StaticLine(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL)
mainSizer.Add(self.m_staticline, 0, wx.EXPAND)
bSizer = wx.BoxSizer(wx.HORIZONTAL)
self.totalAttrsLabel = wx.StaticText(self, wx.ID_ANY, u" ", wx.DefaultPosition, wx.DefaultSize, 0)
bSizer.Add(self.totalAttrsLabel, 0, wx.ALIGN_CENTER_VERTICAL | wx.RIGHT)
self.toggleViewBtn = wx.ToggleButton(self, wx.ID_ANY, u"Toggle view mode", wx.DefaultPosition, wx.DefaultSize,
0)
bSizer.Add(self.toggleViewBtn, 0, wx.ALIGN_CENTER_VERTICAL)
self.exportStatsBtn = wx.ToggleButton(self, wx.ID_ANY, u"Export Item Stats", wx.DefaultPosition, wx.DefaultSize,
0)
bSizer.Add(self.exportStatsBtn, 0, wx.ALIGN_CENTER_VERTICAL)
if stuff is not None:
self.refreshBtn = wx.Button(self, wx.ID_ANY, u"Refresh", wx.DefaultPosition, wx.DefaultSize, wx.BU_EXACTFIT)
bSizer.Add(self.refreshBtn, 0, wx.ALIGN_CENTER_VERTICAL)
self.refreshBtn.Bind(wx.EVT_BUTTON, self.RefreshValues)
mainSizer.Add(bSizer, 0, wx.ALIGN_RIGHT)
self.PopulateList()
self.toggleViewBtn.Bind(wx.EVT_TOGGLEBUTTON, self.ToggleViewMode)
self.exportStatsBtn.Bind(wx.EVT_TOGGLEBUTTON, self.ExportItemStats)
def _fetchValues(self):
if self.stuff is None:
self.attrInfo.clear()
self.attrValues.clear()
self.attrInfo.update(self.item.attributes)
self.attrValues.update(self.item.attributes)
elif self.stuff.item == self.item:
self.attrInfo.clear()
self.attrValues.clear()
self.attrInfo.update(self.stuff.item.attributes)
self.attrValues.update(self.stuff.itemModifiedAttributes)
elif self.stuff.charge == self.item:
self.attrInfo.clear()
self.attrValues.clear()
self.attrInfo.update(self.stuff.charge.attributes)
self.attrValues.update(self.stuff.chargeModifiedAttributes)
# When item for stats window no longer exists, don't change anything
else:
return
def UpdateList(self):
self.Freeze()
self.paramList.ClearAll()
self.PopulateList()
self.Thaw()
self.paramList.resizeLastColumn(100)
def RefreshValues(self, event):
self._fetchValues()
self.UpdateList()
event.Skip()
def ToggleViewMode(self, event):
self.toggleView *= -1
self.UpdateList()
event.Skip()
def ExportItemStats(self, event):
exportFileName = self.item.name + " (" + str(self.item.ID) + ").csv"
saveFileDialog = wx.FileDialog(self, "Save CSV file", "", exportFileName,
"CSV files (*.csv)|*.csv", wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
if saveFileDialog.ShowModal() == wx.ID_CANCEL:
return # the user hit cancel...
with open(saveFileDialog.GetPath(), "wb") as exportFile:
writer = csv.writer(exportFile, delimiter=',')
writer.writerow(
[
"ID",
"Internal Name",
"Friendly Name",
"Modified Value",
"Base Value",
]
)
for attribute in self.attrValues:
try:
attribute_id = self.attrInfo[attribute].ID
except (KeyError, AttributeError):
attribute_id = ''
try:
attribute_name = self.attrInfo[attribute].name
except (KeyError, AttributeError):
attribute_name = attribute
try:
attribute_displayname = self.attrInfo[attribute].displayName
except (KeyError, AttributeError):
attribute_displayname = ''
try:
attribute_value = self.attrInfo[attribute].value
except (KeyError, AttributeError):
attribute_value = ''
try:
attribute_modified_value = self.attrValues[attribute].value
except (KeyError, AttributeError):
attribute_modified_value = self.attrValues[attribute]
writer.writerow(
[
attribute_id,
attribute_name,
attribute_displayname,
attribute_modified_value,
attribute_value,
]
)
def PopulateList(self):
self.paramList.InsertColumn(0, "Attribute")
self.paramList.InsertColumn(1, "Current Value")
if self.stuff is not None:
self.paramList.InsertColumn(2, "Base Value")
self.paramList.SetColumnWidth(0, 110)
self.paramList.SetColumnWidth(1, 90)
if self.stuff is not None:
self.paramList.SetColumnWidth(2, 90)
self.paramList.setResizeColumn(0)
self.imageList = wx.ImageList(16, 16)
self.paramList.SetImageList(self.imageList, wx.IMAGE_LIST_SMALL)
names = list(self.attrValues.iterkeys())
names.sort()
idNameMap = {}
idCount = 0
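        # idNameMap pairs each row's item-data index with its display name so SortItems below can order rows alphabetically.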
for name in names:
info = self.attrInfo.get(name)
att = self.attrValues[name]
valDefault = getattr(info, "value", None)
valueDefault = valDefault if valDefault is not None else att
val = getattr(att, "value", None)
value = val if val is not None else att
if info and info.displayName and self.toggleView == 1:
attrName = info.displayName
else:
attrName = name
if info and config.debug:
attrName += " ({})".format(info.ID)
if info:
if info.icon is not None:
iconFile = info.icon.iconFile
icon = BitmapLoader.getBitmap(iconFile, "icons")
if icon is None:
icon = BitmapLoader.getBitmap("transparent16x16", "gui")
attrIcon = self.imageList.Add(icon)
else:
attrIcon = self.imageList.Add(BitmapLoader.getBitmap("7_15", "icons"))
else:
attrIcon = self.imageList.Add(BitmapLoader.getBitmap("7_15", "icons"))
index = self.paramList.InsertImageStringItem(sys.maxint, attrName, attrIcon)
idNameMap[idCount] = attrName
self.paramList.SetItemData(index, idCount)
idCount += 1
if self.toggleView != 1:
valueUnit = str(value)
elif info and info.unit:
valueUnit = self.TranslateValueUnit(value, info.unit.displayName, info.unit.name)
else:
valueUnit = formatAmount(value, 3, 0, 0)
if self.toggleView != 1:
valueUnitDefault = str(valueDefault)
elif info and info.unit:
valueUnitDefault = self.TranslateValueUnit(valueDefault, info.unit.displayName, info.unit.name)
else:
valueUnitDefault = formatAmount(valueDefault, 3, 0, 0)
self.paramList.SetStringItem(index, 1, valueUnit)
if self.stuff is not None:
self.paramList.SetStringItem(index, 2, valueUnitDefault)
self.paramList.SortItems(lambda id1, id2: cmp(idNameMap[id1], idNameMap[id2]))
self.paramList.RefreshRows()
self.totalAttrsLabel.SetLabel("%d attributes. " % idCount)
self.Layout()
def TranslateValueUnit(self, value, unitName, unitDisplayName):
def itemIDCallback():
item = Market.getInstance().getItem(value)
return "%s (%d)" % (item.name, value) if item is not None else str(value)
def groupIDCallback():
group = Market.getInstance().getGroup(value)
return "%s (%d)" % (group.name, value) if group is not None else str(value)
def attributeIDCallback():
attribute = Attribute.getInstance().getAttributeInfo(value)
return "%s (%d)" % (attribute.name.capitalize(), value)
trans = {"Inverse Absolute Percent": (lambda: (1 - value) * 100, unitName),
"Inversed Modifier Percent": (lambda: (1 - value) * 100, unitName),
"Modifier Percent": (
lambda: ("%+.2f" if ((value - 1) * 100) % 1 else "%+d") % ((value - 1) * 100), unitName),
"Volume": (lambda: value, u"m\u00B3"),
"Sizeclass": (lambda: value, ""),
"Absolute Percent": (lambda: (value * 100), unitName),
"Milliseconds": (lambda: value / 1000.0, unitName),
"typeID": (itemIDCallback, ""),
"groupID": (groupIDCallback, ""),
"attributeID": (attributeIDCallback, "")}
override = trans.get(unitDisplayName)
if override is not None:
v = override[0]()
if isinstance(v, str):
fvalue = v
elif isinstance(v, (int, float, long)):
fvalue = formatAmount(v, 3, 0, 0)
else:
fvalue = v
return "%s %s" % (fvalue, override[1])
else:
return "%s %s" % (formatAmount(value, 3, 0), unitName)
class ItemCompare(wx.Panel):
def __init__(self, parent, stuff, item, items, context=None):
wx.Panel.__init__(self, parent)
mainSizer = wx.BoxSizer(wx.VERTICAL)
self.paramList = AutoListCtrl(self, wx.ID_ANY,
style=wx.LC_REPORT | wx.LC_SINGLE_SEL | wx.LC_VRULES | wx.NO_BORDER)
mainSizer.Add(self.paramList, 1, wx.ALL | wx.EXPAND, 0)
self.SetSizer(mainSizer)
self.toggleView = 1
self.stuff = stuff
self.currentSort = None
self.sortReverse = False
self.item = item
self.items = sorted(items,
key=lambda x: x.attributes['metaLevel'].value if 'metaLevel' in x.attributes else None)
self.attrs = {}
# get a dict of attrName: attrInfo of all unique attributes across all items
for item in self.items:
for attr in item.attributes.keys():
if item.attributes[attr].info.displayName:
self.attrs[attr] = item.attributes[attr].info
# Process attributes for items and find ones that differ
for attr in self.attrs.keys():
value = None
for item in self.items:
# we can automatically break here if this item doesn't have the attribute,
# as that means at least one item did
if attr not in item.attributes:
break
# this is the first attribute for the item set, set the initial value
if value is None:
value = item.attributes[attr].value
continue
if attr not in item.attributes or item.attributes[attr].value != value:
break
else:
# attribute values were all the same, delete
del self.attrs[attr]
self.m_staticline = wx.StaticLine(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize,
wx.LI_HORIZONTAL)
mainSizer.Add(self.m_staticline, 0, wx.EXPAND)
bSizer = wx.BoxSizer(wx.HORIZONTAL)
self.totalAttrsLabel = wx.StaticText(self, wx.ID_ANY, u" ", wx.DefaultPosition, wx.DefaultSize, 0)
bSizer.Add(self.totalAttrsLabel, 0, wx.ALIGN_CENTER_VERTICAL | wx.RIGHT)
self.toggleViewBtn = wx.ToggleButton(self, wx.ID_ANY, u"Toggle view mode", wx.DefaultPosition,
wx.DefaultSize, 0)
bSizer.Add(self.toggleViewBtn, 0, wx.ALIGN_CENTER_VERTICAL)
if stuff is not None:
self.refreshBtn = wx.Button(self, wx.ID_ANY, u"Refresh", wx.DefaultPosition, wx.DefaultSize,
wx.BU_EXACTFIT)
bSizer.Add(self.refreshBtn, 0, wx.ALIGN_CENTER_VERTICAL)
self.refreshBtn.Bind(wx.EVT_BUTTON, self.RefreshValues)
mainSizer.Add(bSizer, 0, wx.ALIGN_RIGHT)
self.PopulateList()
self.toggleViewBtn.Bind(wx.EVT_TOGGLEBUTTON, self.ToggleViewMode)
self.Bind(wx.EVT_LIST_COL_CLICK, self.SortCompareCols)
def SortCompareCols(self, event):
self.Freeze()
self.paramList.ClearAll()
self.PopulateList(event.Column)
self.Thaw()
def UpdateList(self):
self.Freeze()
self.paramList.ClearAll()
self.PopulateList()
self.Thaw()
self.paramList.resizeLastColumn(100)
def RefreshValues(self, event):
self.UpdateList()
event.Skip()
def ToggleViewMode(self, event):
self.toggleView *= -1
self.UpdateList()
event.Skip()
def processPrices(self, prices):
for i, price in enumerate(prices):
self.paramList.SetStringItem(i, len(self.attrs) + 1, formatAmount(price.price, 3, 3, 9, currency=True))
def PopulateList(self, sort=None):
if sort is not None and self.currentSort == sort:
self.sortReverse = not self.sortReverse
else:
self.currentSort = sort
self.sortReverse = False
if sort is not None:
if sort == 0: # Name sort
func = lambda x: x.name
else:
try:
# Remember to reduce by 1, because the attrs array
# starts at 0 while the list has the item name as column 0.
attr = str(self.attrs.keys()[sort - 1])
func = lambda x: x.attributes[attr].value if attr in x.attributes else None
except IndexError:
# Clicked on a column that's not part of our array (price most likely)
self.sortReverse = False
func = lambda x: x.attributes['metaLevel'].value if 'metaLevel' in x.attributes else None
self.items = sorted(self.items, key=func, reverse=self.sortReverse)
self.paramList.InsertColumn(0, "Item")
self.paramList.SetColumnWidth(0, 200)
for i, attr in enumerate(self.attrs.keys()):
name = self.attrs[attr].displayName if self.attrs[attr].displayName else attr
self.paramList.InsertColumn(i + 1, name)
self.paramList.SetColumnWidth(i + 1, 120)
self.paramList.InsertColumn(len(self.attrs) + 1, "Price")
self.paramList.SetColumnWidth(len(self.attrs) + 1, 60)
sMkt = Market.getInstance()
sMkt.getPrices([x.ID for x in self.items], self.processPrices)
for item in self.items:
i = self.paramList.InsertStringItem(sys.maxint, item.name)
for x, attr in enumerate(self.attrs.keys()):
if attr in item.attributes:
info = self.attrs[attr]
value = item.attributes[attr].value
if self.toggleView != 1:
valueUnit = str(value)
if info and info.unit:
valueUnit = self.TranslateValueUnit(value, info.unit.displayName, info.unit.name)
else:
valueUnit = formatAmount(value, 3, 0, 0)
self.paramList.SetStringItem(i, x + 1, valueUnit)
self.paramList.RefreshRows()
self.Layout()
def TranslateValueUnit(self, value, unitName, unitDisplayName):
def itemIDCallback():
item = Market.getInstance().getItem(value)
return "%s (%d)" % (item.name, value) if item is not None else str(value)
def groupIDCallback():
group = Market.getInstance().getGroup(value)
return "%s (%d)" % (group.name, value) if group is not None else str(value)
def attributeIDCallback():
attribute = Attribute.getInstance().getAttributeInfo(value)
return "%s (%d)" % (attribute.name.capitalize(), value)
trans = {"Inverse Absolute Percent": (lambda: (1 - value) * 100, unitName),
"Inversed Modifier Percent": (lambda: (1 - value) * 100, unitName),
"Modifier Percent": (
lambda: ("%+.2f" if ((value - 1) * 100) % 1 else "%+d") % ((value - 1) * 100), unitName),
"Volume": (lambda: value, u"m\u00B3"),
"Sizeclass": (lambda: value, ""),
"Absolute Percent": (lambda: (value * 100), unitName),
"Milliseconds": (lambda: value / 1000.0, unitName),
"typeID": (itemIDCallback, ""),
"groupID": (groupIDCallback, ""),
"attributeID": (attributeIDCallback, "")}
override = trans.get(unitDisplayName)
if override is not None:
v = override[0]()
if isinstance(v, str):
fvalue = v
elif isinstance(v, (int, float, long)):
fvalue = formatAmount(v, 3, 0, 0)
else:
fvalue = v
return "%s %s" % (fvalue, override[1])
else:
return "%s %s" % (formatAmount(value, 3, 0), unitName)
class ItemRequirements(wx.Panel):
def __init__(self, parent, stuff, item):
wx.Panel.__init__(self, parent, style=wx.TAB_TRAVERSAL)
# itemId is set by the parent.
self.romanNb = ["0", "I", "II", "III", "IV", "V", "VI", "VII", "VIII", "IX", "X"]
self.skillIdHistory = []
mainSizer = wx.BoxSizer(wx.VERTICAL)
self.reqTree = wx.TreeCtrl(self, style=wx.TR_DEFAULT_STYLE | wx.TR_HIDE_ROOT | wx.NO_BORDER)
mainSizer.Add(self.reqTree, 1, wx.ALL | wx.EXPAND, 0)
self.SetSizer(mainSizer)
self.root = self.reqTree.AddRoot("WINRARZOR")
self.reqTree.SetPyData(self.root, None)
self.imageList = wx.ImageList(16, 16)
self.reqTree.SetImageList(self.imageList)
skillBookId = self.imageList.Add(BitmapLoader.getBitmap("skill_small", "gui"))
self.getFullSkillTree(item, self.root, skillBookId)
self.reqTree.ExpandAll()
self.Layout()
def getFullSkillTree(self, parentSkill, parent, sbIconId):
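        # Recursively append every required skill; skillIdHistory prevents re-expanding
        # a skill that has already been walked (and thus unbounded recursion).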
for skill, level in parentSkill.requiredSkills.iteritems():
child = self.reqTree.AppendItem(parent, "%s %s" % (skill.name, self.romanNb[int(level)]), sbIconId)
if skill.ID not in self.skillIdHistory:
self.getFullSkillTree(skill, child, sbIconId)
self.skillIdHistory.append(skill.ID)
class ItemEffects(wx.Panel):
def __init__(self, parent, stuff, item):
wx.Panel.__init__(self, parent)
self.item = item
mainSizer = wx.BoxSizer(wx.VERTICAL)
self.effectList = AutoListCtrl(self, wx.ID_ANY,
style=wx.LC_REPORT | wx.LC_SINGLE_SEL | wx.LC_VRULES | wx.NO_BORDER)
mainSizer.Add(self.effectList, 1, wx.ALL | wx.EXPAND, 0)
self.SetSizer(mainSizer)
self.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.OnClick, self.effectList)
if config.debug:
self.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, self.OnRightClick, self.effectList)
self.PopulateList()
def PopulateList(self):
self.effectList.InsertColumn(0, "Name")
self.effectList.InsertColumn(1, "Active")
self.effectList.InsertColumn(2, "Type")
if config.debug:
self.effectList.InsertColumn(3, "Run Time")
self.effectList.InsertColumn(4, "ID")
# self.effectList.SetColumnWidth(0,385)
self.effectList.setResizeColumn(0)
self.effectList.SetColumnWidth(1, 50)
self.effectList.SetColumnWidth(2, 80)
if config.debug:
self.effectList.SetColumnWidth(3, 65)
self.effectList.SetColumnWidth(4, 40)
item = self.item
effects = item.effects
names = list(effects.iterkeys())
names.sort()
for name in names:
index = self.effectList.InsertStringItem(sys.maxint, name)
if effects[name].isImplemented:
if effects[name].activeByDefault:
activeByDefault = "Yes"
else:
activeByDefault = "No"
else:
activeByDefault = ""
effectTypeText = ""
if effects[name].type:
for effectType in effects[name].type:
effectTypeText += effectType + " "
if effects[name].runTime and effects[name].isImplemented:
effectRunTime = str(effects[name].runTime)
else:
effectRunTime = ""
self.effectList.SetStringItem(index, 1, activeByDefault)
self.effectList.SetStringItem(index, 2, effectTypeText)
if config.debug:
self.effectList.SetStringItem(index, 3, effectRunTime)
self.effectList.SetStringItem(index, 4, str(effects[name].ID))
self.effectList.RefreshRows()
self.Layout()
def OnClick(self, event):
"""
Debug use: toggle effects on/off.
Affects *ALL* items that use that effect.
Is not stateful. Will reset if Pyfa is closed and reopened.
"""
try:
activeByDefault = getattr(self.item.effects[event.GetText()], "activeByDefault")
if activeByDefault:
setattr(self.item.effects[event.GetText()], "activeByDefault", False)
else:
setattr(self.item.effects[event.GetText()], "activeByDefault", True)
except AttributeError:
# Attribute doesn't exist, do nothing
pass
self.RefreshValues(event)
def OnRightClick(self, event):
"""
Debug use: open effect file with default application.
If effect file does not exist, create it
"""
file_ = config.getPyfaPath(os.path.join("eos", "effects", "%s.py" % event.GetText().lower()))
if not os.path.isfile(file_):
open(file_, 'a').close()
if 'wxMSW' in wx.PlatformInfo:
os.startfile(file_)
elif 'wxMac' in wx.PlatformInfo:
os.system("open " + file_)
else:
subprocess.call(["xdg-open", file_])
def RefreshValues(self, event):
self.Freeze()
self.effectList.ClearAll()
self.PopulateList()
self.effectList.RefreshRows()
self.Layout()
self.Thaw()
event.Skip()
class ItemAffectedBy(wx.Panel):
ORDER = [Fit, Ship, Citadel, Mode, Module, Drone, Fighter, Implant, Booster, Skill]
def __init__(self, parent, stuff, item):
wx.Panel.__init__(self, parent)
self.stuff = stuff
self.item = item
self.activeFit = gui.mainFrame.MainFrame.getInstance().getActiveFit()
self.showRealNames = False
self.showAttrView = False
self.expand = -1
self.treeItems = []
mainSizer = wx.BoxSizer(wx.VERTICAL)
self.affectedBy = wx.TreeCtrl(self, style=wx.TR_DEFAULT_STYLE | wx.TR_HIDE_ROOT | wx.NO_BORDER)
mainSizer.Add(self.affectedBy, 1, wx.ALL | wx.EXPAND, 0)
self.m_staticline = wx.StaticLine(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL)
mainSizer.Add(self.m_staticline, 0, wx.EXPAND)
bSizer = wx.BoxSizer(wx.HORIZONTAL)
self.toggleExpandBtn = wx.ToggleButton(self, wx.ID_ANY, u"Expand All", wx.DefaultPosition, wx.DefaultSize, 0)
bSizer.Add(self.toggleExpandBtn, 0, wx.ALIGN_CENTER_VERTICAL)
self.toggleNameBtn = wx.ToggleButton(self, wx.ID_ANY, u"Toggle Names", wx.DefaultPosition, wx.DefaultSize, 0)
bSizer.Add(self.toggleNameBtn, 0, wx.ALIGN_CENTER_VERTICAL)
self.toggleViewBtn = wx.ToggleButton(self, wx.ID_ANY, u"Toggle View", wx.DefaultPosition, wx.DefaultSize, 0)
bSizer.Add(self.toggleViewBtn, 0, wx.ALIGN_CENTER_VERTICAL)
if stuff is not None:
self.refreshBtn = wx.Button(self, wx.ID_ANY, u"Refresh", wx.DefaultPosition, wx.DefaultSize, wx.BU_EXACTFIT)
bSizer.Add(self.refreshBtn, 0, wx.ALIGN_CENTER_VERTICAL)
self.refreshBtn.Bind(wx.EVT_BUTTON, self.RefreshTree)
self.toggleNameBtn.Bind(wx.EVT_TOGGLEBUTTON, self.ToggleNameMode)
self.toggleExpandBtn.Bind(wx.EVT_TOGGLEBUTTON, self.ToggleExpand)
self.toggleViewBtn.Bind(wx.EVT_TOGGLEBUTTON, self.ToggleViewMode)
mainSizer.Add(bSizer, 0, wx.ALIGN_RIGHT)
self.SetSizer(mainSizer)
self.PopulateTree()
self.Layout()
self.affectedBy.Bind(wx.EVT_TREE_ITEM_RIGHT_CLICK, self.scheduleMenu)
def scheduleMenu(self, event):
event.Skip()
wx.CallAfter(self.spawnMenu, event.Item)
def spawnMenu(self, item):
self.affectedBy.SelectItem(item)
stuff = self.affectedBy.GetPyData(item)
# String is set as data when we are dealing with attributes, not stuff containers
if stuff is None or isinstance(stuff, basestring):
return
contexts = []
# Skills are different in that they don't have itemModifiedAttributes,
# which is needed if we send the container to itemStats dialog. So
# instead, we send the item.
type_ = stuff.__class__.__name__
contexts.append(("itemStats", type_))
menu = ContextMenu.getMenu(stuff if type_ != "Skill" else stuff.item, *contexts)
self.PopupMenu(menu)
def ExpandCollapseTree(self):
self.Freeze()
if self.expand == 1:
self.affectedBy.ExpandAll()
else:
try:
self.affectedBy.CollapseAll()
except:
pass
self.Thaw()
def ToggleExpand(self, event):
self.expand *= -1
self.ExpandCollapseTree()
def ToggleViewTree(self):
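        # Swap each tree item's label with its stored data, flipping between
        # attribute display names and raw attribute names.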
self.Freeze()
for item in self.treeItems:
change = self.affectedBy.GetPyData(item)
display = self.affectedBy.GetItemText(item)
self.affectedBy.SetItemText(item, change)
self.affectedBy.SetPyData(item, display)
self.Thaw()
def UpdateTree(self):
self.Freeze()
self.affectedBy.DeleteAllItems()
self.PopulateTree()
self.Thaw()
def RefreshTree(self, event):
self.UpdateTree()
event.Skip()
def ToggleViewMode(self, event):
self.showAttrView = not self.showAttrView
self.affectedBy.DeleteAllItems()
self.PopulateTree()
event.Skip()
def ToggleNameMode(self, event):
self.showRealNames = not self.showRealNames
self.ToggleViewTree()
event.Skip()
def PopulateTree(self):
# sheri was here
del self.treeItems[:]
root = self.affectedBy.AddRoot("WINPWNZ0R")
self.affectedBy.SetPyData(root, None)
self.imageList = wx.ImageList(16, 16)
self.affectedBy.SetImageList(self.imageList)
if self.showAttrView:
self.buildAttributeView(root)
else:
self.buildModuleView(root)
self.ExpandCollapseTree()
def sortAttrDisplayName(self, attr):
info = self.stuff.item.attributes.get(attr)
if info and info.displayName != "":
return info.displayName
return attr
def buildAttributeView(self, root):
"""
        We first build a usable dictionary of items. The key is either a fit
        if the afflictions stem from a projected fit, or self.stuff if they
        are local afflictions (everything else, even gang boosts at this time).
        The value of this is yet another dictionary in the following format:

        "attribute name": [
            (class of afflictor,
             the afflictor itself,
             affliction item used for the icon (required due to GH issue #335),
             modifier type,
             amount of modification,
             whether this affliction was projected),
            ...
        ]
"""
attributes = self.stuff.itemModifiedAttributes if self.item == self.stuff.item else self.stuff.chargeModifiedAttributes
container = {}
for attrName in attributes.iterAfflictions():
            # if the value is 0 or unchanged from the original, skip this attribute
if attributes[attrName] == (attributes.getOriginal(attrName) or 0):
continue
for fit, afflictors in attributes.getAfflictions(attrName).iteritems():
for afflictor, modifier, amount, used in afflictors:
if not used or afflictor.item is None:
continue
if fit.ID != self.activeFit:
# affliction fit does not match our fit
if fit not in container:
container[fit] = {}
items = container[fit]
else:
# local afflictions
if self.stuff not in container:
container[self.stuff] = {}
items = container[self.stuff]
# items hold our module: info mappings
if attrName not in items:
items[attrName] = []
if afflictor == self.stuff and getattr(afflictor, 'charge', None):
                        # we are showing a charge's modifications, see GH issue #335
item = afflictor.charge
else:
item = afflictor.item
items[attrName].append(
(type(afflictor), afflictor, item, modifier, amount, getattr(afflictor, "projected", False)))
# Make sure projected fits are on top
rootOrder = container.keys()
rootOrder.sort(key=lambda x: self.ORDER.index(type(x)))
# Now, we take our created dictionary and start adding stuff to our tree
for thing in rootOrder:
# This block simply directs which parent we are adding to (root or projected fit)
if thing == self.stuff:
parent = root
else: # projected fit
icon = self.imageList.Add(BitmapLoader.getBitmap("ship_small", "gui"))
child = self.affectedBy.AppendItem(root, "{} ({})".format(thing.name, thing.ship.item.name), icon)
parent = child
attributes = container[thing]
attrOrder = sorted(attributes.keys(), key=self.sortAttrDisplayName)
for attrName in attrOrder:
attrInfo = self.stuff.item.attributes.get(attrName)
displayName = attrInfo.displayName if attrInfo and attrInfo.displayName != "" else attrName
if attrInfo:
if attrInfo.icon is not None:
iconFile = attrInfo.icon.iconFile
icon = BitmapLoader.getBitmap(iconFile, "icons")
if icon is None:
icon = BitmapLoader.getBitmap("transparent16x16", "gui")
attrIcon = self.imageList.Add(icon)
else:
attrIcon = self.imageList.Add(BitmapLoader.getBitmap("7_15", "icons"))
else:
attrIcon = self.imageList.Add(BitmapLoader.getBitmap("7_15", "icons"))
if self.showRealNames:
display = attrName
saved = displayName
else:
display = displayName
saved = attrName
# this is the attribute node
child = self.affectedBy.AppendItem(parent, display, attrIcon)
self.affectedBy.SetPyData(child, saved)
self.treeItems.append(child)
items = attributes[attrName]
items.sort(key=lambda x: self.ORDER.index(x[0]))
for itemInfo in items:
afflictorType, afflictor, item, attrModifier, attrAmount, projected = itemInfo
if afflictorType == Ship:
itemIcon = self.imageList.Add(BitmapLoader.getBitmap("ship_small", "gui"))
elif item.icon:
bitmap = BitmapLoader.getBitmap(item.icon.iconFile, "icons")
itemIcon = self.imageList.Add(bitmap) if bitmap else -1
else:
itemIcon = -1
displayStr = item.name
if projected:
displayStr += " (projected)"
if attrModifier == "s*":
attrModifier = "*"
penalized = "(penalized)"
else:
penalized = ""
# this is the Module node, the attribute will be attached to this
display = "%s %s %.2f %s" % (displayStr, attrModifier, attrAmount, penalized)
treeItem = self.affectedBy.AppendItem(child, display, itemIcon)
self.affectedBy.SetPyData(treeItem, afflictor)
def buildModuleView(self, root):
"""
        We first build a usable dictionary of items. The key is either a fit
        if the afflictions stem from a projected fit, or self.stuff if they
        are local afflictions (everything else, even gang boosts at this time).
        The value of this is yet another dictionary in the following format:

        "Module Name": [
            class of afflictor,
            set of afflictors (such as 2 of the same module),
            list of affliction info tuples (attribute name, modifier, modification amount),
            item that will be used to determine the icon (required due to GH issue #335),
            whether this affliction was projected
        ]
"""
attributes = self.stuff.itemModifiedAttributes if self.item == self.stuff.item else self.stuff.chargeModifiedAttributes
container = {}
for attrName in attributes.iterAfflictions():
            # if the value is 0 or unchanged from the original, skip this attribute
if attributes[attrName] == (attributes.getOriginal(attrName) or 0):
continue
for fit, afflictors in attributes.getAfflictions(attrName).iteritems():
for afflictor, modifier, amount, used in afflictors:
if not used or getattr(afflictor, 'item', None) is None:
continue
if fit.ID != self.activeFit:
# affliction fit does not match our fit
if fit not in container:
container[fit] = {}
items = container[fit]
else:
# local afflictions
if self.stuff not in container:
container[self.stuff] = {}
items = container[self.stuff]
if afflictor == self.stuff and getattr(afflictor, 'charge', None):
                        # we are showing a charge's modifications, see GH issue #335
item = afflictor.charge
else:
item = afflictor.item
# items hold our module: info mappings
if item.name not in items:
items[item.name] = [type(afflictor), set(), [], item, getattr(afflictor, "projected", False)]
info = items[item.name]
info[1].add(afflictor)
                    # If len(info[1]) > 1, there are two separate modules working.
# Check to make sure we only include the modifier once
# See GH issue 154
if len(info[1]) > 1 and (attrName, modifier, amount) in info[2]:
continue
info[2].append((attrName, modifier, amount))
# Make sure projected fits are on top
rootOrder = container.keys()
rootOrder.sort(key=lambda x: self.ORDER.index(type(x)))
# Now, we take our created dictionary and start adding stuff to our tree
for thing in rootOrder:
# This block simply directs which parent we are adding to (root or projected fit)
if thing == self.stuff:
parent = root
else: # projected fit
icon = self.imageList.Add(BitmapLoader.getBitmap("ship_small", "gui"))
child = self.affectedBy.AppendItem(root, "{} ({})".format(thing.name, thing.ship.item.name), icon)
parent = child
items = container[thing]
order = items.keys()
order.sort(key=lambda x: (self.ORDER.index(items[x][0]), x))
for itemName in order:
info = items[itemName]
afflictorType, afflictors, attrData, item, projected = info
counter = len(afflictors)
if afflictorType == Ship:
itemIcon = self.imageList.Add(BitmapLoader.getBitmap("ship_small", "gui"))
elif item.icon:
bitmap = BitmapLoader.getBitmap(item.icon.iconFile, "icons")
itemIcon = self.imageList.Add(bitmap) if bitmap else -1
else:
itemIcon = -1
displayStr = itemName
if counter > 1:
displayStr += " x {}".format(counter)
if projected:
displayStr += " (projected)"
# this is the Module node, the attribute will be attached to this
child = self.affectedBy.AppendItem(parent, displayStr, itemIcon)
self.affectedBy.SetPyData(child, afflictors.pop())
if counter > 0:
attributes = []
for attrName, attrModifier, attrAmount in attrData:
attrInfo = self.stuff.item.attributes.get(attrName)
displayName = attrInfo.displayName if attrInfo else ""
if attrInfo:
if attrInfo.icon is not None:
iconFile = attrInfo.icon.iconFile
icon = BitmapLoader.getBitmap(iconFile, "icons")
if icon is None:
icon = BitmapLoader.getBitmap("transparent16x16", "gui")
attrIcon = self.imageList.Add(icon)
else:
attrIcon = self.imageList.Add(BitmapLoader.getBitmap("7_15", "icons"))
else:
attrIcon = self.imageList.Add(BitmapLoader.getBitmap("7_15", "icons"))
if attrModifier == "s*":
attrModifier = "*"
penalized = "(penalized)"
else:
penalized = ""
attributes.append((attrName, (displayName if displayName != "" else attrName), attrModifier,
attrAmount, penalized, attrIcon))
attrSorted = sorted(attributes, key=lambda attribName: attribName[0])
for attr in attrSorted:
attrName, displayName, attrModifier, attrAmount, penalized, attrIcon = attr
if self.showRealNames:
display = "%s %s %.2f %s" % (attrName, attrModifier, attrAmount, penalized)
saved = "%s %s %.2f %s" % (
displayName if displayName != "" else attrName,
attrModifier,
attrAmount,
penalized
)
else:
display = "%s %s %.2f %s" % (
displayName if displayName != "" else attrName,
attrModifier,
attrAmount,
penalized
)
saved = "%s %s %.2f %s" % (attrName, attrModifier, attrAmount, penalized)
treeitem = self.affectedBy.AppendItem(child, display, attrIcon)
self.affectedBy.SetPyData(treeitem, saved)
self.treeItems.append(treeitem)
| gpl-3.0 | -3,734,646,158,873,923,000 | 38.606864 | 127 | 0.56119 | false |
mkhuthir/learnPython | Book_pythonlearn_com/twitter/twfriends.py | 1 | 2583 | import urllib.request, urllib.parse, urllib.error
import twurl
import json
import sqlite3
TWITTER_URL = 'https://api.twitter.com/1.1/friends/list.json'
conn = sqlite3.connect('friends.sqlite')
cur = conn.cursor()
cur.execute('''CREATE TABLE IF NOT EXISTS People
(id INTEGER PRIMARY KEY, name TEXT UNIQUE, retrieved INTEGER)''')
cur.execute('''CREATE TABLE IF NOT EXISTS Follows
(from_id INTEGER, to_id INTEGER, UNIQUE(from_id, to_id))''')
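# People stores one row per account (retrieved flags whether its friends were fetched);
# Follows records directed from_id -> to_id edges between accounts.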
while True:
acct = input('Enter a Twitter account, or quit: ')
if ( acct == 'quit' ) : break
if ( len(acct) < 1 ) :
cur.execute('SELECT id,name FROM People WHERE retrieved = 0 LIMIT 1')
try:
(id, acct) = cur.fetchone()
except:
print('No unretrieved Twitter accounts found')
continue
else:
cur.execute('SELECT id FROM People WHERE name = ? LIMIT 1',
(acct, ) )
try:
id = cur.fetchone()[0]
except:
cur.execute('''INSERT OR IGNORE INTO People
(name, retrieved) VALUES ( ?, 0)''', ( acct, ) )
conn.commit()
if cur.rowcount != 1 :
print('Error inserting account:',acct)
continue
id = cur.lastrowid
url = twurl.augment(TWITTER_URL, {'screen_name': acct, 'count': '5'} )
print('Retrieving account', acct)
connection = urllib.request.urlopen(url)
data = connection.read().decode()
headers = dict(connection.getheaders())
print('Remaining', headers['x-rate-limit-remaining'])
js = json.loads(data)
# print json.dumps(js, indent=4)
cur.execute('UPDATE People SET retrieved=1 WHERE name = ?', (acct, ) )
countnew = 0
countold = 0
for u in js['users'] :
friend = u['screen_name']
print(friend)
cur.execute('SELECT id FROM People WHERE name = ? LIMIT 1',
(friend, ) )
try:
friend_id = cur.fetchone()[0]
countold = countold + 1
except:
cur.execute('''INSERT OR IGNORE INTO People (name, retrieved)
VALUES ( ?, 0)''', ( friend, ) )
conn.commit()
if cur.rowcount != 1 :
print('Error inserting account:',friend)
continue
friend_id = cur.lastrowid
countnew = countnew + 1
cur.execute('''INSERT OR IGNORE INTO Follows (from_id, to_id)
VALUES (?, ?)''', (id, friend_id) )
print('New accounts=',countnew,' revisited=',countold)
conn.commit()
cur.close()
| mit | -3,992,820,602,990,398,000 | 32.115385 | 77 | 0.562137 | false |
derekjchow/models | research/object_detection/meta_architectures/ssd_meta_arch_test.py | 1 | 33019 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.meta_architectures.ssd_meta_arch."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.meta_architectures import ssd_meta_arch_test_lib
from object_detection.protos import model_pb2
from object_detection.utils import test_utils
slim = tf.contrib.slim
keras = tf.keras.layers
@parameterized.parameters(
{'use_keras': False},
{'use_keras': True},
)
class SsdMetaArchTest(ssd_meta_arch_test_lib.SSDMetaArchTestBase,
parameterized.TestCase):
def _create_model(
self,
apply_hard_mining=True,
normalize_loc_loss_by_codesize=False,
add_background_class=True,
random_example_sampling=False,
expected_loss_weights=model_pb2.DetectionModel().ssd.loss.NONE,
min_num_negative_samples=1,
desired_negative_sampling_ratio=3,
use_keras=False,
predict_mask=False,
use_static_shapes=False,
nms_max_size_per_class=5,
calibration_mapping_value=None):
return super(SsdMetaArchTest, self)._create_model(
model_fn=ssd_meta_arch.SSDMetaArch,
apply_hard_mining=apply_hard_mining,
normalize_loc_loss_by_codesize=normalize_loc_loss_by_codesize,
add_background_class=add_background_class,
random_example_sampling=random_example_sampling,
expected_loss_weights=expected_loss_weights,
min_num_negative_samples=min_num_negative_samples,
desired_negative_sampling_ratio=desired_negative_sampling_ratio,
use_keras=use_keras,
predict_mask=predict_mask,
use_static_shapes=use_static_shapes,
nms_max_size_per_class=nms_max_size_per_class,
calibration_mapping_value=calibration_mapping_value)
def test_preprocess_preserves_shapes_with_dynamic_input_image(
self, use_keras):
image_shapes = [(3, None, None, 3),
(None, 10, 10, 3),
(None, None, None, 3)]
model, _, _, _ = self._create_model(use_keras=use_keras)
for image_shape in image_shapes:
image_placeholder = tf.placeholder(tf.float32, shape=image_shape)
preprocessed_inputs, _ = model.preprocess(image_placeholder)
self.assertAllEqual(preprocessed_inputs.shape.as_list(), image_shape)
def test_preprocess_preserves_shape_with_static_input_image(self, use_keras):
def graph_fn(input_image):
model, _, _, _ = self._create_model(use_keras=use_keras)
return model.preprocess(input_image)
input_image = np.random.rand(2, 3, 3, 3).astype(np.float32)
preprocessed_inputs, _ = self.execute(graph_fn, [input_image])
self.assertAllEqual(preprocessed_inputs.shape, [2, 3, 3, 3])
def test_predict_result_shapes_on_image_with_dynamic_shape(self, use_keras):
batch_size = 3
image_size = 2
input_shapes = [(None, image_size, image_size, 3),
(batch_size, None, None, 3),
(None, None, None, 3)]
for input_shape in input_shapes:
tf_graph = tf.Graph()
with tf_graph.as_default():
model, num_classes, num_anchors, code_size = self._create_model(
use_keras=use_keras)
preprocessed_input_placeholder = tf.placeholder(tf.float32,
shape=input_shape)
prediction_dict = model.predict(
preprocessed_input_placeholder, true_image_shapes=None)
self.assertIn('box_encodings', prediction_dict)
self.assertIn('class_predictions_with_background', prediction_dict)
self.assertIn('feature_maps', prediction_dict)
self.assertIn('anchors', prediction_dict)
init_op = tf.global_variables_initializer()
with self.test_session(graph=tf_graph) as sess:
sess.run(init_op)
prediction_out = sess.run(prediction_dict,
feed_dict={
preprocessed_input_placeholder:
np.random.uniform(
size=(batch_size, 2, 2, 3))})
expected_box_encodings_shape_out = (batch_size, num_anchors, code_size)
expected_class_predictions_with_background_shape_out = (batch_size,
num_anchors,
num_classes + 1)
self.assertAllEqual(prediction_out['box_encodings'].shape,
expected_box_encodings_shape_out)
self.assertAllEqual(
prediction_out['class_predictions_with_background'].shape,
expected_class_predictions_with_background_shape_out)
def test_predict_result_shapes_on_image_with_static_shape(self, use_keras):
with tf.Graph().as_default():
_, num_classes, num_anchors, code_size = self._create_model(
use_keras=use_keras)
def graph_fn(input_image):
model, _, _, _ = self._create_model()
predictions = model.predict(input_image, true_image_shapes=None)
return (predictions['box_encodings'],
predictions['class_predictions_with_background'],
predictions['feature_maps'],
predictions['anchors'])
batch_size = 3
image_size = 2
channels = 3
input_image = np.random.rand(batch_size, image_size, image_size,
channels).astype(np.float32)
expected_box_encodings_shape = (batch_size, num_anchors, code_size)
expected_class_predictions_shape = (batch_size, num_anchors, num_classes+1)
(box_encodings, class_predictions, _, _) = self.execute(graph_fn,
[input_image])
self.assertAllEqual(box_encodings.shape, expected_box_encodings_shape)
self.assertAllEqual(class_predictions.shape,
expected_class_predictions_shape)
def test_postprocess_results_are_correct(self, use_keras):
batch_size = 2
image_size = 2
input_shapes = [(batch_size, image_size, image_size, 3),
(None, image_size, image_size, 3),
(batch_size, None, None, 3),
(None, None, None, 3)]
expected_boxes = [
[
[0, 0, .5, .5],
[0, .5, .5, 1],
[.5, 0, 1, .5],
[0, 0, 0, 0], # pruned prediction
[0, 0, 0, 0]
], # padding
[
[0, 0, .5, .5],
[0, .5, .5, 1],
[.5, 0, 1, .5],
[0, 0, 0, 0], # pruned prediction
[0, 0, 0, 0]
]
] # padding
expected_scores = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]
expected_classes = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]
expected_num_detections = np.array([3, 3])
raw_detection_boxes = [[[0., 0., 0.5, 0.5], [0., 0.5, 0.5, 1.],
[0.5, 0., 1., 0.5], [1., 1., 1.5, 1.5]],
[[0., 0., 0.5, 0.5], [0., 0.5, 0.5, 1.],
[0.5, 0., 1., 0.5], [1., 1., 1.5, 1.5]]]
raw_detection_scores = [[[0, 0], [0, 0], [0, 0], [0, 0]],
[[0, 0], [0, 0], [0, 0], [0, 0]]]
for input_shape in input_shapes:
tf_graph = tf.Graph()
with tf_graph.as_default():
model, _, _, _ = self._create_model(use_keras=use_keras)
input_placeholder = tf.placeholder(tf.float32, shape=input_shape)
preprocessed_inputs, true_image_shapes = model.preprocess(
input_placeholder)
prediction_dict = model.predict(preprocessed_inputs,
true_image_shapes)
detections = model.postprocess(prediction_dict, true_image_shapes)
self.assertIn('detection_boxes', detections)
self.assertIn('detection_scores', detections)
self.assertIn('detection_classes', detections)
self.assertIn('num_detections', detections)
self.assertIn('raw_detection_boxes', detections)
self.assertIn('raw_detection_scores', detections)
init_op = tf.global_variables_initializer()
with self.test_session(graph=tf_graph) as sess:
sess.run(init_op)
detections_out = sess.run(detections,
feed_dict={
input_placeholder:
np.random.uniform(
size=(batch_size, 2, 2, 3))})
for image_idx in range(batch_size):
self.assertTrue(
test_utils.first_rows_close_as_set(
detections_out['detection_boxes'][image_idx].tolist(),
expected_boxes[image_idx]))
self.assertAllClose(detections_out['detection_scores'], expected_scores)
self.assertAllClose(detections_out['detection_classes'], expected_classes)
self.assertAllClose(detections_out['num_detections'],
expected_num_detections)
self.assertAllEqual(detections_out['raw_detection_boxes'],
raw_detection_boxes)
self.assertAllEqual(detections_out['raw_detection_scores'],
raw_detection_scores)
def test_postprocess_results_are_correct_static(self, use_keras):
with tf.Graph().as_default():
_, _, _, _ = self._create_model(use_keras=use_keras)
def graph_fn(input_image):
model, _, _, _ = self._create_model(use_static_shapes=True,
nms_max_size_per_class=4)
preprocessed_inputs, true_image_shapes = model.preprocess(input_image)
prediction_dict = model.predict(preprocessed_inputs,
true_image_shapes)
detections = model.postprocess(prediction_dict, true_image_shapes)
return (detections['detection_boxes'], detections['detection_scores'],
detections['detection_classes'], detections['num_detections'])
batch_size = 2
image_size = 2
channels = 3
input_image = np.random.rand(batch_size, image_size, image_size,
channels).astype(np.float32)
expected_boxes = [
[
[0, 0, .5, .5],
[0, .5, .5, 1],
[.5, 0, 1, .5],
[0, 0, 0, 0]
], # padding
[
[0, 0, .5, .5],
[0, .5, .5, 1],
[.5, 0, 1, .5],
[0, 0, 0, 0]
]
] # padding
expected_scores = [[0, 0, 0, 0], [0, 0, 0, 0]]
expected_classes = [[0, 0, 0, 0], [0, 0, 0, 0]]
expected_num_detections = np.array([3, 3])
(detection_boxes, detection_scores, detection_classes,
num_detections) = self.execute(graph_fn, [input_image])
for image_idx in range(batch_size):
self.assertTrue(test_utils.first_rows_close_as_set(
detection_boxes[image_idx][
0:expected_num_detections[image_idx]].tolist(),
expected_boxes[image_idx][0:expected_num_detections[image_idx]]))
self.assertAllClose(
detection_scores[image_idx][0:expected_num_detections[image_idx]],
expected_scores[image_idx][0:expected_num_detections[image_idx]])
self.assertAllClose(
detection_classes[image_idx][0:expected_num_detections[image_idx]],
expected_classes[image_idx][0:expected_num_detections[image_idx]])
self.assertAllClose(num_detections,
expected_num_detections)
def test_postprocess_results_are_correct_with_calibration(self, use_keras):
batch_size = 2
image_size = 2
input_shapes = [(batch_size, image_size, image_size, 3),
(None, image_size, image_size, 3),
(batch_size, None, None, 3),
(None, None, None, 3)]
expected_boxes = [
[
[0, 0, .5, .5],
[0, .5, .5, 1],
[.5, 0, 1, .5],
[0, 0, 0, 0], # pruned prediction
[0, 0, 0, 0]
], # padding
[
[0, 0, .5, .5],
[0, .5, .5, 1],
[.5, 0, 1, .5],
[0, 0, 0, 0], # pruned prediction
[0, 0, 0, 0]
]
] # padding
# Calibration mapping value below is set to map all scores to 0.5, except
# for the last two detections in each batch (see expected number of
    # detections below).
expected_scores = [[0.5, 0.5, 0.5, 0., 0.], [0.5, 0.5, 0.5, 0., 0.]]
expected_classes = [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]
expected_num_detections = np.array([3, 3])
raw_detection_boxes = [[[0., 0., 0.5, 0.5], [0., 0.5, 0.5, 1.],
[0.5, 0., 1., 0.5], [1., 1., 1.5, 1.5]],
[[0., 0., 0.5, 0.5], [0., 0.5, 0.5, 1.],
[0.5, 0., 1., 0.5], [1., 1., 1.5, 1.5]]]
raw_detection_scores = [[[0, 0], [0, 0], [0, 0], [0, 0]],
[[0, 0], [0, 0], [0, 0], [0, 0]]]
for input_shape in input_shapes:
tf_graph = tf.Graph()
with tf_graph.as_default():
model, _, _, _ = self._create_model(use_keras=use_keras,
calibration_mapping_value=0.5)
input_placeholder = tf.placeholder(tf.float32, shape=input_shape)
preprocessed_inputs, true_image_shapes = model.preprocess(
input_placeholder)
prediction_dict = model.predict(preprocessed_inputs,
true_image_shapes)
detections = model.postprocess(prediction_dict, true_image_shapes)
self.assertIn('detection_boxes', detections)
self.assertIn('detection_scores', detections)
self.assertIn('detection_classes', detections)
self.assertIn('num_detections', detections)
self.assertIn('raw_detection_boxes', detections)
self.assertIn('raw_detection_scores', detections)
init_op = tf.global_variables_initializer()
with self.test_session(graph=tf_graph) as sess:
sess.run(init_op)
detections_out = sess.run(detections,
feed_dict={
input_placeholder:
np.random.uniform(
size=(batch_size, 2, 2, 3))})
for image_idx in range(batch_size):
self.assertTrue(
test_utils.first_rows_close_as_set(
detections_out['detection_boxes'][image_idx].tolist(),
expected_boxes[image_idx]))
self.assertAllClose(detections_out['detection_scores'], expected_scores)
self.assertAllClose(detections_out['detection_classes'], expected_classes)
self.assertAllClose(detections_out['num_detections'],
expected_num_detections)
self.assertAllEqual(detections_out['raw_detection_boxes'],
raw_detection_boxes)
self.assertAllEqual(detections_out['raw_detection_scores'],
raw_detection_scores)
def test_loss_results_are_correct(self, use_keras):
with tf.Graph().as_default():
_, num_classes, num_anchors, _ = self._create_model(use_keras=use_keras)
def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_classes1, groundtruth_classes2):
groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2]
groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2]
model, _, _, _ = self._create_model(apply_hard_mining=False)
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list)
prediction_dict = model.predict(preprocessed_tensor,
true_image_shapes=None)
loss_dict = model.loss(prediction_dict, true_image_shapes=None)
return (self._get_value_for_matching_key(loss_dict,
'Loss/localization_loss'),
self._get_value_for_matching_key(loss_dict,
'Loss/classification_loss'))
batch_size = 2
preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32)
groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_classes1 = np.array([[1]], dtype=np.float32)
groundtruth_classes2 = np.array([[1]], dtype=np.float32)
expected_localization_loss = 0.0
expected_classification_loss = (batch_size * num_anchors
* (num_classes+1) * np.log(2.0))
(localization_loss,
classification_loss) = self.execute(graph_fn, [preprocessed_input,
groundtruth_boxes1,
groundtruth_boxes2,
groundtruth_classes1,
groundtruth_classes2])
self.assertAllClose(localization_loss, expected_localization_loss)
self.assertAllClose(classification_loss, expected_classification_loss)
def test_loss_results_are_correct_with_normalize_by_codesize_true(
self, use_keras):
with tf.Graph().as_default():
_, _, _, _ = self._create_model(use_keras=use_keras)
def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_classes1, groundtruth_classes2):
groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2]
groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2]
model, _, _, _ = self._create_model(apply_hard_mining=False,
normalize_loc_loss_by_codesize=True,
use_keras=use_keras)
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list)
prediction_dict = model.predict(preprocessed_tensor,
true_image_shapes=None)
loss_dict = model.loss(prediction_dict, true_image_shapes=None)
return (self._get_value_for_matching_key(loss_dict,
'Loss/localization_loss'),)
batch_size = 2
preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32)
groundtruth_boxes1 = np.array([[0, 0, 1, 1]], dtype=np.float32)
groundtruth_boxes2 = np.array([[0, 0, 1, 1]], dtype=np.float32)
groundtruth_classes1 = np.array([[1]], dtype=np.float32)
groundtruth_classes2 = np.array([[1]], dtype=np.float32)
expected_localization_loss = 0.5 / 4
localization_loss = self.execute(graph_fn, [preprocessed_input,
groundtruth_boxes1,
groundtruth_boxes2,
groundtruth_classes1,
groundtruth_classes2])
self.assertAllClose(localization_loss, expected_localization_loss)
def test_loss_results_are_correct_with_hard_example_mining(self, use_keras):
with tf.Graph().as_default():
_, num_classes, num_anchors, _ = self._create_model(use_keras=use_keras)
def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_classes1, groundtruth_classes2):
groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2]
groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2]
model, _, _, _ = self._create_model()
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list)
prediction_dict = model.predict(preprocessed_tensor,
true_image_shapes=None)
loss_dict = model.loss(prediction_dict, true_image_shapes=None)
return (self._get_value_for_matching_key(loss_dict,
'Loss/localization_loss'),
self._get_value_for_matching_key(loss_dict,
'Loss/classification_loss'))
batch_size = 2
preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32)
groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_classes1 = np.array([[1]], dtype=np.float32)
groundtruth_classes2 = np.array([[1]], dtype=np.float32)
expected_localization_loss = 0.0
expected_classification_loss = (batch_size * num_anchors
* (num_classes+1) * np.log(2.0))
(localization_loss, classification_loss) = self.execute_cpu(
graph_fn, [
preprocessed_input, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_classes1, groundtruth_classes2
])
self.assertAllClose(localization_loss, expected_localization_loss)
self.assertAllClose(classification_loss, expected_classification_loss)
def test_loss_results_are_correct_without_add_background_class(
self, use_keras):
with tf.Graph().as_default():
_, num_classes, num_anchors, _ = self._create_model(
add_background_class=False, use_keras=use_keras)
def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_classes1, groundtruth_classes2):
groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2]
groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2]
model, _, _, _ = self._create_model(
apply_hard_mining=False, add_background_class=False,
use_keras=use_keras)
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list)
prediction_dict = model.predict(
preprocessed_tensor, true_image_shapes=None)
loss_dict = model.loss(prediction_dict, true_image_shapes=None)
return (loss_dict['Loss/localization_loss'],
loss_dict['Loss/classification_loss'])
batch_size = 2
preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32)
groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_classes1 = np.array([[1]], dtype=np.float32)
groundtruth_classes2 = np.array([[1]], dtype=np.float32)
expected_localization_loss = 0.0
expected_classification_loss = (
batch_size * num_anchors * num_classes * np.log(2.0))
(localization_loss, classification_loss) = self.execute(
graph_fn, [
preprocessed_input, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_classes1, groundtruth_classes2
])
self.assertAllClose(localization_loss, expected_localization_loss)
self.assertAllClose(classification_loss, expected_classification_loss)
def test_loss_results_are_correct_with_losses_mask(self, use_keras):
with tf.Graph().as_default():
_, num_classes, num_anchors, _ = self._create_model(use_keras=use_keras)
def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_boxes3, groundtruth_classes1, groundtruth_classes2,
groundtruth_classes3):
groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2,
groundtruth_boxes3]
groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2,
groundtruth_classes3]
is_annotated_list = [tf.constant(True), tf.constant(True),
tf.constant(False)]
model, _, _, _ = self._create_model(apply_hard_mining=False)
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list,
is_annotated_list=is_annotated_list)
prediction_dict = model.predict(preprocessed_tensor,
true_image_shapes=None)
loss_dict = model.loss(prediction_dict, true_image_shapes=None)
return (self._get_value_for_matching_key(loss_dict,
'Loss/localization_loss'),
self._get_value_for_matching_key(loss_dict,
'Loss/classification_loss'))
batch_size = 3
preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32)
groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_boxes3 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_classes1 = np.array([[1]], dtype=np.float32)
groundtruth_classes2 = np.array([[1]], dtype=np.float32)
groundtruth_classes3 = np.array([[1]], dtype=np.float32)
expected_localization_loss = 0.0
# Note that we are subtracting 1 from batch_size, since the final image is
# not annotated.
expected_classification_loss = ((batch_size - 1) * num_anchors
* (num_classes+1) * np.log(2.0))
(localization_loss,
classification_loss) = self.execute(graph_fn, [preprocessed_input,
groundtruth_boxes1,
groundtruth_boxes2,
groundtruth_boxes3,
groundtruth_classes1,
groundtruth_classes2,
groundtruth_classes3])
self.assertAllClose(localization_loss, expected_localization_loss)
self.assertAllClose(classification_loss, expected_classification_loss)
def test_restore_map_for_detection_ckpt(self, use_keras):
model, _, _, _ = self._create_model(use_keras=use_keras)
model.predict(tf.constant(np.array([[[[0, 0], [1, 1]], [[1, 0], [0, 1]]]],
dtype=np.float32)),
true_image_shapes=None)
init_op = tf.global_variables_initializer()
saver = tf.train.Saver()
save_path = self.get_temp_dir()
with self.test_session() as sess:
sess.run(init_op)
saved_model_path = saver.save(sess, save_path)
var_map = model.restore_map(
fine_tune_checkpoint_type='detection',
load_all_detection_checkpoint_vars=False)
self.assertIsInstance(var_map, dict)
saver = tf.train.Saver(var_map)
saver.restore(sess, saved_model_path)
for var in sess.run(tf.report_uninitialized_variables()):
self.assertNotIn('FeatureExtractor', var)
def test_restore_map_for_classification_ckpt(self, use_keras):
# Define mock tensorflow classification graph and save variables.
test_graph_classification = tf.Graph()
with test_graph_classification.as_default():
image = tf.placeholder(dtype=tf.float32, shape=[1, 20, 20, 3])
if use_keras:
with tf.name_scope('mock_model'):
layer_one = keras.Conv2D(32, kernel_size=1, name='layer1')
net = layer_one(image)
layer_two = keras.Conv2D(3, kernel_size=1, name='layer2')
layer_two(net)
else:
with tf.variable_scope('mock_model'):
net = slim.conv2d(image, num_outputs=32, kernel_size=1,
scope='layer1')
slim.conv2d(net, num_outputs=3, kernel_size=1, scope='layer2')
init_op = tf.global_variables_initializer()
saver = tf.train.Saver()
save_path = self.get_temp_dir()
with self.test_session(graph=test_graph_classification) as sess:
sess.run(init_op)
saved_model_path = saver.save(sess, save_path)
# Create tensorflow detection graph and load variables from
# classification checkpoint.
test_graph_detection = tf.Graph()
with test_graph_detection.as_default():
model, _, _, _ = self._create_model(use_keras=use_keras)
inputs_shape = [2, 2, 2, 3]
inputs = tf.to_float(tf.random_uniform(
inputs_shape, minval=0, maxval=255, dtype=tf.int32))
preprocessed_inputs, true_image_shapes = model.preprocess(inputs)
prediction_dict = model.predict(preprocessed_inputs, true_image_shapes)
model.postprocess(prediction_dict, true_image_shapes)
another_variable = tf.Variable([17.0], name='another_variable') # pylint: disable=unused-variable
var_map = model.restore_map(fine_tune_checkpoint_type='classification')
self.assertNotIn('another_variable', var_map)
self.assertIsInstance(var_map, dict)
saver = tf.train.Saver(var_map)
with self.test_session(graph=test_graph_detection) as sess:
saver.restore(sess, saved_model_path)
for var in sess.run(tf.report_uninitialized_variables()):
self.assertNotIn('FeatureExtractor', var)
def test_load_all_det_checkpoint_vars(self, use_keras):
test_graph_detection = tf.Graph()
with test_graph_detection.as_default():
model, _, _, _ = self._create_model(use_keras=use_keras)
inputs_shape = [2, 2, 2, 3]
inputs = tf.to_float(
tf.random_uniform(inputs_shape, minval=0, maxval=255, dtype=tf.int32))
preprocessed_inputs, true_image_shapes = model.preprocess(inputs)
prediction_dict = model.predict(preprocessed_inputs, true_image_shapes)
model.postprocess(prediction_dict, true_image_shapes)
another_variable = tf.Variable([17.0], name='another_variable') # pylint: disable=unused-variable
var_map = model.restore_map(
fine_tune_checkpoint_type='detection',
load_all_detection_checkpoint_vars=True)
self.assertIsInstance(var_map, dict)
self.assertIn('another_variable', var_map)
def test_loss_results_are_correct_with_random_example_sampling(
self,
use_keras):
with tf.Graph().as_default():
_, num_classes, _, _ = self._create_model(
random_example_sampling=True, use_keras=use_keras)
def graph_fn(preprocessed_tensor, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_classes1, groundtruth_classes2):
groundtruth_boxes_list = [groundtruth_boxes1, groundtruth_boxes2]
groundtruth_classes_list = [groundtruth_classes1, groundtruth_classes2]
model, _, _, _ = self._create_model(random_example_sampling=True,
use_keras=use_keras)
model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list)
prediction_dict = model.predict(
preprocessed_tensor, true_image_shapes=None)
loss_dict = model.loss(prediction_dict, true_image_shapes=None)
return (self._get_value_for_matching_key(loss_dict,
'Loss/localization_loss'),
self._get_value_for_matching_key(loss_dict,
'Loss/classification_loss'))
batch_size = 2
preprocessed_input = np.random.rand(batch_size, 2, 2, 3).astype(np.float32)
groundtruth_boxes1 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_boxes2 = np.array([[0, 0, .5, .5]], dtype=np.float32)
groundtruth_classes1 = np.array([[1]], dtype=np.float32)
groundtruth_classes2 = np.array([[1]], dtype=np.float32)
expected_localization_loss = 0.0
# Among 4 anchors (1 positive, 3 negative) in this test, only 2 anchors are
# selected (1 positive, 1 negative) since random sampler will adjust number
# of negative examples to make sure positive example fraction in the batch
# is 0.5.
expected_classification_loss = (
batch_size * 2 * (num_classes + 1) * np.log(2.0))
(localization_loss, classification_loss) = self.execute_cpu(
graph_fn, [
preprocessed_input, groundtruth_boxes1, groundtruth_boxes2,
groundtruth_classes1, groundtruth_classes2
])
self.assertAllClose(localization_loss, expected_localization_loss)
self.assertAllClose(classification_loss, expected_classification_loss)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | 275,845,732,408,965,020 | 47.48605 | 104 | 0.587177 | false |
TariqAHassan/ZeitSci | analysis/quantitative/quantitative.py | 1 | 2852 | """
Formal Python Analyses of the Data
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Python 3.5
"""
import pandas as pd
from tqdm import tqdm
from pprint import pprint
from funding_database_tools import MAIN_FOLDER
from easymoney.easy_pandas import pandas_print_full
# ------------------------------------------------------------------------------------------------
# Read in Data
# ------------------------------------------------------------------------------------------------
funding = pd.read_pickle(MAIN_FOLDER + "/Data/MasterDatabase/" + "MasterDatabaseRC8.p")
tqdm("status")
# ------------------------------------------------------------------------------------------------
# Most by Funding Year
# ------------------------------------------------------------------------------------------------
funding['StartYear'] = funding['StartYear'].astype(float)
range_funding = funding[(funding['StartYear'] >= 2005) & (funding['StartYear'] <= 2015)]
db_total = range_funding['NormalizedAmount'].sum()
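# Share of total normalized funding (2005-2015) attributable to each funder block, as a percentage.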
d = {c: range_funding[range_funding['FunderBlock'].str.upper() == c.upper()]['NormalizedAmount'] \
.sum() / db_total for c in funding['FunderBlock'].unique()}
block_dict = {k: round(float(v) * 100, 1) for k, v in d.items()}
# ------------------------------------------------------------------------------------------------
# Highest-Funded Organizations
# ------------------------------------------------------------------------------------------------
top = 250
top_orgs = funding[(funding['StartYear'].astype(float) >= 2010) & (funding['StartYear'].astype(float) < 2016)].groupby(
['OrganizationName', 'OrganizationBlock', 'StartYear'])['NormalizedAmount'].sum().reset_index()
# Get the Top Funded Orgs for Each Year
# Sort by Year and Amount
top_orgs_sorted = top_orgs.sort_values(['StartYear', 'NormalizedAmount'], ascending=[False, False]).reset_index(
drop=True)
# Get the top x per year
by_year = top_orgs_sorted.sort_values('NormalizedAmount', ascending=False).groupby('StartYear', as_index=False).head(
top)
# Sort
by_year_sorted = by_year.sort_values(['StartYear', 'NormalizedAmount'], ascending=[False, False]).reset_index(drop=True)
# Add Ranking (will only work for certain values of top)
by_year_sorted['Ranking'] = list(range(1, top + 1)) * int(round(by_year_sorted.shape[0] / top))
# Rename
by_year_sorted.columns = ['Name', 'Country', 'Start Year', 'Total Grants (USD)', 'Ranking']
# Format Money (see http://stackoverflow.com/a/3393776/4898004)
by_year_sorted['Total Grants (USD)'] = by_year_sorted['Total Grants (USD)'].map(lambda x: '{:20,.2f}'.format(x).strip())
# Reorder
by_year_sorted = by_year_sorted[['Ranking', 'Name', 'Country', 'Start Year', 'Total Grants (USD)']]
by_year_sorted.to_csv(MAIN_FOLDER + "analysis/resources/" + '2010_2015_rankings_detailed.csv', index=False)
| gpl-3.0 | -1,514,281,424,299,409,700 | 39.169014 | 120 | 0.542426 | false |
postlund/pyatv | pyatv/airplay/pairing.py | 1 | 3297 | """Device pairing and derivation of encryption keys."""
import binascii
import logging
from typing import Optional
from pyatv import conf, exceptions
from pyatv.airplay.auth import AirPlayPairingProcedure
from pyatv.airplay.srp import LegacyCredentials, SRPAuthHandler, new_credentials
from pyatv.const import Protocol
from pyatv.interface import PairingHandler
from pyatv.support import error_handler
from pyatv.support.http import ClientSessionManager, HttpConnection, http_connect
_LOGGER = logging.getLogger(__name__)
class AirPlayPairingHandler(PairingHandler):
"""Base class for API used to pair with an Apple TV."""
def __init__(
self, config: conf.AppleTV, session_manager: ClientSessionManager, _
) -> None:
"""Initialize a new MrpPairingHandler."""
super().__init__(session_manager, config.get_service(Protocol.AirPlay))
self.http: Optional[HttpConnection] = None
self.address: str = str(config.address)
self.pairing_procedure: Optional[AirPlayPairingProcedure] = None
self.credentials: LegacyCredentials = self._setup_credentials()
self.pin_code: Optional[str] = None
self._has_paired: bool = False
def _setup_credentials(self) -> LegacyCredentials:
# If service has credentials, use those. Otherwise generate new.
if self.service.credentials is None:
return new_credentials()
return LegacyCredentials.parse(self.service.credentials)
@property
def has_paired(self) -> bool:
"""If a successful pairing has been performed."""
return self._has_paired
async def close(self) -> None:
"""Call to free allocated resources after pairing."""
await super().close()
if self.http:
self.http.close()
async def begin(self) -> None:
"""Start pairing process."""
_LOGGER.debug("Starting AirPlay pairing with credentials %s", self.credentials)
srp: SRPAuthHandler = SRPAuthHandler(self.credentials)
srp.initialize()
self.http = await http_connect(self.address, self.service.port)
self.pairing_procedure = AirPlayPairingProcedure(self.http, srp)
self._has_paired = False
return await error_handler(
self.pairing_procedure.start_pairing, exceptions.PairingError
)
async def finish(self) -> None:
"""Stop pairing process."""
if not self.pairing_procedure:
raise exceptions.PairingError("pairing was not started")
if not self.pin_code:
raise exceptions.PairingError("no pin given")
self.service.credentials = str(
await error_handler(
self.pairing_procedure.finish_pairing,
exceptions.PairingError,
binascii.hexlify(self.credentials.identifier).decode("ascii").upper(),
self.pin_code,
)
)
self._has_paired = True
def pin(self, pin: int) -> None:
"""Pin code used for pairing."""
self.pin_code = str(pin).zfill(4)
_LOGGER.debug("AirPlay PIN changed to %s", self.pin_code)
@property
def device_provides_pin(self) -> bool:
"""Return True if remote device presents PIN code, else False."""
return True
| mit | 1,528,977,526,204,508,200 | 35.633333 | 87 | 0.657264 | false |
klmitch/nova | nova/policies/limits.py | 1 | 2189 | # Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
BASE_POLICY_NAME = 'os_compute_api:limits'
OTHER_PROJECT_LIMIT_POLICY_NAME = 'os_compute_api:limits:other_project'
DEPRECATED_POLICY = policy.DeprecatedRule(
'os_compute_api:os-used-limits',
base.RULE_ADMIN_API,
)
DEPRECATED_REASON = """
Nova API policies are introducing new default roles with scope_type
capabilities. Old policies are deprecated and silently going to be ignored
in nova 23.0.0 release.
"""
limits_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
check_str=base.RULE_ANY,
description="Show rate and absolute limits for the current user "
"project",
operations=[
{
'method': 'GET',
'path': '/limits'
}
],
scope_types=['system', 'project']),
policy.DocumentedRuleDefault(
name=OTHER_PROJECT_LIMIT_POLICY_NAME,
check_str=base.SYSTEM_READER,
description="""Show rate and absolute limits of other project.
This policy only checks if the user has access to the requested
project limits. And this check is performed only after the check
os_compute_api:limits passes""",
operations=[
{
'method': 'GET',
'path': '/limits'
}
],
scope_types=['system'],
deprecated_rule=DEPRECATED_POLICY,
deprecated_reason=DEPRECATED_REASON,
deprecated_since='21.0.0'),
]
def list_rules():
return limits_policies
| apache-2.0 | -8,685,073,734,015,824,000 | 30.724638 | 78 | 0.660576 | false |
scm-spain/slippin-jimmy | tests/slippinj/databases/drivers/test_sqlserver.py | 1 | 5305 | import logging
from mock import Mock
from slippinj.databases.drivers.sqlserver import Sqlserver
class TestSqlserver:
def setup_method(self, method):
self.logger = logging.getLogger('test')
self.logger.addHandler(logging.NullHandler())
def teardown_method(self, method):
self.logger = None
def test_get_tables_info_when_no_table_list_is_provided(self):
mocked_table_list_query_cursor = Mock()
mocked_table_list_query_cursor.execute = Mock(return_value=True)
mocked_table_list_query_cursor.fetchall = Mock(return_value=[{'table_name': 'unit'}, {'table_name': 'test'}])
mocked_table_count_query_cursor = Mock()
mocked_table_count_query_cursor.execute = Mock(return_value=True)
mocked_table_count_query_cursor.fetchone = Mock(return_value=[10])
columns = {
'table_name': '',
'column_name': 'column',
'data_type': 'string',
'character_maximum_length': '1',
'is_nullable': 'NO',
'column_default': ''
}
tables_columns = []
columns.update(table_name='unit')
tables_columns.append(columns.copy())
columns.update(table_name='test')
tables_columns.append(columns.copy())
mocked_table_columns_query_cursor = Mock()
mocked_table_columns_query_cursor.execute = Mock(return_value=True)
mocked_table_columns_query_cursor.fetchall = Mock(return_value=tables_columns)
mocked_table_top_query_cursor = Mock()
mocked_table_top_query_cursor.execute = Mock(return_value=True)
mocked_table_top_query_cursor.fetchall = Mock(return_value=[])
mocked_mssql = Mock()
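        # cursor() returns these mocks in order: table list, row count, column info, top rows.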
mocked_mssql.cursor = Mock(side_effect=[mocked_table_list_query_cursor, mocked_table_count_query_cursor,
mocked_table_columns_query_cursor, mocked_table_top_query_cursor])
mocked_builder = Mock()
mocked_builder.build = Mock(return_value=mocked_mssql)
expected = {'tables': {'test': {'columns': [{'character_maximum_length': '1',
'column_default': '',
'column_name': 'column',
'data_type': 'string',
'is_nullable': 'NO'}],
'count': 10,
'rows': []},
'unit': {'columns': [{'character_maximum_length': '1',
'column_default': '',
'column_name': 'column',
'data_type': 'string',
'is_nullable': 'NO'}],
'count': 10,
'rows': []}},
'db_connection_string': 'jdbc:sqlserver://test'
}
assert expected == Sqlserver(mocked_builder, self.logger, db_host = 'test').get_all_tables_info(None, None, None)
def test_get_tables_info_when_table_list_has_been_provided(self):
mocked_table_count_query_cursor = Mock()
mocked_table_count_query_cursor.execute = Mock(return_value=True)
mocked_table_count_query_cursor.fetchone = Mock(return_value=[10])
columns = {
'table_name': '',
'column_name': 'column',
'data_type': 'string',
'character_maximum_length': '1',
'is_nullable': 'NO',
'column_default': ''
}
tables_columns = []
columns.update(table_name='unit')
tables_columns.append(columns.copy())
columns.update(table_name='test')
tables_columns.append(columns.copy())
mocked_table_columns_query_cursor = Mock()
mocked_table_columns_query_cursor.execute = Mock(return_value=True)
mocked_table_columns_query_cursor.fetchall = Mock(return_value=tables_columns)
mocked_table_top_query_cursor = Mock()
mocked_table_top_query_cursor.execute = Mock(return_value=True)
mocked_table_top_query_cursor.fetchall = Mock(return_value=[])
mocked_mssql = Mock()
mocked_mssql.cursor = Mock(side_effect=[mocked_table_count_query_cursor,
mocked_table_columns_query_cursor, mocked_table_top_query_cursor])
mocked_builder = Mock()
mocked_builder.build = Mock(return_value=mocked_mssql)
expected = {'tables': {
'unit': {'columns': [{'character_maximum_length': '1',
'column_default': '',
'column_name': 'column',
'data_type': 'string',
'is_nullable': 'NO'}],
'count': 10,
'rows': []}},
'db_connection_string': 'jdbc:sqlserver://test'
}
assert expected == Sqlserver(mocked_builder, self.logger, db_host = 'test').get_all_tables_info('unit', None, None)
| apache-2.0 | 7,177,636,296,262,225,000 | 45.535088 | 123 | 0.514986 | false |
guillaume-philippon/aquilon | lib/aquilon/worker/commands/update_building_preference.py | 1 | 2073 | # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2016,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Contains the logic for `aq update building preference`. """
from aquilon.aqdb.model import BuildingPreference, Building
from aquilon.worker.broker import BrokerCommand
from aquilon.worker.dbwrappers.change_management import validate_prod_archetype
from aquilon.worker.dbwrappers.cluster import get_clusters_by_locations
class CommandUpdateBuildingPreference(BrokerCommand):
requires_plenaries = True
required_parameters = ["building_pair", "archetype"]
def render(self, session, logger, plenaries, building_pair, archetype,
prefer, justification, reason, user, **_):
db_pref = BuildingPreference.get_unique(session,
building_pair=building_pair,
archetype=archetype,
compel=True)
db_pref.lock_row()
validate_prod_archetype(db_pref.archetype, user, justification, reason, logger)
for db_clus in get_clusters_by_locations(session, (db_pref.a, db_pref.b),
db_pref.archetype):
plenaries.add(db_clus)
if prefer:
dbbuilding = Building.get_unique(session, prefer, compel=True)
db_pref.prefer = dbbuilding
session.flush()
plenaries.write(verbose=True)
return
| apache-2.0 | 5,816,525,462,180,406,000 | 38.865385 | 87 | 0.655572 | false |
puruckertom/ubertool | ubertool/kabam/kabam_functions.py | 1 | 42339 | from __future__ import division #brings in Python 3.0 mixed type calculation rules
from functools import wraps
import logging
import numpy as np
import pandas as pd
import time
from math import exp
class KabamFunctions(object):
"""
Function class for Kabam.
"""
def __init__(self):
"""Class representing the functions for Kabam"""
super(KabamFunctions, self).__init__()
def percent_to_frac(self, percent):
fraction = percent / 100.
return fraction
def ventilation_rate(self, wet_wgt):
"""
:description Ventilation rate of aquatic animal
:unit L/d
:expression Kabam Eq. A5.2b (Gv)
:param wet_wgt: wet weight of animal (kg)
:param conc_do: concentration of dissolved oxygen (mg O2/L)
:return:
"""
vent_rate = pd.Series([], dtype = 'float')
vent_rate = (1400.0 * ((wet_wgt ** 0.65) / self.conc_do))
return vent_rate
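        # Illustrative check (hypothetical values, not model defaults): for a
        # 0.01 kg organism at a dissolved oxygen level of 8 mg O2/L,
        # Gv = 1400 * (0.01 ** 0.65) / 8, i.e. roughly 8.8 L/d.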
def pest_uptake_eff_bygills(self):
"""
:description Pesticide uptake efficiency by gills
:unit fraction
:expression Kabam Eq. A5.2a (Ew)
:param log kow: octanol-water partition coefficient ()
:return:
"""
pest_uptake_eff_bygills = pd.Series([], dtype = 'float')
pest_uptake_eff_bygills = (1 / (1.85 + (155. / self.kow)))
return pest_uptake_eff_bygills
def phytoplankton_k1_calc(self, k_ow):
"""
:description Uptake rate constant through respiratory area for phytoplankton
:unit L/kg*d
:expression Kabam Eq. A5.1 (K1:unique to phytoplankton)
:param 6.05e-5: Parameter 'A' in Eq. A5.1; constant related to resistance to pesticide
                        uptake through the aqueous phase of plant (days)
:param 5.5: Parameter 'B' in Eq. A5.1; constant related to the resistance to pesticide
uptake through the organic phase of plant (days)
:param k_ow: octanol-water partition coefficient ()
:return:
"""
phyto_k1 = pd.Series([], dtype = 'float')
phyto_k1 = 1 / (6.0e-5 + (5.5 / k_ow))
return phyto_k1
def aq_animal_k1_calc(self, pest_uptake_eff_bygills, vent_rate, wet_wgt):
"""
:description Uptake rate constant through respiratory area for aquatic animals
:unit L/kg*d
:expression Kabam Eq. A5.2 (K1)
:param pest_uptake_eff_bygills: Pesticide uptake efficiency by gills of aquatic animals (fraction)
:param vent_rate: Ventilation rate of aquatic animal (L/d)
:param wet_wgt: wet weight of animal (kg)
:return:
"""
aqueous_animal_k1 = pd.Series([], dtype = 'float')
aqueous_animal_k1 = ((pest_uptake_eff_bygills * vent_rate) / wet_wgt)
return aqueous_animal_k1
def animal_water_part_coef(self, frac_lipid_cont, frac_nlom_cont, frac_water_cont, beta):
"""
:description Organism-Water partition coefficient (based on organism wet weight)
:unit ()
:expression Kabam Eq. A6a (Kbw)
:param frac_lipid_cont: lipid fraction of organism (kg lipid/kg organism wet weight)
:param frac_nlom_cont: non-lipid organic matter (NLOM) fraction of organism (kg NLOM/kg organism wet weight)
:param frac_water_cont water content of organism (kg water/kg organism wet weight)
:param kow: octanol-water partition coefficient ()
:param beta: proportionality constant expressing the sorption capacity of NLOM or NLOC to
that of octanol (0.35 for phytoplankton; 0.035 for all other aquatic animals)
:return:
"""
part_coef = pd.Series([], dtype = 'float')
part_coef = (frac_lipid_cont * self.kow) + (frac_nlom_cont * beta * self.kow) + frac_water_cont
return part_coef
def aq_animal_k2_calc(self, aq_animal_k1, animal_water_part_coef):
"""
:description Elimination rate constant through the respiratory area
:unit (per day)
:expression Kabam Eq. A6 (K2)
:param aq_animal_k1: Uptake rate constant through respiratory area for aquatic animals, including phytoplankton (L/kg*d)
:param animal_water_part_coef (Kbw): Organism-Water partition coefficient (based on organism wet weight ()
:return:
"""
aq_animal_k2 = pd.Series([], dtype = 'float')
aq_animal_k2 = aq_animal_k1 / animal_water_part_coef
return aq_animal_k2
def animal_grow_rate_const(self, wet_wgt):
"""
:description Aquatic animal/organism growth rate constant
:unit (per day)
:expression Kabam Eq. A7.1 & A7.2
:param wet_wgt: wet weight of animal/organism (kg)
:param water_temp: water temperature (degrees C)
:note the loop here could be moved to the main routine with the
coefficient *i.e., 0.0005, 0.00251) provided through a calling argument
:return:
"""
growth_rate = pd.Series(np.nan, index=list(range(len(self.water_temp))), dtype = 'float')
for i in range(len(self.water_temp)): #loop through model simulation runs
if self.water_temp[i] < 17.5:
growth_rate[i] = 0.0005 * (wet_wgt[i] ** -0.2)
else:
growth_rate[i] = 0.00251 * (wet_wgt[i] ** -0.2)
return growth_rate
def dietary_trans_eff(self):
"""
:description Aquatic animal/organism dietary pesticide transfer efficiency
:unit fraction
:expression Kabam Eq. A8a (Ed)
:param kow: octanol-water partition coefficient ()
:return:
"""
trans_eff = pd.Series([], dtype = 'float')
trans_eff = 1 / (.0000003 * self.kow + 2.0)
return trans_eff
def aq_animal_feeding_rate(self, wet_wgt):
"""
:description Aquatic animal feeding rate (except filterfeeders)
:unit kg/d
:expression Kabam Eq. A8b1 (Gd)
:param wet_wgt: wet weight of animal/organism (kg)
:return:
"""
feeding_rate = pd.Series([], dtype = 'float')
for i in range(len(self.water_temp)):
feeding_rate[i] = 0.022 * wet_wgt[i] ** 0.85 * exp(0.06 * self.water_temp[i])
return feeding_rate
def filterfeeders_feeding_rate(self):
"""
:description Filter feeder feeding rate
:unit kg/d
:expression Kabam Eq. A8b2 (Gd)
:param self.gv_filterfeeders: filterfeeder ventilation rate (L/d)
:param self.conc_ss: Concentration of Suspended Solids (Css - kg/L)
:param particle_scav_eff: efficiency of scavenging of particles absorbed from water (fraction)
:return:
"""
feeding_rate = pd.Series([], dtype = 'float')
feeding_rate = self.gv_filterfeeders * self.conc_ss * self.particle_scav_eff
return feeding_rate
def diet_uptake_rate_const(self, dietary_trans_eff, feeding_rate, wet_wgt):
"""
:description Pesticide uptake rate constant for uptake through ingestion of food rate
:unit kg food/kg organism - day
:expression Kabam Eq. A8 (kD)
:param wet weight of aquatic animal/organism (kg)
:param dietary_trans_eff: dietary pesticide transfer efficiency (fraction)
:param feeding rate: animal/organism feeding rate (kg/d)
:return:
"""
        dietary_uptake_constant = pd.Series([], dtype = 'float')
dietary_uptake_constant = dietary_trans_eff * feeding_rate / wet_wgt
return dietary_uptake_constant
def overall_diet_content(self, diet_fraction, content_fraction):
"""
        :description Overall fraction of aquatic animal/organism diet attributed to diet food component (i.e., lipids or NLOM or water)
:unit kg diet / kg organism
:expression not shown in Kabam documentation: it is associated with Kabam Eq. A9
overall_diet_content is equal to the sum over dietary elements
        : of (fraction of diet) * (content in diet element); for example zooplankton ingest sediment and
: phytoplankton, thus the overall lipid content of the zooplankton diet equals
: (fraction of sediment in zooplankton diet) * (fraction of lipids in sediment) +
: (fraction of phytoplankton in zooplankton diet) * (fraction of lipids in phytoplankton)
:param diet_fraction: list of values representing fractions of aquatic animal/organism diet attributed
to each element (prey) of diet
:param content_fraction: list of values representing fraction of diet element (prey) attributed to a specific
component of that diet element (e.g., lipid, NLOM, or water)
:return:
"""
overall_diet_fraction = 0.0
for i in range(len(diet_fraction)):
overall_diet_fraction = overall_diet_fraction + diet_fraction[i] * content_fraction[i]
return overall_diet_fraction
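    # Illustrative worked example (hypothetical fractions, not model inputs):
    # if a zooplankton diet is 20% sediment with 2% lipid and 80% phytoplankton
    # with 1% lipid, the overall diet lipid content is
    # 0.2 * 0.02 + 0.8 * 0.01 = 0.012 (kg lipid / kg diet).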
def fecal_egestion_rate_factor(self, epsilonL, epsilonN, epsilonW, diet_lipid, diet_nlom, diet_water):
"""
:description Aquatic animal/organism egestion rate of fecal matter factor (to be multiplied by the
feeding rate to calculate egestion rate of fecal matter)
:unit (kg lipid)/[(kg diet)
:expression Kabam Eq. A9 (GF)
:param epsilonL: dietary assimilation rate of lipids (fraction)
:param epsilonN: dietary assimilation rate of NLOM (fraction)
:param epsilonW: dietary assimilation rate of water (fraction)
:param diet_lipid; lipid content of aquatic animal/organism diet (fraction)
:param diet_nlom NLOM content of aquatic animal/organism diet (fraction)
:param diet_water water content of aquatic animal/organism diet (fraction)
:return:
"""
rate_factor = pd.Series([], dtype = 'float')
rate_factor = (((1. - epsilonL) * diet_lipid) + ((1. - epsilonN) * diet_nlom) + (
(1. - epsilonW) * diet_water))
return rate_factor
def diet_elements_gut(self, epsilon, overall_diet_content, egestion_rate_factor):
"""
:description Fraction of diet elements (i.e., lipid, NLOM, water) in the gut
:unit (kg lipid) / (kg digested wet weight)
:expression Kabam Eq. A9 (VLG, VNG, VWG)
:param epsilon relevant dietary assimilation rate (fraction)
:param overall_diet_content relevant overall diet content of diet element, e.g., lipid/nlom/water (kg/kg)
:param egestion_rate_factor relevant: Aquatic animal/organism egestion rate of fecal matter factor
:return:
"""
gut_content = pd.Series([], dtype = 'float')
try:
gut_content = ((1. - epsilon) * overall_diet_content) / egestion_rate_factor
except:
print('Likely divide by zero in routine diet_elements_gut')
return gut_content
def gut_organism_partition_coef(self, gut_lipid, gut_nlom, gut_water, pest_kow, beta,
organism_lipid, organism_nlom, organism_water):
"""
:description Partition coefficient of the pesticide between the gastrointenstinal track and the organism
:unit none
:expression Kabam Eq. A9 (KGB)
:param gut_lipid: lipid content in the gut
:param gut_nlom: nlom content in the gut
:param gut_water: water content in the gut
:param pest_kow: pesticide Kow
:param beta: proportionality constant expressing the sorption capacity of NLOM to that of octanol
:param organism_lipid: lipid content in the whole organism
:param organism_nlom: nlom content in the whole organism
:param organism_water: water content in the whole organism
:return:
"""
part_coef = pd.Series([], dtype = 'float')
part_coef = (pest_kow * (gut_lipid + beta * gut_nlom) + gut_water) / \
(pest_kow * (organism_lipid + beta * organism_nlom) + organism_water)
return part_coef
def fecal_elim_rate_const(self, fecal_egestion_rate, diet_trans_eff, part_coef, wet_wgt):
"""
:description Rate constant for elimination of the pesticide through excretion of contaminated feces
:unit per day
:expression Kabam Eq. A9
:param fecal_egestion_rate: egestion rate of fecal matter (kg feces)/(kg organism-day)
:param diet_trans_eff: dietary pesticide transfer efficiency (fraction)
:param part_coef: gut - partition coefficient of the pesticide between the gastrointestinal tract
and the organism (-)
:param wet_wgt: wet weight of organism (kg)
:return:
"""
elim_rate_const = pd.Series([], dtype = 'float')
elim_rate_const = fecal_egestion_rate * diet_trans_eff * (part_coef / wet_wgt)
return elim_rate_const
def frac_pest_freely_diss(self):
"""
:description Calculate Fraction of pesticide freely dissolved in water column (that can be
absorbed via membrane diffusion)
:unit fraction
:expression Kabam Eq. A2
:param conc_poc: Concentration of Particulate Organic Carbon in water column (kg OC/L)
        :param kow: octanol-water partition coefficient (-)
:param conc_doc: Concentration of Dissolved Organic Carbon in water column (kg OC/L)
:return:
"""
frac_diss = pd.Series([], dtype = 'float')
frac_diss = 1 / (1 + (self.conc_poc * self.alpha_poc * self.kow) + (self.conc_doc * self.alpha_doc * self.kow))
return frac_diss
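        # Illustrative check (hypothetical water-quality values): with
        # conc_poc = 5e-9 kg OC/L, conc_doc = 5e-8 kg OC/L, alpha_poc = 0.35,
        # alpha_doc = 0.08 and kow = 1e6, phi = 1 / (1 + 1.75e-3 + 4e-3),
        # i.e. roughly 0.994 of the pesticide is freely dissolved.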
def conc_freely_diss_watercol(self):
"""
:description Concentration of freely dissolved pesticide in overlying water column
:unit g/L
:expression Kabam A1 (product of terms - [phi * water_column_eec], used in Eqs F2 & F4)
:param phi: Fraction of pesticide freely dissolved in water column (that can be
absorbed via membrane diffusion) (fraction)
:param water_column_eec: Water Column 1-in-10 year EECs (ug/L)
:return:
"""
freely_dissolved_conc = pd.Series([], dtype='float')
freely_dissolved_conc = self.phi * self.water_column_eec
return freely_dissolved_conc
def conc_sed_norm_4oc(self):
"""
:description Pesticide concentration in sediment normalized for organic carbon
:unit ug/(kg OC)
:expression Kabam Eq. A4a
:param pore_water_eec: freely dissolved pesticide concentration in sediment pore water (ug/L)
:param k_oc: organic carbon partition coefficient (L/kg OC)
:Note units here are in ug/kg as opposed to g/kg as in OPP spreadsheet; this is just to be consistent with
other units used throughout
:return:
"""
conc_diss_sed = pd.Series([], dtype = 'float')
conc_diss_sed = self.k_oc * self.pore_water_eec
return conc_diss_sed
def conc_sed_dry_wgt(self):
"""
:description Calculate concentration of pesticide in solid portion of sediment
:unit ug/(kg dry sediment)
:expression Kabam Eq. A4
:param c_soc: pesticide concentration in sediment normalized for organic carbon ug/(kg OC)
:param sediment_oc: fraction organic carbon in sediment (fraction)
:Note units here are in ug/kg as opposed to g/kg as in OPP spreadsheet; this is just to be consistent with
other units used throughout
:return:
"""
conc_sed = pd.Series([], dtype = 'float')
conc_sed = self.c_soc * self.sediment_oc_frac
return conc_sed
def diet_pest_conc(self, prey_frac, prey_pest_conc, diet_lipid_frac):
"""
:description Overall concentration of pesticide in aquatic animal/organism diet and
lipid normalized overall concentration of pesticide in aquatic animal/organism diet
:unit g/(kg wet weight)
:expression Kabam Eq. A1 (SUM(Pi * CDi);
:param prey_frac: fraction of diet containing prey i (Pi in Eq. A1))
:param prey_pest_conc: concentraiton of pesticide in prey i (CDi in Eq. A1)
:param diet_lipid_frac: fraction of animal/organism that is lipid
:return:
"""
overall_diet_conc = pd.Series([], dtype = 'float')
overall_lipid_norm_conc = pd.Series([], dtype = 'float')
overall_diet_conc = len(prey_frac) * [0.0]
overall_lipid_norm_conc = len(prey_frac) * [0.0]
for j in range(len(prey_frac)): # process model simulation runs
for i in range(len(prey_frac[j])): # process individual prey items
prey_conc = prey_frac[j][i] * prey_pest_conc[j][i]
if (diet_lipid_frac[j][i] > 0.0):
lipid_norm_prey_conc = prey_conc / diet_lipid_frac[j][i]
else:
lipid_norm_prey_conc = 0.0
overall_diet_conc[j] = overall_diet_conc[j] + prey_conc
overall_lipid_norm_conc[j] = overall_lipid_norm_conc[j] + lipid_norm_prey_conc
return overall_diet_conc, overall_lipid_norm_conc
def pest_conc_organism(self, k1, k2, kD, kE, kG, kM, mP, mO, pest_diet_conc):
"""
:description Concentration of pesticide in aquatic animal/organism
:unit ug/(kg wet weight)
:expression Kabam Eq. A1 (CB)
:param k1: pesticide uptake rate constant through respiratory area (gills, skin) (L/kg-d)
        :param k2: rate constant for elimination of the pesticide through the respiratory area (gills, skin) (/d)
:param kD: pesticide uptake rate constant for uptake through ingestion of food (kg food/(kg organism - day)
:param kE: rate constant for elimination of the pesticide through excretion of feces (/d)
:param kG: animal/organism growth rate constant (/d)
:param kM: rate constant for pesticide metabolic transformation (/d)
        :param mP: fraction of respiratory ventilation that involves pore-water of sediment (fraction)
:param mO: fraction of respiratory ventilation that involves overlying water; 1-mP (fraction)
:param phi: fraction of the overlying water pesticide concentration that is freely dissolved and can be absorbed
via membrane diffusion (fraction)
        :param water_column_eec: total pesticide concentration in water column above sediment (ug/L)
        :param pore_water_eec: freely dissolved pesticide concentration in pore-water of sediment (ug/L)
:param pest_diet_conc: concentration of pesticide in overall diet of aquatic animal/organism (ug/kg wet weight)
#because phytoplankton have no diet the (Kd * SUM(Pi * Cdi)) portion of Eq. A1 is not included here
:return:
"""
pest_conc_organism = pd.Series([], dtype = 'float')
pest_conc_organism = (k1 * ((mO * self.phi * self.water_column_eec) +
(mP * self.pore_water_eec)) + (kD * pest_diet_conc)) / (k2 + kE + kG + kM)
return pest_conc_organism
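    # Reading of Eq. A1 as implemented above: the numerator gathers the uptake
    # terms (respiratory uptake from overlying water and pore water, plus
    # dietary uptake), and the denominator gathers the loss terms (respiratory
    # elimination k2, fecal egestion kE, growth dilution kG and metabolism kM).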
def lipid_norm_residue_conc(self, total_conc, lipid_content):
"""
:description Lipid normalized pesticide residue in aquatic animal/organism
:unit ug/kg-lipid
:expresssion represents a factor (CB/VLB) used in Kabam Eqs. F4, F5, & F6
:param total_conc: total pesticide concentration in animal/organism (ug/kg-ww)
:param lipid_content: fraction of animal/organism that is lipid (fraction)
:return:
"""
lipid_norm_conc = pd.Series([], dtype = 'float')
lipid_norm_conc = total_conc / lipid_content
return lipid_norm_conc
def pest_conc_diet_uptake(self, kD, k2, kE, kG, kM, diet_conc):
"""
:description Pesticide concentration in aquatic animal/organism originating from uptake through diet
:unit ug/kg ww
:expression Kabam A1 (with k1 = 0)
        :param kD: pesticide uptake rate constant for uptake through ingestion of food (kg food/kg organism - day)
:param diet_conc: overall concentration of pesticide in diet of animal/organism (ug/kg-ww)
        :param k2: rate constant for elimination of the pesticide through the respiratory area (gills, skin) (/d)
:param kE: rate constant for elimination of the pesticide through excretion of feces (/d)
:param kG: animal/organism growth rate constant (/d)
:param kM: rate constant for pesticide metabolic transformation (/d)
:return:
"""
pest_conc_from_diet = pd.Series([], dtype = 'float')
pest_conc_from_diet = (kD * diet_conc) / (k2 + kE + kG + kM)
return pest_conc_from_diet
def pest_conc_respir_uptake(self, k1, k2, kE, kG, kM, mP, mO):
"""
:description Pesticide concentration in animal/organism originating from uptake through respiration
:unit ug/kg ww
:expression Kabam A1 (with kD = 0)
:param k1: pesticide uptake rate constant through respiratory area (gills, skin) (L/kg-d)
        :param k2: rate constant for elimination of the pesticide through the respiratory area (gills, skin) (/d)
:param kE: rate constant for elimination of the pesticide through excretion of feces (/d)
:param kG: animal/organism growth rate constant (/d)
:param kM: rate constant for pesticide metabolic transformation (/d)
        :param mP: fraction of respiratory ventilation that involves pore-water of sediment (fraction)
:param mO: fraction of respiratory ventilation that involves overlying water; 1-mP (fraction)
:param phi: fraction of the overlying water pesticide concentration that is freely dissolved and can be absorbed
via membrane diffusion (fraction)
        :param water_column_eec: total pesticide concentration in water column above sediment (ug/L)
        :param pore_water_eec: freely dissolved pesticide concentration in pore-water of sediment (ug/L)
:return:
"""
pest_conc_from_respir = pd.Series([], dtype = 'float')
pest_conc_from_respir = (k1 * (mO * self.phi * self.water_column_eec + (mP * self.pore_water_eec))
/ (k2 + kE + kM + kG))
return pest_conc_from_respir
def tot_bioconc_fact(self, k1, k2, mP, mO):
"""
:description Total bioconcentration factor
:unit (ug pesticide/kg ww) / (ug pesticide/L water)
:expression Kabam Eq. F1
:param k1: pesticide uptake rate constant through respiratory area (gills, skin) (L/kg-d)
        :param k2: rate constant for elimination of the pesticide through the respiratory area (gills, skin) (/d)
        :param mP: fraction of respiratory ventilation that involves pore-water of sediment (fraction)
:param mO: fraction of respiratory ventilation that involves overlying water; 1-mP (fraction)
:param phi: fraction of the overlying water pesticide concentration that is freely dissolved and can be absorbed
via membrane diffusion (fraction)
        :param water_column_eec: total pesticide concentration in water column above sediment (ug/L)
        :param pore_water_eec: freely dissolved pesticide concentration in pore-water of sediment (ug/L)
:return:
"""
bioconc_fact = pd.Series([], dtype = 'float')
bioconc_fact = (k1 * (mO * self.phi * self.water_column_eec + (mP * self.pore_water_eec)) / k2 )\
/ self.water_column_eec
return bioconc_fact
def lipid_norm_bioconc_fact(self, k1, k2, mP, mO, lipid_content):
"""
:description Lipid normalized bioconcentration factor
:unit (ug pesticide/kg lipid) / (ug pesticide/L water)
:expression Kabam Eq. F2
:param k1: pesticide uptake rate constant through respiratory area (gills, skin) (L/kg-d)
        :param k2: rate constant for elimination of the pesticide through the respiratory area (gills, skin) (/d)
        :param mP: fraction of respiratory ventilation that involves pore-water of sediment (fraction)
:param mO: fraction of respiratory ventilation that involves overlying water; 1-mP (fraction)
:param lipid_content: fraction of animal/organism that is lipid (fraction)
:param phi: fraction of the overlying water pesticide concentration that is freely dissolved and can be absorbed
via membrane diffusion (fraction)
:param out_free_pest_conc_watercol: freely dissolved pesticide concentration in water column above sediment (ug/L)
        :param pore_water_eec: freely dissolved pesticide concentration in pore-water of sediment (ug/L)
:return:
"""
lipid_norm_bcf = pd.Series([], dtype = 'float')
lipid_norm_bcf = ((k1 * (mO * self.out_free_pest_conc_watercol + mP * self.pore_water_eec) / k2 )
/ lipid_content) / self.out_free_pest_conc_watercol
return lipid_norm_bcf
def tot_bioacc_fact(self, pest_conc):
"""
:description Total bioaccumulation factor
:unit (ug pesticide/kg ww) / (ug pesticide/L water)
:expression Kabam Eq. F3
:param pest_conc: Concentration of pesticide in aquatic animal/organism (ug/(kg wet weight)
        :param water_column_eec: total pesticide concentration in water column above sediment (ug/L)
:return:
"""
total_bioacc_fact = pd.Series([], dtype = 'float')
total_bioacc_fact = pest_conc / self.water_column_eec
return total_bioacc_fact
def lipid_norm_bioacc_fact(self, pest_conc, lipid_content):
"""
:description Lipid normalized bioaccumulation factor
:unit (ug pesticide/kg lipid) / (ug pesticide/L water)
:expression Kabam Eq. F4
:param pest_conc: Concentration of pesticide in aquatic animal/organism (ug/(kg wet weight)
:param lipid_content: fraction of animal/organism that is lipid (fraction)
:param out_free_pest_conc_watercol: freely dissolved pesticide concentration in water column above sediment (ug/L)
:return:
"""
lipid_norm_baf = pd.Series([], dtype = 'float')
lipid_norm_baf = (pest_conc/ lipid_content) / self.out_free_pest_conc_watercol
return lipid_norm_baf
def biota_sed_acc_fact(self, pest_conc, lipid_content): #cdsafl
"""
:description Biota-sediment accumulation factor
:unit (ug pesticide/kg lipid) / (ug pesticide/L water)
:expression Kabam Eq. F5
:param pest_conc: Concentration of pesticide in aquatic animal/organism (ug/(kg wet weight)
:param lipid_content: fraction of animal/organism that is lipid (fraction)
:param c_soc Pesticide concentration in sediment normalized for organic carbon content (ug/kg OC)
:return:
"""
sediment_acc_fact = pd.Series([], dtype = 'float')
#conversions not necessary, included for consistency of units use
sediment_acc_fact = (pest_conc / lipid_content) / self.c_soc
return sediment_acc_fact
def biomag_fact(self, pest_conc, lipid_content, lipid_norm_diet_conc):
"""
:description Biomagnification factor
:unit (ug pesticide/kg lipid) / (ug pesticide/kg lipid)
:expression Kabam Eq. F6
:param pest_conc: Concentration of pesticide in aquatic animal/organism (g/(kg wet weight)
:param lipid_content: fraction of animal/organism that is lipid (fraction)
        :param lipid_norm_diet_conc: lipid normalized concentration of pesticide in the diet of the animal/organism (ug/(kg lipid))
:return:
"""
#biomag_fact = pd.Series([], dtype = 'float')
biomag_fact = pd.Series((pest_conc / lipid_content) / lipid_norm_diet_conc, dtype = 'float')
return biomag_fact
#############################################################################
#############################################################################
#this method is not created in final Kabam model; the mweight array is created in 'set_global_constants' method
#and the conversion of concentrations (self.cb_*) is performed in the main routine
# # Mammals EECs
# def mweight_f(self):
# """
# Mammals
# :return:
# """
# self.cb_a = np.array(
# [[self.cb_phytoplankton, self.cb_zoo, self.cb_beninv, self.cb_ff, self.cb_sf, self.cb_mf, self.cb_lf]])
# self.cb_a2 = self.cb_a * 1000000
# # array of mammal weights
# #[fog/water shrew,rice rat/star-nosed mole,small mink,large mink,small river otter ,large river otter]
# self.mweight = np.array([[0.018, 0.085, 0.45, 1.8, 5, 15]])
# return self.mweight
##############################################################################
def dry_food_ingest_rate_mammals(self):
"""
:description dry food ingestion rate: Mammals (kg dry food/kg-bw day)
:unit (kg dry food / kg-bw day)
:expresssion Kabam Eq. G1
:param mammal_weights: body weight of mammal (kg)
:notes because mammal.weights are represented as constants (hardwired in the code) this
method is not designed for matrix/parallel processing; if the weights are
changed to inputs this method would be modified by removing the array structure and
inserting a simulation-based loop in the main model routine
:return:
"""
ingestion_rate = np.array([], dtype = 'float')
ingestion_rate = (0.0687 * self.mammal_weights ** 0.822) / self.mammal_weights
return ingestion_rate
def dry_food_ingest_rate_birds(self):
"""
:description dry food ingestion rate: Birds (kg dry food/kg-bw day)
:unit (kg dry food / kg-bw day)
:expresssion Kabam Eq. G2
:param bird_weights: body weight of bird (kg)
:notes because bird.weights are represented as constants (hardwired in the code) this
method is not designed for matrix/parallel processing; if the weights are
changed to inputs this method would be modified by removing the array structure and
inserting a simulation-based loop in the main model routine
:return:
"""
ingestion_rate_birds = np.array([], dtype = 'float')
ingestion_rate_birds = (0.0582 * self.bird_weights ** 0.651) / self.bird_weights
return ingestion_rate_birds
def wet_food_ingestion_rates(self, prey_water_contents, diet_fractions, dry_food_ingestion_rates):
"""
:description wet food ingestion rate for mammals and birds
:unit (kg food ww / kg-bw day)
:expresssion Kabam Eq. G3
:param prey_water_contents: fraction of prey body weights that are water
:param diet_fractions: fraction of predator (mammal or bird) diet attributed to individual prey
:param dry_food_ingestion_rates: predator (mammal or bird) dry food ingestion rate (kg food dw / kg-bw day)
:return:
"""
wet_food_ingest_rates = np.array([], dtype = 'float')
factor_1 = np.array([], dtype = 'float')
factor_2 = np.array([], dtype = 'float')
factor_3 = np.array([], dtype = 'float')
factor_4 = np.array([], dtype = 'float')
# calculate elemental factors of Kabam Eq. G3
factor_1 = diet_fractions * prey_water_contents
factor_2 = np.cumsum(factor_1, axis=1)
        factor_3 = factor_2[:, 6] # selects the seventh column of the array, i.e. the cumulative sum of the products over all prey items
factor_4 = 1. - factor_3
# wet food ingestion rate
wet_food_ingest_rates = dry_food_ingestion_rates / factor_4
return wet_food_ingest_rates
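    # Note (assumption documented for clarity): diet_fractions and
    # prey_water_contents are expected to be arrays with seven prey columns,
    # so factor_3 above is the total water fraction of the diet and factor_4
    # is the corresponding dry-matter fraction used to convert the dry food
    # ingestion rate to a wet-weight basis.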
def drinking_water_intake_mammals(self):
"""
:description drinking water ingestion rate: Mammals
:unit (L / day)
:expresssion Kabam Eq. G4
:param mammal_weights: body weight of mammal (kg)
:return:
"""
water_ingestion_rate_mammals = np.array([], dtype = 'float')
water_ingestion_rate_mammals = (0.099 * self.mammal_weights ** 0.90)
return water_ingestion_rate_mammals
def drinking_water_intake_birds(self):
"""
:description drinking water ingestion rate: Birds
:unit (L / day)
:expresssion Kabam Eq. G5
:param bird_weights: body weight of bird (kg)
:return:
"""
water_ingestion_rate_birds = np.array([], dtype = 'float')
water_ingestion_rate_birds = (0.059 * self.bird_weights ** 0.67)
return water_ingestion_rate_birds
def dose_based_eec(self, wc_eec, pest_conc_diet, diet_fraction, wet_food_ingest_rate, water_ingest_rate, body_weight):
"""
:description dose-based EECs
:unit (mg pesticide / kg-bw day)
:expression Kabam Eq. G6
:param wc_eec: water column eec (ug/L)
:param pest_conc_diet: overall concentration of pesticide in predator (mammal or bird) diet (ug pesticide/kg-bw)
:param diet_fraction: fraction of aquatic animal/organism in diet of predator
:param wet_food_ingest_rate: overall food ingestion rate (wet based) of predator (food ww/day)
:param water_ingest_rate: drinking water ingestion rate (L/day)
:param body_weight: body weight of predator (kg)
:return:
"""
frac_diet_conc = np.array([], dtype = 'float')
sum_diet_fracs = np.array([], dtype = 'float')
overall_diet_conc = np.array([], dtype = 'float')
dose_based_eec = np.array([], dtype = 'float')
#calculate relevant factors
frac_diet_conc = pest_conc_diet * diet_fraction
sum_diet_fracs = np.cumsum(frac_diet_conc, axis=1)
overall_diet_conc = sum_diet_fracs[:, 6]
# dose based EEC (the /1000 converts ug to mg)
dose_based_eec = (overall_diet_conc / 1000.) * wet_food_ingest_rate + \
(((wc_eec / 1000.) * water_ingest_rate) / body_weight)
return dose_based_eec
def dietary_based_eec(self, pest_conc_diet, diet_fraction):
"""
:description dietary-based EECs
:unit (mg pesticide / kg-bw day)
:expression Kabam Eq. G7
:param pest_conc_diet: overall concentration of pesticide in predator (mammal or bird) diet (ug pesticide/kg-bw)
:param diet_fraction: fraction of aquatic animal/organism in diet of predator
:return:
"""
frac_diet_conc = np.array([], dtype = 'float')
sum_diet_fracs = np.array([], dtype = 'float')
overall_diet_conc = np.array([], dtype = 'float')
dietary_eec = np.array([], dtype = 'float')
#calculate relevant factors
frac_diet_conc = pest_conc_diet * diet_fraction
sum_diet_fracs = np.cumsum(frac_diet_conc, axis=1)
overall_diet_conc = sum_diet_fracs[:, 6]
# dietary-based EEC (the /1000 converts ug to mg)
dietary_eec = (overall_diet_conc / 1000)
return dietary_eec
def acute_dose_based_tox_mammals(self, ld50_mammal, tested_animal_bw):
"""
:description Dose-based acute toxicity for mammals
:unit (mg/kg-bw)
:expression Kabam Eq. G8
:param ld50_mammal: Mammalian acute oral LD50 (mg/kg-bw)
:param tested_animal_bw: body weight of tested animal (gms)
:param mammal_weights: body weight of assessed animal (kg)
:return:
"""
acute_toxicity_mammal = ld50_mammal * ((tested_animal_bw / 1000.) / self.mammal_weights) ** 0.25
return acute_toxicity_mammal
def acute_dose_based_tox_birds(self, ld50_bird, tested_bird_bw, scaling_factor):
"""
:description Dose-based acute toxicity for birds
:unit (mg/kg-bw)
:expression Kabam Eq. G9
:param ld50_bird: avian acute oral LD50 (mg/kg-bw)
:param tested_bird_bw: body weight of tested bird (gms)
:param bird_weights: body weight of assessed bird (kg)
:param scaling_factor: Chemical Specific Mineau scaling factor ()
:return:
"""
acute_toxicity_bird = pd.Series([], dtype = 'float')
acute_toxicity_bird = ld50_bird * ((self.bird_weights / (tested_bird_bw / 1000.)) ** (scaling_factor - 1.))
return acute_toxicity_bird
def chronic_dose_based_tox_mammals(self, mammalian_chronic_endpt, mammalian_chronic_endpt_unit, tested_mammal_bw):
"""
        :description Dose-based chronic toxicity for mammals
:unit (mg/kg-bw)
        :expression (no known documentation; see EPA OPP Kabam spreadsheet)
:param mammalian_chronic_endpt:
:param mammalian_chronic_endpt_unit: ppm or mg/kg-bw
:param tested_mammal_bw: body weight of tested mammal (gms)
:param mammal_weights: body weight of assessed mammal(kg)
:return:
"""
chronic_toxicity = pd.Series([], dtype = 'float')
# the /1000 converts gms to kg; the /20 converts ppm to mg/kg-diet
if (mammalian_chronic_endpt_unit == 'ppm'):
chronic_toxicity = (mammalian_chronic_endpt / 20) * (((
(tested_mammal_bw / 1000) / self.mammal_weights)) ** 0.25)
else:
chronic_toxicity = (mammalian_chronic_endpt) * (((
(tested_mammal_bw / 1000) / self.mammal_weights)) ** 0.25)
return chronic_toxicity
def chronic_diet_based_tox_mammals(self, mammalian_chronic_endpt, mammalian_chronic_endpt_unit):
"""
:description chronic diet-based toxicity for mammals
:unit (mg/kg-diet)
:expression no known documentation; see EPA OPP Kabam spreadsheet
:param mammalian_chronic_endpt: (ppm or mg/kg-diet)
:return:
"""
chronic_toxicity = np.array([], dtype = 'float')
if (mammalian_chronic_endpt_unit == 'ppm'):
chronic_toxicity = mammalian_chronic_endpt
else:
chronic_toxicity = mammalian_chronic_endpt * 20.
return chronic_toxicity
def acute_rq_dose_mammals(self):
"""
:description Dose-based risk quotient for mammals
:unit none
:expression no known documentation; see EPA OPP Kabam spreadsheet)
:param dose_based_eec_mammals
:param acute_dose_based_tox_mammals
:return:
"""
acute_rq_dose_mamm = self.dose_based_eec_mammals / self.dose_based_tox_mammals
return acute_rq_dose_mamm
def chronic_rq_dose_mammals(self):
"""
:description Chronic dose-based risk quotient for mammals
:unit none
:expression no known documentation; see EPA OPP Kabam spreadsheet)
:param dose_based_eec_mammals: self defined
:param chronic_dose_based_tox_mammals: self defined
:return:
"""
chronic_rq_dose_mamm = self.dose_based_eec_mammals / self.chronic_dose_based_tox_mamm
return chronic_rq_dose_mamm
def acute_rq_diet_mammals(self, diet_based_eec, mammal_lc50):
"""
        :description Acute diet-based risk quotient for mammals
:unit none
:expression no known documentation; see EPA OPP Kabam spreadsheet
:param mammal_lc50; mammalian lc50 (mg/kg-diet)
:param diet_based_eec: diet-based eec for mammal (mg pesticide / kg-bw day)
:return:
"""
acute_rq_diet_mamm = np.array([], dtype = 'float')
acute_rq_diet_mamm = diet_based_eec/ mammal_lc50
return acute_rq_diet_mamm
def chronic_rq_diet_mammals(self, diet_based_eec, mammalian_chronic_endpt, mammalian_chronic_endpt_unit):
"""
        :description chronic diet-based risk quotient for mammals
:unit none
:expression no known documentation; see EPA OPP Kabam spreadsheet
:param mammalian_chronic_endpt: (ppm)
:param diet_based_eec: diet-based eec for mammal (mg pesticide / kg
:return:
"""
chronic_rq_diet_mamm = np.array([], dtype = 'float')
if (mammalian_chronic_endpt_unit == 'ppm'):
chronic_rq_diet_mamm = diet_based_eec / mammalian_chronic_endpt
else:
chronic_rq_diet_mamm = diet_based_eec / (mammalian_chronic_endpt * 20.)
return chronic_rq_diet_mamm
def acute_rq_dose_birds(self):
"""
:description Dose-based risk quotient for birds
:unit none
:expression no known documentation; see EPA OPP Kabam spreadsheet
:param dose_based_eec_birds: self defined
:param acute_dose_based_tox_birds: self defined
:return:
"""
acute_rq_dose_bird = self.dose_based_eec_birds / self.dose_based_tox_birds
return acute_rq_dose_bird
def acute_rq_diet_birds(self, diet_based_eec, bird_lc50):
"""
        :description Acute diet-based risk quotient for birds
:unit none
:expression no known documentation; see EPA OPP Kabam spreadsheet
:param bird_lc50; avian lc50 (mg/kg-diet)
:param diet_based_eec: diet-based eec for birds (mg pesticide / kg-bw day)
:note in the OPP spreadsheet 'bird_lc50' may be input as 'N/A' or have
a value; in the case it is assigned 'N/A' this method should assign
'acute_rq_diet_bird' a value of 'N/A' -- as implemented below it will
either assign a 'nan' or issue a divide by zero error.
:return:
"""
acute_rq_diet_bird = diet_based_eec/ bird_lc50
return acute_rq_diet_bird
def chronic_rq_diet_birds(self, diet_based_eec, avian_chronic_endpt):
"""
        :description chronic diet-based risk quotient for birds
:unit none
:expression no known documentation; see EPA OPP Kabam spreadsheet
:param avian_chronic_endpt: avian noaec (mg/kg-diet)
        :param diet_based_eec: diet-based eec for bird (mg pesticide / kg
:return:
"""
chronic_rq_diet_bird = np.array([], dtype = 'float')
chronic_rq_diet_bird = diet_based_eec / avian_chronic_endpt
return chronic_rq_diet_bird | unlicense | -2,566,465,609,833,547,000 | 44.97177 | 134 | 0.625641 | false |
SunPowered/python-workshop-2015 | code/session3/matplotlib_package.py | 1 | 3980 | # -*- coding: utf-8 -*-
"""
matplotlib_package.py - 3 Data Analysis
The dominant plotting library in Python was constructed to
emulate the standard MATLAB plotting syntax and functionality,
hence the name 'matplotlib'.
There are several ways to interface with matplotlib. One can plot
interactively, which is useful for on the fly visualization. One
can subclass or wrap consistent and repetitive functionality
to customize plots. Plotting options can be defined on the local
operating system, if desired.
It is important to configure the plotting backend for your system,
this is done in Spyder in the iPython settings->Graphics. For this
module, inline plotting is recommended.
Resources:
http://matplotlib.org/examples/pylab_examples/
"""
import os
import numpy as np
np.random.seed(12345)
plot_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, 'plots'))
SAVE_FIGS = False
N_FIG = 1
DEBUG = False
def more():
global SAVE_FIGS, N_FIG, plot_dir, DEBUG
if SAVE_FIGS:
plot_name = os.path.join(plot_dir, "plot{}.png".format(N_FIG))
plt.savefig(plot_name)
N_FIG += 1
plt.show()
if not DEBUG:
print
raw_input("More...")
print
"""
Interactive Plotting
By utilizing the matplotlib module pyplot, one easily has access
to all the standard plotting and customization mechanisms
"""
from matplotlib import pyplot as plt
print
print "Plot 1"
time = np.linspace(0, 6 * np.pi)
data = 2 * np.sin(time) + 3 * np.cos(time)
plt.plot(time, data)
plt.title('A title')
plt.xlabel('Time')
plt.ylabel('Data')
plt.savefig(os.path.join(plot_dir, 'plot_example.png'))
"""
Multiple series can be plotted at once, with the following
argument a flag for linestyle and colour.
Important to note that each plot is made up of a figure,
and axes, and plots. if we keep plotting and changing
options, the same axes on the same figure will be modified
in place.
Plots amend the current figure, use the 'figure' function
to start a new figure. Alternatively, we can use
the 'show' function to force the current figure to be
rendered.
"""
more()
print
print 'Plot 2'
#plt.figure()
sin_data = np.sin(time)
cos_data = np.cos(time)
plt.plot(time, cos_data, '-b', time, sin_data, '*r')
plt.title('Sin/Cos')
plt.xlabel('Time')
plt.ylabel('Data')
plt.legend(['Cosine', 'Sine'])
"""
Some more advanced figures include multiple axes on
one figure. These are called 'subplots', and can be
created and modified as follows.
"""
more()
print
print "Plot 3"
ax = plt.subplot(2, 1, 1) # 2 rows, 1 col, current plot 1
plt.plot(time, sin_data, "--k")
plt.title("Damped/Undamped Oscillator")
plt.ylabel("Sin")
plt.xlabel('Time')
damped_sin = np.sin(time) * np.exp(-time / 5)
plt.subplot(2, 1, 2) # This goes to the next subplot axes
plt.plot(time, damped_sin, '-g')
plt.ylabel("Damped Sin")
plt.xlabel("Time")
"""
There are many other types of plots that are available
"""
more()
print
print "Plot 4"
hist_data = np.random.randn(2000)
plt.hist(hist_data, color="g", bins=50)
plt.title("Normally Distributed Data")
more()
"""
With some careful manipulation, some advanced plot types are
also possible, such as a heatmap
"""
import matplotlib.mlab as mlab # Matlab compatible names
import matplotlib.cm as cm # Colour maps
print
print "Plot 5"
delta = 0.025
x = y = np.arange(-3.0, 3.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
Z = Z2 - Z1 # difference of Gaussians
im = plt.imshow(Z, interpolation='bilinear', cmap=cm.RdYlGn,
origin='lower', extent=[-3, 3, -3, 3],
vmax=abs(Z).max(), vmin=-abs(Z).max())
plt.title("Heatmaps!")
plt.xlabel("X (mm)")
plt.ylabel("Y (mm)")
more()
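"""
One last minimal sketch: a scatter plot. The data, colour and transparency
below are arbitrary illustrative choices, not part of the examples above.
"""
print
print "Plot 6"
xs = np.random.randn(200)
ys = np.random.randn(200)
plt.scatter(xs, ys, c='b', alpha=0.5)
plt.title("Scatter of random data")
plt.xlabel("X")
plt.ylabel("Y")
more()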
| gpl-2.0 | 380,881,805,217,125,600 | 23.875 | 87 | 0.660553 | false |
mengomarlene/2DImage2Mesh | Image2MeshToolbox.py | 1 | 2763 |
#-----------------------------------------------------
def clearUnwantedNodes(model):
from abaqusConstants import ON
## FOR EACH PART: CLEAR UNWANTED NODES AND DELETE SHELL SECTIONS
for myPart in model.parts.values():
## clean node list (remove nodes not in the zMin plane)
#1/ find zMin...
zCoord = list()
nodeLabels = list()
for node in myPart.nodes:
zCoord.append(node.coordinates[2])
nodeLabels.append(node.label)
minZ = min(zCoord)
#2/ build a list of nodes not in zMin
remNodes = [nodeLabels[i] for i, x in enumerate(zCoord) if x > minZ+1e-10]
#3/ remove those nodes (they have to be part of a set to do so, thus first create set - then delete nodes from set - then delete set)
if len(remNodes):
nodesSetToBeRem = myPart.SetFromNodeLabels(nodeLabels=remNodes, name='remNodeSet')
myPart.deleteNode(nodes=nodesSetToBeRem, deleteUnreferencedNodes=ON)
del nodesSetToBeRem
del nodeLabels#that list is not needed any more!
## delete shell section assignments
for sa in myPart.sectionAssignments: del sa
#-----------------------------------------------------
def createSketch(model,set):
mySketch = model.ConstrainedSketch(name='mySketch', sheetSize=30.0)
# loop over elements of the set and their edges
for ele in set.elements:
for edge in ele.getElemEdges():
# if one edge belongs to only one element it means it is an edge or a contact edge, those are the target to build the geometry
if len(edge.getElements())==1:
# reads nodes coordinates of target elements
node = edge.getNodes()
pt1 = (node[0].coordinates[0],node[0].coordinates[1])
pt2 = (node[1].coordinates[0],node[1].coordinates[1])
# create geometrical line between those nodes
mySketch.Line(point1=pt1,point2=pt2)
return mySketch
#-----------------------------------------------------
def addPartsToAssembly(model):
from abaqusConstants import ON
    ## add new parts to assembly - only after the initial instance has been deleted as they are not of the same type
for part in model.parts.values():
myInstaneName = part.name.split('_')[0]+'_instance'
model.rootAssembly.Instance(myInstaneName, part, dependent=ON)
#-----------------------------------------------------
def deleteOldFeatures(model):
# delete old part,instance,sections,...
del model.rootAssembly.features['PART-1-1']
del model.parts['PART-1']
for sName in model.sections.keys():
del model.sections[sName]
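#-----------------------------------------------------
# Minimal usage sketch (hypothetical driver code; the model and element-set
# names below are placeholders, not part of this toolbox):
#   clearUnwantedNodes(myModel)
#   mySketch = createSketch(myModel, myElementSet)
#   addPartsToAssembly(myModel)
#   deleteOldFeatures(myModel)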
#----------------------------------------------------- | gpl-2.0 | -4,718,912,397,360,695,000 | 49.254545 | 141 | 0.588853 | false |
antong/ldaptor | ldaptor/test/test_delta.py | 1 | 8590 | """
Test cases for ldaptor.protocols.ldap.delta
"""
from twisted.trial import unittest
from ldaptor import testutil
from ldaptor import delta, entry, attributeset, inmemory
from ldaptor.protocols.ldap import ldapsyntax, distinguishedname, ldaperrors
class TestModifications(unittest.TestCase):
def setUp(self):
self.foo = ldapsyntax.LDAPEntry(
None,
dn='cn=foo,dc=example,dc=com',
attributes={
'objectClass': ['person'],
'cn': ['foo', 'thud'],
'sn': ['bar'],
'more': ['junk'],
})
def testAddOld(self):
mod = delta.Add('cn', ['quux'])
mod.patch(self.foo)
self.failIf('stuff' in self.foo)
self.failUnlessEqual(self.foo['cn'], ['foo', 'thud', 'quux'])
def testAddNew(self):
mod = delta.Add('stuff', ['val1', 'val2'])
mod.patch(self.foo)
self.failUnlessEqual(self.foo['stuff'], ['val1', 'val2'])
self.failUnlessEqual(self.foo['cn'], ['foo', 'thud'])
def testDelete(self):
mod = delta.Delete('cn', ['thud'])
mod.patch(self.foo)
self.failIf('stuff' in self.foo)
self.failUnlessEqual(self.foo['cn'], ['foo'])
def testDeleteAll(self):
mod = delta.Delete('more')
mod.patch(self.foo)
self.failIf('stuff' in self.foo)
self.failUnlessEqual(self.foo['cn'], ['foo', 'thud'])
def testDelete_FailOnNonExistingAttributeType_All(self):
mod = delta.Delete('notexist', [])
self.assertRaises(KeyError,
mod.patch,
self.foo)
def testDelete_FailOnNonExistingAttributeType_OneValue(self):
mod = delta.Delete('notexist', ['a'])
self.assertRaises(KeyError,
mod.patch,
self.foo)
def testDelete_FailOnNonExistingAttributeValue(self):
mod = delta.Delete('cn', ['notexist'])
self.assertRaises(LookupError,
mod.patch,
self.foo)
def testReplace_Add(self):
mod = delta.Replace('stuff', ['val1', 'val2'])
mod.patch(self.foo)
self.failUnlessEqual(self.foo['stuff'], ['val1', 'val2'])
self.failUnlessEqual(self.foo['sn'], ['bar'])
self.failUnlessEqual(self.foo['more'], ['junk'])
def testReplace_Modify(self):
mod = delta.Replace('sn', ['baz'])
mod.patch(self.foo)
self.failIf('stuff' in self.foo)
self.failUnlessEqual(self.foo['sn'], ['baz'])
self.failUnlessEqual(self.foo['more'], ['junk'])
def testReplace_Delete_Existing(self):
mod = delta.Replace('more', [])
mod.patch(self.foo)
self.failIf('stuff' in self.foo)
self.failUnlessEqual(self.foo['sn'], ['bar'])
self.failIf('more' in self.foo)
def testReplace_Delete_NonExisting(self):
mod = delta.Replace('nonExisting', [])
mod.patch(self.foo)
self.failIf('stuff' in self.foo)
self.failUnlessEqual(self.foo['sn'], ['bar'])
self.failUnlessEqual(self.foo['more'], ['junk'])
class TestModificationOpLDIF(unittest.TestCase):
def testAdd(self):
m=delta.Add('foo', ['bar', 'baz'])
self.assertEquals(m.asLDIF(),
"""\
add: foo
foo: bar
foo: baz
-
""")
def testDelete(self):
m=delta.Delete('foo', ['bar', 'baz'])
self.assertEquals(m.asLDIF(),
"""\
delete: foo
foo: bar
foo: baz
-
""")
def testDeleteAll(self):
m=delta.Delete('foo')
self.assertEquals(m.asLDIF(),
"""\
delete: foo
-
""")
def testReplace(self):
m=delta.Replace('foo', ['bar', 'baz'])
self.assertEquals(m.asLDIF(),
"""\
replace: foo
foo: bar
foo: baz
-
""")
def testReplaceAll(self):
m=delta.Replace('thud')
self.assertEquals(m.asLDIF(),
"""\
replace: thud
-
""")
class TestAddOpLDIF(unittest.TestCase):
def testSimple(self):
op=delta.AddOp(entry.BaseLDAPEntry(
dn='dc=example,dc=com',
attributes={'foo': ['bar', 'baz'],
'quux': ['thud']}))
self.assertEquals(op.asLDIF(),
"""\
dn: dc=example,dc=com
changetype: add
foo: bar
foo: baz
quux: thud
""")
class TestDeleteOpLDIF(unittest.TestCase):
def testSimple(self):
op=delta.DeleteOp('dc=example,dc=com')
self.assertEquals(op.asLDIF(),
"""\
dn: dc=example,dc=com
changetype: delete
""")
class TestOperationLDIF(unittest.TestCase):
def testModify(self):
op=delta.ModifyOp('cn=Paula Jensen, ou=Product Development, dc=airius, dc=com',
[
delta.Add('postaladdress',
['123 Anystreet $ Sunnyvale, CA $ 94086']),
delta.Delete('description'),
delta.Replace('telephonenumber', ['+1 408 555 1234', '+1 408 555 5678']),
delta.Delete('facsimiletelephonenumber', ['+1 408 555 9876']),
])
self.assertEquals(op.asLDIF(),
"""\
dn: cn=Paula Jensen,ou=Product Development,dc=airius,dc=com
changetype: modify
add: postaladdress
postaladdress: 123 Anystreet $ Sunnyvale, CA $ 94086
-
delete: description
-
replace: telephonenumber
telephonenumber: +1 408 555 1234
telephonenumber: +1 408 555 5678
-
delete: facsimiletelephonenumber
facsimiletelephonenumber: +1 408 555 9876
-
""")
class TestModificationComparison(unittest.TestCase):
def testEquality_Add_True(self):
a = delta.Add('k', ['b', 'c', 'd'])
b = delta.Add('k', ['b', 'c', 'd'])
self.assertEquals(a, b)
def testEquality_AddVsDelete_False(self):
a = delta.Add('k', ['b', 'c', 'd'])
b = delta.Delete('k', ['b', 'c', 'd'])
self.assertNotEquals(a, b)
def testEquality_AttributeSet_False(self):
a = delta.Add('k', ['b', 'c', 'd'])
b = attributeset.LDAPAttributeSet('k', ['b', 'c', 'd'])
self.assertNotEquals(a, b)
def testEquality_List_False(self):
a = delta.Add('k', ['b', 'c', 'd'])
b = ['b', 'c', 'd']
self.assertNotEquals(a, b)
class TestOperations(unittest.TestCase):
def setUp(self):
self.root = inmemory.ReadOnlyInMemoryLDAPEntry(
dn=distinguishedname.DistinguishedName('dc=example,dc=com'))
self.meta=self.root.addChild(
rdn='ou=metasyntactic',
attributes={
'objectClass': ['a', 'b'],
'ou': ['metasyntactic'],
})
self.foo=self.meta.addChild(
rdn='cn=foo',
attributes={
'objectClass': ['a', 'b'],
'cn': ['foo'],
})
self.bar=self.meta.addChild(
rdn='cn=bar',
attributes={
'objectClass': ['a', 'b'],
'cn': ['bar'],
})
self.empty=self.root.addChild(
rdn='ou=empty',
attributes={
'objectClass': ['a', 'b'],
'ou': ['empty'],
})
self.oneChild=self.root.addChild(
rdn='ou=oneChild',
attributes={
'objectClass': ['a', 'b'],
'ou': ['oneChild'],
})
self.theChild=self.oneChild.addChild(
rdn='cn=theChild',
attributes={
'objectClass': ['a', 'b'],
'cn': ['theChild'],
})
def testAddOp_DNExists(self):
foo2 = entry.BaseLDAPEntry(
dn='cn=foo,ou=metasyntactic,dc=example,dc=com',
attributes={'foo': ['bar', 'baz'],
'quux': ['thud']})
op = delta.AddOp(foo2)
d = op.patch(self.root)
def eb(fail):
fail.trap(ldaperrors.LDAPEntryAlreadyExists)
d.addCallbacks(testutil.mustRaise, eb)
return d
def testDeleteOp_DNNotFound(self):
op = delta.DeleteOp('cn=nope,dc=example,dc=com')
d = op.patch(self.root)
def eb(fail):
fail.trap(ldaperrors.LDAPNoSuchObject)
d.addCallbacks(testutil.mustRaise, eb)
return d
def testModifyOp_DNNotFound(self):
op = delta.ModifyOp('cn=nope,dc=example,dc=com',
[delta.Add('foo', ['bar'])])
d = op.patch(self.root)
def eb(fail):
fail.trap(ldaperrors.LDAPNoSuchObject)
d.addCallbacks(testutil.mustRaise, eb)
return d
| lgpl-2.1 | 7,233,500,655,906,312,000 | 27.825503 | 87 | 0.537369 | false |
biosustain/venom | tests/rpc/test_method.py | 1 | 3948 | from collections import namedtuple
from unittest import SkipTest
from venom import Empty
from venom import Message
from venom.common import Value, BoolValue
from venom.common.types import JSONValue
from venom.converter import Converter
from venom.fields import Int32, String
from venom.rpc import Service, rpc
from venom.rpc.method import HTTPVerb, MethodDescriptor
from venom.rpc.stub import Stub
from venom.rpc.test_utils import AioTestCase
class MethodTestCase(AioTestCase):
async def test_method_override(self):
Snake = namedtuple('Snake', ('name', 'size'))
class SnakeMessage(Message):
name = String()
size = Int32()
class SnakeConverter(Converter):
wire = SnakeMessage
python = Snake
def resolve(self, message: SnakeMessage) -> Snake:
return Snake(message.name, message.size)
def format(self, value: Snake) -> SnakeMessage:
return SnakeMessage(name=value.name, size=value.size)
class SnakeStub(Stub):
@rpc(SnakeMessage, SnakeMessage)
def grow(self): pass
self.assertEqual(set(SnakeStub.__methods__.keys()), {"grow"})
self.assertEqual(SnakeStub.__methods__['grow'].request, SnakeMessage)
self.assertEqual(SnakeStub.__methods__['grow'].response, SnakeMessage)
# TODO test without stub (auto-generated request message)
class SnakeService(Service):
class Meta:
converters = [SnakeConverter()]
stub = SnakeStub
@rpc
def grow(self, request: Snake) -> Snake:
return Snake(name=request.name, size=request.size + 1)
self.assertEqual(await SnakeService().grow(SnakeMessage('snek', 2)), SnakeMessage('snek', 3))
self.assertEqual(await SnakeService.grow.invoke(SnakeService(), SnakeMessage(name='snek', size=2)),
SnakeMessage(name='snek', size=3))
self.assertEqual(await SnakeService.grow.invoke(SnakeService(), SnakeMessage(name='snek')),
SnakeMessage(name='snek', size=1))
def test_method_http(self):
class FooService(Service):
pass
self.assertEqual(MethodDescriptor(Empty, Empty).prepare(FooService(), 'bar').http_path, '/foo/bar')
self.assertEqual(MethodDescriptor(Empty, Empty).prepare(FooService(), 'foo').http_method, HTTPVerb.POST)
self.assertEqual(MethodDescriptor(Empty, Empty,
http_path='./bar').prepare(FooService(), 'foo').http_path, '/foo/bar')
self.assertEqual(MethodDescriptor(Empty, Empty, http_method=HTTPVerb.POST).http_method, HTTPVerb.POST)
self.assertEqual(MethodDescriptor(Empty, Empty, http_method=HTTPVerb.DELETE).http_method, HTTPVerb.DELETE)
def test_method_http_rule_params(self):
class Snake(Message):
id = Int32()
name = String()
size = Int32()
class FooService(Service):
pass
self.assertEqual(MethodDescriptor(Empty, Empty)
.prepare(FooService(), 'foo')
.http_path_parameters(), set())
self.assertEqual(MethodDescriptor(Snake, Snake, http_path='./{id}')
.prepare(FooService(), 'foo')
.http_path_parameters(), {'id'})
self.assertEqual(MethodDescriptor(Snake, Snake, http_path='./{name}/{id}')
.prepare(FooService(), 'foo')
.http_path_parameters(), {'id', 'name'})
@SkipTest
async def test_json_method(self):
class FooService(Service):
@rpc
def get_json(self) -> JSONValue:
return {"foo": True}
self.assertEqual(await FooService.get_json.invoke(FooService(), Empty()),
Value(bool_value=BoolValue(True)))
| mit | 881,615,685,516,078,600 | 38.878788 | 114 | 0.607903 | false |
wampixel/sciMS | index/forms.py | 1 | 1025 | from django import forms
class registration(forms.Form):
username = forms.CharField(max_length=100,
widget=forms.TextInput(attrs={'class': 'form-control',
'placeholder' : 'Username'}))
nom = forms.CharField(max_length=100,
widget=forms.TextInput(attrs={'class': 'form-control',
'placeholder' : 'Nom'}))
prenom = forms.CharField(max_length=100,
widget=forms.TextInput(attrs={'class': 'form-control',
'placeholder' : 'Prenom'}))
passwd = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'form-control',
'placeholder' : 'password'}))
email = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control',
'placeholder' : '[email protected]'})) | gpl-3.0 | 5,774,265,578,307,167,000 | 63.125 | 96 | 0.470244 | false |
Yukarumya/Yukarum-Redfoxes | xpcom/typelib/xpt/tools/xpt.py | 1 | 56333 | #!/usr/bin/env python
# Copyright 2010,2011 Mozilla Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE MOZILLA FOUNDATION ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE MOZILLA FOUNDATION OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation
# are those of the authors and should not be interpreted as representing
# official policies, either expressed or implied, of the Mozilla
# Foundation.
"""
A module for working with XPCOM Type Libraries.
The XPCOM Type Library File Format is described at:
http://www.mozilla.org/scriptable/typelib_file.html . It is used
to provide type information for calling methods on XPCOM objects
from scripting languages such as JavaScript.
This module provides a set of classes representing the parts of
a typelib in a high-level manner, as well as methods for reading
and writing them from files.
The usable public interfaces are currently:
Typelib.read(input_file) - read a typelib from a file on disk or file-like
object, return a Typelib object.
xpt_dump(filename) - read a typelib from a file on disk, dump
the contents to stdout in a human-readable
format.
Typelib() - construct a new Typelib object
Interface() - construct a new Interface object
Method() - construct a new object representing a method
defined on an Interface
Constant() - construct a new object representing a constant
defined on an Interface
Param() - construct a new object representing a parameter
to a method
SimpleType() - construct a new object representing a simple
data type
InterfaceType() - construct a new object representing a type that
is an IDL-defined interface
"""
from __future__ import with_statement
import os
import sys
import struct
import operator
# header magic
XPT_MAGIC = "XPCOM\nTypeLib\r\n\x1a"
TYPELIB_VERSION = (1, 2)
class FileFormatError(Exception):
pass
class DataError(Exception):
pass
# Magic for creating enums
def M_add_class_attribs(attribs):
def foo(name, bases, dict_):
for v, k in attribs:
dict_[k] = v
return type(name, bases, dict_)
return foo
def enum(*names):
class Foo(object):
__metaclass__ = M_add_class_attribs(enumerate(names))
def __setattr__(self, name, value): # this makes it read-only
raise NotImplementedError
return Foo()
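# Example (illustrative only): Colors = enum('red', 'green') yields an object
# with Colors.red == 0 and Colors.green == 1; assigning to an attribute raises
# NotImplementedError, making the enum effectively read-only.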
# Descriptor types as described in the spec
class Type(object):
"""
Data type of a method parameter or return value. Do not instantiate
this class directly. Rather, use one of its subclasses.
"""
_prefixdescriptor = struct.Struct(">B")
Tags = enum(
# The first 18 entries are SimpleTypeDescriptor
'int8',
'int16',
'int32',
'int64',
'uint8',
'uint16',
'uint32',
'uint64',
'float',
'double',
'boolean',
'char',
'wchar_t',
'void',
# the following four values are only valid as pointers
'nsIID',
'DOMString',
'char_ptr',
'wchar_t_ptr',
# InterfaceTypeDescriptor
'Interface',
# InterfaceIsTypeDescriptor
'InterfaceIs',
# ArrayTypeDescriptor
'Array',
# StringWithSizeTypeDescriptor
'StringWithSize',
# WideStringWithSizeTypeDescriptor
'WideStringWithSize',
# XXX: These are also SimpleTypes (but not in the spec)
# http://hg.mozilla.org/mozilla-central/annotate/0e0e2516f04e/xpcom/typelib/xpt/tools/xpt_dump.c#l69
'UTF8String',
'CString',
'AString',
'jsval',
)
def __init__(self, pointer=False, reference=False):
self.pointer = pointer
self.reference = reference
if reference and not pointer:
raise Exception("If reference is True pointer must be True too")
def __cmp__(self, other):
return (
# First make sure we have two Types of the same type (no pun intended!)
cmp(type(self), type(other)) or
cmp(self.pointer, other.pointer) or
cmp(self.reference, other.reference)
)
@staticmethod
def decodeflags(byte):
"""
Given |byte|, an unsigned uint8 containing flag bits,
decode the flag bits as described in
http://www.mozilla.org/scriptable/typelib_file.html#TypeDescriptor
and return a dict of flagname: (True|False) suitable
for passing to Type.__init__ as **kwargs.
"""
return {'pointer': bool(byte & 0x80),
'reference': bool(byte & 0x20),
}
def encodeflags(self):
"""
Encode the flag bits of this Type object. Returns a byte.
"""
flags = 0
if self.pointer:
flags |= 0x80
if self.reference:
flags |= 0x20
return flags
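    # Example (illustrative): a Type with pointer=True and reference=False
    # encodes its flags as 0x80, and Type.decodeflags(0x80) returns
    # {'pointer': True, 'reference': False}.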
@staticmethod
def read(typelib, map, data_pool, offset):
"""
Read a TypeDescriptor at |offset| from the mmaped file |map| with
data pool offset |data_pool|. Returns (Type, next offset),
where |next offset| is an offset suitable for reading the data
following this TypeDescriptor.
"""
start = data_pool + offset - 1
(data,) = Type._prefixdescriptor.unpack_from(map, start)
# first three bits are the flags
flags = data & 0xE0
flags = Type.decodeflags(flags)
# last five bits is the tag
tag = data & 0x1F
offset += Type._prefixdescriptor.size
t = None
if tag <= Type.Tags.wchar_t_ptr or tag >= Type.Tags.UTF8String:
t = SimpleType.get(data, tag, flags)
elif tag == Type.Tags.Interface:
t, offset = InterfaceType.read(typelib, map, data_pool, offset, flags)
elif tag == Type.Tags.InterfaceIs:
t, offset = InterfaceIsType.read(typelib, map, data_pool, offset, flags)
elif tag == Type.Tags.Array:
t, offset = ArrayType.read(typelib, map, data_pool, offset, flags)
elif tag == Type.Tags.StringWithSize:
t, offset = StringWithSizeType.read(typelib, map, data_pool, offset, flags)
elif tag == Type.Tags.WideStringWithSize:
t, offset = WideStringWithSizeType.read(typelib, map, data_pool, offset, flags)
return t, offset
def write(self, typelib, file):
"""
Write a TypeDescriptor to |file|, which is assumed
to be seeked to the proper position. For types other than
SimpleType, this is not sufficient for writing the TypeDescriptor,
and the subclass method must be called.
"""
file.write(Type._prefixdescriptor.pack(self.encodeflags() | self.tag))
class SimpleType(Type):
"""
A simple data type. (SimpleTypeDescriptor from the typelib specification.)
"""
_cache = {}
def __init__(self, tag, **kwargs):
Type.__init__(self, **kwargs)
self.tag = tag
def __cmp__(self, other):
return (
Type.__cmp__(self, other) or
cmp(self.tag, other.tag)
)
@staticmethod
def get(data, tag, flags):
"""
Get a SimpleType object representing |data| (a TypeDescriptorPrefix).
May return an already-created object. If no cached object is found,
construct one with |tag| and |flags|.
"""
if data not in SimpleType._cache:
SimpleType._cache[data] = SimpleType(tag, **flags)
return SimpleType._cache[data]
def __str__(self):
s = "unknown"
if self.tag == Type.Tags.char_ptr and self.pointer:
return "string"
if self.tag == Type.Tags.wchar_t_ptr and self.pointer:
return "wstring"
for t in dir(Type.Tags):
if self.tag == getattr(Type.Tags, t):
s = t
break
if self.pointer:
if self.reference:
s += " &"
else:
s += " *"
return s
class InterfaceType(Type):
"""
A type representing a pointer to an IDL-defined interface.
(InterfaceTypeDescriptor from the typelib specification.)
"""
_descriptor = struct.Struct(">H")
def __init__(self, iface, pointer=True, **kwargs):
if not pointer:
raise DataError("InterfaceType is not valid with pointer=False")
Type.__init__(self, pointer=pointer, **kwargs)
self.iface = iface
self.tag = Type.Tags.Interface
def __cmp__(self, other):
return (
Type.__cmp__(self, other) or
# When comparing interface types, only look at the name.
cmp(self.iface.name, other.iface.name) or
cmp(self.tag, other.tag)
)
@staticmethod
def read(typelib, map, data_pool, offset, flags):
"""
Read an InterfaceTypeDescriptor at |offset| from the mmaped
file |map| with data pool offset |data_pool|.
Returns (InterfaceType, next offset),
where |next offset| is an offset suitable for reading the data
following this InterfaceTypeDescriptor.
"""
if not flags['pointer']:
return None, offset
start = data_pool + offset - 1
(iface_index,) = InterfaceType._descriptor.unpack_from(map, start)
offset += InterfaceType._descriptor.size
iface = None
# interface indices are 1-based
if iface_index > 0 and iface_index <= len(typelib.interfaces):
iface = typelib.interfaces[iface_index - 1]
return InterfaceType(iface, **flags), offset
def write(self, typelib, file):
"""
Write an InterfaceTypeDescriptor to |file|, which is assumed
to be seeked to the proper position.
"""
Type.write(self, typelib, file)
# write out the interface index (1-based)
file.write(InterfaceType._descriptor.pack(typelib.interfaces.index(self.iface) + 1))
def __str__(self):
if self.iface:
return self.iface.name
return "unknown interface"
class InterfaceIsType(Type):
"""
A type representing an interface described by one of the other
arguments to the method. (InterfaceIsTypeDescriptor from the
typelib specification.)
"""
_descriptor = struct.Struct(">B")
_cache = {}
def __init__(self, param_index, pointer=True, **kwargs):
if not pointer:
raise DataError("InterfaceIsType is not valid with pointer=False")
Type.__init__(self, pointer=pointer, **kwargs)
self.param_index = param_index
self.tag = Type.Tags.InterfaceIs
def __cmp__(self, other):
return (
Type.__cmp__(self, other) or
cmp(self.param_index, other.param_index) or
cmp(self.tag, other.tag)
)
@staticmethod
def read(typelib, map, data_pool, offset, flags):
"""
Read an InterfaceIsTypeDescriptor at |offset| from the mmaped
file |map| with data pool offset |data_pool|.
Returns (InterfaceIsType, next offset),
where |next offset| is an offset suitable for reading the data
following this InterfaceIsTypeDescriptor.
May return a cached value.
"""
if not flags['pointer']:
return None, offset
start = data_pool + offset - 1
(param_index,) = InterfaceIsType._descriptor.unpack_from(map, start)
offset += InterfaceIsType._descriptor.size
if param_index not in InterfaceIsType._cache:
InterfaceIsType._cache[param_index] = InterfaceIsType(param_index, **flags)
return InterfaceIsType._cache[param_index], offset
def write(self, typelib, file):
"""
Write an InterfaceIsTypeDescriptor to |file|, which is assumed
to be seeked to the proper position.
"""
Type.write(self, typelib, file)
file.write(InterfaceIsType._descriptor.pack(self.param_index))
def __str__(self):
return "InterfaceIs *"
class ArrayType(Type):
"""
A type representing an Array of elements of another type, whose
size and length are passed as separate parameters to a method.
(ArrayTypeDescriptor from the typelib specification.)
"""
_descriptor = struct.Struct(">BB")
def __init__(self, element_type, size_is_arg_num, length_is_arg_num,
pointer=True, **kwargs):
if not pointer:
raise DataError("ArrayType is not valid with pointer=False")
Type.__init__(self, pointer=pointer, **kwargs)
self.element_type = element_type
self.size_is_arg_num = size_is_arg_num
self.length_is_arg_num = length_is_arg_num
self.tag = Type.Tags.Array
def __cmp__(self, other):
return (
Type.__cmp__(self, other) or
cmp(self.element_type, other.element_type) or
cmp(self.size_is_arg_num, other.size_is_arg_num) or
cmp(self.length_is_arg_num, other.length_is_arg_num) or
cmp(self.tag, other.tag)
)
@staticmethod
def read(typelib, map, data_pool, offset, flags):
"""
Read an ArrayTypeDescriptor at |offset| from the mmaped
file |map| with data pool offset |data_pool|.
Returns (ArrayType, next offset),
where |next offset| is an offset suitable for reading the data
following this ArrayTypeDescriptor.
"""
if not flags['pointer']:
return None, offset
start = data_pool + offset - 1
(size_is_arg_num, length_is_arg_num) = ArrayType._descriptor.unpack_from(map, start)
offset += ArrayType._descriptor.size
t, offset = Type.read(typelib, map, data_pool, offset)
return ArrayType(t, size_is_arg_num, length_is_arg_num, **flags), offset
def write(self, typelib, file):
"""
Write an ArrayTypeDescriptor to |file|, which is assumed
to be seeked to the proper position.
"""
Type.write(self, typelib, file)
file.write(ArrayType._descriptor.pack(self.size_is_arg_num,
self.length_is_arg_num))
self.element_type.write(typelib, file)
def __str__(self):
return "%s []" % str(self.element_type)
class StringWithSizeType(Type):
"""
A type representing a UTF-8 encoded string whose size and length
are passed as separate arguments to a method. (StringWithSizeTypeDescriptor
from the typelib specification.)
"""
_descriptor = struct.Struct(">BB")
def __init__(self, size_is_arg_num, length_is_arg_num,
pointer=True, **kwargs):
if not pointer:
raise DataError("StringWithSizeType is not valid with pointer=False")
Type.__init__(self, pointer=pointer, **kwargs)
self.size_is_arg_num = size_is_arg_num
self.length_is_arg_num = length_is_arg_num
self.tag = Type.Tags.StringWithSize
def __cmp__(self, other):
return (
Type.__cmp__(self, other) or
cmp(self.size_is_arg_num, other.size_is_arg_num) or
cmp(self.length_is_arg_num, other.length_is_arg_num) or
cmp(self.tag, other.tag)
)
@staticmethod
def read(typelib, map, data_pool, offset, flags):
"""
Read an StringWithSizeTypeDescriptor at |offset| from the mmaped
file |map| with data pool offset |data_pool|.
Returns (StringWithSizeType, next offset),
where |next offset| is an offset suitable for reading the data
following this StringWithSizeTypeDescriptor.
"""
if not flags['pointer']:
return None, offset
start = data_pool + offset - 1
(size_is_arg_num, length_is_arg_num) = StringWithSizeType._descriptor.unpack_from(map, start)
offset += StringWithSizeType._descriptor.size
return StringWithSizeType(size_is_arg_num, length_is_arg_num, **flags), offset
def write(self, typelib, file):
"""
Write a StringWithSizeTypeDescriptor to |file|, which is assumed
to be seeked to the proper position.
"""
Type.write(self, typelib, file)
file.write(StringWithSizeType._descriptor.pack(self.size_is_arg_num,
self.length_is_arg_num))
def __str__(self):
return "string_s"
class WideStringWithSizeType(Type):
"""
A type representing a UTF-16 encoded string whose size and length
are passed as separate arguments to a method.
(WideStringWithSizeTypeDescriptor from the typelib specification.)
"""
_descriptor = struct.Struct(">BB")
def __init__(self, size_is_arg_num, length_is_arg_num,
pointer=True, **kwargs):
if not pointer:
raise DataError("WideStringWithSizeType is not valid with pointer=False")
Type.__init__(self, pointer=pointer, **kwargs)
self.size_is_arg_num = size_is_arg_num
self.length_is_arg_num = length_is_arg_num
self.tag = Type.Tags.WideStringWithSize
def __cmp__(self, other):
return (
Type.__cmp__(self, other) or
cmp(self.size_is_arg_num, other.size_is_arg_num) or
cmp(self.length_is_arg_num, other.length_is_arg_num) or
cmp(self.tag, other.tag)
)
@staticmethod
def read(typelib, map, data_pool, offset, flags):
"""
Read an WideStringWithSizeTypeDescriptor at |offset| from the mmaped
file |map| with data pool offset |data_pool|.
Returns (WideStringWithSizeType, next offset),
where |next offset| is an offset suitable for reading the data
following this WideStringWithSizeTypeDescriptor.
"""
if not flags['pointer']:
return None, offset
start = data_pool + offset - 1
(size_is_arg_num, length_is_arg_num) = WideStringWithSizeType._descriptor.unpack_from(map, start)
offset += WideStringWithSizeType._descriptor.size
return WideStringWithSizeType(size_is_arg_num, length_is_arg_num, **flags), offset
def write(self, typelib, file):
"""
Write a WideStringWithSizeTypeDescriptor to |file|, which is assumed
to be seeked to the proper position.
"""
Type.write(self, typelib, file)
file.write(WideStringWithSizeType._descriptor.pack(self.size_is_arg_num,
self.length_is_arg_num))
def __str__(self):
return "wstring_s"
class Param(object):
"""
A parameter to a method, or the return value of a method.
(ParamDescriptor from the typelib specification.)
"""
_descriptorstart = struct.Struct(">B")
def __init__(self, type, in_=True, out=False, retval=False,
shared=False, dipper=False, optional=False):
"""
Construct a Param object with the specified |type| and
flags. Params default to "in".
"""
self.type = type
self.in_ = in_
self.out = out
self.retval = retval
self.shared = shared
self.dipper = dipper
self.optional = optional
def __cmp__(self, other):
return (
cmp(self.type, other.type) or
cmp(self.in_, other.in_) or
cmp(self.out, other.out) or
cmp(self.retval, other.retval) or
cmp(self.shared, other.shared) or
cmp(self.dipper, other.dipper) or
cmp(self.optional, other.optional)
)
@staticmethod
def decodeflags(byte):
"""
Given |byte|, an unsigned uint8 containing flag bits,
decode the flag bits as described in
http://www.mozilla.org/scriptable/typelib_file.html#ParamDescriptor
and return a dict of flagname: (True|False) suitable
for passing to Param.__init__ as **kwargs
"""
return {'in_': bool(byte & 0x80),
'out': bool(byte & 0x40),
'retval': bool(byte & 0x20),
'shared': bool(byte & 0x10),
'dipper': bool(byte & 0x08),
# XXX: Not in the spec, see:
# http://hg.mozilla.org/mozilla-central/annotate/0e0e2516f04e/xpcom/typelib/xpt/public/xpt_struct.h#l456
'optional': bool(byte & 0x04),
}
def encodeflags(self):
"""
Encode the flags of this Param. Return a byte suitable for
writing to a typelib file.
"""
flags = 0
if self.in_:
flags |= 0x80
if self.out:
flags |= 0x40
if self.retval:
flags |= 0x20
if self.shared:
flags |= 0x10
if self.dipper:
flags |= 0x08
if self.optional:
flags |= 0x04
return flags
@staticmethod
def read(typelib, map, data_pool, offset):
"""
Read a ParamDescriptor at |offset| from the mmaped file |map| with
data pool offset |data_pool|. Returns (Param, next offset),
where |next offset| is an offset suitable for reading the data
following this ParamDescriptor.
"""
start = data_pool + offset - 1
(flags,) = Param._descriptorstart.unpack_from(map, start)
        # only the first six bits are flags
flags &= 0xFC
flags = Param.decodeflags(flags)
offset += Param._descriptorstart.size
t, offset = Type.read(typelib, map, data_pool, offset)
p = Param(t, **flags)
return p, offset
def write(self, typelib, file):
"""
Write a ParamDescriptor to |file|, which is assumed to be seeked
to the correct position.
"""
file.write(Param._descriptorstart.pack(self.encodeflags()))
self.type.write(typelib, file)
def prefix(self):
"""
Return a human-readable string representing the flags set
on this Param.
"""
s = ""
if self.out:
if self.in_:
s = "inout "
else:
s = "out "
else:
s = "in "
if self.dipper:
s += "dipper "
if self.retval:
s += "retval "
if self.shared:
s += "shared "
if self.optional:
s += "optional "
return s
def __str__(self):
return self.prefix() + str(self.type)
class Method(object):
"""
A method of an interface, defining its associated parameters
and return value.
(MethodDescriptor from the typelib specification.)
"""
_descriptorstart = struct.Struct(">BIB")
def __init__(self, name, result,
params=[], getter=False, setter=False, notxpcom=False,
constructor=False, hidden=False, optargc=False,
implicit_jscontext=False):
self.name = name
self._name_offset = 0
self.getter = getter
self.setter = setter
self.notxpcom = notxpcom
self.constructor = constructor
self.hidden = hidden
self.optargc = optargc
self.implicit_jscontext = implicit_jscontext
self.params = list(params)
if result and not isinstance(result, Param):
raise Exception("result must be a Param!")
self.result = result
def __cmp__(self, other):
return (
cmp(self.name, other.name) or
cmp(self.getter, other.getter) or
cmp(self.setter, other.setter) or
cmp(self.notxpcom, other.notxpcom) or
cmp(self.constructor, other.constructor) or
cmp(self.hidden, other.hidden) or
cmp(self.optargc, other.optargc) or
cmp(self.implicit_jscontext, other.implicit_jscontext) or
cmp(self.params, other.params) or
cmp(self.result, other.result)
)
def read_params(self, typelib, map, data_pool, offset, num_args):
"""
Read |num_args| ParamDescriptors representing this Method's arguments
from the mmaped file |map| with data pool at the offset |data_pool|,
starting at |offset| into self.params. Returns the offset
suitable for reading the data following the ParamDescriptor array.
"""
for i in range(num_args):
p, offset = Param.read(typelib, map, data_pool, offset)
self.params.append(p)
return offset
def read_result(self, typelib, map, data_pool, offset):
"""
Read a ParamDescriptor representing this Method's return type
from the mmaped file |map| with data pool at the offset |data_pool|,
starting at |offset| into self.result. Returns the offset
suitable for reading the data following the ParamDescriptor.
"""
self.result, offset = Param.read(typelib, map, data_pool, offset)
return offset
@staticmethod
def decodeflags(byte):
"""
Given |byte|, an unsigned uint8 containing flag bits,
decode the flag bits as described in
http://www.mozilla.org/scriptable/typelib_file.html#MethodDescriptor
and return a dict of flagname: (True|False) suitable
for passing to Method.__init__ as **kwargs
"""
return {'getter': bool(byte & 0x80),
'setter': bool(byte & 0x40),
'notxpcom': bool(byte & 0x20),
'constructor': bool(byte & 0x10),
'hidden': bool(byte & 0x08),
# Not in the spec, see
# http://hg.mozilla.org/mozilla-central/annotate/0e0e2516f04e/xpcom/typelib/xpt/public/xpt_struct.h#l489
'optargc': bool(byte & 0x04),
'implicit_jscontext': bool(byte & 0x02),
}
def encodeflags(self):
"""
Encode the flags of this Method object, return a byte suitable
for writing to a typelib file.
"""
flags = 0
if self.getter:
flags |= 0x80
if self.setter:
flags |= 0x40
if self.notxpcom:
flags |= 0x20
if self.constructor:
flags |= 0x10
if self.hidden:
flags |= 0x08
if self.optargc:
flags |= 0x04
if self.implicit_jscontext:
flags |= 0x02
return flags
@staticmethod
def read(typelib, map, data_pool, offset):
"""
Read a MethodDescriptor at |offset| from the mmaped file |map| with
data pool offset |data_pool|. Returns (Method, next offset),
where |next offset| is an offset suitable for reading the data
following this MethodDescriptor.
"""
start = data_pool + offset - 1
flags, name_offset, num_args = Method._descriptorstart.unpack_from(map, start)
# only the first seven bits are flags
flags &= 0xFE
flags = Method.decodeflags(flags)
name = Typelib.read_string(map, data_pool, name_offset)
m = Method(name, None, **flags)
offset += Method._descriptorstart.size
offset = m.read_params(typelib, map, data_pool, offset, num_args)
offset = m.read_result(typelib, map, data_pool, offset)
return m, offset
def write(self, typelib, file):
"""
Write a MethodDescriptor to |file|, which is assumed to be
seeked to the right position.
"""
file.write(Method._descriptorstart.pack(self.encodeflags(),
self._name_offset,
len(self.params)))
for p in self.params:
p.write(typelib, file)
self.result.write(typelib, file)
def write_name(self, file, data_pool_offset):
"""
Write this method's name to |file|.
Assumes that |file| is currently seeked to an unused portion
of the data pool.
"""
if self.name:
self._name_offset = file.tell() - data_pool_offset + 1
file.write(self.name + "\x00")
else:
self._name_offset = 0
class Constant(object):
"""
A constant value of a specific type defined on an interface.
(ConstantDesciptor from the typelib specification.)
"""
_descriptorstart = struct.Struct(">I")
# Actual value is restricted to this set of types
# XXX: the spec lies, the source allows a bunch more
# http://hg.mozilla.org/mozilla-central/annotate/9c85f9aaec8c/xpcom/typelib/xpt/src/xpt_struct.c#l689
typemap = {Type.Tags.int16: '>h',
Type.Tags.uint16: '>H',
Type.Tags.int32: '>i',
Type.Tags.uint32: '>I'}
def __init__(self, name, type, value):
self.name = name
self._name_offset = 0
self.type = type
self.value = value
def __cmp__(self, other):
return (
cmp(self.name, other.name) or
cmp(self.type, other.type) or
cmp(self.value, other.value)
)
@staticmethod
def read(typelib, map, data_pool, offset):
"""
Read a ConstDescriptor at |offset| from the mmaped file |map| with
data pool offset |data_pool|. Returns (Constant, next offset),
where |next offset| is an offset suitable for reading the data
following this ConstDescriptor.
"""
start = data_pool + offset - 1
(name_offset,) = Constant._descriptorstart.unpack_from(map, start)
name = Typelib.read_string(map, data_pool, name_offset)
offset += Constant._descriptorstart.size
# Read TypeDescriptor
t, offset = Type.read(typelib, map, data_pool, offset)
c = None
if isinstance(t, SimpleType) and t.tag in Constant.typemap:
tt = Constant.typemap[t.tag]
start = data_pool + offset - 1
(val,) = struct.unpack_from(tt, map, start)
offset += struct.calcsize(tt)
c = Constant(name, t, val)
return c, offset
def write(self, typelib, file):
"""
Write a ConstDescriptor to |file|, which is assumed
to be seeked to the proper position.
"""
file.write(Constant._descriptorstart.pack(self._name_offset))
self.type.write(typelib, file)
tt = Constant.typemap[self.type.tag]
file.write(struct.pack(tt, self.value))
def write_name(self, file, data_pool_offset):
"""
Write this constants's name to |file|.
Assumes that |file| is currently seeked to an unused portion
of the data pool.
"""
if self.name:
self._name_offset = file.tell() - data_pool_offset + 1
file.write(self.name + "\x00")
else:
self._name_offset = 0
def __repr__(self):
return "Constant(%s, %s, %d)" % (self.name, str(self.type), self.value)
class Interface(object):
"""
An Interface represents an object, with its associated methods
and constant values.
(InterfaceDescriptor from the typelib specification.)
"""
_direntry = struct.Struct(">16sIII")
_descriptorstart = struct.Struct(">HH")
UNRESOLVED_IID = "00000000-0000-0000-0000-000000000000"
def __init__(self, name, iid=UNRESOLVED_IID, namespace="",
resolved=False, parent=None, methods=[], constants=[],
scriptable=False, function=False, builtinclass=False,
main_process_scriptable_only=False):
self.resolved = resolved
# TODO: should validate IIDs!
self.iid = iid
self.name = name
self.namespace = namespace
# if unresolved, all the members following this are unusable
self.parent = parent
self.methods = list(methods)
self.constants = list(constants)
self.scriptable = scriptable
self.function = function
self.builtinclass = builtinclass
self.main_process_scriptable_only = main_process_scriptable_only
# For sanity, if someone constructs an Interface and passes
# in methods or constants, then it's resolved.
if self.methods or self.constants:
# make sure it has a valid IID
if self.iid == Interface.UNRESOLVED_IID:
raise DataError("Cannot instantiate Interface %s containing methods or constants with an unresolved IID" % self.name)
self.resolved = True
# These are only used for writing out the interface
self._descriptor_offset = 0
self._name_offset = 0
self._namespace_offset = 0
self.xpt_filename = None
def __repr__(self):
return "Interface('%s', '%s', '%s', methods=%s)" % (self.name, self.iid, self.namespace, self.methods)
def __str__(self):
return "Interface(name='%s', iid='%s')" % (self.name, self.iid)
def __hash__(self):
return hash((self.name, self.iid))
def __cmp__(self, other):
c = cmp(self.iid, other.iid)
if c != 0:
return c
c = cmp(self.name, other.name)
if c != 0:
return c
c = cmp(self.namespace, other.namespace)
if c != 0:
return c
# names and IIDs are the same, check resolved
if self.resolved != other.resolved:
if self.resolved:
return -1
else:
return 1
else:
if not self.resolved:
# both unresolved, but names and IIDs are the same, so equal
return 0
# When comparing parents, only look at the name.
if (self.parent is None) != (other.parent is None):
if self.parent is None:
return -1
else:
return 1
elif self.parent is not None:
c = cmp(self.parent.name, other.parent.name)
if c != 0:
return c
return (
cmp(self.methods, other.methods) or
cmp(self.constants, other.constants) or
cmp(self.scriptable, other.scriptable) or
cmp(self.function, other.function) or
cmp(self.builtinclass, other.builtinclass) or
cmp(self.main_process_scriptable_only, other.main_process_scriptable_only)
)
def read_descriptor(self, typelib, map, data_pool):
offset = self._descriptor_offset
if offset == 0:
return
start = data_pool + offset - 1
parent, num_methods = Interface._descriptorstart.unpack_from(map, start)
if parent > 0 and parent <= len(typelib.interfaces):
self.parent = typelib.interfaces[parent - 1]
# Read methods
offset += Interface._descriptorstart.size
for i in range(num_methods):
m, offset = Method.read(typelib, map, data_pool, offset)
self.methods.append(m)
# Read constants
start = data_pool + offset - 1
(num_constants, ) = struct.unpack_from(">H", map, start)
offset = offset + struct.calcsize(">H")
for i in range(num_constants):
c, offset = Constant.read(typelib, map, data_pool, offset)
self.constants.append(c)
# Read flags
start = data_pool + offset - 1
(flags, ) = struct.unpack_from(">B", map, start)
offset = offset + struct.calcsize(">B")
        # only the first four bits are flags
flags &= 0xf0
if flags & 0x80:
self.scriptable = True
if flags & 0x40:
self.function = True
if flags & 0x20:
self.builtinclass = True
if flags & 0x10:
self.main_process_scriptable_only = True
self.resolved = True
def write_directory_entry(self, file):
"""
Write an InterfaceDirectoryEntry for this interface
to |file|, which is assumed to be seeked to the correct offset.
"""
file.write(Interface._direntry.pack(Typelib.string_to_iid(self.iid),
self._name_offset,
self._namespace_offset,
self._descriptor_offset))
def write(self, typelib, file, data_pool_offset):
"""
Write an InterfaceDescriptor to |file|, which is assumed
to be seeked to the proper position. If this interface
is not resolved, do not write any data.
"""
if not self.resolved:
self._descriptor_offset = 0
return
self._descriptor_offset = file.tell() - data_pool_offset + 1
parent_idx = 0
if self.parent:
parent_idx = typelib.interfaces.index(self.parent) + 1
file.write(Interface._descriptorstart.pack(parent_idx, len(self.methods)))
for m in self.methods:
m.write(typelib, file)
file.write(struct.pack(">H", len(self.constants)))
for c in self.constants:
c.write(typelib, file)
flags = 0
if self.scriptable:
flags |= 0x80
if self.function:
flags |= 0x40
if self.builtinclass:
flags |= 0x20
if self.main_process_scriptable_only:
flags |= 0x10
file.write(struct.pack(">B", flags))
def write_names(self, file, data_pool_offset):
"""
Write this interface's name and namespace to |file|,
as well as the names of all of its methods and constants.
Assumes that |file| is currently seeked to an unused portion
of the data pool.
"""
if self.name:
self._name_offset = file.tell() - data_pool_offset + 1
file.write(self.name + "\x00")
else:
self._name_offset = 0
if self.namespace:
self._namespace_offset = file.tell() - data_pool_offset + 1
file.write(self.namespace + "\x00")
else:
self._namespace_offset = 0
for m in self.methods:
m.write_name(file, data_pool_offset)
for c in self.constants:
c.write_name(file, data_pool_offset)
class Typelib(object):
"""
A typelib represents one entire typelib file and all the interfaces
referenced within, whether defined entirely within the typelib or
merely referenced by name or IID.
Typelib objects may be instantiated directly and populated with data,
or the static Typelib.read method may be called to read one from a file.
"""
_header = struct.Struct(">16sBBHIII")
def __init__(self, version=TYPELIB_VERSION, interfaces=[], annotations=[]):
"""
Instantiate a new Typelib.
"""
self.version = version
self.interfaces = list(interfaces)
self.annotations = list(annotations)
self.filename = None
@staticmethod
def iid_to_string(iid):
"""
Convert a 16-byte IID into a UUID string.
"""
def hexify(s):
return ''.join(["%02x" % ord(x) for x in s])
return "%s-%s-%s-%s-%s" % (hexify(iid[:4]), hexify(iid[4:6]),
hexify(iid[6:8]), hexify(iid[8:10]),
hexify(iid[10:]))
@staticmethod
def string_to_iid(iid_str):
"""
Convert a UUID string into a 16-byte IID.
"""
s = iid_str.replace('-', '')
return ''.join([chr(int(s[i:i+2], 16)) for i in range(0, len(s), 2)])
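    # Example (illustrative): string_to_iid("00000000-0000-0000-0000-000000000000")
    # returns 16 zero bytes, and iid_to_string() is its inverse.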
@staticmethod
def read_string(map, data_pool, offset):
if offset == 0:
return ""
sz = map.find('\x00', data_pool + offset - 1)
if sz == -1:
return ""
return map[data_pool + offset - 1:sz]
@staticmethod
def read(input_file):
"""
Read a typelib from |input_file| and return
the constructed Typelib object. |input_file| can be a filename
or a file-like object.
"""
filename = ""
data = None
expected_size = None
if isinstance(input_file, basestring):
filename = input_file
with open(input_file, "rb") as f:
st = os.fstat(f.fileno())
data = f.read(st.st_size)
expected_size = st.st_size
else:
data = input_file.read()
(magic,
major_ver,
minor_ver,
num_interfaces,
file_length,
interface_directory_offset,
data_pool_offset) = Typelib._header.unpack_from(data)
if magic != XPT_MAGIC:
raise FileFormatError("Bad magic: %s" % magic)
xpt = Typelib((major_ver, minor_ver))
xpt.filename = filename
if expected_size and file_length != expected_size:
raise FileFormatError("File is of wrong length, got %d bytes, expected %d" % (expected_size, file_length))
# XXX: by spec this is a zero-based file offset. however,
# the xpt_xdr code always subtracts 1 from data offsets
# (because that's what you do in the data pool) so it
# winds up accidentally treating this as 1-based.
# Filed as: https://bugzilla.mozilla.org/show_bug.cgi?id=575343
interface_directory_offset -= 1
# make a half-hearted attempt to read Annotations,
# since XPIDL doesn't produce any anyway.
start = Typelib._header.size
(anno, ) = struct.unpack_from(">B", data, start)
tag = anno & 0x7F
if tag == 0: # EmptyAnnotation
xpt.annotations.append(None)
# We don't bother handling PrivateAnnotations or anything
for i in range(num_interfaces):
# iid, name, namespace, interface_descriptor
start = interface_directory_offset + i * Interface._direntry.size
ide = Interface._direntry.unpack_from(data, start)
iid = Typelib.iid_to_string(ide[0])
name = Typelib.read_string(data, data_pool_offset, ide[1])
namespace = Typelib.read_string(data, data_pool_offset, ide[2])
iface = Interface(name, iid, namespace)
iface._descriptor_offset = ide[3]
iface.xpt_filename = xpt.filename
xpt.interfaces.append(iface)
for iface in xpt.interfaces:
iface.read_descriptor(xpt, data, data_pool_offset)
return xpt
def __repr__(self):
return "<Typelib with %d interfaces>" % len(self.interfaces)
def _sanityCheck(self):
"""
Check certain assumptions about data contained in this typelib.
Sort the interfaces array by IID, check that all interfaces
referenced by methods exist in the array.
"""
self.interfaces.sort()
for i in self.interfaces:
if i.parent and i.parent not in self.interfaces:
raise DataError("Interface %s has parent %s not present in typelib!" % (i.name, i.parent.name))
for m in i.methods:
for n, p in enumerate(m.params):
if isinstance(p, InterfaceType) and \
p.iface not in self.interfaces:
raise DataError("Interface method %s::%s, parameter %d references interface %s not present in typelib!" % (i.name, m.name, n, p.iface.name))
if isinstance(m.result, InterfaceType) and m.result.iface not in self.interfaces:
raise DataError("Interface method %s::%s, result references interface %s not present in typelib!" % (i.name, m.name, m.result.iface.name))
def writefd(self, fd):
# write out space for a header + one empty annotation,
# padded to 4-byte alignment.
headersize = (Typelib._header.size + 1)
if headersize % 4:
headersize += 4 - headersize % 4
fd.write("\x00" * headersize)
# save this offset, it's the interface directory offset.
interface_directory_offset = fd.tell()
# write out space for an interface directory
fd.write("\x00" * Interface._direntry.size * len(self.interfaces))
# save this offset, it's the data pool offset.
data_pool_offset = fd.tell()
# write out all the interface descriptors to the data pool
for i in self.interfaces:
i.write_names(fd, data_pool_offset)
i.write(self, fd, data_pool_offset)
# now, seek back and write the header
file_len = fd.tell()
fd.seek(0)
fd.write(Typelib._header.pack(XPT_MAGIC,
TYPELIB_VERSION[0],
TYPELIB_VERSION[1],
len(self.interfaces),
file_len,
interface_directory_offset,
data_pool_offset))
# write an empty annotation
fd.write(struct.pack(">B", 0x80))
# now write the interface directory
# XXX: bug-compatible with existing xpt lib, put it one byte
# ahead of where it's supposed to be.
fd.seek(interface_directory_offset - 1)
for i in self.interfaces:
i.write_directory_entry(fd)
def write(self, output_file):
"""
Write the contents of this typelib to |output_file|,
which can be either a filename or a file-like object.
"""
self._sanityCheck()
if isinstance(output_file, basestring):
with open(output_file, "wb") as f:
self.writefd(f)
else:
self.writefd(output_file)
def dump(self, out):
"""
Print a human-readable listing of the contents of this typelib
to |out|, in the format of xpt_dump.
"""
out.write("""Header:
Major version: %d
Minor version: %d
Number of interfaces: %d
Annotations:\n""" % (self.version[0], self.version[1], len(self.interfaces)))
for i, a in enumerate(self.annotations):
if a is None:
out.write(" Annotation #%d is empty.\n" % i)
out.write("\nInterface Directory:\n")
for i in self.interfaces:
out.write(" - %s::%s (%s):\n" % (i.namespace, i.name, i.iid))
if not i.resolved:
out.write(" [Unresolved]\n")
else:
if i.parent:
out.write(" Parent: %s::%s\n" % (i.parent.namespace,
i.parent.name))
out.write(""" Flags:
Scriptable: %s
BuiltinClass: %s
Function: %s\n""" % (i.scriptable and "TRUE" or "FALSE",
i.builtinclass and "TRUE" or "FALSE",
i.function and "TRUE" or "FALSE"))
out.write(" Methods:\n")
if len(i.methods) == 0:
out.write(" No Methods\n")
else:
for m in i.methods:
out.write(" %s%s%s%s%s%s%s %s %s(%s);\n" % (
m.getter and "G" or " ",
m.setter and "S" or " ",
m.hidden and "H" or " ",
m.notxpcom and "N" or " ",
m.constructor and "C" or " ",
m.optargc and "O" or " ",
m.implicit_jscontext and "J" or " ",
str(m.result.type),
m.name,
m.params and ", ".join(str(p) for p in m.params) or ""
))
out.write(" Constants:\n")
if len(i.constants) == 0:
out.write(" No Constants\n")
else:
for c in i.constants:
out.write(" %s %s = %d;\n" % (c.type, c.name, c.value))
def xpt_dump(file):
"""
Dump the contents of |file| to stdout in the format of xpt_dump.
"""
t = Typelib.read(file)
t.dump(sys.stdout)
def xpt_link(inputs):
"""
Link all of the xpt files in |inputs| together and return the result
as a Typelib object. All entries in inputs may be filenames or
file-like objects. Non-scriptable interfaces that are unreferenced
from scriptable interfaces will be removed during linking.
"""
def read_input(i):
if isinstance(i, Typelib):
return i
return Typelib.read(i)
if not inputs:
print >>sys.stderr, "Usage: xpt_link <destination file> <input files>"
return None
# This is the aggregate list of interfaces.
interfaces = []
# This will be a dict of replaced interface -> replaced with
# containing interfaces that were replaced with interfaces from
# another typelib, and the interface that replaced them.
merged_interfaces = {}
for f in inputs:
t = read_input(f)
interfaces.extend(t.interfaces)
# Sort interfaces by name so we can merge adjacent duplicates
interfaces.sort(key=operator.attrgetter('name'))
Result = enum('Equal', # Interfaces the same, doesn't matter
'NotEqual', # Interfaces differ, keep both
'KeepFirst', # Replace second interface with first
'KeepSecond') # Replace first interface with second
def compare(i, j):
"""
Compare two interfaces, determine if they're equal or
completely different, or should be merged (and indicate which
one to keep in that case).
"""
if i == j:
# Arbitrary, just pick one
return Result.Equal
if i.name != j.name:
if i.iid == j.iid and i.iid != Interface.UNRESOLVED_IID:
# Same IID but different names: raise an exception.
raise DataError(
"Typelibs contain definitions of interface %s"
" with different names (%s (%s) vs %s (%s))!" %
(i.iid, i.name, i.xpt_filename, j.name, j.xpt_filename))
# Otherwise just different interfaces.
return Result.NotEqual
# Interfaces have the same name, so either they need to be merged
# or there's a data error. Sort out which one to keep
if i.resolved != j.resolved:
# prefer resolved interfaces over unresolved
if j.resolved:
assert i.iid == j.iid or i.iid == Interface.UNRESOLVED_IID
# keep j
return Result.KeepSecond
else:
assert i.iid == j.iid or j.iid == Interface.UNRESOLVED_IID
# replace j with i
return Result.KeepFirst
elif i.iid != j.iid:
# Prefer unresolved interfaces with valid IIDs
if j.iid == Interface.UNRESOLVED_IID:
# replace j with i
assert not j.resolved
return Result.KeepFirst
elif i.iid == Interface.UNRESOLVED_IID:
# keep j
assert not i.resolved
return Result.KeepSecond
else:
# Same name but different IIDs: raise an exception.
raise DataError(
"Typelibs contain definitions of interface %s"
" with different IIDs (%s (%s) vs %s (%s))!" %
(i.name, i.iid, i.xpt_filename,
j.iid, j.xpt_filename))
raise DataError("No idea what happened here: %s:%s (%s), %s:%s (%s)" %
(i.name, i.iid, i.xpt_filename, j.name, j.iid, j.xpt_filename))
# Compare interfaces pairwise to find duplicates that should be merged.
i = 1
while i < len(interfaces):
res = compare(interfaces[i-1], interfaces[i])
if res == Result.NotEqual:
i += 1
elif res == Result.Equal:
# Need to drop one but it doesn't matter which
del interfaces[i]
elif res == Result.KeepFirst:
merged_interfaces[interfaces[i]] = interfaces[i-1]
del interfaces[i]
elif res == Result.KeepSecond:
merged_interfaces[interfaces[i-1]] = interfaces[i]
del interfaces[i-1]
# Now fixup any merged interfaces
def checkType(t):
if isinstance(t, InterfaceType) and t.iface in merged_interfaces:
t.iface = merged_interfaces[t.iface]
elif isinstance(t, ArrayType) and \
isinstance(t.element_type, InterfaceType) and \
t.element_type.iface in merged_interfaces:
t.element_type.iface = merged_interfaces[t.element_type.iface]
for i in interfaces:
# Replace parent references
if i.parent in merged_interfaces:
i.parent = merged_interfaces[i.parent]
for m in i.methods:
# Replace InterfaceType params and return values
checkType(m.result.type)
for p in m.params:
checkType(p.type)
# There's no need to have non-scriptable interfaces in a typelib, and
# removing them saves memory when typelibs are loaded. But we can't
# just blindly remove all non-scriptable interfaces, since we still
# need to know about non-scriptable interfaces referenced from
# scriptable interfaces.
worklist = set(i for i in interfaces if i.scriptable)
required_interfaces = set()
def maybe_add_to_worklist(iface):
if iface in required_interfaces or iface in worklist:
return
worklist.add(iface)
while worklist:
i = worklist.pop()
required_interfaces.add(i)
if i.parent:
maybe_add_to_worklist(i.parent)
for m in i.methods:
if isinstance(m.result.type, InterfaceType):
maybe_add_to_worklist(m.result.type.iface)
for p in m.params:
if isinstance(p.type, InterfaceType):
maybe_add_to_worklist(p.type.iface)
elif isinstance(p.type, ArrayType) and isinstance(p.type.element_type, InterfaceType):
maybe_add_to_worklist(p.type.element_type.iface)
interfaces = list(required_interfaces)
# Re-sort interfaces (by IID)
interfaces.sort()
return Typelib(interfaces=interfaces)
if __name__ == '__main__':
if len(sys.argv) < 3:
print >>sys.stderr, "xpt <dump|link> <files>"
sys.exit(1)
if sys.argv[1] == 'dump':
xpt_dump(sys.argv[2])
elif sys.argv[1] == 'link':
xpt_link(sys.argv[3:]).write(sys.argv[2])
| mpl-2.0 | 720,056,686,050,744,700 | 35.57987 | 164 | 0.575311 | false |
forairan/gagar | gagar/window.py | 1 | 4373 | from gi.repository import Gtk, Gdk
from agarnet.vec import Vec
class WorldViewer(object):
"""
Draws one world and handles keys/mouse.
Does not poll for events itself.
Calls input_subscriber.on_{key_pressed|mouse_moved}() methods on key/mouse input.
Calls draw_subscriber.on_draw_{background|cells|hud}() methods when drawing.
"""
INFO_SIZE = 300
def __init__(self, world):
self.world = world
self.player = None # the focused player, or None to show full world
# the class instance on which to call on_key_pressed and on_mouse_moved
self.input_subscriber = None
# same for draw_background, draw_cells, draw_hud
self.draw_subscriber = None
self.win_size = Vec(1000, 1000 * 9 / 16)
self.screen_center = self.win_size / 2
self.screen_scale = 1
self.world_center = Vec(0, 0)
self.mouse_pos = Vec(0, 0)
window = Gtk.Window()
window.set_title('agar.io')
window.set_default_size(self.win_size.x, self.win_size.y)
window.connect('delete-event', Gtk.main_quit)
self.drawing_area = Gtk.DrawingArea()
window.add(self.drawing_area)
window.set_events(Gdk.EventMask.POINTER_MOTION_MASK)
window.connect('key-press-event', self.key_pressed)
window.connect('motion-notify-event', self.mouse_moved)
window.connect('button-press-event', self.mouse_pressed)
self.drawing_area.connect('draw', self.draw)
window.show_all()
def focus_player(self, player):
"""Follow this client regarding center and zoom."""
self.player = player
self.world = player.world
def show_full_world(self, world=None):
"""
Show the full world view instead of one client.
:param world: optionally update the drawn world
"""
self.player = None
if world:
self.world = world
def key_pressed(self, _, event):
"""Called by GTK. Set input_subscriber to handle this."""
if not self.input_subscriber: return
val = event.keyval
try:
char = chr(val)
except ValueError:
char = ''
self.input_subscriber.on_key_pressed(val=val, char=char)
def mouse_moved(self, _, event):
"""Called by GTK. Set input_subscriber to handle this."""
if not self.input_subscriber: return
self.mouse_pos = Vec(event.x, event.y)
pos_world = self.screen_to_world_pos(self.mouse_pos)
self.input_subscriber.on_mouse_moved(pos=self.mouse_pos, pos_world=pos_world)
def mouse_pressed(self, _, event):
"""Called by GTK. Set input_subscriber to handle this."""
if not self.input_subscriber: return
self.input_subscriber.on_mouse_pressed(button=event.button)
def world_to_screen_pos(self, world_pos):
return (world_pos - self.world_center) \
.imul(self.screen_scale).iadd(self.screen_center)
def screen_to_world_pos(self, screen_pos):
return (screen_pos - self.screen_center) \
.idiv(self.screen_scale).iadd(self.world_center)
def world_to_screen_size(self, world_size):
return world_size * self.screen_scale
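    # Worked example (hypothetical values): with world_center=Vec(100, 100),
    # screen_center=Vec(500, 281) and screen_scale=0.5, the world position
    # Vec(120, 100) maps to the screen position Vec(510, 281);
    # screen_to_world_pos() applies the inverse transform.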
def recalculate(self):
alloc = self.drawing_area.get_allocation()
self.win_size.set(alloc.width, alloc.height)
self.screen_center = self.win_size / 2
if self.player: # any client is focused
window_scale = max(self.win_size.x / 1920, self.win_size.y / 1080)
self.screen_scale = self.player.scale * window_scale
self.world_center = self.player.center
self.world = self.player.world
elif self.world.size:
self.screen_scale = min(self.win_size.x / self.world.size.x,
self.win_size.y / self.world.size.y)
self.world_center = self.world.center
else:
# happens when the window gets drawn before the world got updated
self.screen_scale = 1
self.world_center = Vec(0, 0)
def draw(self, _, c):
if self.draw_subscriber:
self.recalculate()
self.draw_subscriber.on_draw_background(c, self)
self.draw_subscriber.on_draw_cells(c, self)
self.draw_subscriber.on_draw_hud(c, self)
| gpl-3.0 | 2,771,065,779,682,986,500 | 36.059322 | 85 | 0.61308 | false |
jedimatt42/pi-messaging | htdocs/tipi_editor.py | 1 | 3428 | # tipi_editor
#
# TIPI web administration
#
# Corey J. Anderson ElectricLab.com 2017
# et al.
import os
import logging
import uuid
from ti_files import ti_files
from subprocess import call
logger = logging.getLogger(__name__)
basicSuffixes = ('.b99', '.bas', '.xb')
tipi_disk_base = '/home/tipi/tipi_disk'
def load(file_name):
edit_file_path = tipi_disk_base + '/' + file_name
file_contents = basicContents(edit_file_path)
# If it isn't a BASIC PROGRAM IMAGE, then try plain file
if not file_contents:
if file_name.lower().endswith(basicSuffixes):
with open(edit_file_path, "rb") as fh:
file_contents = fh.read()
editor_data = { 'file_contents': file_contents,
'file_name': file_name,
'status_message': '' }
return editor_data
def new(file_name):
editor_data = { 'file_contents': '',
'file_name': file_name,
'status_message': '' }
return editor_data
def save(file_name, data):
logger.debug("save %s", file_name)
edit_file_path = tipi_disk_base + '/' + file_name
logger.debug("edit_file_path %s", edit_file_path)
if file_name.lower().endswith(basicSuffixes):
logger.debug("saving ascii basic file")
with open(edit_file_path, "wb") as fh:
fh.write(data)
else:
logger.debug("saving program image basic file")
writeBasicContents(edit_file_path, data)
editor_data = { 'file_contents': data,
'file_name': file_name,
'status_message': '' }
return editor_data
def basicContents(filename):
logger.debug("fetching BASIC PROGRAM as ascii in %s", filename)
# We are assuming the test for FIAD isTiFile has already passed.
prg_tmp_file = '/tmp/' + str(uuid.uuid4()) + '.tmp'
bas_tmp_file = '/tmp/' + str(uuid.uuid4()) + '.tmp'
try:
# strip the FIAD header off to get the raw file xbas99 needs.
with open(filename, "rb") as tifile:
with open(prg_tmp_file, "wb") as program:
bytes = bytearray(tifile.read())
if not ti_files.isProgram(bytes):
return False
program.write(bytes[128:])
call(['/home/tipi/xdt99/xbas99.py', '-d', prg_tmp_file, '-o', bas_tmp_file])
if ti_files.isTiBasicAscii(bas_tmp_file):
with open(bas_tmp_file, 'rb') as content_file:
return content_file.read().decode("latin_1")
finally:
if os.path.exists(prg_tmp_file):
os.unlink(prg_tmp_file)
if os.path.exists(bas_tmp_file):
os.unlink(bas_tmp_file)
return False
def writeBasicContents(edit_file_name, file_contents):
bas_tmp_file = '/tmp/' + str(uuid.uuid4()) + '.tmp'
prg_tmp_file = '/tmp/' + str(uuid.uuid4()) + '.tmp'
try:
with open(bas_tmp_file, 'wb') as file:
file.write(file_contents.encode("latin_1"))
# Encode ASCII file to TI's binary BASIC format:
#
call(['xbas99.py', '-c', bas_tmp_file, '-o', prg_tmp_file])
# Now convert to TIFILES format:
#
call(['xdm99.py', '-T', prg_tmp_file, '-o', edit_file_name])
finally:
if os.path.exists(prg_tmp_file):
os.unlink(prg_tmp_file)
if os.path.exists(bas_tmp_file):
os.unlink(bas_tmp_file)
| gpl-3.0 | -5,738,017,850,617,338,000 | 30.163636 | 85 | 0.573221 | false |
galaxy-modal/transcriptomics | galaxy-tools/stderr_wrapper.py | 1 | 1676 | #!/usr/bin/python
"""
Wrapper that executes a program with its arguments but reports standard error
messages only if the program exit status was not 0. This is useful to prevent
Galaxy to interpret that there was an error if something was printed on stderr,
e.g. if this was simply a warning.
Example: ./stderr_wrapper.py myprog arg1 -f arg2
Author: Florent Angly
"""
import sys, subprocess
assert sys.version_info[:2] >= ( 2, 4 )
def stop_err( msg ):
sys.stderr.write( "%s\n" % msg )
sys.exit()
def __main__():
# Get command-line arguments
args = sys.argv
# Remove name of calling program, i.e. ./stderr_wrapper.py
args.pop(0)
# If there are no arguments left, we're done
if len(args) == 0:
return
# If one needs to silence stdout
args.append( ">" )
args.append( "/dev/null" )
#cmdline = " ".join(args)
#print cmdline
try:
# Run program
proc = subprocess.Popen( args=args, shell=False, stderr=subprocess.PIPE )
returncode = proc.wait()
# Capture stderr, allowing for case where it's very large
stderr = ''
buffsize = 1048576
try:
while True:
stderr += proc.stderr.read( buffsize )
if not stderr or len( stderr ) % buffsize != 0:
break
except OverflowError:
pass
# Running Grinder failed: write error message to stderr
if returncode != 0:
raise Exception, stderr
except Exception, e:
# Running Grinder failed: write error message to stderr
stop_err( 'Error: ' + str( e ) )
if __name__ == "__main__": __main__()
| gpl-2.0 | -7,320,840,070,516,737,000 | 28.403509 | 81 | 0.601432 | false |
inkvisit/sarmacoins | contrib/seeds/makeseeds.py | 1 | 3522 | #!/usr/bin/env python
#
# Generate seeds.txt from Pieter's DNS seeder
#
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 200000
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = set([
"127.0.0.1"
])
import re
import sys
import dns.resolver
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):9887$")  # change the port here
PATTERN_AGENT = re.compile(r"^(\/Satoshi:0.8.6\/|\/Satoshi:0.9.(2|3)\/|\/Core:0.1(0|1|2).\d{1,2}.\d{1,2}\/)$")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
# Match only IPv4
m = PATTERN_IPV4.match(sline[0])
if m is None:
return None
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
# Skip bad results.
if sline[1] == 0:
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'ip': m.group(1),
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
}
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
result = []
asn_count = {}
for ip in ips:
if len(result) == max_total:
break
try:
asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
return result
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
    # Skip entries without a valid IPv4 address.
ips = [ip for ip in ips if ip is not None]
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['ipnum']))
for ip in ips:
print ip['ip']
if __name__ == '__main__':
main()
| mit | -8,680,919,261,118,517,000 | 29.495652 | 186 | 0.572569 | false |
openstack/designate | designate/objects/adapters/yaml/pool_target_master.py | 1 | 1152 | # Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from designate.objects.adapters.yaml import base
from designate import objects
class PoolTargetMasterYAMLAdapter(base.YAMLAdapter):
ADAPTER_OBJECT = objects.PoolTargetMaster
MODIFICATIONS = {
'fields': {
'host': {
'read_only': False
},
'port': {
'read_only': False
},
}
}
class PoolTargetMasterListYAMLAdapter(base.YAMLAdapter):
ADAPTER_OBJECT = objects.PoolTargetMasterList
MODIFICATIONS = {}
| apache-2.0 | -6,089,063,722,049,468,000 | 28.538462 | 78 | 0.669271 | false |
camptocamp/QGIS | python/plugins/processing/outputs/OutputExtent.py | 1 | 1632 | # -*- coding: utf-8 -*-
"""
***************************************************************************
OutputNumber.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from processing.outputs.Output import Output
class OutputExtent(Output):
def __init__(self, name="", description=""):
self.name = name
self.description = description
self.value = None
self.hidden = True
def setValue(self, value):
try:
            if value is not None and isinstance(value, basestring):
                self.value = value.strip()
else:
self.value = ",".join([str(v) for v in value])
return True
except:
return False
| gpl-2.0 | 8,318,590,386,951,244,000 | 36.090909 | 75 | 0.424632 | false |
ujikit/Raspberry | Pir_Sensor.py | 1 | 1093 | #!/usr/bin/python
import RPi.GPIO as GPIO
import time
import os
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(27,GPIO.OUT)
GPIO_PIR = 7
print "PIR Module Test (CTRL-C to exit)"
# Set pin as input
GPIO.setup(GPIO_PIR,GPIO.IN) # Echo
Current_State = 0
Previous_State = 0
try:
print "Waiting for PIR to settle ..."
# Loop until PIR output is 0
while GPIO.input(GPIO_PIR)==1:
Current_State = 0
print " Ready"
# Loop until users quits with CTRL-C
while True :
# Read PIR state
Current_State = GPIO.input(GPIO_PIR)
if Current_State==1 and Previous_State==0:
GPIO.output(27,GPIO.HIGH)
time.sleep(0.5)
GPIO.output(27,GPIO.LOW)
Previous_State=1
os.system('mpg321 /home/alarm.mp3 &')
      # mpg321 keeps playing in the background; pause briefly before resuming the loop.
      time.sleep(1)
elif Current_State==0 and Previous_State==1:
# PIR has returned to ready state
print " Ready"
Previous_State=0
# Wait for 10 milliseconds
time.sleep(0.01)
except KeyboardInterrupt:
print " Quit"
# Reset GPIO settings
GPIO.cleanup()
| gpl-3.0 | -486,704,074,232,634,300 | 21.770833 | 48 | 0.650503 | false |
canarie/openstack-dashboard | django-openstack/src/django_openstack/nova/views/instances.py | 1 | 9293 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing Nova instances.
"""
from django import http
from django import template
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render_to_response
from django.utils.translation import ugettext as _
from django_openstack import log as logging
from django_openstack.nova import exceptions
from django_openstack.nova import forms as nova_forms
from django_openstack.nova import shortcuts
from django_openstack.nova.exceptions import handle_nova_error
import boto.ec2.ec2object
LOG = logging.getLogger('django_openstack.nova')
@login_required
@handle_nova_error
def index(request, project_id):
project = shortcuts.get_project_or_404(request, project_id)
instances = sorted(project.get_instances(),
key=lambda k: k.public_dns_name)
return render_to_response('django_openstack/nova/instances/index.html', {
'region': project.region,
'project': project,
'instances': instances,
'detail': False,
}, context_instance=template.RequestContext(request))
@login_required
@handle_nova_error
def detail(request, project_id, instance_id):
project = shortcuts.get_project_or_404(request, project_id)
instance = project.get_instance(instance_id)
instances = sorted(project.get_instances(),
key=lambda k: k.public_dns_name)
if not instance:
raise http.Http404()
return render_to_response('django_openstack/nova/instances/index.html', {
'region': project.region,
'project': project,
'selected_instance': instance,
'instances': instances,
'update_form': nova_forms.UpdateInstanceForm(instance),
'enable_vnc': settings.ENABLE_VNC,
'detail': True,
}, context_instance=template.RequestContext(request))
@login_required
@handle_nova_error
def performance(request, project_id, instance_id):
project = shortcuts.get_project_or_404(request, project_id)
instance = project.get_instance(instance_id)
if not instance:
raise http.Http404()
return render_to_response(
'django_openstack/nova/instances/performance.html',
{'region': project.region,
'project': project,
'instance': instance,
'update_form': nova_forms.UpdateInstanceForm(instance)},
context_instance=template.RequestContext(request))
# TODO(devcamcar): Wrap this in an @ajax decorator.
def refresh(request, project_id):
# TODO(devcamcar): This logic belongs in decorator.
if not request.user.is_authenticated():
return http.HttpResponseForbidden()
project = shortcuts.get_project_or_404(request, project_id)
instances = sorted(project.get_instances(),
key=lambda k: k.public_dns_name)
return render_to_response(
'django_openstack/nova/instances/_instances_list.html',
{'project': project,
'instances': instances},
context_instance=template.RequestContext(request))
@handle_nova_error
def refresh_detail(request, project_id, instance_id):
# TODO(devcamcar): This logic belongs in decorator.
if not request.user.is_authenticated():
return http.HttpResponseForbidden()
project = shortcuts.get_project_or_404(request, project_id)
instance = project.get_instance(instance_id)
instances = sorted(project.get_instances(),
key=lambda k: k.public_dns_name)
return render_to_response(
'django_openstack/nova/instances/_instances_list.html',
{'project': project,
'selected_instance': instance,
'instances': instances},
context_instance=template.RequestContext(request))
@login_required
@handle_nova_error
def terminate(request, project_id):
project = shortcuts.get_project_or_404(request, project_id)
if request.method == 'POST':
instance_id = request.POST['instance_id']
try:
project.terminate_instance(instance_id)
except exceptions.NovaApiError, e:
messages.error(request,
_('Unable to terminate %(inst)s: %(msg)s') %
{'inst': instance_id, 'msg': e.message})
LOG.error('Unable to terminate instance "%s" on project "%s".'
' Exception:"%s"' % (instance_id, project_id, e.message))
except exceptions.NovaUnauthorizedError, e:
messages.error(request, 'Permission Denied')
LOG.error('User "%s" denied permission to terminate instance'
' "%s" on project "%s"' %
(str(request.user), instance_id, project_id))
else:
messages.success(request,
_('Instance %(inst)s has been terminated.') %
{'inst': instance_id})
LOG.info('Instance "%s" terminated on project "%s"' %
(instance_id, project_id))
return redirect('nova_instances', project_id)
@login_required
@handle_nova_error
def console(request, project_id, instance_id):
project = shortcuts.get_project_or_404(request, project_id)
conn = project.get_openstack_connection()
console = conn.get_console_output(instance_id)
response = http.HttpResponse(mimetype='text/plain')
response.write(console.output)
response.flush()
return response
@login_required
@handle_nova_error
def vnc(request, project_id, instance_id):
project = shortcuts.get_project_or_404(request, project_id)
conn = project.get_openstack_connection()
params = {'InstanceId': instance_id}
vnc = conn.get_object('GetVncConsole',
params,
boto.ec2.ec2object.EC2Object)
return http.HttpResponseRedirect(vnc.url)
@login_required
@handle_nova_error
def graph(request, project_id, instance_id, graph_name):
project = shortcuts.get_project_or_404(request, project_id)
graph = project.get_instance_graph(instance_id, graph_name)
if graph is None:
raise http.Http404()
response = http.HttpResponse(mimetype='image/png')
response.write(graph)
return response
@login_required
@handle_nova_error
def update(request, project_id, instance_id):
project = shortcuts.get_project_or_404(request, project_id)
instance = project.get_instance(instance_id)
if not instance:
raise http.Http404()
if request.method == 'POST':
form = nova_forms.UpdateInstanceForm(instance, request.POST)
if form.is_valid():
try:
project.update_instance(instance_id, form.cleaned_data)
except exceptions.NovaApiError, e:
messages.error(request,
_('Unable to update instance %(inst)s: %(msg)s') %
{'inst': instance_id, 'msg': e.message})
LOG.error('Unable to update instance "%s" on project "%s".'
' Exception message: "%s"' %
(instance_id, project_id, e.message))
except exceptions.NovaUnauthorizedError, e:
messages.error(request, 'Permission Denied')
LOG.error('User "%s" denied permission to update instance'
' "%s" on project "%s"' %
(str(request.user), instance_id, project_id))
else:
messages.success(request,
_('Instance %(inst)s has been updated.') %
{'inst': instance_id})
LOG.info('Instance "%s" updated on project "%s"' %
(instance_id, project_id))
return redirect('nova_instances', project_id)
else:
return render_to_response(
'django_openstack/nova/instances/edit.html',
{'region': project.region,
'project': project,
'instance': instance,
'update_form': form},
context_instance=template.RequestContext(request))
else:
return render_to_response(
'django_openstack/nova/instances/edit.html',
{'region': project.region,
'project': project,
'instance': instance,
'update_form': nova_forms.UpdateInstanceForm(instance)},
context_instance=template.RequestContext(request))
| apache-2.0 | 2,217,813,549,633,417,500 | 35.731225 | 79 | 0.633488 | false |
HXLStandard/hxl-proxy | hxl_proxy/recipes.py | 1 | 5398 | """ Manage a data-transformation recipe
Started April 2019 by David Megginson
License: Public Domain
"""
import flask, hxl_proxy, hxl_proxy.dao, hxl_proxy.filters, logging, werkzeug
class Recipe:
""" Class to hold a HXL Proxy recipe.
The recipe can come either from the request parameters, or from a saved recipe
in the database. For a saved recipe, it's still possible to override
certain properties (especially the URL) with the request parameters, so that
you can use the same recipe with multiple source URLs (disabled for private
datasets with authentication tokens).
"""
RECIPE_OVERRIDES = ['url', 'schema_url', 'stub']
""" Properties that may be overridden in a saved recipe """
def __init__(self, recipe_id=None, auth=False, request_args=None):
""" Recipe constructor
@param recipe_id: the hash identifier of an existing saved recipe
@param auth: if true, the user needs to authenticate to use the recipe
@param request_args: custom args to substitute for the current Flask request
"""
# initialise the properties
self.recipe_id = str(recipe_id) if recipe_id is not None else None
self.args = None
self.name = None
self.description = None
self.cloneable = True
self.passhash = None
self.stub = None
self.overridden = False
self.auth = auth
# default to the request GET parameters
if request_args is None:
request_args = flask.request.args
# do we have a saved recipe? if so, then populate from the saved data
if recipe_id is not None:
# read the recipe from the database
saved_recipe = hxl_proxy.dao.recipes.read(self.recipe_id)
if not saved_recipe:
raise werkzeug.exceptions.NotFound("No saved recipe for {}".format(recipe_id))
# populate the class from the saved recipe dict
self.fromDict(saved_recipe)
# check if this page requires authorisation
if auth and not self.check_auth():
raise werkzeug.exceptions.Unauthorized("Wrong or missing password.")
# allow overrides *only* if we're not using a private dataset
# (not sending an HTTP Authorization: header)
if "authorization_token" not in self.args:
for key in self.RECIPE_OVERRIDES:
if key in request_args:
self.overridden = True
self.args[key] = request_args[key]
# we don't have a saved recipe: use the HTTP GET parameters
else:
self.args = request_args
self.stub = request_args.get("stub")
@property
def url(self):
return self.args.get("url")
@property
def schema_url(self):
return self.args.get("schema_url")
def fromDict(self, props):
""" Deserialise this object from a dict """
self.recipe_id = props.get("recipe_id")
self.name = props.get("name")
self.description = props.get("description")
self.cloneable = props.get("cloneable")
self.passhash = props.get("passhash")
self.stub = props.get("stub")
self.date_created = props.get('date_created')
self.date_modified = props.get('date_modified')
self.args = dict(props.get("args"))
def toDict(self):
""" Serialise this object to a dict """
return {
"recipe_id": self.recipe_id,
"name": self.name,
"description": self.description,
"cloneable": self.cloneable,
"passhash": self.passhash,
"stub": self.stub,
"args": self.args,
}
def logs(self, level="WARNING"):
handler = Recipe.LogHandler(level)
logging.getLogger('hxl').addHandler(handler)
logging.getLogger('hxl_proxy').addHandler(handler)
source = hxl_proxy.filters.setup_filters(self)
try:
for row in source:
pass
except:
pass
return handler.messages
def check_auth(self, password=None):
""" Check whether a users is authorised to access this page.
@param password: a cleartext password
@returns: True if the user is authorised.
"""
# does this recipe require a password?
if self.passhash:
# do we have a clear-text password?
if password:
session_passhash = hxl_proxy.util.make_md5(password)
flask.session['passhash'] = session_passhash
# no password, so look in the session token
else:
session_passhash = flask.session.get('passhash')
# do the password hashes match?
if self.passhash == session_passhash:
return True
else:
flask.session['passhash'] = None
flask.flash("Wrong password")
return False
# no password required, so always OK
else:
return True
class LogHandler(logging.Handler):
def __init__(self, level):
super().__init__(level)
self.messages = []
def handle(self, record):
self.messages.append(record)
| unlicense | 7,937,438,209,383,225,000 | 32.116564 | 94 | 0.585031 | false |
diego-d5000/MisValesMd | env/lib/python2.7/site-packages/django/conf/locale/id/formats.py | 1 | 2187 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j N Y'
DATETIME_FORMAT = "j N Y, G.i"
TIME_FORMAT = 'G.i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd-m-Y'
SHORT_DATETIME_FORMAT = 'd-m-Y G.i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d-%m-%y', '%d/%m/%y', # '25-10-09', 25/10/09'
'%d-%m-%Y', '%d/%m/%Y', # '25-10-2009', 25/10/2009'
'%d %b %Y', # '25 Oct 2006',
'%d %B %Y', # '25 October 2006'
)
TIME_INPUT_FORMATS = (
'%H.%M.%S', # '14.30.59'
'%H.%M', # '14.30'
)
DATETIME_INPUT_FORMATS = (
'%d-%m-%Y %H.%M.%S', # '25-10-2009 14.30.59'
'%d-%m-%Y %H.%M.%S.%f', # '25-10-2009 14.30.59.000200'
'%d-%m-%Y %H.%M', # '25-10-2009 14.30'
'%d-%m-%Y', # '25-10-2009'
    '%d-%m-%y %H.%M.%S', # '25-10-09 14.30.59'
    '%d-%m-%y %H.%M.%S.%f', # '25-10-09 14.30.59.000200'
    '%d-%m-%y %H.%M', # '25-10-09 14.30'
    '%d-%m-%y', # '25-10-09'
'%m/%d/%y %H.%M.%S', # '10/25/06 14.30.59'
'%m/%d/%y %H.%M.%S.%f', # '10/25/06 14.30.59.000200'
'%m/%d/%y %H.%M', # '10/25/06 14.30'
'%m/%d/%y', # '10/25/06'
'%m/%d/%Y %H.%M.%S', # '25/10/2009 14.30.59'
'%m/%d/%Y %H.%M.%S.%f', # '25/10/2009 14.30.59.000200'
'%m/%d/%Y %H.%M', # '25/10/2009 14.30'
'%m/%d/%Y', # '10/25/2009'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| mit | -4,441,679,318,017,079,000 | 40.057692 | 77 | 0.422954 | false |
minghuascode/pyj | library/pyjamas/ui/TreeItem.py | 1 | 12215 | # Copyright 2006 James Tauber and contributors
# Copyright (C) 2009 Luke Kenneth Casson Leighton <[email protected]>
# Copyright (C) 2011 Vsevolod Fedorov <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyjamas import DOM
from pyjamas import Factory
from pyjamas.Canvas.GWTCanvas import GWTCanvas
from pyjamas.Canvas import Color
from pyjamas.ui.UIObject import UIObject
from pyjamas.ui.TreeContentPanel import TreeContentPanel
# http://www.greywyvern.com/code/php/binary2base64 - yaay!
# http://websemantics.co.uk/online_tools/image_to_data_uri_convertor/
# this 2nd one didn't put %3D at the end, you have to back-convert that
# into "==" characters.
tree_closed = "data:image/gif;base64,R0lGODlhEAAQAJECAMzMzAAAAP///wAAACH5BAEAAAIALAAAAAAQABAAAAIjlI+py+1vgJxzAYtNOFd1sUVYQJLCh3zhmYFcm1oUBdU2VAAAOw=="
tree_open = "data:image/gif;base64,R0lGODlhEAAQAJECAAAAAMDAwAAAAP///yH5BAEAAAIALAAAAAAQABAAAAIflI+py+1vgpxzBYvV1TldAILC5nUIeZoHulIUBMdQAQA7"
tree_white = "data:image/gif;base64,R0lGODlhEAAQAJEAAP///wAAAP///wAAACH5BAEAAAIALAAAAAAQABAAAAIOlI+py+0Po5y02ouzPgUAOw=="
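# Illustrative helper (an addition for this sketch, not part of the original
# pyjamas module): the data URIs above can also be generated programmatically,
# which avoids the manual %3D -> "==" back-conversion mentioned in the comment
# above. The file path and MIME type are placeholders.
def _image_to_data_uri(path, mime="image/gif"):
    import base64
    with open(path, "rb") as f:
        encoded = base64.b64encode(f.read())
    if not isinstance(encoded, str):
        # Python 3 returns bytes; decode for string formatting
        encoded = encoded.decode("ascii")
    return "data:%s;base64,%s" % (mime, encoded)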
class TreeItem(UIObject):
# also callable as TreeItem(widget)
def __init__(self, html=None, **ka):
self.children = []
self.attached = False
self.contentPanel = None
self.itemTable = None
self.contentElem = None
self.imgElem = None
self.childSpanElem = None
self.open = False
self.parent = None
self.selected = False
self.tree = None
self.userObject = None
element = ka.pop('Element', None) or DOM.createDiv()
self.setElement(element)
self.itemTable = DOM.createTable()
self.contentElem = DOM.createSpan()
self.childSpanElem = DOM.createSpan()
self.imgElem = self.createImage()
tbody = DOM.createTBody()
tr = DOM.createTR()
tdImg = DOM.createTD()
tdContent = DOM.createTD()
DOM.appendChild(self.itemTable, tbody)
DOM.appendChild(tbody, tr)
DOM.appendChild(tr, tdImg)
DOM.appendChild(tr, tdContent)
DOM.setStyleAttribute(tdImg, "verticalAlign", "middle")
DOM.setStyleAttribute(tdContent, "verticalAlign", "middle")
DOM.setStyleAttribute(self.getElement(), "cursor", "pointer")
DOM.appendChild(self.getElement(), self.itemTable)
DOM.appendChild(self.getElement(), self.childSpanElem)
DOM.appendChild(tdImg, self.imgElem)
DOM.appendChild(tdContent, self.contentElem)
# XXX - can't set pos relative on a div node,
# or white_space on an HTML Table..
try:
DOM.setAttribute(self.getElement(), "position", "relative")
except:
pass
DOM.setStyleAttribute(self.contentElem, "display", "inline")
DOM.setStyleAttribute(self.getElement(), "whiteSpace", "nowrap")
try:
DOM.setAttribute(self.itemTable, "whiteSpace", "nowrap")
except:
pass
DOM.setStyleAttribute(self.childSpanElem, "whiteSpace", "nowrap")
self.setStyleName(self.contentElem, "gwt-TreeItem", True)
#if not ka.has_key('StyleName'): ka['StyleName']="gwt-TreeItem"
if html is not None:
try:
# messy. pyjd can do unicode, pyjs can't
if isinstance(html, unicode):
ka['HTML'] = html
elif isinstance(html, basestring):
ka['HTML'] = html
else:
ka['Widget'] = html
except:
if isinstance(html, basestring):
ka['HTML'] = html
else:
ka['Widget'] = html
UIObject.__init__(self, **ka)
def __iter__(self):
return self.children.__iter__()
def createImage(self):
return DOM.createImg()
# also callable as addItem(widget) and addItem(itemText)
def addItem(self, item):
return self.insertItem(item)
# also callable as addItem(widget) and addItem(itemText)
def insertItem(self, item, index=None):
if not hasattr(item, "getTree"):
#if not item.getTree:
item = TreeItem(item)
if (item.getParentItem() is not None) or (item.getTree() is not None):
item.remove()
item.setTree(self.tree)
item.setParentItem(self)
if index is None:
self.children.append(item)
else:
self.children.insert(index, item)
DOM.setStyleAttribute(item.getElement(), "marginLeft", "16px")
if index is None:
DOM.appendChild(self.childSpanElem, item.getElement())
else:
DOM.insertChild(self.childSpanElem, item.getElement(), index)
if len(self.children) == 1:
self.updateState()
return item
def onAttach(self):
if self.attached:
return
self.attached = True
for item in self.children:
item.onAttach()
w = self.getWidget()
if w:
w.onAttach()
def onDetach(self):
self.attached = False
for item in self.children:
item.onDetach()
w = self.getWidget()
if w:
w.onDetach()
def getChild(self, index):
if (index < 0) or (index >= len(self.children)):
return None
return self.children[index]
def getChildCount(self):
return len(self.children)
def getChildIndex(self, child):
return self.children.index(child)
def getHTML(self):
return DOM.getInnerHTML(self.contentElem)
def getText(self):
return DOM.getInnerText(self.contentElem)
def getParentItem(self):
return self.parent
def getState(self):
return self.open
def getTree(self):
return self.tree
def getUserObject(self):
return self.userObject
def getWidget(self):
if self.contentPanel is None:
return None
return self.contentPanel.getWidget()
def isSelected(self):
return self.selected
def remove(self):
if self.parent is not None:
self.parent.removeItem(self)
elif self.tree is not None:
self.tree.removeItem(self)
def removeItem(self, item):
if item not in self.children:
return
item.setTree(None)
item.setParentItem(None)
self.children.remove(item)
DOM.removeChild(self.childSpanElem, item.getElement())
if len(self.children) == 0:
self.updateState()
def removeItems(self):
while self.getChildCount() > 0:
self.removeItem(self.getChild(0))
def setHTML(self, html):
self.clearContentPanel()
DOM.setInnerHTML(self.contentElem, html)
def setText(self, text):
self.clearContentPanel()
DOM.setInnerText(self.contentElem, text)
def setSelected(self, selected):
if self.selected == selected:
return
self.selected = selected
self.setStyleName(self.contentElem, "gwt-TreeItem-selected", selected)
def setState(self, open, fireEvents=True):
# lkcl: experiment with allowing event state changed to be
# fired even on items with no children. otherwise you never
# get to find out if an end-item was selected!
if not open or len(self.children) != 0:
self.open = open
self.updateState()
if fireEvents:
self.tree.fireStateChanged(self)
def setUserObject(self, userObj):
self.userObject = userObj
def setWidget(self, widget):
self.ensureContentPanel()
self.contentPanel.setWidget(widget)
def clearContentPanel(self):
if self.contentPanel is not None:
child = self.contentPanel.getWidget()
if child is not None:
self.contentPanel.remove(child)
if self.tree is not None:
self.tree.disown(self.contentPanel)
self.contentPanel = None
def ensureContentPanel(self):
if self.contentPanel is None:
DOM.setInnerHTML(self.contentElem, "")
self.contentPanel = TreeContentPanel(self.contentElem)
self.contentPanel.setTreeItem(self)
if self.getTree() is not None:
self.tree.adopt(self.contentPanel)
def addTreeItems(self, accum):
for item in self.children:
accum.append(item)
item.addTreeItems(accum)
def getChildren(self):
return self.children
def getContentElem(self):
return self.contentElem
def getContentHeight(self):
return DOM.getIntAttribute(self.itemTable, "offsetHeight")
def getImageElement(self):
return self.imgElem
def getTreeTop(self):
item = self
ret = 0
while item is not None:
ret += DOM.getIntAttribute(item.getElement(), "offsetTop")
item = item.getParentItem()
return ret
def getFocusableWidget(self):
widget = self.getWidget()
if hasattr(widget, "setFocus"):
return widget
return None
def imgSrc(self, img):
if self.tree is None:
return img
src = self.tree.getImageBase() + img
return src
def setParentItem(self, parent):
self.parent = parent
def setTree(self, tree):
if self.tree == tree:
return
if self.tree is not None:
if self.tree.getSelectedItem() == self:
self.tree.setSelectedItem(None)
if self.contentPanel is not None:
self.tree.disown(self.contentPanel)
self.tree = tree
for child in self.children:
child.setTree(tree)
self.updateState()
if tree is not None and self.contentPanel is not None:
tree.adopt(self.contentPanel)
def updateState(self):
print "updateState"
if len(self.children) == 0:
self.setVisible(self.childSpanElem, False)
#DOM.setAttribute(self.imgElem, "src", self.imgSrc("tree_white.gif"))
self.drawImage("white")
return
if self.open:
self.setVisible(self.childSpanElem, True)
self.drawImage("open")
else:
self.setVisible(self.childSpanElem, False)
self.drawImage("closed")
def updateStateRecursive(self):
self.updateState()
for i in range(len(self.children)):
child = self.children[i]
child.updateStateRecursive()
def drawImage(self, mode):
if mode == "white":
src = tree_white
elif mode == "open":
src = tree_open
elif mode == "closed":
src = tree_closed
DOM.setAttribute(self.imgElem, "src", src)
class RootTreeItem(TreeItem):
def addItem(self, item):
self.insertItem(item)
def insertItem(self, item, index=None):
if (item.getParentItem() is not None) or (item.getTree() is not None):
item.remove()
item.setTree(self.getTree())
item.setParentItem(None)
if index is None:
self.children.append(item)
else:
self.children.insert(index, item)
DOM.setIntStyleAttribute(item.getElement(), "marginLeft", 0)
def removeItem(self, item):
if item not in self.children:
return
item.setTree(None)
item.setParentItem(None)
self.children.remove(item)
Factory.registerClass('pyjamas.ui.TreeItem', 'TreeItem', TreeItem)
| apache-2.0 | -4,233,855,153,443,371,500 | 30.240409 | 150 | 0.61228 | false |
prataprc/tayra | tayra/test/stdttl/ref/html5.ttl.py | 1 | 2138 | import imp
from io import StringIO
from pluggdapps.plugin import Plugin, implements
from tayra import BaseTTLPlugin
def __traceback_decorator__( frames ):
from copy import deepcopy
from os.path import basename
def _map2ttl( frame ):
filename = frame.filename
lineno = frame.lineno
lines = open(filename).readlines()[:lineno]
lines.reverse()
rc = {}
for l in lines :
if l.strip().startswith('# lineno') :
_, ttl_lineno = l.split(':', 1)
ttl_lineno = int( ttl_lineno )
ttl_text = open( _ttlfile ).readlines()[ ttl_lineno-1 ]
return ttl_lineno, ttl_text
return None, None
newframes = []
for frame in frames :
newframes.append( frame )
frameadded = getattr( frame, '_ttlframeadded', False )
basen = basename( frame.filename )
if basen.endswith( '.ttl.py' ) and basen == (basename( _ttlfile ) + '.py') and frameadded == False :
newframe = deepcopy( frame )
frame._ttlframeadded = True
try :
newframe.lineno, newframe.linetext = _map2ttl( newframe )
if newframe.lineno :
newframe.filename = _ttlfile
newframes.append( newframe )
except :
raise
continue
return newframes
def body( *args, **kwargs ) :
_m.pushbuf()
# lineno:1
_m.pushbuf()
_m.extend( ['<a "http://pluggdapps.com">'] )
_m.pushbuf()
# lineno:1
_m.extend( [' pluggdapps-link', '\n'] )
_m.handletag( _m.popbuftext(), _m.popbuftext(), **{'nl': '', 'oprune': False, 'indent': False, 'iprune': False} )
# lineno:2
_m.pushbuf()
_m.extend( ['<abbr "World Health Organisation">'] )
_m.pushbuf()
# lineno:2
_m.extend( [' WHO', '\n'] )
_m.handletag( _m.popbuftext(), _m.popbuftext(), **{'nl': '', 'oprune': False, 'indent': False, 'iprune': False} )
return _m.popbuftext()
# ---- Global Functions
# ---- Interface functions
# ---- Footer
| gpl-3.0 | -2,734,730,127,233,263,000 | 31.393939 | 134 | 0.53508 | false |
kandluis/document_summaries | summarizer/baselines.py | 1 | 2617 | '''
Main entry point for our text summarization using our baseline algorithm.
The baseline algorithm consists of assigning a weight to each sentence.
We define the weight of the
Copyright, 2015.
Authors:
Luis Perez ([email protected])
Kevin Eskici ([email protected])
'''
from . import utils
import numpy as np
def geom(p, k):
return (1.0 - p)**k * p
def concatDocs(D):
sents = []
for doc in D:
sents += doc
return sents
def baseline(D, k):
'''
Baseline simply takes the first k sentences in the documents.
'''
D = concatDocs(D)
mapping = {i:i for i in xrange(len(D))}
return range(k), D, mapping
def geomPriorBaseline(D, k, p=0.02):
D = concatDocs(D)
sentences, mapping = utils.cleanDocument(D)
probs = np.array([geom(p, i) for i in xrange(len(sentences))])
probs = probs / sum(probs)
summary = np.random.choice(xrange(len(sentences)), size=k,
replace=False, p=probs)
return summary, D, mapping
def modifiedGeomPriorBaseline(D, k, p=0.02):
D = concatDocs(D)
sentences, mapping = utils.cleanDocument(D)
probs = np.array([geom(p, i) for i in xrange(1, len(sentences))])
probs = probs / sum(probs)
summary = np.random.choice(xrange(1, len(sentences)), size=k,
replace=False, p=probs)
summary = np.append(0, summary)
return summary, D, mapping
def multipleGeomPrior(D, k, p=0.02):
probs = []
for doc in D:
sentences, _ = utils.cleanDocument(doc)
docProbs = np.array([geom(p, i) for i in xrange(len(sentences))])
docProbs = docProbs / sum(docProbs)
probs += list(docProbs)
probs = np.array(probs)/sum(probs)
D = concatDocs(D)
sentences, mapping = utils.cleanDocument(D)
summary = np.random.choice(xrange(len(sentences)), size=k,
replace=False, p=probs)
return summary, D, mapping
def wordFreqBaseline(D, k):
D = concatDocs(D)
sentences, mapping = utils.cleanDocument(D)
freqs = utils.MyCounter()
for sentence in sentences:
for word in sentence:
freqs[word] += 1.0
summary = []
summary_words = set()
while len(summary) < min(k, len(D)):
sent_scores = [sum([freqs[word] for word in sentence
if word not in summary_words]) / len(sentence) for sentence in sentences]
selected = sent_scores.index(max(sent_scores))
summary.append(selected)
summary_words = summary_words.union(sentences[selected])
return summary, D, mapping
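# Minimal usage sketch (an illustrative addition, not part of the original
# module): exercises the position-based `baseline` summarizer on an invented
# toy corpus of two "documents", each given as a list of sentences.
def _demo_baseline():
    toy_docs = [
        ["first sentence of doc one", "second sentence of doc one"],
        ["first sentence of doc two", "second sentence of doc two"],
    ]
    chosen, flat_doc, _ = baseline(toy_docs, 2)
    return [flat_doc[i] for i in chosen]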
| apache-2.0 | 8,077,415,874,106,459,000 | 28.077778 | 101 | 0.621704 | false |
arunkgupta/gramps | gramps/gen/filters/rules/person/_iswitness.py | 1 | 2398 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....ggettext import gettext as _
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .. import Rule
from ....lib.eventroletype import EventRoleType
from ....lib.eventtype import EventType
#-------------------------------------------------------------------------
# "Witnesses"
#-------------------------------------------------------------------------
class IsWitness(Rule):
"""Witnesses"""
labels = [_('Event type:')]
name = _('Witnesses')
description = _("Matches people who are witnesses in any event")
category = _('Event filters')
def apply(self,db,person):
for event_ref in person.event_ref_list:
if event_ref and event_ref.role == EventRoleType.WITNESS:
# This is the witness.
# If event type was given, then check it.
if self.list[0]:
event = db.get_event_from_handle(event_ref.ref)
specified_type = EventType()
specified_type.set_from_xml_str(self.list[0])
if event.type == specified_type:
return True
else:
# event type was not specified, we're returning a match
return True
return False
| gpl-2.0 | 2,207,472,141,097,469,400 | 36.46875 | 75 | 0.51543 | false |
doriancoins/doriancoin | contrib/devtools/check-rpc-mappings.py | 1 | 5971 | #!/usr/bin/env python3
# Copyright (c) 2017 The Doriancoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Check RPC argument consistency."""
from collections import defaultdict
import os
import re
import sys
# Source files (relative to root) to scan for dispatch tables
SOURCES = [
"src/rpc/server.cpp",
"src/rpc/blockchain.cpp",
"src/rpc/mining.cpp",
"src/rpc/misc.cpp",
"src/rpc/net.cpp",
"src/rpc/rawtransaction.cpp",
"src/wallet/rpcwallet.cpp",
]
# Source file (relative to root) containing conversion mapping
SOURCE_CLIENT = 'src/rpc/client.cpp'
# Argument names that should be ignored in consistency checks
IGNORE_DUMMY_ARGS = {'dummy', 'arg0', 'arg1', 'arg2', 'arg3', 'arg4', 'arg5', 'arg6', 'arg7', 'arg8', 'arg9'}
class RPCCommand:
def __init__(self, name, args):
self.name = name
self.args = args
class RPCArgument:
def __init__(self, names, idx):
self.names = names
self.idx = idx
self.convert = False
def parse_string(s):
assert s[0] == '"'
assert s[-1] == '"'
return s[1:-1]
def process_commands(fname):
"""Find and parse dispatch table in implementation file `fname`."""
cmds = []
in_rpcs = False
with open(fname, "r") as f:
for line in f:
line = line.rstrip()
if not in_rpcs:
if re.match("static const CRPCCommand .*\[\] =", line):
in_rpcs = True
else:
if line.startswith('};'):
in_rpcs = False
elif '{' in line and '"' in line:
m = re.search('{ *("[^"]*"), *("[^"]*"), *&([^,]*), *{([^}]*)} *},', line)
assert m, 'No match to table expression: %s' % line
name = parse_string(m.group(2))
args_str = m.group(4).strip()
if args_str:
args = [RPCArgument(parse_string(x.strip()).split('|'), idx) for idx, x in enumerate(args_str.split(','))]
else:
args = []
cmds.append(RPCCommand(name, args))
assert not in_rpcs and cmds, "Something went wrong with parsing the C++ file: update the regexps"
return cmds
def process_mapping(fname):
"""Find and parse conversion table in implementation file `fname`."""
cmds = []
in_rpcs = False
with open(fname, "r") as f:
for line in f:
line = line.rstrip()
if not in_rpcs:
if line == 'static const CRPCConvertParam vRPCConvertParams[] =':
in_rpcs = True
else:
if line.startswith('};'):
in_rpcs = False
elif '{' in line and '"' in line:
m = re.search('{ *("[^"]*"), *([0-9]+) *, *("[^"]*") *},', line)
assert m, 'No match to table expression: %s' % line
name = parse_string(m.group(1))
idx = int(m.group(2))
argname = parse_string(m.group(3))
cmds.append((name, idx, argname))
assert not in_rpcs and cmds
return cmds
def main():
root = sys.argv[1]
# Get all commands from dispatch tables
cmds = []
for fname in SOURCES:
cmds += process_commands(os.path.join(root, fname))
cmds_by_name = {}
for cmd in cmds:
cmds_by_name[cmd.name] = cmd
# Get current convert mapping for client
client = SOURCE_CLIENT
mapping = set(process_mapping(os.path.join(root, client)))
print('* Checking consistency between dispatch tables and vRPCConvertParams')
# Check mapping consistency
errors = 0
for (cmdname, argidx, argname) in mapping:
try:
rargnames = cmds_by_name[cmdname].args[argidx].names
except IndexError:
print('ERROR: %s argument %i (named %s in vRPCConvertParams) is not defined in dispatch table' % (cmdname, argidx, argname))
errors += 1
continue
if argname not in rargnames:
print('ERROR: %s argument %i is named %s in vRPCConvertParams but %s in dispatch table' % (cmdname, argidx, argname, rargnames), file=sys.stderr)
errors += 1
# Check for conflicts in vRPCConvertParams conversion
# All aliases for an argument must either be present in the
# conversion table, or not. Anything in between means an oversight
# and some aliases won't work.
for cmd in cmds:
for arg in cmd.args:
convert = [((cmd.name, arg.idx, argname) in mapping) for argname in arg.names]
if any(convert) != all(convert):
print('ERROR: %s argument %s has conflicts in vRPCConvertParams conversion specifier %s' % (cmd.name, arg.names, convert))
errors += 1
arg.convert = all(convert)
# Check for conversion difference by argument name.
# It is preferable for API consistency that arguments with the same name
# have the same conversion, so bin by argument name.
all_methods_by_argname = defaultdict(list)
converts_by_argname = defaultdict(list)
for cmd in cmds:
for arg in cmd.args:
for argname in arg.names:
all_methods_by_argname[argname].append(cmd.name)
converts_by_argname[argname].append(arg.convert)
for argname, convert in converts_by_argname.items():
if all(convert) != any(convert):
if argname in IGNORE_DUMMY_ARGS:
# these are testing or dummy, don't warn for them
continue
print('WARNING: conversion mismatch for argument named %s (%s)' %
(argname, list(zip(all_methods_by_argname[argname], converts_by_argname[argname]))))
sys.exit(errors > 0)
if __name__ == '__main__':
main()
| mit | 9,194,470,161,719,797,000 | 36.791139 | 157 | 0.570591 | false |
mtlchun/edx | lms/djangoapps/courseware/tests/tests.py | 1 | 6747 | """
Test for LMS courseware app.
"""
from textwrap import dedent
from unittest import TestCase
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
import mock
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from courseware.tests.helpers import LoginEnrollmentTestCase
from xmodule.modulestore.tests.django_utils import TEST_DATA_XML_MODULESTORE as XML_MODULESTORE
from xmodule.modulestore.tests.django_utils import TEST_DATA_MIXED_TOY_MODULESTORE as TOY_MODULESTORE
from lms.djangoapps.lms_xblock.field_data import LmsFieldData
from xmodule.error_module import ErrorDescriptor
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
class ActivateLoginTest(LoginEnrollmentTestCase):
"""
Test logging in and logging out.
"""
def setUp(self):
super(ActivateLoginTest, self).setUp()
self.setup_user()
def test_activate_login(self):
"""
Test login -- the setup function does all the work.
"""
pass
def test_logout(self):
"""
Test logout -- setup function does login.
"""
self.logout()
class PageLoaderTestCase(LoginEnrollmentTestCase):
"""
Base class that adds a function to load all pages in a modulestore.
"""
def check_all_pages_load(self, course_key):
"""
Assert that all pages in the course load correctly.
`course_id` is the ID of the course to check.
"""
store = modulestore()
# Enroll in the course before trying to access pages
course = store.get_course(course_key)
self.enroll(course, True)
# Search for items in the course
items = store.get_items(course_key)
if len(items) < 1:
self.fail('Could not retrieve any items from course')
# Try to load each item in the course
for descriptor in items:
if descriptor.location.category == 'about':
self._assert_loads('about_course',
{'course_id': course_key.to_deprecated_string()},
descriptor)
elif descriptor.location.category == 'static_tab':
kwargs = {'course_id': course_key.to_deprecated_string(),
'tab_slug': descriptor.location.name}
self._assert_loads('static_tab', kwargs, descriptor)
elif descriptor.location.category == 'course_info':
self._assert_loads('info', {'course_id': course_key.to_deprecated_string()},
descriptor)
else:
kwargs = {'course_id': course_key.to_deprecated_string(),
'location': descriptor.location.to_deprecated_string()}
self._assert_loads('jump_to', kwargs, descriptor,
expect_redirect=True,
check_content=True)
def _assert_loads(self, django_url, kwargs, descriptor,
expect_redirect=False,
check_content=False):
"""
Assert that the url loads correctly.
If expect_redirect, then also check that we were redirected.
If check_content, then check that we don't get
an error message about unavailable modules.
"""
url = reverse(django_url, kwargs=kwargs)
response = self.client.get(url, follow=True)
if response.status_code != 200:
self.fail('Status %d for page %s' %
(response.status_code, descriptor.location))
if expect_redirect:
self.assertEqual(response.redirect_chain[0][1], 302)
if check_content:
self.assertNotContains(response, "this module is temporarily unavailable")
self.assertNotIsInstance(descriptor, ErrorDescriptor)
class TestXmlCoursesLoad(ModuleStoreTestCase, PageLoaderTestCase):
"""
Check that all pages in test courses load properly from XML.
"""
MODULESTORE = XML_MODULESTORE
def setUp(self):
super(TestXmlCoursesLoad, self).setUp()
self.setup_user()
def test_toy_course_loads(self):
# Load one of the XML based courses
# Our test mapping rules allow the MixedModuleStore
# to load this course from XML, not Mongo.
self.check_all_pages_load(SlashSeparatedCourseKey('edX', 'toy', '2012_Fall'))
class TestMongoCoursesLoad(ModuleStoreTestCase, PageLoaderTestCase):
"""
Check that all pages in test courses load properly from Mongo.
"""
MODULESTORE = TOY_MODULESTORE
def setUp(self):
super(TestMongoCoursesLoad, self).setUp()
self.setup_user()
@mock.patch('xmodule.course_module.requests.get')
def test_toy_textbooks_loads(self, mock_get):
mock_get.return_value.text = dedent("""
<?xml version="1.0"?><table_of_contents>
<entry page="5" page_label="ii" name="Table of Contents"/>
</table_of_contents>
""").strip()
location = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall').make_usage_key('course', '2012_Fall')
course = self.store.get_item(location)
self.assertGreater(len(course.textbooks), 0)
class TestDraftModuleStore(ModuleStoreTestCase):
def test_get_items_with_course_items(self):
store = modulestore()
# fix was to allow get_items() to take the course_id parameter
store.get_items(SlashSeparatedCourseKey('abc', 'def', 'ghi'), qualifiers={'category': 'vertical'})
# test success is just getting through the above statement.
# The bug was that 'course_id' argument was
# not allowed to be passed in (i.e. was throwing exception)
class TestLmsFieldData(TestCase):
"""
Tests of the LmsFieldData class
"""
def test_lms_field_data_wont_nest(self):
# Verify that if an LmsFieldData is passed into LmsFieldData as the
# authored_data, that it doesn't produced a nested field data.
#
# This fixes a bug where re-use of the same descriptor for many modules
# would cause more and more nesting, until the recursion depth would be
# reached on any attribute access
# pylint: disable=protected-access
base_authored = mock.Mock()
base_student = mock.Mock()
first_level = LmsFieldData(base_authored, base_student)
second_level = LmsFieldData(first_level, base_student)
self.assertEquals(second_level._authored_data, first_level._authored_data)
self.assertNotIsInstance(second_level._authored_data, LmsFieldData)
| agpl-3.0 | -1,910,935,550,058,162,200 | 35.274194 | 107 | 0.634801 | false |
gjo/python-codekitlang | codekitlang/command.py | 1 | 1629 | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import logging
import sys
from . import compiler
def _(s):
return s
def main():
parser = argparse.ArgumentParser(
prog='pykitlangc',
description=_('CodeKit Language Compiler.'),
)
parser.add_argument('src', nargs=1, metavar='SRC', help=_('input file'))
parser.add_argument('dest', nargs=1, metavar='DEST', help=_('output file'))
parser.add_argument(
'-f', '--framework-paths', metavar='DIR', action='append',
help=_('path for lookup include file (allow multiple defs)'),
)
parser.add_argument(
'--missing-file-behavior', metavar='BEHAVIOR', default='logonly',
choices=('ignore', 'logonly', 'exception'),
help=_('one of ignore, logonly, exception (default: logonly)'),
)
parser.add_argument(
'--missing-variable-behavior', metavar='BEHAVIOR', default='ignore',
choices=('ignore', 'logonly', 'exception'),
help=_('one of ignore, logonly, exception (default: ignore)'),
)
namespace = parser.parse_args()
options = vars(namespace)
src = options.pop('src')
dest = options.pop('dest')
logger = logging.getLogger('pykitlangc')
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.INFO)
options['logger'] = logger
compiler_ = compiler.Compiler(**options)
try:
compiler_.generate_to_file(dest[0], src[0])
except compiler.CompileError as e:
print(e.to_message(), file=sys.stderr)
sys.exit(1)
if __name__ == '__main__': # pragma:nocover
main()
| bsd-3-clause | -6,925,022,318,885,198,000 | 30.326923 | 79 | 0.622468 | false |
brainstorm/bcbio-nextgen | bcbio/ngsalign/star.py | 1 | 7746 | import os
import sys
import shutil
import subprocess
import contextlib
from collections import namedtuple
from bcbio.pipeline import config_utils
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.utils import (safe_makedir, file_exists, is_gzipped)
from bcbio.provenance import do
from bcbio import utils
from bcbio.log import logger
from bcbio.pipeline import datadict as dd
from bcbio.ngsalign import postalign
from bcbio.bam import fastq
CLEANUP_FILES = ["Aligned.out.sam", "Log.out", "Log.progress.out"]
ALIGN_TAGS = ["NH", "HI", "NM", "MD", "AS"]
def align(fastq_file, pair_file, ref_file, names, align_dir, data):
if not ref_file:
logger.error("STAR index not found. We don't provide the STAR indexes "
"by default because they are very large. You can install "
"the index for your genome with: bcbio_nextgen.py upgrade "
"--aligners star --genomes genome-build-name --data")
sys.exit(1)
max_hits = 10
srna = True if data["analysis"].lower().startswith("smallrna-seq") else False
srna_opts = ""
if srna:
max_hits = 1000
srna_opts = "--alignIntronMax 1"
config = data["config"]
star_dirs = _get_star_dirnames(align_dir, data, names)
if file_exists(star_dirs.final_out):
data = _update_data(star_dirs.final_out, star_dirs.out_dir, names, data)
return data
star_path = config_utils.get_program("STAR", config)
fastq_files = " ".join([fastq_file, pair_file]) if pair_file else fastq_file
num_cores = dd.get_num_cores(data)
gtf_file = dd.get_gtf_file(data)
if ref_file.endswith("chrLength"):
ref_file = os.path.dirname(ref_file)
with file_transaction(data, align_dir) as tx_align_dir:
tx_star_dirnames = _get_star_dirnames(tx_align_dir, data, names)
tx_out_dir, tx_out_file, tx_out_prefix, tx_final_out = tx_star_dirnames
safe_makedir(tx_align_dir)
safe_makedir(tx_out_dir)
cmd = ("{star_path} --genomeDir {ref_file} --readFilesIn {fastq_files} "
"--runThreadN {num_cores} --outFileNamePrefix {tx_out_prefix} "
"--outReadsUnmapped Fastx --outFilterMultimapNmax {max_hits} "
"--outStd SAM {srna_opts} "
"--outSAMunmapped Within --outSAMattributes %s " % " ".join(ALIGN_TAGS))
cmd += _add_sj_index_commands(fastq_file, ref_file, gtf_file) if not srna else ""
cmd += " --readFilesCommand zcat " if is_gzipped(fastq_file) else ""
cmd += _read_group_option(names)
fusion_mode = utils.get_in(data, ("config", "algorithm", "fusion_mode"), False)
if fusion_mode:
cmd += (" --chimSegmentMin 12 --chimJunctionOverhangMin 12 "
"--chimScoreDropMax 30 --chimSegmentReadGapMax 5 "
"--chimScoreSeparation 5 "
"--chimOutType WithinSAM ")
strandedness = utils.get_in(data, ("config", "algorithm", "strandedness"),
"unstranded").lower()
if strandedness == "unstranded" and not srna:
cmd += " --outSAMstrandField intronMotif "
if not srna:
cmd += " --quantMode TranscriptomeSAM "
cmd += " | " + postalign.sam_to_sortbam_cl(data, tx_final_out)
run_message = "Running STAR aligner on %s and %s" % (fastq_file, ref_file)
do.run(cmd.format(**locals()), run_message, None)
print("hello")
data = _update_data(star_dirs.final_out, star_dirs.out_dir, names, data)
return data
StarOutDirs = namedtuple(
'StarOutDirs',
['out_dir', 'out_file', 'out_prefix', 'final_out']
)
def _get_star_dirnames(align_dir, data, names):
ALIGNED_OUT_FILE = "Aligned.out.sam"
out_prefix = os.path.join(align_dir, dd.get_lane(data))
out_file = out_prefix + ALIGNED_OUT_FILE
out_dir = os.path.join(align_dir, "%s_star" % dd.get_lane(data))
final_out = os.path.join(out_dir, "{0}.bam".format(names["sample"]))
return StarOutDirs(out_dir, out_file, out_prefix, final_out)
def _add_sj_index_commands(fq1, ref_file, gtf_file):
"""
    newer versions of STAR can generate splice junction databases on the fly
this is preferable since we can tailor it to the read lengths
"""
if _has_sj_index(ref_file):
return ""
else:
rlength = fastq.estimate_maximum_read_length(fq1)
cmd = " --sjdbGTFfile %s " % gtf_file
cmd += " --sjdbOverhang %s " % str(rlength - 1)
return cmd
def _has_sj_index(ref_file):
"""this file won't exist if we can do on the fly splice junction indexing"""
return file_exists(os.path.join(ref_file, "sjdbInfo.txt"))
def _update_data(align_file, out_dir, names, data):
data = dd.set_work_bam(data, align_file)
data = dd.set_align_bam(data, align_file)
transcriptome_file = _move_transcriptome_file(out_dir, names)
data = dd.set_transcriptome_bam(data, transcriptome_file)
return data
def _move_transcriptome_file(out_dir, names):
out_file = os.path.join(out_dir, "{0}.transcriptome.bam".format(names["sample"]))
star_file = os.path.join(out_dir, os.pardir,
"{0}Aligned.toTranscriptome.out.bam".format(names["lane"]))
# if the out_file or the star_file doesn't exist, we didn't run the
# transcriptome mapping
if not file_exists(out_file):
if not file_exists(star_file):
return None
else:
shutil.move(star_file, out_file)
return out_file
def _read_group_option(names):
rg_id = names["rg"]
rg_sample = names["sample"]
rg_library = names["pl"]
rg_platform_unit = names["pu"]
rg_lb = ("LB:%s " % names.get("lb")) if names.get("lb") else ""
return (" --outSAMattrRGline ID:{rg_id} PL:{rg_library} "
"PU:{rg_platform_unit} SM:{rg_sample} {rg_lb}").format(**locals())
def _get_quality_format(config):
qual_format = config["algorithm"].get("quality_format", None)
if qual_format.lower() == "illumina":
return "fastq-illumina"
elif qual_format.lower() == "solexa":
return "fastq-solexa"
else:
return "fastq-sanger"
def remap_index_fn(ref_file):
"""Map sequence references to equivalent star indexes
"""
return os.path.join(os.path.dirname(os.path.dirname(ref_file)), "star")
def index(ref_file, out_dir, data):
"""Create a STAR index in the defined reference directory.
"""
(ref_dir, local_file) = os.path.split(ref_file)
gtf_file = dd.get_gtf_file(data)
if not utils.file_exists(gtf_file):
raise ValueError("%s not found, could not create a star index." % (gtf_file))
if not utils.file_exists(out_dir):
with tx_tmpdir(data, os.path.dirname(out_dir)) as tx_out_dir:
num_cores = dd.get_cores(data)
cmd = ("STAR --genomeDir {tx_out_dir} --genomeFastaFiles {ref_file} "
"--runThreadN {num_cores} "
"--runMode genomeGenerate --sjdbOverhang 99 --sjdbGTFfile {gtf_file}")
do.run(cmd.format(**locals()), "Index STAR")
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
shutil.move(tx_out_dir, out_dir)
return out_dir
def get_star_version(data):
star_path = config_utils.get_program("STAR", dd.get_config(data))
cmd = "%s --version" % star_path
subp = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True)
with contextlib.closing(subp.stdout) as stdout:
for line in stdout:
if "STAR_" in line:
version = line.split("STAR_")[1].strip()
return version
| mit | 9,177,149,942,006,414,000 | 40.42246 | 89 | 0.619287 | false |
e-integration/e-integration-edi-addons | eintegration_edi_manager/__init__.py | 1 | 1282 | ##############################################################################
# eintegration_edi_manager
# Copyright (c) 2016 e-integration GmbH (<http://www.e-integration.de>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program based on Odoo, formerly OpenERP
# Copyright (C) Odoo S.A. (<http://www.odoo.com>).
##############################################################################
import edi_document
import edi_document_create_wizard
import edi_document_partner
import edi_document_stage
import edi_template
import edi_manager_controllers
import sale_order
import account_invoice
import edi_export_field
| agpl-3.0 | -421,219,713,106,870,000 | 41.733333 | 78 | 0.651326 | false |
pauloacmelo/papelex_winthor | papelex_magento/customer.py | 1 | 3621 | # -*- coding: UTF-8 -*-
'''
magento.customer
Customer API for magento
:license: BSD, see LICENSE for more details
'''
from .api import API
class Customer(API):
"""
Customer API
Example usage::
from magento import Customer as CustomerAPI
with CustomerAPI(url, username, password) as customer_api:
return customer_api.list()
"""
__slots__ = ()
def list(self, filters=None):
"""
        Retrieve list of customers
:param filters: Dictionary of filters.
Format: `{<attribute>:{<operator>:<value>}}`
Example: `{'firstname':{'ilike':'sharoon'}}`
:return: List of dictionaries of matching records
"""
return self.call('customer.list', filters and [filters] or [{}])
def create(self, data):
"""
Create a customer using the given data
:param data: Dictionary of values
:return: Integer ID of new record
"""
return int(self.call('customer.create', [data]))
def info(self, id, attributes=None):
"""
Retrieve customer data
:param id: ID of customer
:param attributes: `List` of attributes needed
"""
if attributes:
return self.call('customer.info', [id, attributes])
else:
return self.call('customer.info', [id])
def update(self, id, data):
"""
Update a customer using the given data
:param id: ID of the customer record to modify
:param data: Dictionary of values
:return: Boolean
"""
return self.call('customer.update', [id, data])
def delete(self, id):
"""
Delete a customer
:param id: ID of customer to delete
:return: Boolean
"""
return self.call('customer.delete', [id])
class CustomerGroup(API):
"""
Customer Group API to connect to magento
"""
__slots__ = ()
def list(self):
"""
        Retrieve list of customers
:return: List of dictionaries of matching records
"""
return self.call('customer_group.list', [])
class CustomerAddress(API):
"""
Customer Address API
"""
__slots__ = ()
def list(self, customer_id):
"""
        Retrieve list of customer addresses
:param customer_id: ID of customer whose address needs to be fetched
:return: List of dictionaries of matching records
"""
return self.call('customer_address.list', [customer_id])
def create(self, customer_id, data):
"""
Create a customer using the given data
:param customer_id: ID of customer, whose address is being added
:param data: Dictionary of values (country, zip, city, etc...)
:return: Integer ID of new record
"""
return int(self.call('customer_address.create', [customer_id, data]))
def info(self, id):
"""
Retrieve customer data
:param id: ID of customer
"""
return self.call('customer_address.info', [id])
def update(self, id, data):
"""
Update a customer address using the given data
:param id: ID of the customer address record to modify
:param data: Dictionary of values
:return: Boolean
"""
return self.call('customer_address.update', [id, data])
def delete(self, id):
"""
Delete a customer address
:param id: ID of address to delete
:return: Boolean
"""
return self.call('customer_address.delete', [id])
| mit | -3,507,494,974,233,864,700 | 23.972414 | 77 | 0.570008 | false |
OpenLocalMap/OpenLocalMap | Python Version/xyCoordsLLPGClass.py | 1 | 1326 | # OpenLocalMap OpenSource web mapping for local government
# Copyright (C) <2014> <Ben Calnan>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import queryClass
class xyCoordsLLPGClass:
def __init__(self, uprn):
self.UPRN = uprn
self.getXYquery()
def getXYquery(self):
##print queryClass.getXYofLLPGpoint(self.UPRN)
self.xyQuery = queryClass.getXYofLLPGpoint(self.UPRN);
def getXYCoordsLLPG(self):
        # DBconn is assumed to come from the project's database helper module and
        # to expose setStid() and getSingleRow().
        self.xy_DB = DBconn("LLPG")
        self.xy_DB.setStid(self.xyQuery)
        self.xyCoords = self.xy_DB.getSingleRow()
        self.xyCoords = "{\"xyCoords\":[{\"X\":\"" + self.xyCoords['X'] + "\",\"Y\":\"" + self.xyCoords['Y'] + "\"}]}"
        return self.xyCoords
| gpl-3.0 | -4,400,104,537,916,759,000 | 34.837838 | 108 | 0.665913 | false |
devlights/try-python | trypython/stdlib/dis_/dis01.py | 1 | 2422 | # coding: utf-8
"""
Samples demonstrating the dis module.
"""
import dis
from trypython.common.commoncls import SampleBase
from trypython.common.commonfunc import hr
# noinspection SpellCheckingInspection
class Sample(SampleBase):
def exec(self):
        ##############################################
        # The dis module supports analysis of Python
        # bytecode.
        #
        # Broadly speaking, there are two ways to use it:
        # 1) dis.dis()
        # 2) dis.Bytecode()
        #
        # 1) disassembles the given object and prints the
        # result. If nothing is passed for the file argument,
        # it writes to standard output.
        #
        # 2) is an API added in Python 3.4.
        # It is invoked much like 1), but instead of printing
        # the result right away it wraps it in a Bytecode
        # object and returns it.
        #
        ##############################################
listcomp_str = 'r = [x for x in range(1000000) if x % 2 == 0]'
forloop_str = '''
r = []
for x in range(1000000):
if x % 2 == 0:
r.append(x)
'''
###############################################
# dis.dis()
###############################################
hr('dis.dis(listcomp_str)')
dis.dis(listcomp_str)
hr('dis.dis(forloop_str)')
dis.dis(forloop_str)
###############################################
# dis.Bytecode()
#
        # An API added to the dis module in Python 3.4.
        # It keeps the code object and the result of dis.code_info()
        # internally, which makes it the more convenient of the two.
###############################################
hr('dis.Bytecode(listcomp_str)')
listcomp_bytecode = dis.Bytecode(listcomp_str)
print(listcomp_bytecode.codeobj)
print(listcomp_bytecode.dis())
print(listcomp_bytecode.info())
hr('dis.Bytecode(forloop_str)')
forloop_bytecode = dis.Bytecode(forloop_str)
print(forloop_bytecode.codeobj)
print(forloop_bytecode.dis())
print(forloop_bytecode.info())
def go():
obj = Sample()
obj.exec()
| mit | 6,764,128,687,262,864,000 | 25.540541 | 70 | 0.471487 | false |
jhseu/tensorflow | tensorflow/compiler/tests/special_math_test.py | 1 | 3535 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for special math operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
from absl.testing import parameterized
import numpy as np
import scipy.special as sps
import six
from tensorflow.compiler.tests import xla_test
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
flags.DEFINE_bool('vary_seed', False,
('Whether to vary the PRNG seed unpredictably. '
'With --runs_per_test=N, produces N iid runs.'))
NUM_SAMPLES = int(1e3)
class IgammaTest(xla_test.XLATestCase, parameterized.TestCase):
def setUp(self):
if flags.FLAGS.vary_seed:
entropy = os.urandom(64)
if six.PY2:
answer = int(entropy.encode('hex'), 16)
else:
answer = int.from_bytes(entropy, 'big')
np.random.seed(answer)
super(IgammaTest, self).setUp()
@parameterized.parameters((np.float32, 1e-2, 1e-11),
(np.float64, 1e-4, 1e-30))
def testIgammaSmallValues(self, dtype, rtol, atol):
# Test values near zero.
x = np.random.uniform(
low=np.finfo(dtype).tiny, high=1., size=[NUM_SAMPLES]).astype(dtype)
a = np.random.uniform(
low=np.finfo(dtype).tiny, high=1., size=[NUM_SAMPLES]).astype(dtype)
expected_values = sps.gammainc(a, x)
with self.session() as sess:
with self.test_scope():
actual = sess.run(math_ops.igamma(a, x))
self.assertAllClose(expected_values, actual, atol=atol, rtol=rtol)
@parameterized.parameters((np.float32, 1e-2, 1e-11),
(np.float64, 1e-4, 1e-30))
def testIgammaMediumValues(self, dtype, rtol, atol):
    # Test values in the medium range (1 to 100).
x = np.random.uniform(low=1., high=100., size=[NUM_SAMPLES]).astype(dtype)
a = np.random.uniform(low=1., high=100., size=[NUM_SAMPLES]).astype(dtype)
expected_values = sps.gammainc(a, x)
with self.session() as sess:
with self.test_scope():
actual = sess.run(math_ops.igamma(a, x))
self.assertAllClose(expected_values, actual, atol=atol, rtol=rtol)
@parameterized.parameters((np.float32, 2e-2, 1e-5), (np.float64, 1e-4, 1e-30))
def testIgammaLargeValues(self, dtype, rtol, atol):
    # Test large values (100 to 1e4).
x = np.random.uniform(
low=100., high=int(1e4), size=[NUM_SAMPLES]).astype(dtype)
a = np.random.uniform(
low=100., high=int(1e4), size=[NUM_SAMPLES]).astype(dtype)
expected_values = sps.gammainc(a, x)
with self.session() as sess:
with self.test_scope():
actual = sess.run(math_ops.igamma(a, x))
self.assertAllClose(expected_values, actual, atol=atol, rtol=rtol)
if __name__ == '__main__':
os.environ['XLA_FLAGS'] = '--xla_cpu_enable_fast_math=false'
test.main()
| apache-2.0 | -5,683,867,419,198,845,000 | 34.707071 | 80 | 0.655446 | false |
rahulpsd18/twitter-sentiment-analysis | website/views.py | 1 | 2143 | from django.shortcuts import render
from django.http import HttpResponse
from django.utils.datastructures import MultiValueDictKeyError
import os
import requests
import tweepy
from textblob import TextBlob
### For Logging purposes to console.. disable in production
# import logging
# logger = logging.getLogger(__name__)
def twitterHero(data,size):
consumer_key=os.environ.get('CONSUMER_KEY')
consumer_secret=os.environ.get('CONSUMER_SECRET')
access_token=os.environ.get('ACCESS_TOKEN')
access_token_secret=os.environ.get('ACCESS_TOKEN_SECRET')
auth=tweepy.OAuthHandler(consumer_key,consumer_secret)
auth.set_access_token(access_token,access_token_secret)
api=tweepy.API(auth)
S=[]
counter=[0,0,0] # positive, negative, neutral
for tweet in tweepy.Cursor(api.search, q=data, rpp=100, count=20, result_type="recent", include_entities=True, lang="en").items(size):
# logger.log(100,tweet) # MASSIVE DATA DUMP for debugging
analysis=TextBlob(tweet.text)
if analysis.sentiment.polarity > 0:
res='positive'
counter[0]+=1
elif analysis.sentiment.polarity == 0:
res='neutral'
counter[2]+=1
else:
res='negative'
counter[1]+=1
S.append((tweet.text,analysis.sentiment,res,tweet.user.name,tweet.user.profile_image_url_https,tweet.user.screen_name))
positivePer=(counter[0]/size)*100
negativePer=(counter[1]/size)*100
neutralPer=(counter[2]/size)*100
S.append((positivePer,negativePer,neutralPer))
return S
def index(request):
return render(request,'website/home.html',{})
def form_data(request):
try:
data=request.POST['q']
size=int(request.POST['size'])
except MultiValueDictKeyError:
data='data science'
size=50
if data=='':
data='data science'
S=twitterHero(data,size)
# logger.log(100,"Called function.")
posPer,negPer,ntrPer=S[-1][0],S[-1][1],S[-1][2]
del S[-1]
return render(request,'website/index.html',{'data':S,'search':data,'posPer':posPer,'negPer':negPer,'ntrPer':ntrPer})
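
# Hypothetical local-run note (not part of the original file): twitterHero() reads
# the Twitter credentials from the environment, so something along these lines is
# assumed before starting the Django server:
#   export CONSUMER_KEY=... CONSUMER_SECRET=...
#   export ACCESS_TOKEN=... ACCESS_TOKEN_SECRET=...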
| mit | -6,747,758,861,474,120,000 | 31.469697 | 138 | 0.669622 | false |
romain-fontugne/ripeAtlasDetector | dataManipulation/mongoimport.py | 1 | 2772 | import os
import sys
import json
import glob
import pymongo
import datetime
import gzip
# def importMongo(path,collection, db=None):
# if db is None:
# client = pymongo.MongoClient("mongodb-ikebukuro")
# db = client.atlas
# col = db[collection]
# for filename in glob.glob(path):
# fi = open(filename)
# data = json.load(fi)
# print "%s: %s documents" % (filename, len(data))
# col.insert_many(data)
def importRangeOfDates(start, end, msmType, af=""):
if msmType != "builtin" and msmType != "anchor":
print "measurement type unknown!"
return
#TODO allow only af==4 or 6
    # thus data is stored in collections traceroute4 or traceroute6
if af=="":
aflabel = "4"
else:
aflabel = af
client = pymongo.MongoClient("mongodb-ikebukuro")
db = client.atlas
nbDays = (end-start).days
dateRange = [start+datetime.timedelta(x) for x in range(nbDays)]
for date in dateRange:
# colName = "traceroute_{year}_{month:02d}_{day:02d}".format(
colName = "traceroute{af}_{year}_{month:02d}_{day:02d}".format( af=af, year=date.year, month=date.month, day=date.day)
col = db[colName]
if date < datetime.datetime(2015,10,13):
# Data from Emile
filename = "/export/ripe/atlas/traceroute/{year}-{month:02d}-{day:02d}.gz".format(
year=date.year, month=date.month, day=date.day)
msmIdFile = "./%sMsmIdsv%s.txt" % (msmType, aflabel)
os.system("zcat %s | grep -f %s | mongoimport -h mongodb-ikebukuro -d atlas -c %s " % (
filename, msmIdFile, colName))
col.create_index("timestamp", background=True)
else:
pass # data are stored when downloaded
# Downloaded data
path = "../data/ipv{aflabel}/{msmType}/{year}/{month}/{year}-{month:02d}-{day:02d}*.json.gz".format(
aflabel=aflabel, msmType=msmType, year=date.year, month=date.month, day=date.day)
files = glob.glob(path)
files.sort() # insert data in chronological order
for filename in files:
fi = gzip.open(filename)
data = json.load(fi)
if len(data):
print filename
col.insert_many(data)
else:
print "%s is empty!" % filename
col.create_index("timestamp", background=True)
if __name__ == "__main__":
pass
# Don't use this:
# if len(sys.argv) < 3:
# print "usage: %s filesPattern collection" % sys.argv[0]
# else:
# importMongo(sys.argv[1], sys.argv[2])
| gpl-2.0 | 6,064,794,230,779,179,000 | 31.611765 | 126 | 0.557359 | false |
nortikin/sverchok | ui/nodeview_rclick_menu.py | 1 | 12854 | # This file is part of project Sverchok. It's copyrighted by the contributors
# recorded in the version control history of the file, available from
# its original location https://github.com/nortikin/sverchok/commit/master
#
# SPDX-License-Identifier: GPL3
# License-Filename: LICENSE
import bpy
from sverchok.utils.sv_node_utils import frame_adjust
from sverchok.menu import draw_add_node_operator
from sverchok.ui.presets import node_supports_presets, apply_default_preset
from sverchok.core.sockets import SvCurveSocket, SvSurfaceSocket, SvStringsSocket, SvSolidSocket
supported_mesh_viewers = {'SvMeshViewer', 'SvViewerDrawMk4'}
# for rclick i want convenience..
common_nodes = [
['GenVectorsNode', 'VectorsOutNode'],
['SvNumberNode', 'SvGenNumberRange'],
['SvScalarMathNodeMK4', 'SvVectorMathNodeMK3'],
['SvComponentAnalyzerNode'],
['---', 'NodeReroute', 'ListLengthNode']
]
def connect_idx_viewer(tree, existing_node, new_node):
# get connections going into vdmk2 and make a new idxviewer and connect the same sockets to that.
links = tree.links
links.new(existing_node.inputs[0].other, new_node.inputs[0])
def valid_active_node(nodes):
if nodes:
# a previously active node can remain active even when no nodes are selected.
if nodes.active and nodes.active.select:
return nodes.active
def has_outputs(node):
return node and len(node.outputs)
def get_output_sockets_map(node):
"""
because of inconsistent socket naming, we will use pattern matching (ignoring capitalization)
- verts: verts, vers, vertices, vectors, vecs (ver, vec)
- edges: edges, edgs, edgpol (edg)
- faces: faces, poly, pols, edgpol, (pol, fac)
For curves and surfaces checks if they belong to the corresponding class
> generally the first 3 outputs of a node will contain these
> generally if a node outputs polygons, it won't be necessary to connect edges
> if a node doesn't output polygons, only edges need to be connected
if the following code is in master, it will find the vast majority of mesh sockets,
in the case that it does not, dedicated lookup-tables for specific nodes are a consideration.
"""
output_map = {}
got_verts = False
got_edges = False
got_faces = False
got_curves = False
got_surface = False
got_solid = False
# we can surely use regex for this, but for now this will work.
for socket in node.outputs:
if socket.hide or socket.hide_safe:
continue
socket_name = socket.name.lower()
if not got_verts and ('ver' in socket_name or 'vec' in socket_name):
output_map['verts'] = socket.name
got_verts = True
elif not got_edges and 'edg' in socket_name and isinstance(socket, SvStringsSocket):
output_map['edges'] = socket.name
got_edges = True
elif not got_faces and ('face' in socket_name or 'pol' in socket_name) and isinstance(socket, SvStringsSocket):
output_map['faces'] = socket.name
got_faces = True
elif not got_curves and isinstance(socket, SvCurveSocket):
output_map['curve'] = socket.name
got_curves = True
elif not got_surface and isinstance(socket, SvSurfaceSocket):
output_map['surface'] = socket.name
got_surface = True
elif not got_solid and isinstance(socket, SvSolidSocket):
output_map['solid'] = socket.name
got_solid = True
return output_map
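
# A regex-based variant of the name matching above, as the in-loop comment
# ("we can surely use regex for this") suggests. This is only a sketch kept in a
# comment -- it is not used by the node, and the patterns simply mirror the
# substrings the loop already looks for (the socket-type checks are omitted).
#
#   import re
#   _SOCKET_PATTERNS = (
#       ('verts', re.compile(r'ver|vec', re.IGNORECASE)),
#       ('edges', re.compile(r'edg', re.IGNORECASE)),
#       ('faces', re.compile(r'face|pol', re.IGNORECASE)),
#   )
#
#   def match_socket_kind(socket_name):
#       for kind, pattern in _SOCKET_PATTERNS:
#           if pattern.search(socket_name):
#               return kind
#       return None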
def offset_node_location(existing_node, new_node, offset):
new_node.location = existing_node.location.x + offset[0] + existing_node.width, existing_node.location.y + offset[1]
def conect_to_3d_viewer(tree):
if hasattr(tree.nodes.active, 'viewer_map'):
view_node(tree)
else:
add_connection(tree, bl_idname_new_node="SvViewerDrawMk4", offset=[60, 0])
def view_node(tree):
    '''viewer_map is a node attribute that tells this operator how to visualize
    the node's data.
    It is a list with two items.
    The first item is a list of tuples; every tuple holds a node bl_idname and its offset from the previous node.
    The second item is a list of tuples; every tuple describes a link.
    A link is defined by two pairs of numbers, referring to the output and the input side.
    The first number of each pair identifies the node: 0 is the active node, 1 the first created node, and so on.
    The second number of each pair is the socket index.
    For example, to create a Viewer Draw with an offset of (60, 0) and connect the first output to the vertices input,
    the node would need to have this:
viewer_map = [
("SvViewerDrawMk4", [60, 0])
], [
([0, 0], [1, 0])
]
'''
nodes = tree.nodes
links = tree.links
existing_node = nodes.active
node_list = [existing_node]
output_map = existing_node.viewer_map
previous_state = tree.sv_process
tree.sv_process = False
for node in output_map[0]:
bl_idname_new_node, offset = node
new_node = nodes.new(bl_idname_new_node)
apply_default_preset(new_node)
offset_node_location(node_list[-1], new_node, offset)
frame_adjust(node_list[-1], new_node)
node_list.append(new_node)
for link in output_map[1]:
output_s, input_s = link
links.new(node_list[output_s[0]].outputs[output_s[1]],
node_list[input_s[0]].inputs[input_s[1]])
tree.sv_process = previous_state
tree.update()
def add_connection(tree, bl_idname_new_node, offset):
nodes = tree.nodes
links = tree.links
output_map = get_output_sockets_map(nodes.active)
existing_node = nodes.active
if isinstance(bl_idname_new_node, str):
# single new node..
new_node = nodes.new(bl_idname_new_node)
apply_default_preset(new_node)
offset_node_location(existing_node, new_node, offset)
frame_adjust(existing_node, new_node)
outputs = existing_node.outputs
inputs = new_node.inputs
if existing_node.bl_idname in supported_mesh_viewers and bl_idname_new_node == 'SvIDXViewer28':
new_node.draw_bg = True
connect_idx_viewer(tree, existing_node, new_node)
elif bl_idname_new_node == 'SvStethoscopeNodeMK2':
# we can't determine thru cursor location which socket was nearest the rightclick
# maybe in the future.. or if someone does know :)
for socket in outputs:
if socket.hide:
continue
# connect_stethoscope to first visible output socket of active node
links.new(socket, inputs[0])
break
tree.update() # without this the node won't show output until an update is triggered manually
# existing_node.process_node(None)
elif bl_idname_new_node == 'SvViewerDrawMk4':
previous_state = tree.sv_process
tree.sv_process = False
if 'verts' in output_map:
links.new(outputs[output_map['verts']], inputs[0])
if 'faces' in output_map:
links.new(outputs[output_map['faces']], inputs[2])
if 'edges' in output_map:
links.new(outputs[output_map['edges']], inputs[1])
elif 'curve' in output_map:
eval_node = nodes.new('SvExEvalCurveNode')
apply_default_preset(eval_node)
offset_node_location(existing_node, eval_node, offset)
frame_adjust(existing_node, eval_node)
offset_node_location(eval_node, new_node, offset)
frame_adjust(eval_node, new_node)
links.new(outputs[output_map['curve']], eval_node.inputs[0])
links.new(eval_node.outputs[0], inputs[0])
links.new(eval_node.outputs[1], inputs[1])
elif 'surface' in output_map:
eval_node = nodes.new('SvExEvalSurfaceNode')
apply_default_preset(eval_node)
offset_node_location(existing_node, eval_node, offset)
frame_adjust(existing_node, eval_node)
offset_node_location(eval_node, new_node, offset)
frame_adjust(eval_node, new_node)
links.new(outputs[output_map['surface']], eval_node.inputs[0])
links.new(eval_node.outputs[0], inputs[0])
links.new(eval_node.outputs[1], inputs[1])
links.new(eval_node.outputs[2], inputs[2])
elif 'solid' in output_map:
tree.nodes.remove(new_node)
new_node = nodes.new('SvSolidViewerNode')
apply_default_preset(new_node)
offset_node_location(existing_node, new_node, offset)
frame_adjust(existing_node, new_node)
links.new(outputs[output_map['solid']], new_node.inputs[0])
tree.sv_process = previous_state
tree.update()
# existing_node.process_node(None)
else:
...
elif isinstance(bl_idname_new_node, list):
# maybe vdmk2 + indexviewer
...
class SvGenericDeligationOperator(bpy.types.Operator):
bl_idname = "node.sv_deligate_operator"
bl_label = "Execute generic code"
fn: bpy.props.StringProperty(default='')
def execute(self, context):
tree = context.space_data.edit_tree
if self.fn == 'vdmk2':
conect_to_3d_viewer(tree)
elif self.fn == 'vdmk2 + idxv':
add_connection(tree, bl_idname_new_node=["SvViewerDrawMk4", "SvIDXViewer28"], offset=[180, 0])
elif self.fn == '+idxv':
add_connection(tree, bl_idname_new_node="SvIDXViewer28", offset=[180, 0])
elif self.fn == 'stethoscope':
add_connection(tree, bl_idname_new_node="SvStethoscopeNodeMK2", offset=[60, 0])
return {'FINISHED'}
class SvNodeviewRClickMenu(bpy.types.Menu):
bl_label = "Right click menu for Sverchok"
bl_idname = "NODEVIEW_MT_sv_rclick_menu"
@classmethod
def poll(cls, context):
tree_type = context.space_data.tree_type
return tree_type in {'SverchCustomTreeType', }
def draw(self, context):
layout = self.layout
layout.operator_context = 'INVOKE_REGION_WIN'
tree = context.space_data.edit_tree
try:
nodes = tree.nodes
except:
layout.operator("node.new_node_tree", text="New Sverchok Node Tree", icon="RNA_ADD")
return
node = valid_active_node(nodes)
if node:
if hasattr(node, "rclick_menu"):
node.rclick_menu(context, layout)
layout.separator()
if len(node.outputs):
layout.menu('SV_MT_AllSocketsOptionsMenu', text='Outputs post-process')
layout.separator()
if node.bl_idname in {'SvViewerDrawMk4', 'SvBmeshViewerNodeMK2'}:
layout.operator("node.sv_deligate_operator", text="Connect IDXViewer").fn = "+idxv"
else:
if has_outputs(node):
layout.operator("node.sv_deligate_operator", text="Connect ViewerDraw").fn = "vdmk2"
if len(node.outputs):
layout.operator("node.sv_deligate_operator", text="Connect stethoscope").fn = "stethoscope"
layout.separator()
if node_supports_presets(node):
layout.menu('SV_MT_LoadPresetMenu', text="Node Presets")
if node and node.bl_idname == 'NodeFrame':
# give options for Frame nodes..
col = layout.column(align=True)
col.prop(node, 'label', text='', icon='NODE')
col.prop(node, 'use_custom_color')
if node.use_custom_color:
col.prop(node, 'color', text='')
col.prop(node, 'label_size', slider=True)
col.prop(node, 'shrink')
layout.separator()
layout.menu("NODEVIEW_MT_Dynamic_Menu", text='node menu')
# layout.operator("node.duplicate_move")
self.draw_conveniences(context, node)
def draw_conveniences(self, context, node):
layout = self.layout
layout.separator()
for nodelist in common_nodes:
for named_node in nodelist:
if named_node == '---':
layout.separator()
else:
draw_add_node_operator(layout, named_node)
def register():
bpy.utils.register_class(SvGenericDeligationOperator)
bpy.utils.register_class(SvNodeviewRClickMenu)
def unregister():
bpy.utils.unregister_class(SvNodeviewRClickMenu)
bpy.utils.unregister_class(SvGenericDeligationOperator)
| gpl-3.0 | 4,325,409,515,242,083,300 | 36.805882 | 121 | 0.619885 | false |